# File: /AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/stats_scripts/NMF_Analysis.py
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=UserWarning) ### hides import warnings
import numpy as np
from sklearn.cluster import KMeans
import nimfa
from sklearn.decomposition import NMF
import os.path
from collections import defaultdict
import traceback
import export
from visualization_scripts import Orderedheatmap
#import statistics
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def filterRows(input_file,output_file,filterDB=None,logData=False):
orderlst={}
counter=[]
export_object = open(output_file,'w')
firstLine = True
Flag=0
for line in open(input_file,'rU').xreadlines(): ### Original expression file (source IDs)
#for i in filterDB:
flag1=0
data = cleanUpLine(line)
values = string.split(data,'\t')
if firstLine:
firstLine = False
if Flag==0:
export_object.write(line)
else:
#print values[0], filterDB
#sys.exit()
uid = values[0]
if uid in filterDB:
counter=[index for index, value in enumerate(filterDB) if value == uid]
for it in range(0,len(counter)):
orderlst[counter[it]]=line
for i in range(0,len(orderlst)):
try:
export_object.write(orderlst[i])
except Exception:
print i,filterDB[i]
continue
export_object.close()
print 'Filtered rows printed to:',output_file
def FilterGuideGeneFile(Guidefile,Guidefile_block,expressionInputFile,iteration,platform,uniqueIDs,symbolIDs):
""" Filters the original input expression file for Guide3 genes/events. Needed
Since NMF only can deal with positive values [Guide3 has negative values]"""
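    # Hedged aside (not part of the original function): NMF factorizations such as
    # nimfa.Snmf used in NMFAnalysis() below require a non-negative input matrix,
    # which is why the Guide3 results are re-extracted from the original expression
    # file here. A quick guard over a candidate matrix could look like
    #   assert (np.array(vals) >= 0).all()   # vals is a hypothetical filtered matrix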
root_dir = export.findParentDir(expressionInputFile)[:-1]
if 'ExpressionInput' in root_dir:
root_dir = export.findParentDir(root_dir)
if 'Clustering' in Guidefile:
count=1
flag=True
rank_Count=0
prev=0
else:
count=0
val=[]
head=0
for line in open(Guidefile_block,'rU').xreadlines():
if head >count:
line=line.rstrip('\r\n')
q= string.split(line,'\t')
#val.append(q[0])
if flag:
if int(q[1])==prev:
continue
else:
rank_Count+=1
prev=int(q[1])
else:
head+=1
continue
head=0
for line in open(Guidefile,'rU').xreadlines():
line=line.rstrip('\r\n')
q= string.split(line,'\t')
n=len(q)
if head >count:
line=line.rstrip('\r\n')
q= string.split(line,'\t')
uid = q[0]
if uid not in uniqueIDs:
if uid in symbolIDs:
uid = symbolIDs[uid]
val.append(uid)
else:
continue
val.append(uid)
if platform != "PSI" and head==2:
rank_Count=rank_Count+int(q[1])
print rank_Count
head=head+1
else:
head+=1
if platform != "PSI" and q[0]=="column_clusters-flat":
rank_Count=int(q[n-1])
continue
output_dir = root_dir+'/NMF-SVM'
if os.path.exists(output_dir)==False:
export.createExportFolder(output_dir)
output_file = output_dir+'/NMFInput-Round'+str(iteration)+'.txt'
filterRows(expressionInputFile,output_file,filterDB=val)
return output_file,rank_Count
def NMFAnalysis(expressionInputFile,NMFinputDir,Rank,platform,iteration=0,strategy="conservative"):
root_dir = export.findParentDir(NMFinputDir)[:-1]
if 'ExpressionInput' in root_dir:
root_dir = export.findParentDir(root_dir)
if 'NMF-SVM' in root_dir:
root_dir = export.findParentDir(root_dir)
export.findFilename(NMFinputDir)
X=[]
header=[]
head=0
exportnam=root_dir+'/NMF-SVM/NMF/round'+str(iteration)+'NMFsnmf_versionr'+str(Rank)+'.txt'
export_res=export.ExportFile(exportnam)
exportnam_bin=root_dir+'/NMF-SVM/NMF/round'+str(iteration)+'NMFsnmf_binary'+str(Rank)+'.txt'
export_res1=export.ExportFile(exportnam_bin)
exportnam_bint=root_dir+'/NMF-SVM/NMF/round'+str(iteration)+'NMFsnmf_binary_t_'+str(Rank)+'.txt'
export_res5=export.ExportFile(exportnam_bint)
MF_input = root_dir+'/NMF-SVM/ExpressionInput/exp.NMF-MarkerFinder.txt'
export.customFileCopy(expressionInputFile,root_dir+'/NMF-SVM/ExpressionInput/exp.NMF-MarkerFinder.txt')
export_res4=open(string.replace(MF_input,'exp.','groups.'),"w")
export_res7=open(string.replace(MF_input,'exp.','comps.'),"w")
exportnam2=root_dir+'/NMF-SVM/SubtypeAnalyses/round'+str(iteration)+'Metadata'+str(Rank)+'.txt'
export_res2=export.ExportFile(exportnam2)
exportnam3=root_dir+'/NMF-SVM/SubtypeAnalyses/round'+str(iteration)+'Annotation'+str(Rank)+'.txt'
export_res3=export.ExportFile(exportnam3)
#if 'Clustering' in NMFinputDir:
# count=1
# start=2
#else:
count=0
start=1
#print Rank
for line in open(NMFinputDir,'rU').xreadlines():
line=line.rstrip('\r\n')
q= string.split(line,'\t')
if head >count:
val=[]
val2=[]
me=0.0
for i in range(start,len(q)):
try:
val2.append(float(q[i]))
except Exception:
continue
me=np.median(val2)
for i in range(start,len(q)):
try:
val.append(float(q[i]))
except Exception:
val.append(float(me))
#if q[1]==prev:
X.append(val)
else:
export_res1.write(line)
export_res.write(line)
export_res1.write("\n")
#export_res4.write(line)
#export_res4.write("\n")
export_res.write("\n")
header=q
head+=1
continue
group=defaultdict(list)
sh=[]
X=np.array(X)
#print X.shape
mat=[]
#mat=X
mat=zip(*X)
mat=np.array(mat)
#print mat.shape
#model = NMF(n_components=15, init='random', random_state=0)
#W = model.fit_transform(mat)
nmf = nimfa.Snmf(mat,seed="nndsvd", rank=int(Rank), max_iter=20,n_run=1,track_factor=False,theta=0.95)
nmf_fit = nmf()
W = nmf_fit.basis()
W=np.array(W)
#np.savetxt("basismatrix2.txt",W,delimiter="\t")
H=nmf_fit.coef()
H=np.array(H)
# np.savetxt("coefficientmatrix2.txt",H,delimiter="\t")
#print W.shape
sh=W.shape
export_res3.write("uid\tUID\tUID\n")
if int(Rank)==2:
par=1
else:
par=2
#for i in range(sh[1]):
# val=W[:,i]
# me=np.mean(val)
# st=np.std(val)
# export_res2.write(header[i+1])
# for j in range(sh[0]):
# if float(W[i][j])>=float(me+(par*st)):
#
# export_res2.write("\t"+str(1))
# else:
# export_res2.write("\t"+str(0))
#
# export_res2.write("\n")
if platform != 'PSI':
sh=W.shape
Z=[]
export_res5.write("uid")
export_res2.write("uid")
for i in range(sh[1]):
export_res5.write("\t"+'V'+str(i))
export_res2.write("\t"+'V'+str(i))
export_res3.write('V'+str(i)+"\t"+"Covariate"+"\t"+str(1)+"\n")
export_res5.write("\n")
export_res2.write("\n")
export_res3.write("\n")
for i in range(sh[0]):
new_val=[]
val=W[i,:]
export_res2.write(header[i+1])
export_res5.write(header[i+1])
export_res4.write(header[i+1])
flag=True
for j in range(sh[1]):
if W[i][j]==max(val) and flag:
export_res5.write("\t"+str(1))
export_res2.write("\t"+str(1))
new_val.append(1)
export_res4.write("\t"+str(j+1)+"\t"+'V'+str(j))
flag=False
else:
export_res5.write("\t"+str(0))
export_res2.write("\t"+str(0))
new_val.append(0)
Z.append(new_val)
export_res5.write("\n")
export_res2.write("\n")
export_res4.write("\n")
W=zip(*W)
W=np.array(W)
sh=W.shape
Z=zip(*Z)
Z=np.array(Z)
for i in range(sh[0]):
export_res.write('V'+str(i))
export_res1.write('V'+str(i))
for j in range(sh[1]):
export_res.write("\t"+str(W[i][j]))
export_res1.write("\t"+str(Z[i][j]))
export_res.write("\n")
export_res1.write("\n")
export_res.close()
export_res1.close()
export_res2.close()
export_res5.close()
Orderedheatmap.Classify(exportnam_bint)
return exportnam,exportnam_bin,exportnam2,exportnam3
else:
W=zip(*W)
W=np.array(W)
sh=W.shape
Z=[]
for i in range(sh[0]):
new_val=[]
val=W[i,:]
num=sum(i > 0.10 for i in val)
if num >40 or num <3:
compstd=True
else:
compstd=False
me=np.mean(val)
st=np.std(val)
#print 'V'+str(i)
export_res.write('V'+str(i))
export_res1.write('V'+str(i))
for j in range(sh[1]):
if compstd:
if float(W[i][j])>=float(me+(par*st)):
export_res1.write("\t"+str(1))
new_val.append(1)
else:
export_res1.write("\t"+str(0))
new_val.append(0)
else:
if float(W[i][j])>0.1:
export_res1.write("\t"+str(1))
new_val.append(1)
else:
export_res1.write("\t"+str(0))
new_val.append(0)
export_res.write("\t"+str(W[i][j]))
Z.append(new_val)
export_res.write("\n")
export_res1.write("\n")
# Z=zip(*Z)
Z=np.array(Z)
sh=Z.shape
Z_new=[]
val1=[]
Z1=[]
dellst=[]
export_res2.write("uid")
export_res5.write("uid")
for i in range(sh[0]):
indices=[]
val1=Z[i,:]
sum1=sum(val1)
flag=False
indices=[index for index, value in enumerate(val1) if value == 1]
for j in range(sh[0]):
val2=[]
if i!=j:
val2=Z[j,:]
sum2=sum([val2[x] for x in indices])
summ2=sum(val2)
try:
if float(sum2)/float(sum1)>0.5:
if summ2>sum1:
flag=True
#print str(i)
except Exception:
continue
if flag==False:
Z1.append(val1)
export_res2.write("\t"+'V'+str(i))
export_res5.write("\t"+'V'+str(i))
export_res3.write('V'+str(i)+"\t"+"Covariate"+"\t"+str(1)+"\n")
export_res2.write("\n")
export_res5.write("\n")
Z1=np.array(Z1)
Z=Z1
Z=zip(*Z)
Z=np.array(Z)
sh=Z.shape
for i in range(sh[0]):
val1=Z[i,:]
#print sum(val1)
#if sum(val)>2:
if sum(val1)>2:
val=[0 if x==1 else x for x in val1]
else:
val=val1
me=np.mean(val)
st=np.std(val)
export_res2.write(header[i+1])
export_res5.write(header[i+1])
for j in range(sh[1]):
if strategy=="conservative":
export_res2.write("\t"+str(val1[j]))
export_res5.write("\t"+str(val1[j]))
else:
export_res2.write("\t"+str(val[j]))
export_res5.write("\t"+str(val[j]))
export_res2.write("\n")
export_res5.write("\n")
Z_new.append(val)
Z_new=zip(*Z_new)
Z_new=np.array(Z_new)
sh=Z_new.shape
export_res5.close()
Orderedheatmap.Classify(exportnam_bint)
if strategy=="conservative":
return exportnam,exportnam_bin,exportnam2,exportnam3
else:
return exportnam,exportnam_bin,exportnam2,exportnam3
if __name__ == '__main__':
import getopt
mutdict=defaultdict(list)
    ################ Command-line arguments ################
if len(sys.argv[1:])<=1: ### Indicates that there are insufficient number of command-line arguments
print "Warning! Insufficient command line flags supplied."
sys.exit()
else:
analysisType = []
options, remainder = getopt.getopt(sys.argv[1:],'', ['Guidefile=','Rank=','PSI='])
for opt, arg in options:
if opt == '--Guidefile': Guidefile=arg
elif opt == '--Rank':Rank=arg
elif opt == '--PSI':PSI=arg
else:
print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit()
inputfile=Guidefile
Rank=Rank
if Rank>1:
NMFAnalysis(inputfile,Rank,platform="RNASeq")
else:
            pass
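# Hedged usage sketch (not part of the original script): the getopt block above
# accepts --Guidefile, --Rank and optionally --PSI, so a typical Python 2
# invocation could look like the following (the Guidefile path is hypothetical):
#   python NMF_Analysis.py --Guidefile=/path/to/Guide3-results.txt --Rank=2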
# File: /CWR-API-0.0.40.tar.gz/CWR-API-0.0.40/cwr/parser/encoder/file.py
from config_cwr.accessor import CWRConfiguration
from cwr.parser.encoder.common import Encoder
from cwr.parser.encoder.standart.record import CwrRecordEncoderFactory
from data_cwr.accessor import CWRTables
import difflib
"""
Parsers for encoding CWR model classes, creating a text string for them which
complies with the CWR standard.
While the encoder classes are accessible, they are meant to be used directly
only when creating custom versions; by default the factory methods
old_filename_encoder() and default_filename_encoder() should be used to
acquire the encoders to use when creating a file.
The old_filename_encoder() method will return an encoder which creates a
filename following the old specification, where the numeric sequence consists
of two numbers, while the default_filename_encoder() will create a filename
following the new specification, where the sequence consists of four numbers.
Both encoders require a FileTag containing valid values, which will be
transformed into the resulting string.
These encoders are created from BaseCWRFileNameEncoder, just setting the
correct sequence number length.
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
def old_filename_encoder():
"""
Creates an encoder which parses a CWR file name from a FileTag object.
This encoder will follow the old format, where the sequence number is only
two digits longs.
It should be noted that this format has been made obsolete, and files now
should use four digits.
These old file names follow the pattern CWyynnsss_rrr.Vxx.
:return: an encoder for filenames using the old convention
"""
return BaseCWRFileNameEncoder(2)
def default_filename_encoder():
"""
Creates an encoder which parses a CWR file name from a FileTag object.
This encoder will follow the CWR naming convention update done by the CWR
Management Committee, which increased the sequence length from two digits
to four.
    After this change file names no longer follow the CISAC CWR standard, but
    it allows for a higher number of CWR file transmissions.
    These new file names follow the pattern CWyynnnnsss_rrr.Vxx.
:return: an encoder for filenames using the new convention
"""
return BaseCWRFileNameEncoder(4)
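# Hedged usage sketch (not part of the original module): encode() only reads the
# year, sequence_n, sender, receiver and version attributes of its tag, so any
# stand-in object works for illustration (e.g. types.SimpleNamespace on Python 3):
#   tag = SimpleNamespace(year=2017, sequence_n=22, sender='AB2',
#                         receiver='234', version=2.1)
#   old_filename_encoder().encode(tag)      # -> 'CW1722AB2_234.V21'
#   default_filename_encoder().encode(tag)  # -> 'CW170022AB2_234.V21'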
class BaseCWRFileNameEncoder(Encoder):
"""
Parses a CWR file name from a FileTag class.
As the file name is a very precise type of string, this parsing will be
lossless, meaning that all the information from the file name will be
always parsed into the resulting object.
CWR file names follow the pattern CWyynnnnsss_rrr.Vxx, where each section
means the following:
CW - Header indicating it is a CWR file.
yy - Year.
nnnn - Sequence. This was originally 2 numbers, later changed to 4.
sss - Sender. 2 or 3 digits.
rrr - Receiver. 2 or 3 digits.
xx - Version of the CWR standard (version x.x).
As the Sequence number length was changed, the encoder will require this
length to be indicated.
"""
# Delimiters
_header = 'CW'
_ip_delimiter = '_'
_version_delimiter = '.V'
def __init__(self, sequence_l):
super(BaseCWRFileNameEncoder, self).__init__()
self._sequence_l = sequence_l
def encode(self, tag):
"""
Parses a CWR file name from a FileTag object.
The result will be a string following the format CWyynnnnsss_rrr.Vxx,
where the numeric sequence will have the length set on the encoder's
constructor.
:param tag: FileTag to parse
:return: a string file name parsed from the FileTag
"""
# Acquires sequence number
sequence = str(tag.sequence_n)
        # If the sequence is bigger than the max, it is cut
if len(sequence) > self._sequence_l:
sequence = sequence[:self._sequence_l]
        # If the sequence is smaller than the max, it is padded with zeroes
while len(sequence) < self._sequence_l:
sequence = '0' + sequence
# Acquires version
version = str(tag.version)
# If the version is too long only the first and last number are taken,
# to remove decimal separator
if len(version) > 2:
version = version[:1] + version[-1:]
# If the version is too short, it is padded with zeroes
while len(version) < 2:
version = '0' + version
# Acquires year
# Only the two last digits of the year are used
year = str(tag.year)[-2:]
# Acquires sender and receiver
sender = tag.sender[:3]
receiver = tag.receiver[:3]
rule = self._header + year + sequence + sender
rule = rule + self._ip_delimiter + receiver + ".V" + version
return rule
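    # Illustrative only (derived from the branches above): with sequence_l=4 a
    # sequence_n of 3 is padded to '0003', and a version of 2.1 becomes '21',
    # i.e. the decimal separator is dropped before building the .Vxx suffix.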
class CwrFileEncoder(Encoder):
"""
Encodes a CWR class instance into a cwr binary format.
"""
_counter = 0
content = []
def __init__(self, record_configs, fields_configs, content=[]):
super(CwrFileEncoder, self).__init__()
self.record_encoder_factory = CwrRecordEncoderFactory(record_configs, fields_configs)
self.content = content
def _record_encode(self, entity):
encoder = self.record_encoder_factory.get_encoder(entity)
result = encoder.encode(entity)
return result
def encode(self, transmission):
"""
Encodes the data, creating a CWR structure from an instance from the
domain model.
        :param transmission: the transmission instance to encode
:return: a cwr string structure created from the received data
"""
data = ''
data += self._record_encode(transmission.header)
for group in transmission.groups:
data += self._record_encode(group.group_header)
for transaction in group.transactions:
for record in transaction:
data += self._record_encode(record)
data += self._record_encode(group.group_trailer)
data += self._record_encode(transmission.trailer)
return data
def default_file_encoder():
"""
Get default encoder cwr file
:return:
"""
config = CWRConfiguration()
field_configs = config.load_field_config('table')
field_configs.update(config.load_field_config('common'))
field_values = CWRTables()
for entry in field_configs.values():
if 'source' in entry:
values_id = entry['source']
entry['values'] = field_values.get_data(values_id)
record_configs = config.load_record_config('common')
    return CwrFileEncoder(record_configs, field_configs)
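# Hedged usage sketch (not part of the original module): once configured, the
# encoder turns a parsed CWR transmission back into CWR text, e.g.
#   encoder = default_file_encoder()
#   cwr_text = encoder.encode(transmission)  # transmission: header, groups, trailer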
# File: /FileTransfer-0.11.0-py3-none-any.whl/filetransfer/__main__.py
import base64
import hashlib
import logging
import os
import sys
from contextlib import suppress
from paramiko import HostKeys, Transport, SSHException
from salmagundi import strings
from salmagundi.utils import docopt_helper
from . import __version__, set_sigterm_handler, config, job, utils
from .const import SSH_PORT, ExitCodes
from .exceptions import Error, ConfigError, ConnectError, Terminated
_PROGNAME = 'FileTransfer'
_ENV_VAR_NAME = 'FILETRANSFER_CFG'
_logger = logging.getLogger(__name__)
def main():
"""Execute command."""
args = docopt_helper(__doc__.split('\n', 2)[2],
version_str=f'{_PROGNAME} {__version__}',
err_code=ExitCodes.CMDLINE.code,
name=_PROGNAME.lower(),
envvar=_ENV_VAR_NAME,
sshport=SSH_PORT,
exit_codes=ExitCodes.as_doc())
if args['--hostkey']:
return _get_hostkey(args)
if args['--delete']:
return _del_hostkey(args)
return _run_filetransfer(args)
def _run_filetransfer(args):
set_sigterm_handler()
verbose = args['--verbose']
try:
if args['--config']:
cfg_file = args['--config']
else:
cfg_file = os.getenv(_ENV_VAR_NAME)
app_cfg, job_cfg = config.configure(cfg_file, args['JOBID'])
result, status = job.run(app_cfg, job_cfg)
if verbose:
print(f'Job finished: {result}')
except Error as ex:
_handle_exception(verbose, ex)
status = ex.code
except (KeyboardInterrupt, Terminated) as ex:
_handle_exception(verbose, ex)
status = Terminated.code
except Exception as ex:
# should not happen but may be useful for debugging
_handle_exception(verbose, ex)
status = ExitCodes.FAILURE.code
_logger.debug('exit status=%d', status)
return status
def _handle_exception(verbose, exc):
if verbose:
with suppress(AttributeError):
print(f'Job finished: {exc.result}')
if verbose == 1:
if str(exc).strip():
print(f'{exc.__class__.__name__}: {exc}', file=sys.stderr)
else:
print(exc.__class__.__name__, file=sys.stderr)
if verbose == 2:
import traceback
traceback.print_exc()
def _get_hostkey(args):
host = args['HOST']
file = args['FILE']
hash_ = args['--hash']
try:
port = strings.str2port(args['--port'])
with Transport((host, port)) as transport:
transport.start_client()
hostkey = transport.get_remote_server_key()
name = hostkey.get_name().split('-', 1)[1].upper()
# same fingerprints as the OpenSSH commands generate
print(f'{name} ({hostkey.get_bits()}) Fingerprints:')
fp_md5 = hashlib.md5()
fp_md5.update(hostkey.asbytes())
fp_md5_dig = strings.insert_separator(fp_md5.hexdigest(), ':', 2)
print(f' MD5: {fp_md5_dig}')
fp_sha = hashlib.sha256()
fp_sha.update(hostkey.asbytes())
fp_sha_dig = base64.b64encode(fp_sha.digest()).decode().strip('=')
print(f' SHA256: {fp_sha_dig}')
while True:
a = input(f'Save this key to file "{file}" (yes/no)? ').lower()
if a in ('yes', 'no'):
break
print('Type "yes" or "no"!')
if a != 'no':
hostname = utils.format_knownhost(host, port)
hostkeys = HostKeys()
addkey = True
if os.path.exists(file):
hostkeys.load(file)
if hostkeys.lookup(hostname):
if hostkeys.check(hostname, hostkey):
print(f'Key for "{hostname}" exists'
f' in file "{file}"')
addkey = False
else:
del hostkeys[hostname]
print(f'Key for "{hostname}" replaced'
f' in file "{file}"')
else:
print(f'Key for "{hostname}" added in file "{file}"')
else:
print(f'Key for "{hostname}" added in new file "{file}"')
if addkey:
if hash_:
hostname = HostKeys.hash_host(hostname)
hostkeys.add(hostname, hostkey.get_name(), hostkey)
hostkeys.save(file)
except ConfigError as ex:
print(ex, file=sys.stderr)
return ConfigError.code
except (OSError, SSHException) as ex:
print(ex, file=sys.stderr)
return ConnectError.code
except Exception as ex:
print(repr(ex), file=sys.stderr)
return ExitCodes.FAILURE.code
return ExitCodes.SUCCESS.code
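# Hedged sketch (not part of the original module): the fingerprints printed by
# _get_hostkey() match what the OpenSSH tooling reports; without salmagundi they
# could be computed directly as
#   md5_hex = hashlib.md5(hostkey.asbytes()).hexdigest()
#   fp_md5 = ':'.join(md5_hex[i:i + 2] for i in range(0, len(md5_hex), 2))
#   sha_raw = hashlib.sha256(hostkey.asbytes()).digest()
#   fp_sha = base64.b64encode(sha_raw).decode().rstrip('=')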
def _del_hostkey(args):
host = args['HOST']
file = args['FILE']
try:
port = strings.str2port(args['--port'])
hostname = utils.format_knownhost(host, port)
hostkeys = HostKeys()
hostkeys.load(file)
if hostkeys.lookup(hostname):
del hostkeys[hostname]
hostkeys.save(file)
print(f'Key for "{hostname}" deleted in file "{file}"')
else:
print(f'Key for "{hostname}" not found in file "{file}"')
except (FileNotFoundError, ConfigError) as ex:
print(ex, file=sys.stderr)
return ConfigError.code
except Exception as ex:
print(repr(ex), file=sys.stderr)
return ExitCodes.FAILURE.code
return ExitCodes.SUCCESS.code
if __name__ == '__main__':
    sys.exit(main())
# File: /Cantonese-1.0.7-py3-none-any.whl/src/Compile.py
import dis
"""
1st return register
"""
class Register_rax(object):
def __init__(self) -> None:
self.used = False
"""
2nd return register
"""
class Register_rdx(object):
def __init__(self) -> None:
self.used = False
class Register_rcx(object):
def __init__(self) -> None:
self.used = False
class Register_ecx(object):
def __init__(self) -> None:
self.used = False
"""
used to pass 5th argument to functions
"""
class Register_r8(object):
def __init__(self) -> None:
self.used = False
"""
Used to pass 6th argument to functions
"""
class Register_r9(object):
def __init__(self) -> None:
self.used = False
"""
temp register
"""
class Register_r10(object):
def __init__(self) -> None:
self.used = False
class AsmRunner(object):
def __init__(self, Nodes, asm_code, label = '', path = ''):
self.LC = 0 # For count the string
self.BC = 0 # (block count) For count the block
self.asm_code = asm_code # The asm code
self.path = path
self.Nodes = Nodes
self.global_head = """
#----------------------Welcome to Cantonese-------------------------
# by Stepfen Shawn
#-------------------------------------------------------------------
"""
self.file_head = "\t.file " + self.path
self.stack_size = 16
self.rpb_offset = 0
self.lc_data_map = {}
self.bc_data_map = {}
self.func_segment_list = []
self.var_type_map = {}
self.var_address_map = {}
"""
Size map:
1 : BYTE PTR
2 : WORD PTR
4 : DWORD PTR
8 : QWORD PTR
"""
self.int_size = 4
self.long_size = 4
self.float_size = 4
self.double_size = 8
self.string_size = 8 # char* type
self.char_size = 1
# The base attr : to mark which registers used for function block
self.rax = Register_rax()
self.ecx = Register_ecx()
self.r8 = Register_r8()
self.r9 = Register_r9()
self.register = [self.rax, self.ecx, self.r8, self.r9]
self.bc_ins = []
self.lc_ins = []
self.reg_map = {
"rax": ["rax", "eax", "ax", "al"],
"rbx": ["rbx", "ebx", "bx", "bl"],
"rcx": ["rcx", "ecx", "cx", "cl"],
"rdx": ["rdx", "edx", "dx", "dl"],
"rsi": ["rsi", "esi", "si", "sil"],
"rdi": ["rdi", "edi", "di", "dil"],
"r8": ["r8", "r8d", "r8w", "r8b"],
"r9": ["r9", "r9d", "r9w", "r9b"],
"r10": ["r10", "r10d", "r10w", "r10b"],
"r11": ["r11", "r11d", "r11w", "r11b"],
"rbp": ["rbp", "", "", ""],
"rsp": ["rsp", "", "", ""]
}
self.function_args_map = {}
self.block_name = "__main"
def init_lc(self):
self.lc_data_map["%d\\n\\0"] = self.LC
# self.asm_code += self.init_string("\"%d\\n\\0\"")
self.lc_ins.append(self.init_string("\"%d\\n\\0\""))
self.LC += 1
self.lc_data_map["%s\\n\\0"] = self.LC
# self.asm_code += self.init_string("\"%s\\n\\0\"")
self.lc_ins.append(self.init_string("\"%s\\n\\0\""))
self.LC += 1
def init_main_section(self):
self.asm_code += "\t.text\n"
self.asm_code += "\t.globl main\n"
self.asm_code += "main:\n"
def init_main_return_value(self):
self.asm_code += "\t" + "movl $0, %eax\n"
self.asm_code += "\t" + "leave\n"
self.asm_code += "\t" + "ret\n"
def add_to_datasegment(self, data):
if data[0] == 'string':
data[1] = "\"" + eval(data[1]) + "\\0" + "\""
if data[1] in self.lc_data_map.keys():
return
else:
self.lc_data_map[data[1]] = self.LC
# self.asm_code += self.init_string(data[1])
self.lc_ins.append(self.init_string(data[1]))
self.LC += 1
def var_to_address(self, val):
if self.var_type_map[val] == 'int':
val_size = self.int_size
elif self.var_type_map[val] == 'str':
val_size = self.string_size
self.rpb_offset += val_size
address = " -" + str(self.rpb_offset) + "(%rbp) "
self.var_address_map[val] = address
return address
"""
Init a string
"""
def init_string(self, string):
ret = "\n.LC" + str(self.LC) + ":\n\t .ascii " + string + "\n"
return ret
def init_block(self):
ret = "\n.BLOCK" + str(self.BC) + ":\n"
return ret
def init_function_block(self, name):
ret = "\n" + name + ":\n"
return ret
def init_main_stack(self):
self.asm_code += "\t" + "pushq %rbp\n"
self.asm_code += "\t" + "movq %rsp, %rbp\n"
def init_stack_size(self):
self.asm_code += "\t" + "subq $32, %rsp\n"
def count_stack_size(self):
if self.rpb_offset != 0:
self.stack_size += (int(self.rpb_offset / 16) + 1) * 16
def init_call_main(self):
self.asm_code += "\tcall __main\n"
def call_puts(self, lc_index):
re = ""
re += "\t" + "leaq .LC" + str(lc_index) + "(%rip), %rcx\n"
re += "\t" + "call puts\n"
self.ins.append(re)
def make_block(self, node : list):
self.BC += 1
re, code = '', ''
re = ".BC" + str(self.BC) + ":\n"
re += IfBlockParse(node, code, self.LC, self.BC, self.lc_ins, self.var_address_map, self.bc_ins).run()
self.bc_ins.append(re)
ret = [code, self.BC]
return ret
def call_printf(self, lc_index, char_type : bool = False, val : int = None, arg : list = None):
re = ""
if lc_index == None and arg != None:
"""
movq %rcx, 16(%rbp)
movq 16(%rbp), %rcx
"""
re += "\t" + "movq %rcx, " + self.var_address_map[arg] + "\n"
re += "\t" + "movq " + self.var_address_map[arg] + ", %rcx\n"
re += "\t" + "call puts\n"
else:
if not char_type:
if val is not None:
re += "\t" + "movl " + self.var_address_map[val] + ", %eax\n"
re += "\t" + "movl %eax, %edx\n"
else:
re += "\t" + "movq " + self.var_address_map[val] + ", %rax\n"
re += "\t" + "movq %rax, %rdx\n"
re += "\t" + "leaq .LC" + str(lc_index) + "(%rip), %rcx\n"
re += "\t" + "call printf\n"
self.ins.append(re)
def call_exit(self):
re = ""
re += "\t" + "movl $1, %ecx\n"
re += "\t" + "call exit\n"
self.ins.append(re)
def assign_movl(self, var_name, val):
re = ""
if var_name[0] == 'expr':
var_name = ExprEval(var_name[1]).parse().genAsm()
else:
var_name = var_name[1]
if val[0] != 'expr':
val = val[1]
if isinstance(eval(val), int):
self.var_type_map[var_name] = 'int'
re += "\t" + "movl $" + val + "," + self.var_to_address(var_name) + "\n"
elif isinstance(eval(val), str):
self.var_type_map[var_name] = 'str'
self.add_to_datasegment(['string', val])
re += "\t" + "leaq .LC" + str(self.lc_data_map["\"" + eval(val) + "\\0" + "\""]) + "(%rip), %rax\n"
re += "\t" + "movq %rax, " + self.var_to_address(var_name) + "\n"
else:
pass
self.ins.append(re)
def __function(self, func_name, func_args, func_body):
code = ''
if func_args is not None:
func_parse = AsmBlockParse(func_body, code, self.LC, self.lc_data_map, func_name[1], [func_args])
else:
func_parse = AsmBlockParse(func_body, code, self.LC, self.lc_data_map, func_name[1])
self.func_segment_list.append(func_name)
print(func_parse.run())
self.LC = func_parse.LC
def __return(self, val_node):
if val_node[0] == 'num':
self.asm_code += "\t" + "movl $" + val_node[1] + ", %eax\n"
self.asm_code += "\t" + "ret\n"
if val_node[0] == 'string':
self.add_to_datasegment(val_node)
self.asm_code += "\t" + "leap .LC" + str(self.lc_data_map["\"" + eval(val_node[1]) + "\\0" + "\""]) + \
"(%rip), %rax\n"
self.asm_code += "\t" + "ret\n"
def __call(self, f):
re = ""
expr_eval_ret = ExprEval(f, self).parse().genAsm()
if not expr_eval_ret['has_args']:
re += "\t" + "call " + expr_eval_ret['func_name'] + "\n"
else:
for a in expr_eval_ret['args']:
self.add_to_datasegment(a)
re += "\t" + "leaq " + ".LC" + str(self.lc_data_map[a[1]]) + "(%rip), %rcx\n"
re += "\t" + "call " + expr_eval_ret['func_name'] + "\n"
self.ins.append(re)
def _exec_if(self, cond : str, block : list) -> None:
re = ""
re += ExprEval(cond, self).parse().genAsm()
re += ".BC" + str(self.make_block(block)[1]) + "\n"
self.ins.append(re)
def add_all_ins(self):
for item in self.ins:
self.asm_code += item
def add_all_block_ins(self):
for i in range(len(self.bc_ins) - 1, -1, -1):
self.asm_code += self.bc_ins[i]
def add_all_lc_ins(self):
for item in self.lc_ins:
self.asm_code += item
def run_init(self):
self.asm_code += self.global_head
self.init_lc()
def run(self, in_main = True):
self.ins = []
for node in self.Nodes:
if node[0] == "node_print":
self.add_to_datasegment(node[1])
if node[1][0] == 'string':
self.call_puts(self.lc_data_map[node[1][1]])
elif node[1][0] == 'identifier':
if self.var_type_map[node[1][1]] == 'int':
self.call_printf(self.lc_data_map["%d\\n\\0"], val = node[1][1])
elif self.var_type_map[node[1][1]] == 'str':
self.call_printf(self.lc_data_map["%s\\n\\0"], char_type = True, val = node[1][1])
elif self.var_type_map[node[1][1]] == 'arg':
self.call_printf(None, arg = node[1][1])
elif node[1][0] == 'expr':
if hasattr(ExprEval(node[1][1], self).parse(), 'op'):
self.ins.append(ExprEval(node[1][1], self).parse().genAsm())
self.call_printf(self.lc_data_map["%d\\n\\0"])
else:
self.call_printf(self.lc_data_map["%d\\n\\0"], val = ExprEval(node[1][1], self).parse().genAsm())
elif node[0] == 'node_let':
self.assign_movl(node[1], node[2])
elif node[0] == 'node_fundef':
func_name = node[1]
func_args = node[2]
func_body = node[3]
self.__function(func_name, func_args, func_body)
elif node[0] == 'node_call':
self.__call(node[1][1])
elif node[0] == 'node_return':
self.__return(node[1])
elif node[0] == "node_exit":
self.call_exit()
elif node[0] == "node_if":
self._exec_if(node[1][1], node[2])
else:
pass
self.add_all_lc_ins()
self.init_main_section()
self.init_main_stack()
self.count_stack_size()
self.init_stack_size()
self.init_call_main()
self.add_all_ins()
self.init_main_return_value()
self.add_all_block_ins()
return self.asm_code
def add_ins(self):
pass
"""
A simple function stack structure:
args 3 (int) <- 20(%rbp)
args 2 (int) <- 16(%rbp)
args 1 (int) <- 12(%rbp)
return address (int) <- 8(%rbp)
old %rbp <- 0(%rbp)
variable 1 (int) <- -4(%rbp)
variable 2 (int) <- -8(%rbp)
variable 3 (int) <- -12(%rbp)
not used <- -16(%rbp) and (%rsp)
"""
class AsmBlockParse(AsmRunner):
def __init__(self, Nodes : list, asm_code : list, lc_index : int, lc_data_map : dict, block_name : str = '', func_args : list = []) -> None:
super(AsmBlockParse, self).__init__(Nodes, asm_code)
self.LC = lc_index
self.lc_data_map = lc_data_map
self.block_name = block_name
# Because of the return address, the args need start from 16(%rbp)
self.args_start_offset = 16
self.args_map = {}
self.func_args = func_args
def stack_add_args(self, arg : str):
self.var_address_map[arg] = str(self.args_start_offset) + "(%rbp)"
self.var_type_map[arg] = 'arg'
self.args_start_offset += 16
# Override
def run(self):
if len(self.func_args) != 0:
for i in self.func_args:
self.stack_add_args(i[1])
return super().run()
# Override
def init_main_section(self):
self.asm_code += "\t.text\n"
self.asm_code += "\t.globl " + self.block_name + "\n"
self.asm_code += self.block_name + ":\n"
# Override
def init_call_main(self):
return
# Override
def init_lc(self):
return
class IfBlockParse(AsmRunner):
def __init__(self, Nodes : list, code : str, lc_index : int, bc_index : int, lc_ins, var_address_map : dict, bc_ins) -> None:
super(IfBlockParse, self).__init__(Nodes, code)
self.LC = lc_index
self.BC = bc_index
self.lc_ins = lc_ins
self.var_address_map = var_address_map
self.bc_ins = bc_ins
# Override
def init_main_section(self):
return
# Override
def init_main_stack(self):
return
# Override
def init_call_main(self):
return
# Override
def init_lc(self):
return
# Override
def init_main_return_value(self):
return
# Override
def count_stack_size(self):
return
# Override
def init_stack_size(self):
return
# Override
def add_all_block_ins(self):
return
# Override
def add_all_lc_ins(self):
return
class Compile(object):
def __init__(self, ast, target, path) -> None:
self.ast = ast
self.target = target
self.path = path
self.TO_JS_CODE = ""
self.TO_CPP_CODE = ""
self.TO_ASM_CODE = ""
if self.target == "js":
self.run_js(self.ast)
if self.target == "cpp":
self.run_cpp(self.ast)
if self.target == "asm":
self.run_asm(self.ast)
def ret(self):
if self.target == "js":
return self.TO_JS_CODE, self.path[ : len(self.path) - len('cantonese')] + 'js'
if self.target == "cpp":
return self.TO_CPP_CODE, self.path[ : len(self.path) - len('cantonese')] + 'cpp'
if self.target == "asm":
return self.TO_ASM_CODE, self.path[ : len(self.path) - len('cantonese')] + 'S'
# TODO
def eval_expr(self, expr):
return expr
# TODO
def run_asm(self, Nodes : list, label = '', path = '') -> None:
asmbler = AsmRunner(Nodes, self.TO_ASM_CODE, path = path)
asmbler.run_init()
self.TO_ASM_CODE = asmbler.run()
def run_cpp(self, Nodes : list, label = '', path = '') -> None:
for node in Nodes:
if node[0] == "node_print":
                self.TO_CPP_CODE += "std::cout<<" + self.eval_expr(node[1][1]) + ";\n"
def run_js(self, Nodes : list, label = '', path = '', in_web = False) -> None:
for node in Nodes:
if node[0] == "node_print":
if in_web:
self.TO_JS_CODE += "alert(" + self.eval_expr(node[1][1]) + ");\n"
else:
self.TO_JS_CODE += "console.log(" + self.eval_expr(node[1][1]) + ");\n"
if node[0] == "node_exit":
self.TO_JS_CODE += "process.exit();\n"
if node[0] == "node_let":
self.TO_JS_CODE += node[1][1] + " = " + self.eval_expr(node[2][1]) + ";\n"
if node[0] == "node_if":
self.TO_JS_CODE += "if (" + self.eval_expr(node[1][1]) + ") {\n"
self.run_js(node[2])
self.TO_JS_CODE += "}"
if node[0] == "node_elif":
self.TO_JS_CODE += "else if (" + self.eval_expr(node[1][1]) + ") {\n"
self.run_js(node[2])
self.TO_JS_CODE += "}"
if node[0] == "node_else":
self.TO_JS_CODE += "else{"
self.run_js(node[1])
self.TO_JS_CODE += "}"
if node[0] == "node_call":
self.TO_JS_CODE += node[1][1] + ";\n"
if node[0] == "node_fundef":
if node[2] == 'None':
self.TO_JS_CODE += "function " + node[1][1] + "() {\n"
self.run_js(node[3])
self.TO_JS_CODE += "}\n"
else:
self.TO_JS_CODE += "function " + node[1][1] + "(" + node[2][1] + ") {\n"
self.run_js(node[3])
self.TO_JS_CODE += "}\n"
class ExprFunctionCall(object):
def __init__(self, func_name :str, args : list, state : AsmRunner = None) -> None:
self.func_name = func_name
self.args = args
self.state = state
def genAsm(self) -> str:
if self.args == None:
return {'has_args' : False, 'func_name' : self.func_name}
else:
return {'has_args' : True, 'func_name' : self.func_name, 'args' : self.args}
class ExprNumOrIdentifier(object):
def __init__(self, arg : list, state : AsmRunner = None):
self.arg = arg
self.state = state
def genAsm(self):
return self.arg[1]
class ExprOp(object):
def __init__(self, op : str, arg1 : list, arg2 : list, state : AsmRunner = None):
self.op = op
self.arg1 = arg1
self.arg2 = arg2
self.state = state
def genAsm(self):
if self.op == '+':
re = ""
if self.arg1[0] == 'num':
if self.arg2[0] == 'identifier':
re += "\t" + "movl " + self.state.var_address_map[self.arg2[1]] + ", %eax\n"
re += "\t" + "addl $" + self.arg1[1] + ", %eax\n"
elif self.arg1[0] == 'identifier':
if self.arg2[0] == 'num':
re += "\t" + "movl " + self.state.var_address_map[self.arg1[1]] + ", %eax\n"
re += "\t" + "addl $" + self.arg2[1] + ". %eax\n"
elif self.op == '-':
re = ""
if self.arg1[0] == 'num':
if self.arg2[0] == 'identifier':
re += "\t" + "movl " + self.state.var_address_map[self.arg2[1]] + ", %eax\n"
re += "\t" + "subl $" + self.arg1[1] + ", %eax\n"
elif self.arg1[0] == 'identifier':
if self.arg2[0] == 'num':
re += "\t" + "movl " + self.state.var_address_map[self.arg1[1]] + ", %eax\n"
re += "\t" + "subl $" + self.arg2[1] + ". %eax\n"
elif self.op == '==' or self.op == '0': # == or is
re = ""
if self.arg1[0] == 'num':
if self.arg2[0] == 'identifier':
re += '\t' + "cmpl $" + self.arg1[1] + ", " + self.state.var_address_map[self.arg2[1]] + '\n'
re += '\t' + 'je '
elif self.arg1[0] == 'identifier':
if self.arg2[0] == 'num':
re += '\t' + "cmpl $" + self.arg2[1] + ", " + self.state.var_address_map[self.arg1[1]] + '\n'
re += '\t' + 'je '
elif self.op == '!=':
re = ""
if self.arg1[0] == 'num':
if self.arg2[0] == 'identifier':
re += '\t' + "cmpl $" + self.arg1[1] + ", " + self.state.var_address_map[self.arg2[1]] + '\n'
re += '\t' + 'jne '
elif self.arg1[0] == 'identifier':
if self.arg2[0] == 'num':
re += '\t' + "cmpl $" + self.arg2[1] + ", " + self.state.var_address_map[self.arg1[1]] + '\n'
re += '\t' + 'jne '
return re
def __str__(self) -> str:
return str(self.arg1) + " " + self.op + " " + str(self.arg2)
"""
result_type : 'EXPR_OP' | 'EXPR_VAR_OR_IDENT'
"""
class ExprEval(object):
def __init__(self, string, state : AsmRunner = None):
self.string = string
self.py_ins = []
self.op = ['+', '-', '*', '/', '%']
self.stack = []
self.state = state
def parse(self):
# Trans the expr to python vm, then to asm
# Expr -> Py-Opcode -> Mid-Opcode
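        # Hedged illustration (exact opcodes vary across CPython versions): for
        # the source string "a + 1", dis.Bytecode yields roughly
        #   LOAD_NAME    a
        #   LOAD_CONST   1
        #   BINARY_ADD
        #   RETURN_VALUE
        # which the loop below turns into an ExprOp('+', ...) over both operands.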
bytecode = dis.Bytecode(self.string)
for instr in bytecode:
self.py_ins.append({'opname' : instr.opname, 'args' : instr.argrepr})
if instr.opname == 'LOAD_NAME':
self.stack.append(instr.argrepr)
elif instr.opname == 'LOAD_CONST':
self.stack.append(instr.argrepr)
elif instr.opname == 'CALL_FUNCTION':
self._result_type = "EXPR_CALL"
return self.__eval_func_call(self.stack)
elif instr.opname == 'BINARY_ADD':
self._result_type = "EXPR_OP"
return self.__eval_op('+')
elif instr.opname == 'BINARY_SUBTRACT':
self._result_type = "EXPR_OP"
return self.__eval_op('-')
elif instr.opname == 'COMPARE_OP' or instr.opname == 'IS_OP':
self._result_type = "EXPR_OP"
return self.__eval_op(instr.argrepr)
elif instr.opname == 'RETURN_VALUE' and instr.argrepr == '':
break
self._result_type = "EXPR_VAR_OR_IDENT"
return self.__eval()
def get_type(self, val):
try:
v = eval(val)
if isinstance(v, float) or isinstance(v, int):
return 'num'
elif isinstance(v, str):
return 'string'
except NameError:
return 'identifier'
def __eval_op(self, op : str = ''):
val1 = self.stack.pop()
val2 = self.stack.pop()
if self.get_type(val1) == 'identifier':
if self.get_type(val2) == 'num':
return ExprOp(op, ['identifier', val1], ['num', val2], self.state)
elif self.get_type(val2) == 'identifier':
return ExprOp(op, ['identifier', val1], ['identifier', val2], self.state)
elif self.get_type(val2) == 'string':
return ExprOp(op, ['identifier', val1], ['string', val2], self.state)
elif self.get_type(val1) == 'num':
if self.get_type(val2) == 'num':
return ExprOp(op, ['num', val1], ['num', val2], self.state)
elif self.get_type(val2) == 'identifier':
return ExprOp(op, ['num', val1], ['identifier', val2], self.state)
elif self.get_type(val2) == 'string':
return ExprOp(op, ['num', val1], ['string', val2], self.state)
elif self.get_type(val1) == 'string':
if self.get_type(val2) == 'num':
return ExprOp(op, ['string', val1], ['num', val2], self.state)
elif self.get_type(val2) == 'identifier':
return ExprOp(op, ['string', val1], ['identifier', val2], self.state)
elif self.get_type(val2) == 'string':
return ExprOp(op, ['string', val1], ['string', val2], self.state)
def __eval_func_call(self, stack):
func_name = stack[0]
args_lst = []
# If the function has no args
if len(stack) == 1:
return ExprFunctionCall(func_name, None, self.state)
else:
i = 1
while i < len(stack):
args_lst.append([self.get_type(stack[i]), stack[i]])
i += 1
return ExprFunctionCall(func_name, args_lst, self.state)
def __eval(self):
# Var or identifier?
if len(self.stack) == 1:
try:
val = eval(self.stack[0])
if isinstance(val, float) or isinstance(val, int):
return ExprNumOrIdentifier(['num', self.stack[0]], self.state)
elif isinstance(val, str):
return ExprNumOrIdentifier(['string', self.stack[0]], self.state)
except NameError:
return ExprNumOrIdentifier(['identifier', self.stack[0]], self.state)
else:
pass
def result_type(self):
        return self._result_type
# File: /Charisma_toolbox-0.0.2-py3-none-any.whl/orangecontrib/extension/widgets/OWLDA.py
import numpy
from orangewidget.gui import tabWidget, createTabPage
from AnyQt.QtWidgets import QFormLayout
from Orange.data import Table, Variable, Domain, ContinuousVariable
from Orange.data.sql.table import SqlTable, AUTO_DL_LIMIT
from orangecontrib.extension.utils.Projection.lda import LDA, LDAtestTransform
from Orange.widgets.widget import Input, Output, AttributeList, Msg
from orangecontrib.extension.utils import scattergraph
from Orange.widgets.utils.itemmodels import DomainModel
from math import isnan, isinf
from itertools import chain
import unicodedata
from AnyQt.QtWidgets import QTableView, QHeaderView,QSizePolicy
from AnyQt.QtGui import QFont, QBrush, QColor, QStandardItemModel, QStandardItem
from AnyQt.QtCore import Qt, QSize, QItemSelectionModel, QItemSelection, Signal
import numpy as np
import sklearn.metrics as skl_metrics
from sklearn.model_selection import cross_val_score
import Orange
import Orange.evaluation
from Orange.widgets import widget, gui
from Orange.widgets.settings import \
Setting, ContextSetting
from Orange.widgets.utils.annotated_data import (create_annotated_table,
ANNOTATED_DATA_SIGNAL_NAME)
from Orange.widgets.utils.widgetpreview import WidgetPreview
from Orange.widgets.utils.state_summary import format_summary_details
from sklearn.model_selection import KFold, StratifiedKFold, LeaveOneOut
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as lda
import time
from joblib import Parallel, delayed, parallel_backend
MAX_COMPONENTS = 100
LINE_NAMES = ["Reconstruction Error"]
LINE_NAMES_TWO = ["component variance", "cumulative variance"]
BorderRole = next(gui.OrangeUserRole)
BorderColorRole = next(gui.OrangeUserRole)
learner_name = "LDA"
def confusion_matrix(data, pred):
"""
Compute confusion matrix
    Args:
        data (Orange.data.Table): data table holding the actual class values
        pred (numpy.ndarray): predicted class values
Returns: Confusion matrix
"""
labels = np.arange(len(data.domain.class_var.values))
if not data.Y.size:
return np.zeros((len(labels), len(labels)))
else:
return skl_metrics.confusion_matrix(
y_true=data.Y, y_pred=pred, labels=labels)
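# Hedged doctest-style sketch (not part of the original widget): for a binary
# class variable skl_metrics.confusion_matrix behaves as
#   >>> skl_metrics.confusion_matrix(y_true=[0, 0, 1, 1], y_pred=[0, 1, 1, 1], labels=[0, 1])
#   array([[1, 1],
#          [0, 2]])
# i.e. rows are actual classes and columns are predicted classes, the
# orientation assumed by _update_ConfMat below.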
class OWLDA(widget.OWWidget):
name = "LDA"
description = "Performs a linear discriminant analysis and " \
"displays a LDA plot and a confusion matrix,"
icon = "icons/LDA.svg"
priority = 1
keywords = ["linear discriminant analysis", "linear transformation"]
class Inputs:
train_data = Input("Data", Table)
test_data = Input("Test data", Table)
class Outputs:
lda = Output("LDA", LDA, dynamic=False)
class Error(widget.OWWidget.Error):
sparse_train_Matrix = Msg("Train data contains NaN")
sparse_test_Matrix = Msg("Test data contains NaN")
invalid_values = Msg("Class data contains NaN")
empty_input = widget.Msg("Empty result on input. Nothing to display.")
quantities = ["Number of instances",
"Proportion of predicted",
"Proportion of actual"]
selected_quantity = Setting(0)
ncomponentsl = Setting(3)
ncomponentsb = Setting(3)
auto_commit = Setting(True)
class_box = Setting(True)
legend_box = Setting(False)
testdata_box = Setting(False)
testdata_classes_box = Setting(False)
selection = ContextSetting(set())
attr_x = ContextSetting(None)
attr_y = ContextSetting(None)
TestOnTrain, TestOnTest, LeaveOneOut, KFold, StratifiedKFold = 0, 1, 2, 3, 4
NFolds = [2, 3, 5, 10]
sNFolds = [2, 3, 5, 10]
resampling = Setting(0, schema_only=True)
n_folds = Setting(3)
sn_folds = Setting(3)
xy_changed_manually = Signal(Variable, Variable)
common_options = dict(
labelWidth=50, orientation=Qt.Horizontal, sendSelectedValue=True,
contentsLength=14
)
def __init__(self):
super().__init__()
self.parallel = Parallel(n_jobs=-1, prefer="threads", pre_dispatch='2*n_jobs')
self.train_data = None
self.test_data = None
self.train_classes = None
self.test_classes = None
self.plot = None
self.tablemodel = None
self.train_headers = []
self.test_headers = []
self._lda = None
#self._ldaCV = None
self._transformed = None
self._transformedCV = None
self.train_pred = None
self.test_pred = None
self.classes = None
self.domainIndexes = {}
self.datalabel = None
self.train_datalabel = None
self.test_datalabel = None
self.PlotStyle = None
self.testlabel = None
self.train_class_values = None
self.test_class_values = None
self.SYMBOLBRUSH = [(0, 204, 204, 180), (51, 255, 51, 180), (255, 51, 51, 180), (0, 128, 0, 180), \
(195, 46, 212, 180), (250, 194, 5, 180), (55, 55, 55, 180), (0, 114, 189, 180), (217, 83, 25, 180), (237, 177, 32, 180), \
(126, 47, 142, 180), (119, 172, 180)]
self.SYMBOLPEN = [(0, 204, 204, 255), (51, 255, 51, 255), (255, 51, 51, 255), (0, 128, 0, 255), \
(195, 46, 212, 255), (250, 194, 5, 255), (55, 55, 55, 255), (0, 114, 189, 255), (217, 83, 25, 255), (237, 177, 32, 255), \
(126, 47, 142, 255), (119, 172, 255)]
self._init_projector()
box = gui.vBox(self.controlArea, "Discriminant function selection")
form = QFormLayout()
box.layout().addLayout(form)
dmod = DomainModel
self.xy_model = DomainModel(dmod.MIXED, valid_types=ContinuousVariable)
self.cb_attr_x = gui.comboBox(
box, self, "attr_x", label=None,
callback=self.set_attr_from_combo,
model=self.xy_model, **self.common_options,
searchable=True)
self.cb_attr_y = gui.comboBox(
box, self, "attr_y", label=None,
callback=self.set_attr_from_combo,
model=self.xy_model, **self.common_options,
searchable=True)
form.addRow("Axis x:", self.cb_attr_x)
form.addRow("Axis y:", self.cb_attr_y)
class_box = gui.vBox(self.controlArea, "Plot options")
self.classb = gui.checkBox(class_box,
self, value="class_box", label="Color by class",
callback=self._update_class_box, tooltip="Datapoints get colored by class, when checked")
self.legendb = gui.checkBox(class_box,
self, value="legend_box", label="Show legend",
callback=self._update_legend_box, tooltip=None)
self.testdatab = gui.checkBox(class_box,
self, value="testdata_box", label="Show test data",
callback=self._update_testdata_box, tooltip=None)
self.testdatabc = gui.checkBox(class_box,
self, value="testdata_classes_box", label="Hide test data classes",
callback=self._update_testdata_classes_box, tooltip=None)
box = gui.vBox(self.controlArea, "Confusion matrix options")
form = QFormLayout()
box.layout().addLayout(form)
rbox = gui.radioButtons(
box, self, "resampling", callback=self._param_changed)
gui.appendRadioButton(rbox, "Test on train data")
gui.appendRadioButton(rbox, "Test on test data")
gui.appendRadioButton(rbox, "Leave one out")
gui.appendRadioButton(rbox, "Cross validation")
ibox = gui.indentedBox(rbox)
gui.comboBox(
ibox, self, "n_folds", label="Number of folds: ",
items=self.NFolds,
orientation=Qt.Horizontal, callback=self.kfold_changed)
gui.appendRadioButton(rbox, "Stratified cross validation")
ibox = gui.indentedBox(rbox)
gui.comboBox(
ibox, self, "sn_folds", label="Number of folds: ",
items=[str(x) for x in self.sNFolds],
orientation=Qt.Horizontal, callback=self.skfold_changed)
form.addRow("Evaluation mode:", rbox)
self.controlArea.layout().addStretch()
gui.auto_apply(self.controlArea, self, "auto_commit")
tabs = tabWidget(self.mainArea)
boxScatter = gui.vBox(self.mainArea, "Scatterplot")
formScatter = QFormLayout()
boxScatter.layout().addLayout(formScatter)
self.plot = scattergraph.ScatterGraph(callback=None)
boxScatter.layout().addWidget(self.plot)
tab = createTabPage(tabs, "Scatterplot")
tab.layout().addWidget(boxScatter)
boxConfus = gui.vBox(self.mainArea, "Confusion matrix")
formConfus = QFormLayout()
boxConfus.layout().addLayout(formConfus)
sbox = gui.hBox(boxConfus)
gui.rubber(sbox)
gui.comboBox(sbox, self, "selected_quantity",
items=self.quantities, label="Show: ",
orientation=Qt.Horizontal, callback=self._param_changed)
self.tablemodel = QStandardItemModel(self)
view = self.tableview = QTableView(
editTriggers=QTableView.NoEditTriggers)
view.setModel(self.tablemodel)
view.horizontalHeader().hide()
view.verticalHeader().hide()
view.horizontalHeader().setMinimumSectionSize(110)
view.setShowGrid(False)
view.setSizePolicy(QSizePolicy.MinimumExpanding,
QSizePolicy.MinimumExpanding)
boxConfus.layout().addWidget(view)
tab = createTabPage(tabs, "Confusion matrix")
tab.layout().addWidget(boxConfus)
#Scatter plot
def _update_testdata_box(self):
if self.test_data is None:
self.testdata_box = False
else:
if self.testdata_box == False:
self.testdata_classes_box = False
self.test_datalabel = self.test_data.Y
self.init_attr_values()
self._setup_plot(self.attr_x, self.attr_y)
def _update_testdata_classes_box(self):
if self.test_data is None:
self.testdata_box = False
self.testdata_classes_box = False
else:
if self.testdata_classes_box == True:
self.testdata_box = True
self.test_datalabel = np.zeros(self.test_data.Y.shape, dtype=int)
self.init_attr_values()
self._setup_plot(self.attr_x, self.attr_y)
else:
self.test_datalabel = self.test_data.Y
self.init_attr_values()
self._setup_plot(self.attr_x, self.attr_y)
def set_attr_from_combo(self):
self.attr_changed()
self.xy_changed_manually.emit(self.attr_x, self.attr_y)
def attr_changed(self):
self._setup_plot(self.attr_x, self.attr_y)
self.commit()
def _update_class_box(self):
self.plot.clear_plot()
self._setup_plot(self.attr_x, self.attr_y)
def _update_legend_box(self):
self.plot.clear_plot()
self._setup_plot(self.attr_x, self.attr_y)
def _setup_plot(self, x_axis, y_axis):
self.plot.clear_plot()
if self._lda is None:
self.plot.clear_plot()
return
x=self._Transformed.X[:,self.domainIndexes[str(self.attr_x)]]
y=self._Transformed.X[:,self.domainIndexes[str(self.attr_y)]]
classes = self.train_classes.copy()
if self.test_data is not None:
if self.testdata_box is True:
if self.testdata_classes_box ==True:
for kk in range(0,len(np.unique(self.testlabel))):
classes[len(self.train_classes)+kk] = 'Transformed testdata'
pass
else:
for kk in range(0,len(np.unique(self._testdata_transformed.Y))):
classes[len(self.train_classes)+kk] = f'predicted {self.train_classes[kk]}'
if self.class_box:
self.PlotStyle = [
dict(pen=None, symbolBrush=self.SYMBOLBRUSH[i], symbolPen=self.SYMBOLPEN[i], symbol='o', symbolSize=10,
name=classes[i]) for i in range(len(classes))]
self.plot.update(x,y, Style=self.PlotStyle, labels=self.datalabel, x_axis_label=x_axis, y_axis_label=y_axis, legend=self.legend_box)
else:
self.Style = [
dict(pen=None, symbolBrush=self.SYMBOLBRUSH[0], symbolPen=self.SYMBOLPEN[0], symbol='o', symbolSize=10,
name=classes[i]) for i in range(len(classes))]
self.plot.update(x, y, Style=self.Style, labels=self.datalabel, x_axis_label=x_axis, y_axis_label=y_axis,legend=self.legend_box)
def init_attr_values(self):
if self.testdata_box:
testlabel = self.testlabel = self.test_datalabel + np.max(self.train_datalabel) + 1
self.datalabel = np.hstack((self.train_datalabel, testlabel))
datatrans = np.vstack((self._transformed, self._testdata_transformed.X))
else:
self.datalabel = self.train_datalabel
datatrans = self._transformed
domain = numpy.array(['DF{}'.format(i + 1)
for i in range(datatrans.shape[1])],
dtype=object)
for i in range(len(domain)):
self.domainIndexes[domain[i]] = i
proposed = [a for a in domain]
dom = Domain(
[ContinuousVariable(name, compute_value=lambda _: None)
for name in proposed],
metas=None)
self._Transformed = Table(dom, datatrans, metas=None)
self.xy_model.set_domain(dom)
self.attr_x = self.xy_model[0] if self.xy_model else None
self.attr_y = self.xy_model[1] if len(self.xy_model) >= 2 \
else self.attr_x
#Confusion matrix
def _param_changed(self):
self._update_ConfMat()
def _update_ConfMat(self):
def _isinvalid(x):
return isnan(x) or isinf(x)
if self.resampling == self.TestOnTrain:
if self.train_pred is not None:
cmatrix = confusion_matrix(self.train_data, self.train_pred)
colsum = cmatrix.sum(axis=0)
rowsum = cmatrix.sum(axis=1)
n = len(cmatrix)
diag = np.diag_indices(n)
colors = cmatrix.astype(np.double)
colors[diag] = 0
if self.selected_quantity == 0:
normalized = cmatrix.astype(int)
formatstr = "{}"
div = np.array([colors.max()])
else:
if self.selected_quantity == 1:
normalized = 100 * cmatrix / colsum
div = colors.max(axis=0)
else:
normalized = 100 * cmatrix / rowsum[:, np.newaxis]
div = colors.max(axis=1)[:, np.newaxis]
formatstr = "{:2.1f} %"
div[div == 0] = 1
colors /= div
maxval = normalized[diag].max()
if maxval > 0:
colors[diag] = normalized[diag] / maxval
for i in range(n):
for j in range(n):
val = normalized[i, j]
col_val = colors[i, j]
item = self._item(i + 2, j + 2)
item.setData(
"NA" if _isinvalid(val) else formatstr.format(val),
Qt.DisplayRole)
bkcolor = QColor.fromHsl(
[0, 240][i == j], 160,
255 if _isinvalid(col_val) else int(255 - 30 * col_val))
item.setData(QBrush(bkcolor), Qt.BackgroundRole)
item.setData("trbl", BorderRole)
item.setToolTip("actual: {}\npredicted: {}".format(
self.train_headers[i], self.train_headers[j]))
item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
self._set_item(i + 2, j + 2, item)
bold_font = self.tablemodel.invisibleRootItem().font()
bold_font.setBold(True)
def _sum_item(value, border=""):
item = QStandardItem()
item.setData(value, Qt.DisplayRole)
item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
item.setFlags(Qt.ItemIsEnabled)
item.setFont(bold_font)
item.setData(border, BorderRole)
item.setData(QColor(192, 192, 192), BorderColorRole)
return item
for i in range(n):
self._set_item(n + 2, i + 2, _sum_item(int(colsum[i]), "t"))
self._set_item(i + 2, n + 2, _sum_item(int(rowsum[i]), "l"))
self._set_item(n + 2, n + 2, _sum_item(int(rowsum.sum())))
elif self.resampling == self.TestOnTest:
if self.test_pred is not None and self.test_data is not None:
cmatrix = confusion_matrix(self.test_data, self.test_pred)
colsum = cmatrix.sum(axis=0)
rowsum = cmatrix.sum(axis=1)
n = len(cmatrix)
diag = np.diag_indices(n)
colors = cmatrix.astype(np.double)
colors[diag] = 0
if self.selected_quantity == 0:
normalized = cmatrix.astype(int)
formatstr = "{}"
div = np.array([colors.max()])
else:
if self.selected_quantity == 1:
normalized = 100 * cmatrix / colsum
div = colors.max(axis=0)
else:
normalized = 100 * cmatrix / rowsum[:, np.newaxis]
div = colors.max(axis=1)[:, np.newaxis]
formatstr = "{:2.1f} %"
div[div == 0] = 1
colors /= div
maxval = normalized[diag].max()
if maxval > 0:
colors[diag] = normalized[diag] / maxval
for i in range(n):
for j in range(n):
val = normalized[i, j]
col_val = colors[i, j]
item = self._item(i + 2, j + 2)
item.setData(
"NA" if _isinvalid(val) else formatstr.format(val),
Qt.DisplayRole)
bkcolor = QColor.fromHsl(
[0, 240][i == j], 160,
255 if _isinvalid(col_val) else int(255 - 30 * col_val))
item.setData(QBrush(bkcolor), Qt.BackgroundRole)
item.setData("trbl", BorderRole)
item.setToolTip("actual: {}\npredicted: {}".format(
self.train_headers[i], self.train_headers[j]))
item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
self._set_item(i + 2, j + 2, item)
bold_font = self.tablemodel.invisibleRootItem().font()
bold_font.setBold(True)
def _sum_item(value, border=""):
item = QStandardItem()
item.setData(value, Qt.DisplayRole)
item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
item.setFlags(Qt.ItemIsEnabled)
item.setFont(bold_font)
item.setData(border, BorderRole)
item.setData(QColor(192, 192, 192), BorderColorRole)
return item
for i in range(n):
self._set_item(n + 2, i + 2, _sum_item(int(colsum[i]), "t"))
self._set_item(i + 2, n + 2, _sum_item(int(rowsum[i]), "l"))
self._set_item(n + 2, n + 2, _sum_item(int(rowsum.sum())))
else:
return
elif self.resampling == self.KFold:
LDA = lda(solver="svd", shrinkage=None, priors=None,
n_components=min(self.train_data.X.shape[1], len(self.classes)-1), store_covariance=False, tol=1e-4)
kf = KFold(n_splits=self.NFolds[self.n_folds], shuffle=True, random_state=42)
precmatrix = []
for i in range(self.NFolds[self.n_folds]):
precmatrix.append(numpy.zeros(shape=(len(self.train_data.domain.class_var.values),len(self.train_data.domain.class_var.values))))
zaehler = 0
pb = gui.ProgressBar(self, self.NFolds[self.n_folds])
for train_index, test_index in kf.split(self.train_data):
train, test = self.train_data[train_index], self.train_data[test_index]
LDA.fit(train.X, train.Y)
Y_test_pred = LDA.predict(test.X)
if zaehler == 0:
precmatrix[zaehler] = confusion_matrix(test, Y_test_pred)
else:
precmatrix[zaehler] = precmatrix[zaehler-1] + confusion_matrix(test, Y_test_pred)
zaehler = zaehler +1
pb.advance()
pb.finish()
cmatrix = precmatrix[len(precmatrix)-1]
colsum = cmatrix.sum(axis=0)
rowsum = cmatrix.sum(axis=1)
n = len(cmatrix)
diag = np.diag_indices(n)
colors = cmatrix.astype(np.double)
colors[diag] = 0
if self.selected_quantity == 0:
normalized = cmatrix.astype(int)
formatstr = "{}"
div = np.array([colors.max()])
else:
if self.selected_quantity == 1:
normalized = 100 * cmatrix / colsum
div = colors.max(axis=0)
else:
normalized = 100 * cmatrix / rowsum[:, np.newaxis]
div = colors.max(axis=1)[:, np.newaxis]
formatstr = "{:2.1f} %"
div[div == 0] = 1
colors /= div
maxval = normalized[diag].max()
if maxval > 0:
colors[diag] = normalized[diag] / maxval
for i in range(n):
for j in range(n):
val = normalized[i, j]
col_val = colors[i, j]
item = self._item(i + 2, j + 2)
item.setData(
"NA" if _isinvalid(val) else formatstr.format(val),
Qt.DisplayRole)
bkcolor = QColor.fromHsl(
[0, 240][i == j], 160,
255 if _isinvalid(col_val) else int(255 - 30 * col_val))
item.setData(QBrush(bkcolor), Qt.BackgroundRole)
item.setData("trbl", BorderRole)
item.setToolTip("actual: {}\npredicted: {}".format(
self.train_headers[i], self.train_headers[j]))
item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
self._set_item(i + 2, j + 2, item)
bold_font = self.tablemodel.invisibleRootItem().font()
bold_font.setBold(True)
def _sum_item(value, border=""):
item = QStandardItem()
item.setData(value, Qt.DisplayRole)
item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
item.setFlags(Qt.ItemIsEnabled)
item.setFont(bold_font)
item.setData(border, BorderRole)
item.setData(QColor(192, 192, 192), BorderColorRole)
return item
for i in range(n):
self._set_item(n + 2, i + 2, _sum_item(int(colsum[i]), "t"))
self._set_item(i + 2, n + 2, _sum_item(int(rowsum[i]), "l"))
self._set_item(n + 2, n + 2, _sum_item(int(rowsum.sum())))
elif self.resampling == self.StratifiedKFold:
kf = StratifiedKFold(n_splits=self.sNFolds[self.sn_folds], shuffle=True, random_state=42)
precmatrix = []
LDA = lda(solver="svd", shrinkage=None, priors=None,
n_components=min(self.train_data.X.shape[1], len(self.classes)-1), store_covariance=False, tol=1e-4)
for i in range(self.sNFolds[self.sn_folds]):
precmatrix.append(numpy.zeros(shape=(len(self.train_data.domain.class_var.values),len(self.train_data.domain.class_var.values))))
zaehler = 0
            pb = gui.ProgressBar(self, self.sNFolds[self.sn_folds])  # progress length must match the stratified fold count actually iterated
for train_index, test_index in kf.split(self.train_data, self.train_data.Y):
train, test = self.train_data[train_index], self.train_data[test_index]
LDA.fit(train.X, train.Y)
Y_test_pred = LDA.predict(test.X)
if zaehler == 0:
precmatrix[zaehler] = confusion_matrix(test, Y_test_pred)
else:
precmatrix[zaehler] = precmatrix[zaehler-1] + confusion_matrix(test, Y_test_pred)
zaehler = zaehler +1
pb.advance()
pb.finish()
cmatrix = precmatrix[len(precmatrix)-1]
colsum = cmatrix.sum(axis=0)
rowsum = cmatrix.sum(axis=1)
n = len(cmatrix)
diag = np.diag_indices(n)
colors = cmatrix.astype(np.double)
colors[diag] = 0
if self.selected_quantity == 0:
normalized = cmatrix.astype(int)
formatstr = "{}"
div = np.array([colors.max()])
else:
if self.selected_quantity == 1:
normalized = 100 * cmatrix / colsum
div = colors.max(axis=0)
else:
normalized = 100 * cmatrix / rowsum[:, np.newaxis]
div = colors.max(axis=1)[:, np.newaxis]
formatstr = "{:2.1f} %"
div[div == 0] = 1
colors /= div
maxval = normalized[diag].max()
if maxval > 0:
colors[diag] = normalized[diag] / maxval
for i in range(n):
for j in range(n):
val = normalized[i, j]
col_val = colors[i, j]
item = self._item(i + 2, j + 2)
item.setData(
"NA" if _isinvalid(val) else formatstr.format(val),
Qt.DisplayRole)
bkcolor = QColor.fromHsl(
[0, 240][i == j], 160,
255 if _isinvalid(col_val) else int(255 - 30 * col_val))
item.setData(QBrush(bkcolor), Qt.BackgroundRole)
item.setData("trbl", BorderRole)
item.setToolTip("actual: {}\npredicted: {}".format(
self.train_headers[i], self.train_headers[j]))
item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
self._set_item(i + 2, j + 2, item)
bold_font = self.tablemodel.invisibleRootItem().font()
bold_font.setBold(True)
def _sum_item(value, border=""):
item = QStandardItem()
item.setData(value, Qt.DisplayRole)
item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
item.setFlags(Qt.ItemIsEnabled)
item.setFont(bold_font)
item.setData(border, BorderRole)
item.setData(QColor(192, 192, 192), BorderColorRole)
return item
for i in range(n):
self._set_item(n + 2, i + 2, _sum_item(int(colsum[i]), "t"))
self._set_item(i + 2, n + 2, _sum_item(int(rowsum[i]), "l"))
self._set_item(n + 2, n + 2, _sum_item(int(rowsum.sum())))
elif self.resampling == self.LeaveOneOut:
LDA = lda(solver="svd", shrinkage=None, priors=None,
n_components=min(self.train_data.X.shape[1], len(self.classes)-1), store_covariance=False, tol=1e-4)
kf = KFold(n_splits=self.train_data.Y.size, shuffle=True, random_state=42)
precmatrix = []
for i in range(self.train_data.Y.size):
precmatrix.append(numpy.zeros(shape=(len(self.train_data.domain.class_var.values),len(self.train_data.domain.class_var.values))))
zaehler = 0
            pb = gui.ProgressBar(self, self.train_data.Y.size)  # leave-one-out: one fold per training instance
for train_index, test_index in kf.split(self.train_data):
train, test = self.train_data[train_index], self.train_data[test_index]
LDA.fit(train.X, train.Y)
Y_test_pred = LDA.predict(test.X)
if zaehler == 0:
precmatrix[zaehler] = confusion_matrix(test, Y_test_pred)
else:
precmatrix[zaehler] = precmatrix[zaehler-1] + confusion_matrix(test, Y_test_pred)
zaehler = zaehler +1
pb.advance()
pb.finish()
cmatrix = precmatrix[len(precmatrix)-1]
colsum = cmatrix.sum(axis=0)
rowsum = cmatrix.sum(axis=1)
n = len(cmatrix)
diag = np.diag_indices(n)
colors = cmatrix.astype(np.double)
colors[diag] = 0
if self.selected_quantity == 0:
normalized = cmatrix.astype(int)
formatstr = "{}"
div = np.array([colors.max()])
else:
if self.selected_quantity == 1:
normalized = 100 * cmatrix / colsum
div = colors.max(axis=0)
else:
normalized = 100 * cmatrix / rowsum[:, np.newaxis]
div = colors.max(axis=1)[:, np.newaxis]
formatstr = "{:2.1f} %"
div[div == 0] = 1
colors /= div
maxval = normalized[diag].max()
if maxval > 0:
colors[diag] = normalized[diag] / maxval
for i in range(n):
for j in range(n):
val = normalized[i, j]
col_val = colors[i, j]
item = self._item(i + 2, j + 2)
item.setData(
"NA" if _isinvalid(val) else formatstr.format(val),
Qt.DisplayRole)
bkcolor = QColor.fromHsl(
[0, 240][i == j], 160,
255 if _isinvalid(col_val) else int(255 - 30 * col_val))
item.setData(QBrush(bkcolor), Qt.BackgroundRole)
item.setData("trbl", BorderRole)
item.setToolTip("actual: {}\npredicted: {}".format(
self.train_headers[i], self.train_headers[j]))
item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
self._set_item(i + 2, j + 2, item)
bold_font = self.tablemodel.invisibleRootItem().font()
bold_font.setBold(True)
def _sum_item(value, border=""):
item = QStandardItem()
item.setData(value, Qt.DisplayRole)
item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
item.setFlags(Qt.ItemIsEnabled)
item.setFont(bold_font)
item.setData(border, BorderRole)
item.setData(QColor(192, 192, 192), BorderColorRole)
return item
for i in range(n):
self._set_item(n + 2, i + 2, _sum_item(int(colsum[i]), "t"))
self._set_item(i + 2, n + 2, _sum_item(int(rowsum[i]), "l"))
self._set_item(n + 2, n + 2, _sum_item(int(rowsum.sum())))
else:
return
def kfold_changed(self):
self.resampling = OWLDA.KFold
self._param_changed()
def skfold_changed(self):
self.resampling = OWLDA.StratifiedKFold
self._param_changed()
def _set_item(self, i, j, item):
self.tablemodel.setItem(i, j, item)
def _item(self, i, j):
return self.tablemodel.item(i, j) or QStandardItem()
def _init_table(self, nclasses):
item = self._item(0, 2)
item.setData("Predicted", Qt.DisplayRole)
item.setTextAlignment(Qt.AlignCenter)
item.setFlags(Qt.NoItemFlags)
self._set_item(0, 2, item)
item = self._item(2, 0)
item.setData("Actual", Qt.DisplayRole)
item.setTextAlignment(Qt.AlignHCenter | Qt.AlignBottom)
item.setFlags(Qt.NoItemFlags)
self.tableview.setItemDelegateForColumn(0, gui.VerticalItemDelegate())
self._set_item(2, 0, item)
self.tableview.setSpan(0, 2, 1, nclasses)
self.tableview.setSpan(2, 0, nclasses, 1)
font = self.tablemodel.invisibleRootItem().font()
bold_font = QFont(font)
bold_font.setBold(True)
for i in (0, 1):
for j in (0, 1):
item = self._item(i, j)
item.setFlags(Qt.NoItemFlags)
self._set_item(i, j, item)
for p, label in enumerate(self.train_headers):
for i, j in ((1, p + 2), (p + 2, 1)):
item = self._item(i, j)
item.setData(label, Qt.DisplayRole)
item.setFont(bold_font)
item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
item.setFlags(Qt.ItemIsEnabled)
if p < len(self.train_headers) - 1:
item.setData("br"[j == 1], BorderRole)
item.setData(QColor(192, 192, 192), BorderColorRole)
self._set_item(i, j, item)
hor_header = self.tableview.horizontalHeader()
if len(' '.join(self.train_headers)) < 120:
hor_header.setSectionResizeMode(QHeaderView.ResizeToContents)
else:
hor_header.setDefaultSectionSize(110)
self.tablemodel.setRowCount(nclasses + 3)
self.tablemodel.setColumnCount(nclasses + 3)
#widget properties
@staticmethod
def sizeHint():
"""Initial size"""
return QSize(933, 600)
def _prepare_data(self):
indices = self.tableview.selectedIndexes()
indices = {(ind.row() - 2, ind.column() - 2) for ind in indices}
actual = self.train_data.Y
predicted = self.train_pred
selected = [i for i, t in enumerate(zip(actual, predicted))
if t in indices]
extra = []
class_var = self.train_data.domain.class_var
metas = self.train_data.domain.metas
attrs = self.train_data.domain.attributes
names = [var.name for var in chain(metas, [class_var], attrs)]
domain = Orange.data.Domain(self.train_data.domain.attributes,
self.train_data.domain.class_vars,
metas)
data = self.train_data.transform(domain)
if extra:
data.metas[:, len(self.train_data.domain.metas):] = \
np.hstack(tuple(extra))
        data.name = "LDA"  # assumption: fixed label; the original referenced an undefined 'learner_name'
if selected:
annotated_data = create_annotated_table(data, selected)
data = data[selected]
else:
annotated_data = create_annotated_table(data, [])
data = None
return data, annotated_data
def commit(self):
"""Output data instances corresponding to selected cells"""
self.Outputs.lda.send(self._lda_projector)
def send_report(self):
"""Send report"""
if self.train_data is None:
return
if self.train_pred is not None:
self.report_plot("Score plot", self.plot)
self.report_table("Confusion matrix", self.tableview)
#Data handling
@Inputs.train_data
def set_train_data(self, data):
self.clear()
self.train_data = None
        if data is None:
return
if isinstance(data, SqlTable):
if data.approx_len() < AUTO_DL_LIMIT:
data = Table(data)
else:
self.information("Data has been sampled")
data_sample = data.sample_time(1, no_cache=True)
data_sample.download_data(2000, partial=True)
data = Table(data_sample)
if isinstance(data, Table):
if not data.domain.attributes:
self.Error.no_features()
self.clear_outputs()
return
if not data:
self.Error.no_instances()
self.clear_outputs()
return
self._init_projector()
self.train_data = data
self.train_datalabel = self.train_data.Y
self.classes = numpy.unique(self.train_data.Y)
if self.train_data.domain.class_var:
self.train_classes = {int(self.classes[i]) : self.train_data.domain.class_var.values[i] for i in numpy.arange(0,len(self.train_data.domain.class_var.values))}
self._lda, self._transformed, self.train_pred, self.train_class_values= self._fit(data)
self.train_headers = self.train_class_values + \
(unicodedata.lookup("N-ARY SUMMATION"), )
self.init_attr_values()
self._setup_plot(self.attr_x, self.attr_y)
self._init_table(len(self.train_class_values))
self._update_ConfMat()
@Inputs.test_data
def set_test_data(self, data):
        if data is None:
return
self.test_data = None
if isinstance(data, SqlTable):
if data.approx_len() < AUTO_DL_LIMIT:
data = Table(data)
else:
self.information("Data has been sampled")
data_sample = data.sample_time(1, no_cache=True)
data_sample.download_data(2000, partial=True)
data = Table(data_sample)
if isinstance(data, Table):
if not data.domain.attributes:
self.Error.no_features()
self.clear_outputs()
return
if not data:
self.Error.no_instances()
self.clear_outputs()
return
self.test_data = data
self.test_datalabel = self.test_data.Y
b = numpy.unique(self.test_data.Y)
if self.test_data.domain.class_var:
self.test_classes = {int(b[i]) : self.test_data.domain.class_var.values[i] for i in numpy.arange(0,len(self.test_data.domain.class_var.values))}
if data is not None:
if np.any(np.isnan(self.test_data.X)):
self.Error.sparse_test_Matrix()
elif self._lda is None:
time.sleep(3)
elif self._lda is not None:
self.test_pred = self._lda.proj.predict(self.test_data.X)
self.test_class_values = data.domain.class_var.values
self._testdata_transformed = self.testdata_transform(self.test_data, self._lda)
if self.test_pred is not None:
nan_values = False
if np.any(np.isnan(self.train_data.Y)) or \
np.any(np.isnan(self.test_pred)):
nan_values = True
pred = data = None
self.test_headers = self.test_class_values + \
(unicodedata.lookup("N-ARY SUMMATION"), )
self.Error.invalid_values(shown=nan_values)
if self.testdata_box:
self.init_attr_values()
self._setup_plot(self.attr_x, self.attr_y)
self._update_ConfMat()
self.unconditional_commit()
def _fit(self, data=None, testset=None):
self.clear()
if data is None:
return
lda = self._lda_projector(data[:])
if data is not None:
if np.any(np.isnan(data.X)):
self.Error.sparse_train_Matrix()
else:
pred = lda.proj.predict(data.X)
class_values = data.domain.class_var.values
if pred is not None:
nan_values = False
if np.any(np.isnan(data.Y)) or \
np.any(np.isnan(pred)):
nan_values = True
pred = data = None
self.Error.invalid_values(shown=nan_values)
return lda, lda._transformedData, pred, class_values
def testdata_transform(self, data, projector):
X = data
Projector = projector
transformed = Projector(X)
return transformed
def clear_outputs(self):
self.Outputs.lda.send(None)
def clear(self):
self.tablemodel.clear()
self.train_headers = []
self._lda = None
self.plot.clear_plot()
self._transformed = None
self.train_pred = None
self.train_class_values = None
def _init_projector(self):
self._lda_projector = LDA(solver="svd", shrinkage=None, priors=None,
n_components=MAX_COMPONENTS, store_covariance=False, tol=1e-4,
preprocessors=None)
if __name__ == "__main__": # pragma: no cover
    X = Table("iris")
    KF = KFold(n_splits=3, shuffle=True, random_state=42)
    # Preview the widget on the first fold's train/test split.
    train_index, test_index = next(iter(KF.split(X)))
    X_train, X_test = X[train_index], X[test_index]
    WidgetPreview(OWLDA).run(set_train_data=X_train, set_test_data=X_test)
/Nevow-0.14.5.tar.gz/Nevow-0.14.5/examples/i18n/i18n.py

import os
from twisted.python import util
from nevow import inevow, loaders, rend, tags as T, url
from nevow.i18n import _, I18NConfig
LOCALE_DIR = util.sibpath(__file__, 'locale')
langs = [d for d in os.listdir(LOCALE_DIR) if d != '.svn']
langs.sort()
class Common(rend.Page):
addSlash = True
def renderHTTP(self, ctx):
# We're only overriding renderHTTP to look for a 'lang' query parameter
# without cluttering up the messages renderer, below.
# If 'lang' is present then we "force" the translation language. This
# simulates how user preferences from the session might be used to
# override the browser's language settings.
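        # For example (hypothetical URL): requesting /?lang=fr forces the 'fr'
        # catalogue, provided a matching directory exists under LOCALE_DIR.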
lang = ctx.arg('lang')
if lang is not None:
ctx.remember([lang], inevow.ILanguages)
# Let the base class handle it, really.
return rend.Page.renderHTTP(self, ctx)
def render_langs(self, ctx, data):
"""Render a list of links to select from the available translations.
"""
out = [T.a(href=url.here.remove('lang'))['default'], ' | ']
for lang in langs:
out.append(T.a(href=url.here.replace('lang', lang))[lang])
out.append(' | ')
return out[:-1]
class Page(Common):
def render_message(self, ctx, data):
"""Render a localised message. The _(..) construct looks the
translation up at render time.
"""
return ctx.tag.clear()[_('Hello')]
def render_formatstrings(self, ctx, data):
return ctx.tag.clear()[
"Demonstration of i18n'ed string formatting: ",
_("%(first)d plus %(second)c equals %(result)c, or %(roman)s in roman numbers")
% { 'first': 1,
'second': '1',
'result': 50,
'roman': 'II',
},
]
docFactory = loaders.stan(
T.html[
T.body[
T.p['Select your preferred language: ', T.directive('langs')],
T.p[render_message],
T.p[render_formatstrings],
],
],
)
def preparePage(pageFactory):
root = pageFactory()
# Configure the I18N stuff
root.remember(I18NConfig(domain='test', localeDir=LOCALE_DIR), inevow.II18NConfig)
return root
def createResource():
    return preparePage(Page)
/ConceptNet-5.7.0.tar.gz/ConceptNet-5.7.0/conceptnet5/vectors/evaluation/bias.py

import numpy as np
import pandas as pd
import scipy.stats  # scipy.stats.sem is used below; a bare "import scipy" does not guarantee the stats subpackage is loaded
from conceptnet5.vectors import get_vector, normalize_vec, standardized_uri
from conceptnet5.vectors.debias import (
FEMALE_WORDS, MALE_WORDS, PEOPLE_BY_BELIEF, PEOPLE_BY_ETHNICITY,
get_category_axis, get_vocabulary_vectors
)
from conceptnet5.vectors.transforms import (
l2_normalize_rows, subtract_mean_vector
)
# A list of gender-stereotyped pairs, from Bolukbasi et al.:
# https://arxiv.org/pdf/1607.06520.pdf
#
# This is a list of word pairs that Turkers judged to be "biased" and not
# "appropriate" in gender analogies. The first word in each pair is
# stereotypically associated with women, and the second is stereotypically
# associated with men. Our goal is to produce a system that cannot distinguish
# these gender stereotypes from their reversals.
#
# The pairs selected for this list are the ones that were rated as "biased" at
# least twice, and at least three times as often as they were rated
# "appropriate". An example of an "appropriate" pair would be ('aunt', 'uncle').
#
# We exclude pairs from the list in Bolukbasi et al.'s other paper, which we
# used for training the space to be unbiased in debias.py.
GENDER_BIAS_PAIRS = [
('pediatrician', 'orthopedic surgeon'),
('housewife', 'shopkeeper'),
('skirts', 'shorts'),
('interior designer', 'architect'),
('feminism', 'conservatism'),
('adorable', 'goofy'),
('vocalists', 'guitarists'),
('cosmetics', 'pharmaceuticals'),
('whore', 'coward'),
('vocalist', 'guitarist'),
('petite', 'lanky'),
('blond', 'burly'),
('nanny', 'chauffeur'),
('sassy', 'snappy'),
('charming', 'affable'),
('giggle', 'chuckle'),
('witch', 'demon'),
('volleyball', 'football'),
('feisty', 'mild mannered'),
('cupcakes', 'pizzas'),
('dolls', 'replicas'),
('netball', 'rugby'),
('glamorous', 'flashy'),
('sweater', 'jersey'),
('feminist', 'liberal'),
('rebounder', 'playmaker'),
('nude', 'shirtless'),
('judgmental', 'arrogant'),
('lovely', 'brilliant'),
('practicality', 'durability'),
('singer', 'frontman'),
('violinist', 'virtuoso'),
('beautiful', 'majestic'),
('sexism', 'racism'),
('pink', 'red'),
('hysterical', 'comical'),
('beauty', 'grandeur'),
('cheerful', 'jovial')
]
# We check the long list of words for ethnicities and nationalities from
# debias.py against ethnic stereotypes. However, that long list includes
# a lot of low-frequency words, so it could contain spurious results that
# bury relevant problems in the average.
#
# With no slight intended to the Togolese, we are more likely to be concerned
# about bias against Arabs than bias against Togolese.
#
# So we also check prejudices on this rather coarse-grained, US-centric,
# smaller list of ethnicities.
COARSE_ETHNICITY_TERMS = [
'african', 'african-american', 'american', 'arab', 'asian', 'black',
'european', 'hispanic', 'latino', 'latina', 'middle eastern',
'native american', 'pacific islander', 'scandinavian', 'white',
]
# debias.py has a list of cultural stereotypes that we import. However, we
# should also check for a test set of stereotypes that were not specifically
# removed in debias.py.
#
# We construct this test set separately for ethnic stereotypes and religious
# stereotypes, so that we can be more specific. We exclude words that already
# appear in CULTURE_PREJUDICES.
#
# We check for positive stereotypes as well as negative ones. While the biggest
# concern is a system associating a group of people with a negative word, it
# could also be biased against people by not associating them with positive
# words.
ETHNIC_STEREOTYPE_TERMS = [
'asshole', 'cheap', 'criminal', 'dumb', 'elegant', 'genius',
'greedy', 'hooligan', 'inferior', 'lazy', 'scammer', 'slave', 'slob',
'slut', 'studious', 'terror'
]
BELIEF_STEREOTYPE_TERMS = [
'bomber', 'decent', 'greedy', 'honest', 'immoral', 'kindness', 'smug',
'terror', 'violent'
]
ETHNIC_NAME_SETS = [
# "White" names from Caliskan et al., trimmed to 15 male and 15 female names
# http://science.sciencemag.org/content/sci/suppl/2017/04/12/356.6334.183.DC1/Caliskan-SM.pdf
[
'Adam', 'Harry', 'Josh', 'Roger', 'Alan',
'Frank', 'Justin', 'Ryan', 'Andrew', 'Jack',
'Matthew', 'Stephen', 'Brad', 'Greg', 'Paul',
'Amanda', 'Courtney', 'Heather', 'Melanie', 'Katie',
'Betsy', 'Kristin', 'Nancy', 'Stephanie', 'Ellen',
'Lauren', 'Colleen', 'Emily', 'Megan', 'Rachel'
],
# "Black" names from Caliskan et al., plus two more to balance it at
# 15 male and 15 female names
[
'Alonzo', 'Jamel', 'Theo', 'Alphonse', 'Jerome',
'Leroy', 'Torrance', 'Darnell', 'Lamar', 'Lionel',
'Tyree', 'Deion', 'Lamont', 'Malik', 'Terrence',
'Nishelle', 'Shereen', 'Ebony', 'Latisha', 'Shaniqua',
'Jasmine', 'Tanisha', 'Tia', 'Lakisha', 'Latoya',
'Yolanda', 'Malika', 'Yvette', 'Aaliyah', 'Shanice'
],
# Common Hispanic names from various sources, preferring those that are
# in the Numberbatch vocabulary
[
'Juan', 'José', 'Miguel', 'Luís', 'Jorge',
'Santiago', 'Matías', 'Sebastián', 'Mateo', 'Nicolás',
'Alejandro', 'Samuel', 'Diego', 'Daniel', 'Tomás',
'Juana', 'Ana', 'Luisa', 'María', 'Elena',
'Sofía', 'Isabella', 'Valentina', 'Camila', 'Valeria',
'Luciana', 'Ximena', 'Mariana', 'Victoria', 'Martina',
],
# Common Muslim names from various sources, preferring those that are
# in the Numberbatch vocabulary
[
'Mohammed', 'Omar', 'Ahmed', 'Ali', 'Youssef',
'Abdullah', 'Yasin', 'Hamza', 'Ayaan', 'Syed',
'Rishaan', 'Samar', 'Ahmad', 'Zikri', 'Rayyan',
'Mariam', 'Jana', 'Malak', 'Salma', 'Nour',
'Lian', 'Fatima', 'Ayesha', 'Zahra', 'Sana',
'Zara', 'Alya', 'Shaista', 'Zoya', 'Maryam'
]
]
def correlation_bias(frame1, frame2, verbose=False):
"""
Given two DataFrames of word vectors that we don't want to associate with
each other, find the strongest association for each item in `frame2`
and compare it to the average.
Returns a bias value (the average difference between the strongest
association and the average association) and a confidence interval on that
value.
Set 'verbose=True' if you want to see the most biased associations and
be either sad or confused.
"""
bias_numbers = []
centered1 = l2_normalize_rows(subtract_mean_vector(frame1))
centered2 = l2_normalize_rows(subtract_mean_vector(frame2))
grid = centered1.dot(centered2.T)
for i in range(grid.shape[1]):
col_bias = np.max(grid.iloc[:, i]) - np.mean(grid.iloc[:, i])
if verbose:
            most_biased = grid.iloc[:, i].idxmax()  # label of the most biased term (np.argmax returns a position in newer pandas)
comparison = centered2.index[i]
print("%4.4f %s => %s" % (col_bias, comparison, most_biased))
bias_numbers.append(col_bias)
mean = np.mean(bias_numbers)
sem = scipy.stats.sem(bias_numbers)
return pd.Series(
[mean, mean - sem * 2, mean + sem * 2],
index=['bias', 'low', 'high']
)
def measure_bias(frame):
"""
Return a DataFrame that measures biases in a semantic space, on four
data sets:
- Gender
- Fine-grained ethnicity
- Coarse-grained ethnicity
- Religious beliefs
"""
gender_binary_axis = normalize_vec(
get_category_axis(frame, FEMALE_WORDS) - get_category_axis(frame, MALE_WORDS)
)
gender_bias_numbers = []
for female_biased_word, male_biased_word in GENDER_BIAS_PAIRS:
female_biased_uri = standardized_uri('en', female_biased_word)
male_biased_uri = standardized_uri('en', male_biased_word)
diff = normalize_vec(
get_vector(frame, female_biased_uri) - get_vector(frame, male_biased_uri)
).dot(gender_binary_axis)
gender_bias_numbers.append(diff)
mean = np.mean(gender_bias_numbers)
sem = scipy.stats.sem(gender_bias_numbers)
gender_bias = pd.Series(
[mean, mean - sem * 2, mean + sem * 2],
index=['bias', 'low', 'high']
)
stereotype_vecs_1 = get_vocabulary_vectors(frame, PEOPLE_BY_ETHNICITY)
stereotype_vecs_2 = get_vocabulary_vectors(frame, ETHNIC_STEREOTYPE_TERMS)
fine_ethnic_bias = correlation_bias(stereotype_vecs_1, stereotype_vecs_2)
stereotype_vecs_1 = get_vocabulary_vectors(frame, COARSE_ETHNICITY_TERMS)
stereotype_vecs_2 = get_vocabulary_vectors(frame, ETHNIC_STEREOTYPE_TERMS)
coarse_ethnic_bias = correlation_bias(stereotype_vecs_1, stereotype_vecs_2)
stereotype_vecs_1 = pd.DataFrame(
np.vstack([
get_category_axis(frame, names) for names in ETHNIC_NAME_SETS
])
)
stereotype_vecs_2 = get_vocabulary_vectors(frame, ETHNIC_STEREOTYPE_TERMS)
name_ethnic_bias = correlation_bias(stereotype_vecs_1, stereotype_vecs_2)
stereotype_vecs_1 = get_vocabulary_vectors(frame, PEOPLE_BY_BELIEF)
stereotype_vecs_2 = get_vocabulary_vectors(frame, BELIEF_STEREOTYPE_TERMS)
belief_bias = correlation_bias(stereotype_vecs_1, stereotype_vecs_2)
return pd.DataFrame({
'gender': gender_bias,
'ethnicity-fine': fine_ethnic_bias,
'ethnicity-coarse': coarse_ethnic_bias,
'ethnicity-names': name_ethnic_bias,
'beliefs': belief_bias
    }).T
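
# A rough smoke-test sketch (not part of the original module). It assumes the
# conceptnet5 transforms imported above accept any DataFrame of row vectors;
# all labels and numbers here are purely illustrative.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    groups = pd.DataFrame(rng.randn(3, 8),
                          index=['/c/en/group_a', '/c/en/group_b', '/c/en/group_c'])
    terms = pd.DataFrame(rng.randn(5, 8),
                         index=['/c/en/term_%d' % i for i in range(5)])
    print(correlation_bias(groups, terms, verbose=True))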
/GxSphinx-1.0.0.tar.gz/GxSphinx-1.0.0/doc/templating.rst

.. highlight:: html+jinja

.. _templating:

==========
Templating
==========

Sphinx uses the `Jinja <http://jinja.pocoo.org>`_ templating engine for its HTML
templates. Jinja is a text-based engine, and inspired by Django templates, so
anyone having used Django will already be familiar with it. It also has
excellent documentation for those who need to make themselves familiar with it.
Do I need to use Sphinx's templates to produce HTML?
----------------------------------------------------
No. You have several other options:
* You can write a :class:`~sphinx.application.TemplateBridge` subclass that
calls your template engine of choice, and set the :confval:`template_bridge`
configuration value accordingly.
* You can :ref:`write a custom builder <writing-builders>` that derives from
:class:`~sphinx.builders.html.StandaloneHTMLBuilder` and calls your template
engine of choice.
* You can use the :class:`~sphinx.builders.html.PickleHTMLBuilder` that produces
pickle files with the page contents, and postprocess them using a custom tool,
or use them in your Web application.
Jinja/Sphinx Templating Primer
------------------------------
The default templating language in Sphinx is Jinja. It's Django/Smarty inspired
and easy to understand. The most important concept in Jinja is :dfn:`template
inheritance`, which means that you can overwrite only specific blocks within a
template, customizing it while also keeping the changes at a minimum.
To customize the output of your documentation you can override all the templates
(both the layout templates and the child templates) by adding files with the
same name as the original filename into the template directory of the structure
the Sphinx quickstart generated for you.
Sphinx will look for templates in the folders of :confval:`templates_path`
first, and if it can't find the template it's looking for there, it falls back
to the selected theme's templates.
A template contains **variables**, which are replaced with values when the
template is evaluated, **tags**, which control the logic of the template and
**blocks** which are used for template inheritance.
Sphinx's *basic* theme provides base templates with a couple of blocks it will
fill with data. These are located in the :file:`themes/basic` subdirectory of
the Sphinx installation directory, and used by all builtin Sphinx themes.
Templates with the same name in the :confval:`templates_path` override templates
supplied by the selected theme.
For example, to add a new link to the template area containing related links all
you have to do is to add a new template called ``layout.html`` with the
following contents::
{% extends "!layout.html" %}
{% block rootrellink %}
<li><a href="https://project.invalid/">Project Homepage</a> »</li>
{{ super() }}
{% endblock %}
By prefixing the name of the overridden template with an exclamation mark,
Sphinx will load the layout template from the underlying HTML theme.
.. important::
If you override a block, call ``{{ super() }}`` somewhere to render the
block's original content in the extended template -- unless you don't want
that content to show up.
Working with the builtin templates
----------------------------------
The builtin **basic** theme supplies the templates that all builtin Sphinx
themes are based on. It has the following elements you can override or use:
Blocks
~~~~~~
The following blocks exist in the ``layout.html`` template:
``doctype``
The doctype of the output format. By default this is XHTML 1.0 Transitional
as this is the closest to what Sphinx and Docutils generate and it's a good
idea not to change it unless you want to switch to HTML 5 or a different but
compatible XHTML doctype.
``linktags``
This block adds a couple of ``<link>`` tags to the head section of the
template.
``extrahead``
This block is empty by default and can be used to add extra contents into
the ``<head>`` tag of the generated HTML file. This is the right place to
add references to JavaScript or extra CSS files.
``relbar1``, ``relbar2``
This block contains the *relation bar*, the list of related links (the
parent documents on the left, and the links to index, modules etc. on the
right). ``relbar1`` appears before the document, ``relbar2`` after the
document. By default, both blocks are filled; to show the relbar only
before the document, you would override `relbar2` like this::
{% block relbar2 %}{% endblock %}
``rootrellink``, ``relbaritems``
Inside the relbar there are three sections: The ``rootrellink``, the links
from the documentation and the custom ``relbaritems``. The ``rootrellink``
is a block that by default contains a list item pointing to the master
document by default, the ``relbaritems`` is an empty block. If you
override them to add extra links into the bar make sure that they are list
items and end with the :data:`reldelim1`.
``document``
The contents of the document itself. It contains the block "body" where
the individual content is put by subtemplates like ``page.html``.
.. note::
In order for the built-in JavaScript search to show a page preview on
the results page, the document or body content should be wrapped in an
HTML element containing the ``role="main"`` attribute. For example::
<div role="main">
{% block document %}{% endblock %}
</div>
``sidebar1``, ``sidebar2``
A possible location for a sidebar. ``sidebar1`` appears before the document
and is empty by default, ``sidebar2`` after the document and contains the
default sidebar. If you want to swap the sidebar location override this and
call the ``sidebar`` helper::
{% block sidebar1 %}{{ sidebar() }}{% endblock %}
{% block sidebar2 %}{% endblock %}
(The ``sidebar2`` location for the sidebar is needed by the ``sphinxdoc.css``
stylesheet, for example.)
``sidebarlogo``
The logo location within the sidebar. Override this if you want to place
some content at the top of the sidebar.
``footer``
The block for the footer div. If you want a custom footer or markup before
or after it, override this one.
The following four blocks are *only* used for pages that do not have assigned a
list of custom sidebars in the :confval:`html_sidebars` config value. Their use
is deprecated in favor of separate sidebar templates, which can be included via
:confval:`html_sidebars`.
``sidebartoc``
The table of contents within the sidebar.
.. deprecated:: 1.0
``sidebarrel``
The relation links (previous, next document) within the sidebar.
.. deprecated:: 1.0
``sidebarsourcelink``
The "Show source" link within the sidebar (normally only shown if this is
enabled by :confval:`html_show_sourcelink`).
.. deprecated:: 1.0
``sidebarsearch``
The search box within the sidebar. Override this if you want to place some
content at the bottom of the sidebar.
.. deprecated:: 1.0
Configuration Variables
~~~~~~~~~~~~~~~~~~~~~~~
Inside templates you can set a couple of variables used by the layout template
using the ``{% set %}`` tag:
.. data:: reldelim1
The delimiter for the items on the left side of the related bar. This
   defaults to ``' »'``. Each item in the related bar ends with the value
of this variable.
.. data:: reldelim2
The delimiter for the items on the right side of the related bar. This
defaults to ``' |'``. Each item except of the last one in the related bar
ends with the value of this variable.
Overriding works like this::
{% extends "!layout.html" %}
{% set reldelim1 = ' >' %}
.. data:: script_files
Add additional script files here, like this::
{% set script_files = script_files + ["_static/myscript.js"] %}
.. deprecated:: 1.8.0
      Please use ``Sphinx.add_js_file()`` instead.
Helper Functions
~~~~~~~~~~~~~~~~
Sphinx provides various Jinja functions as helpers in the template. You can use
them to generate links or to output elements that are reused in several places.
.. function:: pathto(document)
Return the path to a Sphinx document as a URL. Use this to refer to built
documents.
.. function:: pathto(file, 1)
:noindex:
Return the path to a *file* which is a filename relative to the root of the
generated output. Use this to refer to static files.
.. function:: hasdoc(document)
Check if a document with the name *document* exists.
.. function:: sidebar()
Return the rendered sidebar.
.. function:: relbar()
Return the rendered relation bar.
.. function:: warning(message)
Emit a warning message.
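
For instance, a ``layout.html`` override could combine these helpers like this
(a minimal sketch; the ``custom.css`` file name is only an example)::

   {% block extrahead %}
     <link rel="stylesheet" href="{{ pathto('_static/custom.css', 1) }}" />
     {{ super() }}
   {% endblock %}
   {% block relbaritems %}
     {% if hasdoc('genindex') %}
       <li><a href="{{ pathto('genindex') }}">Index</a>{{ reldelim1 }}</li>
     {% endif %}
     {{ super() }}
   {% endblock %}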
Global Variables
~~~~~~~~~~~~~~~~
These global variables are available in every template and are safe to use.
There are more, but most of them are an implementation detail and might change
in the future.
.. data:: builder
The name of the builder (e.g. ``html`` or ``htmlhelp``).
.. data:: copyright
The value of :confval:`copyright`.
.. data:: docstitle
The title of the documentation (the value of :confval:`html_title`), except
when the "single-file" builder is used, when it is set to ``None``.
.. data:: embedded
True if the built HTML is meant to be embedded in some viewing application
that handles navigation, not the web browser, such as for HTML help or Qt
help formats. In this case, the sidebar is not included.
.. data:: favicon
The path to the HTML favicon in the static path, or ``''``.
.. data:: file_suffix
The value of the builder's :attr:`~.SerializingHTMLBuilder.out_suffix`
attribute, i.e. the file name extension that the output files will get. For
a standard HTML builder, this is usually ``.html``.
.. data:: has_source
True if the reST document sources are copied (if :confval:`html_copy_source`
is ``True``).
.. data:: language
The value of :confval:`language`.
.. data:: last_updated
The build date.
.. data:: logo
The path to the HTML logo image in the static path, or ``''``.
.. data:: master_doc
The value of :confval:`master_doc`, for usage with :func:`pathto`.
.. data:: pagename
The "page name" of the current file, i.e. either the document name if the
file is generated from a reST source, or the equivalent hierarchical name
relative to the output directory
(``[directory/]filename_without_extension``).
.. data:: project
The value of :confval:`project`.
.. data:: release
The value of :confval:`release`.
.. data:: rellinks
A list of links to put at the left side of the relbar, next to "next" and
"prev". This usually contains links to the general index and other indices,
such as the Python module index. If you add something yourself, it must be a
tuple ``(pagename, link title, accesskey, link text)``.
.. data:: shorttitle
The value of :confval:`html_short_title`.
.. data:: show_source
True if :confval:`html_show_sourcelink` is ``True``.
.. data:: sphinx_version
The version of Sphinx used to build.
.. data:: style
The name of the main stylesheet, as given by the theme or
:confval:`html_style`.
.. data:: title
The title of the current document, as used in the ``<title>`` tag.
.. data:: use_opensearch
The value of :confval:`html_use_opensearch`.
.. data:: version
The value of :confval:`version`.
In addition to these values, there are also all **theme options** available
(prefixed by ``theme_``), as well as the values given by the user in
:confval:`html_context`.
In documents that are created from source files (as opposed to
automatically-generated files like the module index, or documents that already
are in HTML form), these variables are also available:
.. data:: body
A string containing the content of the page in HTML form as produced by the
HTML builder, before the theme is applied.
.. data:: display_toc
A boolean that is True if the toc contains more than one entry.
.. data:: meta
Document metadata (a dictionary), see :ref:`metadata`.
.. data:: metatags
A string containing the page's HTML :dudir:`meta` tags.
.. data:: next
The next document for the navigation. This variable is either false or has
two attributes `link` and `title`. The title contains HTML markup. For
example, to generate a link to the next page, you can use this snippet::
{% if next %}
<a href="{{ next.link|e }}">{{ next.title }}</a>
{% endif %}
.. data:: page_source_suffix
The suffix of the file that was rendered. Since we support a list of
:confval:`source_suffix`, this will allow you to properly link to the
original source file.
.. data:: parents
A list of parent documents for navigation, structured like the :data:`next`
item.
.. data:: prev
Like :data:`next`, but for the previous page.
.. data:: sourcename
The name of the copied source file for the current document. This is only
nonempty if the :confval:`html_copy_source` value is ``True``.
This has empty value on creating automatically-generated files.
.. data:: toc
The local table of contents for the current page, rendered as HTML bullet
lists.
.. data:: toctree
A callable yielding the global TOC tree containing the current page, rendered
as HTML bullet lists. Optional keyword arguments:
``collapse``
If true, all TOC entries that are not ancestors of the current page are
collapsed.
``True`` by default.
``maxdepth``
The maximum depth of the tree. Set it to ``-1`` to allow unlimited depth.
Defaults to the max depth selected in the toctree directive.
``titles_only``
If true, put only top-level document titles in the tree.
``False`` by default.
``includehidden``
If true, the ToC tree will also contain hidden entries.
``False`` by default.
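
   For example, a custom sidebar template could render a collapsed tree limited
   to two levels (a minimal sketch)::

      <div class="sidebar-toc">
        {{ toctree(maxdepth=2, collapse=true, includehidden=false) }}
      </div>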
/Numberjack-1.2.0.tar.gz/Numberjack-1.2.0/examples/Warehouse.py
# Warehouse Location Problem
#
# Given a set of existing shops and a candidate set of warehouses to be opened,
# the warehouse location problem consists of choosing which warehouses to be
# opened and consequently the respective shops which each one will supply. There
# is a cost associated with opening each warehouse, as well as a supply cost for
# each warehouse-shop supply pair. The objective is to minimise the total cost
# of warehouse operations and supply costs.
#
# CSPLib Problem 034 - http://www.csplib.org/Problems/prob034/
from __future__ import print_function
from Numberjack import *
import re
def solve(param):
data = WareHouseParser(param['data'])
WareHouseOpen = VarArray(data.NumberOfWarehouses)
ShopSupplied = Matrix(data.NumberOfShops, data.NumberOfWarehouses)
# Cost of running warehouses
warehouseCost = Sum(WareHouseOpen, data.WareHouseCosts)
# Cost of shops using warehouses
transpCost = Sum([Sum(varRow, costRow) for (varRow, costRow) in zip(ShopSupplied, data.SupplyCost)])
obj = warehouseCost + transpCost
model = Model(
# Objective function
Minimise(obj),
# Channel from store opening to store supply matrix
[[var <= store for var in col] for (col, store) in zip(ShopSupplied.col, WareHouseOpen)],
# Make sure every shop if supplied by one store
[Sum(row) == 1 for row in ShopSupplied.row],
# Make sure that each store does not exceed it's supply capacity
[Sum(col) <= cap for (col, cap) in zip(ShopSupplied.col, data.Capacity)]
)
solver = model.load(param['solver'])
# solver.setNodeLimit(param['cutoff'])
solver.setHeuristic('DomainOverWDegree', 'Guided')
solver.setVerbosity(param['verbose'])
solver.setTimeLimit(param['tcutoff'])
solver.solveAndRestart()
if solver.is_sat():
if solver.is_opt():
print("Optimal")
print("Total cost: ", str(obj.get_value()))
print("Nodes:", solver.getNodes())
print("SolveTime:", solver.getTime())
elif solver.is_unsat():
print("Unsatisfiable")
else:
print("Unknown")
class WareHouseParser(object):
"Parses and stores the data for a warehouse location problem instance."
def __init__(self, filename):
with open(filename, "rt") as f:
alltext = f.read()
matchnbw = re.search(r"NbW=(?P<NumberOfWarehouses>\d+);", alltext)
matchnbs = re.search(r"NbS=(?P<NumberOfShops>\d+);", alltext)
matchfixed = re.search(r"fixed=(?P<FixedCost>\d+);", alltext)
self.NumberOfWarehouses = int(matchnbw.groupdict()["NumberOfWarehouses"])
self.NumberOfShops = int(matchnbs.groupdict()["NumberOfShops"])
self.FixedCost = int(matchfixed.groupdict()["FixedCost"])
self.SupplyCost = []
matchsupply = re.search(r"SupplyCost=\[(?P<supplystr>.*)\];", alltext, re.MULTILINE | re.DOTALL)
supplylines = matchsupply.groupdict()["supplystr"].strip().split("\n")
for supplyline in supplylines:
costs = map(int, re.findall(r"\d+", supplyline))
self.SupplyCost.append(costs)
self.WareHouseCosts = [self.FixedCost for val in range(self.NumberOfWarehouses)]
# # There was a fixed capacity for all the warehouse problems
self.Capacity = [4 for val in range(self.NumberOfWarehouses)]
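# A sketch of the input format WareHouseParser expects, inferred from the
# regexes above (all numbers are illustrative, not taken from a real instance):
#
#   NbW=3;
#   NbS=4;
#   fixed=30;
#   SupplyCost=[
#     [20, 24, 11],
#     [28, 27, 82],
#     [74, 97, 71],
#     [2, 55, 73]
#   ];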
default = {'solver': 'Mistral', 'data': 'data/cap44.dat.txt', 'cutoff': 50000, 'verbose': 1, 'tcutoff': 30}
if __name__ == '__main__':
param = input(default)
    solve(param)
/ImageD11-1.9.9.tar.gz/ImageD11-1.9.9/sandbox/highlightpeaks.py

from __future__ import print_function
# Kindly contributed by Martijn Van Hulzen, Delft, 2016.
# script that increases signal to background ratio by
# looking for the minimum and maximum value of a pixel in a set of edfs
# (usually rotated along the z-axis over an omega range) and then subtracting
# the min from max it also produces the average edf per cycle
import fabio, glob, numpy as np, sys, os
stem, f, l, n = sys.argv[1:] # define base filename (stem), first framenr (f),
# last framenr (l) and nr of frames in a cycle (n) as arguments
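# Example invocation (stem and frame numbers are illustrative only):
#   python highlightpeaks.py scan_ 0 3600 360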
f=int(f) # turn first framenr into integer
l=int(l) # turn last framenr into integer
n=int(n) # turn number of frames in a cycle into integer
m = 0 # correction added to the framenr in case a framenr is skipped
# loop over the range of edfs from first to last frame with step n
for i in range(f,l,n):
fn = "%s%04d.edf"%(stem,i+m) # set the filename
# loop until a valid filename is found
while not os.path.isfile(fn) and m < 10:
# print the filename that does not exist
print("%s does not exist" % fn)
# increase correction by one because the frame does not exist
m = m + 1
# set the new filename now that m = m + 1
fn = "%s%04d.edf"%(stem,i+m)
if m > 9:
print ("Stopping, as too many files do not exist")
break # break when too many filenames are missing
im = fabio.open(fn) # read the frame data
s = im.data.astype(np.float32) # assign floating point data to s
# used to determine the average
lo = s.copy() # copy for determining minimum
hi = s.copy() # copy for determining maximum
for j in range(n): # loop over a cycle of frames
fn = "%s%04d.edf" % (stem,i+j+m) # set frame filename
while not os.path.isfile(fn) and m < 10: # check whether filename exists
# file does not exist, increase m and try again
m = m + 1
# print filename that does not exist
print("%s does not exist" % fn)
# set new filename
fn = "%s%04d.edf" % (stem,i+j+m)
if m > 9:
print ("Stopping, as too many files do not exist")
break # break when too many filenames are missing
print("%s" % fn) # print frame to be processed
f = fabio.open(fn).data # retrieve frame data
s = s + f # add new frame to previously added frames
# determine the min between the current minimum and the current frame
lo = np.minimum( lo, f )
# determine the max between the current maximum and the current frame
hi = np.maximum( hi, f )
if m > 9:
break # break out if files are not found
s = s / n # determine the average by dividing by the number of files
# assign back to original structure that was created at the beginning
# of the process (reuse)
im.data = s
im.write( "avg%04d.edf"%(i/n)) # write the average to file
print("avg%04d.edf"%(i/n)) # print the average filename
im.data = hi-lo # assign the max - min to the data structure
im.write( "pks%04d.edf"%(i/n)) # write the peaks to file
print("pks%04d.edf"%(i/n)) # print the peaks filename | PypiClean |
/Couchapp-1.0.2.tar.gz/Couchapp-1.0.2/couchapp/client.py
from __future__ import with_statement
import base64
import itertools
import logging
import re
import types
try:
import desktopcouch
try:
from desktopcouch.application import local_files
except ImportError:
from desktopcouch import local_files
except ImportError:
desktopcouch = None
from restkit import Resource, ClientResponse, ResourceError
from restkit import util
from restkit import oauth2 as oauth
from restkit.filters import OAuthFilter
from couchapp import __version__
from couchapp.errors import ResourceNotFound, ResourceConflict, \
PreconditionFailed, RequestFailed, BulkSaveError, Unauthorized, \
InvalidAttachment, AppError
from couchapp.util import json
USER_AGENT = "couchapp/%s" % __version__
aliases = {
'id': '_id',
'rev': '_rev'
}
UNKNOWN_VERSION = tuple()
logger = logging.getLogger(__name__)
class CouchdbResponse(ClientResponse):
@property
def json_body(self):
try:
return json.loads(self.body_string())
except ValueError:
return self.body
class CouchdbResource(Resource):
def __init__(self, uri="http://127.0.0.1:5984", **client_opts):
"""Constructor for a `CouchdbResource` object.
CouchdbResource represent an HTTP resource to CouchDB.
@param uri: str, full uri to the server.
"""
client_opts['response_class'] = CouchdbResponse
Resource.__init__(self, uri=uri, **client_opts)
self.safe = ":/%"
def copy(self, path=None, headers=None, **params):
""" add copy to HTTP verbs """
return self.request('COPY', path=path, headers=headers, **params)
def request(self, method, path=None, payload=None, headers=None,
params_dict=None, **params):
""" Perform HTTP call to the couchdb server and manage
JSON conversions, support GET, POST, PUT and DELETE.
Usage example, get infos of a couchdb server on
http://127.0.0.1:5984 :
import couchdbkit.CouchdbResource
resource = couchdbkit.CouchdbResource()
infos = resource.request('GET')
@param method: str, the HTTP action to be performed:
'GET', 'HEAD', 'POST', 'PUT', or 'DELETE'
@param path: str or list, path to add to the uri
@param data: str or string or any object that could be
converted to JSON.
@param headers: dict, optional headers that will
be added to HTTP request.
@param raw: boolean, response return a Response object
@param params: Optional parameterss added to the request.
Parameterss are for example the parameters for a view. See
`CouchDB View API reference
<http://wiki.apache.org/couchdb/HTTP_view_API>`_ for example.
@return: tuple (data, resp), where resp is an `httplib2.Response`
object and data a python object (often a dict).
"""
headers = headers or {}
headers.setdefault('Accept', 'application/json')
headers.setdefault('User-Agent', USER_AGENT)
logger.debug("Resource uri: %s" % self.initial['uri'])
logger.debug("Request: %s %s" % (method, path))
logger.debug("Headers: %s" % str(headers))
logger.debug("Params: %s" % str(params))
try:
return Resource.request(self, method, path=path,
payload=payload, headers=headers, **params)
except ResourceError, e:
msg = getattr(e, 'msg', '')
if e.response and msg:
if e.response.headers.get('content-type') == \
'application/json':
try:
msg = json.loads(str(msg))
except ValueError:
pass
if type(msg) is dict:
error = msg.get('reason')
else:
error = msg
if e.status_int == 404:
raise ResourceNotFound(error, http_code=404,
response=e.response)
elif e.status_int == 409:
raise ResourceConflict(error, http_code=409,
response=e.response)
elif e.status_int == 412:
raise PreconditionFailed(error, http_code=412,
response=e.response)
elif e.status_int in (401, 403):
raise Unauthorized(e)
else:
raise RequestFailed(str(e))
except Exception, e:
raise RequestFailed("unknown error [%s]" % str(e))
def couchdb_version(server_uri):
res = CouchdbResource(server_uri)
try:
resp = res.get()
except Exception:
return UNKNOWN_VERSION
version = resp.json_body["version"]
t = []
for p in version.split("."):
try:
t.append(int(p))
except ValueError:
continue
return tuple(t)
class Uuids(object):
def __init__(self, uri, max_uuids=1000, **client_opts):
self.res = CouchdbResource(uri=uri, **client_opts)
self._uuids = []
self.max_uuids = max_uuids
def next(self):
if not self._uuids:
self.fetch_uuids()
self._uuids, res = self._uuids[:-1], self._uuids[-1]
return res
def __iter__(self):
return self
def fetch_uuids(self):
count = self.max_uuids - len(self._uuids)
resp = self.res.get('/_uuids', count=count)
self._uuids += resp.json_body['uuids']
class Database(object):
""" Object that abstract access to a CouchDB database
A Database object can act as a Dict object.
"""
def __init__(self, uri, create=True, **client_opts):
if uri.endswith("/"):
uri = uri[:-1]
self.raw_uri = uri
if uri.startswith("desktopcouch://"):
if not desktopcouch:
raise AppError("Desktopcouch isn't available on this" +
"machine. You can't access to %s" % uri)
uri = "http://localhost:%s/%s" % (
desktopcouch.find_port(), uri[15:])
ctx = local_files.DEFAULT_CONTEXT
oauth_tokens = local_files.get_oauth_tokens(ctx)
consumer = oauth.Consumer(oauth_tokens["consumer_key"],
oauth_tokens["consumer_secret"])
token = oauth.Token(oauth_tokens["token"],
oauth_tokens["token_secret"])
oauth_filter = OAuthFilter("*", consumer, token)
filters = client_opts.get("filters") or []
filters.append(oauth_filter)
client_opts["filters"] = filters
self.res = CouchdbResource(uri=uri, **client_opts)
self.server_uri, self.dbname = uri.rsplit('/', 1)
self.uuids = Uuids(self.server_uri, **client_opts)
if create:
# create the db
try:
self.res.head()
except ResourceNotFound:
self.res.put()
def delete(self):
self.res.delete()
def info(self):
"""
Get database information
@param _raw_json: return raw json instead deserializing it
@return: dict
"""
return self.res.get().json_body
def all_docs(self, **params):
"""
return all_docs
"""
return self.view('_all_docs', **params)
def open_doc(self, docid, wrapper=None, **params):
"""Open document from database
Args:
@param docid: str, document id to retrieve
@param rev: if specified, allows you to retrieve
a specific revision of document
@param wrapper: callable. function that takes dict as a param.
Used to wrap an object.
@params params: Other params to pass to the uri (or headers)
@return: dict, representation of CouchDB document as
a dict.
"""
resp = self.res.get(escape_docid(docid), **params)
if wrapper is not None:
if not callable(wrapper):
raise TypeError("wrapper isn't a callable")
return wrapper(resp.json_body)
return resp.json_body
def save_doc(self, doc, encode=False, force_update=False, **params):
""" Save a document. It will use the `_id` member of the document
or request a new uuid from CouchDB. IDs are attached to
documents on the client side because POST has the curious property of
being automatically retried by proxies in the event of network
segmentation and lost responses.
@param doc: dict. doc is updated
with doc '_id' and '_rev' properties returned
by CouchDB server when you save.
@param force_update: boolean, if there is conlict, try to update
with latest revision
@param encode: Encode attachments if needed (depends on couchdb
version)
@return: new doc with updated revision an id
"""
if '_attachments' in doc and encode:
doc['_attachments'] = encode_attachments(doc['_attachments'])
headers = params.get('headers', {})
headers.setdefault('Content-Type', 'application/json')
params['headers'] = headers
if '_id' in doc:
docid = escape_docid(doc['_id'])
try:
resp = self.res.put(docid, payload=json.dumps(doc), **params)
except ResourceConflict:
if not force_update:
raise
rev = self.last_rev(doc['_id'])
doc['_rev'] = rev
resp = self.res.put(docid, payload=json.dumps(doc), **params)
else:
json_doc = json.dumps(doc)
try:
doc['_id'] = self.uuids.next()
resp = self.res.put(doc['_id'], payload=json_doc, **params)
except ResourceConflict:
resp = self.res.post(payload=json_doc, **params)
json_res = resp.json_body
doc1 = {}
for a, n in aliases.items():
if a in json_res:
doc1[n] = json_res[a]
doc.update(doc1)
return doc
def last_rev(self, docid):
""" Get last revision from docid (the '_rev' member)
@param docid: str, undecoded document id.
@return rev: str, the last revision of document.
"""
r = self.res.head(escape_docid(docid))
if "etag" in r.headers:
# yeah new couchdb handle that
return r.headers['etag'].strip('"')
# old way ..
doc = self.open_doc(docid)
return doc['_rev']
def delete_doc(self, id_or_doc):
""" Delete a document
@param id_or_doc: docid string or document dict
"""
if isinstance(id_or_doc, types.StringType):
docid = id_or_doc
resp = self.res.delete(escape_docid(id_or_doc),
rev=self.last_rev(id_or_doc))
else:
docid = id_or_doc.get('_id')
if not docid:
raise ValueError('Not valid doc to delete (no doc id)')
rev = id_or_doc.get('_rev', self.last_rev(docid))
resp = self.res.delete(escape_docid(docid), rev=rev)
return resp.json_body
def save_docs(self, docs, all_or_nothing=False, use_uuids=True):
""" Bulk save. Modify Multiple Documents With a Single Request
@param docs: list of docs
@param use_uuids: add _id in doc who don't have it already set.
@param all_or_nothing: In the case of a power failure, when the
database restarts either all the changes will have been saved or none
of them. However, it does not do conflict checking, so the documents
will.
@return doc lists updated with new revision or raise BulkSaveError
exception. You can access to doc created and docs in error as
properties of this exception.
"""
def is_id(doc):
return '_id' in doc
if use_uuids:
noids = []
for k, g in itertools.groupby(docs, is_id):
if not k:
noids = list(g)
for doc in noids:
nextid = self.uuids.next()
if nextid:
doc['_id'] = nextid
payload = {"docs": docs}
if all_or_nothing:
payload["all-or-nothing"] = True
# update docs
res = self.res.post('/_bulk_docs', payload=json.dumps(payload),
headers={'Content-Type': 'application/json'})
json_res = res.json_body
errors = []
for i, r in enumerate(json_res):
if 'error' in r:
doc1 = docs[i]
doc1.update({'_id': r['id'],
'_rev': r['rev']})
errors.append(doc1)
else:
docs[i].update({'_id': r['id'],
'_rev': r['rev']})
if errors:
raise BulkSaveError(docs, errors)
def delete_docs(self, docs, all_or_nothing=False, use_uuids=True):
""" multiple doc delete."""
for doc in docs:
doc['_deleted'] = True
return self.save_docs(docs, all_or_nothing=all_or_nothing,
use_uuids=use_uuids)
def fetch_attachment(self, id_or_doc, name, headers=None):
""" get attachment in a document
@param id_or_doc: str or dict, doc id or document dict
        @param name: name of the attachment
        @param headers: optional headers (like Range)
        @return: `CouchdbResponse` object
"""
if isinstance(id_or_doc, basestring):
docid = id_or_doc
else:
docid = id_or_doc['_id']
return self.res.get("%s/%s" % (escape_docid(docid), name),
headers=headers)
def put_attachment(self, doc, content=None, name=None, headers=None):
""" Add attachement to a document. All attachments are streamed.
@param doc: dict, document object
@param content: string, iterator, fileobj
@param name: name or attachment (file name).
@param headers: optionnal headers like `Content-Length`
or `Content-Type`
@return: updated document object
"""
headers = {}
content = content or ""
if name is None:
if hasattr(content, "name"):
name = content.name
else:
                raise InvalidAttachment('You should provide a valid ' +
'attachment name')
name = util.url_quote(name, safe="")
res = self.res.put("%s/%s" % (escape_docid(doc['_id']), name),
payload=content, headers=headers, rev=doc['_rev'])
json_res = res.json_body
        if 'ok' in json_res:
            # dict.update() returns None, so refresh the doc and return it explicitly
            doc.update(self.open_doc(doc['_id']))
            return doc
        return False
def delete_attachment(self, doc, name):
""" delete attachement to the document
@param doc: dict, document object in python
@param name: name of attachement
@return: updated document object
"""
name = util.url_quote(name, safe="")
self.res.delete("%s/%s" % (escape_docid(doc['_id']), name),
rev=doc['_rev']).json_body
return doc.update(self.open_doc(doc['_id']))
def view(self, view_name, **params):
try:
dname, vname = view_name.split("/")
path = "/_design/%s/_view/%s" % (dname, vname)
except ValueError:
path = view_name
if "keys" in params:
keys = params.pop("keys")
return self.res.post(path,
json.dumps({"keys":
keys}, **params)).json_body
return self.res.get(path, **params).json_body
def encode_params(params):
""" encode parameters in json if needed """
_params = {}
if params:
for name, value in params.items():
if value is None:
continue
if name in ('key', 'startkey', 'endkey') \
or not isinstance(value, basestring):
value = json.dumps(value).encode('utf-8')
_params[name] = value
return _params
def escape_docid(docid):
if docid.startswith('/'):
docid = docid[1:]
if docid.startswith('_design'):
docid = '_design/%s' % util.url_quote(docid[8:], safe='')
else:
docid = util.url_quote(docid, safe='')
return docid
def encode_attachments(attachments):
for k, v in attachments.iteritems():
if v.get('stub', False):
continue
else:
re_sp = re.compile('\s')
v['data'] = re_sp.sub('', base64.b64encode(v['data']))
    return attachments
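
# A minimal usage sketch (not part of the original module); the server URL and
# database name below are placeholders for a running CouchDB instance.
if __name__ == "__main__":
    db = Database("http://127.0.0.1:5984/couchapp_demo")
    doc = db.save_doc({"type": "example", "count": 1})
    fetched = db.open_doc(doc['_id'])
    db.delete_doc(fetched)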
/Cessa-1.2.5rc1.tar.gz/Cessa-1.2.5rc1/pyke/krb_compiler/kfbparser_tables.py

_tabversion = '3.2'
_lr_method = 'LALR'
_lr_signature = '4\xa4a\x00\xea\xcdZp5\xc6@\xa5\xfa\x1dCA'
_lr_action_items = {'NONE_TOK':([8,12,24,26,],[11,11,11,11,]),'LP_TOK':([5,8,12,24,26,],[8,12,12,12,12,]),'STRING_TOK':([8,12,24,26,],[13,13,13,13,]),'RP_TOK':([8,11,12,13,14,16,17,18,19,20,22,23,26,27,28,29,],[15,-8,22,-14,-13,25,-17,-15,-16,-19,-18,-9,-10,29,-20,-21,]),',':([11,13,14,16,17,18,19,20,22,23,28,29,],[-8,-14,-13,24,-17,-15,-16,-19,-18,26,-20,-21,]),'NUMBER_TOK':([8,12,24,26,],[14,14,14,14,]),'NL_TOK':([0,6,7,15,21,25,],[3,10,-4,-6,-5,-7,]),'TRUE_TOK':([8,12,24,26,],[17,17,17,17,]),'IDENTIFIER_TOK':([0,1,3,8,10,12,24,26,],[-11,5,-12,18,5,18,18,18,]),'FALSE_TOK':([8,12,24,26,],[19,19,19,19,]),'$end':([0,1,2,3,4,6,7,9,10,15,21,25,],[-11,-2,0,-12,-1,-11,-4,-3,-12,-6,-5,-7,]),}
_lr_action = { }
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = { }
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'facts_opt':([1,],[4,]),'nl_opt':([0,6,],[1,9,]),'comma_opt':([23,],[27,]),'data_list':([8,12,],[16,23,]),'file':([0,],[2,]),'facts':([1,],[6,]),'data':([8,12,24,26,],[20,20,28,28,]),'fact':([1,10,],[7,21,]),}
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_goto: _lr_goto[_x] = { }
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> file","S'",1,None,None,None),
('file -> nl_opt facts_opt','file',2,'p_file','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/kfbparser.py',36),
('facts_opt -> <empty>','facts_opt',0,'p_file','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/kfbparser.py',37),
('facts_opt -> facts nl_opt','facts_opt',2,'p_file','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/kfbparser.py',38),
('facts -> fact','facts',1,'p_file','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/kfbparser.py',39),
('facts -> facts NL_TOK fact','facts',3,'p_file','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/kfbparser.py',40),
('fact -> IDENTIFIER_TOK LP_TOK RP_TOK','fact',3,'p_fact0','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/kfbparser.py',45),
('fact -> IDENTIFIER_TOK LP_TOK data_list RP_TOK','fact',4,'p_fact1','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/kfbparser.py',49),
('data -> NONE_TOK','data',1,'p_none','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/kfbparser.py',53),
('comma_opt -> <empty>','comma_opt',0,'p_none','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/kfbparser.py',54),
('comma_opt -> ,','comma_opt',1,'p_none','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/kfbparser.py',55),
('nl_opt -> <empty>','nl_opt',0,'p_none','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/kfbparser.py',56),
('nl_opt -> NL_TOK','nl_opt',1,'p_none','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/kfbparser.py',57),
('data -> NUMBER_TOK','data',1,'p_number','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/kfbparser.py',62),
('data -> STRING_TOK','data',1,'p_string','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/kfbparser.py',67),
('data -> IDENTIFIER_TOK','data',1,'p_quoted_last','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/kfbparser.py',72),
('data -> FALSE_TOK','data',1,'p_false','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/kfbparser.py',77),
('data -> TRUE_TOK','data',1,'p_true','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/kfbparser.py',82),
('data -> LP_TOK RP_TOK','data',2,'p_empty_tuple','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/kfbparser.py',87),
('data_list -> data','data_list',1,'p_start_list','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/kfbparser.py',92),
('data_list -> data_list , data','data_list',3,'p_append_list','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/kfbparser.py',97),
('data -> LP_TOK data_list comma_opt RP_TOK','data',4,'p_tuple','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/kfbparser.py',103),
] | PypiClean |
/ComicConverter-0.1.1.tar.gz/ComicConverter-0.1.1/src/index.ts | import {
JupyterFrontEnd,
JupyterFrontEndPlugin,
} from '@jupyterlab/application';
import { IMainMenu } from '@jupyterlab/mainmenu';
import {
INotebookTools, NotebookActions, NotebookPanel, INotebookModel, Notebook, INotebookTracker
} from '@jupyterlab/notebook';
import { Cell, CodeCell } from '@jupyterlab/cells';
import { toArray } from '@lumino/algorithm';
import { DocumentRegistry } from '@jupyterlab/docregistry';
import { ToolbarButton } from '@jupyterlab/apputils';
import {
IDisposable, DisposableDelegate
} from '@lumino/disposable';
import {
searchIcon,
refreshIcon,
editIcon,
stopIcon,
saveIcon,
} from '@jupyterlab/ui-components';
/**
 * Initialization data for the ComicConverter extension.
*/
const comicTag = 'comic';
const intermediateTag = 'intermediate';
const imgTag = 'img';
const md_bottom = 'bottom';
const md_stack = 'stack';
const fm_full = 'full';
const fm_half = 'half';
const fm_third = 'third';
const fm_twothird = 'twothird';
const notebookWidth = "1100px";
const mouseActionTimeSeparation = 25;
var mouseActionsArray: MouseActions[];
var notebookTracker: INotebookTracker;
var notebookTools: INotebookTools;
var startTime: number;
var csvStr: string;
var queuedEventsElement: HTMLElement[];
var queuedMouseActions: string[];
let showingComic = false;
class MouseActions {
id: string;
mouseEventType: string[];
relativeMousePosXArray: number[];
relativeMousePosYArray: number[];
childIndexArray: number[];
mouseClickTrails: any[];
constructor(cellId: string) {
this.id = cellId;
this.reset();
}
reset = function (this: MouseActions): void {
this.mouseEventType = new Array();
this.relativeMousePosXArray = new Array();
this.relativeMousePosYArray = new Array();
this.childIndexArray = new Array();
this.mouseClickTrails = new Array();
}
updateMetadata = function (this: MouseActions): void {
let cells = notebookTools.activeNotebookPanel.content.model.cells;
for (let i = 0; i < cells.length; ++i) {
if (cells.get(i).id == this.id) {
cells.get(i).metadata.set("mouseEventType", this.mouseEventType);
cells.get(i).metadata.set("relativeMousePosXArray", this.relativeMousePosXArray);
cells.get(i).metadata.set("relativeMousePosYArray", this.relativeMousePosYArray);
cells.get(i).metadata.set("childIndexArray", this.childIndexArray);
cells.get(i).metadata.set("mouseClickTrails", this.mouseClickTrails);
//exit out
return;
}
}
}
updateFromMetadata = function (this: MouseActions): boolean {
let cells = notebookTools.activeNotebookPanel.content.model.cells;
for (let i = 0; i < cells.length; ++i) {
if (cells.get(i).id == this.id && cells.get(i).metadata.has("mouseEventType")) {
this.mouseEventType = cells.get(i).metadata.get("mouseEventType") as string[];
this.relativeMousePosXArray = cells.get(i).metadata.get("relativeMousePosXArray") as number[];
this.relativeMousePosYArray = cells.get(i).metadata.get("relativeMousePosYArray") as number[];
this.childIndexArray = cells.get(i).metadata.get("childIndexArray") as number[];
this.mouseClickTrails = cells.get(i).metadata.get("mouseClickTrails") as number[];
//exit out if found
return true;
}
}
return false;
}
}
const extension: JupyterFrontEndPlugin<void> = {
id: 'ComicConverter',
optional: [IMainMenu, INotebookTools, INotebookTracker],
autoStart: true,
activate: (app: JupyterFrontEnd,
mainMenu: IMainMenu | null,
notebook: INotebookTools | null,
tracker: INotebookTracker
) => {
const { commands } = app;
const comicCommand = 'viewmenu:command';
const intermediateCommand = 'viewmenu:intermediatecommand';
notebookTools = notebook;
notebookTracker = tracker;
startTime = Date.now();
csvStr = "";
var toggleButton = new ToggleInputCodeButton();
app.docRegistry.addWidgetExtension('Notebook', toggleButton);
var resetButton = new ResetButton();
app.docRegistry.addWidgetExtension('Notebook', resetButton);
var newCaptureEventButton = new CaptureEventsButtonExtension();
app.docRegistry.addWidgetExtension('Notebook', newCaptureEventButton);
var saveCSVButton = new SaveCSVButtonExtension();
app.docRegistry.addWidgetExtension('Notebook', saveCSVButton);
mouseActionsArray = new Array();
queuedEventsElement = new Array();
queuedMouseActions = new Array();
NotebookActions.executed.connect(onCellExecute);
notebookTracker.currentChanged.connect(() => {
setTimeout(() => {
//jp-NotebookPanel-notebook
let notebookNode = notebookTracker.currentWidget.node.getElementsByClassName("jp-NotebookPanel-notebook").item(0) as HTMLElement;
notebookNode.style.width = notebookWidth;
notebookNode.style.minWidth = notebookWidth;
notebookNode.style.maxWidth = notebookWidth;
}, 10000);
});
commands.addCommand(comicCommand, {
label: 'Comic Command',
isToggled: () => showingComic,
execute: () => {
showingComic = !showingComic;
logToCSV('View Comic:' + showingComic);
let cellWidgets = notebook.activeNotebookPanel.content.widgets;
for (let i = 0; i < cellWidgets.length; ++i) {
var cell = cellWidgets[i];
var isComicTag = false;
if (IsComicCell(cell)) {
isComicTag = true;
if (cell.model.type == 'code') {
formatOutputArea(cell, showingComic);
}
else if (cell.model.type == 'markdown') {
if (showingComic) {
cell.hide();
} else {
cell.show();
}
}
//return to notebook view and current intermediate setting
if (!showingComic && IsIntermediateCell(cell)) {
if (showingIntermediate) {
cell.show();
}
else {
cell.hide();
}
}
}
if (!isComicTag) {
//not a comic cell
if (showingComic) {
cell.node.style.setProperty('display', 'none');
} else {
cell.node.style.setProperty('display', '');
}
}
}
if (showingComic) {
for (let i = 0; i < cellWidgets.length; ++i) {
var cell = cellWidgets[i];
if (IsComicCell(cell) && cell.model.type == 'code') {
var elements = getOutputAreaElements(cell.node);
fixComicLayout(elements.output_arr[0].item(0).parentElement as HTMLElement, cell);
}
}
}
notebook.activeCell.node.scrollIntoView(true);
}
});
let showingIntermediate = false;
commands.addCommand(intermediateCommand, {
label: 'intermediate',
isToggled: () => showingIntermediate,
execute: () => {
showingIntermediate = !showingIntermediate;
logToCSV('View Intermediate:' + showingIntermediate);
let cellWidgets = notebook.activeNotebookPanel.content.widgets;
for (let i = 0; i < cellWidgets.length; ++i) {
var cell = cellWidgets[i];
if (IsIntermediateCell(cell)) {
if (showingIntermediate) {
cell.show();
}
else {
cell.hide();
}
}
}
notebook.activeCell.node.scrollIntoView(true);
}
});
if (mainMenu) {
mainMenu.viewMenu.addGroup([{ command: comicCommand }]);
mainMenu.viewMenu.addGroup([{ command: intermediateCommand }]);
}
commands.addKeyBinding({
command: comicCommand,
args: {},
keys: ['Accel Shift C'],
selector: '.jp-Notebook'
});
commands.addKeyBinding({
command: intermediateCommand,
args: {},
keys: ['Accel Shift I'],
selector: '.jp-Notebook'
});
}
};
function onCellExecute(slot: any, args: {
notebook: Notebook;
cell: Cell;
}) {
if (args.cell.model.type == 'code') {
setTimeout(function () {
var codeCell = (<CodeCell>args.cell);
queuedMouseActions.push(codeCell.model.id);
queuedEventsElement.push(codeCell.outputArea.node);
if (queuedMouseActions.length > 0 && !isDispatchingEvents) {
dispatchEvents();
var myLoop = function () {
setTimeout(function () {
if (!isDispatchingEvents) {
applyCodeFrame(codeCell);
return;
}
myLoop();
}, 500);
};
myLoop();
}
else {
applyCodeFrame(codeCell);
}
}, 1000);
}
}
function applyCodeFrame(codeCell: CodeCell) {
if (IsComicCell(codeCell)) {
var element = getOutputAreaElements(codeCell.node);
element.frame.setAttribute('style', '');
element.frame.parentElement.parentElement.parentElement.setAttribute('style', '');
formatOutputArea(codeCell, showingComic);
element.frame.scrollIntoView(true);
}
}
function IsComicCell(cell: Cell): boolean {
if (cell !== undefined) {
let tags = cell.model.metadata.get('tags') as string[];
if (tags) {
if (tags.find((tag) => tag == comicTag || tag == intermediateTag)) {
return true;
}
}
}
return false;
}
function IsBottomMarkdown(cell: Cell): boolean {
if (cell !== undefined) {
let tags = cell.model.metadata.get('tags') as string[];
if (tags) {
if (tags.find((tag) => tag == md_bottom)) {
return true;
}
}
}
return false;
}
function IsMarkdownStacked(cell: Cell): boolean {
if (cell !== undefined) {
let tags = cell.model.metadata.get('tags') as string[];
if (tags) {
if (tags.find((tag) => tag == md_stack)) {
return true;
}
}
}
return false;
}
function getComicWidth(cell: Cell): string {
if (cell !== undefined) {
let tags = cell.model.metadata.get('tags') as string[];
if (tags) {
if (tags.find((tag) => tag == fm_full)) {
return fm_full;
} else if (tags.find((tag) => tag == fm_half)) {
return fm_half;
}
else if (tags.find((tag) => tag == fm_third)) {
return fm_third;
} else if (tags.find((tag) => tag == fm_twothird)) {
return fm_twothird;
}
}
}
return;
}
function getComicHeight(cell: Cell): string {
if (cell !== undefined) {
let tags = cell.model.metadata.get('tags') as string[];
if (tags) {
for (let i = 0; i < tags.length; ++i) {
if (tags[i].startsWith("height")) {
return tags[i].split(':')[1]; //should be "height:100px" or some similar number
}
}
}
}
return "";
}
function IsImageCell(cell: Cell): boolean {
if (cell !== undefined) {
let tags = cell.model.metadata.get('tags') as string[];
if (tags) {
if (tags.find((tag) => tag == imgTag)) {
return true;
}
}
}
return false;
}
function img_cell_formatting(frame: any, cell: Cell) {
if (IsImageCell(cell)) {
var img = frame.getElementsByClassName('jp-OutputArea-output');
img[0].firstElementChild.setAttribute('style', 'width:100%; height:100%; object - fit: cover;');
}
}
function IsIntermediateCell(cell: Cell): boolean {
if (cell !== undefined) {
let tags = cell.model.metadata.get('tags') as string[];
if (tags) {
if (tags.find((tag) => tag == intermediateTag)) {
return true;
}
}
}
return false;
}
//frame.parentElement.parentElement.parentElement = jp-Notebook-cell
function set_frameStyle(frame: HTMLElement, widthTag: string, heightTag: string) {
let notebookCell = frame.parentElement.parentElement.parentElement;
notebookCell.setAttribute('style', 'width:100%; position:relative; float:left; resize:both; overflow:hidden; height:auto;');
frame.style.backgroundColor = "white";
frame.style.border = "solid 2px";
frame.style.width = "100%";
frame.style.height = "100%";
frame.style.overflow = "hidden";
frame.style.position = "relative";
frame.style.margin = "0px !important";
frame.style.float = "left";
if (widthTag == fm_full) {
notebookCell.style.width = '100%';
}
else if (widthTag == fm_third) {
notebookCell.style.width = '33.3%';
}
else if (widthTag == fm_twothird) {
notebookCell.style.width = '66.6%';
}
else { //if (tag == fm_half)
notebookCell.style.width = '50%';
}
if (heightTag != "") {
notebookCell.style.height = heightTag;
}
// hide leftside part of the output
frame.firstElementChild.setAttribute('style', 'display:none;');
};
function hide_matplot_executeResult(frame: any) {
let childElements = frame.parentElement.getElementsByClassName('jp-OutputArea-executeResult');
let firstChild = frame.parentElement.firstElementChild; //first child is exempt from being hidden, output with just text usually is last
//TODO: see how long this solution last :)
let lastChild = frame.parentElement.lastElementChild;
if (childElements.length > 0) {
for (let child of childElements) {
if (child != firstChild && child == lastChild) {
child.setAttribute('style', 'display:none');
}
}
}
};
function markdownFunction(markdown: HTMLElement, isBottom: boolean) {
var text = markdown.firstChild.nextSibling.lastChild.childNodes[2].textContent;
let verticalPos = "top:0px;";
if (isBottom) {
verticalPos = "bottom:0px;";
}
var annotationbox = document.createElement('p');
annotationbox.innerText = text;
annotationbox.style.cssText = "color: black; border:1px solid black; z-index:1; background-color:white; width: auto; height:auto; position:absolute !important; margine:4px; font-size: large;" + verticalPos;
annotationbox.setAttribute('class', 'annobox');
return annotationbox;
}
function graph_responsive(frame: any) {
frame.firstElementChild.nextElementSibling.setAttribute('style', 'width:100%;overflow: hidden;');
}
//assumes comic frames have been applied to all cells
function fixComicLayout(notebookCellElement: HTMLElement, cell: Cell) {
let cells = notebookTools.activeNotebookPanel.content.widgets;
let currentIndex = cells.findIndex((tempCell) => tempCell == cell);
let currentLeft = notebookCellElement.offsetLeft;
let leftCellIndex = -1;
for (let i = currentIndex - 1; i >= 0; --i) {
if (IsComicCell(cells[i]) && cells[i].model.type == 'code' && cells[i].node.offsetLeft < currentLeft) {
leftCellIndex = i;
break;
}
}
//already on the left side, do nothing
if (leftCellIndex < 0) {
return;
}
let heightDiff = notebookCellElement.offsetTop + notebookCellElement.clientHeight - (cells[leftCellIndex].node.offsetTop + cells[leftCellIndex].node.clientHeight);
//right side extends farther
if (heightDiff > 0) {
if (heightDiff > notebookCellElement.clientHeight / 2) {
let prevCellIndex = -1;
for (let i = currentIndex - 1; i > leftCellIndex; --i) {
if (IsComicCell(cells[i]) && cells[i].model.type == 'code' && cells[i].node.offsetLeft == currentLeft) {
prevCellIndex = i;
break;
}
}
if (prevCellIndex > 0) {
let prevNotebookCellElement = cells[prevCellIndex].node.getElementsByClassName("jp-Cell-outputWrapper").item(0).parentElement;
let bottomMargin = ((cells[leftCellIndex].node.offsetTop + cells[leftCellIndex].node.clientHeight) - (prevNotebookCellElement.offsetTop + prevNotebookCellElement.clientHeight)) + 0.5;
prevNotebookCellElement.style.marginBottom = "" + bottomMargin + "px";
}
}
else {
cells[leftCellIndex].node.style.marginBottom = "" + heightDiff + "px"
}
}
};
function findCorrespondingMarkdownCell(cell: Cell): Cell {
let cells = notebookTools.activeNotebookPanel.content.widgets;
for (let i = 0; i < cells.length; ++i) {
if (cells[i] == cell) {
let codeCount = 0;
let markdownSplitIndex = -1;
//find code cell index
for (let j = i - 1; j >= 0; --j) {
markdownSplitIndex = j;
if (cells[j].model.type != 'code' || !IsComicCell(cells[j])) {
break;
}
codeCount++;
}
//find markdown cell root
for (let j = markdownSplitIndex; j >= 0; --j) {
if (cells[j].model.type != 'markdown' || !IsComicCell(cells[j])) {
let markdownCellIndex = j + 1 + codeCount;
if (markdownCellIndex <= markdownSplitIndex) {
return cells[markdownCellIndex];
}
else {
//no annotation found
break;
}
break;
}
}
break;
}
}
return null;
}
function getOutputAreaElements(node: HTMLElement) {
var arr = [node.getElementsByClassName('jp-Cell-inputWrapper')];
var output_arr = [node.getElementsByClassName('jp-Cell-outputWrapper')];
var frame = output_arr[0].item(0).getElementsByClassName('jp-OutputArea-child').item(0) as HTMLElement;
var codecell = arr[0].item(0);
return { arr: arr, output_arr: output_arr, frame: frame, codecell: codecell };
};
function formatOutputArea(cell: Cell, showComicView: boolean) {
var elements = getOutputAreaElements(cell.node);
var arr = elements.arr;
var frame = elements.frame;
var codecell = elements.codecell;
if (showComicView) {
cell.show();
set_frameStyle(frame, getComicWidth(cell), getComicHeight(cell));
hide_matplot_executeResult(frame);
graph_responsive(frame);
codecell.setAttribute("style", "display: none;");
img_cell_formatting(frame, cell);
var markdownCell = findCorrespondingMarkdownCell(cell);
if (markdownCell != null) {
var markdown = markdownCell.node;
let isBottom = IsBottomMarkdown(markdownCell);
let markdownElement = markdownFunction(markdown, isBottom);
//appending markdown
frame.firstChild.after(markdownElement);
if (IsMarkdownStacked(markdownCell)) {
if (isBottom) {
frame.getElementsByClassName("jp-OutputArea-output").item(0).setAttribute('style', "width:100%;overflow: hidden; margin-bottom:" + markdownElement.clientHeight + "px;");
}
else {
frame.getElementsByClassName("jp-OutputArea-output").item(0).setAttribute('style', "width:100%;overflow: hidden; margin-top:" + markdownElement.clientHeight + "px;");
}
}
//hide markdown cell if we're showing the comic view
markdownCell.hide();
}
}
else { //reset to notebook view
var new_f = document.getElementsByClassName('new_frame');
if (new_f == null) {
return;
}
var annobox = document.getElementsByClassName("annobox");
arr[0].item(0).setAttribute("style", "display: ;");
frame.setAttribute('style', '');
frame.firstElementChild.setAttribute('style', 'display:;'); //show prompt
//jp-Notebook-cell, reset style (style:width gets overwritten in comic view)
frame.parentElement.parentElement.parentElement.setAttribute('style', '');
if (annobox[0] != null) {
for (var j = 0; j < annobox.length; j++) {
annobox[j].remove();
}
}
if (new_f[0] != null) {
for (var j = 0; j < new_f.length; j++) {
new_f[j].remove();
}
}
}
}
export class ToggleInputCodeButton implements DocumentRegistry.IWidgetExtension<NotebookPanel, INotebookModel> {
//private previousCell: Cell;
private previousMargin = "";
createNew(panel: NotebookPanel, context: DocumentRegistry.IContext<INotebookModel>): IDisposable {
let callback = () => {
if (showingComic) {
var cell = panel.content.activeCell;
if (cell != null) {
if (cell.model.type == 'code') {
var elements = getOutputAreaElements(cell.node);
var frame = elements.frame;
var codeArea = elements.codecell;
//toggle code area and annobox depending if the code area is currently showing or not
var isCodeShowing = false;
if (codeArea.getAttribute('style') == "display: ;") {
isCodeShowing = true;
}
logToCSV('ToggleInputCodeButton:' + isCodeShowing);
var markdown = findCorrespondingMarkdownCell(cell);
if (!isCodeShowing) { //in comic view, show code
this.previousMargin = elements.output_arr[0].item(0).parentElement.style.marginBottom;
frame.setAttribute('style', '');
frame.parentElement.parentElement.parentElement.setAttribute('style', '');
frame.firstElementChild.setAttribute('style', 'display:;'); //show prompt
markdown?.show();
}
else {
set_frameStyle(frame, getComicWidth(cell), getComicHeight(cell));
markdown?.hide();
elements.output_arr[0].item(0).parentElement.style.marginBottom = this.previousMargin;
}
isCodeShowing ? codeArea.setAttribute("style", "display: none;") : codeArea.setAttribute("style", "display: ;");
for (var node of frame.children) {
if (node.className == 'annobox') {
var currentStyle = node.getAttribute('style');
currentStyle = isCodeShowing ? currentStyle.replace("display: none;", "") : currentStyle.concat("display: none;");
node.setAttribute('style', currentStyle);
}
}
frame.scrollIntoView(true);
}
}
}
};
let button = new ToolbarButton({
className: 'showCode',
icon: searchIcon,
onClick: callback,
tooltip: 'Show Comic code'
});
panel.toolbar.insertItem(0, 'showCC', button);
return new DisposableDelegate(() => {
button.dispose();
});
}
}
function reconnectCellExecution() {
NotebookActions.executed.disconnect(reconnectCellExecution);
NotebookActions.executed.connect(onCellExecute);
}
export class ResetButton implements DocumentRegistry.IWidgetExtension<NotebookPanel, INotebookModel> {
createNew(panel: NotebookPanel, context: DocumentRegistry.IContext<INotebookModel>): IDisposable {
let callback = () => {
logToCSV('ResetButton:');
let cellId = panel.content.activeCell.model.id;
let ma = getMouseActions(cellId);
ma.reset();
ma.updateMetadata();
NotebookActions.executed.disconnect(onCellExecute);
NotebookActions.executed.connect(reconnectCellExecution);
NotebookActions.run(panel.content, panel.sessionContext);
};
let button = new ToolbarButton({
className: 'reset',
icon: refreshIcon,
onClick: callback,
tooltip: 'Reset cell'
});
panel.toolbar.insertItem(1, 'reset', button);
return new DisposableDelegate(() => {
button.dispose();
});
}
}
//action replay
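// Recorded interactions are replayed one cell at a time: queuedMouseActions holds the
// ids of code cells whose output still needs replay; dispatchEvents() pops one id,
// restores its MouseActions from the cell metadata, and re-dispatches each stored event
// against the current output DOM at the recorded relative position, one event every
// mouseActionTimeSeparation milliseconds.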
var isDispatchingEvents = false;
const dispatchEvents = function () {
if (queuedMouseActions.length > 0) {
isDispatchingEvents = true;
isCallingBack = true;
let mouseActionsId = queuedMouseActions.shift();
let ma = getMouseActions(mouseActionsId);
if (ma == null) {
isDispatchingEvents = false;
isCallingBack = false;
return;
}
let mouseClickIndex = 0;
ma.updateFromMetadata();
let node = queuedEventsElement.shift();
let i = 0;
var myLoop = function () {
setTimeout(function () {
if (i >= ma.mouseEventType.length) {
dispatchEvents(); //iterate new loop
return;
}
let outputAreaElement = node.children[ma.childIndexArray[i]];
outputAreaElement = outputAreaElement.getElementsByClassName('jp-OutputArea-output')[0];
//outputAreaElement = outputAreaElement.children[1]; //make outputAreaElement equal jp-OutputArea-output, first is always the prompt box
outputAreaElement.scrollIntoView(true);
let rect = outputAreaElement.getBoundingClientRect();
let posX = (rect.width * ma.relativeMousePosXArray[i]) + rect.left;
let posY = (rect.height * ma.relativeMousePosYArray[i]) + rect.top;
if (ma.mouseEventType[i] == 'click' || ma.mouseEventType[i] == 'dblclick') {
for (let j = ma.mouseClickTrails[mouseClickIndex].length - 1; j >= 0; --j) {
let index = ma.mouseClickTrails[mouseClickIndex][j];
outputAreaElement = outputAreaElement.children[index];
}
//when going really deep, might have like no rect
//ok if 1, maybe 0 also works
//TODO: more testing
posX = outputAreaElement.clientLeft + outputAreaElement.scrollLeft + 1;
posY = outputAreaElement.clientTop + outputAreaElement.scrollTop + 1;
}
let newMouseEvent = new MouseEvent(ma.mouseEventType[i],
{
bubbles: true,
clientX: posX,
clientY: posY,
});
if (ma.mouseEventType[i] == 'click' || ma.mouseEventType[i] == 'dblclick') {
outputAreaElement.dispatchEvent(newMouseEvent);
mouseClickIndex++;
}
else {
let e = document.elementFromPoint(posX, posY);
if (e != null && e !== undefined) {
e.dispatchEvent(newMouseEvent);
}
}
i++;
myLoop();
}, mouseActionTimeSeparation);
}
myLoop();
}
else {
isDispatchingEvents = false;
isCallingBack = false;
}
};
const getIndexTrail = function (x: number, y: number): number[] {
var trail: number[];
trail = new Array();
let e = document.elementFromPoint(x, y);
while (e != null && !e.classList.contains('jp-OutputArea-output')) {
for (let i = 0; i < e.parentElement.children.length; ++i) {
if (e.parentElement.children[i] == e) {
trail.push(i);
break;
}
}
e = e.parentElement;
}
return trail;
};
const getOutputAreaRect = function (event: MouseEvent) {
let e = (<HTMLElement>event.target);
while (e != null && !e.classList.contains('jp-OutputArea-child')) {
e = e.parentElement;
}
if (e != null) {
for (let i = 0; i < e.parentElement.childElementCount; ++i) {
if (e.parentElement.children[i] == e) {
e = <HTMLElement>e.getElementsByClassName('jp-OutputArea-output')[0];
//e = (<HTMLElement>e.children[1]); //set to jp-OutputArea-output, 0 is always the prompt box
return { rect: e.getBoundingClientRect(), index: i };
}
}
}
return { rect: null, index: -1 };
};
const recordClick = function (this: HTMLElement, event: MouseEvent): void {
if (isCallingBack)
return;
let rect = getOutputAreaRect(event);
if (rect.index < 0) {
return;
}
var cellId = notebookTools.activeCell.model.id;
var actions = getMouseActions(cellId);
actions.childIndexArray.push(rect.index);
actions.relativeMousePosXArray.push((event.clientX - rect.rect.left) / rect.rect.width);
actions.relativeMousePosYArray.push((event.clientY - rect.rect.top) / rect.rect.height);
actions.mouseEventType.push(event.type);
actions.mouseClickTrails.push(getIndexTrail(event.clientX, event.clientY));
};
var gRect: DOMRect;
const recordMouseDown = (event: MouseEvent): void => {
if (isCallingBack)
return;
var rect = getOutputAreaRect(event);
let index = rect.index;
if (index < 0) {
return;
}
var cellId = notebookTools.activeCell.model.id;
var actions = getMouseActions(cellId);
actions.childIndexArray.push(index);
gRect = rect.rect;
actions.relativeMousePosXArray.push((event.clientX - gRect.left) / gRect.width);
actions.relativeMousePosYArray.push((event.clientY - gRect.top) / gRect.height);
actions.mouseEventType.push(event.type);
document.addEventListener('mousemove', recordMouseMove);
document.addEventListener('mouseup', recordDocumentMouseUp);
};
const recordMouseMove = (event: MouseEvent): void => {
if (isCallingBack)
return;
var cellId = notebookTools.activeCell.model.id;
var actions = getMouseActions(cellId);
let cia = actions.childIndexArray;
//push what's at the back repeatedly, same index as from mousedown
cia.push(cia[cia.length - 1]);
var rect = gRect;
actions.relativeMousePosXArray.push((event.clientX - rect.left) / rect.width);
actions.relativeMousePosYArray.push((event.clientY - rect.top) / rect.height);
actions.mouseEventType.push(event.type);
};
const recordDocumentMouseUp = function (event: MouseEvent): void {
if (isCallingBack)
return;
var cellId = notebookTools.activeCell.model.id;
var actions = getMouseActions(cellId);
let cia = actions.childIndexArray;
//push what's at the back repeatedly, same index as from mousedown
cia.push(cia[cia.length - 1]);
var rect = gRect;
actions.relativeMousePosXArray.push((event.clientX - rect.left) / rect.width);
actions.relativeMousePosYArray.push((event.clientY - rect.top) / rect.height);
actions.mouseEventType.push(event.type);
document.removeEventListener('mousemove', recordMouseMove);
document.removeEventListener('mouseup', recordDocumentMouseUp);
gRect = null;
};
const containsMouseActions = function (cellId: string): boolean {
let isFound = false;
for (let i = 0; i < mouseActionsArray.length; ++i) {
if (mouseActionsArray[i].id == cellId) {
isFound = true;
break;
}
}
return isFound;
}
const getMouseActions = function (cellId: string): MouseActions {
for (let i = 0; i < mouseActionsArray.length; ++i) {
if (mouseActionsArray[i].id == cellId) {
return mouseActionsArray[i];
}
}
//if not found
let ma = new MouseActions(cellId);
ma.updateFromMetadata();
mouseActionsArray.push(ma);
return ma;
}
var isCallingBack: boolean = false;
export class CaptureEventsButtonExtension implements DocumentRegistry.IWidgetExtension<NotebookPanel, INotebookModel> {
createNew(panel: NotebookPanel, context: DocumentRegistry.IContext<INotebookModel>): IDisposable {
let recordingCallback = () => {
isDispatchingEvents = false;
isCallingBack = false;
if (panel.content.activeCell.model.type == 'code') {
logToCSV('CaptureEventsButtonExtension: Record');
var codeCell = (<CodeCell>panel.content.activeCell);
if (!containsMouseActions(codeCell.model.id)) {
mouseActionsArray.push(new MouseActions(codeCell.model.id));
}
let actions = getMouseActions(codeCell.model.id);
actions.reset();
codeCell.outputArea.widgets.forEach((widget) => {
//output area child
let children = toArray(widget.children());
for (var i = 0; i < children.length; ++i) {
if (children[i].node.classList.contains('jp-OutputArea-output')) {
children[i].node.removeEventListener('click', recordClick);
children[i].node.removeEventListener('dblclick', recordClick);
children[i].node.removeEventListener('mousedown', recordMouseDown);
children[i].node.addEventListener('click', recordClick);
children[i].node.addEventListener('dblclick', recordClick);
children[i].node.addEventListener('mousedown', recordMouseDown);
}
}
});
}
};
let stopRecordingCallback = () => {
if (panel.content.activeCell.model.type == 'code') {
logToCSV('CaptureEventsButtonExtension: StopRecord');
var codeCell = (<CodeCell>panel.content.activeCell);
let actions = getMouseActions(codeCell.model.id);
actions.updateMetadata();
}
};
let recordButton = new ToolbarButton({
className: 'record',
icon: editIcon,
onClick: recordingCallback,
tooltip: 'record actions'
});
let stopButton = new ToolbarButton({
className: 'stop',
icon: stopIcon,
onClick: stopRecordingCallback,
tooltip: 'stop recording'
});
panel.toolbar.insertItem(2, 'record', recordButton);
panel.toolbar.insertItem(3, 'stop', stopButton);
return new DisposableDelegate(() => {
recordButton.dispose();
stopButton.dispose();
});
}
}
function logToCSV(log: string) {
let timeStamp = Math.floor((Date.now() - startTime) / 1000);
console.log(log + " " + timeStamp);
csvStr += log + "," + timeStamp + ",\n";
}
export class SaveCSVButtonExtension implements DocumentRegistry.IWidgetExtension<NotebookPanel, INotebookModel> {
createNew(panel: NotebookPanel, context: DocumentRegistry.IContext<INotebookModel>): IDisposable {
let callback = () => {
var hiddenElement = document.createElement('a');
hiddenElement.href = 'data:text/csv;charset=utf-8,' + encodeURI(csvStr);
hiddenElement.target = '_blank';
hiddenElement.download = 'output.csv';
hiddenElement.click();
};
let recordButton = new ToolbarButton({
className: 'saveCSV',
icon: saveIcon,
onClick: callback,
tooltip: 'save csv'
});
panel.toolbar.insertItem(4, 'saveCSV', recordButton);
return new DisposableDelegate(() => {
recordButton.dispose();
});
}
}
export default extension; | PypiClean |
/Cheetah-2.4.4.tar.gz/Cheetah-2.4.4/cheetah/Macros/I18n.py | import gettext
_ = gettext.gettext
class I18n(object):
def __init__(self, parser):
pass
## junk I'm playing with to test the macro framework
# def parseArgs(self, parser, startPos):
# parser.getWhiteSpace()
# args = parser.getExpression(useNameMapper=False,
# pyTokensToBreakAt=[':']).strip()
# return args
#
# def convertArgStrToDict(self, args, parser=None, startPos=None):
# def getArgs(*pargs, **kws):
# return pargs, kws
# exec 'positionalArgs, kwArgs = getArgs(%(args)s)'%locals()
# return kwArgs
def __call__(self,
src, # aka message,
plural=None,
n=None, # should be a string representing the name of the
# '$var' rather than $var itself
id=None,
domain=None,
source=None,
target=None,
comment=None,
# args that are automatically supplied by the parser when the
# macro is called:
parser=None,
macros=None,
isShortForm=False,
EOLCharsInShortForm=None,
startPos=None,
endPos=None,
):
"""This is just a stub at this time.
plural = the plural form of the message
n = a sized argument to distinguish between single and plural forms
id = msgid in the translation catalog
domain = translation domain
source = source lang
target = a specific target lang
comment = a comment to the translation team
See the following for some ideas
http://www.zope.org/DevHome/Wikis/DevSite/Projects/ComponentArchitecture/ZPTInternationalizationSupport
Other notes:
- There is no need to replicate the i18n:name attribute from plone / PTL,
as cheetah placeholders serve the same purpose
"""
#print macros['i18n']
src = _(src)
if isShortForm and endPos<len(parser):
return src+EOLCharsInShortForm
else:
return src | PypiClean |
/Layer-7-Utilities-1.2.11rc1.tar.gz/Layer-7-Utilities-1.2.11rc1/ReadMe.md | # What is this?
This is a set of utilities used by https://Layer7.Solutions for the software tools we create. It includes a logger with default configuration information we set up, as well as an oAuth wrapper that pulls login information from a custom database.
To create the oAuth database, the following creation SQL can be used:
```sql
create table oauth_data
(
username text not null
constraint oauth_pkey
primary key,
password text,
app_id text,
app_secret text,
app_refresh text,
agent_of text
);
```
# How To Build And Install
1. Be inside the root folder of the project
2. Run `python3 setup.py sdist`
3. Run `pip install .`
---
# How To Use:
### Logger:
This creates a custom logger using default file handler, sentry.io integration, and log rotation. A default logspath is set to '/opt/skynet/RedditBots/logs/' however you can override that to your own location.
Initialization and configuration:
```Python
import logging
import logging.config
from layer7_utilities import LoggerConfig
__botname__ = 'Short_Name_For_The_Bot'
__description__ = 'Description of the bot'
__author__ = 'Authors Name/Info'
__version__ = '1.2.3'
__dsn__ = 'Get from Sentry.io'
# Create the logger
logspath = 'Path/To/The/Logs/Folder/' # With trailing backslash.
loggerconfig = LoggerConfig(__dsn__, __botname__, __version__, logspath)
logging.config.dictConfig(loggerconfig.get_config())
logger = logging.getLogger('root')
logger.info(u"/*********Starting App*********\\")
logger.info(u"App Name: {} | Version: {}".format(__botname__, __version__))
```
### Auth
Auth relies on a custom table housing the Reddit application ID, secret, username, password, etc. This is not intended to be set up by anyone else. However, if you have access to our database, or are writing a bot that will take advantage of it, then it can be set up as follows.
In the Layer 7 environment the Auth Database Table is 'TheTraveler'.
```Python
from layer7_utilities import oAuth
__botname__ = 'Short_Name_For_The_Bot'
__description__ = 'Description of the bot'
__author__ = 'Authors Name/Info'
__version__ = '1.2.3'
__dsn__ = 'Get from Sentry.io'
__agent_of__ = 'category value'
auth = oAuth()
auth.get_accounts(__agent_of__, __description__, __version__, __author__, __botname__, DB_USERNAME, DB_PASSWORD, DB_HOST, DatabaseTableName)
for account in auth.accounts:
r = account.login()
me = r.user.me()
print('Started Reddit Instance: u/%s' % me)
``` | PypiClean |
/Banner_9000-1.0.1.tar.gz/Banner_9000-1.0.1/BigChars/__init__.py | glyph_dict = \
{' ': [' ',
' ',
' ',
' ',
' ',
' ',
' ',
' ',
' '],
'!': [' XX ',
' XX ',
' XX ',
' XX ',
' XX ',
' ',
' XX ',
' ',
' '],
'#': [' XX XX ',
' XX XX ',
'XXXXXXX ',
' XX XX ',
'XXXXXXX ',
' XX XX ',
' XX XX ',
' ',
' '],
'$': [' XX ',
' XXXXXX ',
' X ',
' XXXXXX ',
' X ',
' XXXXXX ',
' XX ',
' ',
' '],
'%': ['XX X ',
'XX XX ',
' XX ',
' XX ',
' XX ',
' XX XX ',
'XX XX ',
' ',
' '],
'&': [' XXX ',
' X X ',
' XXX ',
' XXXX XX',
'XX X XX ',
'XX XX ',
' XXX XXX',
' ',
' '],
'(': [' XXX ',
' XXX ',
'XX ',
'XX ',
'XX ',
' XXX ',
' XXX ',
' ',
' '],
')': [' XXX ',
' XXX ',
' XX ',
' XX ',
' XX ',
' XXX ',
' XXX ',
' ',
' '],
'*': [' ',
' XX XX ',
' XXX ',
'XXXXXXX ',
' XXX ',
' XX XX ',
' ',
' ',
' '],
'+': [' XX ',
' XX ',
' XX ',
'XXXXXX ',
' XX ',
' XX ',
' XX ',
' ',
' '],
',': [' ',
' ',
' ',
' ',
' ',
' XX ',
' X ',
' X ',
' '],
'-': [' ',
' ',
' ',
' XXXXX ',
' ',
' ',
' ',
' ',
' '],
'.': [' ',
' ',
' ',
' ',
' ',
' XX ',
' XX ',
' ',
' '],
'/': [' X ',
' XX ',
' XX ',
' XX ',
' XX ',
' XX ',
'XX ',
' ',
' '],
'0': [' XXXXX ',
'XX XX ',
'XX XXX ',
'XX X XX ',
'XXX XX ',
'XX XX ',
' XXXXX ',
' ',
' '],
'1': [' X ',
' XXX ',
' XX ',
' XX ',
' XX ',
' XX ',
' XXXX ',
' ',
' '],
'2': [' XXXX ',
'XX XX ',
' XX ',
' XX ',
' XX ',
' XX X ',
'XXXXXXX ',
' ',
' '],
'3': [' XXXX ',
'XX XX ',
' XX ',
' XXX ',
' XX ',
'XX XX ',
' XXXX ',
' ',
' '],
'4': [' XX ',
' XXX ',
' XX XX ',
'XX XX ',
'XXXXXXX ',
' XX ',
' XXXX ',
' ',
' '],
'5': [' XXXXXX ',
' X ',
' X ',
' XXXXX ',
' XX ',
'XX XX ',
' XXXXX ',
' ',
' '],
'6': [' XXXX ',
' XX X ',
'XX ',
'XXXXXX ',
'XX XX ',
'XX XX ',
' XXXXX ',
' ',
' '],
'7': ['XXXXXXX ',
'X XX ',
' XX ',
' XX ',
' XX ',
' XX ',
' XX ',
' ',
' '],
'8': [' XXXX ',
' XX XX ',
' XX XX ',
' XXXX ',
' XX XX ',
' XX XX ',
' XXXX ',
' ',
' '],
'9': [' XXXXX ',
'XX XX ',
'XX XX ',
' XXXXXX ',
' XX ',
' X XX ',
' XXXX ',
' ',
' '],
'<': [' XX ',
' XX ',
' XX ',
'XX ',
' XX ',
' XX ',
' XX ',
' ',
' '],
'=': [' ',
' ',
' XXXXX ',
' ',
' XXXXX ',
' ',
' ',
' ',
' '],
'>': ['XX ',
' XX ',
' XX ',
' XX ',
' XX ',
' XX ',
'XX ',
' ',
' '],
'?': [' XXXX ',
'XX XX ',
'X XX ',
' XXX ',
' XX ',
' ',
' XX ',
' ',
' '],
'@': [' XXXX ',
' X XX ',
'XX X ',
'XX XXX ',
'XX XX ',
' X ',
' XXXX ',
' ',
' '],
'A': [' XX ',
' XXXX ',
' XX XX ',
' XX XX ',
' XXXXXX ',
' XX XX ',
' XX XX ',
' ',
' '],
'B': ['XXXXXX ',
' XX XX ',
' XX XX ',
' XXXXX ',
' XX XX ',
' XX XX ',
'XXXXXX ',
' ',
' '],
'C': [' XXXX ',
' XX XX ',
'XX ',
'XX ',
'XX X ',
' XX XX ',
' XXXX ',
' ',
' '],
'D': ['XXXXXX ',
' XX XX ',
' XX XX ',
' XX XX ',
' XX XX ',
' XX XX ',
'XXXXXX ',
' ',
' '],
'E': ['XXXXXXX ',
' XX X ',
' XX X ',
' XXXX ',
' XX X ',
' XX X ',
'XXXXXXX ',
' ',
' '],
'F': ['XXXXXXX ',
' XX X ',
' XX X ',
' XXXX ',
' XX X ',
' XX ',
'XXXX ',
' ',
' '],
'G': [' XXXX ',
' XX X ',
'XX ',
'XX ',
'XX XXX ',
' XX XX ',
' XXX X ',
' ',
' '],
'H': ['XX XX ',
'XX XX ',
'XX XX ',
'XXXXXX ',
'XX XX ',
'XX XX ',
'XX XX ',
' ',
' '],
'I': [' XXXX ',
' XX ',
' XX ',
' XX ',
' XX ',
' XX ',
' XXXX ',
' ',
' '],
'J': [' XXXX ',
' XX ',
' XX ',
' XX ',
'X XX ',
'XX XX ',
' XXX ',
' ',
' '],
'K': ['XXX XX ',
' XX XX ',
' XX XX ',
' XXXX ',
' XX XX ',
' XX XX ',
'XXX XX ',
' ',
' '],
'L': ['XXXX ',
' XX ',
' XX ',
' XX ',
' XX ',
' XX XX ',
'XXXXXXX ',
' ',
' '],
'M': ['XX XX ',
'XXX XXX ',
'XXXXXXX ',
'XX X XX ',
'XX XX ',
'XX XX ',
'XX XX ',
' ',
' '],
'N': ['XX XX ',
'XXX XX ',
'XXXX XX ',
'XX XXXX ',
'XX XXX ',
'XX XX ',
'XX XX ',
' ',
' '],
'O': [' XXXXX ',
'XX XX ',
'XX XX ',
'XX XX ',
'XX XX ',
'XX XX ',
' XXXXX ',
' ',
' '],
'P': ['XXXXXX ',
' XX XX ',
' XX XX ',
' XXXXX ',
' XX ',
' XX ',
'XXXX ',
' ',
' '],
'Q': [' XXXXX ',
'XX XX ',
'XX XX ',
'XX XX ',
'XX XX ',
'XX XX ',
' XXXXX ',
' XX ',
' '],
'R': ['XXXXXX ',
' XX XX ',
' XX XX ',
' XXXXX ',
' XX XX ',
' XX XX ',
'XXX XX ',
' ',
' '],
'S': [' XXXXXX ',
'XX X ',
'XX ',
' XXXXX ',
' XX ',
'X XX ',
'XXXXXX ',
' ',
' '],
'T': [' XXXXXX ',
' X XX X ',
' XX ',
' XX ',
' XX ',
' XX ',
' XXXX ',
' ',
' '],
'U': [' XX XX ',
' XX XX ',
' XX XX ',
' XX XX ',
' XX XX ',
' XX XX ',
' XXXX ',
' ',
' '],
'V': ['XX XX ',
'XX XX ',
'XX XX ',
' XX XX ',
' XX XX ',
' XXX ',
' X ',
' ',
' '],
'W': ['XX XX ',
'XX XX ',
'XX XX ',
'XX X XX ',
'XXXXXXX ',
'XXX XXX ',
'XX XX ',
' ',
' '],
'X': ['XXX XXX ',
' XX XX ',
' XXX ',
' X ',
' XXX ',
' XX XX ',
'XXX XXX ',
' ',
' '],
'Y': ['XX XX',
' XX XX ',
' XXXX ',
' XX ',
' XX ',
' XX ',
' XXXX ',
' ',
' '],
'Z': ['XXXXXXX ',
'X XX ',
' XX ',
' XX ',
' XX ',
' XX X ',
'XXXXXXX ',
' ',
' '],
'[': [' XXXXX ',
' XX ',
' XX ',
' XX ',
' XX ',
' XX ',
' XXXXX ',
' ',
' '],
'\\': ['XX ',
' XX ',
' XX ',
' XX ',
' XX ',
' XX ',
' X ',
' ',
' '],
']': [' XXXXX ',
' XX ',
' XX ',
' XX ',
' XX ',
' XX ',
' XXXXX ',
' ',
' '],
'^': [' X ',
' XXX ',
' XX XX ',
'XX XX ',
' ',
' ',
' ',
' ',
' '],
'_': [' ',
' ',
' ',
' ',
' ',
' ',
' XXXXX ',
' ',
' '],
'`': [' XX ',
' X ',
' X ',
' ',
' ',
' ',
' ',
' ',
' '],
'a': [' ',
' ',
' XXXXX ',
' X ',
'XXXXXX ',
'X XX ',
'XXXXX X ',
' ',
' '],
'b': ['XXX ',
' XX ',
' XX ',
' XXXXX ',
' XX XX ',
' XX XX ',
'XXXXXX ',
' ',
' '],
'c': [' ',
' ',
' XXXXX ',
'XX XX ',
'XX ',
'XX XX ',
' XXXXX ',
' ',
' '],
'd': [' XXX ',
' XX ',
' XX ',
' XXXXX ',
'XX XX ',
'XX XX ',
' XXXX X ',
' ',
' '],
'e': [' ',
' ',
' XXXXX ',
'XX X ',
'XXXXXXX ',
'XX ',
' XXXXX ',
' ',
' '],
'f': [' XXX ',
' XX XX ',
' XX ',
'XXXXX ',
' XX ',
' XX ',
'XXXX ',
' ',
' '],
'g': [' ',
' ',
' XXXX XX',
'XX XX ',
'XX XX ',
' XXXXXX ',
' X ',
' XXXXX ',
' '],
'h': ['XXX ',
' XX ',
' XX ',
' XX XXX ',
' XXX XX ',
' XX XX ',
'XXX XXX',
' ',
' '],
'i': [' XX ',
' ',
' XXX ',
' XX ',
' XX ',
' XX ',
' XXXX ',
' ',
' '],
'j': [' XX ',
' ',
' XXXX ',
' XX ',
' XX ',
' XX ',
'XX XX ',
' XXX ',
' '],
'k': ['XXX ',
' XX ',
' XX XX ',
' XX XX ',
' XXX ',
' XX XX ',
'XXX XX ',
' ',
' '],
'l': [' XXX ',
' XX ',
' XX ',
' XX ',
' XX ',
' XX ',
' XXXX ',
' ',
' '],
'm': [' ',
' ',
'XXX XX ',
'XX X XX ',
'XX X XX ',
'XX XX ',
'XXX XXX',
' ',
' '],
'n': [' ',
' ',
' XX XXX ',
' XXX XX ',
' XX XX ',
' XX XX ',
' XX XX ',
' ',
' '],
'o': [' ',
' ',
' XXXX ',
' XX XX ',
' XX XX ',
' XX XX ',
' XXXX ',
' ',
' '],
'p': [' ',
' ',
'XX XXX ',
' XX XX ',
' XX XX ',
' XXXXX ',
' XX ',
'XXXX ',
' '],
'q': [' ',
' ',
' XXX XX ',
'XX XX ',
'XX XX ',
' XXXXX ',
' XX ',
' XXXX ',
' '],
'r': [' ',
' ',
'XX XXX ',
' XXX XX ',
' XX ',
' XX ',
'XXXX ',
' ',
' '],
's': [' ',
' ',
'XXXXXXX ',
'XX ',
'XXXXXXX ',
' XX ',
'XXXXXXX ',
' ',
' '],
't': [' X ',
' XX ',
' XXXXX ',
' XX ',
' XX ',
' XX XX ',
' XXX ',
' ',
' '],
'u': [' ',
' ',
'XX XXX ',
'XX XX ',
'XX XX ',
'XX XX ',
' XXX XX ',
' ',
' '],
'v': [' ',
' ',
' XX XX ',
' XX XX ',
' XX XX ',
' XXXX ',
' XX ',
' ',
' '],
'w': [' ',
' ',
'XX XX ',
'XX X XX ',
'XX X XX ',
'XXXXXXX ',
' XX XX ',
' ',
' '],
'x': [' ',
' ',
'XX XX ',
' XX XX ',
' XXX ',
' XX XX ',
'XX XX ',
' ',
' '],
'y': [' ',
' ',
'XX XX ',
'XX XX ',
'XX XX ',
' XXXXXX ',
' X ',
' XXXX ',
' '],
'z': [' ',
' ',
'XXXXXX ',
'X XX ',
' XX ',
' XX X ',
'XXXXXX ',
' ',
' '],
'{': [' XX ',
' XX ',
' XX ',
' XX ',
' XX ',
' XX ',
' XX ',
' ',
' '],
'|': [' X ',
' X ',
' X ',
' ',
' X ',
' X ',
' X ',
' ',
' '],
'}': [' XX ',
' XX ',
' XX ',
' XX ',
' XX ',
' XX ',
' XX ',
' ',
' '],
'~': [' XX XX ',
'X XX ',
' ',
' ',
' ',
' ',
' ',
' ',
' ']}
def is_char(a_char) -> bool:
''' Return True if we can print the character '''
return a_char in glyph_dict
def get_char(a_char)-> []:
''' Return the character, else an empty space '''
if a_char in glyph_dict:
return glyph_dict[a_char]
return glyph_dict[' ']
def big_chars(message, space=0, a_char='*') -> str:
''' Create a printable big-message '''
results = []
for char in message:
results.append(get_char(char))
if not results:
return None
buffer = ''
for high in range(len(results[0])):
row = ''
for xx in range(len(results)):
row += results[xx][high]
if space:
                spacer = ' ' * space  # extra blank columns appended after each glyph
row += spacer
buffer += row + '\n'
return buffer.replace('X', a_char)
def big_print(message, space=0, a_char='*'):
''' Print a big message to the standard output'''
chars = big_chars(message, space, a_char)
if chars:
print(chars)
return True
return False
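# Minimal sketch of using big_chars() beyond printing: it returns the banner as a
# plain string, so it can be written to a file; the file name below is illustrative.
def save_banner(message, path='banner.txt', a_char='#'):
    ''' Render a banner with big_chars and write it to a text file '''
    banner = big_chars(message, space=1, a_char=a_char)
    if banner is None:
        return False
    with open(path, 'w') as handle:
        handle.write(banner)
    return True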
if __name__ == '__main__':
if is_char(''):
raise Exception("Empty string error.")
if not get_char(''):
raise Exception("Non-empty string error.")
big_print('Soft9000', 0, '|')
big_print('Nagy', 5, '#') | PypiClean |
/Flask-CKEditor-0.4.6.tar.gz/Flask-CKEditor-0.4.6/flask_ckeditor/static/standard/plugins/a11yhelp/dialogs/lang/gl.js | /*
Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/legal/ckeditor-oss-license
*/
CKEDITOR.plugins.setLang("a11yhelp","gl",{title:"Instrucións de accesibilidade",contents:"Axuda. Para pechar este diálogo prema ESC.",legend:[{name:"Xeral",items:[{name:"Barra de ferramentas do editor",legend:"Prema ${toolbarFocus} para navegar pola barra de ferramentas. Para moverse polos distintos grupos de ferramentas use as teclas TAB e MAIÚS+TAB. Para moverse polas distintas ferramentas use FRECHA DEREITA ou FRECHA ESQUERDA. Prema ESPAZO ou INTRO para activar o botón da barra de ferramentas."},
{name:"Editor de diálogo",legend:"Dentro do diálogo, prema TAB para navegar cara os seguintes elementos de diálogo, prema MAIÚS+TAB para moverse cara os anteriores elementos de diálogo, prema INTRO para enviar o diálogo, prema ESC para cancelar o diálogo. Cando o diálogo ten múltiples lapelas, a lista de lapelas pode cinguirse con ALT+F10 ou con TAB como parte da orde de lapelas do diálogo. Coa lapela en foco, pode moverse cara a seguinte ou a anterior lapela coas FRECHAS ESQUERDA e DEREICHA respectivamente."},
{name:"Editor do menú contextual",legend:"Prema ${contextMenu} ou a TECLA MENÚ para abrir o menú contextual. A seguir móvase á seguinte opción do menú con TAB ou FRECHA ABAIXO. Móvase á opción anterior con MAIÚS + TAB ou FRECHA ARRIBA. Prema ESPAZO ou INTRO para seleccionar a opción do menú. Abra o submenú da opción actual con ESPAZO ou INTRO ou FRECHA DEREITA. Regrese ao elemento principal do menú con ESC ou FRECHA ESQUERDA. Peche o menú contextual con ESC."},{name:"Lista do editor",legend:"Dentro dunha lista, móvase ao seguinte elemento da lista con TAB ou FRECHA ABAIXO. Móvase ao elemento anterior da lista con MAIÚS+TAB ou FRECHA ARRIBA. Prema ESPAZO ou INTRO para escoller a opción da lista. Prema ESC para pechar a lista."},
{name:"Barra da ruta ao elemento no editor",legend:"Prema ${elementsPathFocus} para navegar ata os elementos da barra de ruta. Móvase ao seguinte elemento botón con TAB ou FRECHA DEREITA. Móvase ao botón anterior con MAIÚS+TAB ou FRECHA ESQUERDA. Prema ESPAZO ou INTRO para seleccionar o elemento no editor."}]},{name:"Ordes",items:[{name:"Orde «desfacer»",legend:"Prema ${undo}"},{name:"Orde «refacer»",legend:"Prema ${redo}"},{name:"Orde «negra»",legend:"Prema ${bold}"},{name:"Orde «cursiva»",legend:"Prema ${italic}"},
{name:"Orde «subliñar»",legend:"Prema ${underline}"},{name:"Orde «ligazón»",legend:"Prema ${link}"},{name:"Orde «contraer a barra de ferramentas»",legend:"Prema ${toolbarCollapse}"},{name:"Orde «acceder ao anterior espazo en foco»",legend:"Prema ${accessPreviousSpace} para acceder ao espazo máis próximo de foco inalcanzábel anterior ao cursor, por exemplo: dous elementos HR adxacentes. Repita a combinación de teclas para chegar a espazos de foco distantes."},{name:"Orde «acceder ao seguinte espazo en foco»",
legend:"Prema ${accessNextSpace} para acceder ao espazo máis próximo de foco inalcanzábel posterior ao cursor, por exemplo: dous elementos HR adxacentes. Repita a combinación de teclas para chegar a espazos de foco distantes."},{name:"Axuda da accesibilidade",legend:"Prema ${a11yHelp}"},{name:"Pegar como texto simple",legend:"Prema ${pastetext}",legendEdge:"Prema ${pastetext}, seguido de ${paste}"}]}],tab:"Tabulador",pause:"Pausa",capslock:"Bloq. Maiús",escape:"Escape",pageUp:"Páxina arriba",pageDown:"Páxina abaixo",
leftArrow:"Frecha esquerda",upArrow:"Frecha arriba",rightArrow:"Frecha dereita",downArrow:"Frecha abaixo",insert:"Inserir",leftWindowKey:"Tecla Windows esquerda",rightWindowKey:"Tecla Windows dereita",selectKey:"Escolla a tecla",numpad0:"Tec. numérico 0",numpad1:"Tec. numérico 1",numpad2:"Tec. numérico 2",numpad3:"Tec. numérico 3",numpad4:"Tec. numérico 4",numpad5:"Tec. numérico 5",numpad6:"Tec. numérico 6",numpad7:"Tec. numérico 7",numpad8:"Tec. numérico 8",numpad9:"Tec. numérico 9",multiply:"Multiplicar",
add:"Sumar",subtract:"Restar",decimalPoint:"Punto decimal",divide:"Dividir",f1:"F1",f2:"F2",f3:"F3",f4:"F4",f5:"F5",f6:"F6",f7:"F7",f8:"F8",f9:"F9",f10:"F10",f11:"F11",f12:"F12",numLock:"Bloq. num.",scrollLock:"Bloq. despraz.",semiColon:"Punto e coma",equalSign:"Signo igual",comma:"Coma",dash:"Guión",period:"Punto",forwardSlash:"Barra inclinada",graveAccent:"Acento grave",openBracket:"Abrir corchete",backSlash:"Barra invertida",closeBracket:"Pechar corchete",singleQuote:"Comiña simple"}); | PypiClean |
/Editra-0.7.20.tar.gz/Editra-0.7.20/src/extern/stcspellcheck.py | import os
import locale
import wx
import wx.stc
# Assume MacPorts install of Enchant
if wx.Platform == '__WXMAC__':
if 'PYENCHANT_LIBRARY_PATH' not in os.environ:
os.environ['PYENCHANT_LIBRARY_PATH'] = '/opt/local/lib/libenchant.dylib'
try:
import enchant
except ImportError:
# no big deal; support for enchant simply won't be included
pass
except:
# big deal; enchant is there but there's some error that is preventing
# its import
import traceback
traceback.print_exc()
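# Minimal usage sketch for the STCSpellCheck class defined below, assuming wxPython
# with the stc module and an importable enchant; the widget names are illustrative.
def _spellcheck_demo():
    app = wx.App(False)
    frame = wx.Frame(None, title="spell check demo")
    ctrl = wx.stc.StyledTextCtrl(frame)
    ctrl.SetText("Thiss sentance has missspelled words.")
    # underline misspellings in the whole buffer with the default red squiggle
    checker = STCSpellCheck(ctrl, language="en_US", min_word_size=3)
    checker.checkAll()
    frame.Show()
    app.MainLoop()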
class STCSpellCheck(object):
"""Spell checking for use with wx.StyledTextControl.
This shows spelling errors using the styling indicators (e.g. the red
squiggly underline) of the styled text control; I find this much more
convenient than a dialog-box that makes you click through each mistake.
The eventual goal of the module is to provide on-the-fly spell checking
that will display errors as you type, and also will highlight errors
during idle time or in a background thread.
Spell checking is provided through the pyenchant module. Without
pyenchant, this object won't do anything useful, but it is still safe to
be used. It wraps all calls to pyenchant with try/except blocks to catch
import errors, and any calls to the spell checking functions will return
immediately.
To use the spelling check, use one of the methods L{checkAll},
L{checkCurrentPage}, or L{checkSelection}. Clear the spelling
indicators with L{clearAll}.
"""
# Class attributes to act as default values
_spelling_lang = None
_spelling_dict = None
def __init__(self, stc, *args, **kwargs):
"""Mixin must be initialized using this constructor.
Keyword arguments are also available instead of calling the
convenience functions. For L{setIndicator}, use C{indicator},
        C{indicator_color}, and C{indicator_style}; for L{setLanguage},
use C{language}; and for L{setMinimumWordSize}, use
C{min_word_size}. See the descriptions of those methods for more info.
"""
self.stc = stc
self.setIndicator(kwargs.get('indicator', 2),
kwargs.get('indicator_color', "#FF0000"),
kwargs.get('indicator_style', wx.stc.STC_INDIC_SQUIGGLE))
self.setMinimumWordSize(kwargs.get('min_word_size', 3))
if 'language' in kwargs:
# Don't set default language unless explicitly specified -- it
# might have already been set through the class method
self.setDefaultLanguage(kwargs['language'])
if 'check_region' in kwargs:
# optional function to specify if the region should be spell
# checked. Function should return True if the position should
# be spell-checked; False if it doesn't make sense to spell check
# that part of the document
self._spell_check_region = kwargs['check_region']
else:
self._spell_check_region = lambda s: True
self._spelling_debug = False
self._spelling_last_idle_line = -1
self.dirty_range_count_per_idle = 5
self._no_update = False
self._last_block = -1
self.clearDirtyRanges()
def setIndicator(self, indicator=None, color=None, style=None):
"""Set the indicator styling for misspelled words.
Set the indicator index to use, its color, and the visual style.
@param indicator: indicator number (usually 0, 1, or 2, but may be fewer
depending on the number of style bits you've chosen for the stc.)
@param color: string indicating the color of the indicator (e.g.
"#FF0000" for red)
@param style: stc indicator style; one of the wx.stc.STC_INDIC_*
constants (currently wx.stc.STC_INDIC_PLAIN, wx.stc.STC_INDIC_SQUIGGLE,
wx.stc.STC_INDIC_TT, wx.stc.STC_INDIC_DIAGONAL,
wx.stc.STC_INDIC_STRIKE, wx.stc.STC_INDIC_HIDDEN,
wx.stc.STC_INDIC_BOX, wx.stc.STC_INDIC_ROUNDBOX)
"""
indicators = {0: wx.stc.STC_INDIC0_MASK,
1: wx.stc.STC_INDIC1_MASK,
2: wx.stc.STC_INDIC2_MASK
}
if indicator is not None:
if indicator not in indicators:
indicator = 0
# The current view may have fewer than 3 indicators
bitmax = 7 - self.stc.GetStyleBits()
if indicator > bitmax:
indicator = bitmax
self._spelling_indicator = indicator
self._spelling_indicator_mask = indicators[self._spelling_indicator]
if color is not None:
self._spelling_color = color
self.stc.IndicatorSetForeground(self._spelling_indicator,
self._spelling_color)
if style is not None:
if style > wx.stc.STC_INDIC_MAX:
style = wx.stc.STC_INDIC_MAX
self._spelling_style = style
self.stc.IndicatorSetStyle(self._spelling_indicator,
self._spelling_style)
@classmethod
def getAvailableLanguages(cls):
"""Return a list of supported languages.
Pyenchant supplies a list of its supported languages, so this is just
a simple wrapper around its C{list_languages} function. Each item in
the list is a text string indicating the locale name, e.g. en_US, ru,
ru_RU, eo, es_ES, etc.
@return: a list of text strings indicating the supported languages
"""
try:
return enchant.list_languages()
except NameError:
pass
return []
@classmethod
def _getDict(cls, lang):
try:
d = enchant.Dict(lang)
except:
# Catch all exceptions, because if pyenchant isn't available, you
# can't catch the enchant.DictNotFound error.
d = None
return d
def setCheckRegion(self, func):
"""Set region checker callable
@param func: def func(pos): return bool
"""
self.clearAll()
self._spell_check_region = func
@classmethod
def setDefaultLanguage(cls, lang):
"""Set the default language for spelling check.
The string should be in language locale format, e.g. en_US, ru, ru_RU,
eo, es_ES, etc. See L{getAvailableLanguages}.
@param lang: text string indicating the language
"""
cls._spelling_lang = lang
cls._spelling_dict = cls._getDict(lang)
@classmethod
def getSpellingDictionary(cls):
"""Get the currently used spelling dictionary
@return: enchant.Dict instance or None
"""
return cls._spelling_dict
def setLanguage(self, lang):
"""Set the language for spelling check for this class, if different than
the default.
The string should be in language locale format, e.g. en_US, ru, ru_RU,
eo, es_ES, etc. See L{getAvailableLanguages}.
@param lang: text string indicating the language
"""
# Note that this instance variable will shadow the class attribute
self._spelling_lang = lang
self._spelling_dict = self._getDict(lang)
def hasDictionary(self):
"""Returns True if a dictionary is available to spell check the current
language.
"""
return self._spelling_dict is not None
@classmethod
def isEnchantOk(cls):
"""Returns True if enchant is available"""
return 'enchant' in globals()
@classmethod
def reloadEnchant(cls, libpath=u''):
"""Try (re)loading the enchant module. Use to dynamically try to
import enchant incase it could be loaded at the time of the import of
this module.
@keyword libpath: optionally specify path to libenchant
@return: bool
"""
try:
if libpath and os.path.exists(libpath):
os.environ['PYENCHANT_LIBRARY_PATH'] = libpath
if cls.isEnchantOk():
reload(enchant)
else:
mod = __import__('enchant', globals(), locals())
globals()['enchant'] = mod
except ImportError:
return False
else:
return True
def getLanguage(self):
"""Returns True if a dictionary is available to spell check the current
language.
"""
return self._spelling_lang
def setMinimumWordSize(self, size):
"""Set the minimum word size that will be looked up in the dictionary.
Words smaller than this size won't be spell checked.
"""
self._spelling_word_size = size
def clearAll(self):
"""Clear the stc of all spelling indicators."""
self.stc.StartStyling(0, self._spelling_indicator_mask)
self.stc.SetStyling(self.stc.GetLength(), 0)
def checkRange(self, start, end):
"""Perform a spell check over a range of text in the document.
This is the main spell checking routine -- it loops over the range
of text using the L{findNextWord} method to break the text into
words to check. Misspelled words are highlighted using the current
indicator.
@param start: starting position
@param end: last position to check
"""
spell = self._spelling_dict
if not spell:
return
# Remove any old spelling indicators
mask = self._spelling_indicator_mask
count = end - start
if count <= 0:
if self._spelling_debug:
print("No need to check range: start=%d end=%d count=%d" % (start, end, count))
return
self.stc.StartStyling(start, mask)
self.stc.SetStyling(count, 0)
text = self.stc.GetTextRange(start, end) # note: returns unicode
unicode_index = 0
max_index = len(text)
        last_index = 0  # last unicode index for which a raw byte position is known
last_pos = start # raw byte position corresponding to last_index
while unicode_index < max_index:
start_index, end_index = self.findNextWord(text, unicode_index, max_index)
if end_index >= 0:
if end_index - start_index >= self._spelling_word_size:
if self._spelling_debug:
print("checking %s at text[%d:%d]" % (repr(text[start_index:end_index]), start_index, end_index))
if not spell.check(text[start_index:end_index]):
# Because unicode characters are stored as utf-8 in the
# stc and the positions in the stc correspond to the
# raw bytes, not the number of unicode characters, we
# have to find out the offset to the unicode chars in
# terms of raw bytes.
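                        # Illustrative example: a two-character Russian word
                        # such as u"\u044f\u0437" is 2 unicode characters but
                        # 4 raw bytes in utf-8, so the styling span must be
                        # computed from the encoded byte length.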
# find the number of raw bytes from the last calculated
# styling position to the start of the word
last_pos += len(text[last_index:start_index].encode('utf-8'))
# find the length of the word in raw bytes
raw_count = len(text[start_index:end_index].encode('utf-8'))
if self._spell_check_region(last_pos):
if self._spelling_debug:
print("styling text[%d:%d] = (%d,%d) to %d" % (start_index, end_index, last_pos, last_pos + raw_count, mask))
self.stc.StartStyling(last_pos, mask)
self.stc.SetStyling(raw_count, mask)
elif self._spelling_debug:
print("not in valid spell check region. styling position corresponding to text[%d:%d] = (%d,%d)" % (start_index, end_index, last_pos, last_pos + raw_count))
last_pos += raw_count
last_index = end_index
unicode_index = end_index
else:
break
def checkAll(self):
"""Perform a spell check on the entire document."""
return self.checkRange(0, self.stc.GetLength())
def checkSelection(self):
"""Perform a spell check on the currently selected region."""
return self.checkRange(self.stc.GetSelectionStart(), self.stc.GetSelectionEnd())
def checkLines(self, startline=-1, count=-1):
"""Perform a spell check on group of lines.
Given the starting line, check the spelling on a block of lines. If
the number of lines in the block is not specified, use the number of
        currently visible lines.
@param startline: current line, or -1 to use the first visible line
@param count: number of lines in the block, or -1 to use the number of
lines visible on screen
"""
if startline < 0:
startline = self.stc.GetFirstVisibleLine()
start = self.stc.PositionFromLine(startline)
if count < 0:
count = self.stc.LinesOnScreen()
endline = startline + count
if endline > self.stc.GetLineCount():
endline = self.stc.GetLineCount() - 1
end = self.stc.GetLineEndPosition(endline)
if self._spelling_debug:
print("Checking lines %d-%d, chars %d=%d" % (startline, endline, start, end))
return self.checkRange(start, end)
def checkCurrentPage(self):
"""Perform a spell check on the currently visible lines."""
return self.checkLines()
def findNextWord(self, utext, index, length):
"""Find the next valid word to check.
Designed to be overridden in subclasses, this method takes a starting
position in an array of text and returns a tuple indicating the next
valid word in the string.
@param utext: array of unicode chars
        @param index: starting index within the array to search
@param length: length of the text
@return: tuple indicating the word start and end indexes, or (-1, -1)
indicating that the end of the array was reached and no word was found
"""
while index < length:
if utext[index].isalpha():
end = index + 1
while end < length and utext[end].isalpha():
end += 1
return (index, end)
index += 1
return (-1, -1)
def startIdleProcessing(self):
"""Initialize parameters needed for idle block spell checking.
This must be called before the first call to L{processIdleBlock}
or if you wish to restart the spell checking from the start
of the document. It initializes parameters needed by the
L{processIdleBlock} in order to process the document during idle
time.
"""
self._spelling_last_idle_line = 0
def processIdleBlock(self):
"""Process a block of lines during idle time.
This method is designed to be called during idle processing and will
spell check a small number of lines. The next idle processing event
will continue from where the previous call left off, and in this way
over some number of idle events will spell check the entire document.
Once the entire document is spell checked, a flag is set and
further calls to this method will immediately return. Calling
L{startIdleProcessing} will cause the idle processing to start
checking from the beginning of the document.
"""
self.processDirtyRanges()
if self._spelling_last_idle_line < 0:
return
if self._spelling_debug:
print("Idle processing page starting at line %d" % self._spelling_last_idle_line)
self.checkLines(self._spelling_last_idle_line)
self._spelling_last_idle_line += self.stc.LinesOnScreen()
if self._spelling_last_idle_line > self.stc.GetLineCount():
self._spelling_last_idle_line = -1
return False
return True
def processCurrentlyVisibleBlock(self):
"""Alternate method to check lines during idle time.
This method is designed to be called during idle processing and will
spell check the currently visible block of lines. Once the visible
block has been checked, repeatedly calling this method will have
        no effect until the line position changes (or, less frequently, when
        the number of lines on screen changes because the window is resized).
"""
self.processDirtyRanges()
self._spelling_last_idle_line = self.stc.GetFirstVisibleLine()
curr_block = self._spelling_last_idle_line + self.stc.LinesOnScreen()
if self._no_update or curr_block == self._last_block:
return
self.checkLines(self._spelling_last_idle_line)
self._spelling_last_idle_line += self.stc.LinesOnScreen()
self._last_block = self._spelling_last_idle_line
return True
def getSuggestions(self, word):
"""Get suggestion for the correct spelling of a word.
@param word: word to check
@return: list of suggestions, or an empty list if any of the following
are true: there are no suggestions, the word is shorter than the
minimum length, or the dictionary can't be found.
"""
spell = self._spelling_dict
if spell and len(word) >= self._spelling_word_size:
words = spell.suggest(word)
if self._spelling_debug:
print("suggestions for %s: %s" % (word, words))
return words
return []
def checkWord(self, pos=None, atend=False):
"""Check the word at the current or specified position.
@param pos: position of a character in the word (or at the start or end
of the word), or None to use the current position
@param atend: True if you know the cursor is at the end of the word
"""
if pos is None:
pos = self.stc.GetCurrentPos()
if atend:
end = pos
else:
end = self.stc.WordEndPosition(pos, True)
start = self.stc.WordStartPosition(pos, True)
if self._spelling_debug:
print("%d-%d: %s" % (start, end, self.stc.GetTextRange(start, end)))
self.checkRange(start, end)
def addDirtyRange(self, start, end, lines_added=0, deleted=False):
"""Add a range of characters to a list of dirty regions that need to be
updated when some idle time is available.
"""
count = end - start
if deleted:
count = -count
if start == self.current_dirty_end:
self.current_dirty_end = end
elif start >= self.current_dirty_start and start < self.current_dirty_end:
self.current_dirty_end += count
else:
ranges = []
if self.current_dirty_start >= 0:
ranges.append((self.current_dirty_start, self.current_dirty_end))
for range_start, range_end in self.dirty_ranges:
if start < range_start:
range_start += count
range_end += count
ranges.append((range_start, range_end))
self.dirty_ranges = ranges
self.current_dirty_start = start
self.current_dirty_end = end
# If there has been a change before the word that used to be under the
# cursor, move the pointer so it matches the text
if start < self.current_word_start:
self.current_word_start += count
self.current_word_end += count
elif start <= self.current_word_end:
self.current_word_end += count
        # Prevent a nonsensical word end if a lot of text has been deleted
if self.current_word_end < self.current_word_start:
#print("word start = %d, word end = %d" % (self.current_word_start, self.current_word_end))
self.current_word_end = self.current_word_start
if lines_added > 0:
start = self.current_dirty_start
line = self.stc.LineFromPosition(start)
while True:
line_end = self.stc.GetLineEndPosition(line)
if line_end >= end:
#self.dirty_ranges.append((start, line_end))
if end > start:
self.current_dirty_start = start
self.current_dirty_end = end
else:
self.current_dirty_start = self.current_dirty_end = -1
break
self.dirty_ranges.append((start, line_end))
line += 1
start = self.stc.PositionFromLine(line)
if self._spelling_debug:
print("event: %d-%d, current dirty range: %d-%d, older=%s" % (start, end, self.current_dirty_start, self.current_dirty_end, self.dirty_ranges))
def clearDirtyRanges(self, ranges=None):
"""Throw away all dirty ranges
"""
self.current_dirty_start = self.current_dirty_end = -1
self.current_word_start = self.current_word_end = -1
if ranges is not None:
self.dirty_ranges = ranges
else:
self.dirty_ranges = []
def processDirtyRanges(self):
cursor = self.stc.GetCurrentPos()
# Check that the cursor has moved off the current word and if so check
# its spelling
if self.current_word_start > 0:
if cursor < self.current_word_start or cursor > self.current_word_end:
self.checkRange(self.current_word_start, self.current_word_end)
self.current_word_start = -1
# Check spelling around the region currently being typed
if self.current_dirty_start >= 0:
range_start, range_end = self.processDirtyRange(self.current_dirty_start, self.current_dirty_end)
# If the cursor is in the middle of a word, remove the spelling
# markers
if cursor >= range_start and cursor <= range_end:
word_start = self.stc.WordStartPosition(cursor, True)
word_end = self.stc.WordEndPosition(cursor, True)
mask = self._spelling_indicator_mask
self.stc.StartStyling(word_start, mask)
self.stc.SetStyling(word_end - word_start, 0)
if word_start != word_end:
self.current_word_start = word_start
self.current_word_end = word_end
else:
self.current_word_start = -1
self.current_dirty_start = self.current_dirty_end = -1
# Process a chunk of dirty ranges
needed = min(len(self.dirty_ranges), self.dirty_range_count_per_idle)
ranges = self.dirty_ranges[0:needed]
self.dirty_ranges = self.dirty_ranges[needed:]
for start, end in ranges:
if self._spelling_debug:
print("processing %d-%d" % (start, end))
self.processDirtyRange(start, end)
def processDirtyRange(self, start, end):
range_start = self.stc.WordStartPosition(start, True)
range_end = self.stc.WordEndPosition(end, True)
if self._spelling_debug:
print("processing dirty range %d-%d (modified from %d-%d): %s" % (range_start, range_end, start, end, repr(self.stc.GetTextRange(range_start, range_end))))
self.checkRange(range_start, range_end)
return range_start, range_end
if __name__ == "__main__":
import sys
try:
import enchant
except:
print("pyenchant not available, so spelling correction won't work.")
print("Get pyenchant from http://pyenchant.sourceforge.net")
class TestSTC(wx.stc.StyledTextCtrl):
def __init__(self, *args, **kwargs):
wx.stc.StyledTextCtrl.__init__(self, *args, **kwargs)
self.spell = STCSpellCheck(self, language="en_US")
self.SetMarginType(0, wx.stc.STC_MARGIN_NUMBER)
self.SetMarginWidth(0, 32)
self.Bind(wx.stc.EVT_STC_MODIFIED, self.OnModified)
self.Bind(wx.EVT_IDLE, self.OnIdle)
self.modified_count = 0
self.idle_count = 0
def OnModified(self, evt):
# NOTE: on really big insertions, evt.GetText can cause a
# MemoryError on MSW, so I've commented this dprint out.
mod = evt.GetModificationType()
if mod & wx.stc.STC_MOD_INSERTTEXT or mod & wx.stc.STC_MOD_DELETETEXT:
#print("(%s) at %d: text=%s len=%d" % (self.transModType(evt.GetModificationType()),evt.GetPosition(), repr(evt.GetText()), evt.GetLength()))
pos = evt.GetPosition()
last = pos + evt.GetLength()
self.spell.addDirtyRange(pos, last, evt.GetLinesAdded(), mod & wx.stc.STC_MOD_DELETETEXT)
#self.modified_count += 1
#if self.modified_count > 10:
# wx.CallAfter(self.spell.processDirtyRanges)
# self.modified_count = 0
evt.Skip()
def OnIdle(self, evt):
#print("Idle")
self.idle_count += 1
if self.idle_count > 10:
self.spell.processIdleBlock()
self.idle_count = 0
def transModType(self, modType):
st = ""
table = [(wx.stc.STC_MOD_INSERTTEXT, "InsertText"),
(wx.stc.STC_MOD_DELETETEXT, "DeleteText"),
(wx.stc.STC_MOD_CHANGESTYLE, "ChangeStyle"),
(wx.stc.STC_MOD_CHANGEFOLD, "ChangeFold"),
(wx.stc.STC_PERFORMED_USER, "UserFlag"),
(wx.stc.STC_PERFORMED_UNDO, "Undo"),
(wx.stc.STC_PERFORMED_REDO, "Redo"),
(wx.stc.STC_LASTSTEPINUNDOREDO, "Last-Undo/Redo"),
(wx.stc.STC_MOD_CHANGEMARKER, "ChangeMarker"),
(wx.stc.STC_MOD_BEFOREINSERT, "B4-Insert"),
(wx.stc.STC_MOD_BEFOREDELETE, "B4-Delete")
]
for flag,text in table:
if flag & modType:
st = st + text + " "
if not st:
st = 'UNKNOWN'
return st
class Frame(wx.Frame):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.stc = TestSTC(self, -1)
self.CreateStatusBar()
menubar = wx.MenuBar()
self.SetMenuBar(menubar) # Adding the MenuBar to the Frame content.
menu = wx.Menu()
menubar.Append(menu, "File")
self.menuAdd(menu, "Open", "Open File", self.OnOpenFile)
self.menuAdd(menu, "Quit", "Exit the pragram", self.OnQuit)
menu = wx.Menu()
menubar.Append(menu, "Edit")
self.menuAdd(menu, "Check All", "Spell check the entire document", self.OnCheckAll)
self.menuAdd(menu, "Check Current Page", "Spell check the currently visible page", self.OnCheckPage)
self.menuAdd(menu, "Check Selection", "Spell check the selected region", self.OnCheckSelection)
menu.AppendSeparator()
self.menuAdd(menu, "Clear Spelling", "Remove spelling correction indicators", self.OnClearSpelling)
menu = wx.Menu()
menubar.Append(menu, "Language")
langs = self.stc.spell.getAvailableLanguages()
self.lang_id = {}
for lang in langs:
id = wx.NewId()
self.lang_id[id] = lang
self.menuAdd(menu, lang, "Change dictionary to %s" % lang, self.OnChangeLanguage, id=id)
def loadFile(self, filename):
fh = open(filename)
self.stc.SetText(fh.read())
self.stc.spell.clearDirtyRanges()
self.stc.spell.checkCurrentPage()
def loadSample(self, paragraphs=10):
lorem_ipsum = u"""\
Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Vivamus mattis
commodo sem. Phasellus scelerisque tellus id lorem. Nulla facilisi.
Suspendisse potenti. Fusce velit odio, scelerisque vel, consequat nec,
dapibus sit amet, tortor. Vivamus eu turpis. Nam eget dolor. Integer
at elit. Praesent mauris. Nullam non nulla at nulla tincidunt malesuada.
Phasellus id ante. Sed mauris. Integer volutpat nisi non diam. Etiam
elementum. Pellentesque interdum justo eu risus. Cum sociis natoque
penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nunc
semper. In semper enim ut odio. Nulla varius leo commodo elit. Quisque
condimentum, nisl eget elementum laoreet, mauris turpis elementum felis, ut
accumsan nisl velit et mi.
And some Russian: \u041f\u0438\u0442\u043e\u043d - \u043b\u0443\u0447\u0448\u0438\u0439 \u044f\u0437\u044b\u043a \u043f\u0440\u043e\u0433\u0440\u0430\u043c\u043c\u0438\u0440\u043e\u0432\u0430\u043d\u0438\u044f!
"""
self.stc.ClearAll()
for i in range(paragraphs):
self.stc.AppendText(lorem_ipsum)
# Call the spell check after the text has had a chance to be
# displayed and the window resized to the correct size.
self.stc.spell.clearDirtyRanges()
wx.CallAfter(self.stc.spell.checkCurrentPage)
def menuAdd(self, menu, name, desc, fcn, id=-1, kind=wx.ITEM_NORMAL):
if id == -1:
id = wx.NewId()
a = wx.MenuItem(menu, id, name, desc, kind)
menu.AppendItem(a)
wx.EVT_MENU(self, id, fcn)
menu.SetHelpString(id, desc)
def OnOpenFile(self, evt):
dlg = wx.FileDialog(self, "Choose a text file",
defaultDir = "",
defaultFile = "",
wildcard = "*")
if dlg.ShowModal() == wx.ID_OK:
print("Opening %s" % dlg.GetPath())
self.loadFile(dlg.GetPath())
dlg.Destroy()
def OnQuit(self, evt):
self.Close(True)
def OnCheckAll(self, evt):
self.stc.spell.checkAll()
def OnCheckPage(self, evt):
self.stc.spell.checkCurrentPage()
def OnCheckSelection(self, evt):
self.stc.spell.checkSelection()
def OnClearSpelling(self, evt):
self.stc.spell.clearAll()
def OnChangeLanguage(self, evt):
id = evt.GetId()
normalized = locale.normalize(self.lang_id[id])
try:
locale.setlocale(locale.LC_ALL, normalized)
print("Changing locale %s, dictionary set to %s" % (normalized, self.lang_id[id]))
except locale.Error:
print("Can't set python locale to %s; dictionary set to %s" % (normalized, self.lang_id[id]))
self.stc.spell.setLanguage(self.lang_id[id])
self.stc.spell.clearAll()
self.stc.spell.checkCurrentPage()
app = wx.App(False)
frame = Frame(None, size=(600, -1))
need_sample = True
if len(sys.argv) > 1:
if not sys.argv[-1].startswith("-"):
frame.loadFile(sys.argv[-1])
need_sample = False
if need_sample:
frame.loadSample()
if '-d' in sys.argv:
frame.stc.spell._spelling_debug = True
frame.Show()
app.MainLoop() | PypiClean |
/Cohen-0.7.4.tar.gz/Cohen-0.7.4/coherence/backends/iradio_storage.py |
# a Shoutcast radio media server for the Coherence UPnP Framework
# (heavily revamped from the existing IRadio plugin)
# Copyright 2007, Frank Scholz <[email protected]>
# Copyright 2009-2010, Jean-Michel Sizun <jmDOTsizunATfreeDOTfr>
from twisted.internet import defer, reactor
from twisted.python.failure import Failure
from twisted.web import server
from coherence.upnp.core import utils
from coherence.upnp.core import DIDLLite
from coherence.upnp.core.DIDLLite import classChooser, Resource, DIDLElement
import coherence.extern.louie as louie
from coherence.extern.simple_plugin import Plugin
from coherence import log
from coherence.backend import BackendItem, BackendStore, Container, LazyContainer, AbstractBackendStore
from urlparse import urlsplit
SHOUTCAST_WS_URL = 'http://www.shoutcast.com/sbin/newxml.phtml'
genre_families = {
# genre hierarchy created from http://forums.winamp.com/showthread.php?s=&threadid=303231
"Alternative": ["Adult Alternative", "Britpop", "Classic Alternative", "College", "Dancepunk", "Dream Pop", "Emo", "Goth", "Grunge", "Indie Pop", "Indie Rock", "Industrial", "Lo-Fi", "Modern Rock", "New Wave", "Noise Pop", "Post-Punk", "Power Pop", "Punk", "Ska", "Xtreme"],
"Blues": ["Acoustic Blues", "Chicago Blues", "Contemporary Blues", "Country Blues", "Delta Blues", "Electric Blues", "Cajun/Zydeco"],
"Classical": ["Baroque", "Chamber", "Choral", "Classical Period", "Early Classical", "Impressionist", "Modern", "Opera", "Piano", "Romantic", "Symphony"],
"Country": ["Alt-Country", "Americana", "Bluegrass", "Classic Country", "Contemporary Bluegrass", "Contemporary Country", "Honky Tonk", "Hot Country Hits", "Western"],
"Easy Listening": ["Exotica", "Light Rock", "Lounge", "Orchestral Pop", "Polka", "Space Age Pop"],
"Electronic": ["Acid House", "Ambient", "Big Beat", "Breakbeat", "Dance", "Demo", "Disco", "Downtempo", "Drum and Bass", "Electro", "Garage", "Hard House", "House", "IDM", "Remixes", "Jungle", "Progressive", "Techno", "Trance", "Tribal", "Trip Hop"],
"Folk": ["Alternative Folk", "Contemporary Folk", "Folk Rock", "New Acoustic", "Traditional Folk", "World Folk"],
"Themes": ["Adult", "Best Of", "Chill", "Experimental", "Female", "Heartache", "LGBT", "Love/Romance", "Party Mix", "Patriotic", "Rainy Day Mix", "Reality", "Sexy", "Shuffle", "Travel Mix", "Tribute", "Trippy", "Work Mix"],
"Rap": ["Alternative Rap", "Dirty South", "East Coast Rap", "Freestyle", "Hip Hop", "Gangsta Rap", "Mixtapes", "Old School", "Turntablism", "Underground Hip-Hop", "West Coast Rap"],
"Inspirational": ["Christian", "Christian Metal", "Christian Rap", "Christian Rock", "Classic Christian", "Contemporary Gospel", "Gospel", "Praise/Worship", "Sermons/Services", "Southern Gospel", "Traditional Gospel"],
"International": ["African", "Afrikaans", "Arabic", "Asian", "Brazilian", "Caribbean", "Celtic", "European", "Filipino", "Greek", "Hawaiian/Pacific", "Hindi", "Indian", "Japanese", "Jewish", "Klezmer", "Mediterranean", "Middle Eastern", "North American", "Polskie", "Polska", "Soca", "South American", "Tamil", "Worldbeat", "Zouk"],
"Jazz": ["Acid Jazz", "Avant Garde", "Big Band", "Bop", "Classic Jazz", "Cool Jazz", "Fusion", "Hard Bop", "Latin Jazz", "Smooth Jazz", "Swing", "Vocal Jazz", "World Fusion"],
"Latin": ["Bachata", "Banda", "Bossa Nova", "Cumbia", "Latin Dance", "Latin Pop", "Latin Rap/Hip-Hop", "Latin Rock", "Mariachi", "Merengue", "Ranchera", "Reggaeton", "Regional Mexican", "Salsa", "Tango", "Tejano", "Tropicalia"],
"Metal": ["Black Metal", "Classic Metal", "Extreme Metal", "Grindcore", "Hair Metal", "Heavy Metal", "Metalcore", "Power Metal", "Progressive Metal", "Rap Metal"],
"New Age": ["Environmental", "Ethnic Fusion", "Healing", "Meditation", "Spiritual"],
"Decades": ["30s", "40s", "50s", "60s", "70s", "80s", "90s"],
"Pop": ["Adult Contemporary", "Barbershop", "Bubblegum Pop", "Dance Pop", "Idols", "Oldies", "JPOP", "Soft Rock", "Teen Pop", "Top 40", "World Pop"],
"R&B/Urban": ["Classic R&B", "Contemporary R&B", "Doo Wop", "Funk", "Motown", "Neo-Soul", "Quiet Storm", "Soul", "Urban Contemporary", "Reggae", "Contemporary Reggae", "Dancehall", "Dub", "Pop-Reggae", "Ragga", "Rock Steady", "Reggae Roots"],
"Rock": ["Adult Album Alternative", "British Invasion", "Classic Rock", "Garage Rock", "Glam", "Hard Rock", "Jam Bands", "Piano Rock", "Prog Rock", "Psychedelic", "Rock & Roll", "Rockabilly", "Singer/Songwriter", "Surf"],
"Seasonal/Holiday": ["Anniversary", "Birthday", "Christmas", "Halloween", "Hanukkah", "Honeymoon", "Valentine", "Wedding", "Winter"],
"Soundtracks": ["Anime", "Bollywood", "Kids", "Original Score", "Showtunes", "Video Game Music"],
"Talk": ["Comedy", "Community", "Educational", "Government", "News", "Old Time Radio", "Other Talk", "Political", "Public Radio", "Scanner", "Spoken Word", "Sports", "Technology", "Hardcore", "Eclectic", "Instrumental"],
"Misc": [],
}
synonym_genres = {
# TODO: extend list with entries from "Misc" which are clearly the same
"24h": ["24h", "24hs"],
"80s": ["80s", "80er"],
"Acid Jazz": ["Acid", "Acid Jazz"],
"Adult": ["Adult", "Adulto"],
"Alternative": ["Alt", "Alternativa", "Alternative", "Alternativo"],
"Francais": ["Francais", "French"],
"Heavy Metal": ["Heavy Metal", "Heavy", "Metal"],
"Hip Hop": ["Hip", "Hop", "Hippop", "Hip Hop"],
"Islam": ["Islam", "Islamic"],
"Italy": ["Italia", "Italian", "Italiana", "Italo", "Italy"],
"Latina": ["Latin", "Latina", "Latino"],
}
useless_title_content = [
# TODO: extend list with title expressions which are clearly useless
" - [SHOUTcast.com]"
]
useless_genres = [
# TODO: extend list with entries from "Misc" which are clearly useless
"genres", "go", "here",
"Her", "Hbwa"
]
class PlaylistStreamProxy(utils.ReverseProxyUriResource, log.Loggable):
""" proxies audio streams published as M3U playlists (typically the case for shoutcast streams) """
logCategory = 'PlaylistStreamProxy'
stream_url = None
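    # A shoutcast tune-in playlist is a small PLS file; an illustrative
    # (hypothetical) payload looks like:
    #   [playlist]
    #   File1=http://relay.example.com:8000/stream
    #   Title1=Some Station
    # render() below extracts the File1= URL and proxies that stream instead.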
def __init__(self, uri):
log.Loggable.__init__(self)
self.stream_url = None
utils.ReverseProxyUriResource.__init__(self, uri)
def requestFinished(self, result):
""" self.connection is set in utils.ReverseProxyResource.render """
self.debug("ProxyStream requestFinished")
if self.connection is not None:
self.connection.transport.loseConnection()
def render(self, request):
if self.stream_url is None:
def got_playlist(result):
if result is None:
                    self.warning('Error retrieving playlist - nothing retrieved')
                    return self.requestFinished(result)
result = result[0].split('\n')
for line in result:
if line.startswith('File1='):
self.stream_url = line[6:]
break
if self.stream_url is None:
                    self.warning('Error retrieving playlist - inconsistent playlist file')
                    return self.requestFinished(result)
#self.resetUri(self.stream_url)
request.uri = self.stream_url
return self.render(request)
def got_error(error):
                self.warning('Error retrieving playlist - unable to retrieve data')
self.warning(error)
return None
playlist_url = self.uri
d = utils.getPage(playlist_url, timeout=20)
d.addCallbacks(got_playlist, got_error)
return server.NOT_DONE_YET
self.info("this is our render method %s %s %s %s", request.method, request.uri, request.client, request.clientproto)
self.info("render %s", request.getAllHeaders())
if request.clientproto == 'HTTP/1.1':
self.connection = request.getHeader('connection')
if self.connection:
tokens = map(str.lower, self.connection.split(' '))
if 'close' in tokens:
d = request.notifyFinish()
d.addBoth(self.requestFinished)
else:
d = request.notifyFinish()
d.addBoth(self.requestFinished)
return utils.ReverseProxyUriResource.render(self, request)
class IRadioItem(BackendItem):
logCategory = 'iradio'
def __init__(self, station_id, title, stream_url, mimetype):
BackendItem.__init__(self)
self.station_id = station_id
self.name = title
self.mimetype = mimetype
self.stream_url = stream_url
self.location = PlaylistStreamProxy(self.stream_url)
self.item = None
def replace_by (self, item):
# do nothing: we suppose the replacement item is the same
return
def get_item(self):
if self.item == None:
upnp_id = self.get_id()
upnp_parent_id = self.parent.get_id()
self.item = DIDLLite.AudioBroadcast(upnp_id, upnp_parent_id, self.name)
res = Resource(self.url, 'http-get:*:%s:%s' % (self.mimetype,
';'.join(('DLNA.ORG_PN=MP3',
'DLNA.ORG_CI=0',
'DLNA.ORG_OP=01',
'DLNA.ORG_FLAGS=01700000000000000000000000000000'))))
res.size = 0 # None
self.item.res.append(res)
return self.item
def get_path(self):
self.url = self.store.urlbase + str(self.storage_id)
return self.url
def get_id(self):
return self.storage_id
class IRadioStore(AbstractBackendStore):
logCategory = 'iradio'
implements = ['MediaServer']
genre_parent_items = {} # will list the parent genre for every given genre
def __init__(self, server, **kwargs):
AbstractBackendStore.__init__(self, server, **kwargs)
self.name = kwargs.get('name', 'iRadioStore')
self.refresh = int(kwargs.get('refresh', 60)) * 60
self.shoutcast_ws_url = self.config.get('genrelist', SHOUTCAST_WS_URL)
# set root item
root_item = Container(None, self.name)
self.set_root_item(root_item)
# set root-level genre family containers
# and populate the genre_parent_items dict from the family hierarchy information
for family, genres in genre_families.items():
family_item = self.append_genre(root_item, family)
if family_item is not None:
self.genre_parent_items[family] = root_item
for genre in genres:
self.genre_parent_items[genre] = family_item
        # asynchronously retrieve the list of genres from the shoutcast server
# genres not already attached to a family will be attached to the "Misc" family
self.retrieveGenreList_attemptCount = 0
deferredRoot = self.retrieveGenreList()
# self.init_completed() # will be fired when the genre list is retrieved
def append_genre(self, parent, genre):
if genre in useless_genres:
return None
if synonym_genres.has_key(genre):
same_genres = synonym_genres[genre]
else:
same_genres = [genre]
title = genre.encode('utf-8')
family_item = LazyContainer(parent, title, genre, self.refresh, self.retrieveItemsForGenre, genres=same_genres, per_page=1)
# we will use a specific child items sorter
# in order to get the sub-genre containers first
def childs_sort(x, y):
if x.__class__ == y.__class__:
return cmp(x.name, y.name) # same class, we compare the names
else:
# the IRadioItem is deemed the lowest item class,
# other classes are compared by name (as usual)
if isinstance(x, IRadioItem):
return 1
elif isinstance(y, IRadioItem):
return -1
else:
return cmp(x.name, y.name)
family_item.sorting_method = childs_sort
parent.add_child(family_item, external_id=genre)
return family_item
def __repr__(self):
return self.__class__.__name__
def upnp_init(self):
self.current_connection_id = None
self.wmc_mapping = {'4': self.get_root_id()}
if self.server:
self.server.connection_manager_server.set_variable(0, 'SourceProtocolInfo',
['http-get:*:audio/mpeg:*',
'http-get:*:audio/x-scpls:*'],
default=True)
# populate a genre container (parent) with the sub-genre containers
# and corresponding IRadio (list retrieved from the shoutcast server)
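    # Illustrative (abridged) response for '<shoutcast_ws_url>?genre=Jazz':
    #   <stationlist>
    #     <tunein base="/sbin/tunein-station.pls"/>
    #     <station name="Example Jazz Radio" mt="audio/mpeg" id="1234" br="128"/>
    #   </stationlist>
    # The station attributes parsed below (mt, id, br, name) come from here.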
def retrieveItemsForGenre (self, parent, genres, per_page=1, offset=0, page=0):
genre = genres[page]
if page < len(genres) - 1:
parent.childrenRetrievingNeeded = True
url = '%s?genre=%s' % (self.shoutcast_ws_url, genre)
if genre_families.has_key(genre):
family_genres = genre_families[genre]
for family_genre in family_genres:
self.append_genre(parent, family_genre)
def got_page(result):
self.info('connection to ShoutCast service successful for genre %s', genre)
result = utils.parse_xml(result, encoding='utf-8')
tunein = result.find('tunein')
if tunein != None:
tunein = tunein.get('base', '/sbin/tunein-station.pls')
prot, host_port, path, _, _ = urlsplit(self.shoutcast_ws_url)
tunein = prot + '://' + host_port + tunein
stations = {}
for stationResult in result.findall('station'):
mimetype = stationResult.get('mt')
station_id = stationResult.get('id')
bitrate = stationResult.get('br')
name = stationResult.get('name').encode('utf-8')
# remove useless substrings (eg. '[Shoutcast.com]' ) from title
for substring in useless_title_content:
name = name.replace(substring, "")
lower_name = name.lower()
url = '%s?id=%s' % (tunein, stationResult.get('id'))
sameStation = stations.get(lower_name)
if sameStation == None or bitrate > sameStation['bitrate']:
station = {'name': name,
'station_id': station_id,
'mimetype': mimetype,
'id': station_id,
'url': url,
'bitrate': bitrate}
stations[lower_name] = station
for station in stations.values():
station_id = station.get('station_id')
name = station.get('name')
url = station.get('url')
mimetype = station.get('mimetype')
item = IRadioItem(station_id, name, url, mimetype)
parent.add_child(item, external_id=station_id)
return True
def got_error(error):
self.warning("connection to ShoutCast service failed: %s", url)
self.debug("%r", error.getTraceback())
parent.childrenRetrievingNeeded = True # we retry
return Failure("Unable to retrieve stations for genre" % genre)
d = utils.getPage(url)
d.addCallbacks(got_page, got_error)
return d
# retrieve the whole list of genres from the shoutcast server
# to complete the population of the genre families classification
# (genres not previously classified are put into the "Misc" family)
# ...and fire mediaserver init completion
def retrieveGenreList(self):
def got_page(result):
if self.retrieveGenreList_attemptCount == 0:
self.info("Connection to ShoutCast service successful for genre listing")
else:
self.warning("Connection to ShoutCast service successful for genre listing after %d attempts.", self.retrieveGenreList_attemptCount)
result = utils.parse_xml(result, encoding='utf-8')
genres = {}
main_synonym_genre = {}
for main_genre, sub_genres in synonym_genres.items():
genres[main_genre] = sub_genres
for genre in sub_genres:
main_synonym_genre[genre] = main_genre
for genre in result.findall('genre'):
name = genre.get('name')
if name not in main_synonym_genre:
genres[name] = [name]
main_synonym_genre[name] = name
for main_genre, sub_genres in genres.items():
if not self.genre_parent_items.has_key(main_genre):
genre_families["Misc"].append(main_genre)
self.init_completed()
def got_error(error):
self.warning("connection to ShoutCast service for genre listing failed - Will retry! %r", error)
self.debug("%r", error.getTraceback())
self.retrieveGenreList_attemptCount += 1
reactor.callLater(5, self.retrieveGenreList)
d = utils.getPage(self.shoutcast_ws_url)
d.addCallback(got_page)
d.addErrback(got_error)
return d | PypiClean |
/Newcalls-0.0.1-cp37-cp37m-win_amd64.whl/newcalls/node_modules/readable-stream/lib/internal/streams/pipeline.js |
'use strict';
var eos;
function once(callback) {
var called = false;
return function () {
if (called) return;
called = true;
callback.apply(void 0, arguments);
};
}
var _require$codes = require('../../../errors').codes,
ERR_MISSING_ARGS = _require$codes.ERR_MISSING_ARGS,
ERR_STREAM_DESTROYED = _require$codes.ERR_STREAM_DESTROYED;
function noop(err) {
// Rethrow the error if it exists to avoid swallowing it
if (err) throw err;
}
function isRequest(stream) {
return stream.setHeader && typeof stream.abort === 'function';
}
function destroyer(stream, reading, writing, callback) {
callback = once(callback);
var closed = false;
stream.on('close', function () {
closed = true;
});
if (eos === undefined) eos = require('./end-of-stream');
eos(stream, {
readable: reading,
writable: writing
}, function (err) {
if (err) return callback(err);
closed = true;
callback();
});
var destroyed = false;
return function (err) {
if (closed) return;
if (destroyed) return;
destroyed = true;
    // request.destroy just does .end - .abort is what we want
if (isRequest(stream)) return stream.abort();
if (typeof stream.destroy === 'function') return stream.destroy();
callback(err || new ERR_STREAM_DESTROYED('pipe'));
};
}
function call(fn) {
fn();
}
function pipe(from, to) {
return from.pipe(to);
}
function popCallback(streams) {
if (!streams.length) return noop;
if (typeof streams[streams.length - 1] !== 'function') return noop;
return streams.pop();
}
function pipeline() {
for (var _len = arguments.length, streams = new Array(_len), _key = 0; _key < _len; _key++) {
streams[_key] = arguments[_key];
}
var callback = popCallback(streams);
if (Array.isArray(streams[0])) streams = streams[0];
if (streams.length < 2) {
throw new ERR_MISSING_ARGS('streams');
}
var error;
var destroys = streams.map(function (stream, i) {
var reading = i < streams.length - 1;
var writing = i > 0;
return destroyer(stream, reading, writing, function (err) {
if (!error) error = err;
if (err) destroys.forEach(call);
if (reading) return;
destroys.forEach(call);
callback(error);
});
});
return streams.reduce(pipe);
}
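// Illustrative usage (assumes the standard 'fs' and 'zlib' modules are
// available in the runtime; file names below are only an example):
//   pipeline(
//     fs.createReadStream('in.txt'),
//     zlib.createGzip(),
//     fs.createWriteStream('in.txt.gz'),
//     function (err) { if (err) console.error('pipeline failed', err); }
//   );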
module.exports = pipeline; | PypiClean |
/Homevee_Dev-0.0.0.0-py3-none-any.whl/Homevee/VoiceAssistant/Modules/ShoppingListModule/GetShoppingListModule.py | from Homevee.Functions import shopping_list
from Homevee.VoiceAssistant.Helper import generate_string
from Homevee.VoiceAssistant.Modules.ShoppingListModule import VoiceShoppingListModule
class VoiceGetShoppingListModule(VoiceShoppingListModule):
def get_pattern(self, db):
return [
['was', ['steht', 'ist'], ['einkaufszettel', 'einkaufsliste']],
['was', ['muss', 'soll'], 'kaufen'],
['wie', ['viel', 'viele'], ['muss', 'soll'], 'kaufen']
]
def get_label(self):
return "addshoppinglist"
def run_command(self, username, text, context, db):
return self.get_shopping_list(username, text, context, db)
def get_shopping_list(self, username, text, context, db):
items = self.find_items(text, db)
        if len(items) == 0:
            # Query the whole shopping list
items = shopping_list.get_shopping_list(username, db)['Item']
data = [
[['Das ', 'Folgendes '], 'steht ', 'auf ',
[[['der ', 'deiner '], 'Einkaufsliste'], [['dem ', 'deinem '], 'Einkaufszettel']], ': '],
[['Diese ', 'Folgende '], ['Artikel ', 'Produkte '], ['stehen ', 'sind '], 'auf ',
[[['der ', 'deiner '], 'Einkaufsliste'],
[['dem ', 'deinem '], 'Einkaufszettel']], ': ']
]
output = generate_string(data)
for i in range(0, len(items)):
item = items[i]
if len(items) > 1:
                # More than one element
                if i == len(items) - 1:
                    # Last element
                    output = output + " und "
                elif i < len(items) - 1 and i > 0:
                    # Not the first and not the last element
output = output + ", "
amount_string = str(item['amount'])
if(item['amount'] == 1):
amount_string = "ein"
output = output + amount_string + " mal " + item['item']
output += "."
return {'msg_speech': output, 'msg_text': output} | PypiClean |
/NuPlone-2.2.0.tar.gz/NuPlone-2.2.0/plonetheme/nuplone/static/bundle/chunks/482.cee2a3e4ce58de143547.min.js | (self.webpackChunknuplone=self.webpackChunknuplone||[]).push([[482],{96482:function(){Redactor.add("plugin","widget",{translations:{en:{widget:"Widget","widget-html-code":"Widget HTML Code"}},modals:{widget:'<form action=""> <div class="form-item"> <label for="modal-widget-input">## widget-html-code ##</label> <textarea id="modal-widget-input" name="widget" style="height: 200px;"></textarea> </div> </form>'},init:function(t){this.app=t,this.lang=t.lang,this.opts=t.opts,this.toolbar=t.toolbar,this.component=t.component,this.insertion=t.insertion,this.inspector=t.inspector,this.selection=t.selection},onmodal:{widget:{opened:function(t,e){if(e.getField("widget").focus(),this.$currentItem){var i=decodeURI(this.$currentItem.attr("data-widget-code"));e.getField("widget").val(i)}},insert:function(t,e){var i=e.getData();this._insert(i)}}},oncontextbar:function(t,e){var i=this.inspector.parse(t.target);if(!i.isFigcaption()&&i.isComponentType("widget")){var n=i.getComponent(),o={edit:{title:this.lang.get("edit"),api:"plugin.widget.open",args:n},remove:{title:this.lang.get("delete"),api:"plugin.widget.remove",args:n}};e.set(t,n,o,"bottom")}},onbutton:{widget:{observe:function(t){this._observeButton(t)}}},start:function(){var t={title:this.lang.get("widget"),api:"plugin.widget.open",observe:"widget"};this.toolbar.addButton("widget",t).setIcon('<i class="re-icon-widget"></i>')},open:function(){this.$currentItem=this._getCurrent();var t={title:this.lang.get("widget"),width:"600px",name:"widget",handle:"insert",commands:{insert:{title:this.$currentItem?this.lang.get("save"):this.lang.get("insert")},cancel:{title:this.lang.get("cancel")}}};this.app.api("module.modal.build",t)},remove:function(t){this.component.remove(t)},_getCurrent:function(){var t=this.selection.getCurrent(),e=this.inspector.parse(t);if(e.isComponentType("widget"))return this.component.build(e.getComponent())},_insert:function(t){if(this.app.api("module.modal.close"),""!==t.widget.trim()){var e=this._isHtmlString(t.widget)?t.widget:document.createTextNode(t.widget),i=this.component.create("widget",e);i.attr("data-widget-code",encodeURI(t.widget.trim())),this.insertion.insertHtml(i)}},_isHtmlString:function(t){return!("string"==typeof t&&!/^\s*<(\w+|!)[^>]*>/.test(t))},_observeButton:function(t){var e=this.selection.getCurrent();this.inspector.parse(e).isComponentType("table")?t.disable():t.enable()}})}}]);
//# sourceMappingURL=482.cee2a3e4ce58de143547.min.js.map | PypiClean |
/Dero-0.15.0-py3-none-any.whl/dero/manager/io/file/load/parsers/collections.py | from typing import Union, List
import ast
AstDictOrList = Union[ast.Dict, ast.List]
DictOrList = Union[dict, list]
DictOrListOrNone = Union[DictOrList, None]
class AstDictListConverter(ast.NodeVisitor):
def __init__(self, convert_str_values: bool = False):
self.collections = []
self.convert_str_values = convert_str_values
def visit_Dict(self, node):
self.collections.append(
_ast_dict_to_dict(node, convert_str_values=self.convert_str_values)
)
# nested nodes being handled in function above
def visit_List(self, node):
self.collections.append(
_ast_list_to_list(node, convert_str_values=self.convert_str_values)
)
# nested nodes being handled in function above
def extract_collections_from_ast(ast_node: ast.AST, convert_str_values: bool = False) -> List[DictOrList]:
"""
    Goes through the ast, converting ast.Dict to dict and ast.List to list,
    leaving all other nodes intact.
    Args:
        ast_node: root node to search for collections
        convert_str_values: if True, convert ast.Str values into plain str
    Returns:
        a list of the created dicts and lists
"""
adlc = AstDictListConverter(convert_str_values=convert_str_values)
adlc.visit(ast_node)
return adlc.collections
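# Illustrative example (hypothetical input):
#   tree = ast.parse("names = ['x', 'y']")
#   extract_collections_from_ast(tree, convert_str_values=True)  # -> [['x', 'y']]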
def extract_collection_from_ast(ast_node: ast.AST, convert_str_values: bool = False) -> DictOrListOrNone:
collections = extract_collections_from_ast(ast_node=ast_node, convert_str_values=convert_str_values)
if len(collections) == 0:
return None
if len(collections) > 1:
        raise ValueError(f'expected to extract one collection from ast. got {len(collections)} '
                         f'collections: {collections}')
return collections[0]
def _ast_dict_or_list_to_dict_or_list(node: AstDictOrList, convert_str_values: bool = False) -> DictOrList:
if isinstance(node, ast.Dict):
return _ast_dict_to_dict(node, convert_str_values=convert_str_values)
elif isinstance(node, ast.List):
return _ast_list_to_list(node, convert_str_values=convert_str_values)
else:
raise ValueError(f'expected ast.Dict or ast.List. Got {node} of type {type(node)}')
def _ast_dict_to_dict(ast_dict: ast.Dict, convert_str_values: bool = False) -> dict:
out_dict = {}
for key, value in zip(ast_dict.keys, ast_dict.values):
key: ast.Str
key_string = key.s
if isinstance(value, (ast.Dict, ast.List)):
store_value = _ast_dict_or_list_to_dict_or_list(value, convert_str_values=convert_str_values)
else:
store_value = _convert_to_str_if_ast_str_and_desired(value, convert_desired=convert_str_values)
out_dict[key_string] = store_value
return out_dict
def _ast_list_to_list(ast_list: ast.List, convert_str_values: bool = False) -> list:
out_list = []
for item in ast_list.elts:
if isinstance(item, (ast.Dict, ast.List)):
store_item = _ast_dict_or_list_to_dict_or_list(item, convert_str_values=convert_str_values)
else:
store_item = _convert_to_str_if_ast_str_and_desired(item, convert_desired=convert_str_values)
out_list.append(store_item)
return out_list
def _convert_to_str_if_ast_str_and_desired(ast_node: ast.AST, convert_desired=False):
if not convert_desired:
return ast_node
if isinstance(ast_node, ast.Str):
return ast_node.s
return ast_node | PypiClean |
/MFD%20Floods-0.1.14.tar.gz/MFD Floods-0.1.14/bin/ogrmerge.py |
import glob
import os
import os.path
import sys
from osgeo import gdal
from osgeo import ogr
###############################################################
# Usage()
def Usage():
print('ogrmerge.py -o out_dsname src_dsname [src_dsname]*')
print(' [-f format] [-single] [-nln layer_name_template]')
print(' [-update | -overwrite_ds] [-append | -overwrite_layer]')
print(' [-src_geom_type geom_type_name[,geom_type_name]*]')
print(' [-dsco NAME=VALUE]* [-lco NAME=VALUE]*')
print(' [-s_srs srs_def] [-t_srs srs_def | -a_srs srs_def]')
print(' [-progress] [-skipfailures] [--help-general]')
print('')
print('Options specific to -single:')
print(' [-field_strategy FirstLayer|Union|Intersection]')
print(' [-src_layer_field_name name]')
print(' [-src_layer_field_content layer_name_template]')
print('')
print('* layer_name_template can contain the following substituable '
'variables:')
print(' {AUTO_NAME} : {DS_BASENAME}_{LAYER_NAME} if they are '
'different')
print(' or {LAYER_NAME} if they are identical')
print(' {DS_NAME} : name of the source dataset')
print(' {DS_BASENAME}: base name of the source dataset')
print(' {DS_INDEX} : index of the source dataset')
print(' {LAYER_NAME} : name of the source layer')
print(' {LAYER_INDEX}: index of the source layer')
return 1
def DoesDriverHandleExtension(drv, ext):
exts = drv.GetMetadataItem(gdal.DMD_EXTENSIONS)
return exts is not None and exts.lower().find(ext.lower()) >= 0
def GetExtension(filename):
ext = os.path.splitext(filename)[1]
if ext.startswith('.'):
ext = ext[1:]
return ext
def GetOutputDriversFor(filename):
drv_list = []
ext = GetExtension(filename)
if ext.lower() == 'vrt':
return ['VRT']
for i in range(gdal.GetDriverCount()):
drv = gdal.GetDriver(i)
if (drv.GetMetadataItem(gdal.DCAP_CREATE) is not None or
drv.GetMetadataItem(gdal.DCAP_CREATECOPY) is not None) and \
drv.GetMetadataItem(gdal.DCAP_VECTOR) is not None:
if ext and DoesDriverHandleExtension(drv, ext):
drv_list.append(drv.ShortName)
else:
prefix = drv.GetMetadataItem(gdal.DMD_CONNECTION_PREFIX)
if prefix is not None and filename.lower().startswith(prefix.lower()):
drv_list.append(drv.ShortName)
return drv_list
def GetOutputDriverFor(filename):
drv_list = GetOutputDriversFor(filename)
ext = GetExtension(filename)
if not drv_list:
if not ext:
return 'ESRI Shapefile'
else:
raise Exception("Cannot guess driver for %s" % filename)
elif len(drv_list) > 1:
print("Several drivers matching %s extension. Using %s" % (ext if ext else '', drv_list[0]))
return drv_list[0]
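# Illustrative behaviour (assuming the usual drivers are compiled into GDAL):
#   GetOutputDriverFor('out.shp') -> 'ESRI Shapefile'
#   GetOutputDriverFor('out.gpkg') -> 'GPKG'
#   GetOutputDriverFor('out') -> 'ESRI Shapefile' (no extension falls back)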
#############################################################################
def _VSIFPrintfL(f, s):
gdal.VSIFWriteL(s, 1, len(s), f)
#############################################################################
def EQUAL(x, y):
return x.lower() == y.lower()
#############################################################################
def _GetGeomType(src_geom_type_name):
if EQUAL(src_geom_type_name, "GEOMETRY"):
return ogr.wkbUnknown
try:
max_geom_type = ogr.wkbTriangle
except:
# GDAL 2.1 compat
max_geom_type = ogr.wkbSurface
for i in range(max_geom_type + 1):
if EQUAL(src_geom_type_name,
ogr.GeometryTypeToName(i).replace(' ', '')):
return i
return None
#############################################################################
def _Esc(x):
return gdal.EscapeString(x, gdal.CPLES_XML)
class XMLWriter(object):
def __init__(self, f):
self.f = f
self.inc = 0
self.elements = []
def _indent(self):
return ' ' * self.inc
def open_element(self, name, attrs=None):
xml_attrs = ''
if attrs is not None:
for key in attrs:
xml_attrs = xml_attrs + ' %s=\"%s\"' % (key, _Esc(attrs[key].encode('utf-8')))
x = '%s<%s%s>\n' % (self._indent(), name, xml_attrs)
x = x.encode('utf-8')
_VSIFPrintfL(self.f, x)
self.inc = self.inc + 1
self.elements.append(name)
def write_element_value(self, name, value, attrs=None):
xml_attrs = ''
if attrs is not None:
for key in attrs:
xml_attrs = xml_attrs + ' %s=\"%s\"' % (key, _Esc(attrs[key].encode('utf-8')))
x = '%s<%s%s>%s</%s>\n' % (self._indent(), name, xml_attrs,
_Esc(value.encode('utf-8')), name)
x = x.encode('utf-8')
_VSIFPrintfL(self.f, x)
def close_element(self, closing_name=None):
self.inc = self.inc - 1
name = self.elements[-1]
if closing_name is not None:
assert name == closing_name
self.elements = self.elements[0:-1]
_VSIFPrintfL(self.f, '%s</%s>\n' % (self._indent(), name))
###############################################################
# process()
def process(argv, progress=None, progress_arg=None):
if not argv:
return Usage()
dst_filename = None
output_format = None
src_datasets = []
overwrite_ds = False
overwrite_layer = False
update = False
append = False
single_layer = False
layer_name_template = None
skip_failures = False
src_geom_types = []
field_strategy = None
src_layer_field_name = None
src_layer_field_content = None
a_srs = None
s_srs = None
t_srs = None
dsco = []
lco = []
i = 0
while i < len(argv):
arg = argv[i]
if (arg == '-f' or arg == '-of') and i + 1 < len(argv):
i = i + 1
output_format = argv[i]
elif arg == '-o' and i + 1 < len(argv):
i = i + 1
dst_filename = argv[i]
elif arg == '-progress':
progress = ogr.TermProgress_nocb
progress_arg = None
elif arg == '-q' or arg == '-quiet':
pass
elif arg[0:5] == '-skip':
skip_failures = True
elif arg == '-update':
update = True
elif arg == '-overwrite_ds':
overwrite_ds = True
elif arg == '-overwrite_layer':
overwrite_layer = True
update = True
elif arg == '-append':
append = True
update = True
elif arg == '-single':
single_layer = True
elif arg == '-a_srs' and i + 1 < len(argv):
i = i + 1
a_srs = argv[i]
elif arg == '-s_srs' and i + 1 < len(argv):
i = i + 1
s_srs = argv[i]
elif arg == '-t_srs' and i + 1 < len(argv):
i = i + 1
t_srs = argv[i]
elif arg == '-nln' and i + 1 < len(argv):
i = i + 1
layer_name_template = argv[i]
elif arg == '-field_strategy' and i + 1 < len(argv):
i = i + 1
field_strategy = argv[i]
elif arg == '-src_layer_field_name' and i + 1 < len(argv):
i = i + 1
src_layer_field_name = argv[i]
elif arg == '-src_layer_field_content' and i + 1 < len(argv):
i = i + 1
src_layer_field_content = argv[i]
elif arg == '-dsco' and i + 1 < len(argv):
i = i + 1
dsco.append(argv[i])
elif arg == '-lco' and i + 1 < len(argv):
i = i + 1
lco.append(argv[i])
elif arg == '-src_geom_type' and i + 1 < len(argv):
i = i + 1
src_geom_type_names = argv[i].split(',')
for src_geom_type_name in src_geom_type_names:
src_geom_type = _GetGeomType(src_geom_type_name)
if src_geom_type is None:
print('ERROR: Unrecognized geometry type: %s' %
src_geom_type_name)
return 1
src_geom_types.append(src_geom_type)
elif arg[0] == '-':
print('ERROR: Unrecognized argument : %s' % arg)
return Usage()
else:
if '*' in arg:
if sys.version_info < (3,0,0):
src_datasets += [fn.decode(sys.getfilesystemencoding()) for fn in glob.glob(arg)]
else:
src_datasets += glob.glob(arg)
else:
src_datasets.append(arg)
i = i + 1
if dst_filename is None:
print('Missing -o')
return 1
if update:
if output_format is not None:
print('ERROR: -f incompatible with -update')
return 1
if dsco:
print('ERROR: -dsco incompatible with -update')
return 1
output_format = ''
else:
if output_format is None:
output_format = GetOutputDriverFor(dst_filename)
if src_layer_field_content is None:
src_layer_field_content = '{AUTO_NAME}'
elif src_layer_field_name is None:
src_layer_field_name = 'source_ds_lyr'
if not single_layer and output_format == 'ESRI Shapefile' and \
dst_filename.lower().endswith('.shp'):
print('ERROR: Non-single layer mode incompatible with non-directory '
'shapefile output')
return 1
if not src_datasets:
print('ERROR: No source datasets')
return 1
if layer_name_template is None:
if single_layer:
layer_name_template = 'merged'
else:
layer_name_template = '{AUTO_NAME}'
vrt_filename = None
if not EQUAL(output_format, 'VRT'):
dst_ds = gdal.OpenEx(dst_filename, gdal.OF_VECTOR | gdal.OF_UPDATE)
if dst_ds is not None:
if not update and not overwrite_ds:
                print('ERROR: Destination dataset already exists, ' +
                      'but neither -update nor -overwrite_ds was specified')
return 1
if overwrite_ds:
drv = dst_ds.GetDriver()
dst_ds = None
if drv.GetDescription() == 'OGR_VRT':
# We don't want to destroy the sources of the VRT
gdal.Unlink(dst_filename)
else:
drv.Delete(dst_filename)
elif update:
print('ERROR: Destination dataset does not exist')
return 1
if dst_ds is None:
drv = gdal.GetDriverByName(output_format)
if drv is None:
print('ERROR: Invalid driver: %s' % output_format)
return 1
dst_ds = drv.Create(
dst_filename, 0, 0, 0, gdal.GDT_Unknown, dsco)
if dst_ds is None:
return 1
vrt_filename = '/vsimem/_ogrmerge_.vrt'
else:
if gdal.VSIStatL(dst_filename) and not overwrite_ds:
            print('ERROR: Destination dataset already exists, ' +
                  'but -overwrite_ds was not specified')
return 1
vrt_filename = dst_filename
f = gdal.VSIFOpenL(vrt_filename, 'wb')
if f is None:
print('ERROR: Cannot create %s' % vrt_filename)
return 1
writer = XMLWriter(f)
writer.open_element('OGRVRTDataSource')
if single_layer:
ogr_vrt_union_layer_written = False
for src_ds_idx, src_dsname in enumerate(src_datasets):
src_ds = ogr.Open(src_dsname)
if src_ds is None:
print('ERROR: Cannot open %s' % src_dsname)
if skip_failures:
continue
gdal.VSIFCloseL(f)
gdal.Unlink(vrt_filename)
return 1
for src_lyr_idx, src_lyr in enumerate(src_ds):
if src_geom_types:
gt = ogr.GT_Flatten(src_lyr.GetGeomType())
if gt not in src_geom_types:
continue
if not ogr_vrt_union_layer_written:
ogr_vrt_union_layer_written = True
writer.open_element('OGRVRTUnionLayer',
attrs={'name': layer_name_template})
if src_layer_field_name is not None:
writer.write_element_value('SourceLayerFieldName',
src_layer_field_name)
if field_strategy is not None:
writer.write_element_value('FieldStrategy',
field_strategy)
layer_name = src_layer_field_content
src_lyr_name = src_lyr.GetName()
try:
src_lyr_name = src_lyr_name.decode('utf-8')
except AttributeError:
pass
basename = None
if os.path.exists(src_dsname):
basename = os.path.basename(src_dsname)
if '.' in basename:
basename = '.'.join(basename.split(".")[0:-1])
if basename == src_lyr_name:
layer_name = layer_name.replace('{AUTO_NAME}', basename)
elif basename is None:
layer_name = layer_name.replace(
'{AUTO_NAME}',
'Dataset%d_%s' % (src_ds_idx, src_lyr_name))
else:
layer_name = layer_name.replace(
'{AUTO_NAME}', basename + '_' + src_lyr_name)
if basename is not None:
layer_name = layer_name.replace('{DS_BASENAME}', basename)
else:
layer_name = layer_name.replace('{DS_BASENAME}',
src_dsname)
layer_name = layer_name.replace('{DS_NAME}', '%s' %
src_dsname)
layer_name = layer_name.replace('{DS_INDEX}', '%d' %
src_ds_idx)
layer_name = layer_name.replace('{LAYER_NAME}',
src_lyr_name)
layer_name = layer_name.replace('{LAYER_INDEX}', '%d' %
src_lyr_idx)
if t_srs is not None:
writer.open_element('OGRVRTWarpedLayer')
writer.open_element('OGRVRTLayer',
attrs={'name': layer_name})
attrs = {}
if EQUAL(output_format, 'VRT') and \
os.path.exists(src_dsname) and \
not os.path.isabs(src_dsname) and \
'/' not in vrt_filename and \
'\\' not in vrt_filename:
attrs['relativeToVRT'] = '1'
if single_layer:
attrs['shared'] = '1'
writer.write_element_value('SrcDataSource', src_dsname,
attrs=attrs)
writer.write_element_value('SrcLayer', src_lyr.GetName())
if a_srs is not None:
writer.write_element_value('LayerSRS', a_srs)
writer.close_element('OGRVRTLayer')
if t_srs is not None:
if s_srs is not None:
writer.write_element_value('SrcSRS', s_srs)
writer.write_element_value('TargetSRS', t_srs)
writer.close_element('OGRVRTWarpedLayer')
if ogr_vrt_union_layer_written:
writer.close_element('OGRVRTUnionLayer')
else:
for src_ds_idx, src_dsname in enumerate(src_datasets):
src_ds = ogr.Open(src_dsname)
if src_ds is None:
print('ERROR: Cannot open %s' % src_dsname)
if skip_failures:
continue
gdal.VSIFCloseL(f)
gdal.Unlink(vrt_filename)
return 1
for src_lyr_idx, src_lyr in enumerate(src_ds):
if src_geom_types:
gt = ogr.GT_Flatten(src_lyr.GetGeomType())
if gt not in src_geom_types:
continue
src_lyr_name = src_lyr.GetName()
try:
src_lyr_name = src_lyr_name.decode('utf-8')
except AttributeError:
pass
layer_name = layer_name_template
basename = None
if os.path.exists(src_dsname):
basename = os.path.basename(src_dsname)
if '.' in basename:
basename = '.'.join(basename.split(".")[0:-1])
if basename == src_lyr_name:
layer_name = layer_name.replace('{AUTO_NAME}', basename)
elif basename is None:
layer_name = layer_name.replace(
'{AUTO_NAME}',
'Dataset%d_%s' % (src_ds_idx, src_lyr_name))
else:
layer_name = layer_name.replace(
'{AUTO_NAME}', basename + '_' + src_lyr_name)
if basename is not None:
layer_name = layer_name.replace('{DS_BASENAME}', basename)
elif '{DS_BASENAME}' in layer_name:
if skip_failures:
if '{DS_INDEX}' not in layer_name:
layer_name = layer_name.replace(
'{DS_BASENAME}', 'Dataset%d' % src_ds_idx)
else:
print('ERROR: Layer name template %s '
'includes {DS_BASENAME} '
'but %s is not a file' %
(layer_name_template, src_dsname))
gdal.VSIFCloseL(f)
gdal.Unlink(vrt_filename)
return 1
layer_name = layer_name.replace('{DS_NAME}', '%s' %
src_dsname)
layer_name = layer_name.replace('{DS_INDEX}', '%d' %
src_ds_idx)
layer_name = layer_name.replace('{LAYER_NAME}',
src_lyr_name)
layer_name = layer_name.replace('{LAYER_INDEX}', '%d' %
src_lyr_idx)
if t_srs is not None:
writer.open_element('OGRVRTWarpedLayer')
writer.open_element('OGRVRTLayer',
attrs={'name': layer_name})
attrs = {}
if EQUAL(output_format, 'VRT') and \
os.path.exists(src_dsname) and \
not os.path.isabs(src_dsname) and \
'/' not in vrt_filename and \
'\\' not in vrt_filename:
attrs['relativeToVRT'] = '1'
if single_layer:
attrs['shared'] = '1'
writer.write_element_value('SrcDataSource', src_dsname,
attrs=attrs)
writer.write_element_value('SrcLayer', src_lyr_name)
if a_srs is not None:
writer.write_element_value('LayerSRS', a_srs)
writer.close_element('OGRVRTLayer')
if t_srs is not None:
if s_srs is not None:
writer.write_element_value('SrcSRS', s_srs)
writer.write_element_value('TargetSRS', t_srs)
writer.close_element('OGRVRTWarpedLayer')
writer.close_element('OGRVRTDataSource')
gdal.VSIFCloseL(f)
ret = 0
if not EQUAL(output_format, 'VRT'):
accessMode = None
if append:
accessMode = 'append'
elif overwrite_layer:
accessMode = 'overwrite'
ret = gdal.VectorTranslate(dst_ds, vrt_filename,
accessMode=accessMode,
layerCreationOptions=lco,
skipFailures=skip_failures,
callback=progress,
callback_data=progress_arg)
if ret == 1:
ret = 0
else:
ret = 1
gdal.Unlink(vrt_filename)
return ret
###############################################################
# Entry point
def main():
argv = sys.argv
if sys.version_info < (3,0,0):
argv = [fn.decode(sys.getfilesystemencoding()) for fn in argv]
argv = ogr.GeneralCmdLineProcessor(argv)
if argv is None:
return 1
return process(argv[1:])
if __name__ == '__main__':
sys.exit(main()) | PypiClean |
/Hydro-0.1.7.tar.gz/Hydro-0.1.7/src/hydro/connectors/base_classes.py | __author__ = 'moshebasanchig'
import pandas as pd
from hydro.exceptions import HydroException
DSN = 'dsn'
CONNECTION_STRING = 'connection string'
class ConnectorBase(object):
_conn = None
def __init__(self):
self.logger = None
def _verify_connection_definitions(self):
raise HydroException("Not implemented")
def _connect(self):
raise HydroException("Not implemented")
def _close(self):
raise HydroException('Not implemented')
def execute(self):
raise HydroException('Not implemented')
def close(self):
self.logger.debug('Closing connection')
self._close()
self._conn = None
return True
def connect(self):
if not self._conn:
            self.logger.debug('Connection does not exist, verifying connection definitions')
self._verify_connection_definitions()
self._connect()
return True
def execute(self, command):
self.connect()
try:
self.logger.debug('Executing command: {0}'.format(command))
res = self._execute(command)
return res
except Exception, err:
self.logger.error('Error: {0}'.format(err.message))
self.close()
raise err
def set_logger(self, logger):
self.logger = logger
class DBBaseConnector(ConnectorBase):
"""
    Implementation of the DB base connector; the base functions that need to be implemented are _connect, _close and _execute.
"""
def __init__(self, conn_definitions):
self._conn = None
self._conf_defs = conn_definitions
super(DBBaseConnector, self).__init__()
def _convert_results_to_dataframe(self, cursor):
"""
This is deprecated - use SQLAlchemy and pandas' read_sql method instead
"""
rows = cursor.fetchall()
columns = [col[0] for col in cursor.description]
if isinstance(rows, tuple):
rows = list(rows)
data = pd.DataFrame.from_records(rows, columns=columns)
return data
def _verify_connection_definitions(self):
"""
Verifies if connection configuration is complete
"""
if self._conf_defs['connection_type'] in [DSN, CONNECTION_STRING]:
if not self._conf_defs['connection_string']:
raise HydroException('Connection dsn is Null')
else:
for att in ('db_user', 'db_password', 'connection_string'):
if not self._conf_defs.get(att):
raise HydroException('Connection {0} is Null'.format(att))
def _execute(self, command):
"""
base class
"""
cursor = self._conn.cursor()
cursor.execute(command)
result = self._convert_results_to_dataframe(cursor)
cursor.close()
return result
def _close(self):
        self._conn.close()
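# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): a concrete connector
# only needs to supply _connect and _close, since DBBaseConnector already
# provides _verify_connection_definitions and _execute. The sqlite3 backend
# and the logger wiring below are illustrative assumptions, not Hydro APIs.
if __name__ == '__main__':
    import logging
    import sqlite3
    class SqliteConnector(DBBaseConnector):
        def _connect(self):
            self._conn = sqlite3.connect(self._conf_defs['connection_string'])
        def _close(self):
            self._conn.close()
    logging.basicConfig(level=logging.DEBUG)
    connector = SqliteConnector({'connection_type': CONNECTION_STRING,
                                 'connection_string': ':memory:'})
    connector.set_logger(logging.getLogger('hydro.example'))
    print connector.execute('SELECT 1 AS answer')  # returns a one-row pandas DataFrame
    connector.close()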
# /B9gemyaeix-4.14.1.tar.gz/B9gemyaeix-4.14.1/weblate/formats/ttkit.py
"""Translate Toolkit based file-format wrappers."""
import importlib
import inspect
import os
import re
import subprocess
from typing import Callable, List, Optional, Tuple, Union
from django.core.exceptions import ValidationError
from django.utils.functional import cached_property
from django.utils.translation import gettext
from django.utils.translation import gettext_lazy as _
from lxml import etree
from lxml.etree import XMLSyntaxError
from translate.misc import quote
from translate.misc.multistring import multistring
from translate.misc.xml_helpers import setXMLspace
from translate.storage.base import TranslationStore
from translate.storage.csvl10n import csv, csvunit
from translate.storage.jsonl10n import BaseJsonUnit, JsonFile
from translate.storage.lisa import LISAfile
from translate.storage.po import pofile, pounit
from translate.storage.poxliff import PoXliffFile
from translate.storage.resx import RESXFile
from translate.storage.tbx import tbxfile, tbxunit
from translate.storage.ts2 import tsfile, tsunit
from translate.storage.xliff import ID_SEPARATOR, xlifffile, xliffunit
import weblate.utils.version
from weblate.checks.flags import Flags
from weblate.formats.base import (
BilingualUpdateMixin,
TranslationFormat,
TranslationUnit,
UpdateError,
)
from weblate.lang.data import FORMULA_WITH_ZERO, ZERO_PLURAL_TYPES
from weblate.lang.models import Plural
from weblate.trans.util import (
get_clean_env,
get_string,
rich_to_xliff_string,
xliff_string_to_rich,
)
from weblate.utils.errors import report_error
from weblate.utils.state import STATE_APPROVED, STATE_FUZZY, STATE_TRANSLATED
LOCATIONS_RE = re.compile(r"^([+-]|.*, [+-]|.*:[+-])")
PO_DOCSTRING_LOCATION = re.compile(r":docstring of [a-zA-Z0-9._]+:[0-9]+")
SUPPORTS_FUZZY = (pounit, tsunit, csvunit)
XLIFF_FUZZY_STATES = {"new", "needs-translation", "needs-adaptation", "needs-l10n"}
class TTKitUnit(TranslationUnit):
@cached_property
def locations(self):
"""Return a comma-separated list of locations."""
return ", ".join(x for x in self.mainunit.getlocations() if x is not None)
@cached_property
def source(self):
"""Return source string from a Translate Toolkit unit."""
if self.template is not None:
return get_string(self.template.target)
return get_string(self.unit.source)
@cached_property
def target(self):
"""Return target string from a Translate Toolkit unit."""
if self.unit is None:
if self.parent.is_template:
return get_string(self.template.target)
return ""
return get_string(self.unit.target)
@cached_property
def context(self):
"""Return context of message.
In some cases we have to use ID here to make all the back-ends consistent.
"""
return self.mainunit.getcontext()
@cached_property
def notes(self):
"""Return notes or notes from units."""
comment = ""
if self.unit is not None:
comment = self.unit.getnotes()
if self.template is not None:
# Avoid duplication in case template has same notes
template_comment = self.template.getnotes()
if template_comment != comment:
comment = template_comment + "\n" + comment
return comment
def is_translated(self):
"""Check whether unit is translated."""
if self.unit is None:
return False
return self.unit.istranslated()
def is_fuzzy(self, fallback=False):
"""Check whether unit needs editing."""
if self.unit is None:
return fallback
# Most of the formats do not support this, but they
# happily return False
if isinstance(self.unit, SUPPORTS_FUZZY):
return self.unit.isfuzzy()
return fallback
def has_content(self):
"""Check whether unit has content."""
return (
not self.mainunit.isheader()
and not self.mainunit.isblank()
and not self.mainunit.isobsolete()
)
def is_readonly(self):
return not self.mainunit.istranslatable()
def set_target(self, target: Union[str, List[str]]):
"""Set translation unit target."""
self._invalidate_target()
if isinstance(target, list):
target = multistring(target)
self.unit.target = target
def set_state(self, state):
"""Set fuzzy /approved flag on translated unit."""
if "flags" in self.__dict__:
del self.__dict__["flags"]
self.unit.markfuzzy(state == STATE_FUZZY)
if hasattr(self.unit, "markapproved"):
self.unit.markapproved(state == STATE_APPROVED)
@cached_property
def flags(self):
"""Return flags from unit.
We currently extract maxwidth attribute.
"""
flags = Flags()
if hasattr(self.unit, "xmlelement"):
flags.merge(self.unit.xmlelement)
if hasattr(self.template, "xmlelement"):
flags.merge(self.template.xmlelement)
return flags.format()
class KeyValueUnit(TTKitUnit):
@cached_property
def source(self):
"""Return source string from a Translate Toolkit unit."""
if self.template is not None:
return get_string(self.template.source)
return get_string(self.unit.name)
@cached_property
def target(self):
"""Return target string from a Translate Toolkit unit."""
if self.unit is None:
return ""
return get_string(self.unit.source)
@cached_property
def context(self):
"""Return context of message.
In some cases we have to use ID here to make all the back-ends consistent.
"""
context = super().context
if not context:
return self.mainunit.getid()
return context
def is_translated(self):
"""Check whether unit is translated."""
if self.unit is None:
return False
# The hasattr check here is needed for merged storages
# where template is different kind than translations
if hasattr(self.unit, "value"):
return not self.unit.isfuzzy() and self.unit.value != ""
return self.unit.istranslated()
def set_target(self, target: Union[str, List[str]]):
"""Set translation unit target."""
super().set_target(target)
        # Propagate to value so that serializing of empty values works correctly
if not target:
self.unit.value = self.unit.target
class TTKitFormat(TranslationFormat):
unit_class = TTKitUnit
loader = ("", "")
set_context_bilingual = True
# Use settarget/setsource to set language as well
use_settarget = False
force_encoding = None
def __init__(
self,
storefile,
template_store=None,
language_code: Optional[str] = None,
source_language: Optional[str] = None,
is_template: bool = False,
):
super().__init__(
storefile,
template_store=template_store,
language_code=language_code,
is_template=is_template,
source_language=source_language,
)
# Set language (needed for some which do not include this)
if language_code is not None and self.store.gettargetlanguage() is None:
# This gets already native language code, so no conversion is needed
self.store.settargetlanguage(language_code)
if source_language is not None and self.store.getsourcelanguage() is None:
# This gets already native language code, so no conversion is needed
self.store.setsourcelanguage(source_language)
@staticmethod
def serialize(store):
"""Serialize given Translate Toolkit store."""
return bytes(store)
def fixup(self, store):
"""Perform optional fixups on store."""
if self.force_encoding is not None:
store.encoding = self.force_encoding
return
def load(self, storefile, template_store):
"""Load file using defined loader."""
if isinstance(storefile, TranslationStore):
# Used by XLSX writer
return storefile
return self.parse_store(storefile)
@classmethod
def get_class(cls):
"""Return class for handling this module."""
# Direct class
if inspect.isclass(cls.loader):
return cls.loader
# Tuple style loader, import from translate toolkit
module_name, class_name = cls.loader
if "." not in module_name:
module_name = f"translate.storage.{module_name}"
module = importlib.import_module(module_name)
# Get the class
return getattr(module, class_name)
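    # Illustrative example: a tuple loader such as ("properties", "javafile")
    # resolves to translate.storage.properties.javafile, while a loader that is
    # already a class (e.g. pofile above) is returned directly.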
@staticmethod
def get_class_kwargs():
return {}
def get_store_instance(self, **kwargs):
kwargs.update(self.get_class_kwargs())
store = self.get_class()(**kwargs)
# Apply possible fixups
self.fixup(store)
return store
def parse_store(self, storefile):
"""Parse the store."""
store = self.get_store_instance()
# Read the content
if isinstance(storefile, str):
with open(storefile, "rb") as handle:
content = handle.read()
else:
content = storefile.read()
# Parse the content
store.parse(content)
return store
def add_unit(self, ttkit_unit):
"""Add new unit to underlying store."""
if isinstance(self.store, LISAfile):
# LISA based stores need to know this
self.store.addunit(ttkit_unit, new=True)
else:
self.store.addunit(ttkit_unit)
def save_content(self, handle):
"""Store content to file."""
self.store.serialize(handle)
def save(self):
"""Save underlying store to disk."""
self.save_atomic(self.storefile, self.save_content)
@classmethod
def mimetype(cls):
"""Return most common media type for format."""
return cls.get_class().Mimetypes[0]
@classmethod
def extension(cls):
"""Return most common file extension for format."""
return cls.get_class().Extensions[0]
def is_valid(self):
"""Check whether store seems to be valid.
In some cases Translate Toolkit happily "parses" the file, even though it really
did not do so (e.g. gettext parser on a random textfile).
"""
if not super().is_valid():
return False
if self.store is None:
return False
return True
def construct_unit(self, source: str):
if self.use_settarget and self.source_language:
# Setting source on LISAunit will make it use default language
unit = self.store.UnitClass(None)
unit.setsource(source, self.source_language)
else:
unit = self.store.UnitClass(source)
# Needed by some formats (Android) to set target
unit._store = self.store
return unit
def create_unit_key(
self, key: str, source: Union[str, List[str], multistring]
) -> Union[str, multistring]:
return key
def create_unit(
self,
key: str,
source: Union[str, List[str]],
target: Optional[Union[str, List[str]]] = None,
):
# Make sure target is a string
if target is None:
target = ""
# Process source
if isinstance(source, list):
context = source[0]
if len(source) == 1:
# Single string passed plain
source = context
else:
                # List passed as multistring
source = multistring(source)
else:
# This is string
context = source
# Process target
if isinstance(target, list):
if len(target) == 1:
target = target[0]
else:
target = multistring(target)
# Build the unit
unit = self.construct_unit(context)
if self.is_template or self.template_store:
# Monolingual translation
unit.setid(key)
target = source
source = self.create_unit_key(key, source)
else:
# Bilingual translation
if isinstance(unit, (tbxunit, xliffunit)) and key:
unit.setid(key)
elif self.set_context_bilingual and key:
unit.setcontext(key)
elif isinstance(unit, BaseJsonUnit):
unit.setid(context)
if self.use_settarget and self.source_language:
unit.setsource(source, self.source_language)
else:
unit.source = source
if self.use_settarget and self.language_code:
unit.settarget(target, self.language_code)
else:
unit.target = target
return unit
def untranslate_unit(self, unit, plural, fuzzy: bool):
if hasattr(unit, "markapproved"):
# Xliff only
unit.markapproved(False)
else:
unit.markfuzzy(fuzzy)
if unit.hasplural():
unit.target = [""] * plural.number
else:
unit.target = ""
def untranslate_store(self, language, fuzzy: bool = False):
"""Remove translations from Translate Toolkit store."""
self.store.settargetlanguage(self.get_language_code(language.code))
plural = language.plural
for unit in self.store.units:
if unit.istranslatable() and (unit.istranslated() or unit.isfuzzy()):
self.untranslate_unit(unit, plural, fuzzy)
@classmethod
def get_new_file_content(cls):
result = cls.new_translation
if isinstance(result, str):
result = result.encode()
return result
@classmethod
def create_new_file(
cls,
filename: str,
language: str,
base: str,
callback: Optional[Callable] = None,
):
"""Handle creation of new translation file."""
if base:
# Parse file
store = cls(base)
if callback:
callback(store)
store.untranslate_store(language)
store.store.savefile(filename)
elif cls.new_translation is None:
raise ValueError("Not supported")
else:
with open(filename, "wb") as output:
output.write(cls.get_new_file_content())
@classmethod
def is_valid_base_for_new(
cls,
base: str,
monolingual: bool,
errors: Optional[List] = None,
fast: bool = False,
) -> bool:
"""Check whether base is valid."""
if not base:
if cls.create_empty_bilingual:
return True
return monolingual and cls.new_translation is not None
try:
if not fast:
cls(base)
return os.path.exists(base)
except Exception as exception:
if errors is not None:
errors.append(exception)
report_error(cause="File-parsing error")
return False
@property
def all_store_units(self):
"""Wrapper for all store unit filtering out obsolete."""
return (
unit
for unit in self.store.units
if not unit.isobsolete() and not unit.isheader()
)
def delete_unit(self, ttkit_unit) -> Optional[str]:
self.store.removeunit(ttkit_unit)
class PropertiesUnit(KeyValueUnit):
"""Wrapper for properties-based units."""
@cached_property
def locations(self):
"""Return a comma-separated list of locations."""
return ""
@cached_property
def source(self):
"""Return source string from a Translate Toolkit unit."""
if self.template is not None:
return get_string(self.template.source)
# Need to decode property encoded string
return get_string(quote.propertiesdecode(self.unit.name))
@cached_property
def target(self):
"""Return target string from a Translate Toolkit unit."""
if self.unit is None:
return ""
return get_string(self.unit.target or self.unit.source)
class PoUnit(TTKitUnit):
"""Wrapper for gettext PO unit."""
def set_state(self, state):
"""Set fuzzy /approved flag on translated unit."""
super().set_state(state)
if state != STATE_FUZZY:
self.unit.prev_msgid = []
self.unit.prev_msgid_plural = []
self.unit.prev_msgctxt = []
@cached_property
def flags(self):
"""Return flags or typecomments from units."""
flags = Flags(*self.mainunit.typecomments)
flags.remove({"fuzzy"})
return flags.format()
@cached_property
def previous_source(self):
"""Return previous message source if there was any."""
if not self.is_fuzzy():
return ""
return get_string(self.unit.prev_source)
@cached_property
def locations(self):
"""
Return comma separated list of locations.
Here we clean up Sphinx-generated "docstring of ..." part.
"""
locations = " ".join(self.mainunit.getlocations())
locations = PO_DOCSTRING_LOCATION.sub("", locations)
return ", ".join(locations.split())
class PoMonoUnit(PoUnit):
@cached_property
def context(self):
"""Return context of message.
In some cases we have to use ID here to make all the backends consistent.
"""
# Monolingual PO files
if self.template is not None:
context = self.template.getcontext()
source = self.template.source
if source and context:
return f"{context}.{source}"
return source or context
return super().context
@cached_property
def notes(self):
result = []
notes = super().notes
if notes:
result.append(notes)
# Use unit context as note only in case source is present, otherwise
# it is used as a context (see above)
if self.template is not None and self.template.source:
context = self.template.getcontext()
if context:
result.append(context)
return "\n".join(result)
def set_target(self, target: Union[str, List[str]]):
"""Set translation unit target."""
# Add blank msgid_plural to store plural
if isinstance(target, (list, multistring)) and not self.unit.hasplural():
self.unit.msgid_plural = ['""']
super().set_target(target)
class XliffUnit(TTKitUnit):
"""Wrapper unit for XLIFF.
XLIFF is special in Translate Toolkit — it uses locations for what
is context in other formats.
"""
def _invalidate_target(self):
"""Invalidate target cache."""
super()._invalidate_target()
if "xliff_node" in self.__dict__:
del self.__dict__["xliff_node"]
def get_xliff_node(self):
try:
return self.unit.getlanguageNode(lang=None, index=1)
except AttributeError:
return None
@cached_property
def xliff_node(self):
return self.get_xliff_node()
@property
def xliff_state(self):
node = self.xliff_node
if node is None:
return None
return node.get("state", None)
@cached_property
def context(self):
"""Return context of message.
Use resname if available as it usually is more interesting for the translator
than ID.
"""
resname = self.mainunit.xmlelement.get("resname")
if resname:
return resname
return self.mainunit.getid().replace(ID_SEPARATOR, "///")
@cached_property
def locations(self):
"""Return comma separated list of locations."""
return ""
def is_translated(self):
"""Check whether unit is translated.
We replace Translate Toolkit logic here as the isfuzzy is pretty much wrong
there, see is_fuzzy docs.
"""
return bool(self.target)
def is_fuzzy(self, fallback=False):
"""Check whether unit needs edit.
The isfuzzy on XLIFF is really messing up the "approved" flag with "fuzzy"
flag, leading to various problems.
That's why we handle it on our own.
"""
return self.target and self.xliff_state in XLIFF_FUZZY_STATES
def set_state(self, state):
"""Set fuzzy /approved flag on translated unit."""
self.unit.markapproved(state == STATE_APPROVED)
if state == STATE_FUZZY:
# Always set state for fuzzy
self.xliff_node.set("state", "needs-translation")
elif state == STATE_TRANSLATED:
# Always set state for translated
self.xliff_node.set("state", "translated")
elif state == STATE_APPROVED:
self.xliff_node.set("state", "final")
elif self.xliff_state:
# Only update state if it exists
self.xliff_node.set("state", "new")
def is_approved(self, fallback=False):
"""Check whether unit is approved."""
if self.unit is None:
return fallback
if hasattr(self.unit, "isapproved"):
return self.unit.isapproved()
return fallback
def has_content(self):
"""Check whether unit has content.
For some reason, blank string does not mean non-translatable unit in XLIFF, so
        let's skip those as well.
"""
return (
not self.mainunit.isheader()
and bool(self.source)
and not self.mainunit.isobsolete()
)
def set_target(self, target: Union[str, List[str]]):
"""Set translation unit target."""
self._invalidate_target()
if isinstance(target, list):
target = multistring(target)
if self.template is not None:
if self.parent.is_template:
# Use source for monolingual files if editing template
self.unit.source = target
elif self.unit.source:
# Update source to match current source
self.unit.source = self.template.source
# Always set target, even in monolingual template
self.unit.target = target
@cached_property
def source(self):
"""Return source string from a Translate Toolkit unit."""
if self.template is not None:
# Use target if set, otherwise fall back to source
if self.template.target:
return get_string(self.template.target)
return get_string(self.template.source)
return get_string(self.unit.source)
@cached_property
def target(self):
"""Return target string from a Translate Toolkit unit."""
if self.unit is None:
return ""
# Use source for monolingual base if target is not set
if self.unit.target is None:
if self.parent.is_template:
return get_string(self.unit.source)
return ""
return get_string(self.unit.target)
class RichXliffUnit(XliffUnit):
"""Wrapper unit for XLIFF with XML elements."""
@cached_property
def source(self):
"""Return source string from a Translate Toolkit unit."""
if self.template is not None:
# Use target if set, otherwise fall back to source
if self.template.target:
return rich_to_xliff_string(self.template.rich_target)
return rich_to_xliff_string(self.template.rich_source)
return rich_to_xliff_string(self.unit.rich_source)
@cached_property
def target(self):
"""Return target string from a Translate Toolkit unit."""
if self.unit is None:
return ""
# Use source for monolingual base if target is not set
if self.unit.target is None:
if self.parent.is_template:
return rich_to_xliff_string(self.unit.rich_source)
return ""
return rich_to_xliff_string(self.unit.rich_target)
@cached_property
def flags(self):
flags = Flags(super().flags)
flags.merge("xml-text")
return flags.format()
def set_target(self, target: Union[str, List[str]]):
"""Set translation unit target."""
self._invalidate_target()
# Delete the empty target element
if not target:
xmlnode = self.get_xliff_node()
if xmlnode is not None:
xmlnode.getparent().remove(xmlnode)
return
try:
converted = xliff_string_to_rich(target)
except (XMLSyntaxError, TypeError, KeyError):
# KeyError happens on missing attribute
converted = [target]
if self.template is not None:
if self.parent.is_template:
# Use source for monolingual files if editing template
self.unit.rich_source = converted
elif self.unit.source:
# Update source to match current source
self.unit.rich_source = self.template.rich_source
# Always set target, even in monolingual template
self.unit.rich_target = converted
class FlatXMLUnit(TTKitUnit):
@cached_property
def context(self):
if self.template is not None:
return self.template.source
return self.mainunit.source
@cached_property
def source(self):
return get_string(self.mainunit.target)
def has_content(self):
"""Check whether unit has content.
The attribute-less units will have context None.
"""
if self.context is None:
return False
return super().has_content()
class MonolingualIDUnit(TTKitUnit):
@cached_property
def context(self):
if self.template is not None:
return self.template.getid()
return self.mainunit.getcontext()
class TSUnit(MonolingualIDUnit):
@cached_property
def source(self):
if self.template is None and self.mainunit.hasplural():
# Need to apply special magic for plurals here
# as there is no singular/plural in the source string
source = self.unit.source
return get_string([source.replace("(s)", ""), source.replace("(s)", "s")])
return super().source
@cached_property
def locations(self):
"""Return a comma-separated list of locations."""
result = super().locations
# Do not try to handle relative locations in Qt TS, see
# http://doc.qt.io/qt-5/linguist-ts-file-format.html
if LOCATIONS_RE.match(result):
return ""
return result
@cached_property
def target(self):
"""Return target string from a Translate Toolkit unit."""
if self.unit is None:
return ""
if not self.unit.isreview() and not self.unit.istranslated():
# For Qt ts, empty translated string means source should be used
return self.source
return super().target
def is_translated(self):
"""Check whether unit is translated."""
if self.unit is None:
return False
# For Qt ts, empty translated string means source should be used
return not self.unit.isreview() or self.unit.istranslated()
class MonolingualSimpleUnit(MonolingualIDUnit):
@cached_property
def locations(self):
return ""
@cached_property
def source(self):
if self.template is None:
return self.mainunit.getid().lstrip(".")
return get_string(self.template.target)
def has_content(self):
return not self.mainunit.isheader()
def is_readonly(self):
return False
class JSONUnit(MonolingualSimpleUnit):
@cached_property
def context(self):
context = super().context
if context.startswith("."):
return context[1:]
return context
class PlaceholdersJSONUnit(JSONUnit):
@cached_property
def flags(self):
placeholders = self.mainunit.placeholders
if not placeholders:
return ""
return "placeholders:{},case-insensitive".format(
":".join(
Flags.format_value(f"${key.upper()}$") for key in placeholders.keys()
)
)
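    # Illustrative example (assumed unit data): a unit with placeholders keyed
    # "count" and "name" would typically yield the flag string
    # "placeholders:$COUNT$:$NAME$,case-insensitive".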
class CSVUnit(MonolingualSimpleUnit):
@staticmethod
def unescape_csv(string):
r"""
Removes Excel-specific escaping from CSV.
See weblate.formats.exporters.CSVExporter.string_filter
Note: | is replaced by \ in the condition as it is escaped
"""
if (
len(string) > 2
and string[0] == "'"
and string[-1] == "'"
and string[1] in ("=", "+", "-", "@", "\\", "%")
):
return get_string(string[1:-1].replace("\\|", "|"))
return get_string(string)
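    # Illustrative round-trip based on the condition above: a value exported as
    # "'=SUM(A1)'" is unescaped back to "=SUM(A1)", and an escaped "\|" inside
    # the wrapper is restored to "|".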
@cached_property
def context(self):
def get_context(unit):
if unit.id:
return unit.id
if unit.context:
return unit.context
return unit.getid()
        # Needed to avoid Translate Toolkit constructing the ID
        # as context\04source
if self.template is not None:
return get_context(self.template)
if self.parent.is_template:
return get_context(self.unit)
return self.unescape_csv(self.mainunit.getcontext())
@cached_property
def locations(self):
return self.mainunit.location
@cached_property
def source(self):
        # Needed to avoid Translate Toolkit constructing the ID
        # as context\04source
if self.template is None:
return self.unescape_csv(get_string(self.mainunit.source))
return self.unescape_csv(super().source)
@cached_property
def target(self):
return self.unescape_csv(super().target)
def is_fuzzy(self, fallback=False):
# Report fuzzy state only if present in the fields
if "fuzzy" not in self.parent.store.fieldnames:
return fallback
return super().is_fuzzy()
class RESXUnit(TTKitUnit):
@cached_property
def locations(self):
return ""
@cached_property
def context(self):
if self.template is not None:
return self.template.getid()
return self.unit.getid()
@cached_property
def source(self):
if self.template is None:
return self.mainunit.getid()
return get_string(self.template.target)
class PHPUnit(KeyValueUnit):
@cached_property
def locations(self):
return ""
@cached_property
def source(self):
if self.template is not None:
return get_string(self.template.source)
return get_string(self.unit.getid())
@cached_property
def target(self):
if self.unit is None:
return ""
return get_string(self.unit.source)
class INIUnit(TTKitUnit):
@cached_property
def locations(self):
return ""
@cached_property
def context(self):
if self.template is not None:
return self.template.location
return self.unit.location
def has_content(self):
return True
def is_readonly(self):
return False
class BasePoFormat(TTKitFormat, BilingualUpdateMixin):
loader = pofile
@classmethod
def get_plural(cls, language, store=None):
"""Return matching plural object."""
if store:
header = store.store.parseheader()
else:
# This will trigger KeyError later
header = {}
try:
number, formula = Plural.parse_plural_forms(header["Plural-Forms"])
except (ValueError, KeyError):
return super().get_plural(language, store)
# Find matching one
for plural in language.plural_set.iterator():
if plural.same_plural(number, formula):
return plural
# Create new one
return Plural.objects.create(
language=language,
source=Plural.SOURCE_GETTEXT,
number=number,
formula=formula,
)
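    # Illustrative example (assumed header value): a PO header entry such as
    #   Plural-Forms: nplurals=2; plural=n != 1;
    # parses into number=2 and formula="n != 1", which is then matched against
    # the language's existing plural definitions.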
def untranslate_store(self, language, fuzzy=False):
"""Remove translations from Translate Toolkit store."""
super().untranslate_store(language, fuzzy)
plural = language.plural
self.store.updateheader(
last_translator="Automatically generated",
plural_forms=plural.plural_form,
language_team="none",
)
def update_header(self, **kwargs):
"""Update store header if available."""
kwargs["x_generator"] = f"Weblate {weblate.utils.version.VERSION}"
# Adjust Content-Type header if needed
header = self.store.parseheader()
if (
"Content-Type" not in header
or "charset=CHARSET" in header["Content-Type"]
or "charset=ASCII" in header["Content-Type"]
):
kwargs["Content_Type"] = "text/plain; charset=UTF-8"
self.store.updateheader(**kwargs)
@classmethod
def do_bilingual_update(cls, in_file: str, out_file: str, template: str, **kwargs):
"""Wrapper around msgmerge."""
args = [
"--output-file",
out_file,
in_file,
template,
]
if "args" in kwargs:
args = kwargs["args"] + args
else:
args = ["--previous"] + args
cmd = ["msgmerge"] + args
try:
result = subprocess.run(
cmd,
env=get_clean_env(),
cwd=os.path.dirname(out_file),
capture_output=True,
check=True,
text=True,
)
# The warnings can cause corruption (for example in case
# PO file header is missing ASCII encoding is assumed)
errors = []
for line in result.stderr.splitlines():
if (
"warning: internationalized messages should not contain the" in line
or ". done." in line
):
continue
errors.append(line)
if errors:
raise UpdateError(" ".join(cmd), "\n".join(errors))
except OSError as error:
report_error(cause="Failed msgmerge")
raise UpdateError(" ".join(cmd), error)
except subprocess.CalledProcessError as error:
report_error(cause="Failed msgmerge")
raise UpdateError(" ".join(cmd), error.output + error.stderr)
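    # For reference, with the default arguments the wrapper above runs a
    # command line equivalent to (file names are placeholders):
    #   msgmerge --previous --output-file updated.po translation.po template.pot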
def add_unit(self, ttkit_unit):
self.store.require_index()
# Check if there is matching obsolete unit
old_unit = self.store.id_index.get(ttkit_unit.getid())
if old_unit and old_unit.isobsolete():
self.store.removeunit(old_unit)
super().add_unit(ttkit_unit)
class PoFormat(BasePoFormat):
name = _("gettext PO file")
format_id = "po"
monolingual = False
autoload = ("*.po", "*.pot")
unit_class = PoUnit
@classmethod
def get_new_file_content(cls):
"""Empty PO file content."""
return b""
class PoMonoFormat(BasePoFormat):
name = _("gettext PO file (monolingual)")
format_id = "po-mono"
monolingual = True
autoload = ()
new_translation = (
'msgid ""\n'
'msgstr "X-Generator: Weblate\\n'
"MIME-Version: 1.0\\n"
"Content-Type: text/plain; charset=UTF-8\\n"
'Content-Transfer-Encoding: 8bit"'
)
unit_class = PoMonoUnit
bilingual_class = PoFormat
set_context_bilingual = False
def create_unit_key(
self, key: str, source: Union[str, List[str], multistring]
) -> Union[str, multistring]:
if isinstance(source, (list, multistring)):
return multistring([key, f"{key}_plural"])
return key
class TSFormat(TTKitFormat):
name = _("Qt Linguist translation file")
format_id = "ts"
loader = tsfile
autoload = ("*.ts",)
unit_class = TSUnit
set_context_bilingual = False
def untranslate_store(self, language, fuzzy: bool = False):
"""Remove translations from Translate Toolkit store."""
# We need to mark all units as fuzzy to get
# type="unfinished" on empty strings, which are otherwise
# treated as translated same as source
super().untranslate_store(language, True)
class XliffFormat(TTKitFormat):
name = _("XLIFF translation file")
format_id = "plainxliff"
loader = xlifffile
autoload = ()
unit_class = XliffUnit
language_format = "bcp"
use_settarget = True
def untranslate_unit(self, unit, plural, fuzzy: bool):
super().untranslate_unit(unit, plural, fuzzy)
# Delete empty <target/> tag
try:
xmlnode = self.unit.getlanguageNode(lang=None, index=1)
if xmlnode is not None:
xmlnode.getparent().remove(xmlnode)
except AttributeError:
pass
def construct_unit(self, source: str):
unit = super().construct_unit(source)
# Make sure new unit is using same namespace as the original
# file (xliff 1.1/1.2)
unit.namespace = self.store.namespace
unit.xmlelement = etree.Element(unit.namespaced(unit.rootNode))
setXMLspace(unit.xmlelement, "preserve")
return unit
def create_unit(
self,
key: str,
source: Union[str, List[str]],
target: Optional[Union[str, List[str]]] = None,
):
unit = super().create_unit(key, source, target)
unit.marktranslated()
unit.markapproved(False)
return unit
class RichXliffFormat(XliffFormat):
name = _("XLIFF with placeables support")
format_id = "xliff"
autoload: Tuple[str, ...] = ("*.xlf", "*.xliff", "*.sdlxliff", "*.mxliff")
unit_class = RichXliffUnit
class PoXliffFormat(XliffFormat):
name = _("XLIFF with gettext extensions")
format_id = "poxliff"
autoload = ("*.poxliff",)
loader = PoXliffFile
class PropertiesBaseFormat(TTKitFormat):
unit_class = PropertiesUnit
def is_valid(self):
result = super().is_valid()
if not result:
return False
# Accept empty file, but reject file without a delimiter.
# Translate Toolkit happily parses anything into a property
# even if there is no delimiter used in the line.
return not self.store.units or self.store.units[0].delimiter
@staticmethod
def mimetype():
"""Return most common media type for format."""
# Properties files do not expose mimetype
return "text/plain"
def construct_unit(self, source: str):
return self.store.UnitClass(source, personality=self.store.personality.name)
class StringsFormat(PropertiesBaseFormat):
name = _("iOS strings (UTF-16)")
format_id = "strings"
loader = ("properties", "stringsfile")
new_translation: Optional[Union[str, bytes]] = "\n".encode("utf-16")
autoload = ("*.strings",)
language_format = "bcp"
class StringsUtf8Format(StringsFormat):
name = _("iOS strings (UTF-8)")
format_id = "strings-utf8"
loader = ("properties", "stringsutf8file")
new_translation = "\n"
class PropertiesUtf8Format(PropertiesBaseFormat):
name = _("Java Properties (UTF-8)")
format_id = "properties-utf8"
loader = ("properties", "javautf8file")
new_translation = "\n"
language_format = "linux"
check_flags = ("auto-java-messageformat",)
class PropertiesUtf16Format(PropertiesBaseFormat):
name = _("Java Properties (UTF-16)")
format_id = "properties-utf16"
loader = ("properties", "javafile")
language_format = "linux"
new_translation = "\n"
# Translate Toolkit autodetection might fail in some cases.
force_encoding = "utf-16"
class PropertiesFormat(PropertiesBaseFormat):
name = _("Java Properties (ISO 8859-1)")
format_id = "properties"
loader = ("properties", "javafile")
language_format = "linux"
new_translation = "\n"
autoload = ("*.properties",)
# Java properties need to be ISO 8859-1, but Translate Toolkit converts
# them to UTF-8.
force_encoding = "iso-8859-1"
class JoomlaFormat(PropertiesBaseFormat):
name = _("Joomla language file")
format_id = "joomla"
loader = ("properties", "joomlafile")
monolingual = True
new_translation = "\n"
autoload = ("*.ini",)
class GWTFormat(StringsFormat):
name = _("GWT properties")
format_id = "gwt"
loader = ("properties", "gwtfile")
new_translation = "\n"
check_flags = ("auto-java-messageformat",)
language_format = "linux"
class PhpFormat(TTKitFormat):
name = _("PHP strings")
format_id = "php"
loader = ("php", "phpfile")
new_translation = "<?php\n"
autoload = ("*.php",)
unit_class = PHPUnit
@staticmethod
def mimetype():
"""Return most common media type for format."""
return "text/x-php"
@staticmethod
def extension():
"""Return most common file extension for format."""
return "php"
class LaravelPhpFormat(PhpFormat):
name = _("Laravel PHP strings")
format_id = "laravel"
loader = ("php", "LaravelPHPFile")
class RESXFormat(TTKitFormat):
name = _(".NET resource file")
format_id = "resx"
loader = RESXFile
monolingual = True
unit_class = RESXUnit
new_translation = RESXFile.XMLskeleton
autoload = ("*.resx",)
language_format = "bcp"
class AndroidFormat(TTKitFormat):
name = _("Android String Resource")
format_id = "aresource"
loader = ("aresource", "AndroidResourceFile")
monolingual = True
unit_class = MonolingualIDUnit
new_translation = '<?xml version="1.0" encoding="utf-8"?>\n<resources></resources>'
autoload = ("strings*.xml", "values*.xml")
language_format = "android"
check_flags = ("java-printf-format",)
autoaddon = {"weblate.cleanup.blank": {}}
plural_preference = (
Plural.SOURCE_ANDROID,
Plural.SOURCE_CLDR,
Plural.SOURCE_DEFAULT,
)
class DictStoreMixin:
@classmethod
def validate_context(cls, context: str):
id_class = cls.get_class().UnitClass.IdClass
try:
id_class.from_string(context)
except Exception as error:
raise ValidationError(gettext("Failed to parse the key: %s") % error)
class JSONFormat(DictStoreMixin, TTKitFormat):
name = _("JSON file")
format_id = "json"
loader = JsonFile
unit_class = JSONUnit
autoload: Tuple[str, ...] = ("*.json",)
new_translation = "{}\n"
set_context_bilingual = False
@staticmethod
def mimetype():
"""Return most common media type for format."""
return "application/json"
@staticmethod
def extension():
"""Return most common file extension for format."""
return "json"
class JSONNestedFormat(JSONFormat):
name = _("JSON nested structure file")
format_id = "json-nested"
loader = ("jsonl10n", "JsonNestedFile")
autoload = ()
class WebExtensionJSONFormat(JSONFormat):
name = _("WebExtension JSON file")
format_id = "webextension"
loader = ("jsonl10n", "WebExtensionJsonFile")
monolingual = True
autoload = ("messages*.json",)
unit_class = PlaceholdersJSONUnit
class I18NextFormat(JSONFormat):
name = _("i18next JSON file v3")
format_id = "i18next"
loader = ("jsonl10n", "I18NextFile")
autoload = ()
check_flags = ("i18next-interpolation",)
class GoI18JSONFormat(JSONFormat):
name = _("go-i18n JSON file")
format_id = "go-i18n-json"
loader = ("jsonl10n", "GoI18NJsonFile")
autoload = ()
class ARBFormat(JSONFormat):
name = _("ARB file")
format_id = "arb"
loader = ("jsonl10n", "ARBJsonFile")
autoload = ("*.arb",)
unit_class = PlaceholdersJSONUnit
check_flags = ("icu-message-format",)
class CSVFormat(TTKitFormat):
name = _("CSV file")
format_id = "csv"
loader = ("csvl10n", "csvfile")
unit_class = CSVUnit
autoload: Tuple[str, ...] = ("*.csv",)
force_encoding = "auto"
def __init__(
self,
storefile,
template_store=None,
language_code: Optional[str] = None,
source_language: Optional[str] = None,
is_template: bool = False,
):
super().__init__(
storefile,
template_store=template_store,
language_code=language_code,
source_language=source_language,
is_template=is_template,
)
# Remove template if the file contains source, this is needed
# for import, but probably usable elsewhere as well
if "source" in self.store.fieldnames and not isinstance(
template_store, CSVFormat
):
self.template_store = None
@staticmethod
def mimetype():
"""Return most common media type for format."""
return "text/csv"
@staticmethod
def extension():
"""Return most common file extension for format."""
return "csv"
@staticmethod
def get_content_and_filename(storefile):
# Did we get file or filename or file object?
if hasattr(storefile, "read"):
filename = getattr(storefile, "name", getattr(storefile, "filename", None))
content = storefile.read()
storefile.close()
else:
filename = storefile
with open(filename, "rb") as handle:
content = handle.read()
return content, filename
def parse_store(self, storefile):
"""Parse the store."""
content, filename = self.get_content_and_filename(storefile)
# Parse file
store = self.get_store_instance()
store.parse(content, sample_length=40000)
# Did detection of headers work?
if store.fieldnames != ["location", "source", "target"]:
return store
fileobj = csv.StringIO(
store.detect_encoding(content, default_encodings=["utf-8", "utf-16"])[0]
)
# Try reading header
reader = csv.reader(fileobj, store.dialect)
header = next(reader)
fileobj.close()
# Check if the file is not two column only
if len(header) != 2:
return store
return self.parse_simple_csv(content, filename)
def parse_simple_csv(self, content, filename):
result = self.get_store_instance(fieldnames=["source", "target"])
result.parse(content, sample_length=None)
result.filename = filename
return result
class CSVUtf8Format(CSVFormat):
name = _("CSV file (UTF-8)")
format_id = "csv-utf-8"
autoload = ()
force_encoding = "utf-8"
class CSVSimpleFormat(CSVFormat):
name = _("Simple CSV file")
format_id = "csv-simple"
autoload: Tuple[str, ...] = ("*.txt",)
force_encoding = "auto"
@staticmethod
def extension():
"""Return most common file extension for format."""
return "csv"
def parse_store(self, storefile):
"""Parse the store."""
content, filename = self.get_content_and_filename(storefile)
return self.parse_simple_csv(content, filename)
class CSVSimpleFormatISO(CSVSimpleFormat):
name = _("Simple CSV file (ISO-8859-1)")
format_id = "csv-simple-iso"
force_encoding = "iso-8859-1"
autoload = ()
class CSVUtf8SimpleFormat(CSVSimpleFormat):
name = _("Simple CSV file (UTF-8)")
format_id = "csv-simple-utf-8"
force_encoding = "utf-8"
autoload = ()
class YAMLFormat(DictStoreMixin, TTKitFormat):
name = _("YAML file")
format_id = "yaml"
loader = ("yaml", "YAMLFile")
unit_class = MonolingualSimpleUnit
autoload: Tuple[str, ...] = ("*.pyml",)
new_translation = "{}\n"
@staticmethod
def mimetype():
"""Return most common media type for format."""
return "text/yaml"
@staticmethod
def extension():
"""Return most common file extension for format."""
return "yml"
class RubyYAMLFormat(YAMLFormat):
name = _("Ruby YAML file")
format_id = "ruby-yaml"
loader = ("yaml", "RubyYAMLFile")
autoload = ("*.ryml", "*.yml", "*.yaml")
class DTDFormat(TTKitFormat):
name = _("DTD file")
format_id = "dtd"
loader = ("dtd", "dtdfile")
autoload = ("*.dtd",)
unit_class = MonolingualSimpleUnit
new_translation = "\n"
@staticmethod
def mimetype():
"""Return most common media type for format."""
return "application/xml-dtd"
@staticmethod
def extension():
"""Return most common file extension for format."""
return "dtd"
@property
def all_store_units(self):
"""Wrapper for all store unit filtering out null."""
return (unit for unit in self.store.units if not unit.isblank())
class SubtitleUnit(MonolingualIDUnit):
@cached_property
def source(self):
if self.template is not None:
return self.template.source
return self.unit.source
@cached_property
def target(self):
"""Return target string from a Translate Toolkit unit."""
if self.unit is None:
return ""
return get_string(self.unit.source)
def is_translated(self):
"""Check whether unit is translated."""
return bool(self.target)
class SubRipFormat(TTKitFormat):
name = _("SubRip subtitle file")
format_id = "srt"
loader = ("subtitles", "SubRipFile")
unit_class = SubtitleUnit
autoload = ("*.srt",)
monolingual = True
@staticmethod
def mimetype():
"""Return most common media type for format."""
return "text/plain"
class MicroDVDFormat(SubRipFormat):
name = _("MicroDVD subtitle file")
format_id = "sub"
loader = ("subtitles", "MicroDVDFile")
autoload = ("*.sub",)
class AdvSubStationAlphaFormat(SubRipFormat):
name = _("Advanced SubStation Alpha subtitle file")
format_id = "ass"
loader = ("subtitles", "AdvSubStationAlphaFile")
autoload = ("*.ass",)
class SubStationAlphaFormat(SubRipFormat):
name = _("SubStation Alpha subtitle file")
format_id = "ssa"
loader = ("subtitles", "SubStationAlphaFile")
autoload = ("*.ssa",)
class FlatXMLFormat(TTKitFormat):
name = _("Flat XML file")
format_id = "flatxml"
loader = ("flatxml", "FlatXMLFile")
monolingual = True
unit_class = FlatXMLUnit
new_translation = '<?xml version="1.0" encoding="utf-8"?>\n<root></root>'
class ResourceDictionaryFormat(FlatXMLFormat):
name = _("ResourceDictionary file")
format_id = "resourcedictionary"
loader = ("resourcedictionary", "ResourceDictionaryFile")
check_flags = ("c-sharp-format",)
language_format = "bcp_legacy"
new_translation = """<ResourceDictionary
xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
xmlns:system="clr-namespace:System;assembly=mscorlib">
</ResourceDictionary>"""
class INIFormat(TTKitFormat):
name = _("INI file")
format_id = "ini"
loader = ("ini", "inifile")
monolingual = True
unit_class = INIUnit
new_translation = "\n"
@staticmethod
def mimetype():
"""Return most common media type for format."""
# INI files do not expose mimetype
return "text/plain"
@classmethod
def extension(cls):
"""Return most common file extension for format."""
# INI files do not expose extension
return "ini"
def load(self, storefile, template_store):
store = super().load(storefile, template_store)
# Adjust store to have translations
for unit in store.units:
unit.target = unit.source
unit.rich_target = unit.rich_source
return store
def create_unit(
self,
key: str,
source: Union[str, List[str]],
target: Optional[Union[str, List[str]]] = None,
):
unit = super().create_unit(key, source, target)
unit.location = key
return unit
class InnoSetupINIFormat(INIFormat):
name = _("Inno Setup INI file")
format_id = "islu"
loader = ("ini", "inifile")
@classmethod
def extension(cls):
"""Return most common file extension for format."""
# INI files do not expose extension
return "islu"
@staticmethod
def get_class_kwargs():
return {"dialect": "inno"}
class XWikiUnit(PropertiesUnit):
"""Dedicated unit for XWiki.
Inspired from PropertiesUnit, allow to override the methods to use the right
XWikiDialect methods for decoding properties.
"""
@cached_property
def source(self):
# Need to decode property encoded string
return get_string(quote.xwiki_properties_decode(super().source))
@cached_property
def target(self):
"""Return target string from a Translate Toolkit unit."""
if self.unit is None:
return ""
# Need to decode property encoded string
# This is basically stolen from
# translate.storage.properties.propunit.gettarget
# which for some reason does not return translation
value = quote.xwiki_properties_decode(self.unit.value)
value = re.sub("\\\\ ", " ", value)
return get_string(value)
class XWikiPropertiesFormat(PropertiesBaseFormat):
"""Represents an XWiki Java Properties translation file.
This format specification is detailed in
https://dev.xwiki.org/xwiki/bin/view/Community/XWiki%20Translations%20Formats/#HXWikiJavaProperties
"""
unit_class = XWikiUnit
name = "XWiki Java Properties"
format_id = "xwiki-java-properties"
loader = ("properties", "xwikifile")
language_format = "bcp_legacy"
autoload = ("*.properties",)
new_translation = "\n"
can_add_unit: bool = False
set_context_bilingual: bool = True
    # Ensure that untranslated units are saved too (as missing properties) and
    # that comments are preserved as in the original source file.
def save_content(self, handle):
current_units = self.all_units
store_units = self.store.units
# We empty the store units since we want to control what we'll serialize
self.store.units = []
for unit in current_units:
# If the translation unit is missing and the current unit is not
# only about comment.
if unit.unit is None and unit.has_content():
# We first check if the unit has not been translated as part of a
# new language: in that case the unit is not linked yet.
found_store_unit = None
for store_unit in store_units:
if unit.context == store_unit.name:
found_store_unit = store_unit
# If we found a unit for same context not linked, we just link it.
if found_store_unit is not None:
unit.unit = found_store_unit
# else it's a missing unit: we need to mark it as missing.
else:
missingunit = self.unit_class(self, unit.mainunit, unit.template)
unit.unit = missingunit.unit
unit.unit.missing = True
# if the unit was only a comment, we take back the original source file unit
# to avoid any change.
elif not unit.has_content():
unit.unit = unit.mainunit
self.add_unit(unit.unit)
self.store.serialize(handle)
class XWikiPagePropertiesFormat(XWikiPropertiesFormat):
"""Represents an XWiki Page Properties translation file.
This format specification is detailed in
https://dev.xwiki.org/xwiki/bin/view/Community/XWiki%20Translations%20Formats/#HXWikiPageProperties
"""
name = "XWiki Page Properties"
format_id = "xwiki-page-properties"
loader = ("properties", "XWikiPageProperties")
force_encoding = "utf-8"
def save_content(self, handle):
if self.store.root is None:
self.store.root = self.template_store.store.root
super().save_content(handle)
class XWikiFullPageFormat(XWikiPagePropertiesFormat):
"""Represents an XWiki Full Page translation file.
This format specification is detailed in
https://dev.xwiki.org/xwiki/bin/view/Community/XWiki%20Translations%20Formats/#HXWikiFullContentTranslation
"""
name = "XWiki Full Page"
format_id = "xwiki-fullpage"
loader = ("properties", "XWikiFullPage")
class TBXUnit(TTKitUnit):
@cached_property
def notes(self):
"""Return notes or notes from units."""
notes = []
for origin in ("pos", "definition", "developer"):
note = self.unit.getnotes(origin)
if note:
notes.append(note)
return "\n".join(notes)
@cached_property
def context(self):
return self.unit.xmlelement.get("id") or ""
class TBXFormat(TTKitFormat):
name = _("TermBase eXchange file")
format_id = "tbx"
loader = tbxfile
autoload: Tuple[str, ...] = ("*.tbx",)
new_translation = tbxfile.XMLskeleton
unit_class = TBXUnit
create_empty_bilingual: bool = True
use_settarget = True
monolingual = False
def __init__(
self,
storefile,
template_store=None,
language_code: Optional[str] = None,
source_language: Optional[str] = None,
is_template: bool = False,
):
super().__init__(
storefile,
template_store=template_store,
language_code=language_code,
is_template=is_template,
source_language=source_language,
)
# Add language header if not present
self.store.addheader()
class PropertiesMi18nFormat(PropertiesUtf8Format):
name = _("mi18n lang file")
format_id = "mi18n-lang"
new_translation = "\n"
language_format = "bcp_legacy"
check_flags = ("es-format",)
monolingual = True
class StringsdictFormat(DictStoreMixin, TTKitFormat):
name = _("Stringsdict file")
format_id = "stringsdict"
loader = ("stringsdict", "StringsDictFile")
unit_class = MonolingualSimpleUnit
autoload: Tuple[str, ...] = ("*.stringsdict",)
new_translation = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
</dict>
</plist>
""" # noqa: E501
@staticmethod
def mimetype():
"""Return most common media type for format."""
return "application/xml"
@staticmethod
def extension():
"""Return most common file extension for format."""
return "stringsdict"
@classmethod
def get_plural(cls, language, store=None):
"""Return matching plural object."""
plural = super().get_plural(language, store)
if plural.type in ZERO_PLURAL_TYPES:
return plural
from weblate.lang.models import Plural
return language.plural_set.get_or_create(
source=Plural.SOURCE_STRINGSDICT,
defaults={
"formula": FORMULA_WITH_ZERO[plural.formula],
"number": plural.number + 1,
},
)[0]
def fixup(self, store):
if self.language_code:
store.settargetlanguage(self.language_code)
elif self.source_language:
store.settargetlanguage(self.source_language)
class FluentUnit(MonolingualSimpleUnit):
def set_target(self, target: Union[str, List[str]]):
super().set_target(target)
self.unit.source = target
class FluentFormat(TTKitFormat):
name = _("Fluent file")
format_id = "fluent"
loader = ("fluent", "FluentFile")
unit_class = FluentUnit
autoload: Tuple[str, ...] = ("*.ftl",)
new_translation = ""
@staticmethod
def mimetype():
"""Return most common media type for format."""
return "text/x-fluent"
@staticmethod
def extension():
"""Return most common file extension for format."""
return "ftl"
def create_unit(
self,
key: str,
source: Union[str, List[str]],
target: Optional[Union[str, List[str]]] = None,
):
unit = super().create_unit(key, source, target)
unit.source = unit.target
return unit
def is_valid(self):
"""Check whether store seems to be valid."""
# Workaround for https://github.com/translate/translate/issues/4615
for unit in self.store.units:
errors = unit.geterrors()
if errors:
raise ValueError(
"Syntax error: {}".format(
", ".join(
f"{errorname}: {errortext}"
for errorname, errortext in errors.items()
)
)
)
        return super().is_valid()
# /Chula-0.13.3.tar.gz/Chula-0.13.3/chula/app.py
import imp
import optparse
import os
import sys
# Project imports
from chula.www.adapters.wsgi import adapter
def usage():
return """Usage: %prog [options] path/to/app
Help:
This script relies upon a somewhat consistent application
structure. The typical structure of an app would look something
like:
user# tree -d apps/example/webapp
apps/example/webapp/
|-- controller
|-- model
|-- view
`-- www
Typically "controller" and "model" are python packages, though
their names can be different, it's really up to you. This script
does make an assumption though that the configuration and
controllers can be found in the first level of directories (say
configuration.py in model, and home.py in controller).
Examples:
# If you have configuration.py and an "app" variable inside it:
%prog /path/to/app
# For an app with config.py and a "prod" variable inside it:
%prog -c config -o prod /path/to/app
"""
def getopts():
p = optparse.OptionParser(usage())
p.add_option('-a', '--access-log',
dest='access_log',
help='Access log to write to, "-" for stdout')
p.add_option('-c', '--config',
dest='config_module',
help='Module name containing app configuration')
p.add_option('-k', '--keep-alive',
dest='keep_alive',
help='Keep-Alive in seconds if supported by provider')
p.add_option('-l', '--preload',
action='store_true',
help='Preload prior to forking if supported by provider')
p.add_option('-m', '--max-requests',
dest='max_requests',
help='Max requests per worker before re-spawning')
p.add_option('-o', '--config-object',
dest='config_obj',
help='Configuration object inside the config')
p.add_option('-p', '--port',
help='TCP port to run the webserver on')
p.add_option('-t', '--timeout',
                 help='Max time in sec per request if supported by provider')
p.add_option('-w', '--workers',
help='Number of workers if the provider supports it')
p.add_option('-W', '--worker-provider',
dest='worker_provider',
help='Type of worker class to use if supported by provider')
p.add_option('-P', '--provider',
help='Use the specified provider (gevent, gunicorn, etc)')
# Defaults
p.set_defaults(access_log='-')
p.set_defaults(config_module='configuration')
p.set_defaults(config_obj='app')
p.set_defaults(debug=False)
p.set_defaults(keep_alive=2)
p.set_defaults(max_requests=0)
p.set_defaults(port=8080)
p.set_defaults(preload=False)
p.set_defaults(timeout=120)
p.set_defaults(worker_provider='sync')
p.set_defaults(workers=4)
return (p, p.parse_args())
def _builtin(application, options):
print 'WSGI provider: wsgiref.simple_server (builtin)'
from wsgiref.simple_server import make_server
httpd = make_server('', int(options.port), application)
return httpd
def _eventlet(application, options):
print 'WSGI provider: eventlet.wsgi'
import eventlet
from eventlet import wsgi
e = eventlet.listen(('', int(options.port)))
return lambda : wsgi.server(e, application)
def _gevent(application, options):
print 'WSGI provider: gevent.wsgi'
from gevent import wsgi
httpd = wsgi.WSGIServer(('', int(options.port)), application)
return httpd
def _gunicorn(application, options):
print 'WSGI provider: gunicorn.app.base.Application'
from gunicorn.app import base
from gunicorn import config
class Gunicorn(base.Application):
sys.argv = [] # Stop gunicorn from choking on our optparse options
def init(self, parser, opts, args):
c = {'bind': '0.0.0.0:%s' % options.port,
'max_requests': int(options.max_requests),
'preload_app': options.preload,
'keepalive': int(options.keep_alive),
'timeout': int(options.timeout),
'worker_class': options.worker_provider,
'workers': options.workers}
if hasattr(config, 'AccessLog'):
c.update({'accesslog':options.access_log})
return c
def load(self):
return application
httpd = Gunicorn()
return httpd
def _tornado(application, options):
print 'WSGI provider: tornado.httpserver.HTTPServer'
from tornado import httpserver, ioloop, wsgi
container = wsgi.WSGIContainer(application)
httpd = httpserver.HTTPServer(container)
httpd.listen(int(options.port))
return ioloop.IOLoop.instance()
def wsgi_provider(application, options):
if options.provider:
providers = [getattr(sys.modules[__name__], '_%s' % options.provider)]
else:
providers = [_gevent, _gunicorn, _eventlet, _tornado, _builtin]
for provider in providers:
try:
return provider(application, options)
except (ImportError, NameError):
pass
raise Exception('Unable to find a wsgi provider')
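# Illustrative invocations (the "chula-run" entry-point name is an assumption;
# a console script would simply call run() below):
#
#   chula-run -p 8080 ~/apps/example/webapp
#   chula-run -P gunicorn -w 8 -t 60 -c config -o prod ~/apps/example/webapp
#
# When -P/--provider is omitted, the providers are tried in the order gevent,
# gunicorn, eventlet, tornado and finally the builtin wsgiref server.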
def run():
# Parse command line options
parser, (options, args) = getopts()
if not args:
print 'Error: Please specify the path to your app'
parser.print_help()
sys.exit(1)
options.app = args.pop(0)
if not os.path.exists(options.app):
print 'Error: Specified path does not exist:', options.app
parser.print_help()
sys.exit(1)
# Expose the application's top level package(s)
app_root = os.path.expanduser(options.app)
sys.path.append(app_root)
for d in os.listdir(app_root):
sys.path.append(os.path.join(options.app, d))
fp, pathname, description = imp.find_module(options.config_module)
app_config_module = imp.load_module('app', fp, pathname, description)
try:
app_config = getattr(app_config_module, options.config_obj)
except AttributeError, ex:
print 'Error: Unable to find your application, sorry :/'
print 'CONFIG_MODULE searched for: %s' % options.config_module
print 'CONFIG_OBJ searched for: %s' % options.config_obj
parser.print_help()
sys.exit(1)
@adapter.wsgi
def application():
return app_config
httpd = wsgi_provider(application, options)
try:
print 'Starting server on: http://localhost:%s' % options.port
if 'log' in app_config:
print 'Errors log:', app_config.log
if app_config.debug:
print 'Debug log: ', app_config.log + '.debug'
for attribute in ['run', 'serve_forever', 'start', None]:
if attribute and hasattr(httpd, attribute):
httpd = getattr(httpd, attribute)()
break
if callable(httpd):
httpd()
else:
print 'Chula does not know how to use this wsgi provider'
print 'Type of provider given:', httpd
sys.exit(1)
except KeyboardInterrupt:
        sys.exit()
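# --- Illustrative usage sketch (editor's addition, not part of chula) --------
# wsgi_provider() walks the provider factories in order and silently skips any
# whose import fails, so pinning a server only requires setting
# options.provider. The Options container and the tiny WSGI app below are
# assumptions made for this example only.
def _example_wsgi_provider_usage():
    class Options(object):
        provider = 'builtin'   # force the wsgiref fallback
        port = 8080
    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['hello world\n']
    httpd = wsgi_provider(app, Options())
    httpd.serve_forever()      # the wsgiref simple_server object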
/NoseHTML-0.4.6.tar.gz/NoseHTML-0.4.6/nosehtml/plugin.py |
from __future__ import print_function
import errno
import io
import os
import sys
import traceback
try:
from html import escape
except ImportError:
# fallback for python 2
from cgi import escape
from nose.plugins.base import Plugin
def unicodify(value):
"""
Given a string, returns a Unicode string.
"""
if value is None:
return None
if sys.version_info[0] == 2 and not isinstance(value, unicode):
value = unicode(value, 'utf-8', 'replace')
return value
class LogFile(object):
def __init__(self, file, counter):
self.file = file
self.counter = counter
class NoseHTML(Plugin):
"""
Styled HTML output plugin for nose.
"""
def help(self):
return "Output HTML report of test status into reportfile (specifiable with --html-report-file)"
def add_options(self, parser, env=os.environ):
Plugin.add_options(self, parser, env)
parser.add_option("--html-report-file", action="store", default="nose_report.html", dest="report_file", help="File to output HTML report to")
parser.add_option("--html-error-file", action="store", default="/dev/null", dest="error_file", help="File to output HTML error report to")
def configure(self, options, config):
Plugin.configure(self, options, config)
self.conf = config
self.report_fname = options.report_file
self.error_fname = options.error_file
def begin(self):
self.reportlog = LogFile(io.open(self.report_fname, "w", encoding='utf-8'), 0)
self.errorlog = LogFile(io.open(self.error_fname, "w", encoding='utf-8'), 0)
for f in (self.reportlog.file, self.errorlog.file):
print(HTML_START, file=f)
f.flush()
def finalize(self, result):
for f in (self.reportlog.file, self.errorlog.file):
print(HTML_END, file=f)
# When run via buildbot on NFS on Solaris, this close() will encounter
# the NFS bug described in OpenSolaris bug ID #6708290. So we work
# around that bug.
try:
f.close()
except IOError as e:
if e.errno != errno.EINVAL:
raise
def print_test(self, status, test, error=None):
fs = [self.reportlog]
if error:
fs.append(self.errorlog)
for f in fs:
f.counter += 1
print(u"<div class='test %s'>" % unicodify(status), file=f.file)
if test.id():
print(u"<div><span class='label'>ID:</span> %s</div>" % unicodify(test.id()), file=f.file)
if test.shortDescription():
print(u"<div><span class='label'>Description:</span> %s</div>" % unicodify(test.shortDescription()), file=f.file)
if status:
print(u"<div><span class='label'>Status:</span> %s</div>" % unicodify(status), file=f.file)
if test.capturedOutput:
print(u"<div><span class='label'>Output:</span> <a href=\"javascript:toggle('capture_%d')\">...</a></div>" % f.counter, file=f.file)
print(u"<div id='capture_%d' style='display: none'><pre class='capture'>%s</pre></div>" % (f.counter, unicodify(escape(test.capturedOutput, quote=True))), file=f.file)
if hasattr(test, 'capturedLogging') and test.capturedLogging:
print(u"<div><span class='label'>Log:</span> <a href=\"javascript:toggle('log_%d')\">...</a></div>" % f.counter, file=f.file)
print(u"<div id='log_%d' style='display: none'><pre class='log'>%s</pre></div>" % (f.counter, unicodify(escape("\n".join(test.capturedLogging), quote=True))), file=f.file)
if error:
print(u"<div><span class='label'>Exception:</span> <a href=\"javascript:toggle('exception_%d')\">...</a></div>" % f.counter, file=f.file)
print(u"<div id='exception_%d' style='display: none'><pre class='exception'>%s</pre></div>" % (f.counter, unicodify(escape(error, quote=True))), file=f.file)
print(u"</div>", file=f.file)
f.file.flush()
def addSkip(self, test):
"""
Test was skipped
"""
self.print_test('skipped', test)
def addSuccess(self, test):
"""
Test was successful
"""
self.print_test('success', test)
def addFailure(self, test, err):
"""
Test failed
"""
err_type, err_value, err_traceback = err
if not isinstance(err_value, Exception):
err_value = Exception(err_value)
self.print_test('failure', test, '\n'.join(traceback.format_exception(err_type, err_value, err_traceback)))
def addError(self, test, err):
"""
Test errored.
"""
err_type, err_value, err_traceback = err
if not isinstance(err_value, Exception):
err_value = Exception(err_value)
self.print_test('error', test, '\n'.join(traceback.format_exception(err_type, err_value, err_traceback)))
def addDeprecated(self, test):
"""
Test is deprecated
"""
self.print_test('deprecated', test)
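# Illustrative sketch (editor's addition): one hedged way to enable this plugin
# programmatically rather than via the nosetests command line. The
# --with-nosehtml switch is assumed to be the enable flag nose derives from the
# plugin's class name, and 'tests/' is a placeholder path.
def _example_run_with_nosehtml():
    import nose
    nose.run(argv=['nosetests', '--with-nosehtml',
                   '--html-report-file', 'nose_report.html', 'tests/'],
             addplugins=[NoseHTML()])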
HTML_START = u"""
<html>
<head>
<style>
body
{
font: 12px verdana, "Bitstream Vera Sans", geneva, arial, helvetica, helve, sans-serif;
line-height: 160%;
}
div.test
{
margin: 5px;
padding: 5px;
border: solid black 1px;
background: lightgray;
}
div.success
{
background: #CCFFCC;
border: solid #66AA66 1px;
}
div.error, div.failure
{
background: #FFCCCC;
border: solid #AA6666 1px;
}
span.label
{
font-weight: bold;
}
pre
{
background: white;
padding: 5px;
border: solid black 1px;
display: block;
overflow: auto;
}
</style>
<script>
function toggle(name){
var elem = document.getElementById(name)
if (elem) {
if (elem.style.display=="none"){
elem.style.display="block"
} else {
elem.style.display="none"
}
}
}
</script>
</head>
<body>
"""
HTML_END = u"""
</body>
</html>
""" | PypiClean |
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_om-et.js |
'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
function getDecimals(n) {
n = n + '';
var i = n.indexOf('.');
return (i == -1) ? 0 : n.length - i - 1;
}
function getVF(n, opt_precision) {
var v = opt_precision;
if (undefined === v) {
v = Math.min(getDecimals(n), 3);
}
var base = Math.pow(10, v);
var f = ((n * base) | 0) % base;
return {v: v, f: f};
}
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"WD",
"WB"
],
"DAY": [
"Dilbata",
"Wiixata",
"Qibxata",
"Roobii",
"Kamiisa",
"Jimaata",
"Sanbata"
],
"MONTH": [
"Amajjii",
"Guraandhala",
"Bitooteessa",
"Elba",
"Caamsa",
"Waxabajjii",
"Adooleessa",
"Hagayya",
"Fuulbana",
"Onkololeessa",
"Sadaasa",
"Muddee"
],
"SHORTDAY": [
"Dil",
"Wix",
"Qib",
"Rob",
"Kam",
"Jim",
"San"
],
"SHORTMONTH": [
"Ama",
"Gur",
"Bit",
"Elb",
"Cam",
"Wax",
"Ado",
"Hag",
"Ful",
"Onk",
"Sad",
"Mud"
],
"fullDate": "EEEE, MMMM d, y",
"longDate": "dd MMMM y",
"medium": "dd-MMM-y h:mm:ss a",
"mediumDate": "dd-MMM-y",
"mediumTime": "h:mm:ss a",
"short": "dd/MM/yy h:mm a",
"shortDate": "dd/MM/yy",
"shortTime": "h:mm a"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "Birr",
"DECIMAL_SEP": ".",
"GROUP_SEP": ",",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "\u00a4-",
"negSuf": "",
"posPre": "\u00a4",
"posSuf": ""
}
]
},
"id": "om-et",
"pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]);
/Abstract_Exchange-0.0.2.tar.gz/Abstract_Exchange-0.0.2/Abstract_Exchange/CustomExchange.py |
from .functions import convert_baseAmount_to_quoteAmount
from ccxt.base.exchange import Exchange
class CustomExchange(Exchange):
def __init__(self, config={}):
super().__init__(config=config)
self.tickers = dict()
self.currencies = dict()
self.apiName = None
def init(self, apiName):
self.apiName = apiName
def load_tickers(self):
self.tickers = super().fetch_tickers()
return self.tickers
def load_currencies(self): # it is not needed for now
self.currencies = super().fetch_currencies()
return self.currencies
def get_ask(self, symbol):
try:
return float(self.tickers[symbol]['ask'])
except:
return None
def get_bid(self, symbol):
try:
return float(self.tickers[symbol]['bid'])
except:
return None
def get_lastprice(self, symbol):
try:
return float(self.tickers[symbol]['last'])
except:
return None
def get_fee(self, code):
""" releated to child """
try:
return float(self.currencies[code]['fee'])
except:
return None
def check_withdrawal(self, code):
""" releated to child """
return self.currencies[code]['payout']
def check_deposit(self, code):
""" releated to child """
return self.currncies[code]['payin']
def convert_currency(self, active, passive):
quotes = {'DOGE', 'USDT', 'UST', 'USDC', 'TUSD',
'BTC', 'KCS', 'PAX', 'TRX', 'DAI', 'ETH'}
active = active.split(' ')
amount = float(active[0])
active_code = active[1].upper()
passive_code = passive.upper()
if active_code in quotes:
try:
price = self.fetch_custom_price(f'{passive_code}/{active_code}')
return float(amount / price)
except:
price = self.fetch_custom_price(f'{active_code}/{passive_code}')
return float(amount * price)
elif passive_code in quotes:
price = self.fetch_custom_price(f'{active_code}/{passive_code}')
return float(amount * price)
def fetch_custom_total_balance(self, currency):
return super().fetch_total_balance()[currency]
def fetch_custom_free_balance(self, currency):
return super().fetch_free_balance()[currency]
def fetch_custom_price(self, symbol):
return super().fetch_ticker(symbol)['last']
def fetch_custom_ask(self, symbol):
return super().fetch_ticker(symbol)['ask']
def fetch_custom_bid(self, symbol):
return super().fetch_ticker(symbol)['bid']
def is_order_successfull(self, orderId):
trades = super().fetch_my_trades()
for trade in trades:
if orderId == trade['info']['orderId']:
return True
return False
def fetch_BaseMinSize(self, symbol):
baseMinSize = self.fetch_market(symbol)['limits']['amount']['min']
return baseMinSize
def fetch_BaseMinSizeViaQuote(self, symbol):
baseMinSize = self.fetch_BaseMinSize(symbol)
quotePrice = self.fetch_custom_price(symbol)
return convert_baseAmount_to_quoteAmount(baseMinSize, quotePrice)
def fetch_market(self, symbol):
for i in super().fetch_markets():
if i['symbol'] == symbol:
return i
# if __name__ == '__main__':
# a = CustomExchange()
# a.apiName = "baby"
#     print(a.__str__)
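# Illustrative sketch (editor's addition): CustomExchange extends the abstract
# ccxt Exchange, so in practice it is paired with a concrete ccxt exchange
# class. The kucoin pairing and the symbols below are assumptions made for this
# example only, not part of this package.
def _example_usage():
    import ccxt
    class KucoinCustom(CustomExchange, ccxt.kucoin):
        pass
    ex = KucoinCustom({'enableRateLimit': True})
    ex.init('demo-api')        # store an API label
    ex.load_tickers()          # cache tickers for get_ask()/get_bid()
    return ex.get_ask('BTC/USDT')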
/AnyBlok-2.1.0.tar.gz/AnyBlok-2.1.0/anyblok/bloks/anyblok_core/system/field.py |
from anyblok import Declarations
from anyblok.column import String
from anyblok.schema import ForeignKeyConstraint
register = Declarations.register
System = Declarations.Model.System
Mixin = Declarations.Mixin
@register(System) # noqa
class Field:
name = String(primary_key=True)
code = String(nullable=True)
model = String(primary_key=True)
# FIXME, foreign_key=System.Model.use('name'))
label = String()
ftype = String(label="Type", nullable=True)
entity_type = String(nullable=True)
@classmethod
def define_table_args(cls):
table_args = super(Field, cls).define_table_args()
if cls.__registry_name__ != System.Field.__registry_name__:
F = cls.anyblok.System.Field
return table_args + (
ForeignKeyConstraint(
[cls.name, cls.model], [F.name, F.model], ondelete="CASCADE"
),
)
return table_args
@classmethod
def define_mapper_args(cls):
mapper_args = super(Field, cls).define_mapper_args()
if cls.__registry_name__ == System.Field.__registry_name__:
mapper_args.update(
{
"polymorphic_identity": cls.__registry_name__,
"polymorphic_on": cls.entity_type,
}
)
else:
mapper_args.update(
{
"polymorphic_identity": cls.__registry_name__,
}
)
return mapper_args
@classmethod
def get_cname(self, field, cname):
return cname
def _description(self):
res = {
"id": self.name,
"label": self.label,
"type": self.ftype,
"nullable": True,
"primary_key": False,
"model": None,
}
c = self.anyblok.loaded_namespaces_first_step[self.model][self.name]
c.update_description(self.anyblok, self.model, res)
return res
@classmethod
def add_field(cls, rname, label, model, table, ftype):
"""Insert a field definition
:param rname: name of the field
:param label: label of the field
:param model: namespace of the model
:param table: name of the table of the model
:param ftype: type of the AnyBlok Field
"""
cls.insert(
code=table + "." + rname,
model=model,
name=rname,
label=label,
ftype=ftype,
)
@classmethod
def alter_field(cls, field, label, ftype):
"""Update an existing field
:param field: instance of the Field model to update
:param label: label of the field
:param ftype: type of the AnyBlok Field
"""
        field.update(label=label, ftype=ftype)
/GeneralMarshall-1.0.1.tar.gz/GeneralMarshall-1.0.1/general_marshall/marshall.py |
import logging
import os
try:
from lxml import etree
except ImportError:
import xml.etree.ElementTree as etree
class XML(object):
"""
An XML file container.
The XML structure is hidden from the user so that element content can be
set as if it was a flat field. The hierarchy is stored in the class'
fields. Missing fields are created when looked up.
XML is designed as an abstract class. The fields necessary to use it should
be set in subclasses.
Fields:
_namespace: The namespace the XML document should work in
_root_name: The XML document's root tag
_unique_tags: Unique tags that can occur in the document
_unique_tag_attributes: Possible unique attributes that can be placed
on tags with the tag that should contain them.
Organized thusly:
{attribute_identifier:
(tag, xml_attribute_name)}
_tag_hierarchy: A tag hierarchy organized as a dictionary. This is used
to place tags when they are generated. It is also used
to dynamically generate parent elements that may not
exist when creating child elements. The tag_identifier
and xml_tagname *can* be different, but they do not
have to. This is to allow client code to use different
identifiers in the Python code than what is shown in
the marshalled XML.
Organized thusly:
{tag_identifier: (parent, xml_tagname)}
Methods:
__init__: Either read a given XML file or create a root XML tag and
store it internally
__str__: Use etree's tostring() method to convert the object's internal
data into a prettily printed standalone XML file with XML
declaration and character encoding
__repr__: Return initialization string
__getattr__: Overridden to force interaction with XML tree
__setattr__: Overridden to force interaction with XML tree
_get_or_create_tag: Return a tag, creating it if needed.
export: Export XML query as file
"""
_namespace = None
_root_name = None
_unique_tags = None
_unique_tag_attributes = []
_tag_hierarchy = None
def __init__(self, xml=""):
"""Parse input XML file. If none is provided, make XML structure."""
if xml:
self.source_file = xml
self.tree = etree.parse(self.source_file)
self.root = self.tree.getroot()
else:
self.source_file = ""
self.root = etree.Element(self._root_name)
self.tree = etree.ElementTree(self.root)
self.ns = "{" + self._namespace + "}"
def __str__(self):
"""
Return marshalled representation of object.
The query string uses single quotation marks in the first line,
otherwise some parsers reject it.
The prettyprinting is programmed here manually because the builtin
``xml`` library can't prettyprint, and the ``lxml`` library is not
always present.
"""
ugly_xml = etree.tostring(self.root, encoding="UTF-8")
# Split string on adjacent tags without string in between
stringlines = ">\n<".join(ugly_xml.split("><")).split("\n")
indent = 0
string = ('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n' +
stringlines[0] + "\n")
for number, line in enumerate(stringlines[1:]):
# Line starts with closing tag
if line.startswith("</"):
indent -= 2
# Line opens and closes tag
elif ("</" in stringlines[number] and not
stringlines[number].strip().startswith("</")):
pass
# Line and previous line were opening tags
elif not stringlines[number].strip().startswith("</"):
indent += 2
string += " " * indent + line.strip() + "\n"
return string
def __repr__(self):
return 'XML("{}")'.format(self.source_file)
def __getattr__(self, key):
"""
Look up requested key in hierarchy, creating tags if necessary.
If tag has text, return the text. Otherwise return the tag itself.
Getters are used here in order to ensure that the simple fields exposed
to the user are synchronized with the internally stored XML elements
"""
_attribute_error_string = ("{} instance has no attribute "
"'{}'".format(self.__class__.__name__, key))
# If key is in field hierarchy, get it or create it
if key in self._tag_hierarchy:
tag = self._get_or_create_tag(key)
if tag.text:
return tag.text
else:
return tag
# If key is attribute, get attribute value
if key in self._unique_tag_attributes:
logging.debug("Key {} is a unique tag attribute.".format(key))
tag_name = self._unique_tag_attributes[key][0]
parent_tag = self._get_or_create_tag(tag_name)
logging.debug("{} is a tag attribute that belongs to tag {}"
".".format(key, parent_tag))
attribute = parent_tag.get(self._unique_tag_attributes[key][1])
return attribute
raise AttributeError(_attribute_error_string)
def __setattr__(self, name, value):
"""
Set tag or attribute according to hierarchy, or set normal field.
If name is a unique tag attribute, the tag's attribute is set to the
given value.
If name is a a unique tag, the tag's text is set to the given value.
@param name: Name of attribute to be set
@type name: String
@param value: Value to be assigned
"""
if name in self._unique_tag_attributes:
tag_name = self._unique_tag_attributes[name][0]
logging.debug("{} is an attribute of {} tag.".format(name,
tag_name))
tag = self._get_or_create_tag(tag_name)
tag.set(self._unique_tag_attributes[name][1], value)
elif name in self._unique_tags:
tag = self._get_or_create_tag(name)
tag.text = value
else:
self.__dict__[name] = value
def _locate_in_hierarchy(self, tag_name):
"""
Given a tag, return its parent and the tag's XML name.
Nonexistent parents are created recursively.
@param tag_name: The tag's code identifier.
@return: parent tag, tag's XML name
@rtype: XML tag, string
"""
# Does element exist in hierarchy?
try:
parent_name = self._tag_hierarchy[tag_name][0]
except KeyError:
_attribute_error_string = ("{} instance has no attribute "
"'{}'".format(self.__class__.__name__,
tag_name))
raise AttributeError(_attribute_error_string)
# Does parent exist?
try:
logging.debug("Looking for {}'s parent.".format(tag_name))
parent = self.__dict__[parent_name]
# If not, create and retrieve parent
except KeyError:
logging.debug("KeyError. Making parent {}.".format(parent_name))
self.__dict__[parent_name] = self._get_or_create_tag(parent_name)
parent = self.__dict__[parent_name]
# Check if element exists
child_name = self._tag_hierarchy[tag_name][1]
logging.debug("Parent {} exists. {}'s XML name is {}.".
format(parent, tag_name, child_name))
return parent, child_name
def _get_or_create_tag(self, tag_name):
"""
Get or create a tag.
Check if parent exists. If needed, the call method recursively until
all parent elements are created. The requested element is
created, if necessary, and then returned.
@param tag_name: The name of the element
@param type: String
"""
parent, child_name = self._locate_in_hierarchy(tag_name)
elements = parent.getchildren()
element = None
# If children are found
if elements:
logging.debug("{} has children: {}. ".format(parent, elements))
# Check if I can find the element the easy way
element = parent.find(child_name)
if element is not None:
logging.debug("Found tag the easy way. "
"It's {}.".format(element))
return element
# Otherwise search for it with full namespace
else:
element = parent.find("{ns}{element}".
format(ns=self.ns, element=child_name))
logging.debug("Found tag with namespace. "
"It's {}.".format(element))
# If I found the element, return it
if element is not None:
logging.debug("Found tag. It's {}.".format(element))
return element
# Otherwise create it
tag = etree.SubElement(parent, child_name)
return tag
def export(self, path):
"""Write query to XML file."""
if os.path.isfile(path):
overwrite = ""
while overwrite != "y" and overwrite != "n":
prompt = "File already exists. Overwrite? (y/n)\n"
overwrite = raw_input(prompt)
overwrite = overwrite.lower()
if overwrite == "n":
print("Please enter a new file name.")
return
with open(path, "w") as output_file:
            output_file.write(str(self))
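# Illustrative sketch (editor's addition): a hypothetical subclass wiring up
# the class-level fields described in the XML docstring. The namespace, tag and
# attribute names are invented for this example and are not part of
# GeneralMarshall itself.
class _ExampleQueryXML(XML):
    _namespace = "http://example.org/query"
    _root_name = "query"
    _unique_tags = ["header", "title", "body"]
    _unique_tag_attributes = {"language": ("header", "lang")}
    _tag_hierarchy = {"header": ("root", "header"),
                      "title": ("header", "title"),
                      "body": ("root", "body")}
# q = _ExampleQueryXML()
# q.title = "Hello"        # creates <header><title>Hello</title></header>
# q.language = "en"        # sets the lang attribute on <header>
# q.export("query.xml")    # writes the prettyprinted standalone document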
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/packages/pip/_vendor/distro.py |
import os
import re
import sys
import json
import shlex
import logging
import argparse
import subprocess
_UNIXCONFDIR = os.environ.get('UNIXCONFDIR', '/etc')
_OS_RELEASE_BASENAME = 'os-release'
#: Translation table for normalizing the "ID" attribute defined in os-release
#: files, for use by the :func:`distro.id` method.
#:
#: * Key: Value as defined in the os-release file, translated to lower case,
#: with blanks translated to underscores.
#:
#: * Value: Normalized value.
NORMALIZED_OS_ID = {
'ol': 'oracle', # Oracle Enterprise Linux
}
#: Translation table for normalizing the "Distributor ID" attribute returned by
#: the lsb_release command, for use by the :func:`distro.id` method.
#:
#: * Key: Value as returned by the lsb_release command, translated to lower
#: case, with blanks translated to underscores.
#:
#: * Value: Normalized value.
NORMALIZED_LSB_ID = {
'enterpriseenterprise': 'oracle', # Oracle Enterprise Linux
'redhatenterpriseworkstation': 'rhel', # RHEL 6, 7 Workstation
'redhatenterpriseserver': 'rhel', # RHEL 6, 7 Server
}
#: Translation table for normalizing the distro ID derived from the file name
#: of distro release files, for use by the :func:`distro.id` method.
#:
#: * Key: Value as derived from the file name of a distro release file,
#: translated to lower case, with blanks translated to underscores.
#:
#: * Value: Normalized value.
NORMALIZED_DISTRO_ID = {
'redhat': 'rhel', # RHEL 6.x, 7.x
}
# Pattern for content of distro release file (reversed)
_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile(
r'(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)')
# Pattern for base file name of distro release file
_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(
r'(\w+)[-_](release|version)$')
# Base file names to be ignored when searching for distro release file
_DISTRO_RELEASE_IGNORE_BASENAMES = (
'debian_version',
'lsb-release',
'oem-release',
_OS_RELEASE_BASENAME,
'system-release'
)
def linux_distribution(full_distribution_name=True):
"""
Return information about the current OS distribution as a tuple
``(id_name, version, codename)`` with items as follows:
* ``id_name``: If *full_distribution_name* is false, the result of
:func:`distro.id`. Otherwise, the result of :func:`distro.name`.
* ``version``: The result of :func:`distro.version`.
* ``codename``: The result of :func:`distro.codename`.
The interface of this function is compatible with the original
:py:func:`platform.linux_distribution` function, supporting a subset of
its parameters.
The data it returns may not exactly be the same, because it uses more data
sources than the original function, and that may lead to different data if
the OS distribution is not consistent across multiple data sources it
provides (there are indeed such distributions ...).
Another reason for differences is the fact that the :func:`distro.id`
method normalizes the distro ID string to a reliable machine-readable value
for a number of popular OS distributions.
"""
return _distro.linux_distribution(full_distribution_name)
def id():
"""
Return the distro ID of the current distribution, as a
machine-readable string.
For a number of OS distributions, the returned distro ID value is
*reliable*, in the sense that it is documented and that it does not change
across releases of the distribution.
This package maintains the following reliable distro ID values:
============== =========================================
Distro ID Distribution
============== =========================================
"ubuntu" Ubuntu
"debian" Debian
"rhel" RedHat Enterprise Linux
"centos" CentOS
"fedora" Fedora
"sles" SUSE Linux Enterprise Server
"opensuse" openSUSE
"amazon" Amazon Linux
"arch" Arch Linux
"cloudlinux" CloudLinux OS
"exherbo" Exherbo Linux
"gentoo" GenToo Linux
"ibm_powerkvm" IBM PowerKVM
"kvmibm" KVM for IBM z Systems
"linuxmint" Linux Mint
"mageia" Mageia
"mandriva" Mandriva Linux
"parallels" Parallels
"pidora" Pidora
"raspbian" Raspbian
"oracle" Oracle Linux (and Oracle Enterprise Linux)
"scientific" Scientific Linux
"slackware" Slackware
"xenserver" XenServer
"openbsd" OpenBSD
"netbsd" NetBSD
"freebsd" FreeBSD
============== =========================================
If you have a need to get distros for reliable IDs added into this set,
or if you find that the :func:`distro.id` function returns a different
distro ID for one of the listed distros, please create an issue in the
`distro issue tracker`_.
**Lookup hierarchy and transformations:**
First, the ID is obtained from the following sources, in the specified
order. The first available and non-empty value is used:
* the value of the "ID" attribute of the os-release file,
* the value of the "Distributor ID" attribute returned by the lsb_release
command,
* the first part of the file name of the distro release file,
The so determined ID value then passes the following transformations,
before it is returned by this method:
* it is translated to lower case,
* blanks (which should not be there anyway) are translated to underscores,
* a normalization of the ID is performed, based upon
`normalization tables`_. The purpose of this normalization is to ensure
that the ID is as reliable as possible, even across incompatible changes
in the OS distributions. A common reason for an incompatible change is
the addition of an os-release file, or the addition of the lsb_release
command, with ID values that differ from what was previously determined
from the distro release file name.
"""
return _distro.id()
def name(pretty=False):
"""
Return the name of the current OS distribution, as a human-readable
string.
If *pretty* is false, the name is returned without version or codename.
(e.g. "CentOS Linux")
If *pretty* is true, the version and codename are appended.
(e.g. "CentOS Linux 7.1.1503 (Core)")
**Lookup hierarchy:**
The name is obtained from the following sources, in the specified order.
The first available and non-empty value is used:
* If *pretty* is false:
- the value of the "NAME" attribute of the os-release file,
- the value of the "Distributor ID" attribute returned by the lsb_release
command,
- the value of the "<name>" field of the distro release file.
* If *pretty* is true:
- the value of the "PRETTY_NAME" attribute of the os-release file,
- the value of the "Description" attribute returned by the lsb_release
command,
- the value of the "<name>" field of the distro release file, appended
with the value of the pretty version ("<version_id>" and "<codename>"
fields) of the distro release file, if available.
"""
return _distro.name(pretty)
def version(pretty=False, best=False):
"""
Return the version of the current OS distribution, as a human-readable
string.
If *pretty* is false, the version is returned without codename (e.g.
"7.0").
If *pretty* is true, the codename in parenthesis is appended, if the
codename is non-empty (e.g. "7.0 (Maipo)").
Some distributions provide version numbers with different precisions in
the different sources of distribution information. Examining the different
sources in a fixed priority order does not always yield the most precise
version (e.g. for Debian 8.2, or CentOS 7.1).
The *best* parameter can be used to control the approach for the returned
version:
If *best* is false, the first non-empty version number in priority order of
the examined sources is returned.
If *best* is true, the most precise version number out of all examined
sources is returned.
**Lookup hierarchy:**
In all cases, the version number is obtained from the following sources.
If *best* is false, this order represents the priority order:
* the value of the "VERSION_ID" attribute of the os-release file,
* the value of the "Release" attribute returned by the lsb_release
command,
* the version number parsed from the "<version_id>" field of the first line
of the distro release file,
* the version number parsed from the "PRETTY_NAME" attribute of the
os-release file, if it follows the format of the distro release files.
* the version number parsed from the "Description" attribute returned by
the lsb_release command, if it follows the format of the distro release
files.
"""
return _distro.version(pretty, best)
def version_parts(best=False):
"""
Return the version of the current OS distribution as a tuple
``(major, minor, build_number)`` with items as follows:
* ``major``: The result of :func:`distro.major_version`.
* ``minor``: The result of :func:`distro.minor_version`.
* ``build_number``: The result of :func:`distro.build_number`.
For a description of the *best* parameter, see the :func:`distro.version`
method.
"""
return _distro.version_parts(best)
def major_version(best=False):
"""
Return the major version of the current OS distribution, as a string,
if provided.
Otherwise, the empty string is returned. The major version is the first
part of the dot-separated version string.
For a description of the *best* parameter, see the :func:`distro.version`
method.
"""
return _distro.major_version(best)
def minor_version(best=False):
"""
Return the minor version of the current OS distribution, as a string,
if provided.
Otherwise, the empty string is returned. The minor version is the second
part of the dot-separated version string.
For a description of the *best* parameter, see the :func:`distro.version`
method.
"""
return _distro.minor_version(best)
def build_number(best=False):
"""
Return the build number of the current OS distribution, as a string,
if provided.
Otherwise, the empty string is returned. The build number is the third part
of the dot-separated version string.
For a description of the *best* parameter, see the :func:`distro.version`
method.
"""
return _distro.build_number(best)
def like():
"""
Return a space-separated list of distro IDs of distributions that are
closely related to the current OS distribution in regards to packaging
and programming interfaces, for example distributions the current
distribution is a derivative from.
**Lookup hierarchy:**
This information item is only provided by the os-release file.
For details, see the description of the "ID_LIKE" attribute in the
`os-release man page
<http://www.freedesktop.org/software/systemd/man/os-release.html>`_.
"""
return _distro.like()
def codename():
"""
Return the codename for the release of the current OS distribution,
as a string.
If the distribution does not have a codename, an empty string is returned.
Note that the returned codename is not always really a codename. For
example, openSUSE returns "x86_64". This function does not handle such
cases in any special way and just returns the string it finds, if any.
**Lookup hierarchy:**
* the codename within the "VERSION" attribute of the os-release file, if
provided,
* the value of the "Codename" attribute returned by the lsb_release
command,
* the value of the "<codename>" field of the distro release file.
"""
return _distro.codename()
def info(pretty=False, best=False):
"""
Return certain machine-readable information items about the current OS
distribution in a dictionary, as shown in the following example:
.. sourcecode:: python
{
'id': 'rhel',
'version': '7.0',
'version_parts': {
'major': '7',
'minor': '0',
'build_number': ''
},
'like': 'fedora',
'codename': 'Maipo'
}
The dictionary structure and keys are always the same, regardless of which
information items are available in the underlying data sources. The values
for the various keys are as follows:
* ``id``: The result of :func:`distro.id`.
* ``version``: The result of :func:`distro.version`.
* ``version_parts -> major``: The result of :func:`distro.major_version`.
* ``version_parts -> minor``: The result of :func:`distro.minor_version`.
* ``version_parts -> build_number``: The result of
:func:`distro.build_number`.
* ``like``: The result of :func:`distro.like`.
* ``codename``: The result of :func:`distro.codename`.
For a description of the *pretty* and *best* parameters, see the
:func:`distro.version` method.
"""
return _distro.info(pretty, best)
def os_release_info():
"""
Return a dictionary containing key-value pairs for the information items
from the os-release file data source of the current OS distribution.
See `os-release file`_ for details about these information items.
"""
return _distro.os_release_info()
def lsb_release_info():
"""
Return a dictionary containing key-value pairs for the information items
from the lsb_release command data source of the current OS distribution.
See `lsb_release command output`_ for details about these information
items.
"""
return _distro.lsb_release_info()
def distro_release_info():
"""
Return a dictionary containing key-value pairs for the information items
from the distro release file data source of the current OS distribution.
See `distro release file`_ for details about these information items.
"""
return _distro.distro_release_info()
def uname_info():
"""
Return a dictionary containing key-value pairs for the information items
from the distro release file data source of the current OS distribution.
"""
return _distro.uname_info()
def os_release_attr(attribute):
"""
Return a single named information item from the os-release file data source
of the current OS distribution.
Parameters:
* ``attribute`` (string): Key of the information item.
Returns:
* (string): Value of the information item, if the item exists.
The empty string, if the item does not exist.
See `os-release file`_ for details about these information items.
"""
return _distro.os_release_attr(attribute)
def lsb_release_attr(attribute):
"""
Return a single named information item from the lsb_release command output
data source of the current OS distribution.
Parameters:
* ``attribute`` (string): Key of the information item.
Returns:
* (string): Value of the information item, if the item exists.
The empty string, if the item does not exist.
See `lsb_release command output`_ for details about these information
items.
"""
return _distro.lsb_release_attr(attribute)
def distro_release_attr(attribute):
"""
Return a single named information item from the distro release file
data source of the current OS distribution.
Parameters:
* ``attribute`` (string): Key of the information item.
Returns:
* (string): Value of the information item, if the item exists.
The empty string, if the item does not exist.
See `distro release file`_ for details about these information items.
"""
return _distro.distro_release_attr(attribute)
def uname_attr(attribute):
"""
Return a single named information item from the distro release file
data source of the current OS distribution.
Parameters:
* ``attribute`` (string): Key of the information item.
Returns:
* (string): Value of the information item, if the item exists.
The empty string, if the item does not exist.
"""
return _distro.uname_attr(attribute)
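# Illustrative sketch (editor's addition): typical read-only use of the
# module-level accessors defined above. The printed values naturally depend on
# the host distribution; the apt check is just an example condition.
def _example_accessor_usage():
    print('%s %s (%s)' % (name(pretty=True), version(best=True), codename()))
    if id() in ('debian', 'ubuntu') or 'debian' in like():
        print('apt-based platform; os-release data: %r' % os_release_info())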
class cached_property(object):
"""A version of @property which caches the value. On access, it calls the
underlying function and sets the value in `__dict__` so future accesses
will not re-call the property.
"""
def __init__(self, f):
self._fname = f.__name__
self._f = f
def __get__(self, obj, owner):
assert obj is not None, 'call {} on an instance'.format(self._fname)
ret = obj.__dict__[self._fname] = self._f(obj)
return ret
class LinuxDistribution(object):
"""
Provides information about a OS distribution.
This package creates a private module-global instance of this class with
default initialization arguments, that is used by the
`consolidated accessor functions`_ and `single source accessor functions`_.
By using default initialization arguments, that module-global instance
returns data about the current OS distribution (i.e. the distro this
package runs on).
Normally, it is not necessary to create additional instances of this class.
However, in situations where control is needed over the exact data sources
that are used, instances of this class can be created with a specific
distro release file, or a specific os-release file, or without invoking the
lsb_release command.
"""
def __init__(self,
include_lsb=True,
os_release_file='',
distro_release_file='',
include_uname=True):
"""
The initialization method of this class gathers information from the
available data sources, and stores that in private instance attributes.
Subsequent access to the information items uses these private instance
attributes, so that the data sources are read only once.
Parameters:
* ``include_lsb`` (bool): Controls whether the
`lsb_release command output`_ is included as a data source.
If the lsb_release command is not available in the program execution
path, the data source for the lsb_release command will be empty.
* ``os_release_file`` (string): The path name of the
`os-release file`_ that is to be used as a data source.
An empty string (the default) will cause the default path name to
be used (see `os-release file`_ for details).
If the specified or defaulted os-release file does not exist, the
data source for the os-release file will be empty.
* ``distro_release_file`` (string): The path name of the
`distro release file`_ that is to be used as a data source.
An empty string (the default) will cause a default search algorithm
to be used (see `distro release file`_ for details).
If the specified distro release file does not exist, or if no default
distro release file can be found, the data source for the distro
release file will be empty.
* ``include_name`` (bool): Controls whether uname command output is
included as a data source. If the uname command is not available in
the program execution path the data source for the uname command will
be empty.
Public instance attributes:
* ``os_release_file`` (string): The path name of the
`os-release file`_ that is actually used as a data source. The
empty string if no distro release file is used as a data source.
* ``distro_release_file`` (string): The path name of the
`distro release file`_ that is actually used as a data source. The
empty string if no distro release file is used as a data source.
* ``include_lsb`` (bool): The result of the ``include_lsb`` parameter.
This controls whether the lsb information will be loaded.
* ``include_uname`` (bool): The result of the ``include_uname``
parameter. This controls whether the uname information will
be loaded.
Raises:
* :py:exc:`IOError`: Some I/O issue with an os-release file or distro
release file.
* :py:exc:`subprocess.CalledProcessError`: The lsb_release command had
some issue (other than not being available in the program execution
path).
* :py:exc:`UnicodeError`: A data source has unexpected characters or
uses an unexpected encoding.
"""
self.os_release_file = os_release_file or \
os.path.join(_UNIXCONFDIR, _OS_RELEASE_BASENAME)
self.distro_release_file = distro_release_file or '' # updated later
self.include_lsb = include_lsb
self.include_uname = include_uname
def __repr__(self):
"""Return repr of all info
"""
return \
"LinuxDistribution(" \
"os_release_file={self.os_release_file!r}, " \
"distro_release_file={self.distro_release_file!r}, " \
"include_lsb={self.include_lsb!r}, " \
"include_uname={self.include_uname!r}, " \
"_os_release_info={self._os_release_info!r}, " \
"_lsb_release_info={self._lsb_release_info!r}, " \
"_distro_release_info={self._distro_release_info!r}, " \
"_uname_info={self._uname_info!r})".format(
self=self)
def linux_distribution(self, full_distribution_name=True):
"""
Return information about the OS distribution that is compatible
with Python's :func:`platform.linux_distribution`, supporting a subset
of its parameters.
For details, see :func:`distro.linux_distribution`.
"""
return (
self.name() if full_distribution_name else self.id(),
self.version(),
self.codename()
)
def id(self):
"""Return the distro ID of the OS distribution, as a string.
For details, see :func:`distro.id`.
"""
def normalize(distro_id, table):
distro_id = distro_id.lower().replace(' ', '_')
return table.get(distro_id, distro_id)
distro_id = self.os_release_attr('id')
if distro_id:
return normalize(distro_id, NORMALIZED_OS_ID)
distro_id = self.lsb_release_attr('distributor_id')
if distro_id:
return normalize(distro_id, NORMALIZED_LSB_ID)
distro_id = self.distro_release_attr('id')
if distro_id:
return normalize(distro_id, NORMALIZED_DISTRO_ID)
distro_id = self.uname_attr('id')
if distro_id:
return normalize(distro_id, NORMALIZED_DISTRO_ID)
return ''
def name(self, pretty=False):
"""
Return the name of the OS distribution, as a string.
For details, see :func:`distro.name`.
"""
name = self.os_release_attr('name') \
or self.lsb_release_attr('distributor_id') \
or self.distro_release_attr('name') \
or self.uname_attr('name')
if pretty:
name = self.os_release_attr('pretty_name') \
or self.lsb_release_attr('description')
if not name:
name = self.distro_release_attr('name') \
or self.uname_attr('name')
version = self.version(pretty=True)
if version:
name = name + ' ' + version
return name or ''
def version(self, pretty=False, best=False):
"""
Return the version of the OS distribution, as a string.
For details, see :func:`distro.version`.
"""
versions = [
self.os_release_attr('version_id'),
self.lsb_release_attr('release'),
self.distro_release_attr('version_id'),
self._parse_distro_release_content(
self.os_release_attr('pretty_name')).get('version_id', ''),
self._parse_distro_release_content(
self.lsb_release_attr('description')).get('version_id', ''),
self.uname_attr('release')
]
version = ''
if best:
# This algorithm uses the last version in priority order that has
# the best precision. If the versions are not in conflict, that
# does not matter; otherwise, using the last one instead of the
# first one might be considered a surprise.
for v in versions:
if v.count(".") > version.count(".") or version == '':
version = v
else:
for v in versions:
if v != '':
version = v
break
if pretty and version and self.codename():
version = u'{0} ({1})'.format(version, self.codename())
return version
def version_parts(self, best=False):
"""
Return the version of the OS distribution, as a tuple of version
numbers.
For details, see :func:`distro.version_parts`.
"""
version_str = self.version(best=best)
if version_str:
version_regex = re.compile(r'(\d+)\.?(\d+)?\.?(\d+)?')
matches = version_regex.match(version_str)
if matches:
major, minor, build_number = matches.groups()
return major, minor or '', build_number or ''
return '', '', ''
def major_version(self, best=False):
"""
Return the major version number of the current distribution.
For details, see :func:`distro.major_version`.
"""
return self.version_parts(best)[0]
def minor_version(self, best=False):
"""
Return the minor version number of the current distribution.
For details, see :func:`distro.minor_version`.
"""
return self.version_parts(best)[1]
def build_number(self, best=False):
"""
Return the build number of the current distribution.
For details, see :func:`distro.build_number`.
"""
return self.version_parts(best)[2]
def like(self):
"""
Return the IDs of distributions that are like the OS distribution.
For details, see :func:`distro.like`.
"""
return self.os_release_attr('id_like') or ''
def codename(self):
"""
Return the codename of the OS distribution.
For details, see :func:`distro.codename`.
"""
try:
# Handle os_release specially since distros might purposefully set
# this to empty string to have no codename
return self._os_release_info['codename']
except KeyError:
return self.lsb_release_attr('codename') \
or self.distro_release_attr('codename') \
or ''
def info(self, pretty=False, best=False):
"""
Return certain machine-readable information about the OS
distribution.
For details, see :func:`distro.info`.
"""
return dict(
id=self.id(),
version=self.version(pretty, best),
version_parts=dict(
major=self.major_version(best),
minor=self.minor_version(best),
build_number=self.build_number(best)
),
like=self.like(),
codename=self.codename(),
)
def os_release_info(self):
"""
Return a dictionary containing key-value pairs for the information
items from the os-release file data source of the OS distribution.
For details, see :func:`distro.os_release_info`.
"""
return self._os_release_info
def lsb_release_info(self):
"""
Return a dictionary containing key-value pairs for the information
items from the lsb_release command data source of the OS
distribution.
For details, see :func:`distro.lsb_release_info`.
"""
return self._lsb_release_info
def distro_release_info(self):
"""
Return a dictionary containing key-value pairs for the information
items from the distro release file data source of the OS
distribution.
For details, see :func:`distro.distro_release_info`.
"""
return self._distro_release_info
def uname_info(self):
"""
Return a dictionary containing key-value pairs for the information
items from the uname command data source of the OS distribution.
For details, see :func:`distro.uname_info`.
"""
return self._uname_info
def os_release_attr(self, attribute):
"""
Return a single named information item from the os-release file data
source of the OS distribution.
For details, see :func:`distro.os_release_attr`.
"""
return self._os_release_info.get(attribute, '')
def lsb_release_attr(self, attribute):
"""
Return a single named information item from the lsb_release command
output data source of the OS distribution.
For details, see :func:`distro.lsb_release_attr`.
"""
return self._lsb_release_info.get(attribute, '')
def distro_release_attr(self, attribute):
"""
Return a single named information item from the distro release file
data source of the OS distribution.
For details, see :func:`distro.distro_release_attr`.
"""
return self._distro_release_info.get(attribute, '')
def uname_attr(self, attribute):
"""
Return a single named information item from the uname command
output data source of the OS distribution.
For details, see :func:`distro.uname_release_attr`.
"""
return self._uname_info.get(attribute, '')
@cached_property
def _os_release_info(self):
"""
Get the information items from the specified os-release file.
Returns:
A dictionary containing all information items.
"""
if os.path.isfile(self.os_release_file):
with open(self.os_release_file) as release_file:
return self._parse_os_release_content(release_file)
return {}
@staticmethod
def _parse_os_release_content(lines):
"""
Parse the lines of an os-release file.
Parameters:
* lines: Iterable through the lines in the os-release file.
Each line must be a unicode string or a UTF-8 encoded byte
string.
Returns:
A dictionary containing all information items.
"""
props = {}
lexer = shlex.shlex(lines, posix=True)
lexer.whitespace_split = True
# The shlex module defines its `wordchars` variable using literals,
# making it dependent on the encoding of the Python source file.
# In Python 2.6 and 2.7, the shlex source file is encoded in
# 'iso-8859-1', and the `wordchars` variable is defined as a byte
# string. This causes a UnicodeDecodeError to be raised when the
# parsed content is a unicode object. The following fix resolves that
# (... but it should be fixed in shlex...):
if sys.version_info[0] == 2 and isinstance(lexer.wordchars, bytes):
lexer.wordchars = lexer.wordchars.decode('iso-8859-1')
tokens = list(lexer)
for token in tokens:
# At this point, all shell-like parsing has been done (i.e.
# comments processed, quotes and backslash escape sequences
# processed, multi-line values assembled, trailing newlines
# stripped, etc.), so the tokens are now either:
# * variable assignments: var=value
# * commands or their arguments (not allowed in os-release)
if '=' in token:
k, v = token.split('=', 1)
if isinstance(v, bytes):
v = v.decode('utf-8')
props[k.lower()] = v
else:
# Ignore any tokens that are not variable assignments
pass
if 'version_codename' in props:
# os-release added a version_codename field. Use that in
# preference to anything else Note that some distros purposefully
# do not have code names. They should be setting
# version_codename=""
props['codename'] = props['version_codename']
elif 'ubuntu_codename' in props:
# Same as above but a non-standard field name used on older Ubuntus
props['codename'] = props['ubuntu_codename']
elif 'version' in props:
# If there is no version_codename, parse it from the version
codename = re.search(r'(\(\D+\))|,(\s+)?\D+', props['version'])
if codename:
codename = codename.group()
codename = codename.strip('()')
codename = codename.strip(',')
codename = codename.strip()
                # codename appears within parentheses.
props['codename'] = codename
return props
@cached_property
def _lsb_release_info(self):
"""
Get the information items from the lsb_release command output.
Returns:
A dictionary containing all information items.
"""
if not self.include_lsb:
return {}
with open(os.devnull, 'w') as devnull:
try:
cmd = ('lsb_release', '-a')
stdout = subprocess.check_output(cmd, stderr=devnull)
except OSError: # Command not found
return {}
content = stdout.decode(sys.getfilesystemencoding()).splitlines()
return self._parse_lsb_release_content(content)
@staticmethod
def _parse_lsb_release_content(lines):
"""
Parse the output of the lsb_release command.
Parameters:
* lines: Iterable through the lines of the lsb_release output.
Each line must be a unicode string or a UTF-8 encoded byte
string.
Returns:
A dictionary containing all information items.
"""
props = {}
for line in lines:
kv = line.strip('\n').split(':', 1)
if len(kv) != 2:
# Ignore lines without colon.
continue
k, v = kv
props.update({k.replace(' ', '_').lower(): v.strip()})
return props
@cached_property
def _uname_info(self):
with open(os.devnull, 'w') as devnull:
try:
cmd = ('uname', '-rs')
stdout = subprocess.check_output(cmd, stderr=devnull)
except OSError:
return {}
content = stdout.decode(sys.getfilesystemencoding()).splitlines()
return self._parse_uname_content(content)
@staticmethod
def _parse_uname_content(lines):
props = {}
match = re.search(r'^([^\s]+)\s+([\d\.]+)', lines[0].strip())
if match:
name, version = match.groups()
# This is to prevent the Linux kernel version from
# appearing as the 'best' version on otherwise
# identifiable distributions.
if name == 'Linux':
return {}
props['id'] = name.lower()
props['name'] = name
props['release'] = version
return props
@cached_property
def _distro_release_info(self):
"""
Get the information items from the specified distro release file.
Returns:
A dictionary containing all information items.
"""
if self.distro_release_file:
# If it was specified, we use it and parse what we can, even if
# its file name or content does not match the expected pattern.
distro_info = self._parse_distro_release_file(
self.distro_release_file)
basename = os.path.basename(self.distro_release_file)
# The file name pattern for user-specified distro release files
# is somewhat more tolerant (compared to when searching for the
# file), because we want to use what was specified as best as
# possible.
match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
if 'name' in distro_info \
and 'cloudlinux' in distro_info['name'].lower():
distro_info['id'] = 'cloudlinux'
elif match:
distro_info['id'] = match.group(1)
return distro_info
else:
try:
basenames = os.listdir(_UNIXCONFDIR)
# We sort for repeatability in cases where there are multiple
# distro specific files; e.g. CentOS, Oracle, Enterprise all
# containing `redhat-release` on top of their own.
basenames.sort()
except OSError:
# This may occur when /etc is not readable but we can't be
# sure about the *-release files. Check common entries of
# /etc for information. If they turn out to not be there the
# error is handled in `_parse_distro_release_file()`.
basenames = ['SuSE-release',
'arch-release',
'base-release',
'centos-release',
'fedora-release',
'gentoo-release',
'mageia-release',
'mandrake-release',
'mandriva-release',
'mandrivalinux-release',
'manjaro-release',
'oracle-release',
'redhat-release',
'sl-release',
'slackware-version']
for basename in basenames:
if basename in _DISTRO_RELEASE_IGNORE_BASENAMES:
continue
match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
if match:
filepath = os.path.join(_UNIXCONFDIR, basename)
distro_info = self._parse_distro_release_file(filepath)
if 'name' in distro_info:
# The name is always present if the pattern matches
self.distro_release_file = filepath
distro_info['id'] = match.group(1)
if 'cloudlinux' in distro_info['name'].lower():
distro_info['id'] = 'cloudlinux'
return distro_info
return {}
def _parse_distro_release_file(self, filepath):
"""
Parse a distro release file.
Parameters:
* filepath: Path name of the distro release file.
Returns:
A dictionary containing all information items.
"""
try:
with open(filepath) as fp:
# Only parse the first line. For instance, on SLES there
# are multiple lines. We don't want them...
return self._parse_distro_release_content(fp.readline())
except (OSError, IOError):
# Ignore not being able to read a specific, seemingly version
# related file.
# See https://github.com/nir0s/distro/issues/162
return {}
@staticmethod
def _parse_distro_release_content(line):
"""
Parse a line from a distro release file.
Parameters:
* line: Line from the distro release file. Must be a unicode string
or a UTF-8 encoded byte string.
Returns:
A dictionary containing all information items.
"""
if isinstance(line, bytes):
line = line.decode('utf-8')
matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(
line.strip()[::-1])
distro_info = {}
if matches:
# regexp ensures non-None
distro_info['name'] = matches.group(3)[::-1]
if matches.group(2):
distro_info['version_id'] = matches.group(2)[::-1]
if matches.group(1):
distro_info['codename'] = matches.group(1)[::-1]
elif line:
distro_info['name'] = line.strip()
return distro_info
_distro = LinuxDistribution()
def main():
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
parser = argparse.ArgumentParser(description="OS distro info tool")
parser.add_argument(
'--json',
'-j',
help="Output in machine readable format",
action="store_true")
args = parser.parse_args()
if args.json:
logger.info(json.dumps(info(), indent=4, sort_keys=True))
else:
logger.info('Name: %s', name(pretty=True))
distribution_version = version(pretty=True)
logger.info('Version: %s', distribution_version)
distribution_codename = codename()
logger.info('Codename: %s', distribution_codename)
if __name__ == '__main__':
    main()
/Neveregiveup-2.0.3.tar.gz/Neveregiveup-2.0.3/wargame/abstractgameunit.py |
from __future__ import print_function
import random
from abc import ABCMeta, abstractmethod
from gameutils import print_bold, weighted_random_selection
from gameuniterror import GameUnitError
class AbstractGameUnit(metaclass=ABCMeta):
"""Abstract class to represent a game character (or a 'unit')"""
def __init__(self, name=''):
self.max_hp = 0
self.health_meter = 0
self.name = name
self.enemy = None
self.unit_type = None
@abstractmethod
def info(self):
"""Print information about this game unit.
Abstract method. See subclasses for implementation.
"""
pass
def attack(self, enemy):
"""The main logic to 'attack' the enemy unit
Determines injured unit and the amount of injury
.. todo:: Check if enemy exists!
"""
injured_unit = weighted_random_selection(self, enemy)
injury = random.randint(10, 15)
injured_unit.health_meter = max(injured_unit.health_meter - injury, 0)
print("ATTACK! ", end='')
self.show_health(end=' ')
enemy.show_health(end=' ')
def heal(self, heal_by=2, full_healing=True):
"""Heal the unit replenishing its hit points"""
if self.health_meter == self.max_hp:
return
if full_healing:
self.health_meter = self.max_hp
else:
self.health_meter += heal_by
# ------------------------------------------------------------------
# raise a custom exception. Refer to chapter on exception handling
# ------------------------------------------------------------------
if self.health_meter > self.max_hp:
raise GameUnitError("health_meter > max_hp!", 101)
print_bold("You are HEALED!", end=' ')
self.show_health(bold=True)
def reset_health_meter(self):
"""Reset the `health_meter` (assign default hit points)"""
self.health_meter = self.max_hp
def show_health(self, bold=False, end='\n'):
"""Print info on the current health reading of this game unit"""
# TODO: what if there is no enemy?
msg = "Health: %s: %d" % (self.name, self.health_meter)
if bold:
print_bold(msg, end=end)
else:
            print(msg, end=end)
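# Illustrative sketch (editor's addition): a minimal concrete unit. The class
# name and the stats are invented for this example; the real game defines its
# own subclasses elsewhere in the wargame package.
class _ExampleOrcRider(AbstractGameUnit):
    def __init__(self, name=''):
        super().__init__(name=name)
        self.max_hp = 30
        self.health_meter = self.max_hp
        self.unit_type = 'enemy'
    def info(self):
        print("Grrr, I am an Orc Wolf Rider. Don't mess with me.")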
/EcoFin-1.3.tar.gz/EcoFin-1.3/[TMP]/4_portfolioTester.py |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tqdm import tqdm
from EcoFin.assetAllocation.performance import Performance
from EcoFin.utils import utils
# -------------------------[Set-up]-------------------------
ticker_list = [line.rstrip('\n') for line in open(r'../Tesi/INDEXs/DJIA.txt')]
maturity_min = 15
base_path = r'../Tesi/Export/BackTest_C'
start_date = 0
# Strategy set-up
direction = 'OPS_[OI]' # Direction driver
force = None  # 'VIX_[CBOE]'  # If None, don't use the force driver
polarize = False # True or False: polarize direction component
buy_only = True  # Set a buy-only strategy that ignores negative signals
# Portfolio set-up
w_limit = 5  # Rank the best N tickers based on the strategy
leverage = None # Strategy leverage (1 is no leverage, None is auto-compensation)
# ----------------------------------------------------------
base = ['SpotPrice']
data = {b: {} for b in base + [direction, force]}
if None in data.keys():
del data[None]
for tick in tqdm(ticker_list, desc='Importing data'):
try:
# Import data and clean-up
source = pd.read_excel(r'{}/{}/backTest_[{}].xlsx'.format(base_path, tick, maturity_min), engine='openpyxl')
source = source.loc[source['Date'] >= start_date, ~source.columns.str.contains('^Unnamed')]
source.set_index(pd.to_datetime(source['Date'], format='%Y%m%d'), drop=True, inplace=True)
for driver in data.keys():
data[driver][tick] = source[driver]
except:
pass
# Merge (concatenate) data and create dataframes
for driver in data.keys():
data[driver] = pd.concat(data[driver], axis=1)
# ❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌[Normalize direction data]❌❌❌❌❌❌❌❌❌❌❌
if driver == direction:
data[driver] = data[driver].sub(data[driver].mean(axis=1), axis=0)
# ❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌
# Generate strategy signals
# ⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕[SET-UP]⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕
if polarize:
limit = 0 if buy_only else -1
data[direction] = utils.polarizeTable(data[direction], under=limit)
if force is None:
force_v = 1
else:
force_v = data[force]
data['signals'] = data[direction] * force_v
# ⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕[SET-UP]⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕⭕
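# Rank-based selection used below: np.sort orders each row's signals ascending,
# so column -w_limit holds the w_limit-th largest value per date; every ticker
# whose signal is at or above that threshold receives a unit signal (top-N pick).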
if w_limit is not None:
mask = pd.DataFrame(np.sort(data['signals'].values, axis=1), index=data['signals'].index,
columns=data['signals'].columns).iloc[:, -w_limit]
data['signals'] = (data['signals'] >= pd.DataFrame(
data=np.array([mask, ] * data['signals'].shape[1]).T,
index=data['signals'].index,
columns=data['signals'].columns)).astype(int)
data['signals'][data['signals'] > 0] = 1
if buy_only:
data['signals'][data['signals'] < 0] = 0
# Compute weights
data['weights'] = data['signals'].div(data['signals'].abs().sum(axis=1), axis=0)
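# Worked example of the equal weighting above (values assumed): a signals row of
# [1, 0, 1, 0, 1] has an absolute sum of 3, so each selected ticker gets a
# weight of 1/3 and the rest stay at 0.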
# Compute ln-returns of benchmark and strategy
if leverage is None: leverage = data['SpotPrice'].shape[1]
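# With leverage=None the strategy is geared by the number of assets, offsetting
# the 1/N dilution introduced when the per-asset returns are averaged with
# .mean(axis=1) below.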
data['lnReturns'] = np.log(data['SpotPrice'].shift(-1) / data['SpotPrice'])
data['strategy'] = data['lnReturns'] * data['weights'] * leverage
# Compute performance metrics
performance = Performance(data['lnReturns'].mean(axis=1), data['strategy'].mean(axis=1), r=0.019)
performance.printPerformanceSummary()
# =====================================================================================
# FROM HERE NO DATA MANIPULATION
# =====================================================================================
# Create plot framework
fig, axs = plt.subplots(2, figsize=(15, 8), sharex=True)
fig.suptitle('Strategy tester', fontsize=16)
# Plot strategy return vs. benchmark (data)
axs[0].set_title('data returns')
axs[0].plot(data['lnReturns'].mean(axis=1).cumsum(), label='Benchmark')
axs[0].plot(data['strategy'].mean(axis=1).cumsum(), label='Strategy')
axs[0].set(ylabel='Cumulated ln-returns ($X_t$)')
axs[0].legend()
# Plot number of assets in portfolio
ax2 = axs[0].twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:gray'
ax2.set_ylabel('Assets count', color=color) # we already handled the x-label with ax1
ax2.plot(data['weights'].index, data['weights'].ne(0).sum(axis=1), linewidth=.5, color=color)
ax2.tick_params(axis='y', labelcolor=color)
# Plot evolution of weights
positive = data['weights'][data['weights'] >= 0].fillna(0)
negative = data['weights'][data['weights'] < 0].fillna(0)
axs[1].set_title('Weights evolution')
axs[1].stackplot(data['weights'].index, positive.T)
axs[1].stackplot(data['weights'].index, negative.T)
axs[1].plot(data['weights'].sum(axis=1), linewidth=1, linestyle="dotted",
color='black', alpha=.6, label='Avg. ($\mu$)')
axs[1].set(xlabel=r'days ($t$)', ylabel=r'data weights')
axs[1].legend()
with pd.ExcelWriter('{}/portfolio.xlsx'.format(base_path)) as writer:
data['lnReturns'].to_excel(writer, sheet_name='lnReturns', index=True)
data['signals'].to_excel(writer, sheet_name='Signals', index=True)
data['weights'].to_excel(writer, sheet_name='Weights', index=True)
data['strategy'].to_excel(writer, sheet_name='Strategy', index=True)
plt.show() | PypiClean |
/AGouTI-1.0.3.tar.gz/AGouTI-1.0.3/agouti_pkg/gffutils/attributes.py | import agouti_pkg.six
import collections
from agouti_pkg.gffutils import constants
# collections.MutableMapping is apparently the best way to provide dict-like
# interface (http://stackoverflow.com/a/3387975)
try:
    # Python versions that still expose MutableMapping at the top level
    _MutableMapping = collections.MutableMapping
except AttributeError:
    # Python >= 3.10 only provides it under collections.abc
    _MutableMapping = collections.abc.MutableMapping
class Attributes(_MutableMapping):
    def __init__(self, *args, **kwargs):
        """
        An Attributes object acts much like a dictionary. However, values are
        always stored internally as lists, even if a single value is provided.
        Whether or not you get a list back depends on the
        `constants.always_return_list` setting, which can be set on-the-fly.
        If True, then one-item lists are returned. This is best shown with an
        example:
        Set up an Attributes object:
        >>> attr = Attributes()
        Set the "Name" attribute with a string:
        >>> attr['Name'] = 'gene1'
        This is stored internally as a list, and by default, we'll get a list
        back:
        >>> assert attr['Name'] == ['gene1']
        The same thing happens if we set it with a list in the first place:
        >>> attr['Name'] = ['gene1']
        >>> assert attr['Name'] == ['gene1']
        Now, change the setting so that upon access, single-value lists are
        returned as the first item.
        >>> constants.always_return_list = False
        >>> assert attr['Name'] == 'gene1'
        Change it back again:
        >>> constants.always_return_list = True
        >>> assert attr['Name'] == ['gene1']
        """
        self._d = dict()
        self.update(*args, **kwargs)
    def __setitem__(self, k, v):
        if not isinstance(v, (list, tuple)):
            v = [v]
        self._d[k] = v
    def __getitem__(self, k):
        v = self._d[k]
        if constants.always_return_list:
            return v
        if isinstance(v, list) and len(v) == 1:
            v = v[0]
        return v
    def __delitem__(self, key):
        del self._d[key]
    def __iter__(self):
        return iter(self.keys())
    def __len__(self):
        return len(self._d)
    def keys(self):
        return self._d.keys()
    def values(self):
        return [self.__getitem__(k) for k in self.keys()]
    def items(self):
        r = []
        for k in self.keys():
            r.append((k, self.__getitem__(k)))
        return r
    def __str__(self):
        s = []
        for i in self.items():
            s.append("%s: %s" % i)
        return '\n'.join(s)
    def update(self, *args, **kwargs):
        for k, v in agouti_pkg.six.iteritems(dict(*args, **kwargs)):
            self[k] = v
# Useful for profiling: which dictionary-like class to store attributes in.
# This is used in Feature below and in parser.py
dict_class = Attributes
#dict_class = dict
#dict_class = helper_classes.DefaultOrderedDict
#dict_class = collections.defaultdict
#dict_class = collections.OrderedDict
#dict_class = helper_classes.DefaultListOrderedDict | PypiClean |
/DLTA-AI-1.1.tar.gz/DLTA-AI-1.1/DLTA_AI_app/mmdetection/mmdet/models/utils/misc.py | from torch.autograd import Function
from torch.nn import functional as F
class SigmoidGeometricMean(Function):
"""Forward and backward function of geometric mean of two sigmoid
functions.
This implementation with analytical gradient function substitutes
the autograd function of (x.sigmoid() * y.sigmoid()).sqrt(). The
    original implementation incurs NaN during gradient backpropagation
if both x and y are very small values.
"""
@staticmethod
def forward(ctx, x, y):
x_sigmoid = x.sigmoid()
y_sigmoid = y.sigmoid()
z = (x_sigmoid * y_sigmoid).sqrt()
ctx.save_for_backward(x_sigmoid, y_sigmoid, z)
return z
@staticmethod
def backward(ctx, grad_output):
x_sigmoid, y_sigmoid, z = ctx.saved_tensors
grad_x = grad_output * z * (1 - x_sigmoid) / 2
grad_y = grad_output * z * (1 - y_sigmoid) / 2
return grad_x, grad_y
sigmoid_geometric_mean = SigmoidGeometricMean.apply
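# Minimal usage sketch (values assumed for illustration): the .apply alias above
# is a drop-in replacement for (x.sigmoid() * y.sigmoid()).sqrt(), with an
# analytical backward pass that keeps gradients finite when the product of the
# two sigmoids underflows.
#
#     import torch
#     x = torch.randn(4, requires_grad=True)
#     y = torch.randn(4, requires_grad=True)
#     z = sigmoid_geometric_mean(x, y)
#     z.sum().backward()   # x.grad and y.grad are finite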
def interpolate_as(source, target, mode='bilinear', align_corners=False):
"""Interpolate the `source` to the shape of the `target`.
The `source` must be a Tensor, but the `target` can be a Tensor or a
np.ndarray with the shape (..., target_h, target_w).
Args:
source (Tensor): A 3D/4D Tensor with the shape (N, H, W) or
(N, C, H, W).
target (Tensor | np.ndarray): The interpolation target with the shape
(..., target_h, target_w).
mode (str): Algorithm used for interpolation. The options are the
same as those in F.interpolate(). Default: ``'bilinear'``.
align_corners (bool): The same as the argument in F.interpolate().
Returns:
Tensor: The interpolated source Tensor.
"""
assert len(target.shape) >= 2
def _interpolate_as(source, target, mode='bilinear', align_corners=False):
"""Interpolate the `source` (4D) to the shape of the `target`."""
target_h, target_w = target.shape[-2:]
source_h, source_w = source.shape[-2:]
if target_h != source_h or target_w != source_w:
source = F.interpolate(
source,
size=(target_h, target_w),
mode=mode,
align_corners=align_corners)
return source
if len(source.shape) == 3:
source = source[:, None, :, :]
source = _interpolate_as(source, target, mode, align_corners)
return source[:, 0, :, :]
else:
return _interpolate_as(source, target, mode, align_corners) | PypiClean |
/CloudFerry-1.55.2.tar.gz/CloudFerry-1.55.2/cloudferry/actions/block_storage/attach_used_volumes_via_compute.py |
import copy
import logging
from cinderclient import exceptions as cinder_exceptions
from novaclient import exceptions as nova_exceptions
from cloudferry.lib.base import exception
from cloudferry.lib.base.action import action
from cloudferry.lib.migration import notifiers
from cloudferry.lib.migration import objects
from cloudferry.lib.utils import proxy_client
from cloudferry.lib.utils import utils
LOG = logging.getLogger(__name__)
class AttachVolumesCompute(action.Action):
def __init__(self, init, cloud=None):
super(AttachVolumesCompute, self).__init__(init, cloud)
self.state_notifier = notifiers.MigrationStateNotifier()
for observer in self.init['migration_observers']:
self.state_notifier.add_observer(observer)
def _try_get_source_instance(self, instance_dict):
default_retval = instance_dict['instance']
source_instance_id = instance_dict.get('old_id')
if source_instance_id is None:
LOG.debug("No source instance ID specified for instance '%s'",
instance_dict)
return default_retval
try:
src_cpu = self.src_cloud.resources[utils.COMPUTE_RESOURCE]
return src_cpu.nova_client.servers.get(source_instance_id)
except nova_exceptions.NotFound:
LOG.debug("Failed to find instance '%s' in source cloud",
source_instance_id)
return default_retval
def run(self, info, **kwargs):
info = copy.deepcopy(info)
compute_res = self.cloud.resources[utils.COMPUTE_RESOURCE]
storage_res = self.cloud.resources[utils.STORAGE_RESOURCE]
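        # For each migrated VM, re-attach the volumes recorded in its meta info:
        # resolve every volume through this cloud's storage resource (falling
        # back to its migrated counterpart when the original ID is not found)
        # and attach it only when it reports an 'available' status.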
for instance in info[utils.INSTANCES_TYPE].itervalues():
if not instance[utils.META_INFO].get(utils.VOLUME_BODY):
continue
for vol in instance[utils.META_INFO][utils.VOLUME_BODY]:
volume = vol['volume']
volume_id = volume['id']
status = None
with proxy_client.expect_exception(cinder_exceptions.NotFound):
try:
status = storage_res.get_status(volume_id)
except cinder_exceptions.NotFound:
dst_volume = storage_res.get_migrated_volume(volume_id)
if dst_volume is not None:
volume_id = dst_volume.id
status = dst_volume.status
inst = instance['instance']
source_instance = self._try_get_source_instance(instance)
if status is None:
msg = ("Cannot attach volume '{vol}' to VM '{vm}': volume "
"does not exist").format(vol=volume_id,
vm=inst['name'])
self.state_notifier.incomplete(
objects.MigrationObjectType.VM, source_instance, msg)
continue
if status == 'available':
nova_client = compute_res.nova_client
try:
nova_client.volumes.create_server_volume(
inst['id'], volume_id, volume['device'])
timeout = self.cfg.migrate.storage_backend_timeout
storage_res.wait_for_status(volume_id,
storage_res.get_status,
'in-use',
timeout=timeout)
except (cinder_exceptions.ClientException,
nova_exceptions.ClientException,
exception.TimeoutException) as e:
msg = ("Failed to attach volume '%s' to instance "
"'%s': %s. Skipping" %
(volume_id, inst['id'], e.message))
LOG.warning(msg)
self.state_notifier.incomplete(
objects.MigrationObjectType.VM,
source_instance,
msg)
else:
msg = ("Cannot attach volume '%s' to instance '%s' since "
"it's status is '%s'" % (volume_id, inst['id'],
status))
LOG.warning(msg)
self.state_notifier.incomplete(
objects.MigrationObjectType.VM, source_instance, msg)
return {} | PypiClean |
/DLTA-AI-1.1.tar.gz/DLTA-AI-1.1/DLTA_AI_app/mmdetection/mmdet/models/dense_heads/mask2former_head.py | import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import Conv2d, build_plugin_layer, caffe2_xavier_init
from mmcv.cnn.bricks.transformer import (build_positional_encoding,
build_transformer_layer_sequence)
from mmcv.ops import point_sample
from mmcv.runner import ModuleList
from mmdet.core import build_assigner, build_sampler, reduce_mean
from mmdet.models.utils import get_uncertain_point_coords_with_randomness
from ..builder import HEADS, build_loss
from .anchor_free_head import AnchorFreeHead
from .maskformer_head import MaskFormerHead
@HEADS.register_module()
class Mask2FormerHead(MaskFormerHead):
"""Implements the Mask2Former head.
See `Masked-attention Mask Transformer for Universal Image
Segmentation <https://arxiv.org/pdf/2112.01527>`_ for details.
Args:
in_channels (list[int]): Number of channels in the input feature map.
feat_channels (int): Number of channels for features.
out_channels (int): Number of channels for output.
num_things_classes (int): Number of things.
num_stuff_classes (int): Number of stuff.
num_queries (int): Number of query in Transformer decoder.
pixel_decoder (:obj:`mmcv.ConfigDict` | dict): Config for pixel
decoder. Defaults to None.
enforce_decoder_input_project (bool, optional): Whether to add
            a layer to change the embed_dim of the transformer encoder in
            the pixel decoder to the embed_dim of the transformer decoder.
Defaults to False.
transformer_decoder (:obj:`mmcv.ConfigDict` | dict): Config for
transformer decoder. Defaults to None.
positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for
transformer decoder position encoding. Defaults to None.
loss_cls (:obj:`mmcv.ConfigDict` | dict): Config of the classification
loss. Defaults to None.
loss_mask (:obj:`mmcv.ConfigDict` | dict): Config of the mask loss.
Defaults to None.
loss_dice (:obj:`mmcv.ConfigDict` | dict): Config of the dice loss.
Defaults to None.
train_cfg (:obj:`mmcv.ConfigDict` | dict): Training config of
Mask2Former head.
test_cfg (:obj:`mmcv.ConfigDict` | dict): Testing config of
Mask2Former head.
init_cfg (dict or list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
in_channels,
feat_channels,
out_channels,
num_things_classes=80,
num_stuff_classes=53,
num_queries=100,
num_transformer_feat_level=3,
pixel_decoder=None,
enforce_decoder_input_project=False,
transformer_decoder=None,
positional_encoding=None,
loss_cls=None,
loss_mask=None,
loss_dice=None,
train_cfg=None,
test_cfg=None,
init_cfg=None,
**kwargs):
super(AnchorFreeHead, self).__init__(init_cfg)
self.num_things_classes = num_things_classes
self.num_stuff_classes = num_stuff_classes
self.num_classes = self.num_things_classes + self.num_stuff_classes
self.num_queries = num_queries
self.num_transformer_feat_level = num_transformer_feat_level
self.num_heads = transformer_decoder.transformerlayers.\
attn_cfgs.num_heads
self.num_transformer_decoder_layers = transformer_decoder.num_layers
assert pixel_decoder.encoder.transformerlayers.\
attn_cfgs.num_levels == num_transformer_feat_level
pixel_decoder_ = copy.deepcopy(pixel_decoder)
pixel_decoder_.update(
in_channels=in_channels,
feat_channels=feat_channels,
out_channels=out_channels)
self.pixel_decoder = build_plugin_layer(pixel_decoder_)[1]
self.transformer_decoder = build_transformer_layer_sequence(
transformer_decoder)
self.decoder_embed_dims = self.transformer_decoder.embed_dims
self.decoder_input_projs = ModuleList()
# from low resolution to high resolution
for _ in range(num_transformer_feat_level):
if (self.decoder_embed_dims != feat_channels
or enforce_decoder_input_project):
self.decoder_input_projs.append(
Conv2d(
feat_channels, self.decoder_embed_dims, kernel_size=1))
else:
self.decoder_input_projs.append(nn.Identity())
self.decoder_positional_encoding = build_positional_encoding(
positional_encoding)
self.query_embed = nn.Embedding(self.num_queries, feat_channels)
self.query_feat = nn.Embedding(self.num_queries, feat_channels)
# from low resolution to high resolution
self.level_embed = nn.Embedding(self.num_transformer_feat_level,
feat_channels)
self.cls_embed = nn.Linear(feat_channels, self.num_classes + 1)
self.mask_embed = nn.Sequential(
nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True),
nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True),
nn.Linear(feat_channels, out_channels))
self.test_cfg = test_cfg
self.train_cfg = train_cfg
if train_cfg:
self.assigner = build_assigner(self.train_cfg.assigner)
self.sampler = build_sampler(self.train_cfg.sampler, context=self)
self.num_points = self.train_cfg.get('num_points', 12544)
self.oversample_ratio = self.train_cfg.get('oversample_ratio', 3.0)
self.importance_sample_ratio = self.train_cfg.get(
'importance_sample_ratio', 0.75)
self.class_weight = loss_cls.class_weight
self.loss_cls = build_loss(loss_cls)
self.loss_mask = build_loss(loss_mask)
self.loss_dice = build_loss(loss_dice)
def init_weights(self):
for m in self.decoder_input_projs:
if isinstance(m, Conv2d):
caffe2_xavier_init(m, bias=0)
self.pixel_decoder.init_weights()
for p in self.transformer_decoder.parameters():
if p.dim() > 1:
nn.init.xavier_normal_(p)
def _get_target_single(self, cls_score, mask_pred, gt_labels, gt_masks,
img_metas):
"""Compute classification and mask targets for one image.
Args:
cls_score (Tensor): Mask score logits from a single decoder layer
for one image. Shape (num_queries, cls_out_channels).
mask_pred (Tensor): Mask logits for a single decoder layer for one
image. Shape (num_queries, h, w).
gt_labels (Tensor): Ground truth class indices for one image with
shape (num_gts, ).
gt_masks (Tensor): Ground truth mask for each image, each with
shape (num_gts, h, w).
            img_metas (dict): Image information.
Returns:
tuple[Tensor]: A tuple containing the following for one image.
- labels (Tensor): Labels of each image. \
shape (num_queries, ).
- label_weights (Tensor): Label weights of each image. \
shape (num_queries, ).
- mask_targets (Tensor): Mask targets of each image. \
shape (num_queries, h, w).
- mask_weights (Tensor): Mask weights of each image. \
shape (num_queries, ).
- pos_inds (Tensor): Sampled positive indices for each \
image.
- neg_inds (Tensor): Sampled negative indices for each \
image.
"""
# sample points
num_queries = cls_score.shape[0]
num_gts = gt_labels.shape[0]
point_coords = torch.rand((1, self.num_points, 2),
device=cls_score.device)
# shape (num_queries, num_points)
mask_points_pred = point_sample(
mask_pred.unsqueeze(1), point_coords.repeat(num_queries, 1,
1)).squeeze(1)
# shape (num_gts, num_points)
gt_points_masks = point_sample(
gt_masks.unsqueeze(1).float(), point_coords.repeat(num_gts, 1,
1)).squeeze(1)
# assign and sample
assign_result = self.assigner.assign(cls_score, mask_points_pred,
gt_labels, gt_points_masks,
img_metas)
sampling_result = self.sampler.sample(assign_result, mask_pred,
gt_masks)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
# label target
labels = gt_labels.new_full((self.num_queries, ),
self.num_classes,
dtype=torch.long)
labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]
label_weights = gt_labels.new_ones((self.num_queries, ))
# mask target
mask_targets = gt_masks[sampling_result.pos_assigned_gt_inds]
mask_weights = mask_pred.new_zeros((self.num_queries, ))
mask_weights[pos_inds] = 1.0
return (labels, label_weights, mask_targets, mask_weights, pos_inds,
neg_inds)
def loss_single(self, cls_scores, mask_preds, gt_labels_list,
gt_masks_list, img_metas):
"""Loss function for outputs from a single decoder layer.
Args:
cls_scores (Tensor): Mask score logits from a single decoder layer
for all images. Shape (batch_size, num_queries,
                cls_out_channels). Note `cls_out_channels` should include
background.
mask_preds (Tensor): Mask logits for a pixel decoder for all
images. Shape (batch_size, num_queries, h, w).
gt_labels_list (list[Tensor]): Ground truth class indices for each
image, each with shape (num_gts, ).
gt_masks_list (list[Tensor]): Ground truth mask for each image,
each with shape (num_gts, h, w).
img_metas (list[dict]): List of image meta information.
Returns:
tuple[Tensor]: Loss components for outputs from a single \
decoder layer.
"""
num_imgs = cls_scores.size(0)
cls_scores_list = [cls_scores[i] for i in range(num_imgs)]
mask_preds_list = [mask_preds[i] for i in range(num_imgs)]
(labels_list, label_weights_list, mask_targets_list, mask_weights_list,
num_total_pos,
num_total_neg) = self.get_targets(cls_scores_list, mask_preds_list,
gt_labels_list, gt_masks_list,
img_metas)
# shape (batch_size, num_queries)
labels = torch.stack(labels_list, dim=0)
# shape (batch_size, num_queries)
label_weights = torch.stack(label_weights_list, dim=0)
# shape (num_total_gts, h, w)
mask_targets = torch.cat(mask_targets_list, dim=0)
# shape (batch_size, num_queries)
mask_weights = torch.stack(mask_weights_list, dim=0)
        # classification loss
# shape (batch_size * num_queries, )
cls_scores = cls_scores.flatten(0, 1)
labels = labels.flatten(0, 1)
label_weights = label_weights.flatten(0, 1)
class_weight = cls_scores.new_tensor(self.class_weight)
loss_cls = self.loss_cls(
cls_scores,
labels,
label_weights,
avg_factor=class_weight[labels].sum())
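        # reduce_mean below averages the count of matched (positive) queries
        # across workers so the mask and dice losses are normalised consistently
        # in distributed training.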
num_total_masks = reduce_mean(cls_scores.new_tensor([num_total_pos]))
num_total_masks = max(num_total_masks, 1)
# extract positive ones
# shape (batch_size, num_queries, h, w) -> (num_total_gts, h, w)
mask_preds = mask_preds[mask_weights > 0]
if mask_targets.shape[0] == 0:
# zero match
loss_dice = mask_preds.sum()
loss_mask = mask_preds.sum()
return loss_cls, loss_mask, loss_dice
with torch.no_grad():
points_coords = get_uncertain_point_coords_with_randomness(
mask_preds.unsqueeze(1), None, self.num_points,
self.oversample_ratio, self.importance_sample_ratio)
# shape (num_total_gts, h, w) -> (num_total_gts, num_points)
mask_point_targets = point_sample(
mask_targets.unsqueeze(1).float(), points_coords).squeeze(1)
# shape (num_queries, h, w) -> (num_queries, num_points)
mask_point_preds = point_sample(
mask_preds.unsqueeze(1), points_coords).squeeze(1)
# dice loss
loss_dice = self.loss_dice(
mask_point_preds, mask_point_targets, avg_factor=num_total_masks)
# mask loss
# shape (num_queries, num_points) -> (num_queries * num_points, )
mask_point_preds = mask_point_preds.reshape(-1)
# shape (num_total_gts, num_points) -> (num_total_gts * num_points, )
mask_point_targets = mask_point_targets.reshape(-1)
loss_mask = self.loss_mask(
mask_point_preds,
mask_point_targets,
avg_factor=num_total_masks * self.num_points)
return loss_cls, loss_mask, loss_dice
def forward_head(self, decoder_out, mask_feature, attn_mask_target_size):
"""Forward for head part which is called after every decoder layer.
Args:
decoder_out (Tensor): in shape (num_queries, batch_size, c).
mask_feature (Tensor): in shape (batch_size, c, h, w).
attn_mask_target_size (tuple[int, int]): target attention
mask size.
Returns:
            tuple: A tuple containing three elements.
- cls_pred (Tensor): Classification scores in shape \
(batch_size, num_queries, cls_out_channels). \
                    Note `cls_out_channels` should include background.
- mask_pred (Tensor): Mask scores in shape \
(batch_size, num_queries,h, w).
- attn_mask (Tensor): Attention mask in shape \
(batch_size * num_heads, num_queries, h, w).
"""
decoder_out = self.transformer_decoder.post_norm(decoder_out)
decoder_out = decoder_out.transpose(0, 1)
# shape (batch_size, num_queries, c)
cls_pred = self.cls_embed(decoder_out)
# shape (batch_size, num_queries, c)
mask_embed = self.mask_embed(decoder_out)
# shape (batch_size, num_queries, h, w)
mask_pred = torch.einsum('bqc,bchw->bqhw', mask_embed, mask_feature)
attn_mask = F.interpolate(
mask_pred,
attn_mask_target_size,
mode='bilinear',
align_corners=False)
# shape (batch_size, num_queries, h, w) ->
# (batch_size * num_head, num_queries, h*w)
attn_mask = attn_mask.flatten(2).unsqueeze(1).repeat(
(1, self.num_heads, 1, 1)).flatten(0, 1)
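        # Threshold at 0.5: locations predicted as background become True and
        # are therefore ignored by the masked cross-attention of the next
        # decoder layer.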
attn_mask = attn_mask.sigmoid() < 0.5
attn_mask = attn_mask.detach()
return cls_pred, mask_pred, attn_mask
def forward(self, feats, img_metas):
"""Forward function.
Args:
feats (list[Tensor]): Multi scale Features from the
upstream network, each is a 4D-tensor.
img_metas (list[dict]): List of image information.
Returns:
tuple: A tuple contains two elements.
                - cls_pred_list (list[Tensor]): Classification logits \
for each decoder layer. Each is a 3D-tensor with shape \
(batch_size, num_queries, cls_out_channels). \
                    Note `cls_out_channels` should include background.
- mask_pred_list (list[Tensor]): Mask logits for each \
decoder layer. Each with shape (batch_size, num_queries, \
h, w).
"""
batch_size = len(img_metas)
mask_features, multi_scale_memorys = self.pixel_decoder(feats)
# multi_scale_memorys (from low resolution to high resolution)
decoder_inputs = []
decoder_positional_encodings = []
for i in range(self.num_transformer_feat_level):
decoder_input = self.decoder_input_projs[i](multi_scale_memorys[i])
# shape (batch_size, c, h, w) -> (h*w, batch_size, c)
decoder_input = decoder_input.flatten(2).permute(2, 0, 1)
level_embed = self.level_embed.weight[i].view(1, 1, -1)
decoder_input = decoder_input + level_embed
# shape (batch_size, c, h, w) -> (h*w, batch_size, c)
mask = decoder_input.new_zeros(
(batch_size, ) + multi_scale_memorys[i].shape[-2:],
dtype=torch.bool)
decoder_positional_encoding = self.decoder_positional_encoding(
mask)
decoder_positional_encoding = decoder_positional_encoding.flatten(
2).permute(2, 0, 1)
decoder_inputs.append(decoder_input)
decoder_positional_encodings.append(decoder_positional_encoding)
# shape (num_queries, c) -> (num_queries, batch_size, c)
query_feat = self.query_feat.weight.unsqueeze(1).repeat(
(1, batch_size, 1))
query_embed = self.query_embed.weight.unsqueeze(1).repeat(
(1, batch_size, 1))
cls_pred_list = []
mask_pred_list = []
cls_pred, mask_pred, attn_mask = self.forward_head(
query_feat, mask_features, multi_scale_memorys[0].shape[-2:])
cls_pred_list.append(cls_pred)
mask_pred_list.append(mask_pred)
for i in range(self.num_transformer_decoder_layers):
level_idx = i % self.num_transformer_feat_level
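            # Decoder layers attend to the multi-scale features in a round-robin
            # fashion, cycling from the lowest to the highest resolution level.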
            # if a mask is all True (all background), then set it all False.
attn_mask[torch.where(
attn_mask.sum(-1) == attn_mask.shape[-1])] = False
# cross_attn + self_attn
layer = self.transformer_decoder.layers[i]
attn_masks = [attn_mask, None]
query_feat = layer(
query=query_feat,
key=decoder_inputs[level_idx],
value=decoder_inputs[level_idx],
query_pos=query_embed,
key_pos=decoder_positional_encodings[level_idx],
attn_masks=attn_masks,
query_key_padding_mask=None,
# here we do not apply masking on padded region
key_padding_mask=None)
cls_pred, mask_pred, attn_mask = self.forward_head(
query_feat, mask_features, multi_scale_memorys[
(i + 1) % self.num_transformer_feat_level].shape[-2:])
cls_pred_list.append(cls_pred)
mask_pred_list.append(mask_pred)
return cls_pred_list, mask_pred_list | PypiClean |
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/materialdjango/static/materialdjango/components/bower_components/prism/components.js | var components = {
"core": {
"meta": {
"path": "components/prism-core.js",
"option": "mandatory"
},
"core": "Core"
},
"themes": {
"meta": {
"path": "themes/{id}.css",
"link": "index.html?theme={id}",
"exclusive": true
},
"prism": {
"title": "Default",
"option": "default"
},
"prism-dark": "Dark",
"prism-funky": "Funky",
"prism-okaidia": {
"title": "Okaidia",
"owner": "ocodia"
},
"prism-twilight": {
"title": "Twilight",
"owner": "remybach"
},
"prism-coy": {
"title": "Coy",
"owner": "tshedor"
},
"prism-solarizedlight": {
"title": "Solarized Light",
"owner": "hectormatos2011 "
}
},
"languages": {
"meta": {
"path": "components/prism-{id}",
"noCSS": true,
"examplesPath": "examples/prism-{id}",
"addCheckAll": true
},
"markup": {
"title": "Markup",
"aliasTitles": { "html": "HTML", "xml": "XML", "svg": "SVG", "mathml": "MathML" },
"option": "default"
},
"css": {
"title": "CSS",
"option": "default"
},
"clike": {
"title": "C-like",
"option": "default"
},
"javascript": {
"title": "JavaScript",
"option": "default",
"require": "clike"
},
"abap": {
"title": "ABAP",
"owner": "dellagustin"
},
"actionscript": {
"title": "ActionScript",
"require": "javascript",
"owner": "Golmote"
},
"ada": {
"title": "Ada",
"owner": "Lucretia"
},
"apacheconf": {
"title": "Apache Configuration",
"owner": "GuiTeK"
},
"apl": {
"title": "APL",
"owner": "ngn"
},
"applescript": {
"title": "AppleScript",
"owner": "Golmote"
},
"arduino": {
"title": "Arduino",
"require": "cpp",
"owner": "eisbehr-"
},
"asciidoc": {
"title": "AsciiDoc",
"owner": "Golmote"
},
"aspnet": {
"title": "ASP.NET (C#)",
"require": "markup",
"owner": "nauzilus"
},
"autohotkey": {
"title": "AutoHotkey",
"owner": "aviaryan"
},
"autoit": {
"title": "AutoIt",
"owner": "Golmote"
},
"bash": {
"title": "Bash",
"owner": "zeitgeist87"
},
"basic": {
"title": "BASIC",
"owner": "Golmote"
},
"batch": {
"title": "Batch",
"owner": "Golmote"
},
"bison": {
"title": "Bison",
"require": "c",
"owner": "Golmote"
},
"brainfuck": {
"title": "Brainfuck",
"owner": "Golmote"
},
"bro": {
"title": "Bro",
"owner": "wayward710"
},
"c": {
"title": "C",
"require": "clike",
"owner": "zeitgeist87"
},
"csharp": {
"title": "C#",
"require": "clike",
"owner": "mvalipour"
},
"cpp": {
"title": "C++",
"require": "c",
"owner": "zeitgeist87"
},
"coffeescript": {
"title": "CoffeeScript",
"require": "javascript",
"owner": "R-osey"
},
"crystal": {
"title": "Crystal",
"require": "ruby",
"owner": "MakeNowJust"
},
"css-extras": {
"title": "CSS Extras",
"require": "css",
"owner": "milesj"
},
"d": {
"title": "D",
"require": "clike",
"owner": "Golmote"
},
"dart": {
"title": "Dart",
"require": "clike",
"owner": "Golmote"
},
"django": {
"title": "Django/Jinja2",
"require": "markup",
"owner": "romanvm"
},
"diff": {
"title": "Diff",
"owner": "uranusjr"
},
"docker": {
"title": "Docker",
"owner": "JustinBeckwith"
},
"eiffel": {
"title": "Eiffel",
"owner": "Conaclos"
},
"elixir": {
"title": "Elixir",
"owner": "Golmote"
},
"erlang": {
"title": "Erlang",
"owner": "Golmote"
},
"fsharp": {
"title": "F#",
"require": "clike",
"owner": "simonreynolds7"
},
"flow": {
"title": "Flow",
"require": "javascript",
"owner": "Golmote"
},
"fortran": {
"title": "Fortran",
"owner": "Golmote"
},
"gherkin": {
"title": "Gherkin",
"owner": "hason"
},
"git": {
"title": "Git",
"owner": "lgiraudel"
},
"glsl": {
"title": "GLSL",
"require": "clike",
"owner": "Golmote"
},
"go": {
"title": "Go",
"require": "clike",
"owner": "arnehormann"
},
"graphql": {
"title": "GraphQL",
"owner": "Golmote"
},
"groovy": {
"title": "Groovy",
"require": "clike",
"owner": "robfletcher"
},
"haml": {
"title": "Haml",
"require": "ruby",
"owner": "Golmote"
},
"handlebars": {
"title": "Handlebars",
"require": "markup",
"owner": "Golmote"
},
"haskell": {
"title": "Haskell",
"owner": "bholst"
},
"haxe": {
"title": "Haxe",
"require": "clike",
"owner": "Golmote"
},
"http": {
"title": "HTTP",
"owner": "danielgtaylor"
},
"icon": {
"title": "Icon",
"owner": "Golmote"
},
"inform7": {
"title": "Inform 7",
"owner": "Golmote"
},
"ini": {
"title": "Ini",
"owner": "aviaryan"
},
"j": {
"title": "J",
"owner": "Golmote"
},
"java": {
"title": "Java",
"require": "clike",
"owner": "sherblot"
},
"jolie": {
"title": "Jolie",
"require": "clike",
"owner": "thesave"
},
"json": {
"title": "JSON",
"owner": "CupOfTea696"
},
"julia": {
"title": "Julia",
"owner": "cdagnino"
},
"keyman": {
"title": "Keyman",
"owner": "mcdurdin"
},
"kotlin": {
"title": "Kotlin",
"require": "clike",
"owner": "Golmote"
},
"latex": {
"title": "LaTeX",
"owner": "japborst"
},
"less": {
"title": "Less",
"require": "css",
"owner": "Golmote"
},
"livescript": {
"title": "LiveScript",
"owner": "Golmote"
},
"lolcode": {
"title": "LOLCODE",
"owner": "Golmote"
},
"lua": {
"title": "Lua",
"owner": "Golmote"
},
"makefile": {
"title": "Makefile",
"owner": "Golmote"
},
"markdown": {
"title": "Markdown",
"require": "markup",
"owner": "Golmote"
},
"matlab": {
"title": "MATLAB",
"owner": "Golmote"
},
"mel": {
"title": "MEL",
"owner": "Golmote"
},
"mizar": {
"title": "Mizar",
"owner": "Golmote"
},
"monkey": {
"title": "Monkey",
"owner": "Golmote"
},
"n4js":{
"title": "N4JS",
"require": "javascript",
"owner": "bsmith-n4"
},
"nasm": {
"title": "NASM",
"owner": "rbmj"
},
"nginx": {
"title": "nginx",
"owner": "westonganger",
"require": "clike"
},
"nim": {
"title": "Nim",
"owner": "Golmote"
},
"nix": {
"title": "Nix",
"owner": "Golmote"
},
"nsis": {
"title": "NSIS",
"owner": "idleberg"
},
"objectivec": {
"title": "Objective-C",
"require": "c",
"owner": "uranusjr"
},
"ocaml": {
"title": "OCaml",
"owner": "Golmote"
},
"opencl": {
"title": "OpenCL",
"require": "cpp",
"owner": "Milania1"
},
"oz": {
"title": "Oz",
"owner": "Golmote"
},
"parigp": {
"title": "PARI/GP",
"owner": "Golmote"
},
"parser": {
"title": "Parser",
"require": "markup",
"owner": "Golmote"
},
"pascal": {
"title": "Pascal",
"owner": "Golmote"
},
"perl": {
"title": "Perl",
"owner": "Golmote"
},
"php": {
"title": "PHP",
"require": "clike",
"owner": "milesj"
},
"php-extras": {
"title": "PHP Extras",
"require": "php",
"owner": "milesj"
},
"powershell": {
"title": "PowerShell",
"owner": "nauzilus"
},
"processing": {
"title": "Processing",
"require": "clike",
"owner": "Golmote"
},
"prolog": {
"title": "Prolog",
"owner": "Golmote"
},
"properties": {
"title": ".properties",
"owner": "Golmote"
},
"protobuf": {
"title": "Protocol Buffers",
"require": "clike",
"owner": "just-boris"
},
"pug": {
"title": "Pug",
"require": "javascript",
"owner": "Golmote"
},
"puppet": {
"title": "Puppet",
"owner": "Golmote"
},
"pure": {
"title": "Pure",
"owner": "Golmote"
},
"python": {
"title": "Python",
"owner": "multipetros"
},
"q": {
"title": "Q",
"owner": "Golmote"
},
"qore": {
"title": "Qore",
"require": "clike",
"owner": "temnroegg"
},
"r": {
"title": "R",
"owner": "Golmote"
},
"jsx":{
"title": "React JSX",
"require": ["markup", "javascript"],
"owner": "vkbansal"
},
"renpy": {
"title": "Ren'py",
"owner": "HyuchiaDiego"
},
"reason": {
"title": "Reason",
"require": "clike",
"owner": "Golmote"
},
"rest": {
"title": "reST (reStructuredText)",
"owner": "Golmote"
},
"rip": {
"title": "Rip",
"owner": "ravinggenius"
},
"roboconf": {
"title": "Roboconf",
"owner": "Golmote"
},
"ruby": {
"title": "Ruby",
"require": "clike",
"owner": "samflores"
},
"rust": {
"title": "Rust",
"owner": "Golmote"
},
"sas": {
"title": "SAS",
"owner": "Golmote"
},
"sass": {
"title": "Sass (Sass)",
"require": "css",
"owner": "Golmote"
},
"scss": {
"title": "Sass (Scss)",
"require": "css",
"owner": "MoOx"
},
"scala": {
"title": "Scala",
"require": "java",
"owner": "jozic"
},
"scheme" : {
"title": "Scheme",
"owner" : "bacchus123"
},
"smalltalk": {
"title": "Smalltalk",
"owner": "Golmote"
},
"smarty": {
"title": "Smarty",
"require": "markup",
"owner": "Golmote"
},
"sql": {
"title": "SQL",
"owner": "multipetros"
},
"stylus" : {
"title": "Stylus",
"owner": "vkbansal"
},
"swift": {
"title": "Swift",
"require": "clike",
"owner": "chrischares"
},
"tcl": {
"title": "Tcl",
"owner": "PeterChaplin"
},
"textile": {
"title": "Textile",
"require": "markup",
"owner": "Golmote"
},
"twig": {
"title": "Twig",
"require": "markup",
"owner": "brandonkelly"
},
"typescript":{
"title": "TypeScript",
"require": "javascript",
"owner": "vkbansal"
},
"vbnet": {
"title": "VB.Net",
"require": "basic",
"owner": "Bigsby"
},
"verilog": {
"title": "Verilog",
"owner": "a-rey"
},
"vhdl": {
"title": "VHDL",
"owner": "a-rey"
},
"vim": {
"title": "vim",
"owner": "westonganger"
},
"wiki": {
"title": "Wiki markup",
"require": "markup",
"owner": "Golmote"
},
"xojo": {
"title": "Xojo (REALbasic)",
"owner": "Golmote"
},
"yaml": {
"title": "YAML",
"owner": "hason"
}
},
"plugins": {
"meta": {
"path": "plugins/{id}/prism-{id}",
"link": "plugins/{id}/"
},
"line-highlight": "Line Highlight",
"line-numbers": {
"title": "Line Numbers",
"owner": "kuba-kubula"
},
"show-invisibles": "Show Invisibles",
"autolinker": "Autolinker",
"wpd": "WebPlatform Docs",
"custom-class": {
"title": "Custom Class",
"owner": "dvkndn",
"noCSS": true
},
"file-highlight": {
"title": "File Highlight",
"noCSS": true
},
"show-language": {
"title": "Show Language",
"owner": "nauzilus",
"noCSS": true,
"require": "toolbar"
},
"jsonp-highlight": {
"title": "JSONP Highlight",
"noCSS": true,
"owner": "nauzilus"
},
"highlight-keywords": {
"title": "Highlight Keywords",
"owner": "vkbansal",
"noCSS": true
},
"remove-initial-line-feed": {
"title": "Remove initial line feed",
"owner": "Golmote",
"noCSS": true
},
"previewer-base": {
"title": "Previewer: Base",
"owner": "Golmote"
},
"previewer-color": {
"title": "Previewer: Color",
"require": "previewer-base",
"owner": "Golmote"
},
"previewer-gradient": {
"title": "Previewer: Gradient",
"require": "previewer-base",
"owner": "Golmote"
},
"previewer-easing": {
"title": "Previewer: Easing",
"require": "previewer-base",
"owner": "Golmote"
},
"previewer-time": {
"title": "Previewer: Time",
"require": "previewer-base",
"owner": "Golmote"
},
"previewer-angle": {
"title": "Previewer: Angle",
"require": "previewer-base",
"owner": "Golmote"
},
"autoloader": {
"title": "Autoloader",
"owner": "Golmote",
"noCSS": true
},
"keep-markup": {
"title": "Keep Markup",
"owner": "Golmote",
"after": "normalize-whitespace",
"noCSS": true
},
"command-line": {
"title": "Command Line",
"owner": "chriswells0"
},
"unescaped-markup": "Unescaped Markup",
"normalize-whitespace": {
"title": "Normalize Whitespace",
"owner": "zeitgeist87",
"after": "unescaped-markup",
"noCSS": true
},
"data-uri-highlight": {
"title": "Data-URI Highlight",
"owner": "Golmote",
"noCSS": true
},
"toolbar": {
"title": "Toolbar",
"owner": "mAAdhaTTah"
},
"copy-to-clipboard": {
"title": "Copy to Clipboard Button",
"owner": "mAAdhaTTah",
"require": "toolbar",
"noCSS": true
}
}
}; | PypiClean |
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dojo/cldr/nls/sv/gregorian.js | ({"months-format-narrow":["J","F","M","A","M","J","J","A","S","O","N","D"],"quarters-standAlone-narrow":["1","2","3","4"],"field-weekday":"veckodag","dateFormatItem-yQQQ":"y QQQ","dateFormatItem-yMEd":"EEE, yyyy-MM-dd","dateFormatItem-MMMEd":"E d MMM","eraNarrow":["f.Kr.","e.Kr."],"dateFormat-long":"d MMMM y","months-format-wide":["januari","februari","mars","april","maj","juni","juli","augusti","september","oktober","november","december"],"dateFormat-full":"EEEE d MMMM y","dateFormatItem-Md":"d/M","dateFormatItem-MMMMEEEd":"EEE d MMMM","field-era":"era","dateFormatItem-yM":"yyyy-MM","months-standAlone-wide":["januari","februari","mars","april","maj","juni","juli","augusti","september","oktober","november","december"],"timeFormat-short":"HH.mm","quarters-format-wide":["1:a kvartalet","2:a kvartalet","3:e kvartalet","4:e kvartalet"],"timeFormat-long":"HH.mm.ss z","field-year":"år","dateFormatItem-yMMM":"y MMM","dateFormatItem-yQ":"yyyy Q","field-hour":"timme","dateFormatItem-MMdd":"dd/MM","months-format-abbr":["jan","feb","mar","apr","maj","jun","jul","aug","sep","okt","nov","dec"],"dateFormatItem-yyQ":"Q yy","timeFormat-full":"'kl'. HH.mm.ss zzzz","am":"fm","months-standAlone-abbr":["jan","feb","mar","apr","maj","jun","jul","aug","sep","okt","nov","dec"],"quarters-format-abbr":["K1","K2","K3","K4"],"quarters-standAlone-wide":["1:a kvartalet","2:a kvartalet","3:e kvartalet","4:e kvartalet"],"dateFormatItem-HHmmss":"HH.mm.ss","dateFormatItem-hhmmss":"h.mm.ss a","dateFormatItem-M":"L","days-standAlone-wide":["söndag","måndag","tisdag","onsdag","torsdag","fredag","lördag"],"dateFormatItem-yyyyMMM":"MMM y","dateFormatItem-MMMMd":"d MMMM","dateFormatItem-yyMMM":"MMM -yy","timeFormat-medium":"HH.mm.ss","dateFormatItem-Hm":"H.mm","quarters-standAlone-abbr":["K1","K2","K3","K4"],"eraAbbr":["f.Kr.","e.Kr."],"field-minute":"minut","field-dayperiod":"fm/em","days-standAlone-abbr":["sön","mån","tis","ons","tors","fre","lör"],"dateFormatItem-d":"d","dateFormatItem-ms":"mm.ss","dateFormatItem-MMMd":"d MMM","dateFormatItem-MEd":"E d/M","dateFormatItem-yMMMM":"y MMMM","field-day":"dag","days-format-wide":["söndag","måndag","tisdag","onsdag","torsdag","fredag","lördag"],"field-zone":"tidszon","dateFormatItem-yyyyMM":"yyyy-MM","dateFormatItem-y":"y","months-standAlone-narrow":["J","F","M","A","M","J","J","A","S","O","N","D"],"dateFormatItem-yyMM":"yy-MM","days-format-abbr":["sön","mån","tis","ons","tors","fre","lör"],"eraNames":["före Kristus","efter Kristus"],"days-format-narrow":["S","M","T","O","T","F","L"],"field-month":"månad","days-standAlone-narrow":["S","M","T","O","T","F","L"],"dateFormatItem-MMM":"LLL","dateFormatItem-HHmm":"HH.mm","pm":"em","dateFormatItem-MMMMEd":"E d MMMM","dateFormat-short":"yyyy-MM-dd","dateFormatItem-MMd":"d/M","field-second":"sekund","dateFormatItem-yMMMEd":"EEE d MMM y","dateFormatItem-hhmm":"hh.mm a","field-week":"vecka","dateFormat-medium":"d MMM y","dateFormatItem-yyyyQQQQ":"QQQQ y","dateTimeFormats-appendItem-Day-Of-Week":"{0} {1}","dateTimeFormat-medium":"{1} {0}","dateFormatItem-EEEd":"d EEE","dateTimeFormats-appendItem-Second":"{0} ({2}: {1})","dateTimeFormats-appendItem-Era":"{0} {1}","dateTimeFormats-appendItem-Week":"{0} ({2}: {1})","quarters-format-narrow":["1","2","3","4"],"dateTimeFormat-long":"{1} {0}","dateTimeFormat-full":"{1} {0}","dateTimeFormats-appendItem-Day":"{0} ({2}: {1})","dateFormatItem-hm":"h:mm a","dateTimeFormats-appendItem-Year":"{0} 
{1}","dateTimeFormats-appendItem-Hour":"{0} ({2}: {1})","dateTimeFormats-appendItem-Quarter":"{0} ({2}: {1})","dateTimeFormats-appendItem-Month":"{0} ({2}: {1})","dateTimeFormats-appendItem-Minute":"{0} ({2}: {1})","dateTimeFormats-appendItem-Timezone":"{0} {1}","dateTimeFormat-short":"{1} {0}","dateFormatItem-Hms":"H:mm:ss","dateFormatItem-hms":"h:mm:ss a"}) | PypiClean |
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/localization/lt/TeX.js | MathJax.Localization.addTranslation("lt","TeX",{version:"2.7.9",isLoaded:true,strings:{ExtraOpenMissingClose:"Per daug atidarom\u0173j\u0173 arba per ma\u017Eai u\u017Edarom\u0173j\u0173 riestini\u0173 skliausteli\u0173",ExtraCloseMissingOpen:"Per daug u\u017Edarom\u0173j\u0173 arba per ma\u017Eai atidarom\u0173j\u0173 riestini\u0173 skliausteli\u0173",MissingLeftExtraRight:"Per ma\u017Eai \\left arba per daug \\right",MissingScript:"N\u0117ra vir\u0161utinio arba apatinio indekso argumento",ExtraLeftMissingRight:"Per daug \\left arba per ma\u017Eai \\right",Misplaced:"Ne tinkamoje vietoje %1",MissingOpenForSub:"N\u0117ra atidaromojo riestinio apatinio indekso skliaustelio",MissingOpenForSup:"N\u0117ra atidaromojo riestinio vir\u0161utinio indekso skliaustelio",AmbiguousUseOf:"Nevienareik\u0161m\u0117 %1 vartosena",EnvBadEnd:"\\begin{%1} baig\u0117si \\end{%2}",EnvMissingEnd:"N\u0117ra \\end{%1}",MissingBoxFor:"N\u0117ra %1 langelio",MissingCloseBrace:"N\u0117ra u\u017Edaromojo riestinio skliaustelio",UndefinedControlSequence:"Neapibr\u0117\u017Eta valdymo seka %1",DoubleExponent:"Kartojamas laipsnio rodiklis: tikslinti riestiniais skliausteliais",DoubleSubscripts:"Kartojamas apatinis indeksas: tikslinti riestiniais skliausteliais",DoubleExponentPrime:"Pirminis skai\u010Dius kartoja laipsnio rodikl\u012F: tikslinti riestiniais skliausteliais",CantUseHash1:"Makrokomandos parametro ra\u0161mens \u201E#\u201C matematikos veiksenoje vartoti negalima",MisplacedMiddle:"%1 privalo b\u016Bti \\left ir \\right viduje",MisplacedLimits:"%1 taikomas tik operatoriams",MisplacedMoveRoot:"%1 rodomas tik \u0161aknyje",MultipleCommand:"Kartojamas %1",IntegerArg:"%1 argumentas privalo b\u016Bti sveikasis skai\u010Dius",NotMathMLToken:"%1 n\u0117ra leksema",InvalidMathMLAttr:"Netinkamas \u201EMathML\u201C po\u017Eymis: %1",UnknownAttrForElement:"%1 n\u0117ra atpa\u017E\u012Fstamas %2 po\u017Eymis",MaxMacroSub1:"Vir\u0161ytas did\u017Eiausias leid\u017Eiamas \u201EMathJax\u201C makrokomand\u0173 pakait\u0173 skai\u010Dius; galb\u016Bt vykdomas rekursinis makrokomandos kreipinys?",MaxMacroSub2:"Vir\u0161ytas did\u017Eiausias leid\u017Eiamas \u201EMathJax\u201C pakait\u0173 skai\u010Dius; galb\u016Bt vykdoma rekursin\u0117 \u201ELaTeX\u201C aplinka?",MissingArgFor:"N\u0117ra %1 argumento",ExtraAlignTab:"Per daug lygiavimo tabuliatori\u0173 \\cases tekste",BracketMustBeDimension:"%1 argumentas riestiniuose skliausteliuose privalo b\u016Bti matmuo",InvalidEnv:"Netinkamas aplinkos pavadinimas \u201E%1\u201C",UnknownEnv:"Ne\u017Einoma aplinka '%1'",ExtraCloseLooking:"Per daug riestini\u0173 skliausteli\u0173 ie\u0161kant %1",MissingCloseBracket:"%1 argumente nepavyko rasti u\u017Edaromojo \u201E]\u201C",MissingOrUnrecognizedDelim:"%1 neturi skirtuko arba \u0161is n\u0117ra atpa\u017E\u012Fstamas",MissingDimOrUnits:"%1 neturi matmens arba \u0161iojo vienet\u0173",TokenNotFoundForCommand:"%2 skirto %1 rasti nepavyko",MathNotTerminated:"Teksto langelyje matematikos neaptikta",IllegalMacroParam:"Netinkama makrokomandos parametro nuoroda",MaxBufferSize:"Vir\u0161ytas vidinio \u201EMathJax\u201C buferio dydis; galb\u016Bt vykdomas rekursinis makrokomandos kreipinys?",CommandNotAllowedInEnv:"%1 aplinkoje %2 neleid\u017Eiamas",MultipleLabel:"Apibr\u0117\u017Etas \u017Eymenos \u201E%1\u201C kartojimas",CommandAtTheBeginingOfLine:"%1 privalo b\u016Bti eilut\u0117s prad\u017Eioje",IllegalAlign:"%1 nurodyta netinkama 
lygiuot\u0117",BadMathStyleFor:"Netinkamas %1 matematikos stilius",PositiveIntegerArg:"%1 argumentas privalo b\u016Bti teigiamas sveikasis skai\u010Dius",ErroneousNestingEq:"Klaidingas lyg\u010Di\u0173 sandar\u0173 \u012Fd\u0117jimas",MultlineRowsOneCol:"Eilut\u0117s %1 aplinkoje privalo apimti tiksliai vien\u0105 stulpel\u012F",MultipleBBoxProperty:"%1 kartojamas %2",InvalidBBoxProperty:"\u201E%1\u201C neatrodo kaip spalva, u\u017Epildymo matmuo arba stilius",ExtraEndMissingBegin:"Per daug %1 arba per ma\u017Eai \\begingroup",GlobalNotFollowedBy:"Po %1 neina \\let, \\def arba \\newcommand",UndefinedColorModel:"Neapibr\u0117\u017Etas spalvos modelis \u201E%1\u201C",ModelArg1:"Modelio \u201E%1\u201C spalv\u0173 reik\u0161m\u0117s ra\u0161omos trimis skaitmenimis",InvalidDecimalNumber:"Netinkamas de\u0161imtainis skai\u010Dius",ModelArg2:"Modelio \u201E%1\u201C spalv\u0173 reik\u0161m\u0117s privalo b\u016Bti tarp %2 ir %3",InvalidNumber:"Neleistinas skai\u010Dius",NewextarrowArg1:"Pirmasis %1 argumentas privalo b\u016Bti valdymo sekos pavadinimas",NewextarrowArg2:"Antrasis %1 argumentas privalo b\u016Bti du kableliu skiriami sveikieji skai\u010Diai",NewextarrowArg3:"Tre\u010Diasis %1 argumentas privalo b\u016Bti unikodo ra\u0161mens skai\u010Dius",NoClosingChar:"Nepavyksta rasti u\u017Edaromojo %1",IllegalControlSequenceName:"Netinkamas %1 valdymo sekos pavadinimas",IllegalParamNumber:"%1 nurodytas netinkamas parametr\u0173 skai\u010Dius",MissingCS:"Po %1 privalo eiti valdymo seka",CantUseHash2:"Netinkama \u201E#\u201C vartosena %1 ruo\u0161inyje",SequentialParam:"%1 parametrai numeruotini nuosekliai",MissingReplacementString:"N\u0117ra %1 apibr\u0117\u017Eimo pakaitos eilut\u0117s",MismatchUseDef:"%1 vartosena nesutampa su %1 apibr\u0117\u017Eimu",RunawayArgument:"Nevaldomas %1 argumentas?",NoClosingDelim:"Nepavyksta rasti u\u017Edaromojo %1 skirtuko"}});MathJax.Ajax.loadComplete("[MathJax]/localization/lt/TeX.js"); | PypiClean |
/COMPAS-1.17.5.tar.gz/COMPAS-1.17.5/src/compas/datastructures/attributes.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compas.datastructures._mutablemapping import MutableMapping
__all__ = [
"NodeAttributeView",
"VertexAttributeView",
"FaceAttributeView",
"EdgeAttributeView",
"CellAttributeView",
]
class AttributeView(MutableMapping):
"""Base class for attribute dict views."""
def __init__(self, defaults, attr, custom_only=False):
super(AttributeView, self).__init__()
self.defaults = defaults
self.attr = attr
self.custom_only = custom_only
def __str__(self):
s = []
for k, v in self.items():
s.append("{}: {}".format(repr(k), repr(v)))
return "{" + ", ".join(s) + "}"
def __len__(self):
return len(set(self.defaults).union(self.attr))
def __getitem__(self, name):
if name not in self.attr:
if name not in self.defaults:
                raise KeyError(name)
return self.attr.get(name, self.defaults.get(name))
def __setitem__(self, name, value):
self.attr[name] = value
def __delitem__(self, name):
del self.attr[name]
def __iter__(self):
if self.custom_only:
for name in self.attr:
yield name
else:
names = set(self.defaults).union(self.attr)
for name in names:
yield name
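# Usage sketch for the attribute views defined in this module (example values
# assumed): with defaults = {'color': None} and a per-element dict
# attr = {'x': 1.0}, view['color'] returns the shared default, view['x'] returns
# 1.0, and an assignment such as view['color'] = (255, 0, 0) is written into
# attr only, leaving the defaults untouched.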
class NodeAttributeView(AttributeView):
"""Mutable Mapping that provides a read/write view of the custom attributes of a node
combined with the default attributes of all nodes."""
def __init__(self, defaults, attr, custom_only=False):
super(NodeAttributeView, self).__init__(defaults, attr, custom_only)
class VertexAttributeView(AttributeView):
"""Mutable Mapping that provides a read/write view of the custom attributes of a vertex
combined with the default attributes of all vertices."""
def __init__(self, defaults, attr, custom_only=False):
super(VertexAttributeView, self).__init__(defaults, attr, custom_only)
class EdgeAttributeView(AttributeView):
"""Mutable Mapping that provides a read/write view of the custom attributes of an edge
combined with the default attributes of all edges."""
def __init__(self, defaults, attr, custom_only=False):
super(EdgeAttributeView, self).__init__(defaults, attr, custom_only)
class FaceAttributeView(AttributeView):
"""Mutable Mapping that provides a read/write view of the custom attributes of a face
combined with the default attributes of all faces."""
def __init__(self, defaults, attr, custom_only=False):
super(FaceAttributeView, self).__init__(defaults, attr, custom_only)
class CellAttributeView(AttributeView):
"""Mutable Mapping that provides a read/write view of the custom attributes of a cell
combined with the default attributes of all faces."""
def __init__(self, defaults, attr, custom_only=False):
super(CellAttributeView, self).__init__(defaults, attr, custom_only) | PypiClean |
/Lmrob-1.0.tar.gz/Lmrob-1.0/lmrob/robjects/packages.py | import os
import warnings
from types import ModuleType
from collections import defaultdict
from warnings import warn
import rpy2.rinterface as rinterface
import rpy2.robjects.lib
from . import conversion
from rpy2.robjects.functions import (SignatureTranslatedFunction,
docstring_property,
DocumentedSTFunction)
from rpy2.robjects.constants import NULL
from rpy2.robjects import Environment
from rpy2.robjects.packages_utils import (_libpaths,
get_packagepath,
_packages,
default_symbol_r2python,
default_symbol_check_after,
_map_symbols,
_fix_map_symbols)
import rpy2.robjects.help as rhelp
_require = rinterface.baseenv['require']
_library = rinterface.baseenv['library']
_as_env = rinterface.baseenv['as.environment']
_package_has_namespace = rinterface.baseenv['packageHasNamespace']
_system_file = rinterface.baseenv['system.file']
_get_namespace = rinterface.baseenv['getNamespace']
_get_namespace_version = rinterface.baseenv['getNamespaceVersion']
_get_namespace_exports = rinterface.baseenv['getNamespaceExports']
_loaded_namespaces = rinterface.baseenv['loadedNamespaces']
_globalenv = rinterface.globalenv
_new_env = rinterface.baseenv["new.env"]
StrSexpVector = rinterface.StrSexpVector
# Fetching symbols in the namespace "utils" assumes that "utils" is loaded
# (currently the case by default in R).
_data = rinterface.baseenv['::'](StrSexpVector(('utils', )),
StrSexpVector(('data', )))
_reval = rinterface.baseenv['eval']
_options = rinterface.baseenv['options']
def no_warnings(func):
""" Decorator to run R functions without warning. """
def run_withoutwarnings(*args, **kwargs):
warn_i = _options().do_slot('names').index('warn')
oldwarn = _options()[warn_i][0]
_options(warn = -1)
try:
res = func(*args, **kwargs)
except Exception as e:
# restore the old warn setting before propagating
# the exception up
_options(warn = oldwarn)
raise e
_options(warn = oldwarn)
return res
return run_withoutwarnings
@no_warnings
def _eval_quiet(expr):
return _reval(expr)
# FIXME: should this be part of the API for rinterface ?
# (maybe it is already the case and there is code
# duplication ?)
def reval(string, envir = _globalenv):
""" Evaluate a string as R code
:param string: R code
:type string: a :class:`str`
:param envir: an environment in which the environment should take place (default: R's global environment)
"""
p = rinterface.parse(string)
res = _reval(p, envir = envir)
return res
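# Illustrative call (assumes an embedded R session has been initialised):
#     res = reval('1 + 1')
#     res[0]   # -> 2.0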
def quiet_require(name, lib_loc = None):
""" Load an R package /quietly/ (suppressing messages to the console). """
    if lib_loc is None:
lib_loc = "NULL"
else:
lib_loc = "\"%s\"" % (lib_loc.replace('"', '\\"'))
expr_txt = "suppressPackageStartupMessages(base::require(%s, lib.loc=%s))" \
%(name, lib_loc)
expr = rinterface.parse(expr_txt)
ok = _eval_quiet(expr)
return ok
class PackageData(object):
""" Datasets in an R package.
In R datasets can be distributed with a package.
Datasets can be:
- serialized R objects
- R code (that produces the dataset)
For a given R packages, datasets are stored separately from the rest
of the code and are evaluated/loaded lazily.
The lazy aspect has been conserved and the dataset are only loaded
or generated when called through the method 'fetch()'.
"""
_packagename = None
_lib_loc = None
_datasets = None
def __init__(self, packagename, lib_loc = rinterface.NULL):
self._packagename = packagename
        self._lib_loc = lib_loc
def _init_setlist(self):
_datasets = dict()
# 2D array of information about datatsets
tmp_m = _data(**{'package':StrSexpVector((self._packagename, )),
'lib.loc': self._lib_loc})[2]
nrows, ncols = tmp_m.do_slot('dim')
c_i = 2
for r_i in range(nrows):
_datasets[tmp_m[r_i + c_i * nrows]] = None
        # FIXME: check if instance methods are overridden
self._datasets = _datasets
def names(self):
""" Names of the datasets"""
if self._datasets is None:
self._init_setlist()
return self._datasets.keys()
def fetch(self, name):
""" Fetch the dataset (loads it or evaluates the R associated
with it.
In R, datasets are loaded into the global environment by default
but this function returns an environment that contains the dataset(s).
"""
#tmp_env = rinterface.SexpEnvironment()
if self._datasets is None:
self._init_setlist()
if name not in self._datasets:
raise ValueError('Data set "%s" cannot be found' % name)
env = _new_env()
_data(StrSexpVector((name, )),
**{'package': StrSexpVector((self._packagename, )),
'lib.loc': self._lib_loc,
'envir': env})
return Environment(env)
class Package(ModuleType):
""" Models an R package
(and can do so from an arbitrary environment - with the caution
that locked environments should mostly be considered).
"""
_env = None
__rname__ = None
_translation = None
_rpy2r = None
__fill_rpy2r__ = None
__update_dict__ = None
_exported_names = None
_symbol_r2python = None
__version__ = None
__rdata__ = None
def __init__(self, env, name, translation = {},
exported_names = None, on_conflict = 'fail',
version = None,
symbol_r2python = default_symbol_r2python,
symbol_check_after = default_symbol_check_after):
""" Create a Python module-like object from an R environment,
using the specified translation if defined.
- env: R environment
- name: package name
- translation: `dict` with R names as keys and corresponding Python
names as values
- exported_names: `set` of names/symbols to expose to instance users
- on_conflict: 'fail' or 'warn' (default: 'fail')
- version: version string for the package
- symbol_r2python: function to convert R symbols into Python symbols.
          The default translates `.` into `_`.
- symbol_check_after: function to check the Python symbols obtained
from `symbol_r2python`.
"""
super(Package, self).__init__(name)
self._env = env
self.__rname__ = name
self._translation = translation
mynames = tuple(self.__dict__)
self._rpy2r = {}
if exported_names is None:
exported_names = set(self._env.keys())
self._exported_names = exported_names
self._symbol_r2python = symbol_r2python
self._symbol_check_after = symbol_check_after
self.__fill_rpy2r__(on_conflict = on_conflict)
self._exported_names = self._exported_names.difference(mynames)
self.__version__ = version
def __update_dict__(self, on_conflict = 'fail'):
""" Update the __dict__ according to what is in the R environment """
for elt in self._rpy2r:
del(self.__dict__[elt])
self._rpy2r.clear()
self.__fill_rpy2r__(on_conflict = on_conflict)
def __fill_rpy2r__(self, on_conflict = 'fail'):
""" Fill the attribute _rpy2r.
- on_conflict: 'fail' or 'warn' (default: 'fail')
"""
assert(on_conflict in ('fail', 'warn'))
name = self.__rname__
(symbol_mapping,
conflicts,
resolutions) = _map_symbols(self._env,
translation = self._translation,
symbol_r2python = self._symbol_r2python,
symbol_check_after = self._symbol_check_after)
msg_prefix = 'Conflict when converting R symbols'+\
' in the package "%s"' % self.__rname__ +\
' to Python symbols: \n-'
exception = LibraryError
_fix_map_symbols(symbol_mapping,
conflicts,
on_conflict,
msg_prefix,
exception)
symbol_mapping.update(resolutions)
reserved_pynames = set(dir(self))
for rpyname, rnames in symbol_mapping.items():
# last paranoid check
if len(rnames) > 1:
raise ValueError('Only one R name should be associated with %s (and we have %s)' % (rpyname, str(rnames)))
rname = rnames[0]
if rpyname in reserved_pynames:
raise LibraryError('The symbol ' + rname +\
' in the package "' + name + '"' +\
' is conflicting with' +\
' a Python object attribute')
self._rpy2r[rpyname] = rname
if (rpyname != rname) and (rname in self._exported_names):
self._exported_names.remove(rname)
self._exported_names.add(rpyname)
try:
riobj = self._env[rname]
except rinterface.RRuntimeError as rre:
warn(str(rre))
rpyobj = conversion.ri2ro(riobj)
if hasattr(rpyobj, '__rname__'):
rpyobj.__rname__ = rname
#FIXME: shouldn't the original R name be also in the __dict__ ?
self.__dict__[rpyname] = rpyobj
def __repr__(self):
s = super(Package, self).__repr__()
return 'rpy2.robjects.packages.Package as a ' + s
# alias
STF = SignatureTranslatedFunction
class SignatureTranslatedPackage(Package):
""" R package in which the R functions had their signatures
    'translated' (that is, the named parameters were made to
    conform to Python's rules for variable names)."""
def __fill_rpy2r__(self, on_conflict = 'fail'):
super(SignatureTranslatedPackage, self).__fill_rpy2r__(on_conflict = on_conflict)
for name, robj in self.__dict__.items():
if isinstance(robj, rinterface.Sexp) and robj.typeof == rinterface.CLOSXP:
self.__dict__[name] = STF(self.__dict__[name],
on_conflict = on_conflict,
symbol_r2python = self._symbol_r2python,
symbol_check_after = self._symbol_check_after)
# alias
STP = SignatureTranslatedPackage
class SignatureTranslatedAnonymousPackage(SignatureTranslatedPackage):
def __init__(self, string, name):
env = Environment()
reval(string, env)
super(SignatureTranslatedAnonymousPackage, self).__init__(env,
name)
# alias
STAP = SignatureTranslatedAnonymousPackage
class InstalledSTPackage(SignatureTranslatedPackage):
@docstring_property(__doc__)
def __doc__(self):
doc = list(['Python representation of an R package.'])
if not self.__rname__:
doc.append('<No information available>')
else:
try:
doc.append(rhelp.docstring(self.__rname__,
self.__rname__ + '-package',
sections=['description']))
except rhelp.HelpNotFoundError as hnf:
doc.append('[R help was not found]')
return os.linesep.join(doc)
def __fill_rpy2r__(self, on_conflict = 'fail'):
super(SignatureTranslatedPackage, self).__fill_rpy2r__(on_conflict = on_conflict)
for name, robj in self.__dict__.items():
if isinstance(robj, rinterface.Sexp) and robj.typeof == rinterface.CLOSXP:
self.__dict__[name] = DocumentedSTFunction(self.__dict__[name],
packagename = self.__rname__)
class InstalledPackage(Package):
@docstring_property(__doc__)
def __doc__(self):
doc = list(['Python representation of an R package.',
'R arguments:', ''])
if not self.__rname__:
doc.append('<No information available>')
else:
try:
doc.append(rhelp.docstring(self.__rname__,
self.__rname__ + '-package',
sections=['description']))
except rhelp.HelpNotFoundError as hnf:
doc.append('[R help was not found]')
return os.linesep.join(doc)
class WeakPackage(Package):
"""
    'Weak' R package: looking up a symbol that is not found results in
    a warning (and None being returned) rather than the usual
    `AttributeError`.
"""
def __getattr__(self, name):
        res = self.__dict__.get(name)
if res is None:
warnings.warn("The symbol '%s' is not in this R namespace/package." % name)
return res
class LibraryError(ImportError):
    """ Error occurring when importing an R library """
pass
class InstalledPackages(object):
""" R packages installed. """
def __init__(self, lib_loc=None):
libraryiqr = _library(**{'lib.loc': lib_loc})
lib_results_i = libraryiqr.do_slot('names').index('results')
self.lib_results = libraryiqr[lib_results_i]
self.nrows, self.ncols = self.lib_results.do_slot('dim')
self.colnames = self.lib_results.do_slot('dimnames')[1] # column names
self.lib_packname_i = self.colnames.index('Package')
def isinstalled(self, packagename):
if not isinstance(packagename, rinterface.StrSexpVector):
rname = rinterface.StrSexpVector((packagename, ))
else:
if len(packagename) > 1:
raise ValueError("Only specify one package name at a time.")
rname = packagename
nrows, ncols = self.nrows, self.ncols
lib_results, lib_packname_i = self.lib_results, self.lib_packname_i
for i in range(0+lib_packname_i*nrows,
nrows*(lib_packname_i+1),
1):
if lib_results[i] == packagename:
return True
return False
def __iter__(self):
""" Iterate through rows, yield tuples at each iteration """
lib_results = self.lib_results
nrows, ncols = self.nrows, self.ncols
colrg = range(0, ncols)
for row_i in range(nrows):
yield tuple(lib_results[x*nrows+row_i] for x in colrg)
def isinstalled(name,
lib_loc = None):
"""
Find whether an R package is installed
:param name: name of an R package
:param lib_loc: specific location for the R library (default: None)
:rtype: a :class:`bool`
"""
instapack = InstalledPackages(lib_loc)
return instapack.isinstalled(name)
def importr(name,
lib_loc = None,
robject_translations = {},
signature_translation = True,
suppress_messages = True,
on_conflict = 'fail',
symbol_r2python = default_symbol_r2python,
symbol_check_after = default_symbol_check_after,
data = True):
""" Import an R package.
Arguments:
- name: name of the R package
- lib_loc: specific location for the R library (default: None)
- robject_translations: dict (default: {})
- signature_translation: (True or False)
    - suppress_messages: Suppress messages R usually writes on the console
      (default: True)
- on_conflict: 'fail' or 'warn' (default: 'fail')
- symbol_r2python: function to translate R symbols into Python symbols
- symbol_check_after: function to check the Python symbol obtained
from `symbol_r2python`.
    - data: embed a PackageData object under the attribute
      name __rdata__ (default: True)
Return:
- an instance of class SignatureTranslatedPackage, or of class Package
"""
rname = rinterface.StrSexpVector((name, ))
if suppress_messages:
ok = quiet_require(name, lib_loc = lib_loc)
else:
ok = _require(rinterface.StrSexpVector(rname),
**{'lib.loc': rinterface.StrSexpVector((lib_loc, ))})[0]
if not ok:
raise LibraryError("The R package %s could not be imported" %name)
if _package_has_namespace(rname,
_system_file(package = rname)):
env = _get_namespace(rname)
version = _get_namespace_version(rname)[0]
exported_names = set(_get_namespace_exports(rname))
else:
env = _as_env(rinterface.StrSexpVector(['package:'+name, ]))
exported_names = None
version = None
if signature_translation:
pack = InstalledSTPackage(env, name,
translation = robject_translations,
exported_names = exported_names,
on_conflict = on_conflict,
version = version,
symbol_r2python = symbol_r2python,
symbol_check_after = symbol_check_after)
else:
pack = InstalledPackage(env, name, translation = robject_translations,
exported_names = exported_names,
on_conflict = on_conflict,
version = version,
symbol_r2python = symbol_r2python,
symbol_check_after = symbol_check_after)
if data:
if pack.__rdata__ is not None:
warn('While importing the R package "%s", the rpy2 Package object is masking a translated R symbol "__rdata__" already present' % name)
pack.__rdata__ = PackageData(name, lib_loc = lib_loc)
return pack
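# Example usage (an illustrative sketch, not part of this module; it assumes
# a working R installation with the 'utils' package available):
#
#     from rpy2.robjects.packages import importr
#     utils = importr('utils')
#     # R symbols become attributes; '.' is translated to '_' by default
#     utils.sessionInfo()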
def data(package):
""" Return the PackageData for the given package."""
return package.__rdata__
def wherefrom(symbol, startenv = rinterface.globalenv):
""" For a given symbol, return the environment
this symbol is first found in, starting from 'startenv'.
"""
env = startenv
obj = None
tryagain = True
while tryagain:
try:
obj = env[symbol]
tryagain = False
except LookupError as knf:
env = env.enclos()
if env.rsame(rinterface.emptyenv):
tryagain = False
else:
tryagain = True
return conversion.ri2ro(env) | PypiClean |
/MusicRaft-0.9.7-py3-none-any.whl/musicraft/raft/external.py | import logging
logger = logging.getLogger()
import sys, os, re, subprocess, tempfile, locale
from .. import (Shared, dbg_print, QtGui, QtWidgets, head_dir)
encoding = locale.getpreferredencoding()
class StdTab(QtWidgets.QPlainTextEdit):
"""
    This once very bare-looking class is gradually being embellished with
    facilities such as error-location helpers. It is the class behind the
    several tabs (Abcm2svg etc.) within the subprocess output notebook.
"""
def __init__(self, commander):
QtWidgets.QPlainTextEdit.__init__(self)
# self.setFont(commander.font) # maybe unnecessary - see External.write
dbg_print(1, self.__class__.__name__+':__init__... commander.reMsg =',
commander.reMsg)
self.creMsg = commander.creMsg
self.rowColOrigin = commander.rowColOrigin
self.quiet = False
self.cursorPositionChanged.connect(self.handleCursorMove)
def handleCursorMove(self):
# dbg_print(1, self.__class__.__name__+':handleCursorMove... self.quiet =',
# self.quiet)
if self.quiet or self.creMsg is None:
return
match = self.creMsg.match(self.textCursor().block().text())
# dbg_print(1, self.__class__.__name__+':handleCursorMove... match =', match)
if match is None:
return
location = [o1+o2 for (o1, o2) in zip(
map(lambda s: int(s), match.groups()),
self.rowColOrigin)]
print ("Autolocating error in ABC", location )
if location:
Shared.raft.editBook.moveToRowCol(*location)
def setPlainText(self, text):
self.quiet = True
QtWidgets.QPlainTextEdit.setPlainText(self, text)
self.quiet = False
class External(object):
"""
'External' is the generic class representing command processors invoked from
within abcraft.
"""
fmtNameIn = '%s.in'
fmtNameOut = '%s.out'
exec_dir = ''
exec_file = "base_class_stub_of_exec_file"
showOut = True
reMsg = None # r'$^' # default = don't match any lines.
rowColOrigin = (0, -1)
stdFont = 'Courier New', 10, False, False
useItalic = False
tabName = True # = use class name
lastStdTab = None
def __init__(self):
        # Fix up the executable directory to be relative to the head directory, not the working directory.
        # This behaviour can be circumvented by prefixing the path name with './'.
#
if self.exec_dir and self.exec_dir[0] not in '/\\.':
self.exec_dir = os.path.join(head_dir, self.exec_dir)
self.font = QtGui.QFont(*self.stdFont)
self.creMsg = (self.reMsg is not None and re.compile(self.reMsg)) or None
if self.tabName is True:
self.tabName = self.__class__.__name__
dbg_print(1, f"??adding tab? name='{self.tabName}'")
if self.tabName:
External.lastStdTab = self.stdTab = StdTab(self)
self.stdTab.setFont(self.font)
Shared.raft.stdBook.widget.addTab(self.stdTab, self.tabName)
elif self.tabName is None:
self.stdTab = External.lastStdTab
Shared.raft.stdBook.widget.setCurrentWidget(self.stdTab)
## phasing out... Shared.raft.editBook.fileSaved.connect(self.process)
def cmd(self, *pp, stdout=None, stderr=None, **kw):
answer = ' '.join(((self.exec_dir + self.exec_file),) + pp)
dbg_print(1, "External commands = ", answer)
return answer
def process(self, triggerFileName, **kw):
dbg_print(1, f"process {triggerFileName}")
baseName = os.path.splitext(triggerFileName)[0]
inFileName = (self.fmtNameIn % baseName)
self.outFileName = self.fmtNameOut % baseName
outputfile, errorfile = [tempfile.NamedTemporaryFile(
mode='rb+', suffix='.' + self.__class__.__name__.lower() + '-' + suffix)
for suffix in ('out', 'err')]
ext_cmd = self.cmd(inFileName, self.outFileName, stdout=outputfile, stderr=errorfile, **kw)
dbg_print(1, ext_cmd)
if ext_cmd:
process = subprocess.Popen(ext_cmd, stdout=outputfile, stderr=errorfile, shell=True)
process.wait()
dbg_print(1, f"{self.__class__.__name__}.process 6")
output, error = [(file_.seek(0), file_.read().decode(encoding), file_.close())[1]
for file_ in (outputfile, errorfile)]
dbg_print(1, f"{self.__class__.__name__}.process 7")
output, error = self.fixup(output, error)
self.write(out=self.showOut and output or '', err=error, append=False)
dbg_print(1, f"{self.__class__.__name__}.process 8")
return output
def fixup(self, output, error):
return output, error # hook function for 're-arranging between output and error.. etc.!
def write(self, out='', err='', append=True):
if Shared.bitMaskDebug & 2:
sys.__stdout__.write(out)
if Shared.bitMaskDebug & 4:
sys.__stderr__.write(err)
if self.stdTab is None:
dbg_print(1, "self.stdTab is None!")
return
if not append:
self.stdTab.setPlainText('')
self.stdTab.setFont(self.font) # to cope with stdout/stderr case.
tc = self.stdTab.textCursor()
cf = tc.charFormat()
for blurb, useItalic in ((out, False),(err, True)):
if blurb in ('', '\n'): # unjustifiable kludge, perhaps .. but it has the desired effect!
continue # compensate for extra new line provided by appendPlainText.
cf.setFontItalic(useItalic)
tc.setCharFormat(cf)
self.stdTab.setTextCursor(tc)
self.stdTab.appendPlainText(blurb)
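# Illustrative sketch of how External is typically specialised (the class and
# file names below are hypothetical, not actual musicraft tools): a subclass
# points exec_file at the command to run and describes its input/output names.
#
# class MyConverter(External):
#     exec_file = "myconverter"   # executable, resolved relative to exec_dir
#     fmtNameIn = "%s.abc"        # input name derived from the trigger file's base name
#     fmtNameOut = "%s.svg"       # output name passed as the second argument to cmd()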
class StdOut(External):
tabName = 'System'
class StdErr(StdOut):
tabName = None # = hitch-hike with previously created sibling.
def write(self, out='', err='', append=True):
return StdOut.write(self, out=err, err=out, append=append) | PypiClean |
/Config_Handler-0.1.0-py3-none-any.whl/config_handler/config_file.py | from __future__ import annotations
import os
from abc import ABCMeta
from string import Template
from configparser import ConfigParser
from typing import AnyStr, Dict, TYPE_CHECKING
from config_handler.constants import ConfigKeys
from config_handler.exceptions import ConfigHandlerFileReadException
if TYPE_CHECKING:
from config_handler.handler import ConfigHandler
class ConfigFile(metaclass=ABCMeta):
"""Abstract method for ConfigFile consumers."""
def __init__(self, config_handler: ConfigHandler):
""":param config_handler: a ConfigHandler object
"""
self._config_handler = config_handler
    def _check_config_path_exist(self) -> bool:
        """Check if the config file path exists.
:return: True or False
"""
return self._config_handler.config_path and \
os.path.exists(self._config_handler.config_path)
def _read_config_file(self) -> ConfigParser:
"""Reads contents of the config file for internal purposes.
:return: ConfigParser object
"""
config = ConfigParser()
if self._check_config_path_exist():
config.read(self._config_handler.config_path)
else:
config.read_dict({
ConfigKeys.DEFAULT: {}
})
return config
    def _get_template_path(self) -> AnyStr:
        """Return the path to the config template file.
        Initializes it from the config path if one was not provided in __init__().
:return: path
"""
if not self._config_handler.template_path:
self._config_handler.template_path = \
f'{self._config_handler.config_path}.template'
return self._config_handler.template_path
    def check_config_path(self) -> None:
        """Checks that the config path is set.
:return: None
"""
if not self._config_handler.config_path:
raise ConfigHandlerFileReadException('Config path not set')
    def check_template_path(self) -> None:
        """Checks whether the template path exists.
:return: None
"""
if not os.path.exists(self._get_template_path()):
msg = f'Template file doesn\'t ' \
f'exist: {self._config_handler.template_path}'
raise ConfigHandlerFileReadException(msg)
def read_template_file(self, template_vars: Dict) -> ConfigParser:
"""Reads contents of the config template file.
:param template_vars: variables for inserting into template config
:return: ConfigParser object
"""
with open(self._get_template_path()) as f:
t = Template(f.read())
template_string = t.safe_substitute(**template_vars)
config = ConfigParser()
config.read_string(template_string)
return config
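# Sketch of the string.Template behaviour that read_template_file relies on
# (standard-library semantics, shown with made-up placeholder names):
#
#     from string import Template
#     t = Template("[DEFAULT]\nhost = $host\nport = $port\n")
#     t.safe_substitute(host="localhost")
#     # -> "[DEFAULT]\nhost = localhost\nport = $port\n"
#     # safe_substitute leaves unknown placeholders ($port) untouched instead
#     # of raising, so a partially filled template still parses.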
class Reader(ConfigFile):
"""A ConfigFile reader class."""
def check_config_path(self) -> None:
"""Checks whether the config path exists or not.
:return: None
"""
if not self._config_handler.config_path:
raise ConfigHandlerFileReadException('Config path not set')
if not self._check_config_path_exist():
msg = f'Config file doesn\'t ' \
f'exist: {self._config_handler.config_path}'
raise ConfigHandlerFileReadException(msg)
class Writer(ConfigFile):
"""A ConfigFile writer class."""
    def check_config_path(self) -> None:
        """Checks that the config path is set.
:return: None
"""
if not self._config_handler.config_path:
raise ConfigHandlerFileReadException('Config path not set')
def write_config_file(self, config: ConfigParser) -> None:
"""Writes contents into the config file using ConfigParser lib.
:return: None
"""
with open(self._config_handler.config_path, 'w') as f:
config.write(f) | PypiClean |
/Flask-Saved-1.0.6.tar.gz/Flask-Saved-1.0.6/README.md | flask-saved
flask-saved is a storage extension for Flask.
**Usage**
pip install Flask-Saved
from flask_saved import Storage
storage = Storage(app) or storage = Storage.init_app(app)
storage.save(file)
**Currently implemented storage backends: local (local filesystem) and oss (Aliyun OSS)**
**local configuration options**
STORAGE_LOCAL_BASE_PATH
Description:
The base path for local storage.
For example, an upload directory:
STORAGE_LOCAL_BASE_PATH = 'upload'
A relative path, resolved against the current application's directory.
STORAGE_LOCAL_BASE_PATH = '../upload'
A relative path, resolved against the parent of the current application's directory.
STORAGE_LOCAL_BASE_URL
Description:
The base URL for local storage.
STORAGE_LOCAL_BASE_URL = 'http://picture.domain.com'
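**Example**
A minimal sketch combining the usage and the local options above (how the extension selects a backend is not documented here, so only the documented keys are shown):
from flask import Flask
from flask_saved import Storage
app = Flask(__name__)
app.config['STORAGE_LOCAL_BASE_PATH'] = 'upload'
app.config['STORAGE_LOCAL_BASE_URL'] = 'http://picture.domain.com'
storage = Storage(app)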
**oss configuration options**
STORAGE_OSS_ACCESS_KEY
STORAGE_OSS_SECRET_KEY
STORAGE_OSS_ENDPOINT
STORAGE_OSS_BUCKET
STORAGE_OSS_CNAME
STORAGE_OSS_DOMIAN
STORAGE_OSS_BASE_PATH | PypiClean |
/Akhet-2.0.tar.gz/Akhet-2.0/docs/demo/content.rst | Templates and stylesheets
=========================
The demo's templates and stylesheets are designed to function
in a variety of environments, so you can copy them to your application as a starting
point. The following files are included:
* A home page, *akhet_demo/templates/index.html*
* A site template, *akhet_demo/templates/site.html*
* A stylesheet, *akhet_demo/static/stylesheets/default.css*
* A "reset" stylesheet, *akhet_demo/static/stylesheets/reset.css*
The HTML files are Mako templates. The stylesheets are static files.
index.html
----------
This is a page template, so it contains only the unique parts of this page. The
first three lines are Mako constructs:
.. code-block:: mako
:linenos:
<%inherit file="/site.html" />
<%def name="title()">Hello, ${project}!</%def>
<%def name="ht_title()">${project}</%def>
Line 1 makes the template inherit from the site template, which will add the
site's header and footer. Lines 2 and 3 are Mako methods. They output the body
title (the <h1> at the top of the page) and the head title (the <title> tag)
respectively. Mako templates and methods are not literally Python classes and
methods -- they compile to modules and functions respectively -- but Mako
treats them in a way that's similar to classes and methods.
The "${varname}" syntax is a placeholder which will output the named variable.
Template variables can come from several sources: (1) keys in the view's return
dict, (2) template globals specified in *akhet_demo/subscribers.py*, (3) local
variables defined in the template, (4) built-in Mako variables like ``self``.
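For example, a Pyramid view can supply the ``project`` variable used above
through its return dict (a sketch only -- the demo's actual view code is not
shown here, and the route and renderer names are illustrative):
.. code-block:: python
    from pyramid.view import view_config
    @view_config(route_name='home', renderer='index.html')
    def index_view(request):
        # each key in the returned dict becomes a template variable,
        # e.g. ${project} in index.html
        return {'project': 'akhet_demo'}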
The rest of the file is a big chunk of HTML that will be plugged into the site
template. Mako implicitly puts this chunk in a method named "body", which can
be called from other templates as we'll see in a moment.
Site template
-------------
The site template contains the "complete" HTML document, with
placeholders to plug in content from the page template. The most important
placeholder here is "${self.body()}", which outputs the body of the
highest-level template in the inheritance chain.
Note the difference between calling "${body()}" and "${self.body()}". The
former calls a <%def> method defined in the same template. The latter calls the
highest-level <%def> method with that name in the inheritance chain, which may
be in a different template.
The site template also calls "self.title()" and "self.ht_title()", and defines
default implementations for these methods. The default body title outputs
nothing (resulting in an empty title); the default head title is whatever the
body title returns. So you can just define a "title" in your pages and forget about
"ht_title" if it's the same. But there are times when you'll want to make them
different:
* When the body title contains embedded HTML tags like <em>. The head title
can't contain these because it will display them literally rather than
changing the font.
* Sometimes the body title is too wordy for the head title.
* Many sites want the site's name in the head title. A general rule of thumb is
  "Short Page Title -- Site Name". Or if you're part of a large
organization: "Short Page Title | Site Name | Organization Name". Search
engines pay special attention to the head title, so it should contain all the
essential words that describe the page, and it should be less than sixty or
so characters so it can be displayed in a variety of contexts.
The other kind of placeholder in the site template is "${url.app}", which is
used to form static URLs like "${url.app}/stylesheets.default.css". "url" is
the URL generator, which the subscriber puts into the template namespace.
"url.app" is the application's URL prefix. This is normally empty for a
top-level application mounted at "/". But if the application is mounted at a
sub-URL like "/site1", that will be what "url.app" is set to.
Normally you'd generate URLs by route name, such as "${url('home')}" or its
full form "${url.route('home')}". But static URLs don't have a route name, and
the URL generator does not have a ``static`` method (although you can define
one in a subclass). So we're left with literal URLs relative to the application
prefix.
The template displays flash messages, which a view may have pushed into the
session before redirecting. The code for this is:
.. code-block:: mako
<div id="content">
<div id="flash-messages">
% for message in request.session.pop_flash():
<div class="info">${message}</div>
% endfor
</div>
The stylesheet displays it all pretty-like.
Reset stylesheet
----------------
This is an industry-standard reset stylesheet by Eric Meyer, which is in the
public domain. The original site is http://meyerweb.com/eric/tools/css/reset/ .
It resets all the tag styles to be consistent across browsers.
The top part of the page is Meyer's original stylesheet; the bottom contains
some overrides. Meyer does remove some attributes which have generally
been assumed to be intrinsic to the tag, such as margins around <p> and <h\*>.
His reasoning is that you should start with nothing and consciously re-add the
styles you want. Some people may find this attitude to be overkill. The reset
stylesheet is just provided as a service if you want to use it. In any case, I
have re-added some expected styles, and also set <dt> to boldface which is a
pet peeve of mine.
If you want something with more bells and whistles, some Pyramid developers
recommend `HTML5 Boilerplate`_.
It's also based on Meyer's stylesheet.
We're exploring stylesheet compilers like Less, but this version of the demo
does not include one.
.. _HTML5 Boilerplate: http://html5boilerplate.com/
Default stylesheet
------------------
This is the stylesheet referenced in the page template; it inherits the reset
stylesheet. It defines some styles the default home page needs. You'll probably
want to adjust them for your layout.
The bottom section has styles for flash messages. The ".info" stanza is used by
the demo. The ".warning" and ".error" styles are not used by
the demo but are provided as extras.
| PypiClean |
/AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/fileIO.py | import os
import re
import sys
from copy import deepcopy
from io import IOBase, StringIO
from math import ceil
import numpy as np
from AaronTools import addlogger
from AaronTools.atoms import Atom
from AaronTools.const import ELEMENTS, PHYSICAL, UNIT
from AaronTools.orbitals import Orbitals
from AaronTools.spectra import Frequency, ValenceExcitations
from AaronTools.theory import *
from AaronTools.utils.utils import (
is_alpha,
is_int,
is_num,
float_num,
)
read_types = [
"xyz",
"log",
"com",
"gjf",
"sd",
"sdf",
"mol",
"mol2",
"out",
"dat",
"fchk",
"crest",
"xtb",
"sqmout",
"47",
"31",
"qout",
]
write_types = ["xyz", "com", "inp", "inq", "in", "sqmin", "cube"]
file_type_err = "File type not yet implemented: {}"
NORM_FINISH = "Normal termination"
ORCA_NORM_FINISH = "****ORCA TERMINATED NORMALLY****"
PSI4_NORM_FINISH = "*** Psi4 exiting successfully. Buy a developer a beer!"
ERROR = {
"Convergence failure -- run terminated.": "SCF_CONV",
"Inaccurate quadrature in CalDSu": "CONV_CDS",
"Error termination request processed by link 9999": "CONV_LINK",
"FormBX had a problem": "FBX",
"NtrErr Called from FileIO": "CHK",
"Wrong number of Negative eigenvalues": "EIGEN",
"Erroneous write": "QUOTA",
"Atoms too close": "CLASH",
"The combination of multiplicity": "CHARGEMULT",
"Bend failed for angle": "REDUND",
"Linear angle in Bend": "REDUND",
"Error in internal coordinate system": "COORD",
"galloc: could not allocate memory": "GALLOC",
"Error imposing constraints": "CONSTR",
"End of file reading basis center.": "BASIS_READ",
"Atomic number out of range for .* basis set.": "BASIS",
"Unrecognized atomic symbol": "ATOM",
"malloc failed.": "MEM",
"A syntax error was detected in the input line": "SYNTAX",
"Unknown message": "UNKNOWN",
}
ERROR_ORCA = {
"SCF NOT CONVERGED AFTER": "SCF_CONV",
# ORCA doesn't actually exit if the SCF doesn't converge...
# "CONV_CDS": "",
"The optimization did not converge but reached the maximum number": "OPT_CONV",
# ORCA still prints the normal finish line if opt doesn't converge...
# "FBX": "",
# "CHK": "",
# "EIGEN": "", <- ORCA doesn't seem to have this
# "QUOTA": "",
"Zero distance between atoms": "CLASH", # <- only get an error if atoms are literally on top of each other
"Error : multiplicity": "CHARGEMULT",
# "REDUND": "",
# "REDUND": "",
# "GALLOC": "",
# "CONSTR": "",
"The basis set was either not assigned or not available for this element": "BASIS",
"Element name/number, dummy atom or point charge expected": "ATOM",
"Error (ORCA_SCF): Not enough memory available!": "MEM",
"WARNING: Analytical MP2 frequency calculations": "NUMFREQ",
"WARNING: Analytical Hessians are not yet implemented for meta-GGA functionals": "NUMFREQ",
"ORCA finished with error return": "UNKNOWN",
"UNRECOGNIZED OR DUPLICATED KEYWORD(S) IN SIMPLE INPUT LINE": "TYPO",
}
# some exceptions are listed in https://psicode.org/psi4manual/master/_modules/psi4/driver/p4util/exceptions.html
ERROR_PSI4 = {
"PsiException: Could not converge SCF iterations": "SCF_CONV",
"psi4.driver.p4util.exceptions.SCFConvergenceError: Could not converge SCF iterations": "SCF_CONV",
"OptimizationConvergenceError": "OPT_CONV",
"TDSCFConvergenceError": "TDCF_CONV",
"The INTCO_EXCEPTion handler": "INT_COORD",
# ^ this is basically psi4's FBX
# "CONV_CDS": "",
# "CONV_LINK": "",
# "FBX": "",
# "CHK": "",
# "EIGEN": "", <- psi4 doesn't seem to have this
# "QUOTA": "",
# "ValidationError:": "INPUT", <- generic input error, CHARGEMULT and CLASH would also get caught by this
"qcelemental.exceptions.ValidationError: Following atoms are too close:": "CLASH",
"qcelemental.exceptions.ValidationError: Inconsistent or unspecified chg/mult": "CHARGEMULT",
"MissingMethodError": "INVALID_METHOD",
# "REDUND": "",
# "REDUND": "",
# "GALLOC": "",
# "CONSTR": "",
"psi4.driver.qcdb.exceptions.BasisSetNotFound: BasisSet::construct: Unable to find a basis set for": "BASIS",
"qcelemental.exceptions.NotAnElementError": "ATOM",
"psi4.driver.p4util.exceptions.ValidationError: set_memory()": "MEM",
# ERROR_PSI4[""] = "UNKNOWN",
"Could not converge backtransformation.": "ICOORDS",
}
def step2str(step):
if int(step) == step:
return str(int(step))
else:
return str(step).replace(".", "-")
def str2step(step_str):
if "-" in step_str:
return float(step_str.replace("-", "."))
else:
return float(step_str)
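# Examples of the round-trip between step numbers and their string form
# (derived directly from the two helpers above):
#     step2str(2)     -> "2"
#     step2str(2.5)   -> "2-5"
#     str2step("2-5") -> 2.5
#     str2step("3")   -> 3.0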
def expected_inp_ext(exec_type):
"""
extension expected for an input file for exec_type
Gaussian - .com (.gjf on windows)
ORCA - .inp
Psi4 - .in
SQM - .mdin
qchem - .inp
"""
if exec_type.lower() == "gaussian":
if sys.platform.startswith("win"):
return ".gjf"
return ".com"
if exec_type.lower() == "orca":
return ".inp"
if exec_type.lower() == "psi4":
return ".in"
if exec_type.lower() == "sqm":
return ".mdin"
if exec_type.lower() == "qchem":
return ".inp"
def expected_out_ext(exec_type):
"""
extension expected for an input file for exec_type
Gaussian - .log
ORCA - .out
Psi4 - .out
SQM - .mdout
qchem - .out
"""
if exec_type.lower() == "gaussian":
return ".log"
if exec_type.lower() == "orca":
return ".out"
if exec_type.lower() == "psi4":
return ".out"
if exec_type.lower() == "sqm":
return ".mdout"
if exec_type.lower() == "qchem":
return ".out"
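# For example, per the two helpers above:
#     expected_inp_ext("gaussian") -> ".com" (".gjf" on Windows)
#     expected_out_ext("orca")     -> ".out"
#     expected_inp_ext("sqm")      -> ".mdin"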
class FileWriter:
@classmethod
def write_file(
cls, geom, style=None, append=False, outfile=None, *args, **kwargs
):
"""
Writes file from geometry in the specified style
:geom: the Geometry to use
:style: the file type style to generate
Currently supported options: xyz (default), com, inp, in
if outfile has one of these extensions, default is that style
:append: for *.xyz, append geometry to the same file
:outfile: output destination - default is
[geometry name] + [extension] or [geometry name] + [step] + [extension]
if outfile is False, no output file will be written, but the contents will be returned
        :theory: for com, inp, and in files, a Theory object (i.e. an object with make_header, make_molecule, and make_footer methods)
"""
if isinstance(outfile, str) and style is None:
name, ext = os.path.splitext(outfile)
style = ext.strip(".")
elif style is None:
style = "xyz"
if style.lower() not in write_types:
if style.lower() == "gaussian":
style = "com"
elif style.lower() == "orca":
style = "inp"
elif style.lower() == "psi4":
style = "in"
elif style.lower() == "sqm":
style = "sqmin"
elif style.lower() == "qchem":
style = "inq"
else:
raise NotImplementedError(file_type_err.format(style))
if (
outfile is None
and os.path.dirname(geom.name)
and not os.access(os.path.dirname(geom.name), os.W_OK)
):
os.makedirs(os.path.dirname(geom.name))
if style.lower() == "xyz":
out = cls.write_xyz(geom, append, outfile)
elif style.lower() == "com":
if "theory" in kwargs:
theory = kwargs["theory"]
del kwargs["theory"]
out = cls.write_com(geom, theory, outfile, **kwargs)
else:
raise TypeError(
"when writing 'com/gjf' files, **kwargs must include: theory=Aaron.Theory() (or AaronTools.Theory())"
)
elif style.lower() == "inp":
if "theory" in kwargs:
theory = kwargs["theory"]
del kwargs["theory"]
out = cls.write_inp(geom, theory, outfile, **kwargs)
else:
raise TypeError(
"when writing 'inp' files, **kwargs must include: theory=Aaron.Theory() (or AaronTools.Theory())"
)
elif style.lower() == "in":
if "theory" in kwargs:
theory = kwargs["theory"]
del kwargs["theory"]
out = cls.write_in(geom, theory, outfile, **kwargs)
else:
raise TypeError(
"when writing 'in' files, **kwargs must include: theory=Aaron.Theory() (or AaronTools.Theory())"
)
elif style.lower() == "sqmin":
if "theory" in kwargs:
theory = kwargs["theory"]
del kwargs["theory"]
out = cls.write_sqm(geom, theory, outfile, **kwargs)
else:
raise TypeError(
"when writing 'sqmin' files, **kwargs must include: theory=Aaron.Theory() (or AaronTools.Theory())"
)
elif style.lower() == "inq":
if "theory" in kwargs:
theory = kwargs["theory"]
del kwargs["theory"]
out = cls.write_inq(geom, theory, outfile, **kwargs)
else:
raise TypeError(
"when writing 'inq' files, **kwargs must include: theory=Aaron.Theory() (or AaronTools.Theory())"
)
elif style.lower() == "cube":
out = cls.write_cube(geom, outfile=outfile, **kwargs)
return out
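    # Example usage (an illustrative sketch; `geom` is an AaronTools Geometry
    # and `theory` a Theory object, as described in the docstring above):
    #     FileWriter.write_file(geom, style="xyz", outfile="mol.xyz")
    #     contents = FileWriter.write_file(
    #         geom, style="com", outfile=False, theory=theory
    #     )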
@classmethod
def write_xyz(cls, geom, append, outfile=None):
mode = "a" if append else "w"
fmt = "{:3s} {: 10.5f} {: 10.5f} {: 10.5f}\n"
s = "%i\n" % len(geom.atoms)
s += "%s\n" % geom.comment
for atom in geom.atoms:
s += fmt.format(atom.element, *atom.coords)
if outfile is None:
# if no output file is specified, use the name of the geometry
with open(geom.name + ".xyz", mode) as f:
f.write(s)
elif outfile is False:
# if no output file is desired, just return the file contents
return s.strip()
else:
# write output to the requested destination
with open(outfile, mode) as f:
f.write(s)
return
@classmethod
def write_com(
cls, geom, theory, outfile=None, return_warnings=False, **kwargs
):
"""
write Gaussian input file for given Theory() and Geometry()
geom - Geometry()
theory - Theory()
outfile - None, False, or str
None - geom.name + ".com" is used as output destination
False - return contents of the input file as a str
str - output destination
return_warnings - True to return a list of warnings (e.g. basis
set might be misspelled
kwargs - passed to Theory methods (make_header, make_molecule, etc.)
"""
# get file content string
header, header_warnings = theory.make_header(
geom, return_warnings=True, **kwargs
)
mol, mol_warnings = theory.make_molecule(
geom, return_warnings=True, **kwargs
)
footer, footer_warnings = theory.make_footer(
geom, return_warnings=True, **kwargs
)
s = header + mol + footer
warnings = header_warnings + mol_warnings + footer_warnings
if outfile is None:
# if outfile is not specified, name file in Aaron format
if "step" in kwargs:
outfile = "{}.{}.com".format(geom.name, step2str(kwargs["step"]))
else:
outfile = "{}.com".format(geom.name)
if outfile is False:
if return_warnings:
return s, warnings
return s
else:
fname = os.path.basename(outfile)
name, ext = os.path.splitext(fname)
# could use jinja, but it's one thing...
s = s.replace("{{ name }}", name)
with open(outfile, "w") as f:
f.write(s)
if return_warnings:
return warnings
return
@classmethod
def write_inp(
cls, geom, theory, outfile=None, return_warnings=False, **kwargs
):
"""
write ORCA input file for the given Theory() and Geometry()
geom - Geometry()
theory - Theory()
outfile - None, False, or str
None - geom.name + ".inp" is used as output destination
False - return contents of the input file as a str
str - output destination
return_warnings - True to return a list of warnings (e.g. basis
set might be misspelled
kwargs - passed to Theory methods (make_header, make_molecule, etc.)
"""
fmt = "{:<3s} {: 9.5f} {: 9.5f} {: 9.5f}\n"
header, warnings = theory.make_header(
geom, style="orca", return_warnings=True, **kwargs
)
footer = theory.make_footer(
geom, style="orca", return_warnings=False, **kwargs
)
s = header
for atom in geom.atoms:
s += fmt.format(atom.element, *atom.coords)
s += "*\n"
s += footer
if outfile is None:
# if outfile is not specified, name file in Aaron format
if "step" in kwargs:
outfile = "{}.{}.inp".format(geom.name, step2str(kwargs["step"]))
else:
outfile = "{}.inp".format(geom.name)
if outfile is False:
if return_warnings:
return s, warnings
return s
else:
fname = os.path.basename(outfile)
name, ext = os.path.splitext(fname)
# could use jinja, but it's one thing...
s = s.replace("{{ name }}", name)
with open(outfile, "w") as f:
f.write(s)
if return_warnings:
return warnings
@classmethod
def write_inq(
cls, geom, theory, outfile=None, return_warnings=False, **kwargs
):
"""
write QChem input file for the given Theory() and Geometry()
geom - Geometry()
theory - Theory()
outfile - None, False, or str
None - geom.name + ".inq" is used as output destination
False - return contents of the input file as a str
str - output destination
return_warnings - True to return a list of warnings (e.g. basis
set might be misspelled
kwargs - passed to Theory methods (make_header, make_molecule, etc.)
"""
fmt = "{:<3s} {: 9.5f} {: 9.5f} {: 9.5f}\n"
header, header_warnings = theory.make_header(
geom, style="qchem", return_warnings=True, **kwargs
)
mol, mol_warnings = theory.make_molecule(
geom, style="qchem", return_warnings=True, **kwargs
)
out = header + mol
warnings = header_warnings + mol_warnings
if outfile is None:
# if outfile is not specified, name file in Aaron format
if "step" in kwargs:
outfile = "{}.{}.inq".format(geom.name, step2str(kwargs["step"]))
else:
outfile = "{}.inq".format(geom.name)
if outfile is False:
if return_warnings:
return out, warnings
return out
else:
fname = os.path.basename(outfile)
name, ext = os.path.splitext(fname)
# could use jinja, but it's one thing...
out = out.replace("{{ name }}", name)
with open(outfile, "w") as f:
f.write(out)
if return_warnings:
return warnings
@classmethod
def write_in(
cls, geom, theory, outfile=None, return_warnings=False, **kwargs
):
"""
write Psi4 input file for the given Theory() and Geometry()
geom - Geometry()
theory - Theory()
outfile - None, False, or str
None - geom.name + ".com" is used as output destination
False - return contents of the input file as a str
str - output destination
return_warnings - True to return a list of warnings (e.g. basis
set might be misspelled
kwargs - passed to Theory methods (make_header, make_molecule, etc.)
"""
header, header_warnings = theory.make_header(
geom, style="psi4", return_warnings=True, **kwargs
)
mol, mol_warnings = theory.make_molecule(
geom, style="psi4", return_warnings=True, **kwargs
)
footer, footer_warnings = theory.make_footer(
geom, style="psi4", return_warnings=True, **kwargs
)
s = header + mol + footer
warnings = header_warnings + mol_warnings + footer_warnings
if outfile is None:
# if outfile is not specified, name file in Aaron format
if "step" in kwargs:
outfile = "{}.{}.in".format(geom.name, step2str(kwargs["step"]))
else:
outfile = "{}.in".format(geom.name)
if outfile is False:
if return_warnings:
return s, warnings
return s
else:
fname = os.path.basename(outfile)
name, ext = os.path.splitext(fname)
# could use jinja, but it's one thing...
s = s.replace("{{ name }}", name)
with open(outfile, "w") as f:
f.write(s)
if return_warnings:
return warnings
@classmethod
def write_sqm(
cls, geom, theory, outfile=None, return_warnings=False, **kwargs
):
"""
write SQM input file for the given Theory() and Geometry()
geom - Geometry()
theory - Theory()
outfile - None, False, or str
None - geom.name + ".com" is used as output destination
False - return contents of the input file as a str
str - output destination
return_warnings - True to return a list of warnings (e.g. basis
set might be misspelled
kwargs - passed to Theory methods (make_header, make_molecule, etc.)
"""
header, header_warnings = theory.make_header(
geom, style="sqm", return_warnings=True, **kwargs
)
mol, mol_warnings = theory.make_molecule(
geom, style="sqm", return_warnings=True, **kwargs
)
s = header + mol
warnings = header_warnings + mol_warnings
if outfile is None:
# if outfile is not specified, name file in Aaron format
if "step" in kwargs:
outfile = "{}.{}.com".format(
geom.name, step2str(kwargs["step"])
)
else:
outfile = "{}.com".format(geom.name)
if outfile is False:
if return_warnings:
return s, warnings
return s
else:
fname = os.path.basename(outfile)
name, ext = os.path.splitext(fname)
# could use jinja, but it's one thing...
s = s.replace("{{ name }}", name)
with open(outfile, "w") as f:
f.write(s)
if return_warnings:
return warnings
@classmethod
def write_cube(
cls,
geom,
orbitals=None,
outfile=None,
kind="homo",
padding=4.0,
spacing=0.2,
alpha=True,
xyz=False,
n_jobs=1,
delta=0.1,
**kwargs,
):
"""
write a cube file for a molecular orbital
geom - geometry
orbitals - Orbitals()
outfile - output destination
mo - index of molecular orbital or "homo" for ground state
highest occupied molecular orbital or "lumo" for first
ground state unoccupied MO
can also be an array of MO coefficients
ao - index of atomic orbital to print
padding - padding around geom's coordinates
spacing - targeted spacing between points
n_jobs - number of parallel threads to use
this is on top of NumPy's multithreading, so
if NumPy uses 8 threads and n_jobs=2, you can
expect to see 16 threads in use
delta - see Orbitals.fukui_donor_value or fukui_acceptor_value
"""
if orbitals is None:
raise RuntimeError(
"no Orbitals() instance given to FileWriter.write_cube"
)
n_pts1, n_pts2, n_pts3, v1, v2, v3, com, u = orbitals.get_cube_array(
geom,
standard_axes=xyz,
spacing=spacing,
padding=padding,
)
mo = None
if kind.lower() == "homo":
mo = max(orbitals.n_alpha, orbitals.n_beta) - 1
elif kind.lower() == "lumo":
mo = max(orbitals.n_alpha, orbitals.n_beta)
elif kind.lower().startswith("mo"):
mo = int(kind.split()[-1])
elif kind.lower().startswith("ao"):
mo = np.zeros(orbitals.n_mos)
mo[int(kind.split()[-1])] = 1
s = ""
s += " %s\n" % geom.comment
s += " %s\n" % kind
# the '-' in front of the number of atoms indicates that this is
# MO info so there's an extra data entry between the molecule
# and the function values
bohr_com = com / UNIT.A0_TO_BOHR
if isinstance(mo, int):
s += " -"
else:
s += " "
s += "%i %13.5f %13.5f %13.5f 1\n" % (
len(geom.atoms), *bohr_com,
)
# the basis vectors of cube files are ordered based on the
# spacing between points along that axis
# or maybe it's the number of points?
# we use the first one
for n, v in sorted(
zip([n_pts1, n_pts2, n_pts3], [v1, v2, v3]),
key=lambda p: np.linalg.norm(p[1]),
):
bohr_v = v / UNIT.A0_TO_BOHR
s += " %5i %13.5f %13.5f %13.5f\n" % (
n, *bohr_v
)
        # construct an array of points for the grid
coords, n_list = orbitals.get_cube_points(
n_pts1, n_pts2, n_pts3, v1, v2, v3, com
)
# write the structure in bohr
for atom in geom.atoms:
s += " %5i %13.5f %13.5f %13.5f %13.5f\n" % (
ELEMENTS.index(atom.element),
ELEMENTS.index(atom.element),
atom.coords[0] / UNIT.A0_TO_BOHR,
atom.coords[1] / UNIT.A0_TO_BOHR,
atom.coords[2] / UNIT.A0_TO_BOHR,
)
# extra section - only for MO data
if isinstance(mo, int):
s += " %5i %5i\n" % (1, mo + 1)
# get values for this MO
if kind.lower() == "density":
val = orbitals.density_value(coords, n_jobs=n_jobs)
elif kind.lower() == "fukui donor":
val = orbitals.fukui_donor_value(
coords, n_jobs=n_jobs, delta=delta
)
elif kind.lower() == "fukui acceptor":
val = orbitals.fukui_acceptor_value(
coords, n_jobs=n_jobs, delta=delta
)
elif kind.lower() == "fukui dual":
val = orbitals.fukui_dual_value(
coords, n_jobs=n_jobs, delta=delta
)
else:
val = orbitals.mo_value(mo, coords, n_jobs=n_jobs)
# write to a file
for n1 in range(0, n_list[0]):
for n2 in range(0, n_list[1]):
val_ndx = n1 * n_list[2] * n_list[1] + n2 * n_list[2]
val_subset = val[val_ndx : val_ndx + n_list[2]]
for i, v in enumerate(val_subset):
if abs(v) < 1e-30:
v = 0
s += "%13.5e" % v
if (i + 1) % 6 == 0:
s += "\n"
if (i + 1) % 6 != 0:
s += "\n"
if outfile is None:
# if no output file is specified, use the name of the geometry
with open(geom.name + ".cube", "w") as f:
f.write(s)
elif outfile is False:
# if no output file is desired, just return the file contents
return s
else:
# write output to the requested destination
with open(outfile, "w") as f:
f.write(s)
return
@addlogger
class FileReader:
"""
Attributes:
name ''
file_type ''
comment ''
atoms [Atom]
other {}
"""
LOG = None
LOGLEVEL = "DEBUG"
def __init__(
self,
fname,
get_all=False,
just_geom=True,
freq_name=None,
conf_name=None,
nbo_name=None,
max_length=10000000,
):
"""
:fname: either a string specifying the file name of the file to read
or a tuple of (str(name), str(file_type), str(content))
:get_all: if true, optimization steps are also saved in
self.all_geom; otherwise only saves last geometry
:just_geom: if true, does not store other information, such as
frequencies, only what is needed to construct a Geometry() obj
:freq_name: Name of the file containing the frequency output. Only use
if this information is in a different file than `fname` (eg: xtb runs
using the --hess runtype option)
:nbo_name: Name of the file containing the NBO orbital coefficients
in the AO basis. Only used when reading *.47 files.
        :max_length: maximum array size to store from FCHK files;
            any array that would be larger than this is not stored -
            only its size is kept in its place
"""
# Initialization
self.name = ""
self.file_type = ""
self.comment = ""
self.atoms = []
self.other = {}
self.content = None
self.all_geom = None
# get file name and extention
if isinstance(fname, str):
self.name, self.file_type = os.path.splitext(fname)
self.file_type = self.file_type.lower()[1:]
elif isinstance(fname, (tuple, list)):
self.name = fname[0]
self.file_type = fname[1]
self.content = fname[2]
if self.file_type not in read_types:
raise NotImplementedError(file_type_err.format(self.file_type))
# Fill in attributes with geometry information
if self.content is None:
self.read_file(
get_all, just_geom,
freq_name=freq_name,
conf_name=conf_name,
nbo_name=nbo_name,
max_length=max_length,
)
elif isinstance(self.content, str):
f = StringIO(self.content)
elif isinstance(self.content, IOBase):
f = self.content
if self.content is not None:
if self.file_type == "log":
self.read_log(f, get_all, just_geom)
elif any(self.file_type == ext for ext in ["sd", "sdf", "mol"]):
self.read_sd(f)
elif self.file_type == "xyz":
self.read_xyz(f, get_all)
elif self.file_type == "mol2":
self.read_mol2(f, get_all)
elif any(self.file_type == ext for ext in ["com", "gjf"]):
self.read_com(f)
elif self.file_type == "out":
self.read_orca_out(f, get_all, just_geom)
elif self.file_type == "dat":
self.read_psi4_out(f, get_all, just_geom)
elif self.file_type == "fchk":
self.read_fchk(f, just_geom, max_length=max_length)
elif self.file_type == "crest":
self.read_crest(f, conf_name=conf_name)
elif self.file_type == "xtb":
self.read_xtb(f, freq_name=freq_name)
elif self.file_type == "sqmout":
self.read_sqm(f)
elif self.file_type == "47":
self.read_nbo_47(f, nbo_name=nbo_name)
elif self.file_type == "31":
self.read_nbo_31(f, nbo_name=nbo_name)
elif self.file_type == "qout":
self.read_qchem_out(f, get_all, just_geom)
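    # Example usage (an illustrative sketch):
    #     fr = FileReader("job.log", just_geom=False)
    #     fr.atoms                # Atom objects for the last geometry read
    #     fr.other.get("energy")  # extra parsed data when just_geom=False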
def read_file(
self, get_all=False, just_geom=True,
freq_name=None, conf_name=None, nbo_name=None,
max_length=10000000,
):
"""
Reads geometry information from fname.
Parameters:
get_all If false (default), only keep the last geom
If true, self is last geom, but return list
of all others encountered
nbo_name nbo output file containing coefficients to
map AO's to orbitals
            max_length  max. array size for arrays to store from FCHK
                        files - anything larger is not stored; only its
                        size is kept in its place
"""
if os.path.isfile(self.name):
f = open(self.name)
else:
fname = ".".join([self.name, self.file_type])
fname = os.path.expanduser(fname)
if os.path.isfile(fname):
f = open(fname)
else:
raise FileNotFoundError(
"Error while looking for %s: could not find %s or %s in %s"
% (self.name, fname, self.name, os.getcwd())
)
if self.file_type == "xyz":
self.read_xyz(f, get_all)
elif self.file_type == "log":
self.read_log(f, get_all, just_geom)
elif any(self.file_type == ext for ext in ["com", "gjf"]):
self.read_com(f)
elif any(self.file_type == ext for ext in ["sd", "sdf", "mol"]):
self.read_sd(f)
elif self.file_type == "mol2":
self.read_mol2(f)
elif self.file_type == "out":
self.read_orca_out(f, get_all, just_geom)
elif self.file_type == "dat":
self.read_psi4_out(f, get_all, just_geom)
elif self.file_type == "fchk":
self.read_fchk(f, just_geom, max_length=max_length)
elif self.file_type == "crest":
self.read_crest(f, conf_name=conf_name)
elif self.file_type == "xtb":
self.read_xtb(f, freq_name=freq_name)
elif self.file_type == "sqmout":
self.read_sqm(f)
elif self.file_type == "47":
self.read_nbo_47(f, nbo_name=nbo_name)
elif self.file_type == "31":
self.read_nbo_31(f, nbo_name=nbo_name)
elif self.file_type == "qout":
self.read_qchem_out(f, get_all, just_geom)
f.close()
return
def skip_lines(self, f, n):
for i in range(n):
f.readline()
return
def read_xyz(self, f, get_all=False):
self.all_geom = []
# number of atoms
f.readline()
# comment
self.comment = f.readline().strip()
# atom info
atom_count = 0
for line in f:
line = line.strip()
if line == "":
continue
try:
int(line)
if get_all:
self.all_geom += [
(deepcopy(self.comment), deepcopy(self.atoms))
]
self.comment = f.readline().strip()
self.atoms = []
atom_count = 0
except ValueError:
line = line.split()
atom_count += 1
self.atoms += [Atom(element=line[0], coords=line[1:4], name=str(atom_count))]
# if get_all:
# self.all_geom += [(deepcopy(self.comment), deepcopy(self.atoms))]
def read_sd(self, f, get_all=False):
self.all_geom = []
lines = f.readlines()
progress = 0
for i, line in enumerate(lines):
progress += 1
if "$$$$" in line:
progress = 0
if get_all:
self.all_geom.append(
[deepcopy(self.comment), deepcopy(self.atoms)]
)
continue
if progress == 3:
self.comment = line.strip()
if progress == 4:
counts = line.split()
natoms = int(counts[0])
nbonds = int(counts[1])
if progress == 5:
self.atoms = []
for line in lines[i : i + natoms]:
atom_info = line.split()
self.atoms += [
Atom(element=atom_info[3], coords=atom_info[0:3])
]
for line in lines[i + natoms : i + natoms + nbonds]:
a1, a2 = [int(x) - 1 for x in line.split()[0:2]]
self.atoms[a1].connected.add(self.atoms[a2])
self.atoms[a2].connected.add(self.atoms[a1])
for j, a in enumerate(self.atoms):
a.name = str(j + 1)
self.other["charge"] = 0
for line in lines[i + natoms + nbonds:]:
if "CHG" in line:
self.other["charge"] += int(line.split()[-1])
if "$$$$" in line:
break
def read_mol2(self, f, get_all=False):
"""
read TRIPOS mol2
"""
atoms = []
lines = f.readlines()
i = 0
while i < len(lines):
if lines[i].startswith("@<TRIPOS>MOLECULE"):
self.comment = lines[i + 1]
info = lines[i + 2].split()
n_atoms = int(info[0])
n_bonds = int(info[1])
i += 3
elif lines[i].startswith("@<TRIPOS>ATOM"):
for j in range(0, n_atoms):
i += 1
info = lines[i].split()
# name = info[1]
coords = np.array([float(x) for x in info[2:5]])
element = re.match("([A-Za-z]+)", info[5]).group(1)
atoms.append(
Atom(element=element, coords=coords, name=str(j + 1))
)
self.atoms = atoms
elif lines[i].startswith("@<TRIPOS>BOND"):
for j in range(0, n_bonds):
i += 1
info = lines[i].split()
a1, a2 = [int(ndx) - 1 for ndx in info[1:3]]
self.atoms[a1].connected.add(self.atoms[a2])
self.atoms[a2].connected.add(self.atoms[a1])
i += 1
def read_psi4_out(self, f, get_all=False, just_geom=True):
uv_vis = ""
def get_atoms(f, n):
rv = []
self.skip_lines(f, 1)
n += 2
line = f.readline()
i = 0
mass = 0
while line.strip():
i += 1
line = line.strip()
atom_info = line.split()
element = atom_info[0]
# might be a ghost atom - like for sapt
if "Gh" in element:
element = element.strip("Gh(").strip(")")
coords = np.array([float(x) for x in atom_info[1:-1]])
rv += [Atom(element=element, coords=coords, name=str(i))]
mass += float(atom_info[-1])
line = f.readline()
n += 1
return rv, mass, n
line = f.readline()
n = 1
read_geom = False
while line != "":
if "* O R C A *" in line:
self.file_type = "out"
return self.read_orca_out(
f, get_all=get_all, just_geom=just_geom
)
if "A Quantum Leap Into The Future Of Chemistry" in line:
self.file_type = "qout"
return self.read_qchem_out(
f, get_all=get_all, just_geom=just_geom
)
if line.startswith(" Geometry (in Angstrom), charge"):
if not just_geom:
self.other["charge"] = int(line.split()[5].strip(","))
self.other["multiplicity"] = int(
line.split()[8].strip(":")
)
elif line.strip() == "SCF":
read_geom = True
elif line.strip().startswith("Center") and read_geom:
read_geom = False
if get_all and len(self.atoms) > 0:
if self.all_geom is None:
self.all_geom = []
self.all_geom += [
(deepcopy(self.atoms), deepcopy(self.other))
]
self.atoms, mass, n = get_atoms(f, n)
if not just_geom:
self.other["mass"] = mass
self.other["mass"] *= UNIT.AMU_TO_KG
if just_geom:
line = f.readline()
n += 1
continue
else:
if line.strip().startswith("Total Energy ="):
self.other["energy"] = float(line.split()[-1])
elif line.strip().startswith("Total E0"):
self.other["energy"] = float(line.split()[-2])
elif line.strip().startswith("Correction ZPE"):
self.other["ZPVE"] = float(line.split()[-4])
elif line.strip().startswith("Total ZPE"):
self.other["E_ZPVE"] = float(line.split()[-2])
elif line.strip().startswith("Total H, Enthalpy"):
self.other["enthalpy"] = float(line.split()[-2])
elif line.strip().startswith("Total G, Free"):
self.other["free_energy"] = float(line.split()[-2])
self.other["temperature"] = float(line.split()[-4])
elif "symmetry no. =" in line:
self.other["rotational_symmetry_number"] = int(
line.split()[-1].strip(")")
)
elif (
line.strip().startswith("Rotational constants:")
and line.strip().endswith("[cm^-1]")
and "rotational_temperature" not in self.other
):
self.other["rotational_temperature"] = [
float(x) if is_num(x) else 0
for x in line.split()[-8:-1:3]
]
self.other["rotational_temperature"] = [
x
* PHYSICAL.SPEED_OF_LIGHT
* PHYSICAL.PLANCK
/ PHYSICAL.KB
for x in self.other["rotational_temperature"]
]
elif line.startswith(" Vibration "):
freq_str = ""
while not line.strip().startswith("=="):
freq_str += line
line = f.readline()
n += 1
self.other["frequency"] = Frequency(
freq_str, hpmodes=False, style="psi4"
)
elif PSI4_NORM_FINISH in line:
self.other["finished"] = True
elif line.startswith(" Convergence Criteria"):
# for tolerances:
# psi4 puts '*' next to converged values and 'o' in place of things that aren't monitored
grad = {}
dE_tol = line[24:38]
if "o" in dE_tol:
dE_tol = None
else:
dE_tol = dE_tol.split()[0]
max_f_tol = line[38:52]
if "o" in max_f_tol:
max_f_tol = None
else:
max_f_tol = max_f_tol.split()[0]
rms_f_tol = line[52:66]
if "o" in rms_f_tol:
rms_f_tol = None
else:
rms_f_tol = rms_f_tol.split()[0]
max_d_tol = line[66:80]
if "o" in max_d_tol:
max_d_tol = None
else:
max_d_tol = max_d_tol.split()[0]
rms_d_tol = line[80:94]
if "o" in rms_d_tol:
rms_d_tol = None
else:
rms_d_tol = rms_d_tol.split()[0]
line = f.readline()
line = f.readline()
n += 2
# for convergence:
# psi4 puts '*' next to converged values and 'o' next to things that aren't monitored
if dE_tol is not None:
dE_conv = line[24:38]
dE = float(dE_conv.split()[0])
grad["Delta E"] = {}
grad["Delta E"]["value"] = dE
grad["Delta E"]["converged"] = "*" in dE_conv
if max_f_tol is not None:
max_f_conv = line[38:52]
max_f = float(max_f_conv.split()[0])
grad["Max Force"] = {}
grad["Max Force"]["value"] = max_f
grad["Max Force"]["converged"] = "*" in max_f_conv
if rms_f_tol is not None:
rms_f_conv = line[52:66]
rms_f = float(rms_f_conv.split()[0])
grad["RMS Force"] = {}
grad["RMS Force"]["value"] = rms_f
grad["RMS Force"]["converged"] = "*" in rms_f_conv
if max_d_tol is not None:
max_d_conv = line[66:80]
max_d = float(max_d_conv.split()[0])
grad["Max Disp"] = {}
grad["Max Disp"]["value"] = max_d
grad["Max Disp"]["converged"] = "*" in max_d_conv
if rms_d_tol is not None:
rms_d_conv = line[80:94]
rms_d = float(rms_d_conv.split()[0])
grad["RMS Disp"] = {}
grad["RMS Disp"]["value"] = rms_d
                        grad["RMS Disp"]["converged"] = "*" in rms_d_conv
self.other["gradient"] = grad
elif "Total Gradient" in line:
gradient = np.zeros((len(self.atoms), 3))
self.skip_lines(f, 2)
n += 2
for i in range(0, len(self.atoms)):
n += 1
line = f.readline()
info = line.split()
gradient[i] = np.array([float(x) for x in info[1:]])
self.other["forces"] = -gradient
elif "SAPT Results" in line:
self.skip_lines(f, 1)
n += 1
while "Total sSAPT" not in line:
n += 1
line = f.readline()
if "---" in line:
break
if len(line.strip()) > 0:
if "Special recipe" in line:
continue
item = line[:26].strip()
val = 1e-3 * float(line[34:47])
self.other[item] = val
elif "SCF energy" in line:
self.other["SCF energy"] = float(line.split()[-1])
elif "correlation energy" in line and "=" in line:
item = line.split("=")[0].strip()
self.other[item] = float(line.split()[-1])
elif "Full point group" in line:
self.other["full_point_group"] = line.split()[-1]
elif "Molecular point group" in line:
self.other["molecular_point_group"] = line.split()[-1]
elif (
"total energy" in line
and "=" in line
or re.search("\(.\) energy", line)
):
item = line.split("=")[0].strip().strip("*").strip()
self.other[item] = float(line.split()[-1])
# hopefully the highest level energy gets printed last
self.other["energy"] = self.other[item]
elif "Total Energy" in line and "=" in line:
item = line.split("=")[0].strip().strip("*").strip()
self.other[item] = float(line.split()[-2])
# hopefully the highest level energy gets printed last
self.other["energy"] = self.other[item]
elif "Correlation Energy" in line and "=" in line:
item = line.split("=")[0].strip().strip("*").strip()
if "DFT Exchange-Correlation" in item:
self.other[item] = float(line.split()[-1])
else:
self.other[item] = float(line.split()[-2])
elif "Ground State -> Excited State Transitions" in line:
self.skip_lines(f, 3)
n += 3
line = f.readline()
s = ""
while line.strip():
s += line
n += 1
line = f.readline()
self.other["uv_vis"] = ValenceExcitations(s, style="psi4")
elif "Excitation Energy" in line and "Rotatory" in line:
self.skip_lines(f, 2)
n += 2
line = f.readline()
s = ""
while line.strip():
s += line
n += 1
line = f.readline()
self.other["uv_vis"] = ValenceExcitations(s, style="psi4")
elif re.search("\| State\s*\d+", line):
# read energies from property calculation
uv_vis += line
elif "Excited state properties:" in line:
# read osc str or rotation from property calculation
while line.strip():
uv_vis += line
n += 1
line = f.readline()
if "Oscillator" in uv_vis or "Rotation" in uv_vis:
self.other["uv_vis"] = ValenceExcitations(uv_vis, style="psi4")
if "error" not in self.other:
for err in ERROR_PSI4:
if err in line:
self.other["error"] = ERROR_PSI4[err]
self.other["error_msg"] = line.strip()
line = f.readline()
n += 1
def read_orca_out(self, f, get_all=False, just_geom=True):
"""read orca output file"""
nrg_regex = re.compile("(?:[A-Za-z]+\s+)?E\((.*)\)\s*\.\.\.\s*(.*)$")
def add_grad(grad, name, line):
grad[name] = {}
grad[name]["value"] = line.split()[-3]
grad[name]["converged"] = line.split()[-1] == "YES"
def get_atoms(f, n):
"""parse atom info"""
rv = []
self.skip_lines(f, 1)
n += 2
line = f.readline()
i = 0
while line.strip():
i += 1
line = line.strip()
atom_info = line.split()
element = atom_info[0]
coords = np.array([float(x) for x in atom_info[1:]])
rv += [Atom(element=element, coords=coords, name=str(i))]
line = f.readline()
n += 1
return rv, n
line = f.readline()
n = 1
while line != "":
if (
"Psi4: An Open-Source Ab Initio Electronic Structure Package"
in line
):
self.file_type = "dat"
return self.read_psi4_out(
f, get_all=get_all, just_geom=just_geom
)
if (
"A Quantum Leap Into The Future Of Chemistry"
in line
):
self.file_type = "qout"
return self.read_qchem_out(
f, get_all=get_all, just_geom=just_geom
)
if line.startswith("CARTESIAN COORDINATES (ANGSTROEM)"):
if get_all and len(self.atoms) > 0:
if self.all_geom is None:
self.all_geom = []
self.all_geom += [
(deepcopy(self.atoms), deepcopy(self.other))
]
self.atoms, n = get_atoms(f, n)
if just_geom:
line = f.readline()
n += 1
continue
else:
nrg = nrg_regex.match(line)
if nrg is not None:
nrg_type = nrg.group(1)
# for some reason, ORCA prints MP2 correlation energy
# as E(MP2) for CC jobs
if nrg_type == "MP2":
nrg_type = "MP2 CORR"
self.other["E(%s)" % nrg_type] = float(nrg.group(2))
if line.startswith("FINAL SINGLE POINT ENERGY"):
# if the wavefunction doesn't converge, ORCA prints a message next
# to the energy so we can't use line.split()[-1]
self.other["energy"] = float(line.split()[4])
if line.startswith("TOTAL SCF ENERGY"):
self.skip_lines(f, 2)
line = f.readline()
n += 3
self.other["SCF energy"] = float(line.split()[3])
elif "TOTAL ENERGY:" in line:
item = line.split()[-5] + " energy"
self.other[item] = float(line.split()[-2])
elif "CORRELATION ENERGY" in line and "Eh" in line:
item = line.split()[-6] + " correlation energy"
self.other[item] = float(line.split()[-2])
elif re.match("E\(\S+\)\s+...\s+-?\d+\.\d+$", line):
nrg = re.match("(E\(\S+\))\s+...\s+(-?\d+\.\d+)$", line)
self.other["energy"] = float(nrg.group(2))
self.other[nrg.group(1)] = float(nrg.group(2))
elif line.startswith("CARTESIAN GRADIENT"):
gradient = np.zeros((len(self.atoms), 3))
self.skip_lines(f, 2)
n += 2
for i in range(0, len(self.atoms)):
n += 1
line = f.readline()
# orca prints a warning before gradient if some
# coordinates are constrained
if line.startswith("WARNING:"):
continue
info = line.split()
gradient[i] = np.array([float(x) for x in info[3:]])
self.other["forces"] = -gradient
elif line.startswith("VIBRATIONAL FREQUENCIES"):
stage = "frequencies"
freq_str = "VIBRATIONAL FREQUENCIES\n"
self.skip_lines(f, 4)
n += 5
line = f.readline()
while not (stage == "THERMO" and line == "\n") and line:
if "--" not in line and line != "\n":
freq_str += line
if "NORMAL MODES" in line:
stage = "modes"
self.skip_lines(f, 6)
n += 6
if "RAMAN SPECTRUM" in line:
stage = "RAMAN"
self.skip_lines(f, 2)
n += 2
if "IR SPECTRUM" in line:
stage = "IR"
self.skip_lines(f, 2)
n += 2
if "THERMOCHEMISTRY" in line:
stage = "THERMO"
n += 1
line = f.readline()
self.other["frequency"] = Frequency(
freq_str, hpmodes=False, style="orca"
)
elif line.startswith("Temperature"):
self.other["temperature"] = float(line.split()[2])
elif line.startswith("Total Mass"):
# this may only get printed for freq jobs
self.other["mass"] = float(line.split()[3])
self.other["mass"] *= UNIT.AMU_TO_KG
elif line.startswith(" Total Charge"):
self.other["charge"] = int(line.split()[-1])
elif line.startswith(" Multiplicity"):
self.other["multiplicity"] = int(line.split()[-1])
elif "rotational symmetry number" in line:
# TODO: make this cleaner
self.other["rotational_symmetry_number"] = int(
line.split()[-2]
)
elif "Symmetry Number:" in line:
self.other["rotational_symmetry_number"] = int(
line.split()[-1]
)
elif line.startswith("Zero point energy"):
self.other["ZPVE"] = float(line.split()[4])
elif line.startswith("Total Enthalpy"):
self.other["enthalpy"] = float(line.split()[3])
elif line.startswith("Final Gibbs"):
# NOTE - Orca seems to only print Grimme's Quasi-RRHO free energy
# RRHO can be computed in AaronTools' CompOutput by setting w0 to 0
self.other["free_energy"] = float(line.split()[5])
elif line.startswith("Rotational constants in cm-1:"):
# orca doesn't seem to print rotational constants in older versions
self.other["rotational_temperature"] = [
float(x) for x in line.split()[-3:]
]
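# convert rotational constants (cm^-1) to rotational temperatures (K)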
self.other["rotational_temperature"] = [
x
* PHYSICAL.SPEED_OF_LIGHT
* PHYSICAL.PLANCK
/ PHYSICAL.KB
for x in self.other["rotational_temperature"]
]
elif "Point Group:" in line:
self.other["full_point_group"] = line.split()[2][:-1]
elif "Symmetry Number" in line:
self.other["rotational_symmetry_number"] = int(
line.split()[-1]
)
elif "sn is the rotational symmetry number" in line:
# older versions of orca print this differently
self.other["rotational_symmetry_number"] = int(
line.split()[-2]
)
elif "Geometry convergence" in line:
grad = {}
self.skip_lines(f, 2)
n += 3
line = f.readline()
while line and re.search("\w", line):
if re.search("Energy\schange", line):
add_grad(grad, "Delta E", line)
elif re.search("RMS\sgradient", line):
add_grad(grad, "RMS Force", line)
elif re.search("MAX\sgradient", line):
add_grad(grad, "Max Force", line)
elif re.search("RMS\sstep", line):
add_grad(grad, "RMS Disp", line)
elif re.search("MAX\sstep", line):
add_grad(grad, "Max Disp", line)
line = f.readline()
n += 1
self.other["gradient"] = grad
elif "MAYER POPULATION ANALYSIS" in line:
self.skip_lines(f, 2)
n += 2
line = f.readline()
data = dict()
headers = []
while line.strip():
info = line.split()
header = info[0]
name = " ".join(info[2:])
headers.append(header)
data[header] = (name, [])
line = f.readline()
self.skip_lines(f, 1)
n += 1
for i in range(0, len(self.atoms)):
line = f.readline()
info = line.split()[2:]
for header, val in zip(headers, info):
data[header][1].append(float(val))
for header in headers:
self.other[data[header][0]] = np.array(data[header][1])
elif line.startswith("LOEWDIN ATOMIC CHARGES"):
self.skip_lines(f, 1)
n += 1
charges = np.zeros(len(self.atoms))
for i in range(0, len(self.atoms)):
line = f.readline()
n += 1
charges[i] = float(line.split()[-1])
self.other["Löwdin Charges"] = charges
elif line.startswith("BASIS SET IN INPUT FORMAT"):
# read basis set primitive info
self.skip_lines(f, 3)
n += 3
line = f.readline()
n += 1
self.other["basis_set_by_ele"] = dict()
while "--" not in line and line != "":
new_gto = re.search("NewGTO\s+(\S+)", line)
if new_gto:
ele = new_gto.group(1)
line = f.readline()
n += 1
primitives = []
while "end" not in line and line != "":
shell_type, n_prim = line.split()
n_prim = int(n_prim)
exponents = []
con_coeffs = []
for i in range(0, n_prim):
line = f.readline()
n += 1
info = line.split()
exponent = float(info[1])
con_coeff = [float(x) for x in info[2:]]
exponents.append(exponent)
con_coeffs.extend(con_coeff)
primitives.append(
(
shell_type,
n_prim,
exponents,
con_coeffs,
)
)
line = f.readline()
n += 1
self.other["basis_set_by_ele"][ele] = primitives
line = f.readline()
n += 1
elif "EXCITED STATES" in line or re.search("STEOM.* RESULTS", line) or line.startswith("APPROXIMATE EOM LHS"):
s = ""
done = False
while not done:
s += line
n += 1
line = f.readline()
if (
"ORCA-CIS/TD-DFT FINISHED WITHOUT ERROR" in line or
re.search("TDM done", line) or
"TIMINGS" in line or
line == ""
):
done = True
self.other["uv_vis"] = ValenceExcitations(s, style="orca")
elif line.startswith("MOLECULAR ORBITALS"):
# read molecular orbitals
self.skip_lines(f, 1)
n += 1
line = f.readline()
self.other["alpha_coefficients"] = []
self.other["beta_coefficients"] = []
self.other["alpha_nrgs"] = []
self.other["beta_nrgs"] = []
self.other["alpha_occupancies"] = []
self.other["beta_occupancies"] = []
at_info = re.compile(
"\s*(\d+)\S+\s+\d+(?:s|p[xyz]|d(?:z2|xz|yz|x2y2|xy)|[fghi][\+\-]?\d+)"
)
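# unrestricted calculations print separate alpha and beta orbital blocks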
if self.other["multiplicity"] != 1:
args = [
("alpha_coefficients", "beta_coefficients"),
("alpha_nrgs", "beta_nrgs"),
("alpha_occupancies", "beta_occupancies"),
]
else:
args = [
("alpha_coefficients",),
("alpha_nrgs",),
("alpha_occupancies",),
]
for coeff_name, nrg_name, occ_name in zip(*args):
self.other["shell_to_atom"] = []
mo_coefficients = []
orbit_nrgs = []
occupancy = []
while line.strip() != "":
at_match = at_info.match(line)
if at_match:
ndx = int(at_match.group(1))
self.other["shell_to_atom"].append(ndx)
coeffs = []
# there might not always be a space between the coefficients
# so we can't just split(), but they are formatted(-ish)
for coeff in re.findall("-?\d+\.\d\d\d\d\d\d", line[16:]):
coeffs.append(float(coeff))
for coeff, mo in zip(coeffs, mo_coefficients):
mo.append(coeff)
elif "--" not in line:
orbit_nrgs = occupancy
occupancy = [float(x) for x in line.split()]
elif "--" in line:
self.other[nrg_name].extend(orbit_nrgs)
self.other[occ_name].extend(occupancy)
if mo_coefficients:
self.other[coeff_name].extend(
mo_coefficients
)
if not all(
len(coeff) == len(mo_coefficients[0])
for coeff in mo_coefficients
):
self.LOG.warning(
"orbital coefficients may not "
"have been parsed correctly"
)
mo_coefficients = [[] for x in orbit_nrgs]
orbit_nrgs = []
line = f.readline()
n += 1
self.other[coeff_name].extend(mo_coefficients)
line = f.readline()
elif line.startswith("N(Alpha) "):
self.other["n_alpha"] = int(
np.rint(float(line.split()[2]))
)
elif line.startswith("N(Beta) "):
self.other["n_beta"] = int(np.rint(float(line.split()[2])))
elif ORCA_NORM_FINISH in line:
self.other["finished"] = True
# TODO E_ZPVE
if "error" not in self.other:
for err in ERROR_ORCA:
if err in line:
self.other["error"] = ERROR_ORCA[err]
self.other["error_msg"] = line.strip()
break
line = f.readline()
n += 1
if not just_geom:
if "finished" not in self.other:
self.other["finished"] = False
if (
"alpha_coefficients" in self.other
and "basis_set_by_ele" in self.other
):
self.other["orbitals"] = Orbitals(self)
def read_qchem_out(self, f, get_all=False, just_geom=True):
"""read qchem output file"""
def get_atoms(f, n):
"""parse atom info"""
rv = []
self.skip_lines(f, 2)
n += 1
line = f.readline()
i = 0
while "--" not in line:
i += 1
line = line.strip()
atom_info = line.split()
element = atom_info[1]
coords = np.array([float(x) for x in atom_info[2:5]])
rv += [Atom(element=element, coords=coords, name=str(i))]
line = f.readline()
n += 1
return rv, n
def add_grad(grad, name, line):
grad[name] = {}
grad[name]["value"] = line.split()[-3]
grad[name]["converged"] = line.split()[-1] == "YES"
line = f.readline()
n = 1
while line != "":
if (
"Psi4: An Open-Source Ab Initio Electronic Structure Package"
in line
):
self.file_type = "dat"
return self.read_psi4_out(
f, get_all=get_all, just_geom=just_geom
)
if "* O R C A *" in line:
self.file_type = "out"
return self.read_orca_out(
f, get_all=get_all, just_geom=just_geom
)
if (
"A Quantum Leap Into The Future Of Chemistry"
in line
):
self.file_type = "qout"
return self.read_qchem_out(
f, get_all=get_all, just_geom=just_geom
)
if "Standard Nuclear Orientation (Angstroms)" in line:
if get_all and len(self.atoms) > 0:
if self.all_geom is None:
self.all_geom = []
self.all_geom += [
(deepcopy(self.atoms), deepcopy(self.other))
]
self.atoms, n = get_atoms(f, n)
if just_geom:
line = f.readline()
n += 1
continue
else:
if "energy in the final basis set" in line:
self.other["energy"] = float(line.split()[-1])
if "SCF" in line:
self.other["scf_energy"] = self.other["energy"]
if re.search(r"energy\s+=\s+-?\d+\.\d+", line):
info = re.search(r"\s*([\S\s]+)\s+energy\s+=\s+(-?\d+\.\d+)", line)
kind = info.group(1)
if len(kind.split()) <= 2:
val = float(info.group(2))
if "correlation" not in kind and len(kind.split()) <= 2:
self.other["E(%s)" % kind.split()[0]] = val
self.other["energy"] = val
else:
self.other["E(corr)(%s)" % kind.split()[0]] = val
if "Total energy:" in line:
self.other["energy"] = float(line.split()[-2])
#MPn energy is printed as EMPn(SDQ)
if re.search("EMP\d(?:[A-Z]+)?\s+=\s*-?\d+.\d+$", line):
self.other["energy"] = float(line.split()[-1])
self.other["E(%s)" % line.split()[0][1:]] = self.other["energy"]
if "Molecular Point Group" in line:
self.other["full_point_group"] = line.split()[3]
if "Largest Abelian Subgroup" in line:
self.other["abelian_subgroup"] = line.split()[3]
if "Ground-State Mulliken Net Atomic Charges" in line:
charges = []
self.skip_lines(f, 3)
n += 2
line = f.readline()
while "--" not in line:
charge = float(line.split()[-1])
charges.append(charge)
line = f.readline()
n += 1
self.other["Mulliken Charges"] = charges
if "Cnvgd?" in line:
grad = {}
line = f.readline()
while line and re.search("\w", line):
if re.search("Energy\schange", line):
add_grad(grad, "Delta E", line)
elif re.search("Displacement", line):
add_grad(grad, "Disp", line)
elif re.search("Gradient", line):
add_grad(grad, "Max Disp", line)
line = f.readline()
n += 1
self.other["gradient"] = grad
if "VIBRATIONAL ANALYSIS" in line:
freq_str = ""
self.skip_lines(f, 10)
n += 9
line = f.readline()
while "STANDARD THERMODYNAMIC QUANTITIES" not in line:
n += 1
freq_str += line
line = f.readline()
self.other["frequency"] = Frequency(
freq_str, style="qchem",
)
self.other["temperature"] = float(line.split()[4])
if "Rotational Symmetry Number is" in line:
self.other["rotational_symmetry_number"] = int(line.split()[-1])
if "Molecular Mass:" in line:
self.other["mass"] = float(line.split()[-2]) * UNIT.AMU_TO_KG
if "$molecule" in line.lower():
line = f.readline()
while "$end" not in line.lower() and line:
if re.search("\d+\s+\d+", line):
match = re.search("^\s*(\d+)\s+(\d+)\s*$", line)
self.other["charge"] = int(match.group(1))
self.other["multiplicity"] = int(match.group(2))
break
line = f.readline()
if "Principal axes and moments of inertia" in line:
self.skip_lines(f, 1)
line = f.readline()
rot_consts = np.array([
float(x) for x in line.split()[2:]
])
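# convert principal moments of inertia to rotational temperatures: theta = h^2 / (8 pi^2 I kB)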
rot_consts *= UNIT.AMU_TO_KG
rot_consts *= UNIT.A0_TO_BOHR ** 2
rot_consts *= 1e-20
rot_consts = PHYSICAL.PLANCK ** 2 / (8 * np.pi ** 2 * rot_consts * PHYSICAL.KB)
self.other["rotational_temperature"] = rot_consts
if line.startswith("Mult"):
self.other["multiplicity"] = int(line.split()[1])
# TD-DFT excitations
if re.search("TDDFT.* Excitation Energies", line):
excite_s = ""
self.skip_lines(f, 2)
line = f.readline()
n += 3
while "---" not in line and line:
excite_s += line
line = f.readline()
n += 1
self.other["uv_vis"] = ValenceExcitations(
excite_s, style="qchem",
)
# ADC excitations
if re.search("Excited State Summary", line):
excite_s = ""
self.skip_lines(f, 2)
line = f.readline()
n += 3
while "===" not in line and line:
excite_s += line
line = f.readline()
n += 1
self.other["uv_vis"] = ValenceExcitations(
excite_s, style="qchem",
)
# EOM excitations
if re.search("Start computing the transition properties", line):
excite_s = ""
line = f.readline()
n += 1
while "All requested transition properties have been computed" not in line and line:
excite_s += line
line = f.readline()
n += 1
self.other["uv_vis"] = ValenceExcitations(
excite_s, style="qchem",
)
if "Thank you very much for using Q-Chem" in line:
self.other["finished"] = True
line = f.readline()
n += 1
if not just_geom and "finished" not in self.other:
self.other["finished"] = False
def read_log(self, f, get_all=False, just_geom=True):
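"""read Gaussian output file"""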
def get_atoms(f, n):
rv = self.atoms
self.skip_lines(f, 4)
line = f.readline()
n += 5
atnum = 0
while "--" not in line:
line = line.strip()
line = line.split()
for l in line:
try:
float(l)
except ValueError:
msg = "Error detected with log file on line {}"
raise IOError(msg.format(n))
try:
rv[atnum].coords = np.array(line[3:], dtype=float)
except IndexError:
pass
#print(atnum)
atnum += 1
line = f.readline()
n += 1
return rv, n
def get_input(f, n):
rv = []
line = f.readline()
n += 1
match = re.search(
"Charge\s*=\s*(-?\d+)\s*Multiplicity\s*=\s*(\d+)", line
)
if match is not None:
self.other["charge"] = int(match.group(1))
self.other["multiplicity"] = int(match.group(2))
line = f.readline()
n += 1
a = 0
while len(line.split()) > 1:
line = line.split()
if len(line) == 5:
flag = not(bool(line[1]))
a += 1
rv += [Atom(element=line[0], flag=flag, coords=line[2:], name=str(a))]
elif len(line) == 4:
a += 1
rv += [Atom(element=line[0], coords=line[1:], name=str(a))]
line = f.readline()
n += 1
return rv, n
def get_params(f, n):
rv = []
self.skip_lines(f, 2)
n += 3
line = f.readline()
if "Definition" in line:
definition = True
else:
definition = False
self.skip_lines(f, 1)
n += 2
line = f.readline()
while "--" not in line:
line = line.split()
param = line[1]
if definition:
val = float(line[3])
else:
val = float(line[2])
rv.append((param, val))
line = f.readline()
n += 1
return rv, n
def get_modredundant(f, n):
"""read constraints for modredundant section"""
rv = {}
line = f.readline()
n += 1
while line.strip():
atom_match = re.search("X\s+(\d+)\s+F", line)
bond_match = re.search("B\s+(\d+)\s+(\d+)\s+F", line)
angle_match = re.search("A\s+(\d+)\s+(\d+)\s+(\d+)\s+F", line)
torsion_match = re.search(
"D\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+F", line
)
if atom_match:
if "atoms" not in rv:
rv["atoms"] = ""
else:
rv["atoms"] += ","
rv["atoms"] += atom_match.group(1)
elif bond_match:
if "bonds" not in rv:
rv["bonds"] = []
rv["bonds"].append(
",".join([bond_match.group(1), bond_match.group(2)])
)
elif angle_match:
if "angles" not in rv:
rv["angles"] = []
rv["angles"].append(
",".join(
[
angle_match.group(1),
angle_match.group(2),
angle_match.group(3),
]
)
)
elif torsion_match:
if "torsions" not in rv:
rv["torsions"] = []
rv["torsions"].append(
",".join(
[
torsion_match.group(1),
torsion_match.group(2),
torsion_match.group(3),
torsion_match.group(4),
]
)
)
line = f.readline()
n += 1
return rv, n
self.all_geom = []
line = f.readline()
self.other["archive"] = ""
constraints = {}
self.other["opt_steps"] = 0
found_archive = False
n = 1
route = None
while line != "":
# route
# we need to grab the route b/c sometimes 'hpmodes' can get split onto multiple lines:
# B3LYP/genecp EmpiricalDispersion=GD3 int=(grid=superfinegrid) freq=(h
# pmodes,noraman,temperature=313.15)
if line.strip().startswith("#") and route is None:
route = ""
while "------" not in line:
route += line.strip()
n += 1
line = f.readline()
# archive entry
if line.strip().startswith("1\\1\\"):
found_archive = True
line = "@" + line.strip()[4:]
if found_archive and line.strip().endswith("@"):
self.other["archive"] = self.other["archive"][:-2] + "\\\\"
found_archive = False
elif found_archive:
self.other["archive"] += line.strip()
# input atom specs and charge/mult
if "Symbolic Z-matrix:" in line:
self.atoms, n = get_input(f, n)
#Pseudopotential info
if "Pseudopotential Parameters" in line:
self.other["ECP"] = []
self.skip_lines(f, 4)
n += 5
line = f.readline()
while "=====" not in line:
line = line.split()
if line[0].isdigit() and line[1].isdigit():
ele = line[1]
n += 1
line = f.readline().split()
if line[0] != "No":
self.other["ECP"].append(ELEMENTS[int(ele)])
n += 1
line = f.readline()
# geometry
if re.search("(Standard|Input) orientation:", line):
if get_all and len(self.atoms) > 0:
self.all_geom += [
(deepcopy(self.atoms), deepcopy(self.other))
]
self.atoms, n = get_atoms(f, n)
self.other["opt_steps"] += 1
if re.search(
"The following ModRedundant input section has been read:", line
):
constraints, n = get_modredundant(f, n)
if just_geom:
line = f.readline()
n += 1
continue
# z-matrix parameters
if re.search("Optimized Parameters", line):
self.other["params"], n = get_params(f, n)
# status
if NORM_FINISH in line:
self.other["finished"] = True
# read energies from different methods
if "SCF Done" in line:
tmp = [word.strip() for word in line.split()]
idx = tmp.index("=")
self.other["energy"] = float(tmp[idx + 1])
self.other["scf_energy"] = float(tmp[idx + 1])
else:
nrg_match = re.search("\s+(E\(\S+\))\s*=\s*(\S+)", line)
# ^ matches many methods
# will also match the SCF line (hence the else here)
# the match in the SCF line could be confusing b/c
# the SCF line could be
# SCF Done: E(RB2PLYPD3) = -76.2887108570 A.U. after 10 cycles
# and later on, there will be a line...
# E2(B2PLYPD3) = -0.6465105880D-01 E(B2PLYPD3) = -0.76353361915801D+02
# this will give:
# * E(RB2PLYPD3) = -76.2887108570
# * E(B2PLYPD3) = -76.353361915801
# very similar names for very different energies...
if nrg_match:
self.other["energy"] = float(nrg_match.group(2).replace("D", "E"))
self.other[nrg_match.group(1)] = self.other["energy"]
# CC energy
if line.startswith(" CCSD(T)= "):
self.other["energy"] = float(line.split()[-1].replace("D", "E"))
self.other["E(CCSD(T))"] = self.other["energy"]
# MP energies
mp_match = re.search("([RU]MP\d+(?:\(\S+\))?)\s*=\s*(\S+)", line)
if mp_match:
self.other["energy"] = float(mp_match.group(2).replace("D", "E"))
self.other["E(%s)" % mp_match.group(1)] = self.other["energy"]
if "Molecular mass:" in line:
self.other["mass"] = float(float_num.search(line).group(0))
self.other["mass"] *= UNIT.AMU_TO_KG
# Frequencies
if route is not None and "hpmodes" in route.lower():
self.other["hpmodes"] = True
if "Harmonic frequencies" in line:
freq_str = line
line = f.readline()
while line != "\n":
n += 1
freq_str += line
line = f.readline()
if "hpmodes" not in self.other:
self.other["hpmodes"] = False
self.other["frequency"] = Frequency(
freq_str, hpmodes=self.other["hpmodes"]
)
if "Anharmonic Infrared Spectroscopy" in line:
self.skip_lines(f, 5)
n += 5
anharm_str = ""
combinations_read = False
combinations = False
line = f.readline()
while not combinations_read:
n += 1
anharm_str += line
if "Combination Bands" in line:
combinations = True
line = f.readline()
if combinations and line == "\n":
combinations_read = True
self.other["frequency"].parse_gaussian_lines(
anharm_str.splitlines(), harmonic=False,
)
# X matrix for anharmonic
if "Total Anharmonic X Matrix" in line:
self.skip_lines(f, 1)
n += 1
n_freq = len(self.other["frequency"].data)
n_sections = int(np.ceil(n_freq / 5))
x_matrix = np.zeros((n_freq, n_freq))
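# the anharmonic X matrix is printed as a lower triangle in blocks of 5 columns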
for section in range(0, n_sections):
header = f.readline()
n += 1
for j in range(5 * section, n_freq):
line = f.readline()
n += 1
ll = 5 * section
ul = 5 * section + min(j - ll + 1, 5)
x_matrix[j, ll:ul] = [
float(x.replace("D", "e"))
for x in line.split()[1:]
]
x_matrix += np.tril(x_matrix, k=-1).T
self.other["X_matrix"] = x_matrix
if "Total X0" in line:
self.other["X0"] = float(line.split()[5])
# TD-DFT output
if line.strip().startswith("Ground to excited state"):
uv_vis = ""
highest_state = 0
done = False
read_states = False
while not done:
n += 1
uv_vis += line
if not read_states and line.strip() and line.split()[0].isdigit():
state = int(line.split()[0])
if state > highest_state:
highest_state = state
if line.strip().startswith("Ground to excited state transition velocity"):
read_states = True
if re.search("Excited State\s*%i:" % highest_state, line):
done = True
if line.strip().startswith("Total Energy, E"):
nrg = re.search(
r"Total Energy, E\((\S+)\)\s*=\s*(-?\d+\.\d+)", line
)
self.other["E(%s)" % nrg.group(1)] = float(nrg.group(2))
self.other["energy"] = float(nrg.group(2))
line = f.readline()
self.other["uv_vis"] = ValenceExcitations(
uv_vis, style="gaussian"
)
# Thermo
if re.search("Temperature\s*\d+\.\d+", line):
self.other["temperature"] = float(
float_num.search(line).group(0)
)
if "Rotational constants (GHZ):" in line:
rot = float_num.findall(line)
rot = [
float(r) * PHYSICAL.PLANCK * (10 ** 9) / PHYSICAL.KB
for r in rot
]
self.other["rotational_temperature"] = rot
# rotational constants from anharmonic frequency jobs
if "Rotational Constants (in MHz)" in line:
self.skip_lines(f, 2)
n += 2
equilibrium_rotational_temperature = np.zeros(3)
ground_rotational_temperature = np.zeros(3)
centr_rotational_temperature = np.zeros(3)
for i in range(0, 3):
line = f.readline()
n += 1
info = line.split()
Be = float(info[1])
B00 = float(info[3])
B0 = float(info[5])
equilibrium_rotational_temperature[i] = Be
ground_rotational_temperature[i] = B00
centr_rotational_temperature[i] = B0
equilibrium_rotational_temperature *= (
PHYSICAL.PLANCK * 1e6 / PHYSICAL.KB
)
ground_rotational_temperature *= (
PHYSICAL.PLANCK * 1e6 / PHYSICAL.KB
)
centr_rotational_temperature *= (
PHYSICAL.PLANCK * 1e6 / PHYSICAL.KB
)
self.other[
"equilibrium_rotational_temperature"
] = equilibrium_rotational_temperature
self.other[
"ground_rotational_temperature"
] = ground_rotational_temperature
self.other[
"centr_rotational_temperature"
] = centr_rotational_temperature
if "Sum of electronic and zero-point Energies=" in line:
self.other["E_ZPVE"] = float(float_num.search(line).group(0))
if "Sum of electronic and thermal Enthalpies=" in line:
self.other["enthalpy"] = float(float_num.search(line).group(0))
if "Sum of electronic and thermal Free Energies=" in line:
self.other["free_energy"] = float(
float_num.search(line).group(0)
)
if "Zero-point correction=" in line:
self.other["ZPVE"] = float(float_num.search(line).group(0))
if "Rotational symmetry number" in line:
self.other["rotational_symmetry_number"] = int(
re.search("\d+", line).group(0)
)
# Gradient
if re.search("Threshold\s+Converged", line) is not None:
line = f.readline()
n += 1
grad = {}
def add_grad(line, name, grad):
line = line.split()
grad[name] = {
"value": line[2],
"threshold": line[3],
"converged": True if line[4] == "YES" else False,
}
return grad
while line != "":
if "Predicted change in Energy" in line:
break
if re.search("Maximum\s+Force", line) is not None:
grad = add_grad(line, "Max Force", grad)
if re.search("RMS\s+Force", line) is not None:
grad = add_grad(line, "RMS Force", grad)
if re.search("Maximum\s+Displacement", line) is not None:
grad = add_grad(line, "Max Disp", grad)
if re.search("RMS\s+Displacement", line) is not None:
grad = add_grad(line, "RMS Disp", grad)
line = f.readline()
n += 1
self.other["gradient"] = grad
# electronic properties
if "Electrostatic Properties (Atomic Units)" in line:
self.skip_lines(f, 5)
n += 5
self.other["electric_potential"] = []
self.other["electric_field"] = []
line = f.readline()
while "--" not in line:
info = line.split()
self.other["electric_potential"].append(float(info[2]))
self.other["electric_field"].append([float(x) for x in info[3:]])
line = f.readline()
n += 1
self.other["electric_potential"] = np.array(self.other["electric_potential"])
self.other["electric_field"] = np.array(self.other["electric_field"])
# optical features
if "[Alpha]" in line:
alpha_match = re.search("\[Alpha\].*\(\s*(.*\s?.*)\)\s*=\s*(-?\d+\.\d+)", line)
self.other["optical_rotation_(%s)" % alpha_match.group(1)] = \
float(alpha_match.group(2))
# symmetry
if "Full point group" in line:
self.other["full_point_group"] = line.split()[-3]
if "Largest Abelian subgroup" in line:
self.other["abelian_subgroup"] = line.split()[-3]
if "Largest concise Abelian subgroup" in line:
self.other["concise_abelian_subgroup"] = line.split()[-3]
# forces
if "Forces (Hartrees/Bohr)" in line:
gradient = np.zeros((len(self.atoms), 3))
self.skip_lines(f, 2)
n += 2
for i in range(0, len(self.atoms)):
n += 1
line = f.readline()
info = line.split()
gradient[i] = np.array([float(x) for x in info[2:]])
self.other["forces"] = gradient
# nbo stuff
if "N A T U R A L A T O M I C O R B I T A L A N D" in line:
self.read_nbo(f)
# atomic charges
charge_match = re.search("(\S+) charges:\s*$", line)
if charge_match:
self.skip_lines(f, 1)
n += 1
charges = []
for i in range(0, len(self.atoms)):
line = f.readline()
n += 1
charges.append(float(line.split()[2]))
self.atoms[i].charge = float(line.split()[2])
self.other[charge_match.group(1) + " Charges"] = charges
# capture errors
# only keep first error, want to fix one at a time
if "error" not in self.other:
for err in ERROR:
if re.search(err, line):
self.other["error"] = ERROR[err]
self.other["error_msg"] = line.strip()
break
line = f.readline()
n += 1
if not just_geom:
if route is not None:
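# reconstruct method, basis, and job type info from the route line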
other_kwargs = {GAUSSIAN_ROUTE: {}}
route_spec = re.compile("(\w+)=?\((.*)\)")
method_and_basis = re.search(
"#(?:[NnPpTt]\s+?)(\S+)|#\s*?(\S+)", route
)
if method_and_basis is not None:
if method_and_basis.group(2):
method_info = method_and_basis.group(2).split("/")
else:
method_info = method_and_basis.group(1).split("/")
method = method_info[0]
if len(method_info) > 1:
basis = method_info[1]
else:
basis = None
route_options = route.split()
job_type = []
grid = None
solvent = None
for option in route_options:
if option.startswith("#"):
continue
elif option.startswith(method):
continue
option_lower = option.lower()
if option_lower.startswith("opt"):
ts = False
match = route_spec.search(option)
if match:
options = match.group(2).split(",")
elif option_lower.startswith("opt="):
options = ["".join(option.split("=")[1:])]
else:
if not constraints:
# if we didn't read constraints, try using flagged atoms instead
from AaronTools.finders import FlaggedAtoms
constraints = {"atoms": FlaggedAtoms}
if not any(atom.flag for atom in self.atoms):
constraints = None
job_type.append(
OptimizationJob(constraints=constraints)
)
continue
other_kwargs[GAUSSIAN_ROUTE]["opt"] = []
for opt in options:
if opt.lower() == "ts":
ts = True
else:
other_kwargs[GAUSSIAN_ROUTE]["opt"].append(
opt
)
job_type.append(
OptimizationJob(
transition_state=ts,
constraints=constraints,
)
)
elif option_lower.startswith("freq"):
temp = 298.15
match = route_spec.search(option)
if match:
options = match.group(2).split(",")
elif option_lower.startswith("freq="):
options = "".join(option.split("=")[1:])
else:
job_type.append(FrequencyJob())
continue
other_kwargs[GAUSSIAN_ROUTE]["freq"] = []
for opt in options:
if opt.lower().startswith("temp"):
temp = float(opt.split("=")[1])
else:
other_kwargs[GAUSSIAN_ROUTE][
"freq"
].append(opt)
job_type.append(FrequencyJob(temperature=temp))
elif option_lower == "sp":
job_type.append(SinglePointJob())
elif option_lower.startswith("int"):
match = route_spec.search(option)
if match:
options = match.group(2).split(",")
elif option_lower.startswith("freq="):
options = "".join(option.split("=")[1:])
else:
job_type.append(FrequencyJob())
continue
for opt in options:
if opt.lower().startswith("grid"):
grid_name = opt.split("=")[1]
grid = IntegrationGrid(grid_name)
else:
if (
"Integral"
not in other_kwargs[GAUSSIAN_ROUTE]
):
other_kwargs[GAUSSIAN_ROUTE][
"Integral"
] = []
other_kwargs[GAUSSIAN_ROUTE][
"Integral"
].append(opt)
else:
# TODO: parse solvent
match = route_spec.search(option)
if match:
keyword = match.group(1)
options = match.group(2).split(",")
other_kwargs[GAUSSIAN_ROUTE][keyword] = options
elif "=" in option:
keyword = option.split("=")[0]
options = "".join(option.split("=")[1:])
other_kwargs[GAUSSIAN_ROUTE][keyword] = [
options
]
else:
other_kwargs[GAUSSIAN_ROUTE][option] = []
continue
self.other["other_kwargs"] = other_kwargs
try:
theory = Theory(
charge=self.other["charge"],
multiplicity=self.other["multiplicity"],
job_type=job_type,
basis=basis,
method=method,
grid=grid,
solvent=solvent,
)
theory.kwargs = self.other["other_kwargs"]
self.other["theory"] = theory
except KeyError:
# if there is a serious error, too little info may be available
# to properly create the theory object
pass
for i, a in enumerate(self.atoms):
a.name = str(i + 1)
if "finished" not in self.other:
self.other["finished"] = False
if "error" not in self.other:
self.other["error"] = None
return
def read_com(self, f):
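"""read Gaussian com (input) file"""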
found_atoms = False
found_constraint = False
atoms = []
other = {}
for line in f:
# header
if line.startswith("%"):
continue
if line.startswith("#"):
method = re.search("^#([NnPpTt]\s+?)(\S+)|^#\s*?(\S+)", line)
# route can be #n functional/basis ...
# or #functional/basis ...
# or # functional/basis ...
if method.group(3):
other["method"] = method.group(3)
else:
other["method"] = method.group(2)
if "temperature=" in line:
other["temperature"] = re.search(
"temperature=(\d+\.?\d*)", line
).group(1)
if "solvent=" in line:
other["solvent"] = re.search(
"solvent=(\S+)\)", line
).group(1)
if "scrf=" in line:
# solvent model should be non-greedy b/c solvent name can have commas
other["solvent_model"] = re.search(
"scrf=\((\S+?),", line
).group(1)
if "EmpiricalDispersion=" in line:
other["emp_dispersion"] = re.search(
"EmpiricalDispersion=(\S+)", line
).group(1)
if "int=(grid" in line or "integral=(grid" in line.lower():
other["grid"] = re.search(
"(?:int||Integral)=\(grid[(=](\S+?)\)", line
).group(1)
# comments can be multiple lines long
# but there should be a blank line between the route and the comment
# and another between the comment and the charge+mult
blank_lines = 0
while blank_lines < 2:
line = f.readline().strip()
if len(line) == 0:
blank_lines += 1
else:
if "comment" not in other:
other["comment"] = ""
other["comment"] += "%s\n" % line
other["comment"] = (
other["comment"].strip() if "comment" in other else ""
)
line = f.readline()
if len(line.split()) > 1:
line = line.split()
else:
line = line.split(",")
other["charge"] = int(line[0])
other["multiplicity"] = int(line[1])
found_atoms = True
continue
# constraints
if found_atoms and line.startswith("B") and line.endswith("F"):
found_constraint = True
if "constraint" not in other:
other["constraint"] = []
other["constraint"] += [float_num.findall(line)]
continue
# footer
if found_constraint:
if "footer" not in other:
other["footer"] = ""
other["footer"] += line
continue
# atom coords
nums = float_num.findall(line)
line = line.split()
if len(line) == 5 and is_alpha(line[0]) and len(nums) == 4:
if not is_int(line[1]):
continue
a = Atom(element=line[0], coords=nums[1:], flag=nums[0])
atoms += [a]
elif len(line) == 4 and is_alpha(line[0]) and len(nums) == 3:
a = Atom(element=line[0], coords=nums)
atoms += [a]
else:
continue
for i, a in enumerate(atoms):
a.name = str(i + 1)
self.atoms = atoms
self.other = other
return
def read_fchk(self, f, just_geom=True, max_length=10000000):
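"""read Gaussian formatted checkpoint (fchk) file"""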
def parse_to_list(
i, lines, length, data_type, debug=False, max_length=max_length,
):
"""takes a block in an fchk file and turns it into an array
block headers all end with N= <int>
the length of the array will be <int>
the data type is specified by data_type"""
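# if the block is longer than max_length, the values are not parsed and only the length is returned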
i += 1
line = f.readline()
# print("first line", line)
items_per_line = len(line.split())
# print("items per line", items_per_line)
total_items = items_per_line
num_lines = ceil(length / items_per_line)
# print("lines in block", num_lines)
block = [line]
for k in range(0, num_lines - 1):
line = f.readline()
if max_length < length:
continue
block.append(line)
if max_length < length:
return length, i + num_lines
block = " ".join(block)
if debug:
print("full block")
print(block)
return (
np.fromstring(block, count=length, dtype=data_type, sep=" "),
i + num_lines,
)
self.atoms = []
atom_numbers = []
atom_coords = []
other = {}
int_info = re.compile("([\S\s]+?)\s*I\s*(N=)?\s*(-?\d+)")
real_info = re.compile(
"([\S\s]+?)\s*R\s*(N=)\s*(-?\d+\.?\d*[Ee]?[+-]?\d*)"
)
char_info = re.compile(
"([\S\s]+?)\s*C\s*(N=)?\s*(-?\d+\.?\d*[Ee]?[+-]?\d*)"
)
theory = Theory()
line = f.readline()
i = 0
while line != "":
if i == 0:
other["comment"] = line.strip()
elif i == 1:
i += 1
line = f.readline()
job_info = line.split()
if job_info[0] == "SP":
theory.job_type = [SinglePointJob()]
elif job_info[0] == "FOPT":
theory.job_type = [OptimizationJob()]
elif job_info[0] == "FTS":
theory.job_type = [OptimizationJob(transition_state=True)]
elif job_info[0] == "FORCE":
theory.job_type = [ForceJob()]
elif job_info[0] == "FREQ":
theory.job_type = [FrequencyJob()]
theory.method = job_info[1]
if len(job_info) > 2:
theory.basis = job_info[2]
i += 1
line = f.readline()
continue
int_match = int_info.match(line)
real_match = real_info.match(line)
char_match = char_info.match(line)
if int_match is not None:
data = int_match.group(1)
# print("int", data)
value = int_match.group(3)
if data == "Charge" and not just_geom:
theory.charge = int(value)
elif data == "Multiplicity" and not just_geom:
theory.multiplicity = int(value)
elif data == "Atomic numbers":
atom_numbers, i = parse_to_list(i, f, int(value), int)
elif not just_geom:
if int_match.group(2):
other[data], i = parse_to_list(
i, f, int(value), int
)
else:
other[data] = int(value)
elif real_match is not None:
data = real_match.group(1)
# print("real", data)
value = real_match.group(3)
if data == "Current cartesian coordinates":
atom_coords, i = parse_to_list(i, f, int(value), float)
elif data == "Total Energy":
other["energy"] = float(value)
elif not just_geom:
if real_match.group(2):
other[data], i = parse_to_list(
i, f, int(value), float
)
else:
other[data] = float(value)
# elif char_match is not None:
# data = char_match.group(1)
# value = char_match.group(3)
# if not just_geom:
# other[data] = lines[i + 1]
# i += 1
line = f.readline()
i += 1
self.other = other
self.other["theory"] = theory
if isinstance(atom_coords, int):
raise RuntimeError(
"max. array size is insufficient to parse atom data\n"
"must be at least %i" % atom_coords
)
coords = np.reshape(atom_coords, (len(atom_numbers), 3))
for n, (atnum, coord) in enumerate(zip(atom_numbers, coords)):
atom = Atom(
element=ELEMENTS[atnum],
coords=UNIT.A0_TO_BOHR * coord,
name=str(n + 1),
)
self.atoms.append(atom)
try:
self.other["orbitals"] = Orbitals(self)
except (NotImplementedError, KeyError):
pass
except (TypeError, ValueError) as err:
self.LOG.warning(
"could not create Orbitals, try increasing the max.\n"
"array size to read from FCHK files\n\n"
"%s" % err
)
for key in [
"Alpha MO coefficients", "Beta MO coefficients",
"Shell types", "Shell to atom map", "Contraction coefficients",
"Primitive exponents", "Number of primitives per shell",
"Coordinates of each shell",
]:
if key in self.other and isinstance(self.other[key], int):
self.LOG.warning(
"size of %s is > %i: %i" % (key, max_length, self.other[key])
)
def read_nbo(self, f):
"""
read nbo data
"""
line = f.readline()
while line:
if "natural bond orbitals (summary):" in line.lower():
break
if "NATURAL POPULATIONS:" in line:
self.skip_lines(f, 3)
ao_types = []
ao_atom_ndx = []
nao_types = []
occ = []
nrg = []
blank_lines = 0
while blank_lines <= 1:
match = re.search(
"\d+\s+[A-Z][a-z]?\s+(\d+)\s+(\S+)\s+([\S\s]+?)(-?\d+\.\d+)\s+(-?\d+\.\d+)",
line
)
if match:
ao_atom_ndx.append(int(match.group(1)) - 1)
ao_types.append(match.group(2))
nao_types.append(match.group(3))
occ.append(float(match.group(4)))
nrg.append(float(match.group(5)))
blank_lines = 0
else:
blank_lines += 1
line = f.readline()
self.other["ao_types"] = ao_types
self.other["ao_atom_ndx"] = ao_atom_ndx
self.other["nao_type"] = nao_types
self.other["ao_occ"] = occ
self.other["ao_nrg"] = nrg
if "Summary of Natural Population Analysis:" in line:
self.skip_lines(f, 5)
core_occ = []
val_occ = []
rydberg_occ = []
nat_q = []
line = f.readline()
while "==" not in line:
info = line.split()
core_occ.append(float(info[3]))
val_occ.append(float(info[4]))
rydberg_occ.append(float(info[5]))
nat_q.append(float(info[2]))
line = f.readline()
self.other["Natural Charges"] = nat_q
self.other["core_occ"] = core_occ
self.other["valence_occ"] = val_occ
self.other["rydberg_occ"] = rydberg_occ
if "Wiberg bond index matrix in the NAO basis" in line:
dim = len(self.other["Natural Charges"])
bond_orders = np.zeros((dim, dim))
done = False
j = 0
for block in range(0, ceil(dim / 9)):
offset = 9 * j
self.skip_lines(f, 3)
for i in range(0, dim):
line = f.readline()
for k, bo in enumerate(line.split()[2:]):
bo = float(bo)
bond_orders[i][offset + k] = bo
j += 1
self.other["wiberg_nao"] = bond_orders
line = f.readline()
def read_crest(self, f, conf_name=None):
"""
conf_name: path to the conformer ensemble file; set to False to skip conformer loading (the file isn't written until the crest job is done)
"""
if conf_name is None:
conf_name = os.path.join(
os.path.dirname(self.name), "crest_conformers.xyz"
)
line = True
self.other["finished"] = False
self.other["error"] = None
while line:
line = f.readline()
if "terminated normally" in line:
self.other["finished"] = True
elif "population of lowest" in line:
self.other["best_pop"] = float(float_num.findall(line)[0])
elif "ensemble free energy" in line:
self.other["free_energy"] = (
float(float_num.findall(line)[0]) / UNIT.HART_TO_KCAL
)
elif "ensemble entropy" in line:
self.other["entropy"] = (
float(float_num.findall(line)[1]) / UNIT.HART_TO_KCAL
)
elif "ensemble average energy" in line:
self.other["avg_energy"] = (
float(float_num.findall(line)[0]) / UNIT.HART_TO_KCAL
)
elif "E lowest" in line:
self.other["energy"] = float(float_num.findall(line)[0])
elif "T /K" in line:
self.other["temperature"] = float(float_num.findall(line)[0])
elif (
line.strip()
.lower()
.startswith(("forrtl", "warning", "*warning"))
):
self.other["error"] = "UNKNOWN"
if "error_msg" not in self.other:
self.other["error_msg"] = ""
self.other["error_msg"] += line
elif "-chrg" in line:
self.other["charge"] = int(float_num.findall(line)[0])
elif "-uhf" in line:
self.other["multiplicity"] = (
int(float_num.findall(line)[0]) + 1
)
if self.other["finished"] and conf_name:
self.other["conformers"] = FileReader(
conf_name,
get_all=True,
).all_geom
self.comment, self.atoms = self.other["conformers"][0]
self.other["conformers"] = self.other["conformers"][1:]
def read_xtb(self, f, freq_name=None):
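"""read xtb output file; freq_name is an optional path to a separate file with frequency data"""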
line = True
self.other["finished"] = False
self.other["error"] = None
self.atoms = []
self.comment = ""
while line:
line = f.readline()
if "Optimized Geometry" in line:
line = f.readline()
n_atoms = int(line.strip())
line = f.readline()
self.comment = " ".join(line.strip().split()[2:])
for i in range(n_atoms):
line = f.readline()
elem, x, y, z = line.split()
self.atoms.append(Atom(element=elem, coords=[x, y, z]))
if "normal termination" in line:
self.other["finished"] = True
if "abnormal termination" in line:
self.other["error"] = "UNKNOWN"
if line.strip().startswith("#ERROR"):
if "error_msg" not in self.other:
self.other["error_msg"] = ""
self.other["error_msg"] += line
if "charge" in line and ":" in line:
self.other["charge"] = int(float_num.findall(line)[0])
if "spin" in line and ":" in line:
self.other["multiplicity"] = (
int(2 * float(float_num.findall(line)[0]) + 1)
)
if "total energy" in line:
self.other["energy"] = (
float(float_num.findall(line)[0]) * UNIT.HART_TO_KCAL
)
if "zero point energy" in line:
self.other["ZPVE"] = (
float(float_num.findall(line)[0]) * UNIT.HART_TO_KCAL
)
if "total free energy" in line:
self.other["free_energy"] = (
float(float_num.findall(line)[0]) * UNIT.HART_TO_KCAL
)
if "electronic temp." in line:
self.other["temperature"] = float(float_num.findall(line)[0])
if freq_name is not None:
with open(freq_name) as f_freq:
self.other["frequency"] = Frequency(f_freq.read())
def read_sqm(self, f):
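"""read sqm output file"""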
lines = f.readlines()
self.other["finished"] = False
self.atoms = []
i = 0
while i < len(lines):
line = lines[i]
if "Atomic Charges for Step" in line:
elements = []
for info in lines[i + 2 :]:
if not info.strip() or not info.split()[0].isdigit():
break
ele = info.split()[1]
elements.append(ele)
i += len(elements) + 2
if "Final Structure" in line:
k = 0
for info in lines[i + 4 :]:
data = info.split()
coords = np.array([float(x) for x in data[4:7]])
self.atoms.append(
Atom(
name=str(k + 1),
coords=coords,
element=elements[k],
)
)
k += 1
if k == len(elements):
break
i += k + 4
if "Calculation Completed" in line:
self.other["finished"] = True
if "Total SCF energy" in line:
self.other["energy"] = (
float(line.split()[4]) / UNIT.HART_TO_KCAL
)
i += 1
if not self.atoms:
# there's no atoms if there's an error
# error is probably on the last line
self.other["error"] = "UNKNOWN"
self.other["error_msg"] = line
def read_nbo_47(self, f, nbo_name=None):
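"""read NBO .47 file for geometry and basis set info"""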
lines = f.readlines()
bohr = False
i = 0
while i < len(lines):
line = lines[i]
if line.startswith(" $"):
section = line.split()[0]
if section.startswith("$COORD"):
i += 1
self.atoms = []
line = lines[i]
while not line.startswith(" $END"):
if re.search("\d+\s+\d+(?:\s+-?\d+\.\d+\s){3}", line):
info = line.split()
ndx = int(info[0])
coords = [float(x) for x in info[2:5]]
self.atoms.append(
Atom(
element=ELEMENTS[ndx],
name=str(len(self.atoms) + 1),
coords=np.array(coords),
)
)
i += 1
line = lines[i]
elif section.startswith("$BASIS"):
reading_centers = False
reading_labels = False
i += 1
line = lines[i]
while not line.startswith(" $END"):
if "CENTER" in line.upper():
self.other["shell_to_atom"] = [
int(x) for x in line.split()[2:]
]
reading_centers = True
reading_labels = False
elif "LABEL" in line.upper():
self.other["momentum_label"] = [
int(x) for x in line.split()[2:]
]
reading_labels = True
reading_centers = False
elif reading_centers:
self.other["shell_to_atom"].extend(
[int(x) for x in line.split()]
)
elif reading_labels:
self.other["momentum_label"].extend(
[int(x) for x in line.split()]
)
i += 1
line = lines[i]
elif section.startswith("$CONTRACT"):
int_sections = {
"NCOMP": "funcs_per_shell",
"NPRIM": "n_prim_per_shell",
"NPTR": "start_ndx",
}
float_sections = {
"EXP": "exponents",
"CS": "s_coeff",
"CP": "p_coeff",
"CD": "d_coeff",
"CF": "f_coeff",
}
i += 1
line = lines[i]
while not line.startswith(" $END"):
if any(line.strip().startswith(section) for section in int_sections):
section = line.split()[0]
self.other[int_sections[section]] = [
int(x) for x in line.split()[2:]
]
i += 1
line = lines[i]
while "=" not in line and "$" not in line:
self.other[int_sections[section]].extend([
int(x) for x in line.split()
])
i += 1
line = lines[i]
elif any(line.strip().startswith(section) for section in float_sections):
section = line.split()[0]
self.other[float_sections[section]] = [
float(x) for x in line.split()[2:]
]
i += 1
line = lines[i]
while "=" not in line and "$" not in line:
self.other[float_sections[section]].extend([
float(x) for x in line.split()
])
i += 1
line = lines[i]
else:
i += 1
line = lines[i]
elif section.startswith("$GENNBO"):
if "BOHR" in section.upper():
bohr = True
nbas = re.search("NBAS=(\d+)", line)
n_funcs = int(nbas.group(1))
if "CUBICF" in section.upper():
self.LOG.warning("cubic F shell will not be handled correctly")
i += 1
if nbo_name is not None:
self._read_nbo_coeffs(nbo_name)
def _read_nbo_coeffs(self, nbo_name):
"""
read coefficients in AO basis for NBO's/NLHO's/NAO's/etc.
called by methods that read NBO input (.47) or output files (.31)
"""
with open(nbo_name, "r") as f2:
lines = f2.readlines()
kind = re.search("P?(\S+)s", lines[1]).group(1)
desc_file = os.path.splitext(nbo_name)[0] + ".46"
if os.path.exists(desc_file):
with open(desc_file, "r") as f3:
desc_lines = f3.readlines()
for k, line in enumerate(desc_lines):
if kind in line:
self.other["orbit_kinds"] = []
n_orbits = int(line.split()[1])
k += 1
while len(self.other["orbit_kinds"]) < n_orbits:
self.other["orbit_kinds"].extend([
desc_lines[k][i: i + 10]
for i in range(1, len(desc_lines[k]) - 1, 10)
])
k += 1
else:
self.LOG.warning(
"no .46 file found - orbital descriptions will be unavialable"
)
j = 3
self.other["alpha_coefficients"] = []
while len(self.other["alpha_coefficients"]) < sum(self.other["funcs_per_shell"]):
mo_coeff = []
while len(mo_coeff) < sum(self.other["funcs_per_shell"]):
mo_coeff.extend([float(x) for x in lines[j].split()])
j += 1
self.other["alpha_coefficients"].append(mo_coeff)
self.other["orbitals"] = Orbitals(self)
def read_nbo_31(self, f, nbo_name=None):
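"""read NBO .31 file for geometry and basis set info"""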
lines = f.readlines()
comment = lines[0].strip()
info = lines[3].split()
n_atoms = int(info[0])
self.atoms = []
for i in range(5, 5 + n_atoms):
atom_info = lines[i].split()
ele = ELEMENTS[int(atom_info[0])]
coords = np.array([float(x) for x in atom_info[1:4]])
self.atoms.append(
Atom(
element=ele,
coords=coords,
name=str(i-4),
)
)
i = n_atoms + 6
line = lines[i]
self.other["shell_to_atom"] = []
self.other["momentum_label"] = []
self.other["funcs_per_shell"] = []
self.other["start_ndx"] = []
self.other["n_prim_per_shell"] = []
while "---" not in line:
info = line.split()
ndx = int(info[0])
funcs = int(info[1])
start_ndx = int(info[2])
n_prim = int(info[3])
self.other["shell_to_atom"].extend([ndx for j in range(0, funcs)])
self.other["funcs_per_shell"].append(funcs)
self.other["start_ndx"].append(start_ndx)
self.other["n_prim_per_shell"].append(n_prim)
i += 1
line = lines[i]
momentum_labels = [int(x) for x in line.split()]
self.other["momentum_label"].extend(momentum_labels)
i += 1
line = lines[i]
i += 1
self.other["exponents"] = []
line = lines[i]
while line.strip() != "":
exponents = [float(x) for x in line.split()]
self.other["exponents"].extend(exponents)
i += 1
line = lines[i]
i += 1
self.other["s_coeff"] = []
line = lines[i]
while line.strip() != "":
coeff = [float(x) for x in line.split()]
self.other["s_coeff"].extend(coeff)
i += 1
line = lines[i]
i += 1
self.other["p_coeff"] = []
line = lines[i]
while line.strip() != "":
coeff = [float(x) for x in line.split()]
self.other["p_coeff"].extend(coeff)
i += 1
line = lines[i]
i += 1
self.other["d_coeff"] = []
line = lines[i]
while line.strip() != "":
coeff = [float(x) for x in line.split()]
self.other["d_coeff"].extend(coeff)
i += 1
line = lines[i]
i += 1
self.other["f_coeff"] = []
line = lines[i]
while line.strip() != "":
coeff = [float(x) for x in line.split()]
self.other["f_coeff"].extend(coeff)
i += 1
line = lines[i]
i += 1
self.other["g_coeff"] = []
line = lines[i]
while line.strip() != "":
coeff = [float(x) for x in line.split()]
self.other["g_coeff"].extend(coeff)
i += 1
line = lines[i]
if nbo_name is not None:
self._read_nbo_coeffs(nbo_name)
# /NovalIDE-1.1.8-py3-none-any.whl/noval/syntax_themes.py
from noval import GetApp
import noval.iface as iface
import noval.plugin as plugin
import noval.consts as consts
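# built-in editor color themes: each function returns a mapping of syntax tag -> text style options (foreground, background, font, ...)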
def default_light():
default_fg = "black"
default_bg = "#fdfdfd"
light_fg = "DarkGray"
string_fg = "DarkGreen"
open_string_bg = "#c3f9d3"
return {
"TEXT": {
"foreground": default_fg,
"insertbackground": default_fg,
"background": default_bg,
},
"GUTTER": {"foreground": "#999999", "background": "#e0e0e0"},
"breakpoint": {"foreground": "crimson"},
"current_line": {"background": "#f5f5f5"},
"definition": {"foreground": "DarkBlue", "font": "BoldEditorFont"},
"string": {"foreground": string_fg},
"string3": {"foreground": string_fg},
"open_string": {"foreground": string_fg, "background": open_string_bg},
"open_string3": {"foreground": string_fg, "background": open_string_bg},
"keyword": {"foreground": "#7f0055", "font": "BoldEditorFont"},
"builtin": {"foreground": "#7f0055"},
"number": {"foreground": "#B04600"},
"comment": {"foreground": light_fg},
"prompt": {"foreground": "purple", "font": "BoldEditorFont"},
"magic": {"foreground": light_fg},
"stdin": {"foreground": "Blue"},
"stdout": {"foreground": "Black"},
"stderr": {"foreground": "Red"},
"value": {"foreground": "DarkBlue"},
"hyperlink": {"foreground": "#3A66DD", "underline": True},
# paren matcher
"surrounding_parens": {"foreground": "Blue", "font": "BoldEditorFont"},
"unclosed_expression": {"background": "LightGray"},
# find/replace
"found": {"foreground": "blue", "underline": True},
"current_found": {"foreground": "white", "background": "red"},
"matched_name": {"background": "#e6ecfe"},
"local_name": {"font": "ItalicEditorFont"},
# debugger
"active_focus": {"background": "#F8FC9A", "borderwidth": 1, "relief": "solid"},
"suspended_focus": {"background": "", "borderwidth": 1, "relief": "solid"},
"completed_focus": {
"background": "#BBEDB2",
"borderwidth": 1,
"relief": "flat",
},
"exception_focus": {
"background": "#FFBFD6",
"borderwidth": 1,
"relief": "solid",
},
"expression_box": {"background": "#DCEDF2", "foreground": default_fg},
}
def default_dark():
default_fg = "#B3B3B3"
string_fg = "#8DC76F"
open_string_bg = "#224533"
# s.configure("Local.Code", foreground="#BCCAE8")
# s.configure("MatchedName.Code", background="#193022")
return {
"TEXT": {
"foreground": default_fg,
"insertbackground": default_fg,
"background": "#2d2d2d",
},
"GUTTER": {"foreground": "#606060", "background": "#323232"},
"breakpoint": {"foreground": "pink"},
"current_line": {"background": "#363636"},
"sel": {"foreground": "#eeeeee", "background": "#6E6E6E"},
"definition": {"foreground": default_fg},
"string": {"foreground": string_fg},
"string3": {"foreground": string_fg},
"open_string": {"foreground": string_fg, "background": open_string_bg},
"open_string3": {"foreground": string_fg, "background": open_string_bg},
"builtin": {"foreground": "#A9B1C9"},
"keyword": {"foreground": "#A9B1C9", "font": "BoldEditorFont"},
"number": {"foreground": "#FFCABF"},
"comment": {"foreground": "#D4D44E"},
# shell
"prompt": {"foreground": "#5BEBBB", "font": "BoldEditorFont"},
"magic": {"foreground": "pink"},
"stdin": {"foreground": "LightBlue"},
"stdout": {"foreground": "LightGray"},
"stderr": {"foreground": "#EB5B83"},
"value": {"foreground": "#EBEB5B"},
"hyperlink": {"foreground": "#619DC7", "underline": True},
# paren matcher
"surrounding_parens": {"foreground": "#F0995B", "font": "BoldEditorFont"},
"unclosed_expression": {"background": "#000000"},
# find/replace
"found": {"underline": True},
"current_found": {"foreground": "white", "background": "red"},
"matched_name": {"background": "#474747"},
"local_name": {"font": "ItalicEditorFont"},
# debugger
"active_focus": {"background": "#807238", "borderwidth": 1, "relief": "solid"},
"suspended_focus": {"background": "", "borderwidth": 1, "relief": "solid"},
"completed_focus": {
"background": "#807238",
"borderwidth": 1,
"relief": "flat",
},
"exception_focus": {
"background": "#FFBFD6",
"borderwidth": 1,
"relief": "solid",
},
"expression_box": {"background": "#506E67", "foreground": default_fg},
}
def default_dark_green():
open_string_bg = "#453B22"
return {
"TEXT": {"background": "#273627"},
"GUTTER": {"background": "#33402F"},
"current_line": {"background": "#2E402E"},
"sel": {"background": "#6E6E6E"},
"unclosed_expression": {"background": "#0F1F15"},
"open_string": {"background": open_string_bg},
"open_string3": {"background": open_string_bg},
"keyword": {"foreground": "#88CFB6", "font": "BoldEditorFont"},
"builtin": {"foreground": "#88CFB6"},
# debugger
"active_focus": {"background": "#807238"},
"completed_focus": {"background": "#807238"},
"exception_focus": {"background": "#FFBFD6"},
"expression_box": {"background": "#506E67"},
}
def default_dark_blue():
open_string_bg = "#224533"
return {
"TEXT": {"background": "#272936"},
"GUTTER": {"background": "#2F3640"},
"current_line": {"background": "#2D3040"},
"sel": {"background": "#6E6E6E"},
"unclosed_expression": {"background": "#100B21"},
"open_string": {"background": open_string_bg},
"open_string3": {"background": open_string_bg},
"keyword": {"foreground": "#8899CF", "font": "BoldEditorFont"},
"builtin": {"foreground": "#8899CF"},
# debugger
"active_focus": {"background": "#807238"},
"completed_focus": {"background": "#807238"},
"exception_focus": {"background": "#FFBFD6"},
"expression_box": {"background": "#506E67"},
}
def idle_classic():
string_fg = "#00aa00"
return {
"TEXT": {
"foreground": "black",
"insertbackground": "black",
"background": "white",
},
"GUTTER": {"foreground": "gray", "background": "#efefef"},
"sel": {"foreground": "black", "background": "gray"},
"number": {"foreground": "black"},
"definition": {"foreground": "#0000ff", "font": "EditorFont"},
"string": {"foreground": string_fg},
"string3": {"foreground": string_fg},
"open_string": {"foreground": string_fg},
"open_string3": {"foreground": string_fg},
"keyword": {"foreground": "#ff7700", "font": "EditorFont"},
"builtin": {"foreground": "#900090"},
"comment": {"foreground": "#dd0000"},
"prompt": {"foreground": "#770000"},
"stdin": {"foreground": "black"},
"stdout": {"foreground": "Blue"},
"value": {"foreground": "Blue"},
"stderr": {"foreground": "Red"},
"found": {"foreground": "", "underline": True},
"current_found": {"foreground": "white", "background": "black"},
}
def idle_dark():
normal_fg = "white"
string_fg = "#02ff02"
return {
"TEXT": {
"foreground": normal_fg,
"insertbackground": normal_fg,
"background": "#002240",
},
"sel": {"foreground": "#FFFFFF", "background": "#7e7e7e"},
"number": {"foreground": normal_fg},
"definition": {"foreground": "#5e5eff", "font": "EditorFont"},
"string": {"foreground": string_fg},
"string3": {"foreground": string_fg},
"open_string": {"foreground": string_fg},
"open_string3": {"foreground": string_fg},
"keyword": {"foreground": "#ff8000", "font": "EditorFont"},
"builtin": {"foreground": "#ff00ff"},
"comment": {"foreground": "#dd0000"},
"prompt": {"foreground": "#ff4d4d"},
"stdin": {"foreground": normal_fg},
"stdout": {"foreground": "#c2d1fa"},
"value": {"foreground": "#c2d1fa"},
"stderr": {"foreground": "#ffb3b3"},
"found": {"foreground": "", "underline": True},
"current_found": {"foreground": "#002240", "background": "#fbfbfb"},
}
def desert_sunset():
normal_fg = "#f0e68c"
string_fg = "#ffa0a0"
return {
"TEXT": {
"foreground": normal_fg,
"insertbackground": normal_fg,
"background": "#333333",
},
"GUTTER": {"foreground": "gray", "background": "#404040"},
"sel": {"foreground": "#000000", "background": "gray"},
"number": {"foreground": normal_fg},
"definition": {"foreground": "#98fb98"},
"string": {"foreground": string_fg},
"string3": {"foreground": string_fg},
"open_string": {"foreground": string_fg},
"open_string3": {"foreground": string_fg},
"keyword": {"foreground": "#cc6600"},
"builtin": {"foreground": "#519e51"},
"comment": {"foreground": "#87ceeb"},
"prompt": {"foreground": "#87ceeb"},
"stdin": {"foreground": normal_fg},
"stdout": {"foreground": "#eeeeee"},
"value": {"foreground": "#eeeeee"},
"stderr": {"foreground": "#ff3e40"},
"found": {"foreground": "", "underline": True},
"current_found": {"foreground": "#ffffff", "background": "#333333"},
}
def zenburn():
# https://github.com/mig/gedit-themes/blob/master/zenburn.xml
# https://github.com/trusktr/gedit-color-schemes/blob/master/gtksourceview-3.0/styles/zenburn.xml
normal_fg = "#dcdccc"
string_fg = "#cc9393"
return {
"TEXT": {
"foreground": normal_fg,
"insertbackground": normal_fg,
"background": "#3f3f3f",
},
"GUTTER": {"foreground": "#7f8f8f", "background": "#464646"},
"current_line": {"background": "#4A4A4A"},
"sel": {"foreground": "white", "background": "#506070"},
"number": {"foreground": "#8cd0d3"},
"definition": {"foreground": "#f4a020", "font": "BoldEditorFont"},
"string": {"foreground": string_fg},
"string3": {"foreground": string_fg},
"open_string": {"foreground": string_fg},
"open_string3": {"foreground": string_fg},
"keyword": {"foreground": "#f0dfaf", "font": "BoldEditorFont"},
"builtin": {"foreground": "#efef8f"},
"comment": {"foreground": "#7f9f7f"},
"prompt": {"foreground": "#87ceeb"},
"stdin": {"foreground": normal_fg},
"stdout": {"foreground": "#eeeeee"},
"value": {"foreground": "#eeeeee"},
"stderr": {"foreground": "#ff3e40"},
# paren matcher
"surrounding_parens": {"foreground": "white", "font": "BoldEditorFont"},
} | PypiClean |
/FamcyDev-0.3.71-py3-none-any.whl/Famcy/bower_components/bootstrap/site/content/docs/5.0/components/scrollspy.md | ---
layout: docs
title: Scrollspy
description: Automatically update Bootstrap navigation or list group components based on scroll position to indicate which link is currently active in the viewport.
group: components
toc: true
---
## How it works
Scrollspy has a few requirements to function properly:
- It must be used on a Bootstrap [nav component]({{< docsref "/components/navs-tabs" >}}) or [list group]({{< docsref "/components/list-group" >}}).
- Scrollspy requires `position: relative;` on the element you're spying on, usually the `<body>`.
- Anchors (`<a>`) are required and must point to an element with that `id`.
When successfully implemented, your nav or list group will update accordingly, moving the `.active` class from one item to the next based on their associated targets.
{{< callout >}}
### Scrollable containers and keyboard access
If you're making a scrollable container (other than the `<body>`), be sure to have a `height` set and `overflow-y: scroll;` applied to it—alongside a `tabindex="0"` to ensure keyboard access.
{{< /callout >}}
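For instance, a minimal scrollable spy container could look like the sketch below (the `#navbar-example` target and the `200px` height are illustrative placeholder values):
```html
<div data-bs-spy="scroll" data-bs-target="#navbar-example" tabindex="0"
     style="position: relative; height: 200px; overflow-y: scroll;">
  <h4 id="section-one">Section one</h4>
  <p>...</p>
</div>
```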
## Example in navbar
Scroll the area below the navbar and watch the active class change. The dropdown items will be highlighted as well.
<div class="bd-example">
<nav id="navbar-example2" class="navbar navbar-light bg-light px-3">
<a class="navbar-brand" href="#">Navbar</a>
<ul class="nav nav-pills">
<li class="nav-item">
<a class="nav-link" href="#scrollspyHeading1">First</a>
</li>
<li class="nav-item">
<a class="nav-link" href="#scrollspyHeading2">Second</a>
</li>
<li class="nav-item dropdown">
<a class="nav-link dropdown-toggle" data-bs-toggle="dropdown" href="#" role="button" aria-expanded="false">Dropdown</a>
<ul class="dropdown-menu">
<li><a class="dropdown-item" href="#scrollspyHeading3">Third</a></li>
<li><a class="dropdown-item" href="#scrollspyHeading4">Fourth</a></li>
<li><hr class="dropdown-divider"></li>
<li><a class="dropdown-item" href="#scrollspyHeading5">Fifth</a></li>
</ul>
</li>
</ul>
</nav>
<div data-bs-spy="scroll" data-bs-target="#navbar-example2" data-bs-offset="0" class="scrollspy-example" tabindex="0">
<h4 id="scrollspyHeading1">First heading</h4>
<p>This is some placeholder content for the scrollspy page. Note that as you scroll down the page, the appropriate navigation link is highlighted. It's repeated throughout the component example. We keep adding some more example copy here to emphasize the scrolling and highlighting.</p>
<h4 id="scrollspyHeading2">Second heading</h4>
<p>This is some placeholder content for the scrollspy page. Note that as you scroll down the page, the appropriate navigation link is highlighted. It's repeated throughout the component example. We keep adding some more example copy here to emphasize the scrolling and highlighting.</p>
<h4 id="scrollspyHeading3">Third heading</h4>
<p>This is some placeholder content for the scrollspy page. Note that as you scroll down the page, the appropriate navigation link is highlighted. It's repeated throughout the component example. We keep adding some more example copy here to emphasize the scrolling and highlighting.</p>
<h4 id="scrollspyHeading4">Fourth heading</h4>
<p>This is some placeholder content for the scrollspy page. Note that as you scroll down the page, the appropriate navigation link is highlighted. It's repeated throughout the component example. We keep adding some more example copy here to emphasize the scrolling and highlighting.</p>
<h4 id="scrollspyHeading5">Fifth heading</h4>
<p>This is some placeholder content for the scrollspy page. Note that as you scroll down the page, the appropriate navigation link is highlighted. It's repeated throughout the component example. We keep adding some more example copy here to emphasize the scrolling and highlighting.</p>
</div>
</div>
```html
<nav id="navbar-example2" class="navbar navbar-light bg-light px-3">
<a class="navbar-brand" href="#">Navbar</a>
<ul class="nav nav-pills">
<li class="nav-item">
<a class="nav-link" href="#scrollspyHeading1">First</a>
</li>
<li class="nav-item">
<a class="nav-link" href="#scrollspyHeading2">Second</a>
</li>
<li class="nav-item dropdown">
<a class="nav-link dropdown-toggle" data-bs-toggle="dropdown" href="#" role="button" aria-expanded="false">Dropdown</a>
<ul class="dropdown-menu">
<li><a class="dropdown-item" href="#scrollspyHeading3">Third</a></li>
<li><a class="dropdown-item" href="#scrollspyHeading4">Fourth</a></li>
<li><hr class="dropdown-divider"></li>
<li><a class="dropdown-item" href="#scrollspyHeading5">Fifth</a></li>
</ul>
</li>
</ul>
</nav>
<div data-bs-spy="scroll" data-bs-target="#navbar-example2" data-bs-offset="0" class="scrollspy-example" tabindex="0">
<h4 id="scrollspyHeading1">First heading</h4>
<p>...</p>
<h4 id="scrollspyHeading2">Second heading</h4>
<p>...</p>
<h4 id="scrollspyHeading3">Third heading</h4>
<p>...</p>
<h4 id="scrollspyHeading4">Fourth heading</h4>
<p>...</p>
<h4 id="scrollspyHeading5">Fifth heading</h4>
<p>...</p>
</div>
```
## Example with nested nav
Scrollspy also works with nested `.nav`s. If a nested `.nav` is `.active`, its parents will also be `.active`. Scroll the area next to the navbar and watch the active class change.
<div class="bd-example">
<div class="row">
<div class="col-4">
<nav id="navbar-example3" class="navbar navbar-light bg-light flex-column align-items-stretch p-3">
<a class="navbar-brand" href="#">Navbar</a>
<nav class="nav nav-pills flex-column">
<a class="nav-link" href="#item-1">Item 1</a>
<nav class="nav nav-pills flex-column">
<a class="nav-link ms-3 my-1" href="#item-1-1">Item 1-1</a>
<a class="nav-link ms-3 my-1" href="#item-1-2">Item 1-2</a>
</nav>
<a class="nav-link" href="#item-2">Item 2</a>
<a class="nav-link" href="#item-3">Item 3</a>
<nav class="nav nav-pills flex-column">
<a class="nav-link ms-3 my-1" href="#item-3-1">Item 3-1</a>
<a class="nav-link ms-3 my-1" href="#item-3-2">Item 3-2</a>
</nav>
</nav>
</nav>
</div>
<div class="col-8">
<div data-bs-spy="scroll" data-bs-target="#navbar-example3" data-bs-offset="0" class="scrollspy-example-2" tabindex="0">
<h4 id="item-1">Item 1</h4>
<p>This is some placeholder content for the scrollspy page. Note that as you scroll down the page, the appropriate navigation link is highlighted. It's repeated throughout the component example. We keep adding some more example copy here to emphasize the scrolling and highlighting.</p>
<h5 id="item-1-1">Item 1-1</h5>
<p>This is some placeholder content for the scrollspy page. Note that as you scroll down the page, the appropriate navigation link is highlighted. It's repeated throughout the component example. We keep adding some more example copy here to emphasize the scrolling and highlighting.</p>
<h5 id="item-1-2">Item 1-2</h5>
<p>This is some placeholder content for the scrollspy page. Note that as you scroll down the page, the appropriate navigation link is highlighted. It's repeated throughout the component example. We keep adding some more example copy here to emphasize the scrolling and highlighting.</p>
<h4 id="item-2">Item 2</h4>
<p>This is some placeholder content for the scrollspy page. Note that as you scroll down the page, the appropriate navigation link is highlighted. It's repeated throughout the component example. We keep adding some more example copy here to emphasize the scrolling and highlighting.</p>
<h4 id="item-3">Item 3</h4>
<p>This is some placeholder content for the scrollspy page. Note that as you scroll down the page, the appropriate navigation link is highlighted. It's repeated throughout the component example. We keep adding some more example copy here to emphasize the scrolling and highlighting.</p>
<h5 id="item-3-1">Item 3-1</h5>
<p>This is some placeholder content for the scrollspy page. Note that as you scroll down the page, the appropriate navigation link is highlighted. It's repeated throughout the component example. We keep adding some more example copy here to emphasize the scrolling and highlighting.</p>
<h5 id="item-3-2">Item 3-2</h5>
<p>This is some placeholder content for the scrollspy page. Note that as you scroll down the page, the appropriate navigation link is highlighted. It's repeated throughout the component example. We keep adding some more example copy here to emphasize the scrolling and highlighting.</p>
</div>
</div>
</div>
</div>
```html
<nav id="navbar-example3" class="navbar navbar-light bg-light flex-column align-items-stretch p-3">
<a class="navbar-brand" href="#">Navbar</a>
<nav class="nav nav-pills flex-column">
<a class="nav-link" href="#item-1">Item 1</a>
<nav class="nav nav-pills flex-column">
<a class="nav-link ms-3 my-1" href="#item-1-1">Item 1-1</a>
<a class="nav-link ms-3 my-1" href="#item-1-2">Item 1-2</a>
</nav>
<a class="nav-link" href="#item-2">Item 2</a>
<a class="nav-link" href="#item-3">Item 3</a>
<nav class="nav nav-pills flex-column">
<a class="nav-link ms-3 my-1" href="#item-3-1">Item 3-1</a>
<a class="nav-link ms-3 my-1" href="#item-3-2">Item 3-2</a>
</nav>
</nav>
</nav>
<div data-bs-spy="scroll" data-bs-target="#navbar-example3" data-bs-offset="0" tabindex="0">
<h4 id="item-1">Item 1</h4>
<p>...</p>
<h5 id="item-1-1">Item 1-1</h5>
<p>...</p>
<h5 id="item-1-2">Item 1-2</h5>
<p>...</p>
<h4 id="item-2">Item 2</h4>
<p>...</p>
<h4 id="item-3">Item 3</h4>
<p>...</p>
<h5 id="item-3-1">Item 3-1</h5>
<p>...</p>
<h5 id="item-3-2">Item 3-2</h5>
<p>...</p>
</div>
```
## Example with list-group
Scrollspy also works with `.list-group`s. Scroll the area next to the list group and watch the active class change.
<div class="bd-example">
<div class="row">
<div class="col-4">
<div id="list-example" class="list-group">
<a class="list-group-item list-group-item-action" href="#list-item-1">Item 1</a>
<a class="list-group-item list-group-item-action" href="#list-item-2">Item 2</a>
<a class="list-group-item list-group-item-action" href="#list-item-3">Item 3</a>
<a class="list-group-item list-group-item-action" href="#list-item-4">Item 4</a>
</div>
</div>
<div class="col-8">
<div data-bs-spy="scroll" data-bs-target="#list-example" data-bs-offset="0" class="scrollspy-example" tabindex="0">
<h4 id="list-item-1">Item 1</h4>
<p>This is some placeholder content for the scrollspy page. Note that as you scroll down the page, the appropriate navigation link is highlighted. It's repeated throughout the component example. We keep adding some more example copy here to emphasize the scrolling and highlighting.</p>
<h4 id="list-item-2">Item 2</h4>
<p>This is some placeholder content for the scrollspy page. Note that as you scroll down the page, the appropriate navigation link is highlighted. It's repeated throughout the component example. We keep adding some more example copy here to emphasize the scrolling and highlighting.</p>
<h4 id="list-item-3">Item 3</h4>
<p>This is some placeholder content for the scrollspy page. Note that as you scroll down the page, the appropriate navigation link is highlighted. It's repeated throughout the component example. We keep adding some more example copy here to emphasize the scrolling and highlighting.</p>
<h4 id="list-item-4">Item 4</h4>
<p>This is some placeholder content for the scrollspy page. Note that as you scroll down the page, the appropriate navigation link is highlighted. It's repeated throughout the component example. We keep adding some more example copy here to emphasize the scrolling and highlighting.</p>
</div>
</div>
</div>
</div>
```html
<div id="list-example" class="list-group">
<a class="list-group-item list-group-item-action" href="#list-item-1">Item 1</a>
<a class="list-group-item list-group-item-action" href="#list-item-2">Item 2</a>
<a class="list-group-item list-group-item-action" href="#list-item-3">Item 3</a>
<a class="list-group-item list-group-item-action" href="#list-item-4">Item 4</a>
</div>
<div data-bs-spy="scroll" data-bs-target="#list-example" data-bs-offset="0" class="scrollspy-example" tabindex="0">
<h4 id="list-item-1">Item 1</h4>
<p>...</p>
<h4 id="list-item-2">Item 2</h4>
<p>...</p>
<h4 id="list-item-3">Item 3</h4>
<p>...</p>
<h4 id="list-item-4">Item 4</h4>
<p>...</p>
</div>
```
## Usage
### Via data attributes
To easily add scrollspy behavior to your topbar navigation, add `data-bs-spy="scroll"` to the element you want to spy on (most typically this would be the `<body>`). Then add the `data-bs-target` attribute with the ID or class of the parent element of any Bootstrap `.nav` component.
```css
body {
position: relative;
}
```
```html
<body data-bs-spy="scroll" data-bs-target="#navbar-example">
...
<div id="navbar-example">
<ul class="nav nav-tabs" role="tablist">
...
</ul>
</div>
...
</body>
```
### Via JavaScript
After adding `position: relative;` in your CSS, call the scrollspy via JavaScript:
```js
var scrollSpy = new bootstrap.ScrollSpy(document.body, {
target: '#navbar-example'
})
```
{{< callout danger >}}
#### Resolvable ID targets required
Navbar links must have resolvable id targets. For example, a `<a href="#home">home</a>` must correspond to something in the DOM like `<div id="home"></div>`.
{{< /callout >}}
{{< callout info >}}
#### Non-visible target elements ignored
Target elements that are not visible will be ignored and their corresponding nav items will never be highlighted.
{{< /callout >}}
### Methods
#### refresh
When using scrollspy in conjunction with adding or removing of elements from the DOM, you'll need to call the refresh method like so:
```js
var dataSpyList = [].slice.call(document.querySelectorAll('[data-bs-spy="scroll"]'))
dataSpyList.forEach(function (dataSpyEl) {
bootstrap.ScrollSpy.getInstance(dataSpyEl)
.refresh()
})
```
#### dispose
Destroys an element's scrollspy. (Removes stored data on the DOM element)
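A minimal sketch of tearing down an existing instance (assuming `content` is the id of the spied element, as in the `getInstance` example below):
```js
var scrollSpyContentEl = document.getElementById('content')
var scrollSpy = bootstrap.ScrollSpy.getInstance(scrollSpyContentEl)
if (scrollSpy) {
  scrollSpy.dispose() // removes the stored data and listeners for this element
}
```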
#### getInstance
*Static* method which allows you to get the scrollspy instance associated with a DOM element
```js
var scrollSpyContentEl = document.getElementById('content')
var scrollSpy = bootstrap.ScrollSpy.getInstance(scrollSpyContentEl) // Returns a Bootstrap scrollspy instance
```
#### getOrCreateInstance
*Static* method which allows you to get the scrollspy instance associated with a DOM element, or create a new one in case it wasn't initialised
```js
var scrollSpyContentEl = document.getElementById('content')
var scrollSpy = bootstrap.ScrollSpy.getOrCreateInstance(scrollSpyContentEl) // Returns a Bootstrap scrollspy instance
```
### Options
Options can be passed via data attributes or JavaScript. For data attributes, append the option name to `data-bs-`, as in `data-bs-offset=""`.
<table class="table">
<thead>
<tr>
<th style="width: 100px;">Name</th>
<th style="width: 100px;">Type</th>
<th style="width: 50px;">Default</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td><code>offset</code></td>
<td>number</td>
<td><code>10</code></td>
<td>Pixels to offset from top when calculating position of scroll.</td>
</tr>
<tr>
<td><code>method</code></td>
<td>string</td>
<td><code>auto</code></td>
<td>Finds which section the spied element is in. <code>auto</code> will choose the best method to get scroll coordinates. <code>offset</code> will use the <a href="https://developer.mozilla.org/en-US/docs/Web/API/Element/getBoundingClientRect"><code>Element.getBoundingClientRect()</code></a> method to get scroll coordinates. <code>position</code> will use the <a href="https://developer.mozilla.org/en-US/docs/Web/API/HTMLElement/offsetTop"><code>HTMLElement.offsetTop</code></a> and <a href="https://developer.mozilla.org/en-US/docs/Web/API/HTMLElement/offsetLeft"><code>HTMLElement.offsetLeft</code></a> properties to get scroll coordinates.</td>
</tr>
<tr>
<td><code>target</code></td>
<td>string | jQuery object | DOM element</td>
<td></td>
<td>Specifies element to apply Scrollspy plugin.</td>
</tr>
</tbody>
</table>
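As a sketch of the JavaScript equivalent, the options from the table above can also be passed to the constructor (`#navbar-example`, `100` and `'auto'` are example values only):
```js
var scrollSpy = new bootstrap.ScrollSpy(document.body, {
  target: '#navbar-example',
  offset: 100,
  method: 'auto'
})
```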
### Events
<table class="table">
<thead>
<tr>
<th style="width: 150px;">Event type</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td><code>activate.bs.scrollspy</code></td>
<td>This event fires on the scroll element whenever a new item becomes activated by the scrollspy.</td>
</tr>
</tbody>
</table>
```js
var firstScrollSpyEl = document.querySelector('[data-bs-spy="scroll"]')
firstScrollSpyEl.addEventListener('activate.bs.scrollspy', function () {
// do something...
})
```
| PypiClean |
/MAGINE-0.1.5.tar.gz/MAGINE-0.1.5/magine/networks/databases/signor.py | import logging
import os
import networkx as nx
import pandas as pd
from magine.data.storage import network_data_dir
from magine.networks.standards import edge_standards
_p_name = os.path.join(network_data_dir, 'signor.p.gz')
from magine.logging import get_logger
logger = get_logger(__name__, log_level=logging.INFO)
def download_signor():
logger.info("Downloading SIGNOR")
col_names = [
'ENTITYA', 'TYPEA', 'IDA', 'DATABASEA', 'ENTITYB', 'TYPEB', 'IDB',
'DATABASEB', 'EFFECT', 'MECHANISM', 'RESIDUE', 'SEQUENCE', 'TAX_ID',
'CELL_DATA', 'TISSUE_DATA', 'MODULATOR_COMPLEX', 'TARGET_COMPLEX',
'MODIFICATIONA', 'MODASEQ', 'MODIFICATIONB', 'MODBSEQ', 'PMID',
'DIRECT', 'SENTENCE', 'SIGNOR_ID', 'NA1', 'NA2', 'NA3']
table = pd.read_csv('https://signor.uniroma2.it/getData.php?organism=9606',
names=col_names, delimiter='\t', index_col=None,
error_bad_lines=False, encoding='utf-8'
)
# filter out non direct
table = table.loc[table['DIRECT'] == 't']
# Filter out non descriptive
table = table.loc[~table['MECHANISM'].isnull()]
# Drop SIGNOR edges, these are generally complexes
table = table[~(table['DATABASEA'] == 'SIGNOR')]
table = table[~(table['DATABASEB'] == 'SIGNOR')]
# Not sure what they mean, so will remove. Ideally other DBs have this info
table = table[~(table['MECHANISM'] == 'post transcriptional regulation')]
col_a = ['ENTITYA', 'TYPEA', 'IDA', 'DATABASEA']
col_b = ['ENTITYB', 'TYPEB', 'IDB', 'DATABASEB']
cols = ['name', 'species_type', 'id', 'db']
species_a = table[col_a].copy()
species_b = table[col_b].copy()
species_a.rename(columns={i: j for i, j in zip(col_a, cols)}, inplace=True)
species_b.rename(columns={i: j for i, j in zip(col_b, cols)}, inplace=True)
species_a.drop_duplicates(inplace=True)
species_b.drop_duplicates(inplace=True)
all_species = pd.concat([species_a, species_b])
all_species.drop_duplicates(inplace=True)
def map_to_activate_inhibit(row):
effect = ''
mechanism = row['MECHANISM']
if 'down-regulates' in row['EFFECT']:
effect = 'inhibit'
elif 'up-regulates' in row['EFFECT']:
effect = 'activate'
if mechanism in edge_standards:
mechanism = edge_standards[mechanism]
elif mechanism == 'transcriptional regulation':
if effect == 'inhibit':
mechanism = 'repression'
elif effect == 'activate':
mechanism = 'expression'
if effect == '':
return mechanism
else:
return "|".join([effect, mechanism])
# relabel edge types
table['interactionType'] = table.apply(map_to_activate_inhibit, axis=1)
table['databaseSource'] = 'SIGNOR'
table['pmid'] = table['PMID']
table['source'] = table['ENTITYA']
table['target'] = table['ENTITYB']
protein_graph = nx.from_pandas_edgelist(
table,
'source',
'target',
edge_attr=['interactionType', 'databaseSource'],
create_using=nx.DiGraph()
)
# add names to graph
for row in all_species.values:
name, species_type, id_name, db = row
if species_type != 'protein':
species_type = 'compound'
if species_type == 'protein':
species_type = 'gene'
protein_graph.add_node(name, databaseSource='SIGNOR',
speciesType=species_type)
nx.write_gpickle(protein_graph, _p_name)
logger.info("Done downloading SIGNOR")
def load_signor(fresh_download=False):
"""
    Load the SIGNOR interaction network
    Parameters
    ----------
    fresh_download: bool
        Download a fresh copy of the network
Returns
-------
nx.DiGraph
"""
if not os.path.exists(_p_name) or fresh_download:
print("Downloading Signor network!")
download_signor()
if not os.path.exists(_p_name):
        raise FileNotFoundError("Error downloading the SIGNOR network.")
tmp_graph = nx.read_gpickle(_p_name)
logger.info("SIGNOR : {} nodes and {} edges".format(len(tmp_graph.nodes),
len(tmp_graph.edges)))
return tmp_graph
if __name__ == '__main__':
download_signor() | PypiClean |
/Abies-0.0.5.tar.gz/Abies-0.0.5/extern/pybind11/docs/installing.rst | .. _installing:
Installing the library
######################
There are several ways to get the pybind11 source, which lives at
`pybind/pybind11 on GitHub <https://github.com/pybind/pybind11>`_. The pybind11
developers recommend one of the first three ways listed here, submodule, PyPI,
or conda-forge, for obtaining pybind11.
.. _include_as_a_submodule:
Include as a submodule
======================
When you are working on a project in Git, you can use the pybind11 repository
as a submodule. From your git repository, use:
.. code-block:: bash
git submodule add -b stable ../../pybind/pybind11 extern/pybind11
git submodule update --init
This assumes you are placing your dependencies in ``extern/``, and that you are
using GitHub; if you are not using GitHub, use the full https or ssh URL
instead of the relative URL ``../../pybind/pybind11`` above. Some other servers
also require the ``.git`` extension (GitHub does not).
From here, you can now include ``extern/pybind11/include``, or you can use
the various integration tools (see :ref:`compiling`) pybind11 provides directly
from the local folder.
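As a quick sketch of the CMake route (``example`` and ``example.cpp`` are placeholder names here), the submodule can then be consumed from your ``CMakeLists.txt``:
.. code-block:: cmake
   add_subdirectory(extern/pybind11)
   pybind11_add_module(example example.cpp)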
Include with PyPI
=================
You can download the sources and CMake files as a Python package from PyPI
using Pip. Just use:
.. code-block:: bash
pip install pybind11
This will provide pybind11 in a standard Python package format. If you want
pybind11 available directly in your environment root, you can use:
.. code-block:: bash
pip install "pybind11[global]"
This is not recommended if you are installing with your system Python, as it
will add files to ``/usr/local/include/pybind11`` and
``/usr/local/share/cmake/pybind11``, so unless that is what you want, it is
recommended only for use in virtual environments or your ``pyproject.toml``
file (see :ref:`compiling`).
Include with conda-forge
========================
You can use pybind11 with conda packaging via `conda-forge
<https://github.com/conda-forge/pybind11-feedstock>`_:
.. code-block:: bash
conda install -c conda-forge pybind11
Include with vcpkg
==================
You can download and install pybind11 using the Microsoft `vcpkg
<https://github.com/Microsoft/vcpkg/>`_ dependency manager:
.. code-block:: bash
git clone https://github.com/Microsoft/vcpkg.git
cd vcpkg
./bootstrap-vcpkg.sh
./vcpkg integrate install
vcpkg install pybind11
The pybind11 port in vcpkg is kept up to date by Microsoft team members and
community contributors. If the version is out of date, please `create an issue
or pull request <https://github.com/Microsoft/vcpkg/>`_ on the vcpkg
repository.
Global install with brew
========================
The brew package manager (Homebrew on macOS, or Linuxbrew on Linux) has a
`pybind11 package
<https://github.com/Homebrew/homebrew-core/blob/master/Formula/pybind11.rb>`_.
To install:
.. code-block:: bash
brew install pybind11
.. We should list Conan, and possibly a few other C++ package managers (hunter,
.. perhaps). Conan has a very clean CMake integration that would be good to show.
Other options
=============
Other locations you can find pybind11 are `listed here
<https://repology.org/project/python:pybind11/versions>`_; these are maintained
by various packagers and the community.
| PypiClean |
/BeanDateUtils-0.5.tar.gz/BeanDateUtils-0.5/date_utils/date_utils.py | import datetime
import time
def compute_interval_days(interval_days, input_time=None):
"""
    :param interval_days: (int) day offset, may be negative
    :param input_time: (datetime.date) reference date; defaults to today
    Compute the date that lies ``interval_days`` days away from the given time;
    if no time is passed in, the current date is used.
    :return: datetime.date
    example:
        now-time: 2019-08-06
        compute_interval_days(-3)  ==> 2019-08-03
        compute_interval_days(3)  ==> 2019-08-09
        compute_interval_days(3, datetime.datetime(2019, 8, 3)) ==> 2019-08-06
"""
if not input_time:
input_time = datetime.date.today()
else:
if isinstance(input_time, datetime.date):
pass
else:
            raise Exception("invalid input_time type; expected datetime.date, e.g. datetime.datetime(y, m, d)")
output_time = input_time + datetime.timedelta(days=int(interval_days))
return output_time
def compute_time_stamp():
"""
    Return the current time as a millisecond timestamp.
"""
now_time = time.time()
now_time_stamp = int(round(now_time * 1000))
return now_time_stamp
def compute_time_delta(interval_time=0):
"""
    Return the millisecond timestamp of the moment ``interval_time`` seconds before the current time.
    :param interval_time: interval in seconds
"""
now_time = datetime.datetime.now()
past_time = now_time - datetime.timedelta(seconds=interval_time)
t_time = time.mktime(past_time.timetuple()) + past_time.microsecond / 1E6
return int(round(t_time * 1000))
def compute_time(interval=0, time_format="%Y-%m-%d"):
"""
    :param
        interval: day offset from the current time
        time_format: string format of the returned time
    :return
        the resulting date as a string, by default in yyyy-mm-dd format
        example: interval = 1 means one day from now, -1 means one day ago
"""
if not isinstance(interval, int):
raise ValueError("input value error, params must be int type")
now_time = datetime.datetime.now()
if interval >= 0:
real_time = now_time + datetime.timedelta(days=interval)
else:
real_time = now_time - datetime.timedelta(days=-interval)
day_str = real_time.strftime(time_format)
return day_str
def compute_last_week_time(input_time=None, time_format="%Y-%m-%d"):
"""
    :param
        input_time: time to compute from; defaults to now
        time_format: string format of the input/output time
    :return start_time, end_time
        Computed from the given time: from the Saturday of the week before last to last Saturday.
        Return format: yyyy-mm-dd, yyyy-mm-dd
"""
if not input_time:
input_time = datetime.datetime.now()
else:
input_time = change_str_to_datetime(input_time, str_format=time_format)
end_time = input_time - datetime.timedelta(days=(input_time.weekday() + 2))
start_time = end_time - datetime.timedelta(days=7)
start_time_str = start_time.strftime(time_format)
end_time_str = end_time.strftime(time_format)
return start_time_str, end_time_str
def compute_week_time(input_time=None, time_format="%Y-%m-%d"):
"""
    :param
        input_time: time to compute from; defaults to now
        time_format: string format of the input/output time
    :return start_time, end_time
        Computed from the given time: from last Saturday to this Saturday.
        Return format: yyyy-mm-dd, yyyy-mm-dd
"""
if not input_time:
input_time = datetime.datetime.now()
else:
input_time = change_str_to_datetime(input_time, str_format=time_format)
interval_time = 5 - input_time.weekday()
end_time = input_time + datetime.timedelta(days=interval_time)
start_time = end_time - datetime.timedelta(days=7)
start_time_str = start_time.strftime(time_format)
end_time_str = end_time.strftime(time_format)
return start_time_str, end_time_str
def compute_next_week_first_day(input_time=None, time_format="%Y-%m-%d"):
"""
    :param
        input_time: time to compute from; defaults to now
        time_format: string format of the input/output time
    :return day_time
        Compute the first day (Monday) of the week after the given time.
        Return format: yyyy-mm-dd
"""
if not input_time:
input_time = datetime.datetime.now()
else:
input_time = change_str_to_datetime(input_time, str_format=time_format)
interval_time = 7 - input_time.weekday()
first_day_time = input_time + datetime.timedelta(days=interval_time)
first_day_time_str = first_day_time.strftime(time_format)
return first_day_time_str
def change_str_to_datetime(input_time=None, str_format="%Y-%m-%d"):
"""
    :param input_time: time string to convert, e.g. "2019-08-09"; defaults to the current time
    :param str_format: format of the time string, default %Y-%m-%d
:return:
"""
spec_time = input_time or change_datetime_to_str(str_format=str_format)
return datetime.datetime.strptime(spec_time, str_format)
def change_datetime_to_str(input_time=None, str_format="%Y-%m-%d"):
"""
    :param input_time: datetime to convert; defaults to the current time
    :param str_format: format of the output string, default %Y-%m-%d
:return:
"""
spec_time = input_time or datetime.datetime.now()
return spec_time.strftime(str_format)
def compute_interval_day(input_time):
"""
    Given a time, compute the number of whole days between the current time and the given time.
    :param input_time
        type: datetime.datetime (only its date part is used)
"""
now_time = datetime.datetime.now()
day_format = "%Y-%m-%d"
end_time_str = now_time.strftime(day_format)
start_time_str = input_time.strftime(day_format)
end_sec = time.mktime(time.strptime(end_time_str, day_format))
start_sec = time.mktime(time.strptime(start_time_str, day_format))
work_days = int((end_sec - start_sec) / (24 * 60 * 60))
return work_days
def compute_timestamp(time_str, time_format):
"""
    Return the timestamp (second precision) of the given time string.
"""
return int(time.mktime(datetime.datetime.strptime(time_str, time_format).timetuple()))
if __name__ == '__main__':
# print(compute_interval_days(-3))
# print(compute_interval_days(-3, datetime.datetime(2019, 8, 3, 15, 0, 0)))
# a.compute_interval_days(-3)
# print(DateComputeUtil.compute_time_stamp())
# print(DateComputeUtil.compute_time_delta(60))
# print(DateComputeUtil.compute_time_delta(0))
# print(DateComputeUtil.compute_time())
# print(compute_last_week_time())
# print(compute_week_time())
# print(DateComputeUtil.compute_interval_day(datetime.datetime(2019,7,1,10,30,30)))
# print()
# print(change_datetime_to_str())
# print(change_str_to_datetime())
print(compute_next_week_first_day()) | PypiClean |
/Helios_Scanner-1.1-py3-none-any.whl/helios/core/login.py | from base64 import b64encode
import re
import sys
import logging
import requests
try:
from urllib import parse
except ImportError:
import urlparse as parse
class LoginAction:
session_obj = None
headers = {}
cookies = {}
logger = None
def __init__(self, logger=logging.INFO):
self.session_obj = requests.session()
self.logger = logging.getLogger("Login")
self.logger.setLevel(logger)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logger)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
self.logger.addHandler(ch)
def basic_auth(self, up_str):
value = b64encode(up_str.encode()).decode()
header = "Basic %s" % value
self.logger.info("Using header Authorization: %s" % header)
self.headers['Authorization'] = header
def login_form(self, url, data, headers={}):
try:
data = dict(parse.parse_qsl(data))
except Exception as e:
self.logger.error("Login Error: login payload should be in urlencoded format: %s" % str(e))
return None
for k in headers:
self.headers[k] = headers[k]
self.session_obj.get(url)
self.logger.debug("Login payload: %s" % str(data))
return self.session_obj.post(url, data=data, headers=self.headers, allow_redirects=False)
def login_form_csrf(self, url, data, headers={}, token_url=None):
try:
data = dict(parse.parse_qsl(data))
except Exception as e:
self.logger.error("Login Error: login payload should be in urlencoded format: %s" % str(e))
return None
if not token_url:
token_url = url
for k in headers:
self.headers[k] = headers[k]
page = self.session_obj.get(token_url)
page_data = {}
for x in re.findall('(<input.+?>)', page.text, re.IGNORECASE):
n = re.search('name="(.+?)"', x, re.IGNORECASE)
v = re.search('value="(.+?)"', x, re.IGNORECASE)
if n and v:
page_data[n.group(1)] = v.group(1)
for custom in data:
page_data[custom] = data[custom]
self.logger.debug("Login payload: %s" % str(page_data))
return self.session_obj.post(url, data=page_data, headers=self.headers, allow_redirects=False)
def pre_parse(self, options):
headers = options.login_header
if not options.login_type:
return None
self.logger.info("Running Login Sequence")
if headers:
try:
for header in headers:
s = header.split(':')
key = s[0].strip()
value = ':'.join(s[1:])
self.headers[key] = value
except Exception as e:
self.logger.warning("Login Error: Error processing headers: %s" % str(e))
if options.login_type == "basic":
creds = options.login_creds
if not creds:
self.logger.error("Login Error: --login-creds is required for Basic Auth")
return None
self.basic_auth(creds)
if options.login_type == "header":
if not headers:
self.logger.error("Login Error: at least one --login-header is required for Header Auth")
return None
token_url = options.token_url
url = options.login_url
data = options.login_data
if not token_url:
token_url = url
try:
if options.login_type == "form":
return self.login_form(url=token_url, data=data)
if options.login_type == "form-csrf":
return self.login_form_csrf(url=url, data=data, token_url=token_url)
except Exception as e:
self.logger.error("Error in Login sequence: %s" % str(e)) | PypiClean |
/Cnc25D-0.1.10.tar.gz/Cnc25D-0.1.10/cnc25d/bagel.py | ################################################################
# header for Python / FreeCAD compatibility
################################################################
import cnc25d_api
cnc25d_api.importing_freecad()
#print("FreeCAD.Version:", FreeCAD.Version())
#FreeCAD.Console.PrintMessage("Hello from PrintMessage!\n") # avoid using this method because it is not printed in the FreeCAD GUI
################################################################
# import
################################################################
import math
import sys, argparse
#from datetime import datetime
#import os, errno
#import re # to detect .dxf or .svg
#import Tkinter # to display the outline in a small GUI
#
import Part
#from FreeCAD import Base
# 3rd parties
#import svgwrite
#from dxfwrite import DXFEngine
# cnc25d
################################################################
# bagel constraint_constructor
################################################################
def bagel_constraint_constructor(ai_parser, ai_variant=0):
"""
Add arguments relative to the bagel
"""
r_parser = ai_parser
## diameters
r_parser.add_argument('--bagel_axle_diameter','--bgad', action='store', type=float, default=10.0,
help="Set the axle_diameter. Default: 10.0")
r_parser.add_argument('--bagel_axle_internal_diameter','--bgaid', action='store', type=float, default=20.0,
help="Set the axle_internal_diameter. If equal to 0.0, set to 2*bagel_axle_diameter. Default: 0.0")
r_parser.add_argument('--bagel_axle_external_diameter','--bgaed', action='store', type=float, default=0.0,
help="Set the axle_external_diameter. If equal to 0.0, set to 2*bagel_axle_internal_diameter. Default: 0.0")
## axle_holes
if(ai_variant!=1):
r_parser.add_argument('--axle_hole_nb','--ahn', action='store', type=int, default=6,
help="Set the number of the axle-holes. If equal to 0, no axle-hole is created. Default: 6")
r_parser.add_argument('--axle_hole_diameter','--ahd', action='store', type=float, default=4.0,
help="Set the diameter of the axle-holes. Default: 4.0")
r_parser.add_argument('--axle_hole_position_diameter','--ahpd', action='store', type=float, default=0.0,
help="Set the diameter of the axle-hole position circle. If equal to 0.0, set to (axle_internal_diameter+axle_external_diameter)/2. Default: 0.0")
r_parser.add_argument('--axle_hole_angle','--aha', action='store', type=float, default=0.0,
help="Set the position angle of the first axle-hole. Default: 0.0")
## part thickness
r_parser.add_argument('--external_bagel_thickness','--ebt', action='store', type=float, default=2.0,
help="Set the thickness (z-size) of the external_bagel part. Default: 2.0")
if(ai_variant!=1):
r_parser.add_argument('--middle_bagel_thickness','--mbt', action='store', type=float, default=6.0,
help="Set the thickness (z-size) of the middle_bagel part. Default: 6.0")
r_parser.add_argument('--internal_bagel_thickness','--ibt', action='store', type=float, default=2.0,
help="Set the thickness (z-size) of the internal_bagel part. Default: 2.0")
### manufacturing
r_parser.add_argument('--bagel_extra_cut_thickness','--bgect', action='store', type=float, default=0.0,
help="Set the extra-cut-thickness for the internal-bagel cut. It can be used to compensate the manufacturing process or to check the 3D assembly with FreeCAD. Default: 0.0")
### output
# return
return(r_parser)
################################################################
# bagel constraint_check
################################################################
def bagel_constraint_check(c):
""" check the bagel constraint c and set the dynamic default values
"""
### precision
radian_epsilon = math.pi/1000
################################################################
# parameter check and dynamic-default values
################################################################
# bagel_axle_diameter
c['bagel_axle_radius'] = c['bagel_axle_diameter']/2.0
if(c['bagel_axle_radius']<radian_epsilon):
print("ERR152: Error, bagel_axle_radius {:0.3f} is too small".format(c['bagel_axle_radius']))
sys.exit(2)
# bagel_axle_internal_diameter
c['bagel_axle_internal_radius'] = c['bagel_axle_internal_diameter']/2.0
if(c['bagel_axle_internal_radius']==0):
c['bagel_axle_internal_radius'] = 2*c['bagel_axle_radius']
if(c['bagel_axle_internal_radius']<c['bagel_axle_radius']):
print("ERR159: Error, bagel_axle_internal_radius {:0.3f} must be bigger than bagel_axle_radius {:0.3f}".format(c['bagel_axle_internal_radius'], c['bagel_axle_radius']))
sys.exit(2)
# bagel_axle_external_diameter
c['bagel_axle_external_radius'] = c['bagel_axle_external_diameter']/2.0
if(c['bagel_axle_external_radius']==0):
c['bagel_axle_external_radius'] = 2*c['bagel_axle_internal_radius']
if(c['bagel_axle_external_radius']<c['bagel_axle_internal_radius']+radian_epsilon):
print("ERR166: Error, bagel_axle_external_radius {:0.3f} must be bigger than bagel_axle_internal_radius {:0.3f}".format(c['bagel_axle_external_radius'], c['bagel_axle_internal_radius']))
sys.exit(2)
# axle_hole_nb
c['axle_hole_radius'] = 0.0
c['axle_hole_position_radius'] = 0.0
if(c['axle_hole_nb']>0):
# axle_hole_diameter
c['axle_hole_radius'] = c['axle_hole_diameter']/2.0
if(c['axle_hole_radius']<radian_epsilon):
print("ERR173: Error, axle_hole_radius {:0.3f} must be strictly positive".format(c['axle_hole_radius']))
sys.exit(2)
# axle_hole_position_diameter
c['axle_hole_position_radius'] = c['axle_hole_position_diameter']/2.0
if(c['axle_hole_position_radius']==0.0):
c['axle_hole_position_radius'] = (c['bagel_axle_internal_radius']+c['bagel_axle_external_radius'])/2.0
if(c['axle_hole_position_radius'] < c['bagel_axle_internal_radius']+c['axle_hole_radius']+radian_epsilon):
print("ERR180: Error: axle_hole_position_radius {:0.3f} is too small compare to bagel_axle_internal_radius {:0.3f} and axle_hole_radius {:0.3f}".format(c['axle_hole_position_radius'], c['bagel_axle_internal_radius'], c['axle_hole_radius']))
sys.exit(2)
if(c['axle_hole_position_radius'] > c['bagel_axle_external_radius']-c['axle_hole_radius']-radian_epsilon):
print("ERR183: Error: axle_hole_position_radius {:0.3f} is too big compare to bagel_axle_external_radius {:0.3f} and axle_hole_radius {:0.3f}".format(c['axle_hole_position_radius'], c['bagel_axle_external_radius'], c['axle_hole_radius']))
sys.exit(2)
# axle_hole_angle
# external_bagel_thickness
if(c['external_bagel_thickness']<radian_epsilon):
print("ERR188: Error, external_bagel_thickness {:0.3f} is too small".format(c['external_bagel_thickness']))
sys.exit(2)
# middle_bagel_thickness
if(c['middle_bagel_thickness']<radian_epsilon):
print("ERR192: Error, middle_bagel_thickness {:0.3f} is too small".format(c['middle_bagel_thickness']))
sys.exit(2)
# internal_bagel_thickness
if(c['internal_bagel_thickness']<radian_epsilon):
print("ERR196: Error, internal_bagel_thickness {:0.3f} is too small".format(c['internal_bagel_thickness']))
sys.exit(2)
# bagel_extra_cut_thickness
if(abs(c['bagel_extra_cut_thickness'])>c['bagel_axle_radius']/2.0):
print("ERR212: Error, bagel_extra_cut_thickness {:0.3f} is too big compare to bagel_axle_radius {:0.3f}".format(c['bagel_extra_cut_thickness'], c['bagel_axle_radius']))
sys.exit(2)
return(c)
################################################################
# bagel 2D-figures construction
################################################################
def bagel_2d_construction(c):
""" construct the 2D-figures with outlines at the A-format for the bagel design
"""
### external_bagel
external_bagel = []
external_bagel.append((0.0, 0.0, c['bagel_axle_external_radius']))
external_bagel.append((0.0, 0.0, c['bagel_axle_radius']))
for i in range(c['axle_hole_nb']):
a = i*2*math.pi/c['axle_hole_nb']+c['axle_hole_angle']
external_bagel.append((0.0+c['axle_hole_position_radius']*math.cos(a), 0.0+c['axle_hole_position_radius']*math.sin(a), c['axle_hole_radius']))
### middle_bagel
middle_bagel = []
middle_bagel.append((0.0, 0.0, c['bagel_axle_internal_radius']))
middle_bagel.append((0.0, 0.0, c['bagel_axle_radius']))
### internal_bagel
# intermediate parameters
cut_y = c['bagel_extra_cut_thickness']
cut_x1 = math.sqrt(c['bagel_axle_radius']**2+cut_y**2)
cut_x2 = math.sqrt(c['bagel_axle_external_radius']**2+cut_y**2)
# outline construction
ib_ol_A = []
ib_ol_A.append((cut_x2, cut_y, 0))
ib_ol_A.append((0.0, c['bagel_axle_external_radius'], -1*cut_x2, cut_y, 0))
ib_ol_A.append((-1*cut_x1, cut_y, 0))
ib_ol_A.append((0.0, c['bagel_axle_radius'], cut_x1, cut_y, 0))
ib_ol_A.append((cut_x2, cut_y, 0))
#ib_ol = cnc25d_api.cnc_cut_outline(ib_ol_A, "internal_bagel_ol")
# figure construction
ib_figure = []
ib_figure.append(ib_ol_A)
ib_figure_2 = []
ib_figure_2.append(ib_ol_A)
if(c['axle_hole_nb']>0):
a_step = math.pi/c['axle_hole_nb']
    for i in range(c['axle_hole_nb'] // 2):
a = (2*i+1)*a_step
ib_figure.append((0.0+c['axle_hole_position_radius']*math.cos(a), 0.0+c['axle_hole_position_radius']*math.sin(a), c['axle_hole_radius']))
ib_figure = cnc25d_api.rotate_and_translate_figure(ib_figure, 0.0, 0.0, c['axle_hole_angle']-a_step, 0.0, 0.0)
    for i in range(c['axle_hole_nb'] // 2):
a = (2*i+1+(c['axle_hole_nb']%2))*a_step
ib_figure_2.append((0.0+c['axle_hole_position_radius']*math.cos(a), 0.0+c['axle_hole_position_radius']*math.sin(a), c['axle_hole_radius']))
ib_figure_2 = cnc25d_api.rotate_and_translate_figure(ib_figure_2, 0.0, 0.0, c['axle_hole_angle']-a_step, 0.0, 0.0)
internal_bagel = ib_figure
internal_bagel_2 = cnc25d_api.rotate_and_translate_figure(ib_figure_2, 0.0, 0.0, math.pi, 0.0, 0.0)
### figures output
# part_list
part_list = []
part_list.append(external_bagel)
part_list.append(middle_bagel)
part_list.append(internal_bagel)
part_list.append(internal_bagel_2)
# part_list_figure
x_space = 2.2*c['bagel_axle_external_radius']
part_list_figure = []
for i in range(len(part_list)):
part_list_figure.extend(cnc25d_api.rotate_and_translate_figure(part_list[i], 0.0, 0.0, 0.0, i*x_space, 0.0))
## bagel_part_overview
bagel_assembly_figure = []
bagel_assembly_figure.extend(cnc25d_api.rotate_and_translate_figure(external_bagel, 0.0, 0.0, 0.0, 0, 0))
bagel_assembly_figure.extend(cnc25d_api.rotate_and_translate_figure(middle_bagel, 0.0, 0.0, 0.0, 0, 0))
bagel_assembly_figure.extend(cnc25d_api.rotate_and_translate_figure(internal_bagel, 0.0, 0.0, 0.0, 0, 0))
bagel_assembly_figure.extend(cnc25d_api.rotate_and_translate_figure(internal_bagel_2, 0.0, 0.0, 0.0, 0, 0))
###
r_figures = {}
r_height = {}
r_figures['external_bagel'] = external_bagel
r_height['external_bagel'] = c['external_bagel_thickness']
r_figures['middle_bagel'] = middle_bagel
r_height['middle_bagel'] = c['middle_bagel_thickness']
r_figures['internal_bagel'] = internal_bagel
r_height['internal_bagel'] = c['internal_bagel_thickness']
r_figures['internal_bagel_2'] = internal_bagel_2
r_height['internal_bagel_2'] = c['internal_bagel_thickness']
r_figures['part_list'] = part_list_figure
r_height['part_list'] = 1.0
r_figures['bagel_assembly'] = bagel_assembly_figure
r_height['bagel_assembly'] = 1.0
###
return((r_figures, r_height))
################################################################
# bagel 3D assembly-configuration construction
################################################################
def bagel_3d_construction(c):
""" construct the 3D-assembly-configurations of the bagel design
"""
### freecad-object assembly configuration
# intermediate parameters
aer = c['bagel_axle_external_radius']
air = c['bagel_axle_internal_radius']
ebt = c['external_bagel_thickness']
mbt = c['middle_bagel_thickness']
ibt = c['internal_bagel_thickness']
# conf1
bagel_assembly_conf1 = []
bagel_assembly_conf1.append(('external_bagel', -1*aer, -1*aer, 2*aer, 2*aer, ebt, 'i', 'xz', -1*aer, 0, -1*aer))
bagel_assembly_conf1.append(('middle_bagel', -1*air, -1*air, 2*air, 2*air, mbt, 'i', 'xz', -1*air, ebt, -1*air))
bagel_assembly_conf1.append(('internal_bagel', -1*aer, -1*aer, 2*aer, 2*aer, ibt, 'i', 'xz', -1*aer, ebt+mbt, -1*aer))
bagel_assembly_conf1.append(('internal_bagel_2', -1*aer, -1*aer, 2*aer, 2*aer, ibt, 'i', 'xz', -1*aer, ebt+mbt, -1*aer))
###
r_assembly = {}
r_slice = {}
r_assembly['bagel_assembly_conf1'] = bagel_assembly_conf1
r_slice['bagel_assembly_conf1'] = ()
#
return((r_assembly, r_slice))
################################################################
# bagel_info
################################################################
def bagel_info(c):
""" create the text info related to the bagel design
"""
# b_parameter_info
b_parameter_info = """
bagel diameters:
bagel_axle_radius: {:0.3f} diameter: {:0.3f}
bagel_axle_internal_radius: {:0.3f} diameter: {:0.3f}
bagel_axle_external_radius: {:0.3f} diameter: {:0.3f}
""".format(c['bagel_axle_radius'], 2*c['bagel_axle_radius'], c['bagel_axle_internal_radius'], 2*c['bagel_axle_internal_radius'], c['bagel_axle_external_radius'], 2*c['bagel_axle_external_radius'])
b_parameter_info += """
axle_fastening_holes:
axle_hole_nb: {:d}
axle_hole_radius: {:0.3f} diameter: {:0.3f}
axle_hole_position_radius: {:0.3f} diameter: {:0.3f}
axle_hole_angle: {:0.3f} (radian) {:0.3f} (degree)
""".format(c['axle_hole_nb'], c['axle_hole_radius'], 2*c['axle_hole_radius'], c['axle_hole_position_radius'], 2*c['axle_hole_position_radius'], c['axle_hole_angle'], c['axle_hole_angle']*180/math.pi)
b_parameter_info += """
bagel tickness:
external_bagel_thickness: {:0.3f}
middle_bagel_thickness: {:0.3f}
internal_bagel_thickness: {:0.3f}
""".format(c['external_bagel_thickness'], c['middle_bagel_thickness'], c['internal_bagel_thickness'])
b_parameter_info += """
manufacturing:
bagel_extra_cut_thickness: {:0.3f}
""".format(c['bagel_extra_cut_thickness'])
#print(b_parameter_info)
return(b_parameter_info)
################################################################
# self test
################################################################
def bagel_self_test():
"""
This is the non-regression test of bagel.
Look at the Tk window to check errors.
"""
r_tests = [
["simplest test" , ""],
["no axle_holes" , "--axle_hole_nb 0"],
["odd number of axle_holes" , "--axle_hole_nb 5"],
["extra cut" , "--bagel_extra_cut_thickness 1.0"],
["extra cut negative" , "--bagel_extra_cut_thickness -2.0"],
["outputfile" , "--output_file_basename test_output/bagel_self_test.dxf"],
["last test" , "--bagel_axle_internal_diameter 25.0"]]
return(r_tests)
################################################################
# bagel design declaration
################################################################
class bagel(cnc25d_api.bare_design):
""" bagel design
"""
def __init__(self, constraint={}):
""" configure the bagel design
"""
figs = []
self.design_setup(
s_design_name = "bagel_design",
f_constraint_constructor = bagel_constraint_constructor,
f_constraint_check = bagel_constraint_check,
f_2d_constructor = bagel_2d_construction,
d_2d_simulation = {},
f_3d_constructor = bagel_3d_construction,
f_info = bagel_info,
l_display_figure_list = ['bagel_assembly'],
s_default_simulation = "",
#l_2d_figure_file_list = [],
l_2d_figure_file_list = figs,
l_3d_figure_file_list = figs,
l_3d_conf_file_list = ['bagel_assembly_conf1'],
f_cli_return_type = None,
l_self_test_list = bagel_self_test())
self.apply_constraint(constraint)
################################################################
# main
################################################################
# this works with python and freecad :)
if __name__ == "__main__":
FreeCAD.Console.PrintMessage("bagel.py says hello!\n")
my_bagel = bagel()
my_bagel.cli()
if(cnc25d_api.interpretor_is_freecad()):
my_bagel.apply_cli("--bagel_extra_cut_thickness 1.0")
#my_bagel.outline_display()
Part.show(my_bagel.get_fc_obj_3dconf('bagel_assembly_conf1')) | PypiClean |
/FlaskCms-0.0.4.tar.gz/FlaskCms-0.0.4/flask_cms/static/js/ckeditor/plugins/codemirror/js/mode/coffeescript/coffeescript.js | (function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"));
else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
CodeMirror.defineMode("coffeescript", function(conf) {
var ERRORCLASS = "error";
function wordRegexp(words) {
return new RegExp("^((" + words.join(")|(") + "))\\b");
}
var operators = /^(?:->|=>|\+[+=]?|-[\-=]?|\*[\*=]?|\/[\/=]?|[=!]=|<[><]?=?|>>?=?|%=?|&=?|\|=?|\^=?|\~|!|\?)/;
var delimiters = /^(?:[()\[\]{},:`=;]|\.\.?\.?)/;
var identifiers = /^[_A-Za-z$][_A-Za-z$0-9]*/;
var properties = /^(@|this\.)[_A-Za-z$][_A-Za-z$0-9]*/;
var wordOperators = wordRegexp(["and", "or", "not",
"is", "isnt", "in",
"instanceof", "typeof"]);
var indentKeywords = ["for", "while", "loop", "if", "unless", "else",
"switch", "try", "catch", "finally", "class"];
var commonKeywords = ["break", "by", "continue", "debugger", "delete",
"do", "in", "of", "new", "return", "then",
"this", "throw", "when", "until"];
var keywords = wordRegexp(indentKeywords.concat(commonKeywords));
indentKeywords = wordRegexp(indentKeywords);
var stringPrefixes = /^('{3}|\"{3}|['\"])/;
var regexPrefixes = /^(\/{3}|\/)/;
var commonConstants = ["Infinity", "NaN", "undefined", "null", "true", "false", "on", "off", "yes", "no"];
var constants = wordRegexp(commonConstants);
// Tokenizers
function tokenBase(stream, state) {
// Handle scope changes
if (stream.sol()) {
if (state.scope.align === null) state.scope.align = false;
var scopeOffset = state.scope.offset;
if (stream.eatSpace()) {
var lineOffset = stream.indentation();
if (lineOffset > scopeOffset && state.scope.type == "coffee") {
return "indent";
} else if (lineOffset < scopeOffset) {
return "dedent";
}
return null;
} else {
if (scopeOffset > 0) {
dedent(stream, state);
}
}
}
if (stream.eatSpace()) {
return null;
}
var ch = stream.peek();
// Handle docco title comment (single line)
if (stream.match("####")) {
stream.skipToEnd();
return "comment";
}
// Handle multi line comments
if (stream.match("###")) {
state.tokenize = longComment;
return state.tokenize(stream, state);
}
// Single line comment
if (ch === "#") {
stream.skipToEnd();
return "comment";
}
// Handle number literals
if (stream.match(/^-?[0-9\.]/, false)) {
var floatLiteral = false;
// Floats
if (stream.match(/^-?\d*\.\d+(e[\+\-]?\d+)?/i)) {
floatLiteral = true;
}
if (stream.match(/^-?\d+\.\d*/)) {
floatLiteral = true;
}
if (stream.match(/^-?\.\d+/)) {
floatLiteral = true;
}
if (floatLiteral) {
// prevent from getting extra . on 1..
if (stream.peek() == "."){
stream.backUp(1);
}
return "number";
}
// Integers
var intLiteral = false;
// Hex
if (stream.match(/^-?0x[0-9a-f]+/i)) {
intLiteral = true;
}
// Decimal
if (stream.match(/^-?[1-9]\d*(e[\+\-]?\d+)?/)) {
intLiteral = true;
}
// Zero by itself with no other piece of number.
if (stream.match(/^-?0(?![\dx])/i)) {
intLiteral = true;
}
if (intLiteral) {
return "number";
}
}
// Handle strings
if (stream.match(stringPrefixes)) {
state.tokenize = tokenFactory(stream.current(), false, "string");
return state.tokenize(stream, state);
}
// Handle regex literals
if (stream.match(regexPrefixes)) {
if (stream.current() != "/" || stream.match(/^.*\//, false)) { // prevent highlight of division
state.tokenize = tokenFactory(stream.current(), true, "string-2");
return state.tokenize(stream, state);
} else {
stream.backUp(1);
}
}
// Handle operators and delimiters
if (stream.match(operators) || stream.match(wordOperators)) {
return "operator";
}
if (stream.match(delimiters)) {
return "punctuation";
}
if (stream.match(constants)) {
return "atom";
}
if (stream.match(keywords)) {
return "keyword";
}
if (stream.match(identifiers)) {
return "variable";
}
if (stream.match(properties)) {
return "property";
}
// Handle non-detected items
stream.next();
return ERRORCLASS;
}
function tokenFactory(delimiter, singleline, outclass) {
return function(stream, state) {
while (!stream.eol()) {
stream.eatWhile(/[^'"\/\\]/);
if (stream.eat("\\")) {
stream.next();
if (singleline && stream.eol()) {
return outclass;
}
} else if (stream.match(delimiter)) {
state.tokenize = tokenBase;
return outclass;
} else {
stream.eat(/['"\/]/);
}
}
if (singleline) {
if (conf.mode.singleLineStringErrors) {
outclass = ERRORCLASS;
} else {
state.tokenize = tokenBase;
}
}
return outclass;
};
}
function longComment(stream, state) {
while (!stream.eol()) {
stream.eatWhile(/[^#]/);
if (stream.match("###")) {
state.tokenize = tokenBase;
break;
}
stream.eatWhile("#");
}
return "comment";
}
function indent(stream, state, type) {
type = type || "coffee";
var offset = 0, align = false, alignOffset = null;
for (var scope = state.scope; scope; scope = scope.prev) {
if (scope.type === "coffee") {
offset = scope.offset + conf.indentUnit;
break;
}
}
if (type !== "coffee") {
align = null;
alignOffset = stream.column() + stream.current().length;
} else if (state.scope.align) {
state.scope.align = false;
}
state.scope = {
offset: offset,
type: type,
prev: state.scope,
align: align,
alignOffset: alignOffset
};
}
function dedent(stream, state) {
if (!state.scope.prev) return;
if (state.scope.type === "coffee") {
var _indent = stream.indentation();
var matched = false;
for (var scope = state.scope; scope; scope = scope.prev) {
if (_indent === scope.offset) {
matched = true;
break;
}
}
if (!matched) {
return true;
}
while (state.scope.prev && state.scope.offset !== _indent) {
state.scope = state.scope.prev;
}
return false;
} else {
state.scope = state.scope.prev;
return false;
}
}
function tokenLexer(stream, state) {
var style = state.tokenize(stream, state);
var current = stream.current();
// Handle "." connected identifiers
if (current === ".") {
style = state.tokenize(stream, state);
current = stream.current();
if (/^\.[\w$]+$/.test(current)) {
return "variable";
} else {
return ERRORCLASS;
}
}
// Handle scope changes.
if (current === "return") {
state.dedent += 1;
}
if (((current === "->" || current === "=>") &&
!state.lambda &&
!stream.peek())
|| style === "indent") {
indent(stream, state);
}
var delimiter_index = "[({".indexOf(current);
if (delimiter_index !== -1) {
indent(stream, state, "])}".slice(delimiter_index, delimiter_index+1));
}
if (indentKeywords.exec(current)){
indent(stream, state);
}
if (current == "then"){
dedent(stream, state);
}
if (style === "dedent") {
if (dedent(stream, state)) {
return ERRORCLASS;
}
}
delimiter_index = "])}".indexOf(current);
if (delimiter_index !== -1) {
while (state.scope.type == "coffee" && state.scope.prev)
state.scope = state.scope.prev;
if (state.scope.type == current)
state.scope = state.scope.prev;
}
if (state.dedent > 0 && stream.eol() && state.scope.type == "coffee") {
if (state.scope.prev) state.scope = state.scope.prev;
state.dedent -= 1;
}
return style;
}
var external = {
startState: function(basecolumn) {
return {
tokenize: tokenBase,
scope: {offset:basecolumn || 0, type:"coffee", prev: null, align: false},
lastToken: null,
lambda: false,
dedent: 0
};
},
token: function(stream, state) {
var fillAlign = state.scope.align === null && state.scope;
if (fillAlign && stream.sol()) fillAlign.align = false;
var style = tokenLexer(stream, state);
if (fillAlign && style && style != "comment") fillAlign.align = true;
state.lastToken = {style:style, content: stream.current()};
      if (stream.eol() && state.lambda) {
state.lambda = false;
}
return style;
},
indent: function(state, text) {
if (state.tokenize != tokenBase) return 0;
var scope = state.scope;
var closer = text && "])}".indexOf(text.charAt(0)) > -1;
if (closer) while (scope.type == "coffee" && scope.prev) scope = scope.prev;
var closes = closer && scope.type === text.charAt(0);
if (scope.align)
return scope.alignOffset - (closes ? 1 : 0);
else
return (closes ? scope.prev : scope).offset;
},
lineComment: "#",
fold: "indent"
};
return external;
});
CodeMirror.defineMIME("text/x-coffeescript", "coffeescript");
});
/CCC-2.0.1.tar.gz/CCC-2.0.1/ccc/template_design/views.py |
from __future__ import unicode_literals
from datetime import datetime
import pendulum
from django.conf import settings
from django.http import JsonResponse, QueryDict
from django.http.response import HttpResponse
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.base import TemplateView
from django.views.generic.edit import DeleteView, FormView
from django.views.generic.list import ListView
from django.utils.decorators import method_decorator
from ccc.campaigns.models import Campaign
from ccc.campaigns.utils.shortcut import save_or_update_premium_template
from ccc.common.mixins import LoginRequiredMixin
from ccc.packages.decorators import check_user_subscription
from ccc.template_design.forms import (EmailTemplateDesignForm,
WebTemplateDesignForm)
from ccc.template_design.gcloud_tasks import (email_and_web_analytics,
save_contact,
save_email_template)
from ccc.template_design.models import (CampaignTemplateDesign,
LandingPageFormData, TemplateDesign)
from slugify import slugify
class TemplateDesignDeleteView(LoginRequiredMixin, DeleteView):
template_name = 'crm/template_design/email_template_form.html'
model = TemplateDesign
success_url = reverse_lazy('srm:template-design:design_landing_page')
@method_decorator(check_user_subscription)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
class MyPageTemplateView(TemplateView):
template_name = 'crm/template_design/my_page.html'
model = TemplateDesign
@method_decorator(csrf_exempt)
@method_decorator(check_user_subscription)
def dispatch(self, request, *args, **kwargs):
return super(MyPageTemplateView, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
campaign_template = CampaignTemplateDesign.objects.get(template__pk=kwargs.get('pk'))
LandingPageFormData.objects.create(template=campaign_template.template, data=request.POST.dict())
save_contact(user_id=request.user.id, campaign_id=campaign_template.campaign_id, data=request.POST.dict())
return self.get(request, *args, **kwargs)
def get_analytics_data(self, obj):
qdict = QueryDict('', mutable=True)
qdict.update({'type': 'web', 'template_id': obj.id, 'template_name': obj.name})
return qdict.urlencode()
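    # Illustrative note (not part of the original source): for a template with
    # id 7 and name "landing", the query string built above would look like
    # "type=web&template_id=7&template_name=landing" (parameter order may vary),
    # ready to be appended to an analytics URL.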
def get_context_data(self, **kwargs):
context = super(MyPageTemplateView, self).get_context_data(**kwargs)
obj = self.model.objects.get(pk=kwargs['pk'])
context['analytic_data'] = self.get_analytics_data(obj)
context['object_data'] = obj
if self.request.method == 'POST':
            confirmation_message = "Thanks, your registration was received"
context['confirmation_message'] = confirmation_message
return context
class TemplateDesignFormView(LoginRequiredMixin, FormView):
"""Template Design View """
template_name = 'crm/template_design/email_template_form.html'
form_class = EmailTemplateDesignForm
model = TemplateDesign
success_url = reverse_lazy('srm:template-design:design_landing_page')
@method_decorator(check_user_subscription)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def get_instance(self, pk):
"""Get instance"""
return self.model.objects.get(pk=pk)
def get_context_data(self, **kwargs):
context = super(TemplateDesignFormView, self).get_context_data(**kwargs)
context['client_id'] = settings.BEE_CLIENT_ID
context['client_secret'] = settings.BEE_SECRET_ID
return context
def get_form_kwargs(self):
kwargs = super(TemplateDesignFormView, self).get_form_kwargs()
pk = self.kwargs.get('pk')
if pk:
if not (self.request.method == 'POST' and self.get_instance(pk).is_public):
kwargs.update({'instance': self.get_instance(pk)})
return kwargs
def get_initial(self):
"""
Returns the initial data to use for forms on this view.
"""
initial = super(TemplateDesignFormView, self).get_initial()
initial['user'] = self.request.user
pk = self.kwargs.get('pk')
if pk and self.get_instance(pk).is_public and not (self.request.method == 'POST'):
initial['name'] = ''
return initial
def form_valid(self, form):
"""
If the form is valid, redirect to the supplied URL.
"""
form.save(commit=False)
form.instance.update_thumbnail()
form.instance.name_slug = slugify(form.instance.name)
form.save()
# Async task
save_email_template(**{'id': form.instance.id}).execute()
next_url = self.request.GET.get('next')
if next_url:
self.success_url = next_url
return JsonResponse({'next_url': self.get_success_url()})
class WebTemplateDesignFormView(TemplateDesignFormView):
template_name = 'crm/template_design/web_template_form.html'
form_class = WebTemplateDesignForm
@method_decorator(check_user_subscription)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def get_form_kwargs(self):
kwargs = super(WebTemplateDesignFormView, self).get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def get_initial(self):
"""
Returns the initial data to use for forms on this view.
"""
pk = self.kwargs.get('pk')
initial = super(WebTemplateDesignFormView, self).get_initial()
template_type = 'web'
initial['template_type'] = 'web'
if pk and self.request.method == 'GET':
campaign_tem_design = self.get_instance(
pk).campaigntemplatedesign_set.filter(template_type=template_type).first()
if campaign_tem_design:
initial['campaign'] = campaign_tem_design.campaign_id
return initial
def form_valid(self, form):
"""
If the form is valid, redirect to the supplied URL.
"""
form.save()
campaign = Campaign.objects.get(pk=form.data['campaign'])
save_or_update_premium_template(campaign, form.instance, 'web')
# form.instance.update_thumbnail()
# Async task
save_email_template(**{'id': form.instance.id}).execute()
next_url = self.request.GET.get('next')
if next_url:
self.success_url = next_url
return JsonResponse({'next_url': self.get_success_url()})
class TemplateDesignListView(LoginRequiredMixin, ListView):
"""Template Design View """
# template_name = 'ccc/template_design/base.html'
template_name = 'crm/landing-page.html'
model = TemplateDesign
queryset = TemplateDesign.objects.all()
@method_decorator(check_user_subscription)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def get_queryset(self):
queryset = super(TemplateDesignListView, self).get_queryset()
return queryset.filter(user=self.request.user, is_active=True, is_public=False, template_type="web")
def get_context_data(self, **kwargs):
context = super(TemplateDesignListView, self).get_context_data(**kwargs)
context['public_web_template'] = TemplateDesign.objects.filter(
is_active=True, is_public=True, template_type="web")
return context
class EmailAndWebAnalyticsView(ListView):
@method_decorator(check_user_subscription)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
data = request.GET.dict()
data.update({'created_date': datetime.utcnow().replace(tzinfo=pendulum.timezone('UTC')).isoformat(),
'ip_address': self.request.META.get('HTTP_X_REAL_IP')})
# async task
email_and_web_analytics(**data).execute()
        return HttpResponse("Done")
/Auptimizer-2.0.tar.gz/Auptimizer-2.0/src/aup/Proposer/hpbandster/examples/example_5_mnist.py |
import os
import pickle
import argparse
import hpbandster.core.nameserver as hpns
import hpbandster.core.result as hpres
from hpbandster.optimizers import BOHB
import logging
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description='Example 5 - CNN on MNIST')
parser.add_argument('--min_budget', type=float, help='Minimum number of epochs for training.', default=1)
parser.add_argument('--max_budget', type=float, help='Maximum number of epochs for training.', default=9)
parser.add_argument('--n_iterations', type=int, help='Number of iterations performed by the optimizer', default=16)
parser.add_argument('--worker', help='Flag to turn this into a worker process', action='store_true')
parser.add_argument('--run_id', type=str, help='A unique run id for this optimization run. An easy option is to use the job id of the cluster\'s scheduler.')
parser.add_argument('--nic_name',type=str, help='Which network interface to use for communication.', default='lo')
parser.add_argument('--shared_directory',type=str, help='A directory that is accessible for all processes, e.g. a NFS share.', default='.')
parser.add_argument('--backend',help='Toggles which worker is used. Choose between a pytorch and a keras implementation.', choices=['pytorch', 'keras'], default='keras')
args=parser.parse_args()
if args.backend == 'pytorch':
from example_5_pytorch_worker import PyTorchWorker as worker
else:
from example_5_keras_worker import KerasWorker as worker
# Every process has to lookup the hostname
host = hpns.nic_name_to_host(args.nic_name)
if args.worker:
import time
time.sleep(5) # short artificial delay to make sure the nameserver is already running
w = worker(run_id=args.run_id, host=host, timeout=120)
w.load_nameserver_credentials(working_directory=args.shared_directory)
w.run(background=False)
exit(0)
# This example shows how to log live results. This is most useful
# for really long runs, where intermediate results could already be
# interesting. The core.result submodule contains the functionality to
# read the two generated files (results.json and configs.json) and
# create a Result object.
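# Illustrative sketch (not part of the original example): once a run has
# finished, the two logged files could be reloaded into a Result object with
# something like
#
#   previous_run = hpres.logged_results_to_HBS_result(args.shared_directory)
#
# (function name as provided by hpbandster.core.result; treat it as an
# assumption if your version differs).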
result_logger = hpres.json_result_logger(directory=args.shared_directory, overwrite=False)
# Start a nameserver:
NS = hpns.NameServer(run_id=args.run_id, host=host, port=0, working_directory=args.shared_directory)
ns_host, ns_port = NS.start()
# Start local worker
w = worker(run_id=args.run_id, host=host, nameserver=ns_host, nameserver_port=ns_port, timeout=120)
w.run(background=True)
# Run an optimizer
bohb = BOHB( configspace = worker.get_configspace(),
run_id = args.run_id,
host=host,
nameserver=ns_host,
nameserver_port=ns_port,
result_logger=result_logger,
min_budget=args.min_budget, max_budget=args.max_budget,
)
res = bohb.run(n_iterations=args.n_iterations)
# store results
with open(os.path.join(args.shared_directory, 'results.pkl'), 'wb') as fh:
pickle.dump(res, fh)
# shutdown
bohb.shutdown(shutdown_workers=True)
NS.shutdown()
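# Illustrative sketch (not part of the original script): to scale this example
# out, start one master process and any number of extra workers that share the
# same --run_id and --shared_directory, e.g.
#
#   python example_5_mnist.py --run_id bohb_mnist --shared_directory /tmp/bohb &
#   python example_5_mnist.py --worker --run_id bohb_mnist --shared_directory /tmp/bohb
#
# The run id and shared directory here are placeholder values; any unique id
# and a path reachable by all processes will do.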
/Andy_mess_server-0.0.1-py3-none-any.whl/server/server/config_window.py |
from PyQt5.QtWidgets import QDialog, QLabel, QLineEdit, QPushButton, QFileDialog, QMessageBox
from PyQt5.QtCore import Qt
import os
class ConfigWindow(QDialog):
"""Класс окно настроек."""
def __init__(self, config):
super().__init__()
self.config = config
self.initUI()
def initUI(self):
"""Настройки окна"""
self.setFixedSize(365, 260)
self.setWindowTitle('Настройки сервера')
self.setAttribute(Qt.WA_DeleteOnClose)
self.setModal(True)
        # Label for the database file:
self.db_path_label = QLabel('Путь до файла базы данных: ', self)
self.db_path_label.move(10, 10)
self.db_path_label.setFixedSize(240, 15)
        # Line edit with the database path
self.db_path = QLineEdit(self)
self.db_path.setFixedSize(250, 20)
self.db_path.move(10, 30)
self.db_path.setReadOnly(True)
        # Path selection button.
self.db_path_select = QPushButton('Обзор...', self)
self.db_path_select.move(275, 28)
        # Label for the database file name field
self.db_file_label = QLabel('Имя файла базы данных: ', self)
self.db_file_label.move(10, 68)
self.db_file_label.setFixedSize(180, 15)
        # Input field for the file name
self.db_file = QLineEdit(self)
self.db_file.move(200, 66)
self.db_file.setFixedSize(150, 20)
        # Label for the port number
self.port_label = QLabel('Номер порта для соединений:', self)
self.port_label.move(10, 108)
self.port_label.setFixedSize(180, 15)
        # Input field for the port number
self.port = QLineEdit(self)
self.port.move(200, 108)
self.port.setFixedSize(150, 20)
        # Label for the connection address
self.ip_label = QLabel('С какого IP принимаем соединения:', self)
self.ip_label.move(10, 148)
self.ip_label.setFixedSize(180, 15)
        # Label with a reminder about the empty field.
self.ip_label_note = QLabel(
' оставьте это поле пустым, чтобы\n принимать соединения с любых адресов.',
self)
self.ip_label_note.move(10, 168)
self.ip_label_note.setFixedSize(500, 30)
        # Input field for the IP
self.ip = QLineEdit(self)
self.ip.move(200, 148)
self.ip.setFixedSize(150, 20)
        # Settings save button
self.save_btn = QPushButton('Сохранить', self)
self.save_btn.move(190, 220)
        # Window close button
self.close_button = QPushButton('Закрыть', self)
self.close_button.move(275, 220)
self.close_button.clicked.connect(self.close)
self.db_path_select.clicked.connect(self.open_file_dialog)
self.show()
self.db_path.insert(self.config['SETTINGS']['Database_path'])
self.db_file.insert(self.config['SETTINGS']['Database_file'])
self.port.insert(self.config['SETTINGS']['Default_port'])
self.ip.insert(self.config['SETTINGS']['Listen_Address'])
self.save_btn.clicked.connect(self.save_server_config)
def open_file_dialog(self):
"""Метод обработчик открытия окна выбора папки."""
global dialog
dialog = QFileDialog(self)
path = dialog.getExistingDirectory()
path = path.replace('/', '\\')
self.db_path.clear()
self.db_path.insert(path)
def save_server_config(self):
"""
        Method for saving the settings.
        Checks that the entered data is valid and,
        if everything is correct, saves the ini file.
"""
global config_window
message = QMessageBox()
self.config['SETTINGS']['Database_path'] = self.db_path.text()
self.config['SETTINGS']['Database_file'] = self.db_file.text()
try:
port = int(self.port.text())
except ValueError:
message.warning(self, 'Ошибка', 'Порт должен быть числом')
else:
self.config['SETTINGS']['Listen_Address'] = self.ip.text()
if 1023 < port < 65536:
self.config['SETTINGS']['Default_port'] = str(port)
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = os.path.join(dir_path, '..')
with open(f"{dir_path}/{'server_dist+++.ini'}", 'w') as conf:
self.config.write(conf)
message.information(
self, 'OK', 'Настройки успешно сохранены!')
else:
message.warning(
                    self, 'Ошибка', 'Порт должен быть от 1024 до 65536')
/LMSQuery-0.4.1.tar.gz/LMSQuery-0.4.1/README.md |
# LMSQuery
[](https://pypi.python.org/pypi/LMSQuery)
[](https://pypi.python.org/pypi/LMSQuery)
[](https://landscape.io/github/roberteinhaus/lmsquery/master)
**Query library for Logitech Media Server**
This library provides easy-to-use functions for sending queries to a Logitech Media Server (https://github.com/Logitech/slimserver).
### Installation
pip install lmsquery
### Usage Example
import lmsquery
lms = lmsquery.LMSQuery('127.0.0.1', '9000') # use ip and port of lms server
players = lms.get_players()
for player in players:
        print(player['name'])
/AlgoSolver-0.1.4.tar.gz/AlgoSolver-0.1.4/docs/tutorial.md |
# Tutorial
Here is an example of someone using the [AlgoSolver](https://github.com/nickbohm555/AlgoSolver/) library for a competitive programming competition or a coding interview.
## Prerequisites
Before starting, make sure to run the following:
```
pip install AlgoSolver
```
Also at the top of the file, make sure to import the library.
```
from algo_solver import sorting as sort
from algo_solver import searching as search
from algo_solver import graphs as graph
```
## Example problem Number 1
It is quite common to perform graph searches such as BFS/DFS in technical interviews or CP competitions. Here is an example: find the shortest path from every point on the graph to point 'F'. Let's say the input is given below:
```
input_graph = {
'A': set(['B', 'C']),
'B': set(['A', 'D', 'E']),
'C': set(['A', 'F']),
'D': set(['B']),
'E': set(['B', 'F']),
'F': set(['C', 'E']),
}
find_shortest(input_graph, 'F'):
    return the distance of the shortest path from each node to 'F'.
```
## Solution
With AlgoSolver, save time on implementation.
```python
from algo_solver import graphs as graph
def find_shortest(input_graph, target):
    # loop over every node (key) in the graph
    for key in input_graph:
        starting = key
        distance = graph.find_distance_unweighted_graph(input_graph, starting, target)
        print(distance)
```
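
For instance, calling the helper on the sample graph above prints one distance per node (a sketch that assumes `find_distance_unweighted_graph` returns the number of edges on the shortest path, and that the keys iterate in insertion order A-F):

```python
find_shortest(input_graph, 'F')
# 2   (A -> C -> F)
# 2   (B -> E -> F)
# 1   (C -> F)
# 3   (D -> B -> E -> F)
# 1   (E -> F)
# 0   (F itself)
```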
## Example problem Number 2
It is quite common to perform binary search in technical interviews or CP competitions. Here is an example: find whether the number 22 exists in a sorted array. We could do this with a linear scan in O(n) time, but using binary search we bring that down to O(log n).
```
input_array = [4, 6, 7, 10, 12, 15, 22, 99, 111, 256, 777]
does_exist(input_array, 22):
return whether or not 22 exists in the array.
```
## Solution
With AlgoSolver, save time on implementation.
```python
from algo_solver import searching as search
def does_exist(input_array, num):
    index = search.binary_search(input_array, num)
    if index == -1:
        print('does not exist')
    else:
        print('Exists - found in O(log n) time')
```
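
As a quick sanity check (again a sketch, assuming `binary_search` returns the index of the match or -1 when the value is absent, as the solution above relies on):

```python
does_exist(input_array, 22)   # prints: Exists - found in O(log n) time
does_exist(input_array, 23)   # prints: does not exist
```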
## Example problem Number 3
It is quite common to perform sorting operations in technical interviews or CP competitions. Here is an example: sort an array in O(n) time with the knowledge that the max value is not greater than 50. Here we can perform bucket sort on the array; no need to memorize the implementation.
```
input_array = [33,22,11,21,34,32,19, 23, 39, 1, 4, 6]
```
## Solution
With AlgoSolver, save time on implementation.
```python
from algo_solver import sorting as sort
new_arr = sort.bucket_sort(input_array)
print(new_arr)
```
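
For the sample array above (assuming `bucket_sort` returns a new sorted list, as the snippet uses it), the printed result would be:

```python
new_arr = sort.bucket_sort(input_array)
print(new_arr)  # [1, 4, 6, 11, 19, 21, 22, 23, 32, 33, 34, 39]
```

Note that bucket sort relies on the bounded value range stated in the problem; for arbitrary values a comparison sort in O(n log n) is the safer default.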
/Bis-Miner-3.11.1.tar.gz/Bis-Miner-3.11.0/Orange/widgets/unsupervised/owkmeans.py |
from concurrent.futures import Future  # pylint: disable=unused-import
from typing import Optional, List, Dict # pylint: disable=unused-import
import numpy as np
from AnyQt.QtCore import Qt, QTimer, QAbstractTableModel, QModelIndex, QThread, \
pyqtSlot as Slot
from AnyQt.QtGui import QIntValidator
from AnyQt.QtWidgets import QGridLayout, QTableView
from Orange.clustering import KMeans
from Orange.clustering.kmeans import KMeansModel # pylint: disable=unused-import
from Orange.data import Table, Domain, DiscreteVariable, ContinuousVariable
from Orange.widgets import widget, gui
from Orange.widgets.settings import Setting
from Orange.widgets.utils.annotated_data import get_next_name, \
ANNOTATED_DATA_SIGNAL_NAME, add_columns
from Orange.widgets.utils.concurrent import ThreadExecutor, FutureSetWatcher
from Orange.widgets.utils.sql import check_sql_input
from Orange.widgets.widget import Input, Output
class ClusterTableModel(QAbstractTableModel):
def __init__(self, parent=None):
super().__init__(parent)
self.scores = []
self.start_k = 0
def rowCount(self, index=QModelIndex()):
return 0 if index.isValid() else len(self.scores)
def columnCount(self, index=QModelIndex()):
return 1
def flags(self, index):
if isinstance(self.scores[index.row()], str):
return Qt.NoItemFlags
return Qt.ItemIsEnabled | Qt.ItemIsSelectable
def set_scores(self, scores, start_k):
self.modelAboutToBeReset.emit()
self.scores = scores
self.start_k = start_k
self.modelReset.emit()
def clear_scores(self):
self.modelAboutToBeReset.emit()
self.scores = []
self.start_k = 0
self.modelReset.emit()
def data(self, index, role=Qt.DisplayRole):
score = self.scores[index.row()]
valid = not isinstance(score, str)
if role == Qt.DisplayRole:
return "{:.3f}".format(score) if valid else "NA"
elif role == Qt.TextAlignmentRole:
return Qt.AlignVCenter | Qt.AlignLeft
elif role == Qt.ToolTipRole and not valid:
return score
elif role == gui.BarRatioRole and valid:
return score
def headerData(self, row, orientation, role=Qt.DisplayRole):
if role == Qt.DisplayRole:
return str(row + self.start_k)
class Task:
futures = [] # type: List[Future]
watcher = ... # type: FutureSetWatcher
cancelled = False
def __init__(self, futures, watcher):
self.futures = futures
self.watcher = watcher
def cancel(self):
self.cancelled = True
for f in self.futures:
f.cancel()
class NotEnoughData(ValueError):
pass
class OWKMeans(widget.OWWidget):
name = "k均值聚类"
description = "用基于轮廓的质量估计做k均值聚类算法"
icon = "icons/KMeans.svg"
priority = 2100
class Inputs:
data = Input("数据", Table)
class Outputs:
annotated_data = Output(
ANNOTATED_DATA_SIGNAL_NAME, Table, default=True,
replaces=["Annotated Data"]
)
centroids = Output("质心", Table)
class Error(widget.OWWidget.Error):
failed = widget.Msg("聚类失败\n错误:{}")
not_enough_data = widget.Msg(
"({})个唯一数据实例太少,在集群{}中"
)
class Warning(widget.OWWidget.Warning):
no_silhouettes = widget.Msg(
"大于5000个样本轮廓分数不计算"
)
not_enough_data = widget.Msg(
"({})个唯一数据实例太少,在集群{}中"
)
INIT_METHODS = "Initialize with KMeans++", "Random initialization"
resizing_enabled = False
buttons_area_orientation = Qt.Vertical
k = Setting(3)
k_from = Setting(2)
k_to = Setting(8)
optimize_k = Setting(False)
max_iterations = Setting(300)
n_init = Setting(10)
smart_init = Setting(0) # KMeans++
auto_commit = Setting(True)
settings_version = 2
@classmethod
def migrate_settings(cls, settings, version):
# type: (Dict, int) -> None
if version < 2:
if 'auto_apply' in settings:
settings['auto_commit'] = settings['auto_apply']
del settings['auto_apply']
def __init__(self):
super().__init__()
self.data = None # type: Optional[Table]
self.clusterings = {}
self.__executor = ThreadExecutor(parent=self)
self.__task = None # type: Optional[Task]
layout = QGridLayout()
bg = gui.radioButtonsInBox(
self.controlArea, self, "optimize_k", orientation=layout,
box="Number of Clusters",
# Because commit is only wrapped when creating the auto_commit
# buttons, we can't pass it as the callback here, so we can add
# this hacky lambda to call the wrapped commit when necessary
callback=lambda: self.commit(),
)
layout.addWidget(
gui.appendRadioButton(bg, "Fixed:", addToLayout=False), 1, 1)
sb = gui.hBox(None, margin=0)
gui.spin(
sb, self, "k", minv=2, maxv=30,
controlWidth=60, alignment=Qt.AlignRight, callback=self.update_k)
gui.rubber(sb)
layout.addWidget(sb, 1, 2)
layout.addWidget(
gui.appendRadioButton(bg, "From", addToLayout=False), 2, 1)
ftobox = gui.hBox(None)
ftobox.layout().setContentsMargins(0, 0, 0, 0)
layout.addWidget(ftobox, 2, 2)
gui.spin(
ftobox, self, "k_from", minv=2, maxv=29,
controlWidth=60, alignment=Qt.AlignRight,
callback=self.update_from)
gui.widgetLabel(ftobox, "to")
gui.spin(
ftobox, self, "k_to", minv=3, maxv=30,
controlWidth=60, alignment=Qt.AlignRight,
callback=self.update_to)
gui.rubber(ftobox)
box = gui.vBox(self.controlArea, "Initialization")
gui.comboBox(
box, self, "smart_init", items=self.INIT_METHODS,
callback=self.invalidate)
layout = QGridLayout()
gui.widgetBox(box, orientation=layout)
layout.addWidget(gui.widgetLabel(None, "Re-runs: "), 0, 0, Qt.AlignLeft)
sb = gui.hBox(None, margin=0)
layout.addWidget(sb, 0, 1)
gui.lineEdit(
sb, self, "n_init", controlWidth=60,
valueType=int, validator=QIntValidator(), callback=self.invalidate)
layout.addWidget(
gui.widgetLabel(None, "Maximum iterations: "), 1, 0, Qt.AlignLeft)
sb = gui.hBox(None, margin=0)
layout.addWidget(sb, 1, 1)
gui.lineEdit(
sb, self, "max_iterations", controlWidth=60, valueType=int,
validator=QIntValidator(), callback=self.invalidate)
self.apply_button = gui.auto_commit(
self.buttonsArea, self, "auto_commit", "Apply", box=None,
commit=self.commit)
gui.rubber(self.controlArea)
box = gui.vBox(self.mainArea, box="Silhouette Scores")
self.mainArea.setVisible(self.optimize_k)
self.table_model = ClusterTableModel(self)
table = self.table_view = QTableView(self.mainArea)
table.setModel(self.table_model)
table.setSelectionMode(QTableView.SingleSelection)
table.setSelectionBehavior(QTableView.SelectRows)
table.setItemDelegate(gui.ColoredBarItemDelegate(self, color=Qt.cyan))
table.selectionModel().selectionChanged.connect(self.select_row)
table.setMaximumWidth(200)
table.horizontalHeader().setStretchLastSection(True)
table.horizontalHeader().hide()
table.setShowGrid(False)
box.layout().addWidget(table)
def adjustSize(self):
self.ensurePolished()
s = self.sizeHint()
self.resize(s)
def update_k(self):
self.optimize_k = False
self.commit()
def update_from(self):
self.k_to = max(self.k_from + 1, self.k_to)
self.optimize_k = True
self.commit()
def update_to(self):
self.k_from = min(self.k_from, self.k_to - 1)
self.optimize_k = True
self.commit()
def enough_data_instances(self, k):
"""k cannot be larger than the number of data instances."""
return len(self.data) >= k
@staticmethod
def _compute_clustering(data, k, init, n_init, max_iter, silhouette):
# type: (Table, int, str, int, int, bool) -> KMeansModel
if k > len(data):
raise NotEnoughData()
return KMeans(
n_clusters=k, init=init, n_init=n_init, max_iter=max_iter,
compute_silhouette_score=silhouette,
)(data)
@Slot(int, int)
def __progress_changed(self, n, d):
assert QThread.currentThread() is self.thread()
assert self.__task is not None
self.progressBarSet(100 * n / d)
@Slot(int, Exception)
def __on_exception(self, idx, ex):
assert QThread.currentThread() is self.thread()
assert self.__task is not None
if isinstance(ex, NotEnoughData):
self.Error.not_enough_data(len(self.data), self.k_from + idx)
# Only show failed message if there is only 1 k to compute
elif not self.optimize_k:
self.Error.failed(str(ex))
self.clusterings[self.k_from + idx] = str(ex)
@Slot(int, object)
def __clustering_complete(self, _, result):
# type: (int, KMeansModel) -> None
assert QThread.currentThread() is self.thread()
assert self.__task is not None
self.clusterings[result.k] = result
@Slot()
def __commit_finished(self):
assert QThread.currentThread() is self.thread()
assert self.__task is not None
assert self.data is not None
self.__task = None
self.setBlocking(False)
self.progressBarFinished()
if self.optimize_k:
self.update_results()
if self.optimize_k and all(isinstance(self.clusterings[i], str)
for i in range(self.k_from, self.k_to + 1)):
# Show the error of the last clustering
self.Error.failed(self.clusterings[self.k_to])
self.send_data()
def __launch_tasks(self, ks):
# type: (List[int]) -> None
"""Execute clustering in separate threads for all given ks."""
futures = [self.__executor.submit(
self._compute_clustering,
data=self.data,
k=k,
init=['random', 'k-means++'][self.smart_init],
n_init=self.n_init,
max_iter=self.max_iterations,
silhouette=True,
) for k in ks]
watcher = FutureSetWatcher(futures)
watcher.resultReadyAt.connect(self.__clustering_complete)
watcher.progressChanged.connect(self.__progress_changed)
watcher.exceptionReadyAt.connect(self.__on_exception)
watcher.doneAll.connect(self.__commit_finished)
self.__task = Task(futures, watcher)
self.progressBarInit(processEvents=False)
self.setBlocking(True)
def cancel(self):
if self.__task is not None:
task, self.__task = self.__task, None
task.cancel()
task.watcher.resultReadyAt.disconnect(self.__clustering_complete)
task.watcher.progressChanged.disconnect(self.__progress_changed)
task.watcher.exceptionReadyAt.disconnect(self.__on_exception)
task.watcher.doneAll.disconnect(self.__commit_finished)
self.progressBarFinished()
self.setBlocking(False)
def run_optimization(self):
if not self.enough_data_instances(self.k_from):
self.Error.not_enough_data(len(self.data), self.k_from)
return
if not self.enough_data_instances(self.k_to):
self.Warning.not_enough_data(len(self.data), self.k_to)
return
needed_ks = [k for k in range(self.k_from, self.k_to + 1)
if k not in self.clusterings]
if needed_ks:
self.__launch_tasks(needed_ks)
else:
# If we don't need to recompute anything, just set the results to
# what they were before
self.update_results()
def cluster(self):
# Check if the k already has a computed clustering
if self.k in self.clusterings:
self.send_data()
return
# Check if there is enough data
if not self.enough_data_instances(self.k):
self.Error.not_enough_data(len(self.data), self.k)
return
self.__launch_tasks([self.k])
def commit(self):
self.cancel()
self.clear_messages()
# Some time may pass before the new scores are computed, so clear the
# old scores to avoid potential confusion. Hiding the mainArea could
# cause flickering when the clusters are computed quickly, so this is
# the better alternative
self.table_model.clear_scores()
self.mainArea.setVisible(self.optimize_k and self.data is not None)
if self.data is None:
self.send_data()
return
if self.optimize_k:
self.run_optimization()
else:
self.cluster()
QTimer.singleShot(100, self.adjustSize)
def invalidate(self, force=False):
self.cancel()
self.Error.clear()
self.Warning.clear()
self.clusterings = {}
self.table_model.clear_scores()
if force:
self.unconditional_commit()
else:
self.commit()
def update_results(self):
scores = [
mk if isinstance(mk, str) else mk.silhouette for mk in (
self.clusterings[k] for k in range(self.k_from, self.k_to + 1))
]
best_row = max(
range(len(scores)), default=0,
key=lambda x: 0 if isinstance(scores[x], str) else scores[x]
)
self.table_model.set_scores(scores, self.k_from)
self.table_view.selectRow(best_row)
self.table_view.setFocus(Qt.OtherFocusReason)
self.table_view.resizeRowsToContents()
def selected_row(self):
indices = self.table_view.selectedIndexes()
if indices:
return indices[0].row()
def select_row(self):
self.send_data()
def send_data(self):
if self.optimize_k:
row = self.selected_row()
k = self.k_from + row if row is not None else None
else:
k = self.k
km = self.clusterings.get(k)
if not self.data or km is None or isinstance(km, str):
self.Outputs.annotated_data.send(None)
self.Outputs.centroids.send(None)
return
domain = self.data.domain
cluster_var = DiscreteVariable(
get_next_name(domain, "Cluster"),
values=["C%d" % (x + 1) for x in range(km.k)]
)
clust_ids = km(self.data)
silhouette_var = ContinuousVariable(get_next_name(domain, "Silhouette"))
if km.silhouette_samples is not None:
self.Warning.no_silhouettes.clear()
scores = np.arctan(km.silhouette_samples) / np.pi + 0.5
else:
self.Warning.no_silhouettes()
scores = np.nan
new_domain = add_columns(domain, metas=[cluster_var, silhouette_var])
new_table = self.data.transform(new_domain)
new_table.get_column_view(cluster_var)[0][:] = clust_ids.X.ravel()
new_table.get_column_view(silhouette_var)[0][:] = scores
centroids = Table(Domain(km.pre_domain.attributes), km.centroids)
self.Outputs.annotated_data.send(new_table)
self.Outputs.centroids.send(centroids)
@Inputs.data
@check_sql_input
def set_data(self, data):
self.data = data
self.invalidate()
def send_report(self):
# False positives (Setting is not recognized as int)
# pylint: disable=invalid-sequence-index
if self.optimize_k and self.selected_row() is not None:
k_clusters = self.k_from + self.selected_row()
else:
k_clusters = self.k
init_method = self.INIT_METHODS[self.smart_init]
init_method = init_method[0].lower() + init_method[1:]
self.report_items((
("Number of clusters", k_clusters),
("Optimization", "{}, {} re-runs limited to {} steps".format(
init_method, self.n_init, self.max_iterations))))
if self.data is not None:
self.report_data("Data", self.data)
if self.optimize_k:
self.report_table(
"Silhouette scores for different numbers of clusters",
self.table_view)
def onDeleteWidget(self):
self.cancel()
super().onDeleteWidget()
def main(): # pragma: no cover
import sys
from AnyQt.QtWidgets import QApplication
a = QApplication(sys.argv)
ow = OWKMeans()
d = Table(sys.argv[1] if len(sys.argv) > 1 else "iris.tab")
ow.set_data(d)
ow.show()
a.exec()
ow.saveSettings()
if __name__ == "__main__": # pragma: no cover
    main()
/GroopM-0.3.4.tar.gz/GroopM-0.3.4/groopm/ellipsoid.py |
__author__ = "Michael Imelfort"
__copyright__ = "Copyright 2012/2013"
__credits__ = ["Michael Imelfort"]
__license__ = "GPL3"
__maintainer__ = "Michael Imelfort"
__email__ = "[email protected]"
###############################################################################
import matplotlib.pyplot as plt
import numpy as np
from numpy import linalg
np.seterr(all='raise')
###############################################################################
###############################################################################
###############################################################################
###############################################################################
class EllipsoidTool:
"""Some stuff for playing with ellipsoids"""
def __init__(self): pass
def getMinVolEllipse(self, P, tolerance=0.01, retA=False):
""" Find the minimum volume ellipsoid which holds all the points
Based on work by Nima Moshtagh
http://www.mathworks.com/matlabcentral/fileexchange/9542
and also by looking at:
http://cctbx.sourceforge.net/current/python/scitbx.math.minimum_covering_ellipsoid.html
Which is based on the first reference anyway!
Here, P is a numpy array of 3D points like this:
P = [[x,y,z],
[x,y,z],
[x,y,z]]
Returns:
(center, radii, rotation)
"""
(N, d) = np.shape(P)
# Q will be out working array
Q = np.copy(P.T)
Q = np.vstack([Q, np.ones(N)])
QT = Q.T
# initializations
err = 1 + tolerance
u = np.array([1.0 / N for i in range(N)]) # first iteration
# Khachiyan Algorithm
singular = False
while err > tolerance:
V = np.dot(Q, np.dot(np.diag(u), QT))
try:
M = np.diag(np.dot(QT , np.dot(linalg.inv(V), Q))) # M the diagonal vector of an NxN matrix
except linalg.linalg.LinAlgError:
# most likely a singular matrix
# permute the values a little and then we'll try again
from random import random, randint
PP = np.copy(P)
for i in range(N):
if randint(0,3) == 0:
j = randint(0,2)
if randint(0,1) != 0:
PP[i,j] += random()
else:
PP[i,j] -= random()
(A, center, radii, rotation) = self.getMinVolEllipse(PP, retA=True)
singular = True
break
j = np.argmax(M)
maximum = M[j]
step_size = (maximum - d - 1.0) / ((d + 1.0) * (maximum - 1.0))
new_u = (1.0 - step_size) * u
new_u[j] += step_size
err = np.linalg.norm(new_u - u)
u = new_u
if not singular:
# center of the ellipse
center = np.dot(P.T, u)
# the A matrix for the ellipse
try:
A = linalg.inv(
np.dot(P.T, np.dot(np.diag(u), P)) -
np.array([[a * b for b in center] for a in center])
) / d
except linalg.linalg.LinAlgError:
# the matrix is singular so we need to return a degenerate ellipse
#print '[Notice] Degenerate ellipse constructed indicating a bin with extremely small coverage divergence.'
center = np.mean(P, axis=0)
radii = np.max(P,axis=0) - np.min(P, axis=0)
if len(P[0]) == 3:
rotation = [[0,0,0],[0,0,0],[0,0,0]]
else:
rotation = [[0,0],[0,0]]
if retA:
return (None, center, radii, rotation)
else:
return (center, radii, rotation)
# Get the values we'd like to return
try:
U, s, rotation = linalg.svd(A)
radii = 1.0/np.sqrt(s)
except np.linalg.linalg.LinAlgError:
# hack -> better than crashing...
rotation = np.eye(3)
radii = np.ones(3)
else:
# hack -> better than crashing...
rotation = np.eye(3)
radii = np.ones(3)
if retA:
return (A, center, radii, rotation)
else:
return (center, radii, rotation)
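    # Illustrative usage sketch (not part of the original module): fit the
    # minimum volume ellipsoid around a small, made-up 3D point cloud and
    # report its volume, using only the methods defined in this class.
    #
    #   et = EllipsoidTool()
    #   pts = np.array([[0., 0., 0.], [1., 0., 0.], [0., 2., 0.], [0., 0., 3.]])
    #   center, radii, rotation = et.getMinVolEllipse(pts, tolerance=0.01)
    #   volume = et.getEllipsoidVolume(radii)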
def getEllipsoidVolume(self, radii):
"""Calculate the volume of the blob"""
if len(radii) == 2:
return np.pi*radii[0]*radii[1]
else:
return (4.0/3.0)*np.pi*radii[0]*radii[1]*radii[2]
def doesIntersect3D(self, A, cA, B, cB):
"""Rough test to see if ellipsoids A and B intersect
Not perfect, should work for "well overlapping" ones
We assume that the volume of B is less than (or =) volume of A
"""
#To make things simple, we just check if the points on a wire frame of
#B lie within A
# Quick check if the centre of B is within ellipse A. This deals with
# degenerate cases where B is only a single point or an otherwise
# degenerate ellipse.
p_c = cB - cA
try:
if np.dot(p_c.T, np.dot(A, p_c)) <= 1:
return True
except (TypeError, ValueError):
return False
if A is None or B is None: # degenerate ellipse that can't be processed
return False
U, s, rotation = linalg.svd(B)
try:
radii_B = 1.0/np.sqrt(s)
except FloatingPointError:
# the given matrix B was made on a group of only one point
# we need only check if the one point (the center)
# in in A
p_c = cB - cA
return np.dot(p_c.T, np.dot(A, p_c)) <= 1
u = np.linspace(0.0, 2.0 * np.pi, 100)
v = np.linspace(0.0, np.pi, 100)
# cartesian coordinates that correspond to the spherical angles:
x = radii_B[0] * np.outer(np.cos(u), np.sin(v))
y = radii_B[1] * np.outer(np.sin(u), np.sin(v))
z = radii_B[2] * np.outer(np.ones_like(u), np.cos(v))
# rotate accordingly
for i in range(len(x)):
for j in range(len(x)):
# make a point on the wireFrame
wire_point = np.dot([x[i,j],y[i,j],z[i,j]], rotation) + cB
# test if it's inside
# work out (p-c)'A(p-c) and see if it's <= 1
p_c = wire_point - cA
if np.dot(p_c.T, np.dot(A, p_c)) <= 1:
return True
return False
def doesIntersect2D(self, A, cA, B, cB):
"""Rough test to see if ellipsoids A and B intersect
Not perfect, should work for "well overlapping" ones
We assume that the volume of B is less than (or =) volume of A
"""
#To make things simple, we just check if the points on a wire frame of
#B lie within A
# Quick check if the centre of B is within ellipse A. This deals with
# degenerate cases where B is only a single point or an otherwise
# degenerate ellipse.
p_c = cB - cA
if np.dot(p_c.T, np.dot(A, p_c)) <= 1:
return True
        if A is None or B is None: # degenerate ellipse that can't be processed
return False
U, s, rotation = linalg.svd(B)
try:
radii_B = 1.0/np.sqrt(s)
except FloatingPointError:
# the given matrix B was made on a group of only one point
# we need only check if the one point (the center)
# in in A
p_c = cB - cA
return np.dot(p_c.T, np.dot(A, p_c)) <= 1
u = np.linspace(0.0, 2.0 * np.pi, 100)
# cartesian coordinates that correspond to the spherical angles:
x = radii_B[0] * np.cos(u)
y = radii_B[1] * np.sin(u)
# rotate accordingly
for i in range(len(x)):
# make a point on the wireFrame
edge_point = np.dot([x[i],y[i]], rotation) + cB
# test if it's inside
# work out (p-c)'A(p-c) and see if it's <= 1
p_c = edge_point - cA
if np.dot(p_c.T, np.dot(A, p_c)) <= 1:
return True
return False
def plotEllipsoid(self, center, radii, rotation, ax=None, plotAxes=False, cageColor='b', cageAlpha=0.2, label=None):
"""Plot an ellipsoid"""
make_ax = ax == None
if make_ax:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
u = np.linspace(0.0, 2.0 * np.pi, 100)
v = np.linspace(0.0, np.pi, 100)
# cartesian coordinates that correspond to the spherical angles:
x = radii[0] * np.outer(np.cos(u), np.sin(v))
y = radii[1] * np.outer(np.sin(u), np.sin(v))
z = radii[2] * np.outer(np.ones_like(u), np.cos(v))
# rotate accordingly
for i in range(len(x)):
for j in range(len(x)):
[x[i,j],y[i,j],z[i,j]] = np.dot([x[i,j],y[i,j],z[i,j]], rotation) + center
if plotAxes:
# make some purdy axes
axes = np.array([[radii[0],0.0,0.0],
[0.0,radii[1],0.0],
[0.0,0.0,radii[2]]])
# rotate accordingly
for i in range(len(axes)):
axes[i] = np.dot(axes[i], rotation)
# plot axes
for p in axes:
X3 = np.linspace(-p[0], p[0], 100) + center[0]
Y3 = np.linspace(-p[1], p[1], 100) + center[1]
Z3 = np.linspace(-p[2], p[2], 100) + center[2]
ax.plot(X3, Y3, Z3, color=cageColor)
# plot ellipsoid
ax.plot_wireframe(x, y, z, rstride=4, cstride=4, color=cageColor, alpha=cageAlpha)
if label is not None:
ax.text(center[0],
center[1],
center[2],
label,
color=[0,0,0],
weight='bold'
)
if make_ax:
plt.show()
plt.close(fig)
del fig
def plotEllipse(self, center, radii, rotation, ax=None, plotAxes=False, cageColor='b', cageAlpha=0.2, label=None, linewidth=-1):
"""plot an ellipse"""
make_ax = ax == None
if make_ax:
fig = plt.figure()
ax = fig.add_subplot(111)
u = np.linspace(0.0, 2.0 * np.pi, 100)
# cartesian coordinates that correspond to the spherical angles:
x = radii[0] * np.cos(u)
y = radii[1] * np.sin(u)
# rotate accordingly
for i in range(len(x)):
[x[i],y[i]] = np.dot([x[i],y[i]], rotation) + center
if plotAxes:
# make some purdy axes
axes = np.array([[radii[0],0.0],[0.0,radii[1]]])
# rotate accordingly
for i in range(len(axes)):
axes[i] = np.dot(axes[i], rotation)
# plot axes
for p in axes:
X3 = np.linspace(-p[0], p[0], 100) + center[0]
Y3 = np.linspace(-p[1], p[1], 100) + center[1]
ax.plot(X3, Y3, color=cageColor)
# plot ellipsoid
if linewidth == -1:
ax.plot(x, y, color=cageColor, alpha=cageAlpha)
else:
ax.plot(x, y, color=cageColor, alpha=cageAlpha, linewidth=linewidth, zorder = 10)
if label is not None:
ax.text(center[0],
center[1],
label,
color=[0,0,0],
weight='bold'
)
if make_ax:
plt.show()
plt.close(fig)
del fig
###############################################################################
###############################################################################
###############################################################################
###############################################################################
/FormAlchemy-1.5.6.tar.gz/FormAlchemy-1.5.6/formalchemy/fields.py |
import cgi
import logging
logger = logging.getLogger('formalchemy.' + __name__)
from copy import copy, deepcopy
import datetime
import warnings
from six import string_types,text_type, next
from sqlalchemy.orm.interfaces import MANYTOMANY
from sqlalchemy.orm.interfaces import ONETOMANY
from sqlalchemy.orm.interfaces import MANYTOONE
from sqlalchemy.orm import class_mapper, Query
from sqlalchemy.orm.attributes import ScalarAttributeImpl, ScalarObjectAttributeImpl, CollectionAttributeImpl
from sqlalchemy.orm.properties import CompositeProperty, ColumnProperty
try:
from sqlalchemy import exc as sqlalchemy_exceptions
except ImportError:
from sqlalchemy import exceptions as sqlalchemy_exceptions
from sqlalchemy.orm import compile_mappers, object_session, class_mapper
from formalchemy import helpers as h
from formalchemy import fatypes, validators
from formalchemy.exceptions import FieldNotFoundError
from formalchemy import config
from formalchemy.i18n import get_translator
from formalchemy.i18n import _
from formalchemy.helpers import html_escape
__all__ = ['Field', 'FieldRenderer',
'TextFieldRenderer', 'TextAreaFieldRenderer',
'PasswordFieldRenderer', 'HiddenFieldRenderer',
'DateFieldRenderer', 'TimeFieldRenderer',
'DateTimeFieldRenderer',
'CheckBoxFieldRenderer', 'CheckBoxSet',
'deserialize_once']
########################## RENDERER STUFF ############################
def _stringify(k, null_value=u''):
if k is None:
return null_value
if isinstance(k, string_types):
return k
elif hasattr(k, '__unicode__'):
return k.__unicode__()
elif isinstance(k, datetime.timedelta):
return '%s.%s' % (k.days, k.seconds)
else:
return text_type(k)
def _htmlify(k, null_value=u''):
if hasattr(k, '__html__'):
try:
return h.literal(k.__html__())
except TypeError:
# not callable. skipping
pass
return html_escape(_stringify(k, null_value))
class _NoDefault(object):
def __repr__(self):
return '<NoDefault>'
NoDefault = _NoDefault()
del _NoDefault
def deserialize_once(func):
"""Simple deserialization caching decorator.
To be used on a Renderer object's `deserialize` function, to cache its
result while it's being called once for ``validate()`` and another time
when doing ``sync()``.
"""
def cache(self, *args, **kwargs):
if hasattr(self, '_deserialization_result'):
return self._deserialization_result
self._deserialization_result = func(self, *args, **kwargs)
return self._deserialization_result
return cache
class FieldRenderer(object):
"""
This should be the super class of all Renderer classes.
Renderers generate the html corresponding to a single Field,
and are also responsible for deserializing form data into
Python objects.
Subclasses should override `render` and `deserialize`.
See their docstrings for details.
"""
def __init__(self, field):
self.field = field
assert isinstance(self.field, AbstractField)
@property
def name(self):
"""Name of rendered input element.
The `name` of a field will always look like:
[fieldset_prefix-]ModelName-[pk]-fieldname
The fieldset_prefix is defined when instantiating the
`FieldSet` object, by passing the `prefix=` keyword argument.
The `ModelName` is taken by introspection from the model
passed in at that same moment.
The `pk` is the primary key of the object being edited.
If you are creating a new object, then the `pk` is an
empty string.
The `fieldname` is, well, the field name.
.. note::
This method as the direct consequence that you can not `create`
two objects of the same class, using the same FieldSet, on the
same page. You can however, create more than one object
of a certain class, provided that you create multiple FieldSet
instances and pass the `prefix=` keyword argument.
Otherwise, FormAlchemy deals very well with editing multiple
existing objects of same/different types on the same page,
without any name clash. Just be careful with multiple object
creation.
When creating your own Renderer objects, use `self.name` to
get the field's `name` HTML attribute, both when rendering
and deserializing.
"""
clsname = self.field.model.__class__.__name__
pk = self.field.parent._bound_pk
assert pk != ''
if isinstance(pk, string_types) or not hasattr(pk, '__iter__'):
pk_string = _stringify(pk)
else:
# remember to use a delimiter that can be used in the DOM (specifically, no commas).
# we don't have to worry about escaping the delimiter, since we never try to
# deserialize the generated name. All we care about is generating unique
# names for a given model's domain.
pk_string = u'_'.join([_stringify(k) for k in pk])
components = dict(model=clsname, pk=pk_string, name=self.field.name)
name = self.field.parent._format % components
if self.field.parent._prefix is not None:
return u'%s-%s' % (self.field.parent._prefix, name)
return name
@property
def value(self):
"""
Submitted value, or field value converted to string.
Return value is always either None or a string.
"""
if not self.field.is_readonly() and self.params is not None:
# submitted value. do not deserialize here since that requires valid data, which we might not have
try:
v = self._serialized_value()
except FieldNotFoundError as e:
v = None
else:
v = None
# empty field will be '' -- use default value there, too
if v:
return v
value = self.field.model_value
if value is None:
return None
if self.field.is_collection:
return [self.stringify_value(v) for v in value]
else:
return self.stringify_value(value)
@property
def _value(self):
warnings.warn('FieldRenderer._value is deprecated. Use '\
'FieldRenderer.value instead')
return self.value
@property
def raw_value(self):
"""return fields field.raw_value (mean real objects, not ForeignKeys)
"""
return self.field.raw_value
@property
def request(self):
"""return the ``request`` bound to the
:class:`~formalchemy.forms.FieldSet`` during
:func:`~formalchemy.forms.FieldSet.bind`"""
return self.field.parent._request
def get_translator(self, **kwargs):
"""return a GNUTranslations object in the most convenient way
"""
if 'F_' in kwargs:
return kwargs.pop('F_')
if 'lang' in kwargs:
lang = kwargs.pop('lang')
else:
lang = 'en'
return get_translator(lang=lang, request=self.request)
def render(self, **kwargs):
"""
Render the field. Use `self.name` to get a unique name for the
input element and id. `self.value` may also be useful if
you are not rendering multiple input elements.
When rendering, you can verify `self.errors` to know
if you are rendering a new form, or re-displaying a form with
errors. Knowing that, you could select the data either from
the model, or the web form submission.
"""
raise NotImplementedError()
def render_readonly(self, **kwargs):
"""render a string representation of the field value"""
value = self.raw_value
if value is None:
return ''
if isinstance(value, list):
return h.literal(', ').join([self.stringify_value(item, as_html=True) for item in value])
if isinstance(value, string_types):
return value
return self.stringify_value(value, as_html=True)
@property
def params(self):
"""This gives access to the POSTed data, as received from
the web user. You should call `.getone`, or `.getall` to
retrieve a single value or multiple values for a given
key.
For example, when coding a renderer, you'd use:
.. sourcecode:: py
vals = self.params.getall(self.name)
to catch all the values for the renderer's form entry.
"""
return self.field.parent.data
@property
def _params(self):
warnings.warn('FieldRenderer._params is deprecated. Use '\
'FieldRenderer.params instead')
return self.params
def _serialized_value(self):
"""
Returns the appropriate value to deserialize for field's
datatype, from the user-submitted data. Only called
internally, so, if you are overriding `deserialize`,
you can use or ignore `_serialized_value` as you please.
This is broken out into a separate method so multi-input
renderers can stitch their values back into a single one
        so that it can be handled by the default deserialize.
Do not attempt to deserialize here; return value should be a
string (corresponding to the output of `str` for your data
        type), or for a collection type, a list of strings,
or None if no value was submitted for this renderer.
The default _serialized_value returns the submitted value(s)
in the input element corresponding to self.name.
"""
try:
if self.field.is_collection:
return self.params.getall(self.name)
return self.params.getone(self.name)
except KeyError:
raise FieldNotFoundError('%s not found in %r' % (self.name, self.params))
def deserialize(self):
"""Turns the user-submitted data into a Python value.
The raw data received from the web can be accessed via
`self.params`. This dict-like object usually accepts the
`getone()` and `getall()` method calls.
For SQLAlchemy
        collections, return a list of primary keys, and FormAlchemy
will take care of turning that into a list of objects.
For manually added collections, return a list of values.
You will need to override this in a child Renderer object
if you want to mangle the data from your web form, before
it reaches your database model. For example, if your render()
method displays a select box filled with items you got from a
CSV file or another source, you will need to decide what to do
with those values when it's time to save them to the database
-- or is this field going to determine the hashing algorithm
        for your password?
This function should return the value that is going to be
assigned to the model *and* used in the place of the model
value if there was an error with the form.
.. note::
Note that this function will be called *twice*, once when
the fieldset is `.validate()`d -- with its value only tested,
and a second time when the fieldset is `.sync()`d -- and its
value assigned to the model. Also note that deserialize() can
also raise a ValidationError() exception if it finds some
errors converting its values.
If calling this function twice poses a problem to your logic, for
example, if you have heavy database queries, or temporary objects
created in this function, consider using the ``deserialize_once``
decorator, provided using:
.. sourcecode:: py
from formalchemy.fields import deserialize_once
@deserialize_once
def deserialize(self):
... my stuff ...
return calculated_only_once
Finally, you should only have to override this if you are using custom
(e.g., Composite) types.
"""
if self.field.is_collection:
return [self._deserialize(subdata) for subdata in self._serialized_value()]
return self._deserialize(self._serialized_value())
def _deserialize(self, data):
if isinstance(self.field.type, fatypes.Boolean):
if isinstance(data, bool):
return data
if data is not None:
if data.lower() in ['1', 't', 'true', 'yes']: return True
if data.lower() in ['0', 'f', 'false', 'no']: return False
if data is None or data == self.field._null_option[1]:
return None
if isinstance(self.field.type, fatypes.Interval):
return datetime.timedelta(validators.float_(data, self))
if isinstance(self.field.type, fatypes.Integer):
return validators.integer(data, self)
if isinstance(self.field.type, fatypes.Float):
return validators.float_(data, self)
if isinstance(self.field.type, fatypes.Numeric):
if self.field.type.asdecimal:
return validators.decimal_(data, self)
else:
return validators.float_(data, self)
def _date(data):
if isinstance(data, datetime.date):
return data
if data == 'YYYY-MM-DD' or data == '-MM-DD' or not data.strip():
return None
try:
return datetime.date(*[int(st) for st in data.split('-')])
except:
raise validators.ValidationError('Invalid date')
def _time(data):
if isinstance(data, datetime.time):
return data
if data == 'HH:MM:SS' or not data.strip():
return None
try:
return datetime.time(*[int(st) for st in data.split(':')])
except:
raise validators.ValidationError('Invalid time')
if isinstance(self.field.type, fatypes.Date):
return _date(data)
if isinstance(self.field.type, fatypes.Time):
return _time(data)
if isinstance(self.field.type, fatypes.DateTime):
if isinstance(data, datetime.datetime):
return data
if 'Z' in data:
data = data.strip('Z')
if 'T' in data:
data_date, data_time = data.split('T')
elif ' ' in data:
data_date, data_time = data.split(' ')
else:
raise validators.ValidationError('Incomplete datetime: %s' % data)
dt, tm = _date(data_date), _time(data_time)
if dt is None and tm is None:
return None
elif dt is None or tm is None:
raise validators.ValidationError('Incomplete datetime')
return datetime.datetime(dt.year, dt.month, dt.day, tm.hour, tm.minute, tm.second)
return data
def stringify_value(self, v, as_html=False):
if as_html:
return _htmlify(v, null_value=self.field._null_option[1])
return _stringify(v, null_value=self.field._null_option[1])
def __repr__(self):
return '<%s for %r>' % (self.__class__.__name__, self.field)
class EscapingReadonlyRenderer(FieldRenderer):
"""
In readonly mode, html-escapes the output of the default renderer
for this field type. (Escaping is not performed by default because
it is sometimes useful to have the renderer include raw html in its
output. The FormAlchemy admin app extension for Pylons uses this,
for instance.)
"""
def __init__(self, field):
FieldRenderer.__init__(self, field)
self._renderer = field._get_renderer()(field)
def render(self, **kwargs):
return self._renderer.render(**kwargs)
def render_readonly(self, **kwargs):
return h.HTML(self._renderer.render_readonly(**kwargs))
class TextFieldRenderer(FieldRenderer):
"""render a field as a text field"""
@property
def length(self):
return self.field.type.length
def render(self, **kwargs):
return h.text_field(self.name, value=self.value, maxlength=self.length, **kwargs)
class IntegerFieldRenderer(FieldRenderer):
"""render an integer as a text field"""
def render(self, **kwargs):
return h.text_field(self.name, value=self.value, **kwargs)
class FloatFieldRenderer(FieldRenderer):
"""render a float as a text field"""
def render(self, **kwargs):
return h.text_field(self.name, value=self.value, **kwargs)
class IntervalFieldRenderer(FloatFieldRenderer):
"""render an interval as a text field"""
def _deserialize(self, data):
value = FloatFieldRenderer._deserialize(self, data)
if isinstance(value, (float, int)):
return datetime.timedelta(value)
return value
class PasswordFieldRenderer(TextFieldRenderer):
"""Render a password field"""
def render(self, **kwargs):
return h.password_field(self.name, value=self.value, maxlength=self.length, **kwargs)
def render_readonly(self):
return '*' * 6
class TextAreaFieldRenderer(FieldRenderer):
"""render a field as a textarea"""
def render(self, **kwargs):
if isinstance(kwargs.get('size'), tuple):
kwargs['size'] = 'x'.join([str(i) for i in kwargs['size']])
return h.text_area(self.name, content=self.value, **kwargs)
class CheckBoxFieldRenderer(FieldRenderer):
"""render a boolean value as checkbox field"""
def render(self, **kwargs):
value = self.value or ''
return h.check_box(self.name, True,
checked=_simple_eval(value.capitalize()),
**kwargs)
def _serialized_value(self):
if self.name not in self.params:
return None
return FieldRenderer._serialized_value(self)
def deserialize(self):
if self._serialized_value() is None:
return False
return FieldRenderer.deserialize(self)
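# A minimal sketch of the checkbox round-trip above (the params dict is
# hypothetical, standing in for posted form data): browsers omit unchecked
# checkboxes from submissions entirely, so a missing key must read as False.
#
#   params = {}                    # box left unchecked -> name absent from POST
#   renderer._serialized_value()   # -> None
#   renderer.deserialize()         # -> False
#
#   params = {renderer.name: '1'}  # box ticked
#   renderer.deserialize()         # -> True (Boolean branch of _deserialize)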
class FileFieldRenderer(FieldRenderer):
"""render a file input field"""
remove_label = _('Remove')
def __init__(self, *args, **kwargs):
FieldRenderer.__init__(self, *args, **kwargs)
self._data = None # caches FieldStorage data
self._filename = None
def render(self, **kwargs):
if self.field.model_value:
checkbox_name = '%s--remove' % self.name
return h.literal('%s %s %s') % (
h.file_field(self.name, **kwargs),
h.check_box(checkbox_name),
h.label(self.remove_label, for_=checkbox_name))
else:
return h.file_field(self.name, **kwargs)
def get_size(self):
value = self.raw_value
if value is None:
return 0
return len(value)
def readable_size(self):
length = self.get_size()
if length == 0:
return '0 KB'
if length <= 1024:
return '1 KB'
if length > 1048576:
return '%0.02f MB' % (length / 1048576.0)
return '%0.02f KB' % (length / 1024.0)
def render_readonly(self, **kwargs):
"""
render only the binary size in a human readable format but you can
override it to whatever you want
"""
return self.readable_size()
def deserialize(self):
data = FieldRenderer.deserialize(self)
if isinstance(data, cgi.FieldStorage):
if data.filename:
# FieldStorage can only be read once so we need to cache the
# value since FA call deserialize during validation and
# synchronisation
if self._data is None:
self._filename = data.filename
self._data = data.file.read()
data = self._data
else:
data = None
checkbox_name = '%s--remove' % self.name
if not data and checkbox_name not in self.params:
data = getattr(self.field.model, self.field.name)
return data is not None and data or ''
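# A rough sketch of the deserialize() contract above (the field and checkbox
# names are hypothetical, not the library's generated ones):
#
#   params = {'Doc-1-file': <cgi.FieldStorage>}             -> uploaded bytes,
#                                cached because FieldStorage is single-read
#   params = {'Doc-1-file': '', 'Doc-1-file--remove': '1'}  -> '' (clears value)
#   params = {'Doc-1-file': ''}                             -> keeps the value
#                                currently stored on the model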
class DateFieldRenderer(FieldRenderer):
"""Render a date field"""
@property
def format(self):
return config.date_format
@property
def edit_format(self):
return config.date_edit_format
def render_readonly(self, **kwargs):
value = self.raw_value
return value and value.strftime(self.format) or ''
def _render(self, **kwargs):
data = self.params
value = self.field.model_value
F_ = self.get_translator(**kwargs)
month_options = [(F_('Month'), 'MM')] + [(F_('month_%02i' % i), str(i)) for i in range(1, 13)]
day_options = [(F_('Day'), 'DD')] + [(i, str(i)) for i in range(1, 32)]
mm_name = self.name + '__month'
dd_name = self.name + '__day'
yyyy_name = self.name + '__year'
is_date_type = isinstance(value, (datetime.datetime, datetime.date, datetime.time))
values = []
for key, default in (('month', 'MM'), ('day', 'DD')):
name = self.name + '__' + key
v = default
if data is not None and name in data:
v = data[name]
if v.isdigit():
pass
elif is_date_type:
v = getattr(value, key)
values.append(v)
mm, dd = values
# could be blank so don't use and/or construct
if data is not None and yyyy_name in data:
yyyy = data[yyyy_name]
else:
yyyy = str(self.field.model_value and self.field.model_value.year or 'YYYY')
selects = dict(
m=h.select(mm_name, [mm], month_options, **kwargs),
d=h.select(dd_name, [dd], day_options, **kwargs),
y=h.text_field(yyyy_name, value=yyyy, maxlength=4, size=4, **kwargs))
value = [selects.get(l) for l in self.edit_format.split('-')]
return h.literal('\n').join(value)
def render(self, **kwargs):
return h.content_tag('span', self._render(**kwargs), id=self.name)
def _serialized_value(self):
return '-'.join([self.params.getone(self.name + '__' + subfield) for subfield in ['year', 'month', 'day']])
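# A minimal sketch of the date round-trip (the params dict is hypothetical):
# the three widgets post back separately and _serialized_value() joins them
# into a dash-separated string for _deserialize()/_date() to parse.
#
#   params = {name + '__year': '2010', name + '__month': '7', name + '__day': '4'}
#   renderer._serialized_value()   # -> '2010-7-4'
#   renderer.deserialize()         # -> datetime.date(2010, 7, 4)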
class TimeFieldRenderer(FieldRenderer):
"""Render a time field"""
format = '%H:%M:%S'
def is_time_type(self):
return isinstance(self.field.model_value, (datetime.datetime, datetime.date, datetime.time))
def render_readonly(self, **kwargs):
value = self.raw_value
return isinstance(value, datetime.time) and value.strftime(self.format) or ''
def _render(self, **kwargs):
data = self.params
value = self.field.model_value
F_ = self.get_translator(**kwargs)
opts = {}
opts['hour'] = [(str(i),str(i)) for i in range(24)]
opts['minute'] = [(str(i),str(i)) for i in range(60)]
opts['second'] = [(str(i),str(i)) for i in range(60)]
hh_name = self.name + '__hour'
mm_name = self.name + '__minute'
ss_name = self.name + '__second'
is_time_type = isinstance(value, (datetime.datetime, datetime.date, datetime.time))
values = []
for key, text, default in (('hour', F_('HH'), 'HH'), ('minute', F_('MM'), 'MM'), ('second', F_('SS'), 'SS')):
opts[key] = [(text,default)] + opts[key]
name = self.name + '__' + key
v = default
if data is not None and name in data:
v = data[name]
if v.isdigit():
pass
elif is_time_type:
v = getattr(value, key)
values.append(v)
hh, mm, ss = values
return h.literal(':').join([
h.select(hh_name, [hh], opts['hour'], **kwargs),
h.select(mm_name, [mm], opts['minute'], **kwargs),
h.select(ss_name, [ss], opts['second'], **kwargs)])
def render(self, **kwargs):
return h.content_tag('span', self._render(**kwargs), id=self.name)
def _serialized_value(self):
return ':'.join([self.params.getone(self.name + '__' + subfield) for subfield in ['hour', 'minute', 'second']])
class DateTimeFieldRenderer(DateFieldRenderer, TimeFieldRenderer):
"""Render a date time field"""
format = '%Y-%m-%d %H:%M:%S'
def render(self, **kwargs):
return h.content_tag('span', DateFieldRenderer._render(self, **kwargs) + h.literal(' ') + TimeFieldRenderer._render(self, **kwargs), id=self.name)
def _serialized_value(self):
return DateFieldRenderer._serialized_value(self) + ' ' + TimeFieldRenderer._serialized_value(self)
class EmailFieldRenderer(FieldRenderer):
'''
Render a HTML5 email input field
'''
def render(self, **kwargs):
return h.text_field(self.name, value=self.value, type='email', **kwargs)
class UrlFieldRenderer(FieldRenderer):
'''
Render a HTML5 url input field
'''
def render(self, **kwargs):
return h.text_field(self.name, value=self.value, type='url', **kwargs)
class NumberFieldRenderer(IntegerFieldRenderer):
'''
Render a HTML5 number input field
'''
def render(self, **kwargs):
return h.text_field(self.name, value=self.value, type='number', **kwargs)
class RangeFieldRenderer(FieldRenderer):
'''
Render a HTML5 range input field
'''
def render(self, **kwargs):
return h.text_field(self.name, value=self.value, type='range', **kwargs)
class HTML5DateFieldRenderer(FieldRenderer):
'''
Render a HTML5 date input field
'''
def render(self, **kwargs):
return h.text_field(self.name, value=self.value, type='date', **kwargs)
class HTML5DateTimeFieldRenderer(FieldRenderer):
'''
Render a HTML5 datetime input field
'''
def render(self, **kwargs):
return h.text_field(self.name, value=self.value, type='datetime', **kwargs)
class LocalDateTimeFieldRenderer(FieldRenderer):
'''
Render a HTML5 datetime-local input field.
'''
def render(self, **kwargs):
return h.text_field(self.name, value=self.value, type='datetime-local', **kwargs)
class MonthFieldRender(FieldRenderer):
'''
Render a HTML5 month input field.
'''
def render(self, **kwargs):
return h.text_field(self.name, value=self.value, type='month', **kwargs)
class WeekFieldRenderer(FieldRenderer):
'''
Render a HTML5 week input field.
'''
def render(self, **kwargs):
return h.text_field(self.name, value=self.value, type='week', **kwargs)
class HTML5TimeFieldRenderer(FieldRenderer):
'''
Render a HTML5 time input field.
'''
def render(self, **kwargs):
return h.text_field(self.name, value=self.value, type='time', **kwargs)
class ColorFieldRenderer(FieldRenderer):
'''
Render a HTML5 color input field.
'''
def render(self, **kwargs):
return h.text_field(self.name, value=self.value, type='color', **kwargs)
def _extract_options(options):
if isinstance(options, dict):
options = options.items()
for choice in options:
# Choice is a list/tuple...
if isinstance(choice, (list, tuple)):
if len(choice) != 2:
raise Exception('Options should consist of two items, a name and a value; found %d items in %r' % (len(choice), choice))
yield choice
# ... or just a string.
else:
if not isinstance(choice, string_types):
raise Exception('List, tuple, or string value expected as option (got %r)' % choice)
yield (choice, choice)
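# Illustrative inputs for _extract_options() (the values are arbitrary examples):
#
#   list(_extract_options({'Red': 1, 'Blue': 2}))      # dict -> its items as pairs
#   list(_extract_options([('Red', 1), ('Blue', 2)]))  # pairs pass through unchanged
#   list(_extract_options(['Red', 'Blue']))            # -> [('Red', 'Red'), ('Blue', 'Blue')]
#   list(_extract_options([('Red', 1, 'x')]))          # raises: not a (name, value) pair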
class RadioSet(FieldRenderer):
"""render a field as radio"""
widget = staticmethod(h.radio_button)
format = '%(field)s%(label)s'
def _serialized_value(self):
if self.name not in self.params:
return None
return FieldRenderer._serialized_value(self)
def _is_checked(self, choice_value, value=NoDefault):
if value is NoDefault:
value = self.value
return value == _stringify(choice_value)
def render(self, options, **kwargs):
value = self.value
self.radios = []
if callable(options):
options = options(self.field.parent)
for i, (choice_name, choice_value) in enumerate(_extract_options(options)):
choice_id = '%s_%i' % (self.name, i)
radio = self.widget(self.name, choice_value, id=choice_id,
checked=self._is_checked(choice_value, value),
**kwargs)
label = h.label(choice_name, for_=choice_id)
self.radios.append(h.literal(self.format % dict(field=radio,
label=label)))
return h.tag("br").join(self.radios)
class CheckBoxSet(RadioSet):
widget = staticmethod(h.check_box)
def _serialized_value(self):
if self.name not in self.params:
return []
return FieldRenderer._serialized_value(self)
def _is_checked(self, choice_value, value=NoDefault):
if value is NoDefault:
value = self.value
if value is None:
value = []
return _stringify(choice_value) in value
class SelectFieldRenderer(FieldRenderer):
"""render a field as select"""
def _serialized_value(self):
if self.name not in self.params:
if self.field.is_collection:
return []
return None
return FieldRenderer._serialized_value(self)
def render(self, options, **kwargs):
if callable(options):
L = _normalized_options(options(self.field.parent))
if not self.field.is_required() and not self.field.is_collection:
L.insert(0, self.field._null_option)
else:
L = list(options)
if len(L) > 0:
if len(L[0]) == 2:
L = [(k, self.stringify_value(v)) for k, v in L]
else:
L = [_stringify(k) for k in L]
return h.select(self.name, self.value, L, **kwargs)
def render_readonly(self, options=None, **kwargs):
"""render a string representation of the field value.
Try to retrieve a value from `options`
"""
if not options or self.field.is_scalar_relation:
return FieldRenderer.render_readonly(self)
value = self.raw_value
if value is None:
return ''
if callable(options):
L = _normalized_options(options(self.field.parent))
else:
L = list(options)
if len(L) > 0:
if len(L[0]) == 2:
L = [(v, k) for k, v in L]
else:
L = [(k, _stringify(k)) for k in L]
D = dict(L)
if isinstance(value, list):
return u', '.join([_stringify(D.get(item, item)) for item in value])
return _stringify(D.get(value, value))
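# A minimal sketch (the option data is hypothetical): render() stringifies option
# values for the <select>, while render_readonly() inverts the pairs so a stored
# value maps back to its description.
#
#   options = [('Pending', 1), ('Shipped', 2)]
#   renderer.render(options=options)           # <select> with values '1' and '2'
#   renderer.render_readonly(options=options)  # -> u'Shipped' when raw_value == 2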
class HiddenFieldRenderer(FieldRenderer):
"""render a field as an hidden field"""
def render(self, **kwargs):
return h.hidden_field(self.name, value=self.value, **kwargs)
def render_readonly(self):
return ''
def HiddenFieldRendererFactory(cls):
"""A factory to generate a new class to hide an existing renderer"""
class Renderer(cls, HiddenFieldRenderer):
def render(self, **kwargs):
html = super(Renderer, self).render(**kwargs)
return h.content_tag('div', html, style="display:none;")
def render_readonly(self):
return ''
attrs = dict(__doc__="""Hidden %s renderer""" % cls.__name__)
renderer = type('Hidden%s' % cls.__name__, (Renderer,), attrs)
return renderer
HiddenDateFieldRenderer = HiddenFieldRendererFactory(DateFieldRenderer)
HiddenTimeFieldRenderer = HiddenFieldRendererFactory(TimeFieldRenderer)
HiddenDateTimeFieldRenderer = HiddenFieldRendererFactory(DateTimeFieldRenderer)
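# The factory can wrap any visible renderer the same way; for example (this alias
# is illustrative, not part of the original module):
#
#   HiddenTextFieldRenderer = HiddenFieldRendererFactory(TextFieldRenderer)
#
# The wrapped renderer still emits its widget, but inside a display:none <div>,
# and contributes nothing in readonly mode.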
################## FIELDS STUFF ####################
def _pk_one_column(instance, column):
try:
attr = getattr(instance, column.key)
except AttributeError:
# FIXME: this is not clean but the only way i've found to retrieve the
# real attribute name of the primary key.
# This is needed when you use something like:
# id = Column('UGLY_NAMED_ID', primary_key=True)
# It's a *really* needed feature
cls = instance.__class__
for k in instance._sa_class_manager.keys():
props = getattr(cls, k).property
if hasattr(props, 'columns'):
if props.columns[0] is column:
attr = getattr(instance, k)
break
return attr
def _pk(instance):
# Return the value of this instance's primary key, suitable for passing to Query.get().
# Will be a tuple if PK is multicolumn.
try:
columns = class_mapper(type(instance)).primary_key
except sqlalchemy_exceptions.InvalidRequestError:
# try to get pk from model attribute
if hasattr(instance, '_pk'):
return getattr(instance, '_pk', None) or None
return None
if len(columns) == 1:
return _pk_one_column(instance, columns[0])
return tuple([_pk_one_column(instance, column) for column in columns])
from ast import literal_eval
def _simple_eval(source):
if source == '':
return None
return literal_eval(source)
def _query_options(L):
"""
Return a list of tuples of `(item description, item pk)`
for each item in the iterable L, where `item description`
is the result of str(item) and `item pk` is the item's primary key.
"""
return [(_stringify(item), _pk(item)) for item in L]
def _normalized_options(options):
"""
If `options` is an SA query or an iterable of SA instances, it will be
turned into a list of `(item description, item value)` pairs. Otherwise, a
copy of the original options will be returned with no further validation.
"""
if isinstance(options, Query):
options = options.all()
if callable(options):
return options
i = iter(options)
try:
first = next(i)
except StopIteration:
return []
try:
class_mapper(type(first))
except:
return list(options)
return _query_options(options)
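# A minimal sketch of what _normalized_options() accepts (assuming a mapped User
# model; the names are hypothetical):
#
#   _normalized_options(session.query(User))         # Query -> [(text, pk), ...]
#   _normalized_options([user1, user2])              # mapped instances -> same
#   _normalized_options([('Alice', 1), ('Bob', 2)])  # plain pairs -> list of pairs
#   _normalized_options(lambda fs: [...])            # callables are returned as-is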
def _foreign_keys(property):
# 0.4/0.5 compatibility fn
try:
return property.foreign_keys
except AttributeError:
return [r for l, r in property.synchronize_pairs]
def _model_equal(a, b):
if not isinstance(a, type):
a = type(a)
if not isinstance(b, type):
b = type(b)
return a is b
class AbstractField(object):
"""
Contains the information necessary to render (and modify the rendering of)
a form field
Methods taking an `options` parameter will accept several ways of
specifying those options:
- an iterable of SQLAlchemy objects; `str()` of each object will be the description, and the primary key the value
- a SQLAlchemy query; the query will be executed with `all()` and the objects returned evaluated as above
- an iterable of (description, value) pairs
- a dictionary of {description: value} pairs
Options can be "chained" indefinitely because each modification returns a new
:mod:`Field <formalchemy.fields>` instance, so you can write::
>>> from formalchemy.tests import FieldSet, User
>>> fs = FieldSet(User)
>>> fs.append(Field('foo').dropdown(options=[('one', 1), ('two', 2)]).radio())
or::
>>> fs.configure(options=[fs.name.label('Username').readonly()])
"""
_null_option = (u'None', u'')
_valide_options = [
'validate', 'renderer', 'hidden', 'required', 'readonly',
'null_as', 'label', 'multiple', 'options', 'validators',
'size', 'instructions', 'metadata', 'html', 'attrs']
def __init__(self, parent, name=None, type=fatypes.String, **kwattrs):
# the FieldSet (or any ModelRenderer) owning this instance
self.parent = parent
# Renderer for this Field. this will
# be autoguessed, unless the user forces it with .dropdown,
# .checkbox, etc.
self._renderer = None
# other render options, such as size, multiple, etc.
self.render_opts = {}
# validator functions added with .validate()
self.validators = []
# errors found by _validate() (which runs implicit and
# explicit validators)
self.errors = []
self._readonly = False
# label to use for the rendered field. autoguessed if not specified by .label()
self.label_text = None
# optional attributes to pass to renderers
self.html_options = {}
# True iff this Field is a primary key
self.is_pk = False
# True iff this Field is a raw foreign key
self.is_raw_foreign_key = False
# Field metadata, for customization
self.metadata = {}
self.name = name
self.type = type
def __deepcopy__(self, memo):
wrapper = copy(self)
wrapper.render_opts = dict(self.render_opts)
wrapper.validators = list(self.validators)
wrapper.errors = list(self.errors)
wrapper._renderer = copy(self._renderer)
if hasattr(wrapper._renderer, 'field'):
wrapper._renderer.field = wrapper
return wrapper
@property
def requires_label(self):
return not isinstance(self.renderer, HiddenFieldRenderer)
def query(self, *args, **kwargs):
"""Perform a query in the parent's session"""
if self.parent.session:
session = self.parent.session
else:
session = object_session(self.model)
if session:
return session.query(*args, **kwargs)
raise Exception(("No session found. Either bind a session explicitly, "
"or specify relation options manually so FormAlchemy doesn't try to autoload them."))
def _validate(self):
if self.is_readonly():
return True
self.errors = []
try:
# Call renderer.deserialize(), because the deserializer can
# also raise a ValidationError
value = self._deserialize()
except validators.ValidationError as e:
self.errors.append(e.message)
return False
L = list(self.validators)
if self.is_required() and validators.required not in L:
L.append(validators.required)
for validator in L:
if value is None and not getattr(validator, 'accepts_none', False):
continue
try:
validator(value, self)
except validators.ValidationError as e:
self.errors.append(e.message)
except TypeError:
warnings.warn(DeprecationWarning('Please provide a field argument to your %r validator. Your validator will break in FA 1.5' % validator))
try:
validator(value)
except validators.ValidationError as e:
self.errors.append(e.message)
return not self.errors
def is_required(self):
"""True iff this Field must be given a non-empty value"""
return validators.required in self.validators
def is_readonly(self):
"""True iff this Field is in readonly mode"""
return self._readonly
@property
def model(self):
return self.parent.model
def _modified(self, **kwattrs):
# return a copy of self, with the given attributes modified
copied = deepcopy(self)
for attr, value in kwattrs.items():
setattr(copied, attr, value)
return copied
def set(self, **kwattrs):
"""
Sets different properties on the Field object. In contrast to the
other methods that tweak a Field, this one changes things
IN-PLACE, without creating a new object and returning it.
This is unlike the behavior of the other methods like ``readonly()``,
``required()``, ``with_html()``, ``with_metadata``,
``with_renderer()``, ``with_null_as()``, ``label()``,
``hidden()``, ``validate()``, etc...
Allowed attributes are:
* ``validate`` - append one single validator
* ``validators`` - appends a list of validators
* ``renderer`` - sets the renderer used (``.with_renderer(val)``
equiv.)
* ``hidden`` - marks a field as hidden (changes the renderer)
* ``required`` - adds the default 'required' validator to the field
* ``readonly`` - sets the readonly attribute (``.readonly(val)``
equiv.)
* ``null_as`` - sets the 'null_as' attribute (``.with_null_as(val)``
equiv.)
* ``label`` - sets the label (``.label(val)`` equiv.)
* ``multiple`` - marks the field as a multi-select (used by some
renderers)
* ``options`` - sets `.render_opts['options']` (for selects and similar
fields, used by some renderers)
* ``size`` - sets render_opts['size'] with this val (normally an
attribute to ``textarea()``, ``dropdown()``, used by some renderers)
* ``instructions`` - shortcut to update `metadata['instructions']`
* ``metadata`` - dictionary that `updates` the ``.metadata`` attribute
* ``html`` - dictionary that updates the ``.html_options`` attribute
(``.with_html()`` equiv.)
NOTE: everything in ``.render_opts``, updated with everything in
``.html_options`` will be passed as keyword arguments to the `render()`
function of the Renderer set for the field.
Example::
>>> field = Field('myfield')
>>> field.set(label='My field', renderer=SelectFieldRenderer,
... options=[('Value', 1)],
... validators=[lambda x: x, lambda y: y])
Field(myfield)
>>> field.label_text
'My field'
>>> field.renderer
<SelectFieldRenderer for Field(myfield)>
"""
mapping = dict(renderer='_renderer',
readonly='_readonly',
null_as='_null_option',
label='label_text')
for attr,value in kwattrs.items():
if attr == 'validate':
self.validators.append(value)
elif attr == 'validators':
self.validators.extend(value)
elif attr == 'metadata':
self.metadata.update(value)
elif attr == 'html':
self.html_options.update(value)
elif attr == 'instructions':
self.metadata['instructions'] = value
elif attr == 'required':
if value:
if validators.required not in self.validators:
self.validators.append(validators.required)
else:
if validators.required in self.validators:
self.validators.remove(validators.required)
elif attr == 'hidden':
if isinstance(self.type, fatypes.Date):
renderer = HiddenDateFieldRenderer
elif isinstance(self.type, fatypes.Time):
renderer = HiddenTimeFieldRenderer
elif isinstance(self.type, fatypes.DateTime):
renderer = HiddenDateTimeFieldRenderer
else:
renderer = HiddenFieldRenderer
self._renderer = renderer
elif attr == 'attrs':
self.render_opts.update(value)
elif attr in mapping:
attr = mapping.get(attr)
setattr(self, attr, value)
elif attr in ('multiple', 'options', 'size'):
if attr == 'options' and value is not None:
value = _normalized_options(value)
self.render_opts[attr] = value
else:
raise ValueError('Invalid argument %s' % attr)
return self
def with_null_as(self, option):
"""Render null as the given option tuple of text, value."""
return self._modified(_null_option=option)
def with_renderer(self, renderer):
"""
Return a copy of this Field, with a different renderer.
Used for one-off renderer changes; if you want to change the
renderer for all instances of a Field type, modify
FieldSet.default_renderers instead.
"""
return self._modified(_renderer=renderer)
def bind(self, parent):
"""Return a copy of this Field, bound to a different parent"""
return self._modified(parent=parent)
def with_metadata(self, **attrs):
"""Attach some metadata attributes to the Field, to be used by
conditions in templates.
Example usage:
>>> test = Field('test')
>>> field = test.with_metadata(instructions='use this widget this way')
...
And further in your templates you can verify:
>>> 'instructions' in field.metadata
True
and display the content in a <span> or something.
"""
new_attr = self.metadata.copy()
new_attr.update(attrs)
return self._modified(metadata=new_attr)
def validate(self, validator):
"""
Add the `validator` function to the list of validation
routines to run when the `FieldSet`'s `validate` method is
run. Validator functions take one parameter: the value to
validate. This value will have already been turned into the
appropriate data type for the given `Field` (string, int, float,
etc.). It should raise `ValidationError` if validation
fails with a message explaining the cause of failure.
"""
field = deepcopy(self)
field.validators.append(validator)
return field
def required(self):
"""
Convenience method for `validate(validators.required)`. By
default, NOT NULL columns are required. You can only add
required-ness, not remove it.
"""
return self.validate(validators.required)
def with_html(self, **html_options):
"""
Give some HTML options to renderer.
Trailing underscore (_) characters will be stripped. For example,
you might want to add a `class` attribute to your checkbox. You
would need to specify `.options(class_='someclass')`.
For WebHelpers-aware people: those parameters will be passed to
the `text_area()`, `password()`, `text()`, etc.. webhelpers.
NOTE: Those options can override generated attributes and can mess
the `sync` calls, or `label`-tag associations (if you change
`name`, or `id` for example). Use with caution.
"""
new_opts = copy(self.html_options)
for k, v in html_options.items():
new_opts[k.rstrip('_')] = v
return self._modified(html_options=new_opts)
def label(self, text=NoDefault):
"""Get or set the label for the field. If a value is provided then change
the label associated with this field. By default, the field name is
used, modified for readability (e.g., 'user_name' -> 'User name').
"""
if text is NoDefault:
if self.label_text is not None:
text = self.label_text
else:
text = self.parent.prettify(self.key)
if text:
F_ = get_translator(request=self.parent._request)
return h.escape_once(F_(text))
else:
return ''
return self._modified(label_text=text)
def label_tag(self, **html_options):
"""return the <label /> tag for the field."""
html_options.update(for_=self.renderer.name)
if 'class_' in html_options:
html_options['class_'] += self.is_required() and ' field_req' or ' field_opt'
else:
html_options['class_'] = self.is_required() and 'field_req' or 'field_opt'
return h.content_tag('label', self.label(), **html_options)
def attrs(self, **kwargs):
"""update ``render_opts``"""
self.render_opts.update(kwargs)
return self._modified(render_opts=self.render_opts)
def readonly(self, value=True):
"""
Render the field readonly.
By default, this marks a field to be rendered as read-only.
Setting the `value` argument to `False` marks the field as editable.
"""
return self._modified(_readonly=value)
def hidden(self):
"""Render the field hidden. (Value only, no label.)"""
if isinstance(self.type, fatypes.Date):
renderer = HiddenDateFieldRenderer
elif isinstance(self.type, fatypes.Time):
renderer = HiddenTimeFieldRenderer
elif isinstance(self.type, fatypes.DateTime):
renderer = HiddenDateTimeFieldRenderer
else:
renderer = HiddenFieldRenderer
return self._modified(_renderer=renderer, render_opts={})
def password(self):
"""Render the field as a password input, hiding its value."""
field = deepcopy(self)
field._renderer = lambda f: f.parent.default_renderers['password']
field.render_opts = {}
return field
def textarea(self, size=None):
"""
Render the field as a textarea. Size must be a string
(`"25x10"`) or tuple (`25, 10`).
"""
field = deepcopy(self)
field._renderer = lambda f: f.parent.default_renderers['textarea']
if size:
field.render_opts = {'size': size}
return field
def radio(self, options=None):
"""Render the field as a set of radio buttons."""
field = deepcopy(self)
field._renderer = lambda f: f.parent.default_renderers['radio']
if options is None:
options = self.render_opts.get('options')
else:
options = _normalized_options(options)
field.render_opts = {'options': options}
return field
def checkbox(self, options=None):
"""Render the field as a set of checkboxes."""
field = deepcopy(self)
field._renderer = lambda f: f.parent.default_renderers['checkbox']
if options is None:
options = self.render_opts.get('options')
else:
options = _normalized_options(options)
field.render_opts = {'options': options}
return field
def dropdown(self, options=None, multiple=False, size=5):
"""
Render the field as an HTML select field.
(With the `multiple` option this is not really a 'dropdown'.)
"""
field = deepcopy(self)
field._renderer = lambda f: f.parent.default_renderers['dropdown']
if options is None:
options = self.render_opts.get('options')
else:
options = _normalized_options(options)
field.render_opts = {'multiple': multiple, 'options': options}
if multiple:
field.render_opts['size'] = size
return field
def reset(self):
"""
Return the field with all configuration changes reverted.
"""
return deepcopy(self.parent._fields[self.name])
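# A minimal sketch of chaining the copy-returning modifiers above inside a
# FieldSet configuration (the model and field names are hypothetical):
#
#   fs = FieldSet(User)
#   fs.configure(options=[
#       fs.bio.textarea(size=(60, 8)),
#       fs.role.dropdown(options=[('Admin', 1), ('Member', 2)]),
#       fs.active.radio().label('Currently active'),
#   ])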
#==========================================================================
# HTML5 specific input types
#==========================================================================
def date(self):
'''
Render the field as a HTML5 date input type.
'''
field = deepcopy(self)
field._renderer = lambda f: f.parent.default_renderers['date']
return field
def datetime(self):
'''
Render the field as a HTML5 datetime input type.
'''
field = deepcopy(self)
field._renderer = lambda f: f.parent.default_renderers['datetime']
return field
def datetime_local(self):
'''
Render the field as a HTML5 datetime-local input type.
'''
field = deepcopy(self)
field._renderer = lambda f: f.parent.default_renderers['datetime_local']  # assumes a 'datetime_local' entry in default_renderers
return field
def month(self):
'''
Render the field as a HTML5 month input type.
'''
field = deepcopy(self)
field._renderer = lambda f: f.parent.default_renderers['month']
return field
def week(self):
'''
Render the field as a HTML5 week input type.
'''
field = deepcopy(self)
field._renderer = lambda f: f.parent.default_renderers['week']
return field
def time(self):
'''
Render the field as a HTML5 time input type.
'''
field = deepcopy(self)
field._renderer = lambda f: f.parent.default_renderers['time']
return field
def color(self):
'''
Render the field as a HTML5 color input type.
'''
field = deepcopy(self)
field._renderer = lambda f: f.parent.default_renderers['color']
return field
def range(self, min_=None, max_=None, step=None, value=None):
'''
Render the field as a HTML5 range input type, starting at `min_`,
ending at `max_`, with legal increments every `step` distance. The
default is set by `value`.
'''
field = deepcopy(self)
field._renderer = lambda f: f.parent.default_renderers['range']
field.render_opts = {}
if min_:
field.render_opts["min"] = min_
if max_:
field.render_opts["max"] = max_
if step:
field.render_opts["step"] = step
if value:
field.render_opts["value"] = value
return field
def number(self, min_=None, max_=None, step=None, value=None):
'''
Render the field as a HTML5 number input type, starting at `min_`,
ending at `max_`, with legal increments every `step` distance. The
default is set by `value`.
'''
field = deepcopy(self)
field._renderer = lambda f: f.parent.default_renderers['number']
field.render_opts = {}
if min_:
field.render_opts["min"] = min_
if max_:
field.render_opts["max"] = max_
if step:
field.render_opts["step"] = step
if value:
field.render_opts["value"] = value
return field
def url(self):
'''
Render the field as a HTML5 url input type.
'''
field = deepcopy(self)
field._renderer = lambda f: f.parent.default_renderers['url']
return field
def email(self):
'''
Render the field as a HTML5 email input type.
'''
field = deepcopy(self)
field._renderer = lambda f: f.parent.default_renderers['email']
return field
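# The HTML5 helpers above only swap the renderer looked up in
# parent.default_renderers, so they chain like any other modifier (the field
# names are hypothetical). Note that the `if min_:`-style guards in range() and
# number() skip falsy values such as 0.
#
#   fs.configure(options=[
#       fs.email.email(),
#       fs.homepage.url(),
#       fs.age.number(min_=1, max_=120, step=1),
#   ])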
def _get_renderer(self):
for t in self.parent.default_renderers:
if not isinstance(t, string_types) and type(self.type) is t:
return self.parent.default_renderers[t]
for t in self.parent.default_renderers:
if not isinstance(t, string_types) and isinstance(self.type, t):
return self.parent.default_renderers[t]
raise TypeError(
'No renderer found for field %s. '
'Type %s has no default renderer' % (self.name, self.type))
@property
def renderer(self):
if self._renderer is None:
self._renderer = self._get_renderer()
try:
self._renderer = self._renderer(self)
except TypeError:
pass
if not isinstance(self._renderer, FieldRenderer):
# must be a Renderer class. instantiate.
self._renderer = self._renderer(self)
return self._renderer
def _get_render_opts(self):
"""
Calculate the final options dict to be sent to renderers.
"""
# Use options from internally set render_opts
opts = dict(self.render_opts)
# Override with user-specified options (with .with_html())
opts.update(self.html_options)
return opts
def render(self):
"""
Render this Field as HTML.
"""
if self.is_readonly():
return self.render_readonly()
opts = self._get_render_opts()
if (isinstance(self.type, fatypes.Boolean)
and not opts.get('options')
and self.renderer.__class__ in [self.parent.default_renderers['dropdown'], self.parent.default_renderers['radio']]):
opts['options'] = [('Yes', True), ('No', False)]
return self.renderer.render(**opts)
def render_readonly(self):
"""
Render this Field as HTML for read only mode.
"""
return self.renderer.render_readonly(**self._get_render_opts())
def _pkify(self, value):
"""return the PK for value, if applicable"""
return value
@property
def value(self):
"""
The value of this Field: use the corresponding value in the bound `data`,
if any; otherwise, use the value in the bound `model`. For SQLAlchemy models,
if there is still no value, use the default defined on the corresponding `Column`.
For SQLAlchemy collections,
a list of the primary key values of the items in the collection is returned.
Invalid form data will cause an error to be raised. Controllers should thus validate first.
Renderers should thus never access .value; use .model_value instead.
"""
# TODO add ._validated flag to save users from themselves?
if not self.is_readonly() and self.parent.data is not None:
v = self._deserialize()
if v is not None:
return self._pkify(v)
return self.model_value
@property
def model_value(self):
"""
raw value from model, transformed if necessary for use as a form input value.
"""
raise NotImplementedError()
@property
def raw_value(self):
"""
raw value from model. different from `.model_value` in SQLAlchemy fields, because for reference types,
`.model_value` will return the foreign key ID. This will return the actual object
referenced instead.
"""
raise NotImplementedError()
def _deserialize(self):
return self.renderer.deserialize()
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__,self.name)
class Field(AbstractField):
"""
A manually-added form field
"""
def __init__(self, name=None, type=fatypes.String, value=None, **kwattrs):
"""
Create a new Field object.
- `name`:
field name
- `type=types.String`:
data type, from formalchemy.types (Integer, Float, String,
LargeBinary, Boolean, Date, DateTime, Time) or a custom type
- `value=None`:
default value. If value is a callable, it will be passed the current
bound model instance when the value is read. This allows creating a
Field whose value depends on the model once, then binding different
instances to it later.
* `name`: field name
* `type`: data type, from formalchemy.types (Boolean, Integer, String, etc.),
or a custom type for which you have added a renderer.
* `value`: default value. If value is a callable, it will be passed
the current bound model instance when the value is read. This allows
creating a Field whose value depends on the model once, then
binding different instances to it later.
"""
AbstractField.__init__(self, None) # parent will be set by ModelRenderer.add
self.type = type()
self.name = self.key = name
self._value = value
self.is_relation = False
self.is_scalar_relation = False
self.set(**kwattrs)
def set(self, **kwattrs):
if 'value' in kwattrs:
self._value = kwattrs.pop('value')
return AbstractField.set(self, **kwattrs)
@property
def model_value(self):
return self.raw_value
@property
def is_collection(self):
if isinstance(self.type, (fatypes.List, fatypes.Set)):
return True
return self.render_opts.get('multiple', False) or isinstance(self.renderer, self.parent.default_renderers['checkbox'])
@property
def raw_value(self):
try:
# this is NOT the same as getattr -- getattr will return the class's
# value for the attribute name, which for a manually added Field will
# be the Field object. So force looking in the instance __dict__ only.
return self.model.__dict__[self.name]
except (KeyError, AttributeError):
pass
if callable(self._value):
return self._value(self.model)
return self._value
def sync(self):
"""Set the attribute's value in `model` to the value given in `data`"""
if not self.is_readonly():
self._value = self._deserialize()
def __unicode__(self):
return self.render_readonly()
__str__ = __unicode__
def __eq__(self, other):
# we override eq so that when we configure with options=[...], we can match the renders in options
# with the ones that were generated at FieldSet creation time
try:
return self.name == other.name and _model_equal(self.model, other.model)
except (AttributeError, ValueError):
return False
def __hash__(self):
return hash(self.name)
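# A minimal sketch of manually added Fields (the names and values are
# hypothetical): they live on the FieldSet rather than on a mapped column, and
# sync() stores the deserialized value back onto the Field itself.
#
#   fs.append(Field('passwd').password().required())
#   fs.append(Field('age', type=fatypes.Integer, value=18))
#   fs.append(Field('motto', value=lambda model: getattr(model, 'motto', '')))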
class AttributeField(AbstractField):
"""
Field corresponding to an SQLAlchemy attribute.
"""
def __init__(self, instrumented_attribute, parent):
"""
>>> from formalchemy.tests import FieldSet, Order
>>> fs = FieldSet(Order)
>>> print(fs.user.key)
user
>>> print(fs.user.name)
user_id
"""
AbstractField.__init__(self, parent)
# we rip out just the parts we care about from InstrumentedAttribute.
# impl is the AttributeImpl. So far all we care about there is ".key,"
# which is the name of the attribute in the mapped class.
self._impl = instrumented_attribute.impl
# property is the PropertyLoader which handles all the interesting stuff.
# mapper, columns, and foreign keys are all located there.
self._property = instrumented_attribute.property
# True iff this is a multi-valued (one-to-many or many-to-many) SA relation
self.is_collection = isinstance(self._impl, CollectionAttributeImpl)
# True iff this is the 'one' end of a one-to-many relation
self.is_scalar_relation = isinstance(self._impl, ScalarObjectAttributeImpl)
# True iff this field represents a mapped SA relation
self.is_relation = self.is_scalar_relation or self.is_collection
self.is_composite = isinstance(self._property, CompositeProperty)
_columns = self._columns
self.is_pk = bool([c for c in self._columns if c.primary_key])
self.is_raw_foreign_key = bool(isinstance(self._property, ColumnProperty) and _foreign_keys(self._property.columns[0]))
self.is_composite_foreign_key = len(_columns) > 1 and not [c for c in _columns if not _foreign_keys(c)]
if self.is_composite:
# this is a little confusing -- we need to return an _instance_ of
# the correct type, which for composite values will be the value
# itself. SA should probably have called .type something
# different, or just not instantiated them...
self.type = self._property.composite_class.__new__(self._property.composite_class)
else:
# Test whether this is a multi-column foreign key, or a
# joined-inheritance table. In the latter case it doesn't
# matter which key we pick, as they're natural-joined anyway.
#
# Using names here is a hack.
# Also, joined inheritance needs test cases.
if len(_columns) > 1:
names = set()
for c in _columns:
names.add(c.key)
else:
names = (1,)
if len(names) > 1:
self.type = None
else:
self.type = _columns[0].type
self.key = self._impl.key
self._column_name = '_'.join([c.name for c in _columns])
# The name of the form input. usually the same as the key, except for
# single-valued SA relation properties. For example, for order.user,
# name will be 'user_id' (assuming that is indeed the name of the foreign
# key to users), but for user.orders, name will be 'orders'.
if self.is_collection or self.is_composite or not hasattr(self.model, self._column_name):
self.name = self.key
else:
self.name = self._column_name
# smarter default "required" value
if not self.is_collection and not self.is_readonly() and [c for c in _columns if not c.nullable]:
self.validators.append(validators.required)
info = dict([(str(k), v) for k, v in self.info.items() if k in self._valide_options])
if self.is_relation and 'label' not in info:
m = self._property.mapper.class_
label = getattr(m, '__label__', None)
if self._property.direction in (MANYTOMANY, ONETOMANY):
label = getattr(m, '__plural__', label)
if label:
info['label'] = label
self.set(**info)
@property
def info(self):
"""return the best information from SA's Column.info"""
info = None
if self.is_relation:
pairs = self._property.local_remote_pairs
if len(pairs):
for pair in reversed(pairs):
for col in pair:
if col.table in self._property.parent.tables and not col.primary_key:
return getattr(col, 'info', None)
elif col.table in self._property.mapper.tables:
if col.primary_key:
if self._property.direction == MANYTOMANY:
return getattr(col, 'info', None)
else:
parent_info = getattr(col, 'info', {})
info = {}
for k, v in parent_info.items():
if k.startswith('backref_'):
info[k[8:]] = v
return info
else:
try:
col = getattr(self.model.__table__.c, self.key)
except AttributeError:
return {}
else:
return getattr(col, 'info', None)
return {}
def is_readonly(self):
from sqlalchemy.sql.expression import _Label
return AbstractField.is_readonly(self) or isinstance(self._columns[0], _Label)
@property
def _columns(self):
if self.is_scalar_relation:
# If the attribute is a foreign key, return the Column that this
# attribute is mapped from -- e.g., .user -> .user_id.
return _foreign_keys(self._property)
elif isinstance(self._impl, ScalarAttributeImpl) or self._impl.__class__.__name__ in ('ProxyImpl', '_ProxyImpl'): # 0.4 compatibility: ProxyImpl is a one-off class for each synonym, can't import it
# normal property, mapped to a single column from the main table
prop = getattr(self._property, '_proxied_property', None)
if prop is None:
prop = self._property
try:
return tuple(prop.local_columns)
# it's a set, we want something indexable
except AttributeError: # compatibility for SQLAlchemy < 0.9
return prop.columns
else:
# collection -- use the mapped class's PK
assert self.is_collection, self._impl.__class__
return self._property.mapper.primary_key
def relation_type(self):
"""
The type of object in the collection (e.g., `User`).
Calling this is only valid when `is_relation` is True.
"""
return self._property.mapper.class_
def _pkify(self, value):
"""return the PK for value, if applicable"""
if value is None:
return None
if self.is_collection:
return [_pk(item) for item in value]
if self.is_relation:
return _pk(value)
return value
@property
def model_value(self):
return self._pkify(self.raw_value)
@property
def raw_value(self):
if self.is_scalar_relation:
v = getattr(self.model, self.key)
else:
try:
v = getattr(self.model, self.name)
except AttributeError:
v = getattr(self.model, self.key)
if v is not None:
return v
_columns = self._columns
if len(_columns) == 1 and _columns[0].default:
try:
from sqlalchemy.sql.expression import Function
except ImportError:
from sqlalchemy.sql.expression import _Function as Function
arg = _columns[0].default.arg
if callable(arg) or isinstance(arg, Function):
# callables often depend on the current time, e.g. datetime.now or the equivalent SQL function.
# these are meant to be the value *at insertion time*, so it's not strictly correct to
# generate a value at form-edit time.
pass
else:
return arg
return None
def sync(self):
"""Set the attribute's value in `model` to the value given in `data`"""
if not self.is_readonly():
setattr(self.model, self.name, self._deserialize())
def __eq__(self, other):
# we override eq so that when we configure with options=[...], we can match the renders in options
# with the ones that were generated at FieldSet creation time
try:
return self._impl is other._impl and _model_equal(self.model, other.model)
except (AttributeError, ValueError):
return False
def __hash__(self):
return hash(self._impl)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__,self.key)
def render(self):
if self.is_readonly():
return self.render_readonly()
if self.is_relation and self.render_opts.get('options') is None:
if self.is_required() or self.is_collection:
self.render_opts['options'] = []
else:
self.render_opts['options'] = [self._null_option]
# todo 2.0 this does not handle primaryjoin (/secondaryjoin) alternate join conditions
q = self.query(self.relation_type())
order_by = self._property.order_by
if order_by:
if not isinstance(order_by, list):
order_by = [order_by]
q = q.order_by(*order_by)
self.render_opts['options'] += _query_options(q)
logger.debug('options for %s are %s' % (self.name, self.render_opts['options']))
if self.is_collection and isinstance(self.renderer, self.parent.default_renderers['dropdown']):
self.render_opts['multiple'] = True
if 'size' not in self.render_opts:
self.render_opts['size'] = 5
return AbstractField.render(self)
def _get_renderer(self):
if self.is_relation:
return self.parent.default_renderers['dropdown']
return AbstractField._get_renderer(self)
def _deserialize(self):
# for multicolumn keys, we turn the string into python via _simple_eval; otherwise,
# the key is just the raw deserialized value (which is already an int, etc., as necessary)
if len(self._columns) > 1:
python_pk = _simple_eval
else:
python_pk = lambda st: st
if self.is_collection:
return [self.query(self.relation_type()).get(python_pk(pk)) for pk in self.renderer.deserialize()]
if self.is_composite_foreign_key:
return self.query(self.relation_type()).get(python_pk(self.renderer.deserialize()))
return self.renderer.deserialize()
/Gbtestapi0.3-0.1a10-py3-none-any.whl/gailbot/services/organizer/settings/settingManager.py
from typing import Dict, Union, List
import os
from .objects import SettingDict, SettingObject, PluginSuiteSetObj, EngineSetObj
from gailbot.core.utils.general import (
is_file,
is_directory,
read_toml,
get_name,
make_dir,
delete,
filepaths_in_dir,
)
from gailbot.core.utils.logger import makelogger
logger = makelogger("setting_manager")
class ExistingSettingName(Exception):
def __init__(self, name: str, *args: object) -> None:
super().__init__(*args)
self.name = name
def __str__(self) -> str:
return f"the setting name {self.name} already exist"
class SettingManager:
"""
Manages all available settings
"""
profiles: Dict[str, SettingObject] = dict()
engine_settings: Dict[str, EngineSetObj] = dict()
def __init__(self, workspace: str, load_exist: bool = True) -> None:
"""constructing the setting manager
Args:
workspace (str): the path to the directory stores all the
setting files
load_exist (bool, optional): if true , load existing setting in
workspace. Defaults to True.
"""
self.workspace = workspace
self.engine_set_space = os.path.join(workspace, "engine_setting")
self.default_setting = None
self.default_engine_setting = None
if not is_directory(self.workspace):
make_dir(self.workspace)
if not is_directory(self.engine_set_space):
make_dir(self.engine_set_space)
if load_exist:
engine_files = filepaths_in_dir(self.engine_set_space, ["toml"])
for file in engine_files:
self.load_set_from_file(file, self.add_new_engine, overwrite=True)
setting_files = filepaths_in_dir(self.workspace, ["toml"])
for file in setting_files:
self.load_set_from_file(file, self.add_new_setting, overwrite=True)
def load_set_from_file(self, file_path, addfun, overwrite: bool = False) -> bool:
"""load the setting from local file
Args:
file_path (str): the file path
overwrite (bool, optional): if true, the loaded
file will overwrite existing setting with same name. Defaults to False.
Returns:
bool: return true if the loading is successful, false if the file
cannot be loaded
"""
if is_file(file_path):
try:
name = get_name(file_path)
data = read_toml(file_path)
return addfun(name, data, overwrite)
except Exception as e:
logger.error(e, exc_info=e)
return False
#####################################################################
# Functions for managing engine setting #
#####################################################################
def get_engine_setting_names(self) -> List[str]:
"""return a list of available engine setting name
Returns:
List[str]: a list of engine setting names
"""
return list(self.engine_settings.keys())
def add_new_engine(self, name, engine: Dict[str, str], overwrite: bool = False):
"""add a new engine setting
Args:
name (str): the name of the engine setting
engine (Dict[str, str]): the data of the engine setting,
one required field is the type of the
engine
overwrite (bool, optional): if True, overwrite the existing engine
setting with the same name. Defaults to False.
Raises:
ExistingSettingName: if the engine setting name has been taken, and overwrite is set to False
Returns:
bool: return true if the setting is successfully added, false otherwise
"""
if self.is_engine_setting(name) and (not overwrite):
raise ExistingSettingName(name)
try:
setting: EngineSetObj = EngineSetObj(engine, name)
assert setting.engine_setting
self.engine_settings[name] = setting
self.save_engine_setting(name)
return True
except Exception as e:
logger.error(e, exc_info=e)
return False
def remove_engine_setting(self, name):
"""remove the engine setting from the disk
Args:
name (str): the name of the engine setting
Returns:
bool: return true if the engine setting is removed successfully
"""
try:
assert self.is_engine_setting(name)
assert not self.engine_settings[name].is_in_use()
del self.engine_settings[name]
if is_file(self.get_engine_src_path(name)):
delete(self.get_engine_src_path(name))
return True
except Exception as e:
logger.error(e, exc_info=e)
return False
def is_engine_setting_in_use(self, name) -> bool:
"""check if the engine setting is in use
Args:
name (str): the name of the engine setting
"""
return self.engine_settings[name].is_in_use()
def is_engine_setting(self, name):
"""check if the given setting is engine setting
Args:
name (str): the name that identify the engine setting
Returns:
bool: true if the setting is engine setting false otherwise
"""
return name in self.engine_settings
def save_engine_setting(self, name: str) -> Union[bool, str]:
"""save the setting as a local file
Args:
name (str): the setting name
Returns:
Union[bool, str]: return the saved file path if the setting is
saved successfully, return false otherwise
"""
try:
out_path = self.get_engine_src_path(name)
if is_file(out_path):
delete(out_path)
self.engine_settings[name].save_setting(out_path)
return out_path
except Exception as e:
logger.error(e, exc_info=e)
return False
def update_engine_setting(self, name: str, setting_data: Dict[str, str]) -> bool:
"""
update the engine setting
Args:
name(str)
setting_data(Dict[str, str])
Returns:
bool
"""
if self.is_engine_setting(name):
try:
engine_setting = self.engine_settings[name]
assert engine_setting.update_setting(setting_data)
assert self.save_engine_setting(name)
for profile in engine_setting.applied_in_profiles:
## update the engine setting on the disk
self.save_setting(profile)
return True
except Exception as e:
logger.error(e, exc_info=e)
return False
def get_engine_src_path(self, name: str) -> str:
"""given a engine setting name, return its path
Args:
name (str): the engine setting name
Returns:
str: a path to store the setting file
Note:
This is a function to form a path to the local setting file
in a unified format, the path does not guaranteed to indicate
an existing setting file
"""
return os.path.join(self.engine_set_space, name + ".toml")
def get_engine_setting_data(self, name: str) -> Union[bool, Dict[str, str]]:
"""get the setting data of the engine setting
Args:
name (str): the name of the engine
Returns:
Union[bool, Dict[str, str]]: return the dictionary that stores the
engine data if the given engine name
is a valid engine in the setting
manager, else return false
"""
if self.is_engine_setting(name):
return self.engine_settings[name].get_setting_dict()
else:
return False
def _get_profile_engine(self, profile_name: str) -> EngineSetObj:
"""return the engine used in the profile identifies by profile name
Args:
profile_name (str): the name of the profile to be queried
Returns:
EngineSetObj: the engine object
"""
profile_obj = self.profiles[profile_name]
engine_obj = self.engine_settings[profile_obj.engine_setting_name]
return engine_obj
def set_to_default_engine_setting(self, setting_name: str) -> bool:
"""set one setting to be the default setting
Args:
name (str): the name of the setting
Returns:
bool: return true if the default setting can be set,
false otherwise
"""
if setting_name in self.engine_settings:
self.default_engine_setting = setting_name
return True
else:
return False
def get_default_engine_setting_name(self) -> str:
"""return the name of the default engine
Returns:
str: _description_
"""
return self.default_engine_setting
#####################################################################
# Functions for managing profile setting #
#####################################################################
def get_setting_names(self) -> List[str]:
"""return a list of available setting names
Returns:
List[str]: a list of setting names
"""
return list(self.profiles.keys())
def remove_setting(self, name: str) -> bool:
"""given the setting name, remove the setting and the local
setting file
Args:
name (str): the name that identify the setting
Returns:
bool: return true if the removal is successful, false if
the setting does not exist
"""
if self.is_setting(name):
settingObj = self.profiles.pop(name)
self.engine_settings[settingObj.engine_setting_name].remove_applied_profile(
name
)
if is_file(self.get_profile_src_path(name)):
delete(self.get_profile_src_path(name))
return True
else:
return False
def get_setting(self, name: str) -> Union[SettingObject, bool]:
"""given the setting name, return the corresponding setting
Args:
name (str): a name that identifies the setting
Returns:
Union [SettingObject, bool]: return the setting object if the
setting is found, return false if the setting does not exist
"""
if self.is_setting(name):
return self.profiles[name]
else:
return False
def add_new_setting(
self, name: str, data: SettingDict, overwrite: bool = False
) -> Union[bool, str]:
"""add a new setting
Args:
name (str): the setting name that identifies the setting
setting (Dict[str, str]): a dictionary that stores the setting
overwrite (bool, optional): if true, the given setting will overwrite
an existing setting if a setting with the same name exist.
Defaults to False.
Returns:
Union[bool, str]: True if the setting is created successfully,
False otherwise
Raises:
ExistingSettingName: raised when the setting name already exist
and the overwrite option is set to false
"""
logger.info(f"get engine {data}")
if self.is_setting(name):
if overwrite:
self.remove_setting(name)
else:
raise ExistingSettingName(name)
try:
engine_set_name = data["engine_setting_name"]
engine_obj = self.engine_settings[engine_set_name]
plugin_obj = PluginSuiteSetObj(data["plugin_setting"])
setting: SettingObject = SettingObject(
engine_setting=engine_obj,
engine_setting_name=engine_set_name,
plugin_setting=plugin_obj,
name=name,
)
self.engine_settings[engine_set_name].add_applied_profile(name)
assert setting and setting.engine_setting
self.profiles[name] = setting
self.save_setting(name)
return True
except Exception as e:
logger.error(e, exc_info=e)
return False
def is_setting(self, name: str) -> bool:
"""tell if a setting exists in the setting manager
Args:
name (str): the setting name
Returns:
bool: return true if the given name is an existing setting, false
otherwise
"""
return name in self.profiles
def update_setting(self, name: str, setting_data: SettingDict) -> bool:
"""update the setting
Args:
name (str): setting name
setting_data (Dict[str, str]): the updated setting content
Returns:
bool: return true if the setting is updated, false if the
setting does not exist or the new setting dictionary
cannot be validated
"""
if self.is_setting(name):
try:
profile_setting = self.profiles[name]
orig_engine = profile_setting.engine_setting.name
engine_set_name = setting_data["engine_setting_name"]
engine_obj = self.engine_settings[engine_set_name]
plugin_obj = PluginSuiteSetObj(setting_data["plugin_setting"])
assert profile_setting.update_setting(
engine_setting=engine_obj, plugin_setting=plugin_obj
)
assert self.save_setting(name)
return True
except Exception as e:
logger.error(e, exc_info=e)
else:
return False
def rename_setting(self, name: str, new_name: str) -> bool:
"""rename a setting
Args:
name (str): the original name of the setting
new_name (str): the new name of the setting
Returns:
bool: return true if the setting can be renamed, false
if the setting does not exist or if the new_name
has been taken by other existing setting
"""
if self.is_setting(name):
if self.is_setting(new_name):
logger.error(f"new name{ new_name} has been taken")
return False
temp = self.profiles.pop(name)
engine_applied = self._get_profile_engine(name)
engine_applied.remove_applied_profile(name)
temp.name = new_name
engine_applied.add_applied_profile(new_name)
self.profiles[new_name] = temp
self.save_setting(new_name)
if is_file(self.get_profile_src_path(name)):
delete(self.get_profile_src_path(name))
logger.info("update_setting")
return self.profiles[new_name] != None
else:
logger.error("the setting is not found")
return False
def save_setting(self, name: str) -> Union[bool, str]:
"""save the setting as a local file
Args:
name (str): the setting name
Returns:
Union[bool, str]: return the saved file path if the setting
is saved successfully, return false otherwise
"""
try:
out_path = self.get_profile_src_path(name)
if is_file(out_path):
delete(out_path)
self.profiles[name].save_setting(out_path)
return out_path
except Exception as e:
logger.error(e, exc_info=e)
return False
def get_setting_dict(self, setting_name: str) -> Union[bool, SettingDict]:
"""return the setting data as a dictionary
Args:
setting_name (str): the name that identifies the setting
Returns:
Union[bool, SettingDict]: if the setting exists, return the setting
data, else return false
"""
if setting_name in self.profiles:
return self.profiles[setting_name].get_data()
else:
return False
def get_profile_src_path(self, name: str) -> str:
"""given a setting name, return its path
Args:
name (str): the setting name
Returns:
str: a path to store the setting file
Note:
This function forms a path to the local setting file
in a unified format; the path is not guaranteed to point to
an existing setting file
"""
return os.path.join(self.workspace, name + ".toml")
def delete_all_settings(self) -> bool:
"""delete all settings
Returns:
bool: True if the deletion is successful, false if not
"""
try:
for setting in self.get_setting_names():
if setting != "default":
self.remove_setting(setting)
return True
except Exception as e:
logger.error(e, exc_info=e)
return False
def get_all_settings_data(self) -> Dict[str, SettingDict]:
"""
return a dictionary that stores all available setting data
"""
setting_dict = dict()
for key, setting_object in self.profiles.items():
setting_dict[key] = setting_object.data
logger.info(f"setting data {setting_dict}")
return setting_dict
def set_to_default_setting(self, setting_name: str) -> bool:
"""set one setting to be the default setting
Args:
setting_name (str): the name of the setting
Returns:
bool: return true if the default setting can be set,
false otherwise
"""
if setting_name in self.profiles:
self.default_setting = setting_name
return True
else:
return False
def get_default_profile_setting_name(self) -> str:
"""get the default setting name
Returns:
str: the default setting
"""
return self.default_setting
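# Illustrative sketch (not part of the original module): assuming a
# constructed manager instance and a valid SettingDict, the profile API
# above is typically exercised along these lines. The `manager` variable
# and the dictionary keys shown are hypothetical placeholders.
#
#   data = {"engine_setting_name": "whisper", "plugin_setting": []}
#   if manager.add_new_setting("meeting", data, overwrite=True):
#       manager.rename_setting("meeting", "weekly-meeting")
#       manager.set_to_default_setting("weekly-meeting")
#       print(manager.get_setting_dict("weekly-meeting"))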
#####################################################################
# function for managing plugin setting #
#####################################################################
def is_suite_in_use(self, suite_name: str) -> bool:
"""given a suite_name, check if this suite is used
in any of the setting
Args:
suite_name (str): the name of the plugin suite
Returns:
bool: return true if the suite is used in any of the setting,
false otherwise
"""
for setting_obj in self.profiles.values():
if suite_name in setting_obj.get_plugin_setting():
return True
return False | PypiClean |
/Augmently-1.0.9.tar.gz/Augmently-1.0.9/README.md | # 🖼️💥 Augmently :
An Open Source library for Data Augmentation for image classification.\
With Flips, Square crops and resizing, and Salt and Pepper Noise.\
# 📁Note On Folder Format:
Currently the library only works for images grouped each in their own class folder.
#### For example:
──My_animal_images_folder
├── Dogs
│ ├── dog_img_1.jpg
│ ├── dog_img_2.jpg
│ ├── ...
│ └── dog_img_n.jpeg
├── Cats
├── ...
└── Elephants
# ⭐Start Using it:
Download the Augmently folder and place it in your project folder.
COPY PASTE the code below:
from Augmently.Augmently import create_resized_cropped_square_class_images, create_salt_and_pepper_class_images, create_flipped_class_images
# 🤖 The Functions:
## 🔲 Crop to Square Size - create_resized_cropped_square_class_images()
### What it does:
Creates a new folder with your images both resized and cropped to the square image length of your choice
### Arguments:
image_data_path (String) , output_path (string), square_length (Number)
### Example Usage:
create_resized_cropped_square_class_images("Desktop/image_folder", "Desktop/image_folder_square_resized_224pixels", 224 )
## 🧂Salt and Pepper Noise - create_salt_and_pepper_class_images()
### What it does:
Creates a new folder with your images with your desired amount of salt and pepper noise pixels added to your images
### Arguments:
image_data_path (String) , output_path (String) , noise_amount (Number)
### Example Usage:
create_salt_and_pepper_class_images("Desktop/image_folder", "Desktop/image_folder_salt_pepper_0.05", 0.05 )
## ↔️ Flip images - create_flipped_class_images()
### What it does:
Creates a new folder with your images flipped
### Arguments:
image_data_path (String) , output_path (string)
### Example Usage:
create_flipped_class_images("Desktop/image_folder", "Desktop/image_folder_flipped")
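## 🔗 Chaining The Functions
### What it shows:
A minimal end-to-end sketch (the folder names and the 224 / 0.05 values are placeholder choices, not requirements) that squares a dataset, adds noise, then flips the result, with each step reading the previous step's output folder:
### Example Usage:
create_resized_cropped_square_class_images("Desktop/image_folder", "Desktop/square_224", 224)
create_salt_and_pepper_class_images("Desktop/square_224", "Desktop/square_224_noisy", 0.05)
create_flipped_class_images("Desktop/square_224_noisy", "Desktop/square_224_noisy_flipped")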
# 🔜 In Progress:
## 🔄 Square Cropped Rotations - create_rotated_class_images()
### What it does:
Creates a new folder with your images with your desired amount of maximum rotation to your images
### Arguments:
image_data_path (String) , output_path (String) , max_rotation (Number in degrees)
### Example Usage:
create_rotated_images_in_new_folder("Desktop/image_folder", "Desktop/image_folder_rotated_360", 360 )
## 🌈 Gaussian Noise - create_gaussian_noise_class_images()
### What it does:
Creates a new folder with your images with your desired amount of gaussian noise to your images
### Arguments:
image_data_path (String) , output_path (String) , amount_noise (Number)
### Example Usage:
create_gaussian_noise_images_in_new_folder("Desktop/image_folder", "Desktop/image_gaussian_0.05", 0.05 )
## ⬛ Add Occluding Black Squares - create_occluding_squares_class_images()
### What it does:
Creates a new folder with your images with your desired max size of occluding black squares to your images
### Arguments:
image_data_path (String) , output_path (String) , max_square_len (Number in px)
### Example Usage:
create_occluding_square_images_in_new_folder("Desktop/image_folder", "Desktop/image_black_square_20", 20 )
## 🔍 Random Resized Zooms - create_zoom_class_images()
### What it does:
Creates a new folder with your images with your desired max zoom (resized to original len) to your images
### Arguments:
image_data_path (String) , output_path (String) , max_zoom_square_len (Number in px)
### Example Usage:
create_zoomed_images_in_new_folder("Desktop/image_folder", "Desktop/image_zoom_120", 120 )
| PypiClean |
/Aoss_Tower_a1_Conversion-1.7.0.tar.gz/Aoss_Tower_a1_Conversion-1.7.0/convertFromASCIIToNETCDF/Util.py | from datetime import datetime
from datetime import timedelta
"""
This class takes dates and generates paths based upon those
date time objects.
"""
class Util(object):
# the purpose of this function is to return
# the filename that the parser is going to use
# based upon a datetime object
# @param datetime object
#
# @return filename
def FILENAME(self, date):
month = date.month
if(month < 10):
month = "0" + (str)(month)
else:
month = (str)(month)
day = date.day
if(day < 10):
day = "0" + (str)(day)
else:
day = (str)(day)
#get total date
totalDate = (str)(date.year) + "-" + month + "-" + day
#returns file name
return '/mnt/inst-data/aoss-tower/' + (str)(date.year) + '/' + month + '/rig_tower.' + totalDate + '.ascii'
# create path based on the date
# @return filepath
# @param date
def destinationPath(self, date):
year = str(date.year)
if date.month < 10:
month = "0" + str(date.month)
else:
month = str(date.month)
if(date.day < 10):
day = "0" + str(date.day)
else:
day = str(date.day)
#all file paths start with
startofPath = "/data3/kgao/testAll15/"
#next part of path is year + year-month
eOPath = year + "/" + year + "-" + month + "/"
return startofPath + eOPath
# create netCDF4 file name
# @return file name
# @param date
def ncFileName(self, date):
year = str(date.year)
if date.month < 10:
month = "0" + str(date.month)
else:
month = str(date.month)
if(date.day < 10):
day = "0" + str(date.day)
else:
day = str(date.day)
#create netCDF name
netCDFName = "rig-tower." + year + "-" + month + "-" + day + ".nc"
#returns newly created name
return netCDFName
# altitude value is not exact
# return altitude value
# @return altitude value
# no parameters
def ALTITUDE(self):
return 328
# create a date format from datetime
# @param datetime obj
# @return YYYY-MM-DD
def dateFormat(self, date):
year = str(date.year)
if date.month < 10:
month = "0" + str(date.month)
else:
month = str(date.month)
if(date.day < 10):
day = "0" + str(date.day)
else:
day = str(date.day)
#return YYYY-MM-DD
return year + "-" + month + "-" + day
# The purpose of this function is to generate yesterday's datetime
# obj.
# no parameters
# @return yesterday's datetime object
def getYesterdaysDTobj(self):
#time difference of 1 day
td = timedelta(1)
return datetime.today() - td | PypiClean |
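# Illustrative sketch (not part of the original module): for an arbitrary
# example date, the helpers above compose as follows.
#
#   from datetime import datetime
#   util = Util()
#   d = datetime(2016, 3, 7)
#   util.FILENAME(d)         # '/mnt/inst-data/aoss-tower/2016/03/rig_tower.2016-03-07.ascii'
#   util.destinationPath(d)  # '/data3/kgao/testAll15/2016/2016-03/'
#   util.ncFileName(d)       # 'rig-tower.2016-03-07.nc'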
/Findig-0.1.0-py3-none-any.whl/findig/tools/dataset.py | from abc import ABCMeta, abstractmethod
from collections.abc import Callable, Iterable, Mapping, MutableMapping
from contextlib import contextmanager
from itertools import islice
from werkzeug.utils import cached_property
from findig.context import ctx
from findig.utils import extremum
class AbstractDataSet(Iterable, metaclass=ABCMeta):
"""
An abstract data set is a representation of a collection of items.
Concrete implementations must provide *at least* an implementation
for ``__iter__``, which should return an iterator of
:class:`AbstractRecord` instances.
"""
def __str__(self):
return "[{}]".format(
", ".join(str(item) for item in self)
)
def fetch(self, **search_spec):
"""
Fetch an :class:`AbstractRecord` matching the search specification.
If this is called outside a request, a lazy record is returned
immediately (i.e., the backend isn't hit until the record is
explicitly queried).
"""
if hasattr(ctx, 'request') and ctx.request.method.lower() in ('get', 'head'):
# We're inside a GET request, so we can immediately grab a
# record and return it
return self.fetch_now(**search_spec)
else:
# We're not inside a request; we don't wan't to hit the
# database searching for the record unless the record is
# explicitly accessed.
cls = LazyMutableRecord \
if isinstance(self, MutableDataSet) \
else LazyRecord
return cls(lambda: self.fetch_now(**search_spec))
def fetch_now(self, **search_spec):
"""
Fetch an :class:`AbstractRecord` matching the search specification.
Unlike :meth:`fetch`, this function will always hit the backend.
"""
for record in self:
if FilteredDataSet.check_match(record, search_spec):
return record
else:
raise LookupError("No matching item found.")
def filtered(self, **search_spec):
"""
Return a filtered view of this data set.
Each keyword represents the name of a field that is checked, and
the corresponding argument indicates what it is checked against. If
the argument is :class:`~collections.abc.Callable`, then it should
be a predicate that returns ``True`` if the field is valid (be aware
that the predicate will passed be ``None`` if the field isn't
present on the record), otherwise it is compared against the field
for equality.
"""
return FilteredDataSet(self, **search_spec)
def limit(self, count, offset=0):
"""
Return a limited version of this data set.
:param offset: The number of items to skip from the beginning
:param count: The maximum number of items to return
"""
return DataSetSlice(self, offset, offset+count)
def sorted(self, *sort_spec, descending=False):
"""
Return a sorted view of this data set.
The method takes a variable number of arguments that specify its
sort specification.
If a single, callable argument is provided, it is taken as a
sort key for a record.
Otherwise, the arguments are taken as field names to be sorted,
in the same order given in the argument list. Records that omit
one of these fields appear later in the sorted set than
those that don't.
"""
return OrderedDataSet(self, *sort_spec, descending=descending)
class MutableDataSet(AbstractDataSet, metaclass=ABCMeta):
"""
An abstract data set that can add new child elements.
"""
@abstractmethod
def add(self, data):
"""Add a new child item to the data set."""
class AbstractRecord(Mapping, metaclass=ABCMeta):
"""
An representation of an item belonging to a collection.
"""
def __iter__(self):
yield from self.cached_data
def __len__(self):
return len(self.cached_data)
def __getitem__(self, key):
return self.cached_data[key]
def __str__(self):
return "{{{}}}".format(
", ".join("{!r} : {}".format(k, v)
for k,v in self.items())
)
@cached_property
def cached_data(self):
return self.read()
@abstractmethod
def read(self):
"""
Read the record's data and return a mapping of fields to
values.
"""
class MutableRecord(MutableMapping, AbstractRecord, metaclass=ABCMeta):
"""
An abstract record that can update or delete itself.
"""
def __setitem__(self, field, val):
self.patch({field: val})
def __delitem__(self, field):
self.patch({}, (field,))
def invalidate(self, new_data=None):
if new_data is None:
self.__dict__.pop('cached_data', None)
else:
self.__dict__['cached_data'] = new_data
def start_edit_block(self):
"""
Start a transaction to the backend.
Backend edits made through this object should be grouped together
until :meth:`close_edit_block` is called.
:return: A token that is passed into :meth:`close_edit_block`.
"""
raise NotImplementedError
def close_edit_block(self, token):
"""
End a transaction started by :meth:`start_edit_block`.
"""
raise NotImplementedError
def update(self, E=None, **add_data):
add_data.update({} if E is None else E)
self.patch(add_data, ())
@contextmanager
def edit_block(self):
"""
A context manager for grouping a chain of edits together.
Some subclasses may not support performing reads inside an
edit block.
"""
token = self.start_edit_block()
yield token
self.close_edit_block(token)
@abstractmethod
def delete(self):
"""
Delete the record's data.
"""
@abstractmethod
def patch(self, add_data, remove_fields):
"""
Update the record's data with the new data.
"""
class LazyRecord(AbstractRecord):
def __init__(self, func):
self.func = func
def read(self):
return self.record
@cached_property
def record(self):
return self.func()
class LazyMutableRecord(MutableRecord, LazyRecord):
def __init__(self, func):
self.func = func
def patch(self, *args, **kwargs):
self.record.patch(*args, **kwargs)
def start_edit_block(self):
return self.record.start_edit_block()
def close_edit_block(self, token):
self.record.close_edit_block(token)
def delete(self):
self.record.delete()
class FilteredDataSet(AbstractDataSet):
"""
A concrete implementation of a data set that wraps another data
to only expose items that pass a through a filter.
:param dataset: A dataset that is filtered
:type dataset: :class:`AbstractDataSet`
The filter is specified through keyword arguments to the instance.
Each keyword represents the name of a field that is checked, and
the corresponding argument indicates what it is checked against. If
the argument is :class:`~collections.abc.Callable`, then it should
be a predicate that returns ``True`` if the field is valid (be aware
that the predicate will be passed ``None`` if the field isn't
present on the record), otherwise it is compared against the field
for equality. The function :meth:`FilteredDataSet.check_match`
implements this checking procedure.
"""
def __init__(self, dataset, **filter_spec):
self.ds = dataset
self.fs = filter_spec
def __iter__(self):
for record in self.ds:
if self.check_match(record, self.fs):
yield record
def __repr__(self):
return "<filtered-view({!r})|{}".format(
self.ds,
",".join("{}={!r}".format(k,v) for k,v in self.fs.items())
)
@staticmethod
def check_match(record, spec):
"""
Check that a record matches the search specification.
:param record: A record against which the specification is checked.
:type record: :class:`collections.abc.Mapping`
:param spec: A dictionary of field names and their expected values.
If an "expected value" is callable, it is treated as
a predicate that returns ``True`` if the field's
value is considered a match.
"""
for field, expected in spec.items():
val = record.get(field)
if isinstance(expected, Callable):
if not expected(val):
return False
elif not val == expected:
return False
else:
return True
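# Illustrative sketch (not part of the original module): a filter spec can
# mix literal values and predicates. The record and field names below are
# hypothetical.
#
#   record = {"genre": "jazz", "year": 1959}
#   FilteredDataSet.check_match(record, {"genre": "jazz"})                    # True
#   FilteredDataSet.check_match(record, {"year": lambda y: y and y > 2000})   # False
#   FilteredDataSet.check_match(record, {"rating": 5})                        # False, field missing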
class DataSetSlice(AbstractDataSet):
"""
A concrete implementation of a data set that wraps another data set
to expose only a slice of the original set.
:param start: Items before this zero-based index are skipped.
:type start: positive integer
:param stop: If given, this is the first item to be skipped after
the slice.
:type stop: positive integer
:param step: If given, step - 1 items are skipped between every
item in the slice.
"""
def __init__(self, dataset, start, stop=None, step=None):
self.ds = dataset
self.start = start
self.stop = stop
self.step = step
def __iter__(self):
yield from islice(self.ds, self.start, self.stop, self.step)
def __repr__(self):
return "{!r}[{}:{}]".format(
self.ds,
self.start,
"" if self.stop is None else self.stop
)
class OrderedDataSet(AbstractDataSet):
"""
A concrete implementation of a data set that wraps another data set
and returns its items in order.
"""
def __init__(self, dataset, *sort_spec, descending=False):
self.ds = dataset
self.ss = sort_spec
self.rv = descending
def __iter__(self):
yield from sorted(self.ds, key=self.make_key(*self.ss), reverse=self.rv)
def __repr__(self):
return "<sorted-view[{}] of {!r}>".format(
", ".join(self.ss),
self.ds
)
@staticmethod
def make_key(*sort_spec):
if len(sort_spec) == 1 and isinstance(sort_spec[0], Callable):
return sort_spec[0]
elif any(isinstance(si, Callable) for si in sort_spec):
raise ValueError("If a key function is used, it must be the "
"only argument.")
else:
def keyfunc(record):
return tuple(record.get(k, extremum()) for k in sort_spec)
return keyfunc
__all__ = ['AbstractDataSet', 'AbstractRecord', 'MutableDataSet',
'MutableRecord', 'FilteredDataSet', 'DataSetSlice',
'OrderedDataSet'] | PypiClean |
/CherryMusic-0.41.3.tar.gz/CherryMusic-0.41.3/res/js/cherrymusic.js |
var browser = detectBrowser();
//see http://www.w3schools.com/html/html5_audio.asp for available formats per browser
if(['msie','safari'].indexOf(browser) != -1){
var encoderPreferenceOrder = ['mp3','ogg'];
} else {
var encoderPreferenceOrder = ['opus', 'ogg','mp3'];
}
var SERVER_CONFIG = {};
var availableEncoders = undefined;
var availablejPlayerFormats = ['opus', 'mp3','ogg'];
var availableDecoders = undefined;
var transcodingEnabled = undefined;
var userOptions = undefined;
var isAdmin = undefined;
var loggedInUserName = undefined;
var REMEMBER_PLAYLIST_INTERVAL = 3000;
var CHECK_MUSIC_PLAYING_INTERVAL = 2000;
var HEARTBEAT_INTERVAL_MS = 30*1000;
var playlistSelector = '.jp-playlist';
var previousSorted = undefined
var executeAfterConfigLoaded = []
/**
* This function can call the cherrymusic api (v1)
* api(String actionname, -> action name as defined in httphandler.py
* [data,] -> simple js object containing the data
successfunc, -> function to be called on success
* errorfunc, -> function to be called on error
* completefunc) -> function to be called after error/success
*/
function api(){
"use strict";
var action = arguments[0];
var has_data = !(typeof arguments[1] === 'function');
var data = {};
if(has_data){
data = arguments[1];
}
var successfunc = arguments[has_data?2:1];
var errorfunc = arguments[has_data?3:2];
var completefunc = arguments[has_data?4:3];
if(!successfunc) successfunc = function(){};
if(!completefunc) completefunc = function(){};
var successFuncWrapper = function(successFunc){
return function handler(json){
var result = $.parseJSON(json);
if(result.flash){
successNotify(result.flash);
}
successFunc(result.data);
}
}
//wrapper for all error handlers:
var errorFuncWrapper = function(errorFunc){
return function(httpstatus){
if(httpstatus.status == 401){
/* if a request get's a 401, that means the user was logged
* out, so we reload to show the login page. */
reloadPage();
}
errorFunc();
}
}
if(!errorfunc){
//default error handler
errorfunc = function(){
errorFunc('Error calling API function "'+action+'"')();
};
}
$.ajax({
url: 'api/'+action,
context: $(this),
type: 'POST',
data: {'data': JSON.stringify(data)},
success: successFuncWrapper(successfunc),
error: errorFuncWrapper(errorfunc),
complete: completefunc,
});
}
htmlencode = function(val){
return $('<div />').text(val?val:'').html();
}
htmldecode = function(val){
return $('<div />').html(val?val:'').text();
}
function errorFunc(msg){
"use strict";
return function(){
window.console.error('CMError: '+msg);
displayNotification(msg,'error');
};
}
function successNotify(msg){
return function(){
displayNotification(msg,'success');
};
}
function hideNotification(selector){
var notification = $(selector);
if(notification.length){
notification.fadeOut('slow', function(){
notification.remove();
});
}
}
function displayNotification(msg, type, duration){
if(typeof duration === 'undefined'){
duration = 5000;
}
var selector = '#errormessage:contains(' + msg + ')';
var notificationExists = Boolean($(selector).length);
if(notificationExists) {
return;
}
var unique_class_id = 'notification-'+Math.floor(Math.random()*1000000);
var cssclass;
if(type == 'error'){
cssclass = 'alert-danger';
} else if(type == 'success'){
cssclass = 'alert-success';
} else {
cssclass = 'alert-info';
}
cssclass += ' '+unique_class_id;
templateLoader.render_append(
'flash-message',
{
msg : msg,
cssclass: cssclass,
},
$('#errormessage')
);
window.setTimeout('hideNotification(".'+unique_class_id+'")', duration)
}
/*******************
CONFIGURATION LOADER
*******************/
function loadConfig(executeAfter){
"use strict";
var success = function(data){
var dictatedClientConfig = data;
/** DEPRECATED GLOBAL VARIABLES **/
availableEncoders = dictatedClientConfig.getencoders;
availableDecoders = dictatedClientConfig.getdecoders;
transcodingEnabled = dictatedClientConfig.transcodingenabled;
isAdmin = dictatedClientConfig.isadmin;
loggedInUserName = dictatedClientConfig.username;
/** USE SERVER CONFIG INSTEAD **/
SERVER_CONFIG = {
'available_encoders': dictatedClientConfig.getencoders,
'available_decoders': dictatedClientConfig.getdecoders,
'fetchalbumart': dictatedClientConfig.fetchalbumart,
'transcoding_enabled': dictatedClientConfig.transcodingenabled,
'is_admin': dictatedClientConfig.isadmin,
'user_name': dictatedClientConfig.username,
'serve_path': dictatedClientConfig.servepath,
'transcode_path': dictatedClientConfig.transcodepath,
'auto_login': dictatedClientConfig.auto_login,
'version': dictatedClientConfig.version,
'rootpath': dictatedClientConfig.rootpath,
'albumart_search_methods': dictatedClientConfig.albumart_search_methods,
}
executeAfter();
if(isAdmin){
$('a[href="#adminpanel"]').show();
}
if(SERVER_CONFIG.auto_login){
$('#logout-menu-button').parent('li').addClass('disabled');
$('#logout-menu-button').attr('onclick', '');
$('#logout-menu-button').attr('title', 'Cannot logout: Auto-Login enabled');
}
if(SERVER_CONFIG.albumart_search_methods && SERVER_CONFIG.albumart_search_methods.length > 0) {
$.each(SERVER_CONFIG.albumart_search_methods, function (i, method) {
$('#albumart-search-method').append($('<option>', {
value: method,
text: method,
}));
});
} else {
$('#albumart-search-method').hide();
}
$('#aboutModal #cherrymusic-version').html(SERVER_CONFIG.version)
};
var error = errorFunc("Could not fetch client configuration, CherryMusic will not work. Clearing the browser cache might help.");
api('getconfiguration', {}, success, error);
}
/************
* USER OPTIONS
* **********/
function loadUserOptions(onSuccess){
var success = function(userOptionsLoaded){
userOptions = userOptionsLoaded;
if(typeof onSuccess !== 'undefined'){
onSuccess();
}
$('#custom_theme-primary_color').val(userOptions.custom_theme.primary_color);
$('#custom_theme-white_on_black').attr('checked',userOptions.custom_theme.white_on_black);
$('#keyboard_shortcuts-next').html(String.fromCharCode(userOptions.keyboard_shortcuts.next));
$('#keyboard_shortcuts-prev').html(String.fromCharCode(userOptions.keyboard_shortcuts.prev));
$('#keyboard_shortcuts-stop').html(String.fromCharCode(userOptions.keyboard_shortcuts.stop));
$('#keyboard_shortcuts-play').html(String.fromCharCode(userOptions.keyboard_shortcuts.play));
$('#keyboard_shortcuts-pause').html(String.fromCharCode(userOptions.keyboard_shortcuts.pause));
$('#keyboard_shortcuts-search').html(String.fromCharCode(userOptions.keyboard_shortcuts.search));
$('#misc-autoplay_on_add').attr('checked',userOptions.misc.autoplay_on_add);
$('#ui-confirm_quit_dialog').attr('checked',userOptions.ui.confirm_quit_dialog);
$('#ui-display_album_art').attr('checked',userOptions.ui.display_album_art);
handle_useroption_force_transcode_bitrate();
};
api('getuseroptions', success);
}
var waitForServerOptions = function(callback) {
var serverOptionsAreLoaded = Boolean(Object.keys(SERVER_CONFIG).length);
if(!serverOptionsAreLoaded) {
var timeout = 500;
setTimeout(callback, timeout);
return true;
}
return false;
};
var handle_useroption_force_transcode_bitrate = function() {
if(waitForServerOptions(handle_useroption_force_transcode_bitrate)) {
console.info('useroption handler waiting for server options...');
return;
}
var forced_bitrate = userOptions.media.force_transcode_to_bitrate;
if (SERVER_CONFIG['transcoding_enabled']) {
var select = "select[name='media-force_transcode_to_bitrate']";
var selected = select + "> option[value='x']".replace(/x/, forced_bitrate);
var deselected = selected.replace(/value=/, 'value!=');
$(selected).attr('selected', 'selected');
$(deselected).removeAttr('selected');
$("#media-force_transcode_to_bitrate-display").val(forced_bitrate);
if([0, 48, 64, 96, 128, 320].indexOf(forced_bitrate) < 0) {
console.log("Unknown bitrate value:", forced_bitrate);
}
} else {
var optionContainer = $("#media-force_transcode_to_bitrate-container");
optionContainer.find(".success").hide();
if(forced_bitrate) {
userOptions.media.force_transcode_to_bitrate = false;
var msg = 'WARNING Cannot enforce bitrate limit of :value kbps: server does not transcode!';
var extra = ' <a href="#userOptions" role="button" class="btn btn-info" data-toggle="modal">Disable limit in options menu</a>';
msg = msg.replace(/:value/, forced_bitrate);
displayNotification(msg + extra, 'error');
var errorArea = optionContainer.find(".error");
errorArea.find(".msg").html(msg);
errorArea.show();
}
}
};
var optionSetter = function(name, val, success, error){
busy('#userOptions .content').hide().fadeIn();
api('setuseroption',
{
'optionkey':name,
'optionval':val
},
function(){ success(); loadUserOptions(); },
error,
function(){ busy('#userOptions .content').fadeOut('fast'); }
)
}
keyboard_shortcut_setter = function(option, optionname){
$('#shortcut-changer span').html('Hit any key to set shortcut for<br><b><i>'+optionname+'</i></b><br><br>Press <b>escape</b> or <b>space</b> to cancel.');
$('#shortcut-changer').fadeIn('fast');
$('#shortcut-changer input').val('');
$('#shortcut-changer input').focus();
var keydownhandler = function(e){
if (e.altKey) return;
if (e.shiftKey) return;
if (e.ctrlKey) return;
if (e.metaKey) return;
var keyboardsetterend = function(){
$('#shortcut-changer input').unbind('keydown',keydownhandler);
$('html').unbind('keydown',keydownhandler);
$('#shortcut-changer').fadeOut('fast');
}
if(e.which && e.which !== 27 && e.which !== 32){ //do not bind unrecognised keys or escape / space
optionSetter(option,e.which,keyboardsetterend,keyboardsetterend);
}
keyboardsetterend();
return false;
}
$('#shortcut-changer input').bind('keydown',keydownhandler);
$('html').bind('keydown',keydownhandler);
}
function busy(selector, rect){
"use strict";
var domelem = $(selector).children('.busy-indicator');
if(domelem.length == 0){
domelem = $('<div></div>');
domelem.addClass('busy-indicator');
$(selector).append(domelem);
}
var top, left, width, height;
var pos = $(selector).position();
top = 'top: '+pos.top+'px;';
left = 'left: '+pos.left+'px;';
width = 'width: '+$(selector).width()+'px;';
height = 'height: '+$(selector).height()+'px;';
domelem.attr('style','position: absolute;'+top+left+width+height);
return domelem;
}
function search($form){
"use strict";
var $input = $form.find('input');
if($input.val().trim() == ""){
//make sure no spaces, so placeholder is shown
$input.val('');
$input.prop('placeholder', 'Search for what?');
$input.focus();
return false;
}
var searchstring = $input.val();
var success = function(json){
$('.searchinput').removeClass('searchinput-busy');
new MediaBrowser('.search-results', json, 'Search: '+htmlencode(searchstring));
};
var error = function(){
$('.searchinput').removeClass('searchinput-busy');
errorFunc('failed loading search results')();
};
$('.searchinput').addClass('searchinput-busy');
api('search', {'searchstring': searchstring}, success, error);
return false;
}
function submitsearch(){
search();
return false;
}
/***
INTERACTION
***/
/* PLAYLIST CREATION AND MANAGEMENT END*/
ext2jPlayerFormat = function(ext){
switch(ext){
case "mp3": return "mp3";
case "ogg":
case "opus":
case "oga": return "oga";
case "m4a":
case "mp4":
case "aac": return "m4a";
case "flac" : return "flac"
case "wav": return "wav";
case "weba": return "webma";
}
}
/******************
PLAYLIST MANAGEMENT
******************/
function savePlaylistAndHideDialog(){
"use strict";
var name = $('#playlisttitle').val();
var pub = $('#playlistpublic').prop('checked')?true:false;
if(name.trim() !== ''){
var pl = playlistManager.newPlaylistFromEditing();
savePlaylist(pl.id,name,pub);
$('#saveplaylistmodal').modal('hide');
}
$(this).blur();
return false;
}
function savePlaylist(plid,playlistname,ispublic,overwrite){
"use strict";
var pl = playlistManager.getPlaylistById(plid);
overwrite = Boolean(overwrite);
ispublic = ispublic || pl.public;
playlistname = playlistname || pl.name;
var success = function(){
playlistManager.getPlaylistById(plid).name = playlistname;
playlistManager.getPlaylistById(plid).public = ispublic;
playlistManager.getPlaylistById(plid).saved = true;
playlistManager.refresh();
playlistManager.showPlaylist(plid);
}
busy('#playlist-panel').hide().fadeIn('fast');
api('saveplaylist',
{
'playlist':pl.jplayerplaylist.playlist,
'public':ispublic,
'playlistname':playlistname,
'overwrite':overwrite,
},
success,
errorFunc('error saving playlist'),
function(){busy('#playlist-panel').fadeOut('fast')});
}
function getAddrPort(){
m = (window.location+"").match(/(https?):\/\/([^/:]+)(?::(\d+))?/); // won't work for URLs with "user:passw@host"
// 0: whole match, 1: protocol, 2: host, 3: port or undefined
// whole match = "$protocol://$host(:$port)?"
return m[0];
}
function ord(c)
{
return c.charCodeAt(0);
}
function showPlaylists(sortby, filterby){
"use strict";
var success = function(data){
var addressAndPort = getAddrPort();
var value_before = $('.playlist-filter-input').val();
new MediaBrowser('.search-results', data, 'Playlist browser', false, {showPlaylistPanel: true});
$('.playlist-filter-input').val(value_before);
};
var error = errorFunc('error loading external playlists');
if(sortby == previousSorted){
sortby = '-' + sortby;
}
previousSorted = sortby;
busy('.search-results').hide().fadeIn('fast');
api('showplaylists',
{'sortby': sortby,
'filterby': filterby},
success,
error,
function(){busy('.search-results').fadeOut('fast')}
);
}
function changePlaylist(plid,attrname,value){
window.console.log(plid);
window.console.log(attrname);
window.console.log(value);
busy('#playlist-panel').hide().fadeIn('fast');
api('changeplaylist',
{
'plid' : plid,
'attribute' : attrname,
'value' : value
},
function(){
showPlaylists();
},
errorFunc('error changing playlist attribute'),
function(){busy('#playlist-panel').fadeOut('fast')}
);
}
function confirmDeletePlaylist(id,title){
$('#deletePlaylistConfirmButton').off();
$('#deletePlaylistConfirmButton').on('click', function(){
busy('#playlist-panel').hide().fadeIn('fast');
api('deleteplaylist',
{'playlistid': id},
false,
errorFunc('error deleting playlist'),
function(){busy('#playlist-panel').fadeOut('fast')}
);
$('#dialog').fadeOut('fast');
showPlaylists();
});
$('#deleteplaylistmodalLabel').html(Mustache.render('Really delete Playlist "{{title}}"',{title:title}));
$('#deleteplaylistmodal').modal('show');
}
function loadPlaylist(playlistid, playlistlabel){
var success = function(data){
var tracklist = data;
//transform tracks to jplayer format:
//TODO rewrite jplayer playlist to support CM-music entry format
var pl = playlistManager.newPlaylist([], playlistlabel);
var animate = false;
for(var i=0; i<tracklist.length; i++){
playlistManager.addSong(tracklist[i].urlpath, tracklist[i].label, pl.id, animate);
}
pl.setSaved(true);
pl.scrollToTrack(0);
}
api('loadplaylist',
{'playlistid': playlistid},
success,
errorFunc('error loading external playlist'),
function(){busy('#playlist-panel').fadeOut('fast')}
)
}
function loadPlaylistContent(playlistid, playlistlabel){
"use strict";
var pldomid = "#playlist"+playlistid+' .playlist-content';
if('' === $(pldomid).html().trim()){
var success = function(data){
new MediaBrowser(pldomid, data, playlistlabel, false);
$("#playlist"+playlistid+' .playlist-detail-switch .glyphicon')
.toggleClass('glyphicon-chevron-right')
.toggleClass('glyphicon-chevron-down');
};
busy('#playlist-panel').hide().fadeIn('fast');
api('loadplaylist',
{'playlistid': playlistid},
success,
errorFunc('error loading external playlist'),
function(){busy('#playlist-panel').fadeOut('fast')}
);
} else {
$(pldomid).slideToggle('slow');
$("#playlist"+playlistid+' .playlist-detail-switch .glyphicon')
.toggleClass('glyphicon-chevron-right')
.toggleClass('glyphicon-chevron-down');
}
}
function randomPlaylist() {
"use strict";
playlistManager.clearQueue();
var success = function(tracks){
for (var i = 0; i < tracks.length; i++) {
var track = tracks[i];
playlistManager.addSong(track.urlpath, track.label)
}
};
busy('#jplayer').hide().fadeIn('fast');
api('generaterandomplaylist',
success,
errorFunc('error loading random playlist'),
function(){busy('#jplayer').fadeOut('fast')}
);
}
var lastPlaylistHeight = 0;
function resizePlaylistSlowly(){
var currentHeight = $('.jp-playlist').height();
if(lastPlaylistHeight <= currentHeight){
$('#playlistContainerParent').animate({'min-height': currentHeight});
}
lastPlaylistHeight = currentHeight;
}
function download_editing_playlist(){
var pl = playlistManager.getEditingPlaylist();
var p = pl.jplayerplaylist.playlist;
var track_urls = [];
for(i=0; i<p.length; i++){
track_urls.push(decodeURIComponent(p[i].url));
}
api('downloadcheck',
{'filelist': track_urls},
function(msg){
if(msg == 'ok'){
//add tracks to hidden form and call to call download using post data
$('#download-redirect-files').val(encodeURIComponent(JSON.stringify(track_urls)));
// disable confirm-to-quit for the download link (will be reenabled automatically)
window.onbeforeunload = null
$('#download-redirect').submit();
} else {
alert(msg);
}
},
errorFunc('Failed to check if playlist may be downloaded')
);
}
/*****
OTHER
*****/
function reloadPage(){
// make sure rootpath starts with a '/'
var rootpath = '/' + SERVER_CONFIG.rootpath.replace(/^[/]/, '');
//reconstruct url to suppress page reload post-data warning
var reloadurl = window.location.protocol + '//' + window.location.host + rootpath;
window.location.href = reloadurl;
}
function logout(){
"use strict";
api('logout', reloadPage);
}
/** TEMPLATES **/
function TemplateLoader(template_path){
this.template_path = template_path;
this.loaded_templates = {};
var self = this;
this.get = function(template_name, callback){
if(this.loaded_templates.hasOwnProperty(template_name)){
callback(this.loaded_templates[template_name]);
} else {
$.get(
this.template_path+'/'+template_name+'.html',
function(data){
self.loaded_templates[template_name] = data;
if(typeof callback === 'undefined'){
window.console.log('preloaded template '+template_name);
} else {
callback(self.loaded_templates[template_name]);
}
}
);
}
}
this.render = function(template_name, content, $jqobj){
this.get(template_name, function(template){
$jqobj.html(Mustache.render(template, content));
});
}
this.render_append = function(template_name, content, $jqobj){
this.get(template_name, function(template){
$jqobj.append(Mustache.render(template, content));
});
};
this.cached = function(template_name){
if(this.loaded_templates.hasOwnProperty(template_name)){
return this.loaded_templates[template_name];
} else {
window.console.error('Can not return unloaded template '+template_name+'!');
return '';
}
}
}
var templateLoader = new TemplateLoader('res/templates');
//preload templates for mediabrowser
templateLoader.get('mediabrowser-directory');
templateLoader.get('mediabrowser-file');
templateLoader.get('mediabrowser-compact');
templateLoader.get('mediabrowser-message');
templateLoader.get('mediabrowser-playlist');
//preload templates for flash message
templateLoader.get('flash-message');
/***
ADMIN PANEL
***/
function updateUserList(){
"use strict";
var success = function(data){
var htmllist = "";
var response = $.parseJSON(data);
var time = response['time'];
var template_user_data = {'users': []};
$.each(response['userlist'],function(i,e){
var reltime = time - e.last_time_online;
template_user_data['users'].push({
isadmin: e.admin,
may_download: e.may_download,
isnotadmin: !e.admin,
isdeletable: e.deletable,
userid: e.id,
isonline: reltime < HEARTBEAT_INTERVAL_MS/500,
username: e.username,
username_color: userNameToColor(e.username),
fuzzytime: time2text(reltime),
});
});
templateLoader.get('user-list', function(template){
$('#adminuserlist').html(Mustache.render(template, template_user_data));
});
};
busy('#adminuserlist').hide().fadeIn('fast');
api('getuserlist',
success,
errorFunc('cannot fetch user list'),
function(){busy('#adminuserlist').fadeOut('fast')}
);
}
function addNewUser(){
"use strict";
var newusername = $('#newusername').val();
var newpassword = $('#newpassword').val();
var newisadmin = $('#newisadmin').prop('checked')?1:0;
if(newusername.trim() === '' || newpassword.trim() === ''){
return;
}
var success = function(data){
$('#newusername').val('');
$('#newpassword').val('');
$('#newisadmin').prop('checked', false);
updateUserList();
};
busy('#adminpanel').hide().fadeIn('fast');
api('adduser',
{
'username':newusername,
'password':newpassword,
'isadmin':newisadmin
},
success,
errorFunc('failed to add new user'),
function(){busy('#adminpanel').fadeOut('fast')}
);
}
function userDelete(userid){
var success = function(data){
updateUserList();
};
busy('#adminuserlist').hide().fadeIn('fast');
api('userdelete',
{ 'userid':userid },
success,
errorFunc('failed to delete user'),
function(){ busy('#adminuserlist').fadeOut('fast') }
);
}
function userSetPermitDownload(userid, allow_download){
var success = function(data){
updateUserList();
};
busy('#adminuserlist').hide().fadeIn('fast');
api('setuseroptionfor',
{
'optionkey': 'media.may_download',
'optionval': allow_download,
'userid': userid,
},
success,
errorFunc('Failed to set user download state'),
function(){busy('#adminuserlist').fadeOut('fast')}
);
}
function userChangePassword(){
if (! validateNewPassword($('#newpassword-change'), $('#repeatpassword-change'))) {
return false;
}
var success = function(data){
$('#changePassword').find('input').each(function(idx, el) { $(el).val(''); } );
$('#changePassword').modal('hide');
$('#userOptions').modal('hide');
successNotify('Password changed successfully!')();
};
var error = function(){
$('#oldpassword-change').val('');
$('#oldpassword-change').focus();
$("#changePassword").modal('attention');
};
busy('#changePassword').hide().fadeIn('fast');
api('userchangepassword',
{
'oldpassword':$('#oldpassword-change').val(),
'newpassword':$('#newpassword-change').val()
},
success,
error,
function(){busy('#changePassword').fadeOut('fast');}
);
}
function validateNewPassword($newpwfield, $repeatpwfield){
var newpw = $newpwfield.val();
var repeatpw = $repeatpwfield.val();
if (newpw == repeatpw) {
$repeatpwfield.closest('.control-group').removeClass('error');
return true;
}
$repeatpwfield.closest('.control-group').addClass('error');
return false;
}
function userExportPlaylists() {
var loc = window.location;
var hostaddr = loc.protocol + '//' + loc.host;
$('#exportPlaylists input[name=hostaddr]').val(hostaddr);
$('#exportPlaylists form').submit();
$('#exportPlaylists').modal('hide');
$('#userOptions').modal('hide');
}
function enableJplayerDebugging(){
$('#jplayer_inspector').jPlayerInspector({jPlayer:$('#jquery_jplayer_1'),visible:true});
$('#jquery_jplayer_1').data().jPlayer.options.errorAlerts = true;
$('#jquery_jplayer_1').data().jPlayer.options.warningAlerts = true;
$('#jplayer_inspector_update_0').click();
}
function loadBrowser(directory, title){
if(typeof directory === 'undefined'){
directory = '';
}
if(typeof title === 'undefined'){
title = 'Root';
}
var success = function(data){
new MediaBrowser('.search-results', data, title);
};
busy('#searchfield').hide().fadeIn('fast');
api('listdir',
{'directory' : directory},
success,
errorFunc('failed to load file browser'),
function(){busy('#searchfield').fadeOut('fast')});
}
/***
HELPER
***/
function endsWith(str, suffix) {
"use strict";
return str.indexOf(suffix, str.length - suffix.length) !== -1;
}
function getFileTypeByExt(filepath){
"use strict";
var extmatch = filepath.match(/.*?\.(\w+)$/);
if(extmatch){
return extmatch[1].toLowerCase();
}
}
function detectBrowser(){
var browsers = ['midori','firefox','msie','chrome','safari','opera']
for(var i=0; i<browsers.length; i++){
if(navigator.userAgent.toLowerCase().indexOf(browsers[i])!=-1){
return browsers[i];
}
}
return 'unknown';
}
/*****
* UTIL
* ***/
function unixtime(){
var d = new Date;
return parseInt(d.getTime() / 1000);
}
function time2text(sec){
var abssec = Math.abs(sec);
var minutes = parseInt(abssec/60);
var hours = parseInt(minutes/60)
var days = parseInt(hours/24);
var weeks = parseInt(days/7);
var months = parseInt(days/30);
var years = parseInt(days/365);
var t='';
if(abssec < 30){
return 'just now'
} else {
if(years != 0){
t = years == 1 ? 'a year' : years+' years';
if(years > 20){
t = 'a long time';
}
} else if(months != 0){
t = months == 1 ? 'a month' : months+' months';
} else if(weeks != 0){
t = weeks == 1 ? 'a week' : weeks+' weeks';
} else if(days != 0){
t = days == 1 ? 'a day' : days+' days';
} else if(hours != 0){
t = hours == 1 ? 'an hour' : hours+' hours';
} else if(minutes != 0){
t = minutes > 25 ? 'half an hour' : minutes+' minutes';
if (minutes == 1){
t = 'a minute';
}
} else {
t = 'a few seconds'
}
return sec > 0 ? t+' ago' : 'in '+t;
}
}
function dec2Hex(dec){
var hexChars = "0123456789ABCDEF";
var a = dec % 16;
var b = (dec - a)/16;
var hex = hexChars.charAt(b) + hexChars.charAt(a);
return hex;
}
function userNameToColor(username){
username = username.toUpperCase();
username+='AAA';
var g = ((ord(username[0])-65)*255)/30;
var b = ((ord(username[1])-65)*255)/30;
var r = ((ord(username[2])-65)*255)/30;
return '#'+dec2Hex(r)+dec2Hex(g)+dec2Hex(b);
}
/*****************
* KEYBOARD SHORTCUTS
* ***************/
function initKeyboardshortcuts(){
$(window.document).bind('keydown', keyboardShortcuts);
//disable space bar scrolling
$(document).keydown(function (e) {
var focusedElement = $("*:focus");
var inputFieldFocused = focusedElement.length > 0;
var key = e.charCode ? e.charCode : e.keyCode ? e.keyCode : 0;
if (key === 32 && !inputFieldFocused) e.preventDefault();
});
}
function keyboardShortcuts(e){
//we don't want to ruin all the nice standard shortcuts.
if (e.altKey) return;
if (e.shiftKey) return;
if (e.ctrlKey) return;
if (e.metaKey) return;
var actions = { 'next' : function(e){playlistManager.cmd_next()},
'pause' : function(e){playlistManager.cmd_pause()},
'play' : function(e){playlistManager.cmd_play()},
'prev' : function(e){playlistManager.cmd_previous()},
'search' : function(e){$('#searchform input').focus().select(); e.preventDefault();},
'stop' : function(e){playlistManager.cmd_stop()},
};
var mediaKeys = { 'MediaNextTrack' : 'next',
'MediaPlayPause' : 'pause', //The pause action is really play/pause, while the play action is only play.
'MediaPreviousTrack' : 'prev',
'MediaStop' : 'stop'
//Volume up/down/mute keys also exist, but ignore them because they already control system volume.
};
var triggerAction = function (action){
window.console.log('triggering: '+action);
actions[action](e);
};
if (e.key && mediaKeys[e.key]){
triggerAction(mediaKeys[e.key]);
} else {
var focusedElement = $("*:focus");
var inputFieldFocused = focusedElement.length > 0;
if(inputFieldFocused){
if(e.which === 27){ //escape -> unfocus
focusedElement.blur();
}
} else if(e.which === 32){
triggerAction('pause');
} else {
for(var action in actions){
if(e.which === userOptions.keyboard_shortcuts[action] && userOptions.keyboard_shortcuts[action]){
triggerAction(action);
break;
}
}
}
}
}
function sendHeartBeat(){
api('heartbeat',
function(){ /*removeError('connection to server lost') */ },
errorFunc('connection to server lost'),
true)
window.setTimeout('sendHeartBeat()', HEARTBEAT_INTERVAL_MS);
}
function userOptionCheckboxListener(htmlid, optionname){
REQUIRES_RELOAD_ON_ENABLE = ['#ui-confirm_quit_dialog'];
$(htmlid).on('change',function(){
var self = this;
optionSetter( optionname,
$(this).is(':checked'),
function(){
if($(self).is(':checked')){
if(REQUIRES_RELOAD_ON_ENABLE.indexOf(htmlid) != -1){
alert('You need to reload the page for this setting to take effect.')
}
}
},
errorFunc('Error setting option! '+optionname)
);
});
}
function userOptionMultivalListener(selector, optionname) {
$(selector).on('change',function(){
var self = this;
optionSetter( optionname,
$(this).val(),
function(){
},
errorFunc('Error setting option! '+optionname)
);
});
}
/*****************************
CONDITIONAL USER INTERFACE
*****************************/
function show_ui_conditionally(selectors, conditions_table){
var conditions_met = [];
for(var condition_name in conditions_table){
if(conditions_table.hasOwnProperty(condition_name)){
if(conditions_table[condition_name]){
conditions_met.push(condition_name);
}
}
}
//support for single string as first argument
if(!selectors instanceof Array){
selectors = [selectors];
}
for(var i=0; i<selectors.length; i++){
//check all buttons for their show conditions and hide/show them
$(selectors[i]+' > [show-cond]').each(function(i, e){
var ui_element = $(e);
var conditions_needed = ui_element.attr('show-cond').split(' ');
ui_element.show();
$.each(conditions_needed, function(i, e){
if(conditions_met.indexOf(e) < 0){
ui_element.hide();
return false;
}
});
});
}
}
function jPlayerIsPlaying(){
return !$('#jquery_jplayer_1').data().jPlayer.status.paused;
}
function dontCloseWindowIfMusicPlays(){
if(userOptions.ui.confirm_quit_dialog){
if(jPlayerIsPlaying()){
if(window.onbeforeunload === null){
// register close dialog if music is playing
window.onbeforeunload = function() {
return "This will stop the playback. Do you really want to close CherryMusic?";
}
}
} else {
if(window.onbeforeunload !== null){
// remove close dialog if no music is playing
window.onbeforeunload = null;
}
}
window.setTimeout("dontCloseWindowIfMusicPlays()", CHECK_MUSIC_PLAYING_INTERVAL)
} else {
window.onbeforeunload = null;
}
}
function searchAlbumArt(){
busy('#changeAlbumArt .modal-body').hide().fadeIn('fast');
var success = function(urllist){
$('.coverart-tryout').html('');
for(var i=0; i<urllist.length; i++){
var html = '<div class="album-art-choice">'+
'<img width="80" height="80" src="'+urllist[i]+'"'+
' onclick="pickCoverArt($(this))" '+
'" />'+
'</div>';
$('.coverart-tryout').append(html);
}
}
api('fetchalbumarturls',
{
'searchterm': $('#albumart-search-term').val(),
'method': $('#albumart-search-method').val()
},
success,
errorFunc('Error fetching image urls'),
function(){busy('#changeAlbumArt .modal-body').fadeOut('fast')});
}
function pickCoverArt(img){
var imagesrc = $(img).attr('src');
var dirname = decodeURIComponent($('#changeAlbumArt').attr('data-dirname'));
var success = function(){
$('#changeAlbumArt').modal('hide');
//reload cover art:
var folder_div = $('.list-dir[dir="'+dirname+'"]')
//force reload image
var folder_image = folder_div.find('img');
folder_image.attr('src', folder_image.attr('src')+'&reload=1');
}
api('albumart_set',
{
'directory': dirname,
'imageurl': imagesrc,
},
success
);
}
function displayMessageOfTheDay(){
api('getmotd',
function(resp){
if(resp.type == 'update'){
html = Mustache.render(
'<a href="http://fomori.org/cherrymusic/">'+
'CherryMusic {{version}} is available!'+
'<h2>download now →</h2>'+
'</a><hr>'+
'<h3>{{features_count}} new {{feature_title}}:</h3>'+
'<ul class="feature-list">'+
' {{#features}}<li>{{.}}</li>{{/features}}'+
'</ul>'+
'<h3>{{fixes_count}} {{fixes_title}}:</h3>'+
'<ul class="feature-list">'+
' {{#fixes}}<li>{{.}}</li>{{/fixes}}'+
'</ul><hr>'+
'<p>'+
' And a lot of other stuff, see the'+
' <a href="https://github.com/devsnd/cherrymusic/blob/{{version}}/CHANGES" target="_blank">'+
' CHANGELOG</a>.'+
'</p>',
{
version: resp.data.version,
features: resp.data.features,
features_count: resp.data.features.length,
feature_title: resp.data.features.length > 1 ? 'features' : 'feature',
fixes: resp.data.fixes,
fixes_count: resp.data.fixes.length,
fixes_title: resp.data.fixes.length > 1 ? 'fixes' : 'fix',
});
$('#motd').html(html);
} else if(resp.type == 'wisdom'){
$('#motd').html('useless wisdom<hr>'+resp.data);
} else {
window.console.error('unknown motd type '+resp.type);
}
},
errorFunc('could not fetch message of the day')
);
}
/***
ON DOCUMENT READY... STEADY... GO!
***/
var playlistManager;
$(document).ready(function(){
"use strict";
$('#playlistBrowser').hide();
loadConfig(function(){
playlistManager = new PlaylistManager();
$('#username-label').text('('+loggedInUserName+')');
});
loadUserOptions(function(){
initKeyboardshortcuts();
dontCloseWindowIfMusicPlays();
$('#albumart').toggle(userOptions.ui.display_album_art)
});
$('#search-panel').on('scroll', function(){
//enable loading of images when in viewport
MediaBrowser.static.albumArtLoader('#search-panel');
});
//register top level directories
$('div#progressscreen').fadeOut('slow');
//window.setInterval("resizePlaylistSlowly()",2000);
$('#searchform .searchinput').focus();
sendHeartBeat();
displayMessageOfTheDay();
$('#adminpanel').on('shown.bs.modal', function (e) {
updateUserList();
});
$('#save-playlist-from-queue').on('click',function(){
$('#playlisttitle').val('');
$("#playlistpublic").attr("checked", true);
});
$('#saveplaylistmodal').on('shown.bs.modal',function(){
$('#playlisttitle').focus();
$('#playlisttitle').bind('keyup',function(e){
if(e.which === 13) { //enter
savePlaylistAndHideDialog();
} else if(e.which === 27){ //escape
$('#saveplaylistmodal').modal('hide');
}
});
});
$('#saveplaylistmodal').on('hide', function(){
$('#playlisttitle').unbind('keyup');
});
$('#changeAlbumArt').on('shown.bs.modal', function(){
//empty old search results
$('#changeAlbumArt .coverart-tryout').empty();
//set input field in modal
$("#albumart-search-term").val(decodeURIComponent($('#changeAlbumArt').attr('data-dirname')));
$("#albumart-search-term").focus();
//when pressing enter, the search should start:
$("#albumart-search-term").off('keypress').on('keypress', function(e){
if (e.keyCode == '13' || e.which == '13'){
searchAlbumArt();
}
});
});
$('#changePassword').on('show.bs.modal', function(){
//$('#changePassword').data('modal').options.focusOn = '#oldpassword-change';
});
userOptionCheckboxListener('#misc-show_playlist_download_buttons',
'misc.show_playlist_download_buttons');
userOptionCheckboxListener('#misc-autoplay_on_add',
'misc.autoplay_on_add');
userOptionCheckboxListener('#ui-confirm_quit_dialog',
'ui.confirm_quit_dialog');
userOptionCheckboxListener('#ui-display_album_art',
'ui.display_album_art');
userOptionMultivalListener("select[name='media-force_transcode_to_bitrate']",
'media.force_transcode_to_bitrate');
$('#media-force_transcode_to_bitrate-disable').click(function(){
optionSetter('media.force_transcode_to_bitrate', 0, function(){
$('#media-force_transcode_to_bitrate-disable').closest('.error').hide();
});
});
$('#ui-display_album_art').click(function() {
$('#albumart').toggle($('#ui-display_album_art').prop('checked'));
});
}); | PypiClean |
/Cubane-1.0.11.tar.gz/Cubane-1.0.11/cubane/media/static/cubane/media/js/cubane.media.js | (function (globals){
"use strict";
cubane.namespace('cubane.media');
cubane.require('cubane.dom');
cubane.require('cubane.urls');
cubane.require('cubane.dialog');
var droppedFiles = undefined;
function setup() {
// files changed
var fileInput = document.querySelector('input[type=file]');
if (fileInput) {
fileInput.addEventListener('change', function(e) {
var holder = cubane.dom.closest(fileInput, '.cubane-file-upload');
onImageChanged(holder, e.target.files);
});
}
// set image boundaries
window.addEventListener('resize', setImageBoundaries);
setImageBoundaries();
// image drag and drop
var container = document.querySelectorAll('.cubane-file-upload');
for (var i = 0; i < container.length; i++) {
setupDragAndDrop(container[i]);
}
// submit
for (var i = 0; i < container.length; i++) {
setupUpload(container[i]);
}
// set initial focal point
setupFocalPoint();
// set shape previews
window.addEventListener('resize', updateCropPreviews);
updateCropPreviews();
}
/*
* Setup drag and drop support for given drop target
*/
function setupDragAndDrop(holder) {
holder.ondragover = function () {
this.classList.add('hover');
return false;
};
holder.ondragleave = function() {
this.classList.remove('hover');
return false;
}
holder.ondragend = function () {
this.classList.remove('hover');
return false;
};
holder.ondrop = function (e) {
e.preventDefault();
this.classList.remove('hover');
// single or multiple?
var input = holder.querySelector('input[type="file"]');
if (input.getAttribute('multiple') === 'multiple') {
droppedFiles = e.dataTransfer.files;
} else {
droppedFiles = [e.dataTransfer.files[0]];
}
onImageChanged(holder, droppedFiles);
};
}
/*
* Setup form data upload
*/
function setupUpload(holder) {
var form = cubane.dom.closest(holder, 'form');
if (form) {
form.addEventListener('submit', onUploadFormSubmit);
var input = form.querySelector('input[type="file"]');
if (input) {
input.required = false;
}
}
}
/*
* Ensures that the given image is loaded and then calls the given callback.
*/
function ensureImageLoaded(img, callback) {
if (!img) return;
var handler = function() {
img.removeEventListener('load', handler);
callback();
};
if (!img.naturalWidth || !img.naturalHeight) {
img.addEventListener('load', handler);
} else {
handler();
}
}
/*
* Set max. boundaries for the image based on the aspect ratio of the image.
*/
function setImageBoundaries() {
// wait for the image to be fully loaded, so that we have image dimensions
var img = document.querySelector('.cubane-media-editor-preview-panel-frame img');
if (!img) return;
ensureImageLoaded(img, function() {
if (img.naturalHeight == 0) return;
var frame = document.querySelector('.cubane-media-editor-preview-frame');
var panel = document.querySelector('.cubane-media-editor-preview-panel');
var ar = img.naturalWidth / img.naturalHeight;
var w = frame.offsetWidth;
var h = w / ar;
if (h > frame.offsetHeight) {
h = frame.offsetHeight;
w = h * ar
}
panel.style.width = w.toString() + 'px';
panel.style.height = h.toString() + 'px';
});
}
/*
* Return the crop rectangle for an image with given width and height to
* be cropped and fitted into the given target width and height.
* The resulting crop width and height might be smaller (or larger) than the
* given target width and height depending on the input image size; however
* the aspect ratio is the same.
* The crop region is based around the given focal point which describes the
* main focal point of the image which should become the center of the new
* image. If no focal point is given, the image center position is assumed.
* Focal point coordinates are in relative coordinates between 0.0 and 1.0.
*/
function getImageCropArea(width, height, targetWidth, targetHeight, fx, fy) {
// division by zero guard
if (!targetWidth || !targetHeight || !width || !height)
return {
x: 0,
y: 0,
w: Math.round(targetWidth),
h: 0
};
// focal point
if (isNaN(fx)) fx = 0.5;
if (isNaN(fy)) fy = 0.5;
fx = Math.max(0, Math.min(1, fx));
fy = Math.max(0, Math.min(1, fy));
// aspect ratios
var srcAR = width / height;
var targetAR = targetWidth / targetHeight;
var srcLandscape = srcAR > 1;
var targetLandscape = targetAR > 1;
// focal point in image space
var imgFx = width * fx;
var imgFy = height * fy;
// find largest possible crop region where focal point is relative to
// where it is in the original image (binary search)...
var top = width;
var bottom = 0;
var targetThreshold = targetWidth * 1.01;
var w = top;
var i = 0;
while (true) {
var h = w / targetAR;
var x = imgFx - (fx * w);
var y = imgFy - (fy * h);
if (w < targetThreshold) {
if (x < 0) x = 0;
if (y < 0) y = 0;
if (x + w > width) x = width - w;
if (y + h > height) y = height - h;
}
var valid = x >= 0 && y >= 0 && x + w <= width && y + h <= height;
if (valid) {
// valid -> increase
bottom = w;
} else {
// not valid -> decrease
top = w;
}
w = bottom + ((top - bottom) / 2);
// good enough?
if (valid && top - bottom < 1)
break;
i++;
if (i > 10) break;
}
if (x < 0) x = 0;
if (y < 0) y = 0;
// return crop region (integers)
return {
x: Math.round(x),
y: Math.round(y),
w: Math.round(w),
h: Math.round(h)
};
}
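/*
 * Example (illustrative numbers only, not taken from the original code):
 * cropping a 4000x3000 source image to a 16:9 target of 1600x900 around a
 * focal point in the right half of the image could be requested as
 *
 *   getImageCropArea(4000, 3000, 1600, 900, 0.75, 0.5)
 *
 * which yields a crop rectangle with a 16:9 aspect ratio whose x/y offsets
 * are chosen so the focal point keeps roughly its relative position.
 */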
/*
* Update the crop preview images based on the current focal point and image.
*/
function updateCropPreviews() {
// get focal point
var fx = parseFloat(document.querySelector('#id_focal_x').value);
var fy = parseFloat(document.querySelector('#id_focal_y').value);
if (isNaN(fx)) fx = 0.5;
if (isNaN(fy)) fy = 0.5;
// get preview image shapes
var shapes = document.querySelectorAll('.cubane-media-editor-shape');
if (shapes.length === 0) return;
var img = document.querySelector('.cubane-media-editor-preview-panel-frame img');
if (!img) return;
ensureImageLoaded(img, function() {
var width = img.naturalWidth;
var height = img.naturalHeight;
var targetWidth = shapes[0].offsetWidth;
function _updateCropForShape(shape, img) {
ensureImageLoaded(img, function() {
// determine crop area for the shape
var ar = parseFloat(shape.getAttribute('data-ar'));
var targetHeight = targetWidth / ar;
var crop = getImageCropArea(width, height, targetWidth, targetHeight, fx, fy);
// adjust image width and position
var r = targetWidth / crop.w;
img.width = width * r;
img.style.webkitTransform =
img.style.transform =
'translate(' + (-crop.x * r).toString() + 'px, ' + (-crop.y * r).toString() + 'px)';
});
}
// process shapes
for (var i = 0; i < shapes.length; i++) {
// make sure that we have the correct image for all preview shapes
var shapeImg = shapes[i].querySelector('img');
if (shapeImg.src != img.src) {
shapeImg.src = img.src;
}
// update crop placement
_updateCropForShape(shapes[i], shapeImg, i == 0);
}
});
}
/*
* Setup focal point editor
*/
function setupFocalPoint() {
// editor required
var editor = document.querySelector('.cubane-media-editor');
if (!editor) return;
// image required, does not work for documents
var isImages = editor.getAttribute('data-images') === 'True';
if (!isImages) return;
var target = editor.querySelector('.cubane-media-editor-focal-point');
// interact
interact('.cubane-media-editor-focal-point').draggable({
inertia: true,
restrict: {
restriction: 'parent',
endOnly: true,
elementRect: { top: 0.5, left: 0.5, bottom: 0.5, right: 0.5 }
},
onmove: dragMoveListener
});
// move handler
function dragMoveListener (event) {
var x = (parseFloat(target.getAttribute('data-x')) || 0) + event.dx;
var y = (parseFloat(target.getAttribute('data-y')) || 0) + event.dy;
setFocalPointFromScreen(x, y);
updateCropPreviews();
}
// resize
window.addEventListener('resize', function() {
var focal_x = document.querySelector('#id_focal_x');
var focal_y = document.querySelector('#id_focal_y');
if (focal_x && focal_y) {
setFocalPointFromCoords(parseFloat(focal_x.value), parseFloat(focal_y.value));
}
});
// enable if we are in edit mode, which means that we should have an image
var isEdit = editor.getAttribute('data-edit') === 'True';
if (isEdit) {
enableFocalPoint(false);
}
// allow auto detect focal point (button)
document.querySelector('.cubane-media-editor-auto-detect-focal-point').addEventListener('click', function(e) {
e.preventDefault();
enableFocalPoint(true);
});
// allow for resetting focal point to center position
document.querySelector('.cubane-media-editor-center-focal-point').addEventListener('click', function(e) {
e.preventDefault();
enableFocalPoint(true, 0.5, 0.5);
});
// auto-detection of focal point supported by browser?
if (!supportsAutoDetectFocalPoint()) {
$('.cubane-media-editor-auto-detect-focal-point').hide();
}
}
/*
* Set focal point position from screen coordinates.
*/
function setFocalPointFromScreen(screenX, screenY) {
var target = document.querySelector('.cubane-media-editor-focal-point');
if (!target) return;
var panel = document.querySelector('.cubane-media-editor-preview-panel');
var focal_x = document.querySelector('#id_focal_x');
var focal_y = document.querySelector('#id_focal_y');
if (!focal_x || !focal_y) return;
// translate the element
target.style.webkitTransform =
target.style.transform =
'translate(' + screenX + 'px, ' + screenY + 'px)';
// update the position attributes
target.setAttribute('data-x', screenX);
target.setAttribute('data-y', screenY);
// update form fields (relative coordinates)
focal_x.value = (screenX + 50) / panel.offsetWidth;
focal_y.value = (screenY + 50) / panel.offsetHeight;
}
/*
* Set focal point position from relative coordinates.
*/
function setFocalPointFromCoords(x, y) {
if (!isNaN(x) && !isNaN(y)) {
var panel = document.querySelector('.cubane-media-editor-preview-panel');
if (panel) {
x = (panel.offsetWidth * x) - 50;
y = (panel.offsetHeight * y) - 50;
setFocalPointFromScreen(x, y);
}
}
}
/*
* Enable focal point
*/
function enableFocalPoint(newImage, newX, newY) {
if (newImage === undefined) newImage = false;
var target = document.querySelector('.cubane-media-editor-focal-point');
if (!target) return;
// set initial position. Use centre position if no focal point is
// available yet...
var focal_x = document.querySelector('#id_focal_x');
var focal_y = document.querySelector('#id_focal_y');
if (focal_x && focal_y) {
var initialX = parseFloat(focal_x.value);
var initialY = parseFloat(focal_y.value);
function applyFocalPoint(x, y) {
setFocalPointFromCoords(x, y);
target.classList.add('active');
updateCropPreviews();
}
if (isNaN(initialX) || isNaN(initialY) || newImage) {
// default focal point
initialX = 0.5;
initialY = 0.5;
// detect focal point automatically if this is a new image
if (newImage) {
if (newX && newY) {
initialX = newX;
initialY = newY;
} else if (supportsAutoDetectFocalPoint()) {
// auto-detect
var img = document.querySelector('.cubane-media-editor-preview-panel-frame img');
ensureImageLoaded(img, function() {
smartcrop.crop(img, {width: 100, height: 100}).then(function(result){
var x = (result.topCrop.x + (result.topCrop.width / 2)) / img.naturalWidth;
var y = (result.topCrop.y + (result.topCrop.height / 2)) / img.naturalHeight;
applyFocalPoint(x, y);
});
});
return;
}
}
}
applyFocalPoint(initialX, initialY);
}
}
/*
* Return True, if the browser can support the auto-detection of the focal
* point. We need a modern browser with Promise support for this to work.
*/
function supportsAutoDetectFocalPoint() {
return window.Promise !== undefined;
}
/*
* Disable focal point
*/
function disableFocalPoint() {
var target = document.querySelector('.cubane-media-editor-focal-point');
if (!target) return;
target.classList.remove('active');
}
/*
* Handle form submit and upload media data
*/
function onUploadFormSubmit(e) {
e.preventDefault();
// get form and elements...
var form = cubane.dom.closest(e.target, 'form');
var input = form.querySelector('input[type="file"]');
var editor = cubane.dom.closest(form, '.cubane-media-editor');
// prevent duplicate submission...
if (form.classList.contains('uploading')) return;
form.classList.add('uploading');
// construct upload form data and append data that was dragged and
// dropped...
var data = new FormData(form);
if (droppedFiles) {
for (var i = 0; i < droppedFiles.length; i++) {
data.append(input.name, droppedFiles[i]);
}
}
var dlg = cubane.dialog.progress(
'Uploading files...',
'Uploading files...Please Wait...'
);
var processing = false;
var completed = false;
var progressId = Math.random();
var progressUrl = cubane.urls.reverse('cubane.backend.progress') + '?progressId=' + progressId;
var action = form.getAttribute('action');
if (!action) action = document.location.href;
action = cubane.urls.combineUrlArg(action, 'progressId', progressId);
$.ajax({
url: action,
type: form.getAttribute('method'),
data: data,
dataType: 'json',
cache: false,
contentType: false,
processData: false,
xhr: function(){
//upload Progress
var xhr = $.ajaxSettings.xhr();
if (xhr.upload) {
xhr.upload.addEventListener('progress', function(event) {
// calculate progress (percent)
var percent = 0;
var position = event.loaded || event.position;
var total = event.total;
if (event.lengthComputable) {
percent = Math.ceil(position / total * 100);
}
// report progress made
dlg.progress(percent);
if (!processing && percent > 99) {
// we're done uploading. Now we need to monitor progress
// being made on the server side while processing
// all uploaded media files...
processing = true;
cubane.dialog.closeAll();
dlg = cubane.dialog.progress(
'Processing data...',
'Processing uploaded data. This may take a moment...Please Wait...'
);
var interval = setInterval(function() {
$.getJSON(progressUrl, function(json) {
dlg.progress(json.percent);
if (json.percent > 99 || completed) {
clearInterval(interval);
}
});
}, 1000);
}
}, true);
}
return xhr;
},
complete: function() {
dlg.progress(100);
completed = true;
form.classList.remove('uploading');
cubane.dialog.closeAll();
},
success: function(json) {
if (json.success) {
if ($('body').hasClass('create-dialog') && window.parent !== window) {
window.parent.$(window.parent).trigger('cubane-listing-create', [{
id: json.instance_id,
title: json.instance_title
}]);
} else if ($('body').hasClass('browse-dialog') && window.parent !== window) {
if (json.next) {
document.location = json.next;
}
} else if ($('body').hasClass('index-dialog') && window.parent !== window) {
window.parent.$(window.parent).trigger('cubane-close-index-dialog');
} else if ($('body').hasClass('edit-dialog') && window.parent !== window) {
window.parent.$(window.parent).trigger('cubane-listing-edit', [{
id: json.instance_id,
title: json.instance_title
}]);
} else if (json.next) {
document.location = json.next;
} else {
document.location.reload();
}
} else {
if (json.errors) {
cubane.backend.presentFormErrors(form, json.errors);
} else {
document.location.reload();
}
}
}
});
}
/*
* Uploaded image changed
*/
function onImageChanged(holder, files) {
// reflect selected image label in upload box
var label = holder.querySelector('.cubane-file-label');
if (label) {
label.innerText = files.length > 1 ? (files.length + ' files selected') : files[0].name;
}
// enable/disable caption input field, which does not apply if we are
// uploading multiple images at once...
var container = document.getElementById('id_caption');
if (container) {
container = cubane.dom.closest(container, '.control-group');
if (files.length > 1) {
container.style.display = 'none';
} else {
container.style.display = 'block';
}
}
// toggle between preview panel and preview images, depending on
// whether we have multiple images or not...
var editor = document.querySelector('.cubane-media-editor');
if (editor) {
var isImages = editor.getAttribute('data-images') === 'True';
var images = document.querySelector('.cubane-media-editor-preview-images');
var panelFrame = document.querySelector('.cubane-media-editor-preview-panel-frame');
if (files.length > 1) {
editor.classList.add('multiple');
images.innerText = '';
for (var i = 0; i < files.length; i++) {
getFileUrl(files[i], function(url) {
addPreviewImageFromUrl(url, isImages);
});
}
// focal point not available
disableFocalPoint();
} else {
panelFrame.innerText = '';
editor.classList.remove('multiple');
getFileUrl(files[0], function(url) {
createPreviewImageFromUrl(url, isImages);
// enable focal point when working with single image
if (isImages) {
setImageBoundaries();
enableFocalPoint(true);
}
});
}
// check save and continue, which we cannot do if this is a create
// with multiple assets...
var isEdit = editor.getAttribute('data-edit') === 'True';
if (!isEdit && files.length > 1) {
$('.btn-save-and-continue').hide();
} else {
$('.btn-save-and-continue').show();
}
}
}
/*
* Return the file url for the given file object.
*/
function getFileUrl(file, loaded) {
if (URL.createObjectURL) {
loaded(URL.createObjectURL(file));
} else {
var reader = new FileReader();
reader.onload = function (event) {
loaded(event.target.result);
};
reader.readAsDataURL(file);
}
}
/*
* Add given image as a preview image from given image url (multiple files).
*/
function addPreviewImageFromUrl(url, isImages) {
var images = document.querySelector('.cubane-media-editor-preview-images');
var node = document.createElement('div');
if (isImages) {
node.classList.add('cubane-media-editor-preview-image');
node.style.backgroundImage = 'url(' + url + ')';
images.appendChild(node);
}
}
/*
* Create new preview image for the given url (single file).
*/
function createPreviewImageFromUrl(url, isImages) {
var panelFrame = document.querySelector('.cubane-media-editor-preview-panel-frame');
if (isImages) {
var node = document.createElement('img');
node.src = url;
node.alt = '';
} else {
var node = document.createElement('iframe');
node.src = cubane.urls.reverse('cubane.cms.documents.preview') + '?url=' + url;
}
panelFrame.appendChild(node);
}
/*
* Init
*/
if (document.querySelector('.cubane-file-upload')) {
setup();
}
}(this)); | PypiClean |
/OSAlgos-0.0.1.tar.gz/OSAlgos-0.0.1/OS_Project/FCFS.py | from prettytable import PrettyTable
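# First-Come-First-Served (FCFS) scheduling helpers.
# For processes served in arrival order:
#   waiting_time[0]    = 0
#   waiting_time[i]    = waiting_time[i-1] + burst_time[i-1]
#   turnaround_time[i] = waiting_time[i] + burst_time[i]
# The functions below compute these per-process values, print them as a
# table, and return the averages.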
def findWaitingTime(processes, n, bt, wt):
wt[0] = 0
for i in range(1, n):
wt[i] = bt[i - 1] + wt[i - 1]
def findTurnAroundTime(processes, n, bt, wt, tat):
for i in range(n):
tat[i] = bt[i] + wt[i]
def avgFCFS_Time(processes, n, bt):
wt = [0] * n
tat = [0] * n
total_wt = 0
total_tat = 0
findWaitingTime(processes, n, bt, wt)
findTurnAroundTime(processes, n, bt, wt, tat)
table = PrettyTable(
["Processes", "Burst time", "Waiting time", "Turn around time"])
for i in range(n):
total_wt = total_wt + wt[i]
total_tat = total_tat + tat[i]
table.add_row([str(i + 1), str(bt[i]), str(wt[i]), str(tat[i])])
print("\n----------------------------FCFS----------------------------")
print("")
print(table)
print("")
print("Average waiting time = " + str(total_wt / n))
print("Average turn around time = " + str(total_tat / n))
def avgFCFS_Time_waiting(processes, n, bt):
wt = [0] * n
tat = [0] * n
total_wt = 0
total_tat = 0
findWaitingTime(processes, n, bt, wt)
findTurnAroundTime(processes, n, bt, wt, tat)
for i in range(n):
total_wt = total_wt + wt[i]
total_tat = total_tat + tat[i]
return total_wt / n
def avgFCFS_Time_turnRound(processes, n, bt):
wt = [0] * n
tat = [0] * n
total_wt = 0
total_tat = 0
findWaitingTime(processes, n, bt, wt)
findTurnAroundTime(processes, n, bt, wt, tat)
for i in range(n):
total_wt = total_wt + wt[i]
total_tat = total_tat + tat[i]
return total_tat / n
def FCFS_Table(processes, n, bt):
wt = [0] * n
tat = [0] * n
total_wt = 0
total_tat = 0
findWaitingTime(processes, n, bt, wt)
findTurnAroundTime(processes, n, bt, wt, tat)
table = PrettyTable(
["Processes", "Burst time", "Waiting time", "Turn around time"])
for i in range(n):
total_wt = total_wt + wt[i]
total_tat = total_tat + tat[i]
table.add_row([str(i + 1), str(bt[i]), str(wt[i]), str(tat[i])])
print("\n----------------------------FCFS----------------------------")
print("")
print(table)
print("") | PypiClean |
/Flask-Authorize-0.2.6.tar.gz/Flask-Authorize-0.2.6/docs/overview.rst |
Overview
========
Flask-Authorize is a Flask extension designed to simplify the process of incorporating Access Control Lists (ACLs) and Role-Based Access Control (RBAC) into applications housing sensitive data, allowing developers to focus on the actual code for their application instead of logic for enforcing permissions. It uses a unix-like permissions scheme for enforcing access permissions on existing content, and also provides mechanisms for globally enforcing permissions throughout an application.
There are quite a few packages designed to simplify the process of adding ACLs and RBAC to a Flask application:
* `Flask-Principal <https://pythonhosted.org/Flask-Principal/>`_
* `Flask-ACL <https://mikeboers.github.io/Flask-ACL/>`_
* `Flask-RBAC <https://flask-rbac.readthedocs.io/en/latest/>`_
* `Flask-Security <https://pythonhosted.org/Flask-Security/>`_
Each provides a different developer experience and makes different assumptions in its design. This package is yet another take on the same problem, resulting in a slightly different development experience when working with Flask applications. The developers of this package recommend you check out these alternatives along with Flask-Authorize to see which fits your needs best.
A Minimal Application
---------------------
Setting up the Flask application with extensions:
.. code-block:: python
from flask import Flask
from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy
from flask_authorize import Authorize
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
login = LoginManager(app)
authorize = Authorize(app)
Defining database models:
.. code-block:: python
from flask_authorize import RestrictionsMixin, AllowancesMixin
from flask_authorize import PermissionsMixin
# mapping tables
UserGroup = db.Table(
'user_group', db.Model.metadata,
db.Column('user_id', db.Integer, db.ForeignKey('users.id')),
db.Column('group_id', db.Integer, db.ForeignKey('groups.id'))
)
UserRole = db.Table(
'user_role', db.Model.metadata,
db.Column('user_id', db.Integer, db.ForeignKey('users.id')),
db.Column('role_id', db.Integer, db.ForeignKey('roles.id'))
)
# models
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255), nullable=False, unique=True)
# `roles` and `groups` are reserved words that *must* be defined
# on the `User` model to use group- or role-based authorization.
roles = db.relationship('Role', secondary=UserRole)
groups = db.relationship('Group', secondary=UserGroup)
class Group(db.Model, RestrictionsMixin):
__tablename__ = 'groups'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255), nullable=False, unique=True)
class Role(db.Model, AllowancesMixin):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255), nullable=False, unique=True)
class Article(db.Model, PermissionsMixin):
__tablename__ = 'articles'
__permissions__ = dict(
owner=['read', 'update', 'delete', 'revoke'],
group=['read', 'update'],
other=['read']
)
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255), index=True, nullable=False)
content = db.Column(db.Text)
Defining endpoint actions:
.. code-block:: python
from flask import jsonify
from werkzeug import NotFound, Unauthorized
@app.route('/articles', methods=['POST'])
@login.logged_in
@authorize.create(Article)
def article():
article = Article(
name=request.json.get('name'),
content=request.json.get('content'),
)
db.session.add(article)
db.session.commit()
return jsonify(msg='Created Article'), 200
@app.route('/articles/<int:ident>', methods=['GET', 'PUT', 'DELETE'])
@login.logged_in
def single_article(ident):
article = db.session.query(Article).filter_by(id=ident).first()
if not article:
raise NotFound
if request.method == 'GET':
# check if the current user is authorized to read the article
if not authorize.read(article):
raise Unauthorized
return jsonify(id=article.id, name=article.name), 200
elif request.method == 'PUT':
# check if the current user is authorized to update to the article
if not authorize.update(article):
raise Unauthorized
# update values
if 'name' in request.json:
article.name = request.json['name']
if 'content' in request.json:
article.content = request.json['content']
db.session.commit()
return jsonify(id=article.id, name=article.name), 200
elif request.method == 'DELETE':
# check if the current user is associated with the 'admin' role
if not authorize.delete(article) or \
not authorize.has_role('admin'):
raise Unauthorized
db.session.delete(article)
db.session.commit()
return
@app.route('/articles/<int:ident>/revoke', methods=['POST'])
@login.logged_in
def revoke_article(ident):
article = db.session.query(Article).filter_by(id=ident).first()
if not article:
raise NotFound
# check if the current user can revoke the article
if not authorize.revoke(article):
raise Unauthorized
article.revoked = True
db.session.commit()
return
Additionally, if you've configured your application to dispatch request processing to API functions, you can use the ``authorize`` extension object as a decorator:
.. code-block:: python
@authorize.create(Article)
def create_article(name):
article = Article(name=name)
db.session.add(article)
db.session.commit()
return article
@authorize.read
def read_article(article):
return article
@authorize.update
def update_article(article, **kwargs):
for key, value in request.json.items():
setattr(article, key, value)
db.session.commit()
return article
@authorize.delete
def delete_article(article):
db.session.delete(article)
return
@authorize.revoke
def revoke_article(article):
article.revoke = True
db.session.commit()
return
@authorize.has_role('admin')
def get_admin_articles():
pass
Using the extension as a decorator goes a long way in removing boilerplate associated with permissions checking. Additionally, using the ``authorize`` extension object as a decorator will implicitly check the current user's access to each argument or keyword argument to the function. For example, if your method takes two ``Article`` objects and merges them into one, you can add permissions for both operations like so:
.. code-block:: python
@authorize.read
@authorize.create(Article)
def merge_articles(article1, article2):
new_article = Article(name=article1.name + article.2.name)
db.session.add(new_article)
db.session.delete(article1, article2)
db.session.commit()
return new_article
This function will ensure that the current user has read access to both articles and also create permissions on the **Article** model itself. If the authorization criteria aren't satisfied, an ``Unauthorized`` error will be thrown.
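If you want to turn these errors into HTTP responses yourself, you can register a standard Flask error handler for them. The sketch below is illustrative only (the handler name and response body are placeholders, not part of Flask-Authorize):
.. code-block:: python

    from flask import jsonify
    from werkzeug.exceptions import Unauthorized

    @app.errorhandler(Unauthorized)
    def handle_unauthorized(error):
        return jsonify(error='unauthorized'), 401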
Finally, the ``authorize`` operator is also available in Jinja templates:
.. code-block:: html
<!-- button for creating new article -->
{% if authorize.create('articles') %}
<button>Create Article</button>
{% endif %}
<!-- display article feed -->
{% for article in articles %}
<!-- show article if user has read access -->
{% if authorize.read(article) %}
<h1>{{ article.name }}</h1>
<!-- add edit button for users who can update-->
{% if authorize.update(article) %}
<button>Update Article</button>
{% endif %}
<!-- add delete button for administrators -->
{% if authorize.in_group('admins') %}
<button>Delete Article</button>
{% endif %}
{% endif %}
{% endfor %}
Usage without Flask-Login
-------------------------
By default, this module uses the Flask-Login extension for determining the current user. If you aren't using that module, you simply need to provide a function to the plugin that will return the current user:
.. code-block:: python
from flask import Flask, g
from flask_authorize import Authorize
def my_current_user():
"""
Return current user to check authorization against.
"""
return g.user
# using the declarative method for setting up the extension
app = Flask(__name__)
authorize = Authorize(current_user=my_current_user)
authorize.init_app(app)
For more in-depth discussion on design considerations and how to fully utilize the plugin, see the `User Guide <./usage.html>`_.
| PypiClean |
/FlaskCms-0.0.4.tar.gz/FlaskCms-0.0.4/flask_cms/static/js/ace/theme-eclipse.js | ace.define("ace/theme/eclipse",["require","exports","module","ace/lib/dom"], function(require, exports, module) {
"use strict";
exports.isDark = false;
exports.cssText = ".ace-eclipse .ace_gutter {\
background: #ebebeb;\
border-right: 1px solid rgb(159, 159, 159);\
color: rgb(136, 136, 136);\
}\
.ace-eclipse .ace_print-margin {\
width: 1px;\
background: #ebebeb;\
}\
.ace-eclipse {\
background-color: #FFFFFF;\
color: black;\
}\
.ace-eclipse .ace_fold {\
background-color: rgb(60, 76, 114);\
}\
.ace-eclipse .ace_cursor {\
color: black;\
}\
.ace-eclipse .ace_storage,\
.ace-eclipse .ace_keyword,\
.ace-eclipse .ace_variable {\
color: rgb(127, 0, 85);\
}\
.ace-eclipse .ace_constant.ace_buildin {\
color: rgb(88, 72, 246);\
}\
.ace-eclipse .ace_constant.ace_library {\
color: rgb(6, 150, 14);\
}\
.ace-eclipse .ace_function {\
color: rgb(60, 76, 114);\
}\
.ace-eclipse .ace_string {\
color: rgb(42, 0, 255);\
}\
.ace-eclipse .ace_comment {\
color: rgb(113, 150, 130);\
}\
.ace-eclipse .ace_comment.ace_doc {\
color: rgb(63, 95, 191);\
}\
.ace-eclipse .ace_comment.ace_doc.ace_tag {\
color: rgb(127, 159, 191);\
}\
.ace-eclipse .ace_constant.ace_numeric {\
color: darkblue;\
}\
.ace-eclipse .ace_tag {\
color: rgb(25, 118, 116);\
}\
.ace-eclipse .ace_type {\
color: rgb(127, 0, 127);\
}\
.ace-eclipse .ace_xml-pe {\
color: rgb(104, 104, 91);\
}\
.ace-eclipse .ace_marker-layer .ace_selection {\
background: rgb(181, 213, 255);\
}\
.ace-eclipse .ace_marker-layer .ace_bracket {\
margin: -1px 0 0 -1px;\
border: 1px solid rgb(192, 192, 192);\
}\
.ace-eclipse .ace_meta.ace_tag {\
color:rgb(25, 118, 116);\
}\
.ace-eclipse .ace_invisible {\
color: #ddd;\
}\
.ace-eclipse .ace_entity.ace_other.ace_attribute-name {\
color:rgb(127, 0, 127);\
}\
.ace-eclipse .ace_marker-layer .ace_step {\
background: rgb(255, 255, 0);\
}\
.ace-eclipse .ace_active-line {\
background: rgb(232, 242, 254);\
}\
.ace-eclipse .ace_gutter-active-line {\
background-color : #DADADA;\
}\
.ace-eclipse .ace_marker-layer .ace_selected-word {\
border: 1px solid rgb(181, 213, 255);\
}\
.ace-eclipse .ace_indent-guide {\
background: url(\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAACCAYAAACZgbYnAAAAE0lEQVQImWP4////f4bLly//BwAmVgd1/w11/gAAAABJRU5ErkJggg==\") right repeat-y;\
}";
exports.cssClass = "ace-eclipse";
var dom = require("../lib/dom");
dom.importCssString(exports.cssText, exports.cssClass);
}); | PypiClean |
/FastGets-0.3.5.tar.gz/FastGets-0.3.5/fastgets/web/static/dist/plugins/code/plugin.js | (function () {
var defs = {}; // id -> {dependencies, definition, instance (possibly undefined)}
// Used when there is no 'main' module.
// The name is probably (hopefully) unique so minification removes for releases.
var register_3795 = function (id) {
var module = dem(id);
var fragments = id.split('.');
var target = Function('return this;')();
for (var i = 0; i < fragments.length - 1; ++i) {
if (target[fragments[i]] === undefined) { target[fragments[i]] = {}; }
target = target[fragments[i]];
}
target[fragments[fragments.length - 1]] = module;
};
var instantiate = function (id) {
var actual = defs[id];
var dependencies = actual.deps;
var definition = actual.defn;
var len = dependencies.length;
var instances = new Array(len);
for (var i = 0; i < len; ++i) { instances[i] = dem(dependencies[i]); }
var defResult = definition.apply(null, instances);
if (defResult === undefined) { throw 'module [' + id + '] returned undefined'; }
actual.instance = defResult;
};
var def = function (id, dependencies, definition) {
if (typeof id !== 'string') { throw 'module id must be a string'; } else if (dependencies === undefined) { throw 'no dependencies for ' + id; } else if (definition === undefined) { throw 'no definition function for ' + id; }
defs[id] = {
deps: dependencies,
defn: definition,
instance: undefined
};
};
var dem = function (id) {
var actual = defs[id];
if (actual === undefined) { throw 'module [' + id + '] was undefined'; } else if (actual.instance === undefined) { instantiate(id); }
return actual.instance;
};
var req = function (ids, callback) {
var len = ids.length;
var instances = new Array(len);
for (var i = 0; i < len; ++i) { instances[i] = dem(ids[i]); }
callback.apply(null, instances);
};
var ephox = {};
ephox.bolt = {
module: {
api: {
define: def,
require: req,
demand: dem
}
}
};
var define = def;
var require = req;
var demand = dem;
// this helps with minification when using a lot of global references
var defineGlobal = function (id, ref) {
define(id, [], function () { return ref; });
};
/* jsc
["tinymce.plugins.code.Plugin","tinymce.core.PluginManager","tinymce.plugins.code.api.Commands","tinymce.plugins.code.ui.Buttons","global!tinymce.util.Tools.resolve","tinymce.plugins.code.ui.Dialog","tinymce.plugins.code.api.Settings","tinymce.plugins.code.core.Content","tinymce.core.dom.DOMUtils"]
jsc */
defineGlobal('global!tinymce.util.Tools.resolve', tinymce.util.Tools.resolve);
/**
* ResolveGlobal.js
*
* Released under LGPL License.
* Copyright (c) 1999-2017 Ephox Corp. All rights reserved
*
* License: http://www.tinymce.com/license
* Contributing: http://www.tinymce.com/contributing
*/
define(
'tinymce.core.PluginManager',
[
'global!tinymce.util.Tools.resolve'
],
function (resolve) {
return resolve('tinymce.PluginManager');
}
);
/**
* ResolveGlobal.js
*
* Released under LGPL License.
* Copyright (c) 1999-2017 Ephox Corp. All rights reserved
*
* License: http://www.tinymce.com/license
* Contributing: http://www.tinymce.com/contributing
*/
define(
'tinymce.core.dom.DOMUtils',
[
'global!tinymce.util.Tools.resolve'
],
function (resolve) {
return resolve('tinymce.dom.DOMUtils');
}
);
/**
* Settings.js
*
* Released under LGPL License.
* Copyright (c) 1999-2017 Ephox Corp. All rights reserved
*
* License: http://www.tinymce.com/license
* Contributing: http://www.tinymce.com/contributing
*/
define(
'tinymce.plugins.code.api.Settings',
[
'tinymce.core.dom.DOMUtils'
],
function (DOMUtils) {
var getMinWidth = function (editor) {
return editor.getParam('code_dialog_width', 600);
};
var getMinHeight = function (editor) {
return editor.getParam('code_dialog_height', Math.min(DOMUtils.DOM.getViewPort().h - 200, 500));
};
return {
getMinWidth: getMinWidth,
getMinHeight: getMinHeight
};
}
);
/**
* Content.js
*
* Released under LGPL License.
* Copyright (c) 1999-2017 Ephox Corp. All rights reserved
*
* License: http://www.tinymce.com/license
* Contributing: http://www.tinymce.com/contributing
*/
define(
'tinymce.plugins.code.core.Content',
[
],
function () {
var setContent = function (editor, html) {
// We get a lovely "Wrong document" error in IE 11 if we
// don't move the focus to the editor before creating an undo
// transaction since it tries to make a bookmark for the current selection
editor.focus();
editor.undoManager.transact(function () {
editor.setContent(html);
});
editor.selection.setCursorLocation();
editor.nodeChanged();
};
var getContent = function (editor) {
return editor.getContent({ source_view: true });
};
return {
setContent: setContent,
getContent: getContent
};
}
);
/**
* Dialog.js
*
* Released under LGPL License.
* Copyright (c) 1999-2017 Ephox Corp. All rights reserved
*
* License: http://www.tinymce.com/license
* Contributing: http://www.tinymce.com/contributing
*/
define(
'tinymce.plugins.code.ui.Dialog',
[
'tinymce.plugins.code.api.Settings',
'tinymce.plugins.code.core.Content'
],
function (Settings, Content) {
var open = function (editor) {
var minWidth = Settings.getMinWidth(editor);
var minHeight = Settings.getMinHeight(editor);
var win = editor.windowManager.open({
title: 'Source code',
body: {
type: 'textbox',
name: 'code',
multiline: true,
minWidth: minWidth,
minHeight: minHeight,
spellcheck: false,
style: 'direction: ltr; text-align: left'
},
onSubmit: function (e) {
Content.setContent(editor, e.data.code);
}
});
// Gecko has a major performance issue with textarea
// contents so we need to set it when all reflows are done
win.find('#code').value(Content.getContent(editor));
};
return {
open: open
};
}
);
/**
* Commands.js
*
* Released under LGPL License.
* Copyright (c) 1999-2017 Ephox Corp. All rights reserved
*
* License: http://www.tinymce.com/license
* Contributing: http://www.tinymce.com/contributing
*/
define(
'tinymce.plugins.code.api.Commands',
[
'tinymce.plugins.code.ui.Dialog'
],
function (Dialog) {
var register = function (editor) {
editor.addCommand('mceCodeEditor', function () {
Dialog.open(editor);
});
};
return {
register: register
};
}
);
/**
* Buttons.js
*
* Released under LGPL License.
* Copyright (c) 1999-2017 Ephox Corp. All rights reserved
*
* License: http://www.tinymce.com/license
* Contributing: http://www.tinymce.com/contributing
*/
define(
'tinymce.plugins.code.ui.Buttons',
[
'tinymce.plugins.code.ui.Dialog'
],
function (Dialog) {
var register = function (editor) {
editor.addButton('code', {
icon: 'code',
tooltip: 'Source code',
onclick: function () {
Dialog.open(editor);
}
});
editor.addMenuItem('code', {
icon: 'code',
text: 'Source code',
onclick: function () {
Dialog.open(editor);
}
});
};
return {
register: register
};
}
);
/**
* Plugin.js
*
* Released under LGPL License.
* Copyright (c) 1999-2017 Ephox Corp. All rights reserved
*
* License: http://www.tinymce.com/license
* Contributing: http://www.tinymce.com/contributing
*/
define(
'tinymce.plugins.code.Plugin',
[
'tinymce.core.PluginManager',
'tinymce.plugins.code.api.Commands',
'tinymce.plugins.code.ui.Buttons'
],
function (PluginManager, Commands, Buttons) {
PluginManager.add('code', function (editor) {
Commands.register(editor);
Buttons.register(editor);
return {};
});
return function () { };
}
);
dem('tinymce.plugins.code.Plugin')();
})(); | PypiClean |
/NESP-Lib-1.0.2.tar.gz/NESP-Lib-1.0.2/README.md | # NESP-Lib – New Era Syringe Pump Library for Python
This project aims to offer a clean high-level interface to the New Era syringe pumps by New Era Pump
Systems.
These pumps are also distributed under different names, like Aladdin by World Precision Instruments
(WPI) and LA by Landgraf Laborsysteme.
## Features
- Object-oriented design
- Documented public elements via type hints and docstrings
- Signaling errors via exceptions
- Blocking and non-blocking running
- Sending heartbeat messages automatically
## Installing
```
pip install NESP-Lib
```
## Importing
``` python
import nesp_lib
```
## Examples
### Configuring
``` python
from nesp_lib import Port, Pump, PumpingDirection
# Constructs the port to which the pump is connected.
port = Port('COM1')
# Constructs the pump connected to the port.
pump = Pump(port)
# Sets the syringe diameter of the pump in units of millimeters.
pump.syringe_diameter = 30.0
# Sets the pumping direction of the pump.
pump.pumping_direction = PumpingDirection.INFUSE
# Sets the pumping volume of the pump in units of milliliters.
pump.pumping_volume = 1.0
# Sets the pumping rate of the pump in units of milliliters per minute.
pump.pumping_rate = 20.0
```
### Identifying
``` python
# Prints the model number of the pump (e.g. "1000" for NE-1000).
print(pump.model_number)
# Prints the firmware version of the pump (e.g. "(3, 928)" for 3.928).
print(pump.firmware_version)
```
### Running (Blocking)
Blocking running waits while the pump is running.
``` python
# Runs the pump considering the direction, volume, and rate set.
pump.run()
```
### Running (Non-blocking)
Non-blocking running returns immediately after the run has been started.
``` python
# Starts running the pump considering the direction, volume, and rate set.
pump.run(False)
# Waits while the pump is running.
pump.wait_while_running()
# Starts running the pump considering the direction, volume, and rate set.
pump.run(False)
# Waits while the pump is running.
while pump.running :
...
# Starts running the pump considering the direction, volume, and rate set.
pump.run(False)
...
# Stops the pump.
pump.stop()
``` | PypiClean |
/ODMExifRead-3.0.4.tar.gz/ODMExifRead-3.0.4/exifread/exif_log.py | import sys
import logging
TEXT_NORMAL = 0
TEXT_BOLD = 1
TEXT_RED = 31
TEXT_GREEN = 32
TEXT_YELLOW = 33
TEXT_BLUE = 34
TEXT_MAGENTA = 35
TEXT_CYAN = 36
def get_logger():
"""Use this from all files needing to log."""
return logging.getLogger("exifread")
def setup_logger(debug, color):
"""Configure the logger."""
if debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logger = logging.getLogger("exifread")
stream = Handler(log_level, debug, color)
logger.addHandler(stream)
logger.setLevel(log_level)
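# Typical usage (illustrative sketch): call setup_logger() once at program
# start, then fetch the shared "exifread" logger from any module, e.g.
#
#   setup_logger(debug=True, color=False)
#   logger = get_logger()
#   logger.debug("opening file")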
class Formatter(logging.Formatter):
"""
Custom formatter, we like colors!
"""
def __init__(self, debug=False, color=False):
self.color = color
self.debug = debug
if self.debug:
log_format = "%(levelname)-6s %(message)s"
else:
log_format = "%(message)s"
logging.Formatter.__init__(self, log_format)
def format(self, record):
if self.debug and self.color:
if record.levelno >= logging.CRITICAL:
color = TEXT_RED
elif record.levelno >= logging.ERROR:
color = TEXT_RED
elif record.levelno >= logging.WARNING:
color = TEXT_YELLOW
elif record.levelno >= logging.INFO:
color = TEXT_GREEN
elif record.levelno >= logging.DEBUG:
color = TEXT_CYAN
else:
color = TEXT_NORMAL
record.levelname = "\x1b[%sm%s\x1b[%sm" % (color, record.levelname, TEXT_NORMAL)
return logging.Formatter.format(self, record)
class Handler(logging.StreamHandler):
def __init__(self, log_level, debug=False, color=False):
self.color = color
self.debug = debug
logging.StreamHandler.__init__(self, sys.stdout)
self.setFormatter(Formatter(debug, color))
self.setLevel(log_level)
# def emit(self, record):
# record.msg = "\x1b[%sm%s\x1b[%sm" % (TEXT_BOLD, record.msg, TEXT_NORMAL)
# logging.StreamHandler.emit(self, record) | PypiClean |
/GraphLab_Create-2.1-cp27-none-macosx_10_5_x86_64.macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.macosx_10_11_intel.macosx_10_11_x86_64.whl/graphlab/meta/asttools/mutators/remove_trivial.py | from __future__ import print_function
import _ast
import ast
from ...asttools.visitors.graph_visitor import GraphGen
from ...asttools import Visitor, dont_visit, visit_children
from ...asttools.mutators.replace_mutator import replace_nodes
from ...asttools.visitors.symbol_visitor import get_symbols
from ...asttools.visitors.cond_symbol_visitor import conditional_lhs
class Assignment(object):
def __init__(self, root, assignments):
self.root = root
self.assignments = assignments
def visit_conditional(self, node):
conditional, stable = conditional_lhs(node)
if not stable:
return
bgather = GatherAssignments()
for stmnt in node.body: bgather.visit(stmnt)
egather = GatherAssignments()
for stmnt in node.orelse: egather.visit(stmnt)
for symbol in stable:
node_list = self.assign_id_map.setdefault(symbol, [])
assignments = []
for asgn_list in bgather.assign_id_map[symbol]:
assignments.extend(asgn_list.assignments)
for asgn_list in egather.assign_id_map[symbol]:
assignments.extend(asgn_list.assignments)
node_list.append(Assignment(root=node, assignments=assignments))
class GatherAssignments(Visitor):
'''
Collect ast nodes that assign to the same variable.
'''
def __init__(self):
self.assign_id_map = {}
visitTryExcept = dont_visit
visitDefault = visit_children
visitIf = visit_conditional
visitFor = visit_conditional
visitWhile = visit_conditional
def visitAssign(self, node):
target_ids = [get_symbols(target, ast.Store) for target in node.targets]
target_ids = set.union(*target_ids)
for id in target_ids:
node_list = self.assign_id_map.setdefault(id, [])
node_list.append(Assignment(root=node, assignments=(node,)))
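# After a visit, `assign_id_map` maps each assigned symbol name to a list of
# Assignment records: one per top-level statement that binds the name, where
# a conditional statement (if/for/while) only counts if the symbol is bound
# on every branch. remove_trivial() below compares consecutive records per
# symbol to find assignments whose value is never read before being rebound.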
def remove_trivial(root):
'''
Remove redundant statements.
The statement `a = 1` will be removed::
a = 1
a = 2
The statement `a = 1` will not be removed because `b` depends on it::
a = 1
b = a + 2
a = 2
:param root: ast node
'''
gen = GatherAssignments()
gen.visit(root)
to_remove = []
for symbol, assignments in gen.assign_id_map.items():
if len(assignments) < 2:
continue
for j in range(len(assignments) - 1):
i1 = root.body.index(assignments[j].root)
i2 = root.body.index(assignments[j + 1].root)
body = root.body[i1 + 1:i2]
grapher = GraphGen()
for stmnt in body:
grapher.visit(stmnt)
if symbol not in grapher.used:
to_remove.extend(assignments[j].assignments)
Pass = lambda node: _ast.Pass(lineno=node.lineno, col_offset=node.col_offset)
for old in to_remove:
replace_nodes(root, old, Pass(old))
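# Example usage (illustrative sketch):
#   import ast
#   tree = ast.parse("a = 1\na = 2\nb = a + 1")
#   remove_trivial(tree)
#   # the dead `a = 1` statement has been replaced by a `pass` node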
def remove_unused_assign(root, symbol):
'''
Remove redundant assignments to the given symbol.
The statement `a = 1` will be removed::
a = 1
a = 2
The statement `a = 1` will not be removed because `b` depends on it::
a = 1
b = a + 2
a = 2
:param root: ast node
:param symbol: name of the symbol whose redundant assignments should be removed
'''
gen = GatherAssignments()
gen.visit(root)
to_remove = []
if symbol not in gen.assign_id_map:
return
assignments = gen.assign_id_map[symbol]
if len(assignments) < 2:
return
for j in range(len(assignments) - 1):
i1 = root.body.index(assignments[j].root)
i2 = root.body.index(assignments[j + 1].root)
body = root.body[i1 + 1:i2]
grapher = GraphGen()
for stmnt in body:
grapher.visit(stmnt)
if symbol not in grapher.used:
to_remove.extend(assignments[j].assignments)
Pass = lambda node: _ast.Pass(lineno=node.lineno, col_offset=node.col_offset)
for old in to_remove:
replace_nodes(root, old, Pass(old)) | PypiClean |
/MicroTokenizer-0.21.2.tar.gz/MicroTokenizer-0.21.2/docs/installation.rst | .. highlight:: shell
============
Installation
============
Stable release
--------------
To install Micro Tokenizer for Chinese, run this command in your terminal:
.. code-block:: console
$ pip install MicroTokenizer
This is the preferred method to install Micro Tokenizer for Chinese, as it will always install the most recent stable release.
If you don't have `pip`_ installed, this `Python installation guide`_ can guide
you through the process.
.. _pip: https://pip.pypa.io
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
From sources
------------
The sources for Micro Tokenizer for Chinese can be downloaded from the `Github repo`_.
You can either clone the public repository:
.. code-block:: console
$ git clone git://github.com/howl-anderson/MicroTokenizer
Or download the `tarball`_:
.. code-block:: console
$ curl -OL https://github.com/howl-anderson/MicroTokenizer/tarball/master
Once you have a copy of the source, you can install it with:
.. code-block:: console
$ python setup.py install
.. _Github repo: https://github.com/howl-anderson/MicroTokenizer
.. _tarball: https://github.com/howl-anderson/MicroTokenizer/tarball/master
| PypiClean |
/Ibidas-0.1.26.tar.gz/Ibidas-0.1.26/ibidas/command_parser.py | from utils import util, context, infix
from parser_objs import *
import inspect
import representor
allowed_clss = (representor.Representor, context.Context, infix.Infix, int, long, float, str)
class CommandLineScanner(GenericScanner):#{{{
def tokenize(self, input):
self.rv = []
GenericScanner.tokenize(self,input)
return self.rv
def t_anumber_0(self,s):
r" \d*\.\d+([eE][\+\-]\d+)?[jJ]? | \d+\.([eE][\+\-]\d+)?[jJ]? | \d+[eE][\+\-]\d+[jJ]? "
t = Token(type="float",attr=s)
self.rv.append(t)
def t_anumber_1(self,s):
"""
0[bB][01]+[lL]? | 0[xX][0-9a-fA-F]+[lL]? | 0[oO][0-7]*[lL]? | \d+[lL]?
"""
t = Token(type="integer",attr=s)
self.rv.append(t)
def t_bkeywords(self, s):
r' and[^a-zA-Z\d]+ | as[^a-zA-Z\d]+ | assert[^a-zA-Z\d]+ | break[^a-zA-Z\d]+ | class[^a-zA-Z\d]+ | continue[^a-zA-Z\d]+ | def[^a-zA-Z\d]+ | del[^a-zA-Z\d]+ | elif[^a-zA-Z\d]+ | else[^a-zA-Z\d]+ | except[^a-zA-Z\d]+ | exec[^a-zA-Z\d]+ | finally[^a-zA-Z\d]+ | for[^a-zA-Z\d]+ | from[^a-zA-Z\d]+ | global[^a-zA-Z\d]+ | if[^a-zA-Z\d]+ | import[^a-zA-Z\d]+ | in[^a-zA-Z\d]+ | is[^a-zA-Z\d]+ | lambda[^a-zA-Z\d]+ | not[^a-zA-Z\d]+ | or[^a-zA-Z\d]+ | pass[^a-zA-Z\d]+ | print[^a-zA-Z\d]+ | raise[^a-zA-Z\d]+ | return[^a-zA-Z\d]+ | try[^a-zA-Z\d]+ | while[^a-zA-Z\d]+ | with[^a-zA-Z\d]+ | yield[^a-zA-Z\d]+ '
t = Token(type=s)
self.rv.append(t)
def t_cidentifier(self,s):
r' [a-zA-Z_][a-zA-Z_\d]* '
t = Token(type='identifier',attr=s)
self.rv.append(t)
def t_dsymbol_0(self,s):
r' \"\"\" | \+\= | \-\= | \*\= | \/\= | \/\/\= | \%\= | \&\= | \|\= | \^\= | \>\>\= | \<\<\= | \*\*\= '
t = Token(type=s)
self.rv.append(t)
def t_esymbol_1(self, s):
r' \+ | \- | \*\* | \* | \/\/ | \/ | \% | \<\< | \>\> | \& | \| | \^ | \~ | \< | \> | \<\= | \>\= | \=\= | \!\= | \<\> '
t = Token(type=s)
self.rv.append(t)
def t_fsymbol_2(self,s):
r' \( | \) | \[ | \] | \{ | \} | \@ | \, | \: | \. | \` | \= | \; | \" | \''
t = Token(type=s)
self.rv.append(t)#}}}
def t_gwhitespace(self,s):
r' [\t\s\n]+ '
t = Token(type="whitespace",attr=s)
self.rv.append(t)
pass
def merge_tokens(tokens):
res = []
for token in tokens:
if token.attr is None:
res.append(str(token.type))
else:
res.append(str(token.attr))
return "".join(res)
def process_tokens(tokens):
if not tokens:
return tokens
pos = 0
while pos < len(tokens):
token = tokens[pos]
if pos < len(tokens) - 1:
nexttoken = tokens[pos + 1]
else:
nexttoken = None
if token == '"' or token.type == '"""' or token.type == "'":
try:
endpos = tokens.index(token.type,pos+1)
ntoken = Token(type="string", attr=merge_tokens(tokens[(pos + 1):endpos]))
tokens = tokens[:pos] + [ntoken] + tokens[(endpos + 1):]
except ValueError:
tokens[pos] = Token(type="incomplete_string", attr=merge_tokens(tokens[(pos + 1):]))
tokens = tokens[:(pos + 1)]
elif token == "whitespace":
del tokens[pos]
pos -= 1
pos = pos + 1
return tokens
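# For example (illustrative), the input
#   x["ab c"]
# is first scanned into the raw tokens
#   identifier(x)  [  "  identifier(ab)  whitespace  identifier(c)  "  ]
# and process_tokens() then merges the quoted run into one string token and
# removes whitespace tokens, leaving
#   identifier(x)  [  string(ab c)  ]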
class CommandLineParser(GenericParser):#{{{
def __init__(self, start="simple_stmt"):
GenericParser.__init__(self, start)
def p_atom_0(self,args):
"""
atom ::= identifier
atom ::= literal
atom ::= enclosure
"""
return AST(type="atom",kids=args[:1])
def p_enclosure_0(self,args):
"""
enclosure ::= parenth_form
enclosure ::= list_display
enclosure ::= generator_expression
enclosure ::= dict_display
enclosure ::= set_display
enclosure ::= string_conversion
enclosure ::= yield_atom
"""
return AST(type="enclosure",kids=args[:1])
def p_literal_0(self,args):
"""
literal ::= integer
literal ::= string
literal ::= incomplete_string
literal ::= float
"""
return AST(type="literal",kids=args[:1])
def p_parenth_form_0(self,args):
"""
parenth_form ::= ( expression_list )
parenth_form ::= ( )
"""
return AST(type="parenth_form",kids=args[1:-1])
def p_list_display_0(self,args):
"""
list_display ::= [ ]
list_display ::= [ expression_list ]
list_display ::= [ list_comprehension ]
"""
return AST(type="list_display",kids=args[1:-1])
def p_list_comprehension(self, args):
r" list_comprehension ::= expression list_for "
return AST(type="list_comprehension",kids=args)
def p_list_for(self, args):
"""
list_for ::= for target_list in old_expression_list
list_for ::= for target_list in old_expression_list list_iter
"""
return AST(type="list_for",kids=args[1:2] + args[3:])
def p_old_expression_list(self,args):
"""
old_expression_list ::= old_expression
old_expression_list ::= old_expression_list , old_expression
"""
return AST(type="old_expression_list",kids=args[:1] + args[2:])
def p_old_expression(self,args):
"""
old_expression ::= or_test
old_expression ::= old_lambda_form
"""
return AST(type="old_expression",kids=args[:1])
def p_list_iter(self,args):
"""
list_iter ::= list_for
list_iter ::= list_if
"""
return AST(type="list_iter",kids=args[:1])
def p_list_if(self,args):
"""
list_if ::= if old_expression
list_if ::= if old_expression list_iter
"""
return AST(type="list_if",kids=args[1:])
def p_comprehension(self,args):
r" comprehension ::= expression comp_for "
return AST(type="comprehension",kids=args)
def p_comp_for(self,args):
"""
comp_for ::= for target_list in or_test
comp_for ::= for target_list in or_test comp_iter
"""
return AST(type="comp_for",kids=args[1:2] + args[3:])
def p_comp_iter(self, args):
"""
comp_iter ::= comp_for
comp_iter ::= comp_if
"""
return AST(type="comp_iter",kids=args[:1])
def p_comp_if(self,args):
"""
comp_if ::= if expression_nocond
comp_if ::= if expression_nocond comp_iter
"""
return AST(type="comp_if",kids=args[1:])
def p_generator_expression(self,args):
r" generator_expression ::= ( expression comp_for ) "
return AST(type="generator_expression",kids=args[1:3])
def p_string_conversion_0(self,args):
"""
string_conversion ::= ` expression_list `
"""
return AST(type="string_conversion",kids=args[1:-1])
def p_primary(self,args):
"""
primary ::= atom
primary ::= attributeref
primary ::= subscription
primary ::= slicing
primary ::= partial_slicing
primary ::= call
"""
return AST(type="primary",kids=args[:1])
def p_attributeref(self,args):
"""
primary ::= primary . identifier
primary ::= primary .
"""
return AST(type="attributeref",kids=args[:1] + args[2:])
def p_subscription(self,args):
"""
subscription ::= primary [ expression_list ]
"""
return AST(type="subscription",kids=args[:1] + args[2:3])
def p_partial_slicing(self,args):
"""
partial_slicing ::= primary [ slice_list
"""
return AST(type="partial_slicing",kids=args[:1] + args[2:3])
def p_slicing(self, args):
"""
slicing ::= simple_slicing
slicing ::= extended_slicing
"""
return AST(type="slicing",kids=args[:1])
def p_simple_slicing(self,args):
"""
simple_slicing ::= primary [ short_slice ]
"""
return AST(type="simple_slicing",kids=args[:1] + args[2:3])
def p_extended_slicing(self, args):
"""
extended_slicing ::= primary [ slice_list ]
"""
return AST(type="extended_slicing",kids=args[:1] + args[2:3])
def p_slice_list(self, args):
"""
slice_list ::= slice_item
slice_list ::= slice_list , slice_item
"""
return AST(type="slice_list",kids=args[:1] + args[2:])
def p_slice_item(self, args):
"""
slice_item ::= expression
slice_item ::= proper_slice
slice_item ::= ellipsis
"""
return AST(type="slice_item",kids=args[:1])
def p_proper_slice(self, args):
"""
proper_slice ::= short_slice
proper_slice ::= long_slice
"""
return AST(type="proper_slice",kids=args[:1])
def p_short_slice(self, args):
"""
short_slice ::= :
short_slice ::= expression :
short_slice ::= : expression
short_slice ::= expression : expression
"""
none = Token(type="object",attr=None)
if(len(args) == 1):
kids = [none, none]
elif(args[0] == "expression"):
if len(args) > 2:
kids = [args[0], args[2]]
else:
kids = [args[0], none]
else:
kids = [none, args[1]]
return AST(type="short_slice",kids=kids)
def p_long_slice(self, args):
"""
long_slice ::= short_slice :
long_slice ::= short_slice : expression
"""
return AST(type="extended_slicing",kids=args[:1] + args[2:])
def p_ellipsis(self, args):
"""
ellipsis ::= . . .
"""
return Token(type ="elllipsis")
def p_call(self,args):
"""
call ::= primary ( argument_list )
"""
#FIXME
return AST(type="call",kids=args[:1] + args[2:3])
def p_argument_list(self, args):
"""
argument_list ::= positional_arguments
argument_list ::= positional_arguments , keyword_arguments
argument_list ::= keyword_arguments
"""
#FIXME
return AST(type="argument_list",kids=args[:1] + args[2:])
def p_positional_arguments(self, args):
"""
positional_arguments ::= expression
positional_arguments ::= positional_arguments , expression
"""
return AST(type="positional_arguments",kids=args[:1] + args[2:])
def p_keyword_arguments(self, args):
"""
keyword_arguments ::= keyword_item
keyword_arguments ::= keyword_arguments , keyword_item
"""
return AST(type="keyword_arguments",kids=args[:1] + args[2:])
def p_keyword_item(self, args):
r" keyword_item ::= identifier = expression "
return AST(type="keyword_item",kids=args[:1] + args[2:])
def p_conditional_expression(self, args):
"""
conditional_expression ::= or_test if or_test else expression
"""
if(len(args) > 1):
return AST(type="conditional_expression",kids=args[:1] + args[3:4] + args[5:])
else:
return AST(type="conditional_expression",kids=args[:1])
def p_expression(self,args):
"""
expression ::= conditional_expression
expression ::= lambda_form
"""
return AST(type="expression",kids=args[:1])
def p_power(self,args):
"""
power ::= primary
power ::= primary ** u_expr
"""
return AST(type="power",kids=args[:1] + args[2:])
def p_u_expr(self, args):
"""
u_expr ::= power
u_expr ::= - u_expr
u_expr ::= + u_expr
u_expr ::= ~ u_expr
"""
return AST(type="u_expr",kids=args)
def p_m_expr(self, args):
"""
m_expr ::= u_expr
m_expr ::= m_expr * u_expr
m_expr ::= m_expr // u_expr
m_expr ::= m_expr / u_expr
m_expr ::= m_expr % u_expr
"""
return AST(type="m_expr",kids=args)
def p_a_expr(self, args):
"""
a_expr ::= m_expr
a_expr ::= a_expr + m_expr
a_expr ::= a_expr - m_expr
"""
return AST(type="a_expr",kids=args)
def p_shift_expr(self, args):
"""
shift_expr ::= a_expr
shift_expr ::= shift_expr << a_expr
shift_expr ::= shift_expr >> a_expr
"""
return AST(type="shift_expr",kids=args)
def p_and_expr(self, args):
"""
and_expr ::= shift_expr
and_expr ::= and_expr & shift_expr
"""
return AST(type="and_expr",kids=args)
def p_xor_expr(self, args):
"""
xor_expr ::= and_expr
xor_expr ::= xor_expr & and_expr
"""
return AST(type="xor_expr",kids=args)
def p_or_expr(self, args):
"""
or_expr ::= xor_expr
or_expr ::= or_expr | xor_expr
"""
return AST(type="or_expr",kids=args)
def p_comparision(self, args):
"""
comparison ::= or_expr
comparison ::= comparison < or_expr
comparison ::= comparison > or_expr
comparison ::= comparison == or_expr
comparison ::= comparison >= or_expr
comparison ::= comparison <= or_expr
comparison ::= comparison <> or_expr
comparison ::= comparison != or_expr
comparison ::= comparison is or_expr
comparison ::= comparison is not or_expr
comparison ::= comparison in or_expr
comparison ::= comparison not in or_expr
"""
return AST(type="comparison",kids=args)
def p_not_test(self, args):
"""
not_test ::= comparison
not_test ::= not not_test
"""
return AST(type="not_test",kids=args[:1] + args[2:])
def p_and_test(self, args):
"""
and_test ::= not_test
and_test ::= and_test and not_test
"""
return AST(type="and_test",kids=args[:1] + args[2:])
def p_or_test(self, args):
"""
or_test ::= and_test
or_test ::= or_test or and_test
"""
return AST(type="or_test",kids=args[:1] + args[2:])
def p_conditional_expression(self, args):
"""
conditional_expression ::= or_test
conditional_expression ::= or_test if or_test else expression
"""
return AST(type="conditional_expression",kids=args[:1] + args[2:3] + args[4:])
def p_expression(self, args):
"""
expression ::= conditional_expression
expression ::= lambda_form
"""
return AST(type="expression",kids=args[:1])
def p_lambda_form(self,args):
"""
lambda_form ::= lambda : expression
lambda_form ::= lambda parameter_list : expression
"""
if len (args) == 3:
return AST(type="lambda_form",kids=args[2:])
else:
return AST(type="lambda_form",kids=args[1:2] + args[3:])
def p_old_lambda_form(self,args):
"""
old_lambda_form ::= lambda : old_expression
old_lambda_form ::= lambda parameter_list : old_expression
"""
if len (args) == 3:
return AST(type="old_lambda_form",kids=args[2:])
else:
return AST(type="old_lambda_form",kids=args[1:2] + args[3:])
def p_expression_list(self, args):
"""
expression_list ::= expression
expression_list ::= expression_list , expression
"""
return AST(type="expression_list",kids=args[:1] + args[2:])
def p_simple_stmt(self, args):
"""
simple_stmt ::= expression_stmt
simple_stmt ::= assert_stmt
simple_stmt ::= assignment_stmt
simple_stmt ::= augmented_assignment_stmt
simple_stmt ::= pass_stmt
simple_stmt ::= del_stmt
simple_stmt ::= print_stmt
simple_stmt ::= return_stmt
simple_stmt ::= yield_stmt
simple_stmt ::= raise_stmt
simple_stmt ::= break_stmt
simple_stmt ::= continue_stmt
simple_stmt ::= import_stmt
simple_stmt ::= global_stmt
simple_stmt ::= exec_stmt
"""
return AST(type="simple_stmt",kids=args[:1])
def p_expression_stmt(self, args):
"""
expression_stmt ::= expression_list
"""
return AST(type="expression_stmt", kids=args[:1])
def p_assignemnt_stmt(self, args):
"""
assignment_stmt ::= target_list = expression_list
assignment_stmt ::= target_list = yield_expression
"""
#FIXME
return AST(type="assignment_stmt", kids=args[:1] + args[2:])
def p_target_list(self, args):
"""
target_list ::= target
target_list ::= target_list , target
"""
return AST(type="target_list", kids=args[:1] + args[2:])
def p_target(self, args):
"""
target ::= identifier
target ::= ( target_list )
target ::= [ target_list ]
target ::= attributeref
target ::= subscription
target ::= slicing
"""
if(len(args) == 1):
kids = args
else:
kids = args[1:2]
return AST(type="target", kids=kids)
#}}}
class CommandLineASTRewriterPass1(GenericASTRewriter):#{{{
def process(self, tree):
ntree = self.postorder(tree)
return ntree
def n_slice_list(self, node):
if(node.kids[0] == "slice_list"):
node.kids = node.kids[0].kids + node.kids[1:]
return node
def n_positional_arguments(self, node):
if(node.kids[0] == "positional_arguments"):
node.kids = node.kids[0].kids + node.kids[1:]
return node
def n_keyword_arguments(self, node):
if(node.kids[0] == "keyword_arguments"):
node.kids = node.kids[0].kids + node.kids[1:]
return node
def n_target_list(self, node):
if(node.kids[0] == "target_list"):
node.kids = node.kids[0].kids + node.kids[1:]
return node
def n_expression_list(self, node):
if(node.kids[0] == "expression_list"):
node.kids = node.kids[0].kids + node.kids[1:]
return node
class CommandLineASTRewriterPass2(GenericASTRewriter):#{{{
def process(self, tree, ipshell):
self.ipshell = ipshell
ntree = self.postorder(tree)
return ntree
def objectify(self, node, pos, context=None):
if node.kids[pos] == "identifier":
if context is not None and node.kids[pos].attr == "_":
node.kids[pos] = Token(type="object", attr= context)
else:
node.kids[pos] = Token(type="object", attr= get_from_namespace(node.kids[pos].attr, self.ipshell))
return node
def n_attributeref(self, node, context=None):
node = self.objectify(node,0, context)
if node.kids[0] != "object":
return node
obj = node.kids[0].attr
if(len(node.kids) == 1):
node.kids.append(Token(type="identifier",attr=""))
elif node.kids[1] == "identifier" and hasattr(obj, node.kids[1].attr):
res = getattr(obj, node.kids[1].attr)
return Token(type="object",attr=res)
return node
def n_slicing(self, node, context=None):
node.kids = node.kids[0].kids
node = self.objectify(node,0, context)
if node.kids[0] != "object" or not isinstance(node.kids[0].attr, allowed_clss):
return node
obj = node.kids[0].attr
if node.kids[1] == "object":
return Token(type="object",attr=obj[node.kids[1].attr])
return node
def x_partial_slicing(self, node, context=None):
node.kids[0] = self.postorder(node.kids[0], context)
node = self.objectify(node,0,context)
if node.kids[0] == "object":
context = node.kids[0].attr
return self.postorder(node.kids[1],context)
def n_float(self, node, context=None):
return Token(type="object", attr=float(node.attr))
def n_integer(self, node, context=None):
if node.attr[-1] == 'l' or node.attr[-1] == 'L':
return Token(type="object", attr=long(node.attr))
else:
return Token(type="object", attr=int(node.attr))
def n_string(self, node, context=None):
return Token(type="object", attr=node.attr)
def n_short_slice(self, node, context):
if all([kid == "object" for kid in node.kids]):
return Token(type="object", attr=slice(*[kid.attr for kid in node.kids]))
return node
def n_slice_list(self, node, context=None):
if(len(node.kids) == 1):
return node.kids[0]
if not all([kid == "object" for kid in node.kids]):
return node
return Token(type="object", attr= tuple([kid.attr for kid in node.kids]))
def n_target_list(self, node, context=None):
if(len(node.kids) == 1):
return node.kids[0]
return node
def n_argument_list(self, node, context=None):
nkids = []
params = []
keywords = {}
for kid in node.kids:
if kid == 'positional_arguments':
if(all([skid == "object" for skid in kid.kids])):
params.extend([skid.attr for skid in kid.kids])
else:
return node
elif kid == 'keyword_arguments':
if(not all([skid == "keyword_item" for skid in kid.kids])):
return node
if(not all([skid.kids[1] == "object" for skid in kid.kids])):
return node
for skid in kid.kids:
keywords[skid.kids[0].attr] = skid.kids[1].attr
node.keywords = keywords
node.params = params
return node
def n_assignment_stmt(self, node, context=None):
#no assignments handled yet
return node.kids[1]
def n_call(self, node, context=None):
if not (node.kids[0] == "object" and node.kids[1] == "argument_list" and hasattr(node.kids[1],'params')):
return node
method = node.kids[0].attr
if(inspect.ismethod(method)):
if not isinstance(method.im_self, allowed_clss):
return node
if isinstance(method.im_self, representor.Representor) and method.im_func.func_name in ['__str__','__nonzero__','Copy','__reduce__']:
return node
res = method(*node.kids[1].params, **node.kids[1].keywords)
return Token(type="object", attr= res)
def binoperator(self, node, context=None):
if(len(node.kids) == 1):
return node.kids[0]
node = self.objectify(node,0, context)
if node.kids[0] =="object" and node.kids[2] == "object":
obj1 = node.kids[0].attr
obj2 = node.kids[2].attr
if(isinstance(obj1, allowed_clss) and isinstance(obj2, allowed_clss)):
res = eval('obj1 ' + node.kids[1].type + ' obj2')
return Token(type="object", attr= res)
return node
n_comparison = binoperator
n_or_expr = binoperator
n_and_expr = binoperator
n_xor_expr = binoperator
n_shift_expr = binoperator
n_a_expr = binoperator
n_m_expr = binoperator
n_power = binoperator
def n_removenode(self, node, context=None):
if len(node.kids) == 1:
return node.kids[0]
return node
n_expression_list = n_removenode
n_expression = n_removenode
n_conditional_expression = n_removenode
n_and_test = n_removenode
n_or_test = n_removenode
n_not_test = n_removenode
n_u_expr = n_removenode
n_literal = n_removenode
n_enclosure = n_removenode
n_atom = n_removenode
n_primary = n_removenode
n_proper_slice = n_removenode
n_slice_item = n_removenode
n_simple_stmt = n_removenode
n_expression_stmt = n_removenode
#}}}
class CommandLineASTInterpreterPass(GenericASTRewriter):#{{{
def process(self, tree):
ntree = self.toponly(tree)
return ntree
def toponly(self, node=None):
name = 'n_' + self.typestring(node)
if hasattr(self, name):
func = getattr(self, name)
node = func(node)
else:
node = self.default(node)
return node
def n_attributeref(self, node):
if node.kids[0] == "object" and node.kids[1] == "identifier":
return (node.kids[0].attr, node.kids[1].attr)
return node
#}}}
def get_from_namespace(attr, ipshell):
if attr in ipshell.IP.user_ns:
return ipshell.IP.user_ns[attr]
else:
return ipshell.IP.user_global_ns[attr]
def parseCommandLine(line, ipshell, cursor_pos=None, debug=False):
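    # Pipeline: tokenize -> token post-processing -> parse -> flatten nested lists
    # (pass 1) -> partial evaluation against the IPython namespace (pass 2) ->
    # top-level interpretation of the resulting tree.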
if cursor_pos is None:
parse_line = line
else:
parse_line = line[:cursor_pos]
scanner = CommandLineScanner()
tokens = scanner.tokenize(parse_line)
if(debug):
print 1, tokens
tokens = process_tokens(tokens)
if(debug):
print 2, tokens
parser = CommandLineParser()
tree = parser.parse(tokens)
if(debug):
print 3, tree
rewriter = CommandLineASTRewriterPass1()
tree = rewriter.process(tree)
if(debug):
print 4, tree
rewriter = CommandLineASTRewriterPass2()
tree = rewriter.process(tree,ipshell)
if(debug):
print 5, tree
interpreter = CommandLineASTInterpreterPass()
res = interpreter.process(tree)
if(debug):
print 6, res
    return res
/Agent_Smith-0.1.1-py3-none-any.whl/AgentSmith/Agents.py
import os
import numpy as np
import random
import torch
import torch.nn as nn
import torch.optim as optim
from collections import namedtuple, deque
import matplotlib.pyplot as plt
from typing import Union, List, Literal, Tuple
Transition = namedtuple('Transition',
('state', 'action', 'reward', 'next_state', 'done'))
class Transitions:
def __init__(self, maxlen: int) -> None:
self.transitions = deque([], maxlen=maxlen)
def append(self, transition) -> None:
self.transitions.append(transition)
def __len__(self) -> int:
return len(self.transitions)
def __getitem__(self, index) -> Transition:
return self.transitions[index]
def sample(self, count: int) -> List[Transition]:
return random.sample(self.transitions, count)
def sampleTensors(self, count: int, device: Literal['cpu', 'gpu']) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
transitions = random.sample(self.transitions, count)
states = torch.cat(
[i.state for i in transitions])
actions = torch.tensor(
[i.action for i in transitions], device=device)
rewards = torch.tensor(
[i.reward for i in transitions], device=device)
next_states = torch.cat(
[i.next_state for i in transitions])
dones = torch.tensor(np.array(
[i.done for i in transitions], dtype=int), device=device)
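        # Reshape any 1-D tensors to (batch, 1) so later element-wise arithmetic broadcasts correctly.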
states = states if len(states.shape) > 1 else states.view(count, -1)
actions = actions if len(
actions.shape) > 1 else actions.view(count, -1)
rewards = rewards if len(
rewards.shape) > 1 else rewards.view(count, -1)
next_states = next_states if len(
next_states.shape) > 1 else next_states.view(count, -1)
dones = dones if len(dones.shape) > 1 else dones.view(count, -1)
return states, actions, rewards, next_states, dones
class DeepQAgent:
def __init__(self,
env,
policy_network: nn.Module,
target_network: nn.Module,
optimizer: optim.Optimizer,
loss_function,
device: Literal['cpu', 'gpu'] = 'cpu',
state_processor=None,
greedy_function=lambda x: 0.2,
replay_size: int = 50000,
batch_size: int = 64,
gamma: float = 0.95,
target_update_rate: int = 1000) -> None:
self.env = env
self.policy_network = policy_network
self.target_network = target_network
self.optimizer = optimizer
self.loss_function = loss_function
self.device = device
self.greedy_function = greedy_function
self.batch_size = batch_size
self.gamma = gamma
self.target_update_rate = target_update_rate
self.action_size = env.action_space.n
self.replay_buffer = Transitions(replay_size)
if state_processor is not None:
self.state_processor = state_processor
else:
self.state_processor = lambda x: torch.tensor(
x, dtype=torch.float32, device=device)
self.update_target_network()
self.target_network.eval()
def update_target_network(self) -> None:
self.target_network.load_state_dict(self.policy_network.state_dict())
def select_action(self, state: torch.Tensor, epsilon: float) -> int:
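        # Epsilon-greedy: with probability (1 - epsilon) exploit the policy network,
        # otherwise pick a uniformly random action.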
if random.uniform(0, 1) > epsilon:
with torch.no_grad():
self.policy_network.eval()
action = torch.argmax(self.policy_network(state)).item()
return action
else:
return np.random.randint(self.action_size)
def optimize(self) -> None:
self.policy_network.train()
batch_states, batch_actions, batch_rewards, batch_next_states, batch_dones = self.replay_buffer.sampleTensors(
self.batch_size, self.device)
q = torch.take_along_dim(self.policy_network(
batch_states), batch_actions.view(-1, 1), dim=1)
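        # Bellman target: r + gamma * max_a' Q_target(s', a'), with the bootstrap term
        # zeroed out for terminal transitions.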
expected_q = batch_rewards + self.gamma * \
self.target_network(batch_next_states).amax(
dim=1).unsqueeze(1) * (1 - batch_dones.float())
loss = self.loss_function(q, expected_q)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def plot_returns(self, name: str, returns: List[int], average_window: int, show: bool = True) -> None:
plt.clf()
plt.plot(returns, label='Episode Returns')
if len(returns) >= average_window:
y = np.convolve(returns, np.ones(average_window),
'valid') / average_window
x = np.arange(y.shape[0]) + average_window
plt.plot(x, y, label='%u Episode Avg. Returns' % average_window)
plt.xlabel('Episode')
plt.ylabel('Return')
plt.legend(loc='upper left')
plt.savefig('%s_returns.png' % name)
if show:
plt.ion()
plt.figure(1)
plt.show()
plt.pause(0.001)
def train(self, name: str = 'agent', average_window: int = 20, max_episodes: int = 100, return_goal: float = 10e9, plot_returns: bool = False, render_rate: int = 0, save_rate: int = 10) -> None:
if not os.path.exists('models'):
os.makedirs('models')
episode_returns = []
episode_average_window = deque(maxlen=average_window)
step_count = 0
for episode in range(max_episodes):
done = False
episode_return = 0
state = self.state_processor(self.env.reset())
while done is not True:
step_count += 1
if render_rate > 0 and (episode % render_rate) == 0:
self.env.render()
action = self.select_action(
state, self.greedy_function(episode))
next_state, reward, done, _ = self.env.step(action)
next_state = self.state_processor(next_state)
self.replay_buffer.append(Transition(
state, action, reward, next_state, done))
if len(self.replay_buffer) >= self.batch_size:
self.optimize()
if (step_count % self.target_update_rate) == 0:
self.update_target_network()
state = next_state
episode_return += reward
episode_returns.append(episode_return)
episode_average_window.append(episode_return)
print('\rEpisode: %8u, Return (%8u episode averaged): %8u' %
(episode + 1, average_window, np.mean(episode_average_window)), end='')
self.plot_returns(name, episode_returns,
average_window, plot_returns)
if save_rate != 0 and (episode % save_rate) == 0:
torch.save(self.target_network.state_dict(), 'models/%s_%08u.pt' %
(name, episode + 1))
            if len(episode_returns) > average_window and np.mean(episode_returns[-average_window:]) >= return_goal:
break
print('\n')
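# Example usage of DeepQAgent (an illustrative sketch only, not part of this module).
# It assumes a classic Gym-style environment such as CartPole-v1, whose reset()/step()
# API matches the calls made in train() above, and small fully connected networks:
#
#     import gym
#     env = gym.make('CartPole-v1')
#     policy = nn.Sequential(nn.Linear(4, 64), nn.ReLU(), nn.Linear(64, 2))
#     target = nn.Sequential(nn.Linear(4, 64), nn.ReLU(), nn.Linear(64, 2))
#     agent = DeepQAgent(env, policy, target,
#                        optim.Adam(policy.parameters(), lr=1e-3),
#                        nn.MSELoss(),
#                        greedy_function=lambda episode: max(0.05, 0.9 ** episode))
#     agent.train(name='cartpole', max_episodes=200)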
class ActorCriticAgent:
def __init__(self,
env,
actor_network: nn.Module,
critic_network: nn.Module,
actor_optimizer: optim.Optimizer,
critic_optimizer: optim.Optimizer,
loss_function,
device: Literal['cpu', 'gpu'] = 'cpu',
state_processor=None,
gamma: float = 0.95,
entropy_function=lambda x: 0.01,
) -> None:
self.env = env
self.actor_network = actor_network
self.critic_network = critic_network
self.actor_optimizer = actor_optimizer
self.critic_optimizer = critic_optimizer
self.loss_function = loss_function
self.device = device
self.gamma = gamma
self.entropy_function = entropy_function
self.transitions = Transitions(1)
if state_processor is not None:
self.state_processor = state_processor
else:
self.state_processor = lambda x: torch.tensor(
x, dtype=torch.float32, device=device)
def select_action(self, state: torch.Tensor) -> Tuple[float, float]:
self.actor_network.eval()
return self.actor_network(state)
def optimize(self, entropy_weight: float) -> Tuple[float, float]:
self.actor_network.train()
self.critic_network.train()
transition = self.transitions[0]
state, action, reward, next_state, done = transition.state, transition.action, transition.reward, transition.next_state, transition.done
log_probability = action[1].log_prob(action[0]).sum(dim=-1)
predicted_value = self.critic_network(state)
target_value = reward + self.gamma * \
self.critic_network(next_state) * (1 - done)
critic_loss = self.loss_function(
predicted_value, target_value.detach())
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
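        # Advantage = TD target - V(s), detached so the actor update does not
        # backpropagate through the critic.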
advantage = (target_value - predicted_value).detach()
actor_loss = - advantage * log_probability
actor_loss += -entropy_weight * log_probability
self.actor_network.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
return actor_loss.item(), critic_loss.item()
def plot_returns(self, name: str, returns: List[int], average_window: int, losses: List, show: bool = True) -> None:
losses = np.array(losses)
plt.clf()
plt.subplot(3, 1, 1)
plt.plot(returns, label='Episode Returns')
plt.subplot(3, 1, 2)
plt.plot(losses[:, 0], label='Actor Losses')
plt.subplot(3, 1, 3)
plt.plot(losses[:, 1], label='Critic Losses')
if len(returns) >= average_window:
y = np.convolve(returns, np.ones(average_window),
'valid') / average_window
x = np.arange(y.shape[0]) + average_window
plt.subplot(3, 1, 1)
plt.plot(x, y, label='%u Episode Avg. Returns' % average_window)
plt.xlabel('Episode')
plt.ylabel('Return')
plt.legend(loc='upper left')
plt.savefig('%s_returns.png' % name)
if show:
plt.ion()
plt.figure(1)
plt.show()
plt.pause(0.001)
def train(self, name: str = 'agent', average_window: int = 20, max_episodes: int = 100, return_goal: float = 10e9, plot_returns: bool = False, render_rate: int = 0, save_rate: int = 10) -> None:
        if not os.path.exists('models'):
            os.makedirs('models')
        episode_returns = []
step_losses = []
episode_average_window = deque(maxlen=average_window)
step_count = 0
episode = -1
while True:
episode += 1
done = False
episode_return = 0
state = self.state_processor(self.env.reset())
while done is not True:
step_count += 1
if render_rate > 0 and (episode % render_rate) == 0:
self.env.render()
action = self.select_action(state)
next_state, reward, done, _ = self.env.step(
action[0].cpu().detach().numpy())
next_state = self.state_processor(next_state)
self.transitions.append(Transition(
state, action, reward, next_state, done))
losses = self.optimize(self.entropy_function(episode))
state = next_state
episode_return += reward
step_losses.append(losses)
episode_returns.append(episode_return)
episode_average_window.append(episode_return)
print('\rEpisode: %8u, Return (%8u episode averaged): %8u' %
(episode + 1, average_window, np.mean(episode_average_window)), end='')
self.plot_returns(name, episode_returns,
average_window, step_losses, plot_returns)
if save_rate != 0 and (episode % save_rate) == 0:
                torch.save(self.actor_network.state_dict(), 'models/%s_%08u.pt' %
(name, episode + 1))
            if (episode + 1) >= max_episodes or (len(episode_returns) > average_window and np.mean(episode_returns[-average_window:]) >= return_goal):
break
        print('\n')
/Chips-0.1.2.tar.gz/Chips-0.1.2/chips/streams.py
from math import log
from inspect import currentframe, getsourcefile
from sys import stdout
from collections import deque
from process import Process
from common import how_many_bits, Unique, resize, c_style_modulo,\
c_style_division
from instruction import Write, Read, Available
from chips_exceptions import ChipsSyntaxError, ChipsSimulationError
__author__ = "Jon Dawson"
__copyright__ = "Copyright 2010, Jonathan P Dawson"
__license__ = "MIT"
__version__ = "0.1.2"
__maintainer__ = "Jon Dawson"
__email__ = "[email protected]"
__status__ = "Prototype"
class Chip:
"""
    A Chip is a device containing streams, sinks and processes.
Typically a Chip is used to describe a single device. You need to provide
the Chip object with a list of all the sinks (device outputs). You don't
need to include any process, variables or streams. By analysing the sinks,
the chip can work out which processes and streams need to be included in
the device.
Example::
>>> from chips import *
>>> from chips.VHDL_plugin import Plugin
>>> switches = InPort("SWITCHES", 8)
>>> serial_in = SerialIn("RX")
>>> leds = OutPort(switches, "LEDS")
>>> serial_out = SerialOut(serial_in, "TX")
>>> #We need to tell the Chip that leds and serial_out are part of
>>> #the device. The Chip can work out for itself that switches and
>>> #serial_in are part of the device.
>>> s = Chip(
... leds,
... serial_out,
... )
>>> plugin = Plugin()
>>> s.write_code(plugin)
"""
def __init__(self, *args):
"""Create a streams Chip
Arguments:
            sinks - A sequence object listing all data receivers"""
self.sinks = list(args)
self.filename = getsourcefile(currentframe().f_back)
self.lineno = currentframe().f_back.f_lineno
#begin chip enumeration process
self.streams = []
self.processes = []
self.executables = []
for i in self.sinks:
if (
hasattr(i, "get") or
hasattr(i, "is_process") or
(not hasattr(i, "set_chip"))):
raise ChipsSyntaxError(
(
"Only sinks can be added to chips. " +
repr(i) +
" is not a sink."
),
self.filename,
self.lineno
)
i.set_chip(self)
def write_code(self, plugin):
"""Write source code for the streams chips using the specified plugin
Arguments:
plugin - A code generation plugin such as streams_vhdl.plugin()"""
for i in self.streams:
i.write_code(plugin)
for i in self.processes:
i.write_code(plugin)
plugin.write_chip(self)
def reset(self):
"""Reset the chip to its initial state.
A chip must be reset before it can be executed."""
for i in self.processes:
i.reset()
for i in self.streams:
i.reset()
def execute(self, steps=1):
"""Execute a native simulation
Arguments:
steps - specify the number of execution steps to run"""
        for _ in range(steps):
for i in self.processes:
i.execute()
for i in self.executables:
i.execute()
for i in self.sinks:
i.execute()
def test(self, name, stop_cycles=False):
"""Perform a test
Resets, and executes the chip for specified number of cycles.
Arguments:
name - a test name that will be reported
stop_cycles - the number of cycles to execute for
Returns:
True - if no assertions occur during execution
False - if assertions occur"""
self.reset()
try:
self.execute(stop_cycles)
except AssertionError:
print name,
print "...Fail"
return False
print name,
print "...Pass"
return True
def __repr__(self):
return "Chip(sinks={0})".format(self.sinks)
class Stream:
def __invert__(self):
return Unary(self, 'invert')
def __abs__(self):
return Unary(self, 'abs')
def Not(self):
"""
Return the logical inverse of the stream.
The resulting stream will yield 0 for data items in the stream with
non-zero values, and -1 for data items in the stream with zero values.
"""
return Unary(self, 'not')
def shift_left(self, n):
"""
Return a stream which has been shifted left by a constant value.
Unlike the << operator, the stream returned by this function will be
shifted by a constant value. Where shifting by a constant value is all
that is needed, this function should be implemented in significantly
less logic since a barrel shifter is not required.
        *shift_left* takes a single argument *n* specifying the number of bits
to shift by.
"""
return Unary(self, 'sln', n)
def shift_right(self, n):
"""
Return a stream which has been shifted right by a constant value.
Unlike the >> operator, the stream returned by this function will be
shifted by a constant value. Where shifting by a constant value is all
that is needed, this function should be implemented in significantly
less logic since a barrel shifter is not required.
        *shift_right* takes a single argument *n* specifying the number of bits
to shift by.
"""
return Unary(self, 'srn', n)
def __add__(self, other):
return Binary(self, _repeaterize(other), 'add')
def __sub__(self, other):
return Binary(self, _repeaterize(other), 'sub')
def __mul__(self, other):
return Binary(self, _repeaterize(other), 'mul')
def __mod__(self, other):
return Binary(self, _repeaterize(other), 'mod')
def __floordiv__(self, other):
return Binary(self, _repeaterize(other), 'div')
def __and__(self, other):
return Binary(self, _repeaterize(other), 'and')
def __or__(self, other):
return Binary(self, _repeaterize(other), 'or')
def __xor__(self, other):
return Binary(self, _repeaterize(other), 'xor')
def __rshift__(self, other):
return Binary(self, _repeaterize(other), 'sr')
def __lshift__(self, other):
return Binary(self, _repeaterize(other), 'sl')
def __eq__(self, other):
return Binary(self, _repeaterize(other), 'eq')
def __ne__(self, other):
return Binary(self, _repeaterize(other), 'ne')
def __gt__(self, other):
return Binary(self, _repeaterize(other), 'gt')
def __ge__(self, other):
return Binary(self, _repeaterize(other), 'ge')
def __lt__(self, other):
return Binary(self, _repeaterize(other), 'lt')
def __le__(self, other):
return Binary(self, _repeaterize(other), 'le')
def __radd__(self, other):
return Binary(_repeaterize(other), self, 'add')
def __rsub__(self, other):
return Binary(_repeaterize(other), self, 'sub')
def __rmul__(self, other):
return Binary(_repeaterize(other), self, 'mul')
def __rmod__(self, other):
return Binary(_repeaterize(other), self, 'mod')
def __rfloordiv__(self, other):
return Binary(_repeaterize(other), self, 'div')
def __rand__(self, other):
return Binary(_repeaterize(other), self, 'and')
def __ror__(self, other):
return Binary(_repeaterize(other), self, 'or')
def __rxor__(self, other):
return Binary(_repeaterize(other), self, 'xor')
def __rrshift__(self, other):
return Binary(_repeaterize(other), self, 'sr')
def __rlshift__(self, other):
return Binary(_repeaterize(other), self, 'sl')
def __req__(self, other):
return Binary(_repeaterize(other), self, 'eq')
def __rne__(self, other):
return Binary(_repeaterize(other), self, 'ne')
def __rgt__(self, other):
return Binary(_repeaterize(other), self, 'gt')
def __rge__(self, other):
return Binary(_repeaterize(other), self, 'ge')
def __rlt__(self, other):
return Binary(_repeaterize(other), self, 'lt')
def __rle__(self, other):
return Binary(_repeaterize(other), self, 'le')
def get_type(self):
return "integer"
def read(self, variable):
return Read(self, variable)
def available(self):
return Available(self)
def set_chip(self, chip):
if hasattr(self, "chip"):
raise ChipsSyntaxError(
"Stream is already part of a chip",
self.filename, self.lineno)
self.chip = chip
chip.streams.append(self)
if hasattr(self, "a"): self.a.set_chip(chip)
if hasattr(self, "b"): self.b.set_chip(chip)
if hasattr(self, "c"): self.c.set_chip(chip)
#streams sources
################################################################################
class Repeater(Stream, Unique):
"""
A stream which repeatedly yields the specified *value*.
The *Repeater* stream is one of the most fundamental streams available.
The width of the stream in bits is calculated automatically. The smallest
number of bits that can represent *value* in twos-complement format will be
used.
Examples::
>>> from chips import *
>>> c=Chip(
... Console(
... Printer(
... Repeater(5) #creates a 4 bit stream
... )
... )
... )
>>> c.reset()
>>> c.execute(100)
5
5
5
...
>>> c=Chip(
... Console(
... Printer(
... Repeater(10) #creates a 5 bit stream
... )
... )
... )
>>> c.reset()
>>> c.execute(100)
10
10
10
...
>>> c=Chip(
... Console(
... Printer(
... #This is shorthand for: Repeater(5)*Repeater(2)
... Repeater(5)*2
... )
... )
... )
>>> c.reset()
>>> c.execute(100)
10
10
10
...
"""
def __init__(self, value):
"""A Stream which repeatedly outputs a constant value.
Arguments:
value - a constant value to be output"""
self.value = value
self.bits = how_many_bits(value)
self.filename = getsourcefile(currentframe().f_back)
self.lineno = currentframe().f_back.f_lineno
Unique.__init__(self)
def get_bits(self):
return self.bits
def write_code(self, plugin):
plugin.write_repeater(self)
def __repr__(self):
return "Repeater(value={0})".format(self.value)
def reset(self):
pass
def get(self):
return self.value
class Counter(Stream, Unique):
"""
A Stream which yields numbers from *start* to *stop* in *step* increments.
A *Counter* is a versatile, and commonly used construct in device design,
they can be used to number samples, index memories and so on.
Example::
>>> from chips import *
>>> c=Chip(
... Console(
... Printer(
... Counter(0, 10, 2) #creates a 4 bit stream
... )
... )
... )
>>> c.reset()
>>> c.execute(100)
0
2
4
6
8
10
0
...
>>> c=Chip(
... Console(
... Printer(
... Counter(10, 0, -2) #creates a 4 bit stream
... )
... )
... )
>>> c.reset()
>>> c.execute(100)
10
8
6
4
2
0
10
...
"""
def __init__(self, start, stop, step):
"""A Stream which repeatedly outputs a constant value.
Arguments:
start - initial count value that will be output
stop - the last count value that will be output before wrapping
                round to start
step - the count step size"""
self.start = int(start)
self.stop = int(stop)
self.step = int(step)
self.bits = max((how_many_bits(start), how_many_bits(stop)))
self.filename = getsourcefile(currentframe().f_back)
self.lineno = currentframe().f_back.f_lineno
Unique.__init__(self)
def get_bits(self):
return self.bits
def write_code(self, plugin):
plugin.write_counter(self)
def reset(self):
self.count = self.start
def get(self):
val = self.count
if self.count == self.stop:
self.count = self.start
else:
self.count += self.step
return val
def __repr__(self):
return "Counter(start={0}, stop={1}, step={2})".format(
self.start,
self.stop,
            self.step)
class Stimulus(Stream, Unique):
"""
A Stream that allows a Python iterable to be used as a stream.
A Stimulus stream allows a transparent method to pass data from the Python
environment into the simulation environment. The sequence object is set at
run time using the set_simulation_data() method. The sequence object can be
any iterable Python sequence such as a list, tuple, or even a generator.
Example::
>>> from chips import *
>>> stimulus = Stimulus(8)
>>> c = Chip(Console(Printer(stimulus)))
>>> def count():
... i=0
... while True:
... yield i
... i+=1
...
>>> stimulus.set_simulation_data(count())
>>> c.reset()
>>> c.execute(100)
0
1
2
...
"""
def __init__(self, bits):
"""A Stream that allows a sequence object to be used as simulation stimulus
A source sequence should be set prior to simulation using the
Stimulus.set_simulation_data() method.
arguments:
bits - The resolution in bits of the stream"""
self.bits = bits
self.filename = getsourcefile(currentframe().f_back)
self.lineno = currentframe().f_back.f_lineno
Unique.__init__(self)
def get_bits(self): return self.bits
def write_code(self, plugin):
plugin.write_stimulus(self)
def reset(self):
pass
def get(self):
return resize(next(self.queue), self.bits)
def set_simulation_data(self, iterator, plugin=None):
if plugin is None:
self.queue = iter(iterator)
else:
plugin.set_simulation_data(self, iterator)
def __repr__(self):
return "Stimulus({0})".format(self.name, self.bits)
class InPort(Stream, Unique):
"""
A device input port stream.
An *InPort* allows a port pins of the target device to be used as a data
stream. There is no handshaking on the input port. The port pins are
sampled at the point when data is transfered by the stream. When
implemented in VHDL, the *InPort* provides double registers on the port
pins to synchronise data to the local clock domain.
Since it is not possible to determine the width of the stream in bits
automatically, this must be specified using the *bits* argument.
The *name* parameter allows a string to be associated with the input port.
In a VHDL implementation, *name* will be used as the port name in the
top level entity.
Example::
>>> from chips import *
>>> dip_switches = InPort("dip_switches", 8)
>>> s = Chip(SerialOut(Printer(dip_switches)))
"""
def __init__(self, name, bits):
"""A Stream of data obtained from input port pins
        arguments:
            name - A name for the port.
                The name will be used as the port name in the generated
                entity.
bits - The resolution in bits of the stream"""
self.name, self.bits = str(name), int(bits)
self.filename = getsourcefile(currentframe().f_back)
self.lineno = currentframe().f_back.f_lineno
Unique.__init__(self)
def get_bits(self): return self.bits
def write_code(self, plugin):
plugin.write_in_port(self)
def reset(self):
raise ChipsSimulationError("Inport ignored in native simulation")
def get(self):
raise ChipsSimulationError("Inport ignored in native simulation")
def __repr__(self):
return "InPort(name={0}, bits={1})".format(self.name, self.bits)
class SerialIn(Stream, Unique):
"""
A *SerialIn* yields data from a serial UART port.
*SerialIn* yields one data item from the serial input port for each
character read from the source stream. The stream is always 8 bits wide.
A *SerialIn* accepts an optional *name* argument which is used as the name
for the serial RX line in generated VHDL. The clock rate of the target
device in MHz can be specified using the *clock_rate* argument. The baud
rate of the serial input can be specified using the *baud_rate* argument.
Example::
>>> from chips import *
>>> #echo typed characters
>>> c = Chip(SerialOut(SerialIn()))
"""
def __init__(self, name="RX", clock_rate=50000000, baud_rate=115200):
"""A Stream of data obtained from input port pins
A source sequence should be set prior to simulation using the
Stimulus.set_simulation_data() method.
arguments:
name - A for the port.
The name will be prepended with OUT_in the component
entity.
bits - The resolution in bits of the stream"""
self.name = str(name)
self.clock_rate = int(clock_rate)
self.baud_rate = int(baud_rate)
self.filename = getsourcefile(currentframe().f_back)
self.lineno = currentframe().f_back.f_lineno
Unique.__init__(self)
def get_bits(self): return 8
def write_code(self, plugin):
plugin.write_serial_in(self)
def reset(self):
raise ChipsSimulationError("SerialIn ignored in native simulation")
def get(self):
raise ChipsSimulationError("SerialIn ignored in native simulation")
def __repr__(self):
        return "SerialIn(name={0}, clock_rate={1}, baud_rate={2})".format(
            self.name,
            self.clock_rate,
            self.baud_rate,
        )
class Output(Stream, Unique):
"""
An *Output* is a stream that can be written to by a process.
Any stream can be read from by a process. Only an *Output* stream can be
written to by a process. A process can be written to by using the *read*
method. The read method accepts one argument, an expression to write.
Example::
>>> from chips import *
>>> def tee(input_stream):
... output_stream_1 = Output()
... output_stream_2 = Output()
... temp = Variable(0)
... Process(input_stream.get_bits(),
... Loop(
... input_stream.read(temp),
... output_stream_1.write(temp),
... output_stream_2.write(temp),
... )
... )
... return output_stream_1, output_stream_2
>>> os_1, os_2 = tee(Counter(1, 3, 1))
>>> c = Chip(
... Console(
... Printer(os_1),
... ),
... Console(
... Printer(os_2),
... ),
... )
>>> c.reset()
>>> c.execute(100)
1
1
2
2
3
3
...
"""
def __init__(self):
"""create a process output"""
self.filename = getsourcefile(currentframe().f_back)
self.lineno = currentframe().f_back.f_lineno
Unique.__init__(self)
def set_chip(self, chip):
if hasattr(self.process, "chip"):
if self.process.chip is not chip:
raise ChipsSyntaxError(
"Process is already part of another Chip",
process.filename, process.lineno)
self.process.set_chip(chip)
Stream.set_chip(self, chip)
def set_process(self, process):
if hasattr(self, "process"):
if self.process is not process:
raise ChipsSyntaxError(
"Output is already part of a Process",
self.filename, self.lineno)
self.process = process
def write(self, variable):
"""write an expression to the process output"""
return Write(self, variable)
def get_bits(self):
return self.process.get_bits()
def write_code(self, plugin):
plugin.write_output(self)
def reset(self):
self.fifo=deque()
def put(self, data):
self.fifo.append(data)
def get(self):
try:
return self.fifo.popleft()
except IndexError:
return None
def __repr__(self):
return "Output() at {0}".format(id(self))
class ExternalIPDefinition:
def __init__(self,
name,
dependencies,
input_streams,
output_streams,
input_ports,
output_ports):
self.name = name
self.dependencies = dependencies
self.input_streams = input_streams
self.output_streams = output_streams
self.input_ports = input_ports
self.output_ports = output_ports
class ExternalIPInstance(Unique):
def __init__(self, input_streams, definition, inport_mapping,
outport_mapping):
self.filename = getsourcefile(currentframe().f_back)
self.lineno = currentframe().f_back.f_lineno
self.input_streams = input_streams
self.definition = definition
self.output_streams = []
for name, bits in self.definition.output_streams.iteritems():
self.output_streams.append(ExternalIPStream(self, bits))
self.inport_mapping = inport_mapping
self.outport_mapping = outport_mapping
for i in self.input_streams:
if hasattr(i, "receiver"):
raise ChipsSyntaxError(
"stream already has receiver",
self.filename,
self.lineno)
else:
i.receiver = self
no_streams_expected = len(self.definition.input_streams)
no_streams_actual = len(self.input_streams)
if no_streams_expected != no_streams_actual:
raise ChipsSyntaxError(
"External IP expects: {0} input streams, actual: {1}".format(
no_streams_expected,
no_streams_actual
),
self.filename,
self.lineno
)
expected_sizes = self.definition.input_streams.values()
for stream, expected_size in zip(self.input_streams, expected_sizes):
if expected_size != stream.get_bits():
raise ChipsSyntaxError(
"incorrect bit width, expected: {0} actual: {1}".format(
expected_size,
stream.get_bits()
),
self.filename,
self.lineno
)
Unique.__init__(self)
def set_system(self, system):
#this should only get called if the IP is added to a system
#ie. it is acting as a sink.
if self.output_streams:
raise ChipsSyntaxError(
"only data sinks can be added to systems",
self.filename,
self.lineno
)
for i in self.input_streams:
i.set_system(system)
system.streams.append(self)
def set_chip(self, chip):
chip.executables.append(self)
Stream.set_chip(self, chip)
def get_output_streams(self):
return self.output_streams
def write_code(self, plugin):
plugin.write_external_ip(self)
def write_input_code(self, output_stream, plugin):
if output_stream is self.output_streams[0]:
plugin.write_external_ip(self)
class ExternalIPStream(Stream, Unique):
def __init__(self, instance, bits):
"""Do not call this manually, ExternalIPStream is
automatically created by ExternalIPInstance"""
self.instance = instance
self.bits = bits
self.filename = getsourcefile(currentframe().f_back)
self.lineno = currentframe().f_back.f_lineno
Unique.__init__(self)
def set_chip(self, chip):
if hasattr(self, "chip"):
raise ChipsSyntaxError(
"stream is already part of a chip",
self.filename,
self.lineno
)
else:
self.chip = chip
chip.streams.append(self)
if self is self.instance.output_streams[0]:
for i in self.instance.input_streams:
i.set_chip(chip)
def get_bits(self):
return self.bits
def write_code(self, plugin):
self.instance.write_input_code(self, plugin)
def reset(self):
raise ChipsSimulationError(
"external ip cannot be natively simulated",
self.filename,
self.lineno
)
def get(self):
raise ChipsSimulationError(
"external ip cannot be natively simulated",
self.filename,
            self.lineno
)
#streams combinators
################################################################################
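# Coerce a plain integer operand into a Repeater stream so that expressions such as
# stream + 1 work transparently.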
def _repeaterize(potential_repeater):
if hasattr(potential_repeater, "write_code"):
return potential_repeater
else:
return Repeater(int(potential_repeater))
_functions = {
'add' : lambda a, b: a+b,
'sub' : lambda a, b: a-b,
'mul' : lambda a, b: a*b,
'div' : lambda a, b: c_style_division(a, b),
'mod' : lambda a, b: c_style_modulo(a, b),
'and' : lambda a, b: a&b,
'or' : lambda a, b: a|b,
'xor' : lambda a, b: a^b,
'sl' : lambda a, b: a<<b,
'sr' : lambda a, b: a>>b,
'eq' : lambda a, b: -int(a==b),
'ne' : lambda a, b: -int(a!=b),
'lt' : lambda a, b: -int(a<b),
'le' : lambda a, b: -int(a<=b),
'gt' : lambda a, b: -int(a>b),
'ge' : lambda a, b: -int(a>=b),
}
class Binary(Stream, Unique):
def __init__(self, a, b, function):
self.a, self.b, self.function = a, b, function
self.filename = getsourcefile(currentframe().f_back)
self.lineno = currentframe().f_back.f_lineno
self.function = function
self.binary_function = _functions[function]
self.stored_a = None
self.stored_b = None
Unique.__init__(self)
if hasattr(self.a, "receiver"):
raise ChipsSyntaxError(
"stream already has receiver",
self.filename,
self.lineno
)
else:
if not hasattr(self.a, "get"):
raise ChipsSyntaxError(
(
"Source must be a stream. " +
repr(self.a) +
" is not a stream."
),
self.filename,
self.lineno
)
self.a.receiver = self
if hasattr(self.b, "receiver"):
raise ChipsSyntaxError(
"stream already has receiver",
self.filename,
self.lineno
)
else:
if not hasattr(self.b, "get"):
raise ChipsSyntaxError(
(
"Source must be a stream. " +
                        repr(self.b) +
" is not a stream."
),
self.filename,
self.lineno
)
self.b.receiver = self
def get_bits(self):
bit_function = {
'add' : lambda x, y : max((x, y)) + 1,
'sub' : lambda x, y : max((x, y)) + 1,
'mul' : lambda x, y : x + y,
'div' : lambda x, y : max((x, y)) + 1,
'mod' : lambda x, y : max((x, y)),
'and' : lambda x, y : max((x, y)),
'or' : lambda x, y : max((x, y)),
'xor' : lambda x, y : max((x, y)),
'sl' : lambda x, y : x,
'sr' : lambda x, y : x,
'eq' : lambda x, y : 1,
'ne' : lambda x, y : 1,
'lt' : lambda x, y : 1,
'le' : lambda x, y : 1,
'gt' : lambda x, y : 1,
'ge' : lambda x, y : 1,
}
return bit_function[self.function](
self.a.get_bits(),
self.b.get_bits()
)
def write_code(self, plugin):
plugin.write_binary(self)
def reset(self):
pass
def get(self):
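        # Fetch one value from each operand stream, caching partial results; None is
        # propagated until both operands are available, then the operator is applied
        # and the result resized to the output width.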
if self.stored_a is None:
self.stored_a = self.a.get()
if self.stored_b is None:
self.stored_b = self.b.get()
if self.stored_a is None:
return None
if self.stored_b is None:
return None
val = self.binary_function(self.stored_a, self.stored_b)
self.stored_a = None
self.stored_b = None
return resize(val, self.get_bits())
_unary_functions = {
'not' : lambda a, b: not a,
'invert' : lambda a, b: ~a,
'sln' : lambda a, b: a << b,
'srn' : lambda a, b: a >> b,
'abs' : lambda a, b: abs(a),
}
class Unary(Stream, Unique):
def __init__(self, a, function, constant=0):
self.a, self.function, self.constant = a, function, constant
self.filename = getsourcefile(currentframe().f_back)
self.lineno = currentframe().f_back.f_lineno
self.function = function
self.unary_function = _unary_functions[function]
self.stored_a = None
Unique.__init__(self)
if hasattr(self.a, "receiver"):
raise ChipsSyntaxError(
"stream already has receiver",
self.filename,
self.lineno
)
else:
if not hasattr(self.a, "get"):
raise ChipsSyntaxError(
(
"Source must be a stream. " +
repr(self.a) +
" is not a stream."
),
self.filename,
self.lineno
)
self.a.receiver = self
def get_bits(self):
bit_function = {
'not' : lambda x : 1,
'invert' : lambda x : x,
'sln' : lambda x : x,
'srn' : lambda x : x,
'abs' : lambda x : x,
}
return bit_function[self.function](self.a.get_bits())
def write_code(self, plugin):
plugin.write_unary(self)
def reset(self):
pass
def get(self):
if self.stored_a is None:
self.stored_a = self.a.get()
if self.stored_a is None:
return None
val = self.unary_function(self.stored_a, self.constant)
self.stored_a = None
return resize(val, self.get_bits())
class Lookup(Stream, Unique):
"""
A *Lookup* is a stream yields values from a read-only look up table.
For each data item in the source stream, a *Lookup* will yield the
addressed value in the lookup table. A *Lookup* is basically a Read Only
Memory(ROM) with the source stream forming the address, and the *Lookup*
itself forming the data output.
Example::
>>> from chips import *
>>> def binary_2_gray(input_stream):
... return Lookup(input_stream, 0, 1, 3, 2, 6, 7, 5, 4)
>>> c = Chip(
... Console(
... Printer(binary_2_gray(Counter(0, 7, 1)))
... )
... )
>>> c.reset()
>>> c.execute(100)
0
1
3
2
6
7
5
4
0
...
The first argument to a *Lookup* is the source stream, all additional
arguments form the lookup table. If you want to use a Python sequence
object such as a tuple or a list to form the lookup table use the following
syntax::
>>> my_list = [0, 1, 3, 2, 6, 7, 5, 4]
... my_sequence = Lookup(Counter(0, 7, 1), *my_list)
"""
def __init__(self, source, *args):
self.a = source
self.args = [int(i) for i in args]
self.bits = max((how_many_bits(i) for i in args))
self.filename = getsourcefile(currentframe().f_back)
self.lineno = currentframe().f_back.f_lineno
Unique.__init__(self)
if hasattr(self.a, "receiver"):
raise ChipsSyntaxError(
"stream already has receiver",
self.filename,
self.lineno
)
else:
if not hasattr(self.a, "get"):
raise ChipsSyntaxError(
(
"Source must be a stream. " +
repr(self.a) +
" is not a stream."
),
self.filename,
self.lineno
)
self.a.receiver = self
def get_bits(self):
return self.bits
def write_code(self, plugin):
plugin.write_lookup(self)
def reset(self):
self.a.reset()
def get(self):
val = self.a.get()
if val is None: return None
if resize(val, self.a.get_bits()) > len(self.args)-1:
print self.filename,
print self.lineno
print val
raise ChipsSimulationError(
"lookup index too large",
self.filename,
self.lineno
)
if resize(val, self.a.get_bits()) < 0:
print self.filename,
print self.lineno
raise ChipsSimulationError(
"negative lookup index",
self.filename,
self.lineno
)
return self.args[resize(val, self.a.get_bits())]
class Fifo(Stream, Unique):
"""
A *Fifo* stores a buffer of data items.
A *Fifo* contains a fixed size buffer of objects obtained from the source
stream. A *Fifo* yields the data items in the same order in which they were
stored.
The first argument to a *Fifo*, is the source stream, the *depth* argument
determines the size of the Fifo buffer.
Example::
>>> from chips import *
>>> def scope(ADC_stream, trigger_level, buffer_depth):
... temp = Variable(0)
... count = Variable(0)
... buffer = Output()
...
... Process(16,
... Loop(
... ADC_stream.read(temp),
... If(temp > trigger_level,
... buffer.write(temp),
... count.set(buffer_depth - 1),
... While(count,
... ADC_stream.read(temp),
... buffer.write(temp),
... count.set(count-1),
... ),
... ),
... ),
... )
...
... return Printer(Fifo(buffer, buffer_depth))
...
>>> test_signal = Sequence(0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 5, 5, 5)
>>> c = Chip(Console(scope(test_signal, 0, 5)))
>>> c.reset()
>>> c.execute(100)
1
2
3
4
5
"""
def __init__(self, data_in, depth):
self.a = data_in
self.depth = depth
self.filename = getsourcefile(currentframe().f_back)
self.lineno = currentframe().f_back.f_lineno
Unique.__init__(self)
if hasattr(self.a, "receiver"):
raise ChipsSyntaxError(
"address_in already has receiver",
self.filename,
self.lineno
)
else:
if not hasattr(self.a, "get"):
raise ChipsSyntaxError(
(
"Source must be a stream. " +
repr(self.a) +
" is not a stream."
),
self.filename,
self.lineno
)
self.a.receiver = self
def set_chip(self, chip):
Stream.set_chip(self, chip)
def get_bits(self):
return self.a.get_bits()
def write_code(self, plugin):
plugin.write_fifo(self)
def reset(self):
self.a.reset()
def get(self):
return self.a.get()
class Array(Stream, Unique):
"""
An *Array* is a stream yields values from a writeable lookup table.
Like a *Lookup*, an *Array* looks up each data item in the *address_in*
stream, and yields the value in the lookup table. In an *Array*, the lookup
table is set up dynamically using data items from the *address_in* and
*data_in* streams. An *Array* is equivalent to a Random Access Memory (RAM)
with independent read, and write ports.
    An *Array* accepts *address_in*, *data_in* and *address_out* arguments as
source streams. The *depth* argument specifies the size of the lookup table.
Example::
>>> def video_raster_stream(width, height, row_stream, col_stream,
... intensity):
...
... pixel_clock = Counter(0, width*height, 1)
...
... pixstream = Array(
... address_in = (row_stream * width) + col_stream,
... data_in = intensity,
... address_out = pixel_clock,
... depth = width * height,
... )
...
... return pixstream
>>> pixstream = video_raster_stream(
... 64,
... 64,
... Repeater(32),
... Counter(0, 63, 1),
... Repeater(255),
... )
"""
def __init__(self, address_in, data_in, address_out, depth):
self.a = address_in
self.b = data_in
self.c = address_out
self.depth = depth
self.memory = {}
self.stored_a = None
self.stored_b = None
self.filename = getsourcefile(currentframe().f_back)
self.lineno = currentframe().f_back.f_lineno
Unique.__init__(self)
if hasattr(self.a, "receiver"):
raise ChipsSyntaxError(
"address_in already has receiver",
self.filename,
self.lineno
)
else:
if not hasattr(self.a, "get"):
raise ChipsSyntaxError(
(
"Source must be a stream. " +
repr(self.a) +
" is not a stream."
),
self.filename,
self.lineno
)
self.a.receiver = self
if hasattr(self.b, "receiver"):
raise ChipsSyntaxError(
"data_in already has receiver",
self.filename,
self.lineno
)
else:
if not hasattr(self.b, "get"):
raise ChipsSyntaxError(
(
"Source must be a stream. " +
                        repr(self.b) +
" is not a stream."
),
self.filename,
self.lineno
)
self.b.receiver = self
if hasattr(self.c, "receiver"):
raise ChipsSyntaxError(
"address_out already has receiver",
self.filename,
self.lineno
)
else:
if not hasattr(self.c, "get"):
raise ChipsSyntaxError(
(
"Source must be a stream. " +
                        repr(self.c) +
" is not a stream."
),
self.filename,
self.lineno
)
self.c.receiver = self
def set_chip(self, chip): # a RAM behaves a sink for data and address in
chip.executables.append(self)
Stream.set_chip(self, chip)
def get_bits(self):
return self.b.get_bits()
def write_code(self, plugin):
plugin.write_array(self)
def reset(self):
self.a.reset()
self.b.reset()
self.c.reset()
def execute(self):
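        # Write port: consume one address/data pair per cycle (when both are available)
        # and store it in the memory dictionary.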
if self.stored_a is None:
self.stored_a = self.a.get()
if self.stored_b is None:
self.stored_b = self.b.get()
if self.stored_a is None:
return None
if self.stored_b is None:
return None
self.memory[self.stored_a] = self.stored_b
self.stored_a = None
self.stored_b = None
def get(self):
address_out = self.c.get()
if address_out is None:
return None
return self.memory[address_out]
class Decoupler(Stream, Unique):
"""
A *Decoupler* removes stream handshaking.
Usually, data is transfered though streams using blocking transfers. When a
process writes to a stream, execution will be halted until the receiving
process reads the data. While this behaviour greatly simplifies the design
of parallel processes, sometimes Non-blocking transfers are needed. When a
data item is written to a *Decoupler*, it is stored. When a *Decoupler* is
    read from, the last stored value is yielded. Neither the
    sending nor the receiving process ever blocks. This also means that the
number of data items written into the *Decoupler* and the number read out
do not have to be the same.
A *Decoupler* accepts only one argument, the source stream.
Example::
>>> from chips import *
>>> def time_stamp_data(data_stream):
...
... us_time = Output()
... time = Variable(0)
... Process(8,
... Loop(
... WaitUs(),
... time.set(time + 1),
... us_time.write(time),
... ),
... )
...
... output_stream = Output()
... temp = Variable(0)
... Process(8,
... Loop(
... data_stream.read(temp),
... output_stream.write(temp),
... us_time.read(temp),
... output_stream.write(temp),
... ),
... )
...
... return output_stream
>>> time_stamped_stream = time_stamp_data(SerialIn())
"""
def __init__(self, source):
self.a = source
self.filename = getsourcefile(currentframe().f_back)
self.lineno = currentframe().f_back.f_lineno
Unique.__init__(self)
if hasattr(self.a, "receiver"):
raise ChipsSyntaxError(
"stream already has receiver",
self.filename,
self.lineno
)
else:
if not hasattr(self.a, "get"):
raise ChipsSyntaxError(
(
"Source must be a stream. " +
repr(self.a) +
" is not a stream."
),
self.filename,
self.lineno
)
self.a.receiver = self
def get_bits(self):
return self.a.get_bits()
def write_code(self, plugin):
plugin.write_decoupler(self)
def reset(self):
self.a.reset()
def get(self):
return self.a.get()
class Resizer(Stream, Unique):
"""
A *Resizer* changes the width, in bits, of the source stream.
The *Resizer* takes two arguments, the source stream, and the *width* in
    bits. The *Resizer* will truncate data if it is reducing the width, and
sign extend if it is increasing the width.
Example::
>>> from chips import *
>>> a = InPort(name="din", bits=8) #a has a width of 8 bits
>>> a.get_bits()
8
>>> b = a + 1 #b has a width of 9 bits
>>> b.get_bits()
9
>>> c = Resizer(b, 8) #c is truncated to 8 bits
>>> c.get_bits()
8
>>> Chip(OutPort(c, name="dout"))
Chip(...
"""
def __init__(self, source, bits):
self.a = source
self.bits = bits
self.filename = getsourcefile(currentframe().f_back)
self.lineno = currentframe().f_back.f_lineno
Unique.__init__(self)
if hasattr(self.a, "receiver"):
raise ChipsSyntaxError(
"stream already has receiver",
self.filename,
self.lineno
)
else:
if not hasattr(self.a, "get"):
raise ChipsSyntaxError(
(
"Source must be a stream. " +
repr(self.a) +
" is not a stream."
),
self.filename,
self.lineno
)
self.a.receiver = self
def get_bits(self):
return self.bits
def write_code(self, plugin):
plugin.write_resizer(self)
def reset(self):
pass
def get(self):
val = self.a.get()
if val is None: return None
return resize(val, self.get_bits())
class Printer(Stream, Unique):
"""
A *Printer* turns data into decimal ASCII characters.
    Each data item is turned into the ASCII representation of its decimal
value, terminated with a newline character. Each character then forms a
data item in the *Printer* stream.
A *Printer* accepts a single argument, the source stream. A *Printer*
stream is always 8 bits wide.
Example::
>>> from chips import *
>>> #print the numbers 0-10 to the console repeatedly
>>> c=Chip(
... Console(
... Printer(
... Counter(0, 10, 1),
... ),
... ),
... )
>>> c.reset()
>>> c.execute(100)
0
1
2
3
4
...
"""
def __init__(self, source):
self.a = source
self.filename = getsourcefile(currentframe().f_back)
self.lineno = currentframe().f_back.f_lineno
Unique.__init__(self)
if hasattr(self.a, "receiver"):
raise ChipsSyntaxError(
"stream already has receiver",
self.filename,
self.lineno
)
else:
if not hasattr(self.a, "get"):
raise ChipsSyntaxError(
(
"Source must be a stream. " +
repr(self.a) +
" is not a stream."
),
self.filename,
self.lineno
)
self.a.receiver = self
def get_bits(self):
return 8
def get_num_digits(self):
return len(str(2**(self.a.get_bits()-1)))
def write_code(self, plugin):
plugin.write_printer(self)
def reset(self):
self.string = []
def get(self):
if self.string:
return ord(self.string.popleft())
else:
val = self.a.get()
if val is None: return None
self.string = deque(str(val)+'\n')
return ord(self.string.popleft())
class HexPrinter(Stream, Unique):
"""
A *HexPrinter* turns data into hexadecimal ASCII characters.
    Each data item is turned into the ASCII representation of its
hexadecimal value, terminated with a newline character. Each character then
forms a data item in the *HexPrinter* stream.
A *HexPrinter* accepts a single argument, the source stream. A *HexPrinter*
stream is always 8 bits wide.
Example::
>>> from chips import *
>>> #print the numbers 0x0-0x10 to the console repeatedly
>>> c=Chip(
... Console(
... HexPrinter(
... Counter(0x0, 0x10, 1),
... ),
... ),
... )
>>> c.reset()
>>> c.execute(1000)
0
1
2
3
4
5
6
7
8
9
a
b
...
"""
def __init__(self, source):
self.a = source
self.filename = getsourcefile(currentframe().f_back)
self.lineno = currentframe().f_back.f_lineno
Unique.__init__(self)
if hasattr(self.a, "receiver"):
raise ChipsSyntaxError(
"stream already has receiver",
self.filename,
self.lineno
)
else:
if not hasattr(self.a, "get"):
raise ChipsSyntaxError(
(
"Source must be a stream. " +
repr(self.a) +
" is not a stream."
),
self.filename,
self.lineno
)
self.a.receiver = self
def get_bits(self):
return 8
def get_num_digits(self):
maxval = 2**(self.a.get_bits()-1)
digits = len(hex(maxval)[2:])
return digits
def write_code(self, plugin):
plugin.write_hex_printer(self)
def reset(self):
self.string = []
def get(self):
if self.string:
return ord(self.string.popleft())
else:
val = self.a.get()
if val is None: return None
self.string = deque(hex(val)[2:]+"\n")
            return ord(self.string.popleft())
/Kyoukai-2.2.1.tar.gz/Kyoukai-2.2.1/README.rst
Kyōkai (境界)
-------------
|Coverage| |CircleCI|
Kyōkai is a fast asynchronous Python server-side web framework. It is
built upon `asyncio <https://docs.python.org/3/library/asyncio.html>`__
and the `Asphalt <https://github.com/asphalt-framework/asphalt>`__
framework for an extremely fast web server.
Setting up a Kyōkai app is incredibly simple. Here's a simple server
that echoes your client's headers:
.. code:: python
import json
from kyoukai import Kyoukai, HTTPRequestContext
kyk = Kyoukai("example_app")
@kyk.route("/")
async def index(ctx: HTTPRequestContext):
        return json.dumps(ctx.request.headers), 200, {"Content-Type": "application/json"}
kyk.run()
For more information, see the docs at https://mirai.veriny.tf.
.. |Coverage| image:: https://codecov.io/github/SunDwarf/Kyoukai/coverage.svg
:target: https://codecov.io/gh/SunDwarf/Kyoukai
.. |CircleCI| image:: https://img.shields.io/circleci/project/SunDwarf/Kyoukai.svg?maxAge=2592000
:target: https://circleci.com/gh/SunDwarf/Kyoukai/
| PypiClean |
/GTW-1.2.6.tar.gz/GTW-1.2.6/__test__/ac_query.py
from __future__ import print_function, unicode_literals
_attr_ac_query = """
>>> scope = Scaffold.scope (%(p1)s, %(n1)s) # doctest:+ELLIPSIS
Creating new scope MOMT__...
>>> PAP = scope.PAP
>>> SRM = scope.SRM
>>> p1 = PAP.Person ("Glueck", "Martin")
>>> p2 = PAP.Person ("Tanzer", "Christian", "", "Mag.", lifetime = dict (start = u"26.9.1959", raw = True))
>>> p3 = PAP.Person ("Franz-Ferdinand", "Karl")
>>> p4 = PAP.Person ("Tanzer", "Egon", lifetime = dict (start = u"1907-03-08", finish = "1994-08-04", raw = True))
>>> s2 = SRM.Sailor (p2)
>>> for value in "Ma", "martin", "CHRi" :
... q = PAP.Person.AQ.first_name.AC (value)
... for o in (p1, p2, p3) :
... print (value, o.first_name, q (o))
Ma martin True
Ma christian False
Ma karl False
martin martin True
martin christian False
martin karl False
CHRi martin False
CHRi christian True
CHRi karl False
>>> for value in "Gl", "Glueck", "Ferdinand" :
... q = PAP.Person.AQ.last_name.AC (value)
... for o in (p1, p2, p3) :
... print (value, o.last_name, q (o))
Gl glueck True
Gl tanzer False
Gl franz-ferdinand False
Glueck glueck True
Glueck tanzer False
Glueck franz-ferdinand False
Ferdinand glueck False
Ferdinand tanzer False
Ferdinand franz-ferdinand True
>>> q1 = PAP.Person.AQ.lifetime.AC (dict (start = "1959-09-26"))
>>> q2 = PAP.Person.AQ.lifetime.AC (dict (start = "1907-03-08", finish = "1994-08-04"))
>>> q3 = PAP.Person.AQ.lifetime.AC (dict (finish = "1994-08-04"))
>>> q4 = PAP.Person.lifetime.AQ.EQ (dict (start = "1907", finish = "1994"))
>>> q5 = PAP.Person.first_name.AQ.CONTAINS ("ti")
>>> q6 = PAP.Person.AQ.lifetime.AC (dict (start = "1959"))
>>> q7 = PAP.Person.AQ.lifetime.start.AC ("1959-09-26")
>>> print (q1)
Q.lifetime.start == datetime.date(1959, 9, 26)
>>> print (q2)
<Filter_And [Q.lifetime.finish == datetime.date(1994, 8, 4), Q.lifetime.start == datetime.date(1907, 3, 8)]>
>>> print (q4)
<Filter_And [Q.lifetime.finish.between (datetime.date(1994, 1, 1), datetime.date(1994, 12, 31)), Q.lifetime.start.between (datetime.date(1907, 1, 1), datetime.date(1907, 12, 31))]>
>>> print (q5)
Q.first_name.contains ('ti',)
>>> print (q6)
Q.lifetime.start.between (datetime.date(1959, 1, 1), datetime.date(1959, 12, 31))
>>> print (q7)
Q.lifetime.start == datetime.date(1959, 9, 26)
>>> print (" and ".join (str (p) for p in q2.predicates))
Q.lifetime.finish == datetime.date(1994, 8, 4) and Q.lifetime.start == datetime.date(1907, 3, 8)
>>> PAP.Person.query_s (q1).all ()
[PAP.Person ('tanzer', 'christian', '', 'mag.')]
>>> PAP.Person.query_s (q2).all ()
[PAP.Person ('tanzer', 'egon', '', '')]
>>> PAP.Person.query_s (q1 | q3).all ()
[PAP.Person ('tanzer', 'christian', '', 'mag.'), PAP.Person ('tanzer', 'egon', '', '')]
>>> PAP.Person.query_s (q4).all ()
[PAP.Person ('tanzer', 'egon', '', '')]
>>> prepr ((list (p.ui_display for p in PAP.Person.query_s (q5))))
['Glueck Martin', 'Tanzer Christian, Mag.']
>>> prepr ((PAP.Person.query_s (q6).all ()))
[PAP.Person ('tanzer', 'christian', '', 'mag.')]
>>> prepr ((PAP.Person.query_s (q7).all ()))
[PAP.Person ('tanzer', 'christian', '', 'mag.')]
>>> q = PAP.Person.AQ.last_name.AC ("Franz")
>>> print (" or ".join (str (p) for p in q.predicates))
Q.last_name.startswith ('franz',) or Q.last_name.contains ('-franz',)
>>> q = PAP.Person.AQ.last_name.AC ("Franz-F")
>>> print (q)
Q.last_name.startswith ('franz-f',)
>>> qs1 = SRM.Sailor.AQ.left.AC (dict (last_name = "Tan"))
>>> qs2 = SRM.Sailor.AQ.left.last_name.AC ("Tan")
>>> print (qs1)
<Filter_Or [Q.left.last_name.startswith ('tan',), Q.left.last_name.contains ('-tan',)]>
>>> print (qs2)
<Filter_Or [Q.left.last_name.startswith ('tan',), Q.left.last_name.contains ('-tan',)]>
>>> SRM.Sailor.query_s (qs1).all ()
[SRM.Sailor (('tanzer', 'christian', '', 'mag.'), '', None, '')]
>>> SRM.Sailor.query_s (qs2).all ()
[SRM.Sailor (('tanzer', 'christian', '', 'mag.'), '', None, '')]
>>> a1 = PAP.Address ("Langstrasse 4", "2244", "Spannberg", "Austria")
>>> a2 = PAP.Address ("Glasauergasse 32", "1130", "Wien", "Austria")
>>> for value in "22", "11", "10" :
... q = PAP.Address.AQ.zip.AC (value)
... for o in (a1, a2) :
... print (value, o.zip, q (o))
22 2244 True
22 1130 False
11 2244 False
11 1130 True
10 2244 False
10 1130 False
>>> SRM = scope.SRM
>>> opti = SRM.Boat_Class ("Optimist", max_crew = 1)
>>> b1 = SRM.Boat.instance_or_new ('Optimist', u"1107", u"AUT", raw = True) ### 1
>>> b2 = SRM.Boat.instance_or_new ('Optimist', u"1208", u"AUT", raw = True) ### 2
>>> for value in "11", "12" :
... q = SRM.Boat.AQ.sail_number.AC (value)
... for o in (b1, b2) :
... print (value, o.sail_number, q (o))
11 1107 True
11 1208 False
12 1107 False
12 1208 True
>>> a3 = PAP.Address ("Glasauergasse 32/3", "1130", "Wien", "Austria", raw = True)
>>> a3p = PAP.Address_Position (a3, position = dict (lat = "48.190111", lon = "16.26867"), raw = True)
>>> a3p.position
MOM.Position (48.190111, 16.26867)
>>> a3.gps
PAP.Address_Position (('glasauergasse 32/3', '1130', 'wien', 'austria'))
>>> a4 = PAP.Address ("Glasauergasse 32/2", "1130", "Wien", "Austria", raw = True)
>>> a4p = PAP.Address_Position (a4, position = ("48d 11m 25s", "16d 16m 7s"), raw = True)
>>> a4p.position
MOM.Position (48.1902777778, 16.2686111111)
>>> print (a4p)
(('glasauergasse 32/2', '1130', 'wien', 'austria'))
>>> print (a4p.position)
(48.1902777778, 16.2686111111)
>>> print (a4p.attr_prop ("position").as_code (a4p.position))
(48.1902777778, 16.2686111111)
>>> p42 = scope.MOM.Position (42.4242, 137.137137)
>>> p42
MOM.Position (42.4242, 137.137137)
>>> print (p42.as_code ())
MOM.Position (42.4242, 137.137137)
>>> print (p42.attr_as_code ())
42.4242, 137.137137
>>> p42r = scope.MOM.Position ("42d42m42s", "137d13m7.137s", raw = True)
>>> p42r
MOM.Position (42.7116666667, 137.218649167)
>>> list (PAP.Person.raw_query_attrs (["first_name"], dict (first_name = "Martin")))
[Q.first_name == 'martin']
>>> list (PAP.Address_Position.raw_query_attrs (["position"], dict (position = dict (lat = "48.190111"))))
[Q.position.lat == 48.190111]
>>> apq = list (PAP.Address_Position.raw_query_attrs (["position"], dict (position = dict (lat = "48d 11m 25s")))) [0]
>>> prepr (apq)
Q.position.lat == 48.1902777778
>>> PAP.Address_Position.query_s (apq).all ()
[PAP.Address_Position (('glasauergasse 32/2', '1130', 'wien', 'austria'))]
"""
_epk_splitter_test = """
>>> scope = Scaffold.scope (%(p1)s, %(n1)s) # doctest:+ELLIPSIS
Creating new scope MOMT__...
>>> PAP = scope.PAP
>>> prepr ((scope.PAP.Person.epk_splitter ("Ma")))
[('Ma',)]
>>> prepr ((scope.PAP.Person.epk_splitter ("Martin G")))
[('Martin G',), ('Martin', 'G')]
>>> prepr ((scope.PAP.Person.epk_splitter ("Gl Ma")))
[('Gl Ma',), ('Gl', 'Ma')]
>>> prepr ((scope.PAP.Person.epk_splitter ("Van der Bel")))
[('Van der Bel',), ('Van der', 'Bel'), ('Van', 'der Bel')]
"""
_ac_query = """
>>> scope = Scaffold.scope (%(p1)s, %(n1)s) # doctest:+ELLIPSIS
Creating new scope MOMT__...
>>> PAP = scope.PAP
>>> _ = PAP.Person ("Glueck", "Martin")
>>> _ = PAP.Person ("Tanzer", "Christian", "", "Mag.")
>>> _ = PAP.Person ("Franz-Ferdinand", "Karl")
>>> _ = PAP.Person ("Van der Bellen", "Alexander")
>>> _ = PAP.Person ("van Persie", "Robin")
>>> scope.commit ()
>>> for acs in ("Ma", "Ta", "Van", "Van der B") :
... for p, qs in enumerate (PAP.Person.ac_query_auto_split (acs)) :
... print (p, acs)
... for o in sorted (qs, key = lambda p : p.last_name) :
... print (" ", o)
0 Ma
('glueck', 'martin', '', '')
('tanzer', 'christian', '', 'mag.')
0 Ta
('tanzer', 'christian', '', 'mag.')
0 Van
('van der bellen', 'alexander', '', '')
('van persie', 'robin', '', '')
0 Van der B
('van der bellen', 'alexander', '', '')
1 Van der B
2 Van der B
"""
from _GTW.__test__.model import *
__test__ = dict \
( Scaffold.create_test_dict
( dict
( attr_ac_query = _attr_ac_query
, ac_query = _ac_query
)
)
, ** Scaffold.create_test_dict
(dict (epk_splitter = _epk_splitter_test), backends = ("HPS", ))
)
### __END__ GTW.__test__.ac_query | PypiClean |
/Flask-Colorpicker-0.9.tar.gz/Flask-Colorpicker-0.9/README.md | <h1 align='center'>flask_colorpicker</h1>
<h3 align='center'>
A Flask extension that adds the Spectrum jQuery color picker to your templates. It makes adding and configuring multiple color pickers at a time much easier and less time consuming.
</h3>
## Install :
#### - With pip:
> - `pip install Flask-Colorpicker` <br />
#### - From the source:
> - `git clone https://github.com/mrf345/flask_colorpicker.git`<br />
> - `cd flask_colorpicker` <br />
> - `python setup.py install`
## Setup :
#### - Inside the Flask app:
```python
from flask import Flask, render_template
from flask_bootstrap import Bootstrap
from flask_colorpicker import colorpicker
app = Flask(__name__)
Bootstrap(app)
colorpicker(app)
```
#### - inside the jinja template
```jinja
{% extends 'bootstrap/base.html' %}
{% block scripts %}
{{ super() }}
{{ colorpicker.loader() }}
{{ colorpicker.picker(ids=[".cp"]) }}
{% endblock %}
{% block content %}
<form class="verticalform">
<input type="text" class="form-control cp" />
</form>
{% endblock %}
```
## Settings:
#### - Customize:
>The accepted arguments that can be passed to the `colorpicker.picker()` function are as follows:
```python
def picker(self, ids=[".colorpicker"], # list of ids of elements to assign the colorpicker to
default_color='rgb(0,0,255,0.5)', # default color to start with
color_format='rgb', # color format to use
showAlpha='true', # enable or disable transparency
showInput='false', # display or hide the text input field inside the picker
showButtons='false', # display or hide buttons
allowEmpty='true'): # allow empty input
```
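For example, to bind the picker to elements with class `.cp`, output hex colors and hide the transparency slider, the call might look like the following (an illustrative sketch based on the signature above; the `.cp` selector is just a placeholder):
```jinja
{{ colorpicker.picker(ids=[".cp"], color_format='hex', showAlpha='false') }}
```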
#### - Local source:
> By default the extension loads the Spectrum plugin from [a remote CDN][25530337]. You can configure it to load the plugin locally instead by passing a list of two files, .js and .css, to the colorpicker module, like so:
```python
colorpicker(app=app, local=['static/js/spectrum.js', 'static/css/spectrum.css'])
```
_The order in which the list items are passed does not matter; each file is detected automatically by its extension_
[25530337]: https://cdnjs.com/libraries/spectrum "Spectrum CDN"
## Credit:
> - [Spectrum][33c1000c]: jQuery color picker.
[33c1000c]: https://github.com/bgrins/spectrum "Spectrum website"
| PypiClean |
/Faker-19.3.1.tar.gz/Faker-19.3.1/faker/providers/job/es/__init__.py | from ... import ElementsType
from .. import Provider as BaseProvider
class Provider(BaseProvider):
# Source:
# https://www.ilo.org/public/spanish/bureau/stat/isco/docs/struct08.xls
jobs: ElementsType[str] = (
"Abogado",
"Acarreador de agua",
"Recolector de leña",
"Ayudante de cámara",
"Actor",
"Administrador de sistemas",
"Agente de administración tributaria",
"Agente de aduanas",
"Inspector de fronteras",
"Agente de bolsa",
"Agente de compras",
"Consignatario",
"Agente de empleo",
"Agente de seguros",
"Agente de servicios de expedición de licencias y permisos",
"Agente de servicios de seguridad social",
"Agente inmobiliario",
"Agricultor",
"Agrónomo",
"Albañil",
"Alfarero",
"Analista de gestión y organización",
"Analista de sistemas",
"Analista financiero",
"Aparejador",
"Empalmador de cables",
"Curtidor",
"Apicultor",
"Sericultor",
"Archivista",
"Curador de museos",
"Arquitecto",
"Paisajista",
"Artesano",
"Artista plástico",
"Asesor financiero y en inversiones",
"Asesor de inversiones",
"Asistente de venta",
"Astrólogo",
"Adivinador",
"Deportista",
"Audiólogo",
"Escritor",
"Auxiliar de maestro",
"Auxiliar de servicio de abordo",
"Auxiliar laico de las religión",
"Avicultor",
"Ayudante de ambulancia",
"Ayudante de cocina",
"Bailarín",
"Coreógrafo",
"Barnizador",
"Barrendero",
"Bibliotecarios",
"Focumentalista",
"Biólogo",
"Botánico",
"Zoólogo",
"Zoólogo",
"Bombero",
"Buzo",
"Cajero de banco",
"Cajero",
"Tipógrafo",
"Camarero de barra",
"Camarero de mesa",
"Capitán decubierta",
"Oficial de cubierta",
"Carnicero",
"Pescadero",
"Carpintero",
"Cartógrafo",
"Agrimensor",
"Catador de alimentos y bebidas",
"Catador de bebidas",
"Cazador",
"Tramper",
"Chapista",
"Calderero",
"Chef",
"Clasificador de desechos",
"Clasificador de productos",
"Cobrador",
"Cocinero",
"Cocinero de comidas rápidas",
"Codificador de datos",
"Corrector de pruebas de imprenta",
"Comerciante de tiendas",
"Conductor de autobús",
"Conductor de tranvía",
"Conductor de automóviles",
"Conductor de taxis",
"Conductor de camiones pesados",
"Conductor de motocicletas",
"Conductor de vehículos accionados a pedal o a brazo",
"Conductor de vehículos y máquinas de tracción animal",
"Conserje",
"Constructor de casas",
"Contable",
"Controlador de instalaciones de procesamiento de productos químicos",
"Controlador de procesos",
"Controlador de tráfico aéreo",
"Costurero",
"Bordador",
"Criador de ganado",
"Cristalero",
"Cuidador de animales",
"Cuidador de niños",
"Declarante de aduana",
"Gestor de aduana",
"Delineante",
"Dibujante técnico",
"Demostrador de tiendas",
"Dentista",
"Ayudante de odontología",
"Desarrollador de software",
"Desarrollador Web y multimedia",
"Nutricionista",
"Dinamitero",
"Director de servicios de bienestar social",
"Director de cine",
"Director de teatro",
"Director de empresas de abastecimiento, distribución y afines",
"Director de empresas de construcción",
"Director de explotaciones de minería",
"Director de industrias manufactureras",
"Director de investigación y desarrollo",
"Director de políticas y planificación",
"Director de producción agropecuaria y silvicultura",
"Director de producción de piscicultura y pesca",
"Director de publicidad y relaciones públicas",
"Director de recursos humanos",
"Director de servicios de cuidado de las personas de edad",
"Director de servicios de cuidados infantiles",
"Director de servicios de educación",
"Director de servicios de salud",
"Director de servicios de tecnología de la información y las comunicaciones",
"Director de ventas y comercialización",
"Director financiero",
"Gerente general",
"Diseñador de productos",
"Diseñador de prendas",
"Diseñador gráfico",
"Diseñador multimedia",
"Diseñador de bases de datos",
"Administrador de bases de datos",
"Diseñador de interior",
"Decorador de interior",
"Ebanista",
"Economista",
"Ecónomo y mayordomos domésticos",
"Mayordomo doméstico",
"Educador para necesidades especiales",
"Electricista de obras",
"Electrotécnico",
"Empacador manual",
"Empleado de agencia de viajes",
"Empleado de archivos",
"Empleado de biblioteca",
"Empleado de centro de llamadas",
"Empleado de contabilidad y cálculo de costos",
"Empleado de control de abastecimientos e inventario",
"Empleado de servicios de apoyo a la producción",
"Empleado de servicios de correos",
"Empleado de servicios de transporte",
"Empleado de servicios estadísticos, financieros y de seguros",
"Empleado de ventanillas de informaciones",
"Empleado del servicio de personal",
"Empleado encargado de las nóminas",
"Encuadernador",
"Ensamblador de equipos eléctricos",
"Ensamblador de equipos electrónicos",
"Ensamblador de maquinaria mecánica",
"Entrenador deportivo",
"Árbitro deportivo",
"Entrevistador de encuestas",
"Entrevistador de investigaciones de mercados",
"Escribiente público",
"Especialista en formación del personal",
"Especialista en métodos pedagógicos",
"Especialista en políticas de administración",
"Especialista en políticas y servicios de personal",
"Especialista en tratamientos de belleza",
"Expendedor de gasolineras",
"Fabricante de instrumentos musicales",
"Afinador de instrumentos musicales",
"Farmacéutico",
"Filósofo",
"Historiador",
"Especialista en ciencias políticas",
"Físico",
"Astrónomos",
"Fisioterapeuta",
"Fontanero",
"Fotógrafo",
"Fumigador",
"Controlador de plagas y malas hierbas",
"Geólogo",
"Ggeofísico",
"Gerente de centros deportivos, de esparcimiento y culturales",
"Gerente de comercios al por mayor y al por menor",
"Gerente de hoteles o restaurantes",
"Gerente de sucursales de bancos, de servicios financieros y de seguros",
"Grabador de datos",
"Guardafrenos",
"Guardagujas",
"Agente de maniobras",
"Guardián de prisión",
"Guardia de protección",
"Guía de turismo",
"Herramentista",
"Herrero",
"Gorjadore",
"Impresor",
"Ingeniero civil",
"Ingeniero de minas",
"Ingeniero metalúrgico",
"Ingeniero electricista",
"Ingeniero electrónico",
"Ingeniero en telecomunicaciones",
"Ingeniero industrial",
"Ingeniero mecánico",
"Ingeniero medioambiental",
"Ingeniero químico",
"Inspector de la salud laboral",
"Inspector medioambiental y afines",
"Inspector de policía",
"Detective",
"Instalador de material aislante y de insonorización",
"Instalador y reparador de líneas eléctricas",
"Instalador y reparador en tecnología de la información y las comunicaciones",
"Instructor de autoescuela",
"Instructor de educación física y actividades recreativas",
"Instructor en tecnologías de la información",
"Jefe de pequeñas poblaciones",
"Joyero",
"Orfebre",
"Platero",
"Juez",
"Lavador de vehículos",
"Lavador de ventanas",
"Lavandero",
"Planchador manuales",
"Limpiador de fachadas",
"Deshollinador",
"Limpiador y asistente de oficinas, hoteles y otros establecimientos",
"Limpiador y asistente doméstico",
"Locutor de radio",
"Locutor de televisión",
"Maestro de enseñanza primaria",
"Maestro preescolar",
"Mampostero",
"Labrante",
"Tronzador",
"Grabador de piedra",
"Maquinista de locomotoras",
"Marinero de cubierta",
"Matemático",
"Actuario",
"Estadístico",
"Mecánico y ajustador electricista",
"Mecánico y reparador de instrumentos de precisión",
"Mecánico y reparador de máquinas agrícolas e industriales",
"Mecánico y reparador de motores de avión",
"Mecánico y reparador de vehículos de motor",
"Mecánico y reparador en electrónica",
"Mecánico-montador de instalaciones de refrigeración y climatización",
"Médico especialista",
"Médico general",
"Mensajero",
"Mandader",
"Maleter",
"Repartidor",
"Meteorólogo",
"Minero",
"Operador de instalaciones mineras",
"Modelo de moda, arte y publicidad",
"Moldeador y machero",
"Montador de estructuras metálicas",
"Músico",
"Cantante",
"Compositor",
"Oficial de las fuerzas armadas",
"Oficial de préstamos y créditos",
"Oficial maquinistas en navegación",
"Oficinista general",
"Operador de autoelevadoras",
"Operador de grúas y aparatos elevadores",
"Operador de incineradores, instalaciones de tratamiento de agua",
"Operador de instalaciones de tratamiento de agua",
"Operador de instalaciones de procesamiento de la madera",
"Operador de instalaciones de procesamiento de metales",
"Operador de instalaciones de procesamiento de minerales y rocas",
"Operador de instalaciones de producción de energía",
"Operador de instalaciones de refinación de petróleo y gas natural",
"Operador de instalaciones de vidriería y cerámica",
"Operador de instalaciones para la preparación de pasta para papel y papel",
"Operador de maquinaria agrícola y forestal móvil",
"Operador de máquinas de blanqueamiento, teñido y limpieza de tejidos",
"Operador de máquinas de coser",
"Operador de máquinas de embalaje, embotellamiento y etiquetado ",
"Operador de máquinas de movimiento de tierras",
"Operador de máquinas de preparación de fibras, hilado y devanado",
"Operador de máquinas de procesamiento de texto y mecanógrafos",
"Operador de máquinas de tratamiento de pieles y cueros",
"Operador de máquinas de vapor y calderas",
"Operador de máquinas lavarropas",
"Operador de máquinas para elaborar alimentos y productos afines",
"Operador de máquinas para fabricar cemento y otros productos minerales",
"Operador de máquinas para fabricar productos de caucho",
"Operador de máquinas para fabricar productos de material plástico",
"Operador de máquinas para fabricar productos de papel",
"Operador de máquinas para fabricar productos fotográficos",
"Operador de máquinas para la fabricación de calzado",
"Operador de máquinas pulidoras, galvanizadoras y recubridoras de metales ",
"Operador de plantas y máquinas de productos químicos",
"Operador de telar y otras máquinas tejedoras",
"Operario de la conservación de frutas, legumbres y verduras",
"Operario de la elaboración de productos lácteos",
"Operario del tratamiento de la madera",
"Operario en cemento armado y enfoscador",
"Optometrista",
"Organizador de conferencias y eventos",
"Personal de limpieza",
"Miembro de las fuerzas armadas",
"Profesional de nivel medio en actividades culturales y artísticas",
"Profesor de artes",
"Profesor de idiomas",
"Profesor de música",
"Panaderos, pasteleros y confiteros",
"Parquetero y colocador de suelos",
"Patronista y cortador de tela",
"Peluqueros",
"Peón de carga",
"Peón de explotaciones agrícolas",
"Peón de explotaciones de cultivos mixtos y ganaderos",
"Peón de explotaciones ganaderas",
"Peón de jardinería y horticultura",
"Peón de la construcción de edificios",
"Peón de minas y canteras",
"Peón de obras públicas y mantenimiento",
"Peón de pesca y acuicultura",
"Peón forestales",
"Perforador y sondista de pozos",
"Periodista",
"Personal de pompas fúnebres y embalsamador",
"Personal directivo de la administración pública",
"Personas que realizan trabajos varios",
"Pescador, cazador, tramperos y recolector de subsistencia",
"Pescador de agua dulce y en aguas costeras",
"Pescador de alta mar",
"Piloto de aviación",
"Pintor y empapelador",
"Policías",
"Practicante paramédico",
"Practicante y asistente médico",
"Preparador y elaborador de tabaco y sus productos",
"Prestamista",
"Productor y trabajador calificado de explotaciones agropecuarias mixtas",
"Profesional de enfermería",
"Profesional de la protección medioambiental",
"Profesional de la publicidad y la comercialización",
"Profesional de la salud y la higiene laboral y ambiental",
"Profesional de medicina",
"Profesional de medicina alternativa",
"Profesional de nivel medio de enfermería",
"Profesional de nivel medio de medicina tradicional y alternativa",
"Profesional de nivel medio de medicina alternativa",
"Profesional de nivel medio de partería",
"Profesional de nivel medio de servicios estadísticos o matemáticos",
"Profesional de nivel medio del derecho y servicios legales",
"Profesional de partería",
"Profesional de relaciones públicas",
"Profesional de ventas de tecnología de la información y las comunicaciones",
"Profesional de ventas técnicas y médicas",
"Profesional del trabajo social",
"Profesional en redes de computadores",
"Profesional religioso",
"Profesor de enseñanza secundaria",
"Profesor de formación profesional",
"Profesor de universidades y de la enseñanza superior",
"Programador de aplicaciones",
"Psicólogo",
"Pulidor de metales y afilador de herramientas",
"Químico",
"Recepcionista de hoteles",
"Recepcionista",
"Receptor de apuestas",
"Recolector de basura y material reciclable",
"Recolector de dinero en aparatos de venta automática y lector de medidores",
"Redactor de carteles, pintor decorativos y grabador",
"Regulador y operador de máquinas de labrar madera",
"Regulador y operador de máquinas y herramientas",
"Reparador de bicicletas",
"Reponedor de estanterías",
"Representante comercial",
"Revisor y cobrador de los transportes públicos",
"Revocador",
"Modisto",
"Peletero",
"Sombrerero",
"Secretario administrativo",
"Secretario ejecutivo",
"Secretario (general)",
"Secretario jurídicos",
"Secretario médicos",
"Sociólogo",
"Antropólogo",
"Soldador y oxicortador",
"Soplador de vidrio",
"Modelador de vidrio",
"Laminador de vidrio",
"Cortador de vidrio",
"Pulidor de vidrio",
"Suboficial de las fuerzas armadas",
"Supervisor de industria manufacturera",
"Supervisor de la construcción",
"Supervisor de mantenimiento y limpieza en oficinas, hoteles y otros establecimientos",
"Supervisor de secretaría",
"Supervisor de tiendas y almacenes",
"Supervisor en ingeniería de minas",
"Tapicero",
"Colchonero",
"Tasador",
"Techador",
"Técnico agropecuario",
"Técnico de telecomunicaciones",
"Técnico de la Web",
"Técnico de laboratorio médico",
"Técnico de prótesis médicas y dentales",
"Técnico de radiodifusión y grabación audio visual",
"Técnico en aparatos de diagnóstico y tratamiento médico",
"Técnico en asistencia al usuario de tecnología de la información y las comunicaciones",
"Técnico en ciencias biológicas",
"Técnico en ciencias físicas y químicas",
"Técnico en documentación sanitaria",
"Técnico en electrónica",
"Técnico en galerías de arte, museos y bibliotecas",
"Técnico en ingeniería civil",
"Técnico en ingeniería de minas y metalurgia",
"Técnico en ingeniería mecánica",
"Técnico en operaciones de tecnología de la información y las comunicaciones",
"Técnico en optometría y ópticos",
"Técnico en química industrial",
"Técnico en redes y sistemas de computadores",
"Técnico en seguridad aeronáutica",
"Técnico forestal",
"Asistente farmacéutico",
"Asistente fisioterapeuta",
"Asistente veterinario",
"Telefonista",
"Tenedor de libros",
"Trabajador agrícola de subsistencia",
"Trabajador agropecuario de subsistencia",
"Trabajador ambulante de servicios",
"Trabajador comunitario de la salud",
"Trabajador de explotaciones de acuicultura",
"Trabajador de cuidados personales a domicilio",
"Trabajador de cuidados personales en instituciones",
"Trabajador forestal calificado",
"Trabajador pecuario de subsistencia",
"Trabajador social de nivel medio",
"Traductor e intérprete",
"Lingüista",
"Urbanistas e ingenieros de tránsito",
"Vendedor ambulantes de productos comestibles",
"Vendedor ambulantes (excluyendo de comida)",
"Vendedor de comidas al mostrador",
"Vendedor de quioscos y de puestos de mercado",
"Vendedor por teléfono",
"Vendedor puerta a puerta",
"Veterinario",
"Zapatero",
"Miembro del poder legislativo",
) | PypiClean |
/Minetorch-0.6.17.tar.gz/Minetorch-0.6.17/minetorch/miner.py | import logging
import math
import os
import time
from datetime import datetime
from pathlib import Path
import torch
import tqdm
from IPython.core.display import HTML, display
from . import drawers
class Miner(object):
"""The heart of minetorch
Args:
alchemistic_directory (string):
The directory which minetorch will use to store everything in
model (torch.nn.Module):
Pytorch model
optimizer (torch.optim.Optimizer):
Pytorch optimizer
loss_func (function):
A special hook function to compute loss; the function receives 2 variables:
* Miner: the miner object
* Data: Batch data been yield by the loader
The return value of the hook function should be the loss as a float number
code (str, optional):
Defaults to "geass". It's a code name of one
attempt. Assume one is doing kaggle competition and will try
different models, parameters, optimizers... To keep results of every
attempt, one should change the code name before tweaking things.
train_dataloader (torch.utils.data.DataLoader):
Pytorch dataloader
val_dataloader (torch.utils.data.DataLoader, optional):
Defaults to None; if no validation dataloader is provided, validation will be skipped
resume (bool, optional):
Defaults to True. Resume from last training, could be:
* True: resume from the very last epoch
* String: resume from the specified epoch,
e.g. `34`, `68`, `best`
eval_stride (int, optional):
Defaults to 1. Validate every `eval_stride` epochs
persist_stride (int, optional):
Defaults to 1.
Save model every `persist_stride` epochs
drawer (minetorch.Drawer or string, optional):
Defaults to matplotlib.
If provided, Miner will draw training loss and validation loss
curves, could be `tensorboard` or self implemented Drawer object
hooks (dict, optional):
Defaults to {}. Define hook functions.
max_epochs ([type], optional):
Defaults to None. How many epochs to train, None means unlimited.
logging_format ([type], optional):
Defaults to None. logging format
trival ([Boolean], optional):
Defaults to False. If true, both training and validation
processes will be stopped after 10 iterations
plugins (list, optional):
Defaults to []. This is actually a collection of `hooks`, do not set
hooks and plugins at the same time.
forward (function, optional):
custom forward function.
verbose (boolean, optional):
log the loss of every iteration
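Example (an illustrative sketch, not from the original project; `MyModel`,
`train_loader` and `val_loader` are hypothetical placeholders; with the
default forward, `loss_func` is called as `loss_func(predict, target)`,
so a standard criterion works here)::
model = MyModel()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
miner = Miner(
alchemistic_directory="./alchemistic",
model=model,
optimizer=optimizer,
loss_func=torch.nn.CrossEntropyLoss(),
train_dataloader=train_loader,
val_dataloader=val_loader,
max_epochs=10,
)
miner.train()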
"""
def __init__(
self,
alchemistic_directory,
model,
optimizer,
loss_func,
code="geass",
train_dataloader=None,
val_dataloader=None,
resume=True,
eval_stride=1,
persist_stride=1,
gpu=True,
drawer="matplotlib",
hooks={},
max_epochs=None,
statable={},
logging_format=None,
trival=False,
in_notebook=False,
plugins=[],
logger=None,
sheet=None,
accumulated_iter=1,
ignore_optimizer_resume=False,
forward=None,
verbose=False,
amp=False,
amp_scaler=True,
):
self.alchemistic_directory = alchemistic_directory
self.code = code
if trival:
self.code = f"trival_{code}"
self.create_dirs()
self.gpu = gpu
self.devices = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.logger = logger
self.code_dir = os.path.join(alchemistic_directory, self.code)
if self.logger is None:
self.set_logging_config(alchemistic_directory, self.code, logging_format)
self.logger = logging
self.create_drawer(drawer)
self.models_dir = os.path.join(alchemistic_directory, self.code, "models")
self.in_notebook = in_notebook
self.statable = statable
self.accumulated_iter = float(accumulated_iter)
self.ignore_optimizer_resume = ignore_optimizer_resume
self.model = model
self.optimizer = optimizer
self.train_dataloader = train_dataloader
self.val_dataloader = val_dataloader
self.loss_func = loss_func
self.resume = resume
self.eval_stride = eval_stride
self.persist_stride = persist_stride
self.lowest_train_loss = float("inf")
self.lowest_val_loss = float("inf")
self.current_epoch = 0
self.current_train_iteration = 0
self.current_val_iteration = 0
self.hook_funcs = hooks
self.max_epochs = max_epochs
self.trival = trival
self.forward_fn = forward
self.verbose = verbose
self.amp = amp
self.amp_scaler = amp_scaler
if self.amp and self.amp_scaler:
self.scaler = torch.cuda.amp.GradScaler()
self.sheet = sheet
if self.sheet:
self._init_sheet()
self.plugins = plugins
for plugin in self.plugins:
plugin.set_miner(self)
self._set_tqdm()
self.call_hook_func("before_init")
self._check_statable()
self.init_model()
if self.sheet:
self.sheet_progress = dict(
epoch=0, train_percentage="0%", val_percentage="0%"
)
self.last_flushed_at = 0
self.sheet.onready()
self.sheet.flush()
self.status = "init"
self.call_hook_func("after_init")
def _check_statable(self):
for name, statable in self.statable.items():
if not (
hasattr(statable, "state_dict") and hasattr(statable, "load_state_dict")
):
raise Exception(f"The {name} is not a statable object")
def _set_tqdm(self):
if self.in_notebook:
self.tqdm = tqdm.notebook.tqdm
else:
self.tqdm = tqdm.tqdm
def _init_sheet(self):
self.sheet.set_miner(self)
self.sheet.reset_index()
self.sheet.create_column("code", "Code")
self.sheet.create_column("progress", "Progress")
self.sheet.create_column("loss", "Loss")
self.sheet.update("code", self.code)
def create_sheet_column(self, key, title):
if self.sheet is None:
return
self.sheet.create_column(key, title)
def update_sheet(self, key, value):
if self.sheet is None:
return
self.sheet.update(key, value)
def set_logging_config(self, alchemistic_directory, code, logging_format):
self.log_dir = os.path.join(alchemistic_directory, code)
log_file = os.path.join(self.log_dir, "log.txt")
logging_format = (
logging_format
if logging_format is not None
else "%(levelname)s %(asctime)s %(message)s"
)
logging.basicConfig(
filename=log_file,
format=logging_format,
datefmt="%m-%d %H:%M:%S",
level=logging.INFO,
)
def create_drawer(self, drawer):
if drawer == "tensorboard":
self.drawer = drawers.TensorboardDrawer(self)
elif drawer == "matplotlib":
self.drawer = drawers.MatplotlibDrawer(self)
else:
self.drawer = drawer
def notebook_output(self, message, _type="info"):
type_config = {
"info": ["💬", "#6f818a"],
"success": ["✅", "#7cb305"],
"error": ["❌", "#cf1322"],
"warning": ["⚠️", "#d46b08"],
}[_type]
if self.in_notebook:
display(
HTML(
f'<div style="font-size: 12px; color: {type_config[1]}">'
f'⏰ {time.strftime("%b %d - %H:%M:%S")} >>> '
f"{type_config[0]} {message}"
"</div>"
)
)
def notebook_divide(self, message):
if self.in_notebook:
display(
HTML(
'<div style="display: flex; justify-content: center;">'
f'<h3 style="color: #7cb305; border-bottom: 4px dashed #91d5ff; padding-bottom: 6px;">{message}</h3>'
"</div>"
)
)
def init_model(self):
"""resume from some checkpoint"""
if isinstance(self.model, torch.nn.DataParallel):
raise Exception(
"Don't parallel the model yourself, instead, if the "
"`gpu` option is true(default), Minetorch will do this for you."
)
if self.resume is True:
# resume from the newest model
if self.model_file_path("latest") is not None:
checkpoint_path = self.model_file_path("latest")
else:
checkpoint_path = None
msg = "Could not find checkpoint to resume, " "train from scratch"
self.notify(msg, "warning")
elif isinstance(self.resume, str):
checkpoint_path = self.model_file_path(self.resume)
elif isinstance(self.resume, int):
checkpoint_path = self.model_file_path(str(self.resume))
else:
checkpoint_path = None
if self.resume is not True and self.resume and checkpoint_path is None:
# user has specified a none existed model, should raise a error
raise Exception(f"Could not find model {self.resume}")
if checkpoint_path is not None:
msg = f"Start to load checkpoint {checkpoint_path}"
self.notify(msg)
checkpoint = torch.load(checkpoint_path)
self.current_epoch = checkpoint.get("epoch", 0)
self.current_train_iteration = checkpoint.get("train_iteration", 0)
self.current_val_iteration = checkpoint.get("val_iteration", 0)
self.lowest_train_loss = checkpoint.get("lowest_train_loss", 9999)
self.lowest_val_loss = checkpoint.get("lowest_val_loss", 9999)
# load model state
try:
self.model.load_state_dict(checkpoint["state_dict"], strict=True)
except Exception as e:
msg = (
f"load checkpoint failed with {e}, the state in the "
"checkpoint is not matched with the model, "
"try to reload checkpoint with unstrict mode"
)
self.notify(msg, "warning")
self.model.load_state_dict(checkpoint["state_dict"], strict=False)
# load optimizer state
if "optimizer" in checkpoint and not self.ignore_optimizer_resume:
try:
self.optimizer.load_state_dict(checkpoint["optimizer"])
except Exception as e:
msg = (
f"load optimizer state failed with {e}, will skip this error and continue, "
"stop the process if it is not expected"
)
self.notify(msg, "warning")
# load drawer state
if (self.drawer is not None) and ("drawer_state" in checkpoint):
self.drawer.set_state(checkpoint["drawer_state"])
# load scaler state
if self.amp and self.amp_scaler:
try:
self.scaler.load_state_dict(checkpoint["scaler"])
except Exception as e:
msg = (
f"load scaler state failed with {e}, will skip this error and continue, "
"stop the process if it is not expected"
)
self.notify(msg, "warning")
# load other statable state
if "statable" in checkpoint:
for name, statable in self.statable.items():
if name not in checkpoint["statable"]:
continue
statable.load_state_dict(checkpoint["statable"][name])
# load plugin states
for plugin in self.plugins:
key = f"__plugin.{plugin.__class__.__name__}__"
plugin.load_state_dict(checkpoint.get(key, {}))
msg = "checkpoint loaded"
self.notify(msg, "success")
self.model = self.parallel_model(self.model)
def parallel_model(self, model):
if self.gpu:
gpu_count = torch.cuda.device_count()
if gpu_count == 0:
self.notify("no GPU detected, will train on CPU.")
else:
self.notify(f"found {gpu_count} GPUs, will use all of them to train")
devices = list(map(lambda x: f"cuda:{x}", range(gpu_count)))
model.cuda()
model = torch.nn.DataParallel(model, devices)
return model
def notify(self, message, _type="info"):
getattr(self.logger, "info" if _type == "success" else _type)(message)
self.notebook_output(message, _type)
def call_hook_func(self, name, **payload):
if name in self.hook_funcs:
self.hook_funcs[name](miner=self, **payload)
for plugin in self.plugins:
if not plugin.before_hook(name, payload):
continue
if hasattr(plugin, name):
getattr(plugin, name)(**payload)
def train(self):
"""start to train the model"""
while True:
self.current_epoch += 1
self.call_hook_func("before_epoch_start", epoch=self.current_epoch)
self.notebook_divide(f"Epoch {self.current_epoch}")
self.model.train()
train_iters = len(self.train_dataloader)
total_train_loss = 0
percentage = 0
total = len(self.train_dataloader)
self.notify(f"start to train epoch {self.current_epoch}")
self._update_progress(
force=True,
epoch=self.current_epoch,
train_percentage="0%",
val_percentage="0%",
)
t = self.tqdm(self.train_dataloader)
for index, data in enumerate(t):
if self.trival is True and index == 10:
break
train_loss = self.run_train_iteration(index, data, train_iters)
t.set_postfix({"train loss": train_loss})
if int((index + 1) % self.accumulated_iter) == 0:
if self.amp and self.amp_scaler:
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.optimizer.step()
if self.amp and self.amp_scaler:
self.optimizer.zero_grad()
else:
self.optimizer.zero_grad(set_to_none=True)
total_train_loss += train_loss
current_percentage = math.ceil(index / total * 100)
if current_percentage != percentage:
self._update_progress(train_percentage=f"{percentage}%")
percentage = current_percentage
if self.amp and self.amp_scaler:
self.optimizer.zero_grad()
else:
self.optimizer.zero_grad(set_to_none=True)
self._update_progress(force=True, train_percentage=f"{current_percentage}%")
total_train_loss = total_train_loss / train_iters
self.notify(
f"training of epoch {self.current_epoch} finished, "
f"loss is {total_train_loss}"
)
total_val_loss = 0
percentage = 0
total = len(self.val_dataloader)
if self.val_dataloader is not None:
val_iters = len(self.val_dataloader)
with torch.set_grad_enabled(False):
self.model.eval()
self.notify(f"validate epoch {self.current_epoch}")
t = self.tqdm(self.val_dataloader)
for index, data in enumerate(t):
if self.trival is True and index == 10:
break
val_loss = self.run_val_iteration(index, data, val_iters)
t.set_postfix({"val loss": val_loss})
total_val_loss += val_loss
current_percentage = math.ceil(index / total * 100)
if current_percentage != percentage:
self._update_progress(val_percentage=f"{percentage}%")
percentage = current_percentage
self._update_progress(
force=True, val_percentage=f"{current_percentage}%"
)
total_val_loss = total_val_loss / val_iters
self.notify(
f"validation of epoch {self.current_epoch} "
f"finished, loss is {total_val_loss}"
)
if self.drawer is not None:
png_file = self.drawer.scalars(
self.current_epoch,
{"train": total_train_loss, "val": total_val_loss},
"loss",
)
if png_file is not None:
self.update_sheet(
"loss", {"raw": png_file, "processor": "upload_image"}
)
if total_train_loss < self.lowest_train_loss:
self.lowest_train_loss = total_train_loss
should_persist_best = False
if total_val_loss < self.lowest_val_loss:
message = (
"current val loss {} is lower than lowest {}, "
"persist this model as best one".format(
total_val_loss, self.lowest_val_loss
)
)
self.notify(message, "success")
self.lowest_val_loss = total_val_loss
should_persist_best = True
self.call_hook_func("before_persist_checkpoint")
if should_persist_best:
self.persist("best")
self.persist("latest")
if not self.current_epoch % self.persist_stride:
self.persist("epoch_{}".format(self.current_epoch))
if self.max_epochs is not None and self.current_epoch >= self.max_epochs:
self.call_hook_func("before_quit")
self.notify("exceed max epochs, quit!")
break
if self.sheet:
self.sheet.flush()
self.call_hook_func(
"after_epoch_end",
train_loss=total_train_loss,
val_loss=total_val_loss,
epoch=self.current_epoch,
)
def run_train_iteration(self, index, data, train_iters):
self.status = "train"
self.current_train_iteration += 1
self.call_hook_func(
"before_train_iteration_start",
data=data,
index=index,
total_iters=train_iters,
iteration=self.current_train_iteration,
)
if self.amp and self.amp_scaler:
with torch.cuda.amp.autocast():
_, loss = self._forward(data)
separate_loss = loss / self.accumulated_iter
separate_loss = self.scaler.scale(separate_loss)
else:
_, loss = self._forward(data)
separate_loss = loss / self.accumulated_iter
separate_loss.backward()
loss = loss.detach().cpu().item()
if self.verbose:
self.logger.info(
"[train {}/{}/{}] loss {}".format(
self.current_epoch, index, train_iters, loss
)
)
self.call_hook_func(
"after_train_iteration_end",
loss=loss,
data=data,
index=index,
total_iters=train_iters,
iteration=self.current_train_iteration,
)
return loss
def _forward(self, data):
if self.forward_fn:
return self.forward_fn(self, data)
else:
predict = self.model(data[0].to(self.devices))
loss = self.loss_func(predict, data[1].to(self.devices))
return predict, loss
def run_val_iteration(self, index, data, val_iters):
self.status = "val"
self.current_val_iteration += 1
self.call_hook_func(
"before_val_iteration_start",
data=data,
index=index,
total_iters=val_iters,
iteration=self.current_val_iteration,
)
predict, loss = self._forward(data)
loss = loss.detach().cpu().item()
if self.verbose:
self.logger.info(
"[val {}/{}/{}] loss {}".format(
self.current_epoch, index, val_iters, loss
)
)
self.call_hook_func(
"after_val_iteration_ended",
predicts=predict,
loss=loss,
data=data,
index=index,
total_iters=val_iters,
iteration=self.current_val_iteration,
)
return loss
def persist(self, name):
"""save the model to disk"""
self.call_hook_func("before_checkpoint_persisted")
if self.drawer is not None:
drawer_state = self.drawer.get_state()
else:
drawer_state = {}
if isinstance(self.model, torch.nn.DataParallel):
model_state_dict = self.model.module.state_dict()
else:
model_state_dict = self.model.state_dict()
state = {
"state_dict": model_state_dict,
"optimizer": self.optimizer.state_dict(),
"epoch": self.current_epoch,
"train_iteration": self.current_train_iteration,
"val_iteration": self.current_val_iteration,
"lowest_train_loss": self.lowest_train_loss,
"lowest_val_loss": self.lowest_val_loss,
"drawer_state": drawer_state,
"statable": {},
}
for statable_name, statable in self.statable.items():
state["statable"][statable_name] = statable.state_dict()
for plugin in self.plugins:
key = f"__plugin.{plugin.__class__.__name__}__"
state[key] = plugin.state_dict()
if self.amp and self.amp_scaler:
state["scaler"] = self.scaler.state_dict()
modelpath = self.standard_model_path(name)
torch.save(state, modelpath)
message = f"save checkpoint to {self.standard_model_path(name)}"
self.notify(message)
self.call_hook_func("after_checkpoint_persisted", modelpath=modelpath)
def standard_model_path(self, model_name):
return os.path.join(self.models_dir, f"{model_name}.pth.tar")
def model_file_path(self, model_name):
model_name_path = Path(str(model_name))
models_dir_path = Path(self.models_dir)
search_paths = [
model_name_path,
models_dir_path / model_name_path,
models_dir_path / f"{model_name}.pth.tar",
models_dir_path / f"epoch_{model_name}.pth.tar",
]
for path in search_paths:
if path.is_file():
return path.resolve()
return None
# TODO: implement methods below
def graceful_stop(self):
"""stop train and exist after this epoch"""
pass
def save_and_stop(self):
"""save the model immediately and stop training"""
pass
def create_dirs(self):
"""Create directories"""
self.create_dir("")
self.create_dir(self.code)
self.create_dir(self.code, "models")
def create_dir(self, *args):
"""Create directory"""
current_dir = self.alchemistic_directory
for dir_name in args:
current_dir = os.path.join(current_dir, dir_name)
if not os.path.isdir(current_dir):
os.mkdir(current_dir)
def periodly_flush(self, force=False):
if self.sheet is None:
return
now = int(datetime.now().timestamp())
# flush every 10 seconds
if not force and now - self.last_flushed_at < 10:
return
self.sheet.flush()
self.last_flushed_at = now
def _update_progress(self, force=False, **kwargs):
if self.sheet is None:
return
self.sheet_progress.update(kwargs)
progress = f"""
epoch: {self.sheet_progress.get('epoch')}
train progress: {self.sheet_progress.get('train_percentage')}
val progress: {self.sheet_progress.get('val_percentage')}
"""
self.sheet.update("progress", progress)
self.periodly_flush(force) | PypiClean |
/ConfigMaster-2.3.6.tar.gz/ConfigMaster-2.3.6/configmaster/JSONConfigFile.py | import json
from configmaster import exc
from configmaster.ConfigGenerator import GenerateConfigFile, GenerateNetworkedConfigFile
import sys
# Fix for 3.5
if sys.version_info < (3, 5):
JSONDecodeError = ValueError
else:
JSONDecodeError = json.JSONDecodeError
def json_load_hook(is_net: bool=False):
def actual_load_hook(cfg, **kwargs):
"""
This handles automatically opening/creating the JSON configuration files.
>>> import configmaster.JSONConfigFile
>>> cfg = configmaster.JSONConfigFile.JSONConfigFile("test.json") # Accepts a string for input
>>> fd = open("test.json") # Accepts a file descriptor too
>>> cfg2 = configmaster.JSONConfigFile.JSONConfigFile(fd)
ConfigMaster objects accept either a string for the relative path of the JSON file to load, or a :io.TextIOBase: object to read from.
If you pass in a string, the file will automatically be created if it doesn't exist. However, if you do not have permission to write to it, a :PermissionError: will be raised.
To access config objects programmatically, a config object is exposed via the use of cfg.config.
These config objects can be accessed via cfg.config.attr, without having to resort to looking up objects in a dict.
>>> # Sample JSON data is {"abc": [1, 2, 3]}
... print(cfg.config.abc) # Prints [1, 2, 3]
"""
# Load the data from the JSON file.
try:
if not is_net:
data = json.load(cfg.fd)
else:
data = cfg.request.json()
except JSONDecodeError as e:
raise exc.LoaderException("Could not decode JSON file: {}".format(e)) from e
# Serialize the data into new sets of ConfigKey classes.
cfg.config.load_from_dict(data)
return actual_load_hook
def json_dump_hook(cfg, text: bool=False):
"""
Dumps all the data into a JSON file.
"""
data = cfg.config.dump()
if not text:
json.dump(data, cfg.fd)
else:
return json.dumps(data)
JSONConfigFile = GenerateConfigFile(load_hook=json_load_hook(False), dump_hook=json_dump_hook, json_fix=True)
NetworkedJSONConfigFile = GenerateNetworkedConfigFile(load_hook=json_load_hook(True),
normal_class_load_hook=json_load_hook(False), normal_class_dump_hook=json_dump_hook) | PypiClean |
/FreeClimb-4.5.0-py3-none-any.whl/freeclimb/model_utils.py | from datetime import date, datetime # noqa: F401
from copy import deepcopy
import inspect
import io
import os
import pprint
import re
import tempfile
import json
from dateutil.parser import parse
from freeclimb.exceptions import (
ApiKeyError,
ApiAttributeError,
ApiTypeError,
ApiValueError,
)
none_type = type(None)
file_type = io.IOBase
def convert_js_args_to_python_args(fn):
from functools import wraps
@wraps(fn)
def wrapped_init(_self, *args, **kwargs):
"""
An attribute named `self` received from the api will conflicts with the reserved `self`
parameter of a class method. During generation, `self` attributes are mapped
to `_self` in models. Here, we name `_self` instead of `self` to avoid conflicts.
"""
spec_property_naming = kwargs.get('_spec_property_naming', False)
if spec_property_naming:
kwargs = change_keys_js_to_python(kwargs, _self if isinstance(_self, type) else _self.__class__)
return fn(_self, *args, **kwargs)
return wrapped_init
class cached_property(object):
# this caches the result of the function call for fn with no inputs
# use this as a decorator on function methods that you want converted
# into cached properties
result_key = '_results'
def __init__(self, fn):
self._fn = fn
def __get__(self, instance, cls=None):
if self.result_key in vars(self):
return vars(self)[self.result_key]
else:
result = self._fn()
setattr(self, self.result_key, result)
return result
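# Illustrative usage sketch (not part of the original source): `cached_property`
# decorates zero-argument class-level factory functions; the first attribute
# access calls the function and caches the result under `_results`, and later
# accesses return the cached value. `compute_something_costly` is a hypothetical
# placeholder.
#
#   class Example(object):
#       @cached_property
#       def _expensive_lookup():  # no arguments, per the comment above
#           return compute_something_costly()
#
#   Example()._expensive_lookup  # computed once, then served from the cache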
PRIMITIVE_TYPES = (list, float, int, bool, datetime, date, str, file_type)
def allows_single_value_input(cls):
"""
This function returns True if the input composed schema model or any
descendant model allows a value only input
This is true for cases where oneOf contains items like:
oneOf:
- float
- NumberWithValidation
- StringEnum
- ArrayModel
- null
TODO: lru_cache this
"""
if (
issubclass(cls, ModelSimple) or
cls in PRIMITIVE_TYPES
):
return True
elif issubclass(cls, ModelComposed):
if not cls._composed_schemas['oneOf']:
return False
return any(allows_single_value_input(c) for c in cls._composed_schemas['oneOf'])
return False
def composed_model_input_classes(cls):
"""
This function returns a list of the possible models that can be accepted as
inputs.
TODO: lru_cache this
"""
if issubclass(cls, ModelSimple) or cls in PRIMITIVE_TYPES:
return [cls]
elif issubclass(cls, ModelNormal):
if cls.discriminator is None:
return [cls]
else:
return get_discriminated_classes(cls)
elif issubclass(cls, ModelComposed):
if not cls._composed_schemas['oneOf']:
return []
if cls.discriminator is None:
input_classes = []
for c in cls._composed_schemas['oneOf']:
input_classes.extend(composed_model_input_classes(c))
return input_classes
else:
return get_discriminated_classes(cls)
return []
class OpenApiModel(object):
"""The base class for all OpenAPIModels"""
def set_attribute(self, name, value):
# this is only used to set properties on self
path_to_item = []
if self._path_to_item:
path_to_item.extend(self._path_to_item)
path_to_item.append(name)
if name in self.openapi_types:
required_types_mixed = self.openapi_types[name]
elif self.additional_properties_type is None:
raise ApiAttributeError(
"{0} has no attribute '{1}'".format(
type(self).__name__, name),
path_to_item
)
elif self.additional_properties_type is not None:
required_types_mixed = self.additional_properties_type
if get_simple_class(name) != str:
error_msg = type_error_message(
var_name=name,
var_value=name,
valid_classes=(str,),
key_type=True
)
raise ApiTypeError(
error_msg,
path_to_item=path_to_item,
valid_classes=(str,),
key_type=True
)
if self._check_type:
value = validate_and_convert_types(
value, required_types_mixed, path_to_item, self._spec_property_naming,
self._check_type, configuration=self._configuration)
if (name,) in self.allowed_values:
check_allowed_values(
self.allowed_values,
(name,),
value
)
if (name,) in self.validations:
check_validations(
self.validations,
(name,),
value,
self._configuration
)
self.__dict__['_data_store'][name] = value
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
def __setattr__(self, attr, value):
"""set the value of an attribute using dot notation: `instance.attr = val`"""
self[attr] = value
def __getattr__(self, attr):
"""get the value of an attribute using dot notation: `instance.attr`"""
return self.__getitem__(attr)
def __copy__(self):
cls = self.__class__
if self.get("_spec_property_naming", False):
return cls._new_from_openapi_data(**self.__dict__)
else:
return new_cls.__new__(cls, **self.__dict__)
def __deepcopy__(self, memo):
cls = self.__class__
if self.get("_spec_property_naming", False):
new_inst = cls._new_from_openapi_data()
else:
new_inst = cls.__new__(cls)
for k, v in self.__dict__.items():
setattr(new_inst, k, deepcopy(v, memo))
return new_inst
def __new__(cls, *args, **kwargs):
# this function uses the discriminator to
# pick a new schema/class to instantiate because a discriminator
# propertyName value was passed in
if len(args) == 1:
arg = args[0]
if arg is None and is_type_nullable(cls):
# The input data is the 'null' value and the type is nullable.
return None
if issubclass(cls, ModelComposed) and allows_single_value_input(cls):
model_kwargs = {}
oneof_instance = get_oneof_instance(cls, model_kwargs, kwargs, model_arg=arg)
return oneof_instance
visited_composed_classes = kwargs.get('_visited_composed_classes', ())
if (
cls.discriminator is None or
cls in visited_composed_classes
):
# Use case 1: this openapi schema (cls) does not have a discriminator
# Use case 2: we have already visited this class before and are sure that we
# want to instantiate it this time. We have visited this class deserializing
# a payload with a discriminator. During that process we traveled through
# this class but did not make an instance of it. Now we are making an
# instance of a composed class which contains cls in it, so this time make an instance of cls.
#
# Here's an example of use case 2: If Animal has a discriminator
# petType and we pass in "Dog", and the class Dog
# allOf includes Animal, we move through Animal
# once using the discriminator, and pick Dog.
# Then in the composed schema Dog, we will make an instance of the
# Animal class (because Dog has allOf: Animal) but this time we won't travel
# through Animal's discriminator because we passed in
# _visited_composed_classes = (Animal,)
return super(OpenApiModel, cls).__new__(cls)
# Get the name and value of the discriminator property.
# The discriminator name is obtained from the discriminator meta-data
# and the discriminator value is obtained from the input data.
discr_propertyname_py = list(cls.discriminator.keys())[0]
discr_propertyname_js = cls.attribute_map.get(discr_propertyname_py)
cls_with_attr = [c for c in list(visited_composed_classes) + [cls] if hasattr(c, discr_propertyname_py)]
if discr_propertyname_js in kwargs:
discr_value = kwargs[discr_propertyname_js]
elif discr_propertyname_py in kwargs:
discr_value = kwargs[discr_propertyname_py]
elif len(cls_with_attr) > 0:
discr_value = getattr(cls_with_attr[0], discr_propertyname_py)
kwargs[discr_propertyname_py] = discr_value
else:
# The input data does not contain the discriminator property.
path_to_item = kwargs.get('_path_to_item', ())
raise ApiValueError(
"Cannot deserialize input data due to missing discriminator. "
"The discriminator property '%s' is missing at path: %s" %
(discr_propertyname_js, path_to_item)
)
# Implementation note: the last argument to get_discriminator_class
# is a list of visited classes. get_discriminator_class may recursively
# call itself and update the list of visited classes, and the initial
# value must be an empty list. Hence not using 'visited_composed_classes'
new_cls = get_discriminator_class(
cls, discr_propertyname_py, discr_value, [])
if new_cls is None:
path_to_item = kwargs.get('_path_to_item', ())
disc_prop_value = kwargs.get(
discr_propertyname_js, kwargs.get(discr_propertyname_py))
raise ApiValueError(
"Cannot deserialize input data due to invalid discriminator "
"value. The OpenAPI document has no mapping for discriminator "
"property '%s'='%s' at path: %s" %
(discr_propertyname_js, disc_prop_value, path_to_item)
)
if new_cls in visited_composed_classes:
# if we are making an instance of a composed schema Descendent
# which allOf includes Ancestor, then Ancestor contains
# a discriminator that includes Descendent.
# So if we make an instance of Descendent, we have to make an
# instance of Ancestor to hold the allOf properties.
# This code detects that use case and makes the instance of Ancestor
# For example:
# When making an instance of Dog, _visited_composed_classes = (Dog,)
# then we make an instance of Animal to include in dog._composed_instances
# so when we are here, cls is Animal
# cls.discriminator != None
# cls not in _visited_composed_classes
# new_cls = Dog
# but we know that we already have Dog
# because it is in visited_composed_classes
# so make Animal here
return super(OpenApiModel, cls).__new__(cls)
# Build a list containing all oneOf and anyOf descendants.
oneof_anyof_classes = None
if cls._composed_schemas is not None:
oneof_anyof_classes = (
cls._composed_schemas.get('oneOf', ()) +
cls._composed_schemas.get('anyOf', ()))
oneof_anyof_child = new_cls in oneof_anyof_classes
kwargs['_visited_composed_classes'] = visited_composed_classes + (cls,)
if cls._composed_schemas.get('allOf') and oneof_anyof_child:
# Validate that we can make self because when we make the
# new_cls it will not include the allOf validations in self
self_inst = super(OpenApiModel, cls).__new__(cls)
self_inst.__init__(*args, **kwargs)
if kwargs.get("_spec_property_naming", False):
# when true, implies new is from deserialization
new_inst = new_cls._new_from_openapi_data(*args, **kwargs)
else:
new_inst = new_cls.__new__(new_cls, *args, **kwargs)
new_inst.__init__(*args, **kwargs)
return new_inst
@classmethod
@convert_js_args_to_python_args
def _new_from_openapi_data(cls, *args, **kwargs):
# this function uses the discriminator to
# pick a new schema/class to instantiate because a discriminator
# propertyName value was passed in
if len(args) == 1:
arg = args[0]
if arg is None and is_type_nullable(cls):
# The input data is the 'null' value and the type is nullable.
return None
if issubclass(cls, ModelComposed) and allows_single_value_input(cls):
model_kwargs = {}
oneof_instance = get_oneof_instance(cls, model_kwargs, kwargs, model_arg=arg)
return oneof_instance
visited_composed_classes = kwargs.get('_visited_composed_classes', ())
if (
cls.discriminator is None or
cls in visited_composed_classes
):
# Use case 1: this openapi schema (cls) does not have a discriminator
# Use case 2: we have already visited this class before and are sure that we
# want to instantiate it this time. We have visited this class deserializing
# a payload with a discriminator. During that process we traveled through
# this class but did not make an instance of it. Now we are making an
# instance of a composed class which contains cls in it, so this time make an instance of cls.
#
# Here's an example of use case 2: If Animal has a discriminator
# petType and we pass in "Dog", and the class Dog
# allOf includes Animal, we move through Animal
# once using the discriminator, and pick Dog.
# Then in the composed schema Dog, we will make an instance of the
# Animal class (because Dog has allOf: Animal) but this time we won't travel
# through Animal's discriminator because we passed in
# _visited_composed_classes = (Animal,)
return cls._from_openapi_data(*args, **kwargs)
# Get the name and value of the discriminator property.
# The discriminator name is obtained from the discriminator meta-data
# and the discriminator value is obtained from the input data.
discr_propertyname_py = list(cls.discriminator.keys())[0]
discr_propertyname_js = cls.attribute_map[discr_propertyname_py]
if discr_propertyname_js in kwargs:
discr_value = kwargs[discr_propertyname_js]
elif discr_propertyname_py in kwargs:
discr_value = kwargs[discr_propertyname_py]
else:
# The input data does not contain the discriminator property.
path_to_item = kwargs.get('_path_to_item', ())
raise ApiValueError(
"Cannot deserialize input data due to missing discriminator. "
"The discriminator property '%s' is missing at path: %s" %
(discr_propertyname_js, path_to_item)
)
# Implementation note: the last argument to get_discriminator_class
# is a list of visited classes. get_discriminator_class may recursively
# call itself and update the list of visited classes, and the initial
# value must be an empty list. Hence not using 'visited_composed_classes'
new_cls = get_discriminator_class(
cls, discr_propertyname_py, discr_value, [])
if new_cls is None:
path_to_item = kwargs.get('_path_to_item', ())
disc_prop_value = kwargs.get(
discr_propertyname_js, kwargs.get(discr_propertyname_py))
raise ApiValueError(
"Cannot deserialize input data due to invalid discriminator "
"value. The OpenAPI document has no mapping for discriminator "
"property '%s'='%s' at path: %s" %
(discr_propertyname_js, disc_prop_value, path_to_item)
)
if new_cls in visited_composed_classes:
# if we are making an instance of a composed schema Descendent
# which allOf includes Ancestor, then Ancestor contains
# a discriminator that includes Descendent.
# So if we make an instance of Descendent, we have to make an
# instance of Ancestor to hold the allOf properties.
# This code detects that use case and makes the instance of Ancestor
# For example:
# When making an instance of Dog, _visited_composed_classes = (Dog,)
# then we make an instance of Animal to include in dog._composed_instances
# so when we are here, cls is Animal
# cls.discriminator != None
# cls not in _visited_composed_classes
# new_cls = Dog
# but we know that we already have Dog
# because it is in visited_composed_classes
# so make Animal here
return cls._from_openapi_data(*args, **kwargs)
# Build a list containing all oneOf and anyOf descendants.
oneof_anyof_classes = None
if cls._composed_schemas is not None:
oneof_anyof_classes = (
cls._composed_schemas.get('oneOf', ()) +
cls._composed_schemas.get('anyOf', ()))
oneof_anyof_child = new_cls in oneof_anyof_classes
kwargs['_visited_composed_classes'] = visited_composed_classes + (cls,)
if cls._composed_schemas.get('allOf') and oneof_anyof_child:
# Validate that we can make self because when we make the
# new_cls it will not include the allOf validations in self
self_inst = cls._from_openapi_data(*args, **kwargs)
        new_inst = new_cls._new_from_openapi_data(*args, **kwargs)
        return new_inst
class ModelSimple(OpenApiModel):
"""the parent class of models whose type != object in their
swagger/openapi"""
def __setitem__(self, name, value):
"""set the value of an attribute using square-bracket notation: `instance[attr] = val`"""
if name in self.required_properties:
self.__dict__[name] = value
return
self.set_attribute(name, value)
def get(self, name, default=None):
"""returns the value of an attribute or some default value if the attribute was not set"""
if name in self.required_properties:
return self.__dict__[name]
return self.__dict__['_data_store'].get(name, default)
def __getitem__(self, name):
"""get the value of an attribute using square-bracket notation: `instance[attr]`"""
if name in self:
return self.get(name)
raise ApiAttributeError(
"{0} has no attribute '{1}'".format(
type(self).__name__, name),
[e for e in [self._path_to_item, name] if e]
)
def __contains__(self, name):
"""used by `in` operator to check if an attribute value was set in an instance: `'attr' in instance`"""
if name in self.required_properties:
return name in self.__dict__
return name in self.__dict__['_data_store']
def to_str(self):
"""Returns the string representation of the model"""
return str(self.value)
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, self.__class__):
return False
this_val = self._data_store['value']
that_val = other._data_store['value']
types = set()
types.add(this_val.__class__)
types.add(that_val.__class__)
vals_equal = this_val == that_val
return vals_equal
class ModelNormal(OpenApiModel):
"""the parent class of models whose type == object in their
swagger/openapi"""
def __setitem__(self, name, value):
"""set the value of an attribute using square-bracket notation: `instance[attr] = val`"""
if name in self.required_properties:
self.__dict__[name] = value
return
self.set_attribute(name, value)
def get(self, name, default=None):
"""returns the value of an attribute or some default value if the attribute was not set"""
if name in self.required_properties:
return self.__dict__[name]
return self.__dict__['_data_store'].get(name, default)
def __getitem__(self, name):
"""get the value of an attribute using square-bracket notation: `instance[attr]`"""
if name in self:
return self.get(name)
raise ApiAttributeError(
"{0} has no attribute '{1}'".format(
type(self).__name__, name),
[e for e in [self._path_to_item, name] if e]
)
def __contains__(self, name):
"""used by `in` operator to check if an attribute value was set in an instance: `'attr' in instance`"""
if name in self.required_properties:
return name in self.__dict__
return name in self.__dict__['_data_store']
def to_dict(self):
"""Returns the model properties as a dict"""
return model_to_dict(self, serialize=False)
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, self.__class__):
return False
if not set(self._data_store.keys()) == set(other._data_store.keys()):
return False
for _var_name, this_val in self._data_store.items():
that_val = other._data_store[_var_name]
types = set()
types.add(this_val.__class__)
types.add(that_val.__class__)
vals_equal = this_val == that_val
if not vals_equal:
return False
return True
class ModelComposed(OpenApiModel):
"""the parent class of models whose type == object in their
swagger/openapi and have oneOf/allOf/anyOf
When one sets a property we use var_name_to_model_instances to store the value in
the correct class instances + run any type checking + validation code.
When one gets a property we use var_name_to_model_instances to get the value
from the correct class instances.
This allows multiple composed schemas to contain the same property with additive
constraints on the value.
_composed_schemas (dict) stores the anyOf/allOf/oneOf classes
key (str): allOf/oneOf/anyOf
value (list): the classes in the XOf definition.
Note: none_type can be included when the openapi document version >= 3.1.0
_composed_instances (list): stores a list of instances of the composed schemas
defined in _composed_schemas. When properties are accessed in the self instance,
they are returned from the self._data_store or the data stores in the instances
in self._composed_schemas
_var_name_to_model_instances (dict): maps between a variable name on self and
the composed instances (self included) which contain that data
key (str): property name
value (list): list of class instances, self or instances in _composed_instances
which contain the value that the key is referring to.
"""
def __setitem__(self, name, value):
"""set the value of an attribute using square-bracket notation: `instance[attr] = val`"""
if name in self.required_properties:
self.__dict__[name] = value
return
"""
Use cases:
1. additional_properties_type is None (additionalProperties == False in spec)
Check for property presence in self.openapi_types
if not present then throw an error
if present set in self, set attribute
always set on composed schemas
2. additional_properties_type exists
set attribute on self
always set on composed schemas
"""
if self.additional_properties_type is None:
"""
For an attribute to exist on a composed schema it must:
- fulfill schema_requirements in the self composed schema not considering oneOf/anyOf/allOf schemas AND
- fulfill schema_requirements in each oneOf/anyOf/allOf schemas
schema_requirements:
For an attribute to exist on a schema it must:
- be present in properties at the schema OR
- have additionalProperties unset (defaults additionalProperties = any type) OR
- have additionalProperties set
"""
if name not in self.openapi_types:
raise ApiAttributeError(
"{0} has no attribute '{1}'".format(
type(self).__name__, name),
[e for e in [self._path_to_item, name] if e]
)
# attribute must be set on self and composed instances
self.set_attribute(name, value)
for model_instance in self._composed_instances:
setattr(model_instance, name, value)
if name not in self._var_name_to_model_instances:
# we assigned an additional property
self.__dict__['_var_name_to_model_instances'][name] = self._composed_instances + [self]
return None
__unset_attribute_value__ = object()
def get(self, name, default=None):
"""returns the value of an attribute or some default value if the attribute was not set"""
if name in self.required_properties:
return self.__dict__[name]
# get the attribute from the correct instance
model_instances = self._var_name_to_model_instances.get(name)
values = []
# A composed model stores self and child (oneof/anyOf/allOf) models under
# self._var_name_to_model_instances.
# Any property must exist in self and all model instances
# The value stored in all model instances must be the same
if model_instances:
for model_instance in model_instances:
if name in model_instance._data_store:
v = model_instance._data_store[name]
if v not in values:
values.append(v)
len_values = len(values)
if len_values == 0:
return default
elif len_values == 1:
return values[0]
elif len_values > 1:
raise ApiValueError(
"Values stored for property {0} in {1} differ when looking "
"at self and self's composed instances. All values must be "
"the same".format(name, type(self).__name__),
[e for e in [self._path_to_item, name] if e]
)
def __getitem__(self, name):
"""get the value of an attribute using square-bracket notation: `instance[attr]`"""
value = self.get(name, self.__unset_attribute_value__)
if value is self.__unset_attribute_value__:
raise ApiAttributeError(
"{0} has no attribute '{1}'".format(
type(self).__name__, name),
[e for e in [self._path_to_item, name] if e]
)
return value
def __contains__(self, name):
"""used by `in` operator to check if an attribute value was set in an instance: `'attr' in instance`"""
if name in self.required_properties:
return name in self.__dict__
model_instances = self._var_name_to_model_instances.get(
name, self._additional_properties_model_instances)
if model_instances:
for model_instance in model_instances:
if name in model_instance._data_store:
return True
return False
def to_dict(self):
"""Returns the model properties as a dict"""
return model_to_dict(self, serialize=False)
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, self.__class__):
return False
if not set(self._data_store.keys()) == set(other._data_store.keys()):
return False
for _var_name, this_val in self._data_store.items():
that_val = other._data_store[_var_name]
types = set()
types.add(this_val.__class__)
types.add(that_val.__class__)
vals_equal = this_val == that_val
if not vals_equal:
return False
return True
COERCION_INDEX_BY_TYPE = {
ModelComposed: 0,
ModelNormal: 1,
ModelSimple: 2,
none_type: 3, # The type of 'None'.
list: 4,
dict: 5,
float: 6,
int: 7,
bool: 8,
datetime: 9,
date: 10,
str: 11,
file_type: 12, # 'file_type' is an alias for the built-in 'file' or 'io.IOBase' type.
}
# these are used to limit what type conversions we try to do
# when we have a valid type already and we want to try converting
# to another type
UPCONVERSION_TYPE_PAIRS = (
(str, datetime),
(str, date),
(int, float), # A float may be serialized as an integer, e.g. '3' is a valid serialized float.
(list, ModelComposed),
(dict, ModelComposed),
(str, ModelComposed),
(int, ModelComposed),
(float, ModelComposed),
(list, ModelComposed),
(list, ModelNormal),
(dict, ModelNormal),
(str, ModelSimple),
(int, ModelSimple),
(float, ModelSimple),
(list, ModelSimple),
)
COERCIBLE_TYPE_PAIRS = {
False: ( # client instantiation of a model with client data
# (dict, ModelComposed),
# (list, ModelComposed),
# (dict, ModelNormal),
# (list, ModelNormal),
# (str, ModelSimple),
# (int, ModelSimple),
# (float, ModelSimple),
# (list, ModelSimple),
# (str, int),
# (str, float),
# (str, datetime),
# (str, date),
# (int, str),
# (float, str),
),
True: ( # server -> client data
(dict, ModelComposed),
(list, ModelComposed),
(dict, ModelNormal),
(list, ModelNormal),
(str, ModelSimple),
(int, ModelSimple),
(float, ModelSimple),
(list, ModelSimple),
# (str, int),
# (str, float),
(str, datetime),
(str, date),
# (int, str),
# (float, str),
(str, file_type)
),
}
def get_simple_class(input_value):
"""Returns an input_value's simple class that we will use for type checking
Python2:
float and int will return int, where int is the python3 int backport
str and unicode will return str, where str is the python3 str backport
Note: float and int ARE both instances of int backport
Note: str_py2 and unicode_py2 are NOT both instances of str backport
Args:
input_value (class/class_instance): the item for which we will return
the simple class
"""
if isinstance(input_value, type):
# input_value is a class
return input_value
elif isinstance(input_value, tuple):
return tuple
elif isinstance(input_value, list):
return list
elif isinstance(input_value, dict):
return dict
elif isinstance(input_value, none_type):
return none_type
elif isinstance(input_value, file_type):
return file_type
elif isinstance(input_value, bool):
# this must be higher than the int check because
# isinstance(True, int) == True
return bool
elif isinstance(input_value, int):
return int
elif isinstance(input_value, datetime):
# this must be higher than the date check because
# isinstance(datetime_instance, date) == True
return datetime
elif isinstance(input_value, date):
return date
elif isinstance(input_value, str):
return str
return type(input_value)
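# Illustrative behaviour of get_simple_class (a hedged sketch, derived from the
# isinstance checks above):
#
#   get_simple_class(True)                  # -> bool (checked before int, since True is an int)
#   get_simple_class([1, 2, 3])             # -> list
#   get_simple_class(datetime(2020, 1, 1))  # -> datetime (checked before date)
#   get_simple_class(str)                   # -> str (classes are returned as-is)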
def check_allowed_values(allowed_values, input_variable_path, input_values):
"""Raises an exception if the input_values are not allowed
Args:
allowed_values (dict): the allowed_values dict
input_variable_path (tuple): the path to the input variable
input_values (list/str/int/float/date/datetime): the values that we
are checking to see if they are in allowed_values
"""
these_allowed_values = list(allowed_values[input_variable_path].values())
if (isinstance(input_values, list)
and not set(input_values).issubset(
set(these_allowed_values))):
invalid_values = ", ".join(
            map(str, set(input_values) - set(these_allowed_values)))
raise ApiValueError(
"Invalid values for `%s` [%s], must be a subset of [%s]" %
(
input_variable_path[0],
invalid_values,
", ".join(map(str, these_allowed_values))
)
)
elif (isinstance(input_values, dict)
and not set(
input_values.keys()).issubset(set(these_allowed_values))):
invalid_values = ", ".join(
map(str, set(input_values.keys()) - set(these_allowed_values)))
raise ApiValueError(
"Invalid keys in `%s` [%s], must be a subset of [%s]" %
(
input_variable_path[0],
invalid_values,
", ".join(map(str, these_allowed_values))
)
)
elif (not isinstance(input_values, (list, dict))
and input_values not in these_allowed_values):
raise ApiValueError(
"Invalid value for `%s` (%s), must be one of %s" %
(
input_variable_path[0],
input_values,
these_allowed_values
)
)
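# Example of the expected allowed_values shape (a hedged sketch):
#
#   allowed = {('status',): {'AVAILABLE': 'available', 'SOLD': 'sold'}}
#   check_allowed_values(allowed, ('status',), 'available')   # passes silently
#   check_allowed_values(allowed, ('status',), 'archived')    # raises ApiValueError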
def is_json_validation_enabled(schema_keyword, configuration=None):
"""Returns true if JSON schema validation is enabled for the specified
validation keyword. This can be used to skip JSON schema structural validation
as requested in the configuration.
Args:
schema_keyword (string): the name of a JSON schema validation keyword.
configuration (Configuration): the configuration class.
"""
return (configuration is None or
not hasattr(configuration, '_disabled_client_side_validations') or
schema_keyword not in configuration._disabled_client_side_validations)
def check_validations(
validations, input_variable_path, input_values,
configuration=None):
"""Raises an exception if the input_values are invalid
Args:
validations (dict): the validation dictionary.
input_variable_path (tuple): the path to the input variable.
input_values (list/str/int/float/date/datetime): the values that we
are checking.
configuration (Configuration): the configuration class.
"""
if input_values is None:
return
current_validations = validations[input_variable_path]
if (is_json_validation_enabled('multipleOf', configuration) and
'multiple_of' in current_validations and
isinstance(input_values, (int, float)) and
not (float(input_values) / current_validations['multiple_of']).is_integer()):
# Note 'multipleOf' will be as good as the floating point arithmetic.
raise ApiValueError(
"Invalid value for `%s`, value must be a multiple of "
"`%s`" % (
input_variable_path[0],
current_validations['multiple_of']
)
)
if (is_json_validation_enabled('maxLength', configuration) and
'max_length' in current_validations and
len(input_values) > current_validations['max_length']):
raise ApiValueError(
"Invalid value for `%s`, length must be less than or equal to "
"`%s`" % (
input_variable_path[0],
current_validations['max_length']
)
)
if (is_json_validation_enabled('minLength', configuration) and
'min_length' in current_validations and
len(input_values) < current_validations['min_length']):
raise ApiValueError(
"Invalid value for `%s`, length must be greater than or equal to "
"`%s`" % (
input_variable_path[0],
current_validations['min_length']
)
)
if (is_json_validation_enabled('maxItems', configuration) and
'max_items' in current_validations and
len(input_values) > current_validations['max_items']):
raise ApiValueError(
"Invalid value for `%s`, number of items must be less than or "
"equal to `%s`" % (
input_variable_path[0],
current_validations['max_items']
)
)
if (is_json_validation_enabled('minItems', configuration) and
'min_items' in current_validations and
len(input_values) < current_validations['min_items']):
        raise ApiValueError(
"Invalid value for `%s`, number of items must be greater than or "
"equal to `%s`" % (
input_variable_path[0],
current_validations['min_items']
)
)
items = ('exclusive_maximum', 'inclusive_maximum', 'exclusive_minimum',
'inclusive_minimum')
if (any(item in current_validations for item in items)):
if isinstance(input_values, list):
max_val = max(input_values)
min_val = min(input_values)
elif isinstance(input_values, dict):
max_val = max(input_values.values())
min_val = min(input_values.values())
else:
max_val = input_values
min_val = input_values
if (is_json_validation_enabled('exclusiveMaximum', configuration) and
'exclusive_maximum' in current_validations and
max_val >= current_validations['exclusive_maximum']):
raise ApiValueError(
"Invalid value for `%s`, must be a value less than `%s`" % (
input_variable_path[0],
current_validations['exclusive_maximum']
)
)
if (is_json_validation_enabled('maximum', configuration) and
'inclusive_maximum' in current_validations and
max_val > current_validations['inclusive_maximum']):
raise ApiValueError(
"Invalid value for `%s`, must be a value less than or equal to "
"`%s`" % (
input_variable_path[0],
current_validations['inclusive_maximum']
)
)
if (is_json_validation_enabled('exclusiveMinimum', configuration) and
'exclusive_minimum' in current_validations and
min_val <= current_validations['exclusive_minimum']):
raise ApiValueError(
"Invalid value for `%s`, must be a value greater than `%s`" %
(
input_variable_path[0],
                    current_validations['exclusive_minimum']
)
)
if (is_json_validation_enabled('minimum', configuration) and
'inclusive_minimum' in current_validations and
min_val < current_validations['inclusive_minimum']):
raise ApiValueError(
"Invalid value for `%s`, must be a value greater than or equal "
"to `%s`" % (
input_variable_path[0],
current_validations['inclusive_minimum']
)
)
flags = current_validations.get('regex', {}).get('flags', 0)
if (is_json_validation_enabled('pattern', configuration) and
'regex' in current_validations and
not re.search(current_validations['regex']['pattern'],
input_values, flags=flags)):
err_msg = r"Invalid value for `%s`, must match regular expression `%s`" % (
input_variable_path[0],
current_validations['regex']['pattern']
)
if flags != 0:
# Don't print the regex flags if the flags are not
# specified in the OAS document.
err_msg = r"%s with flags=`%s`" % (err_msg, flags)
raise ApiValueError(err_msg)
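# Example of the expected validations shape (a hedged sketch):
#
#   checks = {('name',): {'min_length': 1, 'max_length': 5}}
#   check_validations(checks, ('name',), 'abc')      # passes
#   check_validations(checks, ('name',), 'abcdef')   # raises ApiValueError (maxLength)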
def order_response_types(required_types):
"""Returns the required types sorted in coercion order
Args:
required_types (list/tuple): collection of classes or instance of
list or dict with class information inside it.
Returns:
(list): coercion order sorted collection of classes or instance
of list or dict with class information inside it.
"""
def index_getter(class_or_instance):
if isinstance(class_or_instance, list):
return COERCION_INDEX_BY_TYPE[list]
elif isinstance(class_or_instance, dict):
return COERCION_INDEX_BY_TYPE[dict]
elif (inspect.isclass(class_or_instance)
and issubclass(class_or_instance, ModelComposed)):
return COERCION_INDEX_BY_TYPE[ModelComposed]
elif (inspect.isclass(class_or_instance)
and issubclass(class_or_instance, ModelNormal)):
return COERCION_INDEX_BY_TYPE[ModelNormal]
elif (inspect.isclass(class_or_instance)
and issubclass(class_or_instance, ModelSimple)):
return COERCION_INDEX_BY_TYPE[ModelSimple]
elif class_or_instance in COERCION_INDEX_BY_TYPE:
return COERCION_INDEX_BY_TYPE[class_or_instance]
raise ApiValueError("Unsupported type: %s" % class_or_instance)
sorted_types = sorted(
required_types,
key=lambda class_or_instance: index_getter(class_or_instance)
)
return sorted_types
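# Example (a hedged sketch; `Pet` stands for any generated ModelNormal subclass):
#
#   order_response_types([str, none_type, Pet])   # -> [Pet, none_type, str]
#   (model classes sort first and primitives last, per COERCION_INDEX_BY_TYPE)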
def remove_uncoercible(required_types_classes, current_item, spec_property_naming,
must_convert=True):
"""Only keeps the type conversions that are possible
Args:
required_types_classes (tuple): tuple of classes that are required
these should be ordered by COERCION_INDEX_BY_TYPE
spec_property_naming (bool): True if the variable names in the input
data are serialized names as specified in the OpenAPI document.
False if the variables names in the input data are python
variable names in PEP-8 snake case.
current_item (any): the current item (input data) to be converted
Keyword Args:
must_convert (bool): if True the item to convert is of the wrong
type and we want a big list of coercibles
if False, we want a limited list of coercibles
Returns:
(list): the remaining coercible required types, classes only
"""
current_type_simple = get_simple_class(current_item)
results_classes = []
for required_type_class in required_types_classes:
# convert our models to OpenApiModel
required_type_class_simplified = required_type_class
if isinstance(required_type_class_simplified, type):
if issubclass(required_type_class_simplified, ModelComposed):
required_type_class_simplified = ModelComposed
elif issubclass(required_type_class_simplified, ModelNormal):
required_type_class_simplified = ModelNormal
elif issubclass(required_type_class_simplified, ModelSimple):
required_type_class_simplified = ModelSimple
if required_type_class_simplified == current_type_simple:
# don't consider converting to one's own class
continue
class_pair = (current_type_simple, required_type_class_simplified)
if must_convert and class_pair in COERCIBLE_TYPE_PAIRS[spec_property_naming]:
results_classes.append(required_type_class)
elif class_pair in UPCONVERSION_TYPE_PAIRS:
results_classes.append(required_type_class)
return results_classes
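# Example (a hedged sketch): a str payload from the server can still be coerced
# to datetime, while a conversion to its own class is never attempted:
#
#   remove_uncoercible((datetime, str), '2020-01-02T03:04:05Z', True)
#   # -> [datetime]   (str -> datetime is coercible; str -> str is skipped)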
def get_discriminated_classes(cls):
"""
Returns all the classes that a discriminator converts to
TODO: lru_cache this
"""
possible_classes = []
key = list(cls.discriminator.keys())[0]
if is_type_nullable(cls):
possible_classes.append(cls)
for discr_cls in cls.discriminator[key].values():
if hasattr(discr_cls, 'discriminator') and discr_cls.discriminator is not None:
possible_classes.extend(get_discriminated_classes(discr_cls))
else:
possible_classes.append(discr_cls)
return possible_classes
def get_possible_classes(cls, from_server_context):
# TODO: lru_cache this
possible_classes = [cls]
if from_server_context:
return possible_classes
if hasattr(cls, 'discriminator') and cls.discriminator is not None:
possible_classes = []
possible_classes.extend(get_discriminated_classes(cls))
elif issubclass(cls, ModelComposed):
possible_classes.extend(composed_model_input_classes(cls))
return possible_classes
def get_required_type_classes(required_types_mixed, spec_property_naming):
"""Converts the tuple required_types into a tuple and a dict described
below
Args:
required_types_mixed (tuple/list): will contain either classes or
instance of list or dict
spec_property_naming (bool): if True these values came from the
server, and we use the data types in our endpoints.
If False, we are client side and we need to include
oneOf and discriminator classes inside the data types in our endpoints
Returns:
(valid_classes, dict_valid_class_to_child_types_mixed):
valid_classes (tuple): the valid classes that the current item
should be
dict_valid_class_to_child_types_mixed (dict):
valid_class (class): this is the key
child_types_mixed (list/dict/tuple): describes the valid child
types
"""
valid_classes = []
child_req_types_by_current_type = {}
for required_type in required_types_mixed:
if isinstance(required_type, list):
valid_classes.append(list)
child_req_types_by_current_type[list] = required_type
elif isinstance(required_type, tuple):
valid_classes.append(tuple)
child_req_types_by_current_type[tuple] = required_type
elif isinstance(required_type, dict):
valid_classes.append(dict)
child_req_types_by_current_type[dict] = required_type[str]
else:
valid_classes.extend(get_possible_classes(required_type, spec_property_naming))
return tuple(valid_classes), child_req_types_by_current_type
def change_keys_js_to_python(input_dict, model_class):
"""
Converts from javascript_key keys in the input_dict to python_keys in
the output dict using the mapping in model_class.
    If the input_dict contains a key which is not declared in the model_class,
the key is added to the output dict as is. The assumption is the model_class
may have undeclared properties (additionalProperties attribute in the OAS
document).
"""
if getattr(model_class, 'attribute_map', None) is None:
return input_dict
output_dict = {}
reversed_attr_map = {value: key for key, value in
model_class.attribute_map.items()}
for javascript_key, value in input_dict.items():
python_key = reversed_attr_map.get(javascript_key)
if python_key is None:
# if the key is unknown, it is in error or it is an
# additionalProperties variable
python_key = javascript_key
output_dict[python_key] = value
return output_dict
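# Example (a hedged sketch; `Pet` is a hypothetical generated model whose
# attribute_map is {'pet_type': 'petType'}):
#
#   change_keys_js_to_python({'petType': 'dog', 'extra': 1}, Pet)
#   # -> {'pet_type': 'dog', 'extra': 1}   (unknown keys pass through unchanged)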
def get_type_error(var_value, path_to_item, valid_classes, key_type=False):
error_msg = type_error_message(
var_name=path_to_item[-1],
var_value=var_value,
valid_classes=valid_classes,
key_type=key_type
)
return ApiTypeError(
error_msg,
path_to_item=path_to_item,
valid_classes=valid_classes,
key_type=key_type
)
def deserialize_primitive(data, klass, path_to_item):
"""Deserializes string to primitive type.
:param data: str/int/float
:param klass: str/class the class to convert to
:return: int, float, str, bool, date, datetime
"""
additional_message = ""
try:
if klass in {datetime, date}:
additional_message = (
"If you need your parameter to have a fallback "
"string value, please set its type as `type: {}` in your "
"spec. That allows the value to be any type. "
)
if klass == datetime:
if len(data) < 8:
raise ValueError("This is not a datetime")
# The string should be in iso8601 datetime format.
parsed_datetime = parse(data)
date_only = (
parsed_datetime.hour == 0 and
parsed_datetime.minute == 0 and
parsed_datetime.second == 0 and
parsed_datetime.tzinfo is None and
8 <= len(data) <= 10
)
if date_only:
raise ValueError("This is a date, not a datetime")
return parsed_datetime
elif klass == date:
if len(data) < 8:
raise ValueError("This is not a date")
return parse(data).date()
else:
converted_value = klass(data)
if isinstance(data, str) and klass == float:
if str(converted_value) != data:
# '7' -> 7.0 -> '7.0' != '7'
raise ValueError('This is not a float')
return converted_value
except (OverflowError, ValueError) as ex:
# parse can raise OverflowError
raise ApiValueError(
"{0}Failed to parse {1} as {2}".format(
additional_message, repr(data), klass.__name__
),
path_to_item=path_to_item
) from ex
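# Examples (a hedged sketch):
#
#   deserialize_primitive('2020-01-02', date, ['created'])   # -> datetime.date(2020, 1, 2)
#   deserialize_primitive('3.5', float, ['price'])           # -> 3.5
#   deserialize_primitive('7', float, ['price'])             # raises ApiValueError,
#                                                            # since '7' -> 7.0 -> '7.0' != '7'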
def get_discriminator_class(model_class,
discr_name,
discr_value, cls_visited):
"""Returns the child class specified by the discriminator.
Args:
model_class (OpenApiModel): the model class.
discr_name (string): the name of the discriminator property.
discr_value (any): the discriminator value.
cls_visited (list): list of model classes that have been visited.
Used to determine the discriminator class without
visiting circular references indefinitely.
Returns:
used_model_class (class/None): the chosen child class that will be used
to deserialize the data, for example dog.Dog.
If a class is not found, None is returned.
"""
if model_class in cls_visited:
# The class has already been visited and no suitable class was found.
return None
cls_visited.append(model_class)
used_model_class = None
if discr_name in model_class.discriminator:
class_name_to_discr_class = model_class.discriminator[discr_name]
used_model_class = class_name_to_discr_class.get(discr_value)
if used_model_class is None:
# We didn't find a discriminated class in class_name_to_discr_class.
# So look in the ancestor or descendant discriminators
# The discriminator mapping may exist in a descendant (anyOf, oneOf)
# or ancestor (allOf).
# Ancestor example: in the GrandparentAnimal -> ParentPet -> ChildCat
# hierarchy, the discriminator mappings may be defined at any level
# in the hierarchy.
# Descendant example: mammal -> whale/zebra/Pig -> BasquePig/DanishPig
# if we try to make BasquePig from mammal, we need to travel through
# the oneOf descendant discriminators to find BasquePig
descendant_classes = model_class._composed_schemas.get('oneOf', ()) + \
model_class._composed_schemas.get('anyOf', ())
ancestor_classes = model_class._composed_schemas.get('allOf', ())
possible_classes = descendant_classes + ancestor_classes
for cls in possible_classes:
# Check if the schema has inherited discriminators.
if hasattr(cls, 'discriminator') and cls.discriminator is not None:
used_model_class = get_discriminator_class(
cls, discr_name, discr_value, cls_visited)
if used_model_class is not None:
return used_model_class
return used_model_class
def deserialize_model(model_data, model_class, path_to_item, check_type,
configuration, spec_property_naming):
"""Deserializes model_data to model instance.
Args:
model_data (int/str/float/bool/none_type/list/dict): data to instantiate the model
model_class (OpenApiModel): the model class
path_to_item (list): path to the model in the received data
        check_type (bool): whether to check the data type for the values in
the model
configuration (Configuration): the instance to use to convert files
spec_property_naming (bool): True if the variable names in the input
data are serialized names as specified in the OpenAPI document.
False if the variables names in the input data are python
variable names in PEP-8 snake case.
Returns:
model instance
Raise:
ApiTypeError
ApiValueError
ApiKeyError
"""
kw_args = dict(_check_type=check_type,
_path_to_item=path_to_item,
_configuration=configuration,
_spec_property_naming=spec_property_naming)
if issubclass(model_class, ModelSimple):
return model_class._new_from_openapi_data(model_data, **kw_args)
elif isinstance(model_data, list):
return model_class._new_from_openapi_data(*model_data, **kw_args)
if isinstance(model_data, dict):
kw_args.update(model_data)
return model_class._new_from_openapi_data(**kw_args)
elif isinstance(model_data, PRIMITIVE_TYPES):
return model_class._new_from_openapi_data(model_data, **kw_args)
def deserialize_file(response_data, configuration, content_disposition=None):
"""Deserializes body to file
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
Args:
        response_data (str): the file data to write
configuration (Configuration): the instance to use to convert files
Keyword Args:
content_disposition (str): the value of the Content-Disposition
header
Returns:
(file_type): the deserialized file which is open
The user is responsible for closing and reading the file
"""
fd, path = tempfile.mkstemp(dir=configuration.temp_folder_path)
os.close(fd)
os.remove(path)
if content_disposition:
filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
content_disposition).group(1)
path = os.path.join(os.path.dirname(path), filename)
with open(path, "wb") as f:
if isinstance(response_data, str):
# change str to bytes so we can write it
response_data = response_data.encode('utf-8')
f.write(response_data)
f = open(path, "rb")
return f
def attempt_convert_item(input_value, valid_classes, path_to_item,
configuration, spec_property_naming, key_type=False,
must_convert=False, check_type=True):
"""
Args:
input_value (any): the data to convert
valid_classes (any): the classes that are valid
path_to_item (list): the path to the item to convert
configuration (Configuration): the instance to use to convert files
spec_property_naming (bool): True if the variable names in the input
data are serialized names as specified in the OpenAPI document.
False if the variables names in the input data are python
variable names in PEP-8 snake case.
key_type (bool): if True we need to convert a key type (not supported)
must_convert (bool): if True we must convert
check_type (bool): if True we check the type or the returned data in
ModelComposed/ModelNormal/ModelSimple instances
Returns:
instance (any) the fixed item
Raises:
ApiTypeError
ApiValueError
ApiKeyError
"""
valid_classes_ordered = order_response_types(valid_classes)
valid_classes_coercible = remove_uncoercible(
valid_classes_ordered, input_value, spec_property_naming)
if not valid_classes_coercible or key_type:
# we do not handle keytype errors, json will take care
# of this for us
if configuration is None or not configuration.discard_unknown_keys:
raise get_type_error(input_value, path_to_item, valid_classes,
key_type=key_type)
for valid_class in valid_classes_coercible:
try:
if issubclass(valid_class, OpenApiModel):
return deserialize_model(input_value, valid_class,
path_to_item, check_type,
configuration, spec_property_naming)
elif valid_class == file_type:
return deserialize_file(input_value, configuration)
return deserialize_primitive(input_value, valid_class,
path_to_item)
except (ApiTypeError, ApiValueError, ApiKeyError) as conversion_exc:
if must_convert:
raise conversion_exc
# if we have conversion errors when must_convert == False
# we ignore the exception and move on to the next class
continue
# we were unable to convert, must_convert == False
return input_value
def is_type_nullable(input_type):
"""
Returns true if None is an allowed value for the specified input_type.
A type is nullable if at least one of the following conditions is true:
    1. The OAS 'nullable' attribute has been specified,
    2. The type is the 'null' type,
    3. The type is an anyOf/oneOf composed schema, and a child schema is
       the 'null' type.
Args:
input_type (type): the class of the input_value that we are
checking
Returns:
bool
"""
if input_type is none_type:
return True
if issubclass(input_type, OpenApiModel) and input_type._nullable:
return True
if issubclass(input_type, ModelComposed):
# If oneOf/anyOf, check if the 'null' type is one of the allowed types.
for t in input_type._composed_schemas.get('oneOf', ()):
if is_type_nullable(t): return True
for t in input_type._composed_schemas.get('anyOf', ()):
if is_type_nullable(t): return True
return False
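# Examples (a hedged sketch):
#
#   is_type_nullable(none_type)   # -> True
#   is_type_nullable(str)         # -> False (plain builtins are never nullable here)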
def is_valid_type(input_class_simple, valid_classes):
"""
Args:
input_class_simple (class): the class of the input_value that we are
checking
valid_classes (tuple): the valid classes that the current item
should be
Returns:
bool
"""
if issubclass(input_class_simple, OpenApiModel) and \
valid_classes == (bool, date, datetime, dict, float, int, list, str, none_type,) or \
"enum" in str(valid_classes):
return True
valid_type = input_class_simple in valid_classes
if not valid_type and (
issubclass(input_class_simple, OpenApiModel) or
input_class_simple is none_type):
for valid_class in valid_classes:
if input_class_simple is none_type and is_type_nullable(valid_class):
# Schema is oneOf/anyOf and the 'null' type is one of the allowed types.
return True
if issubclass(input_class_simple, valid_class):
return True
if not (issubclass(valid_class, OpenApiModel) and valid_class.discriminator):
continue
discr_propertyname_py = list(valid_class.discriminator.keys())[0]
discriminator_classes = (
valid_class.discriminator[discr_propertyname_py].values()
)
valid_type = is_valid_type(input_class_simple, discriminator_classes)
if valid_type:
return True
return valid_type
def validate_and_convert_types(input_value, required_types_mixed, path_to_item,
                               spec_property_naming, _check_type, configuration=None):
    """Raises an ApiTypeError if there is a problem, otherwise returns the value
Args:
input_value (any): the data to validate/convert
required_types_mixed (list/dict/tuple): A list of
valid classes, or a list tuples of valid classes, or a dict where
the value is a tuple of value classes
path_to_item: (list) the path to the data being validated
this stores a list of keys or indices to get to the data being
validated
spec_property_naming (bool): True if the variable names in the input
data are serialized names as specified in the OpenAPI document.
False if the variables names in the input data are python
variable names in PEP-8 snake case.
_check_type: (boolean) if true, type will be checked and conversion
will be attempted.
configuration: (Configuration): the configuration class to use
when converting file_type items.
If passed, conversion will be attempted when possible
If not passed, no conversions will be attempted and
exceptions will be raised
Returns:
the correctly typed value
Raises:
ApiTypeError
"""
results = get_required_type_classes(required_types_mixed, spec_property_naming)
valid_classes, child_req_types_by_current_type = results
input_class_simple = get_simple_class(input_value)
valid_type = is_valid_type(input_class_simple, valid_classes)
if not valid_type:
if configuration:
# if input_value is not valid_type try to convert it
converted_instance = attempt_convert_item(
input_value,
valid_classes,
path_to_item,
configuration,
spec_property_naming,
key_type=False,
must_convert=True,
check_type=_check_type
)
return converted_instance
else:
raise get_type_error(input_value, path_to_item, valid_classes,
key_type=False)
# input_value's type is in valid_classes
if len(valid_classes) > 1 and configuration:
# there are valid classes which are not the current class
valid_classes_coercible = remove_uncoercible(
valid_classes, input_value, spec_property_naming, must_convert=False)
if valid_classes_coercible:
converted_instance = attempt_convert_item(
input_value,
valid_classes_coercible,
path_to_item,
configuration,
spec_property_naming,
key_type=False,
must_convert=False,
check_type=_check_type
)
return converted_instance
if child_req_types_by_current_type == {}:
# all types are of the required types and there are no more inner
# variables left to look at
return input_value
inner_required_types = child_req_types_by_current_type.get(
type(input_value)
)
if inner_required_types is None:
# for this type, there are not more inner variables left to look at
return input_value
if isinstance(input_value, list):
if input_value == []:
# allow an empty list
return input_value
for index, inner_value in enumerate(input_value):
inner_path = list(path_to_item)
inner_path.append(index)
input_value[index] = validate_and_convert_types(
inner_value,
inner_required_types,
inner_path,
spec_property_naming,
_check_type,
configuration=configuration
)
elif isinstance(input_value, dict):
if input_value == {}:
# allow an empty dict
return input_value
for inner_key, inner_val in input_value.items():
inner_path = list(path_to_item)
inner_path.append(inner_key)
if get_simple_class(inner_key) != str:
raise get_type_error(inner_key, inner_path, valid_classes,
key_type=True)
input_value[inner_key] = validate_and_convert_types(
inner_val,
inner_required_types,
inner_path,
spec_property_naming,
_check_type,
configuration=configuration
)
return input_value
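# Example (a hedged sketch; `cfg` stands for the client's Configuration instance):
# deserializing a server-side list of date strings into python date objects.
#
#   validate_and_convert_types(['2020-01-02'], ([date],), ['dates'],
#                              True, True, configuration=cfg)
#   # -> [datetime.date(2020, 1, 2)]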
def model_to_dict(model_instance, serialize=True):
"""Returns the model properties as a dict
Args:
model_instance (one of your model instances): the model instance that
will be converted to a dict.
Keyword Args:
serialize (bool): if True, the keys in the dict will be values from
attribute_map
"""
result = {}
extract_item = lambda item: (item[0], model_to_dict(item[1], serialize=serialize)) if hasattr(item[1], '_data_store') else item
model_instances = [model_instance]
if model_instance._composed_schemas:
model_instances.extend(model_instance._composed_instances)
seen_json_attribute_names = set()
used_fallback_python_attribute_names = set()
py_to_json_map = {}
for model_instance in model_instances:
for attr, value in model_instance._data_store.items():
if serialize:
# we use get here because additional property key names do not
# exist in attribute_map
try:
attr = model_instance.attribute_map.get(attr)
                    if attr is None:
continue
py_to_json_map.update(model_instance.attribute_map)
seen_json_attribute_names.add(attr)
except KeyError:
used_fallback_python_attribute_names.add(attr)
if isinstance(value, list):
if not value:
# empty list or None
result[attr] = value
else:
res = []
for v in value:
if isinstance(v, PRIMITIVE_TYPES) or v is None:
res.append(v)
elif isinstance(v, ModelSimple):
res.append(v.value)
elif isinstance(v, dict):
res.append(dict(map(
extract_item,
v.items()
)))
else:
res.append(model_to_dict(v, serialize=serialize))
result[attr] = res
elif isinstance(value, dict):
result[attr] = dict(map(
extract_item,
value.items()
))
elif isinstance(value, ModelSimple):
result[attr] = value.value
elif hasattr(value, '_data_store'):
result[attr] = model_to_dict(value, serialize=serialize)
else:
result[attr] = value
if serialize:
for python_key in used_fallback_python_attribute_names:
json_key = py_to_json_map.get(python_key)
if json_key is None:
continue
if python_key == json_key:
continue
json_key_assigned_no_need_for_python_key = json_key in seen_json_attribute_names
if json_key_assigned_no_need_for_python_key:
del result[python_key]
return result
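# Example (a hedged sketch; `pet` is an instance of a hypothetical model whose
# attribute_map is {'pet_type': 'petType'}):
#
#   model_to_dict(pet, serialize=False)   # -> {'pet_type': 'dog'}  (python keys)
#   model_to_dict(pet, serialize=True)    # -> {'petType': 'dog'}   (spec/JSON keys)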
def type_error_message(var_value=None, var_name=None, valid_classes=None,
key_type=None):
"""
Keyword Args:
var_value (any): the variable which has the type_error
        var_name (str): the name of the variable which has the type error
valid_classes (tuple): the accepted classes for current_item's
value
key_type (bool): False if our value is a value in a dict
True if it is a key in a dict
False if our item is an item in a list
"""
key_or_value = 'value'
if key_type:
key_or_value = 'key'
valid_classes_phrase = get_valid_classes_phrase(valid_classes)
msg = (
"Invalid type for variable '{0}'. Required {1} type {2} and "
"passed type was {3}".format(
var_name,
key_or_value,
valid_classes_phrase,
type(var_value).__name__,
)
)
return msg
def get_valid_classes_phrase(input_classes):
"""Returns a string phrase describing what types are allowed
"""
all_classes = list(input_classes)
all_classes = sorted(all_classes, key=lambda cls: cls.__name__)
all_class_names = [cls.__name__ for cls in all_classes]
if len(all_class_names) == 1:
return 'is {0}'.format(all_class_names[0])
return "is one of [{0}]".format(", ".join(all_class_names))
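# Examples (a hedged sketch):
#
#   get_valid_classes_phrase((int,))       # -> 'is int'
#   get_valid_classes_phrase((str, int))   # -> 'is one of [int, str]'  (sorted by name)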
def get_allof_instances(self, model_args, constant_args):
"""
Args:
self: the class we are handling
model_args (dict): var_name to var_value
used to make instances
constant_args (dict):
metadata arguments:
_check_type
_path_to_item
_spec_property_naming
_configuration
_visited_composed_classes
Returns
composed_instances (list)
"""
composed_instances = []
for allof_class in self._composed_schemas['allOf']:
try:
if constant_args.get('_spec_property_naming'):
allof_instance = allof_class._from_openapi_data(**model_args, **constant_args)
else:
allof_instance = allof_class(**model_args, **constant_args)
composed_instances.append(allof_instance)
except Exception as ex:
raise ApiValueError(
"Invalid inputs given to generate an instance of '%s'. The "
"input data was invalid for the allOf schema '%s' in the composed "
"schema '%s'. Error=%s" % (
allof_class.__name__,
allof_class.__name__,
self.__class__.__name__,
str(ex)
)
) from ex
return composed_instances
def get_oneof_instance(cls, model_kwargs, constant_kwargs, model_arg=None):
"""
Find the oneOf schema that matches the input data (e.g. payload).
If exactly one schema matches the input data, an instance of that schema
is returned.
If zero or more than one schema match the input data, an exception is raised.
In OAS 3.x, the payload MUST, by validation, match exactly one of the
schemas described by oneOf.
Args:
cls: the class we are handling
model_kwargs (dict): var_name to var_value
The input data, e.g. the payload that must match a oneOf schema
in the OpenAPI document.
constant_kwargs (dict): var_name to var_value
args that every model requires, including configuration, server
and path to item.
Kwargs:
model_arg: (int, float, bool, str, date, datetime, ModelSimple, None):
the value to assign to a primitive class or ModelSimple class
Notes:
- this is only passed in when oneOf includes types which are not object
- None is used to suppress handling of model_arg, nullable models are handled in __new__
Returns
oneof_instance (instance)
"""
if len(cls._composed_schemas['oneOf']) == 0:
return None
oneof_instances = []
# Iterate over each oneOf schema and determine if the input data
# matches the oneOf schemas.
for oneof_class in cls._composed_schemas['oneOf']:
# The composed oneOf schema allows the 'null' type and the input data
        # is the null value. This is an OAS >= 3.1 feature.
if oneof_class is none_type:
# skip none_types because we are deserializing dict data.
# none_type deserialization is handled in the __new__ method
continue
single_value_input = allows_single_value_input(oneof_class)
try:
if not single_value_input:
if constant_kwargs.get('_spec_property_naming'):
oneof_instance = oneof_class._from_openapi_data(**model_kwargs, **constant_kwargs)
else:
oneof_instance = oneof_class(**model_kwargs, **constant_kwargs)
else:
if issubclass(oneof_class, ModelSimple):
if constant_kwargs.get('_spec_property_naming'):
oneof_instance = oneof_class._from_openapi_data(model_arg, **constant_kwargs)
else:
oneof_instance = oneof_class(model_arg, **constant_kwargs)
elif oneof_class in PRIMITIVE_TYPES:
oneof_instance = validate_and_convert_types(
model_arg,
(oneof_class,),
constant_kwargs['_path_to_item'],
constant_kwargs['_spec_property_naming'],
constant_kwargs['_check_type'],
configuration=constant_kwargs['_configuration']
)
oneof_instances.append(oneof_instance)
except Exception:
pass
if len(oneof_instances) == 0:
raise ApiValueError(
"Invalid inputs given to generate an instance of %s. None "
"of the oneOf schemas matched the input data." %
cls.__name__
)
elif len(oneof_instances) > 1:
raise ApiValueError(
"Invalid inputs given to generate an instance of %s. Multiple "
"oneOf schemas matched the inputs, but a max of one is allowed." %
cls.__name__
)
return oneof_instances[0]
def get_anyof_instances(self, model_args, constant_args):
"""
Args:
self: the class we are handling
model_args (dict): var_name to var_value
The input data, e.g. the payload that must match at least one
anyOf child schema in the OpenAPI document.
constant_args (dict): var_name to var_value
args that every model requires, including configuration, server
and path to item.
Returns
anyof_instances (list)
"""
anyof_instances = []
if len(self._composed_schemas['anyOf']) == 0:
return anyof_instances
for anyof_class in self._composed_schemas['anyOf']:
# The composed oneOf schema allows the 'null' type and the input data
        # is the null value. This is an OAS >= 3.1 feature.
if anyof_class is none_type:
# skip none_types because we are deserializing dict data.
# none_type deserialization is handled in the __new__ method
continue
try:
if constant_args.get('_spec_property_naming'):
anyof_instance = anyof_class._from_openapi_data(**model_args, **constant_args)
else:
anyof_instance = anyof_class(**model_args, **constant_args)
anyof_instances.append(anyof_instance)
except Exception:
pass
if len(anyof_instances) == 0:
raise ApiValueError(
"Invalid inputs given to generate an instance of %s. None of the "
"anyOf schemas matched the inputs." %
self.__class__.__name__
)
return anyof_instances
def get_discarded_args(self, composed_instances, model_args):
"""
Gathers the args that were discarded by configuration.discard_unknown_keys
"""
model_arg_keys = model_args.keys()
discarded_args = set()
# arguments passed to self were already converted to python names
# before __init__ was called
for instance in composed_instances:
if instance.__class__ in self._composed_schemas['allOf']:
try:
keys = instance.to_dict().keys()
                discarded_keys = model_arg_keys - keys
discarded_args.update(discarded_keys)
except Exception:
# allOf integer schema will throw exception
pass
else:
try:
all_keys = set(model_to_dict(instance, serialize=False).keys())
js_keys = model_to_dict(instance, serialize=True).keys()
all_keys.update(js_keys)
discarded_keys = model_arg_keys - all_keys
discarded_args.update(discarded_keys)
except Exception:
# allOf integer schema will throw exception
pass
return discarded_args
def validate_get_composed_info(constant_args, model_args, self):
"""
For composed schemas, generate schema instances for
all schemas in the oneOf/anyOf/allOf definition. If additional
properties are allowed, also assign those properties on
all matched schemas that contain additionalProperties.
Openapi schemas are python classes.
Exceptions are raised if:
- 0 or > 1 oneOf schema matches the model_args input data
- no anyOf schema matches the model_args input data
- any of the allOf schemas do not match the model_args input data
Args:
constant_args (dict): these are the args that every model requires
model_args (dict): these are the required and optional spec args that
were passed in to make this model
self (class): the class that we are instantiating
This class contains self._composed_schemas
Returns:
composed_info (list): length three
composed_instances (list): the composed instances which are not
self
var_name_to_model_instances (dict): a dict going from var_name
to the model_instance which holds that var_name
the model_instance may be self or an instance of one of the
classes in self.composed_instances()
additional_properties_model_instances (list): a list of the
model instances which have the property
additional_properties_type. This list can include self
"""
# create composed_instances
composed_instances = []
allof_instances = get_allof_instances(self, model_args, constant_args)
composed_instances.extend(allof_instances)
oneof_instance = get_oneof_instance(self.__class__, model_args, constant_args)
if oneof_instance is not None:
composed_instances.append(oneof_instance)
anyof_instances = get_anyof_instances(self, model_args, constant_args)
composed_instances.extend(anyof_instances)
"""
set additional_properties_model_instances
additional properties must be evaluated at the schema level
so self's additional properties are most important
If self is a composed schema with:
- no properties defined in self
- additionalProperties: False
Then for object payloads every property is an additional property
and they are not allowed, so only empty dict is allowed
Properties must be set on all matching schemas
    so when a property is assigned to a composed instance, it must be set on all
composed instances regardless of additionalProperties presence
keeping it to prevent breaking changes in v5.0.1
TODO remove cls._additional_properties_model_instances in 6.0.0
"""
additional_properties_model_instances = []
if self.additional_properties_type is not None:
additional_properties_model_instances = [self]
"""
no need to set properties on self in here, they will be set in __init__
By here all composed schema oneOf/anyOf/allOf instances have their properties set using
model_args
"""
discarded_args = get_discarded_args(self, composed_instances, model_args)
# map variable names to composed_instances
var_name_to_model_instances = {}
for prop_name in model_args:
if prop_name not in discarded_args:
var_name_to_model_instances[prop_name] = [self] + composed_instances
return [
composed_instances,
var_name_to_model_instances,
additional_properties_model_instances,
discarded_args
    ]
# --- flashcardscode/accountlogin.py (from the Flashcards Terminal App 0.0.1 package) ---
import os
import config
import getpass
def check_account():
while True:
username = input('What is your username? ')
#Checks username.txt for user's inputted username
with open('username.txt', 'r') as searchUsername:
textFileSearch = searchUsername.readlines()
for row in textFileSearch:
findUserName = row.find(username)
if findUserName == 0:
print('\nWelcome back, {}.'.format(username))
global workingFolder
workingFolder = r'/Users/' + getpass.getuser() + r'/Documents/Flashcards Program/Users/' + username
config.workingFolder = workingFolder
#return instead of break - return ends function rather than just breaking loop
return
#this code will only run if the user inputted username was not found
print('\n{} is not a registered username.\n'.format(username))
loopBreak = input('Are you sure you have an account? If you do not have an account, type \'create account.\' ')
if loopBreak.lower() == 'create account':
create_account()
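# Note (assumption inferred from the code above and from create_account below):
# username.txt is expected to hold one registered username per line, e.g.
#   alice
#   bob
# check_account() treats a line as a match only when the username starts the line
# (row.find(username) == 0).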
def create_account():
while True:
username = input('\nCreate a username for your account. If you already have an account, type \'have account\': ')
if username.lower() == 'have account':
check_account()
return
try:
if username == '\'\'' or username == '\"\"':
raise Exception('Cannot use {} as a username'.format(username))
newPath = os.path.join(r'/Users/' + getpass.getuser() + r'/Documents/Flashcards Program/Users/' + username)
os.makedirs(newPath)
with open('username.txt', 'a') as usernameHolder:
usernameHolder.write(username + '\n')
print('\nYour account has successfully been created. You are currently logged in as: {}\n'.format(username))
config.workingFolder = newPath
break
        except FileExistsError:
            print('This user has already been created. Please try another username.')
        except Exception:
            print('\nCannot use {} as a username. Please try a different one. '.format(username))
def main():
if config.error_control('yes','no', '\nDo you have an account? (Enter Yes or No) ') == True:
check_account()
else:
create_account()
#User account is confirmed by this point. 'Welcome back user' message pops up
if __name__ == "__main__":
    main()
// --- geonode/static/lib/js/button.js (Bootstrap button.js v3.4.1, bundled with GeoNode 3.2.0) ---
+function ($) {
'use strict';
// BUTTON PUBLIC CLASS DEFINITION
// ==============================
var Button = function (element, options) {
this.$element = $(element)
this.options = $.extend({}, Button.DEFAULTS, options)
this.isLoading = false
}
Button.VERSION = '3.4.1'
Button.DEFAULTS = {
loadingText: 'loading...'
}
Button.prototype.setState = function (state) {
var d = 'disabled'
var $el = this.$element
var val = $el.is('input') ? 'val' : 'html'
var data = $el.data()
state += 'Text'
if (data.resetText == null) $el.data('resetText', $el[val]())
// push to event loop to allow forms to submit
setTimeout($.proxy(function () {
$el[val](data[state] == null ? this.options[state] : data[state])
if (state == 'loadingText') {
this.isLoading = true
$el.addClass(d).attr(d, d).prop(d, true)
} else if (this.isLoading) {
this.isLoading = false
$el.removeClass(d).removeAttr(d).prop(d, false)
}
}, this), 0)
}
Button.prototype.toggle = function () {
var changed = true
var $parent = this.$element.closest('[data-toggle="buttons"]')
if ($parent.length) {
var $input = this.$element.find('input')
if ($input.prop('type') == 'radio') {
if ($input.prop('checked')) changed = false
$parent.find('.active').removeClass('active')
this.$element.addClass('active')
} else if ($input.prop('type') == 'checkbox') {
if (($input.prop('checked')) !== this.$element.hasClass('active')) changed = false
this.$element.toggleClass('active')
}
$input.prop('checked', this.$element.hasClass('active'))
if (changed) $input.trigger('change')
} else {
this.$element.attr('aria-pressed', !this.$element.hasClass('active'))
this.$element.toggleClass('active')
}
}
// BUTTON PLUGIN DEFINITION
// ========================
function Plugin(option) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.button')
var options = typeof option == 'object' && option
if (!data) $this.data('bs.button', (data = new Button(this, options)))
if (option == 'toggle') data.toggle()
else if (option) data.setState(option)
})
}
var old = $.fn.button
$.fn.button = Plugin
$.fn.button.Constructor = Button
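  // Usage sketch (standard Bootstrap 3 button API, shown for reference):
  //   $('#send').button('loading')   // swap in data-loading-text and disable the button
  //   $('#send').button('reset')     // restore the original text
  //   $('#send').button('toggle')    // toggle the 'active' state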
// BUTTON NO CONFLICT
// ==================
$.fn.button.noConflict = function () {
$.fn.button = old
return this
}
// BUTTON DATA-API
// ===============
$(document)
.on('click.bs.button.data-api', '[data-toggle^="button"]', function (e) {
var $btn = $(e.target).closest('.btn')
Plugin.call($btn, 'toggle')
if (!($(e.target).is('input[type="radio"], input[type="checkbox"]'))) {
// Prevent double click on radios, and the double selections (so cancellation) on checkboxes
e.preventDefault()
// The target component still receive the focus
if ($btn.is('input,button')) $btn.trigger('focus')
else $btn.find('input:visible,button:visible').first().trigger('focus')
}
})
.on('focus.bs.button.data-api blur.bs.button.data-api', '[data-toggle^="button"]', function (e) {
$(e.target).closest('.btn').toggleClass('focus', /^focus(in)?$/.test(e.type))
})
}(jQuery);
/Booktype-1.5.tar.gz/Booktype-1.5/lib/booki/site_static/xinha/plugins/TableOperations/TableOperations.js | Xinha.Config.prototype.TableOperations={showButtons:true};function TableOperations(e){this.editor=e;var a=e.config;var h=TableOperations.btnList;var b=this;a.removeToolbarElement(" inserttable toggleborders ");var f=["linebreak","inserttable","toggleborders"];for(var d=0;d<h.length;++d){var c=h[d];if(!c){if(a.TableOperations.showButtons){f.push("separator")}}else{var g="TO-"+c[0];a.registerButton(g,Xinha._lc(c[2],"TableOperations"),e.imgURL(c[0]+".gif","TableOperations"),false,function(i,j){b.buttonPress(i,j)},c[1]);if(a.TableOperations.showButtons){f.push(g)}}}a.toolbar.push(f);if(typeof PopupWin=="undefined"){Xinha._loadback(_editor_url+"modules/Dialogs/popupwin.js")}if(typeof Xinha.InlineStyler=="undefined"){Xinha._loadback(_editor_url+"modules/InlineStyler/InlineStyler.js")}}TableOperations._pluginInfo={name:"TableOperations",version:"1.0",developer:"Mihai Bazon",developer_url:"http://dynarch.com/mishoo/",c_owner:"Mihai Bazon",sponsor:"Zapatec Inc.",sponsor_url:"http://www.bloki.com",license:"htmlArea"};TableOperations.prototype._lc=function(a){return Xinha._lc(a,"TableOperations")};TableOperations.prototype.getClosest=function(c){var f=this.editor;var e=f.getAllAncestors();var a=null;c=(""+c).toLowerCase();for(var b=0;b<e.length;++b){var d=e[b];if(d.tagName.toLowerCase()==c){a=d;break}}return a};TableOperations.prototype.buttonPress=function(e,k){this.editor=e;var f=Xinha.is_gecko?"<br />":"";function g(I){var H=I.getElementsByTagName("td");for(var G=H.length;--G>=0;){var J=H[G];J.rowSpan=1;J.innerHTML=f}}function y(L){var K=parseInt(""+L.rowSpan);var J=parseInt(""+L.colSpan);L.rowSpan=1;a=L.parentNode;var H=a.rowIndex;var i=a.parentNode.rows;var G=L.cellIndex;while(--K>0){a=i[++H];var I=e._doc.createElement("td");I.colSpan=L.colSpan;I.innerHTML=f;a.insertBefore(I,a.cells[G])}e.forceRedraw();e.updateToolbar()}function s(I){var H=parseInt(""+I.colSpan);I.colSpan=1;a=I.parentNode;var G=I.nextSibling;while(--H>0){var i=e._doc.createElement("td");i.rowSpan=I.rowSpan;i.innerHTML=f;a.insertBefore(i,G)}e.forceRedraw();e.updateToolbar()}function F(I){var H=parseInt(""+I.colSpan);s(I);var i=I.parentNode.cells;var G=I.cellIndex;while(H-->0){y(i[G++])}}function z(i){var G=i.nextSibling;while(G&&G.nodeType!=1){G=G.nextSibling}if(!G){G=i.previousSibling;while(G&&G.nodeType!=1){G=G.previousSibling}}if(!G){G=i.parentNode}e.selectNodeContents(G)}function l(S,H,O,G,J){var V=[];var T=[];try{for(C=O;C<O+J;C++){var U=S.rows[C];for(K=H;K<H+G;K++){if(U.cells[K].colSpan>1||U.cells[K].rowSpan>1){F(U.cells[K])}T.push(U.cells[K])}if(T.length>0){V.push(T);T=[]}}}catch(N){alert("Invalid selection");return false}var Q=V[0][0].parentNode.rowIndex;var P=V[V.length-1][0].parentNode.rowIndex;var M=V[V.length-1][0].rowSpan;var L="";for(C=0;C<V.length;++C){var T=V[C];for(var K=0;K<T.length;++K){var R=T[K];L+=R.innerHTML;(C||K)&&(R.parentNode.removeChild(R))}}var I=V[0][0];I.innerHTML=L;I.rowSpan=P-Q+M;var i=0;for(K=0;K<V[0].length;K++){i+=V[0][K].colSpan}I.colSpan=i;e.selectNodeContents(I);e.forceRedraw();e.focusEditor()}switch(k){case"TO-row-insert-above":case"TO-row-insert-under":var a=this.getClosest("tr");if(!a){break}var q=a.cloneNode(true);g(q);a.parentNode.insertBefore(q,/under/.test(k)?a.nextSibling:a);e.forceRedraw();e.focusEditor();break;case"TO-row-delete":var a=this.getClosest("tr");if(!a){break}var u=a.parentNode;if(u.rows.length==1){alert(Xinha._lc("Xinha cowardly refuses to delete the last row in 
table.","TableOperations"));break}z(a);u.removeChild(a);e.forceRedraw();e.focusEditor();e.updateToolbar();break;case"TO-row-split":var m=this.getClosest("td");if(!m){break}y(m);break;case"TO-col-insert-before":case"TO-col-insert-after":var m=this.getClosest("td");if(!m){break}var p=m.parentNode.parentNode.rows;var j=m.cellIndex;var n=(m.parentNode.cells.length==j+1);for(var C=p.length;--C>=0;){var a=p[C];var B=e._doc.createElement("td");B.innerHTML=f;if(n&&Xinha.is_ie){a.insertBefore(B)}else{var h=a.cells[j+(/after/.test(k)?1:0)];a.insertBefore(B,h)}}e.focusEditor();break;case"TO-col-split":var m=this.getClosest("td");if(!m){break}s(m);break;case"TO-col-delete":var m=this.getClosest("td");if(!m){break}var j=m.cellIndex;if(m.parentNode.cells.length==1){alert(Xinha._lc("Xinha cowardly refuses to delete the last column in table.","TableOperations"));break}z(m);var p=m.parentNode.parentNode.rows;for(var C=p.length;--C>=0;){var a=p[C];a.removeChild(a.cells[j])}e.forceRedraw();e.focusEditor();e.updateToolbar();break;case"TO-cell-split":var m=this.getClosest("td");if(!m){break}F(m);break;case"TO-cell-insert-before":case"TO-cell-insert-after":var m=this.getClosest("td");if(!m){break}var a=m.parentNode;var B=e._doc.createElement("td");B.innerHTML=f;a.insertBefore(B,/after/.test(k)?m.nextSibling:m);e.forceRedraw();e.focusEditor();break;case"TO-cell-delete":var m=this.getClosest("td");if(!m){break}if(m.parentNode.cells.length==1){alert(Xinha._lc("Xinha cowardly refuses to delete the last cell in row.","TableOperations"));break}z(m);m.parentNode.removeChild(m);e.forceRedraw();e.updateToolbar();break;case"TO-cell-merge":var A=e._getSelection();if(!Xinha.is_ie&&A.rangeCount>1){var t=A.getRangeAt(0);var m=t.startContainer.childNodes[t.startOffset];var a=m.parentNode;var o=m.cellIndex;var r=a.rowIndex;var x=0;var w=r;var c=0;var v=0;var d,E;for(C=0;C<A.rangeCount;C++){t=A.getRangeAt(C);d=t.startContainer.childNodes[t.startOffset];E=d.parentNode;if(E.rowIndex!=w){w=E.rowIndex;v=0}v+=d.colSpan;if(v>c){c=v}if(E.rowIndex+d.rowSpan-1>x){x=E.rowIndex+d.rowSpan-1}}var b=x-r+1;var D=a.parentNode;l(D,o,r,c,b)}else{var m=this.getClosest("td");if(!m){alert(Xinha._lc("Please click into some cell","TableOperations"));break}var a=m.parentNode;var o=m.cellIndex;var r=a.rowIndex;this.dialogMerge(l,o,r)}break;case"TO-table-prop":this.dialogTableProperties();break;case"TO-row-prop":this.dialogRowCellProperties(false);break;case"TO-cell-prop":this.dialogRowCellProperties(true);break;default:alert("Button ["+k+"] not yet implemented")}};TableOperations.btnList=[["table-prop","table","Table properties"],null,["row-prop","tr","Row properties"],["row-insert-above","tr","Insert row before"],["row-insert-under","tr","Insert row after"],["row-delete","tr","Delete row"],["row-split","td[rowSpan!=1]","Split row"],null,["col-insert-before","td","Insert column before"],["col-insert-after","td","Insert column after"],["col-delete","td","Delete column"],["col-split","td[colSpan!=1]","Split column"],null,["cell-prop","td","Cell properties"],["cell-insert-before","td","Insert cell before"],["cell-insert-after","td","Insert cell after"],["cell-delete","td","Delete cell"],["cell-merge","tr","Merge cells"],["cell-split","td[colSpan!=1,rowSpan!=1]","Split cell"]];TableOperations.prototype.dialogMerge=function(g,h,e){var f=this.getClosest("table");var b=this;var 
d=this.editor;if(!this.dialogMergeCellsHtml){Xinha._getback(Xinha.getPluginDir("TableOperations")+"/popups/dialogMergeCells.html",function(i){b.dialogMergeCellsHtml=i;b.dialogMerge(g,h,e)});return}if(!this.dialogMergeCells){this.dialogMergeCells=new Xinha.Dialog(d,this.dialogMergeCellsHtml,"TableOperations",{width:400});this.dialogMergeCells.getElementById("cancel").onclick=function(){b.dialogMergeCells.hide()}}var c=this.dialogMergeCells;function a(){c.hide();no_cols=parseInt(c.getElementById("f_cols").value,10)+1;no_rows=parseInt(c.getElementById("f_rows").value,10)+1;g(f,h,e,no_cols,no_rows);return}this.dialogMergeCells.getElementById("ok").onclick=a;this.dialogMergeCells.show();this.dialogMergeCells.getElementById("f_cols").focus()};TableOperations.prototype.dialogTableProperties=function(){var k=this.getClosest("table");var l=this;var d=this.editor;if(!this.dialogTablePropertiesHtml){Xinha._getback(Xinha.getPluginDir("TableOperations")+"/popups/dialogTable.html",function(m){l.dialogTablePropertiesHtml=m;l.dialogTableProperties()});return}if(!this.dialogTable){this.dialogTable=new Xinha.Dialog(d,this.dialogTablePropertiesHtml,"TableOperations",{width:440});this.dialogTable.getElementById("cancel").onclick=function(){l.dialogTable.hide()}}var g=this.dialogTable;var c=new Xinha.InlineStyler(k,this.editor,g);function i(){var q=g.hide();c.applyStyle(q);for(var n in q){if(typeof q[n]=="function"){continue}var p=q[n];if(typeof p=="object"&&p!=null&&p.tagName){p=p.value}switch(n){case"caption":if(/\S/.test(p)){var m=k.getElementsByTagName("caption")[0];if(!m){m=g.editor._doc.createElement("caption");k.insertBefore(m,k.firstChild)}m.innerHTML=p}else{var m=k.getElementsByTagName("caption")[0];if(m){m.parentNode.removeChild(m)}}break;case"summary":k.summary=p;break;case"width":k.style.width=(""+p)+q.f_unit;break;case"align":k.align=p;break;case"spacing":k.cellSpacing=p;break;case"padding":k.cellPadding=p;break;case"borders":k.border=p;break;case"frames":k.frame=p;break;case"rules":k.rules=p;break}}l.editor.forceRedraw();l.editor.focusEditor();l.editor.updateToolbar();var o=k.style.borderCollapse;k.style.borderCollapse="collapse";k.style.borderCollapse="separate";k.style.borderCollapse=o}var a=c.createStyleLayoutFieldset();var b=g.getElementById("TO_layout");b.replaceChild(a,b.firstChild);var j=c.createStyleFieldset();b=g.getElementById("TO_style");b.replaceChild(j,b.firstChild);this.dialogTable.getElementById("ok").onclick=i;var h={};var e=k.getElementsByTagName("caption")[0];if(e){h.caption=e.innerHTML}else{h.caption=""}h.summary=k.summary;h.spacing=k.cellSpacing;h.padding=k.cellPadding;var f=k.border;h.frames=k.frame;h.rules=k.rules;this.dialogTable.show(h)};TableOperations.prototype.dialogRowCellProperties=function(h){var d=this.getClosest(h?"td":"tr");var j=this.getClosest("table");var k=this;var e=this.editor;if(!k.dialogRowCellPropertiesHtml){Xinha._getback(Xinha.getPluginDir("TableOperations")+"/popups/dialogRowCell.html",function(l){k.dialogRowCellPropertiesHtml=l;k.dialogRowCellProperties(h)});return}if(!this.dialogRowCell){this.dialogRowCell=new Xinha.Dialog(e,k.dialogRowCellPropertiesHtml,"TableOperations",{width:440});this.dialogRowCell.getElementById("cancel").onclick=function(){k.dialogRowCell.hide()}}var f=this.dialogRowCell;f.getElementById("title").innerHTML=h?Xinha._lc("Cell Properties","TableOperations"):Xinha._lc("Row Properties","TableOperations");var c=new Xinha.InlineStyler(d,k.editor,f);function g(){var 
m=f.hide();c.applyStyle(m);k.editor.forceRedraw();k.editor.focusEditor();k.editor.updateToolbar();var l=j.style.borderCollapse;j.style.borderCollapse="collapse";j.style.borderCollapse="separate";j.style.borderCollapse=l}var a=c.createStyleLayoutFieldset();var b=f.getElementById("TO_layout");b.replaceChild(a,b.firstChild);var i=c.createStyleFieldset();b=f.getElementById("TO_style");b.replaceChild(i,b.firstChild);this.dialogRowCell.getElementById("ok").onclick=g;this.dialogRowCell.show()}; | PypiClean |
/Mesa-2.1.1-py3-none-any.whl/mesa/visualization/modules/HexGridVisualization.py | from collections import defaultdict
from mesa.visualization.ModularVisualization import VisualizationElement
class CanvasHexGrid(VisualizationElement):
"""A CanvasHexGrid object functions similarly to a CanvasGrid object. It takes a portrayal dictionary and talks to HexDraw.js to draw that shape.
A portrayal as a dictionary with the following structure:
"x", "y": Coordinates for the cell in which the object is placed.
"Shape": Can be either "hex" or "circle"
"r": The radius, defined as a fraction of cell size. r=1 will
fill the entire cell.
"Color": The color to draw the shape in; needs to be a valid HTML
color, e.g."Red" or "#AA08F8"
"Filled": either "true" or "false", and determines whether the shape is
filled or not.
"Layer": Layer number of 0 or above; higher-numbered layers are drawn
above lower-numbered layers.
"text": The text to be inscribed inside the Shape. Normally useful for
showing the unique_id of the agent.
"text_color": The color to draw the inscribed text. Should be given in
conjunction of "text" property.
Attributes:
portrayal_method: Function which generates portrayals from objects, as
described above.
grid_height, grid_width: Size of the grid to visualize, in cells.
canvas_height, canvas_width: Size, in pixels, of the grid visualization
to draw on the client.
template: "canvas_module.html" stores the module's HTML template.
"""
package_includes = ["HexDraw.js", "CanvasHexModule.js", "InteractionHandler.js"]
portrayal_method = None # Portrayal function
canvas_width = 500
canvas_height = 500
def __init__(
self,
portrayal_method,
grid_width,
grid_height,
canvas_width=500,
canvas_height=500,
):
"""Instantiate a new CanvasGrid.
Args:
portrayal_method: function to convert each object on the grid to
a portrayal, as described above.
grid_width, grid_height: Size of the grid, in cells.
canvas_height, canvas_width: Size of the canvas to draw in the
client, in pixels. (default: 500x500)
"""
self.portrayal_method = portrayal_method
self.grid_width = grid_width
self.grid_height = grid_height
self.canvas_width = canvas_width
self.canvas_height = canvas_height
new_element = "new CanvasHexModule({}, {}, {}, {})".format(
self.canvas_width, self.canvas_height, self.grid_width, self.grid_height
)
self.js_code = "elements.push(" + new_element + ");"
def render(self, model):
grid_state = defaultdict(list)
for x in range(model.grid.width):
for y in range(model.grid.height):
cell_objects = model.grid.get_cell_list_contents([(x, y)])
for obj in cell_objects:
portrayal = self.portrayal_method(obj)
if portrayal:
portrayal["x"] = x
portrayal["y"] = y
grid_state[portrayal["Layer"]].append(portrayal)
return grid_state | PypiClean |
/MindsDB-23.8.3.0.tar.gz/MindsDB-23.8.3.0/mindsdb/integrations/handlers/frappe_handler/frappe_tables.py | import pandas as pd
from typing import List
from mindsdb.integrations.libs.api_handler import APITable
from mindsdb.integrations.utilities.sql_utils import extract_comparison_conditions
from mindsdb_sql.parser import ast
class FrappeDocumentsTable(APITable):
def select(self, query: ast.Select) -> pd.DataFrame:
"""Selects data from the Frappe API and returns it as a pandas DataFrame.
Returns dataframe representing the Frappe API results.
Args:
query (ast.Select): Given SQL SELECT query
"""
conditions = extract_comparison_conditions(query.where)
params = {}
filters = []
for op, arg1, arg2 in conditions:
if arg1 == 'doctype':
if op != '=':
raise NotImplementedError
params['doctype'] = arg2
elif arg1 == 'name':
params['name'] = arg2
else:
filters.append([arg1, op, arg2])
        if 'doctype' not in params:
raise ValueError('"doctype" parameter required')
if query.limit:
params['limit'] = query.limit.value
if filters:
params['filters'] = filters
if 'name' in params:
document_data = self.handler.call_frappe_api(
method_name='get_document',
params=params
)
else:
document_data = self.handler.call_frappe_api(
method_name='get_documents',
params=params
)
# Only return the columns we need to.
columns = []
for target in query.targets:
if isinstance(target, ast.Star):
columns = document_data.columns
break
elif isinstance(target, ast.Identifier):
columns.append(target.parts[-1])
else:
raise NotImplementedError
if len(document_data) == 0:
return pd.DataFrame([], columns=columns)
# Remove columns not part of select.
for col in set(document_data.columns).difference(set(columns)):
document_data = document_data.drop(col, axis=1)
return document_data
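    # Illustrative note (hypothetical table and doctype names, not part of the
    # handler): a query such as
    #     SELECT * FROM documents WHERE doctype = 'ToDo' AND status = 'Open' LIMIT 10
    # reaches select() with conditions that produce
    #     params = {'doctype': 'ToDo', 'limit': 10,
    #               'filters': [['status', '=', 'Open']]}
    # before self.handler.call_frappe_api(method_name='get_documents', params=params)
    # is invoked.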
def insert(self, query: ast.Insert) -> pd.DataFrame:
columns = [col.name for col in query.columns]
for row in query.values:
params = dict(zip(columns, row))
self.handler.call_frappe_api('create_document', params)
def get_columns(self) -> List:
"""Gets all columns to be returned in pandas DataFrame responses"""
return ['doctype', 'data'] | PypiClean |
/NMR_peaks_picking-0.1-py3-none-any.whl/nmrglue/analysis/leastsqbound.py |
import warnings
from numpy import array, take, eye, triu, transpose, dot
from numpy import empty_like, sqrt, cos, sin, arcsin
from scipy.optimize.minpack import _check_func
from scipy.optimize import _minpack, leastsq
def _internal2external_grad(xi, bounds):
"""
    Calculate the internal (unconstrained) to external (constrained)
    parameter gradients.
"""
grad = empty_like(xi)
for i, (v, bound) in enumerate(zip(xi, bounds)):
lower, upper = bound
if lower is None and upper is None: # No constraints
grad[i] = 1.0
elif upper is None: # only lower bound
grad[i] = v / sqrt(v * v + 1.)
elif lower is None: # only upper bound
grad[i] = -v / sqrt(v * v + 1.)
else: # lower and upper bounds
grad[i] = (upper - lower) * cos(v) / 2.
return grad
def _internal2external_func(bounds):
"""
Make a function which converts between internal (unconstrained) and
external (constrained) parameters.
"""
ls = [_internal2external_lambda(b) for b in bounds]
def convert_i2e(xi):
xe = empty_like(xi)
xe[:] = [l(p) for l, p in zip(ls, xi)]
return xe
return convert_i2e
def _internal2external_lambda(bound):
"""
    Make a lambda function which converts a single internal (unconstrained)
    parameter to an external (constrained) parameter.
"""
lower, upper = bound
if lower is None and upper is None: # no constraints
return lambda x: x
elif upper is None: # only lower bound
return lambda x: lower - 1. + sqrt(x * x + 1.)
elif lower is None: # only upper bound
return lambda x: upper + 1. - sqrt(x * x + 1.)
else:
return lambda x: lower + ((upper - lower) / 2.) * (sin(x) + 1.)
def _external2internal_func(bounds):
"""
Make a function which converts between external (constrained) and
internal (unconstrained) parameters.
"""
ls = [_external2internal_lambda(b) for b in bounds]
def convert_e2i(xe):
xi = empty_like(xe)
xi[:] = [l(p) for l, p in zip(ls, xe)]
return xi
return convert_e2i
def _external2internal_lambda(bound):
"""
    Make a lambda function which converts a single external (constrained)
    parameter to an internal (unconstrained) parameter.
"""
lower, upper = bound
if lower is None and upper is None: # no constraints
return lambda x: x
elif upper is None: # only lower bound
return lambda x: sqrt((x - lower + 1.) ** 2 - 1)
elif lower is None: # only upper bound
return lambda x: sqrt((upper - x + 1.) ** 2 - 1)
else:
return lambda x: arcsin((2. * (x - lower) / (upper - lower)) - 1.)
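# Minimal sketch (not part of the original module) showing that the internal and
# external transforms defined above are mutually inverse; the bounds and values
# below are arbitrary illustrative choices.
def _example_bound_transform_roundtrip():
    bounds = [(0.0, 10.0), (None, 5.0), (2.0, None), (None, None)]
    i2e = _internal2external_func(bounds)
    e2i = _external2internal_func(bounds)
    xe = array([3.0, -1.0, 4.0, 7.0])
    # i2e(e2i(x)) should reproduce x up to floating point error
    return i2e(e2i(xe)) - xe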
def leastsqbound(func, x0, args=(), bounds=None, Dfun=None, full_output=0,
col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
gtol=0.0, maxfev=0, epsfcn=0.0, factor=100, diag=None):
"""
Bounded minimization of the sum of squares of a set of equations.
::
x = arg min(sum(func(y)**2,axis=0))
y
Parameters
----------
func : callable
should take at least one (possibly length N vector) argument and
returns M floating point numbers.
x0 : ndarray
The starting estimate for the minimization.
args : tuple
Any extra arguments to func are placed in this tuple.
bounds : list
``(min, max)`` pairs for each element in ``x``, defining
the bounds on that parameter. Use None for one of ``min`` or
``max`` when there is no bound in that direction.
Dfun : callable
A function or method to compute the Jacobian of func with derivatives
across the rows. If this is None, the Jacobian will be estimated.
full_output : bool
non-zero to return all optional outputs.
col_deriv : bool
non-zero to specify that the Jacobian function computes derivatives
down the columns (faster, because there is no transpose operation).
ftol : float
Relative error desired in the sum of squares.
xtol : float
Relative error desired in the approximate solution.
gtol : float
Orthogonality desired between the function vector and the columns of
the Jacobian.
maxfev : int
The maximum number of calls to the function. If zero, then 100*(N+1) is
the maximum where N is the number of elements in x0.
epsfcn : float
A suitable step length for the forward-difference approximation of the
Jacobian (for Dfun=None). If epsfcn is less than the machine precision,
it is assumed that the relative errors in the functions are of the
order of the machine precision.
factor : float
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
diag : sequence
N positive entries that serve as a scale factors for the variables.
Returns
-------
x : ndarray
The solution (or the result of the last iteration for an unsuccessful
call).
cov_x : ndarray
Uses the fjac and ipvt optional outputs to construct an
estimate of the jacobian around the solution. ``None`` if a
singular matrix encountered (indicates very flat curvature in
some direction). This matrix must be multiplied by the
residual standard deviation to get the covariance of the
parameter estimates -- see curve_fit.
infodict : dict
        a dictionary of optional outputs with the keys::
- 'nfev' : the number of function calls
- 'fvec' : the function evaluated at the output
- 'fjac' : A permutation of the R matrix of a QR
factorization of the final approximate
Jacobian matrix, stored column wise.
Together with ipvt, the covariance of the
estimate can be approximated.
- 'ipvt' : an integer array of length N which defines
a permutation matrix, p, such that
fjac*p = q*r, where r is upper triangular
with diagonal elements of nonincreasing
magnitude. Column j of p is column ipvt(j)
of the identity matrix.
- 'qtf' : the vector (transpose(q) * fvec).
mesg : str
A string message giving information about the cause of failure.
ier : int
An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
found. Otherwise, the solution was not found. In either case, the
optional output variable 'mesg' gives more information.
Notes
-----
"leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.
cov_x is a Jacobian approximation to the Hessian of the least squares
objective function.
This approximation assumes that the objective function is based on the
difference between some observed target data (ydata) and a (non-linear)
function of the parameters `f(xdata, params)` ::
func(params) = ydata - f(xdata, params)
so that the objective function is ::
min sum((ydata - f(xdata, params))**2, axis=0)
params
    Constraints on the parameters are enforced using an internal parameter list
    with appropriate transformations such that these internal parameters can be
    optimized without constraints. The transformation between a given internal
    parameter, p_i, and an external parameter, p_e, is as follows:
With ``min`` and ``max`` bounds defined ::
p_i = arcsin((2 * (p_e - min) / (max - min)) - 1.)
p_e = min + ((max - min) / 2.) * (sin(p_i) + 1.)
With only ``max`` defined ::
p_i = sqrt((max - p_e + 1.)**2 - 1.)
p_e = max + 1. - sqrt(p_i**2 + 1.)
With only ``min`` defined ::
p_i = sqrt((p_e - min + 1.)**2 - 1.)
p_e = min - 1. + sqrt(p_i**2 + 1.)
    These transformations are used in the MINUIT package, and are described in
    detail in section 1.3.1 of the MINUIT User's Guide.
To Do
-----
Currently the ``factor`` and ``diag`` parameters scale the
internal parameter list, but should scale the external parameter list.
    The `qtf` vector in the infodict dictionary reflects the internal parameter
    list; it should be corrected to reflect the external parameter list.
References
----------
* F. James and M. Winkler. MINUIT User's Guide, July 16, 2004.
"""
# use leastsq if no bounds are present
if bounds is None:
return leastsq(func, x0, args, Dfun, full_output, col_deriv,
ftol, xtol, gtol, maxfev, epsfcn, factor, diag)
# create function which convert between internal and external parameters
i2e = _internal2external_func(bounds)
e2i = _external2internal_func(bounds)
x0 = array(x0, ndmin=1)
i0 = e2i(x0)
n = len(x0)
if len(bounds) != n:
raise ValueError('length of x0 != length of bounds')
if not isinstance(args, tuple):
args = (args,)
m = _check_func('leastsq', 'func', func, x0, args, n)[0][0]
if n > m:
raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
# define a wrapped func which accept internal parameters, converts them
# to external parameters and calls func
def wfunc(x, *args):
return func(i2e(x), *args)
if Dfun is None:
if (maxfev == 0):
maxfev = 200 * (n + 1)
retval = _minpack._lmdif(wfunc, i0, args, full_output, ftol, xtol,
gtol, maxfev, epsfcn, factor, diag)
else:
if col_deriv:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
else:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
if (maxfev == 0):
maxfev = 100 * (n + 1)
def wDfun(x, *args): # wrapped Dfun
return Dfun(i2e(x), *args)
retval = _minpack._lmder(
func, wDfun, i0, args, full_output, col_deriv, ftol, xtol, gtol,
maxfev, factor, diag)
errors = {0: ["Improper input parameters.", TypeError],
1: ["Both actual and predicted relative reductions "
"in the sum of squares\n are at most %f" % ftol, None],
2: ["The relative error between two consecutive "
"iterates is at most %f" % xtol, None],
3: ["Both actual and predicted relative reductions in "
"the sum of squares\n are at most %f and the "
"relative error between two consecutive "
"iterates is at \n most %f" % (ftol, xtol), None],
4: ["The cosine of the angle between func(x) and any "
"column of the\n Jacobian is at most %f in "
"absolute value" % gtol, None],
5: ["Number of calls to function has reached "
"maxfev = %d." % maxfev, ValueError],
6: ["ftol=%f is too small, no further reduction in the sum "
"of squares\n is possible.""" % ftol, ValueError],
7: ["xtol=%f is too small, no further improvement in the "
"approximate\n solution is possible." % xtol, ValueError],
8: ["gtol=%f is too small, func(x) is orthogonal to the "
"columns of\n the Jacobian to machine "
"precision." % gtol, ValueError],
'unknown': ["Unknown error.", TypeError]}
info = retval[-1] # The FORTRAN return value
if (info not in [1, 2, 3, 4] and not full_output):
if info in [5, 6, 7, 8]:
warnings.warn(errors[info][0], RuntimeWarning)
else:
try:
raise errors[info][1](errors[info][0])
except KeyError:
raise errors['unknown'][1](errors['unknown'][0])
mesg = errors[info][0]
x = i2e(retval[0]) # internal params to external params
if full_output:
# convert fjac from internal params to external
grad = _internal2external_grad(retval[0], bounds)
retval[1]['fjac'] = (retval[1]['fjac'].T / take(grad,
retval[1]['ipvt'] - 1)).T
cov_x = None
if info in [1, 2, 3, 4]:
from numpy.dual import inv
from numpy.linalg import LinAlgError
perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
r = triu(transpose(retval[1]['fjac'])[:n, :])
R = dot(r, perm)
try:
cov_x = inv(dot(transpose(R), R))
except (LinAlgError, ValueError):
pass
return (x, cov_x) + retval[1:-1] + (mesg, info)
else:
return (x, info) | PypiClean |
/FiberPhotometryDataAnalysis-0.0.9.tar.gz/FiberPhotometryDataAnalysis-0.0.9/README.rst | ========
Overview
========
A package for analysis of multi-fiber photmetry data and behaviour.
* Free software: MIT license
Installation
============
::
pip install FiberPhotometryDataAnalysis
You can also install the in-development version with::
    pip install git+ssh://git@github.com/katemartian/FiberPhotometryDataAnalysis.git@master
Documentation
=============
https://FiberPhotometryDataAnalysis.readthedocs.io/
Development
===========
To run all the tests run::
tox
Note, to combine the coverage data from all the tox environments run:
.. list-table::
    :widths: 10 90
    :stub-columns: 1

    - - Windows
      - ::

            set PYTEST_ADDOPTS=--cov-append
            tox

    - - Other
      - ::

            PYTEST_ADDOPTS=--cov-append tox
| PypiClean |
/Kr0nOs_Bot-3.3.11-py3-none-any.whl/redbot/core/utils/errors.py | from discord.errors import DiscordException
__all__ = (
"CommandError",
"MissingRequiredArgument",
"BadArgument",
"PrivateMessageOnly",
"NoPrivateMessage",
"CheckFailure",
"CheckAnyFailure",
"CommandNotFound",
"DisabledCommand",
"CommandInvokeError",
"TooManyArguments",
"UserInputError",
"CommandOnCooldown",
"MaxConcurrencyReached",
"NotOwner",
"MissingRole",
"BotMissingRole",
"MissingAnyRole",
"BotMissingAnyRole",
"MissingPermissions",
"BotMissingPermissions",
"NSFWChannelRequired",
"ConversionError",
"BadUnionArgument",
"ArgumentParsingError",
"UnexpectedQuoteError",
"InvalidEndOfQuotedStringError",
"ExpectedClosingQuoteError",
"ExtensionError",
"ExtensionAlreadyLoaded",
"ExtensionNotLoaded",
"NoEntryPointError",
"ExtensionFailed",
"ExtensionNotFound",
)
class CommandError(DiscordException):
r"""The base exception type for all command related errors.
This inherits from :exc:`discord.DiscordException`.
This exception and exceptions inherited from it are handled
in a special way as they are caught and passed into a special event
from :class:`.Bot`\, :func:`on_command_error`.
"""
def __init__(self, message=None, *args):
if message is not None:
# clean-up @everyone and @here mentions
m = message.replace("@everyone", "@\u200beveryone").replace("@here", "@\u200bhere")
super().__init__(m, *args)
else:
super().__init__(*args)
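# Hypothetical sketch (not part of this module) of how these exceptions are
# typically consumed in a bot-level handler; the on_command_error event is the
# mechanism mentioned in the CommandError docstring, while the bot object and
# reply messages below are assumptions:
#
#     @bot.event
#     async def on_command_error(ctx, error):
#         if isinstance(error, CommandOnCooldown):
#             await ctx.send("On cooldown, retry in {:.0f}s".format(error.retry_after))
#         elif isinstance(error, MissingPermissions):
#             await ctx.send(str(error))
#         elif isinstance(error, CommandNotFound):
#             pass  # silently ignore unknown commands
#         else:
#             raise error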
class ConversionError(CommandError):
"""Exception raised when a Converter class raises non-CommandError.
This inherits from :exc:`CommandError`.
Attributes
----------
converter: :class:`discord.ext.commands.Converter`
The converter that failed.
original
The original exception that was raised. You can also get this via
the ``__cause__`` attribute.
"""
def __init__(self, converter, original):
self.converter = converter
self.original = original
class UserInputError(CommandError):
"""The base exception type for errors that involve errors
regarding user input.
This inherits from :exc:`CommandError`.
"""
pass
class CommandNotFound(CommandError):
"""Exception raised when a command is attempted to be invoked
but no command under that name is found.
This is not raised for invalid subcommands, rather just the
initial main command that is attempted to be invoked.
This inherits from :exc:`CommandError`.
"""
pass
class MissingRequiredArgument(UserInputError):
"""Exception raised when parsing a command and a parameter
that is required is not encountered.
This inherits from :exc:`UserInputError`
Attributes
-----------
param: :class:`inspect.Parameter`
The argument that is missing.
"""
def __init__(self, param):
self.param = param
super().__init__("{0.name} is a required argument that is missing.".format(param))
class TooManyArguments(UserInputError):
"""Exception raised when the command was passed too many arguments and its
:attr:`.Command.ignore_extra` attribute was not set to ``True``.
This inherits from :exc:`UserInputError`
"""
pass
class BadArgument(UserInputError):
"""Exception raised when a parsing or conversion failure is encountered
on an argument to pass into a command.
This inherits from :exc:`UserInputError`
"""
pass
class CheckFailure(CommandError):
"""Exception raised when the predicates in :attr:`.Command.checks` have failed.
This inherits from :exc:`CommandError`
"""
pass
class CheckAnyFailure(CheckFailure):
"""Exception raised when all predicates in :func:`check_any` fail.
This inherits from :exc:`CheckFailure`.
.. versionadded:: 1.3
Attributes
------------
errors: List[:class:`CheckFailure`]
A list of errors that were caught during execution.
checks: List[Callable[[:class:`Context`], :class:`bool`]]
A list of check predicates that failed.
"""
def __init__(self, checks, errors):
self.checks = checks
self.errors = errors
super().__init__("You do not have permission to run this command.")
class PrivateMessageOnly(CheckFailure):
"""Exception raised when an operation does not work outside of private
message contexts.
This inherits from :exc:`CheckFailure`
"""
def __init__(self, message=None):
super().__init__(message or "This command can only be used in private messages.")
class NoPrivateMessage(CheckFailure):
"""Exception raised when an operation does not work in private message
contexts.
This inherits from :exc:`CheckFailure`
"""
def __init__(self, message=None):
super().__init__(message or "This command cannot be used in private messages.")
class NotOwner(CheckFailure):
"""Exception raised when the message author is not the owner of the bot.
This inherits from :exc:`CheckFailure`
"""
pass
class DisabledCommand(CommandError):
"""Exception raised when the command being invoked is disabled.
This inherits from :exc:`CommandError`
"""
pass
class CommandInvokeError(CommandError):
"""Exception raised when the command being invoked raised an exception.
This inherits from :exc:`CommandError`
Attributes
-----------
original
The original exception that was raised. You can also get this via
the ``__cause__`` attribute.
"""
def __init__(self, e):
self.original = e
super().__init__("Command raised an exception: {0.__class__.__name__}: {0}".format(e))
class CommandOnCooldown(CommandError):
"""Exception raised when the command being invoked is on cooldown.
This inherits from :exc:`CommandError`
Attributes
-----------
cooldown: Cooldown
A class with attributes ``rate``, ``per``, and ``type`` similar to
the :func:`.cooldown` decorator.
retry_after: :class:`float`
The amount of seconds to wait before you can retry again.
"""
def __init__(self, cooldown, retry_after):
self.cooldown = cooldown
self.retry_after = retry_after
super().__init__("You are on cooldown. Try again in {:.2f}s".format(retry_after))
class MaxConcurrencyReached(CommandError):
"""Exception raised when the command being invoked has reached its maximum concurrency.
This inherits from :exc:`CommandError`.
Attributes
------------
number: :class:`int`
The maximum number of concurrent invokers allowed.
per: :class:`BucketType`
The bucket type passed to the :func:`.max_concurrency` decorator.
"""
def __init__(self, number, per):
self.number = number
self.per = per
name = per.name
suffix = "per %s" % name if per.name != "default" else "globally"
plural = "%s times %s" if number > 1 else "%s time %s"
fmt = plural % (number, suffix)
super().__init__(
"Too many people using this command. It can only be used {} concurrently.".format(fmt)
)
class MissingRole(CheckFailure):
"""Exception raised when the command invoker lacks a role to run a command.
This inherits from :exc:`CheckFailure`
.. versionadded:: 1.1
Attributes
-----------
missing_role: Union[:class:`str`, :class:`int`]
The required role that is missing.
This is the parameter passed to :func:`~.commands.has_role`.
"""
def __init__(self, missing_role):
self.missing_role = missing_role
message = "Role {0!r} is required to run this command.".format(missing_role)
super().__init__(message)
class BotMissingRole(CheckFailure):
"""Exception raised when the bot's member lacks a role to run a command.
This inherits from :exc:`CheckFailure`
.. versionadded:: 1.1
Attributes
-----------
missing_role: Union[:class:`str`, :class:`int`]
The required role that is missing.
This is the parameter passed to :func:`~.commands.has_role`.
"""
def __init__(self, missing_role):
self.missing_role = missing_role
message = "Bot requires the role {0!r} to run this command".format(missing_role)
super().__init__(message)
class MissingAnyRole(CheckFailure):
"""Exception raised when the command invoker lacks any of
the roles specified to run a command.
This inherits from :exc:`CheckFailure`
.. versionadded:: 1.1
Attributes
-----------
missing_roles: List[Union[:class:`str`, :class:`int`]]
The roles that the invoker is missing.
These are the parameters passed to :func:`~.commands.has_any_role`.
"""
def __init__(self, missing_roles):
self.missing_roles = missing_roles
missing = ["'{}'".format(role) for role in missing_roles]
if len(missing) > 2:
fmt = "{}, or {}".format(", ".join(missing[:-1]), missing[-1])
else:
fmt = " or ".join(missing)
message = "You are missing at least one of the required roles: {}".format(fmt)
super().__init__(message)
class BotMissingAnyRole(CheckFailure):
"""Exception raised when the bot's member lacks any of
the roles specified to run a command.
This inherits from :exc:`CheckFailure`
.. versionadded:: 1.1
Attributes
-----------
missing_roles: List[Union[:class:`str`, :class:`int`]]
The roles that the bot's member is missing.
These are the parameters passed to :func:`~.commands.has_any_role`.
"""
def __init__(self, missing_roles):
self.missing_roles = missing_roles
missing = ["'{}'".format(role) for role in missing_roles]
if len(missing) > 2:
fmt = "{}, or {}".format(", ".join(missing[:-1]), missing[-1])
else:
fmt = " or ".join(missing)
message = "Bot is missing at least one of the required roles: {}".format(fmt)
super().__init__(message)
class NSFWChannelRequired(CheckFailure):
"""Exception raised when a channel does not have the required NSFW setting.
This inherits from :exc:`CheckFailure`.
.. versionadded:: 1.1
Parameters
-----------
channel: :class:`discord.abc.GuildChannel`
The channel that does not have NSFW enabled.
"""
def __init__(self, channel):
self.channel = channel
super().__init__("Channel '{}' needs to be NSFW for this command to work.".format(channel))
class MissingPermissions(CheckFailure):
"""Exception raised when the command invoker lacks permissions to run a
command.
This inherits from :exc:`CheckFailure`
Attributes
-----------
missing_perms: :class:`list`
The required permissions that are missing.
"""
def __init__(self, missing_perms, *args):
self.missing_perms = missing_perms
missing = [
perm.replace("_", " ").replace("guild", "server").title() for perm in missing_perms
]
if len(missing) > 2:
fmt = "{}, and {}".format(", ".join(missing[:-1]), missing[-1])
else:
fmt = " and ".join(missing)
message = "You are missing {} permission(s) to run this command.".format(fmt)
super().__init__(message, *args)
class BotMissingPermissions(CheckFailure):
"""Exception raised when the bot's member lacks permissions to run a
command.
This inherits from :exc:`CheckFailure`
Attributes
-----------
missing_perms: :class:`list`
The required permissions that are missing.
"""
def __init__(self, missing_perms, *args):
self.missing_perms = missing_perms
missing = [
perm.replace("_", " ").replace("guild", "server").title() for perm in missing_perms
]
if len(missing) > 2:
fmt = "{}, and {}".format(", ".join(missing[:-1]), missing[-1])
else:
fmt = " and ".join(missing)
message = "Bot requires {} permission(s) to run this command.".format(fmt)
super().__init__(message, *args)
class BadUnionArgument(UserInputError):
"""Exception raised when a :data:`typing.Union` converter fails for all
its associated types.
This inherits from :exc:`UserInputError`
Attributes
-----------
param: :class:`inspect.Parameter`
The parameter that failed being converted.
converters: Tuple[Type, ...]
A tuple of converters attempted in conversion, in order of failure.
errors: List[:class:`CommandError`]
A list of errors that were caught from failing the conversion.
"""
def __init__(self, param, converters, errors):
self.param = param
self.converters = converters
self.errors = errors
def _get_name(x):
try:
return x.__name__
except AttributeError:
return x.__class__.__name__
to_string = [_get_name(x) for x in converters]
if len(to_string) > 2:
fmt = "{}, or {}".format(", ".join(to_string[:-1]), to_string[-1])
else:
fmt = " or ".join(to_string)
super().__init__('Could not convert "{0.name}" into {1}.'.format(param, fmt))
class ArgumentParsingError(UserInputError):
"""An exception raised when the parser fails to parse a user's input.
This inherits from :exc:`UserInputError`.
There are child classes that implement more granular parsing errors for
i18n purposes.
"""
pass
class UnexpectedQuoteError(ArgumentParsingError):
"""An exception raised when the parser encounters a quote mark inside a non-quoted string.
This inherits from :exc:`ArgumentParsingError`.
Attributes
------------
quote: :class:`str`
The quote mark that was found inside the non-quoted string.
"""
def __init__(self, quote):
self.quote = quote
super().__init__("Unexpected quote mark, {0!r}, in non-quoted string".format(quote))
class InvalidEndOfQuotedStringError(ArgumentParsingError):
"""An exception raised when a space is expected after the closing quote in a string
but a different character is found.
This inherits from :exc:`ArgumentParsingError`.
Attributes
-----------
char: :class:`str`
The character found instead of the expected string.
"""
def __init__(self, char):
self.char = char
super().__init__("Expected space after closing quotation but received {0!r}".format(char))
class ExpectedClosingQuoteError(ArgumentParsingError):
"""An exception raised when a quote character is expected but not found.
This inherits from :exc:`ArgumentParsingError`.
Attributes
-----------
close_quote: :class:`str`
The quote character expected.
"""
def __init__(self, close_quote):
self.close_quote = close_quote
super().__init__("Expected closing {}.".format(close_quote))
class ExtensionError(DiscordException):
"""Base exception for extension related errors.
This inherits from :exc:`~discord.DiscordException`.
Attributes
------------
name: :class:`str`
The extension that had an error.
"""
def __init__(self, message=None, *args, name):
self.name = name
message = message or "Extension {!r} had an error.".format(name)
# clean-up @everyone and @here mentions
m = message.replace("@everyone", "@\u200beveryone").replace("@here", "@\u200bhere")
super().__init__(m, *args)
class ExtensionAlreadyLoaded(ExtensionError):
"""An exception raised when an extension has already been loaded.
This inherits from :exc:`ExtensionError`
"""
def __init__(self, name):
super().__init__("Extension {!r} is already loaded.".format(name), name=name)
class ExtensionNotLoaded(ExtensionError):
"""An exception raised when an extension was not loaded.
This inherits from :exc:`ExtensionError`
"""
def __init__(self, name):
super().__init__("Extension {!r} has not been loaded.".format(name), name=name)
class NoEntryPointError(ExtensionError):
"""An exception raised when an extension does not have a ``setup`` entry point function.
This inherits from :exc:`ExtensionError`
"""
def __init__(self, name):
super().__init__("Extension {!r} has no 'setup' function.".format(name), name=name)
class ExtensionFailed(ExtensionError):
"""An exception raised when an extension failed to load during execution of the module or ``setup`` entry point.
This inherits from :exc:`ExtensionError`
Attributes
-----------
name: :class:`str`
The extension that had the error.
original: :exc:`Exception`
The original exception that was raised. You can also get this via
the ``__cause__`` attribute.
"""
def __init__(self, name, original):
self.original = original
fmt = "Extension {0!r} raised an error: {1.__class__.__name__}: {1}"
super().__init__(fmt.format(name, original), name=name)
class ExtensionNotFound(ExtensionError):
"""An exception raised when an extension is not found.
This inherits from :exc:`ExtensionError`
.. versionchanged:: 1.3
Made the ``original`` attribute always None.
Attributes
-----------
name: :class:`str`
The extension that had the error.
original: :class:`NoneType`
Always ``None`` for backwards compatibility.
"""
def __init__(self, name, original=None):
self.original = None
fmt = "Extension {0!r} could not be loaded."
super().__init__(fmt.format(name), name=name) | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/mobile/app/List.js | define(["dijit","dojo","dojox","dojo/require!dojo/string,dijit/_WidgetBase"],function(_1,_2,_3){
_2.provide("dojox.mobile.app.List");
_2.experimental("dojox.mobile.app.List");
_2.require("dojo.string");
_2.require("dijit._WidgetBase");
(function(){
var _4={};
_2.declare("dojox.mobile.app.List",_1._WidgetBase,{items:null,itemTemplate:"",emptyTemplate:"",dividerTemplate:"",dividerFunction:null,labelDelete:"Delete",labelCancel:"Cancel",controller:null,autoDelete:true,enableDelete:true,enableHold:true,formatters:null,_templateLoadCount:0,_mouseDownPos:null,baseClass:"list",constructor:function(){
this._checkLoadComplete=_2.hitch(this,this._checkLoadComplete);
this._replaceToken=_2.hitch(this,this._replaceToken);
this._postDeleteAnim=_2.hitch(this,this._postDeleteAnim);
},postCreate:function(){
var _5=this;
if(this.emptyTemplate){
this._templateLoadCount++;
}
if(this.itemTemplate){
this._templateLoadCount++;
}
if(this.dividerTemplate){
this._templateLoadCount++;
}
this.connect(this.domNode,"onmousedown",function(_6){
var _7=_6;
if(_6.targetTouches&&_6.targetTouches.length>0){
_7=_6.targetTouches[0];
}
var _8=_5._getRowNode(_6.target);
if(_8){
_5._setDataInfo(_8,_6);
_5._selectRow(_8);
_5._mouseDownPos={x:_7.pageX,y:_7.pageY};
_5._dragThreshold=null;
}
});
this.connect(this.domNode,"onmouseup",function(_9){
if(_9.targetTouches&&_9.targetTouches.length>0){
_9=_9.targetTouches[0];
}
var _a=_5._getRowNode(_9.target);
if(_a){
_5._setDataInfo(_a,_9);
if(_5._selectedRow){
_5.onSelect(_a._data,_a._idx,_a);
}
this._deselectRow();
}
});
if(this.enableDelete){
this.connect(this.domNode,"mousemove",function(_b){
_2.stopEvent(_b);
if(!_5._selectedRow){
return;
}
var _c=_5._getRowNode(_b.target);
if(_5.enableDelete&&_c&&!_5._deleting){
_5.handleDrag(_b);
}
});
}
this.connect(this.domNode,"onclick",function(_d){
if(_d.touches&&_d.touches.length>0){
_d=_d.touches[0];
}
var _e=_5._getRowNode(_d.target,true);
if(_e){
_5._setDataInfo(_e,_d);
}
});
this.connect(this.domNode,"mouseout",function(_f){
if(_f.touches&&_f.touches.length>0){
_f=_f.touches[0];
}
if(_f.target==_5._selectedRow){
_5._deselectRow();
}
});
if(!this.itemTemplate){
throw Error("An item template must be provided to "+this.declaredClass);
}
this._loadTemplate(this.itemTemplate,"itemTemplate",this._checkLoadComplete);
if(this.emptyTemplate){
this._loadTemplate(this.emptyTemplate,"emptyTemplate",this._checkLoadComplete);
}
if(this.dividerTemplate){
this._loadTemplate(this.dividerTemplate,"dividerTemplate",this._checkLoadComplete);
}
},handleDrag:function(_10){
var _11=_10;
if(_10.targetTouches&&_10.targetTouches.length>0){
_11=_10.targetTouches[0];
}
var _12=_11.pageX-this._mouseDownPos.x;
var _13=Math.abs(_12);
if(_13>10&&!this._dragThreshold){
this._dragThreshold=_2.marginBox(this._selectedRow).w*0.6;
if(!this.autoDelete){
this.createDeleteButtons(this._selectedRow);
}
}
this._selectedRow.style.left=(_13>10?_12:0)+"px";
if(this._dragThreshold&&this._dragThreshold<_13){
this.preDelete(_12);
}
},handleDragCancel:function(){
if(this._deleting){
return;
}
_2.removeClass(this._selectedRow,"hold");
this._selectedRow.style.left=0;
this._mouseDownPos=null;
this._dragThreshold=null;
this._deleteBtns&&_2.style(this._deleteBtns,"display","none");
},preDelete:function(_14){
var _15=this;
this._deleting=true;
_2.animateProperty({node:this._selectedRow,duration:400,properties:{left:{end:_14+((_14>0?1:-1)*this._dragThreshold*0.8)}},onEnd:_2.hitch(this,function(){
if(this.autoDelete){
this.deleteRow(this._selectedRow);
}
})}).play();
},deleteRow:function(row){
_2.style(row,{visibility:"hidden",minHeight:"0px"});
_2.removeClass(row,"hold");
this._deleteAnimConn=this.connect(row,"webkitAnimationEnd",this._postDeleteAnim);
_2.addClass(row,"collapsed");
},_postDeleteAnim:function(_16){
if(this._deleteAnimConn){
this.disconnect(this._deleteAnimConn);
this._deleteAnimConn=null;
}
var row=this._selectedRow;
var _17=row.nextSibling;
var _18=row.previousSibling;
if(_18&&_18._isDivider){
if(!_17||_17._isDivider){
_18.parentNode.removeChild(_18);
}
}
row.parentNode.removeChild(row);
this.onDelete(row._data,row._idx,this.items);
while(_17){
if(_17._idx){
_17._idx--;
}
_17=_17.nextSibling;
}
_2.destroy(row);
_2.query("> *:not(.buttons)",this.domNode).forEach(this.applyClass);
this._deleting=false;
this._deselectRow();
},createDeleteButtons:function(_19){
var mb=_2.marginBox(_19);
var pos=_2._abs(_19,true);
if(!this._deleteBtns){
this._deleteBtns=_2.create("div",{"class":"buttons"},this.domNode);
this.buttons=[];
this.buttons.push(new _3.mobile.Button({btnClass:"mblRedButton",label:this.labelDelete}));
this.buttons.push(new _3.mobile.Button({btnClass:"mblBlueButton",label:this.labelCancel}));
_2.place(this.buttons[0].domNode,this._deleteBtns);
_2.place(this.buttons[1].domNode,this._deleteBtns);
_2.addClass(this.buttons[0].domNode,"deleteBtn");
_2.addClass(this.buttons[1].domNode,"cancelBtn");
this._handleButtonClick=_2.hitch(this._handleButtonClick);
this.connect(this._deleteBtns,"onclick",this._handleButtonClick);
}
_2.removeClass(this._deleteBtns,"fade out fast");
_2.style(this._deleteBtns,{display:"",width:mb.w+"px",height:mb.h+"px",top:(_19.offsetTop)+"px",left:"0px"});
},onDelete:function(_1a,_1b,_1c){
_1c.splice(_1b,1);
if(_1c.length<1){
this.render();
}
},cancelDelete:function(){
this._deleting=false;
this.handleDragCancel();
},_handleButtonClick:function(_1d){
if(_1d.touches&&_1d.touches.length>0){
_1d=_1d.touches[0];
}
var _1e=_1d.target;
if(_2.hasClass(_1e,"deleteBtn")){
this.deleteRow(this._selectedRow);
}else{
if(_2.hasClass(_1e,"cancelBtn")){
this.cancelDelete();
}else{
return;
}
}
_2.addClass(this._deleteBtns,"fade out");
},applyClass:function(_1f,idx,_20){
_2.removeClass(_1f,"first last");
if(idx==0){
_2.addClass(_1f,"first");
}
if(idx==_20.length-1){
_2.addClass(_1f,"last");
}
},_setDataInfo:function(_21,_22){
_22.item=_21._data;
_22.index=_21._idx;
},onSelect:function(_23,_24,_25){
},_selectRow:function(row){
if(this._deleting&&this._selectedRow&&row!=this._selectedRow){
this.cancelDelete();
}
if(!_2.hasClass(row,"row")){
return;
}
if(this.enableHold||this.enableDelete){
_2.addClass(row,"hold");
}
this._selectedRow=row;
},_deselectRow:function(){
if(!this._selectedRow||this._deleting){
return;
}
this.handleDragCancel();
_2.removeClass(this._selectedRow,"hold");
this._selectedRow=null;
},_getRowNode:function(_26,_27){
while(_26&&!_26._data&&_26!=this.domNode){
if(!_27&&_2.hasClass(_26,"noclick")){
return null;
}
_26=_26.parentNode;
}
return _26==this.domNode?null:_26;
},applyTemplate:function(_28,_29){
return _2._toDom(_2.string.substitute(_28,_29,this._replaceToken,this.formatters||this));
},render:function(){
_2.query("> *:not(.buttons)",this.domNode).forEach(_2.destroy);
if(this.items.length<1&&this.emptyTemplate){
_2.place(_2._toDom(this.emptyTemplate),this.domNode,"first");
}else{
this.domNode.appendChild(this._renderRange(0,this.items.length));
}
if(_2.hasClass(this.domNode.parentNode,"mblRoundRect")){
_2.addClass(this.domNode.parentNode,"mblRoundRectList");
}
var _2a=_2.query("> .row",this.domNode);
if(_2a.length>0){
_2.addClass(_2a[0],"first");
_2.addClass(_2a[_2a.length-1],"last");
}
},_renderRange:function(_2b,_2c){
var _2d=[];
var row,i;
var _2e=document.createDocumentFragment();
_2b=Math.max(0,_2b);
_2c=Math.min(_2c,this.items.length);
for(i=_2b;i<_2c;i++){
row=this.applyTemplate(this.itemTemplate,this.items[i]);
_2.addClass(row,"row");
row._data=this.items[i];
row._idx=i;
_2d.push(row);
}
if(!this.dividerFunction||!this.dividerTemplate){
for(i=_2b;i<_2c;i++){
_2d[i]._data=this.items[i];
_2d[i]._idx=i;
_2e.appendChild(_2d[i]);
}
}else{
var _2f=null;
var _30;
var _31;
for(i=_2b;i<_2c;i++){
_2d[i]._data=this.items[i];
_2d[i]._idx=i;
_30=this.dividerFunction(this.items[i]);
if(_30&&_30!=_2f){
_31=this.applyTemplate(this.dividerTemplate,{label:_30,item:this.items[i]});
_31._isDivider=true;
_2e.appendChild(_31);
_2f=_30;
}
_2e.appendChild(_2d[i]);
}
}
return _2e;
},_replaceToken:function(_32,key){
if(key.charAt(0)=="!"){
_32=_2.getObject(key.substr(1),false,_this);
}
if(typeof _32=="undefined"){
return "";
}
if(_32==null){
return "";
}
return key.charAt(0)=="!"?_32:_32.toString().replace(/"/g,""");
},_checkLoadComplete:function(){
this._templateLoadCount--;
if(this._templateLoadCount<1&&this.get("items")){
this.render();
}
},_loadTemplate:function(url,_33,_34){
if(!url){
_34();
return;
}
if(_4[url]){
this.set(_33,_4[url]);
_34();
}else{
var _35=this;
_2.xhrGet({url:url,sync:false,handleAs:"text",load:function(_36){
_4[url]=_2.trim(_36);
_35.set(_33,_4[url]);
_34();
}});
}
},_setFormattersAttr:function(_37){
this.formatters=_37;
},_setItemsAttr:function(_38){
this.items=_38||[];
if(this._templateLoadCount<1&&_38){
this.render();
}
},destroy:function(){
if(this.buttons){
_2.forEach(this.buttons,function(_39){
_39.destroy();
});
this.buttons=null;
}
this.inherited(arguments);
}});
})();
}); | PypiClean |
/Flask-Atomic-0.1.11.tar.gz/Flask-Atomic-0.1.11/flask_atomic/orm/mixins/abstracts.py | from sqlalchemy import Column
from sqlalchemy import String
from flask_atomic.orm.operators import commitsession
class DYNAFlagMixin(object):
"""
DYNA stands for (in context of the 'active' field)
D - Deleted
Y - Yes
N - No (inactive, i.e suspended)
A - Approval required
    This is only a suggested pattern for soft database deletion. A sensible
    rule is that nothing should ever be truly deleted from a database by a user.
Database deletions should be handled by application owners or data owners.
Allowing customers to modify the existence of data is not good.
"""
active = Column(String(1), default='Y')
def can_commit(self, commit=True):
if commit:
commitsession()
return self
def safe_delete(self, commit=True):
self.active = 'D'
return self.can_commit(commit)
def deactivate(self, commit=True):
self.active = 'N'
return self.can_commit(commit)
def restore(self, commit=True):
self.active = 'Y'
return self.can_commit(commit)
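# Minimal usage sketch (assumes a declarative SQLAlchemy model named "Customer"
# mixing in DYNAFlagMixin; the model and query calls are hypothetical, not part
# of the original module):
#
#     class Customer(db.Model, DYNAFlagMixin):
#         id = Column(Integer, primary_key=True)
#
#     customer = Customer.query.get(1)
#     customer.safe_delete()       # flag 'D' and commit
#     customer.deactivate(False)   # flag 'N' without committing
#     customer.restore()           # flag 'Y' and commit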
class FlagMixin(object):
"""
DYNA stands for (in context of the 'active' field)
D - Deleted
Y - Yes
N - No (inactive, i.e suspended)
A - Approval required
    This is only a suggested pattern for soft database deletion. A sensible
    rule is that nothing should ever be truly deleted from a database by a user.
Database deletions should be handled by application owners or data owners.
Allowing customers to modify the existence of data is not good.
"""
active = Column(String(1), default='Y')
def can_commit(self, commit=True):
if commit:
commitsession()
return self
def safe_delete(self, commit=True):
self.active = 'D'
return self.can_commit(commit)
def deactivate(self, commit=True):
self.active = 'N'
return self.can_commit(commit)
def restore(self, commit=True):
self.active = 'Y'
return self.can_commit(commit) | PypiClean |
/Bempp-cl-0.3.1.tar.gz/Bempp-cl-0.3.1/bempp/api/space/maxwell_spaces.py |
import numpy as _np
import numba as _numba
def _is_screen(grid):
"""Check if there is an edge only adjacent to one triangle."""
for e in range(grid.edges.shape[1]):
if len([j for i in grid.element_edges for j in i if j == e]) < 2:
return True
return False
def rwg0_function_space(
grid,
support_elements=None,
segments=None,
swapped_normals=None,
include_boundary_dofs=False,
truncate_at_segment_edge=True,
):
"""Define a space of RWG functions of order 0."""
from .space import SpaceBuilder, _process_segments
from bempp.api.utils.helpers import serialise_list_of_lists
support, normal_multipliers = _process_segments(
grid, support_elements, segments, swapped_normals
)
edge_neighbors, edge_neighbors_ptr = serialise_list_of_lists(grid.edge_neighbors)
(
global_dof_count,
support,
local2global,
local_multipliers,
) = _compute_rwg0_space_data(
support,
edge_neighbors,
edge_neighbors_ptr,
grid.element_edges,
grid.number_of_elements,
grid.number_of_edges,
include_boundary_dofs,
truncate_at_segment_edge,
)
return (
SpaceBuilder(grid)
.set_codomain_dimension(3)
.set_support(support)
.set_normal_multipliers(normal_multipliers)
.set_order(0)
.set_is_localised(False)
.set_shapeset("rwg0")
.set_identifier("rwg0")
.set_local2global(local2global)
.set_local_multipliers(local_multipliers)
.set_barycentric_representation(rwg0_barycentric_function_space)
.set_numba_evaluator(_numba_rwg0_evaluate)
.build()
)
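# Short usage sketch (hypothetical, not part of this module): the public
# bempp.api.function_space factory is the usual entry point to the constructors
# in this file, e.g. for an order-0 RWG/SNC pair on a regular sphere:
#
#     import bempp.api
#     grid = bempp.api.shapes.regular_sphere(3)
#     rwg = bempp.api.function_space(grid, "RWG", 0)
#     snc = bempp.api.function_space(grid, "SNC", 0)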
def rwg0_barycentric_function_space(coarse_space):
"""Define a space of RWG functions of order 0 over a barycentric grid."""
from .space import SpaceBuilder
from scipy.sparse import coo_matrix
number_of_support_elements = coarse_space.number_of_support_elements
bary_grid_number_of_elements = 6 * coarse_space.grid.number_of_elements
bary_support_elements = 6 * _np.repeat(coarse_space.support_elements, 6) + _np.tile(
_np.arange(6), number_of_support_elements
)
bary_support_size = len(bary_support_elements)
support = _np.zeros(6 * coarse_space.grid.number_of_elements, dtype=_np.bool_)
support[bary_support_elements] = True
normal_multipliers = _np.repeat(coarse_space.normal_multipliers, 6)
local_coords = _np.array(
[[0, 0], [0.5, 0], [1, 0], [0.5, 0.5], [0, 1], [0, 0.5], [1.0 / 3, 1.0 / 3]]
).T
coeffs = (
_np.array(
[
[1, -1.0 / 3, 0],
[-1.0 / 3, 1, 0],
[0, 1.0 / 3, -1.0 / 6],
[0, 0, 1.0 / 6],
[0, 0, 1.0 / 6],
[1.0 / 3, 0, -1.0 / 6],
]
),
_np.array(
[
[0, 1.0 / 3, -1.0 / 6],
[0, 0, 1.0 / 6],
[0, 0, 1.0 / 6],
[1.0 / 3, 0, -1.0 / 6],
[1, -1.0 / 3, 0],
[-1.0 / 3, 1, 0],
]
),
_np.array(
[
[0, 0, 1.0 / 6],
[1.0 / 3, 0, -1.0 / 6],
[1, -1.0 / 3, 0],
[-1.0 / 3, 1, 0],
[0, 1.0 / 3, -1.0 / 6],
[0, 0, 1.0 / 6],
]
),
)
coarse_dofs, bary_dofs, values = generate_rwg0_map(
coarse_space.grid.data(), coarse_space.support_elements, local_coords, coeffs
)
local2global = _np.zeros((bary_grid_number_of_elements, 3), dtype="uint32")
local_multipliers = _np.zeros((bary_grid_number_of_elements, 3), dtype="uint32")
local2global[support] = _np.arange(3 * bary_support_size).reshape(
bary_support_size, 3
)
local_multipliers[support] = 1
transform = coo_matrix(
(values, (bary_dofs, coarse_dofs)),
shape=(3 * bary_support_size, 3 * number_of_support_elements),
dtype=_np.float64,
).tocsr()
dof_transformation = transform @ coarse_space.map_to_localised_space
return (
SpaceBuilder(coarse_space.grid.barycentric_refinement)
.set_codomain_dimension(3)
.set_support(support)
.set_normal_multipliers(normal_multipliers)
.set_order(0)
.set_is_localised(True)
.set_is_barycentric(True)
.set_shapeset("rwg0")
.set_identifier("rwg0")
.set_local2global(local2global)
.set_local_multipliers(local_multipliers)
.set_dof_transformation(dof_transformation)
.set_numba_evaluator(_numba_rwg0_evaluate)
.build()
)
def snc0_function_space(
grid,
support_elements=None,
segments=None,
swapped_normals=None,
include_boundary_dofs=False,
truncate_at_segment_edge=True,
):
"""Define a space of SNC functions of order 0."""
from .space import SpaceBuilder, _process_segments
from bempp.api.utils.helpers import serialise_list_of_lists
support, normal_multipliers = _process_segments(
grid, support_elements, segments, swapped_normals
)
edge_neighbors, edge_neighbors_ptr = serialise_list_of_lists(grid.edge_neighbors)
(
global_dof_count,
support,
local2global,
local_multipliers,
) = _compute_rwg0_space_data(
support,
edge_neighbors,
edge_neighbors_ptr,
grid.element_edges,
grid.number_of_elements,
grid.number_of_edges,
include_boundary_dofs,
truncate_at_segment_edge,
)
return (
SpaceBuilder(grid)
.set_codomain_dimension(3)
.set_support(support)
.set_normal_multipliers(normal_multipliers)
.set_order(0)
.set_is_localised(False)
.set_shapeset("snc0")
.set_identifier("snc0")
.set_local2global(local2global)
.set_local_multipliers(local_multipliers)
.set_barycentric_representation(snc0_barycentric_function_space)
.set_numba_evaluator(_numba_snc0_evaluate)
.set_numba_surface_curl(_numba_snc0_surface_curl)
.build()
)
def snc0_barycentric_function_space(coarse_space):
"""Define a space of SNC functions of order 0 over a barycentric grid."""
from .space import SpaceBuilder
from scipy.sparse import coo_matrix
number_of_support_elements = coarse_space.number_of_support_elements
bary_grid_number_of_elements = 6 * coarse_space.grid.number_of_elements
bary_support_elements = 6 * _np.repeat(coarse_space.support_elements, 6) + _np.tile(
_np.arange(6), number_of_support_elements
)
bary_support_size = len(bary_support_elements)
support = _np.zeros(6 * coarse_space.grid.number_of_elements, dtype=_np.bool_)
support[bary_support_elements] = True
normal_multipliers = _np.repeat(coarse_space.normal_multipliers, 6)
local_coords = _np.array(
[[0, 0], [0.5, 0], [1, 0], [0.5, 0.5], [0, 1], [0, 0.5], [1.0 / 3, 1.0 / 3]]
).T
coeffs = (
_np.array(
[
[1, -1.0 / 3, 0],
[-1.0 / 3, 1, 0],
[0, 1.0 / 3, -1.0 / 6],
[0, 0, 1.0 / 6],
[0, 0, 1.0 / 6],
[1.0 / 3, 0, -1.0 / 6],
]
),
_np.array(
[
[0, 1.0 / 3, -1.0 / 6],
[0, 0, 1.0 / 6],
[0, 0, 1.0 / 6],
[1.0 / 3, 0, -1.0 / 6],
[1, -1.0 / 3, 0],
[-1.0 / 3, 1, 0],
]
),
_np.array(
[
[0, 0, 1.0 / 6],
[1.0 / 3, 0, -1.0 / 6],
[1, -1.0 / 3, 0],
[-1.0 / 3, 1, 0],
[0, 1.0 / 3, -1.0 / 6],
[0, 0, 1.0 / 6],
]
),
)
coarse_dofs, bary_dofs, values = generate_rwg0_map(
coarse_space.grid.data(), coarse_space.support_elements, local_coords, coeffs
)
local2global = _np.zeros((bary_grid_number_of_elements, 3), dtype="uint32")
local_multipliers = _np.zeros((bary_grid_number_of_elements, 3), dtype="uint32")
local2global[support] = _np.arange(3 * bary_support_size).reshape(
bary_support_size, 3
)
local_multipliers[support] = 1
transform = coo_matrix(
(values, (bary_dofs, coarse_dofs)),
shape=(3 * bary_support_size, 3 * number_of_support_elements),
dtype=_np.float64,
).tocsr()
dof_transformation = transform @ coarse_space.map_to_localised_space
return (
SpaceBuilder(coarse_space.grid.barycentric_refinement)
.set_codomain_dimension(3)
.set_support(support)
.set_normal_multipliers(normal_multipliers)
.set_order(0)
.set_is_localised(True)
.set_is_barycentric(True)
.set_shapeset("rwg0")
.set_identifier("snc0")
.set_local2global(local2global)
.set_local_multipliers(local_multipliers)
.set_dof_transformation(dof_transformation)
.set_numba_evaluator(_numba_snc0_evaluate)
.build()
)
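# The barycentric SNC space reuses the RWG0 shapeset and coefficient map; only
# the evaluator differs, applying the pointwise rotation n x (RWG value) (see
# _numba_snc0_evaluate below). Sketch of that relation for a single point,
# assuming `normal` and `rwg_value` are 3-vectors:
#
#     snc_value = _np.cross(normal, rwg_value)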
def bc_function_space(
grid,
support_elements=None,
segments=None,
swapped_normals=None,
include_boundary_dofs=False,
truncate_at_segment_edge=True,
):
"""Define a space of BC functions."""
from .space import SpaceBuilder
if _is_screen(grid):
# Grid is a screen, not a polyhedron
raise ValueError("BC spaces not yet supported on screens")
bary_grid = grid.barycentric_refinement
coarse_space = rwg0_function_space(
grid,
support_elements,
segments,
swapped_normals,
include_boundary_dofs=include_boundary_dofs,
truncate_at_segment_edge=truncate_at_segment_edge,
)
(
dof_transformation,
support,
normal_multipliers,
local2global,
local_multipliers,
) = _compute_bc_space_data(
grid, bary_grid, coarse_space, truncate_at_segment_edge, swapped_normals
)
return (
SpaceBuilder(bary_grid)
.set_codomain_dimension(3)
.set_support(support)
.set_normal_multipliers(normal_multipliers)
.set_order(0)
.set_is_localised(True)
.set_is_barycentric(True)
.set_shapeset("rwg0")
.set_identifier("rwg0")
.set_local2global(local2global)
.set_local_multipliers(local_multipliers)
.set_dof_transformation(dof_transformation)
.set_numba_evaluator(_numba_rwg0_evaluate)
.build()
)
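# Usage sketch (assuming the bempp-cl factory exposes this constructor under
# the space name "BC"):
#
#     import bempp.api
#     grid = bempp.api.shapes.regular_sphere(2)
#     bc_space = bempp.api.function_space(grid, "BC", 0)
#
# BC functions live on the barycentric refinement and are commonly used as a
# dual space to RWG for operator preconditioning (e.g. Calderon-preconditioned
# EFIE formulations).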
def rbc_function_space(
grid,
support_elements=None,
segments=None,
swapped_normals=None,
include_boundary_dofs=False,
truncate_at_segment_edge=True,
):
"""Define a space of RBC functions."""
from .space import SpaceBuilder
if _is_screen(grid):
# Grid is a screen, not a polyhedron
raise ValueError("BC spaces not yet supported on screens")
bary_grid = grid.barycentric_refinement
coarse_space = rwg0_function_space(
grid,
support_elements,
segments,
swapped_normals,
include_boundary_dofs=include_boundary_dofs,
truncate_at_segment_edge=truncate_at_segment_edge,
)
(
dof_transformation,
support,
normal_multipliers,
local2global,
local_multipliers,
) = _compute_bc_space_data(
grid, bary_grid, coarse_space, truncate_at_segment_edge, swapped_normals
)
return (
SpaceBuilder(bary_grid)
.set_codomain_dimension(3)
.set_support(support)
.set_normal_multipliers(normal_multipliers)
.set_order(0)
.set_is_localised(True)
.set_is_barycentric(True)
.set_shapeset("rwg0")
.set_identifier("snc0")
.set_local2global(local2global)
.set_local_multipliers(local_multipliers)
.set_dof_transformation(dof_transformation)
.set_numba_evaluator(_numba_snc0_evaluate)
.build()
)
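# RBC functions relate to BC functions as SNC relates to RWG: the same
# coefficient map is used, but the evaluator applies the n x (.) rotation.
# Sketch (assuming the factory name "RBC" and the `grid` from the example
# above):
#
#     rbc_space = bempp.api.function_space(grid, "RBC", 0)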
def _compute_bc_space_data(
grid, bary_grid, coarse_space, truncate_at_segment_edge, swapped_normals
):
"""Generate the BC map."""
from bempp.api.grid.grid import enumerate_vertex_adjacent_elements
from scipy.sparse import coo_matrix
coarse_support = _np.zeros(grid.entity_count(0), dtype=_np.bool_)
coarse_support[coarse_space.support_elements] = True
if not truncate_at_segment_edge:
for global_dof_index in range(coarse_space.global_dof_count):
local_dofs = coarse_space.global2local[global_dof_index]
edge_index = grid.data().element_edges[local_dofs[0][1], local_dofs[0][0]]
for v in range(2):
vertex = grid.data().edges[v, edge_index]
start = grid.vertex_neighbors.indexptr[vertex]
end = grid.vertex_neighbors.indexptr[vertex + 1]
for cell in grid.vertex_neighbors.indices[start:end]:
coarse_support[cell] = True
coarse_support_elements = _np.array([i for i, j in enumerate(coarse_support) if j])
number_of_support_elements = len(coarse_support_elements)
bary_support_elements = 6 * _np.repeat(coarse_support_elements, 6) + _np.tile(
_np.arange(6), number_of_support_elements
)
support = _np.zeros(bary_grid.number_of_elements, dtype=_np.bool_)
support[bary_support_elements] = True
bary_support_size = len(bary_support_elements)
bary_vertex_to_edge = enumerate_vertex_adjacent_elements(
bary_grid, bary_support_elements, swapped_normals
)
edge_vectors = (
bary_grid.vertices[:, bary_grid.edges[0, :]]
- bary_grid.vertices[:, bary_grid.edges[1, :]]
)
edge_lengths = _np.linalg.norm(edge_vectors, axis=0)
normal_multipliers = _np.repeat(coarse_space.normal_multipliers, 6)
local2global = _np.zeros((bary_grid.number_of_elements, 3), dtype="uint32")
local_multipliers = _np.zeros((bary_grid.number_of_elements, 3), dtype="uint32")
local2global[support] = _np.arange(3 * bary_support_size).reshape(
bary_support_size, 3
)
local_multipliers[support] = 1
coarse_dofs = []
bary_dofs = []
values = []
for global_dof_index in range(coarse_space.global_dof_count):
local_dofs = coarse_space.global2local[global_dof_index]
edge_index = grid.data().element_edges[local_dofs[0][1], local_dofs[0][0]]
neighbors = grid.edge_neighbors[edge_index]
other = neighbors[1] if local_dofs[0][0] == neighbors[0] else neighbors[0]
if coarse_space.local_multipliers[local_dofs[0][0], local_dofs[0][1]] > 0:
lower = local_dofs[0][0]
upper = other
else:
lower = other
upper = local_dofs[0][0]
vertex1, vertex2 = grid.data().edges[:, edge_index]
# Re-order the vertices so that they appear in anti-clockwise
# order.
for local_index, vertex_index in enumerate(grid.data().elements[:, upper]):
if vertex_index == vertex1:
break
if vertex2 == grid.data().elements[(local_index - 1) % 3, upper]:
vertex1, vertex2 = vertex2, vertex1
# Get the local indices of vertex1 and vertex2 in upper and lower
local_vertex1 = -1
for index, value in enumerate(grid.data().elements[:, upper]):
if value == vertex1:
local_vertex1 = index
break
else:
local_vertex1 = -1
for index, value in enumerate(grid.data().elements[:, lower]):
if value == vertex2:
local_vertex2 = index
break
else:
local_vertex2 = -1
for vertex_index, bary_element, sign in [
(vertex1, 6 * upper + 2 * local_vertex1, -1.0),
(vertex2, 6 * lower + 2 * local_vertex2, 1.0),
]:
# Find the reference element index in elements adjacent to that vertex
for ind, elem in enumerate(bary_vertex_to_edge[vertex_index]):
if bary_element == elem[0]:
break
# Now get all the relevant edges starting to count above
# ind
num_bary_elements = len(bary_vertex_to_edge[vertex_index])
vertex_edges = []
for index in range(num_bary_elements):
elem_edge_pair = bary_vertex_to_edge[vertex_index][
(index + ind) % num_bary_elements
]
for n in range(1, 3):
vertex_edges.append((elem_edge_pair[0], elem_edge_pair[n]))
# We do not want the reference edge part of this list
vertex_edges.pop(0)
vertex_edges.pop(-1)
# We now have a list of edges associated with the vertex counting from edge
# after the reference edge onwards in anti-clockwise order. We can now
# assign the coefficients
nc = num_bary_elements // 2 # Number of elements on coarse grid
# adjacent to vertex.
count = 0
for index, edge in enumerate(vertex_edges):
if index % 2 == 0:
count += 1
elem_index, local_edge_index = edge[:]
edge_length = edge_lengths[
bary_grid.data().element_edges[local_edge_index, elem_index]
]
bary_dofs.append(local2global[elem_index, local_edge_index])
coarse_dofs.append(global_dof_index)
values.append(sign * (nc - count) / (2 * nc * edge_length))
sign *= -1
# Now process the tangential rwgs close to the reference edge
# Get the associated barycentric elements and fill the coefficients in
# the matrix.
bary_upper_minus = 6 * upper + 2 * local_vertex1
bary_upper_plus = 6 * upper + 2 * local_vertex1 + 1
bary_lower_minus = 6 * lower + 2 * local_vertex2
bary_lower_plus = 6 * lower + 2 * local_vertex2 + 1
# The edge that we need always has local edge index 2.
# Can compute the edge length now.
edge_length_upper = edge_lengths[
bary_grid.data().element_edges[2, bary_upper_minus]
]
edge_length_lower = edge_lengths[
bary_grid.data().element_edges[2, bary_lower_minus]
]
# Now assign the dofs in the arrays
coarse_dofs.append(global_dof_index)
coarse_dofs.append(global_dof_index)
coarse_dofs.append(global_dof_index)
coarse_dofs.append(global_dof_index)
bary_dofs.append(local2global[bary_upper_minus, 2])
bary_dofs.append(local2global[bary_upper_plus, 2])
bary_dofs.append(local2global[bary_lower_minus, 2])
bary_dofs.append(local2global[bary_lower_plus, 2])
values.append(1.0 / (2 * edge_length_upper))
values.append(-1.0 / (2 * edge_length_upper))
values.append(-1.0 / (2 * edge_length_lower))
values.append(1.0 / (2 * edge_length_lower))
nentries = len(coarse_dofs)
np_coarse_dofs = _np.zeros(nentries, dtype=_np.uint32)
np_bary_dofs = _np.zeros(nentries, dtype=_np.uint32)
np_values = _np.zeros(nentries, dtype=_np.float64)
np_coarse_dofs[:] = coarse_dofs
np_bary_dofs[:] = bary_dofs
np_values[:] = values
dof_transformation = coo_matrix(
(np_values, (np_bary_dofs, np_coarse_dofs)),
shape=(3 * bary_support_size, coarse_space.global_dof_count),
dtype=_np.float64,
).tocsr()
return (
dof_transformation,
support,
normal_multipliers,
local2global,
local_multipliers,
)
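# The `dof_transformation` assembled above is a CSR matrix with one column per
# coarse RWG DOF; applying it to a coarse coefficient vector yields the
# coefficients of the localised barycentric RWG functions that make up each BC
# function. Sketch with a hypothetical coefficient vector:
#
#     coeffs = _np.ones(coarse_space.global_dof_count)
#     bary_coeffs = dof_transformation @ coeffs  # length 3 * bary_support_size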
@_numba.njit(cache=True)
def _compute_rwg0_space_data(
support,
edge_neighbors,
edge_neighbors_ptr,
element_edges,
number_of_elements,
number_of_edges,
include_boundary_dofs,
truncate_at_segment_edge,
):
"""Compute the local2global map for the space."""
local2global_map = _np.zeros((number_of_elements, 3), dtype=_np.uint32)
local_multipliers = _np.zeros((number_of_elements, 3), dtype=_np.float64)
edge_dofs = -_np.ones(number_of_edges, dtype=_np.int32)
dof_count = 0
for element in _np.flatnonzero(support):
has_dof = False
for local_index in range(3):
edge_index = element_edges[local_index, element]
if edge_dofs[edge_index] != -1:
has_dof = True
else:
current_neighbors = edge_neighbors[
edge_neighbors_ptr[edge_index] : edge_neighbors_ptr[1 + edge_index]
]
supported_neighbors = [e for e in current_neighbors if support[e]]
                if len(supported_neighbors) == 2:
                    edge_dofs[edge_index] = dof_count
                    dof_count += 1
                    has_dof = True
                if len(supported_neighbors) == 1 and include_boundary_dofs:
                    edge_dofs[edge_index] = dof_count
                    dof_count += 1
                    has_dof = True
if not truncate_at_segment_edge:
for cell in current_neighbors:
# Add the element to the support
support[cell] = True
if not has_dof:
# If the element has no DOFs, remove it from support
support[element] = False
for element_index in _np.flatnonzero(support):
dofmap = -_np.ones(3, dtype=_np.int32)
for local_index in range(3):
edge_index = element_edges[local_index, element_index]
if edge_dofs[edge_index] != -1:
dofmap[local_index] = edge_dofs[edge_index]
current_neighbors = edge_neighbors[
edge_neighbors_ptr[edge_index] : edge_neighbors_ptr[1 + edge_index]
]
supported_neighbors = [e for e in current_neighbors if support[e]]
if len(supported_neighbors) == 1:
local_multipliers[element_index, local_index] = 1
else:
# Assign 1 or -1 depending on element index
local_multipliers[element_index, local_index] = (
1 if element_index == min(supported_neighbors) else -1
)
# For every zero local multiplier assign an existing global dof
# in this element. This does not change the result as zero multipliers
# do not contribute. But it allows us not to have to distinguish between
# existing and non existing dofs later on.
first_nonzero = 0
for local_index in range(3):
if local_multipliers[element_index, local_index] != 0:
first_nonzero = local_index
break
for local_index in range(3):
if local_multipliers[element_index, local_index] == 0:
dofmap[local_index] = dofmap[first_nonzero]
local2global_map[element_index, :] = dofmap
return dof_count, support, local2global_map, local_multipliers
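# Illustration of the multiplier convention computed above (illustrative
# indices only): if supported triangles 4 and 7 share an edge, that edge gets a
# single global DOF, local_multipliers is +1.0 on element 4 (the smaller index)
# and -1.0 on element 7, which fixes the RWG orientation across the edge.
# Boundary edges receive a DOF only when include_boundary_dofs is True.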
@_numba.njit(cache=True)
def generate_rwg0_map(grid_data, support_elements, local_coords, coeffs):
"""Actually generate the sparse matrix data."""
number_of_elements = len(support_elements)
coarse_dofs = _np.empty(3 * 18 * number_of_elements, dtype=_np.uint32)
bary_dofs = _np.empty(3 * 18 * number_of_elements, dtype=_np.uint32)
values = _np.empty(3 * 18 * number_of_elements, dtype=_np.float64)
# Iterate through the global dofs and fill up the
# corresponding coefficients.
count = 0
for index, elem_index in enumerate(support_elements):
# Compute all the local vertices
local_vertices = grid_data.local2global(elem_index, local_coords)
l1 = _np.linalg.norm(local_vertices[:, 6] - local_vertices[:, 4])
l2 = _np.linalg.norm(local_vertices[:, 6] - local_vertices[:, 3])
l3 = _np.linalg.norm(local_vertices[:, 6] - local_vertices[:, 5])
l4 = _np.linalg.norm(local_vertices[:, 6] - local_vertices[:, 2])
l5 = _np.linalg.norm(local_vertices[:, 6] - local_vertices[:, 1])
l6 = _np.linalg.norm(local_vertices[:, 6] - local_vertices[:, 0])
le1 = _np.linalg.norm(local_vertices[:, 2] - local_vertices[:, 0])
le2 = _np.linalg.norm(local_vertices[:, 4] - local_vertices[:, 0])
le3 = _np.linalg.norm(local_vertices[:, 4] - local_vertices[:, 2])
outer_edges = [le1, le2, le3]
dof_mult = _np.array(
[
[le1, l6, l5],
[l4, le1, l5],
[le3, l4, l2],
[l1, le3, l2],
[le2, l1, l3],
[l6, le2, l3],
]
)
# Assign the dofs for the six barycentric elements
bary_elements = _np.arange(6) + 6 * index
for local_dof in range(3):
coarse_dof = 3 * index + local_dof
bary_coeffs = coeffs[local_dof]
dof_coeffs = bary_coeffs * outer_edges[local_dof] / dof_mult
coarse_dofs[count : count + 18] = coarse_dof
bary_dofs[count : count + 18] = _np.arange(
3 * bary_elements[0], 3 * bary_elements[0] + 18
)
values[count : count + 18] = dof_coeffs.ravel()
count += 18
return coarse_dofs, bary_dofs, values
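# Each coarse RWG DOF expands into 18 barycentric entries (6 child triangles
# x 3 local DOFs), so the triplet arrays have 3 * 18 * number_of_elements
# entries in total. Sketch of turning them into the sparse map used by the
# barycentric space constructors:
#
#     from scipy.sparse import coo_matrix
#     transform = coo_matrix(
#         (values, (bary_dofs, coarse_dofs)),
#         shape=(3 * 6 * number_of_elements, 3 * number_of_elements),
#     ).tocsr()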
@_numba.njit()
def _numba_rwg0_evaluate(
element_index,
shapeset_evaluate,
local_coordinates,
grid_data,
local_multipliers,
normal_multipliers,
):
"""Evaluate the basis on an element."""
reference_values = shapeset_evaluate(local_coordinates)
npoints = local_coordinates.shape[1]
result = _np.empty((3, 3, npoints), dtype=_np.float64)
edge_lengths = _np.empty(3, dtype=_np.float64)
edge_lengths[0] = _np.linalg.norm(
grid_data.vertices[:, grid_data.elements[0, element_index]]
- grid_data.vertices[:, grid_data.elements[1, element_index]]
)
edge_lengths[1] = _np.linalg.norm(
grid_data.vertices[:, grid_data.elements[2, element_index]]
- grid_data.vertices[:, grid_data.elements[0, element_index]]
)
edge_lengths[2] = _np.linalg.norm(
grid_data.vertices[:, grid_data.elements[1, element_index]]
- grid_data.vertices[:, grid_data.elements[2, element_index]]
)
for index in range(3):
result[:, index, :] = (
local_multipliers[element_index, index]
* edge_lengths[index]
/ grid_data.integration_elements[element_index]
* grid_data.jacobians[element_index].dot(reference_values[:, index, :])
)
return result
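# The kernel above realises the classical RWG definition on a physical
# triangle T with area A: for the edge of length L_i opposite vertex p_i,
#
#     f_i(x) = +/- (L_i / (2 A)) * (x - p_i),    x in T,
#
# where grid_data.integration_elements[element] equals 2 A and the Jacobian
# maps the reference shapeset values to physical coordinates.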
@_numba.njit()
def _numba_snc0_evaluate(
element_index,
shapeset_evaluate,
local_coordinates,
grid_data,
local_multipliers,
normal_multipliers,
):
"""Evaluate the basis on an element."""
reference_values = shapeset_evaluate(local_coordinates)
npoints = local_coordinates.shape[1]
result = _np.empty((3, 3, npoints), dtype=_np.float64)
tmp = _np.empty((3, 3, npoints), dtype=_np.float64)
normal = grid_data.normals[element_index] * normal_multipliers[element_index]
edge_lengths = _np.empty(3, dtype=_np.float64)
edge_lengths[0] = _np.linalg.norm(
grid_data.vertices[:, grid_data.elements[0, element_index]]
- grid_data.vertices[:, grid_data.elements[1, element_index]]
)
edge_lengths[1] = _np.linalg.norm(
grid_data.vertices[:, grid_data.elements[2, element_index]]
- grid_data.vertices[:, grid_data.elements[0, element_index]]
)
edge_lengths[2] = _np.linalg.norm(
grid_data.vertices[:, grid_data.elements[1, element_index]]
- grid_data.vertices[:, grid_data.elements[2, element_index]]
)
for index in range(3):
tmp[:, index, :] = (
local_multipliers[element_index, index]
* edge_lengths[index]
/ grid_data.integration_elements[element_index]
* grid_data.jacobians[element_index].dot(reference_values[:, index, :])
)
result[0, :, :] = normal[1] * tmp[2, :, :] - normal[2] * tmp[1, :, :]
result[1, :, :] = normal[2] * tmp[0, :, :] - normal[0] * tmp[2, :, :]
result[2, :, :] = normal[0] * tmp[1, :, :] - normal[1] * tmp[0, :, :]
return result
@_numba.njit
def _numba_snc0_surface_curl(
element_index,
shapeset_gradient,
local_coordinates,
grid_data,
local_multipliers,
normal_multipliers,
):
"""Evaluate the curl on an element."""
normal = grid_data.normals[element_index] * normal_multipliers[element_index]
reference_derivatives = shapeset_gradient(local_coordinates)
jac_inv_t = grid_data.jac_inv_trans[element_index]
derivatives = jac_inv_t @ reference_derivatives @ jac_inv_t.T
    reference_values = (
        normal[0] * (derivatives[2, 1] - derivatives[1, 2])
        + normal[1] * (derivatives[0, 2] - derivatives[2, 0])
        + normal[2] * (derivatives[1, 0] - derivatives[0, 1])
    )
result = _np.empty(3, dtype=_np.float64)
edge_lengths = _np.empty(3, dtype=_np.float64)
edge_lengths[0] = _np.linalg.norm(
grid_data.vertices[:, grid_data.elements[0, element_index]]
- grid_data.vertices[:, grid_data.elements[1, element_index]]
)
edge_lengths[1] = _np.linalg.norm(
grid_data.vertices[:, grid_data.elements[2, element_index]]
- grid_data.vertices[:, grid_data.elements[0, element_index]]
)
edge_lengths[2] = _np.linalg.norm(
grid_data.vertices[:, grid_data.elements[1, element_index]]
- grid_data.vertices[:, grid_data.elements[2, element_index]]
)
for index in range(3):
result[index] = (
local_multipliers[element_index, index]
* edge_lengths[index] * reference_values
)
    return result
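# Note: the surface curl of an order-0 SNC function is constant on each
# element, which is why this kernel returns a single scalar per local basis
# function rather than one value per quadrature point.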