id
stringlengths 1
265
| text
stringlengths 6
5.19M
| dataset_id
stringclasses 7
values |
---|---|---|
/Nagababu_Django_Webapps-0.0.2-py3-none-any.whl/webapp3/migrations/0001_initial.py |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for webapp3: checkout orders, menu items, cart items and orders."""

    initial = True

    dependencies = [
        # orders reference the (swappable) user model
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Checkout_order',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('your_name', models.CharField(max_length=100)),
                ('mobile_no', models.CharField(max_length=10)),
                # Choice KEYS are stored values and must not change; only the human-readable
                # labels were fixed ('by11.15' displayed 'by 11.00 AM', 'by12.00' had a stray ']').
                ('delivery_time', models.CharField(choices=[('by11', 'by 11.00 AM'), ('by11.15', 'by 11.15 AM'), ('by11.30', 'by 11.30 AM'), ('by11.45', 'by 11.45 AM'), ('by12.00', 'by 12.00 PM'), ('by12.15', 'by 12.15 PM'), ('by12.30', 'by 12.30 PM'), ('by12.45', 'by 12.45 PM'), ('by1.00', 'by 1.00 PM')], max_length=100)),
                ('type_of_order', models.CharField(choices=[('Hand it to me', 'Hand it to me'), ('Leave it at my door', 'Leave it at my door')], max_length=100)),
                # NOTE(review): key 'charala' vs label 'Chirala' looks like a key typo, but the
                # key is a stored value — left unchanged for backward compatibility.
                ('city', models.CharField(choices=[('bapatla', 'Bapatla'), ('charala', 'Chirala'), ('guntur', 'Guntur'), ('ongole', 'Ongole'), ('nellore', 'Nellore')], max_length=100)),
                ('address', models.CharField(max_length=100)),
                ('payment_type', models.CharField(choices=[('Pay with cash to the courier', 'Pay with cash to the courier'), ('Online payment', 'Online Payment')], max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='order_items_menu',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('item_image', models.ImageField(upload_to='order_menu_page/')),
                ('item_title', models.CharField(max_length=100)),
                ('item_desc', models.CharField(max_length=100)),
                # labels fixed: 'Medium.' -> 'Medium', 'Standars' -> 'Standard'
                ('item_size', models.CharField(choices=[('small', 'Small'), ('medium', 'Medium'), ('standard', 'Standard'), ('large', 'Large'), ('thin', 'Thin')], default='select', max_length=20)),
                ('item_cost', models.FloatField(default=0)),
                ('slug', models.SlugField()),
            ],
        ),
        migrations.CreateModel(
            name='Order_item_model',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('item_image', models.ImageField(upload_to='images_app2/')),
                ('item_title', models.CharField(max_length=100)),
                ('item_price', models.FloatField(default=0)),
                ('item_quantity', models.IntegerField(default=1)),
                ('total_price', models.FloatField(default=0)),
                ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='webapp3.order_items_menu')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('slug', models.SlugField()),
            ],
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ordered_date', models.DateTimeField()),
                ('ordered', models.BooleanField(default=False)),
                ('items', models.ManyToManyField(to='webapp3.Order_item_model')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
/JumpScale-core-6.0.0.tar.gz/JumpScale-core-6.0.0/lib/JumpScale/baselib/specparser/SpecParser.py | from JumpScale import j
from SpecModelActorsGenerator import SpecModelActorsGenerator
class Specbase(j.code.classGetBase()):
    """Common base for all parsed spec items (actors, methods, models, enums, props)."""
    def __init__(self,linenr):
        # linenr: 1-based line in the source .spec file where this item starts
        self.name=""
        self.description=""
        self.tags=""
        self.comment=""
        self.linenr=linenr
    def addDefaults(self):
        # hook: subclasses may add implicit members (e.g. a model's 'id' prop); no-op here
        pass
    def getDefaultValue(self, type, value):
        """Coerce the textual default *value* from the spec file to *type*.

        Empty text or the literal "None" yields None; surrounding double
        quotes are stripped from strings; 'int'/'bool' types are converted,
        anything else is returned as the (unquoted) string.
        """
        if value.strip()=="" or value.strip() == "None":
            return None
        elif j.basetype.string.check(value):
            # strip surrounding double quotes before any numeric/bool conversion
            value = value.strip("\"")
        if type == 'int' and value:
            return int(value)
        elif type == 'bool' and value:
            # NOTE(review): assumes j.basetype.boolean.fromString accepts the
            # textual bool spellings used in spec files — confirm
            return j.basetype.boolean.fromString(value)
        return value
class SpecEnum(Specbase):
    """Spec item describing an enumeration: one value per non-blank line."""
    def __init__(self, name, specpath, linenr):
        Specbase.__init__(self, linenr)
        self.name = name
        self.specpath = specpath
        self.appname = ""
        self.enums = []
        self.actorname = ""
    def _parse(self, parser, content):
        # every non-blank line of the block body is one enumeration value
        values = (raw.strip() for raw in content.split("\n"))
        self.enums.extend(val for val in values if val != "")
class SpecActor(Specbase):
    """Spec item describing an actor; its block items are methods."""
    def __init__(self, name, descr, tags, specpath, linenr):
        Specbase.__init__(self, linenr)
        self.methods = []
        self.type = ""
        self.appname = ""
        self.name = name
        self.description = descr
        self.tags = tags
        self.specpath = specpath
    def _addItem(self, obj):
        # called by SpecBlock for every parsed method item
        self.methods.append(obj)
    def addDefaults(self):
        # actors carry no implicit members
        pass
class SpecActorMethod(Specbase):
    """One method of an actor spec: a name plus 'var:' parameters and an optional 'result:'."""
    def __init__(self,linenr):
        Specbase.__init__(self,linenr)
        self.vars=[]      # SpecActorMethodVar instances, in declaration order
        self.result=None  # Specbase with .type/.comment set, or None when no result: line
    def _parseFirstLine(self,parser,line):
        # first line of a method item: "$name [@tags] [#comment]"
        self.comment,self.tags,line=parser.getTagsComment(line)
        self.name=line.strip()
    def _parse(self,parser,content):
        """Parse the method body: one 'var:...' or 'result:...' declaration per line."""
        content=parser.deIndent(content,self.linenr)
        linenr=self.linenr
        for line in content.split("\n"):
            linenr+=1
            line0=line  # keep the raw line for error reporting
            if line.strip()=="" or line.strip()[0]=="#":
                continue
            # lines without a ':' are silently ignored
            if line.find(":")<>-1:
                comments,tags,line=parser.getTagsComment(line)
                errormsg="Syntax error, right syntax var:$name $type,$defaultvalue,$description @tags #remarks"
                try:
                    varname,line=line.split(":",1)
                except:
                    return parser.raiseError(errormsg,line0,linenr)
                if varname=="var":
                    # "var:$name $type,$default,$descr"
                    if line.find(" ")==-1:
                        return parser.raiseError(errormsg,line0,linenr)
                    else:
                        varname,line=line.split(" ",1)
                    try:
                        ttype,default,descr=line.split(",",2)
                    except:
                        return parser.raiseError(errormsg,line0,linenr)
                    default = self.getDefaultValue(ttype, default)
                    spec=SpecActorMethodVar(varname,descr,tags,linenr,default)
                    spec.comment=comments
                    self.vars.append(spec)
                elif varname=="result":
                    # "result:$type [@tags] [#remarks]"
                    errormsg="Syntax error, right syntax result:$type @tags #remarks"
                    if line.find(" ")==-1 and (line.find("@")<>-1 or line.find("$")<>-1):
                        return parser.raiseError(errormsg,line0,linenr)
                    if line.find(" ")==-1:
                        ttype=line
                    else:
                        ttype,line=line.split(" ",1)
                    # result is stored as a bare Specbase; .type is set dynamically
                    self.result=Specbase(linenr)
                    self.result.type=ttype
                    self.result.comment=comments
                else:
                    return parser.raiseError("Only var & result support on line, syntaxerror.",line0,linenr)
class SpecActorMethodVar(Specbase):
    """One parameter (var:) of an actor method spec."""
    def __init__(self, name, descr, tags, linenr, default):
        Specbase.__init__(self, linenr)
        self.defaultvalue = default
        self.tags = tags
        self.description = descr
        self.name = name
class SpecModel(Specbase):
    """Spec item describing a (root)model; its block items are properties."""
    def __init__(self, name, descr, tags, specpath, linenr):
        Specbase.__init__(self, linenr)
        self.properties = []
        self.type = ""
        self.actorname = ""
        self.rootobject = False
        self.name = name
        self.description = descr
        self.tags = tags
        self.specpath = specpath
    def _addItem(self, obj):
        # called by SpecBlock for every parsed property item
        self.properties.append(obj)
    def exists(self, propname):
        """True when a property named *propname* is already defined on this model."""
        return any(str(prop.name) == propname for prop in self.properties)
    def addDefaults(self):
        """Root models get an implicit auto-generated integer 'id' property."""
        if self.type != "rootmodel" or self.exists("id"):
            return
        prop = SpecModelProperty(0)
        prop.type = 'int'
        prop.name = 'id'
        prop.description = 'Auto generated id @optional'
        self._addItem(prop)
class SpecModelProperty(Specbase):
    """One property (prop:) of a model spec; single-line declaration."""
    def __init__(self,linenr):
        Specbase.__init__(self,linenr)
        self.default=None
        self.type=None
    def _parseFirstLine(self,parser,line):
        # *line* arrives WITHOUT the leading "prop:" (already split off by SpecBlock);
        # expected shape: "$name $type,$defaultvalue,$description"
        errormsg="Syntax error, right syntax prop:$name $type,$defaultvalue,$description @tags #remarks"
        line0="prop:%s"%line  # reconstruct the original line, used only in error reports
        if line.find(" ")==-1:
            return parser.raiseError(errormsg,line0,self.linenr)
        else:
            self.name,line=line.split(" ",1)
        try:
            self.type,self.default,self.description=line.split(",",2)
            # coerce the textual default to the declared type (may raise -> syntax error)
            self.default = self.getDefaultValue(self.type, self.default)
        except:
            return parser.raiseError(errormsg,line0,self.linenr)
    def _parse(self,parser,content):
        # properties are single-line items; there is no body to parse
        pass
class SpecBlock():
    """
    generic block of specs identified with starting with [...]
    can be multiple types (actor, enumeration, model, rootmodel)
    """
    def __init__(self,parser,line,linenr,appname,actorname):
        # *line* is the "[type:name] ..." header line of the block
        self.appname=appname
        self.actorname=actorname
        self.descr=""
        self.content=""   # raw body lines, filled in by SpecFileParser
        self.name=""
        self.comment,self.tags,line=parser.getTagsComment(line) #get @tags / #comment out of the header
        line=line.replace("[","")
        if line.find("]")==-1:
            return parser.raiseError("each [ on block should finish with ]",line,linenr)
        line=line.replace("]","").strip()
        # header is "$type" or "$type:$name", lowercased
        splitted=line.split(":")
        splitted=[item.strip().lower() for item in splitted]
        self.type=splitted[0]
        if len(splitted)==1:
            self.name=""
        elif len(splitted)==2:
            self.name=splitted[1]
        else:
            return parser.raiseError("each [...] on block need to be in format [$type:$name] or [$type], did not find :",line,linenr)
        self.parser=parser
        self.startline=linenr
        self.items=[]
    def parse(self):
        """Parse self.content into a spec object and register it with j.core.specparser."""
        self.content=self.parser.deIndent(self.content,self.startline)
        # phase 1: create (or reuse) the spec object for this block type and
        # remember which item class ('method'/'prop' lines) the body may contain
        if self.type=="actor":
            ttypeId="method"
            spec=None
            # reuse an already-registered actor spec so several files can extend it
            if len(j.core.specparser.specs.keys())>0 and self.type=="actor":
                key="%s_%s"%(self.appname,self.actorname)
                if key in j.core.specparser.actornames:
                    spec=j.core.specparser.getActorSpec(self.appname,self.actorname, False)
            if spec==None:
                spec=SpecActor(self.name,self.descr,self.tags,self.parser.path,self.startline)
            spec.actorname=self.actorname
            spec.appname=self.appname
            # register actorname under its app
            if not j.core.specparser.app_actornames.has_key(spec.appname):
                j.core.specparser.app_actornames[self.appname]=[]
            if spec.actorname not in j.core.specparser.app_actornames[self.appname]:
                j.core.specparser.app_actornames[self.appname].append(spec.actorname)
            currentitemClass=SpecActorMethod
        elif self.type=="enumeration":
            ttypeId="enumeration"
            spec=SpecEnum(self.name,self.parser.path,self.startline)
            spec.actorname=self.actorname
            currentitemClass=None  # enum bodies have no sub-items
        elif self.type=="model" or self.type=="rootmodel":
            ttypeId="prop"
            spec=SpecModel(self.name,self.descr,self.tags,self.parser.path,self.startline)
            spec.actorname=self.actorname
            spec.appname=self.appname
            spec.name=self.name
            if self.type=="rootmodel":
                spec.rootobject=True
            currentitemClass=SpecModelProperty
        else:
            return self.parser.raiseError("Invalid type '%s' could not find right type of spec doc, only supported model,actor,enum :" % self.type, self.content,self.startline)
        # phase 2: walk the body lines and cut them into items
        linenr=self.startline
        state="start"
        currentitemContent=""
        currentitem=None
        if self.type=="enumeration":
            # enums have no item headers: the whole body is parsed in one go below
            currentitemContent=self.content
            self.content=""
            currentitem=spec
        for line in self.content.split("\n"):
            linenr+=1
            line=line.rstrip()
            if line.strip()=="":
                # blank before any content: shift the item's reported start line
                if currentitem<>None and currentitemContent=="":
                    currentitem.linenr=linenr+1
                continue
            if state=="description" and line.strip().find("\"\"\"")==0:
                #end of description
                state="blockfound"
                currentitem.linenr=linenr+1
                continue
            if state=="description":
                currentitem.description+="%s\n" % line.strip()
            if (state=="start" or state=="blockfound") and line.strip().find("\"\"\"")==0:
                #found description
                state="description"
                continue
            if state=="blockfound" and line.strip().find("@")==0:
                #found labels tags on right level
                tmp1,currentitem.tags,tmp2=self.parser.getTagsComment(line)
                currentitem.linenr=linenr+1
                continue
            if state=="blockfound" and line[0]==" ":
                # indented line: belongs to the body of the current item
                currentitemContent+="%s\n" % line
            if (state=="start" or state=="blockfound") and line[0]<>" " and line.find(":")<>-1:
                # non-indented "type:..." line starts a new item
                typeOnLine= line.split(":",1)[0].strip()
                if ttypeId==typeOnLine:
                    state="blockfound"
                    # flush the previous item's accumulated body first
                    if currentitemContent<>"":
                        currentitem._parse(self.parser,currentitemContent)
                        currentitemContent=""
                    currentitem=currentitemClass(linenr)
                    comment,tags,line=self.parser.getTagsComment(line)
                    currentitem._parseFirstLine(self.parser,line.split(":",1)[1].strip())
                    if comment<>"":
                        currentitem.comment=comment
                    if tags<>"":
                        currentitem.tags=tags
                    spec._addItem(currentitem)
                    currentitemContent=""
                else:
                    self.parser.raiseError("Found item %s, only %s supported." % (typeOnLine,ttypeId),line,linenr)
        #at end of block: make sure the last item (or the enum body) is processed
        if currentitemContent<>"":
            currentitem._parse(self.parser,currentitemContent)
        spec.type=self.type
        spec.addDefaults()
        j.core.specparser.addSpec(spec)
    def __str__(self):
        s="name:%s\n" % self.name
        s+="type:%s\n" % self.type
        s+="descr:%s\n" % self.descr
        s+="tags:%s\n" % self.tags
        s+="content:\n%s\n" % self.content
        return s
    __repr__=__str__
class SpecDirParser():
    """Parses every *.spec file below *path* into spec blocks.

    Files whose basename starts with actor/enum/model are parsed first (in
    that order), the remainder sorted by path length, so referenced specs
    exist before the specs that use them.
    """
    def __init__(self,path,appname,actorname):
        self.appname=appname
        self.actorname=actorname
        self.path=path
        files=j.system.fs.listFilesInDir(self.path,True,"*.spec")
        def sortFilesFollowingLength(files):
            # order: actor*, enum*, model* basenames first, rest by path length
            r={}
            result=[]
            for item in ["actor","enum","model"]:
                # BUGFIX: iterate over a snapshot; popping from the list being
                # looped over silently skipped the element after each match
                for p in files[:]:
                    pp=j.system.fs.getBaseName(p)
                    if pp.find(item)==0:
                        result.append(p)
                        files.pop(files.index(p))
            # bucket remaining files by path length, then append in ascending order
            for p in files:
                if not r.has_key(len(p)):
                    r[len(p)]=[]
                r[len(p)].append(p)
            lkeysSorted=r.keys()
            lkeysSorted.sort()
            for lkey in lkeysSorted:
                result=result+r[lkey]
            return result
        files=sortFilesFollowingLength(files)
        self.specblocks={} #key is "$type_$name"
        for path in files:
            # files prefixed example__ are templates, not real specs
            if j.system.fs.getBaseName(path).find("example__")==0:
                continue
            parser=j.core.specparser._getSpecFileParser(path,self.appname,self.actorname)
            for key in parser.specblocks.keys():
                block=parser.specblocks[key]
                self.specblocks[block.type+"_"+block.name]=block
    def getSpecBlock(self,type,name):
        """Return the block registered as "$type_$name", or False when unknown."""
        key=type+"_"+name
        if self.specblocks.has_key(key):
            return self.specblocks[key]
        else:
            return False
    def __str__(self):
        s="path:%s\n" % self.path
        for key in self.specblocks.keys():
            block=self.specblocks[key]
            s+="%s\n\n" % block
        return s
    __repr__=__str__
class SpecFileParser():
    """Splits one .spec file into SpecBlock objects and parses them."""
    def __init__(self,path,appname,actorname):
        """
        find blocks in file; a block starts with a [...] header and runs
        until the next [...] header or end of file
        """
        self.path=path
        self.appname=appname
        self.actorname=actorname
        if self.appname<>self.appname.lower().strip():
            emsg="appname %s for specs should be lowercase & no spaces" % self.appname
            raise RuntimeError(emsg+" {category:spec.nameerror}")
        self.contentin=j.system.fs.fileGetContents(path)
        self.contentout=""       # normalized copy of the file as it is consumed
        self.specblocks={} #key is name
        state="start"
        #a block starts with [...] and ends with next [] or end of file
        state="start"  # (redundant re-assignment kept from original)
        linenr=0
        currentblock=None
        for line in self.contentin.split("\n"):
            linenr+=1
            line=line.rstrip()
            # NOTE(review): replacement target looks collapsed by text extraction;
            # original likely replaced tabs with 4 spaces — confirm
            line=line.replace("\t"," ")
            # skip blanks & comment-only lines; keep the block's start line in sync
            if line.strip()=="" or line.strip()[0]=="#":
                if currentblock<>None and currentblock.content=="":
                    currentblock.startline=linenr+1
                continue
            if state=="blockfound" and line[0]=="[":
                # current block ended: a new [...] header begins
                state="start"
            if state=="blockdescription" and line.strip().find("\"\"\"")==0:
                #end of description
                state="blockfound"
                self.contentout+="%s\n" % line
                currentblock.startline=linenr+2
                continue
            if state=="blockdescription":
                currentblock.descr+="%s\n" % line.strip()
            if state=="blockfound" and line.strip().find("\"\"\"")==0 and currentblock.descr=="":
                #found description (only the first one counts)
                state="blockdescription"
                self.contentout+="%s\n" % line
                continue
            if state=="blockfound":
                #we are in block & no block descr: accumulate the body
                currentblock.content+="%s\n" % line
            if state=="start" and line[0]=="[":
                # new block header found
                state="blockfound"
                currentblock=SpecBlock(self,line,linenr+1,appname=self.appname,actorname=self.actorname)
                self.specblocks[currentblock.name]=currentblock
            self.contentout+="%s\n" % line
        # all blocks collected: parse their bodies
        for key in self.specblocks.keys():
            block=self.specblocks[key]
            block.parse()
    def getTagsComment(self,line):
        """
        return comment,tags,line

        Splits a trailing "#comment" and "@tags" part off *line*; tags may
        also appear inside the comment part. tags is None when absent.
        """
        if line.find("#")<>-1:
            comment=line.split("#",1)[1]
            line=line.split("#",1)[0]
        else:
            comment=""
        tags=None
        if line.find("@")<>-1:
            tags=line.split("@",1)[1]
            line=line.split("@",1)[0]
        if comment.find("@")<>-1:
            # tags written after the comment marker: "#... @tag"
            tags=comment.split("@",1)[1]
            comment=comment.split("@")[0]
        if comment<>None:
            comment=comment.strip()
        if tags<>None:
            tags=tags.strip()
        return comment,tags,line
    def deIndent(self,content,startline):
        #remove garbage & strip one indent level (line[4:]) from every line
        content2=""
        linenr=startline
        for line in content.split("\n"):
            linenr+=1
            if line.strip()=="":
                continue
            else:
                # NOTE(review): this literal is likely "    " (4 spaces, matching
                # the line[4:] strip below) collapsed by text extraction — confirm
                if line.find(" ")<>0:
                    return self.raiseError("identation error.",line,linenr)
                content2+="%s\n" % line[4:]
        return content2
    def _checkIdentation(self,line,linenr,minLevel=1,maxLevel=1):
        """
        Return True when *line* is indented between minLevel and maxLevel
        steps of 4 spaces; raises a parse error when the line is too short.
        """
        line=line.replace("\t"," ")  # NOTE(review): likely 4 spaces originally — confirm
        ok=True
        if(len(line)<maxLevel*4):
            self.raiseError("line is too small, there should be max identation of %s" % maxLevel,line,linenr)
        for i in range(0,minLevel):
            # NOTE(review): comparing a 4-char slice; literal likely "    " — confirm
            if line[i*4:(i+1)*4]<>" ":
                ok=False
        if line[maxLevel*4+1]==" ":
            ok=False
        return ok
    def raiseError(self, msg,line="",linenr=0):
        # delegates to the central error handler; does not return normally
        j.errorconditionhandler.raiseInputError("Cannot parse file %s\nError on line:%s\n%s\n%s\n" % (self.path,linenr,line,msg),"specparser.input")
class Role(j.code.classGetBase()):
    """A role groups the actors its holders may access."""
    def __init__(self,name,actors=None):
        # BUGFIX: the previous mutable default (actors=[]) was shared between
        # all Role instances created without the argument; use a None sentinel
        # and give each instance its own fresh list
        self.actors=[] if actors is None else actors
        self.name=name
class SpecParserFactory():
def __init__(self):
self.specs={}
self.appnames=[]
self.actornames=[]
self.app_actornames={}
self.modelnames={} #key = appname_actorname
self.roles={} #key is appname_rolename
#self.codepath=j.system.fs.joinPaths( j.dirs.varDir,"actorscode")
def getEnumerationSpec(self,app,actorname,name,die=True):
key="enumeration_%s_%s_%s"%(app,actorname,name)
if self.specs.has_key(key):
return self.specs[key]
else:
if die:
emsg="Cannot find enumeration with name %s for app %s" % (name,app)
raise RuntimeError(emsg+" {category:specs.enumeration.notfound}")
else:
return False
def getActorSpec(self,app,name,raiseError=True):
key="actor_%s_%s_%s"%(app,name,"")
if self.specs.has_key(key):
return self.specs[key]
else:
if raiseError:
raise RuntimeError("Cannot find actor with name %s for app %s" % (name,app)+" {category:specs.actor.notfound}")
else:
return None
def getModelSpec(self,app,actorname,name,die=True):
key="model_%s_%s_%s"%(app,actorname,name)
key = key.lower()
if self.specs.has_key(key):
return self.specs[key]
else:
if die:
emsg="Cannot find model with name %s for app %s" % (name,app)
raise RuntimeError(emsg+" {category:specs.model.notfound}")
else:
return False
def getModelNames(self,appname,actorname):
key="%s_%s"%(appname,actorname)
if j.core.specparser.modelnames.has_key(key):
return self.modelnames[key]
else:
return []
def addSpec(self,spec):
if spec.type=="rootmodel":
spec.type="model"
key="%s_%s"%(spec.appname,spec.actorname)
if not self.modelnames.has_key(key):
self.modelnames[key]=[]
if spec.name not in self.modelnames[key]:
self.modelnames[key].append(spec.name)
if spec.name==spec.actorname:
specname=""
else:
specname=spec.name
if spec.type=="actor" and specname<>"":
from JumpScale.core.Shell import ipshell
print "DEBUG NOW addSpec in specparser, cannot have actor with specname<>empty"
ipshell()
key="%s_%s_%s_%s"%(spec.type,spec.appname,spec.actorname,specname)
if spec.type<>spec.type.lower().strip():
emsg="type %s of spec %s should be lowercase & no spaces" % (spec.type,key)
raise RuntimeError(emsg+" {category:specs.input}")
if spec.name<>spec.name.lower().strip():
emsg="name %s of spec %s should be lowercase & no spaces" % (spec.name,key)
raise RuntimeError(emsg+" {category:specs.input}")
if spec.appname not in self.appnames:
self.appnames.append(spec.appname)
if spec.actorname=="":
emsg="actorname cannot be empty for spec:%s %s starting line %s" % (spec.name,spec.specpath,spec.linenr)
raise RuntimeError(emsg+" {category:specs.input}")
if "%s_%s"%(spec.appname,spec.actorname) not in self.actornames:
self.actornames.append("%s_%s"%(spec.appname,spec.actorname))
self.specs[key]=spec
def findSpec(self,query="",appname="",actorname="",specname="",type="",findFromSpec=None,findOnlyOne=True):
"""
do not specify query with one of the other filter criteria
@param query is in dot notation e.g. $appname.$actorname.$modelname ... the items in front are optional
"""
spec=findFromSpec
if query<>"":
type=""
if query[0]=="E":
type="enumeration"
if query[0]=="M":
type="model"
if query.find("(") <>-1 and query.find(")")<>-1:
query=query.split("(",1)[1]
query=query.split(")")[0]
splitted=query.split(".")
#see if we can find an appname
appname=""
if len(splitted)>1:
possibleappname=splitted[0]
if possibleappname in j.core.specparser.appnames:
appname=possibleappname
splitted=splitted[1:] #remove the already matched item
#see if we can find an actor
actorname=""
if len(splitted)>1:
possibleactor=splitted[0]
if possibleactor in j.core.specparser.actornames:
actorname=possibleactor
splitted=splitted[1:] #remove the already matched item
query=".".join(splitted)
if query.strip()<>"." and query.strip()<>"":
specname=query
if actorname=="" and spec<>None:
#no specificiation of actor or app so needs to be local to this spec
actorname=spec.actorname
if appname=="" and spec<>None:
#no specificiation of actor or app so needs to be local to this spec
appname=spec.appname
result=[]
if actorname==specname:
specname=""
else:
specname=specname
if actorname<>"" and appname<>"" and specname<>"" and type<>"":
key="%s_%s_%s_%s" % (type,appname,actorname,specname)
if j.core.specparser.specs.has_key(key):
result=[j.core.specparser.specs[key]]
else:
#not enough specified need to walk over all
for key in j.core.specparser.specs.keys():
spec=j.core.specparser.specs[key]
found=True
if actorname<>"" and spec.actorname<>actorname:
found=False
if appname<>"" and spec.appname<>appname:
found=False
if specname<>"" and spec.name<>specname:
found=False
if type<>"" and spec.type<>type:
found=False
if found:
result.append(spec)
if len(result)==0:
if spec<>None:
emsg="Could not find spec with query:%s appname:%s actorname:%s name:%s (spec info: %s_%s_%s)" % \
(query,appname,actorname,specname,spec.name,spec.specpath,spec.linenr)
else:
emsg="Could not find spec with query:%s appname:%s actorname:%s name:%s " % \
(query,appname,actorname,specname)
raise RuntimeError(emsg+" {category:specs.finderror}")
if findOnlyOne:
if len(result)<>1:
if spec<>None:
emsg="Found more than 1 spec for search query:%s appname:%s actorname:%s name:%s (spec info: %s_%s_%s)" % \
(query,appname,actorname,specname,spec.name,spec.specpath,spec.linenr)
else:
emsg="Found more than 1 spec for search query:%s appname:%s actorname:%s name:%s " % \
(query,appname,actorname,specname)
raise RuntimeError(emsg+" {category:specs.finderror}")
else:
result=result[0]
return result
def _getSpecFileParser(self,path,appname,actorname):
return SpecFileParser(path,appname,actorname)
def init(self):
self.__init__()
def removeSpecsForActor(self,appname,actorname):
appname=appname.lower()
actorname=actorname.lower()
if appname in self.appnames:
i=self.appnames.index(appname)
self.appnames.pop(i)
key="%s_%s"%(appname,actorname)
if key in self.actornames:
#found actor remove the specs
for key2 in self.specs.keys():
type,app,item,remaining=key2.split("_",3)
if app==appname and item.find(actorname)==0:
print "remove specs %s from memory" % key
self.specs.pop(key2)
i=self.actornames.index(key)
self.actornames.pop(i)
def resetMemNonSystem(self):
self.appnames=["system"]
for key2 in self.specs.keys():
type,app,item,remaining=key2.split("_",3)
if app<>"system":
self.specs.pop(key2)
for key in self.actornames:
appname,actorname=key.split("_",1)
if appname<>"system":
i=self.actornames.index(key)
self.actornames.pop(i)
def parseSpecs(self,specpath,appname,actorname):
"""
@param specpath if empty will look for path specs in current dir
"""
if not j.system.fs.exists(specpath):
raise RuntimeError("Cannot find specs on path %s"%specpath)
SpecDirParser(specpath,appname,actorname=actorname)
#generate specs for model actors
smg=SpecModelActorsGenerator(appname,actorname,specpath)
smg.generate()
#parse again to include the just generated specs
SpecDirParser(specpath,appname,actorname=actorname)
def getSpecFromTypeStr(self,appname,actorname,typestr):
"""
@param typestr e.g list(machine.status)
@return $returntype,$spec $returntype=list,dict,object,enum (list & dict can be of primitive types or objects (NOT enums))
"""
if typestr in ["int","str","float","bool"]:
return None,None
elif typestr.find("list")==0 or typestr.find("dict")==0:
if typestr.find("list")==0:
returntype="list"
else:
returntype="dict"
typestr=typestr.split("(")[1]
typestr=typestr.split(")")[0]
#print "typestr:%s" % typestr
else:
returntype="object"
if typestr in ["int","str","float","bool","list","dict"]:
spec=typestr
else:
result=self.getEnumerationSpec(appname,actorname,typestr,die=False)
if result==False:
result=self.getModelSpec(appname,actorname,typestr,die=False)
if result==False:
if returntype not in ["list","dict"]:
returntype="enum"
if result==False:
raise RuntimeError("Cannot find spec for app:%s, actor:%s, with typestr:%s" % (appname,actorname,typestr))
else:
spec=result
return returntype,spec
#raise RuntimeError("Could not find type:%s in getSpecFromTypeStr" % type) | PypiClean |
/Flask_AdminLTE3-1.0.9-py3-none-any.whl/flask_adminlte3/static/plugins/chart.js/Chart.min.js | !function(t,e){"object"==typeof exports&&"undefined"!=typeof module?module.exports=e(function(){try{return require("moment")}catch(t){}}()):"function"==typeof define&&define.amd?define(["require"],(function(t){return e(function(){try{return t("moment")}catch(t){}}())})):(t=t||self).Chart=e(t.moment)}(this,(function(t){"use strict";t=t&&t.hasOwnProperty("default")?t.default:t;var e={aliceblue:[240,248,255],antiquewhite:[250,235,215],aqua:[0,255,255],aquamarine:[127,255,212],azure:[240,255,255],beige:[245,245,220],bisque:[255,228,196],black:[0,0,0],blanchedalmond:[255,235,205],blue:[0,0,255],blueviolet:[138,43,226],brown:[165,42,42],burlywood:[222,184,135],cadetblue:[95,158,160],chartreuse:[127,255,0],chocolate:[210,105,30],coral:[255,127,80],cornflowerblue:[100,149,237],cornsilk:[255,248,220],crimson:[220,20,60],cyan:[0,255,255],darkblue:[0,0,139],darkcyan:[0,139,139],darkgoldenrod:[184,134,11],darkgray:[169,169,169],darkgreen:[0,100,0],darkgrey:[169,169,169],darkkhaki:[189,183,107],darkmagenta:[139,0,139],darkolivegreen:[85,107,47],darkorange:[255,140,0],darkorchid:[153,50,204],darkred:[139,0,0],darksalmon:[233,150,122],darkseagreen:[143,188,143],darkslateblue:[72,61,139],darkslategray:[47,79,79],darkslategrey:[47,79,79],darkturquoise:[0,206,209],darkviolet:[148,0,211],deeppink:[255,20,147],deepskyblue:[0,191,255],dimgray:[105,105,105],dimgrey:[105,105,105],dodgerblue:[30,144,255],firebrick:[178,34,34],floralwhite:[255,250,240],forestgreen:[34,139,34],fuchsia:[255,0,255],gainsboro:[220,220,220],ghostwhite:[248,248,255],gold:[255,215,0],goldenrod:[218,165,32],gray:[128,128,128],green:[0,128,0],greenyellow:[173,255,47],grey:[128,128,128],honeydew:[240,255,240],hotpink:[255,105,180],indianred:[205,92,92],indigo:[75,0,130],ivory:[255,255,240],khaki:[240,230,140],lavender:[230,230,250],lavenderblush:[255,240,245],lawngreen:[124,252,0],lemonchiffon:[255,250,205],lightblue:[173,2
16,230],lightcoral:[240,128,128],lightcyan:[224,255,255],lightgoldenrodyellow:[250,250,210],lightgray:[211,211,211],lightgreen:[144,238,144],lightgrey:[211,211,211],lightpink:[255,182,193],lightsalmon:[255,160,122],lightseagreen:[32,178,170],lightskyblue:[135,206,250],lightslategray:[119,136,153],lightslategrey:[119,136,153],lightsteelblue:[176,196,222],lightyellow:[255,255,224],lime:[0,255,0],limegreen:[50,205,50],linen:[250,240,230],magenta:[255,0,255],maroon:[128,0,0],mediumaquamarine:[102,205,170],mediumblue:[0,0,205],mediumorchid:[186,85,211],mediumpurple:[147,112,219],mediumseagreen:[60,179,113],mediumslateblue:[123,104,238],mediumspringgreen:[0,250,154],mediumturquoise:[72,209,204],mediumvioletred:[199,21,133],midnightblue:[25,25,112],mintcream:[245,255,250],mistyrose:[255,228,225],moccasin:[255,228,181],navajowhite:[255,222,173],navy:[0,0,128],oldlace:[253,245,230],olive:[128,128,0],olivedrab:[107,142,35],orange:[255,165,0],orangered:[255,69,0],orchid:[218,112,214],palegoldenrod:[238,232,170],palegreen:[152,251,152],paleturquoise:[175,238,238],palevioletred:[219,112,147],papayawhip:[255,239,213],peachpuff:[255,218,185],peru:[205,133,63],pink:[255,192,203],plum:[221,160,221],powderblue:[176,224,230],purple:[128,0,128],rebeccapurple:[102,51,153],red:[255,0,0],rosybrown:[188,143,143],royalblue:[65,105,225],saddlebrown:[139,69,19],salmon:[250,128,114],sandybrown:[244,164,96],seagreen:[46,139,87],seashell:[255,245,238],sienna:[160,82,45],silver:[192,192,192],skyblue:[135,206,235],slateblue:[106,90,205],slategray:[112,128,144],slategrey:[112,128,144],snow:[255,250,250],springgreen:[0,255,127],steelblue:[70,130,180],tan:[210,180,140],teal:[0,128,128],thistle:[216,191,216],tomato:[255,99,71],turquoise:[64,224,208],violet:[238,130,238],wheat:[245,222,179],white:[255,255,255],whitesmoke:[245,245,245],yellow:[255,255,0],yellowgreen:[154,205,50]},n=function(t,e){return t(e={exports:{}},e.exports),e.exports}((function(t){var n={};for(var i in 
e)e.hasOwnProperty(i)&&(n[e[i]]=i);var a=t.exports={rgb:{channels:3,labels:"rgb"},hsl:{channels:3,labels:"hsl"},hsv:{channels:3,labels:"hsv"},hwb:{channels:3,labels:"hwb"},cmyk:{channels:4,labels:"cmyk"},xyz:{channels:3,labels:"xyz"},lab:{channels:3,labels:"lab"},lch:{channels:3,labels:"lch"},hex:{channels:1,labels:["hex"]},keyword:{channels:1,labels:["keyword"]},ansi16:{channels:1,labels:["ansi16"]},ansi256:{channels:1,labels:["ansi256"]},hcg:{channels:3,labels:["h","c","g"]},apple:{channels:3,labels:["r16","g16","b16"]},gray:{channels:1,labels:["gray"]}};for(var r in a)if(a.hasOwnProperty(r)){if(!("channels"in a[r]))throw new Error("missing channels property: "+r);if(!("labels"in a[r]))throw new Error("missing channel labels property: "+r);if(a[r].labels.length!==a[r].channels)throw new Error("channel and label counts mismatch: "+r);var o=a[r].channels,s=a[r].labels;delete a[r].channels,delete a[r].labels,Object.defineProperty(a[r],"channels",{value:o}),Object.defineProperty(a[r],"labels",{value:s})}a.rgb.hsl=function(t){var e,n,i=t[0]/255,a=t[1]/255,r=t[2]/255,o=Math.min(i,a,r),s=Math.max(i,a,r),l=s-o;return s===o?e=0:i===s?e=(a-r)/l:a===s?e=2+(r-i)/l:r===s&&(e=4+(i-a)/l),(e=Math.min(60*e,360))<0&&(e+=360),n=(o+s)/2,[e,100*(s===o?0:n<=.5?l/(s+o):l/(2-s-o)),100*n]},a.rgb.hsv=function(t){var e,n,i,a,r,o=t[0]/255,s=t[1]/255,l=t[2]/255,u=Math.max(o,s,l),d=u-Math.min(o,s,l),h=function(t){return(u-t)/6/d+.5};return 0===d?a=r=0:(r=d/u,e=h(o),n=h(s),i=h(l),o===u?a=i-n:s===u?a=1/3+e-i:l===u&&(a=2/3+n-e),a<0?a+=1:a>1&&(a-=1)),[360*a,100*r,100*u]},a.rgb.hwb=function(t){var e=t[0],n=t[1],i=t[2];return[a.rgb.hsl(t)[0],100*(1/255*Math.min(e,Math.min(n,i))),100*(i=1-1/255*Math.max(e,Math.max(n,i)))]},a.rgb.cmyk=function(t){var e,n=t[0]/255,i=t[1]/255,a=t[2]/255;return[100*((1-n-(e=Math.min(1-n,1-i,1-a)))/(1-e)||0),100*((1-i-e)/(1-e)||0),100*((1-a-e)/(1-e)||0),100*e]},a.rgb.keyword=function(t){var i=n[t];if(i)return i;var a,r,o,s=1/0;for(var l in e)if(e.hasOwnProperty(l)){var 
u=e[l],d=(r=t,o=u,Math.pow(r[0]-o[0],2)+Math.pow(r[1]-o[1],2)+Math.pow(r[2]-o[2],2));d<s&&(s=d,a=l)}return a},a.keyword.rgb=function(t){return e[t]},a.rgb.xyz=function(t){var e=t[0]/255,n=t[1]/255,i=t[2]/255;return[100*(.4124*(e=e>.04045?Math.pow((e+.055)/1.055,2.4):e/12.92)+.3576*(n=n>.04045?Math.pow((n+.055)/1.055,2.4):n/12.92)+.1805*(i=i>.04045?Math.pow((i+.055)/1.055,2.4):i/12.92)),100*(.2126*e+.7152*n+.0722*i),100*(.0193*e+.1192*n+.9505*i)]},a.rgb.lab=function(t){var e=a.rgb.xyz(t),n=e[0],i=e[1],r=e[2];return i/=100,r/=108.883,n=(n/=95.047)>.008856?Math.pow(n,1/3):7.787*n+16/116,[116*(i=i>.008856?Math.pow(i,1/3):7.787*i+16/116)-16,500*(n-i),200*(i-(r=r>.008856?Math.pow(r,1/3):7.787*r+16/116))]},a.hsl.rgb=function(t){var e,n,i,a,r,o=t[0]/360,s=t[1]/100,l=t[2]/100;if(0===s)return[r=255*l,r,r];e=2*l-(n=l<.5?l*(1+s):l+s-l*s),a=[0,0,0];for(var u=0;u<3;u++)(i=o+1/3*-(u-1))<0&&i++,i>1&&i--,r=6*i<1?e+6*(n-e)*i:2*i<1?n:3*i<2?e+(n-e)*(2/3-i)*6:e,a[u]=255*r;return a},a.hsl.hsv=function(t){var e=t[0],n=t[1]/100,i=t[2]/100,a=n,r=Math.max(i,.01);return n*=(i*=2)<=1?i:2-i,a*=r<=1?r:2-r,[e,100*(0===i?2*a/(r+a):2*n/(i+n)),100*((i+n)/2)]},a.hsv.rgb=function(t){var e=t[0]/60,n=t[1]/100,i=t[2]/100,a=Math.floor(e)%6,r=e-Math.floor(e),o=255*i*(1-n),s=255*i*(1-n*r),l=255*i*(1-n*(1-r));switch(i*=255,a){case 0:return[i,l,o];case 1:return[s,i,o];case 2:return[o,i,l];case 3:return[o,s,i];case 4:return[l,o,i];case 5:return[i,o,s]}},a.hsv.hsl=function(t){var e,n,i,a=t[0],r=t[1]/100,o=t[2]/100,s=Math.max(o,.01);return i=(2-r)*o,n=r*s,[a,100*(n=(n/=(e=(2-r)*s)<=1?e:2-e)||0),100*(i/=2)]},a.hwb.rgb=function(t){var e,n,i,a,r,o,s,l=t[0]/360,u=t[1]/100,d=t[2]/100,h=u+d;switch(h>1&&(u/=h,d/=h),i=6*l-(e=Math.floor(6*l)),0!=(1&e)&&(i=1-i),a=u+i*((n=1-d)-u),e){default:case 6:case 0:r=n,o=a,s=u;break;case 1:r=a,o=n,s=u;break;case 2:r=u,o=n,s=a;break;case 3:r=u,o=a,s=n;break;case 4:r=a,o=u,s=n;break;case 5:r=n,o=u,s=a}return[255*r,255*o,255*s]},a.cmyk.rgb=function(t){var 
e=t[0]/100,n=t[1]/100,i=t[2]/100,a=t[3]/100;return[255*(1-Math.min(1,e*(1-a)+a)),255*(1-Math.min(1,n*(1-a)+a)),255*(1-Math.min(1,i*(1-a)+a))]},a.xyz.rgb=function(t){var e,n,i,a=t[0]/100,r=t[1]/100,o=t[2]/100;return n=-.9689*a+1.8758*r+.0415*o,i=.0557*a+-.204*r+1.057*o,e=(e=3.2406*a+-1.5372*r+-.4986*o)>.0031308?1.055*Math.pow(e,1/2.4)-.055:12.92*e,n=n>.0031308?1.055*Math.pow(n,1/2.4)-.055:12.92*n,i=i>.0031308?1.055*Math.pow(i,1/2.4)-.055:12.92*i,[255*(e=Math.min(Math.max(0,e),1)),255*(n=Math.min(Math.max(0,n),1)),255*(i=Math.min(Math.max(0,i),1))]},a.xyz.lab=function(t){var e=t[0],n=t[1],i=t[2];return n/=100,i/=108.883,e=(e/=95.047)>.008856?Math.pow(e,1/3):7.787*e+16/116,[116*(n=n>.008856?Math.pow(n,1/3):7.787*n+16/116)-16,500*(e-n),200*(n-(i=i>.008856?Math.pow(i,1/3):7.787*i+16/116))]},a.lab.xyz=function(t){var e,n,i,a=t[0];e=t[1]/500+(n=(a+16)/116),i=n-t[2]/200;var r=Math.pow(n,3),o=Math.pow(e,3),s=Math.pow(i,3);return n=r>.008856?r:(n-16/116)/7.787,e=o>.008856?o:(e-16/116)/7.787,i=s>.008856?s:(i-16/116)/7.787,[e*=95.047,n*=100,i*=108.883]},a.lab.lch=function(t){var e,n=t[0],i=t[1],a=t[2];return(e=360*Math.atan2(a,i)/2/Math.PI)<0&&(e+=360),[n,Math.sqrt(i*i+a*a),e]},a.lch.lab=function(t){var e,n=t[0],i=t[1];return e=t[2]/360*2*Math.PI,[n,i*Math.cos(e),i*Math.sin(e)]},a.rgb.ansi16=function(t){var e=t[0],n=t[1],i=t[2],r=1 in arguments?arguments[1]:a.rgb.hsv(t)[2];if(0===(r=Math.round(r/50)))return 30;var o=30+(Math.round(i/255)<<2|Math.round(n/255)<<1|Math.round(e/255));return 2===r&&(o+=60),o},a.hsv.ansi16=function(t){return a.rgb.ansi16(a.hsv.rgb(t),t[2])},a.rgb.ansi256=function(t){var e=t[0],n=t[1],i=t[2];return e===n&&n===i?e<8?16:e>248?231:Math.round((e-8)/247*24)+232:16+36*Math.round(e/255*5)+6*Math.round(n/255*5)+Math.round(i/255*5)},a.ansi16.rgb=function(t){var e=t%10;if(0===e||7===e)return t>50&&(e+=3.5),[e=e/10.5*255,e,e];var n=.5*(1+~~(t>50));return[(1&e)*n*255,(e>>1&1)*n*255,(e>>2&1)*n*255]},a.ansi256.rgb=function(t){if(t>=232){var 
e=10*(t-232)+8;return[e,e,e]}var n;return t-=16,[Math.floor(t/36)/5*255,Math.floor((n=t%36)/6)/5*255,n%6/5*255]},a.rgb.hex=function(t){var e=(((255&Math.round(t[0]))<<16)+((255&Math.round(t[1]))<<8)+(255&Math.round(t[2]))).toString(16).toUpperCase();return"000000".substring(e.length)+e},a.hex.rgb=function(t){var e=t.toString(16).match(/[a-f0-9]{6}|[a-f0-9]{3}/i);if(!e)return[0,0,0];var n=e[0];3===e[0].length&&(n=n.split("").map((function(t){return t+t})).join(""));var i=parseInt(n,16);return[i>>16&255,i>>8&255,255&i]},a.rgb.hcg=function(t){var e,n=t[0]/255,i=t[1]/255,a=t[2]/255,r=Math.max(Math.max(n,i),a),o=Math.min(Math.min(n,i),a),s=r-o;return e=s<=0?0:r===n?(i-a)/s%6:r===i?2+(a-n)/s:4+(n-i)/s+4,e/=6,[360*(e%=1),100*s,100*(s<1?o/(1-s):0)]},a.hsl.hcg=function(t){var e=t[1]/100,n=t[2]/100,i=1,a=0;return(i=n<.5?2*e*n:2*e*(1-n))<1&&(a=(n-.5*i)/(1-i)),[t[0],100*i,100*a]},a.hsv.hcg=function(t){var e=t[1]/100,n=t[2]/100,i=e*n,a=0;return i<1&&(a=(n-i)/(1-i)),[t[0],100*i,100*a]},a.hcg.rgb=function(t){var e=t[0]/360,n=t[1]/100,i=t[2]/100;if(0===n)return[255*i,255*i,255*i];var a,r=[0,0,0],o=e%1*6,s=o%1,l=1-s;switch(Math.floor(o)){case 0:r[0]=1,r[1]=s,r[2]=0;break;case 1:r[0]=l,r[1]=1,r[2]=0;break;case 2:r[0]=0,r[1]=1,r[2]=s;break;case 3:r[0]=0,r[1]=l,r[2]=1;break;case 4:r[0]=s,r[1]=0,r[2]=1;break;default:r[0]=1,r[1]=0,r[2]=l}return a=(1-n)*i,[255*(n*r[0]+a),255*(n*r[1]+a),255*(n*r[2]+a)]},a.hcg.hsv=function(t){var e=t[1]/100,n=e+t[2]/100*(1-e),i=0;return n>0&&(i=e/n),[t[0],100*i,100*n]},a.hcg.hsl=function(t){var e=t[1]/100,n=t[2]/100*(1-e)+.5*e,i=0;return n>0&&n<.5?i=e/(2*n):n>=.5&&n<1&&(i=e/(2*(1-n))),[t[0],100*i,100*n]},a.hcg.hwb=function(t){var e=t[1]/100,n=e+t[2]/100*(1-e);return[t[0],100*(n-e),100*(1-n)]},a.hwb.hcg=function(t){var e=t[1]/100,n=1-t[2]/100,i=n-e,a=0;return 
i<1&&(a=(n-i)/(1-i)),[t[0],100*i,100*a]},a.apple.rgb=function(t){return[t[0]/65535*255,t[1]/65535*255,t[2]/65535*255]},a.rgb.apple=function(t){return[t[0]/255*65535,t[1]/255*65535,t[2]/255*65535]},a.gray.rgb=function(t){return[t[0]/100*255,t[0]/100*255,t[0]/100*255]},a.gray.hsl=a.gray.hsv=function(t){return[0,0,t[0]]},a.gray.hwb=function(t){return[0,100,t[0]]},a.gray.cmyk=function(t){return[0,0,0,t[0]]},a.gray.lab=function(t){return[t[0],0,0]},a.gray.hex=function(t){var e=255&Math.round(t[0]/100*255),n=((e<<16)+(e<<8)+e).toString(16).toUpperCase();return"000000".substring(n.length)+n},a.rgb.gray=function(t){return[(t[0]+t[1]+t[2])/3/255*100]}}));n.rgb,n.hsl,n.hsv,n.hwb,n.cmyk,n.xyz,n.lab,n.lch,n.hex,n.keyword,n.ansi16,n.ansi256,n.hcg,n.apple,n.gray;function i(t){var e=function(){for(var t={},e=Object.keys(n),i=e.length,a=0;a<i;a++)t[e[a]]={distance:-1,parent:null};return t}(),i=[t];for(e[t].distance=0;i.length;)for(var a=i.pop(),r=Object.keys(n[a]),o=r.length,s=0;s<o;s++){var l=r[s],u=e[l];-1===u.distance&&(u.distance=e[a].distance+1,u.parent=a,i.unshift(l))}return e}function a(t,e){return function(n){return e(t(n))}}function r(t,e){for(var i=[e[t].parent,t],r=n[e[t].parent][t],o=e[t].parent;e[o].parent;)i.unshift(e[o].parent),r=a(n[e[o].parent][o],r),o=e[o].parent;return r.conversion=i,r}var o={};Object.keys(n).forEach((function(t){o[t]={},Object.defineProperty(o[t],"channels",{value:n[t].channels}),Object.defineProperty(o[t],"labels",{value:n[t].labels});var e=function(t){for(var e=i(t),n={},a=Object.keys(e),o=a.length,s=0;s<o;s++){var l=a[s];null!==e[l].parent&&(n[l]=r(l,e))}return n}(t);Object.keys(e).forEach((function(n){var i=e[n];o[t][n]=function(t){var e=function(e){if(null==e)return e;arguments.length>1&&(e=Array.prototype.slice.call(arguments));var n=t(e);if("object"==typeof n)for(var i=n.length,a=0;a<i;a++)n[a]=Math.round(n[a]);return n};return"conversion"in t&&(e.conversion=t.conversion),e}(i),o[t][n].raw=function(t){var e=function(e){return 
null==e?e:(arguments.length>1&&(e=Array.prototype.slice.call(arguments)),t(e))};return"conversion"in t&&(e.conversion=t.conversion),e}(i)}))}));var s=o,l={aliceblue:[240,248,255],antiquewhite:[250,235,215],aqua:[0,255,255],aquamarine:[127,255,212],azure:[240,255,255],beige:[245,245,220],bisque:[255,228,196],black:[0,0,0],blanchedalmond:[255,235,205],blue:[0,0,255],blueviolet:[138,43,226],brown:[165,42,42],burlywood:[222,184,135],cadetblue:[95,158,160],chartreuse:[127,255,0],chocolate:[210,105,30],coral:[255,127,80],cornflowerblue:[100,149,237],cornsilk:[255,248,220],crimson:[220,20,60],cyan:[0,255,255],darkblue:[0,0,139],darkcyan:[0,139,139],darkgoldenrod:[184,134,11],darkgray:[169,169,169],darkgreen:[0,100,0],darkgrey:[169,169,169],darkkhaki:[189,183,107],darkmagenta:[139,0,139],darkolivegreen:[85,107,47],darkorange:[255,140,0],darkorchid:[153,50,204],darkred:[139,0,0],darksalmon:[233,150,122],darkseagreen:[143,188,143],darkslateblue:[72,61,139],darkslategray:[47,79,79],darkslategrey:[47,79,79],darkturquoise:[0,206,209],darkviolet:[148,0,211],deeppink:[255,20,147],deepskyblue:[0,191,255],dimgray:[105,105,105],dimgrey:[105,105,105],dodgerblue:[30,144,255],firebrick:[178,34,34],floralwhite:[255,250,240],forestgreen:[34,139,34],fuchsia:[255,0,255],gainsboro:[220,220,220],ghostwhite:[248,248,255],gold:[255,215,0],goldenrod:[218,165,32],gray:[128,128,128],green:[0,128,0],greenyellow:[173,255,47],grey:[128,128,128],honeydew:[240,255,240],hotpink:[255,105,180],indianred:[205,92,92],indigo:[75,0,130],ivory:[255,255,240],khaki:[240,230,140],lavender:[230,230,250],lavenderblush:[255,240,245],lawngreen:[124,252,0],lemonchiffon:[255,250,205],lightblue:[173,216,230],lightcoral:[240,128,128],lightcyan:[224,255,255],lightgoldenrodyellow:[250,250,210],lightgray:[211,211,211],lightgreen:[144,238,144],lightgrey:[211,211,211],lightpink:[255,182,193],lightsalmon:[255,160,122],lightseagreen:[32,178,170],lightskyblue:[135,206,250],lightslategray:[119,136,153],lightslategrey:[119,136,153
],lightsteelblue:[176,196,222],lightyellow:[255,255,224],lime:[0,255,0],limegreen:[50,205,50],linen:[250,240,230],magenta:[255,0,255],maroon:[128,0,0],mediumaquamarine:[102,205,170],mediumblue:[0,0,205],mediumorchid:[186,85,211],mediumpurple:[147,112,219],mediumseagreen:[60,179,113],mediumslateblue:[123,104,238],mediumspringgreen:[0,250,154],mediumturquoise:[72,209,204],mediumvioletred:[199,21,133],midnightblue:[25,25,112],mintcream:[245,255,250],mistyrose:[255,228,225],moccasin:[255,228,181],navajowhite:[255,222,173],navy:[0,0,128],oldlace:[253,245,230],olive:[128,128,0],olivedrab:[107,142,35],orange:[255,165,0],orangered:[255,69,0],orchid:[218,112,214],palegoldenrod:[238,232,170],palegreen:[152,251,152],paleturquoise:[175,238,238],palevioletred:[219,112,147],papayawhip:[255,239,213],peachpuff:[255,218,185],peru:[205,133,63],pink:[255,192,203],plum:[221,160,221],powderblue:[176,224,230],purple:[128,0,128],rebeccapurple:[102,51,153],red:[255,0,0],rosybrown:[188,143,143],royalblue:[65,105,225],saddlebrown:[139,69,19],salmon:[250,128,114],sandybrown:[244,164,96],seagreen:[46,139,87],seashell:[255,245,238],sienna:[160,82,45],silver:[192,192,192],skyblue:[135,206,235],slateblue:[106,90,205],slategray:[112,128,144],slategrey:[112,128,144],snow:[255,250,250],springgreen:[0,255,127],steelblue:[70,130,180],tan:[210,180,140],teal:[0,128,128],thistle:[216,191,216],tomato:[255,99,71],turquoise:[64,224,208],violet:[238,130,238],wheat:[245,222,179],white:[255,255,255],whitesmoke:[245,245,245],yellow:[255,255,0],yellowgreen:[154,205,50]},u={getRgba:d,getHsla:h,getRgb:function(t){var e=d(t);return e&&e.slice(0,3)},getHsl:function(t){var e=h(t);return e&&e.slice(0,3)},getHwb:c,getAlpha:function(t){var e=d(t);if(e)return e[3];if(e=h(t))return e[3];if(e=c(t))return e[3]},hexString:function(t,e){e=void 0!==e&&3===t.length?e:t[3];return"#"+v(t[0])+v(t[1])+v(t[2])+(e>=0&&e<1?v(Math.round(255*e)):"")},rgbString:function(t,e){if(e<1||t[3]&&t[3]<1)return f(t,e);return"rgb("+t[0]+", 
"+t[1]+", "+t[2]+")"},rgbaString:f,percentString:function(t,e){if(e<1||t[3]&&t[3]<1)return g(t,e);var n=Math.round(t[0]/255*100),i=Math.round(t[1]/255*100),a=Math.round(t[2]/255*100);return"rgb("+n+"%, "+i+"%, "+a+"%)"},percentaString:g,hslString:function(t,e){if(e<1||t[3]&&t[3]<1)return p(t,e);return"hsl("+t[0]+", "+t[1]+"%, "+t[2]+"%)"},hslaString:p,hwbString:function(t,e){void 0===e&&(e=void 0!==t[3]?t[3]:1);return"hwb("+t[0]+", "+t[1]+"%, "+t[2]+"%"+(void 0!==e&&1!==e?", "+e:"")+")"},keyword:function(t){return b[t.slice(0,3)]}};function d(t){if(t){var e=[0,0,0],n=1,i=t.match(/^#([a-fA-F0-9]{3,4})$/i),a="";if(i){a=(i=i[1])[3];for(var r=0;r<e.length;r++)e[r]=parseInt(i[r]+i[r],16);a&&(n=Math.round(parseInt(a+a,16)/255*100)/100)}else if(i=t.match(/^#([a-fA-F0-9]{6}([a-fA-F0-9]{2})?)$/i)){a=i[2],i=i[1];for(r=0;r<e.length;r++)e[r]=parseInt(i.slice(2*r,2*r+2),16);a&&(n=Math.round(parseInt(a,16)/255*100)/100)}else if(i=t.match(/^rgba?\(\s*([+-]?\d+)\s*,\s*([+-]?\d+)\s*,\s*([+-]?\d+)\s*(?:,\s*([+-]?[\d\.]+)\s*)?\)$/i)){for(r=0;r<e.length;r++)e[r]=parseInt(i[r+1]);n=parseFloat(i[4])}else if(i=t.match(/^rgba?\(\s*([+-]?[\d\.]+)\%\s*,\s*([+-]?[\d\.]+)\%\s*,\s*([+-]?[\d\.]+)\%\s*(?:,\s*([+-]?[\d\.]+)\s*)?\)$/i)){for(r=0;r<e.length;r++)e[r]=Math.round(2.55*parseFloat(i[r+1]));n=parseFloat(i[4])}else if(i=t.match(/(\w+)/)){if("transparent"==i[1])return[0,0,0,0];if(!(e=l[i[1]]))return}for(r=0;r<e.length;r++)e[r]=m(e[r],0,255);return n=n||0==n?m(n,0,1):1,e[3]=n,e}}function h(t){if(t){var e=t.match(/^hsla?\(\s*([+-]?\d+)(?:deg)?\s*,\s*([+-]?[\d\.]+)%\s*,\s*([+-]?[\d\.]+)%\s*(?:,\s*([+-]?[\d\.]+)\s*)?\)/);if(e){var n=parseFloat(e[4]);return[m(parseInt(e[1]),0,360),m(parseFloat(e[2]),0,100),m(parseFloat(e[3]),0,100),m(isNaN(n)?1:n,0,1)]}}}function c(t){if(t){var e=t.match(/^hwb\(\s*([+-]?\d+)(?:deg)?\s*,\s*([+-]?[\d\.]+)%\s*,\s*([+-]?[\d\.]+)%\s*(?:,\s*([+-]?[\d\.]+)\s*)?\)/);if(e){var 
n=parseFloat(e[4]);return[m(parseInt(e[1]),0,360),m(parseFloat(e[2]),0,100),m(parseFloat(e[3]),0,100),m(isNaN(n)?1:n,0,1)]}}}function f(t,e){return void 0===e&&(e=void 0!==t[3]?t[3]:1),"rgba("+t[0]+", "+t[1]+", "+t[2]+", "+e+")"}function g(t,e){return"rgba("+Math.round(t[0]/255*100)+"%, "+Math.round(t[1]/255*100)+"%, "+Math.round(t[2]/255*100)+"%, "+(e||t[3]||1)+")"}function p(t,e){return void 0===e&&(e=void 0!==t[3]?t[3]:1),"hsla("+t[0]+", "+t[1]+"%, "+t[2]+"%, "+e+")"}function m(t,e,n){return Math.min(Math.max(e,t),n)}function v(t){var e=t.toString(16).toUpperCase();return e.length<2?"0"+e:e}var b={};for(var x in l)b[l[x]]=x;var y=function(t){return t instanceof y?t:this instanceof y?(this.valid=!1,this.values={rgb:[0,0,0],hsl:[0,0,0],hsv:[0,0,0],hwb:[0,0,0],cmyk:[0,0,0,0],alpha:1},void("string"==typeof t?(e=u.getRgba(t))?this.setValues("rgb",e):(e=u.getHsla(t))?this.setValues("hsl",e):(e=u.getHwb(t))&&this.setValues("hwb",e):"object"==typeof t&&(void 0!==(e=t).r||void 0!==e.red?this.setValues("rgb",e):void 0!==e.l||void 0!==e.lightness?this.setValues("hsl",e):void 0!==e.v||void 0!==e.value?this.setValues("hsv",e):void 0!==e.w||void 0!==e.whiteness?this.setValues("hwb",e):void 0===e.c&&void 0===e.cyan||this.setValues("cmyk",e)))):new y(t);var e};y.prototype={isValid:function(){return this.valid},rgb:function(){return this.setSpace("rgb",arguments)},hsl:function(){return this.setSpace("hsl",arguments)},hsv:function(){return this.setSpace("hsv",arguments)},hwb:function(){return this.setSpace("hwb",arguments)},cmyk:function(){return this.setSpace("cmyk",arguments)},rgbArray:function(){return this.values.rgb},hslArray:function(){return this.values.hsl},hsvArray:function(){return this.values.hsv},hwbArray:function(){var t=this.values;return 1!==t.alpha?t.hwb.concat([t.alpha]):t.hwb},cmykArray:function(){return this.values.cmyk},rgbaArray:function(){var t=this.values;return t.rgb.concat([t.alpha])},hslaArray:function(){var t=this.values;return 
t.hsl.concat([t.alpha])},alpha:function(t){return void 0===t?this.values.alpha:(this.setValues("alpha",t),this)},red:function(t){return this.setChannel("rgb",0,t)},green:function(t){return this.setChannel("rgb",1,t)},blue:function(t){return this.setChannel("rgb",2,t)},hue:function(t){return t&&(t=(t%=360)<0?360+t:t),this.setChannel("hsl",0,t)},saturation:function(t){return this.setChannel("hsl",1,t)},lightness:function(t){return this.setChannel("hsl",2,t)},saturationv:function(t){return this.setChannel("hsv",1,t)},whiteness:function(t){return this.setChannel("hwb",1,t)},blackness:function(t){return this.setChannel("hwb",2,t)},value:function(t){return this.setChannel("hsv",2,t)},cyan:function(t){return this.setChannel("cmyk",0,t)},magenta:function(t){return this.setChannel("cmyk",1,t)},yellow:function(t){return this.setChannel("cmyk",2,t)},black:function(t){return this.setChannel("cmyk",3,t)},hexString:function(){return u.hexString(this.values.rgb)},rgbString:function(){return u.rgbString(this.values.rgb,this.values.alpha)},rgbaString:function(){return u.rgbaString(this.values.rgb,this.values.alpha)},percentString:function(){return u.percentString(this.values.rgb,this.values.alpha)},hslString:function(){return u.hslString(this.values.hsl,this.values.alpha)},hslaString:function(){return u.hslaString(this.values.hsl,this.values.alpha)},hwbString:function(){return u.hwbString(this.values.hwb,this.values.alpha)},keyword:function(){return u.keyword(this.values.rgb,this.values.alpha)},rgbNumber:function(){var t=this.values.rgb;return t[0]<<16|t[1]<<8|t[2]},luminosity:function(){for(var t=this.values.rgb,e=[],n=0;n<t.length;n++){var i=t[n]/255;e[n]=i<=.03928?i/12.92:Math.pow((i+.055)/1.055,2.4)}return.2126*e[0]+.7152*e[1]+.0722*e[2]},contrast:function(t){var e=this.luminosity(),n=t.luminosity();return e>n?(e+.05)/(n+.05):(n+.05)/(e+.05)},level:function(t){var e=this.contrast(t);return e>=7.1?"AAA":e>=4.5?"AA":""},dark:function(){var 
t=this.values.rgb;return(299*t[0]+587*t[1]+114*t[2])/1e3<128},light:function(){return!this.dark()},negate:function(){for(var t=[],e=0;e<3;e++)t[e]=255-this.values.rgb[e];return this.setValues("rgb",t),this},lighten:function(t){var e=this.values.hsl;return e[2]+=e[2]*t,this.setValues("hsl",e),this},darken:function(t){var e=this.values.hsl;return e[2]-=e[2]*t,this.setValues("hsl",e),this},saturate:function(t){var e=this.values.hsl;return e[1]+=e[1]*t,this.setValues("hsl",e),this},desaturate:function(t){var e=this.values.hsl;return e[1]-=e[1]*t,this.setValues("hsl",e),this},whiten:function(t){var e=this.values.hwb;return e[1]+=e[1]*t,this.setValues("hwb",e),this},blacken:function(t){var e=this.values.hwb;return e[2]+=e[2]*t,this.setValues("hwb",e),this},greyscale:function(){var t=this.values.rgb,e=.3*t[0]+.59*t[1]+.11*t[2];return this.setValues("rgb",[e,e,e]),this},clearer:function(t){var e=this.values.alpha;return this.setValues("alpha",e-e*t),this},opaquer:function(t){var e=this.values.alpha;return this.setValues("alpha",e+e*t),this},rotate:function(t){var e=this.values.hsl,n=(e[0]+t)%360;return e[0]=n<0?360+n:n,this.setValues("hsl",e),this},mix:function(t,e){var n=t,i=void 0===e?.5:e,a=2*i-1,r=this.alpha()-n.alpha(),o=((a*r==-1?a:(a+r)/(1+a*r))+1)/2,s=1-o;return this.rgb(o*this.red()+s*n.red(),o*this.green()+s*n.green(),o*this.blue()+s*n.blue()).alpha(this.alpha()*i+n.alpha()*(1-i))},toJSON:function(){return this.rgb()},clone:function(){var t,e,n=new y,i=this.values,a=n.values;for(var r in i)i.hasOwnProperty(r)&&(t=i[r],"[object Array]"===(e={}.toString.call(t))?a[r]=t.slice(0):"[object Number]"===e?a[r]=t:console.error("unexpected color value:",t));return 
n}},y.prototype.spaces={rgb:["red","green","blue"],hsl:["hue","saturation","lightness"],hsv:["hue","saturation","value"],hwb:["hue","whiteness","blackness"],cmyk:["cyan","magenta","yellow","black"]},y.prototype.maxes={rgb:[255,255,255],hsl:[360,100,100],hsv:[360,100,100],hwb:[360,100,100],cmyk:[100,100,100,100]},y.prototype.getValues=function(t){for(var e=this.values,n={},i=0;i<t.length;i++)n[t.charAt(i)]=e[t][i];return 1!==e.alpha&&(n.a=e.alpha),n},y.prototype.setValues=function(t,e){var n,i,a=this.values,r=this.spaces,o=this.maxes,l=1;if(this.valid=!0,"alpha"===t)l=e;else if(e.length)a[t]=e.slice(0,t.length),l=e[t.length];else if(void 0!==e[t.charAt(0)]){for(n=0;n<t.length;n++)a[t][n]=e[t.charAt(n)];l=e.a}else if(void 0!==e[r[t][0]]){var u=r[t];for(n=0;n<t.length;n++)a[t][n]=e[u[n]];l=e.alpha}if(a.alpha=Math.max(0,Math.min(1,void 0===l?a.alpha:l)),"alpha"===t)return!1;for(n=0;n<t.length;n++)i=Math.max(0,Math.min(o[t][n],a[t][n])),a[t][n]=Math.round(i);for(var d in r)d!==t&&(a[d]=s[t][d](a[t]));return!0},y.prototype.setSpace=function(t,e){var n=e[0];return void 0===n?this.getValues(t):("number"==typeof n&&(n=Array.prototype.slice.call(e)),this.setValues(t,n),this)},y.prototype.setChannel=function(t,e,n){var i=this.values[t];return void 0===n?i[e]:n===i[e]?this:(i[e]=n,this.setValues(t,i),this)},"undefined"!=typeof window&&(window.Color=y);var _=y;function k(t){return-1===["__proto__","prototype","constructor"].indexOf(t)}var w,M={noop:function(){},uid:(w=0,function(){return w++}),isNullOrUndef:function(t){return null==t},isArray:function(t){if(Array.isArray&&Array.isArray(t))return!0;var e=Object.prototype.toString.call(t);return"[object"===e.substr(0,7)&&"Array]"===e.substr(-6)},isObject:function(t){return null!==t&&"[object Object]"===Object.prototype.toString.call(t)},isFinite:function(t){return("number"==typeof t||t instanceof Number)&&isFinite(t)},valueOrDefault:function(t,e){return void 0===t?e:t},valueAtIndexOrDefault:function(t,e,n){return 
M.valueOrDefault(M.isArray(t)?t[e]:t,n)},callback:function(t,e,n){if(t&&"function"==typeof t.call)return t.apply(n,e)},each:function(t,e,n,i){var a,r,o;if(M.isArray(t))if(r=t.length,i)for(a=r-1;a>=0;a--)e.call(n,t[a],a);else for(a=0;a<r;a++)e.call(n,t[a],a);else if(M.isObject(t))for(r=(o=Object.keys(t)).length,a=0;a<r;a++)e.call(n,t[o[a]],o[a])},arrayEquals:function(t,e){var n,i,a,r;if(!t||!e||t.length!==e.length)return!1;for(n=0,i=t.length;n<i;++n)if(a=t[n],r=e[n],a instanceof Array&&r instanceof Array){if(!M.arrayEquals(a,r))return!1}else if(a!==r)return!1;return!0},clone:function(t){if(M.isArray(t))return t.map(M.clone);if(M.isObject(t)){for(var e=Object.create(t),n=Object.keys(t),i=n.length,a=0;a<i;++a)e[n[a]]=M.clone(t[n[a]]);return e}return t},_merger:function(t,e,n,i){if(k(t)){var a=e[t],r=n[t];M.isObject(a)&&M.isObject(r)?M.merge(a,r,i):e[t]=M.clone(r)}},_mergerIf:function(t,e,n){if(k(t)){var i=e[t],a=n[t];M.isObject(i)&&M.isObject(a)?M.mergeIf(i,a):e.hasOwnProperty(t)||(e[t]=M.clone(a))}},merge:function(t,e,n){var i,a,r,o,s,l=M.isArray(e)?e:[e],u=l.length;if(!M.isObject(t))return t;for(i=(n=n||{}).merger||M._merger,a=0;a<u;++a)if(e=l[a],M.isObject(e))for(s=0,o=(r=Object.keys(e)).length;s<o;++s)i(r[s],t,e,n);return t},mergeIf:function(t,e){return M.merge(t,e,{merger:M._mergerIf})},extend:Object.assign||function(t){return M.merge(t,[].slice.call(arguments,1),{merger:function(t,e,n){e[t]=n[t]}})},inherits:function(t){var e=this,n=t&&t.hasOwnProperty("constructor")?t.constructor:function(){return e.apply(this,arguments)},i=function(){this.constructor=n};return i.prototype=e.prototype,n.prototype=new i,n.extend=M.inherits,t&&M.extend(n.prototype,t),n.__super__=e.prototype,n},_deprecated:function(t,e,n,i){void 0!==e&&console.warn(t+': "'+n+'" is deprecated. 
Please use "'+i+'" instead')}},S=M;M.callCallback=M.callback,M.indexOf=function(t,e,n){return Array.prototype.indexOf.call(t,e,n)},M.getValueOrDefault=M.valueOrDefault,M.getValueAtIndexOrDefault=M.valueAtIndexOrDefault;var C={linear:function(t){return t},easeInQuad:function(t){return t*t},easeOutQuad:function(t){return-t*(t-2)},easeInOutQuad:function(t){return(t/=.5)<1?.5*t*t:-.5*(--t*(t-2)-1)},easeInCubic:function(t){return t*t*t},easeOutCubic:function(t){return(t-=1)*t*t+1},easeInOutCubic:function(t){return(t/=.5)<1?.5*t*t*t:.5*((t-=2)*t*t+2)},easeInQuart:function(t){return t*t*t*t},easeOutQuart:function(t){return-((t-=1)*t*t*t-1)},easeInOutQuart:function(t){return(t/=.5)<1?.5*t*t*t*t:-.5*((t-=2)*t*t*t-2)},easeInQuint:function(t){return t*t*t*t*t},easeOutQuint:function(t){return(t-=1)*t*t*t*t+1},easeInOutQuint:function(t){return(t/=.5)<1?.5*t*t*t*t*t:.5*((t-=2)*t*t*t*t+2)},easeInSine:function(t){return 1-Math.cos(t*(Math.PI/2))},easeOutSine:function(t){return Math.sin(t*(Math.PI/2))},easeInOutSine:function(t){return-.5*(Math.cos(Math.PI*t)-1)},easeInExpo:function(t){return 0===t?0:Math.pow(2,10*(t-1))},easeOutExpo:function(t){return 1===t?1:1-Math.pow(2,-10*t)},easeInOutExpo:function(t){return 0===t?0:1===t?1:(t/=.5)<1?.5*Math.pow(2,10*(t-1)):.5*(2-Math.pow(2,-10*--t))},easeInCirc:function(t){return t>=1?t:-(Math.sqrt(1-t*t)-1)},easeOutCirc:function(t){return Math.sqrt(1-(t-=1)*t)},easeInOutCirc:function(t){return(t/=.5)<1?-.5*(Math.sqrt(1-t*t)-1):.5*(Math.sqrt(1-(t-=2)*t)+1)},easeInElastic:function(t){var e=1.70158,n=0,i=1;return 0===t?0:1===t?1:(n||(n=.3),i<1?(i=1,e=n/4):e=n/(2*Math.PI)*Math.asin(1/i),-i*Math.pow(2,10*(t-=1))*Math.sin((t-e)*(2*Math.PI)/n))},easeOutElastic:function(t){var e=1.70158,n=0,i=1;return 0===t?0:1===t?1:(n||(n=.3),i<1?(i=1,e=n/4):e=n/(2*Math.PI)*Math.asin(1/i),i*Math.pow(2,-10*t)*Math.sin((t-e)*(2*Math.PI)/n)+1)},easeInOutElastic:function(t){var e=1.70158,n=0,i=1;return 
0===t?0:2==(t/=.5)?1:(n||(n=.45),i<1?(i=1,e=n/4):e=n/(2*Math.PI)*Math.asin(1/i),t<1?i*Math.pow(2,10*(t-=1))*Math.sin((t-e)*(2*Math.PI)/n)*-.5:i*Math.pow(2,-10*(t-=1))*Math.sin((t-e)*(2*Math.PI)/n)*.5+1)},easeInBack:function(t){var e=1.70158;return t*t*((e+1)*t-e)},easeOutBack:function(t){var e=1.70158;return(t-=1)*t*((e+1)*t+e)+1},easeInOutBack:function(t){var e=1.70158;return(t/=.5)<1?t*t*((1+(e*=1.525))*t-e)*.5:.5*((t-=2)*t*((1+(e*=1.525))*t+e)+2)},easeInBounce:function(t){return 1-C.easeOutBounce(1-t)},easeOutBounce:function(t){return t<1/2.75?7.5625*t*t:t<2/2.75?7.5625*(t-=1.5/2.75)*t+.75:t<2.5/2.75?7.5625*(t-=2.25/2.75)*t+.9375:7.5625*(t-=2.625/2.75)*t+.984375},easeInOutBounce:function(t){return t<.5?.5*C.easeInBounce(2*t):.5*C.easeOutBounce(2*t-1)+.5}},P={effects:C};S.easingEffects=C;var A=Math.PI,D=A/180,T=2*A,I=A/2,F=A/4,O=2*A/3,L={clear:function(t){t.ctx.clearRect(0,0,t.width,t.height)},roundedRect:function(t,e,n,i,a,r){if(r){var o=Math.min(r,a/2,i/2),s=e+o,l=n+o,u=e+i-o,d=n+a-o;t.moveTo(e,l),s<u&&l<d?(t.arc(s,l,o,-A,-I),t.arc(u,l,o,-I,0),t.arc(u,d,o,0,I),t.arc(s,d,o,I,A)):s<u?(t.moveTo(s,n),t.arc(u,l,o,-I,I),t.arc(s,l,o,I,A+I)):l<d?(t.arc(s,l,o,-A,0),t.arc(s,d,o,0,A)):t.arc(s,l,o,-A,A),t.closePath(),t.moveTo(e,n)}else t.rect(e,n,i,a)},drawPoint:function(t,e,n,i,a,r){var o,s,l,u,d,h=(r||0)*D;if(e&&"object"==typeof e&&("[object HTMLImageElement]"===(o=e.toString())||"[object HTMLCanvasElement]"===o))return t.save(),t.translate(i,a),t.rotate(h),t.drawImage(e,-e.width/2,-e.height/2,e.width,e.height),void 
t.restore();if(!(isNaN(n)||n<=0)){switch(t.beginPath(),e){default:t.arc(i,a,n,0,T),t.closePath();break;case"triangle":t.moveTo(i+Math.sin(h)*n,a-Math.cos(h)*n),h+=O,t.lineTo(i+Math.sin(h)*n,a-Math.cos(h)*n),h+=O,t.lineTo(i+Math.sin(h)*n,a-Math.cos(h)*n),t.closePath();break;case"rectRounded":u=n-(d=.516*n),s=Math.cos(h+F)*u,l=Math.sin(h+F)*u,t.arc(i-s,a-l,d,h-A,h-I),t.arc(i+l,a-s,d,h-I,h),t.arc(i+s,a+l,d,h,h+I),t.arc(i-l,a+s,d,h+I,h+A),t.closePath();break;case"rect":if(!r){u=Math.SQRT1_2*n,t.rect(i-u,a-u,2*u,2*u);break}h+=F;case"rectRot":s=Math.cos(h)*n,l=Math.sin(h)*n,t.moveTo(i-s,a-l),t.lineTo(i+l,a-s),t.lineTo(i+s,a+l),t.lineTo(i-l,a+s),t.closePath();break;case"crossRot":h+=F;case"cross":s=Math.cos(h)*n,l=Math.sin(h)*n,t.moveTo(i-s,a-l),t.lineTo(i+s,a+l),t.moveTo(i+l,a-s),t.lineTo(i-l,a+s);break;case"star":s=Math.cos(h)*n,l=Math.sin(h)*n,t.moveTo(i-s,a-l),t.lineTo(i+s,a+l),t.moveTo(i+l,a-s),t.lineTo(i-l,a+s),h+=F,s=Math.cos(h)*n,l=Math.sin(h)*n,t.moveTo(i-s,a-l),t.lineTo(i+s,a+l),t.moveTo(i+l,a-s),t.lineTo(i-l,a+s);break;case"line":s=Math.cos(h)*n,l=Math.sin(h)*n,t.moveTo(i-s,a-l),t.lineTo(i+s,a+l);break;case"dash":t.moveTo(i,a),t.lineTo(i+Math.cos(h)*n,a+Math.sin(h)*n)}t.fill(),t.stroke()}},_isPointInArea:function(t,e){return t.x>e.left-1e-6&&t.x<e.right+1e-6&&t.y>e.top-1e-6&&t.y<e.bottom+1e-6},clipArea:function(t,e){t.save(),t.beginPath(),t.rect(e.left,e.top,e.right-e.left,e.bottom-e.top),t.clip()},unclipArea:function(t){t.restore()},lineTo:function(t,e,n,i){var a=n.steppedLine;if(a){if("middle"===a){var r=(e.x+n.x)/2;t.lineTo(r,i?n.y:e.y),t.lineTo(r,i?e.y:n.y)}else"after"===a&&!i||"after"!==a&&i?t.lineTo(e.x,n.y):t.lineTo(n.x,e.y);t.lineTo(n.x,n.y)}else 
n.tension?t.bezierCurveTo(i?e.controlPointPreviousX:e.controlPointNextX,i?e.controlPointPreviousY:e.controlPointNextY,i?n.controlPointNextX:n.controlPointPreviousX,i?n.controlPointNextY:n.controlPointPreviousY,n.x,n.y):t.lineTo(n.x,n.y)}},R=L;S.clear=L.clear,S.drawRoundedRectangle=function(t){t.beginPath(),L.roundedRect.apply(L,arguments)};var z={_set:function(t,e){return S.merge(this[t]||(this[t]={}),e)}};z._set("global",{defaultColor:"rgba(0,0,0,0.1)",defaultFontColor:"#666",defaultFontFamily:"'Helvetica Neue', 'Helvetica', 'Arial', sans-serif",defaultFontSize:12,defaultFontStyle:"normal",defaultLineHeight:1.2,showLines:!0});var N=z,B=S.valueOrDefault;var E={toLineHeight:function(t,e){var n=(""+t).match(/^(normal|(\d+(?:\.\d+)?)(px|em|%)?)$/);if(!n||"normal"===n[1])return 1.2*e;switch(t=+n[2],n[3]){case"px":return t;case"%":t/=100}return e*t},toPadding:function(t){var e,n,i,a;return S.isObject(t)?(e=+t.top||0,n=+t.right||0,i=+t.bottom||0,a=+t.left||0):e=n=i=a=+t||0,{top:e,right:n,bottom:i,left:a,height:e+i,width:a+n}},_parseFont:function(t){var e=N.global,n=B(t.fontSize,e.defaultFontSize),i={family:B(t.fontFamily,e.defaultFontFamily),lineHeight:S.options.toLineHeight(B(t.lineHeight,e.defaultLineHeight),n),size:n,style:B(t.fontStyle,e.defaultFontStyle),weight:null,string:""};return i.string=function(t){return!t||S.isNullOrUndef(t.size)||S.isNullOrUndef(t.family)?null:(t.style?t.style+" ":"")+(t.weight?t.weight+" ":"")+t.size+"px "+t.family}(i),i},resolve:function(t,e,n,i){var a,r,o,s=!0;for(a=0,r=t.length;a<r;++a)if(void 0!==(o=t[a])&&(void 0!==e&&"function"==typeof o&&(o=o(e),s=!1),void 0!==n&&S.isArray(o)&&(o=o[n],s=!1),void 0!==o))return i&&!s&&(i.cacheable=!1),o}},W={_factorize:function(t){var e,n=[],i=Math.sqrt(t);for(e=1;e<i;e++)t%e==0&&(n.push(e),n.push(t/e));return i===(0|i)&&n.push(i),n.sort((function(t,e){return t-e})).pop(),n},log10:Math.log10||function(t){var e=Math.log(t)*Math.LOG10E,n=Math.round(e);return 
t===Math.pow(10,n)?n:e}},V=W;S.log10=W.log10;var H=S,j=P,q=R,U=E,Y=V,G={getRtlAdapter:function(t,e,n){return t?function(t,e){return{x:function(n){return t+t+e-n},setWidth:function(t){e=t},textAlign:function(t){return"center"===t?t:"right"===t?"left":"right"},xPlus:function(t,e){return t-e},leftForLtr:function(t,e){return t-e}}}(e,n):{x:function(t){return t},setWidth:function(t){},textAlign:function(t){return t},xPlus:function(t,e){return t+e},leftForLtr:function(t,e){return t}}},overrideTextDirection:function(t,e){var n,i;"ltr"!==e&&"rtl"!==e||(i=[(n=t.canvas.style).getPropertyValue("direction"),n.getPropertyPriority("direction")],n.setProperty("direction",e,"important"),t.prevTextDirection=i)},restoreTextDirection:function(t){var e=t.prevTextDirection;void 0!==e&&(delete t.prevTextDirection,t.canvas.style.setProperty("direction",e[0],e[1]))}};H.easing=j,H.canvas=q,H.options=U,H.math=Y,H.rtl=G;var X=function(t){H.extend(this,t),this.initialize.apply(this,arguments)};H.extend(X.prototype,{_type:void 0,initialize:function(){this.hidden=!1},pivot:function(){var t=this;return t._view||(t._view=H.extend({},t._model)),t._start={},t},transition:function(t){var e=this,n=e._model,i=e._start,a=e._view;return n&&1!==t?(a||(a=e._view={}),i||(i=e._start={}),function(t,e,n,i){var a,r,o,s,l,u,d,h,c,f=Object.keys(n);for(a=0,r=f.length;a<r;++a)if(u=n[o=f[a]],e.hasOwnProperty(o)||(e[o]=u),(s=e[o])!==u&&"_"!==o[0]){if(t.hasOwnProperty(o)||(t[o]=s),(d=typeof u)===typeof(l=t[o]))if("string"===d){if((h=_(l)).valid&&(c=_(u)).valid){e[o]=c.mix(h,i).rgbString();continue}}else if(H.isFinite(l)&&H.isFinite(u)){e[o]=l+(u-l)*i;continue}e[o]=u}}(i,a,n,t),e):(e._view=H.extend({},n),e._start=null,e)},tooltipPosition:function(){return{x:this._model.x,y:this._model.y}},hasValue:function(){return H.isNumber(this._model.x)&&H.isNumber(this._model.y)}}),X.extend=H.inherits;var 
K=X,Z=K.extend({chart:null,currentStep:0,numSteps:60,easing:"",render:null,onAnimationProgress:null,onAnimationComplete:null}),$=Z;Object.defineProperty(Z.prototype,"animationObject",{get:function(){return this}}),Object.defineProperty(Z.prototype,"chartInstance",{get:function(){return this.chart},set:function(t){this.chart=t}}),N._set("global",{animation:{duration:1e3,easing:"easeOutQuart",onProgress:H.noop,onComplete:H.noop}});var J={animations:[],request:null,addAnimation:function(t,e,n,i){var a,r,o=this.animations;for(e.chart=t,e.startTime=Date.now(),e.duration=n,i||(t.animating=!0),a=0,r=o.length;a<r;++a)if(o[a].chart===t)return void(o[a]=e);o.push(e),1===o.length&&this.requestAnimationFrame()},cancelAnimation:function(t){var e=H.findIndex(this.animations,(function(e){return e.chart===t}));-1!==e&&(this.animations.splice(e,1),t.animating=!1)},requestAnimationFrame:function(){var t=this;null===t.request&&(t.request=H.requestAnimFrame.call(window,(function(){t.request=null,t.startDigest()})))},startDigest:function(){this.advance(),this.animations.length>0&&this.requestAnimationFrame()},advance:function(){for(var t,e,n,i,a=this.animations,r=0;r<a.length;)e=(t=a[r]).chart,n=t.numSteps,i=Math.floor((Date.now()-t.startTime)/t.duration*n)+1,t.currentStep=Math.min(i,n),H.callback(t.render,[e,t],e),H.callback(t.onAnimationProgress,[t],e),t.currentStep>=n?(H.callback(t.onAnimationComplete,[t],e),e.animating=!1,a.splice(r,1)):++r}},Q=H.options.resolve,tt=["push","pop","shift","splice","unshift"];function et(t,e){var n=t._chartjs;if(n){var i=n.listeners,a=i.indexOf(e);-1!==a&&i.splice(a,1),i.length>0||(tt.forEach((function(e){delete t[e]})),delete t._chartjs)}}var 
nt=function(t,e){this.initialize(t,e)};H.extend(nt.prototype,{datasetElementType:null,dataElementType:null,_datasetElementOptions:["backgroundColor","borderCapStyle","borderColor","borderDash","borderDashOffset","borderJoinStyle","borderWidth"],_dataElementOptions:["backgroundColor","borderColor","borderWidth","pointStyle"],initialize:function(t,e){var n=this;n.chart=t,n.index=e,n.linkScales(),n.addElements(),n._type=n.getMeta().type},updateIndex:function(t){this.index=t},linkScales:function(){var t=this.getMeta(),e=this.chart,n=e.scales,i=this.getDataset(),a=e.options.scales;null!==t.xAxisID&&t.xAxisID in n&&!i.xAxisID||(t.xAxisID=i.xAxisID||a.xAxes[0].id),null!==t.yAxisID&&t.yAxisID in n&&!i.yAxisID||(t.yAxisID=i.yAxisID||a.yAxes[0].id)},getDataset:function(){return this.chart.data.datasets[this.index]},getMeta:function(){return this.chart.getDatasetMeta(this.index)},getScaleForId:function(t){return this.chart.scales[t]},_getValueScaleId:function(){return this.getMeta().yAxisID},_getIndexScaleId:function(){return this.getMeta().xAxisID},_getValueScale:function(){return this.getScaleForId(this._getValueScaleId())},_getIndexScale:function(){return this.getScaleForId(this._getIndexScaleId())},reset:function(){this._update(!0)},destroy:function(){this._data&&et(this._data,this)},createMetaDataset:function(){var t=this.datasetElementType;return t&&new t({_chart:this.chart,_datasetIndex:this.index})},createMetaData:function(t){var e=this.dataElementType;return e&&new e({_chart:this.chart,_datasetIndex:this.index,_index:t})},addElements:function(){var t,e,n=this.getMeta(),i=this.getDataset().data||[],a=n.data;for(t=0,e=i.length;t<e;++t)a[t]=a[t]||this.createMetaData(t);n.dataset=n.dataset||this.createMetaDataset()},addElementAndReset:function(t){var e=this.createMetaData(t);this.getMeta().data.splice(t,0,e),this.updateElement(e,t,!0)},buildOrUpdateElements:function(){var 
t,e,n=this,i=n.getDataset(),a=i.data||(i.data=[]);n._data!==a&&(n._data&&et(n._data,n),a&&Object.isExtensible(a)&&(e=n,(t=a)._chartjs?t._chartjs.listeners.push(e):(Object.defineProperty(t,"_chartjs",{configurable:!0,enumerable:!1,value:{listeners:[e]}}),tt.forEach((function(e){var n="onData"+e.charAt(0).toUpperCase()+e.slice(1),i=t[e];Object.defineProperty(t,e,{configurable:!0,enumerable:!1,value:function(){var e=Array.prototype.slice.call(arguments),a=i.apply(this,e);return H.each(t._chartjs.listeners,(function(t){"function"==typeof t[n]&&t[n].apply(t,e)})),a}})})))),n._data=a),n.resyncElements()},_configure:function(){this._config=H.merge(Object.create(null),[this.chart.options.datasets[this._type],this.getDataset()],{merger:function(t,e,n){"_meta"!==t&&"data"!==t&&H._merger(t,e,n)}})},_update:function(t){this._configure(),this._cachedDataOpts=null,this.update(t)},update:H.noop,transition:function(t){for(var e=this.getMeta(),n=e.data||[],i=n.length,a=0;a<i;++a)n[a].transition(t);e.dataset&&e.dataset.transition(t)},draw:function(){var t=this.getMeta(),e=t.data||[],n=e.length,i=0;for(t.dataset&&t.dataset.draw();i<n;++i)e[i].draw()},getStyle:function(t){var e,n=this.getMeta(),i=n.dataset;return this._configure(),i&&void 0===t?e=this._resolveDatasetElementOptions(i||{}):(t=t||0,e=this._resolveDataElementOptions(n.data[t]||{},t)),!1!==e.fill&&null!==e.fill||(e.backgroundColor=e.borderColor),e},_resolveDatasetElementOptions:function(t,e){var n,i,a,r,o=this,s=o.chart,l=o._config,u=t.custom||{},d=s.options.elements[o.datasetElementType.prototype._type]||{},h=o._datasetElementOptions,c={},f={chart:s,dataset:o.getDataset(),datasetIndex:o.index,hover:e};for(n=0,i=h.length;n<i;++n)a=h[n],r=e?"hover"+a.charAt(0).toUpperCase()+a.slice(1):a,c[a]=Q([u[r],l[r],d[r]],f);return c},_resolveDataElementOptions:function(t,e){var n=this,i=t&&t.custom,a=n._cachedDataOpts;if(a&&!i)return a;var 
r,o,s,l,u=n.chart,d=n._config,h=u.options.elements[n.dataElementType.prototype._type]||{},c=n._dataElementOptions,f={},g={chart:u,dataIndex:e,dataset:n.getDataset(),datasetIndex:n.index},p={cacheable:!i};if(i=i||{},H.isArray(c))for(o=0,s=c.length;o<s;++o)f[l=c[o]]=Q([i[l],d[l],h[l]],g,e,p);else for(o=0,s=(r=Object.keys(c)).length;o<s;++o)f[l=r[o]]=Q([i[l],d[c[l]],d[l],h[l]],g,e,p);return p.cacheable&&(n._cachedDataOpts=Object.freeze(f)),f},removeHoverStyle:function(t){H.merge(t._model,t.$previousStyle||{}),delete t.$previousStyle},setHoverStyle:function(t){var e=this.chart.data.datasets[t._datasetIndex],n=t._index,i=t.custom||{},a=t._model,r=H.getHoverColor;t.$previousStyle={backgroundColor:a.backgroundColor,borderColor:a.borderColor,borderWidth:a.borderWidth},a.backgroundColor=Q([i.hoverBackgroundColor,e.hoverBackgroundColor,r(a.backgroundColor)],void 0,n),a.borderColor=Q([i.hoverBorderColor,e.hoverBorderColor,r(a.borderColor)],void 0,n),a.borderWidth=Q([i.hoverBorderWidth,e.hoverBorderWidth,a.borderWidth],void 0,n)},_removeDatasetHoverStyle:function(){var t=this.getMeta().dataset;t&&this.removeHoverStyle(t)},_setDatasetHoverStyle:function(){var t,e,n,i,a,r,o=this.getMeta().dataset,s={};if(o){for(r=o._model,a=this._resolveDatasetElementOptions(o,!0),t=0,e=(i=Object.keys(a)).length;t<e;++t)s[n=i[t]]=r[n],r[n]=a[n];o.$previousStyle=s}},resyncElements:function(){var t=this.getMeta(),e=this.getDataset().data,n=t.data.length,i=e.length;i<n?t.data.splice(i,n-i):i>n&&this.insertElements(n,i-n)},insertElements:function(t,e){for(var n=0;n<e;++n)this.addElementAndReset(t+n)},onDataPush:function(){var t=arguments.length;this.insertElements(this.getDataset().data.length-t,t)},onDataPop:function(){this.getMeta().data.pop()},onDataShift:function(){this.getMeta().data.shift()},onDataSplice:function(t,e){this.getMeta().data.splice(t,e),this.insertElements(t,arguments.length-2)},onDataUnshift:function(){this.insertElements(0,arguments.length)}}),nt.extend=H.inherits;var 
it=nt,at=2*Math.PI;function rt(t,e){var n=e.startAngle,i=e.endAngle,a=e.pixelMargin,r=a/e.outerRadius,o=e.x,s=e.y;t.beginPath(),t.arc(o,s,e.outerRadius,n-r,i+r),e.innerRadius>a?(r=a/e.innerRadius,t.arc(o,s,e.innerRadius-a,i+r,n-r,!0)):t.arc(o,s,a,i+Math.PI/2,n-Math.PI/2),t.closePath(),t.clip()}function ot(t,e,n){var i="inner"===e.borderAlign;i?(t.lineWidth=2*e.borderWidth,t.lineJoin="round"):(t.lineWidth=e.borderWidth,t.lineJoin="bevel"),n.fullCircles&&function(t,e,n,i){var a,r=n.endAngle;for(i&&(n.endAngle=n.startAngle+at,rt(t,n),n.endAngle=r,n.endAngle===n.startAngle&&n.fullCircles&&(n.endAngle+=at,n.fullCircles--)),t.beginPath(),t.arc(n.x,n.y,n.innerRadius,n.startAngle+at,n.startAngle,!0),a=0;a<n.fullCircles;++a)t.stroke();for(t.beginPath(),t.arc(n.x,n.y,e.outerRadius,n.startAngle,n.startAngle+at),a=0;a<n.fullCircles;++a)t.stroke()}(t,e,n,i),i&&rt(t,n),t.beginPath(),t.arc(n.x,n.y,e.outerRadius,n.startAngle,n.endAngle),t.arc(n.x,n.y,n.innerRadius,n.endAngle,n.startAngle,!0),t.closePath(),t.stroke()}N._set("global",{elements:{arc:{backgroundColor:N.global.defaultColor,borderColor:"#fff",borderWidth:2,borderAlign:"center"}}});var st=K.extend({_type:"arc",inLabelRange:function(t){var e=this._view;return!!e&&Math.pow(t-e.x,2)<Math.pow(e.radius+e.hoverRadius,2)},inRange:function(t,e){var n=this._view;if(n){for(var i=H.getAngleFromPoint(n,{x:t,y:e}),a=i.angle,r=i.distance,o=n.startAngle,s=n.endAngle;s<o;)s+=at;for(;a>s;)a-=at;for(;a<o;)a+=at;var l=a>=o&&a<=s,u=r>=n.innerRadius&&r<=n.outerRadius;return l&&u}return!1},getCenterPoint:function(){var t=this._view,e=(t.startAngle+t.endAngle)/2,n=(t.innerRadius+t.outerRadius)/2;return{x:t.x+Math.cos(e)*n,y:t.y+Math.sin(e)*n}},getArea:function(){var t=this._view;return Math.PI*((t.endAngle-t.startAngle)/(2*Math.PI))*(Math.pow(t.outerRadius,2)-Math.pow(t.innerRadius,2))},tooltipPosition:function(){var 
t=this._view,e=t.startAngle+(t.endAngle-t.startAngle)/2,n=(t.outerRadius-t.innerRadius)/2+t.innerRadius;return{x:t.x+Math.cos(e)*n,y:t.y+Math.sin(e)*n}},draw:function(){var t,e=this._chart.ctx,n=this._view,i="inner"===n.borderAlign?.33:0,a={x:n.x,y:n.y,innerRadius:n.innerRadius,outerRadius:Math.max(n.outerRadius-i,0),pixelMargin:i,startAngle:n.startAngle,endAngle:n.endAngle,fullCircles:Math.floor(n.circumference/at)};if(e.save(),e.fillStyle=n.backgroundColor,e.strokeStyle=n.borderColor,a.fullCircles){for(a.endAngle=a.startAngle+at,e.beginPath(),e.arc(a.x,a.y,a.outerRadius,a.startAngle,a.endAngle),e.arc(a.x,a.y,a.innerRadius,a.endAngle,a.startAngle,!0),e.closePath(),t=0;t<a.fullCircles;++t)e.fill();a.endAngle=a.startAngle+n.circumference%at}e.beginPath(),e.arc(a.x,a.y,a.outerRadius,a.startAngle,a.endAngle),e.arc(a.x,a.y,a.innerRadius,a.endAngle,a.startAngle,!0),e.closePath(),e.fill(),n.borderWidth&&ot(e,n,a),e.restore()}}),lt=H.valueOrDefault,ut=N.global.defaultColor;N._set("global",{elements:{line:{tension:.4,backgroundColor:ut,borderWidth:3,borderColor:ut,borderCapStyle:"butt",borderDash:[],borderDashOffset:0,borderJoinStyle:"miter",capBezierPoints:!0,fill:!0}}});var dt=K.extend({_type:"line",draw:function(){var 
t,e,n,i=this,a=i._view,r=i._chart.ctx,o=a.spanGaps,s=i._children.slice(),l=N.global,u=l.elements.line,d=-1,h=i._loop;if(s.length){if(i._loop){for(t=0;t<s.length;++t)if(e=H.previousItem(s,t),!s[t]._view.skip&&e._view.skip){s=s.slice(t).concat(s.slice(0,t)),h=o;break}h&&s.push(s[0])}for(r.save(),r.lineCap=a.borderCapStyle||u.borderCapStyle,r.setLineDash&&r.setLineDash(a.borderDash||u.borderDash),r.lineDashOffset=lt(a.borderDashOffset,u.borderDashOffset),r.lineJoin=a.borderJoinStyle||u.borderJoinStyle,r.lineWidth=lt(a.borderWidth,u.borderWidth),r.strokeStyle=a.borderColor||l.defaultColor,r.beginPath(),(n=s[0]._view).skip||(r.moveTo(n.x,n.y),d=0),t=1;t<s.length;++t)n=s[t]._view,e=-1===d?H.previousItem(s,t):s[d],n.skip||(d!==t-1&&!o||-1===d?r.moveTo(n.x,n.y):H.canvas.lineTo(r,e._view,n),d=t);h&&r.closePath(),r.stroke(),r.restore()}}}),ht=H.valueOrDefault,ct=N.global.defaultColor;function ft(t){var e=this._view;return!!e&&Math.abs(t-e.x)<e.radius+e.hitRadius}N._set("global",{elements:{point:{radius:3,pointStyle:"circle",backgroundColor:ct,borderColor:ct,borderWidth:1,hitRadius:1,hoverRadius:4,hoverBorderWidth:1}}});var gt=K.extend({_type:"point",inRange:function(t,e){var n=this._view;return!!n&&Math.pow(t-n.x,2)+Math.pow(e-n.y,2)<Math.pow(n.hitRadius+n.radius,2)},inLabelRange:ft,inXRange:ft,inYRange:function(t){var e=this._view;return!!e&&Math.abs(t-e.y)<e.radius+e.hitRadius},getCenterPoint:function(){var t=this._view;return{x:t.x,y:t.y}},getArea:function(){return Math.PI*Math.pow(this._view.radius,2)},tooltipPosition:function(){var t=this._view;return{x:t.x,y:t.y,padding:t.radius+t.borderWidth}},draw:function(t){var e=this._view,n=this._chart.ctx,i=e.pointStyle,a=e.rotation,r=e.radius,o=e.x,s=e.y,l=N.global,u=l.defaultColor;e.skip||(void 0===t||H.canvas._isPointInArea(e,t))&&(n.strokeStyle=e.borderColor||u,n.lineWidth=ht(e.borderWidth,l.elements.point.borderWidth),n.fillStyle=e.backgroundColor||u,H.canvas.drawPoint(n,i,r,o,s,a))}}),pt=N.global.defaultColor;function 
mt(t){return t&&void 0!==t.width}function vt(t){var e,n,i,a,r;return mt(t)?(r=t.width/2,e=t.x-r,n=t.x+r,i=Math.min(t.y,t.base),a=Math.max(t.y,t.base)):(r=t.height/2,e=Math.min(t.x,t.base),n=Math.max(t.x,t.base),i=t.y-r,a=t.y+r),{left:e,top:i,right:n,bottom:a}}function bt(t,e,n){return t===e?n:t===n?e:t}function xt(t,e,n){var i,a,r,o,s=t.borderWidth,l=function(t){var e=t.borderSkipped,n={};return e?(t.horizontal?t.base>t.x&&(e=bt(e,"left","right")):t.base<t.y&&(e=bt(e,"bottom","top")),n[e]=!0,n):n}(t);return H.isObject(s)?(i=+s.top||0,a=+s.right||0,r=+s.bottom||0,o=+s.left||0):i=a=r=o=+s||0,{t:l.top||i<0?0:i>n?n:i,r:l.right||a<0?0:a>e?e:a,b:l.bottom||r<0?0:r>n?n:r,l:l.left||o<0?0:o>e?e:o}}function yt(t,e,n){var i=null===e,a=null===n,r=!(!t||i&&a)&&vt(t);return r&&(i||e>=r.left&&e<=r.right)&&(a||n>=r.top&&n<=r.bottom)}N._set("global",{elements:{rectangle:{backgroundColor:pt,borderColor:pt,borderSkipped:"bottom",borderWidth:0}}});var _t=K.extend({_type:"rectangle",draw:function(){var t=this._chart.ctx,e=this._view,n=function(t){var e=vt(t),n=e.right-e.left,i=e.bottom-e.top,a=xt(t,n/2,i/2);return{outer:{x:e.left,y:e.top,w:n,h:i},inner:{x:e.left+a.l,y:e.top+a.t,w:n-a.l-a.r,h:i-a.t-a.b}}}(e),i=n.outer,a=n.inner;t.fillStyle=e.backgroundColor,t.fillRect(i.x,i.y,i.w,i.h),i.w===a.w&&i.h===a.h||(t.save(),t.beginPath(),t.rect(i.x,i.y,i.w,i.h),t.clip(),t.fillStyle=e.borderColor,t.rect(a.x,a.y,a.w,a.h),t.fill("evenodd"),t.restore())},height:function(){var t=this._view;return t.base-t.y},inRange:function(t,e){return yt(this._view,t,e)},inLabelRange:function(t,e){var n=this._view;return mt(n)?yt(n,t,null):yt(n,null,e)},inXRange:function(t){return yt(this._view,t,null)},inYRange:function(t){return yt(this._view,null,t)},getCenterPoint:function(){var t,e,n=this._view;return mt(n)?(t=n.x,e=(n.y+n.base)/2):(t=(n.x+n.base)/2,e=n.y),{x:t,y:e}},getArea:function(){var t=this._view;return mt(t)?t.width*Math.abs(t.y-t.base):t.height*Math.abs(t.x-t.base)},tooltipPosition:function(){var 
t=this._view;return{x:t.x,y:t.y}}}),kt={},wt=st,Mt=dt,St=gt,Ct=_t;kt.Arc=wt,kt.Line=Mt,kt.Point=St,kt.Rectangle=Ct;var Pt=H._deprecated,At=H.valueOrDefault;function Dt(t,e,n){var i,a,r=n.barThickness,o=e.stackCount,s=e.pixels[t],l=H.isNullOrUndef(r)?function(t,e){var n,i,a,r,o=t._length;for(a=1,r=e.length;a<r;++a)o=Math.min(o,Math.abs(e[a]-e[a-1]));for(a=0,r=t.getTicks().length;a<r;++a)i=t.getPixelForTick(a),o=a>0?Math.min(o,Math.abs(i-n)):o,n=i;return o}(e.scale,e.pixels):-1;return H.isNullOrUndef(r)?(i=l*n.categoryPercentage,a=n.barPercentage):(i=r*o,a=1),{chunk:i/o,ratio:a,start:s-i/2}}N._set("bar",{hover:{mode:"label"},scales:{xAxes:[{type:"category",offset:!0,gridLines:{offsetGridLines:!0}}],yAxes:[{type:"linear"}]}}),N._set("global",{datasets:{bar:{categoryPercentage:.8,barPercentage:.9}}});var Tt=it.extend({dataElementType:kt.Rectangle,_dataElementOptions:["backgroundColor","borderColor","borderSkipped","borderWidth","barPercentage","barThickness","categoryPercentage","maxBarThickness","minBarLength"],initialize:function(){var t,e,n=this;it.prototype.initialize.apply(n,arguments),(t=n.getMeta()).stack=n.getDataset().stack,t.bar=!0,e=n._getIndexScale().options,Pt("bar chart",e.barPercentage,"scales.[x/y]Axes.barPercentage","dataset.barPercentage"),Pt("bar chart",e.barThickness,"scales.[x/y]Axes.barThickness","dataset.barThickness"),Pt("bar chart",e.categoryPercentage,"scales.[x/y]Axes.categoryPercentage","dataset.categoryPercentage"),Pt("bar chart",n._getValueScale().options.minBarLength,"scales.[x/y]Axes.minBarLength","dataset.minBarLength"),Pt("bar chart",e.maxBarThickness,"scales.[x/y]Axes.maxBarThickness","dataset.maxBarThickness")},update:function(t){var e,n,i=this.getMeta().data;for(this._ruler=this.getRuler(),e=0,n=i.length;e<n;++e)this.updateElement(i[e],e,t)},updateElement:function(t,e,n){var 
i=this,a=i.getMeta(),r=i.getDataset(),o=i._resolveDataElementOptions(t,e);t._xScale=i.getScaleForId(a.xAxisID),t._yScale=i.getScaleForId(a.yAxisID),t._datasetIndex=i.index,t._index=e,t._model={backgroundColor:o.backgroundColor,borderColor:o.borderColor,borderSkipped:o.borderSkipped,borderWidth:o.borderWidth,datasetLabel:r.label,label:i.chart.data.labels[e]},H.isArray(r.data[e])&&(t._model.borderSkipped=null),i._updateElementGeometry(t,e,n,o),t.pivot()},_updateElementGeometry:function(t,e,n,i){var a=this,r=t._model,o=a._getValueScale(),s=o.getBasePixel(),l=o.isHorizontal(),u=a._ruler||a.getRuler(),d=a.calculateBarValuePixels(a.index,e,i),h=a.calculateBarIndexPixels(a.index,e,u,i);r.horizontal=l,r.base=n?s:d.base,r.x=l?n?s:d.head:h.center,r.y=l?h.center:n?s:d.head,r.height=l?h.size:void 0,r.width=l?void 0:h.size},_getStacks:function(t){var e,n,i=this._getIndexScale(),a=i._getMatchingVisibleMetas(this._type),r=i.options.stacked,o=a.length,s=[];for(e=0;e<o&&(n=a[e],(!1===r||-1===s.indexOf(n.stack)||void 0===r&&void 0===n.stack)&&s.push(n.stack),n.index!==t);++e);return s},getStackCount:function(){return this._getStacks().length},getStackIndex:function(t,e){var n=this._getStacks(t),i=void 0!==e?n.indexOf(e):-1;return-1===i?n.length-1:i},getRuler:function(){var t,e,n=this._getIndexScale(),i=[];for(t=0,e=this.getMeta().data.length;t<e;++t)i.push(n.getPixelForValue(null,t,this.index));return{pixels:i,start:n._startPixel,end:n._endPixel,stackCount:this.getStackCount(),scale:n}},calculateBarValuePixels:function(t,e,n){var i,a,r,o,s,l,u,d=this.chart,h=this._getValueScale(),c=h.isHorizontal(),f=d.data.datasets,g=h._getMatchingVisibleMetas(this._type),p=h._parseValue(f[t].data[e]),m=n.minBarLength,v=h.options.stacked,b=this.getMeta().stack,x=void 0===p.start?0:p.max>=0&&p.min>=0?p.min:p.max,y=void 0===p.start?p.end:p.max>=0&&p.min>=0?p.max-p.min:p.min-p.max,_=g.length;if(v||void 0===v&&void 0!==b)for(i=0;i<_&&(a=g[i]).index!==t;++i)a.stack===b&&(r=void 
0===(u=h._parseValue(f[a.index].data[e])).start?u.end:u.min>=0&&u.max>=0?u.max:u.min,(p.min<0&&r<0||p.max>=0&&r>0)&&(x+=r));return o=h.getPixelForValue(x),l=(s=h.getPixelForValue(x+y))-o,void 0!==m&&Math.abs(l)<m&&(l=m,s=y>=0&&!c||y<0&&c?o-m:o+m),{size:l,base:o,head:s,center:s+l/2}},calculateBarIndexPixels:function(t,e,n,i){var a="flex"===i.barThickness?function(t,e,n){var i,a=e.pixels,r=a[t],o=t>0?a[t-1]:null,s=t<a.length-1?a[t+1]:null,l=n.categoryPercentage;return null===o&&(o=r-(null===s?e.end-e.start:s-r)),null===s&&(s=r+r-o),i=r-(r-Math.min(o,s))/2*l,{chunk:Math.abs(s-o)/2*l/e.stackCount,ratio:n.barPercentage,start:i}}(e,n,i):Dt(e,n,i),r=this.getStackIndex(t,this.getMeta().stack),o=a.start+a.chunk*r+a.chunk/2,s=Math.min(At(i.maxBarThickness,1/0),a.chunk*a.ratio);return{base:o-s/2,head:o+s/2,center:o,size:s}},draw:function(){var t=this.chart,e=this._getValueScale(),n=this.getMeta().data,i=this.getDataset(),a=n.length,r=0;for(H.canvas.clipArea(t.ctx,t.chartArea);r<a;++r){var o=e._parseValue(i.data[r]);isNaN(o.min)||isNaN(o.max)||n[r].draw()}H.canvas.unclipArea(t.ctx)},_resolveDataElementOptions:function(){var t=this,e=H.extend({},it.prototype._resolveDataElementOptions.apply(t,arguments)),n=t._getIndexScale().options,i=t._getValueScale().options;return e.barPercentage=At(n.barPercentage,e.barPercentage),e.barThickness=At(n.barThickness,e.barThickness),e.categoryPercentage=At(n.categoryPercentage,e.categoryPercentage),e.maxBarThickness=At(n.maxBarThickness,e.maxBarThickness),e.minBarLength=At(i.minBarLength,e.minBarLength),e}}),It=H.valueOrDefault,Ft=H.options.resolve;N._set("bubble",{hover:{mode:"single"},scales:{xAxes:[{type:"linear",position:"bottom",id:"x-axis-0"}],yAxes:[{type:"linear",position:"left",id:"y-axis-0"}]},tooltips:{callbacks:{title:function(){return""},label:function(t,e){var n=e.datasets[t.datasetIndex].label||"",i=e.datasets[t.datasetIndex].data[t.index];return n+": ("+t.xLabel+", "+t.yLabel+", "+i.r+")"}}}});var 
Ot=it.extend({dataElementType:kt.Point,_dataElementOptions:["backgroundColor","borderColor","borderWidth","hoverBackgroundColor","hoverBorderColor","hoverBorderWidth","hoverRadius","hitRadius","pointStyle","rotation"],update:function(t){var e=this,n=e.getMeta().data;H.each(n,(function(n,i){e.updateElement(n,i,t)}))},updateElement:function(t,e,n){var i=this,a=i.getMeta(),r=t.custom||{},o=i.getScaleForId(a.xAxisID),s=i.getScaleForId(a.yAxisID),l=i._resolveDataElementOptions(t,e),u=i.getDataset().data[e],d=i.index,h=n?o.getPixelForDecimal(.5):o.getPixelForValue("object"==typeof u?u:NaN,e,d),c=n?s.getBasePixel():s.getPixelForValue(u,e,d);t._xScale=o,t._yScale=s,t._options=l,t._datasetIndex=d,t._index=e,t._model={backgroundColor:l.backgroundColor,borderColor:l.borderColor,borderWidth:l.borderWidth,hitRadius:l.hitRadius,pointStyle:l.pointStyle,rotation:l.rotation,radius:n?0:l.radius,skip:r.skip||isNaN(h)||isNaN(c),x:h,y:c},t.pivot()},setHoverStyle:function(t){var e=t._model,n=t._options,i=H.getHoverColor;t.$previousStyle={backgroundColor:e.backgroundColor,borderColor:e.borderColor,borderWidth:e.borderWidth,radius:e.radius},e.backgroundColor=It(n.hoverBackgroundColor,i(n.backgroundColor)),e.borderColor=It(n.hoverBorderColor,i(n.borderColor)),e.borderWidth=It(n.hoverBorderWidth,n.borderWidth),e.radius=n.radius+n.hoverRadius},_resolveDataElementOptions:function(t,e){var n=this,i=n.chart,a=n.getDataset(),r=t.custom||{},o=a.data[e]||{},s=it.prototype._resolveDataElementOptions.apply(n,arguments),l={chart:i,dataIndex:e,dataset:a,datasetIndex:n.index};return n._cachedDataOpts===s&&(s=H.extend({},s)),s.radius=Ft([r.radius,o.r,n._config.radius,i.options.elements.point.radius],l,e),s}}),Lt=H.valueOrDefault,Rt=Math.PI,zt=2*Rt,Nt=Rt/2;N._set("doughnut",{animation:{animateRotate:!0,animateScale:!1},hover:{mode:"single"},legendCallback:function(t){var 
e,n,i,a=document.createElement("ul"),r=t.data,o=r.datasets,s=r.labels;if(a.setAttribute("class",t.id+"-legend"),o.length)for(e=0,n=o[0].data.length;e<n;++e)(i=a.appendChild(document.createElement("li"))).appendChild(document.createElement("span")).style.backgroundColor=o[0].backgroundColor[e],s[e]&&i.appendChild(document.createTextNode(s[e]));return a.outerHTML},legend:{labels:{generateLabels:function(t){var e=t.data;return e.labels.length&&e.datasets.length?e.labels.map((function(n,i){var a=t.getDatasetMeta(0),r=a.controller.getStyle(i);return{text:n,fillStyle:r.backgroundColor,strokeStyle:r.borderColor,lineWidth:r.borderWidth,hidden:isNaN(e.datasets[0].data[i])||a.data[i].hidden,index:i}})):[]}},onClick:function(t,e){var n,i,a,r=e.index,o=this.chart;for(n=0,i=(o.data.datasets||[]).length;n<i;++n)(a=o.getDatasetMeta(n)).data[r]&&(a.data[r].hidden=!a.data[r].hidden);o.update()}},cutoutPercentage:50,rotation:-Nt,circumference:zt,tooltips:{callbacks:{title:function(){return""},label:function(t,e){var n=e.labels[t.index],i=": "+e.datasets[t.datasetIndex].data[t.index];return H.isArray(n)?(n=n.slice())[0]+=i:n+=i,n}}}});var Bt=it.extend({dataElementType:kt.Arc,linkScales:H.noop,_dataElementOptions:["backgroundColor","borderColor","borderWidth","borderAlign","hoverBackgroundColor","hoverBorderColor","hoverBorderWidth"],getRingIndex:function(t){for(var e=0,n=0;n<t;++n)this.chart.isDatasetVisible(n)&&++e;return e},update:function(t){var e,n,i,a,r=this,o=r.chart,s=o.chartArea,l=o.options,u=1,d=1,h=0,c=0,f=r.getMeta(),g=f.data,p=l.cutoutPercentage/100||0,m=l.circumference,v=r._getRingWeight(r.index);if(m<zt){var 
b=l.rotation%zt,x=(b+=b>=Rt?-zt:b<-Rt?zt:0)+m,y=Math.cos(b),_=Math.sin(b),k=Math.cos(x),w=Math.sin(x),M=b<=0&&x>=0||x>=zt,S=b<=Nt&&x>=Nt||x>=zt+Nt,C=b<=-Nt&&x>=-Nt||x>=Rt+Nt,P=b===-Rt||x>=Rt?-1:Math.min(y,y*p,k,k*p),A=C?-1:Math.min(_,_*p,w,w*p),D=M?1:Math.max(y,y*p,k,k*p),T=S?1:Math.max(_,_*p,w,w*p);u=(D-P)/2,d=(T-A)/2,h=-(D+P)/2,c=-(T+A)/2}for(i=0,a=g.length;i<a;++i)g[i]._options=r._resolveDataElementOptions(g[i],i);for(o.borderWidth=r.getMaxBorderWidth(),e=(s.right-s.left-o.borderWidth)/u,n=(s.bottom-s.top-o.borderWidth)/d,o.outerRadius=Math.max(Math.min(e,n)/2,0),o.innerRadius=Math.max(o.outerRadius*p,0),o.radiusLength=(o.outerRadius-o.innerRadius)/(r._getVisibleDatasetWeightTotal()||1),o.offsetX=h*o.outerRadius,o.offsetY=c*o.outerRadius,f.total=r.calculateTotal(),r.outerRadius=o.outerRadius-o.radiusLength*r._getRingWeightOffset(r.index),r.innerRadius=Math.max(r.outerRadius-o.radiusLength*v,0),i=0,a=g.length;i<a;++i)r.updateElement(g[i],i,t)},updateElement:function(t,e,n){var i=this,a=i.chart,r=a.chartArea,o=a.options,s=o.animation,l=(r.left+r.right)/2,u=(r.top+r.bottom)/2,d=o.rotation,h=o.rotation,c=i.getDataset(),f=n&&s.animateRotate?0:t.hidden?0:i.calculateCircumference(c.data[e])*(o.circumference/zt),g=n&&s.animateScale?0:i.innerRadius,p=n&&s.animateScale?0:i.outerRadius,m=t._options||{};H.extend(t,{_datasetIndex:i.index,_index:e,_model:{backgroundColor:m.backgroundColor,borderColor:m.borderColor,borderWidth:m.borderWidth,borderAlign:m.borderAlign,x:l+a.offsetX,y:u+a.offsetY,startAngle:d,endAngle:h,circumference:f,outerRadius:p,innerRadius:g,label:H.valueAtIndexOrDefault(c.label,e,a.data.labels[e])}});var v=t._model;n&&s.animateRotate||(v.startAngle=0===e?o.rotation:i.getMeta().data[e-1]._model.endAngle,v.endAngle=v.startAngle+v.circumference),t.pivot()},calculateTotal:function(){var t,e=this.getDataset(),n=this.getMeta(),i=0;return H.each(n.data,(function(n,a){t=e.data[a],isNaN(t)||n.hidden||(i+=Math.abs(t))})),i},calculateCircumference:function(t){var 
e=this.getMeta().total;return e>0&&!isNaN(t)?zt*(Math.abs(t)/e):0},getMaxBorderWidth:function(t){var e,n,i,a,r,o,s,l,u=0,d=this.chart;if(!t)for(e=0,n=d.data.datasets.length;e<n;++e)if(d.isDatasetVisible(e)){t=(i=d.getDatasetMeta(e)).data,e!==this.index&&(r=i.controller);break}if(!t)return 0;for(e=0,n=t.length;e<n;++e)a=t[e],r?(r._configure(),o=r._resolveDataElementOptions(a,e)):o=a._options,"inner"!==o.borderAlign&&(s=o.borderWidth,u=(l=o.hoverBorderWidth)>(u=s>u?s:u)?l:u);return u},setHoverStyle:function(t){var e=t._model,n=t._options,i=H.getHoverColor;t.$previousStyle={backgroundColor:e.backgroundColor,borderColor:e.borderColor,borderWidth:e.borderWidth},e.backgroundColor=Lt(n.hoverBackgroundColor,i(n.backgroundColor)),e.borderColor=Lt(n.hoverBorderColor,i(n.borderColor)),e.borderWidth=Lt(n.hoverBorderWidth,n.borderWidth)},_getRingWeightOffset:function(t){for(var e=0,n=0;n<t;++n)this.chart.isDatasetVisible(n)&&(e+=this._getRingWeight(n));return e},_getRingWeight:function(t){return Math.max(Lt(this.chart.data.datasets[t].weight,1),0)},_getVisibleDatasetWeightTotal:function(){return this._getRingWeightOffset(this.chart.data.datasets.length)}});N._set("horizontalBar",{hover:{mode:"index",axis:"y"},scales:{xAxes:[{type:"linear",position:"bottom"}],yAxes:[{type:"category",position:"left",offset:!0,gridLines:{offsetGridLines:!0}}]},elements:{rectangle:{borderSkipped:"left"}},tooltips:{mode:"index",axis:"y"}}),N._set("global",{datasets:{horizontalBar:{categoryPercentage:.8,barPercentage:.9}}});var Et=Tt.extend({_getValueScaleId:function(){return this.getMeta().xAxisID},_getIndexScaleId:function(){return this.getMeta().yAxisID}}),Wt=H.valueOrDefault,Vt=H.options.resolve,Ht=H.canvas._isPointInArea;function jt(t,e){var n=t&&t.options.ticks||{},i=n.reverse,a=void 0===n.min?e:0,r=void 0===n.max?e:0;return{start:i?r:a,end:i?a:r}}function qt(t,e,n){var i=n/2,a=jt(t,i),r=jt(e,i);return{top:r.end,right:a.end,bottom:r.start,left:a.start}}function Ut(t){var e,n,i,a;return 
H.isObject(t)?(e=t.top,n=t.right,i=t.bottom,a=t.left):e=n=i=a=t,{top:e,right:n,bottom:i,left:a}}N._set("line",{showLines:!0,spanGaps:!1,hover:{mode:"label"},scales:{xAxes:[{type:"category",id:"x-axis-0"}],yAxes:[{type:"linear",id:"y-axis-0"}]}});var Yt=it.extend({datasetElementType:kt.Line,dataElementType:kt.Point,_datasetElementOptions:["backgroundColor","borderCapStyle","borderColor","borderDash","borderDashOffset","borderJoinStyle","borderWidth","cubicInterpolationMode","fill"],_dataElementOptions:{backgroundColor:"pointBackgroundColor",borderColor:"pointBorderColor",borderWidth:"pointBorderWidth",hitRadius:"pointHitRadius",hoverBackgroundColor:"pointHoverBackgroundColor",hoverBorderColor:"pointHoverBorderColor",hoverBorderWidth:"pointHoverBorderWidth",hoverRadius:"pointHoverRadius",pointStyle:"pointStyle",radius:"pointRadius",rotation:"pointRotation"},update:function(t){var e,n,i=this,a=i.getMeta(),r=a.dataset,o=a.data||[],s=i.chart.options,l=i._config,u=i._showLine=Wt(l.showLine,s.showLines);for(i._xScale=i.getScaleForId(a.xAxisID),i._yScale=i.getScaleForId(a.yAxisID),u&&(void 0!==l.tension&&void 0===l.lineTension&&(l.lineTension=l.tension),r._scale=i._yScale,r._datasetIndex=i.index,r._children=o,r._model=i._resolveDatasetElementOptions(r),r.pivot()),e=0,n=o.length;e<n;++e)i.updateElement(o[e],e,t);for(u&&0!==r._model.tension&&i.updateBezierControlPoints(),e=0,n=o.length;e<n;++e)o[e].pivot()},updateElement:function(t,e,n){var i,a,r=this,o=r.getMeta(),s=t.custom||{},l=r.getDataset(),u=r.index,d=l.data[e],h=r._xScale,c=r._yScale,f=o.dataset._model,g=r._resolveDataElementOptions(t,e);i=h.getPixelForValue("object"==typeof 
d?d:NaN,e,u),a=n?c.getBasePixel():r.calculatePointY(d,e,u),t._xScale=h,t._yScale=c,t._options=g,t._datasetIndex=u,t._index=e,t._model={x:i,y:a,skip:s.skip||isNaN(i)||isNaN(a),radius:g.radius,pointStyle:g.pointStyle,rotation:g.rotation,backgroundColor:g.backgroundColor,borderColor:g.borderColor,borderWidth:g.borderWidth,tension:Wt(s.tension,f?f.tension:0),steppedLine:!!f&&f.steppedLine,hitRadius:g.hitRadius}},_resolveDatasetElementOptions:function(t){var e=this,n=e._config,i=t.custom||{},a=e.chart.options,r=a.elements.line,o=it.prototype._resolveDatasetElementOptions.apply(e,arguments);return o.spanGaps=Wt(n.spanGaps,a.spanGaps),o.tension=Wt(n.lineTension,r.tension),o.steppedLine=Vt([i.steppedLine,n.steppedLine,r.stepped]),o.clip=Ut(Wt(n.clip,qt(e._xScale,e._yScale,o.borderWidth))),o},calculatePointY:function(t,e,n){var i,a,r,o,s,l,u,d=this.chart,h=this._yScale,c=0,f=0;if(h.options.stacked){for(s=+h.getRightValue(t),u=(l=d._getSortedVisibleDatasetMetas()).length,i=0;i<u&&(r=l[i]).index!==n;++i)a=d.data.datasets[r.index],"line"===r.type&&r.yAxisID===h.id&&((o=+h.getRightValue(a.data[e]))<0?f+=o||0:c+=o||0);return s<0?h.getPixelForValue(f+s):h.getPixelForValue(c+s)}return h.getPixelForValue(t)},updateBezierControlPoints:function(){var t,e,n,i,a=this.chart,r=this.getMeta(),o=r.dataset._model,s=a.chartArea,l=r.data||[];function u(t,e,n){return Math.max(Math.min(t,n),e)}if(o.spanGaps&&(l=l.filter((function(t){return!t._model.skip}))),"monotone"===o.cubicInterpolationMode)H.splineCurveMonotone(l);else 
for(t=0,e=l.length;t<e;++t)n=l[t]._model,i=H.splineCurve(H.previousItem(l,t)._model,n,H.nextItem(l,t)._model,o.tension),n.controlPointPreviousX=i.previous.x,n.controlPointPreviousY=i.previous.y,n.controlPointNextX=i.next.x,n.controlPointNextY=i.next.y;if(a.options.elements.line.capBezierPoints)for(t=0,e=l.length;t<e;++t)n=l[t]._model,Ht(n,s)&&(t>0&&Ht(l[t-1]._model,s)&&(n.controlPointPreviousX=u(n.controlPointPreviousX,s.left,s.right),n.controlPointPreviousY=u(n.controlPointPreviousY,s.top,s.bottom)),t<l.length-1&&Ht(l[t+1]._model,s)&&(n.controlPointNextX=u(n.controlPointNextX,s.left,s.right),n.controlPointNextY=u(n.controlPointNextY,s.top,s.bottom)))},draw:function(){var t,e=this.chart,n=this.getMeta(),i=n.data||[],a=e.chartArea,r=e.canvas,o=0,s=i.length;for(this._showLine&&(t=n.dataset._model.clip,H.canvas.clipArea(e.ctx,{left:!1===t.left?0:a.left-t.left,right:!1===t.right?r.width:a.right+t.right,top:!1===t.top?0:a.top-t.top,bottom:!1===t.bottom?r.height:a.bottom+t.bottom}),n.dataset.draw(),H.canvas.unclipArea(e.ctx));o<s;++o)i[o].draw(a)},setHoverStyle:function(t){var e=t._model,n=t._options,i=H.getHoverColor;t.$previousStyle={backgroundColor:e.backgroundColor,borderColor:e.borderColor,borderWidth:e.borderWidth,radius:e.radius},e.backgroundColor=Wt(n.hoverBackgroundColor,i(n.backgroundColor)),e.borderColor=Wt(n.hoverBorderColor,i(n.borderColor)),e.borderWidth=Wt(n.hoverBorderWidth,n.borderWidth),e.radius=Wt(n.hoverRadius,n.radius)}}),Gt=H.options.resolve;N._set("polarArea",{scale:{type:"radialLinear",angleLines:{display:!1},gridLines:{circular:!0},pointLabels:{display:!1},ticks:{beginAtZero:!0}},animation:{animateRotate:!0,animateScale:!0},startAngle:-.5*Math.PI,legendCallback:function(t){var 
e,n,i,a=document.createElement("ul"),r=t.data,o=r.datasets,s=r.labels;if(a.setAttribute("class",t.id+"-legend"),o.length)for(e=0,n=o[0].data.length;e<n;++e)(i=a.appendChild(document.createElement("li"))).appendChild(document.createElement("span")).style.backgroundColor=o[0].backgroundColor[e],s[e]&&i.appendChild(document.createTextNode(s[e]));return a.outerHTML},legend:{labels:{generateLabels:function(t){var e=t.data;return e.labels.length&&e.datasets.length?e.labels.map((function(n,i){var a=t.getDatasetMeta(0),r=a.controller.getStyle(i);return{text:n,fillStyle:r.backgroundColor,strokeStyle:r.borderColor,lineWidth:r.borderWidth,hidden:isNaN(e.datasets[0].data[i])||a.data[i].hidden,index:i}})):[]}},onClick:function(t,e){var n,i,a,r=e.index,o=this.chart;for(n=0,i=(o.data.datasets||[]).length;n<i;++n)(a=o.getDatasetMeta(n)).data[r].hidden=!a.data[r].hidden;o.update()}},tooltips:{callbacks:{title:function(){return""},label:function(t,e){return e.labels[t.index]+": "+t.yLabel}}}});var Xt=it.extend({dataElementType:kt.Arc,linkScales:H.noop,_dataElementOptions:["backgroundColor","borderColor","borderWidth","borderAlign","hoverBackgroundColor","hoverBorderColor","hoverBorderWidth"],_getIndexScaleId:function(){return this.chart.scale.id},_getValueScaleId:function(){return this.chart.scale.id},update:function(t){var e,n,i,a=this,r=a.getDataset(),o=a.getMeta(),s=a.chart.options.startAngle||0,l=a._starts=[],u=a._angles=[],d=o.data;for(a._updateRadius(),o.count=a.countVisibleElements(),e=0,n=r.data.length;e<n;e++)l[e]=s,i=a._computeAngle(e),u[e]=i,s+=i;for(e=0,n=d.length;e<n;++e)d[e]._options=a._resolveDataElementOptions(d[e],e),a.updateElement(d[e],e,t)},_updateRadius:function(){var 
t=this,e=t.chart,n=e.chartArea,i=e.options,a=Math.min(n.right-n.left,n.bottom-n.top);e.outerRadius=Math.max(a/2,0),e.innerRadius=Math.max(i.cutoutPercentage?e.outerRadius/100*i.cutoutPercentage:1,0),e.radiusLength=(e.outerRadius-e.innerRadius)/e.getVisibleDatasetCount(),t.outerRadius=e.outerRadius-e.radiusLength*t.index,t.innerRadius=t.outerRadius-e.radiusLength},updateElement:function(t,e,n){var i=this,a=i.chart,r=i.getDataset(),o=a.options,s=o.animation,l=a.scale,u=a.data.labels,d=l.xCenter,h=l.yCenter,c=o.startAngle,f=t.hidden?0:l.getDistanceFromCenterForValue(r.data[e]),g=i._starts[e],p=g+(t.hidden?0:i._angles[e]),m=s.animateScale?0:l.getDistanceFromCenterForValue(r.data[e]),v=t._options||{};H.extend(t,{_datasetIndex:i.index,_index:e,_scale:l,_model:{backgroundColor:v.backgroundColor,borderColor:v.borderColor,borderWidth:v.borderWidth,borderAlign:v.borderAlign,x:d,y:h,innerRadius:0,outerRadius:n?m:f,startAngle:n&&s.animateRotate?c:g,endAngle:n&&s.animateRotate?c:p,label:H.valueAtIndexOrDefault(u,e,u[e])}}),t.pivot()},countVisibleElements:function(){var t=this.getDataset(),e=this.getMeta(),n=0;return H.each(e.data,(function(e,i){isNaN(t.data[i])||e.hidden||n++})),n},setHoverStyle:function(t){var e=t._model,n=t._options,i=H.getHoverColor,a=H.valueOrDefault;t.$previousStyle={backgroundColor:e.backgroundColor,borderColor:e.borderColor,borderWidth:e.borderWidth},e.backgroundColor=a(n.hoverBackgroundColor,i(n.backgroundColor)),e.borderColor=a(n.hoverBorderColor,i(n.borderColor)),e.borderWidth=a(n.hoverBorderWidth,n.borderWidth)},_computeAngle:function(t){var e=this,n=this.getMeta().count,i=e.getDataset(),a=e.getMeta();if(isNaN(i.data[t])||a.data[t].hidden)return 0;var r={chart:e.chart,dataIndex:t,dataset:i,datasetIndex:e.index};return Gt([e.chart.options.elements.arc.angle,2*Math.PI/n],r,t)}});N._set("pie",H.clone(N.doughnut)),N._set("pie",{cutoutPercentage:0});var 
Kt=Bt,Zt=H.valueOrDefault;N._set("radar",{spanGaps:!1,scale:{type:"radialLinear"},elements:{line:{fill:"start",tension:0}}});var $t=it.extend({datasetElementType:kt.Line,dataElementType:kt.Point,linkScales:H.noop,_datasetElementOptions:["backgroundColor","borderWidth","borderColor","borderCapStyle","borderDash","borderDashOffset","borderJoinStyle","fill"],_dataElementOptions:{backgroundColor:"pointBackgroundColor",borderColor:"pointBorderColor",borderWidth:"pointBorderWidth",hitRadius:"pointHitRadius",hoverBackgroundColor:"pointHoverBackgroundColor",hoverBorderColor:"pointHoverBorderColor",hoverBorderWidth:"pointHoverBorderWidth",hoverRadius:"pointHoverRadius",pointStyle:"pointStyle",radius:"pointRadius",rotation:"pointRotation"},_getIndexScaleId:function(){return this.chart.scale.id},_getValueScaleId:function(){return this.chart.scale.id},update:function(t){var e,n,i=this,a=i.getMeta(),r=a.dataset,o=a.data||[],s=i.chart.scale,l=i._config;for(void 0!==l.tension&&void 0===l.lineTension&&(l.lineTension=l.tension),r._scale=s,r._datasetIndex=i.index,r._children=o,r._loop=!0,r._model=i._resolveDatasetElementOptions(r),r.pivot(),e=0,n=o.length;e<n;++e)i.updateElement(o[e],e,t);for(i.updateBezierControlPoints(),e=0,n=o.length;e<n;++e)o[e].pivot()},updateElement:function(t,e,n){var i=this,a=t.custom||{},r=i.getDataset(),o=i.chart.scale,s=o.getPointPositionForValue(e,r.data[e]),l=i._resolveDataElementOptions(t,e),u=i.getMeta().dataset._model,d=n?o.xCenter:s.x,h=n?o.yCenter:s.y;t._scale=o,t._options=l,t._datasetIndex=i.index,t._index=e,t._model={x:d,y:h,skip:a.skip||isNaN(d)||isNaN(h),radius:l.radius,pointStyle:l.pointStyle,rotation:l.rotation,backgroundColor:l.backgroundColor,borderColor:l.borderColor,borderWidth:l.borderWidth,tension:Zt(a.tension,u?u.tension:0),hitRadius:l.hitRadius}},_resolveDatasetElementOptions:function(){var t=this,e=t._config,n=t.chart.options,i=it.prototype._resolveDatasetElementOptions.apply(t,arguments);return 
i.spanGaps=Zt(e.spanGaps,n.spanGaps),i.tension=Zt(e.lineTension,n.elements.line.tension),i},updateBezierControlPoints:function(){var t,e,n,i,a=this.getMeta(),r=this.chart.chartArea,o=a.data||[];function s(t,e,n){return Math.max(Math.min(t,n),e)}for(a.dataset._model.spanGaps&&(o=o.filter((function(t){return!t._model.skip}))),t=0,e=o.length;t<e;++t)n=o[t]._model,i=H.splineCurve(H.previousItem(o,t,!0)._model,n,H.nextItem(o,t,!0)._model,n.tension),n.controlPointPreviousX=s(i.previous.x,r.left,r.right),n.controlPointPreviousY=s(i.previous.y,r.top,r.bottom),n.controlPointNextX=s(i.next.x,r.left,r.right),n.controlPointNextY=s(i.next.y,r.top,r.bottom)},setHoverStyle:function(t){var e=t._model,n=t._options,i=H.getHoverColor;t.$previousStyle={backgroundColor:e.backgroundColor,borderColor:e.borderColor,borderWidth:e.borderWidth,radius:e.radius},e.backgroundColor=Zt(n.hoverBackgroundColor,i(n.backgroundColor)),e.borderColor=Zt(n.hoverBorderColor,i(n.borderColor)),e.borderWidth=Zt(n.hoverBorderWidth,n.borderWidth),e.radius=Zt(n.hoverRadius,n.radius)}});N._set("scatter",{hover:{mode:"single"},scales:{xAxes:[{id:"x-axis-1",type:"linear",position:"bottom"}],yAxes:[{id:"y-axis-1",type:"linear",position:"left"}]},tooltips:{callbacks:{title:function(){return""},label:function(t){return"("+t.xLabel+", "+t.yLabel+")"}}}}),N._set("global",{datasets:{scatter:{showLine:!1}}});var Jt={bar:Tt,bubble:Ot,doughnut:Bt,horizontalBar:Et,line:Yt,polarArea:Xt,pie:Kt,radar:$t,scatter:Yt};function Qt(t,e){return t.native?{x:t.x,y:t.y}:H.getRelativePosition(t,e)}function te(t,e){var n,i,a,r,o,s,l=t._getSortedVisibleDatasetMetas();for(i=0,r=l.length;i<r;++i)for(a=0,o=(n=l[i].data).length;a<o;++a)(s=n[a])._view.skip||e(s)}function ee(t,e){var n=[];return te(t,(function(t){t.inRange(e.x,e.y)&&n.push(t)})),n}function ne(t,e,n,i){var a=Number.POSITIVE_INFINITY,r=[];return te(t,(function(t){if(!n||t.inRange(e.x,e.y)){var o=t.getCenterPoint(),s=i(e,o);s<a?(r=[t],a=s):s===a&&r.push(t)}})),r}function ie(t){var 
e=-1!==t.indexOf("x"),n=-1!==t.indexOf("y");return function(t,i){var a=e?Math.abs(t.x-i.x):0,r=n?Math.abs(t.y-i.y):0;return Math.sqrt(Math.pow(a,2)+Math.pow(r,2))}}function ae(t,e,n){var i=Qt(e,t);n.axis=n.axis||"x";var a=ie(n.axis),r=n.intersect?ee(t,i):ne(t,i,!1,a),o=[];return r.length?(t._getSortedVisibleDatasetMetas().forEach((function(t){var e=t.data[r[0]._index];e&&!e._view.skip&&o.push(e)})),o):[]}var re={modes:{single:function(t,e){var n=Qt(e,t),i=[];return te(t,(function(t){if(t.inRange(n.x,n.y))return i.push(t),i})),i.slice(0,1)},label:ae,index:ae,dataset:function(t,e,n){var i=Qt(e,t);n.axis=n.axis||"xy";var a=ie(n.axis),r=n.intersect?ee(t,i):ne(t,i,!1,a);return r.length>0&&(r=t.getDatasetMeta(r[0]._datasetIndex).data),r},"x-axis":function(t,e){return ae(t,e,{intersect:!1})},point:function(t,e){return ee(t,Qt(e,t))},nearest:function(t,e,n){var i=Qt(e,t);n.axis=n.axis||"xy";var a=ie(n.axis);return ne(t,i,n.intersect,a)},x:function(t,e,n){var i=Qt(e,t),a=[],r=!1;return te(t,(function(t){t.inXRange(i.x)&&a.push(t),t.inRange(i.x,i.y)&&(r=!0)})),n.intersect&&!r&&(a=[]),a},y:function(t,e,n){var i=Qt(e,t),a=[],r=!1;return te(t,(function(t){t.inYRange(i.y)&&a.push(t),t.inRange(i.x,i.y)&&(r=!0)})),n.intersect&&!r&&(a=[]),a}}},oe=H.extend;function se(t,e){return H.where(t,(function(t){return t.pos===e}))}function le(t,e){return t.sort((function(t,n){var i=e?n:t,a=e?t:n;return i.weight===a.weight?i.index-a.index:i.weight-a.weight}))}function ue(t,e,n,i){return Math.max(t[n],e[n])+Math.max(t[i],e[i])}function de(t,e,n){var i,a,r=n.box,o=t.maxPadding;if(n.size&&(t[n.pos]-=n.size),n.size=n.horizontal?r.height:r.width,t[n.pos]+=n.size,r.getPadding){var s=r.getPadding();o.top=Math.max(o.top,s.top),o.left=Math.max(o.left,s.left),o.bottom=Math.max(o.bottom,s.bottom),o.right=Math.max(o.right,s.right)}if(i=e.outerWidth-ue(o,t,"left","right"),a=e.outerHeight-ue(o,t,"top","bottom"),i!==t.w||a!==t.h){t.w=i,t.h=a;var 
l=n.horizontal?[i,t.w]:[a,t.h];return!(l[0]===l[1]||isNaN(l[0])&&isNaN(l[1]))}}function he(t,e){var n=e.maxPadding;function i(t){var i={left:0,top:0,right:0,bottom:0};return t.forEach((function(t){i[t]=Math.max(e[t],n[t])})),i}return i(t?["left","right"]:["top","bottom"])}function ce(t,e,n){var i,a,r,o,s,l,u=[];for(i=0,a=t.length;i<a;++i)(o=(r=t[i]).box).update(r.width||e.w,r.height||e.h,he(r.horizontal,e)),de(e,n,r)&&(l=!0,u.length&&(s=!0)),o.fullWidth||u.push(r);return s&&ce(u,e,n)||l}function fe(t,e,n){var i,a,r,o,s=n.padding,l=e.x,u=e.y;for(i=0,a=t.length;i<a;++i)o=(r=t[i]).box,r.horizontal?(o.left=o.fullWidth?s.left:e.left,o.right=o.fullWidth?n.outerWidth-s.right:e.left+e.w,o.top=u,o.bottom=u+o.height,o.width=o.right-o.left,u=o.bottom):(o.left=l,o.right=l+o.width,o.top=e.top,o.bottom=e.top+e.h,o.height=o.bottom-o.top,l=o.right);e.x=l,e.y=u}N._set("global",{layout:{padding:{top:0,right:0,bottom:0,left:0}}});var ge,pe={defaults:{},addBox:function(t,e){t.boxes||(t.boxes=[]),e.fullWidth=e.fullWidth||!1,e.position=e.position||"top",e.weight=e.weight||0,e._layers=e._layers||function(){return[{z:0,draw:function(){e.draw.apply(e,arguments)}}]},t.boxes.push(e)},removeBox:function(t,e){var n=t.boxes?t.boxes.indexOf(e):-1;-1!==n&&t.boxes.splice(n,1)},configure:function(t,e,n){for(var i,a=["fullWidth","position","weight"],r=a.length,o=0;o<r;++o)i=a[o],n.hasOwnProperty(i)&&(e[i]=n[i])},update:function(t,e,n){if(t){var i=t.options.layout||{},a=H.options.toPadding(i.padding),r=e-a.width,o=n-a.height,s=function(t){var e=function(t){var e,n,i,a=[];for(e=0,n=(t||[]).length;e<n;++e)i=t[e],a.push({index:e,box:i,pos:i.position,horizontal:i.isHorizontal(),weight:i.weight});return 
a}(t),n=le(se(e,"left"),!0),i=le(se(e,"right")),a=le(se(e,"top"),!0),r=le(se(e,"bottom"));return{leftAndTop:n.concat(a),rightAndBottom:i.concat(r),chartArea:se(e,"chartArea"),vertical:n.concat(i),horizontal:a.concat(r)}}(t.boxes),l=s.vertical,u=s.horizontal,d=Object.freeze({outerWidth:e,outerHeight:n,padding:a,availableWidth:r,vBoxMaxWidth:r/2/l.length,hBoxMaxHeight:o/2}),h=oe({maxPadding:oe({},a),w:r,h:o,x:a.left,y:a.top},a);!function(t,e){var n,i,a;for(n=0,i=t.length;n<i;++n)(a=t[n]).width=a.horizontal?a.box.fullWidth&&e.availableWidth:e.vBoxMaxWidth,a.height=a.horizontal&&e.hBoxMaxHeight}(l.concat(u),d),ce(l,h,d),ce(u,h,d)&&ce(l,h,d),function(t){var e=t.maxPadding;function n(n){var i=Math.max(e[n]-t[n],0);return t[n]+=i,i}t.y+=n("top"),t.x+=n("left"),n("right"),n("bottom")}(h),fe(s.leftAndTop,h,d),h.x+=h.w,h.y+=h.h,fe(s.rightAndBottom,h,d),t.chartArea={left:h.left,top:h.top,right:h.left+h.w,bottom:h.top+h.h},H.each(s.chartArea,(function(e){var n=e.box;oe(n,t.chartArea),n.update(h.w,h.h)}))}}},me=(ge=Object.freeze({__proto__:null,default:"@keyframes chartjs-render-animation{from{opacity:.99}to{opacity:1}}.chartjs-render-monitor{animation:chartjs-render-animation 1ms}.chartjs-size-monitor,.chartjs-size-monitor-expand,.chartjs-size-monitor-shrink{position:absolute;direction:ltr;left:0;top:0;right:0;bottom:0;overflow:hidden;pointer-events:none;visibility:hidden;z-index:-1}.chartjs-size-monitor-expand>div{position:absolute;width:1000000px;height:1000000px;left:0;top:0}.chartjs-size-monitor-shrink>div{position:absolute;width:200%;height:200%;left:0;top:0}"}))&&ge.default||ge,ve="$chartjs",be="chartjs-size-monitor",xe="chartjs-render-monitor",ye="chartjs-render-animation",_e=["animationstart","webkitAnimationStart"],ke={touchstart:"mousedown",touchmove:"mousemove",touchend:"mouseup",pointerenter:"mouseenter",pointerdown:"mousedown",pointermove:"mousemove",pointerup:"mouseup",pointerleave:"mouseout",pointerout:"mouseout"};function we(t,e){var 
n=H.getStyle(t,e),i=n&&n.match(/^(\d+)(\.\d+)?px$/);return i?Number(i[1]):void 0}var Me=!!function(){var t=!1;try{var e=Object.defineProperty({},"passive",{get:function(){t=!0}});window.addEventListener("e",null,e)}catch(t){}return t}()&&{passive:!0};function Se(t,e,n){t.addEventListener(e,n,Me)}function Ce(t,e,n){t.removeEventListener(e,n,Me)}function Pe(t,e,n,i,a){return{type:t,chart:e,native:a||null,x:void 0!==n?n:null,y:void 0!==i?i:null}}function Ae(t){var e=document.createElement("div");return e.className=t||"",e}function De(t,e,n){var i,a,r,o,s=t[ve]||(t[ve]={}),l=s.resizer=function(t){var e=Ae(be),n=Ae(be+"-expand"),i=Ae(be+"-shrink");n.appendChild(Ae()),i.appendChild(Ae()),e.appendChild(n),e.appendChild(i),e._reset=function(){n.scrollLeft=1e6,n.scrollTop=1e6,i.scrollLeft=1e6,i.scrollTop=1e6};var a=function(){e._reset(),t()};return Se(n,"scroll",a.bind(n,"expand")),Se(i,"scroll",a.bind(i,"shrink")),e}((i=function(){if(s.resizer){var i=n.options.maintainAspectRatio&&t.parentNode,a=i?i.clientWidth:0;e(Pe("resize",n)),i&&i.clientWidth<a&&n.canvas&&e(Pe("resize",n))}},r=!1,o=[],function(){o=Array.prototype.slice.call(arguments),a=a||this,r||(r=!0,H.requestAnimFrame.call(window,(function(){r=!1,i.apply(a,o)})))}));!function(t,e){var n=t[ve]||(t[ve]={}),i=n.renderProxy=function(t){t.animationName===ye&&e()};H.each(_e,(function(e){Se(t,e,i)})),n.reflow=!!t.offsetParent,t.classList.add(xe)}(t,(function(){if(s.resizer){var e=t.parentNode;e&&e!==l.parentNode&&e.insertBefore(l,e.firstChild),l._reset()}}))}function Te(t){var e=t[ve]||{},n=e.resizer;delete e.resizer,function(t){var e=t[ve]||{},n=e.renderProxy;n&&(H.each(_e,(function(e){Ce(t,e,n)})),delete e.renderProxy),t.classList.remove(xe)}(t),n&&n.parentNode&&n.parentNode.removeChild(n)}var Ie={disableCSSInjection:!1,_enabled:"undefined"!=typeof window&&"undefined"!=typeof document,_ensureLoaded:function(t){if(!this.disableCSSInjection){var e=t.getRootNode?t.getRootNode():document;!function(t,e){var 
n=t[ve]||(t[ve]={});if(!n.containsStyles){n.containsStyles=!0,e="/* Chart.js */\n"+e;var i=document.createElement("style");i.setAttribute("type","text/css"),i.appendChild(document.createTextNode(e)),t.appendChild(i)}}(e.host?e:document.head,me)}},acquireContext:function(t,e){"string"==typeof t?t=document.getElementById(t):t.length&&(t=t[0]),t&&t.canvas&&(t=t.canvas);var n=t&&t.getContext&&t.getContext("2d");return n&&n.canvas===t?(this._ensureLoaded(t),function(t,e){var n=t.style,i=t.getAttribute("height"),a=t.getAttribute("width");if(t[ve]={initial:{height:i,width:a,style:{display:n.display,height:n.height,width:n.width}}},n.display=n.display||"block",null===a||""===a){var r=we(t,"width");void 0!==r&&(t.width=r)}if(null===i||""===i)if(""===t.style.height)t.height=t.width/(e.options.aspectRatio||2);else{var o=we(t,"height");void 0!==r&&(t.height=o)}}(t,e),n):null},releaseContext:function(t){var e=t.canvas;if(e[ve]){var n=e[ve].initial;["height","width"].forEach((function(t){var i=n[t];H.isNullOrUndef(i)?e.removeAttribute(t):e.setAttribute(t,i)})),H.each(n.style||{},(function(t,n){e.style[n]=t})),e.width=e.width,delete e[ve]}},addEventListener:function(t,e,n){var i=t.canvas;if("resize"!==e){var a=n[ve]||(n[ve]={});Se(i,e,(a.proxies||(a.proxies={}))[t.id+"_"+e]=function(e){n(function(t,e){var n=ke[t.type]||t.type,i=H.getRelativePosition(t,e);return Pe(n,e,i.x,i.y,t)}(e,t))})}else De(i,n,t)},removeEventListener:function(t,e,n){var i=t.canvas;if("resize"!==e){var a=((n[ve]||{}).proxies||{})[t.id+"_"+e];a&&Ce(i,e,a)}else Te(i)}};H.addEvent=Se,H.removeEvent=Ce;var Fe=Ie._enabled?Ie:{acquireContext:function(t){return t&&t.canvas&&(t=t.canvas),t&&t.getContext("2d")||null}},Oe=H.extend({initialize:function(){},acquireContext:function(){},releaseContext:function(){},addEventListener:function(){},removeEventListener:function(){}},Fe);N._set("global",{plugins:{}});var Le={_plugins:[],_cacheId:0,register:function(t){var 
e=this._plugins;[].concat(t).forEach((function(t){-1===e.indexOf(t)&&e.push(t)})),this._cacheId++},unregister:function(t){var e=this._plugins;[].concat(t).forEach((function(t){var n=e.indexOf(t);-1!==n&&e.splice(n,1)})),this._cacheId++},clear:function(){this._plugins=[],this._cacheId++},count:function(){return this._plugins.length},getAll:function(){return this._plugins},notify:function(t,e,n){var i,a,r,o,s,l=this.descriptors(t),u=l.length;for(i=0;i<u;++i)if("function"==typeof(s=(r=(a=l[i]).plugin)[e])&&((o=[t].concat(n||[])).push(a.options),!1===s.apply(r,o)))return!1;return!0},descriptors:function(t){var e=t.$plugins||(t.$plugins={});if(e.id===this._cacheId)return e.descriptors;var n=[],i=[],a=t&&t.config||{},r=a.options&&a.options.plugins||{};return this._plugins.concat(a.plugins||[]).forEach((function(t){if(-1===n.indexOf(t)){var e=t.id,a=r[e];!1!==a&&(!0===a&&(a=H.clone(N.global.plugins[e])),n.push(t),i.push({plugin:t,options:a||{}}))}})),e.descriptors=i,e.id=this._cacheId,i},_invalidate:function(t){delete t.$plugins}},Re={constructors:{},defaults:{},registerScaleType:function(t,e,n){this.constructors[t]=e,this.defaults[t]=H.clone(n)},getScaleConstructor:function(t){return this.constructors.hasOwnProperty(t)?this.constructors[t]:void 0},getScaleDefaults:function(t){return 
this.defaults.hasOwnProperty(t)?H.merge(Object.create(null),[N.scale,this.defaults[t]]):{}},updateScaleDefaults:function(t,e){this.defaults.hasOwnProperty(t)&&(this.defaults[t]=H.extend(this.defaults[t],e))},addScalesToLayout:function(t){H.each(t.scales,(function(e){e.fullWidth=e.options.fullWidth,e.position=e.options.position,e.weight=e.options.weight,pe.addBox(t,e)}))}},ze=H.valueOrDefault,Ne=H.rtl.getRtlAdapter;N._set("global",{tooltips:{enabled:!0,custom:null,mode:"nearest",position:"average",intersect:!0,backgroundColor:"rgba(0,0,0,0.8)",titleFontStyle:"bold",titleSpacing:2,titleMarginBottom:6,titleFontColor:"#fff",titleAlign:"left",bodySpacing:2,bodyFontColor:"#fff",bodyAlign:"left",footerFontStyle:"bold",footerSpacing:2,footerMarginTop:6,footerFontColor:"#fff",footerAlign:"left",yPadding:6,xPadding:6,caretPadding:2,caretSize:5,cornerRadius:6,multiKeyBackground:"#fff",displayColors:!0,borderColor:"rgba(0,0,0,0)",borderWidth:0,callbacks:{beforeTitle:H.noop,title:function(t,e){var n="",i=e.labels,a=i?i.length:0;if(t.length>0){var r=t[0];r.label?n=r.label:r.xLabel?n=r.xLabel:a>0&&r.index<a&&(n=i[r.index])}return n},afterTitle:H.noop,beforeBody:H.noop,beforeLabel:H.noop,label:function(t,e){var n=e.datasets[t.datasetIndex].label||"";return n&&(n+=": "),H.isNullOrUndef(t.value)?n+=t.yLabel:n+=t.value,n},labelColor:function(t,e){var n=e.getDatasetMeta(t.datasetIndex).data[t.index]._view;return{borderColor:n.borderColor,backgroundColor:n.backgroundColor}},labelTextColor:function(){return this._options.bodyFontColor},afterLabel:H.noop,afterBody:H.noop,beforeFooter:H.noop,footer:H.noop,afterFooter:H.noop}}});var Be={average:function(t){if(!t.length)return!1;var e,n,i=0,a=0,r=0;for(e=0,n=t.length;e<n;++e){var o=t[e];if(o&&o.hasValue()){var s=o.tooltipPosition();i+=s.x,a+=s.y,++r}}return{x:i/r,y:a/r}},nearest:function(t,e){var n,i,a,r=e.x,o=e.y,s=Number.POSITIVE_INFINITY;for(n=0,i=t.length;n<i;++n){var l=t[n];if(l&&l.hasValue()){var 
u=l.getCenterPoint(),d=H.distanceBetweenPoints(e,u);d<s&&(s=d,a=l)}}if(a){var h=a.tooltipPosition();r=h.x,o=h.y}return{x:r,y:o}}};function Ee(t,e){return e&&(H.isArray(e)?Array.prototype.push.apply(t,e):t.push(e)),t}function We(t){return("string"==typeof t||t instanceof String)&&t.indexOf("\n")>-1?t.split("\n"):t}function Ve(t){var e=N.global;return{xPadding:t.xPadding,yPadding:t.yPadding,xAlign:t.xAlign,yAlign:t.yAlign,rtl:t.rtl,textDirection:t.textDirection,bodyFontColor:t.bodyFontColor,_bodyFontFamily:ze(t.bodyFontFamily,e.defaultFontFamily),_bodyFontStyle:ze(t.bodyFontStyle,e.defaultFontStyle),_bodyAlign:t.bodyAlign,bodyFontSize:ze(t.bodyFontSize,e.defaultFontSize),bodySpacing:t.bodySpacing,titleFontColor:t.titleFontColor,_titleFontFamily:ze(t.titleFontFamily,e.defaultFontFamily),_titleFontStyle:ze(t.titleFontStyle,e.defaultFontStyle),titleFontSize:ze(t.titleFontSize,e.defaultFontSize),_titleAlign:t.titleAlign,titleSpacing:t.titleSpacing,titleMarginBottom:t.titleMarginBottom,footerFontColor:t.footerFontColor,_footerFontFamily:ze(t.footerFontFamily,e.defaultFontFamily),_footerFontStyle:ze(t.footerFontStyle,e.defaultFontStyle),footerFontSize:ze(t.footerFontSize,e.defaultFontSize),_footerAlign:t.footerAlign,footerSpacing:t.footerSpacing,footerMarginTop:t.footerMarginTop,caretSize:t.caretSize,cornerRadius:t.cornerRadius,backgroundColor:t.backgroundColor,opacity:0,legendColorBackground:t.multiKeyBackground,displayColors:t.displayColors,borderColor:t.borderColor,borderWidth:t.borderWidth}}function He(t,e){return"center"===e?t.x+t.width/2:"right"===e?t.x+t.width-t.xPadding:t.x+t.xPadding}function je(t){return Ee([],We(t))}var qe=K.extend({initialize:function(){this._model=Ve(this._options),this._lastActive=[]},getTitle:function(){var t=this,e=t._options,n=e.callbacks,i=n.beforeTitle.apply(t,arguments),a=n.title.apply(t,arguments),r=n.afterTitle.apply(t,arguments),o=[];return o=Ee(o,We(i)),o=Ee(o,We(a)),o=Ee(o,We(r))},getBeforeBody:function(){return 
je(this._options.callbacks.beforeBody.apply(this,arguments))},getBody:function(t,e){var n=this,i=n._options.callbacks,a=[];return H.each(t,(function(t){var r={before:[],lines:[],after:[]};Ee(r.before,We(i.beforeLabel.call(n,t,e))),Ee(r.lines,i.label.call(n,t,e)),Ee(r.after,We(i.afterLabel.call(n,t,e))),a.push(r)})),a},getAfterBody:function(){return je(this._options.callbacks.afterBody.apply(this,arguments))},getFooter:function(){var t=this,e=t._options.callbacks,n=e.beforeFooter.apply(t,arguments),i=e.footer.apply(t,arguments),a=e.afterFooter.apply(t,arguments),r=[];return r=Ee(r,We(n)),r=Ee(r,We(i)),r=Ee(r,We(a))},update:function(t){var e,n,i,a,r,o,s,l,u,d,h=this,c=h._options,f=h._model,g=h._model=Ve(c),p=h._active,m=h._data,v={xAlign:f.xAlign,yAlign:f.yAlign},b={x:f.x,y:f.y},x={width:f.width,height:f.height},y={x:f.caretX,y:f.caretY};if(p.length){g.opacity=1;var _=[],k=[];y=Be[c.position].call(h,p,h._eventPosition);var w=[];for(e=0,n=p.length;e<n;++e)w.push((i=p[e],a=void 0,r=void 0,o=void 0,s=void 0,l=void 0,u=void 0,d=void 0,a=i._xScale,r=i._yScale||i._scale,o=i._index,s=i._datasetIndex,l=i._chart.getDatasetMeta(s).controller,u=l._getIndexScale(),d=l._getValueScale(),{xLabel:a?a.getLabelForIndex(o,s):"",yLabel:r?r.getLabelForIndex(o,s):"",label:u?""+u.getLabelForIndex(o,s):"",value:d?""+d.getLabelForIndex(o,s):"",index:o,datasetIndex:s,x:i._model.x,y:i._model.y}));c.filter&&(w=w.filter((function(t){return c.filter(t,m)}))),c.itemSort&&(w=w.sort((function(t,e){return c.itemSort(t,e,m)}))),H.each(w,(function(t){_.push(c.callbacks.labelColor.call(h,t,h._chart)),k.push(c.callbacks.labelTextColor.call(h,t,h._chart))})),g.title=h.getTitle(w,m),g.beforeBody=h.getBeforeBody(w,m),g.body=h.getBody(w,m),g.afterBody=h.getAfterBody(w,m),g.footer=h.getFooter(w,m),g.x=y.x,g.y=y.y,g.caretPadding=c.caretPadding,g.labelColors=_,g.labelTextColors=k,g.dataPoints=w,x=function(t,e){var n=t._chart.ctx,i=2*e.yPadding,a=0,r=e.body,o=r.reduce((function(t,e){return 
t+e.before.length+e.lines.length+e.after.length}),0);o+=e.beforeBody.length+e.afterBody.length;var s=e.title.length,l=e.footer.length,u=e.titleFontSize,d=e.bodyFontSize,h=e.footerFontSize;i+=s*u,i+=s?(s-1)*e.titleSpacing:0,i+=s?e.titleMarginBottom:0,i+=o*d,i+=o?(o-1)*e.bodySpacing:0,i+=l?e.footerMarginTop:0,i+=l*h,i+=l?(l-1)*e.footerSpacing:0;var c=0,f=function(t){a=Math.max(a,n.measureText(t).width+c)};return n.font=H.fontString(u,e._titleFontStyle,e._titleFontFamily),H.each(e.title,f),n.font=H.fontString(d,e._bodyFontStyle,e._bodyFontFamily),H.each(e.beforeBody.concat(e.afterBody),f),c=e.displayColors?d+2:0,H.each(r,(function(t){H.each(t.before,f),H.each(t.lines,f),H.each(t.after,f)})),c=0,n.font=H.fontString(h,e._footerFontStyle,e._footerFontFamily),H.each(e.footer,f),{width:a+=2*e.xPadding,height:i}}(this,g),b=function(t,e,n,i){var a=t.x,r=t.y,o=t.caretSize,s=t.caretPadding,l=t.cornerRadius,u=n.xAlign,d=n.yAlign,h=o+s,c=l+s;return"right"===u?a-=e.width:"center"===u&&((a-=e.width/2)+e.width>i.width&&(a=i.width-e.width),a<0&&(a=0)),"top"===d?r+=h:r-="bottom"===d?e.height+h:e.height/2,"center"===d?"left"===u?a+=h:"right"===u&&(a-=h):"left"===u?a-=c:"right"===u&&(a+=c),{x:a,y:r}}(g,x,v=function(t,e){var n,i,a,r,o,s=t._model,l=t._chart,u=t._chart.chartArea,d="center",h="center";s.y<e.height?h="top":s.y>l.height-e.height&&(h="bottom");var c=(u.left+u.right)/2,f=(u.top+u.bottom)/2;"center"===h?(n=function(t){return t<=c},i=function(t){return t>c}):(n=function(t){return t<=e.width/2},i=function(t){return t>=l.width-e.width/2}),a=function(t){return t+e.width+s.caretSize+s.caretPadding>l.width},r=function(t){return t-e.width-s.caretSize-s.caretPadding<0},o=function(t){return t<=f?"top":"bottom"},n(s.x)?(d="left",a(s.x)&&(d="center",h=o(s.y))):i(s.x)&&(d="right",r(s.x)&&(d="center",h=o(s.y)));var g=t._options;return{xAlign:g.xAlign?g.xAlign:d,yAlign:g.yAlign?g.yAlign:h}}(this,x),h._chart)}else g.opacity=0;return 
g.xAlign=v.xAlign,g.yAlign=v.yAlign,g.x=b.x,g.y=b.y,g.width=x.width,g.height=x.height,g.caretX=y.x,g.caretY=y.y,h._model=g,t&&c.custom&&c.custom.call(h,g),h},drawCaret:function(t,e){var n=this._chart.ctx,i=this._view,a=this.getCaretPosition(t,e,i);n.lineTo(a.x1,a.y1),n.lineTo(a.x2,a.y2),n.lineTo(a.x3,a.y3)},getCaretPosition:function(t,e,n){var i,a,r,o,s,l,u=n.caretSize,d=n.cornerRadius,h=n.xAlign,c=n.yAlign,f=t.x,g=t.y,p=e.width,m=e.height;if("center"===c)s=g+m/2,"left"===h?(a=(i=f)-u,r=i,o=s+u,l=s-u):(a=(i=f+p)+u,r=i,o=s-u,l=s+u);else if("left"===h?(i=(a=f+d+u)-u,r=a+u):"right"===h?(i=(a=f+p-d-u)-u,r=a+u):(i=(a=n.caretX)-u,r=a+u),"top"===c)s=(o=g)-u,l=o;else{s=(o=g+m)+u,l=o;var v=r;r=i,i=v}return{x1:i,x2:a,x3:r,y1:o,y2:s,y3:l}},drawTitle:function(t,e,n){var i,a,r,o=e.title,s=o.length;if(s){var l=Ne(e.rtl,e.x,e.width);for(t.x=He(e,e._titleAlign),n.textAlign=l.textAlign(e._titleAlign),n.textBaseline="middle",i=e.titleFontSize,a=e.titleSpacing,n.fillStyle=e.titleFontColor,n.font=H.fontString(i,e._titleFontStyle,e._titleFontFamily),r=0;r<s;++r)n.fillText(o[r],l.x(t.x),t.y+i/2),t.y+=i+a,r+1===s&&(t.y+=e.titleMarginBottom-a)}},drawBody:function(t,e,n){var i,a,r,o,s,l,u,d,h=e.bodyFontSize,c=e.bodySpacing,f=e._bodyAlign,g=e.body,p=e.displayColors,m=0,v=p?He(e,"left"):0,b=Ne(e.rtl,e.x,e.width),x=function(e){n.fillText(e,b.x(t.x+m),t.y+h/2),t.y+=h+c},y=b.textAlign(f);for(n.textAlign=f,n.textBaseline="middle",n.font=H.fontString(h,e._bodyFontStyle,e._bodyFontFamily),t.x=He(e,y),n.fillStyle=e.bodyFontColor,H.each(e.beforeBody,x),m=p&&"right"!==y?"center"===f?h/2+1:h+2:0,s=0,u=g.length;s<u;++s){for(i=g[s],a=e.labelTextColors[s],r=e.labelColors[s],n.fillStyle=a,H.each(i.before,x),l=0,d=(o=i.lines).length;l<d;++l){if(p){var 
_=b.x(v);n.fillStyle=e.legendColorBackground,n.fillRect(b.leftForLtr(_,h),t.y,h,h),n.lineWidth=1,n.strokeStyle=r.borderColor,n.strokeRect(b.leftForLtr(_,h),t.y,h,h),n.fillStyle=r.backgroundColor,n.fillRect(b.leftForLtr(b.xPlus(_,1),h-2),t.y+1,h-2,h-2),n.fillStyle=a}x(o[l])}H.each(i.after,x)}m=0,H.each(e.afterBody,x),t.y-=c},drawFooter:function(t,e,n){var i,a,r=e.footer,o=r.length;if(o){var s=Ne(e.rtl,e.x,e.width);for(t.x=He(e,e._footerAlign),t.y+=e.footerMarginTop,n.textAlign=s.textAlign(e._footerAlign),n.textBaseline="middle",i=e.footerFontSize,n.fillStyle=e.footerFontColor,n.font=H.fontString(i,e._footerFontStyle,e._footerFontFamily),a=0;a<o;++a)n.fillText(r[a],s.x(t.x),t.y+i/2),t.y+=i+e.footerSpacing}},drawBackground:function(t,e,n,i){n.fillStyle=e.backgroundColor,n.strokeStyle=e.borderColor,n.lineWidth=e.borderWidth;var a=e.xAlign,r=e.yAlign,o=t.x,s=t.y,l=i.width,u=i.height,d=e.cornerRadius;n.beginPath(),n.moveTo(o+d,s),"top"===r&&this.drawCaret(t,i),n.lineTo(o+l-d,s),n.quadraticCurveTo(o+l,s,o+l,s+d),"center"===r&&"right"===a&&this.drawCaret(t,i),n.lineTo(o+l,s+u-d),n.quadraticCurveTo(o+l,s+u,o+l-d,s+u),"bottom"===r&&this.drawCaret(t,i),n.lineTo(o+d,s+u),n.quadraticCurveTo(o,s+u,o,s+u-d),"center"===r&&"left"===a&&this.drawCaret(t,i),n.lineTo(o,s+d),n.quadraticCurveTo(o,s,o+d,s),n.closePath(),n.fill(),e.borderWidth>0&&n.stroke()},draw:function(){var t=this._chart.ctx,e=this._view;if(0!==e.opacity){var n={width:e.width,height:e.height},i={x:e.x,y:e.y},a=Math.abs(e.opacity<.001)?0:e.opacity,r=e.title.length||e.beforeBody.length||e.body.length||e.afterBody.length||e.footer.length;this._options.enabled&&r&&(t.save(),t.globalAlpha=a,this.drawBackground(i,e,t,n),i.y+=e.yPadding,H.rtl.overrideTextDirection(t,e.textDirection),this.drawTitle(i,e,t),this.drawBody(i,e,t),this.drawFooter(i,e,t),H.rtl.restoreTextDirection(t,e.textDirection),t.restore())}},handleEvent:function(t){var e,n=this,i=n._options;return 
n._lastActive=n._lastActive||[],"mouseout"===t.type?n._active=[]:(n._active=n._chart.getElementsAtEventForMode(t,i.mode,i),i.reverse&&n._active.reverse()),(e=!H.arrayEquals(n._active,n._lastActive))&&(n._lastActive=n._active,(i.enabled||i.custom)&&(n._eventPosition={x:t.x,y:t.y},n.update(!0),n.pivot())),e}}),Ue=Be,Ye=qe;Ye.positioners=Ue;var Ge=H.valueOrDefault;function Xe(){return H.merge(Object.create(null),[].slice.call(arguments),{merger:function(t,e,n,i){if("xAxes"===t||"yAxes"===t){var a,r,o,s=n[t].length;for(e[t]||(e[t]=[]),a=0;a<s;++a)o=n[t][a],r=Ge(o.type,"xAxes"===t?"category":"linear"),a>=e[t].length&&e[t].push({}),!e[t][a].type||o.type&&o.type!==e[t][a].type?H.merge(e[t][a],[Re.getScaleDefaults(r),o]):H.merge(e[t][a],o)}else H._merger(t,e,n,i)}})}function Ke(){return H.merge(Object.create(null),[].slice.call(arguments),{merger:function(t,e,n,i){var a=e[t]||Object.create(null),r=n[t];"scales"===t?e[t]=Xe(a,r):"scale"===t?e[t]=H.merge(a,[Re.getScaleDefaults(r.type),r]):H._merger(t,e,n,i)}})}function Ze(t){var e=t.options;H.each(t.scales,(function(e){pe.removeBox(t,e)})),e=Ke(N.global,N[t.config.type],e),t.options=t.config.options=e,t.ensureScalesHaveIDs(),t.buildOrUpdateScales(),t.tooltip._options=e.tooltips,t.tooltip.initialize()}function $e(t,e,n){var i,a=function(t){return t.id===i};do{i=e+n++}while(H.findIndex(t,a)>=0);return i}function Je(t){return"top"===t||"bottom"===t}function Qe(t,e){return function(n,i){return n[t]===i[t]?n[e]-i[e]:n[t]-i[t]}}N._set("global",{elements:{},events:["mousemove","mouseout","click","touchstart","touchmove"],hover:{onHover:null,mode:"nearest",intersect:!0,animationDuration:400},onClick:null,maintainAspectRatio:!0,responsive:!0,responsiveAnimationDuration:0});var tn=function(t,e){return this.construct(t,e),this};H.extend(tn.prototype,{construct:function(t,e){var n=this;e=function(t){var e=(t=t||Object.create(null)).data=t.data||{};return 
e.datasets=e.datasets||[],e.labels=e.labels||[],t.options=Ke(N.global,N[t.type],t.options||{}),t}(e);var i=Oe.acquireContext(t,e),a=i&&i.canvas,r=a&&a.height,o=a&&a.width;n.id=H.uid(),n.ctx=i,n.canvas=a,n.config=e,n.width=o,n.height=r,n.aspectRatio=r?o/r:null,n.options=e.options,n._bufferedRender=!1,n._layers=[],n.chart=n,n.controller=n,tn.instances[n.id]=n,Object.defineProperty(n,"data",{get:function(){return n.config.data},set:function(t){n.config.data=t}}),i&&a?(n.initialize(),n.update()):console.error("Failed to create chart: can't acquire context from the given item")},initialize:function(){var t=this;return Le.notify(t,"beforeInit"),H.retinaScale(t,t.options.devicePixelRatio),t.bindEvents(),t.options.responsive&&t.resize(!0),t.initToolTip(),Le.notify(t,"afterInit"),t},clear:function(){return H.canvas.clear(this),this},stop:function(){return J.cancelAnimation(this),this},resize:function(t){var e=this,n=e.options,i=e.canvas,a=n.maintainAspectRatio&&e.aspectRatio||null,r=Math.max(0,Math.floor(H.getMaximumWidth(i))),o=Math.max(0,Math.floor(a?r/a:H.getMaximumHeight(i)));if((e.width!==r||e.height!==o)&&(i.width=e.width=r,i.height=e.height=o,i.style.width=r+"px",i.style.height=o+"px",H.retinaScale(e,n.devicePixelRatio),!t)){var s={width:r,height:o};Le.notify(e,"resize",[s]),n.onResize&&n.onResize(e,s),e.stop(),e.update({duration:n.responsiveAnimationDuration})}},ensureScalesHaveIDs:function(){var t=this.options,e=t.scales||{},n=t.scale;H.each(e.xAxes,(function(t,n){t.id||(t.id=$e(e.xAxes,"x-axis-",n))})),H.each(e.yAxes,(function(t,n){t.id||(t.id=$e(e.yAxes,"y-axis-",n))})),n&&(n.id=n.id||"scale")},buildOrUpdateScales:function(){var t=this,e=t.options,n=t.scales||{},i=[],a=Object.keys(n).reduce((function(t,e){return 
t[e]=!1,t}),{});e.scales&&(i=i.concat((e.scales.xAxes||[]).map((function(t){return{options:t,dtype:"category",dposition:"bottom"}})),(e.scales.yAxes||[]).map((function(t){return{options:t,dtype:"linear",dposition:"left"}})))),e.scale&&i.push({options:e.scale,dtype:"radialLinear",isDefault:!0,dposition:"chartArea"}),H.each(i,(function(e){var i=e.options,r=i.id,o=Ge(i.type,e.dtype);Je(i.position)!==Je(e.dposition)&&(i.position=e.dposition),a[r]=!0;var s=null;if(r in n&&n[r].type===o)(s=n[r]).options=i,s.ctx=t.ctx,s.chart=t;else{var l=Re.getScaleConstructor(o);if(!l)return;s=new l({id:r,type:o,options:i,ctx:t.ctx,chart:t}),n[s.id]=s}s.mergeTicksOptions(),e.isDefault&&(t.scale=s)})),H.each(a,(function(t,e){t||delete n[e]})),t.scales=n,Re.addScalesToLayout(this)},buildOrUpdateControllers:function(){var t,e,n=this,i=[],a=n.data.datasets;for(t=0,e=a.length;t<e;t++){var r=a[t],o=n.getDatasetMeta(t),s=r.type||n.config.type;if(o.type&&o.type!==s&&(n.destroyDatasetMeta(t),o=n.getDatasetMeta(t)),o.type=s,o.order=r.order||0,o.index=t,o.controller)o.controller.updateIndex(t),o.controller.linkScales();else{var l=Jt[o.type];if(void 0===l)throw new Error('"'+o.type+'" is not a chart type.');o.controller=new l(n,t),i.push(o.controller)}}return i},resetElements:function(){var t=this;H.each(t.data.datasets,(function(e,n){t.getDatasetMeta(n).controller.reset()}),t)},reset:function(){this.resetElements(),this.tooltip.initialize()},update:function(t){var e,n,i=this;if(t&&"object"==typeof t||(t={duration:t,lazy:arguments[1]}),Ze(i),Le._invalidate(i),!1!==Le.notify(i,"beforeUpdate")){i.tooltip._data=i.data;var 
a=i.buildOrUpdateControllers();for(e=0,n=i.data.datasets.length;e<n;e++)i.getDatasetMeta(e).controller.buildOrUpdateElements();i.updateLayout(),i.options.animation&&i.options.animation.duration&&H.each(a,(function(t){t.reset()})),i.updateDatasets(),i.tooltip.initialize(),i.lastActive=[],Le.notify(i,"afterUpdate"),i._layers.sort(Qe("z","_idx")),i._bufferedRender?i._bufferedRequest={duration:t.duration,easing:t.easing,lazy:t.lazy}:i.render(t)}},updateLayout:function(){var t=this;!1!==Le.notify(t,"beforeLayout")&&(pe.update(this,this.width,this.height),t._layers=[],H.each(t.boxes,(function(e){e._configure&&e._configure(),t._layers.push.apply(t._layers,e._layers())}),t),t._layers.forEach((function(t,e){t._idx=e})),Le.notify(t,"afterScaleUpdate"),Le.notify(t,"afterLayout"))},updateDatasets:function(){if(!1!==Le.notify(this,"beforeDatasetsUpdate")){for(var t=0,e=this.data.datasets.length;t<e;++t)this.updateDataset(t);Le.notify(this,"afterDatasetsUpdate")}},updateDataset:function(t){var e=this.getDatasetMeta(t),n={meta:e,index:t};!1!==Le.notify(this,"beforeDatasetUpdate",[n])&&(e.controller._update(),Le.notify(this,"afterDatasetUpdate",[n]))},render:function(t){var e=this;t&&"object"==typeof t||(t={duration:t,lazy:arguments[1]});var n=e.options.animation,i=Ge(t.duration,n&&n.duration),a=t.lazy;if(!1!==Le.notify(e,"beforeRender")){var r=function(t){Le.notify(e,"afterRender"),H.callback(n&&n.onComplete,[t],e)};if(n&&i){var o=new $({numSteps:i/16.66,easing:t.easing||n.easing,render:function(t,e){var n=H.easing.effects[e.easing],i=e.currentStep,a=i/e.numSteps;t.draw(n(a),a,i)},onAnimationProgress:n.onProgress,onAnimationComplete:r});J.addAnimation(e,o,i,a)}else e.draw(),r(new $({numSteps:0,chart:e}));return e}},draw:function(t){var 
e,n,i=this;if(i.clear(),H.isNullOrUndef(t)&&(t=1),i.transition(t),!(i.width<=0||i.height<=0)&&!1!==Le.notify(i,"beforeDraw",[t])){for(n=i._layers,e=0;e<n.length&&n[e].z<=0;++e)n[e].draw(i.chartArea);for(i.drawDatasets(t);e<n.length;++e)n[e].draw(i.chartArea);i._drawTooltip(t),Le.notify(i,"afterDraw",[t])}},transition:function(t){for(var e=0,n=(this.data.datasets||[]).length;e<n;++e)this.isDatasetVisible(e)&&this.getDatasetMeta(e).controller.transition(t);this.tooltip.transition(t)},_getSortedDatasetMetas:function(t){var e,n,i=[];for(e=0,n=(this.data.datasets||[]).length;e<n;++e)t&&!this.isDatasetVisible(e)||i.push(this.getDatasetMeta(e));return i.sort(Qe("order","index")),i},_getSortedVisibleDatasetMetas:function(){return this._getSortedDatasetMetas(!0)},drawDatasets:function(t){var e,n;if(!1!==Le.notify(this,"beforeDatasetsDraw",[t])){for(n=(e=this._getSortedVisibleDatasetMetas()).length-1;n>=0;--n)this.drawDataset(e[n],t);Le.notify(this,"afterDatasetsDraw",[t])}},drawDataset:function(t,e){var n={meta:t,index:t.index,easingValue:e};!1!==Le.notify(this,"beforeDatasetDraw",[n])&&(t.controller.draw(e),Le.notify(this,"afterDatasetDraw",[n]))},_drawTooltip:function(t){var e=this.tooltip,n={tooltip:e,easingValue:t};!1!==Le.notify(this,"beforeTooltipDraw",[n])&&(e.draw(),Le.notify(this,"afterTooltipDraw",[n]))},getElementAtEvent:function(t){return re.modes.single(this,t)},getElementsAtEvent:function(t){return re.modes.label(this,t,{intersect:!0})},getElementsAtXAxis:function(t){return re.modes["x-axis"](this,t,{intersect:!0})},getElementsAtEventForMode:function(t,e,n){var i=re.modes[e];return"function"==typeof i?i(this,t,n):[]},getDatasetAtEvent:function(t){return re.modes.dataset(this,t,{intersect:!0})},getDatasetMeta:function(t){var e=this.data.datasets[t];e._meta||(e._meta={});var n=e._meta[this.id];return 
n||(n=e._meta[this.id]={type:null,data:[],dataset:null,controller:null,hidden:null,xAxisID:null,yAxisID:null,order:e.order||0,index:t}),n},getVisibleDatasetCount:function(){for(var t=0,e=0,n=this.data.datasets.length;e<n;++e)this.isDatasetVisible(e)&&t++;return t},isDatasetVisible:function(t){var e=this.getDatasetMeta(t);return"boolean"==typeof e.hidden?!e.hidden:!this.data.datasets[t].hidden},generateLegend:function(){return this.options.legendCallback(this)},destroyDatasetMeta:function(t){var e=this.id,n=this.data.datasets[t],i=n._meta&&n._meta[e];i&&(i.controller.destroy(),delete n._meta[e])},destroy:function(){var t,e,n=this,i=n.canvas;for(n.stop(),t=0,e=n.data.datasets.length;t<e;++t)n.destroyDatasetMeta(t);i&&(n.unbindEvents(),H.canvas.clear(n),Oe.releaseContext(n.ctx),n.canvas=null,n.ctx=null),Le.notify(n,"destroy"),delete tn.instances[n.id]},toBase64Image:function(){return this.canvas.toDataURL.apply(this.canvas,arguments)},initToolTip:function(){var t=this;t.tooltip=new Ye({_chart:t,_chartInstance:t,_data:t.data,_options:t.options.tooltips},t)},bindEvents:function(){var t=this,e=t._listeners={},n=function(){t.eventHandler.apply(t,arguments)};H.each(t.options.events,(function(i){Oe.addEventListener(t,i,n),e[i]=n})),t.options.responsive&&(n=function(){t.resize()},Oe.addEventListener(t,"resize",n),e.resize=n)},unbindEvents:function(){var t=this,e=t._listeners;e&&(delete t._listeners,H.each(e,(function(e,n){Oe.removeEventListener(t,n,e)})))},updateHoverStyle:function(t,e,n){var i,a,r,o=n?"set":"remove";for(a=0,r=t.length;a<r;++a)(i=t[a])&&this.getDatasetMeta(i._datasetIndex).controller[o+"HoverStyle"](i);"dataset"===e&&this.getDatasetMeta(t[0]._datasetIndex).controller["_"+o+"DatasetHoverStyle"]()},eventHandler:function(t){var e=this,n=e.tooltip;if(!1!==Le.notify(e,"beforeEvent",[t])){e._bufferedRender=!0,e._bufferedRequest=null;var i=e.handleEvent(t);n&&(i=n._start?n.handleEvent(t):i|n.handleEvent(t)),Le.notify(e,"afterEvent",[t]);var 
a=e._bufferedRequest;return a?e.render(a):i&&!e.animating&&(e.stop(),e.render({duration:e.options.hover.animationDuration,lazy:!0})),e._bufferedRender=!1,e._bufferedRequest=null,e}},handleEvent:function(t){var e,n=this,i=n.options||{},a=i.hover;return n.lastActive=n.lastActive||[],"mouseout"===t.type?n.active=[]:n.active=n.getElementsAtEventForMode(t,a.mode,a),H.callback(i.onHover||i.hover.onHover,[t.native,n.active],n),"mouseup"!==t.type&&"click"!==t.type||i.onClick&&i.onClick.call(n,t.native,n.active),n.lastActive.length&&n.updateHoverStyle(n.lastActive,a.mode,!1),n.active.length&&a.mode&&n.updateHoverStyle(n.active,a.mode,!0),e=!H.arrayEquals(n.active,n.lastActive),n.lastActive=n.active,e}}),tn.instances={};var en=tn;tn.Controller=tn,tn.types={},H.configMerge=Ke,H.scaleMerge=Xe;function nn(){throw new Error("This method is not implemented: either no adapter can be found or an incomplete integration was provided.")}function an(t){this.options=t||{}}H.extend(an.prototype,{formats:nn,parse:nn,format:nn,add:nn,diff:nn,startOf:nn,endOf:nn,_create:function(t){return t}}),an.override=function(t){H.extend(an.prototype,t)};var rn={_date:an},on={formatters:{values:function(t){return H.isArray(t)?t:""+t},linear:function(t,e,n){var i=n.length>3?n[2]-n[1]:n[1]-n[0];Math.abs(i)>1&&t!==Math.floor(t)&&(i=t-Math.floor(t));var a=H.log10(Math.abs(i)),r="";if(0!==t)if(Math.max(Math.abs(n[0]),Math.abs(n[n.length-1]))<1e-4){var o=H.log10(Math.abs(t)),s=Math.floor(o)-Math.floor(a);s=Math.max(Math.min(s,20),0),r=t.toExponential(s)}else{var l=-1*Math.floor(a);l=Math.max(Math.min(l,20),0),r=t.toFixed(l)}else r="0";return r},logarithmic:function(t,e,n){var i=t/Math.pow(10,Math.floor(H.log10(t)));return 0===t?"0":1===i||2===i||5===i||0===e||e===n.length-1?t.toExponential():""}}},sn=H.isArray,ln=H.isNullOrUndef,un=H.valueOrDefault,dn=H.valueAtIndexOrDefault;function hn(t,e,n){var 
i,a=t.getTicks().length,r=Math.min(e,a-1),o=t.getPixelForTick(r),s=t._startPixel,l=t._endPixel;if(!(n&&(i=1===a?Math.max(o-s,l-o):0===e?(t.getPixelForTick(1)-o)/2:(o-t.getPixelForTick(r-1))/2,(o+=r<e?i:-i)<s-1e-6||o>l+1e-6)))return o}function cn(t,e,n,i){var a,r,o,s,l,u,d,h,c,f,g,p,m,v=n.length,b=[],x=[],y=[],_=0,k=0;for(a=0;a<v;++a){if(s=n[a].label,l=n[a].major?e.major:e.minor,t.font=u=l.string,d=i[u]=i[u]||{data:{},gc:[]},h=l.lineHeight,c=f=0,ln(s)||sn(s)){if(sn(s))for(r=0,o=s.length;r<o;++r)g=s[r],ln(g)||sn(g)||(c=H.measureText(t,d.data,d.gc,c,g),f+=h)}else c=H.measureText(t,d.data,d.gc,c,s),f=h;b.push(c),x.push(f),y.push(h/2),_=Math.max(c,_),k=Math.max(f,k)}function w(t){return{width:b[t]||0,height:x[t]||0,offset:y[t]||0}}return function(t,e){H.each(t,(function(t){var n,i=t.gc,a=i.length/2;if(a>e){for(n=0;n<a;++n)delete t.data[i[n]];i.splice(0,a)}}))}(i,v),p=b.indexOf(_),m=x.indexOf(k),{first:w(0),last:w(v-1),widest:w(p),highest:w(m)}}function fn(t){return t.drawTicks?t.tickMarkLength:0}function gn(t){var e,n;return t.display?(e=H.options._parseFont(t),n=H.options.toPadding(t.padding),e.lineHeight+n.height):0}function pn(t,e){return H.extend(H.options._parseFont({fontFamily:un(e.fontFamily,t.fontFamily),fontSize:un(e.fontSize,t.fontSize),fontStyle:un(e.fontStyle,t.fontStyle),lineHeight:un(e.lineHeight,t.lineHeight)}),{color:H.options.resolve([e.fontColor,t.fontColor,N.global.defaultFontColor])})}function mn(t){var e=pn(t,t.minor);return{minor:e,major:t.major.enabled?pn(t,t.major):e}}function vn(t){var e,n,i,a=[];for(n=0,i=t.length;n<i;++n)void 0!==(e=t[n])._index&&a.push(e);return a}function bn(t,e,n,i){var a,r,o,s,l=un(n,0),u=Math.min(un(i,t.length),t.length),d=0;for(e=Math.ceil(e),i&&(e=(a=i-n)/Math.floor(a/e)),s=l;s<0;)d++,s=Math.round(l+d*e);for(r=Math.max(l,0);r<u;r++)o=t[r],r===s?(o._index=r,d++,s=Math.round(l+d*e)):delete 
o.label}N._set("scale",{display:!0,position:"left",offset:!1,gridLines:{display:!0,color:"rgba(0,0,0,0.1)",lineWidth:1,drawBorder:!0,drawOnChartArea:!0,drawTicks:!0,tickMarkLength:10,zeroLineWidth:1,zeroLineColor:"rgba(0,0,0,0.25)",zeroLineBorderDash:[],zeroLineBorderDashOffset:0,offsetGridLines:!1,borderDash:[],borderDashOffset:0},scaleLabel:{display:!1,labelString:"",padding:{top:4,bottom:4}},ticks:{beginAtZero:!1,minRotation:0,maxRotation:50,mirror:!1,padding:0,reverse:!1,display:!0,autoSkip:!0,autoSkipPadding:0,labelOffset:0,callback:on.formatters.values,minor:{},major:{}}});var xn=K.extend({zeroLineIndex:0,getPadding:function(){return{left:this.paddingLeft||0,top:this.paddingTop||0,right:this.paddingRight||0,bottom:this.paddingBottom||0}},getTicks:function(){return this._ticks},_getLabels:function(){var t=this.chart.data;return this.options.labels||(this.isHorizontal()?t.xLabels:t.yLabels)||t.labels||[]},mergeTicksOptions:function(){},beforeUpdate:function(){H.callback(this.options.beforeUpdate,[this])},update:function(t,e,n){var i,a,r,o,s,l=this,u=l.options.ticks,d=u.sampleSize;if(l.beforeUpdate(),l.maxWidth=t,l.maxHeight=e,l.margins=H.extend({left:0,right:0,top:0,bottom:0},n),l._ticks=null,l.ticks=null,l._labelSizes=null,l._maxLabelLines=0,l.longestLabelWidth=0,l.longestTextCache=l.longestTextCache||{},l._gridLineItems=null,l._labelItems=null,l.beforeSetDimensions(),l.setDimensions(),l.afterSetDimensions(),l.beforeDataLimits(),l.determineDataLimits(),l.afterDataLimits(),l.beforeBuildTicks(),o=l.buildTicks()||[],(!(o=l.afterBuildTicks(o)||o)||!o.length)&&l.ticks)for(o=[],i=0,a=l.ticks.length;i<a;++i)o.push({value:l.ticks[i],major:!1});return l._ticks=o,s=d<o.length,r=l._convertTicksToLabels(s?function(t,e){for(var n=[],i=t.length/e,a=0,r=t.length;a<r;a+=i)n.push(t[Math.floor(a)]);return 
n}(o,d):o),l._configure(),l.beforeCalculateTickRotation(),l.calculateTickRotation(),l.afterCalculateTickRotation(),l.beforeFit(),l.fit(),l.afterFit(),l._ticksToDraw=u.display&&(u.autoSkip||"auto"===u.source)?l._autoSkip(o):o,s&&(r=l._convertTicksToLabels(l._ticksToDraw)),l.ticks=r,l.afterUpdate(),l.minSize},_configure:function(){var t,e,n=this,i=n.options.ticks.reverse;n.isHorizontal()?(t=n.left,e=n.right):(t=n.top,e=n.bottom,i=!i),n._startPixel=t,n._endPixel=e,n._reversePixels=i,n._length=e-t},afterUpdate:function(){H.callback(this.options.afterUpdate,[this])},beforeSetDimensions:function(){H.callback(this.options.beforeSetDimensions,[this])},setDimensions:function(){var t=this;t.isHorizontal()?(t.width=t.maxWidth,t.left=0,t.right=t.width):(t.height=t.maxHeight,t.top=0,t.bottom=t.height),t.paddingLeft=0,t.paddingTop=0,t.paddingRight=0,t.paddingBottom=0},afterSetDimensions:function(){H.callback(this.options.afterSetDimensions,[this])},beforeDataLimits:function(){H.callback(this.options.beforeDataLimits,[this])},determineDataLimits:H.noop,afterDataLimits:function(){H.callback(this.options.afterDataLimits,[this])},beforeBuildTicks:function(){H.callback(this.options.beforeBuildTicks,[this])},buildTicks:H.noop,afterBuildTicks:function(t){var e=this;return sn(t)&&t.length?H.callback(e.options.afterBuildTicks,[e,t]):(e.ticks=H.callback(e.options.afterBuildTicks,[e,e.ticks])||e.ticks,t)},beforeTickToLabelConversion:function(){H.callback(this.options.beforeTickToLabelConversion,[this])},convertTicksToLabels:function(){var t=this.options.ticks;this.ticks=this.ticks.map(t.userCallback||t.callback,this)},afterTickToLabelConversion:function(){H.callback(this.options.afterTickToLabelConversion,[this])},beforeCalculateTickRotation:function(){H.callback(this.options.beforeCalculateTickRotation,[this])},calculateTickRotation:function(){var 
t,e,n,i,a,r,o,s=this,l=s.options,u=l.ticks,d=s.getTicks().length,h=u.minRotation||0,c=u.maxRotation,f=h;!s._isVisible()||!u.display||h>=c||d<=1||!s.isHorizontal()?s.labelRotation=h:(e=(t=s._getLabelSizes()).widest.width,n=t.highest.height-t.highest.offset,i=Math.min(s.maxWidth,s.chart.width-e),e+6>(a=l.offset?s.maxWidth/d:i/(d-1))&&(a=i/(d-(l.offset?.5:1)),r=s.maxHeight-fn(l.gridLines)-u.padding-gn(l.scaleLabel),o=Math.sqrt(e*e+n*n),f=H.toDegrees(Math.min(Math.asin(Math.min((t.highest.height+6)/a,1)),Math.asin(Math.min(r/o,1))-Math.asin(n/o))),f=Math.max(h,Math.min(c,f))),s.labelRotation=f)},afterCalculateTickRotation:function(){H.callback(this.options.afterCalculateTickRotation,[this])},beforeFit:function(){H.callback(this.options.beforeFit,[this])},fit:function(){var t=this,e=t.minSize={width:0,height:0},n=t.chart,i=t.options,a=i.ticks,r=i.scaleLabel,o=i.gridLines,s=t._isVisible(),l="bottom"===i.position,u=t.isHorizontal();if(u?e.width=t.maxWidth:s&&(e.width=fn(o)+gn(r)),u?s&&(e.height=fn(o)+gn(r)):e.height=t.maxHeight,a.display&&s){var d=mn(a),h=t._getLabelSizes(),c=h.first,f=h.last,g=h.widest,p=h.highest,m=.4*d.minor.lineHeight,v=a.padding;if(u){var b=0!==t.labelRotation,x=H.toRadians(t.labelRotation),y=Math.cos(x),_=Math.sin(x),k=_*g.width+y*(p.height-(b?p.offset:0))+(b?0:m);e.height=Math.min(t.maxHeight,e.height+k+v);var w,M,S=t.getPixelForTick(0)-t.left,C=t.right-t.getPixelForTick(t.getTicks().length-1);b?(w=l?y*c.width+_*c.offset:_*(c.height-c.offset),M=l?_*(f.height-f.offset):y*f.width+_*f.offset):(w=c.width/2,M=f.width/2),t.paddingLeft=Math.max((w-S)*t.width/(t.width-S),0)+3,t.paddingRight=Math.max((M-C)*t.width/(t.width-C),0)+3}else{var P=a.mirror?0:g.width+v+m;e.width=Math.min(t.maxWidth,e.width+P),t.paddingTop=c.height/2,t.paddingBottom=f.height/2}}t.handleMargins(),u?(t.width=t._length=n.width-t.margins.left-t.margins.right,t.height=e.height):(t.width=e.width,t.height=t._length=n.height-t.margins.top-t.margins.bottom)},handleMargins:function(){var 
t=this;t.margins&&(t.margins.left=Math.max(t.paddingLeft,t.margins.left),t.margins.top=Math.max(t.paddingTop,t.margins.top),t.margins.right=Math.max(t.paddingRight,t.margins.right),t.margins.bottom=Math.max(t.paddingBottom,t.margins.bottom))},afterFit:function(){H.callback(this.options.afterFit,[this])},isHorizontal:function(){var t=this.options.position;return"top"===t||"bottom"===t},isFullWidth:function(){return this.options.fullWidth},getRightValue:function(t){if(ln(t))return NaN;if(("number"==typeof t||t instanceof Number)&&!isFinite(t))return NaN;if(t)if(this.isHorizontal()){if(void 0!==t.x)return this.getRightValue(t.x)}else if(void 0!==t.y)return this.getRightValue(t.y);return t},_convertTicksToLabels:function(t){var e,n,i,a=this;for(a.ticks=t.map((function(t){return t.value})),a.beforeTickToLabelConversion(),e=a.convertTicksToLabels(t)||a.ticks,a.afterTickToLabelConversion(),n=0,i=t.length;n<i;++n)t[n].label=e[n];return e},_getLabelSizes:function(){var t=this,e=t._labelSizes;return e||(t._labelSizes=e=cn(t.ctx,mn(t.options.ticks),t.getTicks(),t.longestTextCache),t.longestLabelWidth=e.widest.width),e},_parseValue:function(t){var e,n,i,a;return sn(t)?(e=+this.getRightValue(t[0]),n=+this.getRightValue(t[1]),i=Math.min(e,n),a=Math.max(e,n)):(e=void 0,n=t=+this.getRightValue(t),i=t,a=t),{min:i,max:a,start:e,end:n}},_getScaleLabel:function(t){var e=this._parseValue(t);return void 0!==e.start?"["+e.start+", "+e.end+"]":+this.getRightValue(t)},getLabelForIndex:H.noop,getPixelForValue:H.noop,getValueForPixel:H.noop,getPixelForTick:function(t){var e=this.options.offset,n=this._ticks.length,i=1/Math.max(n-(e?0:1),1);return t<0||t>n-1?null:this.getPixelForDecimal(t*i+(e?i/2:0))},getPixelForDecimal:function(t){return this._reversePixels&&(t=1-t),this._startPixel+t*this._length},getDecimalForPixel:function(t){var e=(t-this._startPixel)/this._length;return this._reversePixels?1-e:e},getBasePixel:function(){return 
this.getPixelForValue(this.getBaseValue())},getBaseValue:function(){var t=this.min,e=this.max;return this.beginAtZero?0:t<0&&e<0?e:t>0&&e>0?t:0},_autoSkip:function(t){var e,n,i,a,r=this.options.ticks,o=this._length,s=r.maxTicksLimit||o/this._tickSize()+1,l=r.major.enabled?function(t){var e,n,i=[];for(e=0,n=t.length;e<n;e++)t[e].major&&i.push(e);return i}(t):[],u=l.length,d=l[0],h=l[u-1];if(u>s)return function(t,e,n){var i,a,r=0,o=e[0];for(n=Math.ceil(n),i=0;i<t.length;i++)a=t[i],i===o?(a._index=i,o=e[++r*n]):delete a.label}(t,l,u/s),vn(t);if(i=function(t,e,n,i){var a,r,o,s,l=function(t){var e,n,i=t.length;if(i<2)return!1;for(n=t[0],e=1;e<i;++e)if(t[e]-t[e-1]!==n)return!1;return n}(t),u=(e.length-1)/i;if(!l)return Math.max(u,1);for(o=0,s=(a=H.math._factorize(l)).length-1;o<s;o++)if((r=a[o])>u)return r;return Math.max(u,1)}(l,t,0,s),u>0){for(e=0,n=u-1;e<n;e++)bn(t,i,l[e],l[e+1]);return a=u>1?(h-d)/(u-1):null,bn(t,i,H.isNullOrUndef(a)?0:d-a,d),bn(t,i,h,H.isNullOrUndef(a)?t.length:h+a),vn(t)}return bn(t,i),vn(t)},_tickSize:function(){var t=this.options.ticks,e=H.toRadians(this.labelRotation),n=Math.abs(Math.cos(e)),i=Math.abs(Math.sin(e)),a=this._getLabelSizes(),r=t.autoSkipPadding||0,o=a?a.widest.width+r:0,s=a?a.highest.height+r:0;return this.isHorizontal()?s*n>o*i?o/n:s/i:s*i<o*n?s/n:o/i},_isVisible:function(){var t,e,n,i=this.chart,a=this.options.display;if("auto"!==a)return!!a;for(t=0,e=i.data.datasets.length;t<e;++t)if(i.isDatasetVisible(t)&&((n=i.getDatasetMeta(t)).xAxisID===this.id||n.yAxisID===this.id))return!0;return!1},_computeGridLineItems:function(t){var e,n,i,a,r,o,s,l,u,d,h,c,f,g,p,m,v,b=this,x=b.chart,y=b.options,_=y.gridLines,k=y.position,w=_.offsetGridLines,M=b.isHorizontal(),S=b._ticksToDraw,C=S.length+(w?1:0),P=fn(_),A=[],D=_.drawBorder?dn(_.lineWidth,0,0):0,T=D/2,I=H._alignPixel,F=function(t){return 
I(x,t,D)};for("top"===k?(e=F(b.bottom),s=b.bottom-P,u=e-T,h=F(t.top)+T,f=t.bottom):"bottom"===k?(e=F(b.top),h=t.top,f=F(t.bottom)-T,s=e+T,u=b.top+P):"left"===k?(e=F(b.right),o=b.right-P,l=e-T,d=F(t.left)+T,c=t.right):(e=F(b.left),d=t.left,c=F(t.right)-T,o=e+T,l=b.left+P),n=0;n<C;++n)i=S[n]||{},ln(i.label)&&n<S.length||(n===b.zeroLineIndex&&y.offset===w?(g=_.zeroLineWidth,p=_.zeroLineColor,m=_.zeroLineBorderDash||[],v=_.zeroLineBorderDashOffset||0):(g=dn(_.lineWidth,n,1),p=dn(_.color,n,"rgba(0,0,0,0.1)"),m=_.borderDash||[],v=_.borderDashOffset||0),void 0!==(a=hn(b,i._index||n,w))&&(r=I(x,a,g),M?o=l=d=c=r:s=u=h=f=r,A.push({tx1:o,ty1:s,tx2:l,ty2:u,x1:d,y1:h,x2:c,y2:f,width:g,color:p,borderDash:m,borderDashOffset:v})));return A.ticksLength=C,A.borderValue=e,A},_computeLabelItems:function(){var t,e,n,i,a,r,o,s,l,u,d,h,c=this,f=c.options,g=f.ticks,p=f.position,m=g.mirror,v=c.isHorizontal(),b=c._ticksToDraw,x=mn(g),y=g.padding,_=fn(f.gridLines),k=-H.toRadians(c.labelRotation),w=[];for("top"===p?(r=c.bottom-_-y,o=k?"left":"center"):"bottom"===p?(r=c.top+_+y,o=k?"right":"center"):"left"===p?(a=c.right-(m?0:_)-y,o=m?"left":"right"):(a=c.left+(m?0:_)+y,o=m?"right":"left"),t=0,e=b.length;t<e;++t)i=(n=b[t]).label,ln(i)||(s=c.getPixelForTick(n._index||t)+g.labelOffset,u=(l=n.major?x.major:x.minor).lineHeight,d=sn(i)?i.length:1,v?(a=s,h="top"===p?((k?1:.5)-d)*u:(k?0:.5)*u):(r=s,h=(1-d)*u/2),w.push({x:a,y:r,rotation:k,label:i,font:l,textOffset:h,textAlign:o}));return w},_drawGrid:function(t){var e=this,n=e.options.gridLines;if(n.display){var 
i,a,r,o,s,l=e.ctx,u=e.chart,d=H._alignPixel,h=n.drawBorder?dn(n.lineWidth,0,0):0,c=e._gridLineItems||(e._gridLineItems=e._computeGridLineItems(t));for(r=0,o=c.length;r<o;++r)i=(s=c[r]).width,a=s.color,i&&a&&(l.save(),l.lineWidth=i,l.strokeStyle=a,l.setLineDash&&(l.setLineDash(s.borderDash),l.lineDashOffset=s.borderDashOffset),l.beginPath(),n.drawTicks&&(l.moveTo(s.tx1,s.ty1),l.lineTo(s.tx2,s.ty2)),n.drawOnChartArea&&(l.moveTo(s.x1,s.y1),l.lineTo(s.x2,s.y2)),l.stroke(),l.restore());if(h){var f,g,p,m,v=h,b=dn(n.lineWidth,c.ticksLength-1,1),x=c.borderValue;e.isHorizontal()?(f=d(u,e.left,v)-v/2,g=d(u,e.right,b)+b/2,p=m=x):(p=d(u,e.top,v)-v/2,m=d(u,e.bottom,b)+b/2,f=g=x),l.lineWidth=h,l.strokeStyle=dn(n.color,0),l.beginPath(),l.moveTo(f,p),l.lineTo(g,m),l.stroke()}}},_drawLabels:function(){var t=this;if(t.options.ticks.display){var e,n,i,a,r,o,s,l,u=t.ctx,d=t._labelItems||(t._labelItems=t._computeLabelItems());for(e=0,i=d.length;e<i;++e){if(o=(r=d[e]).font,u.save(),u.translate(r.x,r.y),u.rotate(r.rotation),u.font=o.string,u.fillStyle=o.color,u.textBaseline="middle",u.textAlign=r.textAlign,s=r.label,l=r.textOffset,sn(s))for(n=0,a=s.length;n<a;++n)u.fillText(""+s[n],0,l),l+=o.lineHeight;else u.fillText(s,0,l);u.restore()}}},_drawTitle:function(){var t=this,e=t.ctx,n=t.options,i=n.scaleLabel;if(i.display){var a,r,o=un(i.fontColor,N.global.defaultFontColor),s=H.options._parseFont(i),l=H.options.toPadding(i.padding),u=s.lineHeight/2,d=n.position,h=0;if(t.isHorizontal())a=t.left+t.width/2,r="bottom"===d?t.bottom-u-l.bottom:t.top+u+l.top;else{var c="left"===d;a=c?t.left+u+l.top:t.right-u-l.top,r=t.top+t.height/2,h=c?-.5*Math.PI:.5*Math.PI}e.save(),e.translate(a,r),e.rotate(h),e.textAlign="center",e.textBaseline="middle",e.fillStyle=o,e.font=s.string,e.fillText(i.labelString,0,0),e.restore()}},draw:function(t){this._isVisible()&&(this._drawGrid(t),this._drawTitle(),this._drawLabels())},_layers:function(){var 
t=this,e=t.options,n=e.ticks&&e.ticks.z||0,i=e.gridLines&&e.gridLines.z||0;return t._isVisible()&&n!==i&&t.draw===t._draw?[{z:i,draw:function(){t._drawGrid.apply(t,arguments),t._drawTitle.apply(t,arguments)}},{z:n,draw:function(){t._drawLabels.apply(t,arguments)}}]:[{z:n,draw:function(){t.draw.apply(t,arguments)}}]},_getMatchingVisibleMetas:function(t){var e=this,n=e.isHorizontal();return e.chart._getSortedVisibleDatasetMetas().filter((function(i){return(!t||i.type===t)&&(n?i.xAxisID===e.id:i.yAxisID===e.id)}))}});xn.prototype._draw=xn.prototype.draw;var yn=xn,_n=H.isNullOrUndef,kn=yn.extend({determineDataLimits:function(){var t,e=this,n=e._getLabels(),i=e.options.ticks,a=i.min,r=i.max,o=0,s=n.length-1;void 0!==a&&(t=n.indexOf(a))>=0&&(o=t),void 0!==r&&(t=n.indexOf(r))>=0&&(s=t),e.minIndex=o,e.maxIndex=s,e.min=n[o],e.max=n[s]},buildTicks:function(){var t=this._getLabels(),e=this.minIndex,n=this.maxIndex;this.ticks=0===e&&n===t.length-1?t:t.slice(e,n+1)},getLabelForIndex:function(t,e){var n=this.chart;return n.getDatasetMeta(e).controller._getValueScaleId()===this.id?this.getRightValue(n.data.datasets[e].data[t]):this._getLabels()[t]},_configure:function(){var t=this,e=t.options.offset,n=t.ticks;yn.prototype._configure.call(t),t.isHorizontal()||(t._reversePixels=!t._reversePixels),n&&(t._startValue=t.minIndex-(e?.5:0),t._valueRange=Math.max(n.length-(e?0:1),1))},getPixelForValue:function(t,e,n){var i,a,r,o=this;return _n(e)||_n(n)||(t=o.chart.data.datasets[n].data[e]),_n(t)||(i=o.isHorizontal()?t.x:t.y),(void 0!==i||void 0!==t&&isNaN(e))&&(a=o._getLabels(),t=H.valueOrDefault(i,t),e=-1!==(r=a.indexOf(t))?r:e,isNaN(e)&&(e=t)),o.getPixelForDecimal((e-o._startValue)/o._valueRange)},getPixelForTick:function(t){var e=this.ticks;return t<0||t>e.length-1?null:this.getPixelForValue(e[t],t+this.minIndex)},getValueForPixel:function(t){var e=Math.round(this._startValue+this.getDecimalForPixel(t)*this._valueRange);return 
Math.min(Math.max(e,0),this.ticks.length-1)},getBasePixel:function(){return this.bottom}}),wn={position:"bottom"};kn._defaults=wn;var Mn=H.noop,Sn=H.isNullOrUndef;var Cn=yn.extend({getRightValue:function(t){return"string"==typeof t?+t:yn.prototype.getRightValue.call(this,t)},handleTickRangeOptions:function(){var t=this,e=t.options.ticks;if(e.beginAtZero){var n=H.sign(t.min),i=H.sign(t.max);n<0&&i<0?t.max=0:n>0&&i>0&&(t.min=0)}var a=void 0!==e.min||void 0!==e.suggestedMin,r=void 0!==e.max||void 0!==e.suggestedMax;void 0!==e.min?t.min=e.min:void 0!==e.suggestedMin&&(null===t.min?t.min=e.suggestedMin:t.min=Math.min(t.min,e.suggestedMin)),void 0!==e.max?t.max=e.max:void 0!==e.suggestedMax&&(null===t.max?t.max=e.suggestedMax:t.max=Math.max(t.max,e.suggestedMax)),a!==r&&t.min>=t.max&&(a?t.max=t.min+1:t.min=t.max-1),t.min===t.max&&(t.max++,e.beginAtZero||t.min--)},getTickLimit:function(){var t,e=this.options.ticks,n=e.stepSize,i=e.maxTicksLimit;return n?t=Math.ceil(this.max/n)-Math.floor(this.min/n)+1:(t=this._computeTickLimit(),i=i||11),i&&(t=Math.min(i,t)),t},_computeTickLimit:function(){return Number.POSITIVE_INFINITY},handleDirectionalChanges:Mn,buildTicks:function(){var t=this,e=t.options.ticks,n=t.getTickLimit(),i={maxTicks:n=Math.max(2,n),min:e.min,max:e.max,precision:e.precision,stepSize:H.valueOrDefault(e.fixedStepSize,e.stepSize)},a=t.ticks=function(t,e){var n,i,a,r,o=[],s=t.stepSize,l=s||1,u=t.maxTicks-1,d=t.min,h=t.max,c=t.precision,f=e.min,g=e.max,p=H.niceNum((g-f)/u/l)*l;if(p<1e-14&&Sn(d)&&Sn(h))return[f,g];(r=Math.ceil(g/p)-Math.floor(f/p))>u&&(p=H.niceNum(r*p/u/l)*l),s||Sn(c)?n=Math.pow(10,H._decimalPlaces(p)):(n=Math.pow(10,c),p=Math.ceil(p*n)/n),i=Math.floor(f/p)*p,a=Math.ceil(g/p)*p,s&&(!Sn(d)&&H.almostWhole(d/p,p/1e3)&&(i=d),!Sn(h)&&H.almostWhole(h/p,p/1e3)&&(a=h)),r=(a-i)/p,r=H.almostEquals(r,Math.round(r),p/1e3)?Math.round(r):Math.ceil(r),i=Math.round(i*n)/n,a=Math.round(a*n)/n,o.push(Sn(d)?i:d);for(var 
m=1;m<r;++m)o.push(Math.round((i+m*p)*n)/n);return o.push(Sn(h)?a:h),o}(i,t);t.handleDirectionalChanges(),t.max=H.max(a),t.min=H.min(a),e.reverse?(a.reverse(),t.start=t.max,t.end=t.min):(t.start=t.min,t.end=t.max)},convertTicksToLabels:function(){var t=this;t.ticksAsNumbers=t.ticks.slice(),t.zeroLineIndex=t.ticks.indexOf(0),yn.prototype.convertTicksToLabels.call(t)},_configure:function(){var t,e=this,n=e.getTicks(),i=e.min,a=e.max;yn.prototype._configure.call(e),e.options.offset&&n.length&&(i-=t=(a-i)/Math.max(n.length-1,1)/2,a+=t),e._startValue=i,e._endValue=a,e._valueRange=a-i}}),Pn={position:"left",ticks:{callback:on.formatters.linear}};function An(t,e,n,i){var a,r,o=t.options,s=function(t,e,n){var i=[n.type,void 0===e&&void 0===n.stack?n.index:"",n.stack].join(".");return void 0===t[i]&&(t[i]={pos:[],neg:[]}),t[i]}(e,o.stacked,n),l=s.pos,u=s.neg,d=i.length;for(a=0;a<d;++a)r=t._parseValue(i[a]),isNaN(r.min)||isNaN(r.max)||n.data[a].hidden||(l[a]=l[a]||0,u[a]=u[a]||0,o.relativePoints?l[a]=100:r.min<0||r.max<0?u[a]+=r.min:l[a]+=r.max)}function Dn(t,e,n){var i,a,r=n.length;for(i=0;i<r;++i)a=t._parseValue(n[i]),isNaN(a.min)||isNaN(a.max)||e.data[i].hidden||(t.min=Math.min(t.min,a.min),t.max=Math.max(t.max,a.max))}var Tn=Cn.extend({determineDataLimits:function(){var t,e,n,i,a=this,r=a.options,o=a.chart.data.datasets,s=a._getMatchingVisibleMetas(),l=r.stacked,u={},d=s.length;if(a.min=Number.POSITIVE_INFINITY,a.max=Number.NEGATIVE_INFINITY,void 0===l)for(t=0;!l&&t<d;++t)l=void 0!==(e=s[t]).stack;for(t=0;t<d;++t)n=o[(e=s[t]).index].data,l?An(a,u,e,n):Dn(a,e,n);H.each(u,(function(t){i=t.pos.concat(t.neg),a.min=Math.min(a.min,H.min(i)),a.max=Math.max(a.max,H.max(i))})),a.min=H.isFinite(a.min)&&!isNaN(a.min)?a.min:0,a.max=H.isFinite(a.max)&&!isNaN(a.max)?a.max:1,a.handleTickRangeOptions()},_computeTickLimit:function(){var t;return 
this.isHorizontal()?Math.ceil(this.width/40):(t=H.options._parseFont(this.options.ticks),Math.ceil(this.height/t.lineHeight))},handleDirectionalChanges:function(){this.isHorizontal()||this.ticks.reverse()},getLabelForIndex:function(t,e){return this._getScaleLabel(this.chart.data.datasets[e].data[t])},getPixelForValue:function(t){return this.getPixelForDecimal((+this.getRightValue(t)-this._startValue)/this._valueRange)},getValueForPixel:function(t){return this._startValue+this.getDecimalForPixel(t)*this._valueRange},getPixelForTick:function(t){var e=this.ticksAsNumbers;return t<0||t>e.length-1?null:this.getPixelForValue(e[t])}}),In=Pn;Tn._defaults=In;var Fn=H.valueOrDefault,On=H.math.log10;var Ln={position:"left",ticks:{callback:on.formatters.logarithmic}};function Rn(t,e){return H.isFinite(t)&&t>=0?t:e}var zn=yn.extend({determineDataLimits:function(){var t,e,n,i,a,r,o=this,s=o.options,l=o.chart,u=l.data.datasets,d=o.isHorizontal();function h(t){return d?t.xAxisID===o.id:t.yAxisID===o.id}o.min=Number.POSITIVE_INFINITY,o.max=Number.NEGATIVE_INFINITY,o.minNotZero=Number.POSITIVE_INFINITY;var c=s.stacked;if(void 0===c)for(t=0;t<u.length;t++)if(e=l.getDatasetMeta(t),l.isDatasetVisible(t)&&h(e)&&void 0!==e.stack){c=!0;break}if(s.stacked||c){var f={};for(t=0;t<u.length;t++){var g=[(e=l.getDatasetMeta(t)).type,void 0===s.stacked&&void 0===e.stack?t:"",e.stack].join(".");if(l.isDatasetVisible(t)&&h(e))for(void 0===f[g]&&(f[g]=[]),a=0,r=(i=u[t].data).length;a<r;a++){var p=f[g];n=o._parseValue(i[a]),isNaN(n.min)||isNaN(n.max)||e.data[a].hidden||n.min<0||n.max<0||(p[a]=p[a]||0,p[a]+=n.max)}}H.each(f,(function(t){if(t.length>0){var e=H.min(t),n=H.max(t);o.min=Math.min(o.min,e),o.max=Math.max(o.max,n)}}))}else 
for(t=0;t<u.length;t++)if(e=l.getDatasetMeta(t),l.isDatasetVisible(t)&&h(e))for(a=0,r=(i=u[t].data).length;a<r;a++)n=o._parseValue(i[a]),isNaN(n.min)||isNaN(n.max)||e.data[a].hidden||n.min<0||n.max<0||(o.min=Math.min(n.min,o.min),o.max=Math.max(n.max,o.max),0!==n.min&&(o.minNotZero=Math.min(n.min,o.minNotZero)));o.min=H.isFinite(o.min)?o.min:null,o.max=H.isFinite(o.max)?o.max:null,o.minNotZero=H.isFinite(o.minNotZero)?o.minNotZero:null,this.handleTickRangeOptions()},handleTickRangeOptions:function(){var t=this,e=t.options.ticks;t.min=Rn(e.min,t.min),t.max=Rn(e.max,t.max),t.min===t.max&&(0!==t.min&&null!==t.min?(t.min=Math.pow(10,Math.floor(On(t.min))-1),t.max=Math.pow(10,Math.floor(On(t.max))+1)):(t.min=1,t.max=10)),null===t.min&&(t.min=Math.pow(10,Math.floor(On(t.max))-1)),null===t.max&&(t.max=0!==t.min?Math.pow(10,Math.floor(On(t.min))+1):10),null===t.minNotZero&&(t.min>0?t.minNotZero=t.min:t.max<1?t.minNotZero=Math.pow(10,Math.floor(On(t.max))):t.minNotZero=1)},buildTicks:function(){var t=this,e=t.options.ticks,n=!t.isHorizontal(),i={min:Rn(e.min),max:Rn(e.max)},a=t.ticks=function(t,e){var n,i,a=[],r=Fn(t.min,Math.pow(10,Math.floor(On(e.min)))),o=Math.floor(On(e.max)),s=Math.ceil(e.max/Math.pow(10,o));0===r?(n=Math.floor(On(e.minNotZero)),i=Math.floor(e.minNotZero/Math.pow(10,n)),a.push(r),r=i*Math.pow(10,n)):(n=Math.floor(On(r)),i=Math.floor(r/Math.pow(10,n)));var l=n<0?Math.pow(10,Math.abs(n)):1;do{a.push(r),10===++i&&(i=1,l=++n>=0?1:l),r=Math.round(i*Math.pow(10,n)*l)/l}while(n<o||n===o&&i<s);var u=Fn(t.max,r);return a.push(u),a}(i,t);t.max=H.max(a),t.min=H.min(a),e.reverse?(n=!n,t.start=t.max,t.end=t.min):(t.start=t.min,t.end=t.max),n&&a.reverse()},convertTicksToLabels:function(){this.tickValues=this.ticks.slice(),yn.prototype.convertTicksToLabels.call(this)},getLabelForIndex:function(t,e){return this._getScaleLabel(this.chart.data.datasets[e].data[t])},getPixelForTick:function(t){var e=this.tickValues;return 
t<0||t>e.length-1?null:this.getPixelForValue(e[t])},_getFirstTickValue:function(t){var e=Math.floor(On(t));return Math.floor(t/Math.pow(10,e))*Math.pow(10,e)},_configure:function(){var t=this,e=t.min,n=0;yn.prototype._configure.call(t),0===e&&(e=t._getFirstTickValue(t.minNotZero),n=Fn(t.options.ticks.fontSize,N.global.defaultFontSize)/t._length),t._startValue=On(e),t._valueOffset=n,t._valueRange=(On(t.max)-On(e))/(1-n)},getPixelForValue:function(t){var e=this,n=0;return(t=+e.getRightValue(t))>e.min&&t>0&&(n=(On(t)-e._startValue)/e._valueRange+e._valueOffset),e.getPixelForDecimal(n)},getValueForPixel:function(t){var e=this,n=e.getDecimalForPixel(t);return 0===n&&0===e.min?0:Math.pow(10,e._startValue+(n-e._valueOffset)*e._valueRange)}}),Nn=Ln;zn._defaults=Nn;var Bn=H.valueOrDefault,En=H.valueAtIndexOrDefault,Wn=H.options.resolve,Vn={display:!0,animate:!0,position:"chartArea",angleLines:{display:!0,color:"rgba(0,0,0,0.1)",lineWidth:1,borderDash:[],borderDashOffset:0},gridLines:{circular:!1},ticks:{showLabelBackdrop:!0,backdropColor:"rgba(255,255,255,0.75)",backdropPaddingY:2,backdropPaddingX:2,callback:on.formatters.linear},pointLabels:{display:!0,fontSize:10,callback:function(t){return t}}};function Hn(t){var e=t.ticks;return e.display&&t.display?Bn(e.fontSize,N.global.defaultFontSize)+2*e.backdropPaddingY:0}function jn(t,e,n,i,a){return t===i||t===a?{start:e-n/2,end:e+n/2}:t<i||t>a?{start:e-n,end:e}:{start:e,end:e+n}}function qn(t){return 0===t||180===t?"center":t<180?"left":"right"}function Un(t,e,n,i){var a,r,o=n.y+i/2;if(H.isArray(e))for(a=0,r=e.length;a<r;++a)t.fillText(e[a],n.x,o),o+=i;else t.fillText(e,n.x,o)}function Yn(t,e,n){90===t||270===t?n.y-=e.h/2:(t>270||t<90)&&(n.y-=e.h)}function Gn(t){return H.isNumber(t)?t:0}var Xn=Cn.extend({setDimensions:function(){var 
t=this;t.width=t.maxWidth,t.height=t.maxHeight,t.paddingTop=Hn(t.options)/2,t.xCenter=Math.floor(t.width/2),t.yCenter=Math.floor((t.height-t.paddingTop)/2),t.drawingArea=Math.min(t.height-t.paddingTop,t.width)/2},determineDataLimits:function(){var t=this,e=t.chart,n=Number.POSITIVE_INFINITY,i=Number.NEGATIVE_INFINITY;H.each(e.data.datasets,(function(a,r){if(e.isDatasetVisible(r)){var o=e.getDatasetMeta(r);H.each(a.data,(function(e,a){var r=+t.getRightValue(e);isNaN(r)||o.data[a].hidden||(n=Math.min(r,n),i=Math.max(r,i))}))}})),t.min=n===Number.POSITIVE_INFINITY?0:n,t.max=i===Number.NEGATIVE_INFINITY?0:i,t.handleTickRangeOptions()},_computeTickLimit:function(){return Math.ceil(this.drawingArea/Hn(this.options))},convertTicksToLabels:function(){var t=this;Cn.prototype.convertTicksToLabels.call(t),t.pointLabels=t.chart.data.labels.map((function(){var e=H.callback(t.options.pointLabels.callback,arguments,t);return e||0===e?e:""}))},getLabelForIndex:function(t,e){return+this.getRightValue(this.chart.data.datasets[e].data[t])},fit:function(){var t=this.options;t.display&&t.pointLabels.display?function(t){var e,n,i,a=H.options._parseFont(t.options.pointLabels),r={l:0,r:t.width,t:0,b:t.height-t.paddingTop},o={};t.ctx.font=a.string,t._pointLabelSizes=[];var s,l,u,d=t.chart.data.labels.length;for(e=0;e<d;e++){i=t.getPointPosition(e,t.drawingArea+5),s=t.ctx,l=a.lineHeight,u=t.pointLabels[e],n=H.isArray(u)?{w:H.longestText(s,s.font,u),h:u.length*l}:{w:s.measureText(u).width,h:l},t._pointLabelSizes[e]=n;var h=t.getIndexAngle(e),c=H.toDegrees(h)%360,f=jn(c,i.x,n.w,0,180),g=jn(c,i.y,n.h,90,270);f.start<r.l&&(r.l=f.start,o.l=h),f.end>r.r&&(r.r=f.end,o.r=h),g.start<r.t&&(r.t=g.start,o.t=h),g.end>r.b&&(r.b=g.end,o.b=h)}t.setReductions(t.drawingArea,r,o)}(this):this.setCenterPoint(0,0,0,0)},setReductions:function(t,e,n){var 
i=this,a=e.l/Math.sin(n.l),r=Math.max(e.r-i.width,0)/Math.sin(n.r),o=-e.t/Math.cos(n.t),s=-Math.max(e.b-(i.height-i.paddingTop),0)/Math.cos(n.b);a=Gn(a),r=Gn(r),o=Gn(o),s=Gn(s),i.drawingArea=Math.min(Math.floor(t-(a+r)/2),Math.floor(t-(o+s)/2)),i.setCenterPoint(a,r,o,s)},setCenterPoint:function(t,e,n,i){var a=this,r=a.width-e-a.drawingArea,o=t+a.drawingArea,s=n+a.drawingArea,l=a.height-a.paddingTop-i-a.drawingArea;a.xCenter=Math.floor((o+r)/2+a.left),a.yCenter=Math.floor((s+l)/2+a.top+a.paddingTop)},getIndexAngle:function(t){var e=this.chart,n=(t*(360/e.data.labels.length)+((e.options||{}).startAngle||0))%360;return(n<0?n+360:n)*Math.PI*2/360},getDistanceFromCenterForValue:function(t){var e=this;if(H.isNullOrUndef(t))return NaN;var n=e.drawingArea/(e.max-e.min);return e.options.ticks.reverse?(e.max-t)*n:(t-e.min)*n},getPointPosition:function(t,e){var n=this.getIndexAngle(t)-Math.PI/2;return{x:Math.cos(n)*e+this.xCenter,y:Math.sin(n)*e+this.yCenter}},getPointPositionForValue:function(t,e){return this.getPointPosition(t,this.getDistanceFromCenterForValue(e))},getBasePosition:function(t){var e=this.min,n=this.max;return this.getPointPositionForValue(t||0,this.beginAtZero?0:e<0&&n<0?n:e>0&&n>0?e:0)},_drawGrid:function(){var t,e,n,i=this,a=i.ctx,r=i.options,o=r.gridLines,s=r.angleLines,l=Bn(s.lineWidth,o.lineWidth),u=Bn(s.color,o.color);if(r.pointLabels.display&&function(t){var e=t.ctx,n=t.options,i=n.pointLabels,a=Hn(n),r=t.getDistanceFromCenterForValue(n.ticks.reverse?t.min:t.max),o=H.options._parseFont(i);e.save(),e.font=o.string,e.textBaseline="middle";for(var s=t.chart.data.labels.length-1;s>=0;s--){var l=0===s?a/2:0,u=t.getPointPosition(s,r+l+5),d=En(i.fontColor,s,N.global.defaultFontColor);e.fillStyle=d;var 
h=t.getIndexAngle(s),c=H.toDegrees(h);e.textAlign=qn(c),Yn(c,t._pointLabelSizes[s],u),Un(e,t.pointLabels[s],u,o.lineHeight)}e.restore()}(i),o.display&&H.each(i.ticks,(function(t,n){0!==n&&(e=i.getDistanceFromCenterForValue(i.ticksAsNumbers[n]),function(t,e,n,i){var a,r=t.ctx,o=e.circular,s=t.chart.data.labels.length,l=En(e.color,i-1),u=En(e.lineWidth,i-1);if((o||s)&&l&&u){if(r.save(),r.strokeStyle=l,r.lineWidth=u,r.setLineDash&&(r.setLineDash(e.borderDash||[]),r.lineDashOffset=e.borderDashOffset||0),r.beginPath(),o)r.arc(t.xCenter,t.yCenter,n,0,2*Math.PI);else{a=t.getPointPosition(0,n),r.moveTo(a.x,a.y);for(var d=1;d<s;d++)a=t.getPointPosition(d,n),r.lineTo(a.x,a.y)}r.closePath(),r.stroke(),r.restore()}}(i,o,e,n))})),s.display&&l&&u){for(a.save(),a.lineWidth=l,a.strokeStyle=u,a.setLineDash&&(a.setLineDash(Wn([s.borderDash,o.borderDash,[]])),a.lineDashOffset=Wn([s.borderDashOffset,o.borderDashOffset,0])),t=i.chart.data.labels.length-1;t>=0;t--)e=i.getDistanceFromCenterForValue(r.ticks.reverse?i.min:i.max),n=i.getPointPosition(t,e),a.beginPath(),a.moveTo(i.xCenter,i.yCenter),a.lineTo(n.x,n.y),a.stroke();a.restore()}},_drawLabels:function(){var t=this,e=t.ctx,n=t.options.ticks;if(n.display){var i,a,r=t.getIndexAngle(0),o=H.options._parseFont(n),s=Bn(n.fontColor,N.global.defaultFontColor);e.save(),e.font=o.string,e.translate(t.xCenter,t.yCenter),e.rotate(r),e.textAlign="center",e.textBaseline="middle",H.each(t.ticks,(function(r,l){(0!==l||n.reverse)&&(i=t.getDistanceFromCenterForValue(t.ticksAsNumbers[l]),n.showLabelBackdrop&&(a=e.measureText(r).width,e.fillStyle=n.backdropColor,e.fillRect(-a/2-n.backdropPaddingX,-i-o.size/2-n.backdropPaddingY,a+2*n.backdropPaddingX,o.size+2*n.backdropPaddingY)),e.fillStyle=s,e.fillText(r,0,-i))})),e.restore()}},_drawTitle:H.noop}),Kn=Vn;Xn._defaults=Kn;var 
Zn=H._deprecated,$n=H.options.resolve,Jn=H.valueOrDefault,Qn=Number.MIN_SAFE_INTEGER||-9007199254740991,ti=Number.MAX_SAFE_INTEGER||9007199254740991,ei={millisecond:{common:!0,size:1,steps:1e3},second:{common:!0,size:1e3,steps:60},minute:{common:!0,size:6e4,steps:60},hour:{common:!0,size:36e5,steps:24},day:{common:!0,size:864e5,steps:30},week:{common:!1,size:6048e5,steps:4},month:{common:!0,size:2628e6,steps:12},quarter:{common:!1,size:7884e6,steps:4},year:{common:!0,size:3154e7}},ni=Object.keys(ei);function ii(t,e){return t-e}function ai(t){return H.valueOrDefault(t.time.min,t.ticks.min)}function ri(t){return H.valueOrDefault(t.time.max,t.ticks.max)}function oi(t,e,n,i){var a=function(t,e,n){for(var i,a,r,o=0,s=t.length-1;o>=0&&o<=s;){if(a=t[(i=o+s>>1)-1]||null,r=t[i],!a)return{lo:null,hi:r};if(r[e]<n)o=i+1;else{if(!(a[e]>n))return{lo:a,hi:r};s=i-1}}return{lo:r,hi:null}}(t,e,n),r=a.lo?a.hi?a.lo:t[t.length-2]:t[0],o=a.lo?a.hi?a.hi:t[t.length-1]:t[1],s=o[e]-r[e],l=s?(n-r[e])/s:0,u=(o[i]-r[i])*l;return r[i]+u}function si(t,e){var n=t._adapter,i=t.options.time,a=i.parser,r=a||i.format,o=e;return"function"==typeof a&&(o=a(o)),H.isFinite(o)||(o="string"==typeof r?n.parse(o,r):n.parse(o)),null!==o?+o:(a||"function"!=typeof r||(o=r(e),H.isFinite(o)||(o=n.parse(o))),o)}function li(t,e){if(H.isNullOrUndef(e))return null;var n=t.options.time,i=si(t,t.getRightValue(e));return null===i?i:(n.round&&(i=+t._adapter.startOf(i,n.round)),i)}function ui(t,e,n,i){var a,r,o,s=ni.length;for(a=ni.indexOf(t);a<s-1;++a)if(o=(r=ei[ni[a]]).steps?r.steps:ti,r.common&&Math.ceil((n-e)/(o*r.size))<=i)return ni[a];return ni[s-1]}function di(t,e,n){var i,a,r=[],o={},s=e.length;for(i=0;i<s;++i)o[a=e[i]]=i,r.push({value:a,major:!1});return 0!==s&&n?function(t,e,n,i){var a,r,o=t._adapter,s=+o.startOf(e[0].value,i),l=e[e.length-1].value;for(a=s;a<=l;a=+o.add(a,1,i))(r=n[a])>=0&&(e[r].major=!0);return e}(t,r,o,n):r}var 
hi=yn.extend({initialize:function(){this.mergeTicksOptions(),yn.prototype.initialize.call(this)},update:function(){var t=this,e=t.options,n=e.time||(e.time={}),i=t._adapter=new rn._date(e.adapters.date);return Zn("time scale",n.format,"time.format","time.parser"),Zn("time scale",n.min,"time.min","ticks.min"),Zn("time scale",n.max,"time.max","ticks.max"),H.mergeIf(n.displayFormats,i.formats()),yn.prototype.update.apply(t,arguments)},getRightValue:function(t){return t&&void 0!==t.t&&(t=t.t),yn.prototype.getRightValue.call(this,t)},determineDataLimits:function(){var t,e,n,i,a,r,o,s=this,l=s.chart,u=s._adapter,d=s.options,h=d.time.unit||"day",c=ti,f=Qn,g=[],p=[],m=[],v=s._getLabels();for(t=0,n=v.length;t<n;++t)m.push(li(s,v[t]));for(t=0,n=(l.data.datasets||[]).length;t<n;++t)if(l.isDatasetVisible(t))if(a=l.data.datasets[t].data,H.isObject(a[0]))for(p[t]=[],e=0,i=a.length;e<i;++e)r=li(s,a[e]),g.push(r),p[t][e]=r;else p[t]=m.slice(0),o||(g=g.concat(m),o=!0);else p[t]=[];m.length&&(c=Math.min(c,m[0]),f=Math.max(f,m[m.length-1])),g.length&&(g=n>1?function(t){var e,n,i,a={},r=[];for(e=0,n=t.length;e<n;++e)a[i=t[e]]||(a[i]=!0,r.push(i));return r}(g).sort(ii):g.sort(ii),c=Math.min(c,g[0]),f=Math.max(f,g[g.length-1])),c=li(s,ai(d))||c,f=li(s,ri(d))||f,c=c===ti?+u.startOf(Date.now(),h):c,f=f===Qn?+u.endOf(Date.now(),h)+1:f,s.min=Math.min(c,f),s.max=Math.max(c+1,f),s._table=[],s._timestamps={data:g,datasets:p,labels:m}},buildTicks:function(){var t,e,n,i=this,a=i.min,r=i.max,o=i.options,s=o.ticks,l=o.time,u=i._timestamps,d=[],h=i.getLabelCapacity(a),c=s.source,f=o.distribution;for(u="data"===c||"auto"===c&&"series"===f?u.data:"labels"===c?u.labels:function(t,e,n,i){var a,r=t._adapter,o=t.options,s=o.time,l=s.unit||ui(s.minUnit,e,n,i),u=$n([s.stepSize,s.unitStepSize,1]),d="week"===l&&s.isoWeekday,h=e,c=[];if(d&&(h=+r.startOf(h,"isoWeek",d)),h=+r.startOf(h,d?"day":l),r.diff(n,e,l)>1e5*u)throw e+" and "+n+" are too far apart with stepSize of "+u+" 
"+l;for(a=h;a<n;a=+r.add(a,u,l))c.push(a);return a!==n&&"ticks"!==o.bounds||c.push(a),c}(i,a,r,h),"ticks"===o.bounds&&u.length&&(a=u[0],r=u[u.length-1]),a=li(i,ai(o))||a,r=li(i,ri(o))||r,t=0,e=u.length;t<e;++t)(n=u[t])>=a&&n<=r&&d.push(n);return i.min=a,i.max=r,i._unit=l.unit||(s.autoSkip?ui(l.minUnit,i.min,i.max,h):function(t,e,n,i,a){var r,o;for(r=ni.length-1;r>=ni.indexOf(n);r--)if(o=ni[r],ei[o].common&&t._adapter.diff(a,i,o)>=e-1)return o;return ni[n?ni.indexOf(n):0]}(i,d.length,l.minUnit,i.min,i.max)),i._majorUnit=s.major.enabled&&"year"!==i._unit?function(t){for(var e=ni.indexOf(t)+1,n=ni.length;e<n;++e)if(ei[ni[e]].common)return ni[e]}(i._unit):void 0,i._table=function(t,e,n,i){if("linear"===i||!t.length)return[{time:e,pos:0},{time:n,pos:1}];var a,r,o,s,l,u=[],d=[e];for(a=0,r=t.length;a<r;++a)(s=t[a])>e&&s<n&&d.push(s);for(d.push(n),a=0,r=d.length;a<r;++a)l=d[a+1],o=d[a-1],s=d[a],void 0!==o&&void 0!==l&&Math.round((l+o)/2)===s||u.push({time:s,pos:a/(r-1)});return u}(i._timestamps.data,a,r,f),i._offsets=function(t,e,n,i,a){var r,o,s=0,l=0;return a.offset&&e.length&&(r=oi(t,"time",e[0],"pos"),s=1===e.length?1-r:(oi(t,"time",e[1],"pos")-r)/2,o=oi(t,"time",e[e.length-1],"pos"),l=1===e.length?o:(o-oi(t,"time",e[e.length-2],"pos"))/2),{start:s,end:l,factor:1/(s+1+l)}}(i._table,d,0,0,o),s.reverse&&d.reverse(),di(i,d,i._majorUnit)},getLabelForIndex:function(t,e){var n=this,i=n._adapter,a=n.chart.data,r=n.options.time,o=a.labels&&t<a.labels.length?a.labels[t]:"",s=a.datasets[e].data[t];return H.isObject(s)&&(o=n.getRightValue(s)),r.tooltipFormat?i.format(si(n,o),r.tooltipFormat):"string"==typeof o?o:i.format(si(n,o),r.displayFormats.datetime)},tickFormatFunction:function(t,e,n,i){var a=this._adapter,r=this.options,o=r.time.displayFormats,s=o[this._unit],l=this._majorUnit,u=o[l],d=n[e],h=r.ticks,c=l&&u&&d&&d.major,f=a.format(t,i||(c?u:s)),g=c?h.major:h.minor,p=$n([g.callback,g.userCallback,h.callback,h.userCallback]);return 
p?p(f,e,n):f},convertTicksToLabels:function(t){var e,n,i=[];for(e=0,n=t.length;e<n;++e)i.push(this.tickFormatFunction(t[e].value,e,t));return i},getPixelForOffset:function(t){var e=this._offsets,n=oi(this._table,"time",t,"pos");return this.getPixelForDecimal((e.start+n)*e.factor)},getPixelForValue:function(t,e,n){var i=null;if(void 0!==e&&void 0!==n&&(i=this._timestamps.datasets[n][e]),null===i&&(i=li(this,t)),null!==i)return this.getPixelForOffset(i)},getPixelForTick:function(t){var e=this.getTicks();return t>=0&&t<e.length?this.getPixelForOffset(e[t].value):null},getValueForPixel:function(t){var e=this._offsets,n=this.getDecimalForPixel(t)/e.factor-e.end,i=oi(this._table,"pos",n,"time");return this._adapter._create(i)},_getLabelSize:function(t){var e=this.options.ticks,n=this.ctx.measureText(t).width,i=H.toRadians(this.isHorizontal()?e.maxRotation:e.minRotation),a=Math.cos(i),r=Math.sin(i),o=Jn(e.fontSize,N.global.defaultFontSize);return{w:n*a+o*r,h:n*r+o*a}},getLabelWidth:function(t){return this._getLabelSize(t).w},getLabelCapacity:function(t){var e=this,n=e.options.time,i=n.displayFormats,a=i[n.unit]||i.millisecond,r=e.tickFormatFunction(t,0,di(e,[t],e._majorUnit),a),o=e._getLabelSize(r),s=Math.floor(e.isHorizontal()?e.width/o.w:e.height/o.h);return e.options.offset&&s--,s>0?s:1}}),ci={position:"bottom",distribution:"linear",bounds:"data",adapters:{},time:{parser:!1,unit:!1,round:!1,displayFormat:!1,isoWeekday:!1,minUnit:"millisecond",displayFormats:{}},ticks:{autoSkip:!1,source:"auto",major:{enabled:!1}}};hi._defaults=ci;var fi={category:kn,linear:Tn,logarithmic:zn,radialLinear:Xn,time:hi},gi={datetime:"MMM D, YYYY, h:mm:ss a",millisecond:"h:mm:ss.SSS a",second:"h:mm:ss a",minute:"h:mm a",hour:"hA",day:"MMM D",week:"ll",month:"MMM YYYY",quarter:"[Q]Q - YYYY",year:"YYYY"};rn._date.override("function"==typeof t?{_id:"moment",formats:function(){return gi},parse:function(e,n){return"string"==typeof e&&"string"==typeof n?e=t(e,n):e instanceof 
t||(e=t(e)),e.isValid()?e.valueOf():null},format:function(e,n){return t(e).format(n)},add:function(e,n,i){return t(e).add(n,i).valueOf()},diff:function(e,n,i){return t(e).diff(t(n),i)},startOf:function(e,n,i){return e=t(e),"isoWeek"===n?e.isoWeekday(i).valueOf():e.startOf(n).valueOf()},endOf:function(e,n){return t(e).endOf(n).valueOf()},_create:function(e){return t(e)}}:{}),N._set("global",{plugins:{filler:{propagate:!0}}});var pi={dataset:function(t){var e=t.fill,n=t.chart,i=n.getDatasetMeta(e),a=i&&n.isDatasetVisible(e)&&i.dataset._children||[],r=a.length||0;return r?function(t,e){return e<r&&a[e]._view||null}:null},boundary:function(t){var e=t.boundary,n=e?e.x:null,i=e?e.y:null;return H.isArray(e)?function(t,n){return e[n]}:function(t){return{x:null===n?t.x:n,y:null===i?t.y:i}}}};function mi(t,e,n){var i,a=t._model||{},r=a.fill;if(void 0===r&&(r=!!a.backgroundColor),!1===r||null===r)return!1;if(!0===r)return"origin";if(i=parseFloat(r,10),isFinite(i)&&Math.floor(i)===i)return"-"!==r[0]&&"+"!==r[0]||(i=e+i),!(i===e||i<0||i>=n)&&i;switch(r){case"bottom":return"start";case"top":return"end";case"zero":return"origin";case"origin":case"start":case"end":return r;default:return!1}}function vi(t){return(t.el._scale||{}).getPointPositionForValue?function(t){var e,n,i,a,r,o=t.el._scale,s=o.options,l=o.chart.data.labels.length,u=t.fill,d=[];if(!l)return null;for(e=s.ticks.reverse?o.max:o.min,n=s.ticks.reverse?o.min:o.max,i=o.getPointPositionForValue(0,e),a=0;a<l;++a)r="start"===u||"end"===u?o.getPointPositionForValue(a,"start"===u?e:n):o.getBasePosition(a),s.gridLines.circular&&(r.cx=i.x,r.cy=i.y,r.angle=o.getIndexAngle(a)-Math.PI/2),d.push(r);return d}(t):function(t){var e,n=t.el._model||{},i=t.el._scale||{},a=t.fill,r=null;if(isFinite(a))return null;if("start"===a?r=void 0===n.scaleBottom?i.bottom:n.scaleBottom:"end"===a?r=void 0===n.scaleTop?i.top:n.scaleTop:void 0!==n.scaleZero?r=n.scaleZero:i.getBasePixel&&(r=i.getBasePixel()),null!=r){if(void 0!==r.x&&void 
0!==r.y)return r;if(H.isFinite(r))return{x:(e=i.isHorizontal())?r:null,y:e?null:r}}return null}(t)}function bi(t,e,n){var i,a=t[e].fill,r=[e];if(!n)return a;for(;!1!==a&&-1===r.indexOf(a);){if(!isFinite(a))return a;if(!(i=t[a]))return!1;if(i.visible)return a;r.push(a),a=i.fill}return!1}function xi(t){var e=t.fill,n="dataset";return!1===e?null:(isFinite(e)||(n="boundary"),pi[n](t))}function yi(t){return t&&!t.skip}function _i(t,e,n,i,a){var r,o,s,l;if(i&&a){for(t.moveTo(e[0].x,e[0].y),r=1;r<i;++r)H.canvas.lineTo(t,e[r-1],e[r]);if(void 0===n[0].angle)for(t.lineTo(n[a-1].x,n[a-1].y),r=a-1;r>0;--r)H.canvas.lineTo(t,n[r],n[r-1],!0);else for(o=n[0].cx,s=n[0].cy,l=Math.sqrt(Math.pow(n[0].x-o,2)+Math.pow(n[0].y-s,2)),r=a-1;r>0;--r)t.arc(o,s,l,n[r].angle,n[r-1].angle,!0)}}function ki(t,e,n,i,a,r){var o,s,l,u,d,h,c,f,g=e.length,p=i.spanGaps,m=[],v=[],b=0,x=0;for(t.beginPath(),o=0,s=g;o<s;++o)d=n(u=e[l=o%g]._view,l,i),h=yi(u),c=yi(d),r&&void 0===f&&h&&(s=g+(f=o+1)),h&&c?(b=m.push(u),x=v.push(d)):b&&x&&(p?(h&&m.push(u),c&&v.push(d)):(_i(t,m,v,b,x),b=x=0,m=[],v=[]));_i(t,m,v,b,x),t.closePath(),t.fillStyle=a,t.fill()}var wi={id:"filler",afterDatasetsUpdate:function(t,e){var n,i,a,r,o=(t.data.datasets||[]).length,s=e.propagate,l=[];for(i=0;i<o;++i)r=null,(a=(n=t.getDatasetMeta(i)).dataset)&&a._model&&a instanceof kt.Line&&(r={visible:t.isDatasetVisible(i),fill:mi(a,i,o),chart:t,el:a}),n.$filler=r,l.push(r);for(i=0;i<o;++i)(r=l[i])&&(r.fill=bi(l,i,s),r.boundary=vi(r),r.mapper=xi(r))},beforeDatasetsDraw:function(t){var e,n,i,a,r,o,s,l=t._getSortedVisibleDatasetMetas(),u=t.ctx;for(n=l.length-1;n>=0;--n)(e=l[n].$filler)&&e.visible&&(a=(i=e.el)._view,r=i._children||[],o=e.mapper,s=a.backgroundColor||N.global.defaultColor,o&&s&&r.length&&(H.canvas.clipArea(u,t.chartArea),ki(u,r,o,a,s,i._loop),H.canvas.unclipArea(u)))}},Mi=H.rtl.getRtlAdapter,Si=H.noop,Ci=H.valueOrDefault;function Pi(t,e){return 
t.usePointStyle&&t.boxWidth>e?e:t.boxWidth}N._set("global",{legend:{display:!0,position:"top",align:"center",fullWidth:!0,reverse:!1,weight:1e3,onClick:function(t,e){var n=e.datasetIndex,i=this.chart,a=i.getDatasetMeta(n);a.hidden=null===a.hidden?!i.data.datasets[n].hidden:null,i.update()},onHover:null,onLeave:null,labels:{boxWidth:40,padding:10,generateLabels:function(t){var e=t.data.datasets,n=t.options.legend||{},i=n.labels&&n.labels.usePointStyle;return t._getSortedDatasetMetas().map((function(n){var a=n.controller.getStyle(i?0:void 0);return{text:e[n.index].label,fillStyle:a.backgroundColor,hidden:!t.isDatasetVisible(n.index),lineCap:a.borderCapStyle,lineDash:a.borderDash,lineDashOffset:a.borderDashOffset,lineJoin:a.borderJoinStyle,lineWidth:a.borderWidth,strokeStyle:a.borderColor,pointStyle:a.pointStyle,rotation:a.rotation,datasetIndex:n.index}}),this)}}},legendCallback:function(t){var e,n,i,a=document.createElement("ul"),r=t.data.datasets;for(a.setAttribute("class",t.id+"-legend"),e=0,n=r.length;e<n;e++)(i=a.appendChild(document.createElement("li"))).appendChild(document.createElement("span")).style.backgroundColor=r[e].backgroundColor,r[e].label&&i.appendChild(document.createTextNode(r[e].label));return a.outerHTML}});var Ai=K.extend({initialize:function(t){H.extend(this,t),this.legendHitBoxes=[],this._hoveredItem=null,this.doughnutMode=!1},beforeUpdate:Si,update:function(t,e,n){var i=this;return i.beforeUpdate(),i.maxWidth=t,i.maxHeight=e,i.margins=n,i.beforeSetDimensions(),i.setDimensions(),i.afterSetDimensions(),i.beforeBuildLabels(),i.buildLabels(),i.afterBuildLabels(),i.beforeFit(),i.fit(),i.afterFit(),i.afterUpdate(),i.minSize},afterUpdate:Si,beforeSetDimensions:Si,setDimensions:function(){var 
t=this;t.isHorizontal()?(t.width=t.maxWidth,t.left=0,t.right=t.width):(t.height=t.maxHeight,t.top=0,t.bottom=t.height),t.paddingLeft=0,t.paddingTop=0,t.paddingRight=0,t.paddingBottom=0,t.minSize={width:0,height:0}},afterSetDimensions:Si,beforeBuildLabels:Si,buildLabels:function(){var t=this,e=t.options.labels||{},n=H.callback(e.generateLabels,[t.chart],t)||[];e.filter&&(n=n.filter((function(n){return e.filter(n,t.chart.data)}))),t.options.reverse&&n.reverse(),t.legendItems=n},afterBuildLabels:Si,beforeFit:Si,fit:function(){var t=this,e=t.options,n=e.labels,i=e.display,a=t.ctx,r=H.options._parseFont(n),o=r.size,s=t.legendHitBoxes=[],l=t.minSize,u=t.isHorizontal();if(u?(l.width=t.maxWidth,l.height=i?10:0):(l.width=i?10:0,l.height=t.maxHeight),i){if(a.font=r.string,u){var d=t.lineWidths=[0],h=0;a.textAlign="left",a.textBaseline="middle",H.each(t.legendItems,(function(t,e){var i=Pi(n,o)+o/2+a.measureText(t.text).width;(0===e||d[d.length-1]+i+2*n.padding>l.width)&&(h+=o+n.padding,d[d.length-(e>0?0:1)]=0),s[e]={left:0,top:0,width:i,height:o},d[d.length-1]+=i+n.padding})),l.height+=h}else{var c=n.padding,f=t.columnWidths=[],g=t.columnHeights=[],p=n.padding,m=0,v=0;H.each(t.legendItems,(function(t,e){var i=Pi(n,o)+o/2+a.measureText(t.text).width;e>0&&v+o+2*c>l.height&&(p+=m+n.padding,f.push(m),g.push(v),m=0,v=0),m=Math.max(m,i),v+=o+c,s[e]={left:0,top:0,width:i,height:o}})),p+=m,f.push(m),g.push(v),l.width+=p}t.width=l.width,t.height=l.height}else t.width=l.width=t.height=l.height=0},afterFit:Si,isHorizontal:function(){return"top"===this.options.position||"bottom"===this.options.position},draw:function(){var t=this,e=t.options,n=e.labels,i=N.global,a=i.defaultColor,r=i.elements.line,o=t.height,s=t.columnHeights,l=t.width,u=t.lineWidths;if(e.display){var 
d,h=Mi(e.rtl,t.left,t.minSize.width),c=t.ctx,f=Ci(n.fontColor,i.defaultFontColor),g=H.options._parseFont(n),p=g.size;c.textAlign=h.textAlign("left"),c.textBaseline="middle",c.lineWidth=.5,c.strokeStyle=f,c.fillStyle=f,c.font=g.string;var m=Pi(n,p),v=t.legendHitBoxes,b=function(t,i){switch(e.align){case"start":return n.padding;case"end":return t-i;default:return(t-i+n.padding)/2}},x=t.isHorizontal();d=x?{x:t.left+b(l,u[0]),y:t.top+n.padding,line:0}:{x:t.left+n.padding,y:t.top+b(o,s[0]),line:0},H.rtl.overrideTextDirection(t.ctx,e.textDirection);var y=p+n.padding;H.each(t.legendItems,(function(e,i){var f=c.measureText(e.text).width,g=m+p/2+f,_=d.x,k=d.y;h.setWidth(t.minSize.width),x?i>0&&_+g+n.padding>t.left+t.minSize.width&&(k=d.y+=y,d.line++,_=d.x=t.left+b(l,u[d.line])):i>0&&k+y>t.top+t.minSize.height&&(_=d.x=_+t.columnWidths[d.line]+n.padding,d.line++,k=d.y=t.top+b(o,s[d.line]));var w=h.x(_);!function(t,e,i){if(!(isNaN(m)||m<=0)){c.save();var o=Ci(i.lineWidth,r.borderWidth);if(c.fillStyle=Ci(i.fillStyle,a),c.lineCap=Ci(i.lineCap,r.borderCapStyle),c.lineDashOffset=Ci(i.lineDashOffset,r.borderDashOffset),c.lineJoin=Ci(i.lineJoin,r.borderJoinStyle),c.lineWidth=o,c.strokeStyle=Ci(i.strokeStyle,a),c.setLineDash&&c.setLineDash(Ci(i.lineDash,r.borderDash)),n&&n.usePointStyle){var s=m*Math.SQRT2/2,l=h.xPlus(t,m/2),u=e+p/2;H.canvas.drawPoint(c,i.pointStyle,s,l,u,i.rotation)}else c.fillRect(h.leftForLtr(t,m),e,m,p),0!==o&&c.strokeRect(h.leftForLtr(t,m),e,m,p);c.restore()}}(w,k,e),v[i].left=h.leftForLtr(w,v[i].width),v[i].top=k,function(t,e,n,i){var a=p/2,r=h.xPlus(t,m+a),o=e+a;c.fillText(n.text,r,o),n.hidden&&(c.beginPath(),c.lineWidth=2,c.moveTo(r,o),c.lineTo(h.xPlus(r,i),o),c.stroke())}(w,k,e,f),x?d.x+=g+n.padding:d.y+=y})),H.rtl.restoreTextDirection(t.ctx,e.textDirection)}},_getLegendItemAt:function(t,e){var 
n,i,a,r=this;if(t>=r.left&&t<=r.right&&e>=r.top&&e<=r.bottom)for(a=r.legendHitBoxes,n=0;n<a.length;++n)if(t>=(i=a[n]).left&&t<=i.left+i.width&&e>=i.top&&e<=i.top+i.height)return r.legendItems[n];return null},handleEvent:function(t){var e,n=this,i=n.options,a="mouseup"===t.type?"click":t.type;if("mousemove"===a){if(!i.onHover&&!i.onLeave)return}else{if("click"!==a)return;if(!i.onClick)return}e=n._getLegendItemAt(t.x,t.y),"click"===a?e&&i.onClick&&i.onClick.call(n,t.native,e):(i.onLeave&&e!==n._hoveredItem&&(n._hoveredItem&&i.onLeave.call(n,t.native,n._hoveredItem),n._hoveredItem=e),i.onHover&&e&&i.onHover.call(n,t.native,e))}});function Di(t,e){var n=new Ai({ctx:t.ctx,options:e,chart:t});pe.configure(t,n,e),pe.addBox(t,n),t.legend=n}var Ti={id:"legend",_element:Ai,beforeInit:function(t){var e=t.options.legend;e&&Di(t,e)},beforeUpdate:function(t){var e=t.options.legend,n=t.legend;e?(H.mergeIf(e,N.global.legend),n?(pe.configure(t,n,e),n.options=e):Di(t,e)):n&&(pe.removeBox(t,n),delete t.legend)},afterEvent:function(t,e){var n=t.legend;n&&n.handleEvent(e)}},Ii=H.noop;N._set("global",{title:{display:!1,fontStyle:"bold",fullWidth:!0,padding:10,position:"top",text:"",weight:2e3}});var Fi=K.extend({initialize:function(t){H.extend(this,t),this.legendHitBoxes=[]},beforeUpdate:Ii,update:function(t,e,n){var i=this;return i.beforeUpdate(),i.maxWidth=t,i.maxHeight=e,i.margins=n,i.beforeSetDimensions(),i.setDimensions(),i.afterSetDimensions(),i.beforeBuildLabels(),i.buildLabels(),i.afterBuildLabels(),i.beforeFit(),i.fit(),i.afterFit(),i.afterUpdate(),i.minSize},afterUpdate:Ii,beforeSetDimensions:Ii,setDimensions:function(){var t=this;t.isHorizontal()?(t.width=t.maxWidth,t.left=0,t.right=t.width):(t.height=t.maxHeight,t.top=0,t.bottom=t.height),t.paddingLeft=0,t.paddingTop=0,t.paddingRight=0,t.paddingBottom=0,t.minSize={width:0,height:0}},afterSetDimensions:Ii,beforeBuildLabels:Ii,buildLabels:Ii,afterBuildLabels:Ii,beforeFit:Ii,fit:function(){var 
t,e=this,n=e.options,i=e.minSize={},a=e.isHorizontal();n.display?(t=(H.isArray(n.text)?n.text.length:1)*H.options._parseFont(n).lineHeight+2*n.padding,e.width=i.width=a?e.maxWidth:t,e.height=i.height=a?t:e.maxHeight):e.width=i.width=e.height=i.height=0},afterFit:Ii,isHorizontal:function(){var t=this.options.position;return"top"===t||"bottom"===t},draw:function(){var t=this,e=t.ctx,n=t.options;if(n.display){var i,a,r,o=H.options._parseFont(n),s=o.lineHeight,l=s/2+n.padding,u=0,d=t.top,h=t.left,c=t.bottom,f=t.right;e.fillStyle=H.valueOrDefault(n.fontColor,N.global.defaultFontColor),e.font=o.string,t.isHorizontal()?(a=h+(f-h)/2,r=d+l,i=f-h):(a="left"===n.position?h+l:f-l,r=d+(c-d)/2,i=c-d,u=Math.PI*("left"===n.position?-.5:.5)),e.save(),e.translate(a,r),e.rotate(u),e.textAlign="center",e.textBaseline="middle";var g=n.text;if(H.isArray(g))for(var p=0,m=0;m<g.length;++m)e.fillText(g[m],0,p,i),p+=s;else e.fillText(g,0,0,i);e.restore()}}});function Oi(t,e){var n=new Fi({ctx:t.ctx,options:e,chart:t});pe.configure(t,n,e),pe.addBox(t,n),t.titleBlock=n}var Li={},Ri=wi,zi=Ti,Ni={id:"title",_element:Fi,beforeInit:function(t){var e=t.options.title;e&&Oi(t,e)},beforeUpdate:function(t){var e=t.options.title,n=t.titleBlock;e?(H.mergeIf(e,N.global.title),n?(pe.configure(t,n,e),n.options=e):Oi(t,e)):n&&(pe.removeBox(t,n),delete t.titleBlock)}};for(var Bi in Li.filler=Ri,Li.legend=zi,Li.title=Ni,en.helpers=H,function(){function t(t,e,n){var i;return"string"==typeof t?(i=parseInt(t,10),-1!==t.indexOf("%")&&(i=i/100*e.parentNode[n])):i=t,i}function e(t){return null!=t&&"none"!==t}function n(n,i,a){var r=document.defaultView,o=H._getParentNode(n),s=r.getComputedStyle(n)[i],l=r.getComputedStyle(o)[i],u=e(s),d=e(l),h=Number.POSITIVE_INFINITY;return u||d?Math.min(u?t(s,n,a):h,d?t(l,o,a):h):"none"}H.where=function(t,e){if(H.isArray(t)&&Array.prototype.filter)return t.filter(e);var n=[];return 
H.each(t,(function(t){e(t)&&n.push(t)})),n},H.findIndex=Array.prototype.findIndex?function(t,e,n){return t.findIndex(e,n)}:function(t,e,n){n=void 0===n?t:n;for(var i=0,a=t.length;i<a;++i)if(e.call(n,t[i],i,t))return i;return-1},H.findNextWhere=function(t,e,n){H.isNullOrUndef(n)&&(n=-1);for(var i=n+1;i<t.length;i++){var a=t[i];if(e(a))return a}},H.findPreviousWhere=function(t,e,n){H.isNullOrUndef(n)&&(n=t.length);for(var i=n-1;i>=0;i--){var a=t[i];if(e(a))return a}},H.isNumber=function(t){return!isNaN(parseFloat(t))&&isFinite(t)},H.almostEquals=function(t,e,n){return Math.abs(t-e)<n},H.almostWhole=function(t,e){var n=Math.round(t);return n-e<=t&&n+e>=t},H.max=function(t){return t.reduce((function(t,e){return isNaN(e)?t:Math.max(t,e)}),Number.NEGATIVE_INFINITY)},H.min=function(t){return t.reduce((function(t,e){return isNaN(e)?t:Math.min(t,e)}),Number.POSITIVE_INFINITY)},H.sign=Math.sign?function(t){return Math.sign(t)}:function(t){return 0===(t=+t)||isNaN(t)?t:t>0?1:-1},H.toRadians=function(t){return t*(Math.PI/180)},H.toDegrees=function(t){return t*(180/Math.PI)},H._decimalPlaces=function(t){if(H.isFinite(t)){for(var e=1,n=0;Math.round(t*e)/e!==t;)e*=10,n++;return n}},H.getAngleFromPoint=function(t,e){var n=e.x-t.x,i=e.y-t.y,a=Math.sqrt(n*n+i*i),r=Math.atan2(i,n);return r<-.5*Math.PI&&(r+=2*Math.PI),{angle:r,distance:a}},H.distanceBetweenPoints=function(t,e){return Math.sqrt(Math.pow(e.x-t.x,2)+Math.pow(e.y-t.y,2))},H.aliasPixel=function(t){return t%2==0?0:.5},H._alignPixel=function(t,e,n){var i=t.currentDevicePixelRatio,a=n/2;return Math.round((e-a)*i)/i+a},H.splineCurve=function(t,e,n,i){var a=t.skip?e:t,r=e,o=n.skip?e:n,s=Math.sqrt(Math.pow(r.x-a.x,2)+Math.pow(r.y-a.y,2)),l=Math.sqrt(Math.pow(o.x-r.x,2)+Math.pow(o.y-r.y,2)),u=s/(s+l),d=l/(s+l),h=i*(u=isNaN(u)?0:u),c=i*(d=isNaN(d)?0:d);return{previous:{x:r.x-h*(o.x-a.x),y:r.y-h*(o.y-a.y)},next:{x:r.x+c*(o.x-a.x),y:r.y+c*(o.y-a.y)}}},H.EPSILON=Number.EPSILON||1e-14,H.splineCurveMonotone=function(t){var 
e,n,i,a,r,o,s,l,u,d=(t||[]).map((function(t){return{model:t._model,deltaK:0,mK:0}})),h=d.length;for(e=0;e<h;++e)if(!(i=d[e]).model.skip){if(n=e>0?d[e-1]:null,(a=e<h-1?d[e+1]:null)&&!a.model.skip){var c=a.model.x-i.model.x;i.deltaK=0!==c?(a.model.y-i.model.y)/c:0}!n||n.model.skip?i.mK=i.deltaK:!a||a.model.skip?i.mK=n.deltaK:this.sign(n.deltaK)!==this.sign(i.deltaK)?i.mK=0:i.mK=(n.deltaK+i.deltaK)/2}for(e=0;e<h-1;++e)i=d[e],a=d[e+1],i.model.skip||a.model.skip||(H.almostEquals(i.deltaK,0,this.EPSILON)?i.mK=a.mK=0:(r=i.mK/i.deltaK,o=a.mK/i.deltaK,(l=Math.pow(r,2)+Math.pow(o,2))<=9||(s=3/Math.sqrt(l),i.mK=r*s*i.deltaK,a.mK=o*s*i.deltaK)));for(e=0;e<h;++e)(i=d[e]).model.skip||(n=e>0?d[e-1]:null,a=e<h-1?d[e+1]:null,n&&!n.model.skip&&(u=(i.model.x-n.model.x)/3,i.model.controlPointPreviousX=i.model.x-u,i.model.controlPointPreviousY=i.model.y-u*i.mK),a&&!a.model.skip&&(u=(a.model.x-i.model.x)/3,i.model.controlPointNextX=i.model.x+u,i.model.controlPointNextY=i.model.y+u*i.mK))},H.nextItem=function(t,e,n){return n?e>=t.length-1?t[0]:t[e+1]:e>=t.length-1?t[t.length-1]:t[e+1]},H.previousItem=function(t,e,n){return n?e<=0?t[t.length-1]:t[e-1]:e<=0?t[0]:t[e-1]},H.niceNum=function(t,e){var n=Math.floor(H.log10(t)),i=t/Math.pow(10,n);return(e?i<1.5?1:i<3?2:i<7?5:10:i<=1?1:i<=2?2:i<=5?5:10)*Math.pow(10,n)},H.requestAnimFrame="undefined"==typeof window?function(t){t()}:window.requestAnimationFrame||window.webkitRequestAnimationFrame||window.mozRequestAnimationFrame||window.oRequestAnimationFrame||window.msRequestAnimationFrame||function(t){return window.setTimeout(t,1e3/60)},H.getRelativePosition=function(t,e){var n,i,a=t.originalEvent||t,r=t.target||t.srcElement,o=r.getBoundingClientRect(),s=a.touches;s&&s.length>0?(n=s[0].clientX,i=s[0].clientY):(n=a.clientX,i=a.clientY);var 
l=parseFloat(H.getStyle(r,"padding-left")),u=parseFloat(H.getStyle(r,"padding-top")),d=parseFloat(H.getStyle(r,"padding-right")),h=parseFloat(H.getStyle(r,"padding-bottom")),c=o.right-o.left-l-d,f=o.bottom-o.top-u-h;return{x:n=Math.round((n-o.left-l)/c*r.width/e.currentDevicePixelRatio),y:i=Math.round((i-o.top-u)/f*r.height/e.currentDevicePixelRatio)}},H.getConstraintWidth=function(t){return n(t,"max-width","clientWidth")},H.getConstraintHeight=function(t){return n(t,"max-height","clientHeight")},H._calculatePadding=function(t,e,n){return(e=H.getStyle(t,e)).indexOf("%")>-1?n*parseInt(e,10)/100:parseInt(e,10)},H._getParentNode=function(t){var e=t.parentNode;return e&&"[object ShadowRoot]"===e.toString()&&(e=e.host),e},H.getMaximumWidth=function(t){var e=H._getParentNode(t);if(!e)return t.clientWidth;var n=e.clientWidth,i=n-H._calculatePadding(e,"padding-left",n)-H._calculatePadding(e,"padding-right",n),a=H.getConstraintWidth(t);return isNaN(a)?i:Math.min(i,a)},H.getMaximumHeight=function(t){var e=H._getParentNode(t);if(!e)return t.clientHeight;var n=e.clientHeight,i=n-H._calculatePadding(e,"padding-top",n)-H._calculatePadding(e,"padding-bottom",n),a=H.getConstraintHeight(t);return isNaN(a)?i:Math.min(i,a)},H.getStyle=function(t,e){return t.currentStyle?t.currentStyle[e]:document.defaultView.getComputedStyle(t,null).getPropertyValue(e)},H.retinaScale=function(t,e){var n=t.currentDevicePixelRatio=e||"undefined"!=typeof window&&window.devicePixelRatio||1;if(1!==n){var i=t.canvas,a=t.height,r=t.width;i.height=a*n,i.width=r*n,t.ctx.scale(n,n),i.style.height||i.style.width||(i.style.height=a+"px",i.style.width=r+"px")}},H.fontString=function(t,e,n){return e+" "+t+"px "+n},H.longestText=function(t,e,n,i){var a=(i=i||{}).data=i.data||{},r=i.garbageCollect=i.garbageCollect||[];i.font!==e&&(a=i.data={},r=i.garbageCollect=[],i.font=e),t.font=e;var o,s,l,u,d,h=0,c=n.length;for(o=0;o<c;o++)if(null!=(u=n[o])&&!0!==H.isArray(u))h=H.measureText(t,a,r,h,u);else 
if(H.isArray(u))for(s=0,l=u.length;s<l;s++)null==(d=u[s])||H.isArray(d)||(h=H.measureText(t,a,r,h,d));var f=r.length/2;if(f>n.length){for(o=0;o<f;o++)delete a[r[o]];r.splice(0,f)}return h},H.measureText=function(t,e,n,i,a){var r=e[a];return r||(r=e[a]=t.measureText(a).width,n.push(a)),r>i&&(i=r),i},H.numberOfLabelLines=function(t){var e=1;return H.each(t,(function(t){H.isArray(t)&&t.length>e&&(e=t.length)})),e},H.color=_?function(t){return t instanceof CanvasGradient&&(t=N.global.defaultColor),_(t)}:function(t){return console.error("Color.js not found!"),t},H.getHoverColor=function(t){return t instanceof CanvasPattern||t instanceof CanvasGradient?t:H.color(t).saturate(.5).darken(.1).rgbString()}}(),en._adapters=rn,en.Animation=$,en.animationService=J,en.controllers=Jt,en.DatasetController=it,en.defaults=N,en.Element=K,en.elements=kt,en.Interaction=re,en.layouts=pe,en.platform=Oe,en.plugins=Le,en.Scale=yn,en.scaleService=Re,en.Ticks=on,en.Tooltip=Ye,en.helpers.each(fi,(function(t,e){en.scaleService.registerScaleType(e,t,t._defaults)})),Li)Li.hasOwnProperty(Bi)&&en.plugins.register(Li[Bi]);en.platform.initialize();var Ei=en;return"undefined"!=typeof window&&(window.Chart=en),en.Chart=en,en.Legend=Li.legend._element,en.Title=Li.title._element,en.pluginService=en.plugins,en.PluginBase=en.Element.extend({}),en.canvasHelpers=en.helpers.canvas,en.layoutService=en.layouts,en.LinearScaleBase=Cn,en.helpers.each(["Bar","Bubble","Doughnut","Line","PolarArea","Radar","Scatter"],(function(t){en[t]=function(e,n){return new en(e,en.helpers.merge(n||{},{type:t.charAt(0).toLowerCase()+t.slice(1)}))}})),Ei})); | PypiClean |
/Booktype-1.5.tar.gz/Booktype-1.5/lib/booki/site_static/xinha/plugins/ImageManager/assets/wz_jsgraphics.js | var jg_ihtm,jg_ie,jg_dom,jg_n4=(document.layers&&typeof document.classes!="undefined");function chkDHTM(a,b){a=document.body||null;jg_ie=(a&&typeof a.insertAdjacentHTML!="undefined");jg_dom=(a&&!jg_ie&&typeof a.appendChild!="undefined"&&typeof document.createRange!="undefined"&&typeof(b=document.createRange()).setStartBefore!="undefined"&&typeof b.createContextualFragment!="undefined");jg_ihtm=(!jg_ie&&!jg_dom&&a&&typeof a.innerHTML!="undefined")}function pntDoc(){this.wnd.document.write(this.htm);this.htm=""}function pntCnvDom(){var a=document.createRange();a.setStartBefore(this.cnv);a=a.createContextualFragment(this.htm);this.cnv.appendChild(a);this.htm=""}function pntCnvIe(){this.cnv.insertAdjacentHTML("BeforeEnd",this.htm);this.htm=""}function pntCnvIhtm(){this.cnv.innerHTML+=this.htm;this.htm=""}function pntCnv(){this.htm=""}function mkDiv(a,d,b,c){this.htm+='<div style="position:absolute;left:'+a+"px;top:"+d+"px;width:"+b+"px;height:"+c+"px;clip:rect(0,"+b+"px,"+c+"px,0);overflow:hidden;background-color:"+this.color+';"></div>'}function mkDivPrint(a,d,b,c){this.htm+='<div style="position:absolute;border-left:'+b+"px solid "+this.color+";left:"+a+"px;top:"+d+"px;width:"+b+"px;height:"+c+"px;clip:rect(0,"+b+"px,"+c+"px,0);overflow:hidden;background-color:"+this.color+';"></div>'}function mkLyr(a,d,b,c){this.htm+='<layer left="'+a+'" top="'+d+'" width="'+b+'" height="'+c+'" bgcolor="'+this.color+'"></layer>\n'}function mkLbl(b,a,c){this.htm+='<div style="position:absolute;white-space:nowrap;left:'+a+"px;top:"+c+"px;font-family:"+this.ftFam+";font-size:"+this.ftSz+";color:"+this.color+";"+this.ftSty+'">'+b+"</div>"}function mkLin(e,m,b,k){if(e>b){var j=b;var g=k;b=e;k=m;e=j;m=g}var q=b-e,o=Math.abs(k-m),l=e,i=m,n=(m>k)?-1:1;if(q>=o){var 
a=o<<1,h=a-(q<<1),d=a-q,f=l;while((q--)>0){++l;if(d>0){this.mkDiv(f,i,l-f,1);i+=n;d+=h;f=l}else{d+=a}}this.mkDiv(f,i,b-f+1,1)}else{var a=q<<1,h=a-(o<<1),d=a-o,c=i;if(k<=m){while((o--)>0){if(d>0){this.mkDiv(l++,i,1,c-i+1);i+=n;d+=h;c=i}else{i+=n;d+=a}}this.mkDiv(b,k,1,c-k+1)}else{while((o--)>0){i+=n;if(d>0){this.mkDiv(l++,c,1,i-c);d+=h;c=i}else{d+=a}}this.mkDiv(b,c,1,k-c+1)}}}function mkLin2D(r,b,q,a){if(r>q){var f=q;var n=a;q=r;a=b;r=f;b=n}var j=q-r,i=Math.abs(a-b),h=r,g=b,m=(b>a)?-1:1;var k=this.stroke;if(j>=i){if(k-3>0){var t=(k*j*Math.sqrt(1+i*i/(j*j))-j-(k>>1)*i)/j;t=(!(k-4)?Math.ceil(t):Math.round(t))+1}else{var t=k}var u=Math.ceil(k/2);var o=i<<1,e=o-(j<<1),l=o-j,d=h;while((j--)>0){++h;if(l>0){this.mkDiv(d,g,h-d+u,t);g+=m;l+=e;d=h}else{l+=o}}this.mkDiv(d,g,q-d+u+1,t)}else{if(k-3>0){var t=(k*i*Math.sqrt(1+j*j/(i*i))-(k>>1)*j-i)/i;t=(!(k-4)?Math.ceil(t):Math.round(t))+1}else{var t=k}var u=Math.round(k/2);var o=j<<1,e=o-(i<<1),l=o-i,c=g;if(a<=b){++u;while((i--)>0){if(l>0){this.mkDiv(h++,g,t,c-g+u);g+=m;l+=e;c=g}else{g+=m;l+=o}}this.mkDiv(q,a,t,c-a+u)}else{while((i--)>0){g+=m;if(l>0){this.mkDiv(h++,c,t,g-c+u);l+=e;c=g}else{l+=o}}this.mkDiv(q,c,t,a-c+u+1)}}}function mkLinDott(d,k,b,i){if(d>b){var h=b;var e=i;b=d;i=k;d=h;k=e}var o=b-d,n=Math.abs(i-k),j=d,g=k,m=(k>i)?-1:1,l=true;if(o>=n){var a=n<<1,f=a-(o<<1),c=a-o;while((o--)>0){if(l){this.mkDiv(j,g,1,1)}l=!l;if(c>0){g+=m;c+=f}else{c+=a}++j}if(l){this.mkDiv(j,g,1,1)}}else{var a=o<<1,f=a-(n<<1),c=a-n;while((n--)>0){if(l){this.mkDiv(j,g,1,1)}l=!l;g+=m;if(c>0){++j;c+=f}else{c+=a}}if(l){this.mkDiv(j,g,1,1)}}}function mkOv(g,p,r,q){var 
v=r>>1,u=q>>1,o=r&1,f=(q&1)+1,e=g+v,d=p+u,m=0,l=u,j=0,i=u,z=(v*v)<<1,k=(u*u)<<1,s=(z>>1)*(1-(u<<1))+k,c=(k>>1)-z*((u<<1)-1),n,t;while(l>0){if(s<0){s+=k*((m<<1)+3);c+=(k<<1)*(++m)}else{if(c<0){s+=k*((m<<1)+3)-(z<<1)*(l-1);c+=(k<<1)*(++m)-z*(((l--)<<1)-3);n=m-j;t=i-l;if(n&2&&t&2){this.mkOvQds(e,d,-m+2,j+o,-i,i-1+f,1,1);this.mkOvQds(e,d,-m+1,m-1+o,-l-1,l+f,1,1)}else{this.mkOvQds(e,d,-m+1,j+o,-i,i-t+f,n,t)}j=m;i=l}else{c-=z*((l<<1)-3);s-=(z<<1)*(--l)}}}this.mkDiv(e-v,d-i,v-j+1,(i<<1)+f);this.mkDiv(e+j+o,d-i,v-j+1,(i<<1)+f)}function mkOv2D(e,f,c,d){var G=this.stroke;c+=G-1;d+=G-1;var R=c>>1,P=d>>1,O=c&1,z=(d&1)+1,o=e+R,n=f+P,E=0,B=P,k=(R*R)<<1,U=(P*P)<<1,g=(k>>1)*(1-(P<<1))+U,N=(U>>1)-k*((P<<1)-1);if(G-4<0&&(!(G-2)||c-51>0&&d-51>0)){var r=0,q=P,F,L,D,v,u,I,t;while(B>0){if(g<0){g+=U*((E<<1)+3);N+=(U<<1)*(++E)}else{if(N<0){g+=U*((E<<1)+3)-(k<<1)*(B-1);N+=(U<<1)*(++E)-k*(((B--)<<1)-3);F=E-r;L=q-B;if(F-1){t=F+1+(G&1);L=G}else{if(L-1){t=G;L+=1+(G&1)}else{t=L=G}}this.mkOvQds(o,n,-E+1,r-t+F+O,-q,-L+q+z,t,L);r=E;q=B}else{N-=k*((B<<1)-3);g-=(k<<1)*(--B)}}}this.mkDiv(o-R,n-q,G,(q<<1)+z);this.mkDiv(o+R+O-G+1,n-q,G,(q<<1)+z)}else{var S=(c-((G-1)<<1))>>1,Q=(d-((G-1)<<1))>>1,C=0,A=Q,m=(S*S)<<1,T=(Q*Q)<<1,j=(m>>1)*(1-(Q<<1))+T,M=(T>>1)-m*((Q<<1)-1),D=new Array(),u=new Array(),H=new Array();D[0]=0;u[0]=P;H[0]=Q-1;while(B>0){if(g<0){g+=U*((E<<1)+3);N+=(U<<1)*(++E);D[D.length]=E;u[u.length]=B}else{if(N<0){g+=U*((E<<1)+3)-(k<<1)*(B-1);N+=(U<<1)*(++E)-k*(((B--)<<1)-3);D[D.length]=E;u[u.length]=B}else{N-=k*((B<<1)-3);g-=(k<<1)*(--B)}}if(A>0){if(j<0){j+=T*((C<<1)+3);M+=(T<<1)*(++C);H[H.length]=A-1}else{if(M<0){j+=T*((C<<1)+3)-(m<<1)*(A-1);M+=(T<<1)*(++C)-m*(((A--)<<1)-3);H[H.length]=A-1}else{M-=m*((A<<1)-3);j-=(m<<1)*(--A);H[H.length-1]--}}}}var r=0,q=P,p=H[0],J=D.length,F,L;for(var K=0;K<J;K++){if(typeof 
H[K]!="undefined"){if(H[K]<p||u[K]<q){E=D[K];this.mkOvQds(o,n,-E+1,r+O,-q,p+z,E-r,q-p);r=E;q=u[K];p=H[K]}}else{E=D[K];this.mkDiv(o-E+1,n-q,1,(q<<1)+z);this.mkDiv(o+r+O,n-q,1,(q<<1)+z);r=E;q=u[K]}}this.mkDiv(o-R,n-q,1,(q<<1)+z);this.mkDiv(o+r+O,n-q,1,(q<<1)+z)}}function mkOvDott(g,l,q,o){var t=q>>1,s=o>>1,k=q&1,f=o&1,e=g+t,d=l+s,j=0,i=s,p=(t*t)<<1,m=p<<1,h=(s*s)<<1,r=(p>>1)*(1-(s<<1))+h,c=(h>>1)-p*((s<<1)-1),n=true;while(i>0){if(r<0){r+=h*((j<<1)+3);c+=(h<<1)*(++j)}else{if(c<0){r+=h*((j<<1)+3)-m*(i-1);c+=(h<<1)*(++j)-p*(((i--)<<1)-3)}else{c-=p*((i<<1)-3);r-=m*(--i)}}if(n){this.mkOvQds(e,d,-j,j+k,-i,i+f,1,1)}n=!n}}function mkRect(a,e,b,d){var c=this.stroke;this.mkDiv(a,e,b,c);this.mkDiv(a+b,e,c,d);this.mkDiv(a,e+d,b+c,c);this.mkDiv(a,e+c,c,d-c)}function mkRectDott(a,d,b,c){this.drawLine(a,d,a+b,d);this.drawLine(a+b,d,a+b,d+c);this.drawLine(a,d+c,a+b,d+c);this.drawLine(a,d,a,d+c)}function jsgFont(){this.PLAIN="font-weight:normal;";this.BOLD="font-weight:bold;";this.ITALIC="font-style:italic;";this.ITALIC_BOLD=this.ITALIC+this.BOLD;this.BOLD_ITALIC=this.ITALIC_BOLD}var Font=new jsgFont();function jsgStroke(){this.DOTTED=-1}var Stroke=new jsgStroke();function jsGraphics(b,a){this.setColor=new Function("arg","this.color = arg;");this.getColor=new Function("return this.color");this.setStroke=function(c){this.stroke=c;if(!(c+1)){this.drawLine=mkLinDott;this.mkOv=mkOvDott;this.drawRect=mkRectDott}else{if(c-1>0){this.drawLine=mkLin2D;this.mkOv=mkOv2D;this.drawRect=mkRect}else{this.drawLine=mkLin;this.mkOv=mkOv;this.drawRect=mkRect}}};this.setPrintable=function(c){this.printable=c;this.mkDiv=jg_n4?mkLyr:c?mkDivPrint:mkDiv};this.setFont=function(d,e,c){this.ftFam=d;this.ftSz=e;this.ftSty=c||Font.PLAIN};this.drawPolyline=this.drawPolyLine=function(c,f,e){var d=c.length-1;while(d>=0){this.drawLine(c[d],f[d],c[--d],f[d])}};this.fillRect=function(c,f,d,e){this.mkDiv(c,f,d,e)};this.fillRectPattern=function(c,g,d,f,e){this.htm+='<div 
style="position:absolute;left:'+c+"px;top:"+g+"px;width:"+d+"px;height:"+f+"px;clip:rect(0,"+d+"px,"+f+"px,0);overflow:hidden;background-image: url('"+e+"');layer-background-image: url('"+e+"');z-index:100;\"></div>"};this.drawHandle=function(c,g,d,e,f){this.htm+='<div style="position:absolute;left:'+c+"px;top:"+g+"px;width:"+d+"px;height:"+e+"px;clip:rect(0,"+d+"px,"+e+"px,0);padding: 2px;overflow:hidden;cursor: '"+f+'\';" class="handleBox" id="'+f+'" ></div>'};this.drawHandleBox=function(c,g,d,e,f){this.htm+='<div style="position:absolute;left:'+c+"px;top:"+g+"px;width:"+d+"px;height:"+e+"px;clip:rect(0,"+(d+2)+"px,"+(e+2)+"px,0);overflow:hidden; border: solid 1px "+this.color+";cursor: '"+f+'\';" class="handleBox" id="'+f+'" ></div>'};this.drawPolygon=function(c,d){this.drawPolyline(c,d);this.drawLine(c[c.length-1],d[c.length-1],c[0],d[0])};this.drawEllipse=this.drawOval=function(c,f,d,e){this.mkOv(c,f,d,e)};this.fillEllipse=this.fillOval=function(g,r,n,v){var B=(n-=1)>>1,A=(v-=1)>>1,o=(n&1)+1,f=(v&1)+1,e=g+B,d=r+A,m=0,l=A,j=0,i=A,t=(B*B)<<1,s=t<<1,k=(A*A)<<1,u=(t>>1)*(1-(A<<1))+k,c=(k>>1)-t*((A<<1)-1),q,p,z;if(n+1){while(l>0){if(u<0){u+=k*((m<<1)+3);c+=(k<<1)*(++m)}else{if(c<0){u+=k*((m<<1)+3)-s*(l-1);q=e-m;p=(m<<1)+o;c+=(k<<1)*(++m)-t*(((l--)<<1)-3);z=i-l;this.mkDiv(q,d-i,p,z);this.mkDiv(q,d+i-z+f,p,z);j=m;i=l}else{c-=t*((l<<1)-3);u-=s*(--l)}}}}this.mkDiv(e-B,d-i,n+1,(i<<1)+f)};this.drawString=mkLbl;this.clear=function(){this.htm="";if(this.cnv){this.cnv.innerHTML=this.defhtm}};this.mkOvQds=function(d,k,j,g,f,c,e,i){this.mkDiv(g+d,f+k,e,i);this.mkDiv(g+d,c+k,e,i);this.mkDiv(j+d,c+k,e,i);this.mkDiv(j+d,f+k,e,i)};this.setStroke(1);this.setPrintable(false);this.setFont("verdana,geneva,helvetica,sans-serif",String.fromCharCode(49,50,112,120),Font.PLAIN);this.color="#000000";this.htm="";this.wnd=a||window;if(!(jg_ie||jg_dom||jg_ihtm)){chkDHTM()}if(typeof 
b!="string"||!b){this.paint=pntDoc}else{this.cnv=document.all?(this.wnd.document.all[b]||null):document.getElementById?(this.wnd.document.getElementById(b)||null):null;this.defhtm=(this.cnv&&this.cnv.innerHTML)?this.cnv.innerHTML:"";this.paint=jg_dom?pntCnvDom:jg_ie?pntCnvIe:jg_ihtm?pntCnvIhtm:pntCnv}}; | PypiClean |
/CharGer-0.5.2.tar.gz/CharGer-0.5.2/README.md | # CharGer
Characterization of Germline variants
## Install
pip install .
## Configure
Add the following to your PATH environment (add it in ~/.bash_profile or ~/.bashrc)
export PATH="/path/to/charger/bin:${PATH}"
## Run
Example for a VCF file
charger -f <variant file> -o <output file>
## For Help
Run:
charger
For detailed help/support, email Adam:
[email protected]
## Usage Details
### Input data
-m Standard .maf
-f Standard .vcf
-T Custom .tsv
Variant data may be input via at least one variant file.
This means that if variants are spread across several files, then you can input one of each type.
For the .maf and .tsv, use the custom columns to determine which columns to use.
Note that a standard .maf does not include protein annotations.
Use the custom column for the peptide change column.
If your .vcf has VEP annotations, then CharGer should be able to parse the information.
This information will be added to your variants when available.
### Output
-o output file
-w output as HTML (flag)
-k annotate input (flag)
--run-url-test test url when creating links
Name your output file; otherwise it will be called charger_summary.tsv.
You can opt to make the output into an HTML page, instead of a readable .tsv.
If you need to be assured of properly linked URL's, use the url test flag.
### Access data
-l ClinVar (flag)
-x ExAC (flag)
-E VEP (flag)
-t TCGA cancer types (flag)
Using these flags turns on accession features built in.
For the ClinVar, ExAC, and VEP flags, if no local VEP or database is provided, then BioMine will be used to access the ReST interface.
The TCGA flag allows disease determination from sample barcodes in a .maf when using a diseases file (see below).
### Suppress data or overrides
-O override with ClinVar description (flag)
-D suppress needing disease specific (flag)
You can have CharGer override its pathogenic characterization with whatever ClinVar has.
Suppressing disease specific variants takes any variants in the diseases file (see below) and treats them as equally pathogenic without disease consideration.
### Cross-reference data
-z pathogenic variants, .vcf
-e expression matrix file, .tsv
-g gene list file, (format: gene\\tdisease\\tmode_of_inheritance) .txt
-d diseases file, (format: gene\\tdisease\\tmode_of_inheritance) .tsv
-n de novo file, standard .maf
-a assumed de novo file, standard .maf
-c co-segregation file, standard .maf
-H HotSpot3D clusters file, .clusters
-r recurrence threshold (default = 2)
Variants or genes from each of these files can be used as additional known information.
An expression matrix file has columns for each sample, and its rows are genes.
The genes should be approved HUGO symbols.
HotSpot3D clusters can be used for versions v1.x.x.
The recurrence threshold will be pulled from the recurrence/weight column of the .clusters file when provided.
### Pathogenicity module scoring
Specify option and positive whole number value to change the default value.
Standard modules:
```
--PVS1 very strong pathogenicity (default = 1)
--PS1, --PS2, --PS3, --PS4 strong pathogenicity (defaults = 1)
--PM1, --PM2, --PM3, --PM4, --PM5, --PM6 moderate pathogenicity (defaults = 1)
--PP1, --PP2, --PP3, --PP4, --PP5 supporting pathogenicity (defaults = 1)
--BA1 stand-alone benignity (default = 1)
--BS1, --BS2, --BS3, --BS4 strong benignity (defaults = 1)
--BP1, --BP2, --BP3, --BP4, --BP5, --BP6, --BP7 supporting benignity (defaults = 1)
```
CharGer-defined modules:
```
--PSC1 strong pathogenicity (default = 1)
--PMC1 moderate pathogenicity (default = 1)
--PPC1, --PPC2 supporting pathogenicity (defaults = 1)
--BSC1 strong benignity (default = 1)
--BMC1 moderate benignity (default = 1)
```
### Pathogenicity category thresholds
Specify option and positive whole number value to change the default value.
```
--min-pathogenic-score threshold for classifying variant as pathogenic (default = 8)
--min-likely-pathogenic-score threshold for classifying variant as likely pathogenic (default = 5)
--min-benign-score threshold for classifying variant as benign (default = 8)
--min-likely-benign-score threshold for classifying variant as likely benign (default = 4)
```
### Local VEP
--vep-script Path to VEP
--vep-dir Path to VEP directory
--vep-cache Path to VEP cache directory
--vep-version VEP version (default = 87)
--vep-output VEP output file (default = charger.vep.vcf)
--grch assembly GRCh version (default = 37)
--ensembl-release Ensembl release version (default = 75)
--reference-fasta VEP reference fasta
--fork Number of forked processes used in VEP (default = 0)
This currently only works with .vcf input only.
Annotations are run with the VEP everything flag, so any local plugins will be used.
The BioMine accession is also suppressed when using a local VEP installation.
The VEP directory is not the same as would be given to VEP's --dir option.
Instead it is the path to the directory with the VEP .pl file.
The VEP script is the .pl file only.
If not given, it will be /vep-dir/variant\_effect\_predictor.pl.
The VEP cache directory is the same as would be given to VEP's --dir-cache option.
If you have multiple VEP versions, then specify the version you want to use.
This can be different from the Ensembl release option.
VEP output is the same as would be given to VEP's -o option and should end with .vcf.
The default output file will be called charger.vep.vcf.
The GRCh reference genome can be set to either 37 or 38.
The reference Fasta file will be determined automatically if not specified.
If the reference Fasta file is constructed automatically, then if, for example, the VEP chache is ~/.vep/, the Ensembl release is 74, and the reference assembly is 37, then the reference Fasta file will be ~/.vep/homo\_sapiens/74\_GRCH37/Homo\_sapiens.GRCh37.74.dna.primary\_assembly.fa.gz.
### Local databases
--exac-vcf ExAC vcf.gz
--mac-clinvar-tsv ClinVar from MacArthur lab (clinvar_alleles.tsv.gz)
Using local databases suppresses the BioMine accession too.
These files can be downloaded from their respective sites.
### Filters
--rare Allele frequency threshold for rare/common (default = 1, process variant with any frequency):
--vcf-any-filter Allow variants that do not pass all filters in .vcf input (flag)
--mutation-types Comma delimited list of types to allow
Using filters will limit the variants processed.
The rare option takes variants with allele frequency less than the given value.
The .vcf any filter accepts only variants that have passed all filters.
If no .vcf pass filter status given, the .vcf null value will be taken as having passed.
Mutation types filtering requires a comma-delimited list (no spaces) using terms from Ensembl's consequence terms.
### ReST batch sizes
-v VEP (#variants, default/max allowed = 150)
-b ClinVar summary (#variants, default/max allowed = 500)
-B ClinVar searchsize (#variants, default/max allowed = 50)
ReST API's usually have limits on the amount of data sent or received.
Exceeding these batch sizes would normally lead to warnings and/or IP blockage, but CharGer and BioMine try to keep batches at safe sizes. Last updated limits February 2017.
### Custom columns (0-based)
-G HUGO gene symbol
-X chromosome
-S start position
-P stop position
-R reference allele
-A alternate allele
-s strand
-M sample name
-C codon
-p peptide change
-L variant classification
-F allele frequency
Use these for .tsv and/or .maf input variant files to specify columns of relevant data.
CharGer makes use of genomic and protein variant annotations, so the more data made available the better your results.
| PypiClean |
/MAVR-0.93.tar.gz/MAVR-0.93/scripts/sequence_clusters/treefam/extract_families_of_species.py | __author__ = 'Sergei F. Kliver'
"""
Tested on treefam database 9.
Species with problems during extracting:
UNSOLVED: uncommon format in treefam database(even ids were not extracted), solution - write custom scripts for extraction
bursaphelenchus_xylophilus
capitella_teleta
helobdella_robusta
heterorhabditis_bacteriophora
lottia_gigantea
meloidogyne_hapla
monosiga_brevicollis
proterospongia_sp
strongyloides_ratti
SOLVED: different ids in sequence and families (presence ":" in ids in files with families - in sequence files is replaced by "_"), solved by replacement in files with extracted ids
tribolium_castaneum
amphimedon_queenslandica
schizosaccharomyces_pombe
"""
import os
import sys
import argparse
import multiprocessing as mp
import subprocess as sb
def extract_species_genes(family_file_name, queue):
    """Worker: pull one species' entries out of a single TreeFam family file.

    Greps the module-global ``args.species`` out of ``args.input/<family file>``
    (tee-ing the matching lines into ``args.output``), collects the protein ids
    (column 3) and gene ids (column 8), and, if anything matched, puts a
    ``(family_name, gene_list, protein_list)`` tuple on ``queue`` for the
    single writer process.
    """
    # Family name is the file name without its ".nhx.emf" suffix (8 chars).
    family_name = family_file_name[:-8]
    command = "grep '%s' %s/%s | tee %s/%s" % (
        args.species, args.input, family_file_name, args.output, family_file_name
    )
    grep_process = sb.Popen(command, shell=True, stdout=sb.PIPE)
    proteins = []
    genes = []
    for record_line in grep_process.stdout:
        fields = record_line.strip().split()
        proteins.append(fields[2])
        genes.append(fields[7])
    if genes:
        queue.put((family_name, genes, proteins))
    return family_name, genes, proteins
def listener(queue):
    """Single writer process: drain the result queue into the output files.

    Consumes ``(family_name, gene_ids, protein_ids)`` tuples produced by
    ``extract_species_genes`` workers and appends them to the four output
    files (families-by-protein, families-by-gene, and the two flat id lists)
    until the ``'kill'`` sentinel arrives. Funnelling all writes through one
    process avoids interleaved output from the pool.

    Improvement over the original: the files are opened with ``with`` so they
    are closed even if an exception escapes the loop (the original leaked the
    descriptors on error).
    """
    with open(args.prefix + "_proteins.fam", "w") as protein_fd, \
         open(args.prefix + "_genes.fam", "w") as genes_fd, \
         open(args.prefix + "_in_treefam_families_protein.ids", "w") as protein_ids_fd, \
         open(args.prefix + "_in_treefam_families_genes.ids", "w") as gene_ids_fd:
        while True:
            message = queue.get()
            if message == 'kill':
                break
            family_name, gene_list, protein_list = message
            protein_fd.write("%s\t%s\n" % (family_name, ",".join(protein_list)))
            genes_fd.write("%s\t%s\n" % (family_name, ",".join(gene_list)))
            for protein_id in protein_list:
                protein_ids_fd.write(protein_id + "\n")
            for gene_id in gene_list:
                gene_ids_fd.write(gene_id + "\n")
            # Flush after every record so partial results survive an
            # abnormal termination of the pool.
            protein_fd.flush()
            genes_fd.flush()
            protein_ids_fd.flush()
            gene_ids_fd.flush()
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_directory", action="store", dest="input",
                    help="Directory with TreeFam database.")
parser.add_argument("-s", "--species", action="store", dest="species",
                    help="Latin name of species to extract. Example: mus_musculus, homo_sapiens etc")
parser.add_argument("-o", "--output_directory", action="store", dest="output",
                    help="Output directory. Default: species")
parser.add_argument("-f", "--prefix", action="store", dest="prefix",
                    help="Prefix of files with families.")
parser.add_argument("-p", "--threads", action="store", dest="threads", type=int,
                    help="Number of threads. Default: 1")
# NOTE(review): the help text claims a default of 1, but no default= is set,
# so with no -p option args.threads is None and mp.Pool(None) uses
# cpu_count() - confirm which behavior is intended before adding default=1
# (a pool of exactly 1 would deadlock, since the listener occupies a slot).

args = parser.parse_args()

# Fall back to the species name for unset output directory / file prefix.
if args.output is None:
    args.output = args.species
if args.prefix is None:
    args.prefix = args.species

try:
    os.mkdir(args.output)
except OSError:
    # Output directory already exists - deliberately best-effort.
    pass

# One TreeFam family per *.nhx.emf file.
nhx_files = [entry for entry in os.listdir(args.input) if entry.endswith(".nhx.emf")]

print("Found %i families" % len(nhx_files))
print("Starting extraction...")

manager = mp.Manager()
queue = manager.Queue()
process_pool = mp.Pool(args.threads)

# Start the single writer first so it is draining the queue before any
# worker produces results; it occupies one pool slot until the sentinel.
watcher = process_pool.apply_async(listener, (queue,))

jobs = [process_pool.apply_async(extract_species_genes, (entry, queue)) for entry in nhx_files]
for job in jobs:
    job.get()

# All workers are done: tell the listener to finish, then wait for the pool
# (including the listener task) to drain before exiting. The original ended
# with close() but no join(), so the interpreter could exit and kill the
# daemonic listener before it processed the sentinel and closed its files.
queue.put('kill')
process_pool.close()
process_pool.join()
/Appium-Python-Client-2.11.1.tar.gz/Appium-Python-Client-2.11.1/appium/webdriver/extensions/android/gsm.py |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, cast
from selenium.common.exceptions import UnknownMethodException
from appium.common.helper import extract_const_attributes
from appium.common.logger import logger
from appium.protocols.webdriver.can_execute_commands import CanExecuteCommands
from appium.protocols.webdriver.can_execute_scripts import CanExecuteScripts
from appium.protocols.webdriver.can_remember_extension_presence import CanRememberExtensionPresence
from appium.webdriver.mobilecommand import MobileCommand as Command
if TYPE_CHECKING:
from appium.webdriver.webdriver import WebDriver
class GsmCallActions:
    """GSM call actions accepted by ``Gsm.make_gsm_call`` (emulator only).

    The string values are sent verbatim to the server as the ``action``
    argument of the ``mobile: gsmCall`` extension.
    """

    CALL = 'call'
    ACCEPT = 'accept'
    CANCEL = 'cancel'
    HOLD = 'hold'
class GsmSignalStrength:
    """GSM signal strength levels for ``Gsm.set_gsm_signal`` (emulator only).

    The integer values are sent verbatim to the server as the ``strength``
    argument of the ``mobile: gsmSignal`` extension.
    """

    NONE_OR_UNKNOWN = 0
    POOR = 1
    MODERATE = 2
    GOOD = 3
    GREAT = 4
class GsmVoiceState:
    """GSM voice registration states for ``Gsm.set_gsm_voice`` (emulator only).

    The string values are sent verbatim to the server as the ``state``
    argument of the GSM voice extension.
    """

    UNREGISTERED = 'unregistered'
    HOME = 'home'
    ROAMING = 'roaming'
    SEARCHING = 'searching'
    DENIED = 'denied'
    OFF = 'off'
    ON = 'on'
class Gsm(CanExecuteCommands, CanExecuteScripts, CanRememberExtensionPresence):
def make_gsm_call(self, phone_number: str, action: str) -> 'WebDriver':
"""Make GSM call (Emulator only)
Android only.
Args:
phone_number: The phone number to call to.
action: The call action.
A member of the const `appium.webdriver.extensions.android.gsm.GsmCallActions`
Usage:
self.driver.make_gsm_call('5551234567', GsmCallActions.CALL)
Returns:
Union['WebDriver', 'Gsm']: Self instance
"""
ext_name = 'mobile: gsmCall'
constants = extract_const_attributes(GsmCallActions)
if action not in constants.values():
logger.warning(
f'{action} is unknown. Consider using one of {list(constants.keys())} constants. '
f'(e.g. {GsmCallActions.__name__}.CALL)'
)
args = {'phoneNumber': phone_number, 'action': action}
try:
self.assert_extension_exists(ext_name).execute_script(ext_name, args)
except UnknownMethodException:
# TODO: Remove the fallback
self.mark_extension_absence(ext_name).execute(Command.MAKE_GSM_CALL, args)
return cast('WebDriver', self)
def set_gsm_signal(self, strength: int) -> 'WebDriver':
"""Set GSM signal strength (Emulator only)
Android only.
Args:
strength: Signal strength.
A member of the enum :obj:`appium.webdriver.extensions.android.gsm.GsmSignalStrength`
Usage:
self.driver.set_gsm_signal(GsmSignalStrength.GOOD)
Returns:
Union['WebDriver', 'Gsm']: Self instance
"""
ext_name = 'mobile: gsmSignal'
constants = extract_const_attributes(GsmSignalStrength)
if strength not in constants.values():
logger.warning(
f'{strength} is out of range. Consider using one of {list(constants.keys())} constants. '
f'(e.g. {GsmSignalStrength.__name__}.GOOD)'
)
try:
self.assert_extension_exists(ext_name).execute_script(ext_name, {'strength': strength})
except UnknownMethodException:
# TODO: Remove the fallback
self.mark_extension_absence(ext_name).execute(
Command.SET_GSM_SIGNAL, {'signalStrength': strength, 'signalStrengh': strength}
)
return cast('WebDriver', self)
def set_gsm_voice(self, state: str) -> 'WebDriver':
"""Set GSM voice state (Emulator only)
Android only.
Args:
state: State of GSM voice.
A member of the const `appium.webdriver.extensions.android.gsm.GsmVoiceState`
Usage:
self.driver.set_gsm_voice(GsmVoiceState.HOME)
Returns:
Union['WebDriver', 'Gsm']: Self instance
"""
ext_name = 'mobile: gmsVoice'
constants = extract_const_attributes(GsmVoiceState)
if state not in constants.values():
logger.warning(
f'{state} is unknown. Consider using one of {list(constants.keys())} constants. '
f'(e.g. {GsmVoiceState.__name__}.HOME)'
)
args = {'state': state}
try:
self.assert_extension_exists(ext_name).execute_script(ext_name, args)
except UnknownMethodException:
# TODO: Remove the fallback
self.mark_extension_absence(ext_name).execute(Command.SET_GSM_VOICE, args)
return cast('WebDriver', self)
def _add_commands(self) -> None:
    """Register the legacy GSM endpoints on the remote command executor."""
    # noinspection PyProtectedMember,PyUnresolvedReferences
    command_table = self.command_executor._commands
    for command, endpoint in (
        (Command.MAKE_GSM_CALL, '/session/$sessionId/appium/device/gsm_call'),
        (Command.SET_GSM_SIGNAL, '/session/$sessionId/appium/device/gsm_signal'),
        (Command.SET_GSM_VOICE, '/session/$sessionId/appium/device/gsm_voice'),
    ):
        command_table[command] = ('POST', endpoint)
/Flask-ULID-0.1.1.tar.gz/Flask-ULID-0.1.1/README.md | # What is this?
This is a Flask extension for handling URLs that contain a [ULID](https://github.com/ulid/spec).
# How to install
$ pip install flask-ulid
# How to use
## Wrap Flask app
```python
from flask import Flask
from flask_ulid import FlaskULID
app = Flask(__name__)
FlaskULID(app)
@app.route('/article/<ulid:article_id>')
def article(article_id):
return '{} is valid ulid'.format(article_id)
```
## Add converter for Flask app
```python
from flask import Flask
from flask_ulid import ULIDConverter
app = Flask(__name__)
app.url_map.converters['ulid'] = ULIDConverter
@app.route('/article/<ulid:article_id>')
def article(article_id):
return '{} is valid ulid'.format(article_id)
```
| PypiClean |
/FactoringRuc-1.0.1.tar.gz/FactoringRuc-1.0.1/README.md | # Factoring Total
[![PyPI version](https://badge.fury.io/py/FactoringRuc.svg)](https://pypi.org/project/FactoringRuc/1.0.0/)
## Descripción
Libreria para obtener datos basicos desde la SUNAT
## Instalación
```
pip install FactoringRuc
```
## Uso básico
```python
from factoring_ruc import ConsultaRuc
cr = ConsultaRuc()
usuario_enc = ""
password_enc = ""
key = ""
tipo = ""
ruc = ""
bloque = ""
respuesta = cr.search_by_document(usuario_enc,password_enc,key,tipo,ruc,bloque)
print(respuesta)
```
Obtendremos el siguiente resultado:
```python
{'informacion_basica': {'TDoc': 'R', 'NDoc': '20377892918', 'RelTDoc': '', 'RelNDoc': '', 'RazSoc': 'LEASING TOTAL S.A', 'NomCom': '-', 'TipCon': 'SOCIEDAD ANONIMA', 'IniAct': '1997-10-14', 'ActEco': '65912 - ARRENDAMIENTO CON OPCION DE COMPRA', 'FchInsRRPP': '1998-01-15', 'NumParReg': '- ', 'Fol': '- ', 'Asi': '- ', 'AgeRet': 'S', 'ApeMat': '', 'ApePat': '', 'Nom': '', 'DigVer': '', 'Sex': '', 'FecNac': '', 'EstCon': 'ACTIVO', 'EstDom': 'HABIDO', 'EstDomic': '00', 'CondDomic': '00', 'Direcc': [{'Direccion': 'AVENIDA CIRCUNVALACION CLUB GOLF NRO 134 SANTIAGO DE SURCO - LIMA - LIMA (TORRE 2 PISO 16)', 'Fuente': 'SUNAT - DOMICILIO FISCAL'}, {'Direccion': 'AVENIDA J PARDO NRO. 231 MIRAFLORES - LIMA - LIMA (PISO 11 OFICINA 1101)', 'Fuente': 'SUNAT - OF.ADMINIST.'}], 'RepLeg': [{'TDOC': 'D', 'NDOC': '09377548', 'Nombre': 'RUIZ SANDOVAL SILVIA LILIANA', 'FecIniCar': '2013-01-22', 'Cargo': 'APODERADO'}, {'TDOC': 'D', 'NDOC': '30862793 ', 'Nombre': 'NUÑEZ MOLLEAPASA DAVID ANIBAL', 'FecIniCar': '1998-08-16', 'Cargo': 'GERENTE GRAL'}, {'TDOC': 'D', 'NDOC': '42848529', 'Nombre': 'ROSAS AVALOS AARON DIEGO', 'FecIniCar': '2013-07-04', 'Cargo': 'APODERADO'}]}}
```
## Configuración
Revisar PDF enviado
## Métodos
#### search_by_document(`usuario_enc, password_enc, key, tipo, ruc, bloque`)
Con esto, obtendremos informacion basica del ruc o tipo de documento a consultar. Devolverá por defecto un diccionario ([https://docs.python.org/3.6/tutorial/datastructures.html#dictionaries](https://docs.python.org/3.6/tutorial/datastructures.html#dictionaries)).
## Consideraciones
* Debe utilizar python >=3.6
| PypiClean |
/Discord%20Anti-Spam-1.8.1.tar.gz/Discord Anti-Spam-1.8.1/antispam/caches/memory/memory.py | import logging
from typing import AsyncIterable
from antispam import dataclasses, exceptions
from antispam.abc import Cache
from antispam.enums import ResetType
log = logging.getLogger(__name__)
class MemoryCache(Cache):
    """In-memory :class:`Cache` implementation backed by a plain ``dict``.

    Guilds are stored keyed by id; each Guild dataclass owns its members,
    and each member owns its messages.
    """

    def __init__(self, handler):
        self.handler = handler
        self.cache = {}
        log.info("Cache instance ready to roll.")

    async def initialize(self, *args, **kwargs) -> None:
        """Nothing extra to set up for the in-memory backend."""
        return await super().initialize(*args, **kwargs)

    async def get_guild(self, guild_id: int) -> dataclasses.Guild:
        """Return the cached guild or raise ``GuildNotFound``."""
        log.debug("Attempting to return cached Guild(id=%s)", guild_id)
        if guild_id not in self.cache:
            raise exceptions.GuildNotFound from None
        return self.cache[guild_id]

    async def set_guild(self, guild: dataclasses.Guild) -> None:
        """Insert or replace a guild in the cache."""
        log.debug("Attempting to set Guild(id=%s)", guild.id)
        self.cache[guild.id] = guild

    async def delete_guild(self, guild_id: int) -> None:
        """Remove a guild from the cache; unknown ids are ignored."""
        log.debug("Attempting to delete Guild(id=%s)", guild_id)
        self.cache.pop(guild_id, None)

    async def get_member(self, member_id: int, guild_id: int) -> dataclasses.Member:
        """Return a cached member; raise ``GuildNotFound``/``MemberNotFound``."""
        log.debug(
            "Attempting to return a cached Member(id=%s) for Guild(id=%s)",
            member_id,
            guild_id,
        )
        parent_guild = await self.get_guild(guild_id)
        cached_member = parent_guild.members.get(member_id)
        if cached_member is None:
            raise exceptions.MemberNotFound from None
        return cached_member

    async def set_member(self, member: dataclasses.Member) -> None:
        """Store a member, creating the enclosing guild on demand."""
        log.debug(
            "Attempting to cache Member(id=%s) for Guild(id=%s)",
            member.id,
            member.guild_id,
        )
        try:
            parent_guild = await self.get_guild(member.guild_id)
        except exceptions.GuildNotFound:
            parent_guild = dataclasses.Guild(id=member.guild_id, options=self.handler.options)
        parent_guild.members[member.id] = member
        await self.set_guild(parent_guild)

    async def delete_member(self, member_id: int, guild_id: int) -> None:
        """Remove a member; unknown guilds or members are a no-op."""
        log.debug(
            "Attempting to delete Member(id=%s) in Guild(id=%s)", member_id, guild_id
        )
        try:
            parent_guild = await self.get_guild(guild_id)
        except exceptions.GuildNotFound:
            return
        if member_id not in parent_guild.members:
            return
        del parent_guild.members[member_id]
        await self.set_guild(parent_guild)

    async def add_message(self, message: dataclasses.Message) -> None:
        """Append a message to its author, creating member/guild on demand."""
        log.debug(
            "Attempting to add a Message(id=%s) to Member(id=%s) in Guild(id=%s)",
            message.id,
            message.author_id,
            message.guild_id,
        )
        try:
            author = await self.get_member(
                member_id=message.author_id, guild_id=message.guild_id
            )
        except (exceptions.GuildNotFound, exceptions.MemberNotFound):
            # set_member() below creates the enclosing Guild when needed, so a
            # fresh Member covers both unknown-guild and unknown-member cases.
            author = dataclasses.Member(id=message.author_id, guild_id=message.guild_id)
        author.messages.append(message)
        await self.set_member(author)

    async def reset_member_count(
        self, member_id: int, guild_id: int, reset_type: ResetType
    ) -> None:
        """Zero a member's kick or warn counter; unknown members are ignored."""
        log.debug(
            "Attempting to reset counts on Member(id=%s) in Guild(id=%s) with type %s",
            member_id,
            guild_id,
            reset_type.name,
        )
        try:
            target = await self.get_member(member_id, guild_id)
        except (exceptions.MemberNotFound, exceptions.GuildNotFound):
            # Nothing cached for this member, so there is nothing to reset.
            return
        if reset_type == ResetType.KICK_COUNTER:
            target.kick_count = 0
        else:
            target.warn_count = 0
        await self.set_member(target)

    async def get_all_members(
        self, guild_id: int
    ) -> AsyncIterable[dataclasses.Member]:  # noqa
        """Yield every cached member of the given guild."""
        log.debug("Yielding all cached members for Guild(id=%s)", guild_id)
        parent_guild = await self.get_guild(guild_id=guild_id)
        for cached_member in parent_guild.members.values():
            yield cached_member

    async def get_all_guilds(self) -> AsyncIterable[dataclasses.Guild]:  # noqa
        """Yield every cached guild."""
        log.debug("Yielding all cached guilds")
        for cached_guild in self.cache.values():
            yield cached_guild

    async def drop(self) -> None:
        """Discard the entire cache."""
        log.warning("Cache was just dropped")
        self.cache = {}
/Maxar_Portal_SDK-1.1.2-py3-none-any.whl/Maxar_Portal_SDK/account_service/usage.py | import requests
import warnings
import Maxar_Portal_SDK.process as process
warnings.filterwarnings('ignore')
class Usage:
    """Client for the Maxar Portal usage-service endpoints."""

    def __init__(self, auth):
        self.base_url = auth.api_base_url
        self.auth = auth

    def get_usage(self, activation_id=None):
        """
        Returns the usage for an activation.
        Args:
            activation_id: string of the activation you want to check. If not provided,
            attempts to get it from your token. Account admin permissions are needed to get
            the activation id from the token. If using elevated permission from account admin,
            the activation id is required.
        """
        authorization = process.authorization(self.auth)
        if activation_id:
            url = "{}/usageservice/api/v1/usage/activation/{}".format(self.base_url, activation_id)
        else:
            url = "{}/usageservice/api/v1/usage".format(self.base_url)
        response = requests.request("GET", url, headers=authorization, verify=self.auth.SSL)
        process._response_handler(response)
        return response.json()

    def get_usage_overview(self):  # Account admin needed
        """
        Gets aggregated usage information for the usage overview.
        """
        authorization = process.authorization(self.auth)
        url = "{}/usageservice/api/v1/usage/activation/overview".format(self.base_url)
        response = requests.request("GET", url, headers=authorization, verify=self.auth.SSL)
        process._response_handler(response)
        return response.json()

    def get_usage_allowed(self, activation_id=None):
        """
        Checks if there is usage left on the activation tied to your token.
        Returns a 200 server status code if you have credit remaining.
        Args:
            activation_id: string of the activation you want to check. If not provided,
            attempts to get it from your token (see get_usage for the required permissions).
        """
        authorization = process.authorization(self.auth)
        if activation_id:
            url = '{}/usageservice/api/v1/usage/activation/{}/allowed'.format(self.base_url, activation_id)
        else:
            url = "{}/usageservice/api/v1/usage/allowed".format(self.base_url)
        response = requests.request("GET", url, headers=authorization, verify=self.auth.SSL)
        process._response_handler(response)
        try:
            return response.json()
        except ValueError:
            # Bug fix: was a bare `except:`. Response.json() raises a
            # ValueError subclass when the body is not JSON; in that case fall
            # back to returning the HTTP status code, as before.
            return response.status_code

    def get_usage_allowed_download(self, bbox, productId, activation_id=None):
        """
        Checks if there is enough usage left on your activation to download data.
        Args:
            bbox: string of the area that you want to download; converted to square km.
            productId: string id of the product that you are trying to download.
            activation_id: string of the activation you want to check. If not provided,
            attempts to get it from your token (see get_usage for the required permissions).
        """
        authorization = process.authorization(self.auth)
        sqkm = process.area_sqkm(bbox)
        if activation_id:
            url = '{}/usageservice/api/v1/usage/activation/{}/allowed/download?sqKm={}&productId={}'.format(self.base_url, activation_id, sqkm, productId)
        else:
            url = "{}/usageservice/api/v1/usage/allowed/download?sqKm={}&productId={}".format(self.base_url, sqkm, productId)
        response = requests.request("GET", url, headers=authorization, verify=self.auth.SSL)
        process._response_handler(response)
        try:
            return response.json()
        except ValueError:
            # See get_usage_allowed: bare except narrowed to JSON decode errors.
            return response.status_code
/OPi.GPIO-0.5.2.tar.gz/OPi.GPIO-0.5.2/OPi/sysfs.py |
from contextlib import contextmanager
from OPi.constants import HIGH, LOW, IN, OUT, \
NONE, RISING, FALLING, BOTH
import os
import time
# Allow to wait up to 1 second for the file have the correct permissions
WAIT_PERMISSION_TIMEOUT = 1.
def await_permissions(path):
    """Block until *path* becomes writable, or the 1 s timeout expires.

    Permissions on freshly exported sysfs attribute files are applied
    asynchronously, so they may briefly be inaccessible after export.
    """
    deadline = time.time() + WAIT_PERMISSION_TIMEOUT

    while not os.access(path, os.W_OK) and time.time() < deadline:
        time.sleep(0.1)
@contextmanager
def value_descriptor(pin, mode="r"):
    # Yield an open file object on the sysfs "value" attribute of *pin*,
    # waiting (up to WAIT_PERMISSION_TIMEOUT) for it to become accessible.
    path = "/sys/class/gpio/gpio{0}/value".format(pin)
    await_permissions(path)
    with open(path, mode) as fp:
        yield fp
def export(pin):
    # Ask the kernel to expose *pin* via sysfs by writing the pin number to
    # /sys/class/gpio/export (creates /sys/class/gpio/gpio<pin>/).
    path = "/sys/class/gpio/export"
    await_permissions(path)
    with open(path, "w") as fp:
        fp.write(str(pin))
def unexport(pin):
    # Release *pin* from sysfs control by writing its number to
    # /sys/class/gpio/unexport (removes /sys/class/gpio/gpio<pin>/).
    path = "/sys/class/gpio/unexport"
    await_permissions(path)
    with open(path, "w") as fp:
        fp.write(str(pin))
def direction(pin, dir):
    """Configure *pin* as an input (IN) or an output (OUT) via sysfs."""
    assert dir in [IN, OUT]
    path = "/sys/class/gpio/gpio{0}/direction".format(pin)
    await_permissions(path)
    with open(path, "w") as fp:
        fp.write("in" if dir == IN else "out")
def input(pin):
    """Read the logic level of *pin*; returns LOW or HIGH."""
    with value_descriptor(pin) as fp:
        raw = fp.read()
    return LOW if raw.strip() == str(LOW) else HIGH
def output(pin, value):
    """Drive *pin* high (truthy *value*) or low (falsy *value*)."""
    with value_descriptor(pin, "w") as fp:
        fp.write("1" if value else "0")
def edge(pin, trigger):
    """Select which signal edge(s) of *pin* generate interrupts."""
    assert trigger in [NONE, RISING, FALLING, BOTH]
    path = "/sys/class/gpio/gpio{0}/edge".format(pin)
    await_permissions(path)
    names = {
        NONE: "none",
        RISING: "rising",
        FALLING: "falling",
        BOTH: "both",
    }
    with open(path, "w") as fp:
        fp.write(names[trigger])
# Hardware PWM functionality:
# resources: https://developer.toradex.com/knowledge-base/pwm-linux & https://www.faschingbauer.me/trainings/material/soup/hardware/pwm/topic.html
def PWM_Export(chip, pin):  # some chips will have more than 1 pwm chip. the OPi PC+ only has 1 called pwmchip0. To list what chips are available use 'ls -l /sys/class/pwm'
    # Expose PWM channel *pin* of pwmchip<chip> through sysfs
    # (creates /sys/class/pwm/pwmchip<chip>/pwm<pin>/).
    path = "/sys/class/pwm/pwmchip{0}/export".format(chip)
    await_permissions(path)
    with open(path, "w") as fp:
        fp.write(str(pin))
def PWM_Unexport(chip, pin):
    # Remove PWM channel *pin* of pwmchip<chip> from sysfs control.
    path = "/sys/class/pwm/pwmchip{0}/unexport".format(chip)
    await_permissions(path)
    with open(path, "w") as fp:
        fp.write(str(pin))
def PWM_Enable(chip, pin):  # enables PWM so that it can be controlled
    # Write 1 to the channel's "enable" attribute to start the PWM output.
    path = "/sys/class/pwm/pwmchip{0}/pwm{1}/enable".format(chip, pin)
    await_permissions(path)
    with open(path, "w") as fp:
        fp.write(str(1))
def PWM_Disable(chip, pin):  # disables PWM
    # Write 0 to the channel's "enable" attribute to stop the PWM output.
    path = "/sys/class/pwm/pwmchip{0}/pwm{1}/enable".format(chip, pin)
    await_permissions(path)
    with open(path, "w") as fp:
        fp.write(str(0))
def PWM_Polarity(chip, pin, invert=False):
    """Set the PWM signal polarity (inverted makes a higher duty cycle dimmer).

    Important: the channel must be disabled before changing polarity or the
    write will not take effect.
    """
    path = "/sys/class/pwm/pwmchip{0}/pwm{1}/polarity".format(chip, pin)
    await_permissions(path)
    with open(path, "w") as fp:
        # Note: only the exact value True inverts, matching the original API.
        fp.write("inversed" if invert is True else "normal")
def PWM_Period(chip, pin, pwm_period):
    """Set the PWM period of pwmchip<chip>/pwm<pin>, in nanoseconds.

    The sysfs interface rejects a period shorter than the currently
    configured duty cycle, so that case is validated first.

    Raises:
        ValueError: if *pwm_period* is shorter than the active duty cycle.
    """
    duty_cycle_path = "/sys/class/pwm/pwmchip{0}/pwm{1}/duty_cycle".format(chip, pin)
    # Read the current duty cycle: the new period must be >= the duty cycle.
    with open(duty_cycle_path, "r") as fp:
        current_duty_cycle_period = int(fp.read())
    if current_duty_cycle_period > pwm_period:
        # Bug fix: the original code printed a warning, evaluated the no-op
        # expression `os.error`, and then wrote the invalid period anyway.
        raise ValueError(
            "The PWM period ({0} ns) must be greater than or equal to the "
            "current duty cycle ({1} ns)".format(pwm_period, current_duty_cycle_period)
        )
    path = "/sys/class/pwm/pwmchip{0}/pwm{1}/period".format(chip, pin)
    await_permissions(path)
    with open(path, "w") as fp:
        fp.write(str(pwm_period))
def PWM_Frequency(chip, pin, pwm_frequency):
    """Set the PWM output frequency, in Hz (written as a period in ns)."""
    # Convert frequency to a period in nanoseconds, rounded to an integer.
    period_ns = int(round((1 / pwm_frequency) * 1e9, 0))
    path = "/sys/class/pwm/pwmchip{0}/pwm{1}/period".format(chip, pin)
    await_permissions(path)
    with open(path, "w") as fp:
        fp.write(str(period_ns))
def PWM_Duty_Cycle_Percent(chip, pin, Duty_cycle):
    """Set the duty cycle as a percentage (0-100) of the configured period."""
    period_path = "/sys/class/pwm/pwmchip{0}/pwm{1}/period".format(chip, pin)
    # The percentage is resolved against the currently configured period.
    with open(period_path, "r") as fp:
        period_ns = int(fp.read())
    duty_ns = int(round(Duty_cycle / 100 * period_ns, 0))
    path = "/sys/class/pwm/pwmchip{0}/pwm{1}/duty_cycle".format(chip, pin)
    with open(path, "w") as fp:
        fp.write(str(duty_ns))
def PWM_Duty_Cycle(chip, pin, Duty_cycle):
    """Set the duty cycle, in nanoseconds; must not exceed the PWM period.

    Raises:
        ValueError: if *Duty_cycle* is greater than the configured period.
    """
    PWM_period_path = "/sys/class/pwm/pwmchip{0}/pwm{1}/period".format(chip, pin)
    # Read the configured period: the duty cycle must be <= the period.
    with open(PWM_period_path, "r") as fp:
        current_period = int(fp.read())
    if Duty_cycle > current_period:
        # Bug fix: the original code printed a warning, evaluated the no-op
        # expression `os.error`, and then wrote the out-of-range value anyway.
        raise ValueError(
            "The duty cycle ({0} ns) must be less than or equal to the PWM "
            "period ({1} ns)".format(Duty_cycle, current_period)
        )
    path = "/sys/class/pwm/pwmchip{0}/pwm{1}/duty_cycle".format(chip, pin)
    with open(path, "w") as fp:
        fp.write(str(Duty_cycle))
/Flask-CKEditor-0.4.6.tar.gz/Flask-CKEditor-0.4.6/flask_ckeditor/static/full/plugins/popup/plugin.js | CKEDITOR.plugins.add( 'popup' );
CKEDITOR.tools.extend( CKEDITOR.editor.prototype, {
/**
* Opens Browser in a popup. The `width` and `height` parameters accept
* numbers (pixels) or percent (of screen size) values.
*
* @member CKEDITOR.editor
* @param {String} url The url of the external file browser.
* @param {Number/String} [width='80%'] Popup window width.
* @param {Number/String} [height='70%'] Popup window height.
* @param {String} [options='location=no,menubar=no,toolbar=no,dependent=yes,minimizable=no,modal=yes,alwaysRaised=yes,resizable=yes,scrollbars=yes']
* Popup window features.
*/
popup: function( url, width, height, options ) {
width = width || '80%';
height = height || '70%';
if ( typeof width == 'string' && width.length > 1 && width.substr( width.length - 1, 1 ) == '%' )
width = parseInt( window.screen.width * parseInt( width, 10 ) / 100, 10 );
if ( typeof height == 'string' && height.length > 1 && height.substr( height.length - 1, 1 ) == '%' )
height = parseInt( window.screen.height * parseInt( height, 10 ) / 100, 10 );
if ( width < 640 )
width = 640;
if ( height < 420 )
height = 420;
var top = parseInt( ( window.screen.height - height ) / 2, 10 ),
left = parseInt( ( window.screen.width - width ) / 2, 10 );
options = ( options || 'location=no,menubar=no,toolbar=no,dependent=yes,minimizable=no,modal=yes,alwaysRaised=yes,resizable=yes,scrollbars=yes' ) + ',width=' + width +
',height=' + height +
',top=' + top +
',left=' + left;
var popupWindow = window.open( '', null, options, true );
// Blocked by a popup blocker.
if ( !popupWindow )
return false;
try {
// Chrome is problematic with moveTo/resizeTo, but it's not really needed here (https://dev.ckeditor.com/ticket/8855).
var ua = navigator.userAgent.toLowerCase();
if ( ua.indexOf( ' chrome/' ) == -1 ) {
popupWindow.moveTo( left, top );
popupWindow.resizeTo( width, height );
}
popupWindow.focus();
popupWindow.location.href = url;
} catch ( e ) {
popupWindow = window.open( url, null, options, true );
}
return true;
}
} ); | PypiClean |
/IOT3ApiClient-1.0.0.tar.gz/IOT3ApiClient-1.0.0/requests/api.py | from . import sessions
def request(method, url, **kwargs):
    """Constructs and sends a :class:`Request <Request>`.

    :param method: method for the new :class:`Request` object: ``GET``, ``OPTIONS``, ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary, list of tuples or bytes to send
        in the query string for the :class:`Request`.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
    :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload.
        ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
        or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
        defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers
        to add for the file.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) How many seconds to wait for the server to send data
        before giving up, as a float, or a :ref:`(connect timeout, read
        timeout) <timeouts>` tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
    :type allow_redirects: bool
    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
    :param verify: (optional) Either a boolean, in which case it controls whether we verify
        the server's TLS certificate, or a string, in which case it must be a path
        to a CA bundle to use. Defaults to ``True``.
    :param stream: (optional) if ``False``, the response content will be immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response

    Usage::

        >>> import requests
        >>> req = requests.request('GET', 'https://httpbin.org/get')
        >>> req
        <Response [200]>
    """
    # By using the 'with' statement we are sure the session is closed, thus we
    # avoid leaving sockets open which can trigger a ResourceWarning in some
    # cases, and look like a memory leak in others.
    # Note: a new Session is created for each call; callers that issue many
    # requests should create and reuse a Session object themselves.
    with sessions.Session() as session:
        return session.request(method=method, url=url, **kwargs)
def get(url, params=None, **kwargs):
    r"""Send an HTTP ``GET`` request and return the server's response.

    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary, list of tuples or bytes that will be
        encoded into the query string of the :class:`Request`.
    :param \*\*kwargs: Any other optional arguments accepted by ``request``.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request('get', url, params=params, **kwargs)
def options(url, **kwargs):
    r"""Send an HTTP ``OPTIONS`` request and return the server's response.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Any other optional arguments accepted by ``request``.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request('options', url, **kwargs)
def head(url, **kwargs):
    r"""Send an HTTP ``HEAD`` request and return the server's response.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Any other optional arguments accepted by ``request``.
        If ``allow_redirects`` is not provided, it defaults to ``False``
        (unlike the default :meth:`request` behavior).
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # HEAD is typically used to probe a resource, so redirects are opt-in.
    kwargs.setdefault('allow_redirects', False)
    return request('head', url, **kwargs)
def post(url, data=None, json=None, **kwargs):
    r"""Send an HTTP ``POST`` request and return the server's response.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to place in the body of the :class:`Request`.
    :param json: (optional) A JSON serializable Python object to place in the
        body of the :class:`Request`.
    :param \*\*kwargs: Any other optional arguments accepted by ``request``.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request('post', url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
    r"""Send an HTTP ``PUT`` request and return the server's response.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to place in the body of the :class:`Request`.
    :param json: (optional) JSON serializable object to place in the body of
        the :class:`Request` (forwarded through ``\*\*kwargs``).
    :param \*\*kwargs: Any other optional arguments accepted by ``request``.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
    r"""Send an HTTP ``PATCH`` request and return the server's response.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to place in the body of the :class:`Request`.
    :param json: (optional) JSON serializable object to place in the body of
        the :class:`Request` (forwarded through ``\*\*kwargs``).
    :param \*\*kwargs: Any other optional arguments accepted by ``request``.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
    r"""Send an HTTP ``DELETE`` request and return the server's response.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Any other optional arguments accepted by ``request``.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request('delete', url, **kwargs)
/HavNegpy-1.2.tar.gz/HavNegpy-1.2/docs/_build/doctrees/nbsphinx/cond_tutorial_new.ipynb | # Tutorial for Conductivity module
```
# import the necessary packages along with HavNegpy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import HavNegpy as dd
%matplotlib qt
# extract the data
filename = 'cond_example_data.txt'
col_names = ['Freq','Sig1','Sig2']
df = pd.read_csv(filename, sep='\t',index_col=False,usecols = [0,1,2],names=col_names,header=None,skiprows=10,encoding='unicode_escape',engine='python')
```
## Fitting the real part of Conductivity
```
# plot the data
x = np.log10(df['Freq'])
y = np.log10(df['Sig1'])
plt.scatter(x,y,label='example data')
plt.xlabel('log f [Hz]')
plt.ylabel('log ( $\sigma´$)')
plt.legend()
plt.title('Example for Conductivity fitting')
```
![data_example-2.png](attachment:data_example-2.png)
```
# instantiate the Conductivity module
cond = dd.Conductivity()
```
**Select the region of interest (ROI) to fit the data using the select range method**. <br>
The data in ROI is shown as image in the next cell
```
#select range
x1,y1 = cond.select_range(x,y)
```
**Plot of the ROI for fitting**
```
plt.scatter(x1,y1)
plt.xlabel('log f [Hz]')
plt.ylabel('log ( $\sigma´$)')
```
![ROI.png](attachment:ROI.png)
```
#dump the initial guess parameters using the dump_parameters method (varies for each fn), which dumps the parameters into a json file
#this is required before performing the first fitting as it takes the initial guess from the json file created
cond.dump_parameters()
# perform least squares fitting
# the plot is shown as a image in the next cell
cond.fit(x1,y1)
```
**Plot of the final fit**
![final_fitting.png](attachment:final_fitting.png)
```
# before saving fit results an analysis file has to be created using create_analysis file method
cond.create_analysis_file()
#save the fit results using save_fit method of the corresponding fit function
#takes one argument, read more on the documentation
cond.save_fit(1)
```
| PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/mvc/_DataBindingMixin.js | define("dojox/mvc/_DataBindingMixin",["dojo/_base/lang","dojo/_base/array","dojo/_base/declare","dojo/Stateful","dijit/registry"],function(_1,_2,_3,_4,_5){
return _3("dojox.mvc._DataBindingMixin",null,{ref:null,isValid:function(){
return this.get("binding")?this.get("binding").get("valid"):true;
},_dbstartup:function(){
if(this._databound){
return;
}
this._unwatchArray(this._viewWatchHandles);
this._viewWatchHandles=[this.watch("ref",function(_6,_7,_8){
if(this._databound){
this._setupBinding();
}
}),this.watch("value",function(_9,_a,_b){
if(this._databound){
var _c=this.get("binding");
if(_c){
if(!((_b&&_a)&&(_a.valueOf()===_b.valueOf()))){
_c.set("value",_b);
}
}
}
})];
this._beingBound=true;
this._setupBinding();
delete this._beingBound;
this._databound=true;
},_setupBinding:function(_d){
if(!this.ref){
return;
}
var _e=this.ref,pw,pb,_f;
if(_e&&_1.isFunction(_e.toPlainObject)){
_f=_e;
}else{
if(/^\s*expr\s*:\s*/.test(_e)){
_e=_e.replace(/^\s*expr\s*:\s*/,"");
_f=_1.getObject(_e);
}else{
if(/^\s*rel\s*:\s*/.test(_e)){
_e=_e.replace(/^\s*rel\s*:\s*/,"");
_d=_d||this._getParentBindingFromDOM();
if(_d){
_f=_1.getObject(""+_e,false,_d);
}
}else{
if(/^\s*widget\s*:\s*/.test(_e)){
_e=_e.replace(/^\s*widget\s*:\s*/,"");
var _10=_e.split(".");
if(_10.length==1){
_f=_5.byId(_e).get("binding");
}else{
pb=_5.byId(_10.shift()).get("binding");
_f=_1.getObject(_10.join("."),false,pb);
}
}else{
_d=_d||this._getParentBindingFromDOM();
if(_d){
_f=_1.getObject(""+_e,false,_d);
}else{
try{
if(_1.getObject(_e) instanceof _4){
_f=_1.getObject(_e);
}
}
catch(err){
if(_e.indexOf("${")==-1){
throw new Error("dojox.mvc._DataBindingMixin: '"+this.domNode+"' widget with illegal ref expression: '"+_e+"'");
}
}
}
}
}
}
}
if(_f){
if(_1.isFunction(_f.toPlainObject)){
this.binding=_f;
this._updateBinding("binding",null,_f);
}else{
throw new Error("dojox.mvc._DataBindingMixin: '"+this.domNode+"' widget with illegal ref not evaluating to a dojo.Stateful node: '"+_e+"'");
}
}
},_isEqual:function(one,_11){
return one===_11||isNaN(one)&&typeof one==="number"&&isNaN(_11)&&typeof _11==="number";
},_updateBinding:function(_12,old,_13){
this._unwatchArray(this._modelWatchHandles);
var _14=this.get("binding");
if(_14&&_1.isFunction(_14.watch)){
var _15=this;
this._modelWatchHandles=[_14.watch("value",function(_16,old,_17){
if(_15._isEqual(old,_17)){
return;
}
if(_15._isEqual(_15.get("value"),_17)){
return;
}
_15.set("value",_17);
}),_14.watch("valid",function(_18,old,_19){
_15._updateProperty(_18,old,_19,true);
if(_19!==_15.get(_18)){
if(_15.validate&&_1.isFunction(_15.validate)){
_15.validate();
}
}
}),_14.watch("required",function(_1a,old,_1b){
_15._updateProperty(_1a,old,_1b,false,_1a,_1b);
}),_14.watch("readOnly",function(_1c,old,_1d){
_15._updateProperty(_1c,old,_1d,false,_1c,_1d);
}),_14.watch("relevant",function(_1e,old,_1f){
_15._updateProperty(_1e,old,_1f,false,"disabled",!_1f);
})];
var val=_14.get("value");
if(val!=null){
this.set("value",val);
}
}
this._updateChildBindings();
},_updateProperty:function(_20,old,_21,_22,_23,_24){
if(old===_21){
return;
}
if(_21===null&&_22!==undefined){
_21=_22;
}
if(_21!==this.get("binding").get(_20)){
this.get("binding").set(_20,_21);
}
if(_23){
this.set(_23,_24);
}
},_updateChildBindings:function(_25){
var _26=this.get("binding")||_25;
if(_26&&!this._beingBound){
_2.forEach(_5.findWidgets(this.domNode),function(_27){
if(_27.ref&&_27._setupBinding){
_27._setupBinding(_26);
}else{
_27._updateChildBindings(_26);
}
});
}
},_getParentBindingFromDOM:function(){
var pn=this.domNode.parentNode,pw,pb;
while(pn){
pw=_5.getEnclosingWidget(pn);
if(pw){
pb=pw.get("binding");
if(pb&&_1.isFunction(pb.toPlainObject)){
break;
}
}
pn=pw?pw.domNode.parentNode:null;
}
return pb;
},_unwatchArray:function(_28){
_2.forEach(_28,function(h){
h.unwatch();
});
}});
}); | PypiClean |
/FLaP-0.6.0.tar.gz/FLaP-0.6.0/flap/util/oofs.py |
import os
from flap.util.path import Path, ROOT
class File:
    """A file inside a :class:`FileSystem`, identified by its path.

    The textual content is loaded lazily from the owning file system the
    first time it is requested.
    """

    def __init__(self, file_system, path, content):
        assert path, "Invalid path (found '%s')" % path.full()
        self.fileSystem = file_system
        self._path = path
        self._content = content

    @staticmethod
    def is_file():
        # Overridden by Directory, which is not a regular file.
        return True

    def is_directory(self):
        return not self.is_file()

    @classmethod
    def exists(cls):
        # Overridden by MissingFile, which never exists.
        return True

    def is_missing(self):
        return not self.exists()

    def contains(self, content):
        # NOTE(review): compares against the cached content only; if the
        # content has not been loaded yet this returns False — confirm that
        # behaviour is intended before switching to self.content().
        return self._content == content

    def content(self):
        # Lazily load the content from the owning file system.
        if not self._content:
            self._content = self.fileSystem.load(self._path)
        return self._content

    def path(self):
        return self._path

    def fullname(self):
        return self._path.fullname()

    def basename(self):
        return self._path.basename()

    def has_extension(self, extension=None):
        return self._path.has_extension(extension)

    def has_extension_from(self, candidates_extensions):
        # Bug fix: the original loop fell through and returned None (not
        # False) when no candidate matched; return an explicit boolean.
        return any(self._path.has_extension(extension)
                   for extension in candidates_extensions)

    def extension(self):
        return self._path.extension()

    def container(self):
        # The directory that contains this file.
        return self.fileSystem.open(self._path.container())

    def sibling(self, name):
        # A file with the given name in the same directory as this one.
        return self.fileSystem.open(self._path.container() / name)

    @classmethod
    def files(cls):
        # A plain file has no children; Directory overrides this.
        return []

    def files_that_matches(self, pattern):
        # All files in this directory whose path starts with the pattern.
        path = Path.fromText(str(self._path) + "/" + str(pattern))
        directory = self.fileSystem.open(path.container())
        return [any_file for any_file in directory.files()
                if str(any_file.path()).startswith(str(path))]

    def __repr__(self):
        return str(self.path())
class Directory(File):
    """A directory: a File that has children instead of textual content."""

    def __init__(self, file_system, path):
        super().__init__(file_system, path, None)

    def is_file(self):
        # Overrides File.is_file: a directory is not a regular file.
        return False

    def content(self):
        # Directories carry no textual content.
        return None

    def files(self):
        # The entries directly contained in this directory.
        return self.fileSystem.filesIn(self.path())
class MissingFile(File):
    """Null-object for a path that does not exist on the file system."""

    def __init__(self, path):
        # No owning file system and no content: the file is absent.
        super().__init__(None, path, None)

    def exists(self):
        # Overrides File.exists: a missing file never exists.
        return False

    def contains(self, content):
        # A missing file contains nothing.
        return False

    def content(self):
        return None

    @staticmethod
    def location():
        return None
class FileSystem:
    """Abstract interface of a file-system backend.

    Concrete implementations (OSFileSystem, InMemoryFileSystem) override
    these no-op methods.  Note the mixed naming convention
    (``createDirectory`` vs ``create_file``) is part of the public API.
    """

    def create_file(self, path, content):
        """Create (or overwrite) the file at *path* with *content*."""
        pass

    def createDirectory(self, path):
        """Create the directory at *path*, including parents."""
        pass

    def deleteDirectory(self, path):
        """Remove the directory at *path* and everything below it."""
        pass

    def open(self, path):
        """Return a File/Directory handle for *path*."""
        pass

    def filesIn(self, path):
        """Return the files directly contained in the directory *path*."""
        pass

    def copy(self, file, destination):
        """Copy *file* to *destination* (a file path or a directory)."""
        pass

    def load(self, path):
        """Return the content stored at *path*."""
        pass

    def move_to_directory(self, path):
        """Change the current working directory to *path*."""
        pass
class OSFileSystem(FileSystem):
    """FileSystem implementation backed by the real operating system."""

    def __init__(self):
        super().__init__()
        self.current_directory = Path.fromText(os.getcwd())

    @staticmethod
    def for_OS(path):
        """Convert an abstract Path into a native OS path string."""
        return os.path.sep.join([eachPart.fullname()
                                 for eachPart in path.parts()])

    def move_to_directory(self, path):
        os.chdir(self.for_OS(path))

    def create_file(self, path, content):
        # Make sure the parent directories exist before writing.
        self._create_path(path)
        os_path = self.for_OS(path)
        with open(os_path, "w") as f:
            f.write(content)

    def deleteDirectory(self, path):
        import shutil
        osPath = self.for_OS(path)
        if os.path.exists(osPath):
            shutil.rmtree(osPath)

    def open(self, path):
        """Return a Directory or File handle depending on what *path* is."""
        osPath = self.for_OS(path)
        if os.path.isdir(osPath):
            return Directory(self, path)
        else:
            # Content is None so it gets lazily loaded on demand.
            return File(self, path, None)

    def filesIn(self, path):
        return [self.open(path / each)
                for each in os.listdir(self.for_OS(path))]

    def copy(self, file, destination):
        import shutil
        self._create_path(destination)
        source = self.for_OS(file.path())
        # A destination with an extension is a file name; otherwise it is
        # a directory and the source keeps its own name.
        target = destination if destination.has_extension() \
            else destination / file.fullname()
        shutil.copyfile(source, self.for_OS(target))

    def _create_path(self, path):
        """Create the directory part of *path* if it does not exist yet."""
        targetDir = path
        if path.has_extension():
            targetDir = path.container()
        os_target = self.for_OS(targetDir)
        if not os.path.exists(os_target):
            os.makedirs(os_target)

    def load(self, path):
        """Read and return the text content of the file at *path*."""
        assert path, "Invalid path (found '%s')" % path
        os_path = self.for_OS(path)
        with open(os_path) as file:
            return file.read()
class InMemoryFileSystem(FileSystem):
    """FileSystem implementation stored entirely in a dict (for tests)."""

    def __init__(self, path_separator=os.path.sep):
        super().__init__()
        # Maps absolute Path -> File/Directory handle.
        self.drive = {}
        self._current_directory = ROOT
        self.pathSeparator = path_separator

    def move_to_directory(self, path):
        self._current_directory = path.absolute_from(self._current_directory)

    def createDirectory(self, path):
        if path in self.drive.keys():
            if self.drive[path].is_file():
                raise ValueError("There is already a resource at '%s'"
                                 % path.full())
        self.drive[path] = Directory(self, path)
        # Recursively ensure all parent directories exist.
        if not path.isRoot():
            self.createDirectory(path.container())

    def create_file(self, path, content):
        if not isinstance(content, str):
            raise ValueError("File content should be text!")
        # Store under the absolute path so relative opens resolve later.
        absolute = path.absolute_from(self._current_directory)
        self.drive[absolute] = File(self, absolute, content)
        self.createDirectory(absolute.container())

    def filesIn(self, path):
        # Direct children only: exactly one path component deeper.
        return [self.drive[p] for p in self.drive.keys()
                if p in path and len(p.parts()) == len(path.parts()) + 1]

    def open(self, path):
        absolute = path.absolute_from(self._current_directory)
        if absolute in self.drive.keys():
            return self.drive[absolute]
        else:
            # Null-object: callers can query without try/except.
            return MissingFile(absolute)

    def copy(self, file, destination):
        # A destination with an extension is a file name; otherwise it is
        # a directory and the source keeps its own name.
        if destination.has_extension():
            path = destination
        else:
            path = destination / file.path().fullname()
        absolute = path.absolute_from(self._current_directory)
        self.drive[absolute] = File(self, absolute, file.content())
/GenIce-1.0.11.tar.gz/GenIce-1.0.11/genice/lattices/Struct16.py | pairs="""
130 194
58 74
80 211
151 4
47 173
213 112
9 30
170 86
105 77
115 38
1 115
12 52
137 193
122 134
99 205
66 71
18 145
198 155
135 61
168 3
16 119
78 169
150 160
204 138
178 195
124 192
73 19
167 81
189 7
56 37
63 90
64 91
93 64
123 65
141 17
84 27
205 94
136 182
113 161
133 73
57 145
208 55
26 100
123 204
83 166
219 187
141 55
7 210
99 120
172 157
169 197
40 126
122 27
26 122
199 20
200 22
163 155
132 83
117 102
206 35
100 47
92 21
178 185
202 90
82 163
188 14
28 134
131 50
69 206
218 22
45 167
118 21
162 35
116 44
182 129
63 57
111 54
159 150
176 162
11 174
31 106
132 206
212 120
164 118
37 20
0 72
19 36
144 70
166 25
117 209
84 14
58 102
185 173
107 111
2 119
108 114
141 142
51 52
92 93
0 184
135 152
147 45
135 126
48 7
170 46
117 191
143 3
17 180
60 96
6 109
48 45
208 203
185 54
42 22
186 210
28 68
34 91
10 86
193 53
71 205
147 165
79 221
10 89
164 64
164 155
46 152
24 203
185 44
88 134
140 93
87 211
123 56
57 46
211 53
193 175
169 76
12 200
104 54
188 122
125 104
168 38
85 174
127 109
1 69
16 74
150 158
212 67
208 162
148 94
132 177
189 215
87 59
82 175
144 65
153 181
130 89
139 121
181 103
140 155
164 192
187 49
89 216
4 83
26 215
199 172
24 110
124 175
32 194
125 212
157 114
41 171
109 161
13 111
12 114
81 134
159 31
25 206
214 202
56 147
23 167
96 192
1 182
47 27
166 180
10 145
210 70
18 161
118 39
162 17
178 95
138 128
2 92
76 80
90 152
210 100
80 191
11 132
170 216
15 41
18 61
160 196
189 65
28 149
146 29
54 99
33 133
37 160
204 108
207 110
76 203
176 129
172 3
27 149
102 197
141 50
101 190
16 220
188 158
140 103
202 113
69 156
151 172
178 88
51 106
2 87
195 116
43 174
187 108
101 36
139 66
34 29
188 186
62 156
99 183
11 9
30 35
75 171
123 48
71 112
19 86
146 207
37 187
33 170
49 196
24 97
126 198
129 156
127 198
51 168
125 213
16 79
5 215
221 53
220 53
160 106
32 202
116 120
77 100
165 70
39 184
84 167
105 67
154 73
107 44
39 163
104 95
195 67
117 55
58 97
218 38
79 39
119 118
52 196
213 98
201 92
144 158
143 11
189 14
77 81
73 139
48 128
184 103
119 91
75 209
215 88
109 214
209 76
110 55
125 68
121 179
201 79
168 108
116 149
23 88
4 182
143 115
21 96
151 218
191 207
80 97
5 77
52 85
40 60
33 101
43 136
177 85
13 33
75 62
131 30
23 7
15 142
86 113
56 158
42 115
58 59
159 14
104 179
130 126
153 90
78 110
28 95
31 165
219 199
213 107
136 200
94 217
137 201
198 181
128 70
66 120
1 177
194 60
62 25
36 145
5 195
157 138
32 61
204 106
49 3
173 183
6 40
142 78
127 61
221 64
211 91
34 191
154 190
157 196
31 65
13 205
154 113
179 94
143 4
131 62
150 128
15 197
32 10
112 36
15 176
133 71
186 81
12 219
137 74
85 38
135 216
148 95
176 166
127 72
190 216
20 165
146 2
72 192
29 193
148 67
171 102
159 45
98 19
83 22
43 219
59 207
68 105
34 74
142 156
41 25
23 47
30 129
72 194
9 136
43 151
133 57
8 35
105 173
0 153
5 84
89 63
29 93
68 44
212 217
214 181
214 60
41 8
42 9
13 98
217 139
144 26
146 220
17 69
131 180
147 186
169 59
82 96
121 111
42 180
101 121
174 49
51 199
124 221
140 175
40 163
177 200
21 103
24 171
6 152
154 63
8 203
190 18
208 197
183 149
148 183
6 0
218 114
75 78
20 138
82 201
98 217
112 179
66 107
209 50
8 50
124 184
130 153
46 161
137 87
220 97
"""
waters="""
0.66922 0.70961 0.68577
0.33077 0.29039 0.31424
0.41923 0.20961 0.55512
0.0 0.0 0.23421
0.12372 0.87628 0.29514
0.33589 0.29295 0.0191
0.87628 0.75256 0.70486
0.08077 0.54039 0.07423
0.74744 0.87372 0.40756
0.70961 0.04039 0.31424
0.45961 0.91923 0.77788
0.75256 0.87628 0.29514
0.66667 0.33333 0.23454
0.04295 0.70705 0.86606
0.45961 0.54039 0.07423
0.12628 0.87372 0.40756
0.79039 0.58077 0.55512
0.20961 0.41923 0.37153
0.87628 0.12372 0.77821
0.41667 0.08333 0.83333
0.33333 0.91667 0.16667
0.41923 0.20961 0.62847
0.91922 0.45961 0.29062
0.91411 0.45705 0.0382
0.62372 0.62628 0.46728
0.79294 0.58589 0.37604
0.0 0.0 0.05729
0.70706 0.66411 0.0191
0.66411 0.95705 0.9809
0.20706 0.41411 0.55545
0.62372 0.99744 0.35243
0.33589 0.29294 0.13395
0.54294 0.08589 0.74089
0.91667 0.58334 0.83333
0.99744 0.37372 0.53272
0.58077 0.79039 0.37153
0.66667 0.08333 0.83333
0.58333 0.91667 0.16667
0.20706 0.4141 0.25911
0.79039 0.58077 0.62847
0.04038 0.70961 0.68577
0.87628 0.75256 0.40756
0.95961 0.29039 0.31424
0.45706 0.91411 0.25911
0.33334 0.66667 0.94722
0.66667 0.33334 0.09912
0.87628 0.75256 0.77821
0.95706 0.66411 0.0191
0.9141 0.45705 0.11154
0.75256 0.87628 0.22179
0.87628 0.12372 0.40756
0.33078 0.29039 0.19939
0.54039 0.45961 0.22212
0.58589 0.79295 0.55545
0.91923 0.45961 0.92577
0.20961 0.41923 0.44487
0.70706 0.04295 0.13395
0.66923 0.70962 0.80061
0.0 0.75 0.5
0.25 0.0 0.5
0.29039 0.95961 0.68577
0.79295 0.20705 0.74089
0.66667 0.33334 0.39063
0.45961 0.54039 0.77788
0.87372 0.12628 0.59244
0.12628 0.25256 0.11121
0.45706 0.54295 0.88846
0.25256 0.12628 0.95729
0.45706 0.91411 0.96181
0.37372 0.37628 0.35243
0.12628 0.87372 0.11121
0.66411 0.70705 0.86605
0.66923 0.95962 0.68577
0.41667 0.33334 0.83333
0.99744 0.62372 0.53272
0.66667 0.33334 0.43213
0.62372 0.99744 0.46728
0.33589 0.04295 0.0191
0.41411 0.20706 0.44455
0.87372 0.74744 0.59244
0.75 0.0 0.5
0.54295 0.0859 0.0382
0.20705 0.79295 0.62396
0.95961 0.66923 0.31424
0.54295 0.45705 0.0382
0.45706 0.54295 0.25911
0.29039 0.95961 0.80061
0.37628 0.00256 0.53272
0.95706 0.29295 0.0191
0.33333 0.66667 0.76546
0.54295 0.45706 0.74089
0.79039 0.20961 0.55512
0.25256 0.12628 0.59244
0.12372 0.24744 0.59244
0.0 0.0 0.90121
0.87372 0.12628 0.95729
0.37628 0.00256 0.64757
0.75 0.75 0.5
0.29295 0.95705 0.86606
0.79039 0.58078 0.92578
0.12628 0.87372 0.04271
0.91667 0.33334 0.83333
0.00256 0.62628 0.46728
0.37628 0.37372 0.64757
0.79039 0.20961 0.92578
0.29295 0.95705 0.9809
0.33333 0.41667 0.16667
0.33334 0.66667 0.90088
0.95961 0.29039 0.19939
0.0 0.0 0.71944
0.37372 0.37628 0.46728
0.0859 0.54295 0.88846
0.66411 0.95705 0.86605
0.24744 0.12372 0.77821
0.91922 0.45961 0.22212
0.12372 0.24744 0.29514
0.45706 0.54295 0.96181
0.00256 0.37628 0.46728
0.66667 0.33333 0.61389
0.66667 0.33333 0.56754
0.54039 0.45962 0.92578
0.04295 0.3359 0.86606
0.74744 0.87372 0.04271
0.95705 0.29294 0.13395
0.58589 0.79295 0.62396
0.54039 0.08077 0.92578
0.08077 0.54039 0.70938
0.87628 0.12372 0.70486
0.95705 0.66411 0.13395
0.37372 0.99744 0.35243
0.33333 0.66667 0.72396
0.79294 0.20706 0.37604
0.70961 0.66923 0.31424
0.66667 0.58334 0.83333
0.70705 0.04294 0.0191
0.91411 0.45706 0.74089
0.54039 0.08077 0.29062
0.20706 0.79295 0.55545
0.08333 0.66667 0.16667
0.29295 0.3359 0.86606
0.20705 0.41411 0.62396
0.12628 0.25256 0.40756
0.24744 0.12372 0.40756
0.0 0.0 0.28056
0.0 0.0 0.0988
0.66923 0.95961 0.80061
0.37628 0.37372 0.53272
0.54295 0.0859 0.11154
0.0 0.0 0.94271
0.66411 0.70706 0.9809
0.70706 0.66411 0.13395
0.20706 0.79295 0.25911
0.79294 0.5859 0.74089
0.45961 0.54039 0.70938
0.29039 0.33078 0.80061
0.99744 0.37372 0.64757
0.41411 0.20706 0.37604
0.95961 0.66923 0.19939
0.74744 0.87372 0.11121
0.54295 0.45705 0.11154
0.58333 0.66667 0.16667
0.0 0.0 0.76579
0.33333 0.66667 0.38611
0.99744 0.62372 0.64757
0.79039 0.20962 0.62847
0.33589 0.04295 0.13395
0.00256 0.62628 0.35243
0.66667 0.33334 0.05278
0.12372 0.24744 0.22179
0.37372 0.99744 0.46728
0.04039 0.70961 0.80061
0.79294 0.58589 0.44455
0.12372 0.87628 0.22179
0.04295 0.70706 0.9809
0.5859 0.79295 0.25911
0.33333 0.66667 0.60937
0.20961 0.79039 0.37153
0.54039 0.45961 0.29062
0.04295 0.33589 0.9809
0.87372 0.12628 0.88879
0.00256 0.37628 0.35243
0.29039 0.33077 0.68576
0.33077 0.04039 0.31424
0.87372 0.74744 0.95729
0.62628 0.62372 0.64757
0.0859 0.54295 0.96181
0.45961 0.91923 0.07423
0.70961 0.04039 0.19939
0.58078 0.79039 0.07423
0.20961 0.41922 0.07423
0.04039 0.33078 0.80061
0.0 0.25 0.5
0.62628 0.00256 0.64757
0.33334 0.66667 0.56787
0.45961 0.91923 0.70938
0.29295 0.33589 0.9809
0.70961 0.66923 0.19939
0.20961 0.79039 0.44487
0.04039 0.33078 0.68577
0.33078 0.04039 0.19939
0.66667 0.33333 0.27604
0.12372 0.87628 0.59244
0.4141 0.20705 0.74089
0.58077 0.79039 0.44488
0.08333 0.41667 0.16667
0.87372 0.74744 0.88879
0.62372 0.62628 0.35243
0.25 0.25 0.5
0.33333 0.66667 0.43246
0.79295 0.20706 0.44455
0.20961 0.79039 0.07423
0.62628 0.00256 0.53272
0.41922 0.20961 0.92577
0.45706 0.9141 0.88846
0.24744 0.12372 0.70486
0.12628 0.25256 0.04271
0.08078 0.54039 0.77788
0.25256 0.12628 0.88879
0.08589 0.54295 0.25911
0.54039 0.08077 0.22212
0.62628 0.62372 0.53272
0.75256 0.87628 0.59244
"""
coord= "relative"
cages="""
14 0.33333333333333337 0.6666666666666667 0.12606666666666666
12 -0.34357 -0.17179 -0.07639
12 0.33333 0.66667 -0.33333
16 0.9999966666666668 2.0000033333333334 1.3513933333333334
12 0.34357 0.17179 0.07639
12 0.6769033333333333 0.8384566666666667 0.7430566666666667
12 -0.6769033333333333 -0.8384566666666667 -0.7430566666666667
15 0.33333333333333337 0.6666666666666667 0.8326766666666667
15 0.0 0.0 -0.16601
14 0.0 0.0 0.5406
16 0.66667 0.33333 -0.01806
12 -0.010236666666666666 0.49487666666666663 0.5902766666666667
12 -0.17178 -0.34357 0.07639
12 0.17178 -0.17179 -0.07639
12 0.5051233333333334 1.0102366666666667 0.5902766666666667
14 0.33333333333333337 0.6666666666666667 1.2072666666666667
14 0.6666666666666667 1.3333333333333335 1.8739333333333335
16 0.6666633333333333 1.3333366666666666 0.6847266666666667
12 0.5051133333333333 0.49487666666666663 0.5902766666666667
15 0.6666666666666667 0.33333333333333337 -0.8326766666666667
12 1.0102366666666667 1.5051233333333334 1.4097233333333334
16 1.3333366666666666 0.6666633333333333 -0.6847266666666667
12 -0.17179 0.17178 0.07639
12 0.17179 0.34357 -0.07639
12 1.0000033333333334 0.9999966666666668 0.9999966666666668
12 0.16154333333333334 0.8384466666666667 0.7430566666666667
12 0.8384466666666667 0.16154333333333334 -0.7430566666666667
16 0.33333 0.66667 0.01806
14 0.0 0.0 -0.5406
16 1.0000033333333334 0.9999966666666668 0.6486066666666667
15 0.33333333333333337 0.6666666666666667 0.5006566666666666
12 0.66667 0.33333 0.33333
12 -0.5051233333333334 -1.0102366666666667 -0.5902766666666667
15 0.6666666666666667 1.3333333333333335 1.4993433333333335
12 0.16155333333333333 0.3230966666666667 0.7430566666666667
15 0.0 0.0 0.16601
14 0.6666666666666667 0.33333333333333337 -1.2072666666666667
12 0.8384566666666667 0.6769033333333333 -0.7430566666666667
12 0.49487666666666663 1.5051133333333335 1.4097233333333334
"""
# Distance cutoff used when pairing molecules into hydrogen bonds
# (presumably in Angstrom -- TODO confirm against GenIce conventions).
bondlen = 3
# Raw 3x3 cell matrix, one lattice vector per row; superseded by the
# cellvectors() call below.
cell = """
12.746393818818838 0.0 0.0
-6.373196909409416 11.038700853738058 0.0
4.332977656078051e-15 7.504937448387889e-15 70.76289521345811
"""
# Number density of the structure (units defined by GenIce -- TODO confirm).
density = 0.6664596154282791

from genice.cell import cellvectors
# Rebuild the cell from lattice parameters; C=120 deg matches the
# hexagonal geometry of the raw matrix above.
cell = cellvectors(a=12.746393818818838,
b=12.746393818818838,
c=70.76289521345811,
C=119.99999999999999)
/Amino.ed-2.8.4.13.tar.gz/Amino.ed-2.8.4.13/aminoed/helpers/types.py | GLOBAL_ID = 0
class CallTypes:
    """Numeric codes for the kinds of chat calls."""
    NONE: int = 0
    VOICE: int = 1
    VIDEO: int = 2
    AVATAR: int = 3
    SCREEN_ROOM: int = 4
class GenderTypes:
    """Numeric gender codes.

    Note: ``FAMALE`` is a pre-existing typo kept for backward
    compatibility with callers.
    """
    MALE: int = 1
    FAMALE: int = 2
    NONE: int = 255
class CallPermissionTypes:
    """Who may join a voice/video chat."""
    OPEN_TO_EVERYONE: int = 1
    JOIN_REQUEST: int = 2
    INVITE_ONLY: int = 3
class FlagTypes:
    """Numeric reason codes used when flagging content."""
    AGGRESSION: int = 0
    SPAM: int = 2
    OFFTOPIC: int = 4
    VIOLENCE: int = 106
    INTOLERANCE: int = 107
    SUICIDE: int = 108
    TROLLING: int = 109
    PORNOGRAPHY: int = 110
class ContentTypes:
    """MIME content-type strings used in request headers.

    ``JPG`` uses the nonstandard "image/jpg" (standard is "image/jpeg");
    it is a runtime value the server expects, so it must not be changed.
    """
    AAC: str = "audio/aac"
    JPG: str = "image/jpg"
    PNG: str = "image/png"
    JSON: str = "application/json; charset=utf-8"
    URL_ENCODED: str = "application/x-www-form-urlencoded"
    OCTET_STREAM: str = "application/octet-stream"
class SortingTypes:
    """Sort-order query values."""
    NEWEST: str = "newest"
    OLDEST: str = "oldest"
    TOP: str = "vote"
class RepairTypes:
    """Payment methods for repairing (string-typed by the API)."""
    COINS: str = "1"
    MEMBERSHIP: str = "2"
class ActivityStatusTypes:
    """Online-status toggle values."""
    ON: int = 1
    OFF: int = 2
class ChatPublishTypes:
    """Chat visibility toggle values."""
    IS_GLOBAL: int = 2
    OFF: int = 0
    ON: int = 1
class SourceTypes:
    """UI source identifiers attached to requests."""
    USER_PROFILE: str = "UserProfileView"
    DATAIL_POST: str = "PostDetailView"
    GLOBAL_COMPOSE: str = "GlobalComposeMenu"
class UserTypes:
    """User-list filter values."""
    RECENT: str = "recent"
    BANNED: str = "banned"
    FEATURED: str = "featured"
    LEADERS: str = "leaders"
    CURATORS: str = "curators"
class PostTypes:
    """Post-kind filter values."""
    BLOG: str = "blog"
    WIKI: str = "wiki"
    USER: str = "user"
class LeadernoardTypes:
    """Leaderboard ranking categories.

    Note: the class name is a pre-existing typo ("Leaderboard") kept for
    backward compatibility with callers.
    """
    DAY: int = 1
    WEEK: int = 2
    REPUTATION: int = 3
    CHECK_IN: int = 4
    QUIZ: int = 5
class FeaturedTypes:
    """Feature-target codes (BLOG and WIKI share the same code)."""
    UNFEATURE: int = 0
    USER: int = 4
    BLOG: int = 1
    WIKI: int = 1
    CHAT: int = 5
class ObjectTypes:
    """Numeric codes identifying Amino object kinds (users, posts,
    messages, media, store items, ...)."""
    USER: int = 0
    BLOG: int = 1
    ITEM: int = 2
    COMMENT: int = 3
    BLOG_CATEGORY: int = 4
    BLOG_CATEGORY_ITEM_TAG: int = 5
    FEATURED_ITEM: int = 6
    CHAT_MESSAGE: int = 7
    REPUTATIONLOG_ITEM: int = 10
    POLL_OPTION: int = 11
    CHAT_THREAD: int = 12
    COMMUNITY: int = 16
    IMAGE: int = 100
    MUSIC: int = 101
    VIDEO: int = 102
    YOUTUBE: int = 103
    SHARED_FOLDER: int = 106
    FOLDER_FILE: int = 109
    VOICE: int = 110
    MODERATION_TASK: int = 111
    SCREENSHOT: int = 112
    STICKER: int = 113
    STICKER_COLLECTION: int = 114
    PROP: int = 115
    CHAT_BUBBLE: int = 116
    VIDEO_FILTER: int = 117
    ORDER: int = 118
    SHARE_REQUEST: int = 119
    VV_CHAT: int = 120
    P2A: int = 121
    SUBSCRIPTION: int = 122
    AMINO_VIDEO: int = 123
class MessageTypes:
    """Numeric codes identifying the kind of a chat message (regular
    content, call events, and system notifications)."""
    GENERAL: int = 0
    STRIKE: int = 1
    VOICE: int = 2
    STICKER: int = 3
    VIDEO: int = 4
    SHARE_EXURL: int = 50
    SHARE_USER: int = 51
    CALL_NO_ANSWERED: int = 52
    CALL_CANCELLED: int = 53
    CALL_DECLINED: int = 54
    VIDEO_CALL_NO_ANSWERED: int = 55
    VIDEO_CALL_CANCELLED: int = 56
    VIDEO_CALL_DECLINED: int = 57
    AVATAR_CALL_NO_ANSWERED: int = 58
    AVATAR_CALL_CANCELLED: int = 59
    AVATAR_CALL_DECLINED: int = 60
    DELETED: int = 100
    MEMBER_JOIN: int = 101
    MEMBER_QUIT: int = 102
    PRIVATE_CHAT_INIT: int = 103
    BACKGROUND_CHANGE: int = 104
    TITLE_CHANGE: int = 105
    ICON_CHANGE: int = 106
    START_VOICE_CHAT: int = 107
    START_VIDEO_CHAT: int = 108
    START_AVATAR_CHAT: int = 109
    END_VOICE_CHAT: int = 110
    END_VIDEO_CHAT: int = 111
    END_AVATAR_CHAT: int = 112
    CONTENT_CHANGE: int = 113
    START_SCREENING_ROOM: int = 114
    END_SCREENING_ROOM: int = 115
    ORGANIZER_TRANSFERRED: int = 116
    FORCE_REMOVED_FROM_CHAT: int = 117
    CHAT_REMOVED: int = 118
    DELETED_BY_ADMIN: int = 119
    SEND_COINS: int = 120
    PIN_ANNOUNCEMENT: int = 121
    VV_CHAT_PERMISSION_OPEN_TO_EVERYONE: int = 122
    VV_CHAT_PERMISSION_INVITED: int = 123
    VV_CHAT_PERMISSION_INVITE_ONLY: int = 124
    ENABALE_VIEW_ONLY: int = 125
    DISABALE_VIEW_ONLY: int = 126
    UNPIN_ANNOUNCEMENT: int = 127
    ENABLE_TIP_PERMISSION: int = 128
    DISABLE_TIP_PERMISSION: int = 129
    TIMESTAMP: int = 65281
    WELCOME_MESSAGE: int = 65282
    INVITE_MESSAGE: int = 65283
class EventTypes:
    """Socket event keys.

    Composite keys follow the pattern "<messageType>:<mediaType>" built
    from :class:`MessageTypes` and :class:`ObjectTypes` values (e.g.
    "0:100" = general message with image media).  The annotations from
    SHARE_EXURL onward were wrongly declared ``int``; corrected to
    ``str`` -- the values themselves are unchanged.
    """
    ANY: str = "any"
    ACTION: str = "action"
    NOTIFICATION: str = "notification"
    USER_TYPING_START: str = "user_typing_start"
    USER_TYPING_END: str = "user_typing_end"
    MESSAGE: str = "message"
    TEXT_MESSAGE: str = "0:0"
    IMAGE_MESSAGE: str = "0:100"
    YOUTUBE_MESSAGE: str = "0:103"
    STRIKE_MESSAGE: str = "1:0"
    VOICE_MESSAGE: str = "2:110"
    STICKER_MESSAGE: str = "3:113"
    VIDEO_MESSAGE: str = "4:0"
    SHARE_EXURL: str = "50:0"
    SHARE_USER: str = "51:0"
    CALL_NO_ANSWERED: str = "52:0"
    CALL_CANCELLED: str = "53:0"
    CALL_DECLINED: str = "54:0"
    VIDEO_CALL_NO_ANSWERED: str = "55:0"
    VIDEO_CALL_CANCELLED: str = "56:0"
    VIDEO_CALL_DECLINED: str = "57:0"
    AVATAR_CALL_NO_ANSWERED: str = "58:0"
    AVATAR_CALL_CANCELLED: str = "59:0"
    AVATAR_CALL_DECLINED: str = "60:0"
    DELETE_MESSAGE: str = "100:0"
    MEMBER_JOIN: str = "101:0"
    MEMBER_QUIT: str = "102:0"
    PRIVATE_CHAT_INIT: str = "103:0"
    BACKGROUND_CHANGE: str = "104:0"
    TITLE_CHANGE: str = "105:0"
    ICON_CHANGE: str = "106:0"
    VOICE_CHAT_START: str = "107:0"
    VIDEO_CHAT_START: str = "108:0"
    AVATAR_CHAT_START: str = "109:0"
    VOICE_CHAT_END: str = "110:0"
    VIDEO_CHAT_END: str = "111:0"
    AVATAR_CHAT_END: str = "112:0"
    CONTENT_CHANGE: str = "113:0"
    SCREENING_ROOM_START: str = "114:0"
    SCREENING_ROOM_END: str = "115:0"
    ORGANIZER_TRANSFERRED: str = "116:0"
    FORCE_REMOVED_FROM_CHAT: str = "117:0"
    CHAT_REMOVED: str = "118:0"
    ADMIN_DELETE_MESSAGE: str = "119:0"
    SEND_COINS: str = "120:0"
    ANNOUNCEMENT_PIN: str = "121:0"
    VV_CHAT_PERMISSION_OPEN_TO_EVERYONE: str = "122:0"
    VV_CHAT_PERMISSION_INVITED: str = "123:0"
    VV_CHAT_PERMISSION_INVITE_ONLY: str = "124:0"
    VIEW_ONLY_ENABLE: str = "125:0"
    VIEW_ONLY_DISABLE: str = "126:0"
    ANNOUNCEMENT_UNPIN: str = "127:0"
    TIP_PERMISSION_ENABLE: str = "128:0"
    TIP_PERMISSION_DISABLE: str = "129:0"
    TIMESTAMP: str = "65281:0"
    WELCOME_MESSAGE: str = "65282:0"
    INVITE_MESSAGE: str = "65283:0"
class PathTypes:
    """Dotted configuration paths for community settings updates."""
    WELCOME_MESSAGE = "general.welcomeMessage"
    CHAT = "module.chat.enabled"
    LIVE_CHAT = "module.chat.avChat.videoEnabled"
    SCREENING_ROOM = "module.chat.avChat.screeningRoomEnabled"
    PUBLIC_CHAT = "module.chat.publicChat.enabled"
    POST = "module.post.enabled"
    RANKING = "module.ranking.enabled"
    LEADERBOARD = "module.ranking.leaderboardEnabled"
    FEATURED = "module.featured.enabled"
    FEATURED_POST = "module.featured.postEnabled"
    FEATURED_MEMBER = "module.featured.memberEnabled"
    FEATURED_CHAT = "module.featured.publicChatRoomEnabled"
    SHARED_FOLDER = "module.sharedFolder.enabled"
    INFLUENCER = "module.influencer.enabled"
    CATALOG = "module.catalog.enabled"
    EXTERNAL_CONTENT = "module.externalContent.enabled"
    TOPIC_CATEGORIES = "module.topicCategories.enabled"
    LEFT_SIDE_PANEL_ICON_COLOR = "appearance.leftSidePanel.style.iconColor"
class Language:
    """Locale codes accepted by the API (values are runtime strings)."""
    RU: str = "ru-UA"
    ENG: str = "en-US"
class PromoteRankTypes:
    """Rank identifiers used when promoting community members."""
    AGENT = "transfer-agent"
    LEADER = "leader"
    CURATOR = "curator"
def allTypes(self):
    """Collect every plain ``int``/``str`` value stored on *self*.

    Containers and other attribute values are ignored; insertion order
    is preserved.  The original ``self: classmethod`` and ``tuple``
    annotations were wrong and have been removed.
    """
    return [value for value in self.__dict__.values()
            if isinstance(value, (int, str))]
/Kivy-2.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl/kivy/lib/ddsfile.py |
from struct import pack, unpack, calcsize
# DDSURFACEDESC2 dwFlags
DDSD_CAPS = 0x00000001
DDSD_HEIGHT = 0x00000002
DDSD_WIDTH = 0x00000004
DDSD_PITCH = 0x00000008
DDSD_PIXELFORMAT = 0x00001000
DDSD_MIPMAPCOUNT = 0x00020000
DDSD_LINEARSIZE = 0x00080000
DDSD_DEPTH = 0x00800000
# DDPIXELFORMAT dwFlags
DDPF_ALPHAPIXELS = 0x00000001
DDPF_FOURCC = 0x00000004
DDPF_RGB = 0x00000040
DDPF_LUMINANCE = 0x00020000
# DDSCAPS2 dwCaps1
DDSCAPS_COMPLEX = 0x00000008
DDSCAPS_TEXTURE = 0x00001000
DDSCAPS_MIPMAP = 0x00400000
# DDSCAPS2 dwCaps2
DDSCAPS2_CUBEMAP = 0x00000200
DDSCAPS2_CUBEMAP_POSITIVEX = 0x00000400
DDSCAPS2_CUBEMAP_NEGATIVEX = 0x00000800
DDSCAPS2_CUBEMAP_POSITIVEY = 0x00001000
DDSCAPS2_CUBEMAP_NEGATIVEY = 0x00002000
DDSCAPS2_CUBEMAP_POSITIVEZ = 0x00004000
DDSCAPS2_CUBEMAP_NEGATIVEZ = 0x00008000
DDSCAPS2_VOLUME = 0x00200000
# Common FOURCC codes
DDS_DXTN = 0x00545844
DDS_DXT1 = 0x31545844
DDS_DXT2 = 0x32545844
DDS_DXT3 = 0x33545844
DDS_DXT4 = 0x34545844
DDS_DXT5 = 0x35545844
def dxt_to_str(dxt):
    """Map a FOURCC/raw pixel-format code to its string name.

    Returns None for unrecognized codes, matching the historical
    fall-through behavior.
    """
    names = {
        DDS_DXT1: 's3tc_dxt1',
        DDS_DXT2: 's3tc_dxt2',
        DDS_DXT3: 's3tc_dxt3',
        DDS_DXT4: 's3tc_dxt4',
        DDS_DXT5: 's3tc_dxt5',
        0: 'rgba',
        1: 'alpha',
        2: 'luminance',
        3: 'luminance_alpha',
    }
    return names.get(dxt)
def str_to_dxt(dxt):
    """Map a pixel-format name back to its numeric/FOURCC code.

    Returns None for unrecognized names, matching the historical
    fall-through behavior.
    """
    codes = {
        's3tc_dxt1': DDS_DXT1,
        's3tc_dxt2': DDS_DXT2,
        's3tc_dxt3': DDS_DXT3,
        's3tc_dxt4': DDS_DXT4,
        's3tc_dxt5': DDS_DXT5,
        'rgba': 0,
        'alpha': 1,
        'luminance': 2,
        'luminance_alpha': 3,
    }
    return codes.get(dxt)
def align_value(val, b):
    """Round *val* up to the next multiple of *b*."""
    remainder = val % b
    if remainder:
        return val + (b - remainder)
    return val
def check_flags(val, fl):
    """Return True when every bit of the mask *fl* is set in *val*."""
    masked = val & fl
    return masked == fl
def dxt_size(w, h, dxt):
    """Return the byte size of a *w* x *h* DXT-compressed image.

    S3TC stores 4x4 pixel blocks (8 bytes for DXT1, 16 for DXT2-5).
    Returns -1 for unrecognized codes.
    """
    blocks_w = max(1, w // 4)
    blocks_h = max(1, h // 4)
    if dxt == DDS_DXT1:
        block_bytes = 8
    elif dxt in (DDS_DXT2, DDS_DXT3, DDS_DXT4, DDS_DXT5):
        block_bytes = 16
    else:
        return -1
    return blocks_w * blocks_h * block_bytes
class QueryDict(dict):
    """A dict whose keys can also be read and written as attributes."""

    def __getattr__(self, attr):
        try:
            return self.__getitem__(attr)
        except KeyError:
            # Fixed: __getattr__ must raise AttributeError (not KeyError)
            # per the Python data model, so hasattr(), getattr() with a
            # default, copy and pickle behave correctly.
            raise AttributeError(attr)

    def __setattr__(self, attr, value):
        self.__setitem__(attr, value)
class DDSException(Exception):
    """Raised when a .dds file is malformed, truncated or unsupported."""
    pass
class DDSFile(object):
    """Reader/writer for DirectDraw Surface (.dds) texture files.

    Supports uncompressed RGB(A)/luminance layouts and the S3TC DXT1-5
    compressed formats, including mipmap chains.
    """

    # (field name, index into the 31-dword DDS header) pairs.
    fields = (
        ('size', 0), ('flags', 1), ('height', 2),
        ('width', 3), ('pitchOrLinearSize', 4), ('depth', 5),
        ('mipmapCount', 6), ('pf_size', 18), ('pf_flags', 19),
        ('pf_fourcc', 20), ('pf_rgbBitCount', 21), ('pf_rBitMask', 22),
        ('pf_gBitMask', 23), ('pf_bBitMask', 24), ('pf_aBitMask', 25),
        ('caps1', 26), ('caps2', 27))

    def __init__(self, filename=None):
        super(DDSFile, self).__init__()
        self._dxt = 0
        self._fmt = None
        self.meta = meta = QueryDict()
        self.count = 0
        self.images = []
        self.images_size = []
        for field, index in DDSFile.fields:
            meta[field] = 0
        if filename:
            self.load(filename)

    def load(self, filename):
        """Parse *filename*, filling self.meta / self.images.

        :raises DDSException: on any structural problem in the file.
        """
        self.filename = filename
        with open(filename, 'rb') as fd:
            data = fd.read()

        if data[:4] != b'DDS ':
            raise DDSException('Invalid magic header {}'.format(data[:4]))

        # Read the 31-dword header that follows the magic.
        fmt = 'I' * 31
        fmt_size = calcsize(fmt)
        pf_size = calcsize('I' * 8)
        header, data = data[4:4 + fmt_size], data[4 + fmt_size:]
        if len(header) != fmt_size:
            raise DDSException('Truncated header in')

        # Depack the named fields into self.meta.
        header = unpack(fmt, header)
        meta = self.meta
        for name, index in DDSFile.fields:
            meta[name] = header[index]

        # Sanity checks on the header.
        if meta.size != fmt_size:
            raise DDSException('Invalid header size (%d instead of %d)' %
                               (meta.size, fmt_size))
        if meta.pf_size != pf_size:
            raise DDSException('Invalid pixelformat size (%d instead of %d)' %
                               (meta.pf_size, pf_size))
        if not check_flags(meta.flags,
                           DDSD_CAPS | DDSD_PIXELFORMAT | DDSD_WIDTH | DDSD_HEIGHT):
            raise DDSException('Not enough flags')
        if not check_flags(meta.caps1, DDSCAPS_TEXTURE):
            raise DDSException('Not a DDS texture')

        self.count = 1
        if check_flags(meta.flags, DDSD_MIPMAPCOUNT):
            if not check_flags(meta.caps1, DDSCAPS_COMPLEX | DDSCAPS_MIPMAP):
                raise DDSException('Invalid mipmap without flags')
            self.count = meta.mipmapCount

        # Work out the internal format code: 0-3 are raw layouts, other
        # values are the FOURCC of an S3TC compression.
        hasrgb = check_flags(meta.pf_flags, DDPF_RGB)
        hasalpha = check_flags(meta.pf_flags, DDPF_ALPHAPIXELS)
        hasluminance = check_flags(meta.pf_flags, DDPF_LUMINANCE)
        bpp = None
        dxt = block = pitch = 0
        if hasrgb or hasalpha or hasluminance:
            bpp = meta.pf_rgbBitCount
            if hasrgb and hasluminance:
                raise DDSException('File have RGB and Luminance')
            if hasrgb:
                dxt = 0
            elif hasalpha and not hasluminance:
                dxt = 1
            elif hasluminance and not hasalpha:
                dxt = 2
            elif hasalpha and hasluminance:
                dxt = 3
        elif check_flags(meta.pf_flags, DDPF_FOURCC):
            dxt = meta.pf_fourcc
            if dxt not in (DDS_DXT1, DDS_DXT2, DDS_DXT3, DDS_DXT4, DDS_DXT5):
                raise DDSException('Unsupported FOURCC')
        else:
            raise DDSException('Unsupported format specified')

        if bpp:
            # Rows are padded to 4-byte alignment in raw layouts.
            block = align_value(bpp, 8) // 8
            pitch = align_value(block * meta.width, 4)

        if check_flags(meta.flags, DDSD_LINEARSIZE):
            if dxt in (0, 1, 2, 3):
                size = pitch * meta.height
            else:
                size = dxt_size(meta.width, meta.height, dxt)

        # Slice one image per mipmap level, halving dimensions each step.
        w = meta.width
        h = meta.height
        images = self.images
        images_size = self.images_size
        for i in range(self.count):
            if dxt in (0, 1, 2, 3):
                size = align_value(block * w, 4) * h
            else:
                size = dxt_size(w, h, dxt)
            image, data = data[:size], data[size:]
            if len(image) < size:
                raise DDSException('Truncated image for mipmap %d' % i)
            images_size.append((w, h))
            images.append(image)
            if w == 1 and h == 1:
                break
            w = max(1, w // 2)
            h = max(1, h // 2)

        if len(images) == 0:
            raise DDSException('No images available')
        if len(images) < self.count:
            raise DDSException('Not enough images')

        self._dxt = dxt

    def save(self, filename):
        """Write the current header and image data to *filename*.

        :raises DDSException: when no image has been loaded or added.
        """
        if len(self.images) == 0:
            raise DDSException('No images to save')

        # Rebuild the 31-dword header from self.meta; indices not listed
        # in DDSFile.fields are written as zero.
        fields = dict(DDSFile.fields)
        fields_keys = list(fields.keys())
        fields_index = list(fields.values())
        mget = self.meta.get
        header = []
        for idx in range(31):
            if idx in fields_index:
                value = mget(fields_keys[fields_index.index(idx)], 0)
            else:
                value = 0
            header.append(value)

        with open(filename, 'wb') as fd:
            # Fixed: the magic must be bytes -- writing the str 'DDS ' to
            # a binary-mode file raises TypeError on Python 3.
            fd.write(b'DDS ')
            fd.write(pack('I' * 31, *header))
            for image in self.images:
                fd.write(image)

    def add_image(self, level, bpp, fmt, width, height, data):
        """Append one mipmap *level* of pixel *data* in format *fmt*.

        Level 0 initializes the header; subsequent levels must come in
        order and share the same format.
        """
        assert bpp == 32
        assert fmt in ('rgb', 'rgba', 'dxt1', 'dxt2', 'dxt3', 'dxt4', 'dxt5')
        assert width > 0
        assert height > 0
        assert level >= 0
        meta = self.meta
        images = self.images
        if len(images) == 0:
            assert level == 0

            # First image: reset and populate the header.
            for k in meta.keys():
                meta[k] = 0
            self._fmt = fmt
            meta.size = calcsize('I' * 31)
            meta.pf_size = calcsize('I' * 8)
            meta.pf_flags = 0
            meta.flags = DDSD_CAPS | DDSD_PIXELFORMAT | DDSD_WIDTH | DDSD_HEIGHT
            meta.width = width
            meta.height = height
            meta.caps1 = DDSCAPS_TEXTURE
            meta.flags |= DDSD_LINEARSIZE
            meta.pitchOrLinearSize = len(data)

            meta.pf_rgbBitCount = 32
            meta.pf_rBitMask = 0x00ff0000
            meta.pf_gBitMask = 0x0000ff00
            meta.pf_bBitMask = 0x000000ff
            meta.pf_aBitMask = 0xff000000

            if fmt in ('rgb', 'rgba'):
                # (dead `assert True` / duplicate bpp assert removed)
                meta.pf_flags |= DDPF_RGB
                meta.pf_rgbBitCount = 32
                meta.pf_rBitMask = 0x00ff0000
                meta.pf_gBitMask = 0x0000ff00
                meta.pf_bBitMask = 0x000000ff
                meta.pf_aBitMask = 0x00000000
                if fmt == 'rgba':
                    meta.pf_flags |= DDPF_ALPHAPIXELS
                    meta.pf_aBitMask = 0xff000000
            else:
                meta.pf_flags |= DDPF_FOURCC
                if fmt == 'dxt1':
                    meta.pf_fourcc = DDS_DXT1
                elif fmt == 'dxt2':
                    meta.pf_fourcc = DDS_DXT2
                elif fmt == 'dxt3':
                    meta.pf_fourcc = DDS_DXT3
                elif fmt == 'dxt4':
                    meta.pf_fourcc = DDS_DXT4
                elif fmt == 'dxt5':
                    meta.pf_fourcc = DDS_DXT5

            images.append(data)
        else:
            assert level == len(images)
            assert fmt == self._fmt
            images.append(data)
            # Flag the mipmap chain in the header.
            meta.flags |= DDSD_MIPMAPCOUNT
            meta.caps1 |= DDSCAPS_COMPLEX | DDSCAPS_MIPMAP
            meta.mipmapCount = len(images)

    def __repr__(self):
        return '<DDSFile filename=%r size=%r dxt=%r len(images)=%r>' % (
            self.filename, self.size, self.dxt, len(self.images))

    def _get_size(self):
        meta = self.meta
        return meta.width, meta.height

    def _set_size(self, size):
        self.meta.update({'width': size[0], 'height': size[1]})

    # (width, height) taken from / stored into the header metadata.
    size = property(_get_size, _set_size)

    def _get_dxt(self):
        return dxt_to_str(self._dxt)

    def _set_dxt(self, dxt):
        self._dxt = str_to_dxt(dxt)

    # Pixel format exposed as its string name (see dxt_to_str).
    dxt = property(_get_dxt, _set_dxt)
if __name__ == '__main__':
    # Manual smoke test: parse each DDS file given on the command line,
    # print its summary, and re-save the last parsed state to "bleh.dds".
    import sys
    if len(sys.argv) == 1:
        print('Usage: python ddsfile.py <file1> <file2> ...')
        sys.exit(0)
    for filename in sys.argv[1:]:
        print('=== Loading', filename)
        try:
            dds = DDSFile(filename=filename)
            print(dds)
            dds.save('bleh.dds')
        except IOError as e:
            print('ERR>', e)
        except DDSException as e:
            print('DDS>', e)
/LiPD-0.2.8.9.tar.gz/LiPD-0.2.8.9/docs/_build/html/source/readme.html | <!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>LiPD — LiPD_Utilities documentation</title>
<link rel="stylesheet" href="../_static/css/theme.css" type="text/css" />
<link rel="index" title="Index"
href="../genindex.html"/>
<link rel="search" title="Search" href="../search.html"/>
<link rel="top" title="LiPD_Utilities documentation" href="../index.html"/>
<script src="../_static/js/modernizr.min.js"></script>
</head>
<body class="wy-body-for-nav" role="document">
<div class="wy-grid-for-nav">
<nav data-toggle="wy-nav-shift" class="wy-nav-side">
<div class="wy-side-scroll">
<div class="wy-side-nav-search">
<a href="../index.html" class="icon icon-home"> LiPD_Utilities
</a>
<div role="search">
<form id="rtd-search-form" class="wy-form" action="../search.html" method="get">
<input type="text" name="q" placeholder="Search docs" />
<input type="hidden" name="check_keywords" value="yes" />
<input type="hidden" name="area" value="default" />
</form>
</div>
</div>
<div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
<!-- Local TOC -->
<div class="local-toc"><ul>
<li><a class="reference internal" href="#">LiPD</a><ul>
<li><a class="reference internal" href="#what-is-it">What is it?</a></li>
<li><a class="reference internal" href="#installation">Installation</a></li>
<li><a class="reference internal" href="#usage">Usage</a></li>
<li><a class="reference internal" href="#getting-started">Getting started</a></li>
<li><a class="reference internal" href="#features">Features</a></li>
<li><a class="reference internal" href="#requirements">Requirements</a></li>
<li><a class="reference internal" href="#further-information">Further information</a></li>
<li><a class="reference internal" href="#contact">Contact</a></li>
<li><a class="reference internal" href="#license">License</a></li>
</ul>
</li>
</ul>
</div>
</div>
</div>
</nav>
<section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
<nav class="wy-nav-top" role="navigation" aria-label="top navigation">
<i data-toggle="wy-nav-top" class="fa fa-bars"></i>
<a href="../index.html">LiPD_Utilities</a>
</nav>
<div class="wy-nav-content">
<div class="rst-content">
<div role="navigation" aria-label="breadcrumbs navigation">
<ul class="wy-breadcrumbs">
<li><a href="../index.html">Docs</a> »</li>
<li>LiPD</li>
<li class="wy-breadcrumbs-aside">
<a href="../_sources/source/readme.rst.txt" rel="nofollow"> View page source</a>
</li>
</ul>
<hr/>
</div>
<div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
<div itemprop="articleBody">
<div class="section" id="lipd">
<h1>LiPD<a class="headerlink" href="#lipd" title="Permalink to this headline">¶</a></h1>
<p>Input/output and manipulation utilities for LiPD files in Matlab, R and
Python.</p>
<div class="section" id="what-is-it">
<h2>What is it?<a class="headerlink" href="#what-is-it" title="Permalink to this headline">¶</a></h2>
<p>LiPD is short for Linked PaleoData. LiPD is the data standard for
paleoclimatology and the exchange of data amongst paleoclimate experts.
This package will help you convert your existing database of
paleoclimate observations into LiPD files. Moreover, it contains tools
to analyze and manipulate LiPD data.</p>
</div>
<div class="section" id="installation">
<h2>Installation<a class="headerlink" href="#installation" title="Permalink to this headline">¶</a></h2>
<p>LiPD is a package containing multiple modules. Install globally on your
system with:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span><span class="n">pip</span> <span class="n">install</span> <span class="n">LiPD</span>
</pre></div>
</div>
<p>Python v3.4+ is required</p>
</div>
<div class="section" id="usage">
<h2>Usage<a class="headerlink" href="#usage" title="Permalink to this headline">¶</a></h2>
<p>Using your preferred Python IDE or a Python console, you can import the
LiPD package using:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">lipd</span>
</pre></div>
</div>
<p>Now you can call any function in the LiPD package.</p>
<div class="highlight-default"><div class="highlight"><pre><span></span><span class="n">lipd</span><span class="o">.</span><span class="n">readLipds</span><span class="p">()</span>
</pre></div>
</div>
<div class="highlight-default"><div class="highlight"><pre><span></span><span class="n">lipd</span><span class="o">.</span><span class="n">excel</span><span class="p">()</span>
</pre></div>
</div>
<div class="highlight-default"><div class="highlight"><pre><span></span><span class="n">lipd</span><span class="o">.</span><span class="n">doi</span><span class="p">()</span>
</pre></div>
</div>
</div>
<div class="section" id="getting-started">
<h2>Getting started<a class="headerlink" href="#getting-started" title="Permalink to this headline">¶</a></h2>
<p>Examples and guides are located on the github at:</p>
<p><a class="reference external" href="https://github.com/nickmckay/LiPD-utilities/tree/master/Examples">https://github.com/nickmckay/LiPD-utilities/tree/master/Examples</a></p>
</div>
<div class="section" id="features">
<h2>Features<a class="headerlink" href="#features" title="Permalink to this headline">¶</a></h2>
<ul class="simple">
<li>Convert Excel –> LiPD</li>
<li>Convert NOAA &lt;–&gt; LiPD</li>
<li>Read LiPD file for data analysis</li>
<li>Write LiPD file</li>
<li>Extract/collapse/filter/query on a time series</li>
</ul>
</div>
<div class="section" id="requirements">
<h2>Requirements<a class="headerlink" href="#requirements" title="Permalink to this headline">¶</a></h2>
<p>For a list of modules that are installed with this package, please refer
to the file called REQUIREMENTS.</p>
</div>
<div class="section" id="further-information">
<h2>Further information<a class="headerlink" href="#further-information" title="Permalink to this headline">¶</a></h2>
<p>Github: <a class="reference external" href="https://github.com/nickmckay/LiPD-utilities">https://github.com/nickmckay/LiPD-utilities</a></p>
<p>Linked Earth Wiki: wiki.linked.earth</p>
</div>
<div class="section" id="contact">
<h2>Contact<a class="headerlink" href="#contact" title="Permalink to this headline">¶</a></h2>
<p>If you are having issues, please let me know. Contact me at
<a class="reference external" href="mailto:heiser%40nau.edu">heiser<span>@</span>nau<span>.</span>edu</a>.</p>
</div>
<div class="section" id="license">
<h2>License<a class="headerlink" href="#license" title="Permalink to this headline">¶</a></h2>
<p>The project is licensed under the GNU Public License. Please refer to
the file called LICENSE.</p>
</div>
</div>
</div>
</div>
<footer>
<hr/>
<div role="contentinfo">
<p>
© Copyright 2017, N. McKay; C. Heiser.
</p>
</div>
Built with <a href="http://sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/snide/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
</footer>
</div>
</div>
</section>
</div>
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT:'../',
VERSION:'',
COLLAPSE_INDEX:false,
FILE_SUFFIX:'.html',
HAS_SOURCE: true
};
</script>
<script type="text/javascript" src="../_static/jquery.js"></script>
<script type="text/javascript" src="../_static/underscore.js"></script>
<script type="text/javascript" src="../_static/doctools.js"></script>
<script type="text/javascript" src="../_static/js/theme.js"></script>
<script type="text/javascript">
jQuery(function () {
SphinxRtdTheme.StickyNav.enable();
});
</script>
</body>
</html> | PypiClean |
/MIAvisual-0.0.6-py3-none-any.whl/matplotlib/lines.py | from numbers import Integral, Number, Real
import logging
import numpy as np
import matplotlib as mpl
from . import _api, artist, cbook, colors as mcolors, docstring, rcParams
from .artist import Artist, allow_rasterization
from .cbook import (
_to_unmasked_float_array, ls_mapper, ls_mapper_r, STEP_LOOKUP_MAP)
from .markers import MarkerStyle
from .path import Path
from .transforms import Bbox, BboxTransformTo, TransformedPath
from ._enums import JoinStyle, CapStyle
# Imported here for backward compatibility, even though they don't
# really belong.
from . import _path
from .markers import (
CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN,
CARETLEFTBASE, CARETRIGHTBASE, CARETUPBASE, CARETDOWNBASE,
TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN)
_log = logging.getLogger(__name__)
def _get_dash_pattern(style):
    """Convert a linestyle spec to an ``(offset, dash_sequence)`` pair."""
    # Expand shorthand specs ('-', '--', ...) to their full names.
    if isinstance(style, str):
        style = ls_mapper.get(style, style)
    if style in ['solid', 'None']:
        # Continuous line, or no line at all: no dash pattern.
        offset, dashes = 0, None
    elif style in ['dashed', 'dashdot', 'dotted']:
        # Named dashed styles take their pattern from the rc configuration.
        offset = 0
        dashes = tuple(rcParams['lines.{}_pattern'.format(style)])
    elif isinstance(style, tuple):
        offset, dashes = style
        if offset is None:
            raise ValueError(f'Unrecognized linestyle: {style!r}')
    else:
        raise ValueError(f'Unrecognized linestyle: {style!r}')
    # Fold the offset into [0, sum(dashes)) so it is positive and minimal.
    if dashes is not None:
        period = sum(dashes)
        if period:
            offset %= period
    return offset, dashes
def _scale_dashes(offset, dashes, lw):
    """Scale a dash pattern and its offset by the linewidth *lw*."""
    # Scaling can be turned off globally via the rc setting.
    if not rcParams['lines.scale_dashes']:
        return offset, dashes
    if dashes is None:
        return offset * lw, None
    return offset * lw, [d * lw if d is not None else None for d in dashes]
def segment_hits(cx, cy, x, y, radius):
    """
    Return the indices of the segments of the polyline with coordinates
    (*x*, *y*) that pass within a distance *radius* of the point
    (*cx*, *cy*).
    """
    # A lone vertex has no segments; test the point itself.
    if len(x) <= 1:
        hits, = np.nonzero((cx - x) ** 2 + (cy - y) ** 2 <= radius ** 2)
        return hits
    # Segment start points (all but the last vertex) and direction vectors.
    xs, ys = x[:-1], y[:-1]
    dx, dy = x[1:] - xs, y[1:] - ys
    seg_len_sq = dx ** 2 + dy ** 2  # Possibly want to eliminate zero-length segments
    # Parameter of the projection of (cx, cy) onto each infinite line;
    # the foot of the perpendicular lies on the segment when 0 <= u <= 1.
    u = ((cx - xs) * dx + (cy - ys) * dy) / seg_len_sq
    candidates = (u >= 0) & (u <= 1)
    # Near an endpoint the point and segment tests overlap ambiguously
    # (depending on the angle of the lines), so any vertex within the
    # radius is handled exclusively by the point test.
    point_hits = (cx - x) ** 2 + (cy - y) ** 2 <= radius ** 2
    candidates &= ~(point_hits[:-1] | point_hits[1:])
    # Perpendicular distance from (cx, cy) to each candidate segment.
    px, py = xs + u * dx, ys + u * dy
    line_hits = ((cx - px) ** 2 + (cy - py) ** 2 <= radius ** 2) & candidates
    points, = point_hits.ravel().nonzero()
    lines, = line_hits.ravel().nonzero()
    return np.concatenate((points, lines))
def _mark_every_path(markevery, tpath, affine, ax):
    """
    Helper function that sorts out how to deal the input
    `markevery` and returns the points where markers should be drawn.
    Takes in the `markevery` value and the line path and returns the
    sub-sampled path.

    Parameters
    ----------
    markevery : int, (int, int), float, (float, float), slice, or fancy index
        Marker subsampling spec; see `.Line2D.set_markevery` for semantics.
    tpath : Path
        The line's path, already transformed up to (but not including) the
        affine part.
    affine : Transform
        The remaining affine transform; used to measure display-space
        distances for the float forms of *markevery*.
    ax : Axes or None
        Parent axes; required by the float forms, which space markers
        relative to the axes bounding-box diagonal.
    """
    # pull out the two bits of data we want from the path
    codes, verts = tpath.codes, tpath.vertices
    def _slice_or_none(in_v, slc):
        """Helper function to cope with `codes` being an ndarray or `None`."""
        if in_v is None:
            return None
        return in_v[slc]
    # if just an int, assume starting at 0 and make a tuple
    if isinstance(markevery, Integral):
        markevery = (0, markevery)
    # if just a float, assume starting at 0.0 and make a tuple
    elif isinstance(markevery, Real):
        markevery = (0.0, markevery)
    if isinstance(markevery, tuple):
        if len(markevery) != 2:
            raise ValueError('`markevery` is a tuple but its len is not 2; '
                             'markevery={}'.format(markevery))
        start, step = markevery
        # if step is an int, old behavior
        if isinstance(step, Integral):
            # tuple of 2 int is for backwards compatibility,
            if not isinstance(start, Integral):
                raise ValueError(
                    '`markevery` is a tuple with len 2 and second element is '
                    'an int, but the first element is not an int; markevery={}'
                    .format(markevery))
            # just return, we are done here
            return Path(verts[slice(start, None, step)],
                        _slice_or_none(codes, slice(start, None, step)))
        elif isinstance(step, Real):
            if not isinstance(start, Real):
                raise ValueError(
                    '`markevery` is a tuple with len 2 and second element is '
                    'a float, but the first element is not a float or an int; '
                    'markevery={}'.format(markevery))
            if ax is None:
                raise ValueError(
                    "markevery is specified relative to the axes size, but "
                    "the line does not have a Axes as parent")
            # calc cumulative distance along path (in display coords):
            disp_coords = affine.transform(tpath.vertices)
            delta = np.empty((len(disp_coords), 2))
            delta[0, :] = 0
            delta[1:, :] = disp_coords[1:, :] - disp_coords[:-1, :]
            delta = np.hypot(*delta.T).cumsum()
            # calc distance between markers along path based on the axes
            # bounding box diagonal being a distance of unity:
            (x0, y0), (x1, y1) = ax.transAxes.transform([[0, 0], [1, 1]])
            scale = np.hypot(x1 - x0, y1 - y0)
            marker_delta = np.arange(start * scale, delta[-1], step * scale)
            # find closest actual data point that is closest to
            # the theoretical distance along the path:
            inds = np.abs(delta[np.newaxis, :] - marker_delta[:, np.newaxis])
            inds = inds.argmin(axis=1)
            inds = np.unique(inds)
            # return, we are done here
            return Path(verts[inds], _slice_or_none(codes, inds))
        else:
            raise ValueError(
                f"markevery={markevery!r} is a tuple with len 2, but its "
                f"second element is not an int or a float")
    elif isinstance(markevery, slice):
        # mazol tov, it's already a slice, just return
        return Path(verts[markevery], _slice_or_none(codes, markevery))
    elif np.iterable(markevery):
        # fancy indexing
        try:
            return Path(verts[markevery], _slice_or_none(codes, markevery))
        except (ValueError, IndexError) as err:
            raise ValueError(
                f"markevery={markevery!r} is iterable but not a valid numpy "
                f"fancy index") from err
    else:
        raise ValueError(f"markevery={markevery!r} is not a recognized value")
@docstring.interpd
@cbook._define_aliases({
    "antialiased": ["aa"],
    "color": ["c"],
    "drawstyle": ["ds"],
    "linestyle": ["ls"],
    "linewidth": ["lw"],
    "markeredgecolor": ["mec"],
    "markeredgewidth": ["mew"],
    "markerfacecolor": ["mfc"],
    "markerfacecoloralt": ["mfcalt"],
    "markersize": ["ms"],
})
class Line2D(Artist):
    """
    A line - the line can have both a solid linestyle connecting all
    the vertices, and a marker at each vertex. Additionally, the
    drawing of the solid line is influenced by the drawstyle, e.g., one
    can create "stepped" lines in various styles.
    """
    # Mapping of linestyle spec -> name of the drawing method; used in
    # draw() to detect the "draw nothing" styles.
    lineStyles = _lineStyles = {  # hidden names deprecated
        '-': '_draw_solid',
        '--': '_draw_dashed',
        '-.': '_draw_dash_dot',
        ':': '_draw_dotted',
        'None': '_draw_nothing',
        ' ': '_draw_nothing',
        '': '_draw_nothing',
    }
    # Long-form drawstyle names.
    _drawStyles_l = {
        'default': '_draw_lines',
        'steps-mid': '_draw_steps_mid',
        'steps-pre': '_draw_steps_pre',
        'steps-post': '_draw_steps_post',
    }
    # Short-form aliases for drawstyles.
    _drawStyles_s = {
        'steps': '_draw_steps_pre',
    }
    # drawStyles should now be deprecated.
    drawStyles = {**_drawStyles_l, **_drawStyles_s}
    # Need a list ordered with long names first:
    drawStyleKeys = [*_drawStyles_l, *_drawStyles_s]
    # Referenced here to maintain API. These are defined in
    # MarkerStyle
    markers = MarkerStyle.markers
    filled_markers = MarkerStyle.filled_markers
    fillStyles = MarkerStyle.fillstyles
    # Default artist z-order for lines.
    zorder = 2
    # Deprecated class-level views of the valid cap/join style names;
    # kept only for backward compatibility (see the CapStyle/JoinStyle enums).
    @_api.deprecated("3.4")
    @_api.classproperty
    def validCap(cls):
        """Deprecated: tuple of valid cap style names."""
        return tuple(cs.value for cs in CapStyle)
    @_api.deprecated("3.4")
    @_api.classproperty
    def validJoin(cls):
        """Deprecated: tuple of valid join style names."""
        return tuple(js.value for js in JoinStyle)
def __str__(self):
if self._label != "":
return f"Line2D({self._label})"
elif self._x is None:
return "Line2D()"
elif len(self._x) > 3:
return "Line2D((%g,%g),(%g,%g),...,(%g,%g))" % (
self._x[0], self._y[0], self._x[0],
self._y[0], self._x[-1], self._y[-1])
else:
return "Line2D(%s)" % ",".join(
map("({:g},{:g})".format, self._x, self._y))
    def __init__(self, xdata, ydata,
                 linewidth=None,  # all Nones default to rc
                 linestyle=None,
                 color=None,
                 marker=None,
                 markersize=None,
                 markeredgewidth=None,
                 markeredgecolor=None,
                 markerfacecolor=None,
                 markerfacecoloralt='none',
                 fillstyle=None,
                 antialiased=None,
                 dash_capstyle=None,
                 solid_capstyle=None,
                 dash_joinstyle=None,
                 solid_joinstyle=None,
                 pickradius=5,
                 drawstyle=None,
                 markevery=None,
                 **kwargs
                 ):
        """
        Create a `.Line2D` instance with *x* and *y* data in sequences of
        *xdata*, *ydata*.

        Additional keyword arguments are `.Line2D` properties:

        %(Line2D:kwdoc)s

        See :meth:`set_linestyle` for a description of the line styles,
        :meth:`set_marker` for a description of the markers, and
        :meth:`set_drawstyle` for a description of the draw styles.
        """
        super().__init__()
        # Convert sequences to NumPy arrays.
        if not np.iterable(xdata):
            raise RuntimeError('xdata must be a sequence')
        if not np.iterable(ydata):
            raise RuntimeError('ydata must be a sequence')
        # Resolve all None arguments to their rc defaults.
        if linewidth is None:
            linewidth = rcParams['lines.linewidth']
        if linestyle is None:
            linestyle = rcParams['lines.linestyle']
        if marker is None:
            marker = rcParams['lines.marker']
        if color is None:
            color = rcParams['lines.color']
        if markersize is None:
            markersize = rcParams['lines.markersize']
        if antialiased is None:
            antialiased = rcParams['lines.antialiased']
        if dash_capstyle is None:
            dash_capstyle = rcParams['lines.dash_capstyle']
        if dash_joinstyle is None:
            dash_joinstyle = rcParams['lines.dash_joinstyle']
        if solid_capstyle is None:
            solid_capstyle = rcParams['lines.solid_capstyle']
        if solid_joinstyle is None:
            solid_joinstyle = rcParams['lines.solid_joinstyle']
        if drawstyle is None:
            drawstyle = 'default'
        # Cap/join styles for the dashed and solid portions of the line.
        self._dashcapstyle = None
        self._dashjoinstyle = None
        self._solidjoinstyle = None
        self._solidcapstyle = None
        self.set_dash_capstyle(dash_capstyle)
        self.set_dash_joinstyle(dash_joinstyle)
        self.set_solid_capstyle(solid_capstyle)
        self.set_solid_joinstyle(solid_joinstyle)
        self._linestyles = None
        self._drawstyle = None
        self._linewidth = linewidth
        # scaled dash + offset
        self._dashSeq = None
        self._dashOffset = 0
        # unscaled dash + offset
        # this is needed scaling the dash pattern by linewidth
        self._us_dashSeq = None
        self._us_dashOffset = 0
        # Order matters: set_linewidth rescales the dash pattern set here.
        self.set_linewidth(linewidth)
        self.set_linestyle(linestyle)
        self.set_drawstyle(drawstyle)
        self._color = None
        self.set_color(color)
        # Marker state (style, subsampling, size, edge/face colors).
        self._marker = MarkerStyle(marker, fillstyle)
        self._markevery = None
        self._markersize = None
        self._antialiased = None
        self.set_markevery(markevery)
        self.set_antialiased(antialiased)
        self.set_markersize(markersize)
        self._markeredgecolor = None
        self._markeredgewidth = None
        self._markerfacecolor = None
        self._markerfacecoloralt = None
        self.set_markerfacecolor(markerfacecolor)  # Normalizes None to rc.
        self.set_markerfacecoloralt(markerfacecoloralt)
        self.set_markeredgecolor(markeredgecolor)  # Normalizes None to rc.
        self.set_markeredgewidth(markeredgewidth)
        # update kwargs before updating data to give the caller a
        # chance to init axes (and hence unit support)
        self.update(kwargs)
        self.pickradius = pickradius
        self.ind_offset = 0
        # A numeric (non-bool) picker doubles as the pick radius.
        if (isinstance(self._picker, Number) and
                not isinstance(self._picker, bool)):
            self.pickradius = self._picker
        # Cached data arrays; marked invalid until recache() runs.
        self._xorig = np.asarray([])
        self._yorig = np.asarray([])
        self._invalidx = True
        self._invalidy = True
        self._x = None
        self._y = None
        self._xy = None
        self._path = None
        self._transformed_path = None
        self._subslice = False
        self._x_filled = None  # used in subslicing; only x is needed
        self.set_data(xdata, ydata)
    def contains(self, mouseevent):
        """
        Test whether *mouseevent* occurred on the line.

        An event is deemed to have occurred "on" the line if it is less
        than ``self.pickradius`` (default: 5 points) away from it. Use
        `~.Line2D.get_pickradius` or `~.Line2D.set_pickradius` to get or set
        the pick radius.

        Parameters
        ----------
        mouseevent : `matplotlib.backend_bases.MouseEvent`

        Returns
        -------
        contains : bool
            Whether any values are within the radius.
        details : dict
            A dictionary ``{'ind': pointlist}``, where *pointlist* is a
            list of points of the line that are within the pickradius around
            the event position.

            TODO: sort returned indices by distance
        """
        # A custom picker callback, if set, takes precedence.
        inside, info = self._default_contains(mouseevent)
        if inside is not None:
            return inside, info
        # Make sure we have data to plot
        if self._invalidy or self._invalidx:
            self.recache()
        if len(self._xy) == 0:
            return False, {}
        # Convert points to pixels
        transformed_path = self._get_transformed_path()
        path, affine = transformed_path.get_transformed_path_and_affine()
        path = affine.transform_path(path)
        xy = path.vertices
        xt = xy[:, 0]
        yt = xy[:, 1]
        # Convert pick radius from points to pixels
        if self.figure is None:
            _log.warning('no figure set when check if mouse is on line')
            pixels = self.pickradius
        else:
            pixels = self.figure.dpi / 72. * self.pickradius
        # The math involved in checking for containment (here and inside of
        # segment_hits) assumes that it is OK to overflow, so temporarily set
        # the error flags accordingly.
        with np.errstate(all='ignore'):
            # Check for collision
            if self._linestyle in ['None', None]:
                # If no line, return the nearby point(s)
                ind, = np.nonzero(
                    (xt - mouseevent.x) ** 2 + (yt - mouseevent.y) ** 2
                    <= pixels ** 2)
            else:
                # If line, return the nearby segment(s)
                ind = segment_hits(mouseevent.x, mouseevent.y, xt, yt, pixels)
                if self._drawstyle.startswith("steps"):
                    # Step drawstyles expand the path; halve the vertex
                    # indices to recover data-point indices.
                    ind //= 2
        # Offset accounts for subslicing done during draw() (see ind_offset).
        ind += self.ind_offset
        # Return the point(s) within radius
        return len(ind) > 0, dict(ind=ind)
def get_pickradius(self):
"""
Return the pick radius used for containment tests.
See `.contains` for more details.
"""
return self._pickradius
def set_pickradius(self, d):
"""
Set the pick radius used for containment tests.
See `.contains` for more details.
Parameters
----------
d : float
Pick radius, in points.
"""
if not isinstance(d, Number) or d < 0:
raise ValueError("pick radius should be a distance")
self._pickradius = d
pickradius = property(get_pickradius, set_pickradius)
def get_fillstyle(self):
"""
Return the marker fill style.
See also `~.Line2D.set_fillstyle`.
"""
return self._marker.get_fillstyle()
def set_fillstyle(self, fs):
"""
Set the marker fill style.
Parameters
----------
fs : {'full', 'left', 'right', 'bottom', 'top', 'none'}
Possible values:
- 'full': Fill the whole marker with the *markerfacecolor*.
- 'left', 'right', 'bottom', 'top': Fill the marker half at
the given side with the *markerfacecolor*. The other
half of the marker is filled with *markerfacecoloralt*.
- 'none': No filling.
For examples see :ref:`marker_fill_styles`.
"""
self.set_marker(MarkerStyle(self._marker.get_marker(), fs))
self.stale = True
    def set_markevery(self, every):
        """
        Set the markevery property to subsample the plot when using markers.

        e.g., if ``every=5``, every 5-th marker will be plotted.

        Parameters
        ----------
        every : None or int or (int, int) or slice or list[int] or float or \
(float, float) or list[bool]
            Which markers to plot.

            - ``every=None``: every point will be plotted.
            - ``every=N``: every N-th marker will be plotted starting with
              marker 0.
            - ``every=(start, N)``: every N-th marker, starting at index
              *start*, will be plotted.
            - ``every=slice(start, end, N)``: every N-th marker, starting at
              index *start*, up to but not including index *end*, will be
              plotted.
            - ``every=[i, j, m, ...]``: only markers at the given indices
              will be plotted.
            - ``every=[True, False, True, ...]``: only positions that are True
              will be plotted. The list must have the same length as the data
              points.
            - ``every=0.1``, (i.e. a float): markers will be spaced at
              approximately equal visual distances along the line; the distance
              along the line between markers is determined by multiplying the
              display-coordinate distance of the axes bounding-box diagonal
              by the value of *every*.
            - ``every=(0.5, 0.1)`` (i.e. a length-2 tuple of float): similar
              to ``every=0.1`` but the first marker will be offset along the
              line by 0.5 multiplied by the
              display-coordinate-diagonal-distance along the line.

            For examples see
            :doc:`/gallery/lines_bars_and_markers/markevery_demo`.

        Notes
        -----
        Setting *markevery* will still only draw markers at actual data points.
        While the float argument form aims for uniform visual spacing, it has
        to coerce from the ideal spacing to the nearest available data point.
        Depending on the number and distribution of data points, the result
        may still not look evenly spaced.
        When using a start offset to specify the first marker, the offset will
        be from the first data point which may be different from the first
        the visible data point if the plot is zoomed in.
        If zooming in on a plot when using float arguments then the actual
        data points that have markers will change because the distance between
        markers is always determined from the display-coordinates
        axes-bounding-box-diagonal regardless of the actual axes data limits.
        """
        # Stored verbatim; interpretation happens in _mark_every_path() at
        # draw time.
        self._markevery = every
        self.stale = True
    def get_markevery(self):
        """
        Return the markevery setting for marker subsampling.

        See also `~.Line2D.set_markevery`.
        """
        return self._markevery
def set_picker(self, p):
"""
Set the event picker details for the line.
Parameters
----------
p : float or callable[[Artist, Event], tuple[bool, dict]]
If a float, it is used as the pick radius in points.
"""
if callable(p):
self._contains = p
else:
self.pickradius = p
self._picker = p
def get_window_extent(self, renderer):
bbox = Bbox([[0, 0], [0, 0]])
trans_data_to_xy = self.get_transform().transform
bbox.update_from_data_xy(trans_data_to_xy(self.get_xydata()),
ignore=True)
# correct for marker size, if any
if self._marker:
ms = (self._markersize / 72.0 * self.figure.dpi) * 0.5
bbox = bbox.padded(ms)
return bbox
def set_data(self, *args):
"""
Set the x and y data.
Parameters
----------
*args : (2, N) array or two 1D arrays
"""
if len(args) == 1:
(x, y), = args
else:
x, y = args
self.set_xdata(x)
self.set_ydata(y)
def recache_always(self):
self.recache(always=True)
    def recache(self, always=False):
        """
        Rebuild the cached ``_x``/``_y``/``_xy`` arrays and ``_path`` from
        the original data, converting units as needed.

        Parameters
        ----------
        always : bool
            If True, reconvert both coordinates even if their invalid flags
            are not set.
        """
        if always or self._invalidx:
            xconv = self.convert_xunits(self._xorig)
            x = _to_unmasked_float_array(xconv).ravel()
        else:
            x = self._x
        if always or self._invalidy:
            yconv = self.convert_yunits(self._yorig)
            y = _to_unmasked_float_array(yconv).ravel()
        else:
            y = self._y
        self._xy = np.column_stack(np.broadcast_arrays(x, y)).astype(float)
        self._x, self._y = self._xy.T  # views
        self._subslice = False
        # Subslicing (drawing only the visible x-span) is only valid for
        # large, sorted x data drawn with the plain data transform on a
        # linear rectilinear axis, without marker subsampling.
        if (self.axes and len(x) > 1000 and self._is_sorted(x) and
                self.axes.name == 'rectilinear' and
                self.axes.get_xscale() == 'linear' and
                self._markevery is None and
                self.get_clip_on() and
                self.get_transform() == self.axes.transData):
            self._subslice = True
            nanmask = np.isnan(x)
            if nanmask.any():
                # Fill NaNs by interpolation so searchsorted() stays usable.
                self._x_filled = self._x.copy()
                indices = np.arange(len(x))
                self._x_filled[nanmask] = np.interp(
                    indices[nanmask], indices[~nanmask], self._x[~nanmask])
            else:
                self._x_filled = self._x
        if self._path is not None:
            interpolation_steps = self._path._interpolation_steps
        else:
            interpolation_steps = 1
        # Expand the data according to the drawstyle (e.g. steps-*).
        xy = STEP_LOOKUP_MAP[self._drawstyle](*self._xy.T)
        self._path = Path(np.asarray(xy).T,
                          _interpolation_steps=interpolation_steps)
        self._transformed_path = None
        self._invalidx = False
        self._invalidy = False
def _transform_path(self, subslice=None):
"""
Put a TransformedPath instance at self._transformed_path;
all invalidation of the transform is then handled by the
TransformedPath instance.
"""
# Masked arrays are now handled by the Path class itself
if subslice is not None:
xy = STEP_LOOKUP_MAP[self._drawstyle](*self._xy[subslice, :].T)
_path = Path(np.asarray(xy).T,
_interpolation_steps=self._path._interpolation_steps)
else:
_path = self._path
self._transformed_path = TransformedPath(_path, self.get_transform())
def _get_transformed_path(self):
"""Return this line's `~matplotlib.transforms.TransformedPath`."""
if self._transformed_path is None:
self._transform_path()
return self._transformed_path
def set_transform(self, t):
# docstring inherited
self._invalidx = True
self._invalidy = True
super().set_transform(t)
def _is_sorted(self, x):
"""Return whether x is sorted in ascending order."""
# We don't handle the monotonically decreasing case.
return _path.is_sorted(x)
    @allow_rasterization
    def draw(self, renderer):
        # docstring inherited
        if not self.get_visible():
            return
        if self._invalidy or self._invalidx:
            self.recache()
        self.ind_offset = 0  # Needed for contains() method.
        if self._subslice and self.axes:
            # Draw only the vertices within the current x view limits
            # (plus one point of margin on each side).
            x0, x1 = self.axes.get_xbound()
            i0 = self._x_filled.searchsorted(x0, 'left')
            i1 = self._x_filled.searchsorted(x1, 'right')
            subslice = slice(max(i0 - 1, 0), i1 + 1)
            self.ind_offset = subslice.start
            self._transform_path(subslice)
        else:
            subslice = None
        if self.get_path_effects():
            from matplotlib.patheffects import PathEffectRenderer
            renderer = PathEffectRenderer(self.get_path_effects(), renderer)
        renderer.open_group('line2d', self.get_gid())
        # --- Draw the line itself (unless the style is a no-draw style). ---
        if self._lineStyles[self._linestyle] != '_draw_nothing':
            tpath, affine = (self._get_transformed_path()
                             .get_transformed_path_and_affine())
            if len(tpath.vertices):
                gc = renderer.new_gc()
                self._set_gc_clip(gc)
                gc.set_url(self.get_url())
                lc_rgba = mcolors.to_rgba(self._color, self._alpha)
                gc.set_foreground(lc_rgba, isRGBA=True)
                gc.set_antialiased(self._antialiased)
                gc.set_linewidth(self._linewidth)
                if self.is_dashed():
                    cap = self._dashcapstyle
                    join = self._dashjoinstyle
                else:
                    cap = self._solidcapstyle
                    join = self._solidjoinstyle
                gc.set_joinstyle(join)
                gc.set_capstyle(cap)
                gc.set_snap(self.get_snap())
                if self.get_sketch_params() is not None:
                    gc.set_sketch_params(*self.get_sketch_params())
                gc.set_dashes(self._dashOffset, self._dashSeq)
                renderer.draw_path(gc, tpath, affine.frozen())
                gc.restore()
        # --- Draw the markers, if any. ---
        if self._marker and self._markersize > 0:
            gc = renderer.new_gc()
            self._set_gc_clip(gc)
            gc.set_url(self.get_url())
            gc.set_linewidth(self._markeredgewidth)
            gc.set_antialiased(self._antialiased)
            ec_rgba = mcolors.to_rgba(
                self.get_markeredgecolor(), self._alpha)
            fc_rgba = mcolors.to_rgba(
                self._get_markerfacecolor(), self._alpha)
            fcalt_rgba = mcolors.to_rgba(
                self._get_markerfacecolor(alt=True), self._alpha)
            # If the edgecolor is "auto", it is set according to the *line*
            # color but inherits the alpha value of the *face* color, if any.
            if (cbook._str_equal(self._markeredgecolor, "auto")
                    and not cbook._str_lower_equal(
                        self.get_markerfacecolor(), "none")):
                ec_rgba = ec_rgba[:3] + (fc_rgba[3],)
            gc.set_foreground(ec_rgba, isRGBA=True)
            if self.get_sketch_params() is not None:
                scale, length, randomness = self.get_sketch_params()
                gc.set_sketch_params(scale/2, length/2, 2*randomness)
            marker = self._marker
            # Markers *must* be drawn ignoring the drawstyle (but don't pay the
            # recaching if drawstyle is already "default").
            if self.get_drawstyle() != "default":
                with cbook._setattr_cm(
                        self, _drawstyle="default", _transformed_path=None):
                    self.recache()
                    self._transform_path(subslice)
                    tpath, affine = (self._get_transformed_path()
                                     .get_transformed_points_and_affine())
            else:
                tpath, affine = (self._get_transformed_path()
                                 .get_transformed_points_and_affine())
            if len(tpath.vertices):
                # subsample the markers if markevery is not None
                markevery = self.get_markevery()
                if markevery is not None:
                    subsampled = _mark_every_path(
                        markevery, tpath, affine, self.axes)
                else:
                    subsampled = tpath
                snap = marker.get_snap_threshold()
                if isinstance(snap, Real):
                    snap = renderer.points_to_pixels(self._markersize) >= snap
                gc.set_snap(snap)
                gc.set_joinstyle(marker.get_joinstyle())
                gc.set_capstyle(marker.get_capstyle())
                marker_path = marker.get_path()
                marker_trans = marker.get_transform()
                w = renderer.points_to_pixels(self._markersize)
                if cbook._str_equal(marker.get_marker(), ","):
                    gc.set_linewidth(0)
                else:
                    # Don't scale for pixels, and don't stroke them
                    marker_trans = marker_trans.scale(w)
                renderer.draw_markers(gc, marker_path, marker_trans,
                                      subsampled, affine.frozen(),
                                      fc_rgba)
                # Draw the alternate (second-half) marker faces, if the
                # fill style defines them.
                alt_marker_path = marker.get_alt_path()
                if alt_marker_path:
                    alt_marker_trans = marker.get_alt_transform()
                    alt_marker_trans = alt_marker_trans.scale(w)
                    renderer.draw_markers(
                        gc, alt_marker_path, alt_marker_trans, subsampled,
                        affine.frozen(), fcalt_rgba)
            gc.restore()
        renderer.close_group('line2d')
        self.stale = False
    def get_antialiased(self):
        """Return whether antialiased rendering is used (bool)."""
        return self._antialiased
    def get_color(self):
        """
        Return the line color as set by `~.Line2D.set_color`.

        See also `~.Line2D.set_color`.
        """
        return self._color
    def get_drawstyle(self):
        """
        Return the drawstyle (one of the keys of ``drawStyles``).

        See also `~.Line2D.set_drawstyle`.
        """
        return self._drawstyle
    def get_linestyle(self):
        """
        Return the linestyle.

        Custom dash tuples are reported as ``'--'`` (see
        `~.Line2D.set_linestyle`).
        """
        return self._linestyle
    def get_linewidth(self):
        """
        Return the linewidth in points (float).

        See also `~.Line2D.set_linewidth`.
        """
        return self._linewidth
    def get_marker(self):
        """
        Return the line marker, delegating to the stored `MarkerStyle`.

        See also `~.Line2D.set_marker`.
        """
        return self._marker.get_marker()
def get_markeredgecolor(self):
"""
Return the marker edge color.
See also `~.Line2D.set_markeredgecolor`.
"""
mec = self._markeredgecolor
if cbook._str_equal(mec, 'auto'):
if rcParams['_internal.classic_mode']:
if self._marker.get_marker() in ('.', ','):
return self._color
if (self._marker.is_filled()
and self._marker.get_fillstyle() != 'none'):
return 'k' # Bad hard-wired default...
return self._color
else:
return mec
    def get_markeredgewidth(self):
        """
        Return the marker edge width in points (float).

        See also `~.Line2D.set_markeredgewidth`.
        """
        return self._markeredgewidth
def _get_markerfacecolor(self, alt=False):
if self._marker.get_fillstyle() == 'none':
return 'none'
fc = self._markerfacecoloralt if alt else self._markerfacecolor
if cbook._str_lower_equal(fc, 'auto'):
return self._color
else:
return fc
    def get_markerfacecolor(self):
        """
        Return the marker face color (resolved via `_get_markerfacecolor`).

        See also `~.Line2D.set_markerfacecolor`.
        """
        return self._get_markerfacecolor(alt=False)
    def get_markerfacecoloralt(self):
        """
        Return the alternate marker face color (resolved via
        `_get_markerfacecolor`).

        See also `~.Line2D.set_markerfacecoloralt`.
        """
        return self._get_markerfacecolor(alt=True)
    def get_markersize(self):
        """
        Return the marker size in points (float).

        See also `~.Line2D.set_markersize`.
        """
        return self._markersize
    def get_data(self, orig=True):
        """
        Return the line data as an ``(xdata, ydata)`` pair.

        If *orig* is *True*, return the original data, else the processed
        data (see `~.Line2D.get_xdata` / `~.Line2D.get_ydata`).
        """
        return self.get_xdata(orig=orig), self.get_ydata(orig=orig)
def get_xdata(self, orig=True):
"""
Return the xdata.
If *orig* is *True*, return the original data, else the
processed data.
"""
if orig:
return self._xorig
if self._invalidx:
self.recache()
return self._x
def get_ydata(self, orig=True):
"""
Return the ydata.
If *orig* is *True*, return the original data, else the
processed data.
"""
if orig:
return self._yorig
if self._invalidy:
self.recache()
return self._y
    def get_path(self):
        """Return the `~matplotlib.path.Path` associated with this line."""
        # Rebuild the cached path first if either data array was invalidated.
        if self._invalidy or self._invalidx:
            self.recache()
        return self._path
    def get_xydata(self):
        """
        Return the *xy* data as a Nx2 numpy array.
        """
        # Rebuild the cached arrays first if either axis was invalidated.
        if self._invalidy or self._invalidx:
            self.recache()
        return self._xy
def set_antialiased(self, b):
"""
Set whether to use antialiased rendering.
Parameters
----------
b : bool
"""
if self._antialiased != b:
self.stale = True
self._antialiased = b
    def set_color(self, color):
        """
        Set the color of the line.

        Parameters
        ----------
        color : color
        """
        # Validate eagerly so an invalid color fails here, not at draw time.
        mcolors._check_color_like(color=color)
        self._color = color
        self.stale = True
def set_drawstyle(self, drawstyle):
"""
Set the drawstyle of the plot.
The drawstyle determines how the points are connected.
Parameters
----------
drawstyle : {'default', 'steps', 'steps-pre', 'steps-mid', \
'steps-post'}, default: 'default'
For 'default', the points are connected with straight lines.
The steps variants connect the points with step-like lines,
i.e. horizontal lines with vertical steps. They differ in the
location of the step:
- 'steps-pre': The step is at the beginning of the line segment,
i.e. the line will be at the y-value of point to the right.
- 'steps-mid': The step is halfway between the points.
- 'steps-post: The step is at the end of the line segment,
i.e. the line will be at the y-value of the point to the left.
- 'steps' is equal to 'steps-pre' and is maintained for
backward-compatibility.
For examples see :doc:`/gallery/lines_bars_and_markers/step_demo`.
"""
if drawstyle is None:
drawstyle = 'default'
_api.check_in_list(self.drawStyles, drawstyle=drawstyle)
if self._drawstyle != drawstyle:
self.stale = True
# invalidate to trigger a recache of the path
self._invalidx = True
self._drawstyle = drawstyle
def set_linewidth(self, w):
"""
Set the line width in points.
Parameters
----------
w : float
Line width, in points.
"""
w = float(w)
if self._linewidth != w:
self.stale = True
self._linewidth = w
# rescale the dashes + offset
self._dashOffset, self._dashSeq = _scale_dashes(
self._us_dashOffset, self._us_dashSeq, self._linewidth)
    def set_linestyle(self, ls):
        """
        Set the linestyle of the line.

        Parameters
        ----------
        ls : {'-', '--', '-.', ':', '', (offset, on-off-seq), ...}
            Possible values:

            - A string:

              ========================================== =================
              linestyle                                  description
              ========================================== =================
              ``'-'`` or ``'solid'``                     solid line
              ``'--'`` or ``'dashed'``                   dashed line
              ``'-.'`` or ``'dashdot'``                  dash-dotted line
              ``':'`` or ``'dotted'``                    dotted line
              ``'none'``, ``'None'``, ``' '``, or ``''`` draw nothing
              ========================================== =================

            - Alternatively a dash tuple of the following form can be
              provided::

                  (offset, onoffseq)

              where ``onoffseq`` is an even length tuple of on and off ink
              in points. See also :meth:`set_dashes`.

        For examples see :doc:`/gallery/lines_bars_and_markers/linestyles`.
        """
        if isinstance(ls, str):
            # Normalize the empty/'none' spellings to the canonical 'None'.
            if ls in [' ', '', 'none']:
                ls = 'None'
            _api.check_in_list([*self._lineStyles, *ls_mapper_r], ls=ls)
            # Map long names ('dashed', ...) to the short forms ('--', ...).
            if ls not in self._lineStyles:
                ls = ls_mapper_r[ls]
            self._linestyle = ls
        else:
            # A dash tuple: record it as a generic dashed style; the actual
            # pattern is kept in the dash sequence below.
            self._linestyle = '--'
        # get the unscaled dashes
        self._us_dashOffset, self._us_dashSeq = _get_dash_pattern(ls)
        # compute the linewidth scaled dashes
        self._dashOffset, self._dashSeq = _scale_dashes(
            self._us_dashOffset, self._us_dashSeq, self._linewidth)
    @docstring.interpd
    def set_marker(self, marker):
        """
        Set the line marker.

        Parameters
        ----------
        marker : marker style string, `~.path.Path` or `~.markers.MarkerStyle`
            See `~matplotlib.markers` for full description of possible
            arguments.
        """
        # Preserve the current fillstyle when swapping the marker.
        self._marker = MarkerStyle(marker, self._marker.get_fillstyle())
        self.stale = True
def _set_markercolor(self, name, has_rcdefault, val):
if val is None:
val = rcParams[f"lines.{name}"] if has_rcdefault else "auto"
attr = f"_{name}"
current = getattr(self, attr)
if current is None:
self.stale = True
else:
neq = current != val
# Much faster than `np.any(current != val)` if no arrays are used.
if neq.any() if isinstance(neq, np.ndarray) else neq:
self.stale = True
setattr(self, attr, val)
    def set_markeredgecolor(self, ec):
        """
        Set the marker edge color.

        Parameters
        ----------
        ec : color
            If *None*, the :rc:`lines.markeredgecolor` default is used.
        """
        self._set_markercolor("markeredgecolor", True, ec)
    def set_markerfacecolor(self, fc):
        """
        Set the marker face color.

        Parameters
        ----------
        fc : color
            If *None*, the :rc:`lines.markerfacecolor` default is used.
        """
        self._set_markercolor("markerfacecolor", True, fc)
    def set_markerfacecoloralt(self, fc):
        """
        Set the alternate marker face color.

        Parameters
        ----------
        fc : color
            If *None*, falls back to ``'auto'`` (no rc default exists).
        """
        self._set_markercolor("markerfacecoloralt", False, fc)
def set_markeredgewidth(self, ew):
"""
Set the marker edge width in points.
Parameters
----------
ew : float
Marker edge width, in points.
"""
if ew is None:
ew = rcParams['lines.markeredgewidth']
if self._markeredgewidth != ew:
self.stale = True
self._markeredgewidth = ew
def set_markersize(self, sz):
"""
Set the marker size in points.
Parameters
----------
sz : float
Marker size, in points.
"""
sz = float(sz)
if self._markersize != sz:
self.stale = True
self._markersize = sz
    def set_xdata(self, x):
        """
        Set the data array for x.

        Parameters
        ----------
        x : 1D array
        """
        # Stored by reference (not copied); _invalidx marks the processed
        # arrays stale so they are rebuilt on next access (see get_xdata).
        self._xorig = x
        self._invalidx = True
        self.stale = True
    def set_ydata(self, y):
        """
        Set the data array for y.

        Parameters
        ----------
        y : 1D array
        """
        # Stored by reference (not copied); _invalidy marks the processed
        # arrays stale so they are rebuilt on next access (see get_ydata).
        self._yorig = y
        self._invalidy = True
        self.stale = True
def set_dashes(self, seq):
"""
Set the dash sequence.
The dash sequence is a sequence of floats of even length describing
the length of dashes and spaces in points.
For example, (5, 2, 1, 2) describes a sequence of 5 point and 1 point
dashes separated by 2 point spaces.
Parameters
----------
seq : sequence of floats (on/off ink in points) or (None, None)
If *seq* is empty or ``(None, None)``, the linestyle will be set
to solid.
"""
if seq == (None, None) or len(seq) == 0:
self.set_linestyle('-')
else:
self.set_linestyle((0, seq))
def update_from(self, other):
"""Copy properties from *other* to self."""
super().update_from(other)
self._linestyle = other._linestyle
self._linewidth = other._linewidth
self._color = other._color
self._markersize = other._markersize
self._markerfacecolor = other._markerfacecolor
self._markerfacecoloralt = other._markerfacecoloralt
self._markeredgecolor = other._markeredgecolor
self._markeredgewidth = other._markeredgewidth
self._dashSeq = other._dashSeq
self._us_dashSeq = other._us_dashSeq
self._dashOffset = other._dashOffset
self._us_dashOffset = other._us_dashOffset
self._dashcapstyle = other._dashcapstyle
self._dashjoinstyle = other._dashjoinstyle
self._solidcapstyle = other._solidcapstyle
self._solidjoinstyle = other._solidjoinstyle
self._linestyle = other._linestyle
self._marker = MarkerStyle(marker=other._marker)
self._drawstyle = other._drawstyle
@docstring.interpd
def set_dash_joinstyle(self, s):
"""
How to join segments of the line if it `~Line2D.is_dashed`.
The default joinstyle is :rc:`lines.dash_joinstyle`.
Parameters
----------
s : `.JoinStyle` or %(JoinStyle)s
"""
js = JoinStyle(s)
if self._dashjoinstyle != js:
self.stale = True
self._dashjoinstyle = js
@docstring.interpd
def set_solid_joinstyle(self, s):
"""
How to join segments if the line is solid (not `~Line2D.is_dashed`).
The default joinstyle is :rc:`lines.solid_joinstyle`.
Parameters
----------
s : `.JoinStyle` or %(JoinStyle)s
"""
js = JoinStyle(s)
if self._solidjoinstyle != js:
self.stale = True
self._solidjoinstyle = js
    def get_dash_joinstyle(self):
        """
        Return the `.JoinStyle` name (str) for dashed lines.

        See also `~.Line2D.set_dash_joinstyle`.
        """
        return self._dashjoinstyle.name
    def get_solid_joinstyle(self):
        """
        Return the `.JoinStyle` name (str) for solid lines.

        See also `~.Line2D.set_solid_joinstyle`.
        """
        return self._solidjoinstyle.name
@docstring.interpd
def set_dash_capstyle(self, s):
"""
How to draw the end caps if the line is `~Line2D.is_dashed`.
The default capstyle is :rc:`lines.dash_capstyle`.
Parameters
----------
s : `.CapStyle` or %(CapStyle)s
"""
cs = CapStyle(s)
if self._dashcapstyle != cs:
self.stale = True
self._dashcapstyle = cs
@docstring.interpd
def set_solid_capstyle(self, s):
"""
How to draw the end caps if the line is solid (not `~Line2D.is_dashed`)
The default capstyle is :rc:`lines.solid_capstyle`.
Parameters
----------
s : `.CapStyle` or %(CapStyle)s
"""
cs = CapStyle(s)
if self._solidcapstyle != cs:
self.stale = True
self._solidcapstyle = cs
    def get_dash_capstyle(self):
        """
        Return the `.CapStyle` name (str) for dashed lines.

        See also `~.Line2D.set_dash_capstyle`.
        """
        return self._dashcapstyle.name
    def get_solid_capstyle(self):
        """
        Return the `.CapStyle` name (str) for solid lines.

        See also `~.Line2D.set_solid_capstyle`.
        """
        return self._solidcapstyle.name
def is_dashed(self):
"""
Return whether line has a dashed linestyle.
A custom linestyle is assumed to be dashed, we do not inspect the
``onoffseq`` directly.
See also `~.Line2D.set_linestyle`.
"""
return self._linestyle in ('--', '-.', ':')
class _AxLine(Line2D):
    """
    A helper class that implements `~.Axes.axline`, by recomputing the artist
    transform at draw time.
    """

    def __init__(self, xy1, xy2, slope, **kwargs):
        # The [0, 1] data are placeholders; the actual endpoints are computed
        # at draw time by get_transform() from the current view limits.
        super().__init__([0, 1], [0, 1], **kwargs)
        if (xy2 is None and slope is None or
                xy2 is not None and slope is not None):
            raise TypeError(
                "Exactly one of 'xy2' and 'slope' must be given")
        self._slope = slope
        self._xy1 = xy1
        self._xy2 = xy2

    def get_transform(self):
        ax = self.axes
        # Map the given data points into scaled (log-aware) coordinates.
        points_transform = self._transform - ax.transData + ax.transScale
        if self._xy2 is not None:
            # two points were given
            (x1, y1), (x2, y2) = \
                points_transform.transform([self._xy1, self._xy2])
            dx = x2 - x1
            dy = y2 - y1
            if np.allclose(x1, x2):
                if np.allclose(y1, y2):
                    raise ValueError(
                        f"Cannot draw a line through two identical points "
                        f"(x={(x1, x2)}, y={(y1, y2)})")
                # Vertical line: represent the slope as infinity.
                slope = np.inf
            else:
                slope = dy / dx
        else:
            # one point and a slope were given
            x1, y1 = points_transform.transform(self._xy1)
            slope = self._slope
        (vxlo, vylo), (vxhi, vyhi) = ax.transScale.transform(ax.viewLim)
        # General case: find intersections with view limits in either
        # direction, and draw between the middle two points.
        if np.isclose(slope, 0):
            start = vxlo, y1
            stop = vxhi, y1
        elif np.isinf(slope):
            start = x1, vylo
            stop = x1, vyhi
        else:
            # Intersections with all four view-limit edges; the middle two
            # (after sorting) bound the visible segment.
            _, start, stop, _ = sorted([
                (vxlo, y1 + (vxlo - x1) * slope),
                (vxhi, y1 + (vxhi - x1) * slope),
                (x1 + (vylo - y1) / slope, vylo),
                (x1 + (vyhi - y1) / slope, vyhi),
            ])
        return (BboxTransformTo(Bbox([start, stop]))
                + ax.transLimits + ax.transAxes)

    def draw(self, renderer):
        self._transformed_path = None  # Force regen.
        super().draw(renderer)
class VertexSelector:
    """
    Manage the callbacks to maintain a list of selected vertices for `.Line2D`.
    Derived classes should override the `process_selected` method to do
    something with the picks.

    Here is an example which highlights the selected verts with red
    circles::

        import numpy as np
        import matplotlib.pyplot as plt
        import matplotlib.lines as lines

        class HighlightSelected(lines.VertexSelector):
            def __init__(self, line, fmt='ro', **kwargs):
                lines.VertexSelector.__init__(self, line)
                self.markers, = self.axes.plot([], [], fmt, **kwargs)

            def process_selected(self, ind, xs, ys):
                self.markers.set_data(xs, ys)
                self.canvas.draw()

        fig, ax = plt.subplots()
        x, y = np.random.rand(2, 30)
        line, = ax.plot(x, y, 'bs-', picker=5)

        selector = HighlightSelected(line)
        plt.show()
    """

    def __init__(self, line):
        """
        Initialize the class with a `.Line2D`.  The line should already be
        added to an `~.axes.Axes` and should have the picker property set.
        """
        if line.axes is None:
            raise RuntimeError('You must first add the line to the Axes')
        if line.get_picker() is None:
            raise RuntimeError('You must first set the picker property '
                               'of the line')
        self.axes = line.axes
        self.line = line
        self.canvas = self.axes.figure.canvas
        # Listen for pick events on the canvas; onpick filters for our line.
        self.cid = self.canvas.mpl_connect('pick_event', self.onpick)
        # Set of currently-selected vertex indices.
        self.ind = set()

    def process_selected(self, ind, xs, ys):
        """
        Default "do nothing" implementation of the `process_selected` method.

        Parameters
        ----------
        ind : list of int
            The indices of the selected vertices.
        xs, ys : array-like
            The coordinates of the selected vertices.
        """
        pass

    def onpick(self, event):
        """When the line is picked, update the set of selected indices."""
        if event.artist is not self.line:
            return
        # Symmetric difference: picking a vertex toggles its selection state.
        self.ind ^= set(event.ind)
        ind = sorted(self.ind)
        xdata, ydata = self.line.get_data()
        self.process_selected(ind, xdata[ind], ydata[ind])
# Module-level aliases kept for backward compatibility with code that
# accesses these tables directly from this module.
lineStyles = Line2D._lineStyles
lineMarkers = MarkerStyle.markers
drawStyles = Line2D.drawStyles
fillStyles = MarkerStyle.fillstyles
/Glances-3.4.0.3.tar.gz/Glances-3.4.0.3/glances/exports/glances_mqtt.py | import socket
import string
import sys
from glances.logger import logger
from glances.exports.glances_export import GlancesExport
from glances.globals import json_dumps
# Import paho for MQTT
from requests import certs
import paho.mqtt.client as paho
class Export(GlancesExport):
    """This class manages the MQTT export module."""

    def __init__(self, config=None, args=None):
        """Init the MQTT export IF."""
        super(Export, self).__init__(config=config, args=args)
        # Mandatory configuration keys (additional to host and port)
        self.user = None
        self.password = None
        self.topic = None
        self.tls = 'true'
        # Load the MQTT configuration file
        # NOTE(review): 'password' is mandatory but 'user' is only optional
        # (it defaults to 'glances' below) — confirm this is intentional.
        self.export_enable = self.load_conf(
            'mqtt', mandatories=['host', 'password'], options=['port', 'user', 'topic', 'tls', 'topic_structure']
        )
        if not self.export_enable:
            exit('Missing MQTT config')
        # Get the current hostname
        self.hostname = socket.gethostname()
        # Fall back to the default MQTT-over-TLS port when unset/0.
        self.port = int(self.port) or 8883
        self.topic = self.topic or 'glances'
        self.user = self.user or 'glances'
        # TLS is enabled only for the literal (case-insensitive) string 'true'.
        self.tls = self.tls and self.tls.lower() == 'true'
        self.topic_structure = (self.topic_structure or 'per-metric').lower()
        if self.topic_structure not in ['per-metric', 'per-plugin']:
            logger.critical("topic_structure must be either 'per-metric' or 'per-plugin'.")
            sys.exit(2)
        # Init the MQTT client
        self.client = self.init()
        if not self.client:
            exit("MQTT client initialization failed")

    def init(self):
        """Init the connection to the MQTT server.

        Returns the connected paho client, or None on failure/disabled.
        """
        if not self.export_enable:
            return None
        try:
            client = paho.Client(client_id='glances_' + self.hostname, clean_session=False)
            client.username_pw_set(username=self.user, password=self.password)
            if self.tls:
                # Use the CA bundle shipped with requests for verification.
                client.tls_set(certs.where())
            client.connect(host=self.host, port=self.port)
            # Background network loop; publish() calls below are async.
            client.loop_start()
            return client
        except Exception as e:
            logger.critical("Connection to MQTT server %s:%s failed with error: %s " % (self.host, self.port, e))
            return None

    def export(self, name, columns, points):
        """Write the points in MQTT.

        *name* is the plugin name; *columns* and *points* are parallel lists
        of metric names (dotted paths) and values.
        """
        WHITELIST = '_-' + string.ascii_letters + string.digits
        SUBSTITUTE = '_'

        def whitelisted(s, whitelist=WHITELIST, substitute=SUBSTITUTE):
            # Replace any character outside the whitelist so the result is a
            # safe MQTT topic segment.
            return ''.join(c if c in whitelist else substitute for c in s)

        if self.topic_structure == 'per-metric':
            # One MQTT message per metric: topic/host/plugin/metric/path.
            for sensor, value in zip(columns, points):
                try:
                    sensor = [whitelisted(name) for name in sensor.split('.')]
                    to_export = [self.topic, self.hostname, name]
                    to_export.extend(sensor)
                    topic = '/'.join(to_export)
                    self.client.publish(topic, value)
                except Exception as e:
                    logger.error("Can not export stats to MQTT server (%s)" % e)
        elif self.topic_structure == 'per-plugin':
            # One JSON message per plugin, with dotted keys expanded into a
            # nested object.
            try:
                topic = '/'.join([self.topic, self.hostname, name])
                sensor_values = dict(zip(columns, points))

                # Build the value to output
                output_value = dict()
                for key in sensor_values:
                    split_key = key.split('.')

                    # Add the parent keys if they don't exist
                    current_level = output_value
                    for depth in range(len(split_key) - 1):
                        if split_key[depth] not in current_level:
                            current_level[split_key[depth]] = dict()
                        current_level = current_level[split_key[depth]]

                    # Add the value
                    current_level[split_key[len(split_key) - 1]] = sensor_values[key]

                json_value = json_dumps(output_value)
                self.client.publish(topic, json_value)
            except Exception as e:
                logger.error("Can not export stats to MQTT server (%s)" % e)
/Makima-0.1.9.1-py3-none-any.whl/makima/windows/utils/keyboard.py | import ctypes
from time import sleep
from makima.windows.call_win_api.i_keyboard import IKeyboard, Key
# Win32 SendInput entry point used to synthesize keyboard events.
# NOTE: ctypes.windll only exists on Windows, so importing this module on
# other platforms fails here.
send_input = ctypes.windll.user32.SendInput
# Shorthand for the ULONG* type used in the INPUT struct definitions below.
pointer_unsigned_long = ctypes.POINTER(ctypes.c_ulong)
class KeyboardInput(ctypes.Structure):
    """
    Keyboard input C struct definition (Win32 KEYBDINPUT).
    """
    # Field order and ctypes types must match the Win32 layout exactly.
    _fields_ = [("wVk", ctypes.c_ushort),
                ("wScan", ctypes.c_ushort),
                ("dwFlags", ctypes.c_ulong),
                ("time", ctypes.c_ulong),
                ("dwExtraInfo", pointer_unsigned_long)]
class HardwareInput(ctypes.Structure):
    """
    Hardware input C struct definition (Win32 HARDWAREINPUT).
    """
    _fields_ = [("uMsg", ctypes.c_ulong),
                ("wParamL", ctypes.c_short),
                ("wParamH", ctypes.c_ushort)]
class MouseInput(ctypes.Structure):
    """
    Mouse input C struct definition (Win32 MOUSEINPUT).
    """
    _fields_ = [("dx", ctypes.c_long),
                ("dy", ctypes.c_long),
                ("mouseData", ctypes.c_ulong),
                ("dwFlags", ctypes.c_ulong),
                ("time", ctypes.c_ulong),
                ("dwExtraInfo", pointer_unsigned_long)]
class EventStorage(ctypes.Union):
    """
    Event storage C union definition: the variant part of the Win32 INPUT
    struct (keyboard, mouse or hardware event share the same storage).
    """
    _fields_ = [("ki", KeyboardInput),
                ("mi", MouseInput),
                ("hi", HardwareInput)]
class Input(ctypes.Structure):
    """
    Input C struct definition (Win32 INPUT): a type tag plus the event union.
    """
    _fields_ = [("type", ctypes.c_ulong),
                ("ii", EventStorage)]
class WinKeyboard(IKeyboard):
    """Windows implementation of IKeyboard, built on Win32 SendInput."""

    class _KeyCodes(object):
        """
        Holder for Windows keyboard codes stored as Keys.

        The hex values are Win32 virtual-key codes.
        """
        BACKSPACE = Key(0x08)  # BACKSPACE key
        TAB = Key(0x09)  # TAB key
        CLEAR = Key(0x0C)  # CLEAR key
        RETURN = Key(0x0D)  # ENTER key
        SHIFT = Key(0x10)  # SHIFT key
        CONTROL = Key(0x11)  # CTRL key
        ALT = Key(0x12)  # ALT key
        PAUSE = Key(0x13)  # PAUSE key
        CAPS_LOCK = Key(0x14)  # CAPS LOCK key
        ESCAPE = Key(0x1B)  # ESC key
        SPACE = Key(0x20)  # SPACEBAR
        PAGE_UP = Key(0x21)  # PAGE UP key
        PAGE_DOWN = Key(0x22)  # PAGE DOWN key
        END = Key(0x23)  # END key
        HOME = Key(0x24)  # HOME key
        LEFT = Key(0x25)  # LEFT ARROW key
        UP = Key(0x26)  # UP ARROW key
        RIGHT = Key(0x27)  # RIGHT ARROW key
        DOWN = Key(0x28)  # DOWN ARROW key
        PRINT_SCREEN = Key(0x2C)  # PRINT SCREEN key
        INSERT = Key(0x2D)  # INS key
        DELETE = Key(0x2E)  # DEL key
        VK_HELP = Key(0x2F)  # HELP key
        KEY_0 = Key(0x30)  # 0 key
        KEY_1 = Key(0x31)  # 1 key
        KEY_2 = Key(0x32)  # 2 key
        KEY_3 = Key(0x33)  # 3 key
        KEY_4 = Key(0x34)  # 4 key
        KEY_5 = Key(0x35)  # 5 key
        KEY_6 = Key(0x36)  # 6 key
        KEY_7 = Key(0x37)  # 7 key
        KEY_8 = Key(0x38)  # 8 key
        KEY_9 = Key(0x39)  # 9 key
        KEY_A = Key(0x41)  # A key
        KEY_B = Key(0x42)  # B key
        KEY_C = Key(0x43)  # C key
        KEY_D = Key(0x44)  # D key
        KEY_E = Key(0x45)  # E key
        KEY_F = Key(0x46)  # F key
        KEY_G = Key(0x47)  # G key
        KEY_H = Key(0x48)  # H key
        KEY_I = Key(0x49)  # I key
        KEY_J = Key(0x4A)  # J key
        KEY_K = Key(0x4B)  # K key
        KEY_L = Key(0x4C)  # L key
        KEY_M = Key(0x4D)  # M key
        KEY_N = Key(0x4E)  # N key
        KEY_O = Key(0x4F)  # O key
        KEY_P = Key(0x50)  # P key
        KEY_Q = Key(0x51)  # Q key
        KEY_R = Key(0x52)  # R key
        KEY_S = Key(0x53)  # S key
        KEY_T = Key(0x54)  # T key
        KEY_U = Key(0x55)  # U key
        KEY_V = Key(0x56)  # V key
        KEY_W = Key(0x57)  # W key
        KEY_X = Key(0x58)  # X key
        KEY_Y = Key(0x59)  # Y key
        KEY_Z = Key(0x5A)  # Z key
        LEFT_WIN = Key(0x5B)  # Left Windows key (Natural keyboard)
        RIGHT_WIN = Key(0x5C)  # Right Windows key (Natural keyboard)
        SLEEP = Key(0x5F)  # Computer Sleep key
        NUMPAD0 = Key(0x60)  # Numeric keypad 0 key
        NUMPAD1 = Key(0x61)  # Numeric keypad 1 key
        NUMPAD2 = Key(0x62)  # Numeric keypad 2 key
        NUMPAD3 = Key(0x63)  # Numeric keypad 3 key
        NUMPAD4 = Key(0x64)  # Numeric keypad 4 key
        NUMPAD5 = Key(0x65)  # Numeric keypad 5 key
        NUMPAD6 = Key(0x66)  # Numeric keypad 6 key
        NUMPAD7 = Key(0x67)  # Numeric keypad 7 key
        NUMPAD8 = Key(0x68)  # Numeric keypad 8 key
        NUMPAD9 = Key(0x69)  # Numeric keypad 9 key
        MULTIPLY = Key(0x6A)  # Multiply key
        ADD = Key(0x6B)  # Add key
        SEPARATOR = Key(0x6C)  # Separator key
        SUBTRACT = Key(0x6D)  # Subtract key
        DECIMAL = Key(0x6E)  # Decimal key
        DIVIDE = Key(0x6F)  # Divide key
        F1 = Key(0x70)  # F1 key
        F2 = Key(0x71)  # F2 key
        F3 = Key(0x72)  # F3 key
        F4 = Key(0x73)  # F4 key
        F5 = Key(0x74)  # F5 key
        F6 = Key(0x75)  # F6 key
        F7 = Key(0x76)  # F7 key
        F8 = Key(0x77)  # F8 key
        F9 = Key(0x78)  # F9 key
        F10 = Key(0x79)  # F10 key
        F11 = Key(0x7A)  # F11 key
        F12 = Key(0x7B)  # F12 key
        NUM_LOCK = Key(0x90)  # NUM LOCK key
        SCROLL_LOCK = Key(0x91)  # SCROLL LOCK
        LEFT_SHIFT = Key(0xA0)  # Left SHIFT key
        RIGHT_SHIFT = Key(0xA1)  # Right SHIFT key
        LEFT_CONTROL = Key(0xA2)  # Left CONTROL key
        RIGHT_CONTROL = Key(0xA3)  # Right CONTROL key
        OEM_1 = Key(0xBA)  # For the US standard keyboard, the ';:' key
        OEM_PLUS = Key(0xBB)  # For any country/region, the '+' key
        OEM_COMMA = Key(0xBC)  # For any country/region, the ',' key
        OEM_MINUS = Key(0xBD)  # For any country/region, the '-' key
        OEM_PERIOD = Key(0xBE)  # For any country/region, the '.' key
        OEM_2 = Key(0xBF)  # For the US standard keyboard, the '/?' key
        OEM_3 = Key(0xC0)  # For the US standard keyboard, the '`~' key
        OEM_4 = Key(0xDB)  # For the US standard keyboard, the '[{' key
        OEM_5 = Key(0xDC)  # For the US standard keyboard, the '\|' key
        OEM_6 = Key(0xDD)  # For the US standard keyboard, the ']}' key
        OEM_7 = Key(0xDE)  # For the US standard keyboard, the ''/"' key

    # Public alias used by callers to reference the key table.
    codes = _KeyCodes

    def press_key(self, hex_key_code):
        """
        Presses (and releases) key specified by a hex code.

        :param int hex_key_code: hexadecimal code for a key to be pressed.
        """
        self.press_key_and_hold(hex_key_code)
        self.release_key(hex_key_code)

    def press_key_and_hold(self, hex_key_code):
        """
        Presses (and holds) key specified by a hex code.

        :param int hex_key_code: hexadecimal code for a key to be pressed.
        """
        extra = ctypes.c_ulong(0)
        ii_ = EventStorage()
        # dwFlags=0 means "key down"; 0x48 is the scan code passed along.
        ii_.ki = KeyboardInput(hex_key_code, 0x48, 0, 0, ctypes.pointer(extra))
        x = Input(ctypes.c_ulong(1), ii_)  # type 1 = INPUT_KEYBOARD
        send_input(1, ctypes.pointer(x), ctypes.sizeof(x))

    def release_key(self, hex_key_code):
        """
        Releases key specified by a hex code.

        :param int hex_key_code: hexadecimal code for a key to be pressed.
        """
        extra = ctypes.c_ulong(0)
        ii_ = EventStorage()
        # dwFlags=0x0002 is KEYEVENTF_KEYUP.
        ii_.ki = KeyboardInput(
            hex_key_code, 0x48, 0x0002, 0, ctypes.pointer(extra))
        x = Input(ctypes.c_ulong(1), ii_)  # type 1 = INPUT_KEYBOARD
        send_input(1, ctypes.pointer(x), ctypes.sizeof(x))

    def send(self, *args, **kwargs):
        """
        Send key events as specified by Keys.

        If Key contains children Keys they will be recursively
        processed with current Key code pressed as a modifier key.

        :param args: keys to send.
        :param delay: optional keyword; seconds to sleep after each key.
        """
        delay = kwargs.get('delay', 0)
        for key in args:
            if key.children:
                # Hold this key as a modifier while sending its children.
                self.press_key_and_hold(key.code)
                self.send(*key.children)
                self.release_key(key.code)
            else:
                self.press_key(key.code)
            self._wait_for_key_combo_to_be_processed()
            sleep(delay)

    @staticmethod
    def _wait_for_key_combo_to_be_processed():
        # For key combinations timeout is needed to be processed.
        # This method is expressive shortcut to be used where needed.
        sleep(.05)

    @staticmethod
    def copy_text(string):
        # Places *string* on the system clipboard (requires pyperclip).
        import pyperclip
        pyperclip.copy(string)
/ChemDataExtractor-1.3.0-py3-none-any.whl/chemdataextractor/cli/dict.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
import click
from ..nlp.lexicon import ChemLexicon
from ..nlp.tokenize import ChemWordTokenizer
from ..nlp.tag import DictionaryTagger
from ..nlp.cem import CsDictCemTagger, CiDictCemTagger, STOPLIST, STOP_SUB, STOP_TOKENS
try:
from html import unescape
except ImportError:
from six.moves.html_parser import HTMLParser
unescape = HTMLParser().unescape
# Trailing nomenclature-group annotations, e.g. "(USAN)" or "[INN:BAN]".
NG_RE = re.compile('([\[\(](\d\d?CI|USAN|r?INN|BAN|JAN|USP)(\d\d?CI|USAN|r?INN|BAN|JAN|USP|[:\-,]|spanish|latin)*[\)\]])+$', re.I | re.U)
# Leading qualifier words to strip from names.
START_RE = re.compile('^(anhydrous|elemental|amorphous|conjugated|colloidal|activated) ', re.I | re.U)
# Trailing bracketed qualifier words to strip from names.
END_RE = re.compile('[\[\(]((crude )?product|substance|solution|anhydrous|derivative|analog|salt|modified|discontinued|injectable|anesthetic|pharmaceutical|natural|nonionic|european|ester|dye|tablets?|mineral|VAN|hydrolyzed)[\)\]]$', re.I | re.U)
# Trailing bracketed ratios like "(1:1)".
RATIO_RE = re.compile('[\[\(]((\d\d?)(:(\d\d?|\?|\d\.\d))+)[\)\]]$', re.I | re.U)
# A purely numeric final word, e.g. "compound 12".
NUM_END_RE = re.compile(' (\d+)$', re.U)
# A short alphanumeric final word, e.g. "vitamin B12".
ALPHANUM_END_RE = re.compile(' ([A-Za-z]\d*)$', re.U)
# A name fully wrapped in a single pair of parentheses.
BRACKET_RE = re.compile('^\(([^\(\)]+)\)$', re.I | re.U)

# Mapping of spelled-out Greek letter names to their unicode characters.
GREEK_WORDS = {
    'Alpha': 'Α',  # \u0391
    'Beta': 'Β',  # \u0392
    'Gamma': 'Γ',  # \u0393
    'Delta': 'Δ',  # \u0394
    'Epsilon': 'Ε',  # \u0395
    'Zeta': 'Ζ',  # \u0396
    'Eta': 'Η',  # \u0397
    'Theta': 'Θ',  # \u0398
    'Iota': 'Ι',  # \u0399
    'Kappa': 'Κ',  # \u039a
    'Lambda': 'Λ',  # \u039b
    'Mu': 'Μ',  # \u039c
    'Nu': 'Ν',  # \u039d
    'Xi': 'Ξ',  # \u039e
    'Omicron': 'Ο',  # \u039f
    'Pi': 'Π',  # \u03a0
    'Rho': 'Ρ',  # \u03a1
    'Sigma': 'Σ',  # \u03a3
    'Tau': 'Τ',  # \u03a4
    'Upsilon': 'Υ',  # \u03a5
    'Phi': 'Φ',  # \u03a6
    'Chi': 'Χ',  # \u03a7
    'Psi': 'Ψ',  # \u03a8
    'Omega': 'Ω',  # \u03a9
    'alpha': 'α',  # \u03b1
    'beta': 'β',  # \u03b2
    'gamma': 'γ',  # \u03b3
    'delta': 'δ',  # \u03b4
    'epsilon': 'ε',  # \u03b5
    'zeta': 'ζ',  # \u03b6
    'eta': 'η',  # \u03b7
    'theta': 'θ',  # \u03b8
    'iota': 'ι',  # \u03b9
    'kappa': 'κ',  # \u03ba
    'lambda': 'λ',  # \u03bb
    'mu': 'μ',  # \u03bc
    'nu': 'ν',  # \u03bd
    'xi': 'ξ',  # \u03be
    'omicron': 'ο',  # \u03bf
    'pi': 'π',  # \u03c0
    'rho': 'ρ',  # \u03c1
    'sigma': 'σ',  # \u03c3
    'tau': 'τ',  # \u03c4
    'upsilon': 'υ',  # \u03c5
    'phi': 'φ',  # \u03c6
    'chi': 'χ',  # \u03c7
    'psi': 'ψ',  # \u03c8
    'omega': 'ω',  # \u03c9
}

# Subset of GREEK_WORDS whose spelled-out form is unlikely to be an
# ordinary English word, so it can be replaced in any context.
UNAMBIGUOUS_GREEK_WORDS = {
    'Alpha': 'Α',  # \u0391
    'Beta': 'Β',  # \u0392
    'Gamma': 'Γ',  # \u0393
    'Delta': 'Δ',  # \u0394
    'Epsilon': 'Ε',  # \u0395
    'Kappa': 'Κ',  # \u039a
    'Lambda': 'Λ',  # \u039b
    'Sigma': 'Σ',  # \u03a3
    'Upsilon': 'Υ',  # \u03a5
    'Omega': 'Ω',  # \u03a9
    'alpha': 'α',  # \u03b1
    'beta': 'β',  # \u03b2
    'gamma': 'γ',  # \u03b3
    'delta': 'δ',  # \u03b4
    'epsilon': 'ε',  # \u03b5
    'kappa': 'κ',  # \u03ba
    'lambda': 'λ',  # \u03bb
    'sigma': 'σ',  # \u03c3
    'upsilon': 'υ',  # \u03c5
    'omega': 'ω',  # \u03c9
}

# Greek name wrapped in dots, e.g. ".alpha." (CAS-style names).
DOT_GREEK_RE = re.compile('\.(%s)\.' % '|'.join(re.escape(s) for s in GREEK_WORDS.keys()), re.U)
# Greek name delimited by digits/non-word characters on both sides.
GREEK_RE = re.compile('([\daA\W]|^)(%s)([\d\W]|$)' % '|'.join(re.escape(s) for s in GREEK_WORDS.keys()), re.U)
# Unambiguous Greek names, matched without delimiter requirements.
UNAMBIGUOUS_GREEK_RE = re.compile('(%s)' % '|'.join(re.escape(s) for s in UNAMBIGUOUS_GREEK_WORDS.keys()), re.U)
@click.group(name='dict')
@click.pass_context
def dict_cli(ctx):
    """Chemical dictionary commands."""
    # Click group: subcommands are registered below via @dict_cli.command().
    pass
def _process_name(name):
    """Fix issues with Jochem names.

    Strips annotations/qualifiers and un-inverts CAS-style names; returns
    the cleaned name string.
    """
    # Unescape HTML entities
    name = unescape(name)
    # Remove bracketed stuff on the end
    name = NG_RE.sub('', name).strip()  # Nomenclature groups
    name = END_RE.sub('', name).strip(', ')  # Words
    name = RATIO_RE.sub('', name).strip(', ')  # Ratios
    # Remove stuff off start
    name = START_RE.sub('', name).strip()
    # Remove balanced start and end brackets if none in between
    name = BRACKET_RE.sub('\g<1>', name)
    # Un-invert CAS style names: "acid, acetic-" -> "acetic-acid"-style
    # reordering, where components ending in '-' are prefixes.
    comps = name.split(', ')
    if len(comps) == 2:
        if comps[1].endswith('-'):
            name = comps[0]
            name = '%s%s' % (comps[1], name)
    elif len(comps) > 2:
        name = comps[0]
        for i in range(1, len(comps)):
            if comps[i].endswith('-'):
                name = '%s%s' % (comps[i], name)
            else:
                name = '%s %s' % (name, comps[i])
    return name
def _filter_name(name):
    """Filter words when adding to Dictionary. Return True if name should be added."""
    # Remove if length 3 or less
    if len(name) <= 3:
        return False
    # Remove if starts with IL-
    if name.startswith('IL-'):
        return False
    lowname = name.lower()
    # Remove if contains certain sequences
    if any(c in lowname for c in STOP_SUB):
        return False
    # Remove if (case-insensitive) exact match to stoplist
    if lowname in STOPLIST:
        return False
    comps = re.split('[ -]', lowname)
    # Remove if just single character + digits separated by spaces or hyphens (or the word compound)
    if all(c.isdigit() or len(c) == 1 or c == 'compound' for c in comps):
        return False
    # Remove if 3 or fewer letters with 2 or fewer digits
    if len(comps) == 2 and len(comps[0]) <= 3 and comps[0].isalpha() and len(comps[1]) <= 3 and comps[1].isdigit():
        return False
    # Remove if just greek characters and numbrs
    if re.match('^[Α-Ωα-ω0-9]+$', name):
        return False
    # Filter registry numbers? No real size benefit in DAWG.
    # if REG_RE.search(name):
    #     keep = False
    # Handle this at the token level
    # if name.endswith(' derivative') or name.endswith(' analog') or name.endswith(' solution'):
    #     keep = False
    # Filter this after matching and expanding boundaries
    # if name.startswith('-') or name.endswith('-'):
    #     keep = False
    # Filter this after matching and expanding boundaries
    # if not bracket_level(name) == 0:
    #     print(name)
    return True
def _filter_tokens(tokens):
    """Return True if none of *tokens* appears in the stop-token list."""
    return all(token not in STOP_TOKENS for token in tokens)
def _get_variants(name):
    """Return variants of chemical name.

    Produces spellings with Greek words replaced by unicode characters and
    with alternative hyphen/space handling of trailing alphanumerics.
    """
    names = [name]
    oldname = name
    # Map greek words to unicode characters
    if DOT_GREEK_RE.search(name):
        # CAS-style ".alpha." names: keep one variant with the bare word and
        # one with the unicode symbol.
        wordname = name
        while True:
            m = DOT_GREEK_RE.search(wordname)
            if m:
                wordname = wordname[:m.start(1)-1] + m.group(1) + wordname[m.end(1)+1:]
            else:
                break
        symbolname = name
        while True:
            m = DOT_GREEK_RE.search(symbolname)
            if m:
                symbolname = symbolname[:m.start(1)-1] + GREEK_WORDS[m.group(1)] + symbolname[m.end(1)+1:]
            else:
                break
        names = [wordname, symbolname]
    else:
        # Replace delimited Greek words, then the unambiguous ones anywhere.
        while True:
            m = GREEK_RE.search(name)
            if m:
                name = name[:m.start(2)] + GREEK_WORDS[m.group(2)] + name[m.end(2):]
            else:
                break
        while True:
            m = UNAMBIGUOUS_GREEK_RE.search(name)
            if m:
                name = name[:m.start(1)] + GREEK_WORDS[m.group(1)] + name[m.end(1):]
            else:
                break
        if not name == oldname:
            names.append(name)
    newnames = []
    for name in names:
        # If last word \d+, add variants with hyphen and no space preceding
        if NUM_END_RE.search(name):
            newnames.append(NUM_END_RE.sub('-\g<1>', name))
            newnames.append(NUM_END_RE.sub('\g<1>', name))
        # If last word [A-Za-z]\d* add variants with hyphen preceding.
        if ALPHANUM_END_RE.search(name):
            newnames.append(ALPHANUM_END_RE.sub('-\g<1>', name))
    names.extend(newnames)
    return names
# Module-level tokenizer instance shared by _make_tokens.
tokenizer = ChemWordTokenizer(split_last_stop=False)
def _make_tokens(name):
    """Return a list of token lists, one per acceptable variant of *name*.

    The name is normalised (_process_name), filtered (_filter_name),
    expanded into variants (_get_variants), and each surviving variant is
    tokenized; variants containing stop tokens are dropped.
    """
    tokenized_names = []
    name = _process_name(name)
    if _filter_name(name):
        for name in _get_variants(name):
            if _filter_name(name):
                tokens = tokenizer.tokenize(name)
                if _filter_tokens(tokens):
                    tokenized_names.append(tokens)
    #print(tokenized_names)
    return tokenized_names
@dict_cli.command()
@click.argument('jochem', type=click.File('r', encoding='utf8'))
@click.option('--output', '-o', type=click.File('w', encoding='utf8'), help='Dictionary file.', default=sys.stdout)
@click.option('--csoutput', '-c', type=click.File('w', encoding='utf8'), help='Case-sensitive dictionary file.', default=sys.stdout)
@click.pass_obj
def prepare_jochem(ctx, jochem, output, csoutput):
    """Process and filter jochem file to produce list of names for dictionary."""
    click.echo('chemdataextractor.dict.prepare_jochem')
    for i, line in enumerate(jochem):
        # Progress marker for long-running input files.
        print('JC%s' % i)
        if line.startswith('TM '):
            # 'TM ' lines carry term names; '@match=ci' marks case-insensitive.
            if line.endswith(' @match=ci\n'):
                # Strip the 'TM ' prefix and the 11-char ' @match=ci\n' suffix.
                for tokens in _make_tokens(line[3:-11]):
                    output.write(' '.join(tokens))
                    output.write('\n')
            else:
                # Case-sensitive term: strip 'TM ' prefix and trailing newline.
                for tokens in _make_tokens(line[3:-1]):
                    csoutput.write(' '.join(tokens))
                    csoutput.write('\n')
@dict_cli.command()
@click.argument('include', type=click.File('r', encoding='utf8'))
@click.option('--output', '-o', type=click.File('w', encoding='utf8'), help='Output file.', default=sys.stdout)
@click.pass_obj
def prepare_include(ctx, include, output):
    """Process and filter include file to produce list of names for dictionary."""
    click.echo('chemdataextractor.dict.prepare_include')
    for i, line in enumerate(include):
        # Progress marker for long-running input files.
        print('IN%s' % i)
        # One name per line; tokenize each acceptable variant.
        for tokens in _make_tokens(line.strip()):
            output.write(' '.join(tokens))
            output.write('\n')
@dict_cli.command()
@click.argument('inputs', type=click.File('r', encoding='utf8'), nargs=-1)
@click.option('--output', help='Output model file.', required=True)
@click.option('--cs/--no-cs', help='Whether case-sensitive.', default=False)
@click.pass_obj
def build(ctx, inputs, output, cs):
    """Build chemical name dictionary.

    Reads whitespace-separated token lines from each input file, builds a
    DictionaryTagger model and saves it to *output*.
    """
    click.echo('chemdataextractor.dict.build')
    dt = DictionaryTagger(lexicon=ChemLexicon(), case_sensitive=cs)
    names = []
    # 'infile' rather than 'input' to avoid shadowing the builtin.
    for infile in inputs:
        for line in infile:
            names.append(line.split())
    dt.build(words=names)
    dt.save(output)
@dict_cli.command()
@click.argument('model', required=True)
@click.option('--cs/--no-cs', default=False)
@click.option('--corpus', '-c', type=click.File('r', encoding='utf8'), required=True)
@click.option('--output', '-o', type=click.File('w', encoding='utf8'), help='Output file.', default=sys.stdout)
@click.pass_obj
def tag(ctx, model, cs, corpus, output):
    """Tag chemical entities and write CHEMDNER annotations predictions file.

    Each corpus line is a sequence of token/tag items. Predictions are
    written to *output*; tokens predicted as chemical mentions that the gold
    annotation does not mark as such are printed for inspection.
    """
    click.echo('chemdataextractor.dict.tag')
    tagger = CsDictCemTagger(model=model) if cs else CiDictCemTagger(model=model)
    for line in corpus:
        sentence = []
        goldsentence = []
        for t in line.split():
            # 'goldtag' avoids shadowing this command's own name.
            token, goldtag = t.rsplit('/', 1)
            goldsentence.append((token, goldtag))
            sentence.append(token)
        if sentence:
            tokentags = tagger.tag(sentence)
            for i, tokentag in enumerate(tokentags):
                goldtokentag = goldsentence[i]
                # Report predicted chemical mentions absent from the gold tags.
                if goldtokentag[1] not in {'B-CM', 'I-CM'} and tokentag[1] in {'B-CM', 'I-CM'}:
                    print(line)
                    print(tokentag[0])
            # Reuse the predictions computed above instead of re-tagging the
            # sentence a second time (the original called tagger.tag() twice).
            output.write(' '.join('/'.join(tokentag) for tokentag in tokentags))
            output.write('\n')
        else:
            output.write('\n')
import dataclasses
import h5py
from imswitch.imcommon.controller import MainController, PickDatasetsController
from imswitch.imcommon.model import (
ostools, initLogger, generateAPI, generateShortcuts, SharedAttributes
)
from imswitch.imcommon.framework import Thread
from .server import ImSwitchServer
from imswitch.imcontrol.model import configfiletools
from imswitch.imcontrol.view import guitools
from . import controllers
from .CommunicationChannel import CommunicationChannel
from .MasterController import MasterController
from .PickSetupController import PickSetupController
from .basecontrollers import ImConWidgetControllerFactory
class ImConMainController(MainController):
    """Main controller of the imcontrol module.

    Connects main-view signals to handlers, creates the communication channel
    and master controller, instantiates one controller per GUI widget,
    generates the scripting API and keyboard shortcuts, and optionally starts
    an ImSwitchServer worker in a background thread.
    """

    def __init__(self, options, setupInfo, mainView, moduleCommChannel):
        self.__logger = initLogger(self)
        self.__logger.debug('Initializing')

        self.__options = options
        self.__setupInfo = setupInfo
        self.__mainView = mainView
        self._moduleCommChannel = moduleCommChannel

        # Connect view signals
        self.__mainView.sigLoadParamsFromHDF5.connect(self.loadParamsFromHDF5)
        self.__mainView.sigPickSetup.connect(self.pickSetup)
        self.__mainView.sigClosing.connect(self.closeEvent)

        # Init communication channel and master controller
        self.__commChannel = CommunicationChannel(self, self.__setupInfo)
        self.__masterController = MasterController(self.__setupInfo, self.__commChannel,
                                                   self._moduleCommChannel)

        # List of Controllers for the GUI Widgets
        self.__factory = ImConWidgetControllerFactory(
            self.__setupInfo, self.__masterController, self.__commChannel, self._moduleCommChannel
        )

        self.pickSetupController = self.__factory.createController(
            PickSetupController, self.__mainView.pickSetupDialog
        )
        self.pickDatasetsController = self.__factory.createController(
            PickDatasetsController, self.__mainView.pickDatasetsDialog
        )

        # One controller per widget, resolved by naming convention
        # ('<WidgetKey>Controller' in the controllers package).
        self.controllers = {}
        for widgetKey, widget in self.__mainView.widgets.items():
            self.controllers[widgetKey] = self.__factory.createController(
                getattr(controllers, f'{widgetKey}Controller'), widget
            )

        # Generate API
        self.__api = None
        apiObjs = list(self.controllers.values()) + [self.__commChannel]
        self.__api = generateAPI(
            apiObjs,
            missingAttributeErrorMsg=lambda attr: f'The imcontrol API does either not have any'
                                                  f' method {attr}, or the widget that defines it'
                                                  f' is not included in your currently active'
                                                  f' hardware setup file.'
        )

        # Generate Shorcuts
        self.__shortcuts = None
        shorcutObjs = list(self.__mainView.widgets.values())
        self.__shortcuts = generateShortcuts(shorcutObjs)
        self.__mainView.addShortcuts(self.__shortcuts)

        if setupInfo.pyroServerInfo.active:
            # Optional server exposing the generated API; runs in its own thread.
            self._serverWorker = ImSwitchServer(self.__api, setupInfo)
            self.__logger.debug(self.__api)
            self._thread = Thread()
            self._serverWorker.moveToThread(self._thread)
            self._thread.started.connect(self._serverWorker.run)
            self._thread.finished.connect(self._serverWorker.stop)
            self._thread.start()

    @property
    def api(self):
        """Generated scripting API object."""
        return self.__api

    @property
    def shortcuts(self):
        """Generated keyboard shortcuts."""
        return self.__shortcuts

    def loadParamsFromHDF5(self):
        """ Set detector, positioner, laser etc. params from values saved in a
        user-picked HDF5 snap/recording. """
        filePath = guitools.askForFilePath(self.__mainView, 'Open HDF5 file', nameFilter='*.hdf5')
        if not filePath:
            return
        with h5py.File(filePath) as file:
            datasetsInFile = file.keys()
            if len(datasetsInFile) < 1:
                # File does not contain any datasets
                return
            elif len(datasetsInFile) == 1:
                datasetToLoad = list(datasetsInFile)[0]
            else:
                # File contains multiple datasets; let the user pick exactly one.
                self.pickDatasetsController.setDatasets(filePath, datasetsInFile)
                if not self.__mainView.showPickDatasetsDialogBlocking():
                    return
                datasetsSelected = self.pickDatasetsController.getSelectedDatasets()
                if len(datasetsSelected) != 1:
                    return
                datasetToLoad = datasetsSelected[0]
            attrs = SharedAttributes.fromHDF5File(file, datasetToLoad)
            self.__commChannel.sharedAttrs.update(attrs)

    def pickSetup(self):
        """ Let the user change which setup is used. """
        options, _ = configfiletools.loadOptions()
        self.pickSetupController.setSetups(configfiletools.getSetupList())
        self.pickSetupController.setSelectedSetup(options.setupFileName)
        if not self.__mainView.showPickSetupDialogBlocking():
            return
        setupFileName = self.pickSetupController.getSelectedSetup()
        if not setupFileName:
            return
        # Changing setups requires a restart; confirm with the user first.
        proceed = guitools.askYesNoQuestion(self.__mainView, 'Warning',
                                            'The software will restart. Continue?')
        if not proceed:
            return
        options = dataclasses.replace(options, setupFileName=setupFileName)
        configfiletools.saveOptions(options)
        ostools.restartSoftware()

    def closeEvent(self):
        """Shut down all created controllers and the master controller."""
        self.__logger.debug('Shutting down')
        self.__factory.closeAllCreatedControllers()
        self.__masterController.closeEvent()
# Copyright (C) 2020-2021 ImSwitch developers
# This file is part of ImSwitch.
#
# ImSwitch is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ImSwitch is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
import json
import re
class Entity():
    """An API.AI entity: a named collection of value/synonym entries."""

    def __init__(self, name=None, entity_json=None):
        """Create an entity from a *name*, or hydrate it from *entity_json*."""
        if name and not entity_json:
            self.name = name
            self.entries = []
            self.isEnum = None
            self.id = None
        elif entity_json:
            self.update(entity_json)
        else:
            raise TypeError('Must provide a "name" argument if no json given')

    def add_entry(self, value, synonyms=None):
        """Append an entry; enum entities use the value itself as synonyms.

        Bug fixes vs. the original: the enum entry was unconditionally
        overwritten (missing ``else``), and the mutable default ``synonyms=[]``
        was shared across calls.
        """
        if self.isEnum:
            entry = {'value': value, 'synonyms': value}
        else:
            entry = {'value': value, 'synonyms': synonyms or []}
        self.entries.append(entry)

    def add_synonyms(self, entry, synonyms):
        """Extend the synonym list of the entry at index *entry*.

        The original extended the entry dict itself, which raised
        AttributeError; the synonyms list is the intended target.
        """
        self.entries[entry]['synonyms'].extend(synonyms)

    @property
    def serialize(self):
        """JSON string of this entity's attributes."""
        return json.dumps(self.__dict__)

    def __repr__(self):
        return '@' + self.name

    def update(self, entity_json):
        """Update attributes from a dict or a JSON string."""
        try:
            self.__dict__.update(entity_json)
        except TypeError:
            self.__dict__.update(json.loads(entity_json))
class Intent():
    """Represents an Intent object within the API.AI REST API.

    Intents are created internally from an Assistant app's action-decorated
    view functions. These objects provide the JSON schema for registering,
    updating, and removing intents in the API.AI developer console via JSON
    requests.
    """

    def __init__(self, name=None, priority=500000, fallback_intent=False, contexts=None, events=None, intent_json=None):
        # Either build a fresh intent from a name, or hydrate from JSON.
        if name and not intent_json:
            self.name = name
            self.auto = True
            self.contexts = contexts or []
            self.templates = []
            self.userSays = []
            self.responses = []
            self.priority = priority
            self.fallbackIntent = fallback_intent
            self.webhookUsed = True
            self.webhookForSlotFilling = True
            self.events = Intent._build_events(events)
            self.id = None
        elif intent_json:
            self.update(intent_json)
        else:
            raise TypeError('Must provide a "name" argument if no json given')

    @staticmethod
    def _build_events(events):
        """Wrap event names in the {'name': ...} dicts the API expects."""
        return [] if events is None else [{'name': event} for event in events]

    def __repr__(self):
        return "<Intent: {}>".format(self.name)

    def registered(self):
        # Truthy only when the intent has an API-assigned id; returns None otherwise.
        if self.id:
            return True

    def add_example(self, phrase, templ_entity_map=None):  # TODO
        """Add a user-says example, annotated when an entity map is given."""
        if templ_entity_map:
            example = UserDefinedExample(phrase, templ_entity_map)
        else:
            example = AutoAnnotedExamle(phrase)
        self.userSays.append(example.serialize)

    def add_action(self, action_name, parameters=[]):
        # NOTE(review): replaces (not appends to) any previous responses.
        self.responses = [{
            'action': action_name,
            'resetContexts': False,
            'affectedContexts': [],  # TODO: register context outs
            'parameters': parameters,
            'messages': []  # TODO: possibly register action responses to call from intent object directly
        }]
        # self.responses.append(new_response)

    def add_event(self, event_name):
        """Register an event that can trigger this intent."""
        self.events.append({'name': event_name})

    @property
    def serialize(self):
        """JSON string of this intent's attributes."""
        return json.dumps(self.__dict__)

    def update(self, intent_json):
        """Update attributes from a dict or a JSON string."""
        try:
            self.__dict__.update(intent_json)
        except TypeError:
            self.__dict__.update(json.loads(intent_json))
class ExampleBase(object):
    """Base class for user-says example phrases sent to API.AI."""

    def __init__(self, phrase, user_defined=False, isTemplate=False):
        """Store the phrase text, annotation flags and an empty data list."""
        self.text = phrase
        self.userDefined = user_defined
        self.isTemplate = isTemplate
        self.data = []

    @property
    def serialize(self):
        """Dict representation used in an intent's userSays payload."""
        payload = {'data': self.data, 'isTemplate': self.isTemplate}
        payload['count'] = 0
        return payload
class AutoAnnotedExamle(ExampleBase):
    """Example phrase left to API.AI's automatic annotation.

    The class name's spelling is kept for backward compatibility.
    """

    def __init__(self, phrase):
        super(AutoAnnotedExamle, self).__init__(phrase)
        # self.text is already set by the base class; only record the data.
        self.data.append({'text': self.text, 'userDefined': False})
class UserDefinedExample(ExampleBase):
    """Example phrase with user-defined entity annotations.

    *entity_map* maps literal words in the phrase to entity names; mapped
    words are annotated and the remaining text is stored verbatim.
    """

    def __init__(self, phrase, entity_map):
        super(UserDefinedExample, self).__init__(phrase, user_defined=True)
        # import ipdb; ipdb.set_trace()
        self.entity_map = entity_map
        self._parse_phrase(self.text)

    def _parse_phrase(self, sub_phrase):
        """Recursively split *sub_phrase* around mapped words, annotating them."""
        if not sub_phrase:
            return
        for value in self.entity_map:
            # Currency-prefixed values: '\b' does not match after a symbol
            # like '$', so match any single leading character instead.
            re_value = r".\b{}\b".format(value[1:]) if value.startswith(('$', '¥', '¥', '€', '£')) else r"\b{}\b".format(value)
            if re.search(re_value, sub_phrase):
                parts = sub_phrase.split(value, 1)
                self._parse_phrase(parts[0])
                self._annotate_params(value)
                self._parse_phrase(parts[1])
                return
        # No mapped word found: keep the remaining text unannotated.
        self.data.append({'text': sub_phrase})

    def _annotate_params(self, word):
        """Annotates a given word for the UserSays data field of an Intent object.

        Annotations are created using the entity map within the user_says.yaml template.
        """
        annotation = {}
        annotation['text'] = word
        annotation['meta'] = '@' + self.entity_map[word]
        annotation['alias'] = self.entity_map[word].replace('sys.', '')
        annotation['userDefined'] = True
        self.data.append(annotation)
var acorn = require('acorn-node');
var walk = require('acorn-node/walk');
var defined = require('defined');
// Fast pre-filter for the default word; see exports.find below.
var requireRe = /\brequire\b/;
// Parse `src` with acorn-node, translating detective's option aliases
// (range/loc) into acorn's option names. Unset options fall back to
// acorn-node's defaults.
function parse (src, opts) {
    if (!opts) opts = {};
    var acornOpts = {
        ranges: defined(opts.ranges, opts.range),
        locations: defined(opts.locations, opts.loc),
        allowReserved: defined(opts.allowReserved, true),
        allowImportExportEverywhere: defined(opts.allowImportExportEverywhere, false)
    };
    // Use acorn-node's defaults for the rest.
    if (opts.ecmaVersion != null) acornOpts.ecmaVersion = opts.ecmaVersion;
    if (opts.sourceType != null) acornOpts.sourceType = opts.sourceType;
    if (opts.allowHashBang != null) acornOpts.allowHashBang = opts.allowHashBang;
    if (opts.allowReturnOutsideFunction != null) acornOpts.allowReturnOutsideFunction = opts.allowReturnOutsideFunction;
    return acorn.parse(src, acornOpts);
}
// Main entry point: return only the string module names found in src.
var exports = module.exports = function (src, opts) {
    return exports.find(src, opts).strings;
};
exports.find = function (src, opts) {
if (!opts) opts = {};
var word = opts.word === undefined ? 'require' : opts.word;
if (typeof src !== 'string') src = String(src);
var isRequire = opts.isRequire || function (node) {
return node.callee.type === 'Identifier'
&& node.callee.name === word
;
};
var modules = { strings : [], expressions : [] };
if (opts.nodes) modules.nodes = [];
var wordRe = word === 'require' ? requireRe : RegExp('\\b' + word + '\\b');
if (!wordRe.test(src)) return modules;
var ast = parse(src, opts.parse);
function visit(node, st, c) {
var hasRequire = wordRe.test(src.slice(node.start, node.end));
if (!hasRequire) return;
walk.base[node.type](node, st, c);
if (node.type !== 'CallExpression') return;
if (isRequire(node)) {
if (node.arguments.length) {
var arg = node.arguments[0];
if (arg.type === 'Literal') {
modules.strings.push(arg.value);
}
else if (arg.type === 'TemplateLiteral'
&& arg.quasis.length === 1
&& arg.expressions.length === 0) {
modules.strings.push(arg.quasis[0].value.raw);
}
else {
modules.expressions.push(src.slice(arg.start, arg.end));
}
}
if (opts.nodes) modules.nodes.push(node);
}
}
walk.recursive(ast, null, {
Statement: visit,
Expression: visit
});
return modules;
}; | PypiClean |
from __future__ import print_function
from CherwellAPI import CherwellClient
import pickle
import time
#########################################################################################
# This example demonstrates the use of the REST API token that's retrieved from the Cherwell
# Server. The token contains a bearer token code once authenticated, and this is used in all
# subsequent REST API calls. This code also demonstrates that the token can be cached
# and reused if not expired.
###########################################################################################
#############################################
# Change the following to suit your instance
#############################################
# Connection settings -- replace the placeholders with your instance details.
base_uri = "http://<Your Cherwell Host here>"
username = "<Your UserName Here>"
password = "<Your Password here>"
api_key = "<Your Cherwell REST API Client Key here>"

# Create a new CherwellClient connection
cherwell_client = CherwellClient.Connection(base_uri, api_key, username, password)

# Show the current token
print("Current Token is: {}\n".format(cherwell_client.token))

# The token is only retrieved when a api call is made for the first time,
# to do so - get an existing Business Object ID - assuming here its not already cached
print("Incident Business Object Id is: {}".format(cherwell_client.get_business_object_id("Incident2")))

# Show the current token - if authenticated, should now have a value
print("Token is now: {}\n".format(cherwell_client.token))

# Check whether the token is expired or not
print("Current Token Expiry time in GMT is: {}".format(cherwell_client.token.token_expiry_gmt()))
print("Current Local GMT Time is: {}".format(cherwell_client.token.current_time_gmt()))
print("Current Token is Expired: {}".format(cherwell_client.token.expired()))

# Sleep until the token has expired - (Tip: set the token value in Cherwell to a couple of mins or less)
while not cherwell_client.token.expired():
    print("#", end="")
    time.sleep(5)

# Retrieve another BusinessObject ID with an expired token
print("Change Request Business Object Id is: {}".format(cherwell_client.get_business_object_id("ChangeRequest")))

# Check the token details again - the CherwellClient Connection should take care of refreshing the token
print("Current Token Expiry time in GMT is: {}".format(cherwell_client.token.token_expiry_gmt()))
print("Current Local GMT Time is: {}".format(cherwell_client.token.current_time_gmt()))
print("Current Token is Expired: {}".format(cherwell_client.token.expired()))

# Save the current token to disk
print("Saving Token - caching")
pickle.dump(cherwell_client.token, open("token_cached.pic", "wb"))

# Get the token back from disk
print("Retrieving Token from cache")
saved_token = pickle.load(open("token_cached.pic", "rb"))

# Create a new cherwell client passing in the saved token - so it doesn't need to create a new one
new_cherwell_client = cherwell_client = CherwellClient.Connection(base_uri, api_key, username, password, token=saved_token)

# Retrieve another BusinessObject ID with this cached token
print("Problem Business Object Id is: {}".format(cherwell_client.get_business_object_id("Problem")))

# Compare the values from the previous token to this one - should be same
print("\nCached Token Expiry time in GMT is: {}".format(new_cherwell_client.token.token_expiry_gmt()))
print("Cached Local GMT Time is: {}".format(new_cherwell_client.token.current_time_gmt()))
print("Cached Token is Expired: {}".format(new_cherwell_client.token.expired()))
import sys
import os.path
import itertools
import aql
# ==============================================================================
class ErrorNoCommonSourcesDir(Exception):
    """Raised when source files do not share a common directory for rsync."""

    def __init__(self, sources):
        message = "Can't rsync disjoined files: %s" % (sources,)
        super(ErrorNoCommonSourcesDir, self).__init__(message)
# ==============================================================================
def _to_cygwin_path(path):
    """Convert a native (Windows) path to its cygwin form.

    Single-letter drives such as 'C:' become '/cygdrive/c' and backslashes
    become forward slashes. An empty path maps to '.'.
    """
    if not path:
        return '.'
    path_sep = '/'
    drive, path = aql.split_drive(path)
    # Only translate single-letter drives ('C:'); other prefixes are kept as-is.
    if drive.find(':') == 1:
        drive = "/cygdrive/" + drive[0]
    path = drive + path
    if path[-1] in ('\\', '/'):
        # NOTE(review): a trailing '/' survives the replace() below, so
        # appending last_sep can double it -- confirm intended behaviour.
        last_sep = path_sep
    else:
        last_sep = ''
    path = path.replace('\\', '/')
    return path + last_sep
# ==============================================================================
def _norm_local_path(path):
if not path:
return '.'
path_sep = os.path.sep
path = str(path)
if path[-1] in (path_sep, os.path.altsep):
last_sep = path_sep
else:
last_sep = ''
path = os.path.normcase(os.path.normpath(path))
return path + last_sep
# ==============================================================================
def _norm_remote_path(path):
if not path:
return '.'
path_sep = '/'
if path[-1] in (path_sep, os.path.altsep):
last_sep = path_sep
else:
last_sep = ''
path = os.path.normpath(path).replace('\\', path_sep)
return path + last_sep
# ==============================================================================
def _split_remote_path(remote_path):
    """Split a '[USER@][HOST:]PATH' spec into (user, host, normalised_path).

    Absolute paths are always treated as purely local (empty user and host).
    """
    user = ''
    host = ''
    if not os.path.isabs(remote_path):
        # Only relative specs may carry a user/host prefix.
        stripped = remote_path.strip()
        before_at, at_sep, after_at = stripped.partition('@')
        if at_sep:
            user = before_at
            stripped = after_at
        before_colon, colon_sep, after_colon = stripped.partition(':')
        if colon_sep:
            host = before_colon
            stripped = after_colon
        remote_path = stripped
    return user, host, _norm_remote_path(remote_path)
# ==============================================================================
class RemotePath(object):
    """A local or remote ('user@host:path') rsync endpoint."""

    __slots__ = ('path', 'host', 'user')

    def __init__(self, remote_path, user=None, host=None):
        # user/host parsed out of the path string are only fallbacks when
        # explicit arguments are not given.
        u, h, remote_path = _split_remote_path(remote_path)
        if not user:
            user = u
        if not host:
            host = h
        self.path = remote_path
        self.host = host
        self.user = user

    # -----------------------------------------------------------

    def is_remote(self):
        """True when the path points at a remote host."""
        return bool(self.host)

    # -----------------------------------------------------------

    def __str__(self):
        return self.get()

    # -----------------------------------------------------------

    def join(self, other):
        """Return a new RemotePath with *other* appended to this path."""
        if self.host:
            path = self.path + '/' + _norm_remote_path(other)
        else:
            path = os.path.join(self.path, _norm_local_path(other))
        return RemotePath(path, self.user, self.host)

    # -----------------------------------------------------------

    def basename(self):
        """Return the final path component."""
        if self.host:
            last_slash_pos = self.path.rfind('/')
            return self.path[last_slash_pos + 1:]
        else:
            return os.path.basename(self.path)

    # -----------------------------------------------------------

    def get(self, cygwin_path=False):
        """Return the path as an rsync argument string.

        Remote paths render as '[user@]host:path'; local paths are optionally
        converted to cygwin form.
        """
        if self.host:
            if self.user:
                return "%s@%s:%s" % (self.user, self.host, self.path)
            return "%s:%s" % (self.host, self.path)
        else:
            if cygwin_path:
                return _to_cygwin_path(self.path)
            return self.path
# ==============================================================================
class RSyncPushBuilder(aql.FileBuilder):
    """Aqualid builder that pushes local files to a (possibly remote) path
    via rsync, optionally over ssh with a key file and exclude patterns."""

    NAME_ATTRS = ('remote_path', 'source_base')
    SIGNATURE_ATTRS = ('cmd', )

    def __init__(self, options, remote_path, source_base=None,
                 host=None, login=None, key_file=None, exclude=None):
        # Cygwin-style path conversion is needed when rsync is a cygwin build
        # but the interpreter is not running under cygwin.
        self.rsync_cygwin = (
            sys.platform != 'cygwin') and options.rsync_cygwin.get()

        if source_base:
            self.source_base = _norm_local_path(source_base)
        else:
            self.source_base = None

        self.remote_path = RemotePath(remote_path, login, host)
        self.cmd = self.__get_cmd(options, key_file, exclude)
        self.rsync = options.rsync.get()

        self.file_value_type = aql.FileTimestampEntity

    def __get_cmd(self, options, key_file, excludes):
        """Assemble the fixed part of the rsync command line."""
        cmd = [options.rsync.get()]
        cmd += options.rsync_flags.get()
        if excludes:
            excludes = aql.to_sequence(excludes)
            cmd += itertools.chain(*itertools.product(['--exclude'], excludes))
        if self.remote_path.is_remote():
            ssh_flags = options.rsync_ssh_flags.get()
            if key_file:
                if self.rsync_cygwin:
                    key_file = _to_cygwin_path(key_file)
                ssh_flags += ['-i', key_file]
            cmd += ['-e', 'ssh %s' % ' '.join(ssh_flags)]
        return cmd

    def _get_sources(self, source_entities):
        """Return normalised source paths, made relative to source_base."""
        sources = [_norm_local_path(src.get()) for src in source_entities]
        source_base = self.source_base
        if source_base:
            sources_base_len = len(source_base)
            for i, src in enumerate(sources):
                if src.startswith(source_base):
                    src = src[sources_base_len:]
                    if not src:
                        src = '.'
                    sources[i] = src
        return sources

    def _set_targets(self, source_entities, sources, targets):
        """Register one target entity per source under the remote path."""
        remote_path = self.remote_path
        source_base = self.source_base
        # Remote targets cannot be stat'ed, so use simple entities for them.
        make_entity = self.make_simple_entity if remote_path.is_remote() \
            else self.make_file_entity
        for src_value, src in zip(source_entities, sources):
            if not source_base:
                src = os.path.basename(src)
            target_path = remote_path.join(src)
            target_entity = make_entity(target_path.get())
            targets[src_value].add_target_entity(target_entity)

    def build_batch(self, source_entities, targets):
        """Run rsync over all sources and record the produced targets."""
        sources = self._get_sources(source_entities)

        cmd = list(self.cmd)

        tmp_r, tmp_w = None, None
        try:
            if self.rsync_cygwin:
                # A list (not a lazy map) is required: sorted() below would
                # exhaust a map object, leaving _set_targets with no sources.
                sources = [_to_cygwin_path(src) for src in sources]
            sorted_sources = sorted(sources)

            source_base = self.source_base
            if source_base:
                if self.rsync_cygwin:
                    source_base = _to_cygwin_path(source_base)
                # Feed the file list to rsync via a pipe (--files-from=-).
                tmp_r, tmp_w = os.pipe()
                os.write(tmp_w, '\n'.join(sorted_sources).encode('utf-8'))
                os.close(tmp_w)
                tmp_w = None
                cmd += ["--files-from=-", source_base]
            else:
                cmd += sorted_sources

            remote_path = self.remote_path.get(self.rsync_cygwin)
            cmd.append(remote_path)

            out = self.exec_cmd(cmd, stdin=tmp_r)
        finally:
            if tmp_r:
                os.close(tmp_r)
            if tmp_w:
                os.close(tmp_w)

        self._set_targets(source_entities, sources, targets)
        return out

    def get_trace_name(self, source_entities, brief):
        """Return the command name (brief) or the full command line."""
        if brief:
            name = self.cmd[0]
            name = os.path.splitext(os.path.basename(name))[0]
        else:
            name = ' '.join(self.cmd)
        return name
# ==============================================================================
class RSyncPullBuilder(aql.Builder):
    """Aqualid builder that pulls files from a (possibly remote) location
    into a local target directory via rsync."""

    # rsync -avzub --exclude-from=files.flt --delete-excluded -e "ssh -i
    # dev.key" c4dev@dev:/work/cp/bp2_int/components .

    NAME_ATTRS = ('target_path', )
    SIGNATURE_ATTRS = ('cmd', )

    def __init__(self, options, target, host=None,
                 login=None, key_file=None, exclude=None):
        # Cygwin-style path conversion is needed when rsync is a cygwin build
        # but the interpreter is not running under cygwin.
        self.rsync_cygwin = (
            sys.platform != 'cygwin') and options.rsync_cygwin.get()

        self.target_path = _norm_local_path(target)
        self.host = host
        self.login = login

        self.cmd = self.__get_cmd(options, key_file, exclude)
        self.rsync = options.rsync.get()

        self.file_value_type = aql.FileTimestampEntity

    def make_entity(self, value, tags=None):
        """Make a file entity for local string paths, a simple entity otherwise."""
        if aql.is_string(value):
            remote_path = RemotePath(value, self.login, self.host)
            if not remote_path.is_remote():
                return self.make_file_entity(value, tags)
        return self.make_simple_entity(value, tags)

    def __get_cmd(self, options, key_file, excludes):
        """Assemble the fixed part of the rsync command line."""
        cmd = [options.rsync.get()]
        cmd += options.rsync_flags.get()
        if excludes:
            excludes = aql.to_sequence(excludes)
            cmd += itertools.chain(*itertools.product(['--exclude'], excludes))
        if self.host:
            ssh_flags = options.rsync_ssh_flags.get()
            if key_file:
                if self.rsync_cygwin:
                    key_file = _to_cygwin_path(key_file)
                ssh_flags += ['-i', key_file]
            cmd += ['-e', 'ssh %s' % ' '.join(ssh_flags)]
        return cmd

    def _get_sources_and_targets(self, source_entities):
        """Return (sorted rsync source specs, local target file paths)."""
        sources = []
        targets = []
        target_path = self.target_path
        host = self.host
        login = self.login
        cygwin_path = self.rsync_cygwin
        for src in source_entities:
            src = src.get()
            remote_path = RemotePath(src, login, host)
            path = os.path.join(target_path, remote_path.basename())
            targets.append(path)
            sources.append(remote_path.get(cygwin_path))
        sources.sort()
        return sources, targets

    def build(self, source_entities, targets):
        """Run rsync to pull the sources and record the local target files."""
        sources, target_files = self._get_sources_and_targets(source_entities)

        cmd = list(self.cmd)
        target_path = self.target_path
        if self.rsync_cygwin:
            target_path = _to_cygwin_path(target_path)
        cmd += sources
        cmd.append(target_path)

        out = self.exec_cmd(cmd)
        targets.add_target_files(target_files)
        return out

    def get_trace_name(self, source_entities, brief):
        """Return the command name (brief) or the full command line."""
        if brief:
            name = self.cmd[0]
            name = os.path.splitext(os.path.basename(name))[0]
        else:
            name = ' '.join(self.cmd)
        return name
# ==============================================================================
@aql.tool('rsync')
class ToolRsync(aql.Tool):
    """Aqualid tool exposing rsync-based push and pull builders."""

    @classmethod
    def setup(cls, options):
        """Locate the rsync executable and auto-detect cygwin builds."""
        rsync = cls.find_program(options, 'rsync')
        options.rsync = rsync
        if not options.rsync_cygwin.is_set():
            # Heuristic: a cygwin rsync typically lives under a 'cygwin' path.
            options.rsync_cygwin = rsync.find('cygwin') != -1

    @classmethod
    def options(cls):
        """Declare the tool's option types."""
        options = aql.Options()
        options.rsync = aql.PathOptionType(
            description="File path to rsync program.")
        options.rsync_cygwin = aql.BoolOptionType(
            description="Is rsync uses cygwin paths.")
        options.rsync_flags = aql.ListOptionType(
            description="rsync tool flags", separators=None)
        options.rsync_ssh_flags = aql.ListOptionType(
            description="rsync tool SSH flags", separators=None)
        return options

    def __init__(self, options):
        """Set default rsync/ssh flags and group the options."""
        super(ToolRsync, self).__init__(options)
        options.rsync_flags = ['-a', '-v', '-z']
        options.rsync_ssh_flags = [
            '-o', 'StrictHostKeyChecking=no', '-o', 'BatchMode=yes']
        options.set_group("rsync")

    def pull(self, options, target, host=None,
             login=None, key_file=None, exclude=None):
        """Return a builder that pulls files into local *target* via rsync."""
        return RSyncPullBuilder(options, target,
                                host=host, login=login,
                                key_file=key_file, exclude=exclude)

    Pull = pull

    def push(self, options, target, source_base=None,
             host=None, login=None, key_file=None, exclude=None):
        """Return a builder that pushes files to *target* via rsync."""
        builder = RSyncPushBuilder(options, target,
                                   source_base=source_base,
                                   host=host, login=login,
                                   key_file=key_file, exclude=exclude)
        return builder

    Push = push
# ----------------------------------------------------------
import json
import enum
from sqlalchemy import (
Column,
Boolean,
Integer,
String,
create_engine,
and_,
event,
func,
)
from sqlalchemy.engine.default import DefaultDialect
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.sql import dml, select, delete
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class EnumEncoder(json.JSONEncoder):
    """JSON encoder that serialises Enum members by their name."""

    def default(self, obj):
        if isinstance(obj, enum.Enum):
            return obj.name
        # Defer to the base encoder (raises TypeError for unknown types).
        return super(EnumEncoder, self).default(obj)
class UndoRedoMixin(object):
    """Columns shared by the undo and redo action tables.

    Each row stores one SQL statement (with JSON-encoded parameters) that
    replays or reverts part of a captured operation.
    """

    id = Column(Integer, primary_key=True)
    # Application-defined type of the object the action belongs to.
    object_type = Column(String, nullable=False)
    stack_id = Column(Integer, index=True, nullable=False)
    capture_id = Column(Integer, index=True, nullable=False)
    # SQL statement text to execute when the action is applied.
    stmt = Column(String, nullable=False)
    # JSON-serialised statement parameters; access via the `params` property.
    _params = Column(String, nullable=False)

    @property
    def params(self):
        """Deserialised statement parameters."""
        return json.loads(self._params)

    @params.setter
    def params(self, params):
        # Enum values are stored by name via EnumEncoder.
        self._params = json.dumps(params, cls=EnumEncoder)
class UndoAction(Base, UndoRedoMixin):
    """A statement that reverts a captured change; active by default."""
    __tablename__ = "undo_action"
    active = Column(Boolean, default=True, nullable=False)
class RedoAction(Base, UndoRedoMixin):
    """A statement that replays a captured change; inactive until undone."""
    __tablename__ = "redo_action"
    active = Column(Boolean, default=False, nullable=False)
class UndoRedoContext(object):
    def __init__(self, app_session, session, object_type, stack_id):
        """Capture undo/redo actions for one stack of an object type.

        *app_session* is the application's SQLAlchemy session being observed;
        *session* is the session used to persist Undo/Redo action rows.
        """
        self.app_session = app_session
        self.app_engine = self.app_session.get_bind()
        self.session = session
        self.object_type = object_type
        self.stack_id = stack_id
        # Highest capture id seen so far; new actions use last_capture + 1.
        self.last_capture = 0
def before_exec(self, conn, clauseelement, multiparams, params):
if not isinstance(clauseelement, (dml.Delete, dml.Update)):
return
if multiparams and multiparams[0]:
return
query = select([clauseelement.table])
if clauseelement._whereclause is not None:
query = query.where(clauseelement._whereclause)
stmt_redo = clauseelement.compile(dialect=DefaultDialect())
self.session.add(
RedoAction(
object_type=self.object_type,
stack_id=self.stack_id,
capture_id=self.last_capture + 1,
stmt=str(stmt_redo),
params=stmt_redo.params,
)
)
if isinstance(clauseelement, dml.Delete):
for row in conn.execute(query):
stmt_undo = (
dml.Insert(clauseelement.table)
.values(**{k: v for (k, v) in row.items() if v is not None})
.compile(dialect=DefaultDialect())
)
self.session.add(
UndoAction(
object_type=self.object_type,
stack_id=self.stack_id,
capture_id=self.last_capture + 1,
stmt=str(stmt_undo),
params=stmt_undo.params,
)
)
elif isinstance(clauseelement, dml.Update):
for row in conn.execute(query):
stmt_undo = (
dml.Update(clauseelement.table)
.values(
**{
column.name: row[column.name]
for column in clauseelement.parameters.keys()
}
)
.where(
and_(
*[
column.__eq__(row[column.name])
for column in clauseelement.table.primary_key.columns.values()
]
)
)
.compile(dialect=DefaultDialect())
)
self.session.add(
UndoAction(
object_type=self.object_type,
stack_id=self.stack_id,
capture_id=self.last_capture + 1,
stmt=str(stmt_undo),
params=stmt_undo.params,
)
)
def after_exec(self, conn, clauseelement, multiparams, params, result):
if isinstance(clauseelement, dml.Insert):
new_pk = dict(
zip(
clauseelement.table.primary_key.columns.keys(),
result.inserted_primary_key,
)
)
where_clause = and_(
*[
column.__eq__(value)
for (column, value) in zip(
clauseelement.table.primary_key.columns.values(),
result.inserted_primary_key,
)
]
)
stmt_redo = clauseelement.values(
{
**{c.name: c.default.arg for c in result.prefetch_cols()},
**{c.name: c.server_default.arg for c in result.postfetch_cols()},
**{k: v for (k, v) in multiparams[0].items() if v is not None},
**new_pk,
}
).compile(dialect=DefaultDialect())
stmt_undo = (
delete(clauseelement.table)
.where(where_clause)
.compile(dialect=DefaultDialect())
)
self.session.add(
RedoAction(
object_type=self.object_type,
stack_id=self.stack_id,
capture_id=self.last_capture + 1,
stmt=str(stmt_redo),
params=stmt_redo.params,
)
)
self.session.add(
UndoAction(
object_type=self.object_type,
stack_id=self.stack_id,
capture_id=self.last_capture + 1,
stmt=str(stmt_undo),
params=stmt_undo.params,
)
)
def __enter__(self):
self.last_capture = (
self.session.query(UndoAction)
.filter_by(object_type=self.object_type, stack_id=self.stack_id)
.with_entities(func.coalesce(func.max(UndoAction.capture_id), 0))
.scalar()
)
event.listen(self.app_engine, "before_execute", self.before_exec)
event.listen(self.app_engine, "after_execute", self.after_exec)
def __exit__(self, exc_type, exc_val, exc_tb):
event.remove(self.app_engine, "before_execute", self.before_exec)
event.remove(self.app_engine, "after_execute", self.after_exec)
self.session.commit()
self.session.close()
class UndoRedo(object):
def __init__(self, app=None):
self.app = app
self.app_engine = None
if app is not None:
self.init_app(app)
self.session = None
def init_app(self, app):
engine = create_engine(app.config["UNDO_REDO_DATABASE_URI"])
try:
Base.metadata.create_all(engine, checkfirst=True)
except:
pass
Base.metadata.bind = engine
self.DBSession = sessionmaker(bind=engine)
def get_session(self):
session_obj = scoped_session(self.DBSession)
self.session = session_obj()
def clear_history(self, object_type, stack_id):
self.get_session()
self.session.query(UndoAction).filter_by(
object_type=object_type, stack_id=stack_id, active=False
).delete()
self.session.query(RedoAction).filter_by(
object_type=object_type, stack_id=stack_id, active=True
).delete()
self.session.commit()
self.session.close()
def capture(self, app_session, object_type, stack_id):
self.get_session()
self.clear_history(object_type, stack_id)
return UndoRedoContext(app_session, self.session, object_type, stack_id)
def get_actions(self, model, object_type, stack_id, agg_func=func.max):
subquery = (
self.session.query(model)
.filter_by(object_type=object_type, stack_id=stack_id, active=True)
.with_entities(agg_func(model.capture_id).label("capture_id"))
.subquery()
)
return self.session.query(model).join(
subquery,
and_(
model.object_type == object_type,
model.capture_id == subquery.c.capture_id,
),
)
def undo(self, session, object_type, stack_id):
self.get_session()
undo_actions = self.get_actions(UndoAction, object_type, stack_id).all()
for undo_action in undo_actions:
session.execute(undo_action.stmt, undo_action.params)
undo_action.active = False
self.session.add(undo_action)
if undo_actions:
self.session.query(RedoAction).filter_by(
object_type=object_type, capture_id=undo_actions[0].capture_id
).update({"active": True})
active_undo = (
self.session.query(UndoAction)
.filter_by(object_type=object_type, stack_id=stack_id, active=True)
.count()
)
active_redo = (
self.session.query(RedoAction)
.filter_by(object_type=object_type, stack_id=stack_id, active=True)
.count()
)
self.session.commit()
self.session.close()
return (active_undo, active_redo)
def redo(self, session, object_type, stack_id):
self.get_session()
redo_actions = self.get_actions(
RedoAction, object_type, stack_id, func.min
).all()
for redo_action in redo_actions:
session.execute(redo_action.stmt, redo_action.params)
redo_action.active = False
self.session.add(redo_action)
if redo_actions:
self.session.query(UndoAction).filter_by(
object_type=object_type, capture_id=redo_actions[0].capture_id
).update({"active": True})
active_undo = (
self.session.query(UndoAction)
.filter_by(object_type=object_type, stack_id=stack_id, active=True)
.count()
)
active_redo = (
self.session.query(RedoAction)
.filter_by(object_type=object_type, stack_id=stack_id, active=True)
.count()
)
self.session.commit()
self.session.close()
return (active_undo, active_redo) | PypiClean |
/Flask-BasicAuth-0.2.0.tar.gz/Flask-BasicAuth-0.2.0/docs/index.rst | Flask-BasicAuth
===============
Flask-BasicAuth is a Flask extension that provides an easy way to protect
certain views or your whole application with HTTP `basic access
authentication`_.
.. _basic access authentication: http://en.wikipedia.org/wiki/Basic_access_authentication
Installation
------------
The easiest way to install Flask-BasicAuth is with pip::
pip install Flask-BasicAuth
Usage
-----
Usage of Flask-BasicAuth is simple::
from flask import Flask, render_template
from flask.ext.basicauth import BasicAuth
app = Flask(__name__)
app.config['BASIC_AUTH_USERNAME'] = 'john'
app.config['BASIC_AUTH_PASSWORD'] = 'matrix'
basic_auth = BasicAuth(app)
@app.route('/secret')
@basic_auth.required
def secret_view():
return render_template('secret.html')
If you would like to protect you entire site with basic access authentication,
just set ``BASIC_AUTH_FORCE`` configuration variable to `True`::
app.config['BASIC_AUTH_FORCE'] = True
You might find this useful, for example, if you would like to protect your
staging server from uninvited guests.
.. warning::
Please make sure that you use SSL/TLS (HTTPS) to encrypt the connection
between the client and the server, when using basic access authentication.
In basic access authentication username and password are sent in cleartext,
and if SSL/TLS is not used, the credentials could be easily intercepted.
Configuration
-------------
The following configuration values exist for Flask-BasicAuth. Flask-BasicAuth
loads these values from your main Flask config which can be populated in
various ways.
A list of configuration keys currently understood by the extension:
``BASIC_AUTH_FORCE``
If set to `True`, makes the whole site require HTTP basic access
authentication.
Defaults to `False`.
``BASIC_AUTH_REALM``
The authentication realm used for the challenge. This is typically a
description of the system being accessed.
Defaults to ``''``.
``BASIC_AUTH_USERNAME`` and ``BASIC_AUTH_PASSWORD``
The correct username and password combination that grants access for the
client to the protected resource.
You can override :meth:`BasicAuth.check_credentials <flask.ext.basicauth.BasicAuth.check_credentials>`,
if you need a different authentication logic for your application.
API reference
-------------
.. module:: flask.ext.basicauth
This part of the documentation covers all the public classes and functions
in Flask-BasicAuth.
.. autoclass:: BasicAuth
:members:
.. include:: ../CHANGES.rst
License
-------
.. include:: ../LICENSE
| PypiClean |
/HDXrate-0.2.0.tar.gz/HDXrate-0.2.0/README.rst | =======
HDXrate
=======
.. image:: https://img.shields.io/pypi/v/hdxrate.svg
:target: https://pypi.python.org/pypi/hdxrate
.. image:: https://img.shields.io/travis/Jhsmit/hdxrate.svg
:target: https://travis-ci.com/Jhsmit/hdxrate
.. image:: https://readthedocs.org/projects/hdxrate/badge/?version=latest
:target: https://hdxrate.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
Python package collection for HDX intrinsic exchange rate calculation.
The calculations are based on the following papers:
Bai, Y., Milne, J. S., Mayne, L. & Englander, S. W. Primary structure effects on peptide group hydrogen exchange. `Proteins Structure, Function, and Bioinformatics <https://doi.org/10.1002/prot.340170110>`__ 17, 75–86 (1993)
Connelly, G. P., Bai, Y., Jeng, M.-F. & Englander, S. W. Isotope effects in peptide group hydrogen exchange. `Proteins <http://doi.wiley.com/10.1002/prot.340170111>`__ 17, 87–92 (1993).
Mori, S., Zijl, P. C. M. van & Shortle, D. Measurement of water–amide proton exchange rates in the denatured state of staphylococcal nuclease by a magnetization transfer technique. `Proteins Structure, Function, and Bioinformatics <https://doi.org/10.1002/(SICI)1097-0134(199707)28:3%3C325::AID-PROT3%3E3.0.CO;2-B>`__ 28, 325–332 (1997)
Nguyen, D., Mayne, L., Phillips, M. C. & Walter Englander, S. Reference Parameters for Protein Hydrogen Exchange Rates. `J. Am. Soc. Mass Spectrom. <https://pubs.acs.org/doi/abs/10.1021/jasms.8b05911>`__ 29, 1936–1939 (2018).
..
1.Rubinson, K. A. Practical corrections for p(H,D) measurements in mixed H 2 O/D 2 O biological buffers. Anal. Methods 9, 2744–2750 (2017).
See also the excel sheet on the Englander group website: http://hx2.med.upenn.edu/download.html
* Free software: GNU General Public License v3
Features
--------
Calculate intrinsic rate of amide hydrogen exchange in proteins.
Installation
------------
::
$ pip install hdxrate
or
::
$ conda install -c conda-forge hdxrate
Usage
-----
::
>>> from hdxrate import k_int_from_sequence
>>> k_int_from_sequence('HHHHH', 300, 7.)
array([0.00000000e+00, 2.62430718e+03, 6.29527446e+01, 6.29527446e+01,
9.97734191e-01])
Credits
-------
HDXrate implementation is based on PSX intrinsic rate calculation
https://github.com/Niels-Bohr-Institute-XNS-StructBiophys/PSX
Pedersen, M. C. et al. PSX, Protein–Solvent Exchange: software for calculation of deuterium-exchange effects in small-angle neutron scattering measurements from protein coordinates. `J Appl Cryst <https://doi.org/10.1107/S1600576719012469/>`__ 52, 1427–1436 (2019).
Maintenance
```````````
* Jochem Smit <[email protected]> / <[email protected]>
| PypiClean |
/Artist-Engineering_Geek-0.1.0.tar.gz/Artist-Engineering_Geek-0.1.0/src/Artist/models/progressive_gan.py | from math import sqrt
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn import functional as F
from torch.nn import init
def init_linear(linear):
init.xavier_normal(linear.weight)
linear.bias.data.zero_()
def init_conv(conv, glu=True):
init.kaiming_normal(conv.weight)
if conv.bias is not None:
conv.bias.data.zero_()
class SpectralNorm:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
u = getattr(module, self.name + '_u')
size = weight.size()
weight_mat = weight.contiguous().view(size[0], -1)
if weight_mat.is_cuda:
u = u.cuda()
v = weight_mat.t() @ u
v = v / v.norm()
u = weight_mat @ v
u = u / u.norm()
weight_sn = weight_mat / (u.t() @ weight_mat @ v)
weight_sn = weight_sn.view(*size)
return weight_sn, Variable(u.data)
@staticmethod
def apply(module, name):
fn = SpectralNorm(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
input_size = weight.size(0)
u = Variable(torch.randn(input_size, 1) * 0.1, requires_grad=False)
setattr(module, name + '_u', u)
setattr(module, name, fn.compute_weight(module)[0])
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight_sn, u = self.compute_weight(module)
setattr(module, self.name, weight_sn)
setattr(module, self.name + '_u', u)
def spectral_norm(module, name='weight'):
SpectralNorm.apply(module, name)
return module
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class PixelNorm(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
return input / torch.sqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
class SpectralNormConv2d(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
conv = nn.Conv2d(*args, **kwargs)
init.kaiming_normal(conv.weight)
conv.bias.data.zero_()
self.conv = spectral_norm(conv)
def forward(self, input):
return self.conv(input)
class EqualConv2d(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
conv = nn.Conv2d(*args, **kwargs)
conv.weight.data.normal_()
conv.bias.data.zero_()
self.conv = equal_lr(conv)
def forward(self, _input):
return self.conv(_input)
class ConvBlock(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, padding, kernel_size2=None, padding2=None, pixel_norm=True,
spectral_norm=False):
super().__init__()
pad1 = padding
pad2 = padding
pad2 = padding2 if padding2 is not None else pad2
kernel1 = kernel_size
kernel2 = kernel_size
if kernel_size2 is not None:
kernel2 = kernel_size2
if spectral_norm:
self.conv = nn.Sequential(
SpectralNormConv2d(in_channel, out_channel, kernel1, padding=pad1),
nn.LeakyReLU(0.2),
SpectralNormConv2d(out_channel, out_channel, kernel2, padding=pad2),
nn.LeakyReLU(0.2)
)
else:
if pixel_norm:
self.conv = nn.Sequential(
EqualConv2d(in_channel, out_channel, kernel1, padding=pad1),
PixelNorm(),
nn.LeakyReLU(0.2),
EqualConv2d(out_channel, out_channel, kernel2, padding=pad2),
PixelNorm(),
nn.LeakyReLU(0.2)
)
else:
self.conv = nn.Sequential(
EqualConv2d(in_channel, out_channel, kernel1, padding=pad1),
nn.LeakyReLU(0.2),
EqualConv2d(out_channel, out_channel, kernel2, padding=pad2),
nn.LeakyReLU(0.2)
)
def forward(self, input):
out = self.conv(input)
return out
class Generator(nn.Module):
def __init__(self, code_dim=512 - 10, n_label=10):
super().__init__()
self.label_embed = nn.Embedding(n_label, n_label)
self.code_norm = PixelNorm()
self.label_embed.weight.data.normal_()
self.progression = nn.ModuleList([
ConvBlock(512, 512, 4, 3, 3, 1),
ConvBlock(512, 512, 3, 1),
ConvBlock(512, 512, 3, 1),
ConvBlock(512, 512, 3, 1),
ConvBlock(512, 256, 3, 1),
ConvBlock(256, 128, 3, 1)
])
self.to_rgb = nn.ModuleList([
nn.Conv2d(512, 3, 1),
nn.Conv2d(512, 3, 1),
nn.Conv2d(512, 3, 1),
nn.Conv2d(512, 3, 1),
nn.Conv2d(256, 3, 1),
nn.Conv2d(128, 3, 1)
])
def forward(self, _input, label, step=0, alpha=-1):
_input = self.code_norm(_input)
label = self.label_embed(label)
out = torch.cat([_input, label], 1).unsqueeze(2).unsqueeze(3)
for i, (conv, to_rgb) in enumerate(zip(self.progression, self.to_rgb)):
if i > 0 and step > 0:
upsample = F.interpolate(out, scale_factor=2)
out = conv(upsample)
else:
out = conv(out)
if i == step:
out = to_rgb(out)
if i > 0 and 0 <= alpha < 1:
skip_rgb = self.to_rgb[i - 1](upsample)
out = (1 - alpha) * skip_rgb + alpha * out
break
return out
class Discriminator(nn.Module):
def __init__(self, n_label=10):
super().__init__()
self.progression = nn.ModuleList([
ConvBlock(128, 256, 3, 1, pixel_norm=False, spectral_norm=False),
ConvBlock(256, 512, 3, 1, pixel_norm=False, spectral_norm=False),
ConvBlock(512, 512, 3, 1, pixel_norm=False, spectral_norm=False),
ConvBlock(512, 512, 3, 1, pixel_norm=False, spectral_norm=False),
ConvBlock(512, 512, 3, 1, pixel_norm=False, spectral_norm=False),
ConvBlock(513, 512, 3, 1, 4, 0, pixel_norm=False, spectral_norm=False)]
)
self.from_rgb = nn.ModuleList([
nn.Conv2d(3, 128, 1),
nn.Conv2d(3, 256, 1),
nn.Conv2d(3, 512, 1),
nn.Conv2d(3, 512, 1),
nn.Conv2d(3, 512, 1),
nn.Conv2d(3, 512, 1)]
)
self.n_layer = len(self.progression)
self.linear = nn.Linear(512, 1 + n_label)
def forward(self, _input, step=0, alpha=-1):
for i in range(step, -1, -1):
index = self.n_layer - i - 1
if i == step:
out = self.from_rgb[index](_input)
if i == 0:
mean_std = _input.std(0).mean()
mean_std = mean_std.expand(_input.size(0), 1, 4, 4)
out = torch.cat([out, mean_std], 1)
out = self.progression[index](out)
if i > 0:
out = F.avg_pool2d(out, 2)
if i == step and 0 <= alpha < 1:
skip_rgb = F.avg_pool2d(_input, 2)
skip_rgb = self.from_rgb[index + 1](skip_rgb)
out = (1 - alpha) * skip_rgb + alpha * out
out = out.squeeze(2).squeeze(2)
# print(input.size(), out.size(), step)
out = self.linear(out)
return out[:, 0], out[:, 1:] | PypiClean |
/MetaCalls-0.0.5-cp310-cp310-manylinux2014_x86_64.whl/metacalls/node_modules/@types/node/fs.d.ts | declare module 'fs' {
import * as stream from 'node:stream';
import { Abortable, EventEmitter } from 'node:events';
import { URL } from 'node:url';
import * as promises from 'node:fs/promises';
export { promises };
    /**
     * Valid types for path values in "fs".
     */
    export type PathLike = string | Buffer | URL;
    /**
     * Either a path or an already-open file descriptor, for APIs that accept both
     * (e.g. `readFile`, `writeFile`).
     */
    export type PathOrFileDescriptor = PathLike | number;
    /**
     * Values accepted for file timestamps (e.g. by `utimes`): a `Date`, a numeric
     * epoch value, or a string representation of one.
     */
    export type TimeLike = string | number | Date;
    /**
     * Callback signature for fs operations that report only success or failure.
     */
    export type NoParamCallback = (err: NodeJS.ErrnoException | null) => void;
    /**
     * Option form that selects `Buffer` results instead of strings: either the
     * literal `'buffer'` or an options object with `encoding: 'buffer'`.
     */
    export type BufferEncodingOption =
        | 'buffer'
        | {
              encoding: 'buffer';
          };
    /**
     * Options object carrying an optional character encoding.
     */
    export interface ObjectEncodingOptions {
        encoding?: BufferEncoding | null | undefined;
    }
    /**
     * Encoding option accepted by many fs APIs: an options object, a bare
     * encoding name, or `null`/`undefined` to use the API's default.
     */
    export type EncodingOption = ObjectEncodingOptions | BufferEncoding | undefined | null;
    /**
     * File open mode: a flags string (e.g. `'r'`, `'w+'`) or numeric flags.
     */
    export type OpenMode = number | string;
    /**
     * File permission mode: an octal number or its string form (e.g. `0o755`, `'755'`).
     */
    export type Mode = number | string;
    /**
     * Common shape shared by `Stats` and its `bigint` variant; `T` is the numeric
     * field type (`number` or `bigint`).
     */
    export interface StatsBase<T> {
        /** Returns `true` if this describes a regular file. */
        isFile(): boolean;
        /** Returns `true` if this describes a file system directory. */
        isDirectory(): boolean;
        /** Returns `true` if this describes a block device. */
        isBlockDevice(): boolean;
        /** Returns `true` if this describes a character device. */
        isCharacterDevice(): boolean;
        /** Returns `true` if this describes a symbolic link. */
        isSymbolicLink(): boolean;
        /** Returns `true` if this describes a first-in-first-out (FIFO) pipe. */
        isFIFO(): boolean;
        /** Returns `true` if this describes a socket. */
        isSocket(): boolean;
        /** Numeric identifier of the device containing the file. */
        dev: T;
        /** File-system-specific inode number. */
        ino: T;
        /** Bit-field describing the file type and permissions. */
        mode: T;
        /** Number of hard links to the file. */
        nlink: T;
        /** Numeric user identifier of the file's owner (POSIX). */
        uid: T;
        /** Numeric group identifier of the file's owning group (POSIX). */
        gid: T;
        /** Numeric device identifier if the file represents a device. */
        rdev: T;
        /** Size of the file in bytes. */
        size: T;
        /** File system block size for I/O operations. */
        blksize: T;
        /** Number of blocks allocated for this file. */
        blocks: T;
        /** Last-access timestamp in milliseconds since the POSIX epoch. */
        atimeMs: T;
        /** Last-modification timestamp in milliseconds since the POSIX epoch. */
        mtimeMs: T;
        /** Last status-change timestamp in milliseconds since the POSIX epoch. */
        ctimeMs: T;
        /** Creation timestamp in milliseconds since the POSIX epoch. */
        birthtimeMs: T;
        /** Time of last access. */
        atime: Date;
        /** Time of last modification. */
        mtime: Date;
        /** Time of last status change. */
        ctime: Date;
        /** Time of file creation. */
        birthtime: Date;
    }
    /** The default, non-`bigint` form of stat results: all numeric fields are `number`. */
    export interface Stats extends StatsBase<number> {}
    /**
     * A `fs.Stats` object provides information about a file.
     *
     * Objects returned from {@link stat}, {@link lstat} and {@link fstat} and
     * their synchronous counterparts are of this type.
     * If `bigint` in the `options` passed to those methods is true, the numeric values
     * will be `bigint` instead of `number`, and the object will contain additional
     * nanosecond-precision properties suffixed with `Ns`.
     *
     * ```console
     * Stats {
     *   dev: 2114,
     *   ino: 48064969,
     *   mode: 33188,
     *   nlink: 1,
     *   uid: 85,
     *   gid: 100,
     *   rdev: 0,
     *   size: 527,
     *   blksize: 4096,
     *   blocks: 8,
     *   atimeMs: 1318289051000.1,
     *   mtimeMs: 1318289051000.1,
     *   ctimeMs: 1318289051000.1,
     *   birthtimeMs: 1318289051000.1,
     *   atime: Mon, 10 Oct 2011 23:24:11 GMT,
     *   mtime: Mon, 10 Oct 2011 23:24:11 GMT,
     *   ctime: Mon, 10 Oct 2011 23:24:11 GMT,
     *   birthtime: Mon, 10 Oct 2011 23:24:11 GMT }
     * ```
     *
     * `bigint` version:
     *
     * ```console
     * BigIntStats {
     *   dev: 2114n,
     *   ino: 48064969n,
     *   mode: 33188n,
     *   nlink: 1n,
     *   uid: 85n,
     *   gid: 100n,
     *   rdev: 0n,
     *   size: 527n,
     *   blksize: 4096n,
     *   blocks: 8n,
     *   atimeMs: 1318289051000n,
     *   mtimeMs: 1318289051000n,
     *   ctimeMs: 1318289051000n,
     *   birthtimeMs: 1318289051000n,
     *   atimeNs: 1318289051000000000n,
     *   mtimeNs: 1318289051000000000n,
     *   ctimeNs: 1318289051000000000n,
     *   birthtimeNs: 1318289051000000000n,
     *   atime: Mon, 10 Oct 2011 23:24:11 GMT,
     *   mtime: Mon, 10 Oct 2011 23:24:11 GMT,
     *   ctime: Mon, 10 Oct 2011 23:24:11 GMT,
     *   birthtime: Mon, 10 Oct 2011 23:24:11 GMT }
     * ```
     * @since v0.1.21
     */
    export class Stats {} // empty body is intentional: members come from the merged `Stats` interface above (declaration merging)
    /**
     * Common shape shared by `StatsFs` and its `bigint` variant; `T` is the
     * numeric field type (`number` or `bigint`).
     */
    export interface StatsFsBase<T> {
        /** Type of file system. */
        type: T;
        /** Optimal transfer block size. */
        bsize: T;
        /** Total data blocks in file system. */
        blocks: T;
        /** Free blocks in file system. */
        bfree: T;
        /** Available blocks for unprivileged users */
        bavail: T;
        /** Total file nodes in file system. */
        files: T;
        /** Free file nodes in file system. */
        ffree: T;
    }
    /** The default, non-`bigint` form of statfs results: all fields are `number`. */
    export interface StatsFs extends StatsFsBase<number> {}
    /**
     * Provides information about a mounted file system
     *
     * Objects returned from {@link statfs} and {@link statfsSync} are of this type.
     * If `bigint` in the `options` passed to those methods is true, the numeric values
     * will be `bigint` instead of `number`.
     * @since v18.15.0
     */
    export class StatsFs {} // empty body is intentional: members come from the merged `StatsFs` interface above (declaration merging)
    /** The `bigint` form of statfs results: all fields are `bigint`. */
    export interface BigIntStatsFs extends StatsFsBase<bigint> {}
    /**
     * Options accepted by {@link statfs} and {@link statfsSync}.
     */
    export interface StatFsOptions {
        /** When `true`, numeric values in the returned object are `bigint` instead of `number`. */
        bigint?: boolean | undefined;
    }
    /**
     * A representation of a directory entry, which can be a file or a subdirectory
     * within the directory, as returned by reading from an `fs.Dir`. The
     * directory entry is a combination of the file name and file type pairs.
     *
     * Additionally, when {@link readdir} or {@link readdirSync} is called with
     * the `withFileTypes` option set to `true`, the resulting array is filled with `fs.Dirent` objects, rather than strings or `Buffer` s.
     * @since v10.10.0
     */
    export class Dirent {
        /**
         * Returns `true` if the `fs.Dirent` object describes a regular file.
         * @since v10.10.0
         */
        isFile(): boolean;
        /**
         * Returns `true` if the `fs.Dirent` object describes a file system
         * directory.
         * @since v10.10.0
         */
        isDirectory(): boolean;
        /**
         * Returns `true` if the `fs.Dirent` object describes a block device.
         * @since v10.10.0
         */
        isBlockDevice(): boolean;
        /**
         * Returns `true` if the `fs.Dirent` object describes a character device.
         * @since v10.10.0
         */
        isCharacterDevice(): boolean;
        /**
         * Returns `true` if the `fs.Dirent` object describes a symbolic link.
         * @since v10.10.0
         */
        isSymbolicLink(): boolean;
        /**
         * Returns `true` if the `fs.Dirent` object describes a first-in-first-out
         * (FIFO) pipe.
         * @since v10.10.0
         */
        isFIFO(): boolean;
        /**
         * Returns `true` if the `fs.Dirent` object describes a socket.
         * @since v10.10.0
         */
        isSocket(): boolean;
        /**
         * The file name that this `fs.Dirent` object refers to. The type of this
         * value is determined by the `options.encoding` passed to {@link readdir} or {@link readdirSync}.
         *
         * NOTE(review): declared as `string` here even though the sentence above
         * says the type follows `options.encoding` (a `'buffer'` encoding would
         * yield `Buffer` names) — this matches the upstream typings; confirm
         * before relying on `Buffer` names being typed.
         * @since v10.10.0
         */
        name: string;
    }
    /**
     * A class representing a directory stream.
     *
     * Created by {@link opendir}, {@link opendirSync}, or `fsPromises.opendir()`.
     *
     * ```js
     * import { opendir } from 'fs/promises';
     *
     * try {
     *   const dir = await opendir('./');
     *   for await (const dirent of dir)
     *     console.log(dirent.name);
     * } catch (err) {
     *   console.error(err);
     * }
     * ```
     *
     * When using the async iterator, the `fs.Dir` object will be automatically
     * closed after the iterator exits.
     * @since v12.12.0
     */
    export class Dir implements AsyncIterable<Dirent> {
        /**
         * The read-only path of this directory as was provided to {@link opendir},{@link opendirSync}, or `fsPromises.opendir()`.
         * @since v12.12.0
         */
        readonly path: string;
        /**
         * Asynchronously iterates over the directory via `readdir(3)` until all entries have been read.
         */
        [Symbol.asyncIterator](): AsyncIterableIterator<Dirent>;
        /**
         * Asynchronously close the directory's underlying resource handle.
         * Subsequent reads will result in errors.
         *
         * A promise is returned that will be resolved after the resource has been
         * closed.
         * @since v12.12.0
         */
        close(): Promise<void>;
        /**
         * Callback form of `close()`: closes the underlying resource handle and
         * invokes `cb` once done (or with an error).
         */
        close(cb: NoParamCallback): void;
        /**
         * Synchronously close the directory's underlying resource handle.
         * Subsequent reads will result in errors.
         * @since v12.12.0
         */
        closeSync(): void;
        /**
         * Asynchronously read the next directory entry via [`readdir(3)`](http://man7.org/linux/man-pages/man3/readdir.3.html) as an `fs.Dirent`.
         *
         * A promise is returned that will be resolved with an `fs.Dirent`, or `null`if there are no more directory entries to read.
         *
         * Directory entries returned by this function are in no particular order as
         * provided by the operating system's underlying directory mechanisms.
         * Entries added or removed while iterating over the directory might not be
         * included in the iteration results.
         * @since v12.12.0
         * @return containing {fs.Dirent|null}
         */
        read(): Promise<Dirent | null>;
        /**
         * Callback form of `read()`: `dirEnt` is the next directory entry, or
         * `null` when the directory has been fully read.
         */
        read(cb: (err: NodeJS.ErrnoException | null, dirEnt: Dirent | null) => void): void;
        /**
         * Synchronously read the next directory entry as an `fs.Dirent`. See the
         * POSIX [`readdir(3)`](http://man7.org/linux/man-pages/man3/readdir.3.html) documentation for more detail.
         *
         * If there are no more directory entries to read, `null` will be returned.
         *
         * Directory entries returned by this function are in no particular order as
         * provided by the operating system's underlying directory mechanisms.
         * Entries added or removed while iterating over the directory might not be
         * included in the iteration results.
         * @since v12.12.0
         */
        readSync(): Dirent | null;
    }
    /**
     * Class: fs.StatWatcher
     * @since v14.3.0, v12.20.0
     * Extends `EventEmitter`
     * A successful call to {@link watchFile} method will return a new fs.StatWatcher object.
     */
    export interface StatWatcher extends EventEmitter {
        /**
         * When called, requests that the Node.js event loop _not_ exit so long as the `fs.StatWatcher` is active. Calling `watcher.ref()` multiple times will have
         * no effect.
         *
         * By default, all `fs.StatWatcher` objects are "ref'ed", making it normally
         * unnecessary to call `watcher.ref()` unless `watcher.unref()` had been
         * called previously.
         * @since v14.3.0, v12.20.0
         */
        ref(): this;
        /**
         * When called, the active `fs.StatWatcher` object will not require the Node.js
         * event loop to remain active. If there is no other activity keeping the
         * event loop running, the process may exit before the `fs.StatWatcher` object's
         * callback is invoked. Calling `watcher.unref()` multiple times will have
         * no effect.
         * @since v14.3.0, v12.20.0
         */
        unref(): this;
    }
    /**
     * Watcher returned by {@link watch}. Extends `EventEmitter`.
     */
    export interface FSWatcher extends EventEmitter {
        /**
         * Stop watching for changes on the given `fs.FSWatcher`. Once stopped, the `fs.FSWatcher` object is no longer usable.
         * @since v0.5.8
         */
        close(): void;
        /**
         * events.EventEmitter
         * 1. change - emitted with `(eventType, filename)` when a watched path changes
         * 2. error - emitted with the `Error` that occurred
         * 3. close - emitted once the watcher has been closed
         */
        addListener(event: string, listener: (...args: any[]) => void): this;
        addListener(event: 'change', listener: (eventType: string, filename: string | Buffer) => void): this;
        addListener(event: 'error', listener: (error: Error) => void): this;
        addListener(event: 'close', listener: () => void): this;
        on(event: string, listener: (...args: any[]) => void): this;
        on(event: 'change', listener: (eventType: string, filename: string | Buffer) => void): this;
        on(event: 'error', listener: (error: Error) => void): this;
        on(event: 'close', listener: () => void): this;
        once(event: string, listener: (...args: any[]) => void): this;
        once(event: 'change', listener: (eventType: string, filename: string | Buffer) => void): this;
        once(event: 'error', listener: (error: Error) => void): this;
        once(event: 'close', listener: () => void): this;
        prependListener(event: string, listener: (...args: any[]) => void): this;
        prependListener(event: 'change', listener: (eventType: string, filename: string | Buffer) => void): this;
        prependListener(event: 'error', listener: (error: Error) => void): this;
        prependListener(event: 'close', listener: () => void): this;
        prependOnceListener(event: string, listener: (...args: any[]) => void): this;
        prependOnceListener(event: 'change', listener: (eventType: string, filename: string | Buffer) => void): this;
        prependOnceListener(event: 'error', listener: (error: Error) => void): this;
        prependOnceListener(event: 'close', listener: () => void): this;
    }
    /**
     * Instances of `fs.ReadStream` are created and returned using the {@link createReadStream} function.
     * @since v0.1.93
     */
    export class ReadStream extends stream.Readable {
        /**
         * Closes the stream. Optionally accepts a callback that is invoked once
         * the stream has been closed (or with an error).
         */
        close(callback?: (err?: NodeJS.ErrnoException | null) => void): void;
        /**
         * The number of bytes that have been read so far.
         * @since v6.4.0
         */
        bytesRead: number;
        /**
         * The path to the file the stream is reading from as specified in the first
         * argument to `fs.createReadStream()`. If `path` is passed as a string, then`readStream.path` will be a string. If `path` is passed as a `Buffer`, then`readStream.path` will be a
         * `Buffer`. If `fd` is specified, then`readStream.path` will be `undefined`.
         * @since v0.1.93
         */
        path: string | Buffer;
        /**
         * This property is `true` if the underlying file has not been opened yet,
         * i.e. before the `'ready'` event is emitted.
         * @since v11.2.0, v10.16.0
         */
        pending: boolean;
        /**
         * events.EventEmitter
         * 1. open - emitted with the file descriptor once the file is opened
         * 2. close - emitted once the stream and its file descriptor are closed
         * 3. ready - emitted when the stream is ready to be used
         * (other events are inherited from `stream.Readable`)
         */
        addListener(event: 'close', listener: () => void): this;
        addListener(event: 'data', listener: (chunk: Buffer | string) => void): this;
        addListener(event: 'end', listener: () => void): this;
        addListener(event: 'error', listener: (err: Error) => void): this;
        addListener(event: 'open', listener: (fd: number) => void): this;
        addListener(event: 'pause', listener: () => void): this;
        addListener(event: 'readable', listener: () => void): this;
        addListener(event: 'ready', listener: () => void): this;
        addListener(event: 'resume', listener: () => void): this;
        addListener(event: string | symbol, listener: (...args: any[]) => void): this;
        on(event: 'close', listener: () => void): this;
        on(event: 'data', listener: (chunk: Buffer | string) => void): this;
        on(event: 'end', listener: () => void): this;
        on(event: 'error', listener: (err: Error) => void): this;
        on(event: 'open', listener: (fd: number) => void): this;
        on(event: 'pause', listener: () => void): this;
        on(event: 'readable', listener: () => void): this;
        on(event: 'ready', listener: () => void): this;
        on(event: 'resume', listener: () => void): this;
        on(event: string | symbol, listener: (...args: any[]) => void): this;
        once(event: 'close', listener: () => void): this;
        once(event: 'data', listener: (chunk: Buffer | string) => void): this;
        once(event: 'end', listener: () => void): this;
        once(event: 'error', listener: (err: Error) => void): this;
        once(event: 'open', listener: (fd: number) => void): this;
        once(event: 'pause', listener: () => void): this;
        once(event: 'readable', listener: () => void): this;
        once(event: 'ready', listener: () => void): this;
        once(event: 'resume', listener: () => void): this;
        once(event: string | symbol, listener: (...args: any[]) => void): this;
        prependListener(event: 'close', listener: () => void): this;
        prependListener(event: 'data', listener: (chunk: Buffer | string) => void): this;
        prependListener(event: 'end', listener: () => void): this;
        prependListener(event: 'error', listener: (err: Error) => void): this;
        prependListener(event: 'open', listener: (fd: number) => void): this;
        prependListener(event: 'pause', listener: () => void): this;
        prependListener(event: 'readable', listener: () => void): this;
        prependListener(event: 'ready', listener: () => void): this;
        prependListener(event: 'resume', listener: () => void): this;
        prependListener(event: string | symbol, listener: (...args: any[]) => void): this;
        prependOnceListener(event: 'close', listener: () => void): this;
        prependOnceListener(event: 'data', listener: (chunk: Buffer | string) => void): this;
        prependOnceListener(event: 'end', listener: () => void): this;
        prependOnceListener(event: 'error', listener: (err: Error) => void): this;
        prependOnceListener(event: 'open', listener: (fd: number) => void): this;
        prependOnceListener(event: 'pause', listener: () => void): this;
        prependOnceListener(event: 'readable', listener: () => void): this;
        prependOnceListener(event: 'ready', listener: () => void): this;
        prependOnceListener(event: 'resume', listener: () => void): this;
        prependOnceListener(event: string | symbol, listener: (...args: any[]) => void): this;
    }
/**
 * Extends `stream.Writable`.
 *
 * Instances of `fs.WriteStream` are created and returned using the {@link createWriteStream} function.
 * @since v0.1.93
 */
export class WriteStream extends stream.Writable {
    /**
     * Closes `writeStream`. Optionally accepts a
     * callback that will be executed once the `writeStream`is closed.
     * @since v0.9.4
     */
    close(callback?: (err?: NodeJS.ErrnoException | null) => void): void;
    /**
     * The number of bytes written so far. Does not include data that is still queued
     * for writing.
     * @since v0.4.7
     */
    bytesWritten: number;
    /**
     * The path to the file the stream is writing to as specified in the first
     * argument to {@link createWriteStream}. If `path` is passed as a string, then`writeStream.path` will be a string. If `path` is passed as a `Buffer`, then`writeStream.path` will be a
     * `Buffer`.
     * @since v0.1.93
     */
    path: string | Buffer;
    /**
     * This property is `true` if the underlying file has not been opened yet,
     * i.e. before the `'ready'` event is emitted.
     * @since v11.2.0
     */
    pending: boolean;
    /**
     * events.EventEmitter
     *
     * Typed events declared below (the overloads of each listener method):
     * 1. close
     * 2. drain
     * 3. error
     * 4. finish
     * 5. open
     * 6. pipe
     * 7. ready
     * 8. unpipe
     */
    addListener(event: 'close', listener: () => void): this;
    addListener(event: 'drain', listener: () => void): this;
    addListener(event: 'error', listener: (err: Error) => void): this;
    addListener(event: 'finish', listener: () => void): this;
    addListener(event: 'open', listener: (fd: number) => void): this;
    addListener(event: 'pipe', listener: (src: stream.Readable) => void): this;
    addListener(event: 'ready', listener: () => void): this;
    addListener(event: 'unpipe', listener: (src: stream.Readable) => void): this;
    addListener(event: string | symbol, listener: (...args: any[]) => void): this;
    on(event: 'close', listener: () => void): this;
    on(event: 'drain', listener: () => void): this;
    on(event: 'error', listener: (err: Error) => void): this;
    on(event: 'finish', listener: () => void): this;
    on(event: 'open', listener: (fd: number) => void): this;
    on(event: 'pipe', listener: (src: stream.Readable) => void): this;
    on(event: 'ready', listener: () => void): this;
    on(event: 'unpipe', listener: (src: stream.Readable) => void): this;
    on(event: string | symbol, listener: (...args: any[]) => void): this;
    once(event: 'close', listener: () => void): this;
    once(event: 'drain', listener: () => void): this;
    once(event: 'error', listener: (err: Error) => void): this;
    once(event: 'finish', listener: () => void): this;
    once(event: 'open', listener: (fd: number) => void): this;
    once(event: 'pipe', listener: (src: stream.Readable) => void): this;
    once(event: 'ready', listener: () => void): this;
    once(event: 'unpipe', listener: (src: stream.Readable) => void): this;
    once(event: string | symbol, listener: (...args: any[]) => void): this;
    prependListener(event: 'close', listener: () => void): this;
    prependListener(event: 'drain', listener: () => void): this;
    prependListener(event: 'error', listener: (err: Error) => void): this;
    prependListener(event: 'finish', listener: () => void): this;
    prependListener(event: 'open', listener: (fd: number) => void): this;
    prependListener(event: 'pipe', listener: (src: stream.Readable) => void): this;
    prependListener(event: 'ready', listener: () => void): this;
    prependListener(event: 'unpipe', listener: (src: stream.Readable) => void): this;
    prependListener(event: string | symbol, listener: (...args: any[]) => void): this;
    prependOnceListener(event: 'close', listener: () => void): this;
    prependOnceListener(event: 'drain', listener: () => void): this;
    prependOnceListener(event: 'error', listener: (err: Error) => void): this;
    prependOnceListener(event: 'finish', listener: () => void): this;
    prependOnceListener(event: 'open', listener: (fd: number) => void): this;
    prependOnceListener(event: 'pipe', listener: (src: stream.Readable) => void): this;
    prependOnceListener(event: 'ready', listener: () => void): this;
    prependOnceListener(event: 'unpipe', listener: (src: stream.Readable) => void): this;
    prependOnceListener(event: string | symbol, listener: (...args: any[]) => void): this;
}
/**
 * Asynchronously rename file at `oldPath` to the pathname provided
 * as `newPath`. In the case that `newPath` already exists, it will
 * be overwritten. If there is a directory at `newPath`, an error will
 * be raised instead. No arguments other than a possible exception are
 * given to the completion callback.
 *
 * See also: [`rename(2)`](http://man7.org/linux/man-pages/man2/rename.2.html).
 *
 * ```js
 * import { rename } from 'fs';
 *
 * rename('oldFile.txt', 'newFile.txt', (err) => {
 *   if (err) throw err;
 *   console.log('Rename complete!');
 * });
 * ```
 * @since v0.0.2
 * @param oldPath A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param newPath A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param callback Called with `(err)` once the rename completes.
 */
export function rename(oldPath: PathLike, newPath: PathLike, callback: NoParamCallback): void;
export namespace rename {
    /**
     * Asynchronous rename(2) - Change the name or location of a file or directory.
     * @param oldPath A path to a file. If a URL is provided, it must use the `file:` protocol.
     * URL support is _experimental_.
     * @param newPath A path to a file. If a URL is provided, it must use the `file:` protocol.
     * URL support is _experimental_.
     */
    function __promisify__(oldPath: PathLike, newPath: PathLike): Promise<void>;
}
/**
 * Renames the file from `oldPath` to `newPath`. Returns `undefined`.
 *
 * See the POSIX [`rename(2)`](http://man7.org/linux/man-pages/man2/rename.2.html) documentation for more details.
 * @since v0.1.21
 */
export function renameSync(oldPath: PathLike, newPath: PathLike): void;
/**
 * Truncates the file. No arguments other than a possible exception are
 * given to the completion callback. A file descriptor can also be passed as the
 * first argument. In this case, `fs.ftruncate()` is called.
 *
 * ```js
 * import { truncate } from 'fs';
 * // Assuming that 'path/file.txt' is a regular file.
 * truncate('path/file.txt', (err) => {
 *   if (err) throw err;
 *   console.log('path/file.txt was truncated');
 * });
 * ```
 *
 * Passing a file descriptor is deprecated and may result in an error being thrown
 * in the future.
 *
 * See the POSIX [`truncate(2)`](http://man7.org/linux/man-pages/man2/truncate.2.html) documentation for more details.
 * @since v0.8.6
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param [len=0]
 */
export function truncate(path: PathLike, len: number | undefined | null, callback: NoParamCallback): void;
/**
 * Asynchronous truncate(2) - Truncate a file to a specified length.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 */
export function truncate(path: PathLike, callback: NoParamCallback): void;
export namespace truncate {
    /**
     * Asynchronous truncate(2) - Truncate a file to a specified length.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     * @param len If not specified, defaults to `0`.
     */
    function __promisify__(path: PathLike, len?: number | null): Promise<void>;
}
/**
 * Truncates the file. Returns `undefined`. A file descriptor can also be
 * passed as the first argument. In this case, `fs.ftruncateSync()` is called.
 *
 * Passing a file descriptor is deprecated and may result in an error being thrown
 * in the future.
 * @since v0.8.6
 * @param [len=0]
 */
export function truncateSync(path: PathLike, len?: number | null): void;
/**
 * Truncates the file descriptor. No arguments other than a possible exception are
 * given to the completion callback.
 *
 * See the POSIX [`ftruncate(2)`](http://man7.org/linux/man-pages/man2/ftruncate.2.html) documentation for more detail.
 *
 * If the file referred to by the file descriptor was larger than `len` bytes, only
 * the first `len` bytes will be retained in the file.
 *
 * For example, the following program retains only the first four bytes of the
 * file:
 *
 * ```js
 * import { open, close, ftruncate } from 'fs';
 *
 * function closeFd(fd) {
 *   close(fd, (err) => {
 *     if (err) throw err;
 *   });
 * }
 *
 * open('temp.txt', 'r+', (err, fd) => {
 *   if (err) throw err;
 *
 *   try {
 *     ftruncate(fd, 4, (err) => {
 *       closeFd(fd);
 *       if (err) throw err;
 *     });
 *   } catch (err) {
 *     closeFd(fd);
 *     if (err) throw err;
 *   }
 * });
 * ```
 *
 * If the file previously was shorter than `len` bytes, it is extended, and the
 * extended part is filled with null bytes (`'\0'`):
 *
 * If `len` is negative then `0` will be used.
 * @since v0.8.6
 * @param fd A file descriptor.
 * @param [len=0]
 */
export function ftruncate(fd: number, len: number | undefined | null, callback: NoParamCallback): void;
/**
 * Asynchronous ftruncate(2) - Truncate a file to a specified length.
 * @param fd A file descriptor.
 */
export function ftruncate(fd: number, callback: NoParamCallback): void;
export namespace ftruncate {
    /**
     * Asynchronous ftruncate(2) - Truncate a file to a specified length.
     * @param fd A file descriptor.
     * @param len If not specified, defaults to `0`.
     */
    function __promisify__(fd: number, len?: number | null): Promise<void>;
}
/**
 * Truncates the file descriptor. Returns `undefined`.
 *
 * For detailed information, see the documentation of the asynchronous version of
 * this API: {@link ftruncate}.
 * @since v0.8.6
 * @param [len=0]
 */
export function ftruncateSync(fd: number, len?: number | null): void;
/**
 * Asynchronously changes owner and group of a file. No arguments other than a
 * possible exception are given to the completion callback.
 *
 * See the POSIX [`chown(2)`](http://man7.org/linux/man-pages/man2/chown.2.html) documentation for more detail.
 * @since v0.1.97
 * @param uid The file's new owner's user id.
 * @param gid The file's new group's group id.
 */
export function chown(path: PathLike, uid: number, gid: number, callback: NoParamCallback): void;
export namespace chown {
    /**
     * Asynchronous chown(2) - Change ownership of a file.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     */
    function __promisify__(path: PathLike, uid: number, gid: number): Promise<void>;
}
/**
 * Synchronously changes owner and group of a file. Returns `undefined`.
 * This is the synchronous version of {@link chown}.
 *
 * See the POSIX [`chown(2)`](http://man7.org/linux/man-pages/man2/chown.2.html) documentation for more detail.
 * @since v0.1.97
 */
export function chownSync(path: PathLike, uid: number, gid: number): void;
/**
 * Sets the owner of the file. No arguments other than a possible exception are
 * given to the completion callback.
 *
 * See the POSIX [`fchown(2)`](http://man7.org/linux/man-pages/man2/fchown.2.html) documentation for more detail.
 * @since v0.4.7
 * @param uid The file's new owner's user id.
 * @param gid The file's new group's group id.
 */
export function fchown(fd: number, uid: number, gid: number, callback: NoParamCallback): void;
export namespace fchown {
    /**
     * Asynchronous fchown(2) - Change ownership of a file.
     * @param fd A file descriptor.
     */
    function __promisify__(fd: number, uid: number, gid: number): Promise<void>;
}
/**
 * Sets the owner of the file. Returns `undefined`.
 *
 * See the POSIX [`fchown(2)`](http://man7.org/linux/man-pages/man2/fchown.2.html) documentation for more detail.
 * @since v0.4.7
 * @param uid The file's new owner's user id.
 * @param gid The file's new group's group id.
 */
export function fchownSync(fd: number, uid: number, gid: number): void;
/**
 * Set the owner of the symbolic link. No arguments other than a possible
 * exception are given to the completion callback.
 *
 * See the POSIX [`lchown(2)`](http://man7.org/linux/man-pages/man2/lchown.2.html) documentation for more detail.
 */
// NOTE(review): unlike the sibling chown/fchown declarations, lchown and
// lchownSync carry no @since tags — confirm the intended version against the
// Node.js docs before adding one.
export function lchown(path: PathLike, uid: number, gid: number, callback: NoParamCallback): void;
export namespace lchown {
    /**
     * Asynchronous lchown(2) - Change ownership of a file. Does not dereference symbolic links.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     */
    function __promisify__(path: PathLike, uid: number, gid: number): Promise<void>;
}
/**
 * Set the owner for the path. Returns `undefined`.
 *
 * See the POSIX [`lchown(2)`](http://man7.org/linux/man-pages/man2/lchown.2.html) documentation for more details.
 * @param uid The file's new owner's user id.
 * @param gid The file's new group's group id.
 */
export function lchownSync(path: PathLike, uid: number, gid: number): void;
/**
 * Changes the access and modification times of a file in the same way as {@link utimes}, with the difference that if the path refers to a symbolic
 * link, then the link is not dereferenced: instead, the timestamps of the
 * symbolic link itself are changed.
 *
 * No arguments other than a possible exception are given to the completion
 * callback.
 * @since v14.5.0, v12.19.0
 * @param atime The last access time. If a string is provided, it will be coerced to number.
 * @param mtime The last modified time. If a string is provided, it will be coerced to number.
 */
export function lutimes(path: PathLike, atime: TimeLike, mtime: TimeLike, callback: NoParamCallback): void;
export namespace lutimes {
    /**
     * Changes the access and modification times of a file in the same way as `fsPromises.utimes()`,
     * with the difference that if the path refers to a symbolic link, then the link is not
     * dereferenced: instead, the timestamps of the symbolic link itself are changed.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     * @param atime The last access time. If a string is provided, it will be coerced to number.
     * @param mtime The last modified time. If a string is provided, it will be coerced to number.
     */
    function __promisify__(path: PathLike, atime: TimeLike, mtime: TimeLike): Promise<void>;
}
/**
 * Change the file system timestamps of the symbolic link referenced by `path`.
 * Returns `undefined`, or throws an exception when parameters are incorrect or
 * the operation fails. This is the synchronous version of {@link lutimes}.
 * @since v14.5.0, v12.19.0
 */
export function lutimesSync(path: PathLike, atime: TimeLike, mtime: TimeLike): void;
/**
 * Asynchronously changes the permissions of a file. No arguments other than a
 * possible exception are given to the completion callback.
 *
 * See the POSIX [`chmod(2)`](http://man7.org/linux/man-pages/man2/chmod.2.html) documentation for more detail.
 *
 * ```js
 * import { chmod } from 'fs';
 *
 * chmod('my_file.txt', 0o775, (err) => {
 *   if (err) throw err;
 *   console.log('The permissions for file "my_file.txt" have been changed!');
 * });
 * ```
 * @since v0.1.30
 * @param mode A file mode. If a string is passed, it is parsed as an octal integer.
 */
export function chmod(path: PathLike, mode: Mode, callback: NoParamCallback): void;
export namespace chmod {
    /**
     * Asynchronous chmod(2) - Change permissions of a file.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     * @param mode A file mode. If a string is passed, it is parsed as an octal integer.
     */
    function __promisify__(path: PathLike, mode: Mode): Promise<void>;
}
/**
 * For detailed information, see the documentation of the asynchronous version of
 * this API: {@link chmod}.
 *
 * See the POSIX [`chmod(2)`](http://man7.org/linux/man-pages/man2/chmod.2.html) documentation for more detail.
 * @since v0.6.7
 */
export function chmodSync(path: PathLike, mode: Mode): void;
/**
 * Sets the permissions on the file. No arguments other than a possible exception
 * are given to the completion callback.
 *
 * See the POSIX [`fchmod(2)`](http://man7.org/linux/man-pages/man2/fchmod.2.html) documentation for more detail.
 * @since v0.4.7
 * @param fd A file descriptor.
 * @param mode A file mode. If a string is passed, it is parsed as an octal integer.
 */
export function fchmod(fd: number, mode: Mode, callback: NoParamCallback): void;
export namespace fchmod {
    /**
     * Asynchronous fchmod(2) - Change permissions of a file.
     * @param fd A file descriptor.
     * @param mode A file mode. If a string is passed, it is parsed as an octal integer.
     */
    function __promisify__(fd: number, mode: Mode): Promise<void>;
}
/**
 * Sets the permissions on the file. Returns `undefined`.
 *
 * See the POSIX [`fchmod(2)`](http://man7.org/linux/man-pages/man2/fchmod.2.html) documentation for more detail.
 * @since v0.4.7
 */
export function fchmodSync(fd: number, mode: Mode): void;
/**
 * Changes the permissions on a symbolic link. No arguments other than a possible
 * exception are given to the completion callback.
 *
 * This method is only implemented on macOS.
 *
 * See the POSIX [`lchmod(2)`](https://www.freebsd.org/cgi/man.cgi?query=lchmod&sektion=2) documentation for more detail.
 * @deprecated Since v0.4.7
 * @param mode A file mode. If a string is passed, it is parsed as an octal integer.
 */
export function lchmod(path: PathLike, mode: Mode, callback: NoParamCallback): void;
/** @deprecated */
export namespace lchmod {
    /**
     * Asynchronous lchmod(2) - Change permissions of a file. Does not dereference symbolic links.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     * @param mode A file mode. If a string is passed, it is parsed as an octal integer.
     */
    function __promisify__(path: PathLike, mode: Mode): Promise<void>;
}
/**
 * Changes the permissions on a symbolic link. Returns `undefined`.
 *
 * This method is only implemented on macOS.
 *
 * See the POSIX [`lchmod(2)`](https://www.freebsd.org/cgi/man.cgi?query=lchmod&sektion=2) documentation for more detail.
 * @deprecated Since v0.4.7
 */
export function lchmodSync(path: PathLike, mode: Mode): void;
/**
 * Asynchronous [`stat(2)`](http://man7.org/linux/man-pages/man2/stat.2.html). The callback gets two arguments `(err, stats)` where`stats` is an `fs.Stats` object.
 *
 * In case of an error, the `err.code` will be one of `Common System Errors`.
 *
 * Using `fs.stat()` to check for the existence of a file before calling`fs.open()`, `fs.readFile()` or `fs.writeFile()` is not recommended.
 * Instead, user code should open/read/write the file directly and handle the
 * error raised if the file is not available.
 *
 * To check if a file exists without manipulating it afterwards, {@link access} is recommended.
 *
 * For example, given the following directory structure:
 *
 * ```text
 * - txtDir
 * -- file.txt
 * - app.js
 * ```
 *
 * The next program will check for the stats of the given paths:
 *
 * ```js
 * import { stat } from 'fs';
 *
 * const pathsToCheck = ['./txtDir', './txtDir/file.txt'];
 *
 * for (let i = 0; i < pathsToCheck.length; i++) {
 *   stat(pathsToCheck[i], (err, stats) => {
 *     console.log(stats.isDirectory());
 *     console.log(stats);
 *   });
 * }
 * ```
 *
 * The resulting output will resemble:
 *
 * ```console
 * true
 * Stats {
 *   dev: 16777220,
 *   mode: 16877,
 *   nlink: 3,
 *   uid: 501,
 *   gid: 20,
 *   rdev: 0,
 *   blksize: 4096,
 *   ino: 14214262,
 *   size: 96,
 *   blocks: 0,
 *   atimeMs: 1561174653071.963,
 *   mtimeMs: 1561174614583.3518,
 *   ctimeMs: 1561174626623.5366,
 *   birthtimeMs: 1561174126937.2893,
 *   atime: 2019-06-22T03:37:33.072Z,
 *   mtime: 2019-06-22T03:36:54.583Z,
 *   ctime: 2019-06-22T03:37:06.624Z,
 *   birthtime: 2019-06-22T03:28:46.937Z
 * }
 * false
 * Stats {
 *   dev: 16777220,
 *   mode: 33188,
 *   nlink: 1,
 *   uid: 501,
 *   gid: 20,
 *   rdev: 0,
 *   blksize: 4096,
 *   ino: 14214074,
 *   size: 8,
 *   blocks: 8,
 *   atimeMs: 1561174616618.8555,
 *   mtimeMs: 1561174614584,
 *   ctimeMs: 1561174614583.8145,
 *   birthtimeMs: 1561174007710.7478,
 *   atime: 2019-06-22T03:36:56.619Z,
 *   mtime: 2019-06-22T03:36:54.584Z,
 *   ctime: 2019-06-22T03:36:54.584Z,
 *   birthtime: 2019-06-22T03:26:47.711Z
 * }
 * ```
 * @since v0.0.2
 */
export function stat(path: PathLike, callback: (err: NodeJS.ErrnoException | null, stats: Stats) => void): void;
// Overload: without `bigint: true` the callback receives a `Stats`.
export function stat(
    path: PathLike,
    options:
        | (StatOptions & {
              bigint?: false | undefined;
          })
        | undefined,
    callback: (err: NodeJS.ErrnoException | null, stats: Stats) => void
): void;
// Overload: with `bigint: true` the callback receives a `BigIntStats` instead.
export function stat(
    path: PathLike,
    options: StatOptions & {
        bigint: true;
    },
    callback: (err: NodeJS.ErrnoException | null, stats: BigIntStats) => void
): void;
export function stat(path: PathLike, options: StatOptions | undefined, callback: (err: NodeJS.ErrnoException | null, stats: Stats | BigIntStats) => void): void;
export namespace stat {
    /**
     * Asynchronous stat(2) - Get file status.
     * The resolved value is `Stats`, or `BigIntStats` when `options.bigint` is `true`.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     */
    function __promisify__(
        path: PathLike,
        options?: StatOptions & {
            bigint?: false | undefined;
        }
    ): Promise<Stats>;
    function __promisify__(
        path: PathLike,
        options: StatOptions & {
            bigint: true;
        }
    ): Promise<BigIntStats>;
    function __promisify__(path: PathLike, options?: StatOptions): Promise<Stats | BigIntStats>;
}
/**
 * Call-signature set shared by the synchronous stat variants (`statSync` and
 * `lstatSync` are both typed as `StatSyncFn`). The overloads encode how the
 * options select the result type:
 * - `bigint: true` yields `BigIntStats` instead of `Stats`;
 * - `throwIfNoEntry: false` makes a missing entry return `undefined`
 *   instead of throwing.
 */
export interface StatSyncFn extends Function {
    (path: PathLike, options?: undefined): Stats;
    (
        path: PathLike,
        options?: StatSyncOptions & {
            bigint?: false | undefined;
            throwIfNoEntry: false;
        }
    ): Stats | undefined;
    (
        path: PathLike,
        options: StatSyncOptions & {
            bigint: true;
            throwIfNoEntry: false;
        }
    ): BigIntStats | undefined;
    (
        path: PathLike,
        options?: StatSyncOptions & {
            bigint?: false | undefined;
        }
    ): Stats;
    (
        path: PathLike,
        options: StatSyncOptions & {
            bigint: true;
        }
    ): BigIntStats;
    (
        path: PathLike,
        options: StatSyncOptions & {
            bigint: boolean;
            throwIfNoEntry?: false | undefined;
        }
    ): Stats | BigIntStats;
    (path: PathLike, options?: StatSyncOptions): Stats | BigIntStats | undefined;
}
/**
 * Synchronous stat(2) - Get file status.
 * See {@link StatSyncFn} for how `options` selects the return type.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 */
export const statSync: StatSyncFn;
/**
 * Invokes the callback with the `fs.Stats` for the file descriptor.
 *
 * See the POSIX [`fstat(2)`](http://man7.org/linux/man-pages/man2/fstat.2.html) documentation for more detail.
 * @since v0.1.95
 * @param fd A file descriptor.
 */
export function fstat(fd: number, callback: (err: NodeJS.ErrnoException | null, stats: Stats) => void): void;
export function fstat(
    fd: number,
    options:
        | (StatOptions & {
              bigint?: false | undefined;
          })
        | undefined,
    callback: (err: NodeJS.ErrnoException | null, stats: Stats) => void
): void;
// Overload: with `bigint: true` the callback receives a `BigIntStats`.
export function fstat(
    fd: number,
    options: StatOptions & {
        bigint: true;
    },
    callback: (err: NodeJS.ErrnoException | null, stats: BigIntStats) => void
): void;
export function fstat(fd: number, options: StatOptions | undefined, callback: (err: NodeJS.ErrnoException | null, stats: Stats | BigIntStats) => void): void;
export namespace fstat {
    /**
     * Asynchronous fstat(2) - Get file status.
     * @param fd A file descriptor.
     */
    function __promisify__(
        fd: number,
        options?: StatOptions & {
            bigint?: false | undefined;
        }
    ): Promise<Stats>;
    function __promisify__(
        fd: number,
        options: StatOptions & {
            bigint: true;
        }
    ): Promise<BigIntStats>;
    function __promisify__(fd: number, options?: StatOptions): Promise<Stats | BigIntStats>;
}
/**
 * Retrieves the `fs.Stats` for the file descriptor.
 *
 * See the POSIX [`fstat(2)`](http://man7.org/linux/man-pages/man2/fstat.2.html) documentation for more detail.
 * @since v0.1.95
 * @param fd A file descriptor.
 */
export function fstatSync(
    fd: number,
    options?: StatOptions & {
        bigint?: false | undefined;
    }
): Stats;
export function fstatSync(
    fd: number,
    options: StatOptions & {
        bigint: true;
    }
): BigIntStats;
export function fstatSync(fd: number, options?: StatOptions): Stats | BigIntStats;
/**
 * Retrieves the `fs.Stats` for the symbolic link referred to by the path.
 * The callback gets two arguments `(err, stats)` where `stats` is a `fs.Stats` object. `lstat()` is identical to `stat()`, except that if `path` is a symbolic
 * link, then the link itself is stat-ed, not the file that it refers to.
 *
 * See the POSIX [`lstat(2)`](http://man7.org/linux/man-pages/man2/lstat.2.html) documentation for more details.
 * @since v0.1.30
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 */
export function lstat(path: PathLike, callback: (err: NodeJS.ErrnoException | null, stats: Stats) => void): void;
export function lstat(
    path: PathLike,
    options:
        | (StatOptions & {
              bigint?: false | undefined;
          })
        | undefined,
    callback: (err: NodeJS.ErrnoException | null, stats: Stats) => void
): void;
// Overload: with `bigint: true` the callback receives a `BigIntStats`.
export function lstat(
    path: PathLike,
    options: StatOptions & {
        bigint: true;
    },
    callback: (err: NodeJS.ErrnoException | null, stats: BigIntStats) => void
): void;
export function lstat(path: PathLike, options: StatOptions | undefined, callback: (err: NodeJS.ErrnoException | null, stats: Stats | BigIntStats) => void): void;
export namespace lstat {
    /**
     * Asynchronous lstat(2) - Get file status. Does not dereference symbolic links.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     */
    function __promisify__(
        path: PathLike,
        options?: StatOptions & {
            bigint?: false | undefined;
        }
    ): Promise<Stats>;
    function __promisify__(
        path: PathLike,
        options: StatOptions & {
            bigint: true;
        }
    ): Promise<BigIntStats>;
    function __promisify__(path: PathLike, options?: StatOptions): Promise<Stats | BigIntStats>;
}
/**
 * Asynchronous statfs(2). Returns information about the mounted file system which contains path. The callback gets two arguments (err, stats) where stats is an <fs.StatFs> object.
 * In case of an error, the err.code will be one of Common System Errors.
 * @param path A path to an existing file or directory on the file system to be queried.
 * @param callback Called with `(err, stats)`.
 */
export function statfs(path: PathLike, callback: (err: NodeJS.ErrnoException | null, stats: StatsFs) => void): void;
export function statfs(
    path: PathLike,
    options:
        | (StatFsOptions & {
              bigint?: false | undefined;
          })
        | undefined,
    callback: (err: NodeJS.ErrnoException | null, stats: StatsFs) => void
): void;
// Overload: with `bigint: true` the callback receives a `BigIntStatsFs`.
export function statfs(
    path: PathLike,
    options: StatFsOptions & {
        bigint: true;
    },
    callback: (err: NodeJS.ErrnoException | null, stats: BigIntStatsFs) => void
): void;
export function statfs(path: PathLike, options: StatFsOptions | undefined, callback: (err: NodeJS.ErrnoException | null, stats: StatsFs | BigIntStatsFs) => void): void;
export namespace statfs {
    /**
     * Asynchronous statfs(2) - Returns information about the mounted file system which contains path. The callback gets two arguments (err, stats) where stats is an <fs.StatFs> object.
     * @param path A path to an existing file or directory on the file system to be queried.
     */
    function __promisify__(
        path: PathLike,
        options?: StatFsOptions & {
            bigint?: false | undefined;
        }
    ): Promise<StatsFs>;
    function __promisify__(
        path: PathLike,
        options: StatFsOptions & {
            bigint: true;
        }
    ): Promise<BigIntStatsFs>;
    function __promisify__(path: PathLike, options?: StatFsOptions): Promise<StatsFs | BigIntStatsFs>;
}
/**
 * Synchronous statfs(2). Returns information about the mounted file system which
 * contains path, as an <fs.StatFs> object (or <fs.BigIntStatsFs> when
 * `options.bigint` is `true`).
 * In case of an error, the thrown error's code will be one of Common System Errors.
 * @param path A path to an existing file or directory on the file system to be queried.
 */
export function statfsSync(
    path: PathLike,
    options?: StatFsOptions & {
        bigint?: false | undefined;
    }
): StatsFs;
export function statfsSync(
    path: PathLike,
    options: StatFsOptions & {
        bigint: true;
    }
): BigIntStatsFs;
export function statfsSync(path: PathLike, options?: StatFsOptions): StatsFs | BigIntStatsFs;
/**
 * Synchronous lstat(2) - Get file status. Does not dereference symbolic links.
 * See {@link StatSyncFn} for how `options` selects the return type.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 */
export const lstatSync: StatSyncFn;
/**
 * Creates a new link from the `existingPath` to the `newPath`. See the POSIX [`link(2)`](http://man7.org/linux/man-pages/man2/link.2.html) documentation for more detail. No arguments other than
 * a possible
 * exception are given to the completion callback.
 * @since v0.1.31
 * @param existingPath A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param newPath A path to a file. If a URL is provided, it must use the `file:` protocol.
 */
export function link(existingPath: PathLike, newPath: PathLike, callback: NoParamCallback): void;
export namespace link {
    /**
     * Asynchronous link(2) - Create a new link (also known as a hard link) to an existing file.
     * @param existingPath A path to a file. If a URL is provided, it must use the `file:` protocol.
     * @param newPath A path to a file. If a URL is provided, it must use the `file:` protocol.
     */
    function __promisify__(existingPath: PathLike, newPath: PathLike): Promise<void>;
}
/**
 * Creates a new link from the `existingPath` to the `newPath`. See the POSIX [`link(2)`](http://man7.org/linux/man-pages/man2/link.2.html) documentation for more detail. Returns `undefined`.
 * @since v0.1.31
 */
export function linkSync(existingPath: PathLike, newPath: PathLike): void;
/**
 * Creates the link called `path` pointing to `target`. No arguments other than a
 * possible exception are given to the completion callback.
 *
 * See the POSIX [`symlink(2)`](http://man7.org/linux/man-pages/man2/symlink.2.html) documentation for more details.
 *
 * The `type` argument is only available on Windows and ignored on other platforms.
 * It can be set to `'dir'`, `'file'`, or `'junction'`. If the `type` argument is
 * not set, Node.js will autodetect `target` type and use `'file'` or `'dir'`. If
 * the `target` does not exist, `'file'` will be used. Windows junction points
 * require the destination path to be absolute. When using `'junction'`, the`target` argument will automatically be normalized to absolute path.
 *
 * Relative targets are relative to the link’s parent directory.
 *
 * ```js
 * import { symlink } from 'fs';
 *
 * symlink('./mew', './mewtwo', callback);
 * ```
 *
 * The above example creates a symbolic link `mewtwo` which points to `mew` in the
 * same directory:
 *
 * ```bash
 * $ tree .
 * .
 * ├── mew
 * └── mewtwo -> ./mew
 * ```
 * @since v0.1.31
 * @param target A path to an existing file. If a URL is provided, it must use the `file:` protocol.
 * @param path A path to the new symlink. If a URL is provided, it must use the `file:` protocol.
 */
export function symlink(target: PathLike, path: PathLike, type: symlink.Type | undefined | null, callback: NoParamCallback): void;
/**
 * Asynchronous symlink(2) - Create a new symbolic link to an existing file.
 * @param target A path to an existing file. If a URL is provided, it must use the `file:` protocol.
 * @param path A path to the new symlink. If a URL is provided, it must use the `file:` protocol.
 */
export function symlink(target: PathLike, path: PathLike, callback: NoParamCallback): void;
export namespace symlink {
    /**
     * Asynchronous symlink(2) - Create a new symbolic link to an existing file.
     * @param target A path to an existing file. If a URL is provided, it must use the `file:` protocol.
     * @param path A path to the new symlink. If a URL is provided, it must use the `file:` protocol.
     * @param type May be set to `'dir'`, `'file'`, or `'junction'` (default is `'file'`) and is only available on Windows (ignored on other platforms).
     * When using `'junction'`, the `target` argument will automatically be normalized to an absolute path.
     */
    function __promisify__(target: PathLike, path: PathLike, type?: string | null): Promise<void>;
    // The legal values for the Windows-only `type` argument.
    type Type = 'dir' | 'file' | 'junction';
}
/**
 * Returns `undefined`.
 *
 * For detailed information, see the documentation of the asynchronous version of
 * this API: {@link symlink}.
 * @since v0.1.31
 */
export function symlinkSync(target: PathLike, path: PathLike, type?: symlink.Type | null): void;
/**
 * Reads the contents of the symbolic link referred to by `path`. The callback gets
 * two arguments `(err, linkString)`.
 *
 * See the POSIX [`readlink(2)`](http://man7.org/linux/man-pages/man2/readlink.2.html) documentation for more details.
 *
 * The optional `options` argument can be a string specifying an encoding, or an
 * object with an `encoding` property specifying the character encoding to use for
 * the link path passed to the callback. If the `encoding` is set to `'buffer'`,
 * the link path returned will be passed as a `Buffer` object.
 * @since v0.1.31
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
 */
export function readlink(path: PathLike, options: EncodingOption, callback: (err: NodeJS.ErrnoException | null, linkString: string) => void): void;
/**
 * Asynchronous readlink(2) - read value of a symbolic link.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
 */
export function readlink(path: PathLike, options: BufferEncodingOption, callback: (err: NodeJS.ErrnoException | null, linkString: Buffer) => void): void;
/**
 * Asynchronous readlink(2) - read value of a symbolic link.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
 */
export function readlink(path: PathLike, options: EncodingOption, callback: (err: NodeJS.ErrnoException | null, linkString: string | Buffer) => void): void;
/**
 * Asynchronous readlink(2) - read value of a symbolic link.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 */
export function readlink(path: PathLike, callback: (err: NodeJS.ErrnoException | null, linkString: string) => void): void;
export namespace readlink {
    /**
     * Asynchronous readlink(2) - read value of a symbolic link.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
     */
    function __promisify__(path: PathLike, options?: EncodingOption): Promise<string>;
    /**
     * Asynchronous readlink(2) - read value of a symbolic link.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
     */
    function __promisify__(path: PathLike, options: BufferEncodingOption): Promise<Buffer>;
    /**
     * Asynchronous readlink(2) - read value of a symbolic link.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
     */
    function __promisify__(path: PathLike, options?: EncodingOption): Promise<string | Buffer>;
}
/**
 * Returns the symbolic link's string value.
 *
 * See the POSIX [`readlink(2)`](http://man7.org/linux/man-pages/man2/readlink.2.html) documentation for more details.
 *
 * The optional `options` argument can be a string specifying an encoding, or an
 * object with an `encoding` property specifying the character encoding to use for
 * the link path returned. If the `encoding` is set to `'buffer'`,
 * the link path returned will be passed as a `Buffer` object.
 * @since v0.1.31
 */
export function readlinkSync(path: PathLike, options?: EncodingOption): string;
/**
 * Synchronous readlink(2) - read value of a symbolic link.
 * Overload: `'buffer'` encoding — returns the link target as a `Buffer`.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
 */
export function readlinkSync(path: PathLike, options: BufferEncodingOption): Buffer;
/**
 * Synchronous readlink(2) - read value of a symbolic link.
 * Overload: encoding not statically known — returns `string` or `Buffer`.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
 */
export function readlinkSync(path: PathLike, options?: EncodingOption): string | Buffer;
/**
 * Asynchronously computes the canonical pathname by resolving `.`, `..` and
 * symbolic links.
 *
 * A canonical pathname is not necessarily unique. Hard links and bind mounts can
 * expose a file system entity through many pathnames.
 *
 * This function behaves like [`realpath(3)`](http://man7.org/linux/man-pages/man3/realpath.3.html), with some exceptions:
 *
 * 1. No case conversion is performed on case-insensitive file systems.
 * 2. The maximum number of symbolic links is platform-independent and generally
 * (much) higher than what the native [`realpath(3)`](http://man7.org/linux/man-pages/man3/realpath.3.html) implementation supports.
 *
 * The `callback` gets two arguments `(err, resolvedPath)`. May use `process.cwd` to resolve relative paths.
 *
 * Only paths that can be converted to UTF8 strings are supported.
 *
 * The optional `options` argument can be a string specifying an encoding, or an
 * object with an `encoding` property specifying the character encoding to use for
 * the path passed to the callback. If the `encoding` is set to `'buffer'`,
 * the path returned will be passed as a `Buffer` object.
 *
 * If `path` resolves to a socket or a pipe, the function will return a system
 * dependent name for that object.
 * @since v0.1.31
 */
export function realpath(path: PathLike, options: EncodingOption, callback: (err: NodeJS.ErrnoException | null, resolvedPath: string) => void): void;
/**
 * Asynchronous realpath(3) - return the canonicalized absolute pathname.
 * Overload: `'buffer'` encoding — the resolved path is delivered as a `Buffer`.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
 */
export function realpath(path: PathLike, options: BufferEncodingOption, callback: (err: NodeJS.ErrnoException | null, resolvedPath: Buffer) => void): void;
/**
 * Asynchronous realpath(3) - return the canonicalized absolute pathname.
 * Overload: encoding not statically known — the resolved path may be a `string` or a `Buffer`.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
 */
export function realpath(path: PathLike, options: EncodingOption, callback: (err: NodeJS.ErrnoException | null, resolvedPath: string | Buffer) => void): void;
/**
 * Asynchronous realpath(3) - return the canonicalized absolute pathname.
 * Overload: no `options` — the resolved path is delivered as a `'utf8'` string.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 */
export function realpath(path: PathLike, callback: (err: NodeJS.ErrnoException | null, resolvedPath: string) => void): void;
export namespace realpath {
    /**
     * Asynchronous realpath(3) - return the canonicalized absolute pathname.
     * Promise form used by `util.promisify`; mirrors the callback overloads above.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
     */
    function __promisify__(path: PathLike, options?: EncodingOption): Promise<string>;
    /**
     * Asynchronous realpath(3) - return the canonicalized absolute pathname.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
     */
    function __promisify__(path: PathLike, options: BufferEncodingOption): Promise<Buffer>;
    /**
     * Asynchronous realpath(3) - return the canonicalized absolute pathname.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
     */
    function __promisify__(path: PathLike, options?: EncodingOption): Promise<string | Buffer>;
    /**
     * Asynchronous [`realpath(3)`](http://man7.org/linux/man-pages/man3/realpath.3.html).
     *
     * The `callback` gets two arguments `(err, resolvedPath)`.
     *
     * Only paths that can be converted to UTF8 strings are supported.
     *
     * The optional `options` argument can be a string specifying an encoding, or an
     * object with an `encoding` property specifying the character encoding to use for
     * the path passed to the callback. If the `encoding` is set to `'buffer'`,
     * the path returned will be passed as a `Buffer` object.
     *
     * On Linux, when Node.js is linked against musl libc, the procfs file system must
     * be mounted on `/proc` in order for this function to work. Glibc does not have
     * this restriction.
     * @since v9.2.0
     */
    function native(path: PathLike, options: EncodingOption, callback: (err: NodeJS.ErrnoException | null, resolvedPath: string) => void): void;
    function native(path: PathLike, options: BufferEncodingOption, callback: (err: NodeJS.ErrnoException | null, resolvedPath: Buffer) => void): void;
    function native(path: PathLike, options: EncodingOption, callback: (err: NodeJS.ErrnoException | null, resolvedPath: string | Buffer) => void): void;
    function native(path: PathLike, callback: (err: NodeJS.ErrnoException | null, resolvedPath: string) => void): void;
}
/**
 * Returns the resolved pathname.
 *
 * For detailed information, see the documentation of the asynchronous version of
 * this API: {@link realpath}.
 * @since v0.1.31
 */
export function realpathSync(path: PathLike, options?: EncodingOption): string;
/**
 * Synchronous realpath(3) - return the canonicalized absolute pathname.
 * Overload: `'buffer'` encoding — returns the resolved path as a `Buffer`.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
 */
export function realpathSync(path: PathLike, options: BufferEncodingOption): Buffer;
/**
 * Synchronous realpath(3) - return the canonicalized absolute pathname.
 * Overload: encoding not statically known — returns `string` or `Buffer`.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
 */
export function realpathSync(path: PathLike, options?: EncodingOption): string | Buffer;
export namespace realpathSync {
    // Synchronous counterpart of `realpath.native`; see that declaration for platform notes.
    function native(path: PathLike, options?: EncodingOption): string;
    function native(path: PathLike, options: BufferEncodingOption): Buffer;
    function native(path: PathLike, options?: EncodingOption): string | Buffer;
}
/**
 * Asynchronously removes a file or symbolic link. No arguments other than a
 * possible exception are given to the completion callback.
 *
 * ```js
 * import { unlink } from 'fs';
 * // Assuming that 'path/file.txt' is a regular file.
 * unlink('path/file.txt', (err) => {
 *   if (err) throw err;
 *   console.log('path/file.txt was deleted');
 * });
 * ```
 *
 * `fs.unlink()` will not work on a directory, empty or otherwise. To remove a
 * directory, use {@link rmdir}.
 *
 * See the POSIX [`unlink(2)`](http://man7.org/linux/man-pages/man2/unlink.2.html) documentation for more details.
 * @since v0.0.2
 */
export function unlink(path: PathLike, callback: NoParamCallback): void;
export namespace unlink {
    /**
     * Asynchronous unlink(2) - delete a name and possibly the file it refers to.
     * Promise form used by `util.promisify`.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     */
    function __promisify__(path: PathLike): Promise<void>;
}
/**
 * Synchronous [`unlink(2)`](http://man7.org/linux/man-pages/man2/unlink.2.html). Returns `undefined`.
 * @since v0.1.21
 */
export function unlinkSync(path: PathLike): void;
export interface RmDirOptions {
    /**
     * If an `EBUSY`, `EMFILE`, `ENFILE`, `ENOTEMPTY`, or
     * `EPERM` error is encountered, Node.js will retry the operation with a linear
     * backoff wait of `retryDelay` ms longer on each try. This option represents the
     * number of retries. This option is ignored if the `recursive` option is not
     * `true`.
     * @default 0
     */
    maxRetries?: number | undefined;
    /**
     * @deprecated since v14.14.0 — in future versions of Node.js this option will trigger a warning,
     * and `fs.rmdir(path, { recursive: true })` will throw if `path` does not exist or is a file.
     * Use `fs.rm(path, { recursive: true, force: true })` instead.
     *
     * If `true`, perform a recursive directory removal. In
     * recursive mode, operations are retried on failure.
     * @default false
     */
    recursive?: boolean | undefined;
    /**
     * The amount of time in milliseconds to wait between retries.
     * This option is ignored if the `recursive` option is not `true`.
     * @default 100
     */
    retryDelay?: number | undefined;
}
/**
 * Asynchronous [`rmdir(2)`](http://man7.org/linux/man-pages/man2/rmdir.2.html). No arguments other than a possible exception are given
 * to the completion callback.
 *
 * Using `fs.rmdir()` on a file (not a directory) results in an `ENOENT` error on
 * Windows and an `ENOTDIR` error on POSIX.
 *
 * To get a behavior similar to the `rm -rf` Unix command, use {@link rm} with options `{ recursive: true, force: true }`.
 * @since v0.0.2
 */
export function rmdir(path: PathLike, callback: NoParamCallback): void;
export function rmdir(path: PathLike, options: RmDirOptions, callback: NoParamCallback): void;
export namespace rmdir {
    /**
     * Asynchronous rmdir(2) - delete a directory.
     * Promise form used by `util.promisify`.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     */
    function __promisify__(path: PathLike, options?: RmDirOptions): Promise<void>;
}
/**
 * Synchronous [`rmdir(2)`](http://man7.org/linux/man-pages/man2/rmdir.2.html). Returns `undefined`.
 *
 * Using `fs.rmdirSync()` on a file (not a directory) results in an `ENOENT` error
 * on Windows and an `ENOTDIR` error on POSIX.
 *
 * To get a behavior similar to the `rm -rf` Unix command, use {@link rmSync} with options `{ recursive: true, force: true }`.
 * @since v0.1.21
 */
export function rmdirSync(path: PathLike, options?: RmDirOptions): void;
export interface RmOptions {
    /**
     * When `true`, exceptions will be ignored if `path` does not exist.
     * @default false
     */
    force?: boolean | undefined;
    /**
     * If an `EBUSY`, `EMFILE`, `ENFILE`, `ENOTEMPTY`, or
     * `EPERM` error is encountered, Node.js will retry the operation with a linear
     * backoff wait of `retryDelay` ms longer on each try. This option represents the
     * number of retries. This option is ignored if the `recursive` option is not
     * `true`.
     * @default 0
     */
    maxRetries?: number | undefined;
    /**
     * If `true`, perform a recursive directory removal. In
     * recursive mode, operations are retried on failure.
     * @default false
     */
    recursive?: boolean | undefined;
    /**
     * The amount of time in milliseconds to wait between retries.
     * This option is ignored if the `recursive` option is not `true`.
     * @default 100
     */
    retryDelay?: number | undefined;
}
/**
 * Asynchronously removes files and directories (modeled on the standard POSIX `rm` utility). No arguments other than a possible exception are given to the
 * completion callback.
 * @since v14.14.0
 */
export function rm(path: PathLike, callback: NoParamCallback): void;
export function rm(path: PathLike, options: RmOptions, callback: NoParamCallback): void;
export namespace rm {
    /**
     * Asynchronously removes files and directories (modeled on the standard POSIX `rm` utility).
     * Promise form used by `util.promisify`.
     */
    function __promisify__(path: PathLike, options?: RmOptions): Promise<void>;
}
/**
 * Synchronously removes files and directories (modeled on the standard POSIX `rm` utility). Returns `undefined`.
 * @since v14.14.0
 */
export function rmSync(path: PathLike, options?: RmOptions): void;
export interface MakeDirectoryOptions {
    /**
     * Indicates whether parent folders should be created.
     * If a folder was created, the path to the first created folder will be returned.
     * @default false
     */
    recursive?: boolean | undefined;
    /**
     * A file mode. If a string is passed, it is parsed as an octal integer. If not specified, defaults to `0o777`.
     * @default 0o777
     */
    mode?: Mode | undefined;
}
/**
 * Asynchronously creates a directory.
 *
 * The callback is given a possible exception and, if `recursive` is `true`, the
 * first directory path created, `(err[, path])`. `path` can still be `undefined` when `recursive` is `true`, if no directory was
 * created.
 *
 * The optional `options` argument can be an integer specifying `mode` (permission
 * and sticky bits), or an object with a `mode` property and a `recursive` property indicating whether parent directories should be created. Calling `fs.mkdir()` when `path` is a directory that
 * exists results in an error only
 * when `recursive` is false.
 *
 * ```js
 * import { mkdir } from 'fs';
 *
 * // Creates /tmp/a/apple, regardless of whether `/tmp` and /tmp/a exist.
 * mkdir('/tmp/a/apple', { recursive: true }, (err) => {
 *   if (err) throw err;
 * });
 * ```
 *
 * On Windows, using `fs.mkdir()` on the root directory even with recursion will
 * result in an error:
 *
 * ```js
 * import { mkdir } from 'fs';
 *
 * mkdir('/', { recursive: true }, (err) => {
 *   // => [Error: EPERM: operation not permitted, mkdir 'C:\']
 * });
 * ```
 *
 * See the POSIX [`mkdir(2)`](http://man7.org/linux/man-pages/man2/mkdir.2.html) documentation for more details.
 * @since v0.1.8
 */
export function mkdir(
    path: PathLike,
    options: MakeDirectoryOptions & {
        recursive: true;
    },
    callback: (err: NodeJS.ErrnoException | null, path?: string) => void
): void;
/**
 * Asynchronous mkdir(2) - create a directory.
 * Overload: `recursive` is known to be `false`/absent — the callback receives no created path.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param options Either the file mode, or an object optionally specifying the file mode and whether parent folders
 * should be created. If a string is passed, it is parsed as an octal integer. If not specified, defaults to `0o777`.
 */
export function mkdir(
    path: PathLike,
    options:
        | Mode
        | (MakeDirectoryOptions & {
              recursive?: false | undefined;
          })
        | null
        | undefined,
    callback: NoParamCallback
): void;
/**
 * Asynchronous mkdir(2) - create a directory.
 * Overload: `recursive` not statically known — the callback may receive the first created path.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param options Either the file mode, or an object optionally specifying the file mode and whether parent folders
 * should be created. If a string is passed, it is parsed as an octal integer. If not specified, defaults to `0o777`.
 */
export function mkdir(path: PathLike, options: Mode | MakeDirectoryOptions | null | undefined, callback: (err: NodeJS.ErrnoException | null, path?: string) => void): void;
/**
 * Asynchronous mkdir(2) - create a directory with a mode of `0o777`.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 */
export function mkdir(path: PathLike, callback: NoParamCallback): void;
export namespace mkdir {
    /**
     * Asynchronous mkdir(2) - create a directory.
     * Promise form used by `util.promisify`; mirrors the callback overloads above.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     * @param options Either the file mode, or an object optionally specifying the file mode and whether parent folders
     * should be created. If a string is passed, it is parsed as an octal integer. If not specified, defaults to `0o777`.
     */
    function __promisify__(
        path: PathLike,
        options: MakeDirectoryOptions & {
            recursive: true;
        }
    ): Promise<string | undefined>;
    /**
     * Asynchronous mkdir(2) - create a directory.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     * @param options Either the file mode, or an object optionally specifying the file mode and whether parent folders
     * should be created. If a string is passed, it is parsed as an octal integer. If not specified, defaults to `0o777`.
     */
    function __promisify__(
        path: PathLike,
        options?:
            | Mode
            | (MakeDirectoryOptions & {
                  recursive?: false | undefined;
              })
            | null
    ): Promise<void>;
    /**
     * Asynchronous mkdir(2) - create a directory.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     * @param options Either the file mode, or an object optionally specifying the file mode and whether parent folders
     * should be created. If a string is passed, it is parsed as an octal integer. If not specified, defaults to `0o777`.
     */
    function __promisify__(path: PathLike, options?: Mode | MakeDirectoryOptions | null): Promise<string | undefined>;
}
/**
 * Synchronously creates a directory. Returns `undefined`, or if `recursive` is `true`, the first directory path created.
 * This is the synchronous version of {@link mkdir}.
 *
 * See the POSIX [`mkdir(2)`](http://man7.org/linux/man-pages/man2/mkdir.2.html) documentation for more details.
 * @since v0.1.21
 */
export function mkdirSync(
    path: PathLike,
    options: MakeDirectoryOptions & {
        recursive: true;
    }
): string | undefined;
/**
 * Synchronous mkdir(2) - create a directory.
 * Overload: `recursive` is known to be `false`/absent — returns nothing.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param options Either the file mode, or an object optionally specifying the file mode and whether parent folders
 * should be created. If a string is passed, it is parsed as an octal integer. If not specified, defaults to `0o777`.
 */
export function mkdirSync(
    path: PathLike,
    options?:
        | Mode
        | (MakeDirectoryOptions & {
              recursive?: false | undefined;
          })
        | null
): void;
/**
 * Synchronous mkdir(2) - create a directory.
 * Overload: `recursive` not statically known — may return the first created path.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param options Either the file mode, or an object optionally specifying the file mode and whether parent folders
 * should be created. If a string is passed, it is parsed as an octal integer. If not specified, defaults to `0o777`.
 */
export function mkdirSync(path: PathLike, options?: Mode | MakeDirectoryOptions | null): string | undefined;
/**
 * Creates a unique temporary directory.
 *
 * Generates six random characters to be appended behind a required `prefix` to create a unique temporary directory. Due to platform
 * inconsistencies, avoid trailing `X` characters in `prefix`. Some platforms,
 * notably the BSDs, can return more than six random characters, and replace
 * trailing `X` characters in `prefix` with random characters.
 *
 * The created directory path is passed as a string to the callback's second
 * parameter.
 *
 * The optional `options` argument can be a string specifying an encoding, or an
 * object with an `encoding` property specifying the character encoding to use.
 *
 * ```js
 * import { mkdtemp } from 'fs';
 *
 * mkdtemp(path.join(os.tmpdir(), 'foo-'), (err, directory) => {
 *   if (err) throw err;
 *   console.log(directory);
 *   // Prints: /tmp/foo-itXde2 or C:\Users\...\AppData\Local\Temp\foo-itXde2
 * });
 * ```
 *
 * The `fs.mkdtemp()` method will append the six randomly selected characters
 * directly to the `prefix` string. For instance, given a directory `/tmp`, if the
 * intention is to create a temporary directory _within_ `/tmp`, the `prefix` must end with a trailing platform-specific path separator
 * (`require('path').sep`).
 *
 * ```js
 * import { tmpdir } from 'os';
 * import { mkdtemp } from 'fs';
 *
 * // The parent directory for the new temporary directory
 * const tmpDir = tmpdir();
 *
 * // This method is *INCORRECT*:
 * mkdtemp(tmpDir, (err, directory) => {
 *   if (err) throw err;
 *   console.log(directory);
 *   // Will print something similar to `/tmpabc123`.
 *   // A new temporary directory is created at the file system root
 *   // rather than *within* the /tmp directory.
 * });
 *
 * // This method is *CORRECT*:
 * import { sep } from 'path';
 * mkdtemp(`${tmpDir}${sep}`, (err, directory) => {
 *   if (err) throw err;
 *   console.log(directory);
 *   // Will print something similar to `/tmp/abc123`.
 *   // A new temporary directory is created within
 *   // the /tmp directory.
 * });
 * ```
 * @since v5.10.0
 */
export function mkdtemp(prefix: string, options: EncodingOption, callback: (err: NodeJS.ErrnoException | null, folder: string) => void): void;
/**
 * Asynchronously creates a unique temporary directory.
 * Generates six random characters to be appended behind a required prefix to create a unique temporary directory.
 * Overload: `'buffer'` encoding — the created path is delivered as a `Buffer`.
 * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
 */
export function mkdtemp(
    prefix: string,
    options:
        | 'buffer'
        | {
              encoding: 'buffer';
          },
    callback: (err: NodeJS.ErrnoException | null, folder: Buffer) => void
): void;
/**
 * Asynchronously creates a unique temporary directory.
 * Generates six random characters to be appended behind a required prefix to create a unique temporary directory.
 * Overload: encoding not statically known — the created path may be a `string` or a `Buffer`.
 * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
 */
export function mkdtemp(prefix: string, options: EncodingOption, callback: (err: NodeJS.ErrnoException | null, folder: string | Buffer) => void): void;
/**
 * Asynchronously creates a unique temporary directory.
 * Generates six random characters to be appended behind a required prefix to create a unique temporary directory.
 */
export function mkdtemp(prefix: string, callback: (err: NodeJS.ErrnoException | null, folder: string) => void): void;
export namespace mkdtemp {
    /**
     * Asynchronously creates a unique temporary directory.
     * Generates six random characters to be appended behind a required prefix to create a unique temporary directory.
     * Promise form used by `util.promisify`; mirrors the callback overloads above.
     * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
     */
    function __promisify__(prefix: string, options?: EncodingOption): Promise<string>;
    /**
     * Asynchronously creates a unique temporary directory.
     * Generates six random characters to be appended behind a required prefix to create a unique temporary directory.
     * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
     */
    function __promisify__(prefix: string, options: BufferEncodingOption): Promise<Buffer>;
    /**
     * Asynchronously creates a unique temporary directory.
     * Generates six random characters to be appended behind a required prefix to create a unique temporary directory.
     * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
     */
    function __promisify__(prefix: string, options?: EncodingOption): Promise<string | Buffer>;
}
/**
 * Returns the created directory path.
 *
 * For detailed information, see the documentation of the asynchronous version of
 * this API: {@link mkdtemp}.
 *
 * The optional `options` argument can be a string specifying an encoding, or an
 * object with an `encoding` property specifying the character encoding to use.
 * @since v5.10.0
 */
export function mkdtempSync(prefix: string, options?: EncodingOption): string;
/**
 * Synchronously creates a unique temporary directory.
 * Generates six random characters to be appended behind a required prefix to create a unique temporary directory.
 * Overload: `'buffer'` encoding — returns the created path as a `Buffer`.
 * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
 */
export function mkdtempSync(prefix: string, options: BufferEncodingOption): Buffer;
/**
 * Synchronously creates a unique temporary directory.
 * Generates six random characters to be appended behind a required prefix to create a unique temporary directory.
 * Overload: encoding not statically known — returns `string` or `Buffer`.
 * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
 */
export function mkdtempSync(prefix: string, options?: EncodingOption): string | Buffer;
/**
 * Reads the contents of a directory. The callback gets two arguments `(err, files)` where `files` is an array of the names of the files in the directory excluding `'.'` and `'..'`.
 *
 * See the POSIX [`readdir(3)`](http://man7.org/linux/man-pages/man3/readdir.3.html) documentation for more details.
 *
 * The optional `options` argument can be a string specifying an encoding, or an
 * object with an `encoding` property specifying the character encoding to use for
 * the filenames passed to the callback. If the `encoding` is set to `'buffer'`,
 * the filenames returned will be passed as `Buffer` objects.
 *
 * If `options.withFileTypes` is set to `true`, the `files` array will contain `fs.Dirent` objects.
 * @since v0.1.8
 */
export function readdir(
    path: PathLike,
    options:
        | {
              encoding: BufferEncoding | null;
              withFileTypes?: false | undefined;
          }
        | BufferEncoding
        | undefined
        | null,
    callback: (err: NodeJS.ErrnoException | null, files: string[]) => void
): void;
/**
 * Asynchronous readdir(3) - read a directory.
 * Overload: `'buffer'` encoding — filenames are delivered as `Buffer` objects.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
 */
export function readdir(
    path: PathLike,
    options:
        | {
              encoding: 'buffer';
              withFileTypes?: false | undefined;
          }
        | 'buffer',
    callback: (err: NodeJS.ErrnoException | null, files: Buffer[]) => void
): void;
/**
 * Asynchronous readdir(3) - read a directory.
 * Overload: encoding not statically known — filenames may be `string[]` or `Buffer[]`.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
 */
export function readdir(
    path: PathLike,
    options:
        | (ObjectEncodingOptions & {
              withFileTypes?: false | undefined;
          })
        | BufferEncoding
        | undefined
        | null,
    callback: (err: NodeJS.ErrnoException | null, files: string[] | Buffer[]) => void
): void;
/**
 * Asynchronous readdir(3) - read a directory.
 * Overload: no `options` — filenames are delivered as `'utf8'` strings.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 */
export function readdir(path: PathLike, callback: (err: NodeJS.ErrnoException | null, files: string[]) => void): void;
/**
 * Asynchronous readdir(3) - read a directory.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param options If called with `withFileTypes: true` the result data will be an array of Dirent.
 */
export function readdir(
    path: PathLike,
    options: ObjectEncodingOptions & {
        withFileTypes: true;
    },
    callback: (err: NodeJS.ErrnoException | null, files: Dirent[]) => void
): void;
export namespace readdir {
    /**
     * Asynchronous readdir(3) - read a directory.
     * Promise form used by `util.promisify`; mirrors the callback overloads above.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
     */
    function __promisify__(
        path: PathLike,
        options?:
            | {
                  encoding: BufferEncoding | null;
                  withFileTypes?: false | undefined;
              }
            | BufferEncoding
            | null
    ): Promise<string[]>;
    /**
     * Asynchronous readdir(3) - read a directory.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
     */
    function __promisify__(
        path: PathLike,
        options:
            | 'buffer'
            | {
                  encoding: 'buffer';
                  withFileTypes?: false | undefined;
              }
    ): Promise<Buffer[]>;
    /**
     * Asynchronous readdir(3) - read a directory.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
     */
    function __promisify__(
        path: PathLike,
        options?:
            | (ObjectEncodingOptions & {
                  withFileTypes?: false | undefined;
              })
            | BufferEncoding
            | null
    ): Promise<string[] | Buffer[]>;
    /**
     * Asynchronous readdir(3) - read a directory.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     * @param options If called with `withFileTypes: true` the result data will be an array of Dirent
     */
    function __promisify__(
        path: PathLike,
        options: ObjectEncodingOptions & {
            withFileTypes: true;
        }
    ): Promise<Dirent[]>;
}
/**
 * Reads the contents of the directory.
 *
 * See the POSIX [`readdir(3)`](http://man7.org/linux/man-pages/man3/readdir.3.html) documentation for more details.
 *
 * The optional `options` argument can be a string specifying an encoding, or an
 * object with an `encoding` property specifying the character encoding to use for
 * the filenames returned. If the `encoding` is set to `'buffer'`,
 * the filenames returned will be passed as `Buffer` objects.
 *
 * If `options.withFileTypes` is set to `true`, the result will contain `fs.Dirent` objects.
 * @since v0.1.21
 */
export function readdirSync(
    path: PathLike,
    options?:
        | {
              encoding: BufferEncoding | null;
              withFileTypes?: false | undefined;
          }
        | BufferEncoding
        | null
): string[];
/**
 * Synchronous readdir(3) - read a directory.
 * Overload: `'buffer'` encoding — returns filenames as `Buffer` objects.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
 */
export function readdirSync(
    path: PathLike,
    options:
        | {
              encoding: 'buffer';
              withFileTypes?: false | undefined;
          }
        | 'buffer'
): Buffer[];
/**
 * Synchronous readdir(3) - read a directory.
 * Overload: encoding not statically known — returns `string[]` or `Buffer[]`.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param options The encoding (or an object specifying the encoding), used as the encoding of the result. If not provided, `'utf8'` is used.
 */
export function readdirSync(
    path: PathLike,
    options?:
        | (ObjectEncodingOptions & {
              withFileTypes?: false | undefined;
          })
        | BufferEncoding
        | null
): string[] | Buffer[];
/**
 * Synchronous readdir(3) - read a directory.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param options If called with `withFileTypes: true` the result data will be an array of Dirent.
 */
export function readdirSync(
    path: PathLike,
    options: ObjectEncodingOptions & {
        withFileTypes: true;
    }
): Dirent[];
/**
 * Closes the file descriptor. No arguments other than a possible exception are
 * given to the completion callback.
 *
 * Calling `fs.close()` on any file descriptor (`fd`) that is currently in use
 * through any other `fs` operation may lead to undefined behavior.
 *
 * See the POSIX [`close(2)`](http://man7.org/linux/man-pages/man2/close.2.html) documentation for more detail.
 * @since v0.0.2
 * @param fd A file descriptor.
 * @param callback Invoked when the descriptor has been closed (or closing failed).
 */
export function close(fd: number, callback?: NoParamCallback): void;
export namespace close {
    /**
     * Asynchronous close(2) - close a file descriptor.
     * @param fd A file descriptor.
     */
    function __promisify__(fd: number): Promise<void>;
}
/**
 * Closes the file descriptor. Returns `undefined`.
 *
 * Calling `fs.closeSync()` on any file descriptor (`fd`) that is currently in use
 * through any other `fs` operation may lead to undefined behavior.
 *
 * See the POSIX [`close(2)`](http://man7.org/linux/man-pages/man2/close.2.html) documentation for more detail.
 * @since v0.1.21
 * @param fd A file descriptor.
 */
export function closeSync(fd: number): void;
/**
 * Asynchronous file open. See the POSIX [`open(2)`](http://man7.org/linux/man-pages/man2/open.2.html) documentation for more details.
 *
 * `mode` sets the file mode (permission and sticky bits), but only if the file was
 * created. On Windows, only the write permission can be manipulated; see {@link chmod}.
 *
 * The callback gets two arguments `(err, fd)`.
 *
 * Some characters (`< > : " / \ | ? *`) are reserved under Windows as documented
 * by [Naming Files, Paths, and Namespaces](https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file). Under NTFS, if the filename contains
 * a colon, Node.js will open a file system stream, as described by [this MSDN page](https://docs.microsoft.com/en-us/windows/desktop/FileIO/using-streams).
 *
 * Functions based on `fs.open()` exhibit this behavior as well:`fs.writeFile()`, `fs.readFile()`, etc.
 * @since v0.0.2
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param [flags='r'] See `support of file system `flags``.
 * @param [mode=0o666]
 * @param callback Receives `(err, fd)` where `fd` is the opened file descriptor.
 */
export function open(path: PathLike, flags: OpenMode | undefined, mode: Mode | undefined | null, callback: (err: NodeJS.ErrnoException | null, fd: number) => void): void;
/**
 * Asynchronous open(2) - open and possibly create a file. If the file is created, its mode will be `0o666`.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param [flags='r'] See `support of file system `flags``.
 * @param callback Receives `(err, fd)` where `fd` is the opened file descriptor.
 */
export function open(path: PathLike, flags: OpenMode | undefined, callback: (err: NodeJS.ErrnoException | null, fd: number) => void): void;
/**
 * Asynchronous open(2) - open and possibly create a file. If the file is created, its mode will be `0o666`.
 * Opens with the default flags (`'r'`).
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param callback Receives `(err, fd)` where `fd` is the opened file descriptor.
 */
export function open(path: PathLike, callback: (err: NodeJS.ErrnoException | null, fd: number) => void): void;
export namespace open {
    /**
     * Asynchronous open(2) - open and possibly create a file.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     * @param mode A file mode. If a string is passed, it is parsed as an octal integer. If not supplied, defaults to `0o666`.
     * @return A promise resolving to the opened file descriptor.
     */
    function __promisify__(path: PathLike, flags: OpenMode, mode?: Mode | null): Promise<number>;
}
/**
 * Returns an integer representing the file descriptor.
 *
 * For detailed information, see the documentation of the asynchronous version of
 * this API: {@link open}.
 * @since v0.1.21
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param [flags='r']
 * @param [mode=0o666]
 * @return The opened file descriptor.
 */
export function openSync(path: PathLike, flags: OpenMode, mode?: Mode | null): number;
/**
 * Change the file system timestamps of the object referenced by `path`.
 *
 * The `atime` and `mtime` arguments follow these rules:
 *
 * * Values can be either numbers representing Unix epoch time in seconds,`Date`s, or a numeric string like `'123456789.0'`.
 * * If the value can not be converted to a number, or is `NaN`, `Infinity` or`-Infinity`, an `Error` will be thrown.
 * @since v0.4.2
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param atime The last access time.
 * @param mtime The last modified time.
 * @param callback Invoked when the timestamps have been changed (or on error).
 */
export function utimes(path: PathLike, atime: TimeLike, mtime: TimeLike, callback: NoParamCallback): void;
export namespace utimes {
    /**
     * Asynchronously change file timestamps of the file referenced by the supplied path.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     * @param atime The last access time. If a string is provided, it will be coerced to number.
     * @param mtime The last modified time. If a string is provided, it will be coerced to number.
     */
    function __promisify__(path: PathLike, atime: TimeLike, mtime: TimeLike): Promise<void>;
}
/**
 * Returns `undefined`.
 *
 * For detailed information, see the documentation of the asynchronous version of
 * this API: {@link utimes}.
 * @since v0.4.2
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * @param atime The last access time.
 * @param mtime The last modified time.
 */
export function utimesSync(path: PathLike, atime: TimeLike, mtime: TimeLike): void;
/**
 * Change the file system timestamps of the object referenced by the supplied file
 * descriptor. See {@link utimes}.
 * @since v0.4.2
 * @param fd A file descriptor.
 * @param atime The last access time.
 * @param mtime The last modified time.
 * @param callback Invoked when the timestamps have been changed (or on error).
 */
export function futimes(fd: number, atime: TimeLike, mtime: TimeLike, callback: NoParamCallback): void;
export namespace futimes {
    /**
     * Asynchronously change file timestamps of the file referenced by the supplied file descriptor.
     * @param fd A file descriptor.
     * @param atime The last access time. If a string is provided, it will be coerced to number.
     * @param mtime The last modified time. If a string is provided, it will be coerced to number.
     */
    function __promisify__(fd: number, atime: TimeLike, mtime: TimeLike): Promise<void>;
}
/**
 * Synchronous version of {@link futimes}. Returns `undefined`.
 * @since v0.4.2
 * @param fd A file descriptor.
 * @param atime The last access time.
 * @param mtime The last modified time.
 */
export function futimesSync(fd: number, atime: TimeLike, mtime: TimeLike): void;
/**
 * Request that all data for the open file descriptor is flushed to the storage
 * device. The specific implementation is operating system and device specific.
 * Refer to the POSIX [`fsync(2)`](http://man7.org/linux/man-pages/man2/fsync.2.html) documentation for more detail. No arguments other
 * than a possible exception are given to the completion callback.
 * @since v0.1.96
 * @param fd A file descriptor.
 * @param callback Invoked when the flush completes (or fails).
 */
export function fsync(fd: number, callback: NoParamCallback): void;
export namespace fsync {
    /**
     * Asynchronous fsync(2) - synchronize a file's in-core state with the underlying storage device.
     * @param fd A file descriptor.
     */
    function __promisify__(fd: number): Promise<void>;
}
/**
 * Request that all data for the open file descriptor is flushed to the storage
 * device. The specific implementation is operating system and device specific.
 * Refer to the POSIX [`fsync(2)`](http://man7.org/linux/man-pages/man2/fsync.2.html) documentation for more detail. Returns `undefined`.
 * @since v0.1.96
 * @param fd A file descriptor.
 */
export function fsyncSync(fd: number): void;
/**
 * Write `buffer` to the file specified by `fd`.
 *
 * `offset` determines the part of the buffer to be written, and `length` is
 * an integer specifying the number of bytes to write.
 *
 * `position` refers to the offset from the beginning of the file where this data
 * should be written. If `typeof position !== 'number'`, the data will be written
 * at the current position. See [`pwrite(2)`](http://man7.org/linux/man-pages/man2/pwrite.2.html).
 *
 * The callback will be given three arguments `(err, bytesWritten, buffer)` where`bytesWritten` specifies how many _bytes_ were written from `buffer`.
 *
 * If this method is invoked as its `util.promisify()` ed version, it returns
 * a promise for an `Object` with `bytesWritten` and `buffer` properties.
 *
 * It is unsafe to use `fs.write()` multiple times on the same file without waiting
 * for the callback. For this scenario, {@link createWriteStream} is
 * recommended.
 *
 * On Linux, positional writes don't work when the file is opened in append mode.
 * The kernel ignores the position argument and always appends the data to
 * the end of the file.
 * @since v0.0.2
 * @param fd A file descriptor.
 */
export function write<TBuffer extends NodeJS.ArrayBufferView>(
    fd: number,
    buffer: TBuffer,
    offset: number | undefined | null,
    length: number | undefined | null,
    position: number | undefined | null,
    callback: (err: NodeJS.ErrnoException | null, written: number, buffer: TBuffer) => void
): void;
/**
 * Asynchronously writes `buffer` to the file referenced by the supplied file descriptor.
 * Writes at the current file position (no `position` argument).
 * @param fd A file descriptor.
 * @param offset The part of the buffer to be written. If not supplied, defaults to `0`.
 * @param length The number of bytes to write. If not supplied, defaults to `buffer.length - offset`.
 */
export function write<TBuffer extends NodeJS.ArrayBufferView>(
    fd: number,
    buffer: TBuffer,
    offset: number | undefined | null,
    length: number | undefined | null,
    callback: (err: NodeJS.ErrnoException | null, written: number, buffer: TBuffer) => void
): void;
/**
 * Asynchronously writes `buffer` to the file referenced by the supplied file descriptor.
 * Writes from `offset` through the end of the buffer, at the current file position.
 * @param fd A file descriptor.
 * @param offset The part of the buffer to be written. If not supplied, defaults to `0`.
 */
export function write<TBuffer extends NodeJS.ArrayBufferView>(
    fd: number,
    buffer: TBuffer,
    offset: number | undefined | null,
    callback: (err: NodeJS.ErrnoException | null, written: number, buffer: TBuffer) => void
): void;
/**
 * Asynchronously writes `buffer` to the file referenced by the supplied file descriptor.
 * Writes the entire buffer at the current file position.
 * @param fd A file descriptor.
 */
export function write<TBuffer extends NodeJS.ArrayBufferView>(fd: number, buffer: TBuffer, callback: (err: NodeJS.ErrnoException | null, written: number, buffer: TBuffer) => void): void;
/**
 * Asynchronously writes `string` to the file referenced by the supplied file descriptor.
 * @param fd A file descriptor.
 * @param string A string to write.
 * @param position The offset from the beginning of the file where this data should be written. If not supplied, defaults to the current position.
 * @param encoding The expected string encoding.
 */
export function write(
    fd: number,
    string: string,
    position: number | undefined | null,
    encoding: BufferEncoding | undefined | null,
    callback: (err: NodeJS.ErrnoException | null, written: number, str: string) => void
): void;
/**
 * Asynchronously writes `string` to the file referenced by the supplied file descriptor.
 * Uses the default encoding (no `encoding` argument).
 * @param fd A file descriptor.
 * @param string A string to write.
 * @param position The offset from the beginning of the file where this data should be written. If not supplied, defaults to the current position.
 */
export function write(fd: number, string: string, position: number | undefined | null, callback: (err: NodeJS.ErrnoException | null, written: number, str: string) => void): void;
/**
 * Asynchronously writes `string` to the file referenced by the supplied file descriptor.
 * Writes at the current file position with the default encoding.
 * @param fd A file descriptor.
 * @param string A string to write.
 */
export function write(fd: number, string: string, callback: (err: NodeJS.ErrnoException | null, written: number, str: string) => void): void;
export namespace write {
    /**
     * Asynchronously writes `buffer` to the file referenced by the supplied file descriptor.
     * @param fd A file descriptor.
     * @param offset The part of the buffer to be written. If not supplied, defaults to `0`.
     * @param length The number of bytes to write. If not supplied, defaults to `buffer.length - offset`.
     * @param position The offset from the beginning of the file where this data should be written. If not supplied, defaults to the current position.
     */
    function __promisify__<TBuffer extends NodeJS.ArrayBufferView>(
        fd: number,
        buffer?: TBuffer,
        offset?: number,
        length?: number,
        position?: number | null
    ): Promise<{
        bytesWritten: number;
        buffer: TBuffer;
    }>;
    /**
     * Asynchronously writes `string` to the file referenced by the supplied file descriptor.
     * @param fd A file descriptor.
     * @param string A string to write.
     * @param position The offset from the beginning of the file where this data should be written. If not supplied, defaults to the current position.
     * @param encoding The expected string encoding.
     */
    function __promisify__(
        fd: number,
        string: string,
        position?: number | null,
        encoding?: BufferEncoding | null
    ): Promise<{
        bytesWritten: number;
        buffer: string;
    }>;
}
/**
 * For detailed information, see the documentation of the asynchronous version of
 * this API: {@link write}.
 * @since v0.1.21
 * @param fd A file descriptor.
 * @param buffer The buffer (or view) whose contents will be written.
 * @return The number of bytes written.
 */
export function writeSync(fd: number, buffer: NodeJS.ArrayBufferView, offset?: number | null, length?: number | null, position?: number | null): number;
/**
 * Synchronously writes `string` to the file referenced by the supplied file descriptor, returning the number of bytes written.
 * @param fd A file descriptor.
 * @param string A string to write.
 * @param position The offset from the beginning of the file where this data should be written. If not supplied, defaults to the current position.
 * @param encoding The expected string encoding.
 * @return The number of bytes written.
 */
export function writeSync(fd: number, string: string, position?: number | null, encoding?: BufferEncoding | null): number;
// File position used by the read APIs; `bigint` accommodates offsets too large
// for a JavaScript `number` to represent exactly.
export type ReadPosition = number | bigint;
// Options accepted by `readSync` when called with an options object.
export interface ReadSyncOptions {
    /**
     * @default 0
     */
    offset?: number | undefined;
    /**
     * @default `length of buffer`
     */
    length?: number | undefined;
    /**
     * @default null
     */
    position?: ReadPosition | null | undefined;
}
// Options accepted by the options-object form of `read`; additionally allows
// supplying the destination buffer.
export interface ReadAsyncOptions<TBuffer extends NodeJS.ArrayBufferView> extends ReadSyncOptions {
    buffer?: TBuffer;
}
/**
 * Read data from the file specified by `fd`.
 *
 * The callback is given the three arguments, `(err, bytesRead, buffer)`.
 *
 * If the file is not modified concurrently, the end-of-file is reached when the
 * number of bytes read is zero.
 *
 * If this method is invoked as its `util.promisify()` ed version, it returns
 * a promise for an `Object` with `bytesRead` and `buffer` properties.
 * @since v0.0.2
 * @param buffer The buffer that the data will be written to.
 * @param offset The position in `buffer` to write the data to.
 * @param length The number of bytes to read.
 * @param position Specifies where to begin reading from in the file. If `position` is `null` or `-1`, data will be read from the current file position, and the file position will be updated. If
 * `position` is an integer, the file position will be unchanged.
 */
export function read<TBuffer extends NodeJS.ArrayBufferView>(
    fd: number,
    buffer: TBuffer,
    offset: number,
    length: number,
    position: ReadPosition | null,
    callback: (err: NodeJS.ErrnoException | null, bytesRead: number, buffer: TBuffer) => void
): void;
/**
 * Similar to the above `fs.read` function, this version takes an optional `options` object.
 * If not otherwise specified in an `options` object,
 * `buffer` defaults to `Buffer.alloc(16384)`,
 * `offset` defaults to `0`,
 * `length` defaults to `buffer.byteLength`, `- offset` as of Node 17.6.0
 * `position` defaults to `null`
 * @since v12.17.0, 13.11.0
 */
export function read<TBuffer extends NodeJS.ArrayBufferView>(
    fd: number,
    options: ReadAsyncOptions<TBuffer>,
    callback: (err: NodeJS.ErrnoException | null, bytesRead: number, buffer: TBuffer) => void
): void;
// Convenience overload: all read options take their defaults (see above).
export function read(fd: number, callback: (err: NodeJS.ErrnoException | null, bytesRead: number, buffer: NodeJS.ArrayBufferView) => void): void;
export namespace read {
    /**
     * @param fd A file descriptor.
     * @param buffer The buffer that the data will be written to.
     * @param offset The offset in the buffer at which to start writing.
     * @param length The number of bytes to read.
     * @param position The offset from the beginning of the file from which data should be read. If `null`, data will be read from the current position.
     */
    function __promisify__<TBuffer extends NodeJS.ArrayBufferView>(
        fd: number,
        buffer: TBuffer,
        offset: number,
        length: number,
        position: number | null
    ): Promise<{
        bytesRead: number;
        buffer: TBuffer;
    }>;
    // Options-object form; unspecified options take their documented defaults.
    function __promisify__<TBuffer extends NodeJS.ArrayBufferView>(
        fd: number,
        options: ReadAsyncOptions<TBuffer>
    ): Promise<{
        bytesRead: number;
        buffer: TBuffer;
    }>;
    // No-options form; all options take their documented defaults.
    function __promisify__(fd: number): Promise<{
        bytesRead: number;
        buffer: NodeJS.ArrayBufferView;
    }>;
}
/**
 * Returns the number of `bytesRead`.
 *
 * For detailed information, see the documentation of the asynchronous version of
 * this API: {@link read}.
 * @since v0.1.21
 * @param fd A file descriptor.
 * @param buffer The buffer that the data will be written to.
 * @return The number of bytes read.
 */
export function readSync(fd: number, buffer: NodeJS.ArrayBufferView, offset: number, length: number, position: ReadPosition | null): number;
/**
 * Similar to the above `fs.readSync` function, this version takes an optional `options` object.
 * If no `options` object is specified, it will default with the above values.
 * @return The number of bytes read.
 */
export function readSync(fd: number, buffer: NodeJS.ArrayBufferView, opts?: ReadSyncOptions): number;
/**
 * Asynchronously reads the entire contents of a file.
 *
 * ```js
 * import { readFile } from 'fs';
 *
 * readFile('/etc/passwd', (err, data) => {
 *   if (err) throw err;
 *   console.log(data);
 * });
 * ```
 *
 * The callback is passed two arguments `(err, data)`, where `data` is the
 * contents of the file.
 *
 * If no encoding is specified, then the raw buffer is returned.
 *
 * If `options` is a string, then it specifies the encoding:
 *
 * ```js
 * import { readFile } from 'fs';
 *
 * readFile('/etc/passwd', 'utf8', callback);
 * ```
 *
 * When the path is a directory, the behavior of `fs.readFile()` and {@link readFileSync} is platform-specific. On macOS, Linux, and Windows, an
 * error will be returned. On FreeBSD, a representation of the directory's contents
 * will be returned.
 *
 * ```js
 * import { readFile } from 'fs';
 *
 * // macOS, Linux, and Windows
 * readFile('<directory>', (err, data) => {
 *   // => [Error: EISDIR: illegal operation on a directory, read <directory>]
 * });
 *
 * // FreeBSD
 * readFile('<directory>', (err, data) => {
 *   // => null, <data>
 * });
 * ```
 *
 * It is possible to abort an ongoing request using an `AbortSignal`. If a
 * request is aborted the callback is called with an `AbortError`:
 *
 * ```js
 * import { readFile } from 'fs';
 *
 * const controller = new AbortController();
 * const signal = controller.signal;
 * readFile(fileInfo[0].name, { signal }, (err, buf) => {
 *   // ...
 * });
 * // When you want to abort the request
 * controller.abort();
 * ```
 *
 * The `fs.readFile()` function buffers the entire file. To minimize memory costs,
 * when possible prefer streaming via `fs.createReadStream()`.
 *
 * Aborting an ongoing request does not abort individual operating
 * system requests but rather the internal buffering `fs.readFile` performs.
 * @since v0.1.29
 * @param path filename or file descriptor
 * @param callback Receives `(err, data)` where `data` is a `Buffer`, since no encoding is given.
 */
export function readFile(
    path: PathOrFileDescriptor,
    options:
        | ({
              encoding?: null | undefined;
              flag?: string | undefined;
          } & Abortable)
        | undefined
        | null,
    callback: (err: NodeJS.ErrnoException | null, data: Buffer) => void
): void;
/**
 * Asynchronously reads the entire contents of a file.
 * An encoding is always supplied, so `data` is delivered as a `string`.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * If a file descriptor is provided, the underlying file will _not_ be closed automatically.
 * @param options Either the encoding for the result, or an object that contains the encoding and an optional flag.
 * If a flag is not provided, it defaults to `'r'`.
 */
export function readFile(
    path: PathOrFileDescriptor,
    options:
        | ({
              encoding: BufferEncoding;
              flag?: string | undefined;
          } & Abortable)
        | BufferEncoding,
    callback: (err: NodeJS.ErrnoException | null, data: string) => void
): void;
/**
 * Asynchronously reads the entire contents of a file.
 * The encoding may or may not be known statically, so `data` is `string | Buffer`.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * If a file descriptor is provided, the underlying file will _not_ be closed automatically.
 * @param options Either the encoding for the result, or an object that contains the encoding and an optional flag.
 * If a flag is not provided, it defaults to `'r'`.
 */
export function readFile(
    path: PathOrFileDescriptor,
    options:
        | (ObjectEncodingOptions & {
              flag?: string | undefined;
          } & Abortable)
        | BufferEncoding
        | undefined
        | null,
    callback: (err: NodeJS.ErrnoException | null, data: string | Buffer) => void
): void;
/**
 * Asynchronously reads the entire contents of a file.
 * No options are given, so `data` is a raw `Buffer`.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * If a file descriptor is provided, the underlying file will _not_ be closed automatically.
 */
export function readFile(path: PathOrFileDescriptor, callback: (err: NodeJS.ErrnoException | null, data: Buffer) => void): void;
export namespace readFile {
    /**
     * Asynchronously reads the entire contents of a file.
     * Resolves with a `Buffer`, since no encoding is given.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     * If a file descriptor is provided, the underlying file will _not_ be closed automatically.
     * @param options An object that may contain an optional flag.
     * If a flag is not provided, it defaults to `'r'`.
     */
    function __promisify__(
        path: PathOrFileDescriptor,
        options?: {
            encoding?: null | undefined;
            flag?: string | undefined;
        } | null
    ): Promise<Buffer>;
    /**
     * Asynchronously reads the entire contents of a file.
     * Resolves with a `string`, since an encoding is always supplied.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     * URL support is _experimental_.
     * If a file descriptor is provided, the underlying file will _not_ be closed automatically.
     * @param options Either the encoding for the result, or an object that contains the encoding and an optional flag.
     * If a flag is not provided, it defaults to `'r'`.
     */
    function __promisify__(
        path: PathOrFileDescriptor,
        options:
            | {
                  encoding: BufferEncoding;
                  flag?: string | undefined;
              }
            | BufferEncoding
    ): Promise<string>;
    /**
     * Asynchronously reads the entire contents of a file.
     * Resolves with `string | Buffer`, depending on the runtime encoding value.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     * URL support is _experimental_.
     * If a file descriptor is provided, the underlying file will _not_ be closed automatically.
     * @param options Either the encoding for the result, or an object that contains the encoding and an optional flag.
     * If a flag is not provided, it defaults to `'r'`.
     */
    function __promisify__(
        path: PathOrFileDescriptor,
        options?:
            | (ObjectEncodingOptions & {
                  flag?: string | undefined;
              })
            | BufferEncoding
            | null
    ): Promise<string | Buffer>;
}
/**
 * Returns the contents of the `path`.
 *
 * For detailed information, see the documentation of the asynchronous version of
 * this API: {@link readFile}.
 *
 * If the `encoding` option is specified then this function returns a
 * string. Otherwise it returns a buffer.
 *
 * Similar to {@link readFile}, when the path is a directory, the behavior of`fs.readFileSync()` is platform-specific.
 *
 * ```js
 * import { readFileSync } from 'fs';
 *
 * // macOS, Linux, and Windows
 * readFileSync('<directory>');
 * // => [Error: EISDIR: illegal operation on a directory, read <directory>]
 *
 * //  FreeBSD
 * readFileSync('<directory>'); // => <data>
 * ```
 * @since v0.1.8
 * @param path filename or file descriptor
 * @return A `Buffer`, since no encoding is given.
 */
export function readFileSync(
    path: PathOrFileDescriptor,
    options?: {
        encoding?: null | undefined;
        flag?: string | undefined;
    } | null
): Buffer;
/**
 * Synchronously reads the entire contents of a file.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * If a file descriptor is provided, the underlying file will _not_ be closed automatically.
 * @param options Either the encoding for the result, or an object that contains the encoding and an optional flag.
 * If a flag is not provided, it defaults to `'r'`.
 * @return A `string`, since an encoding is always supplied.
 */
export function readFileSync(
    path: PathOrFileDescriptor,
    options:
        | {
              encoding: BufferEncoding;
              flag?: string | undefined;
          }
        | BufferEncoding
): string;
/**
 * Synchronously reads the entire contents of a file.
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * If a file descriptor is provided, the underlying file will _not_ be closed automatically.
 * @param options Either the encoding for the result, or an object that contains the encoding and an optional flag.
 * If a flag is not provided, it defaults to `'r'`.
 * @return `string` or `Buffer`, depending on the runtime encoding value.
 */
export function readFileSync(
    path: PathOrFileDescriptor,
    options?:
        | (ObjectEncodingOptions & {
              flag?: string | undefined;
          })
        | BufferEncoding
        | null
): string | Buffer;
// Options accepted by `writeFile`/`appendFile` and their sync variants:
// either an options object (encoding, abort signal, file mode, open flag)
// or a bare encoding string, or `null` for all defaults.
export type WriteFileOptions =
    | (ObjectEncodingOptions &
          Abortable & {
              mode?: Mode | undefined;
              flag?: string | undefined;
          })
    | BufferEncoding
    | null;
/**
 * When `file` is a filename, asynchronously writes data to the file, replacing the
 * file if it already exists. `data` can be a string or a buffer.
 *
 * When `file` is a file descriptor, the behavior is similar to calling`fs.write()` directly (which is recommended). See the notes below on using
 * a file descriptor.
 *
 * The `encoding` option is ignored if `data` is a buffer.
 *
 * The `mode` option only affects the newly created file. See {@link open} for more details.
 *
 * ```js
 * import { writeFile } from 'fs';
 * import { Buffer } from 'buffer';
 *
 * const data = new Uint8Array(Buffer.from('Hello Node.js'));
 * writeFile('message.txt', data, (err) => {
 *   if (err) throw err;
 *   console.log('The file has been saved!');
 * });
 * ```
 *
 * If `options` is a string, then it specifies the encoding:
 *
 * ```js
 * import { writeFile } from 'fs';
 *
 * writeFile('message.txt', 'Hello Node.js', 'utf8', callback);
 * ```
 *
 * It is unsafe to use `fs.writeFile()` multiple times on the same file without
 * waiting for the callback. For this scenario, {@link createWriteStream} is
 * recommended.
 *
 * Similarly to `fs.readFile` \- `fs.writeFile` is a convenience method that
 * performs multiple `write` calls internally to write the buffer passed to it.
 * For performance sensitive code consider using {@link createWriteStream}.
 *
 * It is possible to use an `AbortSignal` to cancel an `fs.writeFile()`.
 * Cancelation is "best effort", and some amount of data is likely still
 * to be written.
 *
 * ```js
 * import { writeFile } from 'fs';
 * import { Buffer } from 'buffer';
 *
 * const controller = new AbortController();
 * const { signal } = controller;
 * const data = new Uint8Array(Buffer.from('Hello Node.js'));
 * writeFile('message.txt', data, { signal }, (err) => {
 *   // When a request is aborted - the callback is called with an AbortError
 * });
 * // When the request should be aborted
 * controller.abort();
 * ```
 *
 * Aborting an ongoing request does not abort individual operating
 * system requests but rather the internal buffering `fs.writeFile` performs.
 * @since v0.1.29
 * @param file filename or file descriptor
 * @param data The data to write.
 * @param options Encoding, mode, flag, and/or abort signal (see `WriteFileOptions`).
 * @param callback Invoked when the write completes (or fails).
 */
export function writeFile(file: PathOrFileDescriptor, data: string | NodeJS.ArrayBufferView, options: WriteFileOptions, callback: NoParamCallback): void;
/**
 * Asynchronously writes data to a file, replacing the file if it already exists.
 * All options take their defaults (`'utf8'`, mode `0o666`, flag `'w'`).
 * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
 * If a file descriptor is provided, the underlying file will _not_ be closed automatically.
 * @param data The data to write. If something other than a Buffer or Uint8Array is provided, the value is coerced to a string.
 */
export function writeFile(path: PathOrFileDescriptor, data: string | NodeJS.ArrayBufferView, callback: NoParamCallback): void;
export namespace writeFile {
    /**
     * Asynchronously writes data to a file, replacing the file if it already exists.
     * @param path A path to a file. If a URL is provided, it must use the `file:` protocol.
     * URL support is _experimental_.
     * If a file descriptor is provided, the underlying file will _not_ be closed automatically.
     * @param data The data to write. If something other than a Buffer or Uint8Array is provided, the value is coerced to a string.
     * @param options Either the encoding for the file, or an object optionally specifying the encoding, file mode, and flag.
     * If `encoding` is not supplied, the default of `'utf8'` is used.
     * If `mode` is not supplied, the default of `0o666` is used.
     * If `mode` is a string, it is parsed as an octal integer.
     * If `flag` is not supplied, the default of `'w'` is used.
     */
    function __promisify__(path: PathOrFileDescriptor, data: string | NodeJS.ArrayBufferView, options?: WriteFileOptions): Promise<void>;
}
/**
 * Returns `undefined`.
 *
 * The `mode` option only affects the newly created file. See {@link open} for more details.
 *
 * For detailed information, see the documentation of the asynchronous version of
 * this API: {@link writeFile}.
 * @since v0.1.29
 * @param file filename or file descriptor
 * @param data The data to write.
 * @param options Encoding, mode, flag, and/or abort signal (see `WriteFileOptions`).
 */
export function writeFileSync(file: PathOrFileDescriptor, data: string | NodeJS.ArrayBufferView, options?: WriteFileOptions): void;
/**
 * Asynchronously append data to a file, creating the file if it does not yet
 * exist. `data` can be a string or a `Buffer`.
 *
 * The `mode` option only affects the newly created file. See {@link open} for more details.
 *
 * ```js
 * import { appendFile } from 'fs';
 *
 * appendFile('message.txt', 'data to append', (err) => {
 *   if (err) throw err;
 *   console.log('The "data to append" was appended to file!');
 * });
 * ```
 *
 * If `options` is a string, then it specifies the encoding:
 *
 * ```js
 * import { appendFile } from 'fs';
 *
 * appendFile('message.txt', 'data to append', 'utf8', callback);
 * ```
 *
 * The `path` may be specified as a numeric file descriptor that has been opened
 * for appending (using `fs.open()` or `fs.openSync()`). The file descriptor will
 * not be closed automatically.
 *
 * ```js
 * import { open, close, appendFile } from 'fs';
 *
 * function closeFd(fd) {
 *   close(fd, (err) => {
 *     if (err) throw err;
 *   });
 * }
 *
 * open('message.txt', 'a', (err, fd) => {
 *   if (err) throw err;
 *
 *   try {
 *     appendFile(fd, 'data to append', 'utf8', (err) => {
 *       closeFd(fd);
 *       if (err) throw err;
 *     });
 *   } catch (err) {
 *     closeFd(fd);
 *     throw err;
 *   }
 * });
 * ```
 * @since v0.6.7
 * @param path filename or file descriptor
 * @param data The data to append.
 * @param options Encoding, mode, flag, and/or abort signal (see `WriteFileOptions`).
 * @param callback Invoked when the append completes (or fails).
 */
export function appendFile(path: PathOrFileDescriptor, data: string | Uint8Array, options: WriteFileOptions, callback: NoParamCallback): void;
/**
 * Asynchronously append data to a file, creating the file if it does not exist.
 * All options take their defaults (`'utf8'`, mode `0o666`, flag `'a'`).
 * @param file A path to a file. If a URL is provided, it must use the `file:` protocol.
 * If a file descriptor is provided, the underlying file will _not_ be closed automatically.
 * @param data The data to write. If something other than a Buffer or Uint8Array is provided, the value is coerced to a string.
 */
export function appendFile(file: PathOrFileDescriptor, data: string | Uint8Array, callback: NoParamCallback): void;
export namespace appendFile {
    /**
     * Asynchronously append data to a file, creating the file if it does not exist.
     * @param file A path to a file. If a URL is provided, it must use the `file:` protocol.
     * URL support is _experimental_.
     * If a file descriptor is provided, the underlying file will _not_ be closed automatically.
     * @param data The data to write. If something other than a Buffer or Uint8Array is provided, the value is coerced to a string.
     * @param options Either the encoding for the file, or an object optionally specifying the encoding, file mode, and flag.
     * If `encoding` is not supplied, the default of `'utf8'` is used.
     * If `mode` is not supplied, the default of `0o666` is used.
     * If `mode` is a string, it is parsed as an octal integer.
     * If `flag` is not supplied, the default of `'a'` is used.
     */
    function __promisify__(file: PathOrFileDescriptor, data: string | Uint8Array, options?: WriteFileOptions): Promise<void>;
}
/**
 * Synchronously append data to a file, creating the file if it does not yet
 * exist. `data` can be a string or a `Buffer`.
 *
 * The `mode` option only affects the newly created file. See {@link open} for more details.
 *
 * ```js
 * import { appendFileSync } from 'fs';
 *
 * try {
 *   appendFileSync('message.txt', 'data to append');
 *   console.log('The "data to append" was appended to file!');
 * } catch (err) {
 *   // Handle the error
 * }
 * ```
 *
 * If `options` is a string, then it specifies the encoding:
 *
 * ```js
 * import { appendFileSync } from 'fs';
 *
 * appendFileSync('message.txt', 'data to append', 'utf8');
 * ```
 *
 * The `path` may be specified as a numeric file descriptor that has been opened
 * for appending (using `fs.open()` or `fs.openSync()`). The file descriptor will
 * not be closed automatically.
 *
 * ```js
 * import { openSync, closeSync, appendFileSync } from 'fs';
 *
 * let fd;
 *
 * try {
 *   fd = openSync('message.txt', 'a');
 *   appendFileSync(fd, 'data to append', 'utf8');
 * } catch (err) {
 *   // Handle the error
 * } finally {
 *   if (fd !== undefined)
 *     closeSync(fd);
 * }
 * ```
 * @since v0.6.7
 * @param path filename or file descriptor
 * @param data The data to append.
 * @param options Encoding, mode, flag, and/or abort signal (see `WriteFileOptions`).
 */
export function appendFileSync(path: PathOrFileDescriptor, data: string | Uint8Array, options?: WriteFileOptions): void;
/**
 * Options for `fs.watchFile()`, controlling how the target file is polled
 * for changes.
 *
 * (Previously this interface carried a copy of the `fs.watchFile()` function
 * documentation; see {@link watchFile} for the full behavioral description.)
 * @since v0.1.31
 */
export interface WatchFileOptions {
    /** When `true`, the stat objects passed to the listener report their numeric values as `BigInt`s (`fs.BigIntStats`). */
    bigint?: boolean | undefined;
    /** Whether the process should continue to run as long as files are being watched. */
    persistent?: boolean | undefined;
    /** How often the target should be polled, in milliseconds. */
    interval?: number | undefined;
}
/**
 * Watch for changes on `filename`. The callback `listener` will be called each
 * time the file is accessed.
 *
 * The `options` argument may be omitted. If provided, it should be an object. The `options` object may contain a boolean named `persistent` that indicates
 * whether the process should continue to run as long as files are being watched.
 * The `options` object may specify an `interval` property indicating how often the
 * target should be polled in milliseconds.
 *
 * The `listener` gets two arguments: the current stat object and the previous
 * stat object:
 *
 * ```js
 * import { watchFile } from 'fs';
 *
 * watchFile('message.text', (curr, prev) => {
 *   console.log(`the current mtime is: ${curr.mtime}`);
 *   console.log(`the previous mtime was: ${prev.mtime}`);
 * });
 * ```
 *
 * These stat objects are instances of `fs.Stat`. If the `bigint` option is `true`,
 * the numeric values in these objects are specified as `BigInt`s.
 *
 * To be notified when the file was modified, not just accessed, it is necessary
 * to compare `curr.mtimeMs` and `prev.mtimeMs`.
 *
 * When an `fs.watchFile` operation results in an `ENOENT` error, it
 * will invoke the listener once, with all the fields zeroed (or, for dates, the
 * Unix Epoch). If the file is created later on, the listener will be called
 * again, with the latest stat objects. This is a change in functionality since
 * v0.10.
 *
 * Using {@link watch} is more efficient than `fs.watchFile` and `fs.unwatchFile`. `fs.watch` should be used instead of `fs.watchFile` and `fs.unwatchFile` when possible.
 *
 * When a file being watched by `fs.watchFile()` disappears and reappears,
 * then the contents of `previous` in the second callback event (the file's
 * reappearance) will be the same as the contents of `previous` in the first
 * callback event (its disappearance).
 *
 * This happens when:
 *
 * * the file is deleted, followed by a restore
 * * the file is renamed and then renamed a second time back to its original name
 * @since v0.1.31
 */
export function watchFile(
    filename: PathLike,
    options:
        | (WatchFileOptions & {
              bigint?: false | undefined;
          })
        | undefined,
    listener: StatsListener
): StatWatcher;
export function watchFile(
    filename: PathLike,
    options:
        | (WatchFileOptions & {
              bigint: true;
          })
        | undefined,
    listener: BigIntStatsListener
): StatWatcher;
/**
 * Watch for changes on `filename`. The callback `listener` will be called each time the file is accessed.
 * @since v0.1.31
 * @param filename A path to a file or directory. If a URL is provided, it must use the `file:` protocol.
 * @param listener The callback listener will be called each time the file is accessed.
 */
export function watchFile(filename: PathLike, listener: StatsListener): StatWatcher;
/**
 * Stop watching for changes on `filename`. If `listener` is specified, only that
 * particular listener is removed. Otherwise, _all_ listeners are removed,
 * effectively stopping watching of `filename`.
 *
 * Calling `fs.unwatchFile()` with a filename that is not being watched is a
 * no-op, not an error.
 *
 * Using {@link watch} is more efficient than `fs.watchFile()` and `fs.unwatchFile()`. `fs.watch()` should be used instead of `fs.watchFile()` and `fs.unwatchFile()` when possible.
 * @since v0.1.31
 * @param listener Optional, a listener previously attached using `fs.watchFile()`
 */
export function unwatchFile(filename: PathLike, listener?: StatsListener): void;
export function unwatchFile(filename: PathLike, listener?: BigIntStatsListener): void;
/**
 * Options for `fs.watch()`. Extends `Abortable`, so a `signal` may be supplied;
 * aborting the corresponding `AbortController` closes the returned `fs.FSWatcher`.
 */
export interface WatchOptions extends Abortable {
    /** Encoding for the filename passed to the listener; `'buffer'` delivers the filename as a `Buffer`. */
    encoding?: BufferEncoding | 'buffer' | undefined;
    /** Whether the process should continue to run as long as files are being watched. */
    persistent?: boolean | undefined;
    /** When `true`, indicates that subdirectories should be watched as well. */
    recursive?: boolean | undefined;
}
/** The type of change event reported by `fs.watch()`: `'rename'` or `'change'`. */
export type WatchEventType = 'rename' | 'change';
/** Listener for `fs.watch()`; receives the event type and the filename which triggered the event (or `null` if unavailable). */
export type WatchListener<T> = (event: WatchEventType, filename: T | null) => void;
/** Listener for `fs.watchFile()`; receives the current and previous `fs.Stats`. */
export type StatsListener = (curr: Stats, prev: Stats) => void;
/** Listener for `fs.watchFile()` with `bigint: true`; receives the current and previous `fs.BigIntStats`. */
export type BigIntStatsListener = (curr: BigIntStats, prev: BigIntStats) => void;
/**
 * Watch for changes on `filename`, where `filename` is either a file or a
 * directory.
 *
 * The second argument is optional. If `options` is provided as a string, it
 * specifies the `encoding`. Otherwise `options` should be passed as an object.
 *
 * The listener callback gets two arguments `(eventType, filename)`. `eventType` is either `'rename'` or `'change'`, and `filename` is the name of the file
 * which triggered the event.
 *
 * On most platforms, `'rename'` is emitted whenever a filename appears or
 * disappears in the directory.
 *
 * The listener callback is attached to the `'change'` event fired by `fs.FSWatcher`, but it is not the same thing as the `'change'` value of `eventType`.
 *
 * If a `signal` is passed, aborting the corresponding AbortController will close
 * the returned `fs.FSWatcher`.
 * @since v0.5.10
 * @param listener Called on each event with `(eventType, filename)`; the filename is delivered as a `Buffer` for this overload.
 */
export function watch(
    filename: PathLike,
    options:
        | (WatchOptions & {
              encoding: 'buffer';
          })
        | 'buffer',
    listener?: WatchListener<Buffer>
): FSWatcher;
/**
 * Watch for changes on `filename`, where `filename` is either a file or a directory, returning an `FSWatcher`.
 * @since v0.5.10
 * @param filename A path to a file or directory. If a URL is provided, it must use the `file:` protocol.
 * @param options Either the encoding for the filename provided to the listener, or an object optionally specifying encoding, persistent, and recursive options.
 * If `encoding` is not supplied, the default of `'utf8'` is used.
 * If `persistent` is not supplied, the default of `true` is used.
 * If `recursive` is not supplied, the default of `false` is used.
 */
export function watch(filename: PathLike, options?: WatchOptions | BufferEncoding | null, listener?: WatchListener<string>): FSWatcher;
/**
 * Watch for changes on `filename`, where `filename` is either a file or a directory, returning an `FSWatcher`.
 * @since v0.5.10
 * @param filename A path to a file or directory. If a URL is provided, it must use the `file:` protocol.
 * @param options Either the encoding for the filename provided to the listener, or an object optionally specifying encoding, persistent, and recursive options.
 * If `encoding` is not supplied, the default of `'utf8'` is used.
 * If `persistent` is not supplied, the default of `true` is used.
 * If `recursive` is not supplied, the default of `false` is used.
 */
export function watch(filename: PathLike, options: WatchOptions | string, listener?: WatchListener<string | Buffer>): FSWatcher;
/**
 * Watch for changes on `filename`, where `filename` is either a file or a directory, returning an `FSWatcher`.
 * @since v0.5.10
 * @param filename A path to a file or directory. If a URL is provided, it must use the `file:` protocol.
 */
export function watch(filename: PathLike, listener?: WatchListener<string>): FSWatcher;
/**
 * Test whether or not the given path exists by checking with the file system.
 * Then call the `callback` argument with either true or false:
 *
 * ```js
 * import { exists } from 'fs';
 *
 * exists('/etc/passwd', (e) => {
 *   console.log(e ? 'it exists' : 'no passwd!');
 * });
 * ```
 *
 * **The parameters for this callback are not consistent with other Node.js**
 * **callbacks.** Normally, the first parameter to a Node.js callback is an `err` parameter, optionally followed by other parameters. The `fs.exists()` callback
 * has only one boolean parameter. This is one reason `fs.access()` is recommended
 * instead of `fs.exists()`.
 *
 * Using `fs.exists()` to check for the existence of a file before calling `fs.open()`, `fs.readFile()` or `fs.writeFile()` is not recommended. Doing
 * so introduces a race condition, since other processes may change the file's
 * state between the two calls. Instead, user code should open/read/write the
 * file directly and handle the error raised if the file does not exist.
 *
 * **write (NOT RECOMMENDED)**
 *
 * ```js
 * import { exists, open, close } from 'fs';
 *
 * exists('myfile', (e) => {
 *   if (e) {
 *     console.error('myfile already exists');
 *   } else {
 *     open('myfile', 'wx', (err, fd) => {
 *       if (err) throw err;
 *
 *       try {
 *         writeMyData(fd);
 *       } finally {
 *         close(fd, (err) => {
 *           if (err) throw err;
 *         });
 *       }
 *     });
 *   }
 * });
 * ```
 *
 * **write (RECOMMENDED)**
 *
 * ```js
 * import { open, close } from 'fs';
 * open('myfile', 'wx', (err, fd) => {
 *   if (err) {
 *     if (err.code === 'EEXIST') {
 *       console.error('myfile already exists');
 *       return;
 *     }
 *
 *     throw err;
 *   }
 *
 *   try {
 *     writeMyData(fd);
 *   } finally {
 *     close(fd, (err) => {
 *       if (err) throw err;
 *     });
 *   }
 * });
 * ```
 *
 * **read (NOT RECOMMENDED)**
 *
 * ```js
 * import { open, close, exists } from 'fs';
 *
 * exists('myfile', (e) => {
 *   if (e) {
 *     open('myfile', 'r', (err, fd) => {
 *       if (err) throw err;
 *
 *       try {
 *         readMyData(fd);
 *       } finally {
 *         close(fd, (err) => {
 *           if (err) throw err;
 *         });
 *       }
 *     });
 *   } else {
 *     console.error('myfile does not exist');
 *   }
 * });
 * ```
 *
 * **read (RECOMMENDED)**
 *
 * ```js
 * import { open, close } from 'fs';
 *
 * open('myfile', 'r', (err, fd) => {
 *   if (err) {
 *     if (err.code === 'ENOENT') {
 *       console.error('myfile does not exist');
 *       return;
 *     }
 *
 *     throw err;
 *   }
 *
 *   try {
 *     readMyData(fd);
 *   } finally {
 *     close(fd, (err) => {
 *       if (err) throw err;
 *     });
 *   }
 * });
 * ```
 *
 * The "not recommended" examples above check for existence and then use the
 * file; the "recommended" examples are better because they use the file directly
 * and handle the error, if any.
 *
 * In general, check for the existence of a file only if the file won’t be
 * used directly, for example when its existence is a signal from another
 * process.
 * @since v0.0.2
 * @deprecated Since v1.0.0 - Use {@link stat} or {@link access} instead.
 */
export function exists(path: PathLike, callback: (exists: boolean) => void): void;
/** @deprecated */
export namespace exists {
    /**
     * @param path A path to a file or directory. If a URL is provided, it must use the `file:` protocol.
     * URL support is _experimental_.
     * @returns A `Promise` that resolves with `true` if the path exists, `false` otherwise.
     */
    function __promisify__(path: PathLike): Promise<boolean>;
}
/**
 * Returns `true` if the path exists, `false` otherwise.
 *
 * For detailed information, see the documentation of the asynchronous version of
 * this API: {@link exists}.
 *
 * `fs.exists()` is deprecated, but `fs.existsSync()` is not. The `callback` parameter to `fs.exists()` accepts parameters that are inconsistent with other
 * Node.js callbacks. `fs.existsSync()` does not use a callback.
 *
 * ```js
 * import { existsSync } from 'fs';
 *
 * if (existsSync('/etc/passwd'))
 *   console.log('The path exists.');
 * ```
 * @since v0.1.21
 */
export function existsSync(path: PathLike): boolean;
export namespace constants {
    // File Access Constants
    /** Constant for fs.access(). File is visible to the calling process. */
    const F_OK: number;
    /** Constant for fs.access(). File can be read by the calling process. */
    const R_OK: number;
    /** Constant for fs.access(). File can be written by the calling process. */
    const W_OK: number;
    /** Constant for fs.access(). File can be executed by the calling process. */
    const X_OK: number;
    // File Copy Constants
    /** Constant for fs.copyFile. Flag indicating the destination file should not be overwritten if it already exists. */
    const COPYFILE_EXCL: number;
    /**
     * Constant for fs.copyFile. copy operation will attempt to create a copy-on-write reflink.
     * If the underlying platform does not support copy-on-write, then a fallback copy mechanism is used.
     */
    const COPYFILE_FICLONE: number;
    /**
     * Constant for fs.copyFile. Copy operation will attempt to create a copy-on-write reflink.
     * If the underlying platform does not support copy-on-write, then the operation will fail with an error.
     */
    const COPYFILE_FICLONE_FORCE: number;
    // File Open Constants
    /** Constant for fs.open(). Flag indicating to open a file for read-only access. */
    const O_RDONLY: number;
    /** Constant for fs.open(). Flag indicating to open a file for write-only access. */
    const O_WRONLY: number;
    /** Constant for fs.open(). Flag indicating to open a file for read-write access. */
    const O_RDWR: number;
    /** Constant for fs.open(). Flag indicating to create the file if it does not already exist. */
    const O_CREAT: number;
    /** Constant for fs.open(). Flag indicating that opening a file should fail if the O_CREAT flag is set and the file already exists. */
    const O_EXCL: number;
    /**
     * Constant for fs.open(). Flag indicating that if path identifies a terminal device,
     * opening the path shall not cause that terminal to become the controlling terminal for the process
     * (if the process does not already have one).
     */
    const O_NOCTTY: number;
    /** Constant for fs.open(). Flag indicating that if the file exists and is a regular file, and the file is opened successfully for write access, its length shall be truncated to zero. */
    const O_TRUNC: number;
    /** Constant for fs.open(). Flag indicating that data will be appended to the end of the file. */
    const O_APPEND: number;
    /** Constant for fs.open(). Flag indicating that the open should fail if the path is not a directory. */
    const O_DIRECTORY: number;
    /**
     * Constant for fs.open().
     * Flag indicating reading accesses to the file system will no longer result in
     * an update to the atime information associated with the file.
     * This flag is available on Linux operating systems only.
     */
    const O_NOATIME: number;
    /** Constant for fs.open(). Flag indicating that the open should fail if the path is a symbolic link. */
    const O_NOFOLLOW: number;
    /** Constant for fs.open(). Flag indicating that the file is opened for synchronous I/O. */
    const O_SYNC: number;
    /** Constant for fs.open(). Flag indicating that the file is opened for synchronous I/O with write operations waiting for data integrity. */
    const O_DSYNC: number;
    /** Constant for fs.open(). Flag indicating to open the symbolic link itself rather than the resource it is pointing to. */
    const O_SYMLINK: number;
    /** Constant for fs.open(). When set, an attempt will be made to minimize caching effects of file I/O. */
    const O_DIRECT: number;
    /** Constant for fs.open(). Flag indicating to open the file in nonblocking mode when possible. */
    const O_NONBLOCK: number;
    // File Type Constants
    /** Constant for fs.Stats mode property for determining a file's type. Bit mask used to extract the file type code. */
    const S_IFMT: number;
    /** Constant for fs.Stats mode property for determining a file's type. File type constant for a regular file. */
    const S_IFREG: number;
    /** Constant for fs.Stats mode property for determining a file's type. File type constant for a directory. */
    const S_IFDIR: number;
    /** Constant for fs.Stats mode property for determining a file's type. File type constant for a character-oriented device file. */
    const S_IFCHR: number;
    /** Constant for fs.Stats mode property for determining a file's type. File type constant for a block-oriented device file. */
    const S_IFBLK: number;
    /** Constant for fs.Stats mode property for determining a file's type. File type constant for a FIFO/pipe. */
    const S_IFIFO: number;
    /** Constant for fs.Stats mode property for determining a file's type. File type constant for a symbolic link. */
    const S_IFLNK: number;
    /** Constant for fs.Stats mode property for determining a file's type. File type constant for a socket. */
    const S_IFSOCK: number;
    // File Mode Constants
    /** Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating readable, writable and executable by owner. */
    const S_IRWXU: number;
    /** Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating readable by owner. */
    const S_IRUSR: number;
    /** Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating writable by owner. */
    const S_IWUSR: number;
    /** Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating executable by owner. */
    const S_IXUSR: number;
    /** Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating readable, writable and executable by group. */
    const S_IRWXG: number;
    /** Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating readable by group. */
    const S_IRGRP: number;
    /** Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating writable by group. */
    const S_IWGRP: number;
    /** Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating executable by group. */
    const S_IXGRP: number;
    /** Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating readable, writable and executable by others. */
    const S_IRWXO: number;
    /** Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating readable by others. */
    const S_IROTH: number;
    /** Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating writable by others. */
    const S_IWOTH: number;
    /** Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating executable by others. */
    const S_IXOTH: number;
    /**
     * When set, a memory file mapping is used to access the file. This flag
     * is available on Windows operating systems only. On other operating systems,
     * this flag is ignored.
     */
    const UV_FS_O_FILEMAP: number;
}
/**
 * Tests a user's permissions for the file or directory specified by `path`.
 * The `mode` argument is an optional integer that specifies the accessibility
 * checks to be performed. `mode` should be either the value `fs.constants.F_OK` or a mask consisting of the bitwise OR of any of `fs.constants.R_OK`, `fs.constants.W_OK`, and `fs.constants.X_OK`
 * (e.g. `fs.constants.W_OK | fs.constants.R_OK`). Check `File access constants` for
 * possible values of `mode`.
 *
 * The final argument, `callback`, is a callback function that is invoked with
 * a possible error argument. If any of the accessibility checks fail, the error
 * argument will be an `Error` object. The following examples check if `package.json` exists, and if it is readable or writable.
 *
 * ```js
 * import { access, constants } from 'fs';
 *
 * const file = 'package.json';
 *
 * // Check if the file exists in the current directory.
 * access(file, constants.F_OK, (err) => {
 *   console.log(`${file} ${err ? 'does not exist' : 'exists'}`);
 * });
 *
 * // Check if the file is readable.
 * access(file, constants.R_OK, (err) => {
 *   console.log(`${file} ${err ? 'is not readable' : 'is readable'}`);
 * });
 *
 * // Check if the file is writable.
 * access(file, constants.W_OK, (err) => {
 *   console.log(`${file} ${err ? 'is not writable' : 'is writable'}`);
 * });
 *
 * // Check if the file is readable and writable.
 * access(file, constants.R_OK | constants.W_OK, (err) => {
 *   console.log(`${file} ${err ? 'is not' : 'is'} readable and writable`);
 * });
 * ```
 *
 * Do not use `fs.access()` to check for the accessibility of a file before calling `fs.open()`, `fs.readFile()` or `fs.writeFile()`. Doing
 * so introduces a race condition, since other processes may change the file's
 * state between the two calls. Instead, user code should open/read/write the
 * file directly and handle the error raised if the file is not accessible.
 *
 * **write (NOT RECOMMENDED)**
 *
 * ```js
 * import { access, open, close } from 'fs';
 *
 * access('myfile', (err) => {
 *   if (!err) {
 *     console.error('myfile already exists');
 *     return;
 *   }
 *
 *   open('myfile', 'wx', (err, fd) => {
 *     if (err) throw err;
 *
 *     try {
 *       writeMyData(fd);
 *     } finally {
 *       close(fd, (err) => {
 *         if (err) throw err;
 *       });
 *     }
 *   });
 * });
 * ```
 *
 * **write (RECOMMENDED)**
 *
 * ```js
 * import { open, close } from 'fs';
 *
 * open('myfile', 'wx', (err, fd) => {
 *   if (err) {
 *     if (err.code === 'EEXIST') {
 *       console.error('myfile already exists');
 *       return;
 *     }
 *
 *     throw err;
 *   }
 *
 *   try {
 *     writeMyData(fd);
 *   } finally {
 *     close(fd, (err) => {
 *       if (err) throw err;
 *     });
 *   }
 * });
 * ```
 *
 * **read (NOT RECOMMENDED)**
 *
 * ```js
 * import { access, open, close } from 'fs';
 * access('myfile', (err) => {
 *   if (err) {
 *     if (err.code === 'ENOENT') {
 *       console.error('myfile does not exist');
 *       return;
 *     }
 *
 *     throw err;
 *   }
 *
 *   open('myfile', 'r', (err, fd) => {
 *     if (err) throw err;
 *
 *     try {
 *       readMyData(fd);
 *     } finally {
 *       close(fd, (err) => {
 *         if (err) throw err;
 *       });
 *     }
 *   });
 * });
 * ```
 *
 * **read (RECOMMENDED)**
 *
 * ```js
 * import { open, close } from 'fs';
 *
 * open('myfile', 'r', (err, fd) => {
 *   if (err) {
 *     if (err.code === 'ENOENT') {
 *       console.error('myfile does not exist');
 *       return;
 *     }
 *
 *     throw err;
 *   }
 *
 *   try {
 *     readMyData(fd);
 *   } finally {
 *     close(fd, (err) => {
 *       if (err) throw err;
 *     });
 *   }
 * });
 * ```
 *
 * The "not recommended" examples above check for accessibility and then use the
 * file; the "recommended" examples are better because they use the file directly
 * and handle the error, if any.
 *
 * In general, check for the accessibility of a file only if the file will not be
 * used directly, for example when its accessibility is a signal from another
 * process.
 *
 * On Windows, access-control policies (ACLs) on a directory may limit access to
 * a file or directory. The `fs.access()` function, however, does not check the
 * ACL and therefore may report that a path is accessible even if the ACL restricts
 * the user from reading or writing to it.
 * @since v0.11.15
 * @param [mode=fs.constants.F_OK]
 */
export function access(path: PathLike, mode: number | undefined, callback: NoParamCallback): void;
/**
 * Asynchronously tests a user's permissions for the file specified by path.
 * @param path A path to a file or directory. If a URL is provided, it must use the `file:` protocol.
 * @param callback Invoked with a possible error argument; the error is set if any accessibility check fails.
 */
export function access(path: PathLike, callback: NoParamCallback): void;
export namespace access {
    /**
     * Asynchronously tests a user's permissions for the file specified by path.
     * @param path A path to a file or directory. If a URL is provided, it must use the `file:` protocol.
     * URL support is _experimental_.
     * @param [mode=fs.constants.F_OK] Accessibility checks to perform; see `File access constants`.
     */
    function __promisify__(path: PathLike, mode?: number): Promise<void>;
}
/**
 * Synchronously tests a user's permissions for the file or directory specified
 * by `path`. The `mode` argument is an optional integer that specifies the
 * accessibility checks to be performed. `mode` should be either the value `fs.constants.F_OK` or a mask consisting of the bitwise OR of any of `fs.constants.R_OK`, `fs.constants.W_OK`, and
 * `fs.constants.X_OK` (e.g. `fs.constants.W_OK | fs.constants.R_OK`). Check `File access constants` for
 * possible values of `mode`.
 *
 * If any of the accessibility checks fail, an `Error` will be thrown. Otherwise,
 * the method will return `undefined`.
 *
 * ```js
 * import { accessSync, constants } from 'fs';
 *
 * try {
 *   accessSync('etc/passwd', constants.R_OK | constants.W_OK);
 *   console.log('can read/write');
 * } catch (err) {
 *   console.error('no access!');
 * }
 * ```
 * @since v0.11.15
 * @param [mode=fs.constants.F_OK]
 */
export function accessSync(path: PathLike, mode?: number): void;
/** Options shared by `fs.createReadStream()` and `fs.createWriteStream()`. */
interface StreamOptions {
    /** File system flags, e.g. `'r'` for read streams or `'w'`/`'r+'` for write streams. */
    flags?: string | undefined;
    /** Character encoding for the stream; can be any one of those accepted by `Buffer`. */
    encoding?: BufferEncoding | undefined;
    /** If specified, the stream uses this descriptor/handle and ignores the `path` argument; no `'open'` event is emitted. */
    fd?: number | promises.FileHandle | undefined;
    /** File mode (permission and sticky bits), applied only if the file was created. */
    mode?: number | undefined;
    /** When `true` (default behavior), the file descriptor is closed automatically on `'error'` or `'end'`/`'finish'`. */
    autoClose?: boolean | undefined;
    /**
     * Whether the stream emits a `'close'` event after it has been destroyed.
     * @default false
     */
    emitClose?: boolean | undefined;
    /** Byte offset at which to start reading or writing. */
    start?: number | undefined;
    /** The stream's high-water mark; streams returned by `createReadStream` default to 64 kb. */
    highWaterMark?: number | undefined;
}
/** Options for `fs.createReadStream()`; adds a range end to the shared stream options. */
interface ReadStreamOptions extends StreamOptions {
    /** Last byte offset to read; both `start` and `end` are inclusive and start counting at 0. */
    end?: number | undefined;
}
/**
 * Unlike the 16 kb default `highWaterMark` for a `stream.Readable`, the stream
 * returned by this method has a default `highWaterMark` of 64 kb.
 *
 * `options` can include `start` and `end` values to read a range of bytes from
 * the file instead of the entire file. Both `start` and `end` are inclusive and
 * start counting at 0, allowed values are in the
 * \[0, [`Number.MAX_SAFE_INTEGER`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number/MAX_SAFE_INTEGER)\] range. If `fd` is specified and `start` is
 * omitted or `undefined`, `fs.createReadStream()` reads sequentially from the
 * current file position. The `encoding` can be any one of those accepted by `Buffer`.
 *
 * If `fd` is specified, `ReadStream` will ignore the `path` argument and will use
 * the specified file descriptor. This means that no `'open'` event will be
 * emitted. `fd` should be blocking; non-blocking `fd`s should be passed to `net.Socket`.
 *
 * If `fd` points to a character device that only supports blocking reads
 * (such as keyboard or sound card), read operations do not finish until data is
 * available. This can prevent the process from exiting and the stream from
 * closing naturally.
 *
 * By default, the stream will emit a `'close'` event after it has been
 * destroyed. Set the `emitClose` option to `false` to change this behavior.
 *
 * By providing the `fs` option, it is possible to override the corresponding `fs` implementations for `open`, `read`, and `close`. When providing the `fs` option,
 * an override for `read` is required. If no `fd` is provided, an override for `open` is also required. If `autoClose` is `true`, an override for `close` is
 * also required.
 *
 * ```js
 * import { createReadStream } from 'fs';
 *
 * // Create a stream from some character device.
 * const stream = createReadStream('/dev/input/event0');
 * setTimeout(() => {
 *   stream.close(); // This may not close the stream.
 *   // Artificially marking end-of-stream, as if the underlying resource had
 *   // indicated end-of-file by itself, allows the stream to close.
 *   // This does not cancel pending read operations, and if there is such an
 *   // operation, the process may still not be able to exit successfully
 *   // until it finishes.
 *   stream.push(null);
 *   stream.read(0);
 * }, 100);
 * ```
 *
 * If `autoClose` is false, then the file descriptor won't be closed, even if
 * there's an error. It is the application's responsibility to close it and make
 * sure there's no file descriptor leak. If `autoClose` is set to true (default
 * behavior), on `'error'` or `'end'` the file descriptor will be closed
 * automatically.
 *
 * `mode` sets the file mode (permission and sticky bits), but only if the
 * file was created.
 *
 * An example to read the last 10 bytes of a file which is 100 bytes long:
 *
 * ```js
 * import { createReadStream } from 'fs';
 *
 * createReadStream('sample.txt', { start: 90, end: 99 });
 * ```
 *
 * If `options` is a string, then it specifies the encoding.
 * @since v0.1.31
 */
export function createReadStream(path: PathLike, options?: BufferEncoding | ReadStreamOptions): ReadStream;
/**
 * `options` may also include a `start` option to allow writing data at some
 * position past the beginning of the file, allowed values are in the
 * \[0, [`Number.MAX_SAFE_INTEGER`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number/MAX_SAFE_INTEGER)\] range. Modifying a file rather than
 * replacing it may require the `flags` option to be set to `r+` rather than the
 * default `w`. The `encoding` can be any one of those accepted by `Buffer`.
 *
 * If `autoClose` is set to true (default behavior) on `'error'` or `'finish'` the file descriptor will be closed automatically. If `autoClose` is false,
 * then the file descriptor won't be closed, even if there's an error.
 * It is the application's responsibility to close it and make sure there's no
 * file descriptor leak.
 *
 * By default, the stream will emit a `'close'` event after it has been
 * destroyed. Set the `emitClose` option to `false` to change this behavior.
 *
 * By providing the `fs` option it is possible to override the corresponding `fs` implementations for `open`, `write`, `writev` and `close`. Overriding `write()` without `writev()` can reduce
 * performance as some optimizations (`_writev()`)
 * will be disabled. When providing the `fs` option, overrides for at least one of `write` and `writev` are required. If no `fd` option is supplied, an override
 * for `open` is also required. If `autoClose` is `true`, an override for `close` is also required.
 *
 * Like `fs.ReadStream`, if `fd` is specified, `fs.WriteStream` will ignore the `path` argument and will use the specified file descriptor. This means that no `'open'` event will be
 * emitted. `fd` should be blocking; non-blocking `fd`s
 * should be passed to `net.Socket`.
 *
 * If `options` is a string, then it specifies the encoding.
 * @since v0.1.31
 */
export function createWriteStream(path: PathLike, options?: BufferEncoding | StreamOptions): WriteStream;
/**
 * Forces all currently queued I/O operations associated with the file to the
 * operating system's synchronized I/O completion state. Refer to the POSIX [`fdatasync(2)`](http://man7.org/linux/man-pages/man2/fdatasync.2.html) documentation for details. No arguments other
 * than a possible
 * exception are given to the completion callback.
 * @since v0.1.96
 * @param fd A file descriptor.
 */
export function fdatasync(fd: number, callback: NoParamCallback): void;
export namespace fdatasync {
    /**
     * Asynchronous fdatasync(2) - synchronize a file's in-core state with storage device.
     * @param fd A file descriptor.
     * @returns A `Promise` that resolves with no value upon success.
     */
    function __promisify__(fd: number): Promise<void>;
}
/**
 * Forces all currently queued I/O operations associated with the file to the
 * operating system's synchronized I/O completion state. Refer to the POSIX [`fdatasync(2)`](http://man7.org/linux/man-pages/man2/fdatasync.2.html) documentation for details. Returns `undefined`.
 * @since v0.1.96
 * @param fd A file descriptor.
 */
export function fdatasyncSync(fd: number): void;
    /**
     * Asynchronously copies `src` to `dest`. By default, `dest` is overwritten if it
     * already exists. No arguments other than a possible exception are given to the
     * callback function. Node.js makes no guarantees about the atomicity of the copy
     * operation. If an error occurs after the destination file has been opened for
     * writing, Node.js will attempt to remove the destination.
     *
     * `mode` is an optional integer that specifies the behavior
     * of the copy operation. It is possible to create a mask consisting of the bitwise
     * OR of two or more values (e.g.`fs.constants.COPYFILE_EXCL | fs.constants.COPYFILE_FICLONE`).
     *
     * * `fs.constants.COPYFILE_EXCL`: The copy operation will fail if `dest` already
     * exists.
     * * `fs.constants.COPYFILE_FICLONE`: The copy operation will attempt to create a
     * copy-on-write reflink. If the platform does not support copy-on-write, then a
     * fallback copy mechanism is used.
     * * `fs.constants.COPYFILE_FICLONE_FORCE`: The copy operation will attempt to
     * create a copy-on-write reflink. If the platform does not support
     * copy-on-write, then the operation will fail.
     *
     * ```js
     * import { copyFile, constants } from 'fs';
     *
     * function callback(err) {
     *   if (err) throw err;
     *   console.log('source.txt was copied to destination.txt');
     * }
     *
     * // destination.txt will be created or overwritten by default.
     * copyFile('source.txt', 'destination.txt', callback);
     *
     * // By using COPYFILE_EXCL, the operation will fail if destination.txt exists.
     * copyFile('source.txt', 'destination.txt', constants.COPYFILE_EXCL, callback);
     * ```
     * @since v8.5.0
     * @param src source filename to copy
     * @param dest destination filename of the copy operation
     * @param [mode=0] modifiers for copy operation.
     * @param callback called on completion; receives only a possible exception.
     */
    export function copyFile(src: PathLike, dest: PathLike, callback: NoParamCallback): void;
    export function copyFile(src: PathLike, dest: PathLike, mode: number, callback: NoParamCallback): void;
    export namespace copyFile {
        /**
         * Asynchronous copyFile(2) - promisified variant; resolves with no value on success.
         */
        function __promisify__(src: PathLike, dst: PathLike, mode?: number): Promise<void>;
    }
    /**
     * Synchronously copies `src` to `dest`. By default, `dest` is overwritten if it
     * already exists. Returns `undefined`. Node.js makes no guarantees about the
     * atomicity of the copy operation. If an error occurs after the destination file
     * has been opened for writing, Node.js will attempt to remove the destination.
     *
     * `mode` is an optional integer that specifies the behavior
     * of the copy operation. It is possible to create a mask consisting of the bitwise
     * OR of two or more values (e.g.`fs.constants.COPYFILE_EXCL | fs.constants.COPYFILE_FICLONE`).
     *
     * * `fs.constants.COPYFILE_EXCL`: The copy operation will fail if `dest` already
     * exists.
     * * `fs.constants.COPYFILE_FICLONE`: The copy operation will attempt to create a
     * copy-on-write reflink. If the platform does not support copy-on-write, then a
     * fallback copy mechanism is used.
     * * `fs.constants.COPYFILE_FICLONE_FORCE`: The copy operation will attempt to
     * create a copy-on-write reflink. If the platform does not support
     * copy-on-write, then the operation will fail.
     *
     * ```js
     * import { copyFileSync, constants } from 'fs';
     *
     * // destination.txt will be created or overwritten by default.
     * copyFileSync('source.txt', 'destination.txt');
     * console.log('source.txt was copied to destination.txt');
     *
     * // By using COPYFILE_EXCL, the operation will fail if destination.txt exists.
     * copyFileSync('source.txt', 'destination.txt', constants.COPYFILE_EXCL);
     * ```
     * @since v8.5.0
     * @param src source filename to copy
     * @param dest destination filename of the copy operation
     * @param [mode=0] modifiers for copy operation.
     */
    export function copyFileSync(src: PathLike, dest: PathLike, mode?: number): void;
    /**
     * Write an array of `ArrayBufferView`s to the file specified by `fd` using `writev()`.
     *
     * `position` is the offset from the beginning of the file where this data
     * should be written. If `typeof position !== 'number'`, the data will be written
     * at the current position.
     *
     * The callback will be given three arguments: `err`, `bytesWritten`, and `buffers`. `bytesWritten` is how many bytes were written from `buffers`.
     *
     * If this method is `util.promisify()` ed, it returns a promise for an `Object` with `bytesWritten` and `buffers` properties.
     *
     * It is unsafe to use `fs.writev()` multiple times on the same file without
     * waiting for the callback. For this scenario, use {@link createWriteStream}.
     *
     * On Linux, positional writes don't work when the file is opened in append mode.
     * The kernel ignores the position argument and always appends the data to
     * the end of the file.
     * @since v12.9.0
     */
    export function writev(fd: number, buffers: ReadonlyArray<NodeJS.ArrayBufferView>, cb: (err: NodeJS.ErrnoException | null, bytesWritten: number, buffers: NodeJS.ArrayBufferView[]) => void): void;
    export function writev(
        fd: number,
        buffers: ReadonlyArray<NodeJS.ArrayBufferView>,
        position: number,
        cb: (err: NodeJS.ErrnoException | null, bytesWritten: number, buffers: NodeJS.ArrayBufferView[]) => void
    ): void;
    /** Result object resolved by the promisified `writev`. */
    export interface WriteVResult {
        bytesWritten: number;
        buffers: NodeJS.ArrayBufferView[];
    }
    export namespace writev {
        function __promisify__(fd: number, buffers: ReadonlyArray<NodeJS.ArrayBufferView>, position?: number): Promise<WriteVResult>;
    }
    /**
     * For detailed information, see the documentation of the asynchronous version of
     * this API: {@link writev}.
     * @since v12.9.0
     * @return The number of bytes written.
     */
    export function writevSync(fd: number, buffers: ReadonlyArray<NodeJS.ArrayBufferView>, position?: number): number;
    /**
     * Read from a file specified by `fd` and write to an array of `ArrayBufferView`s
     * using `readv()`.
     *
     * `position` is the offset from the beginning of the file from where data
     * should be read. If `typeof position !== 'number'`, the data will be read
     * from the current position.
     *
     * The callback will be given three arguments: `err`, `bytesRead`, and `buffers`. `bytesRead` is how many bytes were read from the file.
     *
     * If this method is invoked as its `util.promisify()` ed version, it returns
     * a promise for an `Object` with `bytesRead` and `buffers` properties.
     * @since v13.13.0, v12.17.0
     */
    export function readv(fd: number, buffers: ReadonlyArray<NodeJS.ArrayBufferView>, cb: (err: NodeJS.ErrnoException | null, bytesRead: number, buffers: NodeJS.ArrayBufferView[]) => void): void;
    export function readv(
        fd: number,
        buffers: ReadonlyArray<NodeJS.ArrayBufferView>,
        position: number,
        cb: (err: NodeJS.ErrnoException | null, bytesRead: number, buffers: NodeJS.ArrayBufferView[]) => void
    ): void;
    /** Result object resolved by the promisified `readv`. */
    export interface ReadVResult {
        bytesRead: number;
        buffers: NodeJS.ArrayBufferView[];
    }
    export namespace readv {
        function __promisify__(fd: number, buffers: ReadonlyArray<NodeJS.ArrayBufferView>, position?: number): Promise<ReadVResult>;
    }
    /**
     * For detailed information, see the documentation of the asynchronous version of
     * this API: {@link readv}.
     * @since v13.13.0, v12.17.0
     * @return The number of bytes read.
     */
    export function readvSync(fd: number, buffers: ReadonlyArray<NodeJS.ArrayBufferView>, position?: number): number;
    export interface OpenDirOptions {
        /** Encoding for the `path` and subsequent read operations. */
        encoding?: BufferEncoding | undefined;
        /**
         * Number of directory entries that are buffered
         * internally when reading from the directory. Higher values lead to better
         * performance but higher memory usage.
         * @default 32
         */
        bufferSize?: number | undefined;
    }
    /**
     * Synchronously open a directory. See [`opendir(3)`](http://man7.org/linux/man-pages/man3/opendir.3.html).
     *
     * Creates an `fs.Dir`, which contains all further functions for reading from
     * and cleaning up the directory.
     *
     * The `encoding` option sets the encoding for the `path` while opening the
     * directory and subsequent read operations.
     * @since v12.12.0
     */
    export function opendirSync(path: PathLike, options?: OpenDirOptions): Dir;
    /**
     * Asynchronously open a directory. See the POSIX [`opendir(3)`](http://man7.org/linux/man-pages/man3/opendir.3.html) documentation for
     * more details.
     *
     * Creates an `fs.Dir`, which contains all further functions for reading from
     * and cleaning up the directory.
     *
     * The `encoding` option sets the encoding for the `path` while opening the
     * directory and subsequent read operations.
     * @since v12.12.0
     */
    export function opendir(path: PathLike, cb: (err: NodeJS.ErrnoException | null, dir: Dir) => void): void;
    export function opendir(path: PathLike, options: OpenDirOptions, cb: (err: NodeJS.ErrnoException | null, dir: Dir) => void): void;
    export namespace opendir {
        function __promisify__(path: PathLike, options?: OpenDirOptions): Promise<Dir>;
    }
    /** Stats variant whose numeric fields are `bigint`s; adds nanosecond-precision timestamps. */
    export interface BigIntStats extends StatsBase<bigint> {
        atimeNs: bigint;
        mtimeNs: bigint;
        ctimeNs: bigint;
        birthtimeNs: bigint;
    }
    /** Option object that forces `BigIntStats` results from the stat family. */
    export interface BigIntOptions {
        bigint: true;
    }
    /** Common stat options; `bigint` selects `BigIntStats` over `Stats`. */
    export interface StatOptions {
        bigint?: boolean | undefined;
    }
    /** Synchronous-only stat options; `throwIfNoEntry` controls ENOENT behavior. */
    export interface StatSyncOptions extends StatOptions {
        throwIfNoEntry?: boolean | undefined;
    }
    interface CopyOptionsBase {
        /**
         * Dereference symlinks
         * @default false
         */
        dereference?: boolean;
        /**
         * When `force` is `false`, and the destination
         * exists, throw an error.
         * @default false
         */
        errorOnExist?: boolean;
        /**
         * Overwrite existing file or directory. The copy
         * operation will ignore errors if you set this to false and the destination
         * exists. Use the `errorOnExist` option to change this behavior.
         * @default true
         */
        force?: boolean;
        /**
         * When `true` timestamps from `src` will
         * be preserved.
         * @default false
         */
        preserveTimestamps?: boolean;
        /**
         * Copy directories recursively.
         * @default false
         */
        recursive?: boolean;
        /**
         * When true, path resolution for symlinks will be skipped
         * @default false
         */
        verbatimSymlinks?: boolean;
    }
    /** Options for the asynchronous {@link cp}; the filter may be async. */
    export interface CopyOptions extends CopyOptionsBase {
        /**
         * Function to filter copied files/directories. Return
         * `true` to copy the item, `false` to ignore it.
         */
        filter?(source: string, destination: string): boolean | Promise<boolean>;
    }
    /** Options for {@link cpSync}; the filter must be synchronous. */
    export interface CopySyncOptions extends CopyOptionsBase {
        /**
         * Function to filter copied files/directories. Return
         * `true` to copy the item, `false` to ignore it.
         */
        filter?(source: string, destination: string): boolean;
    }
    /**
     * Asynchronously copies the entire directory structure from `src` to `dest`,
     * including subdirectories and files.
     *
     * When copying a directory to another directory, globs are not supported and
     * behavior is similar to `cp dir1/ dir2/`.
     * @since v16.7.0
     * @experimental
     * @param source source path to copy.
     * @param destination destination path to copy to.
     */
    export function cp(source: string | URL, destination: string | URL, callback: (err: NodeJS.ErrnoException | null) => void): void;
    export function cp(source: string | URL, destination: string | URL, opts: CopyOptions, callback: (err: NodeJS.ErrnoException | null) => void): void;
    /**
     * Synchronously copies the entire directory structure from `src` to `dest`,
     * including subdirectories and files.
     *
     * When copying a directory to another directory, globs are not supported and
     * behavior is similar to `cp dir1/ dir2/`.
     * @since v16.7.0
     * @experimental
     * @param source source path to copy.
     * @param destination destination path to copy to.
     */
    export function cpSync(source: string | URL, destination: string | URL, opts?: CopySyncOptions): void;
}
declare module 'node:fs' {
    // Re-export everything so `node:fs` and `fs` specifiers are interchangeable.
    export * from 'fs';
}
/ICPOptimize-2.4.tar.gz/ICPOptimize-2.4/ICP/ClosurePool.py | from math import inf
from threading import Thread
from queue import Empty, Queue
def RunAndEnqueue(Q, Fx, slot, args=(), kwargs=None):
    """Run Fx(*args, **kwargs) and put its result on Q, tagged with slot.

    Used as a Thread target so a worker closure can hand its return value
    back to the pool that launched it.

    :param Q: queue.Queue the (slot, result) tuple is put on.
    :param Fx: callable to execute.
    :param slot: opaque slot index identifying the worker; enqueued with the result.
    :param args: positional arguments for Fx.
    :param kwargs: keyword arguments for Fx; None means no keyword arguments.
    """
    # Fixed: `kwargs={}` was a shared mutable default argument; default to
    # None and substitute a fresh dict per call instead.
    if kwargs is None:
        kwargs = {}
    rv = Fx(*args, **kwargs)
    Q.put((slot, rv))
# Class for running a thread pool in which each worker has state associated with it
# in the form of a closure
class ClosurePool:
    """Pool of stateful workers (closures), each run on its own thread.

    Each worker occupies a fixed slot; completed results arrive on an internal
    queue and are handed back as ``(key, result)`` tuples, where ``key`` is the
    value supplied to :meth:`Start`.
    """

    def __str__(self):
        return '{}({})'.format(type(self).__name__, self.nThrd)

    def __repr__(self):
        return self.__str__()

    # Init the ClosurePool. Expects an iterable of closures that will be used as workers
    def __init__(self, WL):
        self.WL = list(WL)
        # Fixed: measure the materialized copy, not the original argument, so
        # any iterable (e.g. a generator) is accepted, not just sized sequences.
        self.nThrd = len(self.WL)
        self.kArr = [None for _ in self.WL]   # result key per slot
        self.nRun = 0                         # number of in-flight workers
        self.Q = Queue()                      # (slot, result) completion queue
        self.slot = 0                         # next slot probed for reuse
        self.thrd = [None for _ in self.WL]   # Thread per slot (None == free)

    def Full(self):
        """Return True when every worker slot is busy."""
        return self.nRun >= self.nThrd

    def Get(self, default=None, wait=False):
        """Fetch one completed result.

        :param default: value returned (with key None) when nothing is ready.
        :param wait: block until a result is available when True.
        :return: (key, result) of a finished worker, or (None, default).
        """
        if self.nRun <= 0:
            return None, default
        try:
            self.slot, rv = self.Q.get(wait)
            self.thrd[self.slot] = None
            self.nRun -= 1
            return self.kArr[self.slot], rv
        except Empty:
            pass
        return None, default

    def GetAll(self, n=inf, wait=True):
        """Yield up to n completed (key, result) pairs.

        :param wait: block for each outstanding result when True; otherwise
            stop as soon as the queue is momentarily empty.
        """
        while (self.nRun > 0) and (n > 0):
            try:
                self.slot, rv = self.Q.get(wait)
                self.thrd[self.slot] = None
                self.nRun -= 1
                yield self.kArr[self.slot], rv
            except Empty:
                break
            n -= 1

    # Launch another thread with return value associated with a specified key
    def Start(self, key=None, args=(), kwargs=None):
        """Start a free worker with the given arguments.

        :return: True when a worker was launched, False when the pool is full.
        """
        if self.nRun >= self.nThrd:
            return False
        # Fixed: `kwargs={}` was a shared mutable default argument.
        if kwargs is None:
            kwargs = {}
        # Find empty slot (one must exist because nRun < nThrd here).
        while self.thrd[self.slot] is not None:
            self.slot = (self.slot + 1) % self.nThrd
        self.thrd[self.slot] = Thread(target=RunAndEnqueue, args=(
            self.Q, self.WL[self.slot], self.slot, args, kwargs))
        self.thrd[self.slot].start()
        self.kArr[self.slot] = key
        self.nRun += 1
        return True
# Class with the same interface as ClosurePool but for single threaded behavior
class DummyPool:
    """Drop-in, single-threaded stand-in for ClosurePool.

    :meth:`Start` runs the (single) worker synchronously and buffers its
    result; :meth:`Get` / :meth:`GetAll` hand it back as (key, result).
    """

    def __str__(self):
        return '{}()'.format(type(self).__name__)

    def __repr__(self):
        return self.__str__()

    # Expects a list of exactly 1 closure
    def __init__(self, WL):
        self.WL = WL[0]        # the single worker closure
        self.res = None        # buffered result of the last Start()
        self.key = None        # key associated with the buffered result
        self._pending = False  # True while a Start() result awaits pickup

    def Full(self):
        """A synchronous pool is never saturated."""
        return False

    def Get(self, default=None, wait=False):
        """Return (key, result) of the last Start(), or (None, default).

        Fixed two divergences from ClosurePool.Get: the `default` argument
        was previously ignored (always returning None), and a result started
        with key=None could not be distinguished from "nothing pending".
        """
        if not self._pending:
            return None, default
        k, r = self.key, self.res
        self.key = self.res = None
        self._pending = False
        return k, r

    def GetAll(self, n=inf, wait=True):
        """Yield the buffered (key, result) pair, if any."""
        # Fixed: the pending check used `self.key is not None`, which silently
        # dropped results whose key was None (the Start() default).
        if self._pending:
            k, r = self.key, self.res
            self.key = self.res = None
            self._pending = False
            yield k, r  # Provide same unpacking behavior

    def Start(self, key=None, args=(), kwargs=None):
        """Run the worker synchronously and buffer its result under `key`."""
        # Fixed: `kwargs={}` was a shared mutable default argument.
        if kwargs is None:
            kwargs = {}
        self.res = self.WL(*args, **kwargs)  # Run on same thread
        self.key = key
        self._pending = True
        return True
/BAxUS-0.0.8.tar.gz/BAxUS-0.0.8/baxus/util/projections.py | from abc import ABC
from copy import deepcopy
from logging import warning, info, debug
from typing import Optional, Dict, List
import numpy as np
from numpy.random import RandomState
from baxus.util.behaviors.embedding_configuration import EmbeddingType
from baxus.util.exceptions import OutOfBoundsException, UnknownBehaviorError
class ProjectionModel(ABC):
    """Abstract interface for mappings between a low-dimensional target space
    and a high-dimensional input (ambient) space.

    Concrete subclasses must provide both directions of the mapping.
    """

    def project_up(self, Y: np.ndarray) -> np.ndarray:
        """Map points from the target space up into the input space."""
        raise NotImplementedError()

    def project_down(self, X: np.ndarray) -> np.ndarray:
        """Map points from the input space down into the target space."""
        raise NotImplementedError()
class IdentityProjector(ProjectionModel):
    """Trivial projection model where target and input space coincide.

    ``lb`` and ``ub`` record the (shared) lower and upper bounds of the space.
    """

    def __init__(self, lb, ub):
        self.lb = lb  # lower bound of the search space
        self.ub = ub  # upper bound of the search space

    def project_up(self, Y: np.ndarray) -> np.ndarray:
        """Identity mapping: return ``Y`` unchanged."""
        return Y

    def project_down(self, X: np.ndarray) -> np.ndarray:
        """Identity mapping: return ``X`` unchanged."""
        return X
class AxUS(ProjectionModel):
    """
    AxUS embedding: partitions the input dimensions into target-dimension bins,
    each input dimension contributing with a random sign.
    Also support HeSBO embedding by choosing RANDOM bin sizing.
    """

    def __init__(
        self,
        input_dim: int,
        target_dim: int,
        seed: Optional[int] = None,
        bin_sizing=EmbeddingType.BAXUS,
    ):
        """
        :param input_dim: dimensionality of the ambient (input) space
        :param target_dim: dimensionality of the embedded (target) space
        :param seed: seed for this embedding's private RandomState
        :param bin_sizing: EmbeddingType.BAXUS (uniform bins) or EmbeddingType.HESBO (random bins)
        """
        self.seed = seed
        self.target_dim: int = target_dim
        self.input_dim: int = input_dim
        self.bin_sizing = bin_sizing
        self._S = None
        self._random_state = RandomState(self.seed)
        self._reset()

    def _target_to_input_dim(
        self, input_to_target_dim_h: Dict[int, int]
    ) -> Dict[int, List[int]]:
        """
        Revert the input to target dim mapping
        :param input_to_target_dim_h: mapping input dim -> target dim
        :return: the target to input dim mapping (target dim -> list of input dims)
        """
        input_to_target_dim_h = deepcopy(input_to_target_dim_h)
        target_to_input_dim: Dict[int, List[int]] = {
            i: [] for i in range(self.target_dim)
        }
        for k, v in input_to_target_dim_h.items():
            target_to_input_dim[v].append(k)
        return target_to_input_dim

    def _input_to_target_dim(
        self, target_to_input_dim: Dict[int, List[int]]
    ) -> Dict[int, int]:
        """
        Revert the target to input dim mapping
        :param target_to_input_dim: mapping target dim -> list of input dims
        :return: the input to target dim mapping (input dim -> its target dim)
        """
        target_to_input_dim = deepcopy(target_to_input_dim)
        # For each input dim, find the (unique) target bin that contains it.
        input_to_target_dim = {
            i: [k for k, v in target_to_input_dim.items() if i in v][0]
            for i in range(self.input_dim)
        }
        return input_to_target_dim

    def _reset(self):
        """
        Reset the AxUS embedding. Sample a new AxUS embedding.
        :return:
        """
        if self.target_dim > self.input_dim:
            warning(
                "HeSBO: Got a target dim larger than the input dim. Setting target dim to input dim."
            )
            self.target_dim = self.input_dim
        if self.target_dim == self.input_dim:
            info("HeSBO: Target dim = input dim. Using identity mapping.")
            _input_to_target_dim_h: Dict[int, int] = {
                i: i for i in range(self.input_dim)
            }
        else:
            if self.bin_sizing == EmbeddingType.BAXUS:
                debug("Creating uniform HeSBO embedding.")
                # NOTE(review): this uses the global np.random, not
                # self._random_state, so the permutation is NOT controlled by
                # `seed` -- confirm whether that is intended.
                input_dim_permutation = np.random.permutation(
                    list(range(self.input_dim))
                )
                # Uniform bins: near-equal-sized contiguous chunks of the permutation.
                input_dim_bins = np.array_split(input_dim_permutation, self.target_dim)
                _target_to_input_dim_h: Dict[int, List[int]] = {
                    input_dim_nr: input_dim_bin
                    for input_dim_nr, input_dim_bin in enumerate(input_dim_bins)
                }
                _input_to_target_dim_h = self._input_to_target_dim(
                    _target_to_input_dim_h
                )
            elif self.bin_sizing == EmbeddingType.HESBO:
                debug("Creating random HeSBO embedding.")
                # HeSBO bins: each input dim picks its target bin independently.
                _input_to_target_dim_h: Dict[int, int] = {
                    i: self._random_state.choice(list(range(self.target_dim)))
                    for i in range(self.input_dim)
                }
            else:
                raise UnknownBehaviorError(
                    f"No such HeSBO bin-sizing behavior: {self.bin_sizing}"
                )
        self.target_to_input_dim: Dict[int, List[int]] = self._target_to_input_dim(
            _input_to_target_dim_h
        )
        # Random sign (+1/-1) per input dimension.
        self.input_dim_to_sign_sigma: Dict[int, int] = {
            i: int(self._random_state.choice([1, -1])) for i in range(self.input_dim)
        }
        self.S_prime: np.ndarray = self._compute_proj_mtrx(
            target_dim=self.target_dim,
            input_dim=self.input_dim,
            input_dim_to_sign_sigma=self.input_dim_to_sign_sigma,
            target_to_input_dim=self.target_to_input_dim,
        )
        self._S = None

    @staticmethod
    def _compute_proj_mtrx(
        target_dim: int,
        input_dim: int,
        input_dim_to_sign_sigma: Dict[int, int],
        target_to_input_dim: Dict[int, List[int]],
    ) -> np.ndarray:
        """
        Compute the projection matrix S', mapping from ambient to the target space.
        Entry (j, i) is sigma_j when input dim j belongs to target bin i, else 0.
        :param target_dim: dimensionality of the target space
        :param input_dim: dimensionality of the input space
        :param input_dim_to_sign_sigma: sign (+1/-1) per input dimension
        :param target_to_input_dim: target dim -> contributing input dims
        :return: S' with shape (input_dim, target_dim)
        """
        rows = []
        for i in range(target_dim):
            rows.append(
                [
                    input_dim_to_sign_sigma[j] if j in target_to_input_dim[i] else 0
                    for j in range(input_dim)
                ]
            )
        return np.array(rows, dtype=np.float32).T

    @property
    def S(self) -> np.ndarray:
        # Down-projection matrix, shape (target_dim, input_dim).
        return self.S_prime.T

    @property
    def input_to_target_dim(self) -> Dict[int, int]:
        # Derived view of target_to_input_dim: input dim -> its target bin.
        d = {}
        for k, v in self.target_to_input_dim.items():
            for x in v:
                d[x] = k
        return d

    def project_down(self, X: np.ndarray) -> np.ndarray:
        """
        Project one or multiple points from the ambient into the target space.
        :param X: Points in the ambient space; X.shape[0] must equal input_dim
            (points are stored along the trailing axis, see the assert below).
        :return: numpy array with leading dimension target_dim
        :raises OutOfBoundsException: if any coordinate lies outside [-1, 1]
        """
        X = np.array(X)
        assert len(X.shape) <= 2
        assert X.shape[0] == self.input_dim
        if not -1 <= X.min() <= X.max() <= 1:
            raise OutOfBoundsException()
        return self.S @ X

    def project_up(self, Y: np.ndarray) -> np.ndarray:
        """
        Project one or multiple points from the target into the ambient space.
        :param Y: Points in the target space; Y.shape[0] must equal target_dim
            (points are stored along the trailing axis, see the assert below).
        :return: numpy array with leading dimension input_dim
        :raises OutOfBoundsException: if any coordinate lies outside [-1, 1]
        """
        Y = np.array(Y)
        assert len(Y.shape) <= 2
        assert Y.shape[0] == self.target_dim
        if not -1 <= Y.min() <= Y.max() <= 1:
            raise OutOfBoundsException()
        return self.S_prime @ Y

    def contributing_dimensions(self, target_dimension: int):
        """
        Returns the dimensions in the ambient space that contribute to a target dimension.
        :param target_dimension: the target dimension for which to return the contributing input dimensions
        :return: the input dimensions contributing to the target dimension
        """
        return self.target_to_input_dim[target_dimension]

    def increase_target_dimensionality(self, dims_and_bins: Dict[int, int]):
        """
        Split up target dimensions. The contributing input dimensions of each split
        dimension are randomly distributed over n new bins: one bin stays in the
        current target dimension, the others become new target dimensions.
        Therefore, the target dimensionality grows by (bins - 1) per split
        dimension. The projection matrix changes by this!
        The affected target dimension and the new dimensions each keep only a
        fraction of the contributing input dimensions of the original.
        :param dims_and_bins: mapping of target dimension to split -> number of bins to split it into
        :return: None
        """
        dims = list(dims_and_bins.keys())
        dims_and_contributing_input_dims = {
            i: deepcopy(self.contributing_dimensions(i)) for i in dims
        }
        # contributing_input_dims: np.ndarray = deepcopy(
        #    self.contributing_dimensions(splitting_target_dim)
        # )
        for d in dims:
            assert len(dims_and_contributing_input_dims[d]) >= dims_and_bins[d], (
                f"Only {len(dims_and_contributing_input_dims[d])} contributing input dimensions but want to split "
                f"into {dims_and_bins[d]} new bins"
            )
        for splitting_target_dim, n_new_bins in dims_and_bins.items():
            self.target_dim += n_new_bins - 1  # one bin is in the current dim
            contributing_input_dims = dims_and_contributing_input_dims[
                splitting_target_dim
            ]
            bins: List[np.ndarray] = []
            for b in range(n_new_bins):
                if b < n_new_bins - 1:
                    # Draw an equal-sized bin without replacement...
                    bin: np.ndarray = self._random_state.choice(
                        contributing_input_dims,
                        size=len(self.contributing_dimensions(splitting_target_dim))
                        // n_new_bins,
                        replace=False,
                    )
                    contributing_input_dims = np.setdiff1d(contributing_input_dims, bin)
                else:
                    # ...and give the remainder to the last bin.
                    bin: np.ndarray = contributing_input_dims
                bins.append(bin)
            # First bin stays in the split dimension; the rest claim the newly
            # appended target dimensions (filled from the back).
            self.target_to_input_dim[splitting_target_dim] = bins[0].tolist()
            for i, b in enumerate(bins[1:]):
                self.target_to_input_dim[self.target_dim - i - 1] = b.tolist()
        # re-compute S'
        S_prime_new = self._compute_proj_mtrx(
            target_dim=self.target_dim,
            input_dim=self.input_dim,
            input_dim_to_sign_sigma=self.input_dim_to_sign_sigma,
            target_to_input_dim=self.target_to_input_dim,
        )
        self.S_prime = S_prime_new

    def merge_dims(self, d1: int, d2: int):
        """
        Merge two target dimensions: their contributing input dimensions are
        combined into a single (new, last) target dimension, and all remaining
        target dimensions are renumbered to stay contiguous. The projection
        matrix is recomputed.
        :param d1: first target dimension to merge
        :param d2: second target dimension to merge
        :return: None
        """
        self.target_dim -= 1
        contrib_b1 = self.contributing_dimensions(d1)
        contrib_b2 = self.contributing_dimensions(d2)
        all_contrib = contrib_b1 + contrib_b2
        tds = self.target_to_input_dim.keys()
        # Renumber: dims below both stay, dims between the two shift by 1,
        # dims above both shift by 2 (d1 and d2 vanish from the numbering).
        dims_that_stay = [d for d in tds if d < min(d1, d2)]
        dims_minus_1 = [d for d in tds if min(d1, d2) < d < max(d1, d2)]
        dims_minus_2 = [d for d in tds if d > max(d1, d2)]
        new_target_to_input_dim = (
            {d: self.target_to_input_dim[d] for d in dims_that_stay}
            | {d - 1: self.target_to_input_dim[d] for d in dims_minus_1}
            | {d - 2: self.target_to_input_dim[d] for d in dims_minus_2}
        )
        # The merged bin becomes the new highest-numbered target dimension.
        max_td = max(new_target_to_input_dim.keys())
        new_target_to_input_dim[max_td + 1] = all_contrib
        self.target_to_input_dim = new_target_to_input_dim
        S_prime_new = self._compute_proj_mtrx(
            target_dim=self.target_dim,
            input_dim=self.input_dim,
            input_dim_to_sign_sigma=self.input_dim_to_sign_sigma,
            target_to_input_dim=self.target_to_input_dim,
        )
        self.S_prime = S_prime_new
/Lowdown-0.2.1.tar.gz/Lowdown-0.2.1/doc/release.rst | ..
:copyright: Copyright (c) 2014 ftrack
########
Releases
########
.. release:: 0.2.1
:date: 2021-04-21
.. change:: change
Limit docutils to < 0.17 to avoid breaking with sphinx_rtd_theme.
.. release:: 0.2.0
:date: 2020-06-10
.. change:: new
Provide support for python3.
.. release:: 0.1.3
:date: 2020-02-12
.. change:: fixed
Copy stylesheet breaks on python3.
.. release:: 0.1.2
:date: 2017-04-28
.. change:: fixed
        Lists are not rendered properly when included in a change directive.
.. release:: 0.1.1
:date: 2015-05-30
.. change:: change
:tags: documentation
Update documentation to reflect that package is now installable from
PyPi.
.. release:: 0.1.0
:date: 2015-01-06
.. change:: new
Initial release.
Includes support for specifying releases with specific changes listed.
Each change supports tags as well as a main category and changeset link.
| PypiClean |
/NativDebugging-35.tar.gz/NativDebugging-35/src/Patterns/PE.py | from .Finder import *
# Resources:
# winnt.h
# corkami.com
# Wikipedia
# Microsoft docs http://download.microsoft.com/download/9/c/5/9c5b2167-8017-4bae-9fde-d599bac8184a/pecoff_v8.docx
# http://www.csn.ul.ie/~caolan/publink/winresdump/winresdump/doc/pefile.html
# IMAGE_FILE_HEADER.Machine values (IMAGE_FILE_MACHINE_* from winnt.h).
VALID_MACHINE_TYPES = {
    0x014c : "I386",
    0x0162 : "R3000",
    0x0166 : "R4000",
    0x0168 : "R10000",
    0x0169 : "WCEMIPSV2",
    0x0184 : "ALPHA",
    0x01a2 : "SH3",
    0x01a3 : "SH3DSP",
    0x01a4 : "SH3E",
    0x01a6 : "SH4",
    0x01a8 : "SH5",
    0x01c0 : "ARM",
    0x01c2 : "THUMB",
    0x01c4 : "ARMNT",
    0x01d3 : "AM33",
    0x01F0 : "POWERPC",
    0x01f1 : "POWERPCFP",
    0x0200 : "IA64",
    0x0266 : "MIPS16",
    0x0284 : "ALPHA64",
    0x0366 : "MIPSFPU",
    0x0466 : "MIPSFPU16",
    0x0520 : "TRICORE",
    0x0CEF : "CEF",
    0x0EBC : "EBC",
    0x8664 : "AMD64",
    0x9041 : "M32R",
    0xC0EE : "CEE" }
# Section alignment flag values (the IMAGE_SCN_ALIGN_* bits of the section
# Characteristics field).
# NOTE(review): the name misspells "ALIGNMENTS"; kept as-is because external
# code may import it by this name.
VALID_SECTION_ALGINMENTS = {
    0x00100000: "1BYTES",
    0x00200000: "2BYTES",
    0x00300000: "4BYTES",
    0x00400000: "8BYTES",
    0x00500000: "16BYTES",
    0x00600000: "32BYTES",
    0x00700000: "64BYTES" }
# Optional-header magic values distinguishing 32-bit (PE32) from 64-bit (PE32+).
PE32_MAGIC = 0x010b
PE32P_MAGIC = 0x020b
VALID_PE_FORMATS = {
    PE32_MAGIC: "PE32",
    PE32P_MAGIC: "PE32P",
    0x0107: "ROM" }
# IMAGE_OPTIONAL_HEADER.Subsystem values.
WINDOWS_SUBSYSTEMS = {
    0 : "IMAGE_SUBSYSTEM_UNKNOWN",
    1 : "IMAGE_SUBSYSTEM_NATIVE",
    2 : "IMAGE_SUBSYSTEM_WINDOWS_GUI",
    3 : "IMAGE_SUBSYSTEM_WINDOWS_CUI",
    7 : "IMAGE_SUBSYSTEM_POSIX_CUI",
    9 : "IMAGE_SUBSYSTEM_WINDOWS_CE_GUI",
    10 : "IMAGE_SUBSYSTEM_EFI_APPLICATION",
    11 : "IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER",
    12 : "IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER",
    13 : "IMAGE_SUBSYSTEM_EFI_ROM",
    14 : "IMAGE_SUBSYSTEM_XBOX" }
# IMAGE_OPTIONAL_HEADER.DllCharacteristics bit flags.
DLL_CHARACTERISTICS_FALGS = {
    0x0020 : "HIGH_ENTROPY_VA",
    0x0040 : "DYNAMIC_BASE",
    0x0080 : "FORCE_INTEGRITY",
    0x0100 : "NX_COMPAT",
    0x0200 : "NO_ISOLATION",
    0x0400 : "NO_SEH",
    0x0800 : "NO_BIND",
    0x1000 : "APPCONTAINER",
    0x2000 : "WDM_DRIVER",
    0x4000 : "GUARD_CF",
    0x8000 : "TERMINAL_SERVER_AWARE" }
# IMAGE_FILE_HEADER (COFF file header) layout.
ImageFileHeader = [
    SHAPE("Machine", 0, WORD(list(VALID_MACHINE_TYPES.keys()))),
    SHAPE("NumberOfSections", 0, WORD()),
    SHAPE("TimeDateStamp", 0, CTIME()),
    SHAPE("PointerToSymTable", 0, DWORD()),
    SHAPE("NumberOfSymbols", 0, DWORD()),
    SHAPE("OptionalHeaderSize", 0, WORD()),
    SHAPE("Characteristics", 0, WORD()) ]
# IMAGE_SECTION_HEADER layout (one per section in the section table).
ImageSectionHeader = [
    SHAPE("Name", 0, BUFFER(size=8)),
    SHAPE("VirtualSize", 0, DWORD()),
    SHAPE("VirtualAddress", 0, DWORD()),
    SHAPE("RawDataSize", 0, DWORD()),
    SHAPE("PointerToRawData", 0, DWORD()),
    SHAPE("PointerToRelocations", 0, DWORD()),
    SHAPE("PointerToLinenumbers", 0, DWORD()),
    SHAPE("NumberOfRelocations", 0, WORD()),
    SHAPE("NumberOfLinenumbers", 0, WORD()),
    SHAPE("Characteristics", 0, DWORD()) ]
# IMAGE_DATA_DIRECTORY: an (RVA, size) pair in the optional header.
ImageDataDirectory = [
    SHAPE("VirtualAddress", 0, DWORD()),
    SHAPE("Size", 0, DWORD()) ]
# IMAGE_EXPORT_DIRECTORY: the export table header.
ImageExportDirectory = [
    SHAPE("Characteristics", 0, DWORD()),
    SHAPE("TimeDateStamp", 0, CTIME()),
    SHAPE("MajorVersion", 0, WORD()),
    SHAPE("MinorVersion", 0, WORD()),
    SHAPE("Name", 0, DWORD()),
    SHAPE("Base", 0, DWORD()),
    SHAPE("NumberOfFunctions", 0, DWORD()),
    SHAPE("NumberOfNames", 0, DWORD()),
    SHAPE("FunctionsAddress", 0, DWORD()),
    SHAPE("NamesAddress", 0, DWORD()),
    SHAPE("NameOrdinalsAddress", 0, DWORD())
    ]
# IMAGE_IMPORT_DESCRIPTOR: one entry per imported DLL.
ImageImportDescriptor = [
    SHAPE("Characteristics", 0, DWORD()),
    SHAPE("TimeDateStamp", 0, CTIME()),
    SHAPE("ForwarderChain", 0, DWORD()),
    SHAPE("Name", 0, DWORD()),
    SHAPE("FirstThunk", 0, DWORD())
    ]
# IMAGE_DEBUG_DIRECTORY: debug information location and type.
ImageDebugDirectory = [
    SHAPE("Characteristics", 0, DWORD()),
    SHAPE("TimeDateStamp", 0, CTIME()),
    SHAPE("MajorVersion", 0, WORD()),
    SHAPE("MinorVersion", 0, WORD()),
    SHAPE("Type", 0, DWORD()),
    SHAPE("DataSize", 0, DWORD()),
    SHAPE("AddrOfRawData", 0, DWORD()),
    SHAPE("PointerToRawData", 0, DWORD()) ]
# Length-prefixed UTF-16 string used for named resource entries.
ResourceDirectoryString = [
    SHAPE("Length", 0, WORD()),
    SHAPE("Data", 0, STRING(size="Length", isUnicode=True)) ]
# IMAGE_RESOURCE_DATA_ENTRY: location/size of one resource's raw data.
ResourceDataEntry = [
    SHAPE("DataRVA", 0, DWORD()),
    SHAPE("DataEntrySize", 0, DWORD()),
    SHAPE("Codepage", 0, DWORD()),
    SHAPE("Reserved", 0, DWORD(0)) ]
# Resource directory entry identified by name. The top bit of DataEntryRVA
# distinguishes a subdirectory (set) from a data entry (clear).
ResourceDirectoryNameEntry = [
    SHAPE("NameRVA", 0, DWORD()),
    SHAPE("DataEntryRVA", 0, DWORD()),
    ASSIGN("isDataEntry", lambda pf, ctx: 0 == (ctx.DataEntryRVA & 0x80000000)),
    ASSIGN("subdirectoryRVA", lambda pf, ctx: ctx.DataEntryRVA & 0x7fffffff) ]
# Resource directory entry identified by integer ID; same high-bit convention.
ResourceDirectoryIdEntry = [
    SHAPE("Id", 0, DWORD()),
    SHAPE("DataEntryRVA", 0, DWORD()),
    ASSIGN("isDataEntry", lambda pf, ctx: 0 == (ctx.DataEntryRVA & 0x80000000)),
    ASSIGN("subdirectoryRVA", lambda pf, ctx: ctx.DataEntryRVA & 0x7fffffff) ]
# IMAGE_RESOURCE_DIRECTORY followed by its named and ID entry arrays.
ImageResourceDirectory = [
    SHAPE("Characteristics", 0, DWORD()),
    SHAPE("TimeDateStamp", 0, CTIME()),
    SHAPE("MajorVersion", 0, WORD()),
    SHAPE("MinorVersion", 0, WORD()),
    SHAPE("NumOfNamedEntries", 0, WORD()),
    SHAPE("NumOfIdEntries", 0, WORD()),
    SHAPE("NamedEntries", 0,
        ARRAY("NumOfNamedEntries", STRUCT, (ResourceDirectoryNameEntry,))),
    SHAPE("IdEntries", 0,
        ARRAY("NumOfIdEntries", STRUCT, (ResourceDirectoryIdEntry,)))]
# Well-known resource type IDs (RT_* values).
RESOURCE_TYPES = {
    1 : "CURSOR",
    2 : "BITMAP",
    3 : "ICON",
    4 : "MENU",
    5 : "DIALOG",
    6 : "STRING",
    7 : "FONTDIR",
    8 : "FONT",
    9 : "ACCELERATOR",
    10 : "RCDATA",
    11 : "MESSAGETABLE",
    16 : "VERSION" }
# VS_VERSIONINFO pseudo-structure (fixed header of a VERSION resource).
ResourceVersionInfo = [
    SHAPE("VersionLength", 0, WORD()),
    SHAPE("ValueLength", 0, WORD()),
    SHAPE("dataType", 0, WORD([0,1])),
    SHAPE("VsVersionInfoStr", 0, STRING(fixedValue="VS_VERSION_INFO", isUnicode=True)),
    SHAPE("Algin", 0, DWORD(0)),
    SHAPE("Vs_FixedFileInfo", 0, DWORD(0xfeef04bd)) ]
def getAllResData(pe, offset=0, isDir=True):
    """Recursively walk a PE resource tree and print/dump every resource.

    :param pe: parsed NT-headers pattern result; its .Sections and
        .OptionalHeader members are consulted.
    :param offset: offset of the current directory/data entry relative to the
        start of the .rsrc section's raw data.
    :param isDir: True when `offset` points at a resource directory, False
        when it points at a resource data entry.

    NOTE(review): relies on the globals `p` (pattern finder) and `m` (memory
    reader) plus DATA() being defined by the calling environment -- confirm
    before use.
    """
    # Locate the raw-file offset of the resource section (.rsrc).
    resAddr = None
    for item in pe.Sections:
        item = item.Item
        if item.Name.startswith(b'.rsrc\x00'):
            resAddr = item.PointerToRawData
    if None == resAddr:
        raise Exception("Can't find resources data")
    if isDir:
        res = next(p.search(ImageResourceDirectory, resAddr + offset))
    else:
        # Leaf: a data entry describing the resource's raw bytes.
        res = next(p.search(ResourceDataEntry, resAddr + offset))
        print(res)
        # NOTE(review): `addr` (the RVA translated to a raw-file offset) is
        # computed but the read below still uses the raw RVA -- looks like it
        # should read from `addr`; confirm against a real image before changing.
        addr = res.DataRVA - pe.OptionalHeader.ResDir.VirtualAddress + resAddr
        data = m.readMemory(res.DataRVA, res.DataEntrySize)
        print(DATA(data))
        print('+' * 20)
        return
    print(res)
    print('-' * 20)
    # BUGFIX: the recursive calls used to pass (resAddr, pe, offset, flag) --
    # four positional arguments that do not match this signature and raised
    # TypeError on every recursion.
    for i, item in enumerate(res.NamedEntries):
        print(i, '.')
        item = item.Item
        # A set high bit (isDataEntry False) means the entry is a subdirectory.
        getAllResData(pe, item.subdirectoryRVA, not item.isDataEntry)
    for i, item in enumerate(res.IdEntries):
        print(i, '.')
        item = item.Item
        getAllResData(pe, item.subdirectoryRVA, not item.isDataEntry)
# IMAGE_OPTIONAL_HEADER (PE32 and PE32+): field widths that differ between the
# two formats are handled via SWITCH on the Magic value.
ImageOptionalHeader = [
    SHAPE("Magic", 0, WORD(list(VALID_PE_FORMATS.keys()))),
    SHAPE("MajorLinkerVersion", 0, BYTE()),
    SHAPE("MinorLinkerVersion", 0, BYTE()),
    SHAPE("CodeSize", 0, DWORD()),
    SHAPE("InitializedDataSize", 0, DWORD()),
    SHAPE("UninitializedDataSize", 0, DWORD()),
    SHAPE("EntryPointAddress", 0, DWORD()),
    SHAPE("BaseOfCode", 0, DWORD()),
    # PE32 has a 32-bit BaseOfData + 32-bit ImageBase; PE32+ drops BaseOfData
    # and widens ImageBase to 64 bits.
    SHAPE("BaseOfDataImageBase", 0, SWITCH( "Magic",
        {
            PE32_MAGIC : [
                SHAPE("BaseOfData", 0, DWORD()),
                SHAPE("ImageBase", 0, DWORD()) ],
            PE32P_MAGIC : [
                SHAPE("ImageBase", 0, QWORD()) ],
            "default" : [
                SHAPE("ImageBase", 0, QWORD()) ] }) ),
    SHAPE("SectionAlignment", 0, DWORD()), #list(VALID_SECTION_ALGINMENTS.keys()))),
    SHAPE("FileAlignment", 0, DWORD()),
    SHAPE("MajorOSVersion", 0, WORD()),
    SHAPE("MinorOSVersion", 0, WORD()),
    SHAPE("MajorImageVer", 0, WORD()),
    SHAPE("MinorImageVer", 0, WORD()),
    SHAPE("MajorSubsystemVer", 0, WORD()),
    SHAPE("MinorSubsystemVer", 0, WORD()),
    SHAPE("Win32VersionValue", 0, DWORD()),
    SHAPE("ImageSize", 0, DWORD()),
    SHAPE("HeadersSize", 0, DWORD()),
    SHAPE("CheckSum", 0, DWORD()),
    SHAPE("Subsystem", 0, WORD(list(WINDOWS_SUBSYSTEMS.keys()))),
    SHAPE("DllCharacteristics", 0, FLAGS(DLL_CHARACTERISTICS_FALGS, size=2)),
    # Stack/heap reserve+commit sizes are 32-bit in PE32, 64-bit in PE32+.
    # NOTE(review): unlike the SWITCH above, this one has no "default" branch
    # -- confirm how SWITCH behaves for an unmatched Magic value.
    SHAPE("Stack", 0, SWITCH( lambda ctx: ctx.Magic,
        {
            PE32_MAGIC : [
                SHAPE("StackReserveSize", 0, DWORD()),
                SHAPE("StackCommitSize", 0, DWORD()),
                SHAPE("HeapReserveSize", 0, DWORD()),
                SHAPE("HeapCommitSize", 0, DWORD()) ],
            PE32P_MAGIC : [
                SHAPE("StackReserveSize", 0, QWORD()),
                SHAPE("StackCommitSize", 0, QWORD()),
                SHAPE("HeapReserveSize", 0, QWORD()),
                SHAPE("HeapCommitSize", 0, QWORD()) ]
        }) ),
    SHAPE("LoaderFlags", 0, DWORD(0)),
    SHAPE("NumOfRvaAndSizes", 0, DWORD()),
    # The 16 standard data directories, in their defined order.
    SHAPE("ExportDir", 0, STRUCT(ImageDataDirectory)),
    SHAPE("ImportDir", 0, STRUCT(ImageDataDirectory)),
    SHAPE("ResDir", 0, STRUCT(ImageDataDirectory)),
    SHAPE("ExceptionDir", 0, STRUCT(ImageDataDirectory)),
    SHAPE("SecurityDir", 0, STRUCT(ImageDataDirectory)),
    SHAPE("BaserelocDir", 0, STRUCT(ImageDataDirectory)),
    SHAPE("DebugDir", 0, STRUCT(ImageDataDirectory)),
    SHAPE("ArchDir", 0, STRUCT(ImageDataDirectory)),
    SHAPE("GlobalsDir", 0, STRUCT(ImageDataDirectory)),
    SHAPE("TLSDir", 0, STRUCT(ImageDataDirectory)),
    SHAPE("LoadConfDir", 0, STRUCT(ImageDataDirectory)),
    SHAPE("BoundImportDir", 0, STRUCT(ImageDataDirectory)),
    SHAPE("IATDir", 0, STRUCT(ImageDataDirectory)),
    SHAPE("DelayImportDir", 0, STRUCT(ImageDataDirectory)),
    SHAPE("CLRRuntimeDir", 0, STRUCT(ImageDataDirectory)),
    SHAPE("ReservedDir", 0, STRUCT(ImageDataDirectory)) ]
# IMAGE_NT_HEADERS: "PE\0\0" signature, file header, optional header, and the
# section table sized by NumberOfSections.
ImageNtHeaders = [
    SHAPE("Signature", 0, STRING(fixedValue=b'PE\x00\x00')),
    SHAPE("FileHeader", 0, STRUCT(ImageFileHeader)),
    SHAPE("OptionalHeader", 0, STRUCT(ImageOptionalHeader)),
    SHAPE("Sections", 0, \
            ARRAY(lambda ctx: ctx.FileHeader.NumberOfSections, STRUCT, [ImageSectionHeader])) ]
# IMAGE_DOS_HEADER ("MZ" stub); e_lfanew gives the file offset of the NT headers,
# which are chained in as the nested "PE" member.
ImageDosHeader = [
    SHAPE("e_magic", 0, STRING(fixedValue=b"MZ")),
    SHAPE("e_cblp", 0, WORD()),
    SHAPE("e_cp", 0, WORD()),
    SHAPE("e_crlc", 0, WORD()),
    SHAPE("e_cparhdr", 0, WORD()),
    SHAPE("e_minalloc", 0, WORD()),
    SHAPE("e_maxalloc", 0, WORD()),
    SHAPE("e_ss", 0, WORD()),
    SHAPE("e_sp", 0, WORD()),
    SHAPE("e_csum", 0, WORD()),
    SHAPE("e_ip", 0, WORD()),
    SHAPE("e_cs", 0, WORD()),
    SHAPE("e_lfarlc", 0, WORD()),
    SHAPE("e_ovno", 0, WORD()),
    SHAPE("e_res", 0, ARRAY(4, WORD, ())),
    SHAPE("e_oemid", 0, WORD()),
    SHAPE("e_oeminfo", 0, WORD()),
    SHAPE("e_res2", 0, ARRAY(10, WORD, ())),
    SHAPE("e_lfanew", 0, DWORD()),
    SHAPE("PE", lambda ctx, addr: (addr + ctx.e_lfanew, ctx.e_lfanew), STRUCT(ImageNtHeaders))
]
#def getImports(baseCtx):
# IMPORT_DESCRIPTOR_SIZE = 5 * 4
# importsPat = []
# importsAddr = baseCtx.PE.OptionalHeader.AddressOfImports
# importsSize = baseCtx.PE.OptionalHeader.ImportDir.Size
# numDescriptors = importsSize / IMPORT_DESCRIPTOR_SIZE
# for i in range(numDescriptors):
# importsPat.append(
# SHAPE("Import%06x" % i, 0, STRUCT(ImageImportDescriptor))) | PypiClean |
/Cohen-0.7.4.tar.gz/Cohen-0.7.4/docs/cli.rst | .. _cli:
Command-Line Interface
======================
SYNOPSIS
--------
``cohen`` <options> [--plugin=<BACKEND> [ , <PARAM_NAME> : <PARAM_VALUE> ] ...]
DESCRIPTION
-----------
Cohen is a Python DLNA/UPnP Media Server made to broadcast digital media content over your network.
The core of Cohen provides a (hopefully complete) implementation
of:
* an SSDP server,
* an MSEARCH client,
* server and client for HTTP/SOAP requests, and
* server and client for Event Subscription and Notification (GENA).
OPTIONS
-------
-v, --version Show program's version number and exit
--help Show help message and exit
-d, --daemon Daemonize
-c, --configfile=PATH Path to config file
--noconfig ignore any config file found
-o, --option=OPTION activate option
-l, --logfile=PATH Path to log file.
EXAMPLES
--------
:cohen --plugin=backend\:FSStore,name\:MyCoherence:
Start cohen activating the `FSStore` backend.
:cohen --plugin=backend\:MediaStore,medialocation\:$HOME/Music/,mediadb\:/tmp/media.db:
Start cohen activating the `MediaStore` backend with media
located in `$HOME/Music` and the media metadata store in
`/tmp/media.db`.
AVAILABLE STORES
----------------
BetterLight, AmpacheStore, FlickrStore, MiroStore, ElisaPlayer,
ElisaMediaStore, Gallery2Store, DVBDStore, FSStore, BuzztardPlayer,
BuzztardStore, GStreamerPlayer, SimpleLight, ITVStore, SWR3Store,
TrackerStore, LolcatsStore, BBCStore, MediaStore, AppleTrailerStore,
LastFMStore, AxisCamStore, YouTubeStore, TEDStore, IRadioStore, TwitchStore
FILES
-----
:$HOME/.cohen: default config file
ENVIRONMENT VARIABLES
---------------------
:COHEN_DEBUG=<STORE>:
Supplies debug information pertaining to the named store.
SEE ALSO
--------
Project Homepage https://github.com/unintended/Cohen
| PypiClean |
/IOT3ApiClient-1.0.0.tar.gz/IOT3ApiClient-1.0.0/requests/utils.py | import codecs
import contextlib
import io
import os
import re
import socket
import struct
import sys
import tempfile
import warnings
import zipfile
from collections import OrderedDict
from urllib3.util import make_headers
from .__version__ import __version__
from . import certs
# to_native_string is unused here, but imported here for backwards compatibility
from ._internal_utils import to_native_string
from .compat import parse_http_list as _parse_list_header
from .compat import (
quote, urlparse, bytes, str, unquote, getproxies,
proxy_bypass, urlunparse, basestring, integer_types, is_py3,
proxy_bypass_environment, getproxies_environment, Mapping)
from .cookies import cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import (
InvalidURL, InvalidHeader, FileModeWarning, UnrewindableBodyError)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
DEFAULT_PORTS = {'http': 80, 'https': 443}
# Ensure that ', ' is used to preserve previous delimiter behavior.
DEFAULT_ACCEPT_ENCODING = ", ".join(
re.split(r",\s*", make_headers(accept_encoding=True)["accept-encoding"])
)
if sys.platform == 'win32':
    # provide a proxy_bypass version on Windows without DNS lookups

    def proxy_bypass_registry(host):
        """Consult the Windows registry's Internet Settings to decide
        whether *host* should bypass the configured proxy."""
        try:
            if is_py3:
                import winreg
            else:
                import _winreg as winreg
        except ImportError:
            # No registry access available; never bypass.
            return False

        try:
            internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
                r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
            # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it
            proxyEnable = int(winreg.QueryValueEx(internetSettings,
                                                  'ProxyEnable')[0])
            # ProxyOverride is almost always a string
            proxyOverride = winreg.QueryValueEx(internetSettings,
                                                'ProxyOverride')[0]
        except OSError:
            return False
        if not proxyEnable or not proxyOverride:
            return False

        # make a check value list from the registry entry: replace the
        # '<local>' string by the localhost entry and the corresponding
        # canonical entry.
        proxyOverride = proxyOverride.split(';')
        # now check if we match one of the registry values.
        for test in proxyOverride:
            if test == '<local>':
                # '<local>' means "bypass for plain (dot-less) hostnames".
                if '.' not in host:
                    return True
            # Translate the registry glob into a regular expression.
            test = test.replace(".", r"\.")  # mask dots
            test = test.replace("*", r".*")  # change glob sequence
            test = test.replace("?", r".")   # change glob char
            if re.match(test, host, re.I):
                return True
        return False

    def proxy_bypass(host):  # noqa
        """Return True, if the host should be bypassed.

        Checks proxy settings gathered from the environment, if specified,
        or the registry.
        """
        if getproxies_environment():
            return proxy_bypass_environment(host)
        else:
            return proxy_bypass_registry(host)
def dict_to_sequence(d):
    """Return *d* as an iterable of pairs, unwrapping mappings via .items().

    Non-mapping inputs are returned unchanged.
    """
    return d.items() if hasattr(d, 'items') else d
def super_len(o):
    """Best-effort *remaining* length of *o* (str/bytes, container, or
    file-like object).

    Returns ``max(0, total_length - current_position)`` so that a
    partially-read file reports only what is left to send; falls back to 0
    when no length can be determined.
    """
    total_length = None
    current_position = 0

    # Try the cheapest length sources first: __len__, a .len attribute,
    # then the size of the underlying file descriptor.
    if hasattr(o, '__len__'):
        total_length = len(o)

    elif hasattr(o, 'len'):
        total_length = o.len

    elif hasattr(o, 'fileno'):
        try:
            fileno = o.fileno()
        except io.UnsupportedOperation:
            pass
        else:
            total_length = os.fstat(fileno).st_size

            # Having used fstat to determine the file length, we need to
            # confirm that this file was opened up in binary mode.
            if 'b' not in o.mode:
                warnings.warn((
                    "Requests has determined the content-length for this "
                    "request using the binary size of the file: however, the "
                    "file has been opened in text mode (i.e. without the 'b' "
                    "flag in the mode). This may lead to an incorrect "
                    "content-length. In Requests 3.0, support will be removed "
                    "for files in text mode."),
                    FileModeWarning
                )

    if hasattr(o, 'tell'):
        try:
            current_position = o.tell()
        except (OSError, IOError):
            # This can happen in some weird situations, such as when the file
            # is actually a special file descriptor like stdin. In this
            # instance, we don't know what the length is, so set it to zero and
            # let requests chunk it instead.
            if total_length is not None:
                current_position = total_length
        else:
            if hasattr(o, 'seek') and total_length is None:
                # StringIO and BytesIO have seek but no useable fileno
                try:
                    # seek to end of file
                    o.seek(0, 2)
                    total_length = o.tell()

                    # seek back to current position to support
                    # partially read file-like objects
                    o.seek(current_position or 0)
                except (OSError, IOError):
                    total_length = 0

    if total_length is None:
        total_length = 0

    return max(0, total_length - current_position)
def get_netrc_auth(url, raise_errors=False):
    """Returns the Requests tuple auth for a given url from netrc.

    Looks for a netrc file at ``$NETRC`` or at the default ``~/.netrc`` /
    ``~/_netrc`` locations; returns ``(login, password)`` when an entry
    matches the URL's host, otherwise ``None``.

    :param url: URL to look up credentials for.
    :param raise_errors: when True, re-raise netrc parse/permission errors
        instead of silently skipping netrc auth.
    """
    netrc_file = os.environ.get('NETRC')
    if netrc_file is not None:
        netrc_locations = (netrc_file,)
    else:
        netrc_locations = ('~/{}'.format(f) for f in NETRC_FILES)

    try:
        from netrc import netrc, NetrcParseError

        netrc_path = None

        for f in netrc_locations:
            try:
                loc = os.path.expanduser(f)
            except KeyError:
                # os.path.expanduser can fail when $HOME is undefined and
                # getpwuid fails. See https://bugs.python.org/issue20164 &
                # https://github.com/psf/requests/issues/1846
                return

            if os.path.exists(loc):
                netrc_path = loc
                break

        # Abort early if there isn't one.
        if netrc_path is None:
            return

        ri = urlparse(url)

        # Strip port numbers from netloc. This weird `if...encode`` dance is
        # used for Python 3.2, which doesn't support unicode literals.
        splitstr = b':'
        if isinstance(url, str):
            splitstr = splitstr.decode('ascii')
        host = ri.netloc.split(splitstr)[0]

        try:
            _netrc = netrc(netrc_path).authenticators(host)
            if _netrc:
                # Return with login / password
                login_i = (0 if _netrc[0] else 1)
                return (_netrc[login_i], _netrc[2])
        except (NetrcParseError, IOError):
            # If there was a parsing error or a permissions issue reading the file,
            # we'll just skip netrc auth unless explicitly asked to raise errors.
            if raise_errors:
                raise

    # App Engine hackiness.
    except (ImportError, AttributeError):
        pass
def guess_filename(obj):
    """Best-effort filename for *obj*: the basename of its ``name``
    attribute, unless that looks like a pseudo-name such as ``<stdin>``.
    Returns ``None`` when no usable name is found.
    """
    name = getattr(obj, 'name', None)
    if name is None or not isinstance(name, basestring):
        return None
    if not name or name[0] == '<' or name[-1] == '>':
        # Pseudo-files (e.g. '<stdin>') have no meaningful filename.
        return None
    return os.path.basename(name)
def extract_zipped_paths(path):
    """Replace nonexistent paths that look like they refer to a member of a zip
    archive with the location of an extracted copy of the target, or else
    just return the provided path unchanged.

    :param path: filesystem path, possibly pointing inside a zip archive
        (e.g. ``/site-packages.zip/pkg/cacert.pem``).
    :rtype: str
    """
    if os.path.exists(path):
        # this is already a valid path, no need to do anything further
        return path

    # find the first valid part of the provided path and treat that as a zip archive
    # assume the rest of the path is the name of a member in the archive
    archive, member = os.path.split(path)
    while archive and not os.path.exists(archive):
        archive, prefix = os.path.split(archive)
        member = '/'.join([prefix, member])

    if not zipfile.is_zipfile(archive):
        return path

    zip_file = zipfile.ZipFile(archive)
    if member not in zip_file.namelist():
        return path

    # we have a valid zip archive and a valid member of that archive
    tmp = tempfile.gettempdir()
    # NOTE(review): only the member's basename is used, so two members with
    # the same basename would share one extraction target.
    extracted_path = os.path.join(tmp, member.split('/')[-1])
    if not os.path.exists(extracted_path):
        # use read + write to avoid creating nested folders; we only want the file, and this avoids a mkdir race condition
        with atomic_open(extracted_path) as file_handler:
            file_handler.write(zip_file.read(member))
    return extracted_path
@contextlib.contextmanager
def atomic_open(filename):
    """Write a file to the disk in an atomic fashion

    Yields a binary handle to a temporary file in the target directory; on
    clean exit the temp file is renamed over *filename*, on error it is
    removed and the exception re-raised.
    """
    # os.replace only exists on Python 3; fall back to os.rename on Python 2.
    replacer = os.rename if sys.version_info[0] == 2 else os.replace
    # mkstemp in the destination directory so the final rename stays on the
    # same filesystem.
    tmp_descriptor, tmp_name = tempfile.mkstemp(dir=os.path.dirname(filename))
    try:
        with os.fdopen(tmp_descriptor, 'wb') as tmp_handler:
            yield tmp_handler
        replacer(tmp_name, filename)
    except BaseException:
        os.remove(tmp_name)
        raise
def from_key_val_list(value):
    """Convert *value* into an :class:`OrderedDict` of key/value pairs.

    ``None`` passes through unchanged.  Scalars (str, bytes, bool, int) are
    rejected because they cannot represent 2-tuples.

    :rtype: OrderedDict
    :raises ValueError: for scalar inputs.
    """
    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')

    return None if value is None else OrderedDict(value)
def to_key_val_list(value):
    """Convert *value* into a list of (key, value) tuples.

    ``None`` passes through unchanged; mappings are expanded via
    ``.items()``.  Scalars (str, bytes, bool, int) are rejected because
    they cannot represent 2-tuples.

    :rtype: list
    :raises ValueError: for scalar inputs.
    """
    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')

    if value is None:
        return None

    pairs = value.items() if isinstance(value, Mapping) else value
    return list(pairs)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
    """Parse a comma-separated list header as described by RFC 2068 Section 2.

    Elements may be quoted-strings (which can themselves contain commas);
    fully-quoted items have their quotes removed after splitting.  Unlike
    :func:`parse_set_header`, duplicates and case are preserved.

    :param value: a string with a list header.
    :return: :class:`list`
    :rtype: list
    """
    def _clean(item):
        if item[:1] == item[-1:] == '"':
            return unquote_header_value(item[1:-1])
        return item

    return [_clean(item) for item in _parse_list_header(value)]
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
    """Parse a list of key=value pairs (RFC 2068 Section 2) into a dict.

    Keys without a value map to ``None``; fully-quoted values have their
    quotes removed.  To build a header from a dict again, use
    :func:`dump_header`.

    :param value: a string with a dict header.
    :return: :class:`dict`
    :rtype: dict
    """
    result = {}
    for item in _parse_list_header(value):
        name, sep, val = item.partition('=')
        if not sep:
            # Bare token with no '=': record the key with no value.
            result[name] = None
            continue
        if val[:1] == val[-1:] == '"':
            val = unquote_header_value(val[1:-1])
        result[name] = val
    return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
    r"""Unquote a header value the way browsers do (reversal of
    :func:`quote_header_value`); deliberately laxer than the RFC, since
    strict unquoting would break e.g. IE's ``"C:\foo\bar.txt"`` filenames.

    :param value: the header value to unquote.
    :rtype: str
    """
    if not value or not (value[0] == value[-1] == '"'):
        return value

    inner = value[1:-1]

    # Leave UNC filenames (\\server\share) untouched: collapsing the leading
    # double backslash would corrupt the path. See #458.
    if is_filename and inner[:2] == '\\\\':
        return inner

    return inner.replace('\\\\', '\\').replace('\\"', '"')
def dict_from_cookiejar(cj):
    """Return a plain name -> value dict for the cookies in *cj*.

    :param cj: CookieJar object to extract cookies from.
    :rtype: dict
    """
    return {cookie.name: cookie.value for cookie in cj}
def add_dict_to_cookiejar(cj, cookie_dict):
    """Insert the key/value pairs of *cookie_dict* into *cj* as cookies.

    :param cj: CookieJar to insert cookies into.
    :param cookie_dict: Dict of key/values to insert into CookieJar.
    :rtype: CookieJar
    """
    updated_jar = cookiejar_from_dict(cookie_dict, cj)
    return updated_jar
def get_encodings_from_content(content):
    """Scan *content* for declared character encodings.

    Checks HTML ``<meta charset=...>`` tags, http-equiv pragma
    declarations, and XML prologs, in that order.  Deprecated.

    :param content: bytestring to extract encodings from.
    """
    warnings.warn((
        'In requests 3.0, get_encodings_from_content will be removed. For '
        'more information, please see the discussion on issue #2266. (This'
        ' warning should only appear once.)'),
        DeprecationWarning)

    patterns = (
        re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I),
        re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I),
        re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]'),
    )

    found = []
    for pattern in patterns:
        found.extend(pattern.findall(content))
    return found
def _parse_content_type_header(header):
"""Returns content type and parameters from given header
:param header: string
:return: tuple containing content type and dictionary of
parameters
"""
tokens = header.split(';')
content_type, params = tokens[0].strip(), tokens[1:]
params_dict = {}
items_to_strip = "\"' "
for param in params:
param = param.strip()
if param:
key, value = param, True
index_of_equals = param.find("=")
if index_of_equals != -1:
key = param[:index_of_equals].strip(items_to_strip)
value = param[index_of_equals + 1:].strip(items_to_strip)
params_dict[key.lower()] = value
return content_type, params_dict
def get_encoding_from_headers(headers):
    """Pick a text encoding from an HTTP header mapping.

    :param headers: dictionary to extract encoding from.
    :rtype: str
    """
    raw = headers.get('content-type')
    if not raw:
        return None

    content_type, params = _parse_content_type_header(raw)

    if 'charset' in params:
        return params['charset'].strip("'\"")

    if 'text' in content_type:
        # RFC 2616 default for text/* responses without a declared charset.
        return 'ISO-8859-1'

    if 'application/json' in content_type:
        # Assume UTF-8 based on RFC 4627: https://www.ietf.org/rfc/rfc4627.txt since the charset was unset
        return 'utf-8'
def stream_decode_response_unicode(iterator, r):
    """Incrementally decode an iterator of byte chunks using *r*'s encoding.

    When the response declares no encoding the chunks pass through
    unchanged; otherwise undecodable bytes are replaced.
    """
    if r.encoding is None:
        for chunk in iterator:
            yield chunk
        return

    decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
    for chunk in iterator:
        decoded = decoder.decode(chunk)
        if decoded:
            yield decoded
    # Flush any bytes buffered by a partial multi-byte sequence.
    tail = decoder.decode(b'', final=True)
    if tail:
        yield tail
def iter_slices(string, slice_length):
    """Yield successive substrings of *string* of length *slice_length*.

    A ``None`` or non-positive length yields the whole string in one piece.
    """
    total = len(string)
    if slice_length is None or slice_length <= 0:
        slice_length = total
    if total:
        for start in range(0, total, slice_length):
            yield string[start:start + slice_length]
def get_unicode_from_response(r):
    """Returns the requested content back in unicode.

    :param r: Response object to get unicode content from.

    Tried:

    1. charset from content-type
    2. fall back and replace all unicode characters

    :rtype: str
    """
    warnings.warn((
        'In requests 3.0, get_unicode_from_response will be removed. For '
        'more information, please see the discussion on issue #2266. (This'
        ' warning should only appear once.)'),
        DeprecationWarning)

    tried_encodings = []

    # Try charset from content-type
    encoding = get_encoding_from_headers(r.headers)

    if encoding:
        try:
            return str(r.content, encoding)
        except UnicodeError:
            tried_encodings.append(encoding)

    # Fall back:
    try:
        # NOTE(review): if no encoding was found this passes encoding=None,
        # which makes str() raise TypeError, so the raw bytes are returned.
        return str(r.content, encoding, errors='replace')
    except TypeError:
        return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~")


def unquote_unreserved(uri):
    """Un-escape percent-escapes that encode *unreserved* characters.

    Reserved, illegal, and non-ASCII escapes stay percent-encoded.

    :rtype: str
    :raises InvalidURL: for an alphanumeric but non-hex escape pair.
    """
    segments = uri.split('%')
    for idx, segment in enumerate(segments[1:], 1):
        hex_pair = segment[0:2]
        if len(hex_pair) != 2 or not hex_pair.isalnum():
            # Not a well-formed escape; keep the literal '%'.
            segments[idx] = '%' + segment
            continue
        try:
            char = chr(int(hex_pair, 16))
        except ValueError:
            raise InvalidURL("Invalid percent-escape sequence: '%s'" % hex_pair)
        if char in UNRESERVED_SET:
            segments[idx] = char + segment[2:]
        else:
            segments[idx] = '%' + segment
    return ''.join(segments)
def requote_uri(uri):
    """Re-quote *uri* so it is fully and consistently percent-encoded.

    The URI is run through an unquote/quote cycle; if it cannot be unquoted
    (stray '%' characters), it is quoted more aggressively instead.

    :rtype: str
    """
    try:
        unquoted = unquote_unreserved(uri)
    except InvalidURL:
        # The URI contains '%'s that are not valid escapes; quote them too
        # so they do not cause issues elsewhere.
        return quote(uri, safe="!#$&'()*+,/:;=?@[]~")
    # Quote only illegal characters (do not quote reserved, unreserved,
    # or '%').
    return quote(unquoted, safe="!#$%&'()*+,/:;=?@[]~")
def address_in_network(ip, net):
    """Check whether IP address *ip* lies inside CIDR network *net*.

    Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
             returns False if ip = 192.168.1.1 and net = 192.168.100.0/24

    :rtype: bool
    """
    netaddr, bits = net.split('/')
    netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
    network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
    ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
    return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
    """Convert a CIDR prefix length (/xx) into dotted-quad notation.

    Example: if mask is 24 function returns 255.255.255.0

    :rtype: str
    """
    host_bits = 32 - mask
    netmask_value = 0xffffffff ^ ((1 << host_bits) - 1)
    return socket.inet_ntoa(struct.pack('>I', netmask_value))
def is_ipv4_address(string_ip):
    """True when *string_ip* parses as an IPv4 address.

    :rtype: bool
    """
    try:
        socket.inet_aton(string_ip)
        return True
    except socket.error:
        return False
def is_valid_cidr(string_network):
    """Very simple check of the ``a.b.c.d/nn`` cidr format used in no_proxy.

    :rtype: bool
    """
    if string_network.count('/') != 1:
        return False

    host, _, prefix = string_network.partition('/')
    try:
        mask = int(prefix)
    except ValueError:
        return False
    if not 1 <= mask <= 32:
        return False

    try:
        socket.inet_aton(host)
    except socket.error:
        return False
    return True
@contextlib.contextmanager
def set_environ(env_name, value):
    """Temporarily set environment variable *env_name* to *value*.

    The previous value (or absence) is restored on exit.  When *value* is
    ``None`` the environment is left untouched.
    """
    if value is None:
        yield
        return

    old_value = os.environ.get(env_name)
    os.environ[env_name] = value
    try:
        yield
    finally:
        if old_value is None:
            del os.environ[env_name]
        else:
            os.environ[env_name] = old_value
def should_bypass_proxies(url, no_proxy):
    """
    Returns whether we should bypass proxies or not.

    :param url: the URL being requested.
    :param no_proxy: comma-separated host list overriding the no_proxy
        environment variable, or None to read it from the environment.
    :rtype: bool
    """
    # Prioritize lowercase environment variables over uppercase
    # to keep a consistent behaviour with other http projects (curl, wget).
    get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())

    # First check whether no_proxy is defined. If it is, check that the URL
    # we're getting isn't in the no_proxy list.
    no_proxy_arg = no_proxy
    if no_proxy is None:
        no_proxy = get_proxy('no_proxy')
    parsed = urlparse(url)

    if parsed.hostname is None:
        # URLs don't always have hostnames, e.g. file:/// urls.
        return True

    if no_proxy:
        # We need to check whether we match here. We need to see if we match
        # the end of the hostname, both with and without the port.
        no_proxy = (
            host for host in no_proxy.replace(' ', '').split(',') if host
        )

        if is_ipv4_address(parsed.hostname):
            for proxy_ip in no_proxy:
                if is_valid_cidr(proxy_ip):
                    if address_in_network(parsed.hostname, proxy_ip):
                        return True
                elif parsed.hostname == proxy_ip:
                    # If no_proxy ip was defined in plain IP notation instead of cidr notation &
                    # matches the IP of the index
                    return True
        else:
            host_with_port = parsed.hostname
            if parsed.port:
                host_with_port += ':{}'.format(parsed.port)

            for host in no_proxy:
                if parsed.hostname.endswith(host) or host_with_port.endswith(host):
                    # The URL does match something in no_proxy, so we don't want
                    # to apply the proxies on this URL.
                    return True

    # Finally, defer to the platform's own bypass rules (registry on
    # Windows, environment elsewhere) with no_proxy temporarily applied.
    with set_environ('no_proxy', no_proxy_arg):
        # parsed.hostname can be `None` in cases such as a file URI.
        try:
            bypass = proxy_bypass(parsed.hostname)
        except (TypeError, socket.gaierror):
            bypass = False

    if bypass:
        return True

    return False
def get_environ_proxies(url, no_proxy=None):
    """
    Return a dict of environment proxies, or an empty dict when the URL
    should bypass them.

    :rtype: dict
    """
    bypass = should_bypass_proxies(url, no_proxy=no_proxy)
    return {} if bypass else getproxies()
def select_proxy(url, proxies):
    """Select a proxy for the url, if applicable.

    :param url: The url being for the request
    :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
    """
    proxies = proxies or {}
    urlparts = urlparse(url)
    if urlparts.hostname is None:
        return proxies.get(urlparts.scheme, proxies.get('all'))

    # Most specific key wins: scheme://host, scheme, all://host, then 'all'.
    candidate_keys = (
        urlparts.scheme + '://' + urlparts.hostname,
        urlparts.scheme,
        'all://' + urlparts.hostname,
        'all',
    )
    for key in candidate_keys:
        if key in proxies:
            return proxies[key]
    return None
def default_user_agent(name="python-requests"):
    """
    Return the default User-Agent string, e.g. ``python-requests/2.x.y``.

    :rtype: str
    """
    return '{0}/{1}'.format(name, __version__)
def default_headers():
    """
    Build the default set of request headers.

    :rtype: requests.structures.CaseInsensitiveDict
    """
    headers = CaseInsensitiveDict()
    headers['User-Agent'] = default_user_agent()
    headers['Accept-Encoding'] = DEFAULT_ACCEPT_ENCODING
    headers['Accept'] = '*/*'
    headers['Connection'] = 'keep-alive'
    return headers
def parse_header_links(value):
    """Parse an HTTP Link header into a list of link dicts.

    i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"

    :rtype: list
    """
    strip_chars = ' \'"'
    value = value.strip(strip_chars)
    if not value:
        return []

    links = []
    for entry in re.split(', *<', value):
        target, _, raw_params = entry.partition(';')
        link = {'url': target.strip('<> \'"')}
        for param in raw_params.split(';'):
            pieces = param.split('=')
            if len(pieces) != 2:
                # Malformed parameter; stop processing this entry's params.
                break
            key, val = pieces
            link[key.strip(strip_chars)] = val.strip(strip_chars)
        links.append(link)
    return links
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii')  # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3


def guess_json_utf(data):
    """Guess the UTF flavour of a JSON byte string from its first 4 bytes.

    JSON always starts with two ASCII characters, so the position and the
    count of NUL bytes (plus any BOM) determine the encoding.

    :rtype: str
    """
    sample = data[:4]
    # BOM-based detection first.
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
        return 'utf-32'     # BOM included
    if sample.startswith(codecs.BOM_UTF8):
        return 'utf-8-sig'  # BOM included, MS style (discouraged)
    if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
        return 'utf-16'     # BOM included
    # No BOM: infer from where the NUL bytes sit.
    nullcount = sample.count(_null)
    if nullcount == 0:
        return 'utf-8'
    if nullcount == 2:
        if sample[::2] == _null2:   # 1st and 3rd are null
            return 'utf-16-be'
        if sample[1::2] == _null2:  # 2nd and 4th are null
            return 'utf-16-le'
        # Did not detect 2 valid UTF-16 ascii-range characters
    elif nullcount == 3:
        if sample.startswith(_null3):
            return 'utf-32-be'
        if sample[1:] == _null3:
            return 'utf-32-le'
        # Did not detect a valid UTF-32 ascii-range character
    return None
def prepend_scheme_if_needed(url, new_scheme):
    """Ensure *url* has a scheme, defaulting to *new_scheme* when absent.

    An already-present scheme is never replaced.

    :rtype: str
    """
    parsed = urlparse(url, new_scheme)

    # urlparse sometimes attributes the host to the path instead of the
    # netloc (e.g. 'example.com/x' with a defaulted scheme); swap them back.
    netloc, path = parsed.netloc, parsed.path
    if not netloc:
        netloc, path = path, netloc

    return urlunparse((parsed.scheme, netloc, path,
                       parsed.params, parsed.query, parsed.fragment))
def get_auth_from_url(url):
    """Extract the (username, password) pair embedded in *url*,
    percent-decoded; ('', '') when no credentials are present.

    :rtype: (str,str)
    """
    parsed = urlparse(url)
    try:
        return (unquote(parsed.username), unquote(parsed.password))
    except (AttributeError, TypeError):
        # username/password is None (or url was not a string).
        return ('', '')
# Moved outside of function to avoid recompile every call
_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$')
_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$')


def check_header_validity(header):
    """Verify that a header value contains no leading whitespace or CR/LF
    characters, which could enable header injection.

    :param header: tuple, in the format (name, value).
    :raises InvalidHeader: for an unsafe or non-string value.
    """
    name, value = header
    pattern = (_CLEAN_HEADER_REGEX_BYTE if isinstance(value, bytes)
               else _CLEAN_HEADER_REGEX_STR)
    try:
        if not pattern.match(value):
            raise InvalidHeader("Invalid return character or leading space in header: %s" % name)
    except TypeError:
        raise InvalidHeader("Value for header {%s: %s} must be of type str or "
                            "bytes, not %s" % (name, value, type(value)))
def urldefragauth(url):
    """Strip the fragment and the authentication part (user:pass@) from *url*.

    :rtype: str
    """
    parsed = urlparse(url)
    netloc, path = parsed.netloc, parsed.path

    # urlparse may have attributed the host to the path
    # (see func:`prepend_scheme_if_needed`); swap them back.
    if not netloc:
        netloc, path = path, netloc

    host_only = netloc.rsplit('@', 1)[-1]
    return urlunparse((parsed.scheme, host_only, path,
                       parsed.params, parsed.query, ''))
def rewind_body(prepared_request):
    """Seek the request body back to its recorded start position so it can
    be read again on redirect.

    :raises UnrewindableBodyError: when the body is not seekable, the start
        position was not recorded, or seeking fails.
    """
    body_seek = getattr(prepared_request.body, 'seek', None)
    if body_seek is None or not isinstance(
            prepared_request._body_position, integer_types):
        raise UnrewindableBodyError("Unable to rewind request body for redirect.")
    try:
        body_seek(prepared_request._body_position)
    except (IOError, OSError):
        raise UnrewindableBodyError("An error occurred when rewinding request "
                                    "body for redirect.")
/APS_BlueSky_tools-2019.103.0.tar.gz/APS_BlueSky_tools-2019.103.0/versioneer.py | from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Attributes (VCS, style, versionfile_source, versionfile_build,
    tag_prefix, parentdir_prefix, verbose) are assigned dynamically by
    get_config_from_root() from setup.cfg's [versioneer] section.
    """
def get_root():
    """Get the project root directory.

    We require that all commands are run from the project root, i.e. the
    directory that contains setup.py, setup.cfg, and versioneer.py .

    :raises VersioneerBadRootError: when neither the cwd nor the directory
        of sys.argv[0] contains setup.py/versioneer.py.
    """
    root = os.path.realpath(os.path.abspath(os.getcwd()))
    setup_py = os.path.join(root, "setup.py")
    versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        # allow 'python path/to/setup.py COMMAND'
        root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
        setup_py = os.path.join(root, "setup.py")
        versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        err = ("Versioneer was unable to run the project root directory. "
               "Versioneer requires setup.py to be executed from "
               "its immediate directory (like 'python setup.py COMMAND'), "
               "or in a way that lets it use sys.argv[0] to find the root "
               "(like 'python path/to/setup.py COMMAND').")
        raise VersioneerBadRootError(err)
    try:
        # Certain runtime workflows (setup.py install/develop in a setuptools
        # tree) execute all dependencies in a single python process, so
        # "versioneer" may be imported multiple times, and python's shared
        # module-import table will cache the first one. So we can't use
        # os.path.dirname(__file__), as that will find whichever
        # versioneer.py was first imported, even in later projects.
        me = os.path.realpath(os.path.abspath(__file__))
        me_dir = os.path.normcase(os.path.splitext(me)[0])
        vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
        if me_dir != vsr_dir:
            print("Warning: build in %s is using versioneer.py from %s"
                  % (os.path.dirname(me), versioneer_py))
    except NameError:
        # __file__ is undefined (e.g. interactive session); skip the check.
        pass
    return root
def get_config_from_root(root):
    """Read the project setup.cfg file to determine Versioneer config.

    :param root: path of the project root (the directory holding setup.cfg).
    :return: a populated VersioneerConfig instance.

    This might raise EnvironmentError (if setup.cfg is missing), or
    configparser.NoSectionError (if it lacks a [versioneer] section), or
    configparser.NoOptionError (if it lacks "VCS="). See the docstring at
    the top of versioneer.py for instructions on writing your setup.cfg .
    """
    setup_cfg = os.path.join(root, "setup.cfg")
    # SafeConfigParser has been an alias of ConfigParser since Python 3.2
    # and was removed in Python 3.12; readfp was removed too.  Use the
    # modern API, falling back to readfp on Python 2.
    parser = configparser.ConfigParser()
    with open(setup_cfg, "r") as f:
        if hasattr(parser, "read_file"):
            parser.read_file(f)
        else:  # Python 2: ConfigParser has readfp but not read_file
            parser.readfp(f)
    VCS = parser.get("versioneer", "VCS")  # mandatory

    def get(parser, name):
        # Return the option's value, or None when it is absent.
        if parser.has_option("versioneer", name):
            return parser.get("versioneer", name)
        return None
    cfg = VersioneerConfig()
    cfg.VCS = VCS
    cfg.style = get(parser, "style") or ""
    cfg.versionfile_source = get(parser, "versionfile_source")
    cfg.versionfile_build = get(parser, "versionfile_build")
    cfg.tag_prefix = get(parser, "tag_prefix")
    # An explicitly-empty prefix may be spelled '' or "" in setup.cfg.
    if cfg.tag_prefix in ("''", '""'):
        cfg.tag_prefix = ""
    cfg.parentdir_prefix = get(parser, "parentdir_prefix")
    cfg.verbose = get(parser, "verbose")
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""
    # NOTE(review): appears to serve as control flow between alternative
    # version-discovery strategies; usage is outside this chunk -- confirm.
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    Tries each name in *commands* until one can be spawned; returns a
    (stdout, returncode) tuple, or (None, None) when nothing could run.
    """
    assert isinstance(commands, list)
    process = None
    for candidate in commands:
        dispcmd = str([candidate] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen([candidate] + args, cwd=cwd, env=env,
                                       stdout=subprocess.PIPE,
                                       stderr=(subprocess.PIPE if hide_stderr
                                               else None))
            break
        except EnvironmentError as e:
            if e.errno == errno.ENOENT:
                # this name isn't installed; try the next candidate
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # loop exhausted without a break: no candidate could be spawned
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Eexceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Scans *versionfile_abs* for the literal assignments git_refnames /
    git_full / git_date and returns the quoted values found, as a dict
    (missing entries are simply absent from the result).
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # "with" guarantees the handle is closed even when a read error
        # occurs mid-file (the original open()/close() pair leaked it then).
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if line.strip().startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except EnvironmentError:
        # missing/unreadable file: return whatever was collected (possibly {})
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI" (ISO-8601 -compliant), but "%ci" has been
        # around since git-1.5.3 and only needs a small edit to become
        # compliant, so we stick with it rather than probing the git version.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # the $Format$ markers were never substituted by git-archive
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {entry.strip() for entry in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = {entry[len(TAG):] for entry in refs if entry.startswith(TAG)}
    if not tags:
        # Either git < 1.8.3, or there really are no tags. Heuristic: assume
        # all version tags contain a digit, which filters out branch names
        # like "release" / "stabilization" as well as "HEAD" and "master".
        tags = {entry for entry in refs if re.search(r'\d', entry)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if not ref.startswith(tag_prefix):
            continue
        version = ref[len(tag_prefix):]
        if verbose:
            print("picking %s" % version)
        return {"version": version,
                "full-revisionid": keywords["full"].strip(),
                "dirty": False, "error": None,
                "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.
    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # run_command spawns with shell=False, so the Windows wrapper
        # executables must be named explicitly.
        GITS = ["git.cmd", "git.exe"]
    # First confirm 'root' is actually under git control; otherwise raise
    # NotThisMethod so the caller moves on to the next discovery strategy.
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
    """Git-specific installation logic for Versioneer.
    For Git, this means creating/changing .gitattributes to mark _version.py
    for export-subst keyword substitution, then 'git add'-ing the touched
    files (manifest_in, versionfile_source, ipy if given, versioneer.py, and
    .gitattributes when it was modified).
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    files = [manifest_in, versionfile_source]
    if ipy:
        files.append(ipy)
    try:
        me = __file__
        if me.endswith(".pyc") or me.endswith(".pyo"):
            me = os.path.splitext(me)[0] + ".py"
        versioneer_file = os.path.relpath(me)
    except NameError:
        # frozen/embedded interpreters may not define __file__
        versioneer_file = "versioneer.py"
    files.append(versioneer_file)
    present = False
    try:
        # "with" closes the handle even if a read fails partway (the
        # original open()/no-close pattern leaked it on that path).
        with open(".gitattributes", "r") as f:
            for line in f.readlines():
                if line.strip().startswith(versionfile_source):
                    if "export-subst" in line.strip().split()[1:]:
                        present = True
    except EnvironmentError:
        # no .gitattributes yet; we'll create it below
        pass
    if not present:
        with open(".gitattributes", "a+") as f:
            f.write("%s export-subst\n" % versionfile_source)
        files.append(".gitattributes")
    run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.
    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            # everything after the prefix is taken to be the version string
            return {"version": dirname[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
    """Try to determine the version from _version.py if present."""
    try:
        with open(filename) as f:
            contents = f.read()
    except EnvironmentError:
        raise NotThisMethod("unable to read _version.py")
    # accept both LF and CRLF files written by earlier runs
    for pattern in (r"version_json = '''\n(.*)''' # END VERSION_JSON",
                    r"version_json = '''\r\n(.*)''' # END VERSION_JSON"):
        mo = re.search(pattern, contents, re.M | re.S)
        if mo:
            return json.loads(mo.group(1))
    raise NotThisMethod("no version_json in _version.py")
def write_to_version_file(filename, versions):
    """Write the given version number to the given _version.py file."""
    # remove the old file first so the template is written from scratch
    os.unlink(filename)
    payload = json.dumps(versions, sort_keys=True, indent=1,
                         separators=(",", ": "))
    with open(filename, "w") as fobj:
        fobj.write(SHORT_VERSION_PY % payload)
    print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a ."""
    return "." if "+" in pieces.get("closest-tag", "") else "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".
    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag at all
        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
        return rendered
    parts = [tag]
    if pieces["distance"] or pieces["dirty"]:
        parts.append(plus_or_dot(pieces))
        parts.append("%d.g%s" % (pieces["distance"], pieces["short"]))
        if pieces["dirty"]:
            parts.append(".dirty")
    return "".join(parts)
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.
    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    if pieces["closest-tag"] and not pieces["distance"]:
        # exactly on a tag: no suffix
        return pieces["closest-tag"]
    base = pieces["closest-tag"] or "0"  # exception #1 uses "0"
    return base + ".post.dev%d" % pieces["distance"]
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .
    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.
    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag -> always append the +gHEX local segment
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        return rendered + "+g%s" % pieces["short"]
    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        rendered += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += plus_or_dot(pieces)
        rendered += "g%s" % pieces["short"]
    return rendered
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .
    The ".dev0" means dirty.
    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag and not (pieces["distance"] or pieces["dirty"]):
        # exactly on a clean tag: no suffix
        return tag
    rendered = "%s.post%d" % (tag or "0", pieces["distance"])
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].
    Like 'git describe --tags --dirty --always'.
    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    return rendered + ("-dirty" if pieces["dirty"] else "")
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.
    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # an earlier stage already failed; propagate its error message
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
class VersioneerBadRootError(Exception):
    """Raised when the project root directory is unknown or missing key files."""
def get_versions(verbose=False):
    """Get the project version from whatever source is available.
    Returns dict with two keys: 'version' and 'full'.
    """
    if "versioneer" in sys.modules:
        # see the discussion in cmdclass.py:get_cmdclass()
        del sys.modules["versioneer"]
    root = get_root()
    cfg = get_config_from_root(root)
    assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
    handlers = HANDLERS.get(cfg.VCS)
    assert handlers, "unrecognized VCS '%s'" % cfg.VCS
    verbose = verbose or cfg.verbose
    assert cfg.versionfile_source is not None, \
        "please set versioneer.versionfile_source"
    assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
    versionfile_abs = os.path.join(root, cfg.versionfile_source)
    # extract version from first of: _version.py, VCS command (e.g. 'git
    # describe'), parentdir. This is meant to work for developers using a
    # source checkout, for users of a tarball created by 'setup.py sdist',
    # and for users of a tarball/zipball created by 'git archive' or github's
    # download-from-tag feature or the equivalent in other VCSes.
    get_keywords_f = handlers.get("get_keywords")
    from_keywords_f = handlers.get("keywords")
    # strategy 1: expanded git-archive keywords inside the version file
    if get_keywords_f and from_keywords_f:
        try:
            keywords = get_keywords_f(versionfile_abs)
            ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % ver)
            return ver
        except NotThisMethod:
            pass
    # strategy 2: a previously-written short _version.py
    try:
        ver = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file %s %s" % (versionfile_abs, ver))
        return ver
    except NotThisMethod:
        pass
    # strategy 3: ask the VCS directly (e.g. 'git describe')
    from_vcs_f = handlers.get("pieces_from_vcs")
    if from_vcs_f:
        try:
            pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
            ver = render(pieces, cfg.style)
            if verbose:
                print("got version from VCS %s" % ver)
            return ver
        except NotThisMethod:
            pass
    # strategy 4: fall back to the parent-directory name of the unpack dir
    try:
        if cfg.parentdir_prefix:
            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
            if verbose:
                print("got version from parentdir %s" % ver)
            return ver
    except NotThisMethod:
        pass
    # every strategy failed: report a recognizable placeholder version
    if verbose:
        print("unable to compute version")
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None, "error": "unable to compute version",
            "date": None}
def get_version():
    """Get the short version string for this project."""
    vers = get_versions()
    return vers["version"]
def get_cmdclass():
    """Get the custom setuptools/distutils subclasses used by Versioneer."""
    if "versioneer" in sys.modules:
        del sys.modules["versioneer"]
        # this fixes the "python setup.py develop" case (also 'install' and
        # 'easy_install .'), in which subdependencies of the main project are
        # built (using setup.py bdist_egg) in the same python process. Assume
        # a main project A and a dependency B, which use different versions
        # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
        # sys.modules by the time B's setup.py is executed, causing B to run
        # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
        # sandbox that restores sys.modules to it's pre-build state, so the
        # parent is protected against the child's "import versioneer". By
        # removing ourselves from sys.modules here, before the child build
        # happens, we protect the child from the parent's versioneer too.
        # Also see https://github.com/warner/python-versioneer/issues/52
    # Maps command name -> command class; returned to setup(cmdclass=...).
    cmds = {}
    # we add "version" to both distutils and setuptools
    from distutils.core import Command
    class cmd_version(Command):
        # `setup.py version`: print the computed version and diagnostics.
        description = "report generated version string"
        user_options = []
        boolean_options = []
        def initialize_options(self):
            pass
        def finalize_options(self):
            pass
        def run(self):
            vers = get_versions(verbose=True)
            print("Version: %s" % vers["version"])
            print(" full-revisionid: %s" % vers.get("full-revisionid"))
            print(" dirty: %s" % vers.get("dirty"))
            print(" date: %s" % vers.get("date"))
            if vers["error"]:
                print(" error: %s" % vers["error"])
    cmds["version"] = cmd_version
    # we override "build_py" in both distutils and setuptools
    #
    # most invocation pathways end up running build_py:
    # distutils/build -> build_py
    # distutils/install -> distutils/build ->..
    # setuptools/bdist_wheel -> distutils/install ->..
    # setuptools/bdist_egg -> distutils/install_lib -> build_py
    # setuptools/install -> bdist_egg ->..
    # setuptools/develop -> ?
    # pip install:
    # copies source tree to a tempdir before running egg_info/etc
    # if .git isn't copied too, 'git describe' will fail
    # then does setup.py bdist_wheel, or sometimes setup.py install
    # setup.py egg_info -> ?
    # we override different "build_py" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.build_py import build_py as _build_py
    else:
        from distutils.command.build_py import build_py as _build_py
    class cmd_build_py(_build_py):
        def run(self):
            root = get_root()
            cfg = get_config_from_root(root)
            # Compute the version BEFORE the normal build copies files.
            versions = get_versions()
            _build_py.run(self)
            # now locate _version.py in the new build/ directory and replace
            # it with an updated value
            if cfg.versionfile_build:
                target_versionfile = os.path.join(self.build_lib,
                                                  cfg.versionfile_build)
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
    cmds["build_py"] = cmd_build_py
    if "cx_Freeze" in sys.modules:  # cx_freeze enabled?
        from cx_Freeze.dist import build_exe as _build_exe
        # nczeczulin reports that py2exe won't like the pep440-style string
        # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
        # setup(console=[{
        # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
        # "product_version": versioneer.get_version(),
        # ...
        class cmd_build_exe(_build_exe):
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                # Temporarily freeze the version into the source tree,
                # run the freeze, then restore the template file.
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
                _build_exe.run(self)
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["build_exe"] = cmd_build_exe
        # build_exe rewrites the version file itself, so our build_py
        # override is not needed (and would conflict) under cx_Freeze.
        del cmds["build_py"]
    if 'py2exe' in sys.modules:  # py2exe enabled?
        try:
            from py2exe.distutils_buildexe import py2exe as _py2exe  # py3
        except ImportError:
            from py2exe.build_exe import py2exe as _py2exe  # py2
        class cmd_py2exe(_py2exe):
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                # Same freeze/restore dance as cmd_build_exe above.
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
                _py2exe.run(self)
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["py2exe"] = cmd_py2exe
    # we override different "sdist" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.sdist import sdist as _sdist
    else:
        from distutils.command.sdist import sdist as _sdist
    class cmd_sdist(_sdist):
        def run(self):
            versions = get_versions()
            # Stash the result for make_release_tree (called later by run).
            self._versioneer_generated_versions = versions
            # unless we update this, the command will keep using the old
            # version
            self.distribution.metadata.version = versions["version"]
            return _sdist.run(self)
        def make_release_tree(self, base_dir, files):
            root = get_root()
            cfg = get_config_from_root(root)
            _sdist.make_release_tree(self, base_dir, files)
            # now locate _version.py in the new base_dir directory
            # (remembering that it may be a hardlink) and replace it with an
            # updated value
            target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
            print("UPDATING %s" % target_versionfile)
            write_to_version_file(target_versionfile,
                                  self._versioneer_generated_versions)
    cmds["sdist"] = cmd_sdist
    return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
    """Main VCS-independent setup function for installing Versioneer."""
    # Returns 0 on success, 1 when setup.cfg is missing configuration.
    root = get_root()
    try:
        cfg = get_config_from_root(root)
    except (EnvironmentError, configparser.NoSectionError,
            configparser.NoOptionError) as e:
        if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
            # No config at all: append a commented-out sample so the user
            # has something to edit.
            print("Adding sample versioneer config to setup.cfg",
                  file=sys.stderr)
            with open(os.path.join(root, "setup.cfg"), "a") as f:
                f.write(SAMPLE_CONFIG)
        print(CONFIG_ERROR, file=sys.stderr)
        return 1
    # Write the project-local _version.py from the per-VCS template.
    print(" creating %s" % cfg.versionfile_source)
    with open(cfg.versionfile_source, "w") as f:
        LONG = LONG_VERSION_PY[cfg.VCS]
        f.write(LONG % {"DOLLAR": "$",
                        "STYLE": cfg.style,
                        "TAG_PREFIX": cfg.tag_prefix,
                        "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                        "VERSIONFILE_SOURCE": cfg.versionfile_source,
                        })
    # Ensure the package __init__.py exposes __version__.
    ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
                       "__init__.py")
    if os.path.exists(ipy):
        try:
            with open(ipy, "r") as f:
                old = f.read()
        except EnvironmentError:
            old = ""
        if INIT_PY_SNIPPET not in old:
            print(" appending to %s" % ipy)
            with open(ipy, "a") as f:
                f.write(INIT_PY_SNIPPET)
        else:
            print(" %s unmodified" % ipy)
    else:
        print(" %s doesn't exist, ok" % ipy)
        ipy = None
    # Make sure both the top-level "versioneer.py" and versionfile_source
    # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
    # they'll be copied into source distributions. Pip won't be able to
    # install the package without this.
    manifest_in = os.path.join(root, "MANIFEST.in")
    simple_includes = set()
    try:
        with open(manifest_in, "r") as f:
            for line in f:
                if line.startswith("include "):
                    for include in line.split()[1:]:
                        simple_includes.add(include)
    except EnvironmentError:
        pass
    # That doesn't cover everything MANIFEST.in can do
    # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
    # it might give some false negatives. Appending redundant 'include'
    # lines is safe, though.
    if "versioneer.py" not in simple_includes:
        print(" appending 'versioneer.py' to MANIFEST.in")
        with open(manifest_in, "a") as f:
            f.write("include versioneer.py\n")
    else:
        print(" 'versioneer.py' already in MANIFEST.in")
    if cfg.versionfile_source not in simple_includes:
        print(" appending versionfile_source ('%s') to MANIFEST.in" %
              cfg.versionfile_source)
        with open(manifest_in, "a") as f:
            f.write("include %s\n" % cfg.versionfile_source)
    else:
        print(" versionfile_source already in MANIFEST.in")
    # Make VCS-specific changes. For git, this means creating/changing
    # .gitattributes to mark _version.py for export-subst keyword
    # substitution.
    do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
    return 0
def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations.

    Scans ./setup.py for the three required pieces of wiring
    (``import versioneer``, ``versioneer.get_cmdclass()``,
    ``versioneer.get_version()``) and for obsolete module-attribute
    setters (``versioneer.VCS = ...``).

    Returns the number of problems found: 0 when everything looks good,
    +1 if any required piece is missing, +1 if old-style setters remain.
    """
    found = set()
    setters = False
    errors = 0
    # Iterate the file lazily instead of materializing f.readlines().
    with open("setup.py", "r") as f:
        for line in f:
            if "import versioneer" in line:
                found.add("import")
            if "versioneer.get_cmdclass()" in line:
                found.add("cmdclass")
            if "versioneer.get_version()" in line:
                found.add("get_version")
            if "versioneer.VCS" in line:
                setters = True
            if "versioneer.versionfile_source" in line:
                setters = True
    if len(found) != 3:
        print("")
        print("Your setup.py appears to be missing some important items")
        print("(but I might be wrong). Please make sure it has something")
        print("roughly like the following:")
        print("")
        print(" import versioneer")
        print(" setup( version=versioneer.get_version(),")
        print(" cmdclass=versioneer.get_cmdclass(), ...)")
        print("")
        errors += 1
    if setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print("now lives in setup.cfg, and should be removed from setup.py")
        print("")
        errors += 1
    return errors
if __name__ == "__main__":
    # CLI entry point: `python versioneer.py setup` installs/refreshes the
    # Versioneer support files, then sanity-checks setup.py.
    # NOTE(review): sys.argv[1] raises IndexError when run without args.
    cmd = sys.argv[1]
    if cmd == "setup":
        errors = do_setup()
        errors += scan_setup_py()
        if errors:
sys.exit(1) | PypiClean |
/Git-Track-0.1.tar.gz/Git-Track-0.1/issue_db/issue_db.py | "The central pieces of code..."
from __future__ import with_statement
from git import Repo
from git.exc import InvalidGitRepositoryError
import datetime
import os
import pickle
import time
import subprocess
import sys
# On-disk pickle file holding the issue database (kept in the repo root).
ISSUE_FILE = '.issues'
def catch_id_err(func):
    "Decorator to catch errors from wrong ids."
    # Wraps a method whose second argument is an issue id: coerces the id
    # to int and converts lookup/conversion failures into user-facing
    # messages instead of tracebacks.
    def _safe(self, my_id, *args, **kwargs):
        "Id-safe version of ..."
        # NOTE(review): func's return value is discarded; wrapped methods
        # in this file are used for their side effects only.
        try:
            func(self, int(my_id), *args, **kwargs)
        except KeyError:
            # raised by self.issue_db[...] when the id is unknown
            print "%s: no such issue!" % my_id
        except ValueError: # comes from int(...)
            print "No or invalid id given!"
    return _safe
def safe_tmp_textfile(func):
    """Decorator guarding the scratch file ``__issue__.msg``.

    Any pre-existing ``__issue__.msg`` is moved aside to a ``~`` backup
    before *func* runs, and whatever ``__issue__.msg`` the call leaves
    behind is deleted afterwards.
    """
    def wrapper(self, *args, **kwargs):
        "Run func with __issue__.msg cleanup before and after."
        scratch = "__issue__.msg"
        if os.path.exists(scratch):
            os.rename(scratch, scratch + "~")
        func(self, *args, **kwargs)
        if os.path.exists(scratch):
            os.remove(scratch)
    return wrapper
class IssueDB(object):
    "'Database' managing the issues."
    # Issues live in a dict {int id: Issue}, pickled to ISSUE_FILE.
    # NOTE(review): the pickle is written with protocol -1 (binary) through
    # text-mode handles -- fine on POSIX Python 2, would break on Windows.
    def __init__(self):
        # Refuse to start outside a usable git checkout with a HEAD commit.
        try:
            Repo('.')
            try:
                Repo('.').head.commit.hexsha
            except ValueError:
                print "git repository .git has no head commit!"
                sys.exit()
        except InvalidGitRepositoryError:
            print "No git repository fond in .git!"
            sys.exit()
        # Load the existing database, or start empty.
        if os.path.exists(ISSUE_FILE):
            with open(ISSUE_FILE,'r') as issues:
                self.issue_db = pickle.load(issues)
        else:
            self.issue_db = {}
        # max_id is the highest id ever handed out (ids are never reused
        # upward: the next issue gets max_id + 1).
        if self.issue_db:
            self.max_id = max(self.issue_db)
        else:
            self.max_id = 0
    def __str__(self):
        "Prints all active issues."
        # Header row, separator, then open issues sorted by priority/id
        # (descending, via Issue.__gt__ and the [::-1] reversal).
        return (
            "\t".join(("id","Date", "Commit", "Status", "Prio", "Comment")) +
            "\n" + "-" * 70 + "\n" +
            "\n".join(str(issue) for issue in sorted(self.issue_db.values())[::-1]
                      if issue.status == 'open'))
    def show_all(self):
        "Prints also closed issues."
        return (
            "\t".join(("id","Date", "Commit", "Status", "Prio", "Comment")) +
            "\n" + "-" * 70 + "\n" +
            "\n".join(str(issue) for issue in self.issue_db.values()))
    def __repickle(self):
        "Rewrite database."
        # Persist the whole dict after every mutation.
        with open(ISSUE_FILE, 'w') as issues:
            pickle.dump(self.issue_db, issues, -1)
    @staticmethod
    def __get_head_sha():
        "Get head commit's sha."
        return Repo('.').head.commit.hexsha
    @safe_tmp_textfile
    def add_issue(self):
        "Add an issue."
        # Spawn $EDITOR on the scratch file; an empty save aborts.
        # NOTE(review): os.getenv('EDITOR') may be None, which would make
        # .split() raise AttributeError before the 'or' fallback applies.
        editor = os.getenv('EDITOR').split() or ['emacs']
        subprocess.call(editor + ['__issue__.msg'])
        if os.path.exists('__issue__.msg'):
            message = open('__issue__.msg','r')
            msg = message.read()
            sha = self.__get_head_sha()
            self.max_id += 1
            self.issue_db[self.max_id] = Issue(self.max_id, sha, msg)
            self.__repickle()
            message.close()
            print 'Added issue\n', self.issue_db[self.max_id]
        else:
            print 'Abort: Empty message!'
    @catch_id_err
    def set_prio(self, issue_id, prio):
        "Set the priority of an issue to int(prio)."
        try:
            self.issue_db[issue_id].priority = int(prio)
            self.__repickle()
        except ValueError:
            print "Priority must be integer!"
    @catch_id_err
    def info(self, issue_id):
        "Get info on a specific issue."
        self.issue_db[issue_id].more_info()
    @catch_id_err
    def remove(self, issue_id):
        "Remove a specific issue."
        del self.issue_db[issue_id]
        self.__repickle()
    @catch_id_err
    def close(self, issue_id):
        "Close a specific issue."
        # Remember which commit the issue was closed at.
        self.issue_db[issue_id].closedsha = self.__get_head_sha()
        self.issue_db[issue_id].status = 'closed'
        self.__repickle()
    @catch_id_err
    def re_add(self, issue_id):
        "Reset the sha to the latest commit."
        self.issue_db[issue_id].commitsha = self.__get_head_sha()
        self.__repickle()
    @safe_tmp_textfile
    @catch_id_err
    def edit(self, issue_id):
        "Change the message of an existing issue."
        # Seed the scratch file with the current message, let the user
        # edit it, then read the result back.
        with open('__issue__.msg', 'w') as message:
            message.write(self.issue_db[issue_id].msg)
        editor = os.getenv('EDITOR').split() or ['emacs']
        subprocess.call(editor + ['__issue__.msg'])
        message = open('__issue__.msg','r')
        msg = message.read()
        self.issue_db[issue_id].msg = msg
        self.__repickle()
        message.close()
class Issue(object):
    "Issue object."
    # A single tracked issue: id, the commit it was filed against, a free
    # text message, open/closed status and an integer priority.
    def __init__(self, myid, commitsha, msg):
        self.myid = myid              # integer id assigned by IssueDB
        self.commitsha = commitsha    # sha of HEAD when the issue was filed
        self.closedsha = ""           # sha of HEAD when closed (set later)
        self.msg = msg                # full issue text; first line is the summary
        self.issued = datetime.datetime.now()
        self.status = 'open'          # 'open' or 'closed'
        self.priority = 3             # default priority; settable via set_prio
    def __str__(self):
        # One tab-separated table row; the message is truncated to 30 chars.
        msg = self.msg.split("\n")[0] # get the first line
        return "\t".join((
            "%03i" % self.myid,
            self.issued.strftime("%b %d"),
            self.commitsha[:5],
            self.status,
            str(self.priority),
            len(msg) > 30 and msg[:27] + "..." or msg))
    def __gt__(self, other):
        # Order by priority first, then by id as a tie-breaker.
        return self.myid > other.myid if self.priority == \
            other.priority else self.priority > other.priority
    def get_commit(self, sha = ""):
        "Get the corresponding commit object."
        repo = Repo('.')
        return repo.commit(sha or self.commitsha)
    def more_info(self):
        "Print detailed informaiton."
        print "Issue " + "%03i" % self.myid
        print "-"*70
        print ("Status: %s\n"
               "Date: %s\n"
               "%s") % (
            self.status, self.issued.strftime("%a %b %d %H:%M %Y"),
            self.msg)
        if self.closedsha:
            print "-"*70
            print "Closed with:"
            self.__print_info(self.get_commit(self.closedsha))
        print "-"*70
        print "Opened:"
        self.__print_info(self.get_commit())
    @staticmethod
    def __print_info(commit):
        "Print info on commit."
        print ("commit %s\n"
               "Author: %s\n"
               "Date: %s\n") % (
            commit.hexsha, str(commit.author),
            time.asctime(time.gmtime(commit.committed_date)))
print commit.message | PypiClean |
/Electrum-VTC-2.9.3.3.tar.gz/Electrum-VTC-2.9.3.3/gui/qt/installwizard.py | import sys
import os
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
import electrum_vtc as electrum
from electrum_vtc import Wallet, WalletStorage
from electrum_vtc.util import UserCancelled, InvalidPassword
from electrum_vtc.base_wizard import BaseWizard
from electrum_vtc.i18n import _
from seed_dialog import SeedLayout, KeysLayout
from network_dialog import NetworkChoiceLayout
from util import *
from password_dialog import PasswordLayout, PW_NEW
class GoBack(Exception):
    """Raised by wizard pages to signal that the user pressed Back."""
    pass
# User-visible strings for the install wizard, wrapped in _() for i18n.
MSG_GENERATING_WAIT = _("Electrum is generating your addresses, please wait...")
MSG_ENTER_ANYTHING = _("Please enter a seed phrase, a master key, a list of "
                       "Vertcoin addresses, or a list of private keys")
MSG_ENTER_SEED_OR_MPK = _("Please enter a seed phrase or a master key (xpub or xprv):")
MSG_COSIGNER = _("Please enter the master public key of cosigner #%d:")
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
                     + _("Leave this field empty if you want to disable encryption.")
MSG_RESTORE_PASSPHRASE = \
    _("Please enter your seed derivation passphrase. "
      "Note: this is NOT your encryption password. "
      "Leave this field empty if you did not use one or are unsure.")
class CosignWidget(QWidget):
    """Pie-chart widget visualizing an m-of-n multisig setup:
    m green slices out of n total."""
    size = 120  # fixed width/height of the square widget, in pixels
    def __init__(self, m, n):
        QWidget.__init__(self)
        self.R = QRect(0, 0, self.size, self.size)
        self.setGeometry(self.R)
        self.setMinimumHeight(self.size)
        self.setMaximumHeight(self.size)
        self.m = m  # required signatures (green slices)
        self.n = n  # total cosigners (total slices)
    def set_n(self, n):
        self.n = n
        self.update()
    def set_m(self, m):
        self.m = m
        self.update()
    def paintEvent(self, event):
        # NOTE(review): this `math` import appears unused here.
        import math
        bgcolor = self.palette().color(QPalette.Background)
        pen = QPen(bgcolor, 7, QtCore.Qt.SolidLine)
        qp = QPainter()
        qp.begin(self)
        qp.setPen(pen)
        qp.setRenderHint(QPainter.Antialiasing)
        qp.setBrush(Qt.gray)
        # QPainter.drawPie takes angles in 1/16th of a degree, hence the
        # 16 * 360 factor: each slice spans 1/n of the full circle.
        for i in range(self.n):
            alpha = int(16* 360 * i/self.n)
            alpha2 = int(16* 360 * 1/self.n)
            qp.setBrush(Qt.green if i<self.m else Qt.gray)
            qp.drawPie(self.R, alpha, alpha2)
        qp.end()
def wizard_dialog(func):
    """Decorator for wizard page methods.

    The wrapped method receives a ``run_next`` keyword continuation; its
    return value is passed on to ``run_next``. ``GoBack`` navigates back
    (or closes the wizard when there is nowhere to go back to), and
    ``UserCancelled`` simply aborts the page.
    """
    def dispatcher(*args, **kwargs):
        advance = kwargs['run_next']
        wizard = args[0]
        if wizard.can_go_back():
            wizard.back_button.setText(_('Back'))
        else:
            wizard.back_button.setText(_('Cancel'))
        try:
            result = func(*args, **kwargs)
        except GoBack:
            if wizard.can_go_back():
                wizard.go_back()
            else:
                wizard.close()
            return
        except UserCancelled:
            return
        # Keep exact-type check: only a plain tuple is splatted as-is.
        if type(result) is not tuple:
            result = (result,)
        advance(*result)
    return dispatcher
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
def __init__(self, config, app, plugins, storage):
BaseWizard.__init__(self, config, storage)
QDialog.__init__(self, None)
self.setWindowTitle('Electrum-VTC - ' + _('Install Wizard'))
self.app = app
self.config = config
# Set for base base class
self.plugins = plugins
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.connect(self, QtCore.SIGNAL('accept'), self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addLayout(inner_vbox)
hbox.setStretchFactor(inner_vbox, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon(':icons/electrum-vtc.png')
self.show()
self.raise_()
self.refresh_gui() # Need for QT on MacOSX. Lame.
def run_and_get_wallet(self):
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('Electrum-LTC wallet'))
wallet_folder = os.path.dirname(self.storage.path)
def on_choose():
path = unicode(QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder))
if path:
self.name_e.setText(path)
def on_filename(filename):
filename = unicode(filename)
path = os.path.join(wallet_folder, filename.encode('utf8'))
try:
self.storage = WalletStorage(path)
except IOError:
self.storage = None
if self.storage:
if not self.storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
pw = False
elif self.storage.file_exists() and self.storage.is_encrypted():
msg = _("This file is encrypted.") + '\n' + _('Enter your password or choose another file.')
pw = True
else:
msg = _("Press 'Next' to open this wallet.")
pw = False
else:
msg = _('Cannot read file')
pw = False
self.msg_label.setText(msg)
if pw:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.storage.path)
self.name_e.setText(n.decode('utf8'))
while True:
if self.storage.file_exists() and not self.storage.is_encrypted():
break
if self.loop.exec_() != 2: # 2 = next
return
if not self.storage.file_exists():
break
if self.storage.file_exists() and self.storage.is_encrypted():
password = unicode(self.pw_e.text())
try:
self.storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e), _('OK'))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e), _('OK'))
return
path = self.storage.path
if self.storage.requires_split():
self.hide()
msg = _("The wallet '%s' contains multiple accounts, which are no longer supported in Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?"%path)
if not self.question(msg):
return
file_list = '\n'.join(self.storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
return
if self.storage.requires_upgrade():
self.hide()
msg = _("The format of your wallet '%s' must be upgraded for Electrum. This change will not be backward compatible"%path)
if not self.question(msg):
return
self.storage.upgrade()
self.show_warning(_('Your wallet was upgraded successfully'))
self.wallet = Wallet(self.storage)
return self.wallet
action = self.storage.get_action()
if action and action != 'new':
self.hide()
msg = _("The file '%s' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?") % path
if not self.question(msg):
if self.question(_("Do you want to delete '%s'?") % path):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
if action:
# self.wallet is set in run
self.run(action)
return self.wallet
self.wallet = Wallet(self.storage)
return self.wallet
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(filename).scaledToWidth(60))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid):
slayout = KeysLayout(parent=self, title=message, is_valid=is_valid)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next):
return self.text_input(title, message, is_valid)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind):
playout = PasswordLayout(None, msg, kind, self.next_button)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW)
def show_restore(self, wallet, network):
# FIXME: these messages are shown after the install wizard is
# finished and the window closed. On MacOSX they appear parented
# with a re-appeared ghost install wizard window...
if network:
def task():
wallet.wait_until_synchronized()
if wallet.is_found():
msg = _("Recovery successful")
else:
msg = _("No transactions found for this seed")
self.emit(QtCore.SIGNAL('synchronized'), msg)
self.connect(self, QtCore.SIGNAL('synchronized'), self.show_message)
t = threading.Thread(target = task)
t.daemon = True
t.start()
else:
msg = _("This wallet was restored offline. It may "
"contain more addresses than displayed.")
self.show_message(msg)
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self):
self.emit(QtCore.SIGNAL('accept'))
def waiting_dialog(self, task, msg):
self.please_wait.setText(MSG_GENERATING_WAIT)
self.refresh_gui()
t = threading.Thread(target = task)
t.start()
t.join()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = map(lambda x: x[0], choices)
c_titles = map(lambda x: x[1], choices)
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning=''):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(unicode(text)))
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
self.exec_layout(vbox, title, next_enabled=test(default))
return ' '.join(unicode(line.text()).split())
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfil the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require %d signatures')%m)
cw.set_m(m)
def on_n(n):
n_label.setText(_('From %d cosigners')%n)
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n) | PypiClean |
/MosqitoMod-0.0.2.tar.gz/MosqitoMod-0.0.2/libname/mos.py | import random
import math
import time
import os
from os.path import exists
import keyboard
import replit
def on_run():
    """Print the module's credit line."""
    credit = 'The Mos Module was made by MosqitoTorpedo'
    print(credit)
def pause(s):
    """Suspend execution for ``s`` seconds (thin wrapper over time.sleep)."""
    time.sleep(s)
def rndint(x, y):
    """Return a random integer N with ``x <= N <= y`` (both inclusive)."""
    return random.randint(x, y)
def read(file):
    """Return the full text of *file*.

    If the path does not exist, the module's error message (code 1001)
    is printed and ``None`` is returned instead of raising.
    """
    # Guard against bad paths instead of letting open() raise.
    if not exists(file):
        print('ACTION_FAILED; invalid file name or path; ERR_CODE=1001')
        return None
    # ``with`` guarantees the handle is closed even if read() fails.
    with open(file) as rfile:
        return rfile.read()
def clear():
    """Clear the console."""
    # Delegates to the Replit-specific console clear; only meaningful
    # when running inside a Replit environment (see `import replit`).
    replit.clear()
def edit(file, newContent):
    """Overwrite *file* with *newContent* and return the file's new text.

    Editing mos.py itself is refused (error code 0110) to avoid
    corrupting the module; in that case ``None`` is returned.
    """
    # Guard clause: refuse to touch the module's own source file.
    if 'mos.py' in file:
        print('ACTION_RESTRICTED; can not edit mos.py for risk of damaging module; ERR_CODE=0110')
        return None
    # The original rebound the write handle without closing it first;
    # ``with`` guarantees the data is flushed before the re-read below.
    with open(file, "w") as efile:
        efile.write(newContent)
    with open(file) as efile:
        return efile.read()
def errsrch(ERR_CODE):
    """Print the human-readable meaning of a Mos Module error code.

    Codes are matched by substring, in the same order as the original
    if/elif chain; an unrecognised code prints error 8604.
    """
    meanings = (
        ('0110', 'Error code 0110 means you tried to edit an undeitable file'),
        ('1001', 'Error code 1001 means that you tried to read an invalid file, check the stated file directory and try again'),
        ('4040', 'Error code 4040 means that you have tried to use the helpFunc command but entered an invalid funtion; check to make sure you entered the correct funtion and dont include the parenthese or variables'),
        ('8604', 'Error code 8604 means that you tried to use the error search funtion but inputed an invalid error code'),
    )
    for code, meaning in meanings:
        if code in ERR_CODE:
            print(meaning)
            return
    print('ACTION_FAILED; Invalid ERR_CODE; ERR_CODE=8604')
def help():
    """Print the Mos Module help page listing every public function."""
    # NOTE(review): this shadows the built-in help() inside this module;
    # renaming would change the public API, so it is only flagged here.
    print("""Mos Module Help Page
mos.pause([time_in_seconds]) - Pauses the program for set ammount of time in seconds
mos.rndint(x, y) - Generates a random integer (whole number) between x and y; y must be greater that x
mos.read(file) - Reads the file inputed; if your file is embeded in folders you must start the path with the first folder until you get to the file you want to read
mos.edit(file,new_file_content) - Edits the file with whatever you put; if your file is embeded in folders you must start the path with the first folder until you get to the file you want to edit
mos.errsrch(ERR_CODE) - Show the meaning of the inputed error code
mos.help() - shows this list
mos.clear() - Clears the console
mos.helpFunc(funtion) - Show a more in depth explination of the inputed function
""")
def helpFunc(func):
    """Print an in-depth explanation of a single Mos Module function.

    ``func`` is matched by substring, so both ``'pause'`` and
    ``'mos.pause'`` work.  Unknown names print error code 4040.
    """
    # A single elif chain so exactly one message is printed per call
    # (the original mixed ``if``/``elif`` and could print several).
    if 'pause' in func:
        print('This pauses the program and prevents anything from happening until the time is up.\nProper usage would be:\nmos.pause(seconds_to_pause)')
    elif 'rndint' in func:
        print('This generates a random number between x and y; y must be greater than x, otherwise you will recieve an error\nProper usage would be:\nmos.rndint(x, y)')
    elif 'read' in func:
        print("This reads the file that you have inputed; if you have the file inside of a folder you must include the full path to the file in the command\nProper usage would be:\nmos.read('folder/folder/text.txt')")
    elif 'edit' in func:
        print("This edit the inputed file to whatever you set it to be; WARNING: USING THIS COMMAND WILL REPLACE ALL PREVIOUS DATA IN THE FILE\nProper usage would be:\nmos.edit('folder/folder/text.txt','example')")
    elif 'errsrch' in func:
        print("This will show you the meaning of the error code you inputed;anytime you recive an error you will see a line that says 'ERR_CODE=xxxx' when using the errsrch command you would put those four digits into the parentheses\nProper usage would be:\nmos.errsrch('ERR_CODE')")
    # 'helpFunc' must be tested before 'help' — the original had them the
    # other way round, so the helpFunc branch was unreachable (any input
    # containing 'helpFunc' also contains 'help').
    elif 'helpFunc' in func:
        print("This will show you a more indepth help result to than the help page\nProper usage would be\nmos.helpFunc('funtion')")
    elif 'help' in func:
        print('This will show a page with all commands and what they do\nProper usage would be:\nmos.help()')
    elif 'clear' in func:
        print('This will clear the console\nProper usage would be:\nmos.clear()')
    else:
        print('ACTION_FAILED; invalid funtion; ERR_CODE=4040')
/GraphiPy-0.0.2a0-py3-none-any.whl/graphipy/graph/graph_pandas.py | import pandas as pd
import os
from graphipy.graph.graph_base import BaseGraph
# Graph object implemented by pandas
# Graph object implemented by pandas
class PandasGraph(BaseGraph):
    """Graph implementation backed by per-label pandas DataFrames.

    Nodes and edges are first accumulated in plain dicts
    (label -> id -> object) and only flushed into DataFrames once
    ``buffer`` elements have piled up; appending rows to a DataFrame
    one at a time is quadratic, so buffering keeps inserts cheap.
    """

    def __init__(self):
        BaseGraph.__init__(self)
        self._type = "pandas"

        # a dict of node data frames and edge data frames
        # key=label, value=dataframe
        self.nodes_df = {}
        self.edges_df = {}

        # buffer for optimization: counts of not-yet-flushed elements
        # and the dicts holding them (flushed by convert_to_df)
        self.node_count = 0
        self.edge_count = 0
        self.buffer = 5000
        self.nodes_dict = {}
        self.edges_dict = {}

        # os.path.join keeps this portable; the previous hard-coded
        # "\\" separator produced broken paths on non-Windows systems.
        self.path = os.path.join(os.getcwd(), "csv")
        if not os.path.exists(self.path):
            os.mkdir(self.path)

    def graph_type(self):
        """Return the backend identifier for this graph ("pandas")."""
        return self._type

    def convert_to_df(self, _type):
        """Flush buffered nodes and/or edges into their DataFrames.

        :param _type: "node", "edge" or "both".
        """
        if _type in ("node", "both"):
            for key in self.nodes_df:
                nodes_list = [vars(node)
                              for node in self.nodes_dict[key].values()]
                df = pd.DataFrame(nodes_list)
                # DataFrame.append was removed in pandas 2.0; concat is
                # the equivalent replacement.
                self.nodes_df[key] = pd.concat(
                    [self.nodes_df[key], df], sort=False, ignore_index=True)
                self.nodes_dict[key] = {}
            self.node_count = 0

        if _type in ("edge", "both"):
            for key in self.edges_df:
                edges_list = [vars(edge)
                              for edge in self.edges_dict[key].values()]
                df = pd.DataFrame(edges_list)
                self.edges_df[key] = pd.concat(
                    [self.edges_df[key], df], sort=False, ignore_index=True)
                self.edges_dict[key] = {}
            self.edge_count = 0

    def _make_export_dirs(self, prefix):
        """Create (if missing) the export folders for *prefix*.

        Returns ``(export_path, node_dir, edge_dir)``; each path ends
        with a separator so filenames can be appended directly.
        """
        export_path = os.path.join(self.path, prefix, "")
        export_path_node = os.path.join(export_path, "nodes", "")
        export_path_edge = os.path.join(export_path, "edges", "")
        for folder in (export_path, export_path_node, export_path_edge):
            if not os.path.exists(folder):
                os.mkdir(folder)
        return export_path, export_path_node, export_path_edge

    def export_all_csv(self, prefix):
        """Export every node and edge DataFrame as csv; return the folder."""
        export_path, node_dir, edge_dir = self._make_export_dirs(prefix)

        # append remaining buffered nodes/edges to the dataframes first
        self.convert_to_df("both")

        for key, df in self.get_nodes().items():
            df.to_csv(node_dir + key + ".csv", encoding="utf-8", index=False)
        for key, df in self.get_edges().items():
            df.to_csv(edge_dir + key + ".csv", encoding="utf-8", index=False)
        return export_path

    def export_csv(self, prefix, node_option=set(), edge_option=set()):
        """Export only the labels listed in the option sets as csv.

        The ``set()`` defaults are never mutated here, so sharing them
        between calls is safe.
        """
        export_path, node_dir, edge_dir = self._make_export_dirs(prefix)

        # append remaining buffered nodes/edges to the dataframes first
        self.convert_to_df("both")

        if len(node_option) > 0:
            for key, df in self.get_nodes().items():
                # only export node labels the caller asked for
                if key in node_option:
                    df.to_csv(node_dir + key + ".csv",
                              encoding="utf-8", index=False)
        if len(edge_option) > 0:
            for key, df in self.get_edges().items():
                # only export edge labels the caller asked for
                if key in edge_option:
                    df.to_csv(edge_dir + key + ".csv",
                              encoding="utf-8", index=False)
        return export_path

    def create_node(self, node):
        """Insert (or overwrite, keyed by id) a node into the graph."""
        label = node.get_label_attribute().lower()
        _id = node.get_id()

        # create a new dataframe if it is a new node type; the buffer
        # dict and the dataframe are created together so the two stay
        # in sync (convert_to_df iterates nodes_df and reads nodes_dict)
        if label not in self.nodes_df:
            self.nodes_dict[label] = {}
            self.nodes_df[label] = pd.DataFrame(
                columns=vars(node).keys())

        # append the node to the dictionary
        self.nodes_dict[label][_id] = node
        self.node_count += 1

        # flush once the buffer is full; ">=" (rather than "==") is used
        # in both create methods so a miscount can never skip the flush
        if self.node_count >= self.buffer:
            self.convert_to_df("node")

    def create_edge(self, edge):
        """Insert (or overwrite, keyed by id) an edge into the graph."""
        label = edge.get_label_attribute().lower()
        _id = edge.get_id()

        # create a new dataframe if it is a new edge type
        if label not in self.edges_df:
            self.edges_dict[label] = {}
            self.edges_df[label] = pd.DataFrame(
                columns=vars(edge).keys())

        # append the edge to the dictionary
        self.edges_dict[label][_id] = edge
        self.edge_count += 1

        # if buffer is reached, move everything to the dataframe
        if self.edge_count >= self.buffer:
            self.convert_to_df("edge")

    def get_nodes(self):
        """Return all node dataframes (flushing buffered nodes first)."""
        self.convert_to_df("node")
        return self.nodes_df

    def get_edges(self):
        """Return all edge dataframes (flushing buffered edges first)."""
        self.convert_to_df("edge")
        return self.edges_df

    def get_df_multiple(self, node_df=set(), edge_df=set()):
        """Return specified dataframes as ``{"node": {...}, "edge": {...}}``."""
        # append remaining nodes/edges to dataframe
        self.convert_to_df("both")

        dataframes = {
            "node": {},
            "edge": {}
        }
        # check nodes
        for node in node_df:
            node = node.lower()
            if node in self.nodes_df:
                dataframes["node"][node] = self.nodes_df[node]
        # check edges
        for edge in edge_df:
            edge = edge.lower()
            if edge in self.edges_df:
                dataframes["edge"][edge] = self.edges_df[edge]
        return dataframes

    def get_df(self, name, _type="node"):
        """Return a single dataframe by label, or None if it does not exist."""
        # append remaining nodes/edges to dataframe
        self.convert_to_df(_type.lower())

        name = name.lower()
        if _type == "node":
            if name in self.nodes_df:
                return self.nodes_df[name]
        else:
            if name in self.edges_df:
                return self.edges_df[name]
# Minecraft 1.16 block palette: block id -> average RGB colour
# ([r, g, b], 0-255 ints).  Slicer code matches source colours against
# these values to pick a block.  "minecraft:air" maps to black and also
# serves as the "empty" entry.
palette = \
    {
        "minecraft:air": [0, 0, 0],
        # "minecraft:acacia_log": [97, 97, 87],
        # "minecraft:acacia_planks": [90, 90, 50],
        "minecraft:ancient_debris": [96, 64, 56],
        "minecraft:andesite": [136, 136, 137],
        "minecraft:barrel": [122, 90, 52],
        "minecraft:basalt": [73, 73, 78],
        "minecraft:bedrock": [85, 85, 85],
        "minecraft:bee_nest": [161, 127, 88],
        "minecraft:birch_log": [217, 215, 210],
        "minecraft:birch_planks": [192, 175, 121],
        "minecraft:black_concrete": [8, 10, 15],
        "minecraft:black_concrete_powder": [25, 27, 32],
        "minecraft:black_glazed_terracotta": [68, 30, 32],
        "minecraft:black_terracotta": [37, 23, 16],
        "minecraft:black_stained_glass": [25, 25, 25],
        "minecraft:black_wool": [21, 21, 26],
        "minecraft:blackstone": [42, 35, 41],
        "minecraft:blast_furnace": [108, 108, 107],
        "minecraft:blue_concrete": [45, 47, 143],
        "minecraft:blue_concrete_powder": [70, 73, 167],
        "minecraft:blue_glazed_terracotta": [47, 65, 139],
        "minecraft:blue_ice": [116, 168, 253],
        "minecraft:blue_terracotta": [74, 60, 91],
        "minecraft:blue_stained_glass": [51, 76, 178],
        "minecraft:blue_wool": [53, 57, 157],
        "minecraft:bone_block": [229, 226, 208],
        "minecraft:bone_meal": [159, 158, 180],
        "minecraft:bookshelf": [117, 95, 60],
        "minecraft:brain_coral": [198, 85, 152],
        "minecraft:brain_coral_block": [207, 91, 159],
        "minecraft:bricks": [151, 98, 83],
        "minecraft:brown_concrete": [96, 60, 32],
        "minecraft:brown_concrete_powder": [126, 85, 54],
        "minecraft:brown_glazed_terracotta": [120, 106, 86],
        "minecraft:brown_stained_glass": [102, 76, 51],
        "minecraft:brown_mushroom_block": [149, 112, 81],
        "minecraft:brown_terracotta": [77, 51, 36],
        "minecraft:brown_wool": [114, 72, 41],
        "minecraft:bubble_coral": [161, 24, 160],
        "minecraft:bubble_coral_block": [165, 26, 162],
        "minecraft:cartography_table": [72, 50, 34],
        "minecraft:carved_pumpkin": [150, 84, 17],
        "minecraft:chiseled_nether_bricks": [47, 24, 28],
        "minecraft:chiseled_polished_blackstone": [54, 49, 57],
        "minecraft:chiseled_quartz_block": [232, 227, 218],
        "minecraft:chiseled_red_sandstone": [183, 97, 28],
        "minecraft:chiseled_sandstone": [216, 203, 155],
        "minecraft:chiseled_stone_bricks": [120, 119, 120],
        "minecraft:coal_block": [16, 16, 16],
        "minecraft:coal_ore": [116, 116, 116],
        "minecraft:coarse_dirt": [119, 86, 59],
        "minecraft:cobblestone": [128, 127, 128],
        "minecraft:cracked_nether_bricks": [40, 20, 24],
        "minecraft:cracked_polished_blackstone_bricks": [44, 38, 44],
        "minecraft:cracked_stone_bricks": [118, 118, 118],
        "minecraft:crimson_nylium": [131, 31, 31],
        "minecraft:crimson_planks": [101, 49, 71],
        "minecraft:crimson_stem": [93, 26, 30],
        "minecraft:crying_obsidian": [33, 10, 60],
        "minecraft:cut_red_sandstone": [189, 102, 32],
        "minecraft:cut_sandstone": [218, 206, 160],
        "minecraft:cyan_concrete": [21, 119, 136],
        "minecraft:cyan_concrete_powder": [37, 148, 157],
        "minecraft:cyan_glazed_terracotta": [52, 119, 125],
        "minecraft:cyan_stained_glass": [76, 127, 153],
        "minecraft:cyan_terracotta": [87, 91, 91],
        "minecraft:cyan_wool": [21, 138, 145],
        "minecraft:dark_oak_log": [60, 47, 26],
        "minecraft:dark_oak_planks": [67, 43, 20],
        "minecraft:dark_prismarine": [52, 92, 76],
        "minecraft:dead_brain_coral_block": [124, 118, 114],
        "minecraft:dead_bubble_coral_block": [132, 124, 119],
        "minecraft:dead_fire_coral_block": [132, 124, 120],
        "minecraft:dead_horn_coral_block": [134, 126, 122],
        "minecraft:dead_tube_coral_block": [130, 123, 120],
        "minecraft:diorite": [189, 188, 189],
        "minecraft:dirt": [134, 96, 67],
        "minecraft:dried_kelp_block": [38, 49, 30],
        "minecraft:emerald_block": [42, 203, 88],
        "minecraft:emerald_ore": [117, 137, 124],
        "minecraft:end_stone": [220, 223, 158],
        "minecraft:end_stone_bricks": [218, 224, 162],
        "minecraft:fire_coral_block": [164, 35, 47],
        "minecraft:fletching_table": [173, 155, 111],
        "minecraft:gilded_blackstone": [57, 44, 39],
        "minecraft:glass": [176, 214, 219],
        "minecraft:glowstone": [172, 131, 84],
        "minecraft:gold_block": [246, 208, 62],
        "minecraft:gold_ore": [144, 140, 125],
        "minecraft:granite": [149, 103, 86],
        "minecraft:grass_block": [127, 107, 66],
        "minecraft:grass_path": [142, 106, 70],
        "minecraft:gravel": [132, 127, 127],
        "minecraft:gray_concrete": [55, 58, 62],
        "minecraft:gray_concrete_powder": [77, 81, 85],
        "minecraft:gray_glazed_terracotta": [83, 90, 94],
        "minecraft:gray_stained_glass": [76, 76, 76],
        "minecraft:gray_terracotta": [58, 42, 36],
        "minecraft:gray_wool": [63, 68, 72],
        "minecraft:green_concrete": [73, 91, 36],
        "minecraft:green_concrete_powder": [97, 119, 45],
        "minecraft:green_glazed_terracotta": [117, 142, 67],
        "minecraft:green_stained_glass": [102, 127, 51],
        "minecraft:green_terracotta": [76, 83, 42],
        "minecraft:green_wool": [85, 110, 28],
        "minecraft:hay_block": [166, 136, 38],
        "minecraft:honey_block": [241, 146, 18],
        "minecraft:honeycomb_block": [229, 148, 30],
        "minecraft:horn_coral_block": [216, 200, 66],
        "minecraft:ice": [146, 184, 254],
        "minecraft:iron_block": [220, 220, 220],
        "minecraft:iron_ore": [136, 131, 127],
        "minecraft:jack_o_lantern": [215, 152, 53],
        "minecraft:jukebox": [4, 5, 5],
        "minecraft:jungle_log": [85, 68, 25],
        "minecraft:jungle_planks": [160, 115, 81],
        "minecraft:lapis_block": [31, 67, 140],
        "minecraft:lapis_ore": [99, 111, 133],
        "minecraft:light_blue_concrete": [36, 137, 199],
        "minecraft:light_blue_concrete_powder": [74, 181, 213],
        "minecraft:light_blue_glazed_terracotta": [95, 165, 209],
        "minecraft:light_blue_stained_glass": [102, 153, 216],
        "minecraft:light_blue_terracotta": [113, 109, 138],
        "minecraft:light_blue_wool": [58, 175, 217],
        "minecraft:light_gray_concrete": [125, 125, 115],
        "minecraft:light_gray_concrete_powder": [155, 155, 148],
        "minecraft:light_gray_glazed_terracotta": [144, 166, 168],
        "minecraft:light_gray_stained_glass": [153, 153, 153],
        "minecraft:light_gray_terracotta": [135, 107, 98],
        "minecraft:light_gray_wool": [142, 142, 135],
        "minecraft:lime_concrete": [94, 169, 24],
        "minecraft:lime_concrete_powder": [125, 189, 42],
        "minecraft:lime_glazed_terracotta": [163, 198, 55],
        "minecraft:lime_stained_glass": [127, 204, 25],
        "minecraft:lime_terracotta": [104, 118, 53],
        "minecraft:lime_wool": [112, 185, 26],
        "minecraft:lodestone": [119, 120, 123],
        "minecraft:loom": [76, 60, 36],
        "minecraft:magenta_concrete": [169, 48, 159],
        "minecraft:magenta_concrete_powder": [193, 84, 185],
        "minecraft:magenta_glazed_terracotta": [208, 100, 192],
        "minecraft:magenta_stained_glass": [178, 76, 216],
        "minecraft:magenta_terracotta": [150, 88, 109],
        "minecraft:magenta_wool": [190, 69, 180],
        "minecraft:magma_block": [143, 63, 32],
        "minecraft:mossy_cobblestone": [110, 118, 95],
        "minecraft:mossy_stone_bricks": [115, 121, 105],
        "minecraft:mushroom_stem": [203, 197, 186],
        "minecraft:mycelium": [113, 87, 71],
        "minecraft:nether_brick": [3, 5, 4],
        "minecraft:nether_bricks": [3, 5, 4],
        "minecraft:nether_gold_ore": [115, 55, 42],
        "minecraft:nether_quartz_ore": [118, 66, 62],
        "minecraft:nether_wart_block": [115, 3, 2],
        "minecraft:netherite_block": [67, 61, 64],
        "minecraft:netherrack": [98, 38, 38],
        "minecraft:oak_log": [109, 85, 51],
        "minecraft:oak_planks": [162, 131, 79],
        "minecraft:obsidian": [15, 11, 25],
        "minecraft:orange_concrete": [224, 97, 1],
        "minecraft:orange_concrete_powder": [227, 132, 32],
        "minecraft:orange_glazed_terracotta": [155, 147, 92],
        "minecraft:orange_stained_glass": [216, 127, 51],
        "minecraft:orange_terracotta": [162, 84, 38],
        "minecraft:orange_wool": [241, 118, 20],
        "minecraft:packed_ice": [142, 180, 250],
        "minecraft:pink_concrete": [214, 101, 143],
        "minecraft:pink_concrete_powder": [229, 153, 181],
        "minecraft:pink_glazed_terracotta": [235, 155, 182],
        "minecraft:pink_stained_glass": [242, 127, 165],
        "minecraft:pink_terracotta": [162, 78, 79],
        "minecraft:pink_wool": [238, 141, 172],
        "minecraft:podzol": [123, 88, 57],
        "minecraft:polished_andesite": [132, 135, 134],
        "minecraft:polished_basalt": [89, 88, 92],
        "minecraft:polished_blackstone": [53, 49, 57],
        "minecraft:polished_blackstone_bricks": [47, 41, 48],
        "minecraft:polished_diorite": [193, 193, 195],
        "minecraft:polished_granite": [154, 107, 89],
        "minecraft:prismarine": [99, 156, 151],
        "minecraft:prismarine_bricks": [99, 172, 158],
        "minecraft:pumpkin": [196, 115, 24],
        "minecraft:purple_concrete": [100, 32, 156],
        "minecraft:purple_concrete_powder": [132, 56, 178],
        "minecraft:purple_glazed_terracotta": [110, 48, 152],
        "minecraft:purple_stained_glass": [127, 63, 178],
        "minecraft:purple_terracotta": [118, 70, 86],
        "minecraft:purple_wool": [122, 42, 173],
        "minecraft:purpur_block": [170, 126, 170],
        "minecraft:purpur_pillar": [172, 130, 172],
        "minecraft:quartz_block": [237, 230, 224],
        "minecraft:quartz_bricks": [235, 229, 222],
        "minecraft:quartz_pillar": [236, 231, 224],
        "minecraft:red_concrete": [142, 33, 33],
        "minecraft:red_concrete_powder": [168, 54, 51],
        "minecraft:red_glazed_terracotta": [182, 60, 53],
        "minecraft:red_stained_glass": [153, 51, 51],
        "minecraft:red_mushroom": [217, 75, 68],
        "minecraft:red_mushroom_block": [200, 47, 45],
        "minecraft:red_nether_bricks": [70, 7, 9],
        "minecraft:red_sand": [191, 103, 33],
        "minecraft:red_sandstone": [187, 99, 29],
        "minecraft:red_terracotta": [143, 61, 47],
        "minecraft:red_wool": [161, 39, 35],
        "minecraft:redstone_block": [4, 3, 4],
        "minecraft:redstone_lamp": [95, 55, 30],
        "minecraft:redstone_ore": [133, 108, 108],
        "minecraft:sand": [219, 207, 163],
        "minecraft:sandstone": [216, 203, 156],
        "minecraft:sea_lantern": [172, 200, 190],
        "minecraft:shroomlight": [241, 147, 71],
        "minecraft:slime_block": [2, 2, 2],
        "minecraft:smithing_table": [64, 28, 24],
        "minecraft:smoker": [107, 106, 104],
        "minecraft:smooth_stone": [159, 159, 159],
        "minecraft:snow_block": [249, 254, 254],
        "minecraft:soul_sand": [81, 62, 51],
        "minecraft:soul_soil": [76, 58, 47],
        "minecraft:spruce_log": [59, 38, 17],
        "minecraft:spruce_planks": [115, 85, 49],
        "minecraft:stone": [126, 126, 126],
        "minecraft:stone_bricks": [122, 122, 122],
        "minecraft:stripped_acacia_log": [175, 93, 60],
        "minecraft:stripped_birch_log": [197, 176, 118],
        "minecraft:stripped_crimson_stem": [137, 57, 90],
        "minecraft:stripped_dark_oak_log": [97, 76, 50],
        "minecraft:stripped_jungle_log": [171, 133, 85],
        "minecraft:stripped_oak_log": [177, 144, 86],
        "minecraft:stripped_spruce_log": [116, 90, 52],
        "minecraft:stripped_warped_stem": [58, 151, 148],
        "minecraft:target": [229, 176, 168],
        "minecraft:terracotta": [152, 94, 68],
        "minecraft:tube_coral_block": [49, 87, 207],
        "minecraft:warped_fungus": [74, 109, 88],
        "minecraft:warped_nylium": [43, 114, 101],
        "minecraft:warped_planks": [43, 105, 99],
        "minecraft:warped_wart_block": [23, 120, 121],
        "minecraft:wet_sponge": [171, 181, 70],
        "minecraft:white_concrete": [207, 213, 214],
        "minecraft:white_concrete_powder": [226, 227, 228],
        "minecraft:white_glazed_terracotta": [188, 212, 203],
        "minecraft:white_stained_glass": [255, 255, 255],
        "minecraft:white_terracotta": [210, 178, 161],
        "minecraft:white_wool": [234, 236, 237],
        "minecraft:yellow_concrete": [241, 175, 21],
        "minecraft:yellow_concrete_powder": [233, 199, 55],
        "minecraft:yellow_glazed_terracotta": [234, 192, 89],
        "minecraft:yellow_stained_glass": [229, 229, 51],
        "minecraft:yellow_terracotta": [186, 133, 35],
        "minecraft:yellow_wool": [249, 198, 40]
    }
/FIRSTBEATLU-0.13.1.tar.gz/FIRSTBEATLU-0.13.1/econml/orf/_causal_tree.py | import numpy as np
from sklearn.utils import check_random_state
class Node:
    """Building block of :class:`CausalTree` class.

    Parameters
    ----------
    sample_inds : array-like, shape (n, )
        Indices defining the sample that the split criterion will be computed on.
    estimate_inds : array-like, shape (n, )
        Indices defining the sample used for calculating balance criteria.
    """

    def __init__(self, sample_inds, estimate_inds):
        # feature == -1 marks a leaf; internal nodes get a real feature
        # index and a finite threshold when they are split.
        self.feature = -1
        self.threshold = np.inf
        self.split_sample_inds = sample_inds
        self.est_sample_inds = estimate_inds
        # Children are filled in by CausalTree.create_splits.
        self.left = None
        self.right = None

    def find_tree_node(self, value):
        """
        Find and return the leaf of the causal tree that corresponds
        to the input feature vector.

        Parameters
        ----------
        value : array-like, shape (d_x,)
            Feature vector whose node we want to find.
        """
        # Iterative walk: go left while the feature value is below the
        # node's threshold, right otherwise, until a leaf is reached.
        current = self
        while current.feature != -1:
            if value[current.feature] < current.threshold:
                current = current.left
            else:
                current = current.right
        return current
class CausalTree:
    """Base class for growing an OrthoForest.

    Parameters
    ----------
    nuisance_estimator : method
        Method that estimates the nuisances at each node.
        Takes in (Y, T, X, W) and returns nuisance estimates.

    parameter_estimator : method
        Method that estimates the parameter of interest at each node.
        Takes in (Y, T, nuisance_estimates) and returns the parameter estimate.

    moment_and_mean_gradient_estimator : method
        Method that estimates the moments and mean moment gradient at each node.
        Takes in (Y, T, X, W, nuisance_estimates, parameter_estimate) and returns
        the moments and the mean moment gradient.

    min_leaf_size : integer, optional (default=10)
        The minimum number of samples in a leaf.

    max_depth : integer, optional (default=10)
        The maximum number of splits to be performed when expanding the tree.

    n_proposals :  int, optional (default=1000)
        Number of split proposals to be considered. A smaller number will improve
        execution time, but might reduce accuracy of prediction.

    balancedness_tol : float, optional (default=.3)
        Tolerance for balance between child nodes in a split. A smaller value
        will result in an unbalanced tree prone to overfitting. Has to lie
        between 0 and .5 as it is used to control both directions of imbalancedness.
        With the default value we guarantee that each child of a split contains
        at least 20% and at most 80% of the data of the parent node.

    random_state : int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;
        If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used
        by :mod:`np.random<numpy.random>`.
    """

    def __init__(self,
                 min_leaf_size=10,
                 max_depth=10,
                 n_proposals=1000,
                 balancedness_tol=.3,
                 random_state=None):
        # Causal tree parameters
        self.min_leaf_size = min_leaf_size
        self.max_depth = max_depth
        self.balancedness_tol = balancedness_tol
        self.n_proposals = n_proposals
        self.random_state = check_random_state(random_state)
        # Tree structure
        self.tree = None

    def create_splits(self, Y, T, X, W,
                      nuisance_estimator, parameter_estimator, moment_and_mean_gradient_estimator):
        """
        Recursively build a causal tree.

        Parameters
        ----------
        Y : array-like, shape (n, d_y)
            Outcome for the treatment policy.

        T : array-like, shape (n, d_t)
            Treatment policy.

        X : array-like, shape (n, d_x)
            Feature vector that captures heterogeneity.

        W : array-like, shape (n, d_w) or None (default=None)
            High-dimensional controls.
        """
        # No need for a random split since the data is already
        # a random subsample from the original input
        n = Y.shape[0] // 2
        self.tree = Node(np.arange(n), np.arange(n, Y.shape[0]))
        # node list stores the nodes that are yet to be splitted
        node_list = [(self.tree, 0)]
        while len(node_list) > 0:
            node, depth = node_list.pop()

            # If by splitting we have too small leaves or if we reached the maximum number of splits we stop
            if node.split_sample_inds.shape[0] // 2 >= self.min_leaf_size and depth < self.max_depth:

                # Create local sample set
                node_X = X[node.split_sample_inds]
                node_W = W[node.split_sample_inds] if W is not None else None
                node_T = T[node.split_sample_inds]
                node_Y = Y[node.split_sample_inds]
                node_X_estimate = X[node.est_sample_inds]
                node_size_split = node_X.shape[0]
                node_size_est = node_X_estimate.shape[0]

                # Compute nuisance estimates for the current node
                nuisance_estimates = nuisance_estimator(node_Y, node_T, node_X, node_W)
                if nuisance_estimates is None:
                    # Nuisance estimate cannot be calculated
                    continue
                # Estimate parameter for current node
                node_estimate = parameter_estimator(node_Y, node_T, node_X, nuisance_estimates)
                if node_estimate is None:
                    # Node estimate cannot be calculated
                    continue
                # Calculate moments and gradient of moments for current data
                moments, mean_grad = moment_and_mean_gradient_estimator(
                    node_Y, node_T, node_X, node_W,
                    nuisance_estimates,
                    node_estimate)
                # Calculate inverse gradient
                try:
                    inverse_grad = np.linalg.inv(mean_grad)
                except np.linalg.LinAlgError as exc:
                    if 'Singular matrix' in str(exc):
                        # The gradient matrix is not invertible.
                        # No good split can be found
                        continue
                    else:
                        raise exc
                # Calculate point-wise pseudo-outcomes rho
                rho = np.matmul(moments, inverse_grad)

                # a split is determined by a feature and a sample pair
                # the number of possible splits is at most (number of features) * (number of node samples)
                n_proposals = min(self.n_proposals, node_X.size)
                # we draw random such pairs by drawing a random number in {0, n_feats * n_node_samples}
                random_pair = self.random_state.choice(node_X.size, size=n_proposals, replace=False)
                # parse row and column of random pair
                thr_inds, dim_proposals = np.unravel_index(random_pair, node_X.shape)
                # the sample of the pair is the integer division of the random number with n_feats
                thr_proposals = node_X[thr_inds, dim_proposals]

                # calculate the binary indicator of whether sample i is on the left or the right
                # side of proposed split j. So this is an n_samples x n_proposals matrix
                side = node_X[:, dim_proposals] < thr_proposals
                # calculate the number of samples on the left child for each proposed split
                size_left = np.sum(side, axis=0)
                # calculate the analogous binary indicator for the samples in the estimation set
                side_est = node_X_estimate[:, dim_proposals] < thr_proposals
                # calculate the number of estimation samples on the left child of each proposed split
                size_est_left = np.sum(side_est, axis=0)

                # find the upper and lower bound on the size of the left split for the split
                # to be valid so as for the split to be balanced and leave at least min_leaf_size
                # on each side.
                lower_bound = max((.5 - self.balancedness_tol) * node_size_split, self.min_leaf_size)
                upper_bound = min((.5 + self.balancedness_tol) * node_size_split, node_size_split - self.min_leaf_size)
                valid_split = (lower_bound <= size_left)
                valid_split &= (size_left <= upper_bound)

                # similarly for the estimation sample set
                lower_bound_est = max((.5 - self.balancedness_tol) * node_size_est, self.min_leaf_size)
                upper_bound_est = min((.5 + self.balancedness_tol) * node_size_est, node_size_est - self.min_leaf_size)
                valid_split &= (lower_bound_est <= size_est_left)
                valid_split &= (size_est_left <= upper_bound_est)

                # if there is no valid split then don't create any children
                # ("not" instead of "~" keeps the intent unambiguous; np.any
                # returns a numpy bool, for which both are equivalent)
                if not np.any(valid_split):
                    continue

                # filter only the valid splits
                valid_dim_proposals = dim_proposals[valid_split]
                valid_thr_proposals = thr_proposals[valid_split]
                valid_side = side[:, valid_split]
                valid_size_left = size_left[valid_split]
                valid_side_est = side_est[:, valid_split]

                # calculate the average influence vector of the samples in the left child
                left_diff = np.matmul(rho.T, valid_side)
                # calculate the average influence vector of the samples in the right child
                right_diff = np.matmul(rho.T, 1 - valid_side)
                # take the square of each of the entries of the influence vectors and normalize
                # by size of each child
                left_score = left_diff**2 / valid_size_left.reshape(1, -1)
                right_score = right_diff**2 / (node_size_split - valid_size_left).reshape(1, -1)
                # calculate the vector score of each candidate split as the average of left and right
                # influence vectors
                spl_score = (right_score + left_score) / 2

                # eta specifies how much weight to put on individual heterogeneity vs common heterogeneity
                # across parameters. we give some benefit to individual heterogeneity factors for cases
                # where there might be large discontinuities in some parameter as the conditioning set varies.
                # Drawn from self.random_state (not the global np.random) so
                # that a fixed random_state makes the whole tree reproducible.
                eta = self.random_state.uniform(0.25, 1)
                # calculate the scalar score of each split by aggregating across the vector of scores
                split_scores = np.max(spl_score, axis=0) * eta + np.mean(spl_score, axis=0) * (1 - eta)

                # Find split that minimizes criterion
                best_split_ind = np.argmax(split_scores)
                node.feature = valid_dim_proposals[best_split_ind]
                node.threshold = valid_thr_proposals[best_split_ind]

                # Create child nodes with corresponding subsamples
                left_split_sample_inds = node.split_sample_inds[valid_side[:, best_split_ind]]
                left_est_sample_inds = node.est_sample_inds[valid_side_est[:, best_split_ind]]
                node.left = Node(left_split_sample_inds, left_est_sample_inds)
                right_split_sample_inds = node.split_sample_inds[~valid_side[:, best_split_ind]]
                right_est_sample_inds = node.est_sample_inds[~valid_side_est[:, best_split_ind]]
                node.right = Node(right_split_sample_inds, right_est_sample_inds)

                # add the created children to the list of not yet split nodes
                node_list.append((node.left, depth + 1))
                node_list.append((node.right, depth + 1))

    def print_tree_rec(self, node):
        """Recursively print the subtree rooted at *node* (pre-order)."""
        if not node:
            return
        print("Node: ({}, {})".format(node.feature, node.threshold))
        print("Left Child")
        self.print_tree_rec(node.left)
        print("Right Child")
        self.print_tree_rec(node.right)

    def print_tree(self):
        """Print the whole tree, starting at the root."""
        self.print_tree_rec(self.tree)

    def find_split(self, value):
        """Return the leaf node that the feature vector *value* falls into."""
        return self.tree.find_tree_node(value.astype(np.float64))
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/packages/pip/_internal/configuration.py | import locale
import logging
import os
from pip._vendor.six.moves import configparser
from pip._internal.exceptions import (
ConfigurationError, ConfigurationFileCouldNotBeLoaded,
)
from pip._internal.locations import (
global_config_files, legacy_config_file, new_config_file, site_config_file,
)
from pip._internal.utils.misc import ensure_dir, enum
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import (
Any, Dict, Iterable, List, NewType, Optional, Tuple
)
RawConfigParser = configparser.RawConfigParser # Shorthand
Kind = NewType("Kind", str)
logger = logging.getLogger(__name__)
# NOTE: Maybe use the optionx attribute to normalize keynames.
def _normalize_name(name):
# type: (str) -> str
"""Make a name consistent regardless of source (environment or file)
"""
name = name.lower().replace('_', '-')
if name.startswith('--'):
name = name[2:] # only prefer long opts
return name
def _disassemble_key(name):
# type: (str) -> List[str]
return name.split(".", 1)
# The kinds of configurations there are.
# Precedence between them is determined by Configuration._override_order,
# not by the field order here.
kinds = enum(
    USER="user",  # User Specific
    GLOBAL="global",  # System Wide
    SITE="site",  # [Virtual] Environment Specific
    ENV="env",  # from PIP_CONFIG_FILE
    ENV_VAR="env-var",  # from Environment Variables
)
class Configuration(object):
    """Handles management of configuration.

    Provides an interface to accessing and managing configuration files.

    This class provides an API that takes "section.key-name" style keys and
    stores the value associated with it as "key-name" under the section
    "section".

    This allows for a clean interface wherein both the section and the
    key-name are preserved in an easy to manage form in the configuration
    files and the data stored is also nice.
    """

    def __init__(self, isolated, load_only=None):
        # type: (bool, Kind) -> None
        super(Configuration, self).__init__()

        # Only file-backed variants (or None) may be targeted for editing;
        # ENV and ENV_VAR come from the environment and cannot be saved.
        _valid_load_only = [kinds.USER, kinds.GLOBAL, kinds.SITE, None]
        if load_only not in _valid_load_only:
            raise ConfigurationError(
                "Got invalid value for load_only - should be one of {}".format(
                    ", ".join(map(repr, _valid_load_only[:-1]))
                )
            )
        self.isolated = isolated  # type: bool
        self.load_only = load_only  # type: Optional[Kind]

        # The order here determines the override order.
        self._override_order = [
            kinds.GLOBAL, kinds.USER, kinds.SITE, kinds.ENV, kinds.ENV_VAR
        ]

        # PIP_VERSION / PIP_HELP are CLI switches, not configuration values.
        self._ignore_env_names = ["version", "help"]

        # Because we keep track of where we got the data from
        self._parsers = {
            variant: [] for variant in self._override_order
        }  # type: Dict[Kind, List[Tuple[str, RawConfigParser]]]
        self._config = {
            variant: {} for variant in self._override_order
        }  # type: Dict[Kind, Dict[str, Any]]
        self._modified_parsers = []  # type: List[Tuple[str, RawConfigParser]]

    def load(self):
        # type: () -> None
        """Loads configuration from configuration files and environment
        """
        self._load_config_files()
        if not self.isolated:
            self._load_environment_vars()

    def get_file_to_edit(self):
        # type: () -> Optional[str]
        """Returns the file with highest priority in configuration
        """
        assert self.load_only is not None, \
            "Need to be specified a file to be editing"

        try:
            return self._get_parser_to_modify()[0]
        except IndexError:
            # No parser loaded for the selected variant.
            return None

    def items(self):
        # type: () -> Iterable[Tuple[str, Any]]
        """Returns key-value pairs like dict.items() representing the loaded
        configuration
        """
        return self._dictionary.items()

    def get_value(self, key):
        # type: (str) -> Any
        """Get a value from the configuration.

        :raises ConfigurationError: if *key* is not present in any variant.
        """
        try:
            return self._dictionary[key]
        except KeyError:
            raise ConfigurationError("No such key - {}".format(key))

    def set_value(self, key, value):
        # type: (str, Any) -> None
        """Modify a value in the configuration.

        Updates both the in-memory dict for the targeted variant and its
        backing parser (so a later ``save()`` persists the change).
        """
        self._ensure_have_load_only()

        fname, parser = self._get_parser_to_modify()

        if parser is not None:
            section, name = _disassemble_key(key)

            # Modify the parser and the configuration
            if not parser.has_section(section):
                parser.add_section(section)
            parser.set(section, name, value)

        self._config[self.load_only][key] = value
        self._mark_as_modified(fname, parser)

    def unset_value(self, key):
        # type: (str) -> None
        """Unset a value in the configuration.

        :raises ConfigurationError: if the key is absent, or if the parser
            and the in-memory view have drifted out of sync.
        """
        self._ensure_have_load_only()

        if key not in self._config[self.load_only]:
            raise ConfigurationError("No such key - {}".format(key))

        fname, parser = self._get_parser_to_modify()

        if parser is not None:
            section, name = _disassemble_key(key)

            # Remove the key in the parser
            modified_something = False
            if parser.has_section(section):
                # Returns whether the option was removed or not
                modified_something = parser.remove_option(section, name)

            if modified_something:
                # name removed from parser, section may now be empty
                section_iter = iter(parser.items(section))
                try:
                    val = next(section_iter)
                except StopIteration:
                    val = None

                if val is None:
                    # Drop the now-empty section so the file stays tidy.
                    parser.remove_section(section)

                self._mark_as_modified(fname, parser)
            else:
                # The key was tracked in self._config but missing from the
                # parser: internal bookkeeping is inconsistent.
                raise ConfigurationError(
                    "Fatal Internal error [id=1]. Please report as a bug."
                )

        del self._config[self.load_only][key]

    def save(self):
        # type: () -> None
        """Save the current in-memory state.
        """
        self._ensure_have_load_only()

        for fname, parser in self._modified_parsers:
            logger.info("Writing to %s", fname)

            # Ensure directory exists.
            ensure_dir(os.path.dirname(fname))

            with open(fname, "w") as f:
                parser.write(f)

    #
    # Private routines
    #

    def _ensure_have_load_only(self):
        # type: () -> None
        # Guard for mutating operations: they need a single target variant.
        if self.load_only is None:
            raise ConfigurationError("Needed a specific file to be modifying.")
        logger.debug("Will be working with %s variant only", self.load_only)

    @property
    def _dictionary(self):
        # type: () -> Dict[str, Any]
        """A dictionary representing the loaded configuration.
        """
        # NOTE: Dictionaries are not populated if not loaded. So, conditionals
        # are not needed here.
        retval = {}

        # Later variants in _override_order win on key collisions.
        for variant in self._override_order:
            retval.update(self._config[variant])

        return retval

    def _load_config_files(self):
        # type: () -> None
        """Loads configuration from configuration files
        """
        config_files = dict(self._iter_config_files())
        if config_files[kinds.ENV][0:1] == [os.devnull]:
            # PIP_CONFIG_FILE=os.devnull is the documented way to disable
            # all configuration-file loading.
            logger.debug(
                "Skipping loading configuration files due to "
                "environment's PIP_CONFIG_FILE being os.devnull"
            )
            return

        for variant, files in config_files.items():
            for fname in files:
                # If there's specific variant set in `load_only`, load only
                # that variant, not the others.
                if self.load_only is not None and variant != self.load_only:
                    logger.debug(
                        "Skipping file '%s' (variant: %s)", fname, variant
                    )
                    continue

                parser = self._load_file(variant, fname)

                # Keeping track of the parsers used
                self._parsers[variant].append((fname, parser))

    def _load_file(self, variant, fname):
        # type: (Kind, str) -> RawConfigParser
        # Parse one file and merge its normalized keys into the variant's
        # config dict, returning the parser for later modification.
        logger.debug("For variant '%s', will try loading '%s'", variant, fname)
        parser = self._construct_parser(fname)

        for section in parser.sections():
            items = parser.items(section)
            self._config[variant].update(self._normalized_keys(section, items))

        return parser

    def _construct_parser(self, fname):
        # type: (str) -> RawConfigParser
        parser = configparser.RawConfigParser()

        # If there is no such file, don't bother reading it but create the
        # parser anyway, to hold the data.
        # Doing this is useful when modifying and saving files, where we don't
        # need to construct a parser.
        if os.path.exists(fname):
            try:
                parser.read(fname)
            except UnicodeDecodeError:
                # See https://github.com/pypa/pip/issues/4963
                raise ConfigurationFileCouldNotBeLoaded(
                    reason="contains invalid {} characters".format(
                        locale.getpreferredencoding(False)
                    ),
                    fname=fname,
                )
            except configparser.Error as error:
                # See https://github.com/pypa/pip/issues/4893
                raise ConfigurationFileCouldNotBeLoaded(error=error)
        return parser

    def _load_environment_vars(self):
        # type: () -> None
        """Loads configuration from environment variables
        """
        # ":env:" is a pseudo-section name for environment-derived keys.
        self._config[kinds.ENV_VAR].update(
            self._normalized_keys(":env:", self._get_environ_vars())
        )

    def _normalized_keys(self, section, items):
        # type: (str, Iterable[Tuple[str, Any]]) -> Dict[str, Any]
        """Normalizes items to construct a dictionary with normalized keys.

        This routine is where the names become keys and are made the same
        regardless of source - configuration files or environment.
        """
        normalized = {}
        for name, val in items:
            key = section + "." + _normalize_name(name)
            normalized[key] = val
        return normalized

    def _get_environ_vars(self):
        # type: () -> Iterable[Tuple[str, str]]
        """Returns a generator with all environmental vars with prefix PIP_"""
        for key, val in os.environ.items():
            should_be_yielded = (
                key.startswith("PIP_") and
                key[4:].lower() not in self._ignore_env_names
            )
            if should_be_yielded:
                # Yield the name with the PIP_ prefix stripped and lowered.
                yield key[4:].lower(), val

    # XXX: This is patched in the tests.
    def _iter_config_files(self):
        # type: () -> Iterable[Tuple[Kind, List[str]]]
        """Yields variant and configuration files associated with it.

        This should be treated like items of a dictionary.
        """
        # SMELL: Move the conditions out of this function

        # environment variables have the lowest priority
        config_file = os.environ.get('PIP_CONFIG_FILE', None)
        if config_file is not None:
            yield kinds.ENV, [config_file]
        else:
            yield kinds.ENV, []

        # at the base we have any global configuration
        yield kinds.GLOBAL, list(global_config_files)

        # per-user configuration next
        should_load_user_config = not self.isolated and not (
            config_file and os.path.exists(config_file)
        )
        if should_load_user_config:
            # The legacy config file is overridden by the new config file
            yield kinds.USER, [legacy_config_file, new_config_file]

        # finally virtualenv configuration first trumping others
        yield kinds.SITE, [site_config_file]

    def _get_parser_to_modify(self):
        # type: () -> Tuple[str, RawConfigParser]
        # Determine which parser to modify
        parsers = self._parsers[self.load_only]
        if not parsers:
            # This should not happen if everything works correctly.
            raise ConfigurationError(
                "Fatal Internal error [id=2]. Please report as a bug."
            )

        # Use the highest priority parser.
        return parsers[-1]

    # XXX: This is patched in the tests.
    def _mark_as_modified(self, fname, parser):
        # type: (str, RawConfigParser) -> None
        # Record (fname, parser) exactly once so save() writes each file once.
        file_parser_tuple = (fname, parser)
        if file_parser_tuple not in self._modified_parsers:
            self._modified_parsers.append(file_parser_tuple)
/MnemoPwd-1.2.1-py3-none-any.whl/mnemopwd/client/corelayer/protocol/StateS31A.py |
# Copyright (c) 2016, Thierry Lemeunier <thierry at lemeunier dot net>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
State S31 : Configuration
"""
from ...util.funcutils import singleton
from .StateSCC import StateSCC
from ....common.KeyHandler import KeyHandler
@singleton
class StateS31A(StateSCC):
    """State S31 : Configuration

    Handles the server's response to a configuration request. The response
    wire format is a bytes payload beginning with ``ERROR <message>`` on
    rejection or ``OK <code>`` on success, where code ``1`` means the
    existing configuration was accepted and ``2`` means a new configuration
    was accepted.
    """

    def do(self, handler, data):
        """Action of the state S31A: treat response of configuration request.

        :param handler: the protocol handler; provides ``lock``, ``config``,
            ``ms``, ``core``, ``loop``, ``notify`` and ``exception_handler``.
        :param data: raw response bytes from the server.
        """
        with handler.lock:
            try:
                # Test if configuration request is rejected
                is_KO = data[:5] == b"ERROR"
                if is_KO:
                    # The error message follows "ERROR " (skip the space).
                    raise Exception((data[6:]).decode())

                # Test if configuration is accepted
                is_OK = data[:2] == b"OK"
                if is_OK:
                    # data[3:] skips "OK " to read the status code.
                    if data[3:] == b"1":
                        message = "Configuration accepted"
                    elif data[3:] == b"2":
                        message = "New configuration accepted"
                    else:
                        raise Exception("S31 protocol error")

                    # Create the client KeyHandler from the semicolon-separated
                    # cipher-suite string: cur1;cip1;cur2;cip2;cur3;cip3.
                    cypher_suite = handler.config.split(';')
                    handler.keyH = KeyHandler(
                        handler.ms, cur1=cypher_suite[0], cip1=cypher_suite[1],
                        cur2=cypher_suite[2], cip2=cypher_suite[3],
                        cur3=cypher_suite[4], cip3=cypher_suite[5])

                    # Task is ended
                    handler.core.taskInProgress = False

                    # Notify the handler a property has changed; run notify in
                    # an executor so listeners don't block the event loop.
                    handler.loop.run_in_executor(None, handler.notify,
                                                 "application.keyhandler",
                                                 handler.keyH)
                    handler.loop.run_in_executor(None, handler.notify,
                                                 "connection.state", message)
                else:
                    raise Exception("S31 protocol error")

            except Exception as exc:
                # Schedule a call to the exception handler on the loop thread
                # (we may be running outside it while holding handler.lock).
                handler.loop.call_soon_threadsafe(handler.exception_handler, exc)
/Box2D-2.3.2.tar.gz/Box2D-2.3.2/examples/theo_jansen.py |
from .framework import (Framework, Keys, main)
from Box2D import (b2CircleShape, b2EdgeShape, b2FixtureDef, b2PolygonShape,
b2Vec2, b2_pi)
# Original inspired by a contribution by roman_m
# Dimensions scooped from APE (http://www.cove.org/ape/index.htm)
class TheoJansen (Framework):
    """Theo Jansen's "strandbeest" walking linkage.

    A motorized chassis wheel drives three pairs of mirrored legs, phased
    120 degrees apart around the crank, so the machine walks over a field
    of balls. Keys: a = walk left, s = brake, d = walk right,
    m = toggle motor.
    """
    name = "Theo Jansen"
    description = "Keys: left = a, brake = s, right = d, toggle motor = m"
    motorSpeed = 2       # magnitude of the crank's angular speed
    motorOn = True       # whether the revolute joint motor starts enabled
    offset = (0, 8)      # world-space offset applied to the whole machine

    def __init__(self):
        super(TheoJansen, self).__init__()

        ball_count = 40
        pivot = b2Vec2(0, 0.8)

        # The ground: a floor plus two side walls to keep the balls in.
        # (The body handle is not needed afterwards.)
        self.world.CreateStaticBody(
            shapes=[
                b2EdgeShape(vertices=[(-50, 0), (50, 0)]),
                b2EdgeShape(vertices=[(-50, 0), (-50, 10)]),
                b2EdgeShape(vertices=[(50, 0), (50, 10)]),
            ]
        )

        # NOTE: an unused 0.5x0.5 box fixture definition was removed here;
        # it was never attached to any body.
        circle = b2FixtureDef(
            shape=b2CircleShape(radius=0.25),
            density=1)

        # Create the balls on the ground
        for i in range(ball_count):
            self.world.CreateDynamicBody(
                fixtures=circle,
                position=(-40 + 2.0 * i, 0.5),
            )

        # The chassis. groupIndex=-1 disables collision among the machine's
        # own parts (chassis, wheel, legs) so the linkage can overlap.
        chassis_fixture = b2FixtureDef(
            shape=b2PolygonShape(box=(2.5, 1)),
            density=1,
            friction=0.3,
            groupIndex=-1)

        self.chassis = self.world.CreateDynamicBody(
            fixtures=chassis_fixture,
            position=pivot + self.offset)

        # Chassis wheel (the crank that drives the legs)
        wheel_fixture = b2FixtureDef(
            shape=b2CircleShape(radius=1.6),
            density=1,
            friction=0.3,
            groupIndex=-1)

        self.wheel = self.world.CreateDynamicBody(
            fixtures=wheel_fixture,
            position=pivot + self.offset)

        # Add a joint between the chassis wheel and the chassis itself
        self.motorJoint = self.world.CreateRevoluteJoint(
            bodyA=self.wheel,
            bodyB=self.chassis,
            anchor=pivot + self.offset,
            collideConnected=False,
            motorSpeed=self.motorSpeed,
            maxMotorTorque=400,
            enableMotor=self.motorOn)

        wheelAnchor = pivot + (0, -0.8)

        # Three mirrored leg pairs; rotating the wheel 120 degrees between
        # pairs phases the legs evenly around the crank.
        self.CreateLeg(-1, wheelAnchor)
        self.CreateLeg(1, wheelAnchor)

        self.wheel.transform = (self.wheel.position, 120.0 * b2_pi / 180)
        self.CreateLeg(-1, wheelAnchor)
        self.CreateLeg(1, wheelAnchor)

        self.wheel.transform = (self.wheel.position, -120.0 * b2_pi / 180)
        self.CreateLeg(-1, wheelAnchor)
        self.CreateLeg(1, wheelAnchor)

    def CreateLeg(self, s, wheelAnchor):
        """Create one leg of the linkage.

        :param s: -1 or 1; mirrors the leg across the vertical axis.
        :param wheelAnchor: crank attachment point (local to the pivot).
        """
        p1, p2 = b2Vec2(5.4 * s, -6.1), b2Vec2(7.2 * s, -1.2)
        p3, p4 = b2Vec2(4.3 * s, -1.9), b2Vec2(3.1 * s, 0.8)
        p5, p6 = b2Vec2(6.0 * s, 1.5), b2Vec2(2.5 * s, 3.7)

        # Use a simple system to create mirrored vertices: polygon winding
        # must stay counter-clockwise, so the vertex order flips with s.
        if s > 0:
            poly1 = b2PolygonShape(vertices=(p1, p2, p3))
            poly2 = b2PolygonShape(vertices=((0, 0), p5 - p4, p6 - p4))
        else:
            poly1 = b2PolygonShape(vertices=(p1, p3, p2))
            poly2 = b2PolygonShape(vertices=((0, 0), p6 - p4, p5 - p4))

        body1 = self.world.CreateDynamicBody(
            position=self.offset,
            angularDamping=10,
            fixtures=b2FixtureDef(
                shape=poly1,
                groupIndex=-1,
                density=1),
        )
        body2 = self.world.CreateDynamicBody(
            position=p4 + self.offset,
            angularDamping=10,
            fixtures=b2FixtureDef(
                shape=poly2,
                groupIndex=-1,
                density=1),
        )

        # Using a soft distance constraint can reduce some jitter.
        # It also makes the structure seem a bit more fluid by
        # acting like a suspension system.
        # Now, join all of the bodies together with distance joints,
        # and one single revolute joint on the chassis
        self.world.CreateDistanceJoint(
            dampingRatio=0.5,
            frequencyHz=10,
            bodyA=body1, bodyB=body2,
            anchorA=p2 + self.offset,
            anchorB=p5 + self.offset,
        )

        self.world.CreateDistanceJoint(
            dampingRatio=0.5,
            frequencyHz=10,
            bodyA=body1, bodyB=body2,
            anchorA=p3 + self.offset,
            anchorB=p4 + self.offset,
        )

        self.world.CreateDistanceJoint(
            dampingRatio=0.5,
            frequencyHz=10,
            bodyA=body1, bodyB=self.wheel,
            anchorA=p3 + self.offset,
            anchorB=wheelAnchor + self.offset,
        )

        self.world.CreateDistanceJoint(
            dampingRatio=0.5,
            frequencyHz=10,
            bodyA=body2, bodyB=self.wheel,
            anchorA=p6 + self.offset,
            anchorB=wheelAnchor + self.offset,
        )

        self.world.CreateRevoluteJoint(
            bodyA=body2,
            bodyB=self.chassis,
            anchor=p4 + self.offset,
        )

    def Keyboard(self, key):
        """Handle keyboard input: a/d set crank direction, s brakes,
        m toggles the motor."""
        if key == Keys.K_a:
            self.motorJoint.motorSpeed = -self.motorSpeed
        elif key == Keys.K_d:
            self.motorJoint.motorSpeed = self.motorSpeed
        elif key == Keys.K_s:
            self.motorJoint.motorSpeed = 0
        elif key == Keys.K_m:
            self.motorJoint.motorEnabled = not self.motorJoint.motorEnabled
# Run the testbed with this example when executed directly.
if __name__ == "__main__":
    main(TheoJansen)
/Flask_Unchained-0.9.0-py3-none-any.whl/flask_unchained/bundles/security/extensions/security.py | from flask import Request
from flask_login import LoginManager
from flask_principal import Principal, Identity, UserNeed, RoleNeed, identity_loaded
from flask_unchained import FlaskUnchained, injectable, lazy_gettext as _
from flask_unchained.utils import ConfigProperty, ConfigPropertyMetaclass
from itsdangerous import URLSafeTimedSerializer
from passlib.context import CryptContext
from types import FunctionType
from typing import *
from ..models import AnonymousUser, User
from ..utils import current_user
from ..services.security_utils_service import SecurityUtilsService
from ..services.user_manager import UserManager
class _SecurityConfigProperties(metaclass=ConfigPropertyMetaclass):
    """Lazy accessors for the app's ``SECURITY_*`` configuration values.

    NOTE(review): each :class:`ConfigProperty` presumably resolves its value
    from the Flask config key formed by ``__config_prefix__`` plus the
    attribute name upper-cased (or an explicit key when given) — confirm
    against :class:`ConfigPropertyMetaclass`.
    """
    __config_prefix__ = 'SECURITY'

    # feature flags
    changeable: bool = ConfigProperty()
    confirmable: bool = ConfigProperty()
    login_without_confirmation: bool = ConfigProperty()
    recoverable: bool = ConfigProperty()
    registerable: bool = ConfigProperty()

    # token-authentication settings
    token_authentication_header: str = ConfigProperty()
    token_authentication_key: str = ConfigProperty()
    token_max_age: str = ConfigProperty()

    # password hashing settings
    password_hash: str = ConfigProperty()
    password_salt: str = ConfigProperty()

    datetime_factory: FunctionType = ConfigProperty()
    # maps to the SECURITY_UNAUTHORIZED_CALLBACK config key explicitly
    _unauthorized_callback: FunctionType = \
        ConfigProperty('SECURITY_UNAUTHORIZED_CALLBACK')
class Security(_SecurityConfigProperties):
    """
    The `Security` extension::

        from flask_unchained.bundles.security import security
    """

    def __init__(self):
        """Initialize attribute slots; real setup happens in ``init_app``."""
        self._context_processors = {}
        self._send_mail_task = None

        # injected services
        self.security_utils_service = None
        self.user_manager = None

        # remaining properties are all set by `self.init_app`
        self.confirm_serializer = None
        self.hashing_context = None
        self.login_manager = None
        self.principal = None
        self.pwd_context = None
        self.remember_token_serializer = None
        self.reset_serializer = None

    def init_app(self, app: FlaskUnchained):
        """Configure serializers, hashing contexts, login manager and
        principal for *app*, and register this extension on it."""
        # NOTE: the order of these `self.get_*` calls is important!
        self.confirm_serializer = self._get_serializer(app, 'confirm')
        self.hashing_context = self._get_hashing_context(app)
        self.login_manager = self._get_login_manager(
            app, app.config.SECURITY_ANONYMOUS_USER)
        self.principal = self._get_principal(app)
        self.pwd_context = self._get_pwd_context(app)
        self.remember_token_serializer = self._get_serializer(app, 'remember')
        self.reset_serializer = self._get_serializer(app, 'reset')

        # expose the config properties to all security templates
        self.context_processor(lambda: dict(security=_SecurityConfigProperties()))

        # FIXME: should this be easier to customize for end users, perhaps by making
        # FIXME: the function come from a config setting?
        identity_loaded.connect_via(app)(self._on_identity_loaded)

        app.extensions['security'] = self

    def inject_services(self,
                        security_utils_service: SecurityUtilsService = injectable,
                        user_manager: UserManager = injectable):
        """Receive dependency-injected services."""
        self.security_utils_service = security_utils_service
        self.user_manager = user_manager

    ######################################################
    # public api to register template context processors #
    ######################################################

    def context_processor(self, fn):
        """
        Add a context processor that runs for every view with a template in the
        security bundle.

        :param fn: A function that returns a dictionary of template context variables.
        """
        self._add_ctx_processor(None, fn)

    def forgot_password_context_processor(self, fn):
        """
        Add a context processor for the :meth:`SecurityController.forgot_password` view.

        :param fn: A function that returns a dictionary of template context variables.
        """
        self._add_ctx_processor('forgot_password', fn)

    def login_context_processor(self, fn):
        """
        Add a context processor for the :meth:`SecurityController.login` view.

        :param fn: A function that returns a dictionary of template context variables.
        """
        self._add_ctx_processor('login', fn)

    def register_context_processor(self, fn):
        """
        Add a context processor for the :meth:`SecurityController.register` view.

        :param fn: A function that returns a dictionary of template context variables.
        """
        self._add_ctx_processor('register', fn)

    def reset_password_context_processor(self, fn):
        """
        Add a context processor for the :meth:`SecurityController.reset_password` view.

        :param fn: A function that returns a dictionary of template context variables.
        """
        self._add_ctx_processor('reset_password', fn)

    def change_password_context_processor(self, fn):
        """
        Add a context processor for the :meth:`SecurityController.change_password` view.

        :param fn: A function that returns a dictionary of template context variables.
        """
        self._add_ctx_processor('change_password', fn)

    def send_confirmation_context_processor(self, fn):
        """
        Add a context processor for the
        :meth:`SecurityController.send_confirmation_email` view.

        :param fn: A function that returns a dictionary of template context variables.
        """
        self._add_ctx_processor('send_confirmation_email', fn)

    def mail_context_processor(self, fn):
        """
        Add a context processor to be used when rendering all the email templates.

        :param fn: A function that returns a dictionary of template context variables.
        """
        self._add_ctx_processor('mail', fn)

    def run_ctx_processor(self, endpoint) -> Dict[str, Any]:
        """Merge the global (None-keyed) and *endpoint*-specific context
        processors' results into a single template-context dict."""
        rv = {}
        for group in {None, endpoint}:
            for fn in self._context_processors.setdefault(group, []):
                rv.update(fn())
        return rv

    # protected

    def _add_ctx_processor(self, endpoint, fn) -> None:
        # register fn under the endpoint (or None for global), de-duplicated
        group = self._context_processors.setdefault(endpoint, [])
        if fn not in group:
            group.append(fn)

    ##########################################
    # protected api methods used by init_app #
    ##########################################

    def _get_hashing_context(self, app: FlaskUnchained) -> CryptContext:
        """
        Get the token hashing (and verifying) context.
        """
        return CryptContext(schemes=app.config.SECURITY_HASHING_SCHEMES,
                            deprecated=app.config.SECURITY_DEPRECATED_HASHING_SCHEMES)

    def _get_login_manager(self,
                           app: FlaskUnchained,
                           anonymous_user: AnonymousUser,
                           ) -> LoginManager:
        """
        Get an initialized instance of Flask Login's
        :class:`~flask_login.LoginManager`.
        """
        login_manager = LoginManager()
        login_manager.anonymous_user = anonymous_user or AnonymousUser
        login_manager.localize_callback = _
        login_manager.request_loader(self._request_loader)
        login_manager.user_loader(
            # skipcq: PYL-W0108 (unnecessary lambda)
            lambda *a, **kw: self.security_utils_service.user_loader(*a, **kw))
        login_manager.login_view = 'security_controller.login'
        login_manager.login_message = _(
            'flask_unchained.bundles.security:error.login_required')
        login_manager.login_message_category = 'info'
        login_manager.needs_refresh_message = _(
            'flask_unchained.bundles.security:error.fresh_login_required')
        login_manager.needs_refresh_message_category = 'info'
        login_manager.init_app(app)
        return login_manager

    def _get_principal(self, app: FlaskUnchained) -> Principal:
        """
        Get an initialized instance of Flask Principal's.
        :class:~flask_principal.Principal`.
        """
        principal = Principal(app, use_sessions=False)
        principal.identity_loader(self._identity_loader)
        return principal

    def _get_pwd_context(self, app: FlaskUnchained) -> CryptContext:
        """
        Get the password hashing context.

        :raises ValueError: if SECURITY_PASSWORD_HASH is not one of the
            configured SECURITY_PASSWORD_SCHEMES.
        """
        pw_hash = app.config.SECURITY_PASSWORD_HASH
        schemes = app.config.SECURITY_PASSWORD_SCHEMES
        if pw_hash not in schemes:
            allowed = (', '.join(schemes[:-1]) + ' and ' + schemes[-1])
            raise ValueError(f'Invalid password hashing scheme {pw_hash}. '
                             f'Allowed values are {allowed}.')
        return CryptContext(schemes=schemes, default=pw_hash,
                            deprecated=app.config.SECURITY_DEPRECATED_PASSWORD_SCHEMES)

    def _get_serializer(self, app: FlaskUnchained, name: str) -> URLSafeTimedSerializer:
        """
        Get a URLSafeTimedSerializer for the given serialization context name.

        :param app: the :class:`FlaskUnchained` instance
        :param name: Serialization context. One of ``confirm``, ``remember``,
                     or ``reset``
        :return: URLSafeTimedSerializer
        """
        salt = app.config.get(f'SECURITY_{name.upper()}_SALT', f'security-{name}-salt')
        return URLSafeTimedSerializer(secret_key=app.config.SECRET_KEY, salt=salt)

    def _identity_loader(self) -> Union[Identity, None]:
        """
        Identity loading function to be passed to be assigned to the Principal
        instance returned by :meth:`_get_principal`.

        Returns None (implicitly) for anonymous users.
        """
        if not isinstance(current_user._get_current_object(), AnonymousUser):
            return Identity(current_user.id)

    def _on_identity_loaded(self, sender, identity: Identity) -> None:
        """
        Callback that runs whenever a new identity has been loaded.

        Grants the identity a UserNeed for the user's id and a RoleNeed for
        each of the user's roles.
        """
        if hasattr(current_user, 'id'):
            identity.provides.add(UserNeed(current_user.id))

        for role in getattr(current_user, 'roles', []):
            identity.provides.add(RoleNeed(role.name))

        identity.user = current_user

    def _request_loader(self, request: Request) -> Union[User, AnonymousUser]:
        """
        Attempt to load the user from the request token.

        The token is looked up in the query string, the configured header,
        and (for JSON requests) the request body. Any failure falls back to
        an anonymous user.
        """
        header_key = self.token_authentication_header
        args_key = self.token_authentication_key
        token = request.args.get(args_key, request.headers.get(header_key, None))
        if request.is_json:
            data = request.get_json(silent=True) or {}
            token = data.get(args_key, token)

        try:
            data = self.remember_token_serializer.loads(token, max_age=self.token_max_age)
            user = self.user_manager.get(data[0])
            if user and self.security_utils_service.verify_hash(data[1], user.password):
                return user
        except Exception:
            # Best-effort: a missing/expired/forged token or unknown user
            # means anonymous. (Was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt; Exception preserves the fallback
            # behavior without masking interpreter exits.)
            pass

        return self.login_manager.anonymous_user()
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/bower_components/hydrolysis/src/ast-utils/fluent-traverse.ts | import * as estree from 'estree';
export interface Visitor {
classDetected?: boolean,
enterIdentifier?: (node: estree.Identifier, parent: estree.Node)=>void;
leaveIdentifier?: (node: estree.Identifier, parent: estree.Node)=>void;
enterLiteral?: (node: estree.Literal, parent: estree.Node)=>void;
leaveLiteral?: (node: estree.Literal, parent: estree.Node)=>void;
enterProgram?: (node: estree.Program, parent: estree.Node)=>void;
leaveProgram?: (node: estree.Program, parent: estree.Node)=>void;
enterExpressionStatement?: (node: estree.ExpressionStatement, parent: estree.Node)=>void;
leaveExpressionStatement?: (node: estree.ExpressionStatement, parent: estree.Node)=>void;
enterBlockStatement?: (node: estree.BlockStatement, parent: estree.Node)=>void;
leaveBlockStatement?: (node: estree.BlockStatement, parent: estree.Node)=>void;
enterEmptyStatement?: (node: estree.EmptyStatement, parent: estree.Node)=>void;
leaveEmptyStatement?: (node: estree.EmptyStatement, parent: estree.Node)=>void;
enterDebuggerStatement?: (node: estree.DebuggerStatement, parent: estree.Node)=>void;
leaveDebuggerStatement?: (node: estree.DebuggerStatement, parent: estree.Node)=>void;
enterWithStatement?: (node: estree.WithStatement, parent: estree.Node)=>void;
leaveWithStatement?: (node: estree.WithStatement, parent: estree.Node)=>void;
enterReturnStatement?: (node: estree.ReturnStatement, parent: estree.Node)=>void;
leaveReturnStatement?: (node: estree.ReturnStatement, parent: estree.Node)=>void;
enterLabeledStatement?: (node: estree.LabeledStatement, parent: estree.Node)=>void;
leaveLabeledStatement?: (node: estree.LabeledStatement, parent: estree.Node)=>void;
enterBreakStatement?: (node: estree.BreakStatement, parent: estree.Node)=>void;
leaveBreakStatement?: (node: estree.BreakStatement, parent: estree.Node)=>void;
enterContinueStatement?: (node: estree.ContinueStatement, parent: estree.Node)=>void;
leaveContinueStatement?: (node: estree.ContinueStatement, parent: estree.Node)=>void;
enterIfStatement?: (node: estree.IfStatement, parent: estree.Node)=>void;
leaveIfStatement?: (node: estree.IfStatement, parent: estree.Node)=>void;
enterSwitchStatement?: (node: estree.SwitchStatement, parent: estree.Node)=>void;
leaveSwitchStatement?: (node: estree.SwitchStatement, parent: estree.Node)=>void;
enterSwitchCase?: (node: estree.SwitchCase, parent: estree.Node)=>void;
leaveSwitchCase?: (node: estree.SwitchCase, parent: estree.Node)=>void;
enterThrowStatement?: (node: estree.ThrowStatement, parent: estree.Node)=>void;
leaveThrowStatement?: (node: estree.ThrowStatement, parent: estree.Node)=>void;
enterTryStatement?: (node: estree.TryStatement, parent: estree.Node)=>void;
leaveTryStatement?: (node: estree.TryStatement, parent: estree.Node)=>void;
enterCatchClause?: (node: estree.CatchClause, parent: estree.Node)=>void;
leaveCatchClause?: (node: estree.CatchClause, parent: estree.Node)=>void;
enterWhileStatement?: (node: estree.WhileStatement, parent: estree.Node)=>void;
leaveWhileStatement?: (node: estree.WhileStatement, parent: estree.Node)=>void;
enterDoWhileStatement?: (node: estree.DoWhileStatement, parent: estree.Node)=>void;
leaveDoWhileStatement?: (node: estree.DoWhileStatement, parent: estree.Node)=>void;
enterForStatement?: (node: estree.ForStatement, parent: estree.Node)=>void;
leaveForStatement?: (node: estree.ForStatement, parent: estree.Node)=>void;
enterForInStatement?: (node: estree.ForInStatement, parent: estree.Node)=>void;
leaveForInStatement?: (node: estree.ForInStatement, parent: estree.Node)=>void;
enterForOfStatement?: (node: estree.ForOfStatement, parent: estree.Node)=>void;
leaveForOfStatement?: (node: estree.ForOfStatement, parent: estree.Node)=>void;
enterFunctionDeclaration?: (node: estree.FunctionDeclaration, parent: estree.Node)=>void;
leaveFunctionDeclaration?: (node: estree.FunctionDeclaration, parent: estree.Node)=>void;
enterVariableDeclaration?: (node: estree.VariableDeclaration, parent: estree.Node)=>void;
leaveVariableDeclaration?: (node: estree.VariableDeclaration, parent: estree.Node)=>void;
enterVariableDeclarator?: (node: estree.VariableDeclarator, parent: estree.Node)=>void;
leaveVariableDeclarator?: (node: estree.VariableDeclarator, parent: estree.Node)=>void;
enterThisExpression?: (node: estree.ThisExpression, parent: estree.Node)=>void;
leaveThisExpression?: (node: estree.ThisExpression, parent: estree.Node)=>void;
enterArrayExpression?: (node: estree.ArrayExpression, parent: estree.Node)=>void;
leaveArrayExpression?: (node: estree.ArrayExpression, parent: estree.Node)=>void;
enterObjectExpression?: (node: estree.ObjectExpression, parent: estree.Node)=>void;
leaveObjectExpression?: (node: estree.ObjectExpression, parent: estree.Node)=>void;
enterProperty?: (node: estree.Property, parent: estree.Node)=>void;
leaveProperty?: (node: estree.Property, parent: estree.Node)=>void;
enterFunctionExpression?: (node: estree.FunctionExpression, parent: estree.Node)=>void;
leaveFunctionExpression?: (node: estree.FunctionExpression, parent: estree.Node)=>void;
enterArrowFunctionExpression?: (node: estree.ArrowFunctionExpression, parent: estree.Node)=>void;
leaveArrowFunctionExpression?: (node: estree.ArrowFunctionExpression, parent: estree.Node)=>void;
enterYieldExpression?: (node: estree.YieldExpression, parent: estree.Node)=>void;
leaveYieldExpression?: (node: estree.YieldExpression, parent: estree.Node)=>void;
enterSuper?: (node: estree.Super, parent: estree.Node)=>void;
leaveSuper?: (node: estree.Super, parent: estree.Node)=>void;
enterUnaryExpression?: (node: estree.UnaryExpression, parent: estree.Node)=>void;
leaveUnaryExpression?: (node: estree.UnaryExpression, parent: estree.Node)=>void;
enterUpdateExpression?: (node: estree.UpdateExpression, parent: estree.Node)=>void;
leaveUpdateExpression?: (node: estree.UpdateExpression, parent: estree.Node)=>void;
enterBinaryExpression?: (node: estree.BinaryExpression, parent: estree.Node)=>void;
leaveBinaryExpression?: (node: estree.BinaryExpression, parent: estree.Node)=>void;
enterAssignmentExpression?: (node: estree.AssignmentExpression, parent: estree.Node)=>void;
leaveAssignmentExpression?: (node: estree.AssignmentExpression, parent: estree.Node)=>void;
enterLogicalExpression?: (node: estree.LogicalExpression, parent: estree.Node)=>void;
leaveLogicalExpression?: (node: estree.LogicalExpression, parent: estree.Node)=>void;
enterMemberExpression?: (node: estree.MemberExpression, parent: estree.Node)=>void;
leaveMemberExpression?: (node: estree.MemberExpression, parent: estree.Node)=>void;
enterConditionalExpression?: (node: estree.ConditionalExpression, parent: estree.Node)=>void;
leaveConditionalExpression?: (node: estree.ConditionalExpression, parent: estree.Node)=>void;
enterCallExpression?: (node: estree.CallExpression, parent: estree.Node)=>void;
leaveCallExpression?: (node: estree.CallExpression, parent: estree.Node)=>void;
enterNewExpression?: (node: estree.NewExpression, parent: estree.Node)=>void;
leaveNewExpression?: (node: estree.NewExpression, parent: estree.Node)=>void;
enterSequenceExpression?: (node: estree.SequenceExpression, parent: estree.Node)=>void;
leaveSequenceExpression?: (node: estree.SequenceExpression, parent: estree.Node)=>void;
enterTemplateLiteral?: (node: estree.TemplateLiteral, parent: estree.Node)=>void;
leaveTemplateLiteral?: (node: estree.TemplateLiteral, parent: estree.Node)=>void;
enterTaggedTemplateExpression?: (node: estree.TaggedTemplateExpression, parent: estree.Node)=>void;
leaveTaggedTemplateExpression?: (node: estree.TaggedTemplateExpression, parent: estree.Node)=>void;
enterTemplateElement?: (node: estree.TemplateElement, parent: estree.Node)=>void;
leaveTemplateElement?: (node: estree.TemplateElement, parent: estree.Node)=>void;
enterSpreadElement?: (node: estree.SpreadElement, parent: estree.Node)=>void;
leaveSpreadElement?: (node: estree.SpreadElement, parent: estree.Node)=>void;
enterPattern?: (node: estree.Pattern, parent: estree.Node)=>void;
leavePattern?: (node: estree.Pattern, parent: estree.Node)=>void;
enterAssignmentProperty?: (node: estree.AssignmentProperty, parent: estree.Node)=>void;
leaveAssignmentProperty?: (node: estree.AssignmentProperty, parent: estree.Node)=>void;
enterObjectPattern?: (node: estree.ObjectPattern, parent: estree.Node)=>void;
leaveObjectPattern?: (node: estree.ObjectPattern, parent: estree.Node)=>void;
enterArrayPattern?: (node: estree.ArrayPattern, parent: estree.Node)=>void;
leaveArrayPattern?: (node: estree.ArrayPattern, parent: estree.Node)=>void;
enterRestElement?: (node: estree.RestElement, parent: estree.Node)=>void;
leaveRestElement?: (node: estree.RestElement, parent: estree.Node)=>void;
enterAssignmentPattern?: (node: estree.AssignmentPattern, parent: estree.Node)=>void;
leaveAssignmentPattern?: (node: estree.AssignmentPattern, parent: estree.Node)=>void;
enterMethodDefinition?: (node: estree.MethodDefinition, parent: estree.Node)=>void;
leaveMethodDefinition?: (node: estree.MethodDefinition, parent: estree.Node)=>void;
enterClassDeclaration?: (node: estree.ClassDeclaration, parent: estree.Node)=>void;
leaveClassDeclaration?: (node: estree.ClassDeclaration, parent: estree.Node)=>void;
enterClassExpression?: (node: estree.ClassExpression, parent: estree.Node)=>void;
leaveClassExpression?: (node: estree.ClassExpression, parent: estree.Node)=>void;
enterMetaProperty?: (node: estree.MetaProperty, parent: estree.Node)=>void;
leaveMetaProperty?: (node: estree.MetaProperty, parent: estree.Node)=>void;
enterModuleDeclaration?: (node: estree.ModuleDeclaration, parent: estree.Node)=>void;
leaveModuleDeclaration?: (node: estree.ModuleDeclaration, parent: estree.Node)=>void;
enterModuleSpecifier?: (node: estree.ModuleSpecifier, parent: estree.Node)=>void;
leaveModuleSpecifier?: (node: estree.ModuleSpecifier, parent: estree.Node)=>void;
enterImportDeclaration?: (node: estree.ImportDeclaration, parent: estree.Node)=>void;
leaveImportDeclaration?: (node: estree.ImportDeclaration, parent: estree.Node)=>void;
enterImportSpecifier?: (node: estree.ImportSpecifier, parent: estree.Node)=>void;
leaveImportSpecifier?: (node: estree.ImportSpecifier, parent: estree.Node)=>void;
enterImportDefaultSpecifier?: (node: estree.ImportDefaultSpecifier, parent: estree.Node)=>void;
leaveImportDefaultSpecifier?: (node: estree.ImportDefaultSpecifier, parent: estree.Node)=>void;
enterImportNamespaceSpecifier?: (node: estree.ImportNamespaceSpecifier, parent: estree.Node)=>void;
leaveImportNamespaceSpecifier?: (node: estree.ImportNamespaceSpecifier, parent: estree.Node)=>void;
enterExportNamedDeclaration?: (node: estree.ExportNamedDeclaration, parent: estree.Node)=>void;
leaveExportNamedDeclaration?: (node: estree.ExportNamedDeclaration, parent: estree.Node)=>void;
enterExportSpecifier?: (node: estree.ExportSpecifier, parent: estree.Node)=>void;
leaveExportSpecifier?: (node: estree.ExportSpecifier, parent: estree.Node)=>void;
enterExportDefaultDeclaration?: (node: estree.ExportDefaultDeclaration, parent: estree.Node)=>void;
leaveExportDefaultDeclaration?: (node: estree.ExportDefaultDeclaration, parent: estree.Node)=>void;
enterExportAllDeclaration?: (node: estree.ExportAllDeclaration, parent: estree.Node)=>void;
leaveExportAllDeclaration?: (node: estree.ExportAllDeclaration, parent: estree.Node)=>void;
} | PypiClean |
/DynaMIT-1.1.5.tar.gz/DynaMIT-1.1.5/dynamit/runner.py | from __future__ import print_function
from __future__ import division
from builtins import object, range, str
from multiprocessing import Pool
import sys, os, shutil, six
import dynamit.utils
def runSearcherThread(params):
    """Run a single motif searcher inside a worker process.

    Stdout and stderr of the worker are redirected to an in-memory buffer so
    that the searcher's whole console output can be printed in one block by
    the parent process once the worker has finished.

    Args:
        params: tuple composed of:
            - the MotifSearcher instance whose runSearch method must be run;
            - the name of the file containing the input sequences.

    Returns:
        A tuple (searcher, returnValue, capturedOutput): the possibly
        modified MotifSearcher instance, the outcome of its runSearch
        method (1 on failure) and the StringIO buffer holding the
        searcher's console output.
    """
    searcher, sequencesFilename = params
    try:
        # Redirect this process' stdout/stderr to capture the searcher output.
        capturedOutput = six.StringIO()
        sys.stdout = sys.stderr = capturedOutput
        # Inform that the searcher is starting (goes into the buffer).
        print(" [INFO] Starting the " + searcher.searcherName + \
              " searcher...")
        # Run the searcher and keep its return value.
        outcome = searcher.runSearch(sequencesFilename)
    except OSError:
        print("[ERROR] Unable to run the <" + searcher.searcherName + \
              "> motif searcher. The likely cause of the error is a wrong " \
              " tool path specification in the configuration file. " \
              "Continuing with remaining searchers...")
        outcome = 1
    except (IOError, IndexError, RuntimeError, ValueError):
        print("[ERROR] Unable to run the <" + searcher.searcherName + \
              "> motif searcher. Continuing with remaining searchers...")
        outcome = 1
    # Hand the searcher object, its outcome and its console output back to
    # the parent process.
    return (searcher, outcome, capturedOutput)
class Runner(object):
    """Class devoted to running the various phases of the DynaMIT workflow.

    Loads the configuration, runs motif searchers and forwards their results
    to the selected integration strategy, which is then executed. Eventually,
    runs the specified results printers and terminates the run. This class is
    instantiated and its methods called only by the __main__ script, which
    acts as an interface to the command-line.
    """

    def __init__(self):
        """Initialize all class attributes with their default values."""
        # MotifSearcher instances loaded from the configuration file.
        self.searchers = []
        # The single IntegrationStrategy instance loaded from configuration.
        self.integrator = None
        # Flat list of motif matches collected from all searchers.
        self.searchesResults = []
        # Dictionary of results produced by the integration strategy.
        self.integrationResults = {}
        # ResultsPrinter instances loaded from the configuration file.
        self.printers = []
        self.outputFolder = ""
        self.configFilename = ""
        self.sequencesFilename = ""

    def loadConfiguration(self, filename):
        """Reads DynaMIT configuration from the specified filename.

        Instantiates and configures all the MotifSearchers, the
        IntegrationStrategy and the ResultsPrinters listed in the
        configuration file.

        Args:
            filename: filename containing the configuration to be loaded.

        Returns:
            Returns 0 if everything went fine, 1 and prints an error
            message otherwise.
        """
        print(("\n--------------------------------\nLoading DynaMIT " + \
               "configuration...\n--------------------------------"))
        try:
            self.configFilename = filename
            # reset any component loaded by a previous configuration.
            self.searchers = []
            self.integrator = None
            self.printers = []
            linesCount = 1
            # "with" guarantees the handle is closed even when errors occur
            # (the original code leaked the file handle).
            with open(self.configFilename) as configHandle:
                for line in configHandle.readlines():
                    # allow for comment lines in the configuration file, starting with #.
                    if not line.startswith("#"):
                        # get the line parts defining the configuration elements
                        # (component type, class name, tool path, parameters).
                        componentType, fullClassName, path, params = \
                            line.rstrip('\n').split('\t')
                        try:
                            # instantiate the component...
                            newComponent = dynamit.utils.getClassInstance(fullClassName)
                            # ...and try to configure it.
                            configurationOutcome = 0
                            # motif searcher components may need the underlying tool
                            # path, so they take one more parameter (the path) than
                            # integration strategies and results printers.
                            if componentType == "MotifSearcher":
                                configurationOutcome = newComponent.setConfiguration(path, params)
                            else:
                                # path is expected to be empty for IntegrationStrategy
                                # and ResultsPrinter configuration lines: not used.
                                configurationOutcome = newComponent.setConfiguration(params)
                            if configurationOutcome == 0:
                                if componentType == "MotifSearcher":
                                    # store the MotifSearcher component.
                                    self.searchers.append(newComponent)
                                elif componentType == "IntegrationStrategy":
                                    # if an IntegrationStrategy was already loaded,
                                    # warn that only the last loaded one will be used.
                                    if self.integrator is not None:
                                        print(("[WARNING] The configuration defines more than one " + \
                                               "integration strategy. DynaMIT will use the last " + \
                                               "strategy found in the configuration file."))
                                    # store the IntegrationStrategy component.
                                    self.integrator = newComponent
                                elif componentType == "ResultsPrinter":
                                    # store the ResultsPrinter component.
                                    self.printers.append(newComponent)
                                # inform the user of the new component successful loading.
                                print(("[SUCCESS:%d] Instance of the < %s > %s was initialized." \
                                       % (linesCount, fullClassName, componentType)))
                            else:
                                # raise this exception so that, as configuration went
                                # wrong (returned value != 0), we stop and report it.
                                raise ValueError('')
                        except ValueError:
                            # an error occurred in configuring the component: report
                            # it and abort execution to avoid problems later on.
                            print("[ERROR:%d] Impossible to instantiate the < %s > " \
                                  "%s at configuration line %d." \
                                  % (linesCount, fullClassName, componentType, linesCount))
                            return 1
                    # keep track of line number to report errors if they occur.
                    linesCount += 1
            # configuration loaded: check that all required components are
            # present in the right quantities.
            if len(self.searchers) < 1:
                print("[ERROR] No motif searcher could be loaded from " \
                      "the configuration file.")
                return 1
            elif self.integrator is None:
                print("[ERROR] No integration strategy could be loaded from " \
                      "the configuration file.")
                return 1
            elif len(self.printers) < 1:
                # no printer specified: fall back to the two default printers.
                self.printers = [
                    dynamit.utils.getClassInstance("dynamit.tablePrinter.TablePrinter"),
                    dynamit.utils.getClassInstance("dynamit.clusterEvaluationPrinter" \
                                                   ".ClusterEvaluationPrinter")]
                print(("[WARNING] No results printer could be loaded from the " + \
                       "configuration file. Execution will continue anyway, " + \
                       "TablePrinter and ClusterEvaluationPrinter will be run."))
            # report success in loading the configuration. DynaMIT can now be run.
            print("[DONE] Configuration loaded successfully.")
            return 0
        except IOError:
            print(("[ERROR] The configuration file < %s > does not exist." % filename))
            return 1
        except (ImportError, AttributeError):
            print(("[ERROR] Cannot instantiate the < %s > component: please check " \
                   "the class name correctness in the configuration file." % fullClassName))
            return 1
        except Exception:
            print(("[ERROR] Unexpected error: please check your configuration file " \
                   "at line " + str(linesCount) + "."))
            print((sys.exc_info()[0]))
            return 1

    def run(self, outputFolderName, sequencesFilename, nproc=1, polisherDist=-1, noCopy=0):
        """Runs DynaMIT with the current configuration.

        First performs motif searches (with up to nproc parallel processes)
        and eventually runs the integration strategy on all collected
        results, followed by the configured results printers.

        Args:
            outputFolderName: folder into which results will be stored.
            sequencesFilename: filename containing sequences to be analyzed.
            nproc: number of processor cores to use for running the search
                   phase (default=1).
            polisherDist: if greater than 0, run the motif cluster polisher
                          and keep only motif instances overlapping with
                          another same-cluster instance for at least this
                          many bases.
            noCopy: when truthy, do not keep copies of the configuration and
                    input files in the output folder.

        Returns:
            Returns 0 if everything went fine, 1 and prints an error
            message otherwise.
        """
        print("\n--------------------------------\nStarting DynaMIT " + \
              "run...\n--------------------------------")
        # check that configuration was loaded prior to trying running DynaMIT.
        if (len(self.searchers) < 1) or (self.integrator is None):
            print("[ERROR] Please call loadConfiguration() prior to calling run()!")
            return 1
        # check for the input sequences file existence.
        if not os.path.isfile(sequencesFilename):
            print("[ERROR] Could not find the specified input sequences filename.")
            return 1
        # check that the run folder was specified correctly.
        if outputFolderName == "":
            print("[ERROR] Please specify a non-empty output folder name.")
            return 1
        # create the run folder if it does not exist yet, and store its path.
        if not os.path.isdir(outputFolderName):
            os.mkdir(outputFolderName)
        self.outputFolder = os.path.abspath(outputFolderName)
        # copy the configuration file into the output folder unless the user
        # has specified to avoid this copy.
        if noCopy == 0:
            shutil.copy(self.configFilename, self.outputFolder)
        # copy the sequences file in the output folder regardless of noCopy;
        # if needed it will be deleted at the end of dynamit processing.
        shutil.copy(sequencesFilename, self.outputFolder)
        self.sequencesFilename = os.path.abspath(os.path.join(outputFolderName,
                                                 os.path.basename(sequencesFilename)))
        # move to the output folder, so that all subsequent operations
        # can take place there.
        os.chdir(self.outputFolder)
        # load sequences into BioPython SeqRecord objects for use by
        # integration strategies and results printers.
        sequences = dynamit.utils.getSequencesRecords(self.sequencesFilename)
        # getSequencesRecords returns 1 when the file is not valid FASTA.
        if sequences == 1:
            print("[ERROR] The specified input sequences filename " \
                  "is in the wrong format (FASTA required).")
            return 1
        print("[STARTING] Running motif searchers...")
        if nproc > 1 and sys.platform.startswith("win") and sys.version_info[0] == 2:
            print(" [WARNING] Parallel execution of motif searchers in Python 2.x on\n" \
                  " Windows is disabled due to a bug in the multiprocessing module.\n" \
                  " Motif searchers execution will proceed using a single processor.")
            nproc = 1
        if nproc > 1:
            # run motif searchers in parallel: never spawn more worker
            # processes than requested (the original used max(), which
            # spawned one process per searcher regardless of nproc).
            searchersPool = Pool(processes=min(nproc, len(self.searchers)))
            try:
                results = searchersPool.map(runSearcherThread,
                    [(searcher, self.sequencesFilename) for searcher in self.searchers])
            finally:
                # release the worker processes in any case.
                searchersPool.close()
                searchersPool.join()
            self.searchers = []
            for searcherResults in results:
                # store the searcher containing execution results.
                self.searchers.append(searcherResults[0])
                # if the searcher was successful, extract its results...
                if searcherResults[1] != 1:
                    # ...appending them to the global results list.
                    self.searchesResults.extend(searcherResults[1])
                # print the searcher console output to the console.
                print(searcherResults[2].getvalue())
        else:
            # execute motif search in single-processor mode:
            # run all searchers loaded by configuration one-by-one.
            for searcher in self.searchers:
                try:
                    # run the searcher and append its results to overall results.
                    print(" [INFO] Starting the " + searcher.searcherName + " searcher...")
                    currentSearcherResults = searcher.runSearch(self.sequencesFilename)
                    # check that the searcher ran with success and store results.
                    if currentSearcherResults != 1:
                        self.searchesResults.extend(currentSearcherResults)
                    else:
                        raise ValueError('Unexpected searcher error')
                except OSError:
                    print("[ERROR] Unable to run the <" + searcher.searcherName + \
                          "> motif searcher. The likely cause of the error is a wrong " \
                          " tool path specification in the configuration file. " \
                          "Continuing with remaining searchers...")
                except (IOError, IndexError, RuntimeError, ValueError):
                    print("[ERROR] Unable to run the <" + searcher.searcherName + \
                          "> motif searcher. Continuing with remaining searchers...")
        # check if search results are empty, if so abort.
        if len(self.searchesResults) == 0:
            print("[ERROR] Motif search with the specified configuration yielded no results.")
            return 1
        # save motifs matches to a tab-separated file
        # (for debugging purposes and user inspection).
        with open("motifSearchesResults.txt", "w") as resultsHandle:
            resultsHandle.write("#motif motifType motifSearcher sequenceID startPos endPos\n")
            for match in self.searchesResults:
                resultsHandle.write(match + "\n")
        print(" [INFO] Raw motifs matches saved to < motifSearchesResults.txt >.\n" \
              "[DONE] Motif searches completed successfully.\n")
        # perform motifs integration according to the selected strategy.
        print("[STARTING] Running results integration strategy...")
        self.integrationResults = self.integrator.doIntegration(sequences,
                                                                self.searchesResults)
        if self.integrationResults != 1:
            # save integration results to a file (for debugging purposes
            # and user inspection).
            integrationResultsFilename = "integrationResults_" + \
                self.integrator.integrationStrategyName + ".txt"
            with open(integrationResultsFilename, "w") as resultsHandle:
                for (key, value) in list(self.integrationResults.items()):
                    resultsHandle.write(key + ":\n" + str(value) + "\n")
            # report the actual filename used (the original message claimed a
            # plain "integrationResults.txt", which does not exist).
            print(" [INFO] Raw integration results saved to < " + \
                  integrationResultsFilename + " >.")
            print("[DONE] Results integration completed successfully.\n")
            # if specified, run the motif cluster polisher.
            if (polisherDist > 0) and \
               "clustering" in self.integrationResults:
                polishedResults = []
                print("[STARTING] Running motif cluster polisher...")
                # loop over all motif matches to identify overlaps.
                for i in range(0, len(self.searchesResults)):
                    iMatch = self.searchesResults[i].split('\t')
                    iMotif = iMatch[0] + "|" + iMatch[2]
                    iCluster = self.integrationResults["clustering"][\
                        self.integrationResults["motifs"].index(iMotif)]
                    foundOverlap = 0
                    # fields 4 and 5 of a match line are startPos and endPos.
                    iRange = list(range(int(iMatch[4]), int(iMatch[5])))
                    for j in range(0, len(self.searchesResults)):
                        jMatch = self.searchesResults[j].split('\t')
                        jMotif = jMatch[0] + "|" + jMatch[2]
                        jCluster = self.integrationResults["clustering"][\
                            self.integrationResults["motifs"].index(jMotif)]
                        if (i != j) and ("".join(iMatch) != "".join(jMatch)):
                            jRange = list(range(int(jMatch[4]), int(jMatch[5])))
                            if len(set(iRange) & set(jRange)) >= polisherDist:
                                if iCluster == jCluster:
                                    # two overlapping instances belonging to the
                                    # same cluster: retain this instance.
                                    foundOverlap = 1
                                    break
                    if foundOverlap == 1:
                        polishedResults.append(self.searchesResults[i])
                # store the list of polished motif matches for printing.
                self.searchesResults = polishedResults
                print("[DONE] Motif cluster polishing completed successfully.\n")
        else:
            print(("[ERROR] Unable to run the <" + \
                   self.integrator.integrationStrategyName + \
                   "> integration strategy."))
            # integration failed, so if any printer was specified, try to run
            # it anyway with the motifs list as the only integration result.
            if len(self.printers) > 0:
                self.integrationResults = {"motifs":
                    dynamit.utils.getMotifsFromSearchResults(
                        self.searchesResults)}
                print("[INFO] Trying to run specified results printers with " \
                      " motifs list only as integration results...")
        # perform results printing with each of the selected printers, if any.
        if len(self.printers) > 0:
            print("[STARTING] Running results printers...")
            for printer in self.printers:
                # make a folder for the printer results and move into it so
                # that the output of this printer is stored in its own folder.
                printerDir = "results_" + printer.resultsPrinterName
                if not os.path.isdir(printerDir):
                    os.mkdir(printerDir)
                os.chdir(printerDir)
                # run the printer.
                printer.printResults(sequences, self.searchesResults,
                                     self.integrator.integrationStrategyName,
                                     self.integrationResults)
                # move back to the global results folder.
                os.chdir(self.outputFolder)
            print("[DONE] Results printing completed successfully.")
        # if the user has asked to not store the input file in the output
        # folder, delete it now that we are done.
        if noCopy:
            os.remove(self.sequencesFilename)
        print(("\n--------------------------------\nDynaMIT run completed!\n---" + \
               "-----------------------------\n[INFO] Results can be found in " + \
               "the <" + self.outputFolder + "> folder."))
        return 0

    def exportResults(self):
        """Obtains the raw run results (prior to running the printers) from
        the integrator component used.

        Returns:
            Returns the integration strategy results, or 1 if integration
            failed or was not performed yet.
        """
        if not self.integrationResults:
            print("[ERROR] Results generation failed or not yet performed.")
            return 1
        else:
            return self.integrationResults
// AMD module for the Aloha Editor "formatlesspaste" plugin. When enabled,
// pasted content is routed through FormatlessPasteHandler, which strips the
// configured text-level formatting elements.
define(
	['aloha/core', 'aloha/plugin', 'aloha/jquery', 'aloha/floatingmenu',
	 'formatlesspaste/formatlesshandler', 'aloha/contenthandlermanager',
	 'i18n!formatlesspaste/nls/i18n', 'i18n!aloha/nls/i18n', 'css!formatlesspaste/css/formatless.css'],
	function(Aloha, Plugin, jQuery, FloatingMenu, FormatlessPasteHandler, ContentHandlerManager, i18n, i18nCore) {

	// Public Methods
	return Plugin.create('formatlesspaste', {

		// Whether formatless pasting starts out enabled.
		formatlessPasteOption: false,

		// Whether to display a button in the floating menu that allows
		// switching formatless pasting on and off.
		button: true,

		// Text-level semantic and edit elements removed from pasted content
		// (see http://dev.w3.org/html5/spec/text-level-semantics.html#usage-summary).
		strippedElements : [
			"a",
			"em",
			"strong",
			"small",
			"s",
			"cite",
			"q",
			"dfn",
			"abbr",
			"time",
			"code",
			"var",
			"samp",
			"kbd",
			"sub",
			"sup",
			"i",
			"b",
			"u",
			"mark",
			"ruby",
			"rt",
			"rp",
			"bdi",
			"bdo",
			"ins",
			"del"
		],

		/**
		 * Initialize the plugin: read settings (old- and new-style), register
		 * the paste handler and re-apply the per-editable configuration
		 * whenever an editable is activated.
		 */
		init: function() {
			var that = this;

			// look for old configuration directly in settings
			if ( typeof this.settings.formatlessPasteOption !== 'undefined') {
				this.formatlessPasteOption = this.settings.formatlessPasteOption;
			}
			if ( typeof this.settings.strippedElements !== 'undefined') {
				this.strippedElements = this.settings.strippedElements;
			}

			// look for newer config in settings.config
			if (this.settings.config) {
				if (this.settings.config.formatlessPasteOption) {
					this.formatlessPasteOption = this.settings.config.formatlessPasteOption;
				}
				if (this.settings.config.strippedElements) {
					this.strippedElements = this.settings.config.strippedElements;
				}
				if (this.settings.config.button === false) {
					this.button = false;
				}
			}

			this.registerFormatlessPasteHandler();

			// NOTE(review): declared but never assigned or read anywhere in
			// this module — candidate for removal.
			var formatlessPasteHandlerLastState;

			// re-evaluate the editable-specific configuration each time an
			// editable becomes active.
			Aloha.bind( 'aloha-editable-activated', function( event, params) {
				var config = that.getEditableConfig( params.editable.obj );
				if (!config) {
					return;
				}

				// make button configuration a bit more tolerant: accept the
				// strings "false"/"0" (case-insensitive) as false.
				if (typeof config.button === 'string') {
					config.button = config.button.toLowerCase();
					if (config.button === 'false' || config.button === '0') {
						// disable button only if 'false' or '0' is specified
						config.button = false;
					} else {
						// otherwise the button will always be shown
						config.button = true;
					}
				}

				// make formatlessPasteOption configuration a bit more
				// tolerant: same string handling as for the button flag.
				if (typeof config.formatlessPasteOption === 'string') {
					config.formatlessPasteOption = config.formatlessPasteOption.toLowerCase();
					if (config.formatlessPasteOption === 'false' || config.formatlessPasteOption === '0') {
						// disable option only if 'false' or '0' is specified
						config.formatlessPasteOption = false;
					} else {
						// otherwise the option is considered enabled
						config.formatlessPasteOption = true;
					}
				}

				// per-editable override of the elements to strip.
				if ( config.strippedElements ) {
					FormatlessPasteHandler.strippedElements = config.strippedElements;
				}

				// sync the toggle button and the handler with the editable's
				// configuration (any other value leaves the state untouched).
				if (config.formatlessPasteOption === true) {
					that.formatlessPasteButton.setPressed(true);
					FormatlessPasteHandler.enabled = true;
				} else if (config.formatlessPasteOption === false) {
					that.formatlessPasteButton.setPressed(false);
					FormatlessPasteHandler.enabled = false;
				}

				// show or hide the toggle button for this editable.
				if ( config.button === false ) {
					that.formatlessPasteButton.hide();
				} else {
					that.formatlessPasteButton.show();
				}
			});
		},

		/**
		 * Register the formatless paste content handler and create the
		 * floating-menu toggle button controlling it.
		 */
		registerFormatlessPasteHandler: function(){
			ContentHandlerManager.register( 'formatless', FormatlessPasteHandler );
			FormatlessPasteHandler.strippedElements = this.strippedElements;

			// add button to toggle format-less pasting
			this.formatlessPasteButton = new Aloha.ui.Button({
				'iconClass' : 'aloha-button aloha-button-formatless-paste',
				'size' : 'small',
				'onclick' : function () {
					// toggle the value of allowFormatless
					FormatlessPasteHandler.enabled = !FormatlessPasteHandler.enabled;
				},
				'tooltip' : i18n.t( 'button.formatlessPaste.tooltip' ),
				'toggle' : true
			});
			FloatingMenu.addButton(
				'Aloha.continuoustext',
				this.formatlessPasteButton,
				i18nCore.t( 'floatingmenu.tab.format' ),
				1
			);

			// activate formatless paste button if option is set
			if (this.formatlessPasteOption === true) {
				this.formatlessPasteButton.setPressed(true);
				FormatlessPasteHandler.enabled = true;
			}

			// hide button by default if configured
			if (this.button === false) {
				this.formatlessPasteButton.hide();
			}
		}
	});
});
/Netzob-2.0.0.tar.gz/Netzob-2.0.0/src/netzob/Model/Grammar/all.py |
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011-2017 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : [email protected] |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#+---------------------------------------------------------------------------+
# List subpackages to import with the current one
# see docs.python.org/2/tutorial/modules.html
from netzob.Model.Grammar.States.all import *
from netzob.Model.Grammar.Transitions.all import *
from netzob.Model.Grammar.Automata import Automata | PypiClean |
/AltTester_Driver-2.0.2-py3-none-any.whl/alttester/altobject.py | import json
import alttester.commands as commands
from alttester.by import By
class AltObject:
"""The AltObject class represents an object present in the application and it allows you to interact with it.
It is the return type of the methods in the “find_*” category from the AltDriver class.
"""
    def __init__(self, altdriver, data):
        # Driver instance used to issue follow-up commands for this object.
        self._altdriver = altdriver
        # Raw object state (name, id, coordinates, ...) as received from the
        # AltTester server; exposed read-only through the properties below.
        self._data = data
def __repr__(self):
return "{}(altdriver, {!r})".format(self.__class__.__name__, self.to_json())
def __str__(self):
return json.dumps(self.to_json())
@property
def _connection(self):
return self._altdriver._connection
@property
def name(self):
return self._data.get("name", "")
@property
def id(self):
return self._data.get("id", 0)
@property
def x(self):
return self._data.get("x", 0)
@property
def y(self):
return self._data.get("y", 0)
@property
def z(self):
return self._data.get("z", 0)
@property
def mobileY(self):
return self._data.get("mobileY", 0)
@property
def type(self):
return self._data.get("type", "")
@property
def enabled(self):
return self._data.get("enabled", True)
@property
def worldX(self):
return self._data.get("worldX", 0.0)
@property
def worldY(self):
return self._data.get("worldY", 0.0)
@property
def worldZ(self):
return self._data.get("worldZ", 0.0)
@property
def idCamera(self):
return self._data.get("idCamera", 0)
@property
def transformParentId(self):
return self._data.get("transformParentId", 0)
@property
def transformId(self):
return self._data.get("transformId", 0)
def to_json(self):
return {
"name": self.name,
"id": self.id,
"x": self.x,
"y": self.y,
"z": self.z,
"mobileY": self.mobileY,
"type": self.type,
"enabled": self.enabled,
"worldX": self.worldX,
"worldY": self.worldY,
"worldZ": self.worldZ,
"transformParentId": self.transformParentId,
"transformId": self.transformId,
"idCamera": self.idCamera
}
def update_object(self):
altObject = commands.FindObject.run(
self._connection,
By.ID, self.id, By.NAME, "", enabled=True
)
return AltObject(self._altdriver, altObject)
def get_screen_position(self):
"""Returns the screen position.
Returns:
tuple: A tuple containing ``x`` and ``y``.
"""
return self.x, self.y
def get_world_position(self):
"""Returns the world position.
Returns:
tuple: A tuple containing ``worldX``, ``worldY`` and ``worldZ``.
"""
return self.worldX, self.worldY, self.worldZ
def get_parent(self):
"""Returns the parent object.
Returns:
AltObject: The parent object.
"""
data = commands.FindObject.run(
self._connection,
By.PATH, "//*[@id={}]/..".format(self.id), By.NAME, "", enabled=True
)
return AltObject(self._altdriver, data)
    def get_all_components(self):
        """Returns all components.

        Forwards to the ``GetAllComponents`` command over the driver
        connection; the exact structure of the returned value is defined by
        that command (presumably a list of component descriptors — confirm
        against commands.GetAllComponents).
        """
        return commands.GetAllComponents.run(self._connection, self)
    def wait_for_component_property(self, component_name, property_name,
                                    property_value, assembly, timeout=20, interval=0.5):
        """Wait until a property has a specific value and returns the value of the given component property.

        Args:
            component_name (:obj:`str`): The name of the component. If the component has a namespace the format should
                look like this: ``"namespace.componentName"``.
            property_name (:obj:`str`): The name of the property of which value you want. If the property is an array
                you can specify which element of the array to return by doing ``property[index]``, or if you want a
                property inside of another property you can get by doing ``property.subProperty``.
            property_value(:obj:`str`): The value of the component expected
            assembly (:obj:`str`): The name of the assembly containing the component.
            timeout (:obj:`int`, optional): The number of seconds that it will wait for property.
            interval (:obj:`float`, optional): The number of seconds after which it will try to find the object again.
                The interval should be smaller than timeout.

        Returns:
            str: The property value is serialized to a JSON string.
        """
        # NOTE(review): unlike the sibling forwarding methods, this command is
        # not given self._connection as first argument — presumably it polls
        # via the object itself; confirm against WaitForComponentProperty.
        return commands.WaitForComponentProperty.run(
            component_name, property_name, property_value,
            assembly, self, timeout, interval
        )
def get_component_property(self, component_name, property_name, assembly, max_depth=2):
"""Returns the value of the given component property.
Args:
component_name (:obj:`str`): The name of the component. If the component has a namespace the format should
look like this: ``"namespace.componentName"``.
property_name (:obj:`str`): The name of the property of which value you want. If the property is an array
you can specify which element of the array to return by doing ``property[index]``, or if you want a
property inside of another property you can get by doing ``property.subProperty``.
assembly (:obj:`str`): The name of the assembly containing the component.
maxDepth (:obj:`int`, optional): Set how deep to serialize the property. Defaults to ``2``.
Returns:
str: The property value is serialized to a JSON string.
"""
return commands.GetComponentProperty.run(
self._connection,
component_name, property_name, assembly, max_depth, self
)
def set_component_property(self, component_name, property_name, assembly, value):
"""Sets a value for a given component property.
Args:
component_name (:obj:`str`): The name of the component. If the component has a namespace the format should
look like this: ``"namespace.componentName"``.
property_name (:obj:`str`): The name of the property of which value you want to set.
assembly (:obj:`str`): The name of the assembly containing the component.
value (:obj:`str`): The value to be set for the chosen component's property.
Returns:
str: The property value is serialized to a JSON string.
"""
return commands.SetComponentProperty.run(
self._connection,
component_name, property_name, value, assembly, self
)
def call_component_method(self, component_name, method_name, assembly, parameters=None, type_of_parameters=None):
"""Invokes a method from an existing component of the object.
Args:
component_name (:obj:`str`): The name of the script. If the script has a namespace the format should look
like this: ``"namespace.typeName"``.
method_name (:obj:`str`): The name of the public method that we want to call. If the method is inside a
static property/field to be able to call that method, methodName need to be the following format
``"propertyName.MethodName"``.
assembly (:obj:`str`): The name of the assembly containing the script.
parameters (:obj:`list`, :obj:`tuple`, optional): Defaults to ``None``.
type_of_parameters (:obj:`list`, :obj:`tuple`, optional): Defaults to ``None``.
Return:
str: The value returned by the method is serialized to a JSON string.
"""
return commands.CallMethod.run(
self._connection,
component_name,
method_name,
alt_object=self,
parameters=parameters,
type_of_parameters=type_of_parameters,
assembly=assembly
)
def get_text(self):
"""Returns text value from a Button, Text, InputField. This also works with TextMeshPro elements.
Returns:
str: The text value of the AltObject.
"""
return commands.GetText.run(self._connection, self)
def set_text(self, text, submit=False):
"""Sets text value for a Button, Text or InputField. This also works with TextMeshPro elements.
Args:
text (obj:`str`): The text to be set.
submit (obj:`bool`): If set will trigger a submit event.
Returns:
AltObject: The current AltObject.
"""
data = commands.SetText.run(self._connection, text, self, submit)
return AltObject(self._altdriver, data)
def pointer_up(self):
"""Simulates pointer up action on the object.
Returns:
AltObject: The current AltObject.
"""
data = commands.PointerUp.run(self._connection, self)
return AltObject(self._altdriver, data)
def pointer_down(self):
"""Simulates pointer down action on the object.
Returns:
AltObject: The current AltObject.
"""
data = commands.PointerDown.run(self._connection, self)
return AltObject(self._altdriver, data)
def pointer_enter(self):
"""Simulates pointer enter action on the object.
Returns:
AltObject: The current AltObject.
"""
data = commands.PointerEnter.run(self._connection, self)
return AltObject(self._altdriver, data)
def pointer_exit(self):
"""Simulates pointer exit action on the object.
Returns:
AltObject: The current AltObject.
"""
data = commands.PointerExit.run(self._connection, self)
return AltObject(self._altdriver, data)
def tap(self, count=1, interval=0.1, wait=True):
"""Taps the current object.
Args:
count (:obj:`int`, optional): Number of taps. Defaults to ``1``.
interval (:obj:`int`, :obj:`float`, optional): Interval between taps in seconds. Defaults to ``0.1``.
wait (:obj:`int`, optional): Wait for command to finish. Defaults to ``True``.
Returns:
AltObject: The tapped object.
"""
data = commands.TapElement.run(
self._connection,
self, count, interval, wait
)
return AltObject(self._altdriver, data)
def click(self, count=1, interval=0.1, wait=True):
"""Clicks the current object.
Args:
count (:obj:`int`, optional): Number of clicks. Defaults to ``1``.
interval (:obj:`int`, :obj:`float`, optional): Interval between clicks in seconds. Defaults to ``0.1``.
wait (:obj:`int`, optional): Wait for command to finish. Defaults to ``True``.
Returns:
AltObject: The clicked object.
"""
data = commands.ClickElement.run(
self._connection,
self, count, interval, wait
)
return AltObject(self._altdriver, data) | PypiClean |
/dyson-1.1.2.tar.gz/dyson-1.1.2/lib/dyson/modules/core/waits/wait_for.py | from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from dyson import constants
from dyson.constants import to_boolean
from dyson.errors import DysonError
from dyson.utils.module import DysonModule
from dyson.utils.selectors import translate_selector_to_by
class WaitForModule(DysonModule):
    """Dyson module that blocks until a Selenium expected condition holds.

    ``params`` is a dict whose top-level key names the wait action (one of
    ``VALID_ACTIONS``); its value carries the action's arguments, e.g.
    ``{'visibility_of': {'element': 'css=#id', 'timeout': 10}}``.
    """

    VALID_ACTIONS = frozenset([
        'visibility_of',
        'invisibility_of',
        'presence_of',
        'title_to_be',
        'title_to_contain',
        'alert',
        'text_to_be_present',
        'clickable',
        'value_to_be',
        'staleness_of',
        'presence_of_all',
        'element_to_be_selected',
        'selection_state_to_be',
        'frame_and_switch'
    ])

    def run(self, webdriver, params):
        """
        Wait for things.

        :param webdriver: the Selenium WebDriver instance
        :param params: dict mapping one action name to its arguments
        :return: the value produced by the satisfied expected condition
        :raises DysonError: when a required argument for an action is missing
        """
        if isinstance(params, dict):
            if 'visibility_of' in params:
                # Wait for the visibility of an element
                action = params['visibility_of']
                if 'element' in action:
                    element = action['element']
                else:
                    raise DysonError("Key \"element\" is required")
                timeout = self._timeout_of(action)
                return self._wait_for(element, expected_conditions.visibility_of_element_located,
                                      timeout, webdriver)
            if 'invisibility_of' in params:
                # Wait for the invisibility of an element
                action = params['invisibility_of']
                if 'element' in action:
                    element = action['element']
                else:
                    raise DysonError("Key \"element\" is required")
                timeout = self._timeout_of(action)
                return self._wait_for(element, expected_conditions.invisibility_of_element_located,
                                      timeout, webdriver)
            if 'presence_of' in params:
                # Wait for the presence of an element
                action = params['presence_of']
                if 'element' in action:
                    element = action['element']
                else:
                    raise DysonError("Key \"element\" is required")
                timeout = self._timeout_of(action)
                return self._wait_for(element, expected_conditions.presence_of_element_located,
                                      timeout, webdriver)
            if 'title_to_be' in params:
                # Wait for the title to be something.
                # NOTE(review): the action value itself is used as the title, so a
                # per-action 'timeout' can only be honored when the value is a
                # mapping -- behavior preserved from the original implementation.
                title = params['title_to_be']
                timeout = self._timeout_of(params['title_to_be'])
                return WebDriverWait(webdriver, timeout).until(
                    expected_conditions.title_is(title)
                )
            if 'title_to_contain' in params:
                # Wait for the title to contain a substring
                title = params['title_to_contain']
                timeout = self._timeout_of(params['title_to_contain'])
                return WebDriverWait(webdriver, timeout).until(
                    expected_conditions.title_contains(title)
                )
            if 'alert' in params:
                # Wait for an alert to be present
                timeout = self._timeout_of(params['alert'])
                return WebDriverWait(webdriver, timeout).until(
                    expected_conditions.alert_is_present()
                )
            if 'text_to_be_present' in params:
                # Wait for text to be present in an element
                action = params['text_to_be_present']
                timeout = self._timeout_of(action)
                if 'in_element' in action:
                    in_element = action['in_element']
                    if 'text' in action:
                        # Bug fix: 'text' lives on the action dict (matching the
                        # membership test above), not on the selector dict.
                        text = action['text']
                        strategy, selector = translate_selector_to_by(in_element)
                        return WebDriverWait(webdriver, timeout).until(
                            expected_conditions.text_to_be_present_in_element(
                                (strategy, selector), text
                            )
                        )
                    else:
                        raise DysonError("Key \"text\" is required")
                else:
                    raise DysonError("Key \"in_element\" is required")
            if 'clickable' in params:
                # Wait for an element to be clickable
                action = params['clickable']
                timeout = self._timeout_of(action)
                return self._wait_for(action['element'],
                                      expected_conditions.element_to_be_clickable, timeout, webdriver)
            if 'value_to_be' in params:
                # Wait for an element's value attribute to contain a string
                action = params['value_to_be']
                timeout = self._timeout_of(action)
                if 'in_element' in action:
                    in_element = action['in_element']
                    if 'value' in action:
                        # Bug fix: 'value' lives on the action dict (matching the
                        # membership test above), not on the selector dict.
                        value = action['value']
                        strategy, selector = translate_selector_to_by(in_element)
                        return WebDriverWait(webdriver, timeout).until(
                            expected_conditions.text_to_be_present_in_element_value(
                                (strategy, selector), value
                            )
                        )
                    else:
                        # Bug fix: the missing key here is "value", not "text".
                        raise DysonError("Key \"value\" is required")
                else:
                    raise DysonError("Key \"in_element\" is required")
            if 'staleness_of' in params:
                # Wait for an element to go stale (detached from the DOM)
                action = params['staleness_of']
                timeout = self._timeout_of(action)
                if 'element' in action:
                    element = action['element']
                    return self._wait_for(element, expected_conditions.staleness_of, timeout, webdriver)
                else:
                    raise DysonError("Key \"element\" is required")
            if 'presence_of_all' in params:
                # Wait for the presence of all elements matching a selector
                action = params['presence_of_all']
                timeout = self._timeout_of(action)
                if 'elements' in action:
                    # Bug fix: pass the selector stored under 'elements' rather
                    # than the whole action dict.
                    elements = action['elements']
                    return self._wait_for(elements, expected_conditions.presence_of_all_elements_located,
                                          timeout, webdriver)
                else:
                    raise DysonError("Key \"elements\" is required")
            if 'element_to_be_selected' in params:
                # Wait for an element to be selected
                action = params['element_to_be_selected']
                timeout = self._timeout_of(action)
                if 'element' in action:
                    element = action['element']
                    return self._wait_for(element, expected_conditions.element_located_to_be_selected,
                                          timeout, webdriver)
                else:
                    raise DysonError("Key \"element\" is required")
            if 'selection_state_to_be' in params:
                # Wait for an element's selection state to match the given state
                action = params['selection_state_to_be']
                timeout = self._timeout_of(action)
                if 'in_element' in action:
                    in_element = action['in_element']
                    if 'state' in action:
                        state = to_boolean(action['state'])
                        strategy, selector = translate_selector_to_by(in_element)
                        return WebDriverWait(webdriver, timeout).until(
                            expected_conditions.element_located_selection_state_to_be(
                                (strategy, selector), state
                            )
                        )
                    else:
                        raise DysonError("Key \"state\" is required")
                else:
                    raise DysonError("Key \"in_element\" is required")
            if 'frame_and_switch' in params:
                # Wait for a frame to be available, then switch to it
                action = params['frame_and_switch']
                timeout = self._timeout_of(action)
                if 'frame' in action:
                    frame = action['frame']
                    return self._wait_for(frame, expected_conditions.frame_to_be_available_and_switch_to_it,
                                          timeout, webdriver)
                else:
                    raise DysonError("Key \"frame\" is required")
            # Unrecognized action names fall through and return None, matching
            # the original behavior.
        else:
            self.fail("Invalid type. You must specify a valid action")

    @staticmethod
    def _timeout_of(action_params):
        """
        Extract the per-action timeout (seconds), falling back to the default.

        :param action_params: the argument mapping of a single wait action
        :return: timeout in seconds as an int
        """
        timeout = int(constants.DEFAULT_TIMEOUT)  # seconds
        if 'timeout' in action_params:
            timeout = int(action_params['timeout'])
        return timeout

    def _wait_for(self, element, expected_condition, timeout, webdriver):
        """
        Helper method to wait for a specific condition of an element.

        :param element: the element as it's passed from the test (e.g. "css=something")
        :param expected_condition: the ExpectedCondition from Selenium
        :param timeout: timeout in seconds
        :param webdriver: the Selenium WebDriver instance
        :return: the value produced by the satisfied expected condition
        """
        strategy, selector = translate_selector_to_by(element)
        return WebDriverWait(webdriver, timeout).until(
            expected_condition((strategy, selector))
        )
/Azule_Hair_Transplant-0.0.1.tar.gz/Azule_Hair_Transplant-0.0.1/losses/masked_lpips/__init__.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from skimage.metrics import structural_similarity
import torch
from torch.autograd import Variable
from ..masked_lpips import dist_model
class PerceptualLoss(torch.nn.Module):
    """Learned perceptual similarity loss (LPIPS-style) with mask support.

    Wraps ``dist_model.DistModel``; the default configuration uses
    perceptually-learned linear weights on top of an AlexNet backbone.
    """

    def __init__(
        self,
        model="net-lin",
        net="alex",
        vgg_blocks=[1, 2, 3, 4, 5],
        colorspace="rgb",
        spatial=False,
        use_gpu=True,
        gpu_ids=[0],
    ):  # VGG using our perceptually-learned weights (LPIPS metric)
        super(PerceptualLoss, self).__init__()
        print("Setting up Perceptual loss...")
        self.gpu_ids = gpu_ids
        self.spatial = spatial
        self.use_gpu = use_gpu
        dist = dist_model.DistModel()
        dist.initialize(
            model=model,
            net=net,
            vgg_blocks=vgg_blocks,
            colorspace=colorspace,
            spatial=self.spatial,
            use_gpu=use_gpu,
            gpu_ids=gpu_ids,
        )
        self.model = dist
        print("...[%s] initialized" % self.model.name())
        print("...Done")

    def forward(self, pred, target, mask=None, normalize=False):
        """Compute the perceptual distance between ``pred`` and ``target``.

        Both inputs are Nx3xHxW Variables. If ``normalize`` is True the
        images are assumed to lie in [0, 1] and are rescaled to [-1, +1];
        otherwise they must already be in [-1, +1].

        Returns a pytorch Variable of length N.
        """
        if normalize:
            target = 2 * target - 1
            pred = 2 * pred - 1
        return self.model.forward(target, pred, mask=mask)
def normalize_tensor(in_feat, eps=1e-10):
    """L2-normalize ``in_feat`` along the channel dimension (dim 1).

    Masked tensors are handled implicitly; ``eps`` guards against division
    by zero for all-zero feature vectors.
    """
    squared_sum = torch.sum(in_feat ** 2, dim=1, keepdim=True)
    norm_factor = squared_sum.sqrt()
    return in_feat / (norm_factor + eps)
def l2(p0, p1, range=255.0):
    """Half of the mean squared error between ``p0`` and ``p1``, scaled by ``range``."""
    diff = p0 / range - p1 / range
    return 0.5 * np.mean(diff ** 2)
def psnr(p0, p1, peak=255.0):
    """Peak signal-to-noise ratio (in dB) between two images."""
    mse = np.mean((1.0 * p0 - 1.0 * p1) ** 2)
    return 10 * np.log10(peak ** 2 / mse)
def dssim(p0, p1, range=255.0):
    """Structural dissimilarity: (1 - SSIM) / 2, in [0, 1].

    Bug fix: the original called ``compare_ssim``, a legacy skimage name that
    is not imported in this module (the import at the top of the file is
    ``structural_similarity``), so this function always raised NameError.
    """
    return (1 - structural_similarity(p0, p1, data_range=range, multichannel=True)) / 2.0
def rgb2lab(in_img, mean_cent=False):
    """Convert an RGB image to CIE Lab, optionally mean-centering the L channel."""
    from skimage import color

    img_lab = color.rgb2lab(in_img)
    if mean_cent:
        # Shift L so that mid-gray sits at zero.
        img_lab[:, :, 0] -= 50
    return img_lab
def tensor2np(tensor_obj):
    """Convert a 1xCxHxW tensor into an HxWxC numpy array."""
    chw = tensor_obj[0].cpu().float().numpy()
    return chw.transpose((1, 2, 0))
def np2tensor(np_obj):
    """Convert an HxWxC numpy array into a 1xCxHxW tensor."""
    with_batch_axis = np_obj[:, :, :, np.newaxis]
    return torch.Tensor(with_batch_axis.transpose((3, 2, 0, 1)))
def tensor2tensorlab(image_tensor, to_norm=True, mc_only=False):
    """Convert an RGB image tensor to a Lab tensor.

    ``mc_only`` mean-centers the L channel without rescaling; ``to_norm``
    (when ``mc_only`` is False) mean-centers L and scales the result by 1/100.
    """
    from skimage import color

    img_lab = color.rgb2lab(tensor2im(image_tensor))
    if mc_only:
        img_lab[:, :, 0] -= 50
    if to_norm and not mc_only:
        img_lab[:, :, 0] -= 50
        img_lab = img_lab / 100.0
    return np2tensor(img_lab)
def tensorlab2tensor(lab_tensor, return_inbnd=False):
    """Convert a Lab tensor back to an RGB image tensor.

    When ``return_inbnd`` is True, additionally returns a mask marking pixels
    whose Lab values survive the RGB round-trip (i.e. were in gamut).
    """
    import warnings

    from skimage import color

    warnings.filterwarnings("ignore")
    lab = tensor2np(lab_tensor) * 100.0
    lab[:, :, 0] += 50
    rgb_back = 255.0 * np.clip(color.lab2rgb(lab.astype("float")), 0, 1)
    if not return_inbnd:
        return im2tensor(rgb_back)
    # Convert back to Lab and flag pixels that come back (nearly) unchanged.
    lab_back = color.rgb2lab(rgb_back.astype("uint8"))
    mask = 1.0 * np.isclose(lab_back, lab, atol=2.0)
    mask = np2tensor(np.prod(mask, axis=2)[:, :, np.newaxis])
    return (im2tensor(rgb_back), mask)
def rgb2lab(input):
    """Convert a [0, 255] RGB image to CIE Lab.

    NOTE(review): this re-definition shadows the two-argument ``rgb2lab``
    defined earlier in the module.
    """
    from skimage import color

    scaled = input / 255.0
    return color.rgb2lab(scaled)
def tensor2im(image_tensor, imtype=np.uint8, cent=1.0, factor=255.0 / 2.0):
    """Convert a 1xCxHxW tensor in [-1, 1] to an HxWxC uint8 image array."""
    frame = image_tensor[0].cpu().float().numpy()
    frame = (frame.transpose((1, 2, 0)) + cent) * factor
    return frame.astype(imtype)
def im2tensor(image, imtype=np.uint8, cent=1.0, factor=255.0 / 2.0):
    """Convert an HxWxC image in [0, 255] to a 1xCxHxW tensor in [-1, 1]."""
    rescaled = image / factor - cent
    return torch.Tensor(rescaled[:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
def tensor2vec(vector_tensor):
    """Flatten an NxCx1x1 tensor into an NxC numpy array."""
    arr = vector_tensor.data.cpu().numpy()
    return arr[:, :, 0, 0]
def voc_ap(rec, prec, use_07_metric=False):
    """Compute VOC average precision from recall/precision arrays.

    With ``use_07_metric`` the VOC 2007 11-point interpolation is used;
    otherwise the exact area under the precision envelope is integrated.
    """
    if use_07_metric:
        # 11-point metric: average the best precision at recall >= t.
        ap = 0.0
        for t in np.arange(0.0, 1.1, 0.1):
            mask = rec >= t
            p = np.max(prec[mask]) if np.sum(mask) != 0 else 0
            ap += p / 11.0
        return ap
    # Exact AP: append sentinel values at both ends.
    mrec = np.concatenate(([0.0], rec, [1.0]))
    mpre = np.concatenate(([0.0], prec, [0.0]))
    # Make precision monotonically non-increasing (the precision envelope).
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # Integrate the envelope over the points where recall changes value.
    changed = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[changed + 1] - mrec[changed]) * mpre[changed + 1])
def tensor2im(image_tensor, imtype=np.uint8, cent=1.0, factor=255.0 / 2.0):
    """Convert a 1xCxHxW tensor in [-1, 1] to an HxWxC uint8 image array.

    NOTE(review): this re-definition shadows the identical ``tensor2im``
    defined earlier in the module.
    """
    hwc = np.transpose(image_tensor[0].cpu().float().numpy(), (1, 2, 0))
    return ((hwc + cent) * factor).astype(imtype)
def im2tensor(image, imtype=np.uint8, cent=1.0, factor=255.0 / 2.0):
    """Convert an HxWxC image in [0, 255] to a 1xCxHxW tensor in [-1, 1].

    NOTE(review): this re-definition shadows the identical ``im2tensor``
    defined earlier in the module.
    """
    shifted = image / factor - cent
    nchw = shifted[:, :, :, np.newaxis].transpose((3, 2, 0, 1))
    return torch.Tensor(nchw)
/HearPlanetAPI-0.1.1.tar.gz/HearPlanetAPI-0.1.1/README.rst | About
=====
This is the HearPlanet supported Python driver for HearPlanet’s public
API.
This API supports queries to HearPlanet’s database of Points of
Interest, Articles, Images and Audio files.
If you need additional support, please visit http://www.hearplanet.com/
Overview
========
Setup
-----
The easiest way to get started with the driver is to install it from the
Python Package Index.
::
pip install HearPlanetAPI
First you need to obtain API access credentials from HearPlanet.
Create a configuration file containing your credentials, by copying and
customizing hearplanet.cfg.example to one, or both, of the following:
1. /etc/hearplanet.cfg ### Site-wide
2. ~/.hearplanet.cfg ### Individual
To use the driver in a Python program, just …
::
from hearplanet import HearPlanet
api = HearPlanet()
example.py is provided with the driver as a reference.
Dependencies
------------
Minimum Python version 2.5.
`Requests`_
Basic Design
------------
The driver allows you to access the HearPlanet API, sending requests and
getting data back.
One thing to be aware of is the behavior of the query modifier
functions. These return new query instances, so base queries can be set
up and then modified in different ways to produce new queries.
You specify the table (type of object) you want to search for, the
search terms, and various filters and modifiers. The search is executed
when you access the response object.
Tables
------
Many of the HearPlanet database tables can be accessed. However,
generally if you are only making requests, you will only need to be
accessing the “article” and/or “poi” tables. The general layout looks
something like this:
::
table('poi')
table('article')
fetch({id}, objects={'object'}, size={'image_size_code'})
search()
term('A Query Term')
point({'lat':'37.0', 'lng':'-122.5'})
location('123 Address St., Anytown')
filters({'key':'value'})
featured()
First you would select the table (poi or article). If you already know
the unique identifier of the poi or article, you can use fetch(). If you
would like to get the “featured” articles, then just use featured().
Otherwise, use search() plus one or more of term(), point() and
location(). Finally, you can add filters to further refine your search.
Other tables of interest might be “languages” and “categories.” For a
complete list, consult the `API documentation`_.
Search Requests
---------------
Searches for POI’s and Articles can be performed based on location or
query term.
Location searches return POI’s and Articles near a point – either a
latitude/longitude or an address (or other geocodable location). If you
give both point() and location(), objects near location will be used,
and distances to that location will be calculated from point. Examples:
::
point({'lat':'37.0', 'lng':'-122.5'})
location('123 Address St., Anytown')
Query Term searches do a full-text search in the title of the POI or
Article.
::
term('Pizza')
In combination:
::
# Search for POI's with "Pizza" in their title located in
# Chicago, calculating distances from the given point.
req = api.table('poi').search()
req = req.term('Pizza').location('Chicago, IL')
req = req.point({'lat':'37.0', 'lng':'-122.5'})
Fetch a particular POI or Article if you have its id:
::
req = api.table('article').fetch(999999)
If you only want some of the objects associated with an article, you can
request them specifically. For example, if an article has email
addresses associated with it:
::
req = api.table('article').fetch(999999, 'emails')
Images take an optional ‘size’ parameter, for example ‘T’ for thumbnail:
::
req = api.table('article').fetch(999999, 'images', 'T')
The full list of article objects is:
- addresses
- audio
- categories
- details
- emails
- images
- phones
- rating\_average
- ratings
- reviews
- sections
- sections\_f
- tags
- websites
Get featured Articles :
::
req = api.table('article').featured()
Search Request Filters
----------------------
Filters can be applied to the searches:
::
req = req.filters({'ch':'hearplanet'})
req = req.filters({'lang':'fr'})
req = req.filters({'bbox':'(37.3,-122.8)(37.6,-120.0)'})
req = req.filters({'radius':15'}) # search radius in kilometers
Request modifiers
-----------------
Request modifiers are used for paging results, selecting the text format
and the amount of data returned.
You can either use limit() and offset() together, or just use page().
The default values for offset and limit are 0 and 10, respectively. If
you use page(), just specify an integer page number from 1 to N. The
default page length is 10.
::
limit(max_rows)
offset(offset)
page(page_num, limit=DEFAULT_LIMIT)
format(format) # ('HTML', 'HTML-RAW', 'XHTML', 'PLAIN', 'AS-IS')
depth(depth) # ('min', 'poi', 'article', 'section',
'section_text', 'all',)
- The format modifiers change the formatting of the section text.
Normally this is set on the backend and you don’t have to worry about
it. However, if necessary you can override it.
- The depth modifiers change the amount of information that is
returned. That’s primarily for performance enhancement, when
accessing the API over a slow network. For example, make a shallow
initial search using the poi.json endpoint at depth ‘poi’ to get a
list of POI’s and their Articles. Then the full Article can be
selected by the user, and a second request made for just that Article
using fetch().
First do a shallow search of POI’s that have “Pizza” in their title
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
poi_list = api.table('poi').search().term('Pizza').depth('poi').page(1).objects()
Get the id of the first Article in the first POI
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
first_poi = poi_list[0]
first_article_id = first_poi.articles[0].id
print first_poi
Now get all the data related to that Article
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
article = api.table('article').fetch(first_article_id).objects()
print article
Examples
--------
Create an API query object
~~~~~~~~~~~~~~~~~~~~~~~~~~
::
api = HearPlanet()
Specify a search of the POI table
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
req = api.table('poi').search()
Add a query term, and search origin
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
req = req.term('Golden Gate')
req = req.location('San Francisco, CA')
Add a filter: only return articles in the Wikipedia channel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
req = req.filters({'ch':'wikipedia'})
Ask for only the first page (default is the first 10 objects)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
req = req.page(1)
Get the return value as data or objects
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
objects = req.objects()
Do something with the objects
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
for poi in objects:
print poi.title
Or, you can chain the requests all together
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
pois = api.table('poi').search().term('Golden Gate').location('San Francisco, CA').filters({'ch':'wikipedia'}).page(1).objects()
Unit Tests
----------
Unit Tests are provided to ensure the driver is functioning as expected.
The unit tests also serve as examples of various API requests.
You can run the Unit Tests in test\_hearplanet.py like this:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
python test_hearplanet.py
URL Encoding
------------
The Python driver handles URL encoding, therefore all parameters passed
to the driver should be in their un-encoded form.
.. _Requests: http://docs.python-requests.org/en/latest/
.. _API documentation: http://prod.hearplanet.com/api/2.0/documentation/
| PypiClean |
/Djblets-3.3.tar.gz/Djblets-3.3/docs/releasenotes/0.8-alpha-1.rst | =================================
Djblets 0.8 Alpha 1 Release Notes
=================================
**Release date**: November 9, 2013
Compatibility
=============
* Djblets now requires Django 1.5. At this time, it is not tested
with Django 1.6, but will soon require that as the minimum.
* Made some progress toward Python 3 support. It is not yet complete.
Static files
============
* All static media bundled with Djblets has been moved into a
:file:`static/` directory.
* Static files are now compatible out-of-the-box with django-pipeline.
:py:mod:`djblets.settings` exposes :py:data:`PIPELINE_CSS` and
:py:data:`PIPELINE_JS` variables that can be pulled into a project and used
directly or merged with their own pipeline settings.
There are four JavaScript bundles:
* ``djblets-datagrid`` - Datagrid support
* ``djblets-extensions-admin`` - Admin UI for extensions
* ``djblets-extensions`` - General JavaScript extension support
* ``djblets-gravy`` - jquery.gravy's set of widgets and utilities
There are also two CSS bundles:
* ``djblets-admin`` - Admin UI styles for siteconfig and extensions.
* ``djblets-datagrid`` - Styles for datagrids
* CSS files are now LesSCSS files. This requires the less.js runtime, or
compilation.
* jQuery and jQuery-UI are no longer shipped with Djblets. The
:file:`jquery.html` and :file:`jquery-ui.html` templates now use the
versions on the Google CDN by default.
jquery.gravy
============
* ``jquery.gravy`` has been split into multiple files. These can be used
individually, but will be combined together if using the provided
:py:data:`PIPELINE_JS`.
* Removed legacy workarounds for older versions of Internet Explorer,
Mobile Safari, and Opera.
* Removed ``$.browser`` additions.
jquery.gravy.inlineEditor
=========================
* Added support for custom textarea-type widgets in an ``inlineEditor``.
Callers can now pass new options to ``inlineEditor`` that define methods
for operating on a textarea, or equivalent. These allow other widgets,
such as CodeMirror, to be placed in an ``inlineEditor``.
The new optional methods are: ``createMultilineField``, ``setFieldValue``,
``getFieldValue``, and ``isFieldDirty``.
* On required fields, the asterisk now has a tooltip.
* Fixed auto-sizing ``inlineEditor`` when forcing an auto-size. It didn't
always size itself correctly.
* Fixed dirty detection when using ``hasRawValue``.
* Fixed setting the width of an editor when the content wraps.
djblets.datagrid
================
* Datagrids now use spritesheets for the icons, reducing the number of
downloads.
* Added Retina icons for the datagrids.
djblets.extensions
==================
* Added support for looking up static media for extensions.
Extensions can now provide a :file:`static/` directory containing all their
static media. They can then look them up in a template by doing::
{% ext_static extension "path/to/file.png" %}
This requires that the project list
``djblets.extensions.staticfiles.ExtensionFinder`` in
:py:data:`settings.STATICFILES_FINDERS`.
* Added support for defining static media bundles.
Extensions can list all the CSS and JavaScript bundles they use by
defining :py:attr:`Extension.css_bundles` and
:py:attr:`Extension.js_bundles` variables. These are Pipeline bundle
definitions, and follow the same format, with the exception that
``output_filename`` is optional.
If the bundle name is "default", it will be included on any page
that uses the ``{% load_extensions_js %}`` and
``{% load_extensions_css %}`` template tags.
* Added support for packaging static media bundles.
Bundles will be automatically packaged along with an extension, if
the project providing extension support provides the proper support,
and the extension makes use of it. See djblets.extensions.packaging.
* Added support for JavaScript extensions.
These function as a counterpart to the Python extensions. They
are subclasses of :js:class:`Djblets.Extension`, and are very similar to
the Python extensions.
Python Extension subclasses can specify the name of their JavaScript
counterpart by setting the :py:attr:`Extension.js_model_class` variable.
These will be instantiated on all pages that use the
``{% init_js_extensions %}`` template tag.
JavaScript hooks can also be written by subclassing
:js:class:`Djblets.ExtensionHook`. See the documentation included in
:file:`static/djblets/js/extensions/models/extensionHookModel.js` for more
information.
This requires Backbone.js.
djblets.testing
===============
* Removed the old Selenium support.
djblets.util.templatetags.djblets_email
=======================================
* The ``{% condense %}`` template tag can now be passed the max number of
sequential blank lines to allow.
djblets.util.templatetags.djblets_js
====================================
* The :py:func:`json_dumps` filter now uses :py:class:`DjangoJSONEncoder`,
which properly handles timestamps and some other types.
djblets.webapi
==============
* Drastically improved performance of queries made through the API.
* :py:meth:`WebAPIResource.get_object` now takes an optional ``id_field``
parameter, which specifies which field to look up as the ID. If not specified,
the default for the resource is used.
* Removed backwards-compatibility support for the old
:py:attr:`allowed_item_mimetypes` and :py:attr:`allowed_list_mimetypes`
fields.
* Specifying a negative index for the ``start`` query parameter on list
resources no longer triggers an HTTP 500. It's now interpreted as 0.
Contributors
============
* Christian Hammond
* David Trowbridge
| PypiClean |
/K_AIKO-0.5.2-py3-none-any.whl/kaiko/menu/songs.py | import os
import random
import shutil
import queue
import traceback
import dataclasses
from typing import Optional
from pathlib import Path
from kaiko.utils import commands as cmd
from kaiko.utils import datanodes as dn
from kaiko.utils import engines
from kaiko.beats import beatsheets
@dataclasses.dataclass
class SongMetadata:
    """Metadata needed to locate, play and describe one song."""

    root: str  # directory the beatmap set lives in
    audio: str  # audio file name, relative to `root`
    volume: float  # playback volume
    info: str  # raw "key: value" lines describing the song
    preview: float  # preview start time, in seconds

    @classmethod
    def from_beatmap(clz, beatmap):
        """Build a SongMetadata from a beatmap, or None if it has no audio."""
        if beatmap.audio is None:
            return None
        return clz(
            root=beatmap.root,
            audio=beatmap.audio,
            volume=beatmap.volume,
            info=beatmap.info,
            preview=beatmap.preview,
        )

    @property
    def path(self):
        """Path of the audio file (root joined with audio)."""
        return os.path.join(self.root, self.audio)

    def get_info(self, logger):
        """Render the song info as "key value" lines with emphasized values."""
        fields = {"path:": Path(self.path).as_uri()}
        for line in self.info.strip().splitlines():
            sep = line.find(":")
            if sep != -1:
                key, value = line[:sep + 1], line[sep + 1:]
            else:
                key, value = line, ""
            fields[key] = value
        return "\n".join(f"{key} {logger.emph(value)}" for key, value in fields.items())
class BeatmapManager:
    """Manages the songs directory: scanning, adding and removing beatmaps.

    `_beatmaps` maps a beatmapset directory (relative to the songs folder)
    to the list of beatmap files (also relative paths) inside it.
    """

    def __init__(self, path, logger):
        self.path = path          # Path of the songs directory
        self.logger = logger
        self._beatmaps = {}
        self._beatmaps_mtime = None  # mtime of the songs dir at last reload

    def is_uptodate(self):
        """Return True if the songs directory hasn't changed since the last reload."""
        return self._beatmaps_mtime == os.stat(str(self.path)).st_mtime

    def reload(self):
        """Rescan the songs directory, unzipping any ``.osz`` archives first."""
        # Local import: zipfile is needed nowhere else in this module, and the
        # original code referenced it without importing it at all (NameError
        # on the first .osz archive encountered).
        import zipfile

        logger = self.logger
        songs_dir = self.path

        logger.print(f"Load songs from {logger.emph(songs_dir.as_uri())}...", prefix="data")

        for file in songs_dir.iterdir():
            if file.is_file() and file.suffix == ".osz":
                distpath = file.parent / file.stem
                if distpath.exists():
                    continue
                logger.print(f"Unzip file {logger.emph(file.as_uri())}...", prefix="data")
                distpath.mkdir()
                # context manager closes the archive even if extraction fails
                with zipfile.ZipFile(str(file), 'r') as zf:
                    zf.extractall(path=str(distpath))
                file.unlink()

        logger.print("Load beatmaps...", prefix="data")

        self._beatmaps_mtime = os.stat(str(songs_dir)).st_mtime
        self._beatmaps = {}

        for song in songs_dir.iterdir():
            if song.is_dir():
                beatmapset = [
                    beatmap.relative_to(songs_dir)
                    for beatmap in song.iterdir()
                    if beatmap.suffix in (".kaiko", ".ka", ".osu")
                ]
                if beatmapset:
                    self._beatmaps[song.relative_to(songs_dir)] = beatmapset

        if not self._beatmaps:
            logger.print("There is no song in the folder yet!", prefix="data")
        logger.print(flush=True)

    def add(self, beatmap):
        """Copy a beatmap file or beatmapset directory into the songs folder.

        On a name conflict the destination is renamed to "stem (n)suffix".
        """
        logger = self.logger
        songs_dir = self.path

        if not beatmap.exists():
            with logger.warn():
                logger.print(f"File not found: {str(beatmap)}")
            return
        if not beatmap.is_file() and not beatmap.is_dir():
            with logger.warn():
                logger.print(f"Not a file or directory: {str(beatmap)}")
            return

        logger.print(f"Add a new song from {logger.emph(beatmap.as_uri())}...", prefix="data")

        distpath = songs_dir / beatmap.name
        n = 1
        while distpath.exists():
            n += 1
            distpath = songs_dir / f"{beatmap.stem} ({n}){beatmap.suffix}"
        if n != 1:
            logger.print(f"Name conflict! Rename to {logger.emph(distpath.name)}", prefix="data")

        if beatmap.is_file():
            # copy to the (possibly renamed) destination path, not the bare
            # directory — otherwise the conflict rename computed above is
            # ignored and an existing file is silently overwritten
            shutil.copy(str(beatmap), str(distpath))
        elif beatmap.is_dir():
            shutil.copytree(str(beatmap), str(distpath))

        self.reload()

    def remove(self, beatmap):
        """Delete a beatmap file or a whole beatmapset directory."""
        logger = self.logger
        songs_dir = self.path
        beatmap_path = songs_dir / beatmap

        if beatmap_path.is_file():
            logger.print(f"Remove the beatmap at {logger.emph(beatmap_path.as_uri())}...", prefix="data")
            beatmap_path.unlink()
            self.reload()
        elif beatmap_path.is_dir():
            logger.print(f"Remove the beatmapset at {logger.emph(beatmap_path.as_uri())}...", prefix="data")
            shutil.rmtree(str(beatmap_path))
            self.reload()
        else:
            with logger.warn():
                logger.print(f"Not a file: {str(beatmap)}")

    def is_beatmapset(self, path):
        """Return True if `path` (relative) is a known beatmapset directory."""
        return path in self._beatmaps

    def is_beatmap(self, path):
        """Return True if `path` (relative) is a known beatmap file."""
        return path.parent in self._beatmaps and path in self._beatmaps[path.parent]

    def get_beatmap_metadata(self, path):
        """Read a beatmap's metadata; return None if the sheet fails to parse.

        Raises:
            ValueError: if `path` is not a known beatmap.
        """
        if not self.is_beatmap(path):
            raise ValueError(f"Not a beatmap: {str(path)}")

        filepath = self.path / path
        try:
            beatmap = beatsheets.BeatSheet.read(str(filepath), metadata_only=True)
        except beatsheets.BeatmapParseError:
            return None
        else:
            return beatmap

    def get_song(self, path):
        """Return the SongMetadata for a beatmap or beatmapset path, or None.

        For a beatmapset, the first beatmap in the set is used.
        """
        if self.is_beatmapset(path):
            path = self._beatmaps[path][0]
        beatmap = self.get_beatmap_metadata(path)
        return beatmap and SongMetadata.from_beatmap(beatmap)

    def get_songs(self):
        """Return SongMetadata for every beatmapset that has a readable song."""
        songs = [self.get_song(path) for path in self._beatmaps.keys()]
        return [song for song in songs if song is not None]

    def make_parser(self, bgm_controller=None):
        """Create a command parser over the currently loaded beatmaps."""
        return BeatmapParser(self, bgm_controller)
class BeatmapParser(cmd.TreeParser):
    """Command-line argument parser that completes beatmapset/beatmap paths."""

    def __init__(self, beatmap_manager, bgm_controller):
        super().__init__(BeatmapParser.make_tree(beatmap_manager._beatmaps))
        self.beatmap_manager = beatmap_manager
        self.bgm_controller = bgm_controller

    @staticmethod
    def make_tree(beatmapsets):
        """Build the completion tree from {set_path: [beatmap_path, ...]}.

        Each beatmapset becomes a key with a trailing path separator; its
        subtree maps "" (the set itself) and every beatmap's name inside the
        set to the `Path` type.
        """
        tree = {}
        for set_path, maps in beatmapsets.items():
            branch = {"": Path}
            for map_path in maps:
                branch[str(map_path.relative_to(set_path))] = Path
            # joining with "" appends the platform path separator
            tree[os.path.join(str(set_path), "")] = branch
        return tree

    def info(self, token):
        """Preview the song behind `token` and return the beatmap's info text."""
        path = Path(token)

        song = self.beatmap_manager.get_song(path)
        if self.bgm_controller is not None and song is not None:
            self.bgm_controller.play(song, song.preview)

        if self.beatmap_manager.is_beatmap(path):
            beatmap = self.beatmap_manager.get_beatmap_metadata(path)
            return beatmap.info.strip() if beatmap is not None else None
class KAIKOBGMController:
    """Background-music controller driven by a thread-safe action queue.

    Other threads request playback via `play`/`stop`; the `load_bgm` datanode
    drains the queue and drives the mixer.
    """

    def __init__(self, config, logger, beatmap_manager):
        self.config = config
        self.logger = logger
        self._current_bgm = None          # SongMetadata currently playing, or None
        self._action_queue = queue.Queue()  # (song, start) requests; (None, _) = stop
        self.beatmap_manager = beatmap_manager

    @dn.datanode
    def load_mixer(self, manager):
        """Create the audio mixer and keep `self.mixer` set while it runs."""
        try:
            mixer_task, mixer = engines.Mixer.create(self.config.current.devices.mixer, manager)
        except Exception:
            with self.logger.warn():
                self.logger.print("Failed to load mixer")
                self.logger.print(traceback.format_exc(), end="")
            # Bail out: `mixer` is unbound here, so falling through (as the
            # original code did) would raise NameError on the next line.
            return

        self.mixer = mixer

        try:
            with mixer_task:
                yield from mixer_task.join((yield))
        finally:
            self.mixer = None

    @dn.datanode
    def play_song(self, song, start):
        """Play one song on the mixer; `_current_bgm` is set for its duration."""
        with dn.create_task(lambda event: self.mixer.load_sound(song.path, event)) as task:
            yield from task.join((yield))
            node = dn.DataNode.wrap(task.result)

        self._current_bgm = song
        try:
            with self.mixer.play(node, start=start, volume=song.volume) as song_handler:
                yield
                while not song_handler.is_finalized():
                    yield
        finally:
            self._current_bgm = None

    @dn.datanode
    def load_bgm(self, manager):
        """Main loop: wait for queued actions and chain random songs."""
        self.mixer = None
        self._current_bgm = None

        while True:
            yield

            if self._action_queue.empty():
                continue

            # drain the queue, keeping only the latest request
            while not self._action_queue.empty():
                song, start = self._action_queue.get()
            if song is None:
                continue

            with self.load_mixer(manager) as mixer_task:
                while song is not None:
                    with self.play_song(song, start) as song_task:
                        while True:
                            try:
                                mixer_task.send(None)
                            except StopIteration:
                                # mixer died: stop playing entirely
                                song, start = None, None
                                break

                            try:
                                song_task.send(None)
                            except StopIteration:
                                # song finished: chain another random one
                                song, start = self.random_song(), None
                                break

                            if not self._action_queue.empty():
                                # a new request preempts the current song
                                while not self._action_queue.empty():
                                    song, start = self._action_queue.get()
                                break

                            yield

    def random_song(self):
        """Pick a random song, avoiding the one currently playing; None if none."""
        songs = self.beatmap_manager.get_songs()
        # the current song may have disappeared after a reload; guard the
        # remove so that case doesn't raise ValueError
        if self._current_bgm is not None and self._current_bgm in songs:
            songs.remove(self._current_bgm)
        return random.choice(songs) if songs else None

    def stop(self):
        """Request playback stop (thread-safe)."""
        self._action_queue.put((None, None))

    def play(self, song, start=None):
        """Request playback of `song`, optionally from `start` seconds (thread-safe)."""
        self._action_queue.put((song, start))
class BGMCommand:
    """User-facing subcommands controlling the background music."""

    def __init__(self, bgm_controller, beatmap_manager, logger):
        self.bgm_controller = bgm_controller
        self.beatmap_manager = beatmap_manager
        self.logger = logger

    @cmd.function_command
    def on(self):
        """Turn background music on, announcing what will play."""
        logger = self.logger

        if self.bgm_controller._current_bgm is not None:
            logger.print("now playing:")
            logger.print(self.bgm_controller._current_bgm.get_info(logger))
            return

        song = self.bgm_controller.random_song()
        if song is None:
            logger.print("There is no song in the folder yet!", prefix="data")
            return

        logger.print("will play:")
        logger.print(song.get_info(logger))
        self.bgm_controller.play(song)

    @cmd.function_command
    def off(self):
        """Turn background music off."""
        self.bgm_controller.stop()

    @cmd.function_command
    def skip(self):
        """Skip the current song and play another random one."""
        if self.bgm_controller._current_bgm is not None:
            song = self.bgm_controller.random_song()
            # random_song() excludes the current song, so with only one song
            # available it returns None; the original code then crashed on
            # song.get_info(...).  Mirror the guard used in on().
            if song is None:
                self.logger.print("There is no song in the folder yet!", prefix="data")
                return
            self.logger.print("will play:")
            self.logger.print(song.get_info(self.logger))
            self.bgm_controller.play(song)

    @cmd.function_command
    def play(self, beatmap, start: Optional[float] = None):
        """Play the song of a specific beatmap, optionally from `start` seconds."""
        logger = self.logger

        try:
            song = self.beatmap_manager.get_song(beatmap)
        except beatsheets.BeatmapParseError:
            with logger.warn():
                logger.print("Fail to read beatmap")
            return

        if song is None:
            with logger.warn():
                logger.print("This beatmap has no song")
            return

        logger.print("will play:")
        logger.print(song.get_info(logger))
        self.bgm_controller.play(song, start)

    @play.arg_parser("beatmap")
    def _play_beatmap_parser(self):
        # completion/validation for the `beatmap` argument of `play`
        return self.beatmap_manager.make_parser()

    @cmd.function_command
    def now_playing(self):
        """Show the currently playing song, if any."""
        current = self.bgm_controller._current_bgm
        if current is None:
            self.logger.print("no song")
        else:
            self.logger.print("now playing:")
            self.logger.print(current.get_info(self.logger))
/OASYS1_HALF_SRW-0.0.3-py3-none-any.whl/orangecontrib/srw/widgets/optical_elements/ow_srw_crl.py | import os
from PyQt5.QtWidgets import QMessageBox
from orangewidget import gui
from orangewidget.settings import Setting
from oasys.widgets import gui as oasysgui
from oasys.widgets import congruence
from oasys.util.oasys_util import TriggerOut
import oasys.util.oasys_util as OU
from oasys.util.oasys_objects import OasysThicknessErrorsData
from syned.widget.widget_decorator import WidgetDecorator
from wofrysrw.beamline.optical_elements.other.srw_crl import CRLShape, PlaneOfFocusing, SRWCRL
from orangecontrib.srw.util.srw_objects import SRWData
from orangecontrib.srw.widgets.gui.ow_srw_optical_element import OWSRWOpticalElement
from orangecontrib.srw.util.srw_util import get_absorption_parameters
class OWSRWCRL(OWSRWOpticalElement):
    """OASYS widget wrapping an SRW Compound Refractive Lens (CRL).

    Builds the GUI for the CRL settings (material, geometry, optional
    thickness-error profiles) and converts them into a wofrysrw `SRWCRL`
    element for wavefront propagation.
    """

    name = "CRL"
    description = "SRW: CRL"
    icon = "icons/crl.png"
    priority = 16

    # Persisted widget settings (Orange `Setting`s survive across sessions).
    plane_of_focusing = Setting(2)       # index into PlaneOfFocusing.items()
    material_data = Setting(0)           # 0: chemical formula, 1: explicit delta/attenuation
    material = Setting("Be")             # chemical formula (used when material_data == 0)
    refractive_index = Setting(1e-6)     # refractive-index decrement delta (material_data == 1)
    attenuation_length = Setting(1e-3)   # attenuation length [m] (material_data == 1)
    shape = Setting(0)                   # index into CRLShape.items()
    diameter = Setting(400)              # lens aperture [um]
    radius_of_curvature = Setting(50)    # [um]
    number_of_lenses = Setting(10)
    wall_thickness = Setting(30)         # [um]
    horizontal_center_coordinate = Setting(0.0)  # [m]
    vertical_center_coordinate = Setting(0.0)    # [m]
    void_center_coordinates = Setting("")        # "x1, y1, r1, x2, y2, r2, ..." [m]
    horizontal_points = Setting(1001)
    vertical_points = Setting(1001)
    has_thickness_error = Setting(0)     # 0: No, 1: Yes
    crl_error_profiles = Setting([])     # list of thickness-error profile file names
    crl_scaling_factor = Setting(1.0)    # multiplier applied to the error profiles

    inputs = [("SRWData", SRWData, "set_input"),
              ("Thickness Errors Data", OasysThicknessErrorsData, "setThicknessErrorProfiles"),
              ("Trigger", TriggerOut, "propagate_new_wavefront"),
              WidgetDecorator.syned_input_data()[0]]

    def __init__(self):
        # a CRL has no orientation angles, unlike mirrors/gratings
        super().__init__(has_orientation_angles=False)

    def draw_specific_box(self):
        """Build the CRL-specific GUI: main settings tab + thickness-error tab."""
        tabs_crl = gui.tabWidget(self.tab_bas)
        tab_bas = oasysgui.createTabPage(tabs_crl, "CRL")

        self.filter_box = oasysgui.widgetBox(tab_bas, "CRL Setting", addSpace=False, orientation="vertical")

        gui.comboBox(self.filter_box, self, "plane_of_focusing", label="Plane of Focusing",
                     labelWidth=220, items=PlaneOfFocusing.items(), sendSelectedValue=False, orientation="horizontal")

        gui.comboBox(self.filter_box, self, "material_data", label="Material Properties from", labelWidth=180,
                     items=["Chemical Formula", "Absorption Parameters"],
                     callback=self.set_MaterialData,
                     sendSelectedValue=False, orientation="horizontal")

        # two alternative sub-boxes, toggled by set_MaterialData()
        self.filter_box_1 = oasysgui.widgetBox(self.filter_box, "", addSpace=False, orientation="vertical", height=30, width=self.CONTROL_AREA_WIDTH-40)
        self.filter_box_2 = oasysgui.widgetBox(self.filter_box, "", addSpace=False, orientation="horizontal", height=30, width=self.CONTROL_AREA_WIDTH-40)

        oasysgui.lineEdit(self.filter_box_1, self, "material", "Chemical Formula", labelWidth=260, valueType=str, orientation="horizontal")
        oasysgui.lineEdit(self.filter_box_2, self, "refractive_index", "Refr. Index (\u03b4)", valueType=float, orientation="horizontal")
        # NOTE(review): "Lenght" is a typo in this user-visible label — fix separately
        oasysgui.lineEdit(self.filter_box_2, self, "attenuation_length", "Att. Lenght [m]", valueType=float, orientation="horizontal")

        self.set_MaterialData()

        gui.comboBox(self.filter_box, self, "shape", label="Shape",
                     labelWidth=220, items=CRLShape.items(), sendSelectedValue=False, orientation="horizontal")

        oasysgui.lineEdit(self.filter_box, self, "diameter", "Diameter [\u03bcm]", labelWidth=260, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(self.filter_box, self, "radius_of_curvature", "Radius of Curvature [\u03bcm]", labelWidth=260, valueType=float, orientation="horizontal")

        box = oasysgui.widgetBox(self.filter_box, "", addSpace=False, orientation="horizontal")

        oasysgui.lineEdit(box, self, "horizontal_points", "H Points", labelWidth=130, valueType=int, orientation="horizontal")
        oasysgui.lineEdit(box, self, "vertical_points", "V Points", labelWidth=90, valueType=int, orientation="horizontal")

        oasysgui.lineEdit(self.filter_box, self, "number_of_lenses", "Number of Lenses", labelWidth=260, valueType=int, orientation="horizontal")
        oasysgui.lineEdit(self.filter_box, self, "wall_thickness", "Wall Thickness [\u03bcm]", labelWidth=260, valueType=float, orientation="horizontal")

        box = oasysgui.widgetBox(self.filter_box, "", addSpace=False, orientation="horizontal")

        oasysgui.lineEdit(box, self, "horizontal_center_coordinate", "Center Coord. H [m]", labelWidth=130, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(box, self, "vertical_center_coordinate", "V [m]", labelWidth=90, valueType=float, orientation="horizontal")

        oasysgui.lineEdit(self.filter_box, self, "void_center_coordinates", "Void center coordinates [m] (x1, y1, r1, x2, y2, r2, ...)", labelWidth=350, valueType=str, orientation="vertical")

        gui.separator(self.filter_box)

        # thickness-error tab: file list shown only when has_thickness_error == 1
        tab_thick = oasysgui.createTabPage(tabs_crl, "Thickness Error")

        gui.comboBox(tab_thick, self, "has_thickness_error", label="Use Thickness Error Profile",
                     items=["No", "Yes"], labelWidth=300,
                     sendSelectedValue=False, orientation="horizontal", callback=self.set_ThicknessError)

        gui.separator(tab_thick)

        self.thickness_error_box_1 = oasysgui.widgetBox(tab_thick, "Thickness Error Files", addSpace=False, orientation="vertical", height=340, width=self.CONTROL_AREA_WIDTH - 30)
        self.thickness_error_box_2 = oasysgui.widgetBox(tab_thick, "", addSpace=False, orientation="vertical", height=340, width=self.CONTROL_AREA_WIDTH - 30)

        self.files_area = oasysgui.textArea(height=265)
        self.thickness_error_box_1.layout().addWidget(self.files_area)
        self.refresh_files_text_area()

        oasysgui.lineEdit(self.thickness_error_box_1, self, "crl_scaling_factor", "Thickness Error Scaling Factor", labelWidth=260, valueType=float, orientation="horizontal")

        self.set_ThicknessError()

    def refresh_files_text_area(self):
        """Mirror `crl_error_profiles` into the read-only text area, one per line."""
        text = ""
        for file in self.crl_error_profiles: text += file + "\n"
        self.files_area.setText(text)

    def setThicknessErrorProfiles(self, thickness_errors_data):
        """Input handler: accept thickness-error profile files and enable their use."""
        try:
            thickness_error_profile_data_files = thickness_errors_data.thickness_error_profile_data_files

            if not thickness_error_profile_data_files is None:
                self.crl_error_profiles = thickness_error_profile_data_files
                self.refresh_files_text_area()
                self.has_thickness_error = 1
                self.set_ThicknessError()
        except Exception as exception:
            QMessageBox.critical(self, "Error", exception.args[0], QMessageBox.Ok)

            if self.IS_DEVELOP: raise exception

    def set_ThicknessError(self):
        """Show the file-list box only when thickness errors are enabled."""
        self.thickness_error_box_2.setVisible(self.has_thickness_error == 0)
        self.thickness_error_box_1.setVisible(self.has_thickness_error == 1)

    def set_MaterialData(self):
        """Toggle between formula entry (0) and explicit delta/attenuation entry (1)."""
        self.filter_box_1.setVisible(self.material_data==0)
        self.filter_box_2.setVisible(self.material_data==1)

    def get_optical_element(self):
        """Build the wofrysrw SRWCRL element from the current widget settings.

        Material parameters are looked up from the chemical formula at the
        wavefront's photon energy, or taken verbatim from the widget fields.
        GUI lengths in micrometers are converted to meters here.
        """
        wavefront = self.input_srw_data.get_srw_wavefront()
        energy = wavefront.get_photon_energy()

        if self.material_data == 0:
            attenuation_length, delta = get_absorption_parameters(self.material, energy)

            print("Refractive Index (\u03b4) :" + str(delta) + "\n" + \
                  "Attenuation Length [m] :" + str(attenuation_length))
        else:
            delta = self.refractive_index
            attenuation_length = self.attenuation_length

        # +1 offsets: SRW enumerations are 1-based, the combo boxes 0-based
        return SRWCRL(name=self.oe_name,
                      plane_of_focusing=self.plane_of_focusing+1,
                      delta=delta,
                      attenuation_length=attenuation_length,
                      shape=self.shape+1,
                      horizontal_aperture_size=self.diameter*1e-6,
                      vertical_aperture_size=self.diameter*1e-6,
                      radius_of_curvature=self.radius_of_curvature*1e-6,
                      number_of_lenses=self.number_of_lenses,
                      wall_thickness=self.wall_thickness*1e-6,
                      horizontal_center_coordinate=self.horizontal_center_coordinate,
                      vertical_center_coordinate=self.vertical_center_coordinate,
                      void_center_coordinates=self.parse_void_center_coordinates(),
                      initial_photon_energy=energy,
                      final_photon_energy=energy,
                      horizontal_points=self.horizontal_points,
                      vertical_points=self.vertical_points,
                      thickness_error_profile_files=None if self.has_thickness_error==0 else self.crl_error_profiles,
                      scaling_factor=self.crl_scaling_factor)

    def parse_void_center_coordinates(self):
        """Parse the comma-separated void coordinates string into floats (or None)."""
        if self.void_center_coordinates.strip() == "":
            return None
        else:
            void_center_coordinates = []
            tokens = self.void_center_coordinates.strip().split(",")

            for token in tokens:
                void_center_coordinates.append(float(token))

            return void_center_coordinates

    def check_data(self):
        """Validate widget fields before running; raises via congruence on failure."""
        super().check_data()

        if self.material_data==0:
            self.material = congruence.checkEmptyString(self.material, "Chemical Formula")
        else:
            congruence.checkStrictlyPositiveNumber(self.refractive_index, "Refractive Index")
            congruence.checkStrictlyPositiveNumber(self.attenuation_length, "Attenuation Length")

        congruence.checkStrictlyPositiveNumber(self.diameter, "Diameter")
        congruence.checkStrictlyPositiveNumber(self.radius_of_curvature, "Radius of Curvature")
        congruence.checkStrictlyPositiveNumber(self.number_of_lenses, "Number of Lenses")
        congruence.checkStrictlyPositiveNumber(self.wall_thickness, "Wall Thickness")
        congruence.checkStrictlyPositiveNumber(self.horizontal_points, "Horizontal Points")
        congruence.checkStrictlyPositiveNumber(self.vertical_points, "Vertical Points")

    def receive_specific_syned_data(self, optical_element):
        # CRLs cannot be described by the Syned data model
        raise NotImplementedError("This element is not supported by Syned")
/Flask-CKEditor-0.4.6.tar.gz/Flask-CKEditor-0.4.6/flask_ckeditor/static/full/lang/mk.js | /*
Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/license
*/
CKEDITOR.lang['mk']={"editor":"Rich Text Editor","editorPanel":"Rich Text Editor panel","common":{"editorHelp":"Притисни ALT 0 за помош","browseServer":"Пребарај низ серверот","url":"URL","protocol":"Протокол","upload":"Прикачи","uploadSubmit":"Прикачи на сервер","image":"Слика","flash":"Flash","form":"Form","checkbox":"Checkbox","radio":"Radio Button","textField":"Поле за текст","textarea":"Големо поле за текст","hiddenField":"Скриено поле","button":"Button","select":"Selection Field","imageButton":"Копче-слика","notSet":"<not set>","id":"Id","name":"Name","langDir":"Насока на јазик","langDirLtr":"Лево кон десно","langDirRtl":"Десно кон лево","langCode":"Код на јазик","longDescr":"Long Description URL","cssClass":"Stylesheet Classes","advisoryTitle":"Advisory Title","cssStyle":"Стил","ok":"OK","cancel":"Cancel","close":"Close","preview":"Preview","resize":"Resize","generalTab":"Општо","advancedTab":"Advanced","validateNumberFailed":"This value is not a number.","confirmNewPage":"Any unsaved changes to this content will be lost. Are you sure you want to load new page?","confirmCancel":"You have changed some options. 
Are you sure you want to close the dialog window?","options":"Опции","target":"Target","targetNew":"Нов прозорец (_blank)","targetTop":"Најгорниот прозорец (_top)","targetSelf":"Истиот прозорец (_self)","targetParent":"Прозорец-родител (_parent)","langDirLTR":"Лево кон десно","langDirRTL":"Десно кон лево","styles":"Стил","cssClasses":"Stylesheet Classes","width":"Широчина","height":"Височина","align":"Alignment","left":"Лево","right":"Десно","center":"Во средина","justify":"Justify","alignLeft":"Align Left","alignRight":"Align Right","alignCenter":"Align Center","alignTop":"Горе","alignMiddle":"Средина","alignBottom":"Доле","alignNone":"Никое","invalidValue":"Невалидна вредност","invalidHeight":"Височината мора да биде број.","invalidWidth":"Широчината мора да биде број.","invalidLength":"Value specified for the \"%1\" field must be a positive number with or without a valid measurement unit (%2).","invalidCssLength":"Value specified for the \"%1\" field must be a positive number with or without a valid CSS measurement unit (px, %, in, cm, mm, em, ex, pt, or pc).","invalidHtmlLength":"Value specified for the \"%1\" field must be a positive number with or without a valid HTML measurement unit (px or %).","invalidInlineStyle":"Value specified for the inline style must consist of one or more tuples with the format of \"name : value\", separated by semi-colons.","cssLengthTooltip":"Enter a number for a value in pixels or a number with a valid CSS unit (px, %, in, cm, mm, em, ex, pt, or pc).","unavailable":"%1<span class=\"cke_accessibility\">, 
unavailable</span>","keyboard":{"8":"Backspace","13":"Enter","16":"Shift","17":"Ctrl","18":"Alt","32":"Space","35":"End","36":"Home","46":"Delete","112":"F1","113":"F2","114":"F3","115":"F4","116":"F5","117":"F6","118":"F7","119":"F8","120":"F9","121":"F10","122":"F11","123":"F12","124":"F13","125":"F14","126":"F15","127":"F16","128":"F17","129":"F18","130":"F19","131":"F20","132":"F21","133":"F22","134":"F23","135":"F24","224":"Command"},"keyboardShortcut":"Keyboard shortcut","optionDefault":"Default"},"about":{"copy":"Авторски права © $1. Сите права се задржани.","dlgTitle":"За CKEditor 4","moreInfo":"За информации околу лиценцата, ве молиме посетете го нашиот веб-сајт: "},"basicstyles":{"bold":"Здебелено","italic":"Накривено","strike":"Прецртано","subscript":"Долен индекс","superscript":"Горен индекс","underline":"Подвлечено"},"bidi":{"ltr":"Насока на текст: од лево кон десно","rtl":"Насока на текст: од десно кон лево"},"blockquote":{"toolbar":"Одвоен цитат"},"notification":{"closed":"Notification closed."},"toolbar":{"toolbarCollapse":"Collapse Toolbar","toolbarExpand":"Expand Toolbar","toolbarGroups":{"document":"Document","clipboard":"Clipboard/Undo","editing":"Editing","forms":"Forms","basicstyles":"Basic Styles","paragraph":"Paragraph","links":"Links","insert":"Insert","styles":"Styles","colors":"Colors","tools":"Tools"},"toolbars":"Editor toolbars"},"clipboard":{"copy":"Копирај (Copy)","copyError":"Опциите за безбедност на вашиот прелистувач не дозволуваат уредувачот автоматски да изврши копирање. Ве молиме употребете ја тастатурата. (Ctrl/Cmd+C)","cut":"Исечи (Cut)","cutError":"Опциите за безбедност на вашиот прелистувач не дозволуваат уредувачот автоматски да изврши сечење. Ве молиме употребете ја тастатурата. (Ctrl/Cmd+C)","paste":"Залепи (Paste)","pasteNotification":"Press %1 to paste. 
Your browser doesn‘t support pasting with the toolbar button or context menu option.","pasteArea":"Простор за залепување","pasteMsg":"Paste your content inside the area below and press OK."},"colorbutton":{"auto":"Automatic","bgColorTitle":"Background Color","colors":{"000":"Black","800000":"Maroon","8B4513":"Saddle Brown","2F4F4F":"Dark Slate Gray","008080":"Teal","000080":"Navy","4B0082":"Indigo","696969":"Dark Gray","B22222":"Fire Brick","A52A2A":"Brown","DAA520":"Golden Rod","006400":"Dark Green","40E0D0":"Turquoise","0000CD":"Medium Blue","800080":"Purple","808080":"Gray","F00":"Red","FF8C00":"Dark Orange","FFD700":"Gold","008000":"Green","0FF":"Cyan","00F":"Blue","EE82EE":"Violet","A9A9A9":"Dim Gray","FFA07A":"Light Salmon","FFA500":"Orange","FFFF00":"Yellow","00FF00":"Lime","AFEEEE":"Pale Turquoise","ADD8E6":"Light Blue","DDA0DD":"Plum","D3D3D3":"Light Grey","FFF0F5":"Lavender Blush","FAEBD7":"Antique White","FFFFE0":"Light Yellow","F0FFF0":"Honeydew","F0FFFF":"Azure","F0F8FF":"Alice Blue","E6E6FA":"Lavender","FFF":"White","1ABC9C":"Strong Cyan","2ECC71":"Emerald","3498DB":"Bright Blue","9B59B6":"Amethyst","4E5F70":"Grayish Blue","F1C40F":"Vivid Yellow","16A085":"Dark Cyan","27AE60":"Dark Emerald","2980B9":"Strong Blue","8E44AD":"Dark Violet","2C3E50":"Desaturated Blue","F39C12":"Orange","E67E22":"Carrot","E74C3C":"Pale Red","ECF0F1":"Bright Silver","95A5A6":"Light Grayish Cyan","DDD":"Light Gray","D35400":"Pumpkin","C0392B":"Strong Red","BDC3C7":"Silver","7F8C8D":"Grayish Cyan","999":"Dark Gray"},"more":"More Colors...","panelTitle":"Colors","textColorTitle":"Text Color"},"colordialog":{"clear":"Clear","highlight":"Highlight","options":"Color Options","selected":"Selected Color","title":"Select color"},"templates":{"button":"Templates","emptyListMsg":"(No templates defined)","insertOption":"Replace actual contents","options":"Template Options","selectPromptMsg":"Please select the template to open in the editor","title":"Content 
Templates"},"contextmenu":{"options":"Контекст-мени опции"},"copyformatting":{"label":"Copy Formatting","notification":{"copied":"Formatting copied","applied":"Formatting applied","canceled":"Formatting canceled","failed":"Formatting failed. You cannot apply styles without copying them first."}},"div":{"IdInputLabel":"Id","advisoryTitleInputLabel":"Advisory Title","cssClassInputLabel":"Stylesheet Classes","edit":"Edit Div","inlineStyleInputLabel":"Inline Style","langDirLTRLabel":"Лево кон десно","langDirLabel":"Насока на јазик","langDirRTLLabel":"Десно кон лево","languageCodeInputLabel":" Language Code","remove":"Remove Div","styleSelectLabel":"Стил","title":"Create Div Container","toolbar":"Create Div Container"},"elementspath":{"eleLabel":"Elements path","eleTitle":"%1 element"},"filetools":{"loadError":"Error occurred during file read.","networkError":"Network error occurred during file upload.","httpError404":"HTTP error occurred during file upload (404: File not found).","httpError403":"HTTP error occurred during file upload (403: Forbidden).","httpError":"HTTP error occurred during file upload (error status: %1).","noUrlError":"Upload URL is not defined.","responseError":"Incorrect server response."},"find":{"find":"Пронајди","findOptions":"Опции за пронаоѓање","findWhat":"Што барате:","matchCase":"Се совпаѓа голема/мала буква,","matchCyclic":"Пребарај циклично","matchWord":"Се совпаѓа цел збор","notFoundMsg":"Внесениот текст не беше пронајден.","replace":"Замени","replaceAll":"Замени ги сите","replaceSuccessMsg":"%1 случај/и беа заменети.","replaceWith":"Замени со:","title":"Пронајди и замени"},"fakeobjects":{"anchor":"Anchor","flash":"Flash Animation","hiddenfield":"Скриено поле","iframe":"IFrame","unknown":"Unknown Object"},"flash":{"access":"Script Access","accessAlways":"Always","accessNever":"Never","accessSameDomain":"Same domain","alignAbsBottom":"Abs Bottom","alignAbsMiddle":"Abs Middle","alignBaseline":"Baseline","alignTextTop":"Text 
Top","bgcolor":"Background color","chkFull":"Allow Fullscreen","chkLoop":"Loop","chkMenu":"Enable Flash Menu","chkPlay":"Auto Play","flashvars":"Variables for Flash","hSpace":"Хоризонтален простор","properties":"Flash Properties","propertiesTab":"Properties","quality":"Quality","qualityAutoHigh":"Auto High","qualityAutoLow":"Auto Low","qualityBest":"Best","qualityHigh":"High","qualityLow":"Low","qualityMedium":"Medium","scale":"Scale","scaleAll":"Show all","scaleFit":"Exact Fit","scaleNoBorder":"No Border","title":"Flash Properties","vSpace":"Вертикален простор","validateHSpace":"HSpace must be a number.","validateSrc":"URL must not be empty.","validateVSpace":"VSpace must be a number.","windowMode":"Window mode","windowModeOpaque":"Opaque","windowModeTransparent":"Transparent","windowModeWindow":"Window"},"font":{"fontSize":{"label":"Size","voiceLabel":"Font Size","panelTitle":"Font Size"},"label":"Font","panelTitle":"Font Name","voiceLabel":"Font"},"forms":{"button":{"title":"Button Properties","text":"Text (Value)","type":"Type","typeBtn":"Button","typeSbm":"Submit","typeRst":"Reset"},"checkboxAndRadio":{"checkboxTitle":"Checkbox Properties","radioTitle":"Radio Button Properties","value":"Value","selected":"Selected","required":"Required"},"form":{"title":"Form Properties","menu":"Form Properties","action":"Action","method":"Method","encoding":"Encoding"},"hidden":{"title":"Hidden Field Properties","name":"Name","value":"Value"},"select":{"title":"Selection Field Properties","selectInfo":"Select Info","opAvail":"Available Options","value":"Value","size":"Size","lines":"lines","chkMulti":"Allow multiple selections","required":"Required","opText":"Text","opValue":"Value","btnAdd":"Add","btnModify":"Modify","btnUp":"Up","btnDown":"Down","btnSetValue":"Set as selected value","btnDelete":"Delete"},"textarea":{"title":"Textarea Properties","cols":"Columns","rows":"Rows"},"textfield":{"title":"Text Field Properties","name":"Name","value":"Value","charWidth":"Character 
Width","maxChars":"Maximum Characters","required":"Required","type":"Type","typeText":"Text","typePass":"Password","typeEmail":"Email","typeSearch":"Search","typeTel":"Telephone Number","typeUrl":"URL"}},"format":{"label":"Format","panelTitle":"Paragraph Format","tag_address":"Address","tag_div":"Normal (DIV)","tag_h1":"Heading 1","tag_h2":"Heading 2","tag_h3":"Heading 3","tag_h4":"Heading 4","tag_h5":"Heading 5","tag_h6":"Heading 6","tag_p":"Normal","tag_pre":"Formatted"},"horizontalrule":{"toolbar":"Insert Horizontal Line"},"iframe":{"border":"Show frame border","noUrl":"Please type the iframe URL","scrolling":"Enable scrollbars","title":"IFrame Properties","toolbar":"IFrame"},"image":{"alt":"Алтернативен текст","border":"Раб","btnUpload":"Прикачи на сервер","button2Img":"Дали сакате да направите сликата-копче да биде само слика?","hSpace":"Хоризонтален простор","img2Button":"Дали сакате да ја претворите сликата во слика-копче?","infoTab":"Информации за сликата","linkTab":"Врска","lockRatio":"Зачувај пропорција","menu":"Својства на сликата","resetSize":"Ресетирај големина","title":"Својства на сликата","titleButton":"Својства на копче-сликата","upload":"Прикачи","urlMissing":"Недостасува URL-то на сликата.","vSpace":"Вертикален простор","validateBorder":"Работ мора да биде цел број.","validateHSpace":"Хор. простор мора да биде цел број.","validateVSpace":"Верт. 
простор мора да биде цел број."},"indent":{"indent":"Increase Indent","outdent":"Decrease Indent"},"smiley":{"options":"Smiley Options","title":"Insert a Smiley","toolbar":"Smiley"},"language":{"button":"Set language","remove":"Remove language"},"link":{"acccessKey":"Access Key","advanced":"Advanced","advisoryContentType":"Advisory Content Type","advisoryTitle":"Advisory Title","anchor":{"toolbar":"Anchor","menu":"Edit Anchor","title":"Anchor Properties","name":"Anchor Name","errorName":"Please type the anchor name","remove":"Remove Anchor"},"anchorId":"By Element Id","anchorName":"By Anchor Name","charset":"Linked Resource Charset","cssClasses":"Stylesheet Classes","download":"Force Download","displayText":"Display Text","emailAddress":"E-Mail Address","emailBody":"Message Body","emailSubject":"Message Subject","id":"Id","info":"Link Info","langCode":"Код на јазик","langDir":"Насока на јазик","langDirLTR":"Лево кон десно","langDirRTL":"Десно кон лево","menu":"Edit Link","name":"Name","noAnchors":"(No anchors available in the document)","noEmail":"Please type the e-mail address","noUrl":"Please type the link URL","noTel":"Please type the phone number","other":"<other>","phoneNumber":"Phone number","popupDependent":"Dependent (Netscape)","popupFeatures":"Popup Window Features","popupFullScreen":"Full Screen (IE)","popupLeft":"Left Position","popupLocationBar":"Location Bar","popupMenuBar":"Menu Bar","popupResizable":"Resizable","popupScrollBars":"Scroll Bars","popupStatusBar":"Status Bar","popupToolbar":"Toolbar","popupTop":"Top Position","rel":"Relationship","selectAnchor":"Select an Anchor","styles":"Стил","tabIndex":"Tab Index","target":"Target","targetFrame":"<frame>","targetFrameName":"Target Frame Name","targetPopup":"<popup window>","targetPopupName":"Popup Window Name","title":"Врска","toAnchor":"Link to anchor in the text","toEmail":"E-mail","toUrl":"URL","toPhone":"Phone","toolbar":"Врска","type":"Link 
Type","unlink":"Unlink","upload":"Прикачи"},"list":{"bulletedlist":"Insert/Remove Bulleted List","numberedlist":"Insert/Remove Numbered List"},"liststyle":{"bulletedTitle":"Bulleted List Properties","circle":"Circle","decimal":"Decimal (1, 2, 3, etc.)","disc":"Disc","lowerAlpha":"Lower Alpha (a, b, c, d, e, etc.)","lowerRoman":"Lower Roman (i, ii, iii, iv, v, etc.)","none":"None","notset":"<not set>","numberedTitle":"Numbered List Properties","square":"Square","start":"Start","type":"Type","upperAlpha":"Upper Alpha (A, B, C, D, E, etc.)","upperRoman":"Upper Roman (I, II, III, IV, V, etc.)","validateStartNumber":"List start number must be a whole number."},"magicline":{"title":"Insert paragraph here"},"maximize":{"maximize":"Maximize","minimize":"Minimize"},"newpage":{"toolbar":"New Page"},"pagebreak":{"alt":"Page Break","toolbar":"Insert Page Break for Printing"},"pastetext":{"button":"Paste as plain text","pasteNotification":"Press %1 to paste. Your browser doesn‘t support pasting with the toolbar button or context menu option.","title":"Paste as Plain Text"},"pastefromword":{"confirmCleanup":"The text you want to paste seems to be copied from Word. 
Do you want to clean it before pasting?","error":"It was not possible to clean up the pasted data due to an internal error","title":"Paste from Word","toolbar":"Paste from Word"},"preview":{"preview":"Preview"},"print":{"toolbar":"Print"},"removeformat":{"toolbar":"Remove Format"},"save":{"toolbar":"Save"},"selectall":{"toolbar":"Select All"},"showblocks":{"toolbar":"Show Blocks"},"sourcearea":{"toolbar":"Source"},"specialchar":{"options":"Special Character Options","title":"Select Special Character","toolbar":"Insert Special Character"},"scayt":{"btn_about":"About SCAYT","btn_dictionaries":"Dictionaries","btn_disable":"Disable SCAYT","btn_enable":"Enable SCAYT","btn_langs":"Languages","btn_options":"Options","text_title":"Spell Check As You Type"},"stylescombo":{"label":"Styles","panelTitle":"Formatting Styles","panelTitle1":"Block Styles","panelTitle2":"Inline Styles","panelTitle3":"Object Styles"},"table":{"border":"Border size","caption":"Caption","cell":{"menu":"Cell","insertBefore":"Insert Cell Before","insertAfter":"Insert Cell After","deleteCell":"Delete Cells","merge":"Merge Cells","mergeRight":"Merge Right","mergeDown":"Merge Down","splitHorizontal":"Split Cell Horizontally","splitVertical":"Split Cell Vertically","title":"Cell Properties","cellType":"Cell Type","rowSpan":"Rows Span","colSpan":"Columns Span","wordWrap":"Word Wrap","hAlign":"Horizontal Alignment","vAlign":"Vertical Alignment","alignBaseline":"Baseline","bgColor":"Background Color","borderColor":"Border Color","data":"Data","header":"Header","yes":"Yes","no":"No","invalidWidth":"Cell width must be a number.","invalidHeight":"Cell height must be a number.","invalidRowSpan":"Rows span must be a whole number.","invalidColSpan":"Columns span must be a whole number.","chooseColor":"Choose"},"cellPad":"Cell padding","cellSpace":"Cell spacing","column":{"menu":"Column","insertBefore":"Insert Column Before","insertAfter":"Insert Column After","deleteColumn":"Delete 
Columns"},"columns":"Columns","deleteTable":"Delete Table","headers":"Headers","headersBoth":"Both","headersColumn":"First column","headersNone":"None","headersRow":"First Row","heightUnit":"height unit","invalidBorder":"Border size must be a number.","invalidCellPadding":"Cell padding must be a positive number.","invalidCellSpacing":"Cell spacing must be a positive number.","invalidCols":"Number of columns must be a number greater than 0.","invalidHeight":"Table height must be a number.","invalidRows":"Number of rows must be a number greater than 0.","invalidWidth":"Table width must be a number.","menu":"Table Properties","row":{"menu":"Row","insertBefore":"Insert Row Before","insertAfter":"Insert Row After","deleteRow":"Delete Rows"},"rows":"Rows","summary":"Summary","title":"Table Properties","toolbar":"Table","widthPc":"percent","widthPx":"pixels","widthUnit":"width unit"},"undo":{"redo":"Redo","undo":"Undo"},"widget":{"move":"Click and drag to move","label":"%1 widget"},"uploadwidget":{"abort":"Upload aborted by the user.","doneOne":"File successfully uploaded.","doneMany":"Successfully uploaded %1 files.","uploadOne":"Uploading file ({percentage}%)...","uploadMany":"Uploading files, {current} of {max} done ({percentage}%)..."},"wsc":{"btnIgnore":"Ignore","btnIgnoreAll":"Ignore All","btnReplace":"Replace","btnReplaceAll":"Replace All","btnUndo":"Undo","changeTo":"Change to","errorLoading":"Error loading application service host: %s.","ieSpellDownload":"Spell checker not installed. Do you want to download it now?","manyChanges":"Spell check complete: %1 words changed","noChanges":"Spell check complete: No words changed","noMispell":"Spell check complete: No misspellings found","noSuggestions":"- No suggestions -","notAvailable":"Sorry, but service is unavailable now.","notInDic":"Not in dictionary","oneChange":"Spell check complete: One word changed","progress":"Spell check in progress...","title":"Spell Checker","toolbar":"Check Spelling"}}; | PypiClean |
/Firenado-0.9.0a2.tar.gz/Firenado-0.9.0a2/firenado/conf/__init__.py |
from cartola.config import load_yaml_file
import firenado.config as _config
import logging
import os
import sys
import tempfile
# Setting the root path of the firenado package.
# ``__file__`` points to ``firenado/conf/__init__.py`` (or the compiled
# ``.pyc``), so the package root is one directory above this module's
# directory.  For any other (unexpected) file name we keep the path as-is
# before stepping one level up.
_ABS_FILE = os.path.abspath(__file__)
if _ABS_FILE.endswith((".py", ".pyc")):
    ROOT = os.path.dirname(_ABS_FILE)
else:
    ROOT = _ABS_FILE
ROOT = os.path.abspath(os.path.join(ROOT, ".."))
# Getting configuration paths and files from the environment.  The base name
# (without extension) of every firenado config file defaults to "firenado".
FIRENADO_CONFIG_FILE = os.environ.get("FIRENADO_CONFIG_FILE", "firenado")
# Config files found on disk, in the order they will be applied
# (library, then system, then application).
stack = []
LIB_CONFIG_FILE = os.path.join(ROOT, "conf", FIRENADO_CONFIG_FILE)
# Application file
APP_ROOT_PATH = os.getcwd()
# If FIRENADO_CURRENT_APP_CONFIG_PATH is not set then fall back to the "conf"
# directory below the current working directory.
APP_CONFIG_PATH = os.getenv("FIRENADO_CURRENT_APP_CONFIG_PATH",
                            os.path.join(APP_ROOT_PATH, "conf"))
APP_CONFIG_FILE = os.path.join(APP_CONFIG_PATH, FIRENADO_CONFIG_FILE)
# If FIRENADO_SYS_CONFIG_PATH is not set then use the default system config
# path (/etc/firenado).
SYS_CONFIG_PATH = os.getenv("FIRENADO_SYS_CONFIG_PATH",
                            os.path.join(os.sep, "etc", "firenado"))
SYS_CONFIG_FILE = os.path.join(SYS_CONFIG_PATH, FIRENADO_CONFIG_FILE)
HAS_LIB_CONFIG_FILE = False
HAS_SYS_CONFIG_FILE = False
HAS_APP_CONFIG_FILE = False
config_file_extensions = ["yml", "yaml"]
# Resolve each config file to the first existing "<name>.yml" or
# "<name>.yaml" candidate; only the first match per level is used.
for extension in config_file_extensions:
    if not HAS_LIB_CONFIG_FILE:
        _candidate = "%s.%s" % (LIB_CONFIG_FILE, extension)
        if os.path.isfile(_candidate):
            HAS_LIB_CONFIG_FILE = True
            LIB_CONFIG_FILE = _candidate
            stack.append(LIB_CONFIG_FILE)
    if not HAS_SYS_CONFIG_FILE:
        _candidate = "%s.%s" % (SYS_CONFIG_FILE, extension)
        if os.path.isfile(_candidate):
            HAS_SYS_CONFIG_FILE = True
            SYS_CONFIG_FILE = _candidate
            stack.append(SYS_CONFIG_FILE)
    if not HAS_APP_CONFIG_FILE:
        _candidate = "%s.%s" % (APP_CONFIG_FILE, extension)
        if os.path.isfile(_candidate):
            HAS_APP_CONFIG_FILE = True
            APP_CONFIG_FILE = _candidate
            stack.append(APP_CONFIG_FILE)
# Tmp path variable
# TODO: Should I care about windows?
TMP_SYS_PATH = tempfile.gettempdir()
TMP_APP_PATH = TMP_SYS_PATH
# Setting firenado's default variables.  All the names below form the
# module-level configuration interface consumed by
# firenado.config.process_config via sys.modules[__name__]; do not rename.
apps = {}
# Application section
app = _config.get_app_defaults()
is_multi_app = False
current_app_name = os.environ.get("CURRENT_APP", None)
# Component section
components = {}
# Data section
data = {}
data['connectors'] = {}
data['sources'] = {}
# Logging default configuration
log = {}
log['format'] = None
log['level'] = logging.NOTSET
# Management section
management = {}
management['commands'] = []
# Session section
session = {}
# Default session hiccup time is 10 seconds
# This is the time the callback will hiccup if purge_limit is reached
session['callback_hiccup'] = 10
# Default session callback time is 2 minutes
# This is the time the application will scan for expired sessions
session['callback_time'] = 120
session['enabled'] = False
session['encoder'] = "pickle"
session['encoders'] = {}
session['file'] = {}
session['file']['path'] = ""
session['handlers'] = {}
session['id_generators'] = {}
# Default session life time is 30 minutes or 1800 seconds
# If set to 0 the session will not expire
session['life_time'] = 1800
session['name'] = "FIRENADOSESSID"
session['prefix'] = "firenado:session"
session['purge_limit'] = 500
session['redis'] = {}
session['redis']['data'] = {}
session['redis']['data']['source'] = ""
session['type'] = ""
taskio_conf = {}
# Apply the config files in precedence order: library < system < application.
if HAS_LIB_CONFIG_FILE:
    lib_config = load_yaml_file(LIB_CONFIG_FILE)
    _config.process_config(sys.modules[__name__], lib_config)
    # Loaded a second time on purpose: taskio_conf must be an independent
    # copy, untouched by process_config.
    taskio_conf = load_yaml_file(LIB_CONFIG_FILE)
if HAS_SYS_CONFIG_FILE:
    sys_config = load_yaml_file(SYS_CONFIG_FILE)
    _config.process_config(sys.modules[__name__], sys_config)
if HAS_APP_CONFIG_FILE:
    app_config = load_yaml_file(APP_CONFIG_FILE)
    _config.process_app_config(sys.modules[__name__], app_config)
/HydPy-5.0.1-cp38-cp38-win_amd64.whl/hydpy/models/wland/wland_model.py | # import...
# ...from HydPy
from hydpy.core import modeltools
from hydpy.auxs import quadtools
from hydpy.auxs import roottools
from hydpy.cythons import modelutils
from hydpy.cythons.autogen import smoothutils
from hydpy.models.wland import wland_control
from hydpy.models.wland import wland_derived
from hydpy.models.wland import wland_fixed
from hydpy.models.wland import wland_solver
from hydpy.models.wland import wland_inputs
from hydpy.models.wland import wland_fluxes
from hydpy.models.wland import wland_states
from hydpy.models.wland import wland_aides
from hydpy.models.wland import wland_outlets
from hydpy.models.wland.wland_constants import SEALED
class Calc_FXS_V1(modeltools.Method):
    r"""Query the current surface water supply/extraction.
    Basic equation:
    .. math::
        FXS_{fluxes} = \begin{cases}
        0 &|\ FXS_{inputs} = 0
        \\
        \frac{FXS_{inputs}}{ASR} &|\ FXS_{inputs} \neq 0 \land ASR > 0
        \\
        inf &|\ FXS_{inputs} \neq 0 \land ASR = 0
        \end{cases}
    Examples:
        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> derived.asr(0.5)
        >>> inputs.fxs = 2.0
        >>> model.calc_fxs_v1()
        >>> fluxes.fxs
        fxs(4.0)
        >>> derived.asr(0.0)
        >>> model.calc_fxs_v1()
        >>> fluxes.fxs
        fxs(inf)
        >>> inputs.fxs = 0.0
        >>> model.calc_fxs_v1()
        >>> fluxes.fxs
        fxs(0.0)
    """
    DERIVEDPARAMETERS = (wland_derived.ASR,)
    REQUIREDSEQUENCES = (wland_inputs.FXS,)
    RESULTSEQUENCES = (wland_fluxes.FXS,)
    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        der = model.parameters.derived.fastaccess
        inp = model.sequences.inputs.fastaccess
        flu = model.sequences.fluxes.fastaccess
        if inp.fxs == 0.0:
            flu.fxs = 0.0
        elif der.asr > 0.0:
            # Rescale the basin-wide flux to the relative surface water area.
            flu.fxs = inp.fxs / der.asr
        else:
            # A non-zero flux with no surface water area cannot be rescaled.
            flu.fxs = modelutils.inf
class Calc_FXG_V1(modeltools.Method):
    r"""Query the current seepage/extraction.
    Basic equation:
    .. math::
        FXG_{fluxes} = \begin{cases}
        0 &|\ FXG_{inputs} = 0
        \\
        \frac{FXG_{inputs}}{AGR} &|\ FXG_{inputs} \neq 0 \land AGR > 0
        \\
        inf &|\ FXG_{inputs} \neq 0 \land AGR = 0
        \end{cases}
    Examples:
        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> derived.alr(0.5)
        >>> derived.agr(0.8)
        >>> inputs.fxg = 2.0
        >>> model.calc_fxg_v1()
        >>> fluxes.fxg
        fxg(5.0)
        >>> derived.agr(0.0)
        >>> model.calc_fxg_v1()
        >>> fluxes.fxg
        fxg(inf)
        >>> inputs.fxg = 0.0
        >>> model.calc_fxg_v1()
        >>> fluxes.fxg
        fxg(0.0)
    """
    DERIVEDPARAMETERS = (
        wland_derived.ALR,
        wland_derived.AGR,
    )
    REQUIREDSEQUENCES = (wland_inputs.FXG,)
    RESULTSEQUENCES = (wland_fluxes.FXG,)
    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        der = model.parameters.derived.fastaccess
        inp = model.sequences.inputs.fastaccess
        flu = model.sequences.fluxes.fastaccess
        if inp.fxg == 0.0:
            flu.fxg = 0.0
        else:
            # Relative groundwater area: land fraction times its groundwater
            # sub-fraction.
            d_ra = der.alr * der.agr
            if d_ra > 0.0:
                flu.fxg = inp.fxg / d_ra
            else:
                # Non-zero seepage with no groundwater area available.
                flu.fxg = modelutils.inf
class Calc_PC_V1(modeltools.Method):
    r"""Calculate the corrected precipitation.
    Basic equation:
    :math:`PC = CP \cdot P`
    Examples:
        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> cp(1.2)
        >>> inputs.p = 2.0
        >>> model.calc_pc_v1()
        >>> fluxes.pc
        pc(2.4)
    """
    CONTROLPARAMETERS = (wland_control.CP,)
    REQUIREDSEQUENCES = (wland_inputs.P,)
    RESULTSEQUENCES = (wland_fluxes.PC,)
    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        inp = model.sequences.inputs.fastaccess
        flu = model.sequences.fluxes.fastaccess
        # Apply the (dimensionless) precipitation correction factor.
        flu.pc = con.cp * inp.p
class Calc_PETL_V1(modeltools.Method):
    r"""Adjust the potential evapotranspiration of the land areas.
    Basic equation:
    :math:`PETL = CETP \cdot CPETL \cdot PET`
    Examples:
        >>> from hydpy import pub, UnitTest
        >>> pub.timegrids = '2000-03-30', '2000-04-03', '1d'
        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> nu(2)
        >>> lt(FIELD, DECIDIOUS)
        >>> cpet(0.8)
        >>> cpetl.field_mar = 1.25
        >>> cpetl.field_apr = 1.5
        >>> cpetl.decidious_mar = 1.75
        >>> cpetl.decidious_apr = 2.0
        >>> derived.moy.update()
        >>> inputs.pet = 2.0
        >>> model.idx_sim = pub.timegrids.init['2000-03-31']
        >>> model.calc_petl_v1()
        >>> fluxes.petl
        petl(2.0, 2.8)
        >>> model.idx_sim = pub.timegrids.init['2000-04-01']
        >>> model.calc_petl_v1()
        >>> fluxes.petl
        petl(2.4, 3.2)
    .. testsetup::
        >>> del pub.timegrids
    """
    CONTROLPARAMETERS = (
        wland_control.NU,
        wland_control.LT,
        wland_control.CPET,
        wland_control.CPETL,
    )
    DERIVEDPARAMETERS = (wland_derived.MOY,)
    REQUIREDSEQUENCES = (wland_inputs.PET,)
    RESULTSEQUENCES = (wland_fluxes.PETL,)
    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        der = model.parameters.derived.fastaccess
        inp = model.sequences.inputs.fastaccess
        flu = model.sequences.fluxes.fastaccess
        for k in range(con.nu):
            # Land-use- and month-specific correction factor; the land-use
            # row index is offset by the SEALED constant.
            d_cpetl = con.cpetl[con.lt[k] - SEALED, der.moy[model.idx_sim]]
            flu.petl[k] = con.cpet * d_cpetl * inp.pet
class Calc_PES_V1(modeltools.Method):
    r"""Adapt the potential evaporation for the surface water area.
    Basic equation:
    :math:`PES = CETP \cdot CPES \cdot PET`
    Examples:
        >>> from hydpy import pub, UnitTest
        >>> pub.timegrids = '2000-03-30', '2000-04-03', '1d'
        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> cpet(0.8)
        >>> cpes.mar = 1.25
        >>> cpes.apr = 1.5
        >>> derived.moy.update()
        >>> inputs.pet = 2.0
        >>> model.idx_sim = pub.timegrids.init['2000-03-31']
        >>> model.calc_pes_v1()
        >>> fluxes.pes
        pes(2.0)
        >>> model.idx_sim = pub.timegrids.init['2000-04-01']
        >>> model.calc_pes_v1()
        >>> fluxes.pes
        pes(2.4)
    .. testsetup::
        >>> del pub.timegrids
    """
    CONTROLPARAMETERS = (
        wland_control.CPET,
        wland_control.CPES,
    )
    DERIVEDPARAMETERS = (wland_derived.MOY,)
    REQUIREDSEQUENCES = (wland_inputs.PET,)
    RESULTSEQUENCES = (wland_fluxes.PES,)
    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        der = model.parameters.derived.fastaccess
        inp = model.sequences.inputs.fastaccess
        flu = model.sequences.fluxes.fastaccess
        # Month-specific correction factor for open water evaporation.
        d_cpes = con.cpes[der.moy[model.idx_sim]]
        flu.pes = con.cpet * d_cpes * inp.pet
class Calc_TF_V1(modeltools.Method):
    r"""Calculate the total amount of throughfall.
    Basic equation (discontinuous):
    .. math::
        TF = \begin{cases}
        P &|\ IC > IT
        \\
        0 &|\ IC < IT
        \end{cases}
    Examples:
        >>> from hydpy import pub, UnitTest
        >>> pub.timegrids = '2000-03-30', '2000-04-03', '1d'
        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> nu(1)
        >>> lt(FIELD)
        >>> ih(0.2)
        >>> lai.field_mar = 5.0
        >>> lai.field_apr = 10.0
        >>> derived.moy.update()
        >>> fluxes.pc = 5.0
        >>> test = UnitTest(
        ...     model=model,
        ...     method=model.calc_tf_v1,
        ...     last_example=6,
        ...     parseqs=(states.ic, fluxes.tf),
        ... )
        >>> test.nexts.ic = -4.0, 0.0, 1.0, 2.0, 3.0, 7.0
        Without smoothing:
        >>> sh(0.0)
        >>> derived.rh1.update()
        >>> model.idx_sim = pub.timegrids.init['2000-03-31']
        >>> test()
        | ex. | ic | tf |
        --------------------
        | 1 | -4.0 | 0.0 |
        | 2 | 0.0 | 0.0 |
        | 3 | 1.0 | 2.5 |
        | 4 | 2.0 | 5.0 |
        | 5 | 3.0 | 5.0 |
        | 6 | 7.0 | 5.0 |
        With smoothing:
        >>> sh(1.0)
        >>> derived.rh1.update()
        >>> model.idx_sim = pub.timegrids.init['2000-04-01']
        >>> test()
        | ex. | ic | tf |
        ------------------------
        | 1 | -4.0 | 0.0 |
        | 2 | 0.0 | 0.00051 |
        | 3 | 1.0 | 0.05 |
        | 4 | 2.0 | 2.5 |
        | 5 | 3.0 | 4.95 |
        | 6 | 7.0 | 5.0 |
    .. testsetup::
        >>> del pub.timegrids
    """
    CONTROLPARAMETERS = (
        wland_control.NU,
        wland_control.LT,
        wland_control.LAI,
        wland_control.IH,
    )
    DERIVEDPARAMETERS = (
        wland_derived.MOY,
        wland_derived.RH1,
    )
    REQUIREDSEQUENCES = (
        wland_fluxes.PC,
        wland_states.IC,
    )
    RESULTSEQUENCES = (wland_fluxes.TF,)
    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        der = model.parameters.derived.fastaccess
        flu = model.sequences.fluxes.fastaccess
        sta = model.sequences.states.fastaccess
        for k in range(con.nu):
            # Leaf area index for the hydrological response unit's land-use
            # type and the current month.
            d_lai = con.lai[con.lt[k] - SEALED, der.moy[model.idx_sim]]
            # Smoothed step function: throughfall starts once the
            # interception storage exceeds its LAI-scaled capacity.
            flu.tf[k] = flu.pc * smoothutils.smooth_logistic1(
                sta.ic[k] - con.ih * d_lai, der.rh1
            )
class Calc_EI_V1(modeltools.Method):
    r"""Calculate the interception evaporation.
    Basic equation (discontinuous):
    .. math::
        EI = \begin{cases}
        PETL &|\ IC > 0
        \\
        0 &|\ IC < 0
        \end{cases}
    Examples:
        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> nu(1)
        >>> fluxes.petl = 5.0
        >>> from hydpy import UnitTest
        >>> test = UnitTest(
        ...     model=model,
        ...     method=model.calc_ei_v1,
        ...     last_example=9,
        ...     parseqs=(states.ic, fluxes.ei)
        ... )
        >>> test.nexts.ic = -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0
        Without smoothing:
        >>> sh(0.0)
        >>> derived.rh1.update()
        >>> test()
        | ex. | ic | ei |
        --------------------
        | 1 | -4.0 | 0.0 |
        | 2 | -3.0 | 0.0 |
        | 3 | -2.0 | 0.0 |
        | 4 | -1.0 | 0.0 |
        | 5 | 0.0 | 2.5 |
        | 6 | 1.0 | 5.0 |
        | 7 | 2.0 | 5.0 |
        | 8 | 3.0 | 5.0 |
        | 9 | 4.0 | 5.0 |
        With smoothing:
        >>> sh(1.0)
        >>> derived.rh1.update()
        >>> test()
        | ex. | ic | ei |
        -------------------------
        | 1 | -4.0 | 0.0 |
        | 2 | -3.0 | 0.000005 |
        | 3 | -2.0 | 0.00051 |
        | 4 | -1.0 | 0.05 |
        | 5 | 0.0 | 2.5 |
        | 6 | 1.0 | 4.95 |
        | 7 | 2.0 | 4.99949 |
        | 8 | 3.0 | 4.999995 |
        | 9 | 4.0 | 5.0 |
    """
    CONTROLPARAMETERS = (wland_control.NU,)
    # RH1 is a derived parameter this method only reads (``der.rh1``); it was
    # previously misclassified as a result sequence, which is reserved for
    # sequences the method computes (here: EI).
    DERIVEDPARAMETERS = (wland_derived.RH1,)
    REQUIREDSEQUENCES = (
        wland_fluxes.PETL,
        wland_states.IC,
    )
    RESULTSEQUENCES = (wland_fluxes.EI,)
    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        der = model.parameters.derived.fastaccess
        flu = model.sequences.fluxes.fastaccess
        sta = model.sequences.states.fastaccess
        for k in range(con.nu):
            # Smoothed step function: evaporation approaches the potential
            # value as soon as the interception storage holds water.
            flu.ei[k] = flu.petl[k] * (smoothutils.smooth_logistic1(sta.ic[k], der.rh1))
class Calc_FR_V1(modeltools.Method):
    r"""Determine the fraction between rainfall and total precipitation.
    Basic equation:
    :math:`FR = \frac{T- \left( TT - TI / 2 \right)}{TI}`
    Restriction:
    :math:`0 \leq FR \leq 1`
    Examples:
        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> tt(1.0)
        >>> ti(4.0)
        >>> from hydpy import UnitTest
        >>> test = UnitTest(
        ...     model=model,
        ...     method=model.calc_fr_v1,
        ...     last_example=9,
        ...     parseqs=(inputs.t, aides.fr)
        ... )
        >>> test.nexts.t = -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0
        >>> test()
        | ex. | t | fr |
        ---------------------
        | 1 | -3.0 | 0.0 |
        | 2 | -2.0 | 0.0 |
        | 3 | -1.0 | 0.0 |
        | 4 | 0.0 | 0.25 |
        | 5 | 1.0 | 0.5 |
        | 6 | 2.0 | 0.75 |
        | 7 | 3.0 | 1.0 |
        | 8 | 4.0 | 1.0 |
        | 9 | 5.0 | 1.0 |
    """
    CONTROLPARAMETERS = (
        wland_control.TT,
        wland_control.TI,
    )
    REQUIREDSEQUENCES = (wland_inputs.T,)
    RESULTSEQUENCES = (wland_aides.FR,)
    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        inp = model.sequences.inputs.fastaccess
        aid = model.sequences.aides.fastaccess
        # Linear interpolation within the temperature interval TT +/- TI/2,
        # clipped to pure rain (1.0) above and pure snow (0.0) below it.
        if inp.t >= (con.tt + con.ti / 2.0):
            aid.fr = 1.0
        elif inp.t <= (con.tt - con.ti / 2.0):
            aid.fr = 0.0
        else:
            aid.fr = (inp.t - (con.tt - con.ti / 2.0)) / con.ti
class Calc_RF_V1(modeltools.Method):
    r"""Calculate the liquid amount of throughfall (rainfall).
    Basic equation:
    :math:`RF = FR \cdot TF`
    Example:
        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> nu(1)
        >>> fluxes.tf = 2.0
        >>> aides.fr = 0.8
        >>> model.calc_rf_v1()
        >>> fluxes.rf
        rf(1.6)
    """
    CONTROLPARAMETERS = (wland_control.NU,)
    REQUIREDSEQUENCES = (
        wland_fluxes.TF,
        wland_aides.FR,
    )
    RESULTSEQUENCES = (wland_fluxes.RF,)
    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        flu = model.sequences.fluxes.fastaccess
        aid = model.sequences.aides.fastaccess
        for k in range(con.nu):
            # The liquid share of throughfall is the rain fraction FR.
            flu.rf[k] = aid.fr * flu.tf[k]
class Calc_SF_V1(modeltools.Method):
    r"""Calculate the frozen amount of throughfall (snowfall).
    Basic equation:
    :math:`SF = (1-FR) \cdot TF`
    Example:
        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> nu(1)
        >>> fluxes.tf = 2.0
        >>> aides.fr = 0.8
        >>> model.calc_sf_v1()
        >>> fluxes.sf
        sf(0.4)
    """
    CONTROLPARAMETERS = (wland_control.NU,)
    REQUIREDSEQUENCES = (
        wland_fluxes.TF,
        wland_aides.FR,
    )
    RESULTSEQUENCES = (wland_fluxes.SF,)
    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        flu = model.sequences.fluxes.fastaccess
        aid = model.sequences.aides.fastaccess
        for k in range(con.nu):
            # The frozen share of throughfall complements the rain fraction.
            flu.sf[k] = (1.0 - aid.fr) * flu.tf[k]
class Calc_PM_V1(modeltools.Method):
    r"""Calculate the potential snowmelt.
    Basic equation (discontinuous):
    :math:`PM = max \left( DDF \cdot (T - DDT), 0 \right)`
    Examples:
        >>> from hydpy.models.wland import *
        >>> simulationstep('12h')
        >>> parameterstep('1d')
        >>> nu(1)
        >>> ddf(4.0)
        >>> ddt(1.0)
        >>> from hydpy import UnitTest
        >>> test = UnitTest(
        ...     model=model,
        ...     method=model.calc_pm_v1,
        ...     last_example=11,
        ...     parseqs=(inputs.t, fluxes.pm)
        ... )
        >>> test.nexts.t = -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0
        Without smoothing:
        >>> st(0.0)
        >>> derived.rt2.update()
        >>> test()
        | ex. | t | pm |
        ---------------------
        | 1 | -4.0 | 0.0 |
        | 2 | -3.0 | 0.0 |
        | 3 | -2.0 | 0.0 |
        | 4 | -1.0 | 0.0 |
        | 5 | 0.0 | 0.0 |
        | 6 | 1.0 | 0.0 |
        | 7 | 2.0 | 2.0 |
        | 8 | 3.0 | 4.0 |
        | 9 | 4.0 | 6.0 |
        | 10 | 5.0 | 8.0 |
        | 11 | 6.0 | 10.0 |
        With smoothing:
        >>> st(1.0)
        >>> derived.rt2.update()
        >>> test()
        | ex. | t | pm |
        -------------------------
        | 1 | -4.0 | 0.0 |
        | 2 | -3.0 | 0.000001 |
        | 3 | -2.0 | 0.000024 |
        | 4 | -1.0 | 0.000697 |
        | 5 | 0.0 | 0.02 |
        | 6 | 1.0 | 0.411048 |
        | 7 | 2.0 | 2.02 |
        | 8 | 3.0 | 4.000697 |
        | 9 | 4.0 | 6.000024 |
        | 10 | 5.0 | 8.000001 |
        | 11 | 6.0 | 10.0 |
    """
    CONTROLPARAMETERS = (
        wland_control.NU,
        wland_control.DDF,
        wland_control.DDT,
    )
    DERIVEDPARAMETERS = (wland_derived.RT2,)
    REQUIREDSEQUENCES = (wland_inputs.T,)
    RESULTSEQUENCES = (wland_fluxes.PM,)
    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        der = model.parameters.derived.fastaccess
        inp = model.sequences.inputs.fastaccess
        flu = model.sequences.fluxes.fastaccess
        for k in range(con.nu):
            # Degree-day melt with a smoothed ramp around the threshold
            # temperature DDT (smooth_logistic2 regularises max(x, 0)).
            flu.pm[k] = con.ddf[k] * smoothutils.smooth_logistic2(
                inp.t - con.ddt, der.rt2
            )
class Calc_AM_V1(modeltools.Method):
    r"""Calculate the actual snowmelt.
    Basic equation (discontinuous):
    .. math::
        AM = \begin{cases}
        PM &|\ SP > 0
        \\
        0 &|\ SP < 0
        \end{cases}
    Examples:
        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> nu(1)
        >>> fluxes.pm = 2.0
        >>> from hydpy import UnitTest
        >>> test = UnitTest(
        ...     model=model,
        ...     method=model.calc_am_v1,
        ...     last_example=9,
        ...     parseqs=(states.sp, fluxes.am)
        ... )
        >>> test.nexts.sp = -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0
        Without smoothing:
        >>> sh(0.0)
        >>> derived.rh1.update()
        >>> test()
        | ex. | sp | am |
        --------------------
        | 1 | -4.0 | 0.0 |
        | 2 | -3.0 | 0.0 |
        | 3 | -2.0 | 0.0 |
        | 4 | -1.0 | 0.0 |
        | 5 | 0.0 | 1.0 |
        | 6 | 1.0 | 2.0 |
        | 7 | 2.0 | 2.0 |
        | 8 | 3.0 | 2.0 |
        | 9 | 4.0 | 2.0 |
        With smoothing:
        >>> sh(1.0)
        >>> derived.rh1.update()
        >>> test()
        | ex. | sp | am |
        -------------------------
        | 1 | -4.0 | 0.0 |
        | 2 | -3.0 | 0.000002 |
        | 3 | -2.0 | 0.000204 |
        | 4 | -1.0 | 0.02 |
        | 5 | 0.0 | 1.0 |
        | 6 | 1.0 | 1.98 |
        | 7 | 2.0 | 1.999796 |
        | 8 | 3.0 | 1.999998 |
        | 9 | 4.0 | 2.0 |
    """
    CONTROLPARAMETERS = (wland_control.NU,)
    DERIVEDPARAMETERS = (wland_derived.RH1,)
    REQUIREDSEQUENCES = (
        wland_fluxes.PM,
        wland_states.SP,
    )
    RESULTSEQUENCES = (wland_fluxes.AM,)
    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        der = model.parameters.derived.fastaccess
        flu = model.sequences.fluxes.fastaccess
        sta = model.sequences.states.fastaccess
        for k in range(con.nu):
            # Potential melt is realised only while snow is available
            # (smoothed step function on the snow pack state).
            flu.am[k] = flu.pm[k] * smoothutils.smooth_logistic1(sta.sp[k], der.rh1)
class Calc_PS_V1(modeltools.Method):
    r"""Calculate the precipitation entering the surface water reservoir.
    Basic equation:
    :math:`PS = PC`
    Example:
        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> fluxes.pc = 3.0
        >>> model.calc_ps_v1()
        >>> fluxes.ps
        ps(3.0)
    """
    REQUIREDSEQUENCES = (wland_fluxes.PC,)
    RESULTSEQUENCES = (wland_fluxes.PS,)
    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        flu = model.sequences.fluxes.fastaccess
        # Surface water receives the corrected precipitation unmodified.
        flu.ps = flu.pc
class Calc_W_V1(modeltools.Method):
    r"""Calculate the wetness index.
    Basic equation:
    :math:`W = cos \left(
    \frac{max(min(DV, CW), 0) \cdot Pi}{CW} \right) \cdot \frac{1}{2} + \frac{1}{2}`
    Examples:
        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> cw(200.0)
        >>> from hydpy import UnitTest
        >>> test = UnitTest(
        ...     model=model,
        ...     method=model.calc_w_v1,
        ...     last_example=11,
        ...     parseqs=(states.dv, aides.w)
        ... )
        >>> test.nexts.dv = (
        ...     -50.0, -5.0, 0.0, 5.0, 50.0, 100.0, 150.0, 195.0, 200.0, 205.0, 250.0)
        >>> test()
        | ex. | dv | w |
        --------------------------
        | 1 | -50.0 | 1.0 |
        | 2 | -5.0 | 1.0 |
        | 3 | 0.0 | 1.0 |
        | 4 | 5.0 | 0.998459 |
        | 5 | 50.0 | 0.853553 |
        | 6 | 100.0 | 0.5 |
        | 7 | 150.0 | 0.146447 |
        | 8 | 195.0 | 0.001541 |
        | 9 | 200.0 | 0.0 |
        | 10 | 205.0 | 0.0 |
        | 11 | 250.0 | 0.0 |
    """
    CONTROLPARAMETERS = (wland_control.CW,)
    FIXEDPARAMETERS = (wland_fixed.Pi,)
    REQUIREDSEQUENCES = (wland_states.DV,)
    RESULTSEQUENCES = (wland_aides.W,)
    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        fix = model.parameters.fixed.fastaccess
        sta = model.sequences.states.fastaccess
        aid = model.sequences.aides.fastaccess
        # Cosine ramp from 1 (deficit DV <= 0, fully wet) down to 0
        # (DV >= CW, fully dry); DV is clipped to the [0, CW] interval.
        aid.w = 0.5 + 0.5 * modelutils.cos(
            max(min(sta.dv, con.cw), 0.0) * fix.pi / con.cw
        )
class Calc_PV_V1(modeltools.Method):
    r"""Calculate the rainfall (and snowmelt) entering the vadose zone.
    Basic equation:
    .. math::
        PV = \Sigma \left ( \frac{AUR}{AGR} \cdot (RF + AM) \cdot \begin{cases}
        0 &|\ LT = SEALED
        \\
        1-W &|\ LT \neq SEALED
        \end{cases} \right )
    Example:
        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> nu(3)
        >>> lt(FIELD, SOIL, SEALED)
        >>> aur(0.7, 0.2, 0.1)
        >>> derived.agr.update()
        >>> fluxes.rf = 3.0, 2.0, 1.0
        >>> fluxes.am = 1.0, 2.0, 3.0
        >>> aides.w = 0.75
        >>> model.calc_pv_v1()
        >>> fluxes.pv
        pv(1.0)
    """
    CONTROLPARAMETERS = (
        wland_control.NU,
        wland_control.LT,
        wland_control.AUR,
    )
    DERIVEDPARAMETERS = (wland_derived.AGR,)
    REQUIREDSEQUENCES = (
        wland_fluxes.RF,
        wland_fluxes.AM,
        wland_aides.W,
    )
    RESULTSEQUENCES = (wland_fluxes.PV,)
    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        der = model.parameters.derived.fastaccess
        flu = model.sequences.fluxes.fastaccess
        aid = model.sequences.aides.fastaccess
        flu.pv = 0.0
        for k in range(con.nu):
            # Sealed units contribute nothing to the vadose zone; the others
            # pass the (1 - W) share, area-weighted relative to the
            # groundwater-affected land area.
            if con.lt[k] != SEALED:
                flu.pv += (1.0 - aid.w) * con.aur[k] / der.agr * (flu.rf[k] + flu.am[k])
class Calc_PQ_V1(modeltools.Method):
    r"""Calculate the rainfall (and snowmelt) entering the quickflow reservoir.
    Basic equation:
    .. math::
        PQ = \Sigma \left( AUR \cdot (RF + AM) \cdot \begin{cases}
        1 &|\ LT = SEALED
        \\
        W &|\ LT \neq SEALED
        \end{cases} \right)
    Example:
        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> nu(3)
        >>> lt(FIELD, SOIL, SEALED)
        >>> aur(0.6, 0.3, 0.1)
        >>> fluxes.rf = 3.0, 2.0, 1.0
        >>> fluxes.am = 1.0, 2.0, 2.0
        >>> aides.w = 0.75
        >>> model.calc_pq_v1()
        >>> fluxes.pq
        pq(3.0)
    """
    CONTROLPARAMETERS = (
        wland_control.NU,
        wland_control.LT,
        wland_control.AUR,
    )
    REQUIREDSEQUENCES = (
        wland_fluxes.RF,
        wland_fluxes.AM,
        wland_aides.W,
    )
    RESULTSEQUENCES = (wland_fluxes.PQ,)
    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        flu = model.sequences.fluxes.fastaccess
        aid = model.sequences.aides.fastaccess
        flu.pq = 0.0
        for k in range(con.nu):
            d_pq = con.aur[k] * (flu.rf[k] + flu.am[k])
            # Sealed units route everything to quickflow; the others only
            # their wetness-index share W.
            if con.lt[k] != SEALED:
                d_pq *= aid.w
            flu.pq += d_pq
class Calc_Beta_V1(modeltools.Method):
    r"""Calculate the evapotranspiration reduction factor.
    Basic equations:
    :math:`Beta = \frac{1 - x}{1 + x} \cdot \frac{1}{2} + \frac{1}{2}`
    :math:`x = exp \left( Zeta1 \cdot (DV - Zeta2) \right)`
    Examples:
        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> zeta1(0.02)
        >>> zeta2(400.0)
        >>> from hydpy import UnitTest
        >>> test = UnitTest(
        ...     model=model,
        ...     method=model.calc_beta_v1,
        ...     last_example=12,
        ...     parseqs=(states.dv, aides.beta)
        ... )
        >>> test.nexts.dv = (
        ...     -100.0, 0.0, 100.0, 200.0, 300.0, 400.0,
        ...     500.0, 600.0, 700.0, 800.0, 900.0, 100000.0
        ... )
        >>> test()
        | ex. | dv | beta |
        -----------------------------
        | 1 | -100.0 | 0.999955 |
        | 2 | 0.0 | 0.999665 |
        | 3 | 100.0 | 0.997527 |
        | 4 | 200.0 | 0.982014 |
        | 5 | 300.0 | 0.880797 |
        | 6 | 400.0 | 0.5 |
        | 7 | 500.0 | 0.119203 |
        | 8 | 600.0 | 0.017986 |
        | 9 | 700.0 | 0.002473 |
        | 10 | 800.0 | 0.000335 |
        | 11 | 900.0 | 0.000045 |
        | 12 | 100000.0 | 0.0 |
    """
    CONTROLPARAMETERS = (
        wland_control.Zeta1,
        wland_control.Zeta2,
    )
    REQUIREDSEQUENCES = (wland_states.DV,)
    RESULTSEQUENCES = (wland_aides.Beta,)
    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        sta = model.sequences.states.fastaccess
        aid = model.sequences.aides.fastaccess
        d_temp = con.zeta1 * (sta.dv - con.zeta2)
        # For large exponents exp() would overflow; the sigmoid's limit
        # is zero there anyway, so short-circuit above 700.
        if d_temp > 700.0:
            aid.beta = 0.0
        else:
            d_temp = modelutils.exp(d_temp)
            aid.beta = 0.5 + 0.5 * (1.0 - d_temp) / (1.0 + d_temp)
class Calc_ETV_V1(modeltools.Method):
    r"""Calculate the actual evapotranspiration from the vadose zone.
    Basic equation:
    .. math::
        ETV = \Sigma \left( \frac{AUR}{AGR} \cdot (PETL - EI) \cdot \begin{cases}
        0 &|\ LT = SEALED
        \\
        Beta &|\ LT \neq SEALED
        \end{cases} \right)
    Example:
        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> nu(3)
        >>> lt(FIELD, SOIL, SEALED)
        >>> aur(0.4, 0.4, 0.2)
        >>> derived.agr.update()
        >>> fluxes.petl = 5.0
        >>> fluxes.ei = 1.0, 3.0, 2.0
        >>> aides.beta = 0.75
        >>> model.calc_etv_v1()
        >>> fluxes.etv
        etv(2.25)
    """
    CONTROLPARAMETERS = (
        wland_control.NU,
        wland_control.LT,
        wland_control.AUR,
    )
    DERIVEDPARAMETERS = (wland_derived.AGR,)
    REQUIREDSEQUENCES = (
        wland_fluxes.PETL,
        wland_fluxes.EI,
        wland_aides.Beta,
    )
    RESULTSEQUENCES = (wland_fluxes.ETV,)
    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        der = model.parameters.derived.fastaccess
        flu = model.sequences.fluxes.fastaccess
        aid = model.sequences.aides.fastaccess
        flu.etv = 0.0
        for k in range(con.nu):
            # Remaining evaporative demand (PETL - EI) of the non-sealed
            # units, reduced by Beta and area-weighted.
            if con.lt[k] != SEALED:
                flu.etv += aid.beta * con.aur[k] / der.agr * (flu.petl[k] - flu.ei[k])
class Calc_ES_V1(modeltools.Method):
    r"""Calculate the actual evaporation from the surface water reservoir.
    Basic equation (discontinuous):
    .. math::
        ES = \begin{cases}
        PES &|\ HS > 0
        \\
        0 &|\ HS \leq 0
        \end{cases}
    Examples:
        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> fluxes.pes = 5.0
        >>> from hydpy import UnitTest
        >>> test = UnitTest(
        ...     model=model,
        ...     method=model.calc_es_v1,
        ...     last_example=9,
        ...     parseqs=(states.hs, fluxes.es)
        ... )
        >>> test.nexts.hs = -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0
        Without smoothing:
        >>> sh(0.0)
        >>> derived.rh1.update()
        >>> test()
        | ex. | hs | es |
        --------------------
        | 1 | -4.0 | 0.0 |
        | 2 | -3.0 | 0.0 |
        | 3 | -2.0 | 0.0 |
        | 4 | -1.0 | 0.0 |
        | 5 | 0.0 | 2.5 |
        | 6 | 1.0 | 5.0 |
        | 7 | 2.0 | 5.0 |
        | 8 | 3.0 | 5.0 |
        | 9 | 4.0 | 5.0 |
        With smoothing:
        >>> sh(1.0)
        >>> derived.rh1.update()
        >>> test()
        | ex. | hs | es |
        -------------------------
        | 1 | -4.0 | 0.0 |
        | 2 | -3.0 | 0.000005 |
        | 3 | -2.0 | 0.00051 |
        | 4 | -1.0 | 0.05 |
        | 5 | 0.0 | 2.5 |
        | 6 | 1.0 | 4.95 |
        | 7 | 2.0 | 4.99949 |
        | 8 | 3.0 | 4.999995 |
        | 9 | 4.0 | 5.0 |
    """
    DERIVEDPARAMETERS = (wland_derived.RH1,)
    REQUIREDSEQUENCES = (
        wland_fluxes.PES,
        wland_states.HS,
    )
    RESULTSEQUENCES = (wland_fluxes.ES,)
    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        der = model.parameters.derived.fastaccess
        flu = model.sequences.fluxes.fastaccess
        sta = model.sequences.states.fastaccess
        # Smoothed step function on the surface water level HS.
        flu.es = flu.pes * smoothutils.smooth_logistic1(sta.hs, der.rh1)
class Calc_ET_V1(modeltools.Method):
    r"""Calculate the total actual evapotranspiration.
    Basic equation:
    :math:`ET = ALR \cdot \bigl( \Sigma (AUR \cdot EI) + AGR \cdot ETV \bigl ) +
    ASR \cdot ES`
    Example:
        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> nu(2)
        >>> aur(0.8, 0.2)
        >>> derived.alr(0.8)
        >>> derived.asr(0.2)
        >>> derived.agr(0.5)
        >>> fluxes.ei = 0.5, 3.0
        >>> fluxes.etv = 2.0
        >>> fluxes.es = 3.0
        >>> model.calc_et_v1()
        >>> fluxes.et
        et(2.2)
    """
    CONTROLPARAMETERS = (
        wland_control.NU,
        wland_control.AUR,
    )
    DERIVEDPARAMETERS = (
        wland_derived.ALR,
        wland_derived.ASR,
        wland_derived.AGR,
    )
    REQUIREDSEQUENCES = (
        wland_fluxes.EI,
        wland_fluxes.ETV,
        wland_fluxes.ES,
    )
    RESULTSEQUENCES = (wland_fluxes.ET,)
    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        der = model.parameters.derived.fastaccess
        flu = model.sequences.fluxes.fastaccess
        # Area-weighted mean of the interception evaporation of all units.
        d_ei = 0.0
        for k in range(con.nu):
            d_ei += con.aur[k] * flu.ei[k]
        # Combine land (interception + vadose zone) and surface water shares.
        flu.et = der.alr * (d_ei + der.agr * flu.etv) + der.asr * flu.es
class Calc_DVEq_V1(modeltools.Method):
    r"""Calculate the equilibrium storage deficit of the vadose zone.
    Basic equation (discontinuous):
    .. math::
        DVEq = \begin{cases}
        0 &|\ DG \leq PsiAE
        \\
        ThetaS \cdot \left( DG - \frac{DG^{1-1/b}}{(1-1/b) \cdot PsiAE^{-1/B}} -
        \frac{PsiAE}{1-B} \right) &|\ PsiAE < DG
        \end{cases}
    Examples:
        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> thetas(0.4)
        >>> psiae(300.0)
        >>> b(5.0)
        >>> from hydpy import UnitTest
        >>> test = UnitTest(
        ...     model=model,
        ...     method=model.calc_dveq_v1,
        ...     last_example=6,
        ...     parseqs=(states.dg, aides.dveq)
        ... )
        >>> test.nexts.dg = 200.0, 300.0, 400.0, 800.0, 1600.0, 3200.0
        Without smoothing:
        >>> test()
        | ex. | dg | dveq |
        -----------------------------
        | 1 | 200.0 | 0.0 |
        | 2 | 300.0 | 0.0 |
        | 3 | 400.0 | 1.182498 |
        | 4 | 800.0 | 21.249634 |
        | 5 | 1600.0 | 97.612368 |
        | 6 | 3200.0 | 313.415248 |
    """
    CONTROLPARAMETERS = (
        wland_control.ThetaS,
        wland_control.PsiAE,
        wland_control.B,
    )
    DERIVEDPARAMETERS = (wland_derived.NUG,)
    REQUIREDSEQUENCES = (wland_states.DG,)
    RESULTSEQUENCES = (wland_aides.DVEq,)
    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        der = model.parameters.derived.fastaccess
        sta = model.sequences.states.fastaccess
        aid = model.sequences.aides.fastaccess
        # Only defined when groundwater-affected units exist (NUG > 0).
        if der.nug:
            if sta.dg < con.psiae:
                # Groundwater above the air-entry depth: no deficit.
                aid.dveq = 0.0
            else:
                # Closed-form integral of the Brooks-Corey-type retention
                # curve between the surface and the groundwater depth DG.
                aid.dveq = con.thetas * (
                    sta.dg
                    - sta.dg ** (1.0 - 1.0 / con.b)
                    / (1.0 - 1.0 / con.b)
                    / con.psiae ** (-1.0 / con.b)
                    - con.psiae / (1.0 - con.b)
                )
        else:
            aid.dveq = modelutils.nan
class Return_DVH_V1(modeltools.Method):
    r"""Return the storage deficit of the vadose zone at a specific height above
    the groundwater table.

    Basic equation (discontinous):

    .. math::
        DVH = \begin{cases}
        0 &|\ DG \leq PsiAE
        \\
        ThetaS \cdot \left(1 - \left( \frac{h}{PsiAE} \right)^{-1/b} \right)
        &|\ PsiAE < DG
        \end{cases}

    This power law is the differential of the equation underlying method
    |Calc_DVEq_V1| with respect to height. :cite:t:`ref-Brauer2014` also cites it
    (equation 6) but does not use it directly.

    Examples:

        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> thetas(0.4)
        >>> psiae(300.0)
        >>> b(5.0)

        With smoothing:

        >>> from hydpy import repr_
        >>> sh(0.0)
        >>> derived.rh1.update()
        >>> for h in [200.0, 299.0, 300.0, 301.0, 400.0, 500.0, 600.0]:
        ...     print(repr_(h), repr_(model.return_dvh_v1(h)))
        200.0 0.0
        299.0 0.0
        300.0 0.0
        301.0 0.000266
        400.0 0.022365
        500.0 0.038848
        600.0 0.05178

        Without smoothing:

        >>> sh(1.0)
        >>> derived.rh1.update()
        >>> for h in [200.0, 299.0, 300.0, 301.0, 400.0, 500.0, 600.0]:
        ...     print(repr_(h), repr_(model.return_dvh_v1(h)))
        200.0 0.0
        299.0 0.000001
        300.0 0.00004
        301.0 0.000267
        400.0 0.022365
        500.0 0.038848
        600.0 0.05178
    """

    CONTROLPARAMETERS = (
        wland_control.ThetaS,
        wland_control.PsiAE,
        wland_control.B,
    )
    DERIVEDPARAMETERS = (wland_derived.RH1,)

    @staticmethod
    def __call__(model: modeltools.Model, h: float) -> float:
        con = model.parameters.control.fastaccess
        der = model.parameters.derived.fastaccess
        # Regularised maximum keeps the height at or (smoothly) above the
        # air-entry pressure, which removes the discontinuity at h = PsiAE.
        d_h = smoothutils.smooth_max1(h, con.psiae, der.rh1)
        return con.thetas * (1.0 - (d_h / con.psiae) ** (-1.0 / con.b))
class Calc_DVEq_V2(modeltools.Method):
    r"""Calculate the equilibrium storage deficit of the vadose zone.

    Basic equation:

    :math:`DHEq = \int_{0}^{DG} Return\_DVH\_V1(h) \ \ dh`

    Method |Calc_DVEq_V2| integrates |Return_DVH_V1| numerically, based on the
    Lobatto-Gauß quadrature. Hence, it should give nearly identical results as
    method |Calc_DVEq_V1|, which provides the analytical solution to the underlying
    power law. The benefit of method |Calc_DVEq_V2| is that it supports the
    regularisation of |Return_DVH_V1|, which |Calc_DVEq_V1| does not. In our
    experience, this benefit does not justify the additional numerical cost.
    However, we keep it for educational purposes; mainly as a starting point to
    implement alternative relationships between the soil water deficit and the
    groundwater table that we cannot solve analytically.

    Examples:

        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> derived.nug(0)
        >>> model.calc_dveq_v2()
        >>> aides.dveq
        dveq(nan)

        >>> derived.nug(1)
        >>> thetas(0.4)
        >>> psiae(300.0)
        >>> b(5.0)
        >>> from hydpy import UnitTest
        >>> test = UnitTest(
        ...     model=model,
        ...     method=model.calc_dveq_v2,
        ...     last_example=8,
        ...     parseqs=(states.dg, aides.dveq)
        ... )
        >>> test.nexts.dg = 200.0, 299.0, 300.0, 301.0, 400.0, 800.0, 1600.0, 3200.0

        Without smoothing:

        >>> sh(0.0)
        >>> derived.rh1.update()
        >>> test()
        | ex. | dg | dveq |
        -----------------------------
        | 1 | 200.0 | 0.0 |
        | 2 | 299.0 | 0.0 |
        | 3 | 300.0 | 0.0 |
        | 4 | 301.0 | 0.000133 |
        | 5 | 400.0 | 1.182498 |
        | 6 | 800.0 | 21.249634 |
        | 7 | 1600.0 | 97.612368 |
        | 8 | 3200.0 | 313.415248 |

        With smoothing:

        >>> sh(1.0)
        >>> derived.rh1.update()
        >>> test()
        | ex. | dg | dveq |
        -----------------------------
        | 1 | 200.0 | 0.0 |
        | 2 | 299.0 | 0.0 |
        | 3 | 300.0 | 0.000033 |
        | 4 | 301.0 | 0.000176 |
        | 5 | 400.0 | 1.182542 |
        | 6 | 800.0 | 21.24972 |
        | 7 | 1600.0 | 97.612538 |
        | 8 | 3200.0 | 313.415588 |
    """

    CONTROLPARAMETERS = (
        wland_control.ThetaS,
        wland_control.PsiAE,
        wland_control.B,
        wland_control.SH,
    )
    DERIVEDPARAMETERS = (
        wland_derived.NUG,
        wland_derived.RH1,
    )
    REQUIREDSEQUENCES = (wland_states.DG,)
    RESULTSEQUENCES = (wland_aides.DVEq,)
    SUBMETHODS = (Return_DVH_V1,)

    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        der = model.parameters.derived.fastaccess
        sta = model.sequences.states.fastaccess
        aid = model.sequences.aides.fastaccess
        if der.nug:
            # Start the integration slightly below zero so that the smoothed
            # (non-zero) tail of |Return_DVH_V1| is fully captured.
            d_x0 = -10.0 * con.sh
            if sta.dg > con.psiae:
                # Split the interval at PsiAE, where the integrand kinks, to
                # keep the quadrature accurate on both smooth parts.
                d_below = model.quaddveq_v1.integrate(d_x0, con.psiae, 2, 20, 1e-8)
                d_above = model.quaddveq_v1.integrate(con.psiae, sta.dg, 2, 20, 1e-8)
                aid.dveq = d_below + d_above
            else:
                aid.dveq = model.quaddveq_v1.integrate(d_x0, sta.dg, 2, 20, 1e-8)
        else:
            aid.dveq = modelutils.nan
class Calc_DVEq_V3(modeltools.Method):
    r"""Calculate the equilibrium storage deficit of the vadose zone.

    Basic equation (discontinuous):

    .. math::
        DHEq = ThetaR \cdot DG + \begin{cases}
        0 &|\ DG \leq PsiAE
        \\
        ThetaS \cdot \left( DG - \frac{DG^{1-1/b}}{(1-1/b) \cdot PsiAE^{-1/B}} -
        \frac{PsiAE}{1-B} \right) &|\ PsiAE < DG
        \end{cases}

    Method |Calc_DVEq_V3| extends the original `WALRUS`_ relationship between the
    groundwater depth and the equilibrium water deficit of the vadose zone defined
    by equation 5 of :cite:t:`ref-Brauer2014` and implemented into application model
    |wland| by method |Calc_DVEq_V1|. Parameter |ThetaR| introduces a (small)
    amount of water to fill the tension-saturated area directly above the groundwater
    table. This "residual saturation" allows the direct injection of water into
    groundwater without risking infinitely fast groundwater depth changes.

    Examples:

        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> thetas(0.4)
        >>> thetar(0.01)
        >>> psiae(300.0)
        >>> b(5.0)
        >>> from hydpy import UnitTest
        >>> test = UnitTest(
        ...     model=model,
        ...     method=model.calc_dveq_v3,
        ...     last_example=8,
        ...     parseqs=(states.dg, aides.dveq)
        ... )
        >>> test.nexts.dg = 200.0, 299.0, 300.0, 301.0, 400.0, 800.0, 1600.0, 3200.0

        Without smoothing:

        >>> test()
        | ex. | dg | dveq |
        -----------------------------
        | 1 | 200.0 | 2.0 |
        | 2 | 299.0 | 2.99 |
        | 3 | 300.0 | 3.0 |
        | 4 | 301.0 | 3.01013 |
        | 5 | 400.0 | 5.152935 |
        | 6 | 800.0 | 28.718393 |
        | 7 | 1600.0 | 111.172058 |
        | 8 | 3200.0 | 337.579867 |
    """

    CONTROLPARAMETERS = (
        wland_control.ThetaS,
        wland_control.ThetaR,
        wland_control.PsiAE,
        wland_control.B,
    )
    DERIVEDPARAMETERS = (wland_derived.NUG,)
    REQUIREDSEQUENCES = (wland_states.DG,)
    RESULTSEQUENCES = (wland_aides.DVEq,)

    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        der = model.parameters.derived.fastaccess
        sta = model.sequences.states.fastaccess
        aid = model.sequences.aides.fastaccess
        if der.nug:
            if sta.dg < con.psiae:
                # Within the air-entry zone, only the residual pore space
                # (ThetaR) contributes to the equilibrium deficit.
                aid.dveq = con.thetar * sta.dg
            else:
                # Same analytical integral as in |Calc_DVEq_V1|, but scaled
                # to the drainable porosity (ThetaS - ThetaR) and extended by
                # the linear residual-saturation term.
                aid.dveq = (con.thetas - con.thetar) * (
                    sta.dg
                    - sta.dg ** (1.0 - 1.0 / con.b)
                    / (1.0 - 1.0 / con.b)
                    / con.psiae ** (-1.0 / con.b)
                    - con.psiae / (1.0 - con.b)
                ) + con.thetar * sta.dg
        else:
            aid.dveq = modelutils.nan
class Return_DVH_V2(modeltools.Method):
    r"""Return the storage deficit of the vadose zone at a specific height above
    the groundwater table.

    Basic equation (discontinous):

    .. math::
        DVH = ThetaR + \begin{cases}
        0 &|\ DG \leq PsiAE
        \\
        (ThetaS-ThetaR) \cdot \left(1 - \left( \frac{h}{PsiAE} \right)^{-1/b} \right)
        &|\ PsiAE < DG
        \end{cases}

    The given equation is the differential of the equation underlying method
    |Calc_DVEq_V3| with respect to height.

    Examples:

        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> thetas(0.4)
        >>> thetar(0.01)
        >>> psiae(300.0)
        >>> b(5.0)

        With smoothing:

        >>> from hydpy import repr_
        >>> sh(0.0)
        >>> derived.rh1.update()
        >>> for h in [200.0, 299.0, 300.0, 301.0, 400.0, 500.0, 600.0]:
        ...     print(repr_(h), repr_(model.return_dvh_v2(h)))
        200.0 0.01
        299.0 0.01
        300.0 0.01
        301.0 0.010259
        400.0 0.031806
        500.0 0.047877
        600.0 0.060485

        Without smoothing:

        >>> sh(1.0)
        >>> derived.rh1.update()
        >>> for h in [200.0, 299.0, 300.0, 301.0, 400.0, 500.0, 600.0]:
        ...     print(repr_(h), repr_(model.return_dvh_v2(h)))
        200.0 0.01
        299.0 0.010001
        300.0 0.010039
        301.0 0.01026
        400.0 0.031806
        500.0 0.047877
        600.0 0.060485
    """

    CONTROLPARAMETERS = (
        wland_control.ThetaS,
        wland_control.ThetaR,
        wland_control.PsiAE,
        wland_control.B,
    )
    DERIVEDPARAMETERS = (wland_derived.RH1,)

    @staticmethod
    def __call__(model: modeltools.Model, h: float) -> float:
        con = model.parameters.control.fastaccess
        der = model.parameters.derived.fastaccess
        # Regularised maximum smooths the transition at the air-entry
        # pressure (see |Return_DVH_V1| for the ThetaR-free variant).
        d_h = smoothutils.smooth_max1(h, con.psiae, der.rh1)
        return con.thetar + (
            (con.thetas - con.thetar) * (1.0 - (d_h / con.psiae) ** (-1.0 / con.b))
        )
class Calc_DVEq_V4(modeltools.Method):
    r"""Calculate the equilibrium storage deficit of the vadose zone.

    Basic equation:

    :math:`DHEq = \int_{0}^{DG} Return\_DVH\_V2(h) \ \ dh`

    Method |Calc_DVEq_V4| integrates |Return_DVH_V2| numerically, based on the
    Lobatto-Gauß quadrature. The short discussion in the documentation on
    |Calc_DVEq_V2| (which integrates |Return_DVH_V1|) also applies on |Calc_DVEq_V4|.

    Examples:

        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> derived.nug(0)
        >>> model.calc_dveq_v4()
        >>> aides.dveq
        dveq(nan)

        >>> derived.nug(1)
        >>> thetas(0.4)
        >>> thetar(0.01)
        >>> psiae(300.0)
        >>> b(5.0)
        >>> from hydpy import UnitTest
        >>> test = UnitTest(
        ...     model=model,
        ...     method=model.calc_dveq_v4,
        ...     last_example=8,
        ...     parseqs=(states.dg, aides.dveq)
        ... )
        >>> test.nexts.dg = 200.0, 299.0, 300.0, 301.0, 400.0, 800.0, 1600.0, 3200.0

        Without smoothing:

        >>> sh(0.0)
        >>> derived.rh1.update()
        >>> test()
        | ex. | dg | dveq |
        -----------------------------
        | 1 | 200.0 | 2.0 |
        | 2 | 299.0 | 2.99 |
        | 3 | 300.0 | 3.0 |
        | 4 | 301.0 | 3.01013 |
        | 5 | 400.0 | 5.152935 |
        | 6 | 800.0 | 28.718393 |
        | 7 | 1600.0 | 111.172058 |
        | 8 | 3200.0 | 337.579867 |

        With smoothing:

        >>> sh(1.0)
        >>> derived.rh1.update()
        >>> test()
        | ex. | dg | dveq |
        -----------------------------
        | 1 | 200.0 | 2.1 |
        | 2 | 299.0 | 3.09 |
        | 3 | 300.0 | 3.100032 |
        | 4 | 301.0 | 3.110172 |
        | 5 | 400.0 | 5.252979 |
        | 6 | 800.0 | 28.818477 |
        | 7 | 1600.0 | 111.272224 |
        | 8 | 3200.0 | 337.680198 |
    """

    CONTROLPARAMETERS = (
        wland_control.ThetaS,
        wland_control.ThetaR,
        wland_control.PsiAE,
        wland_control.B,
        wland_control.SH,
    )
    DERIVEDPARAMETERS = (
        wland_derived.NUG,
        wland_derived.RH1,
    )
    REQUIREDSEQUENCES = (wland_states.DG,)
    RESULTSEQUENCES = (wland_aides.DVEq,)
    SUBMETHODS = (Return_DVH_V2,)

    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        der = model.parameters.derived.fastaccess
        sta = model.sequences.states.fastaccess
        aid = model.sequences.aides.fastaccess
        if der.nug:
            # Start slightly below zero so the smoothed tail of the
            # integrand is fully captured (see |Calc_DVEq_V2|).
            d_x0 = -10.0 * con.sh
            if sta.dg > con.psiae:
                # Split at PsiAE, where the integrand kinks, for accuracy.
                d_below = model.quaddveq_v2.integrate(d_x0, con.psiae, 2, 20, 1e-8)
                d_above = model.quaddveq_v2.integrate(con.psiae, sta.dg, 2, 20, 1e-8)
                aid.dveq = d_below + d_above
            else:
                aid.dveq = model.quaddveq_v2.integrate(d_x0, sta.dg, 2, 20, 1e-8)
        else:
            aid.dveq = modelutils.nan
class Return_ErrorDV_V1(modeltools.Method):
    r"""Calculate the difference between the equilibrium and the actual storage
    deficit of the vadose zone.

    Basic equation:

    :math:`DVEq_{Calc\_DVEq\_V3} - DV`

    Method |Return_ErrorDV_V1| uses |Calc_DVEq_V3| to calculate the equilibrium
    deficit corresponding to the current groundwater depth. The following example
    shows that it resets the values |DG| and |DVEq|, which it needs to change
    temporarily, to their original states.

    Example:

        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> thetas(0.4)
        >>> thetar(0.01)
        >>> psiae(300.0)
        >>> b(5.0)
        >>> states.dg = -9.0
        >>> aides.dveq = -99.0
        >>> states.dv = 3.152935
        >>> from hydpy import round_
        >>> round_(model.return_errordv_v1(400.0))
        2.0
        >>> states.dg
        dg(-9.0)
        >>> aides.dveq
        dveq(-99.0)

    Technical checks:

        As mentioned above, method |Return_ErrorDV_V1| changes the values of the
        sequences |DG| and |DVEq|, but only temporarily. Hence, we do not include
        them into the method specifications, even if the following check considers
        this to be erroneous:

        >>> from hydpy.core.testtools import check_selectedvariables
        >>> from hydpy.models.wland.wland_model import Return_ErrorDV_V1
        >>> print(check_selectedvariables(Return_ErrorDV_V1))
        Definitely missing: dg and dveq
        Possibly missing (REQUIREDSEQUENCES):
            Calc_DVEq_V3: DG
        Possibly missing (RESULTSEQUENCES):
            Calc_DVEq_V3: DVEq
    """

    CONTROLPARAMETERS = (
        wland_control.ThetaS,
        wland_control.ThetaR,
        wland_control.PsiAE,
        wland_control.B,
    )
    DERIVEDPARAMETERS = (wland_derived.NUG,)
    REQUIREDSEQUENCES = (wland_states.DV,)
    SUBMETHODS = (Calc_DVEq_V3,)

    @staticmethod
    def __call__(model: modeltools.Model, dg: float) -> float:
        sta = model.sequences.states.fastaccess
        aid = model.sequences.aides.fastaccess
        # Remember the current values of DVEq and DG so that the temporary
        # modifications below can be undone before returning.
        d_dveq, d_dg = aid.dveq, sta.dg
        sta.dg = dg
        model.calc_dveq_v3()
        d_delta = aid.dveq - sta.dv
        # Restore the original states (no lasting side effects).
        aid.dveq, sta.dg = d_dveq, d_dg
        return d_delta
class Calc_DGEq_V1(modeltools.Method):
    r"""Calculate the equilibrium groundwater depth.

    Method |Calc_DGEq_V1| calculates the equilibrium groundwater depth for the
    current water deficit of the vadose zone, following methods |Return_DVH_V2|
    and |Calc_DVEq_V3|. As we are not aware of an analytical solution, we solve
    it numerically via class |PegasusDGEq|, which performs an iterative root-search
    based on the `Pegasus method`_.

    Examples:

        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> thetas(0.4)
        >>> thetar(0.01)
        >>> psiae(300.0)
        >>> b(5.0)
        >>> from hydpy import UnitTest
        >>> test = UnitTest(
        ...     model=model,
        ...     method=model.calc_dgeq_v1,
        ...     last_example=13,
        ...     parseqs=(states.dv, aides.dgeq)
        ... )
        >>> test.nexts.dv = (
        ...     -1.0, -0.01, 0.0, 0.01, 1.0, 2.0, 2.99, 3.0,
        ...     3.01012983, 5.1529353, 28.71839324, 111.1720584, 337.5798671)
        >>> test()
        | ex. | dv | dgeq |
        -----------------------------
        | 1 | -1.0 | 0.0 |
        | 2 | -0.01 | 0.0 |
        | 3 | 0.0 | 0.0 |
        | 4 | 0.01 | 1.0 |
        | 5 | 1.0 | 100.0 |
        | 6 | 2.0 | 200.0 |
        | 7 | 2.99 | 299.0 |
        | 8 | 3.0 | 300.0 |
        | 9 | 3.01013 | 301.0 |
        | 10 | 5.152935 | 400.0 |
        | 11 | 28.718393 | 800.0 |
        | 12 | 111.172058 | 1600.0 |
        | 13 | 337.579867 | 3200.0 |
    """

    CONTROLPARAMETERS = (
        wland_control.ThetaS,
        wland_control.ThetaR,
        wland_control.PsiAE,
        wland_control.B,
    )
    DERIVEDPARAMETERS = (wland_derived.NUG,)
    REQUIREDSEQUENCES = (wland_states.DV,)
    RESULTSEQUENCES = (wland_aides.DGEq,)
    SUBMETHODS = (Return_ErrorDV_V1,)

    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        # Bug fix: access the fastaccess object of the complete control
        # parameter group.  The previous `model.parameters.control.psiae
        # .fastaccess` returned the parameter-specific shortcut, which does
        # not provide the `psiae` attribute queried below.
        con = model.parameters.control.fastaccess
        sta = model.sequences.states.fastaccess
        aid = model.sequences.aides.fastaccess
        if sta.dv > 0.0:
            # The sign of the deficit error at PsiAE decides in which of the
            # two search intervals the root (the equilibrium depth) lies.
            d_error = model.return_errordv_v1(con.psiae)
            if d_error <= 0.0:
                # Equilibrium depth lies below the air-entry zone.
                aid.dgeq = model.pegasusdgeq.find_x(
                    con.psiae,
                    10000.0,
                    con.psiae,
                    1000000.0,
                    0.0,
                    1e-8,
                    20,
                )
            else:
                # Equilibrium depth lies within the air-entry zone.
                aid.dgeq = model.pegasusdgeq.find_x(
                    0.0,
                    con.psiae,
                    0.0,
                    con.psiae,
                    0.0,
                    1e-8,
                    20,
                )
        else:
            # Zero or negative deficits imply a completely saturated soil,
            # for which the equilibrium groundwater depth is zero.
            aid.dgeq = 0.0
class Calc_GF_V1(modeltools.Method):
    r"""Calculate the gain factor for changes in groundwater depth.

    Basic equation (discontinuous):

    .. math::
        GF = \begin{cases}
        0 &|\ DG \leq 0
        \\
        Return\_DVH\_V2(DGEq - DG)^{-1} &|\ 0 < DG
        \end{cases}

    The original `WALRUS`_ model attributes a passive role to groundwater dynamics.
    All water entering or leaving the underground is added to or subtracted from the
    vadose zone, and the groundwater table only reacts on such changes until it is in
    equilibrium with the updated water deficit in the vadose zone. Hence, the movement
    of the groundwater table is generally slow. However, in catchments with
    near-surface water tables, we often observe fast responses of groundwater to input
    forcings, maybe due to rapid infiltration along macropores or the re-infiltration
    of channel water. In such situations, where the input water somehow bypasses the
    vadose zone, the speed of the rise of the groundwater table depends not only on
    the effective pore size of the soil material but also on the soil's degree of
    saturation directly above the groundwater table. The smaller the remaining pore
    size, the larger the fraction between the water table's rise and the actual
    groundwater recharge. We call this fraction the "gain factor" (|GF|).

    The `WALRUS`_ model does not explicitly account for the soil moisture in different
    depths above the groundwater table. To keep the vertically lumped approach, we
    use the difference between the actual (|DG|) and the equilibrium groundwater depth
    (|DGEq|) as an indicator for the wetness above the groundwater table. When |DG|
    is identical with |DGEq|, soil moisture and groundwater are in equilibrium. Then,
    the tension-saturated area is fully developed, and the groundwater table moves
    quickly (depending on |ThetaR|). The opposite case is when |DG| is much smaller
    than |DGEq|. Such a situation occurs after a fast rise of the groundwater table
    when the soil water still needs much redistribution before it can be in equilibrium
    with groundwater. In the most extreme case, the gain factor is just as large as
    indicated by the effective pore size alone (depending on |ThetaS|).

    The above discussion only applies as long as the groundwater table is below the
    soil surface. For large-scale ponding (see :cite:t:`ref-Brauer2014`, section 5.11),
    we set |GF| to zero. See the documentation on the methods |Calc_CDG_V1| and
    |Calc_FGS_V1| for related discussions.

    Examples:

        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> thetas(0.4)
        >>> thetar(0.01)
        >>> psiae(300.0)
        >>> b(5.0)
        >>> sh(0.0)
        >>> aides.dgeq = 5000.0
        >>> derived.rh1.update()
        >>> from hydpy import UnitTest
        >>> test = UnitTest(
        ...     model=model,
        ...     method=model.calc_gf_v1,
        ...     last_example=16,
        ...     parseqs=(states.dg, aides.gf)
        ... )
        >>> test.nexts.dg = (
        ...     -10.0, -1.0, 0.0, 1.0, 10.0,
        ...     1000.0, 2000.0, 3000.0, 4000.0, 4500.0, 4600.0,
        ...     4690.0, 4699.0, 4700.0, 4701.0, 4710.0)
        >>> test()
        | ex. | dg | gf |
        ----------------------------
        | 1 | -10.0 | 0.0 |
        | 2 | -1.0 | 0.0 |
        | 3 | 0.0 | 2.81175 |
        | 4 | 1.0 | 5.623782 |
        | 5 | 10.0 | 5.626316 |
        | 6 | 1000.0 | 5.963555 |
        | 7 | 2000.0 | 6.496601 |
        | 8 | 3000.0 | 7.510869 |
        | 9 | 4000.0 | 10.699902 |
        | 10 | 4500.0 | 20.88702 |
        | 11 | 4600.0 | 31.440737 |
        | 12 | 4690.0 | 79.686112 |
        | 13 | 4699.0 | 97.470815 |
        | 14 | 4700.0 | 100.0 |
        | 15 | 4701.0 | 100.0 |
        | 16 | 4710.0 | 100.0 |

        >>> sh(1.0)
        >>> derived.rh1.update()
        >>> test()
        | ex. | dg | gf |
        ----------------------------
        | 1 | -10.0 | 0.0 |
        | 2 | -1.0 | 0.056232 |
        | 3 | 0.0 | 2.81175 |
        | 4 | 1.0 | 5.567544 |
        | 5 | 10.0 | 5.626316 |
        | 6 | 1000.0 | 5.963555 |
        | 7 | 2000.0 | 6.496601 |
        | 8 | 3000.0 | 7.510869 |
        | 9 | 4000.0 | 10.699902 |
        | 10 | 4500.0 | 20.88702 |
        | 11 | 4600.0 | 31.440737 |
        | 12 | 4690.0 | 79.686112 |
        | 13 | 4699.0 | 97.465434 |
        | 14 | 4700.0 | 99.609455 |
        | 15 | 4701.0 | 99.994314 |
        | 16 | 4710.0 | 100.0 |
    """

    CONTROLPARAMETERS = (
        wland_control.ThetaS,
        wland_control.ThetaR,
        wland_control.PsiAE,
        wland_control.B,
    )
    DERIVEDPARAMETERS = (wland_derived.RH1,)
    REQUIREDSEQUENCES = (
        wland_states.DG,
        wland_aides.DGEq,
    )
    RESULTSEQUENCES = (wland_aides.GF,)
    SUBMETHODS = (Return_DVH_V2,)

    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        der = model.parameters.derived.fastaccess
        sta = model.sequences.states.fastaccess
        aid = model.sequences.aides.fastaccess
        # The regularised step function fades the gain factor to zero for
        # non-positive groundwater depths (large-scale ponding); otherwise,
        # GF is the reciprocal of the remaining pore space at the height
        # DGEq - DG above the groundwater table.
        aid.gf = smoothutils.smooth_logistic1(sta.dg, der.rh1) / model.return_dvh_v2(
            aid.dgeq - sta.dg
        )
class Calc_CDG_V1(modeltools.Method):
    r"""Calculate the change in the groundwater depth due to percolation and
    capillary rise.

    Basic equation (discontinuous):

    :math:`CDG = \frac{DV-min(DVEq, DG)}{CV}`

    Note that this equation slightly differs from equation 6 of
    :cite:t:`ref-Brauer2014`.
    In case of large-scale ponding, |DVEq| always stays at zero and we let |DG|
    take control of the speed of the water table movement. See the documentation
    on method |Calc_FGS_V1| for additional information on the differences between
    |wland| and `WALRUS`_ for this rare situation.

    Examples:

        >>> from hydpy.models.wland import *
        >>> simulationstep('12h')
        >>> parameterstep('1d')
        >>> cv(10.0)
        >>> sh(0.0)
        >>> derived.rh1.update()
        >>> states.dv = 100.0
        >>> states.dg = 1000.0
        >>> aides.dveq = 80.0
        >>> model.calc_cdg_v1()
        >>> fluxes.cdg
        cdg(1.0)

        Without large-scale ponding:

        >>> from hydpy import UnitTest
        >>> test = UnitTest(
        ...     model=model,
        ...     method=model.calc_cdg_v1,
        ...     last_example=5,
        ...     parseqs=(states.dg, fluxes.cdg)
        ... )

        With large-scale ponding and without smoothing:

        >>> states.dv = -10.0
        >>> aides.dveq = 0.0
        >>> test.nexts.dg = 10.0, 1.0, 0.0, -1.0, -10.0
        >>> test()
        | ex. | dg | cdg |
        -----------------------
        | 1 | 10.0 | -0.5 |
        | 2 | 1.0 | -0.5 |
        | 3 | 0.0 | -0.5 |
        | 4 | -1.0 | -0.45 |
        | 5 | -10.0 | 0.0 |

        With large-scale ponding and with smoothing:

        >>> sh(1.0)
        >>> derived.rh1.update()
        >>> test()
        | ex. | dg | cdg |
        ---------------------------
        | 1 | 10.0 | -0.5 |
        | 2 | 1.0 | -0.499891 |
        | 3 | 0.0 | -0.492458 |
        | 4 | -1.0 | -0.449891 |
        | 5 | -10.0 | 0.0 |
    """

    CONTROLPARAMETERS = (wland_control.CV,)
    DERIVEDPARAMETERS = (
        wland_derived.NUG,
        wland_derived.RH1,
    )
    REQUIREDSEQUENCES = (
        wland_states.DG,
        wland_states.DV,
        wland_aides.DVEq,
    )
    RESULTSEQUENCES = (wland_fluxes.CDG,)

    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        der = model.parameters.derived.fastaccess
        flu = model.sequences.fluxes.fastaccess
        sta = model.sequences.states.fastaccess
        aid = model.sequences.aides.fastaccess
        if der.nug:
            # During large-scale ponding (DG < DVEq, with DVEq at zero), the
            # smoothed minimum lets DG itself limit the water table movement.
            d_target = smoothutils.smooth_min1(aid.dveq, sta.dg, der.rh1)
            flu.cdg = (sta.dv - d_target) / con.cv
        else:
            # No groundwater-affected units: no water table to move.
            flu.cdg = 0.0
class Calc_CDG_V2(modeltools.Method):
    r"""Calculate the change in the vadose zone's storage deficit due to percolation,
    capillary rise, macropore-infiltration, seepage, groundwater drainage, and
    channel water infiltration.

    Basic equation:

    :math:`CDG = \frac{DV-min(DVEq, DG)}{CV} + GF \cdot \big( FGS - PV - FXG \big)`

    Method |Calc_CDG_V2| extends |Calc_CDG_V1|, which implements the (nearly) original
    `WALRUS`_ relationship defined by equation 6 of :cite:t:`ref-Brauer2014`). See the
    documentation on method |Calc_GF_V1| for a comprehensive explanation of the reason
    for this extension.

    Examples:

        Without large-scale ponding:

        >>> from hydpy.models.wland import *
        >>> simulationstep('12h')
        >>> parameterstep('1d')
        >>> cv(10.0)
        >>> sh(0.0)
        >>> derived.rh1.update()
        >>> states.dv = 100.0
        >>> states.dg = 1000.0
        >>> fluxes.pv = 1.0
        >>> fluxes.fxg = 2.0
        >>> fluxes.fgs = 4.0
        >>> aides.dveq = 80.0
        >>> aides.gf = 2.0
        >>> model.calc_cdg_v2()
        >>> fluxes.cdg
        cdg(3.0)

        With large-scale ponding and without smoothing:

        >>> from hydpy import UnitTest
        >>> test = UnitTest(
        ...     model=model,
        ...     method=model.calc_cdg_v2,
        ...     last_example=5,
        ...     parseqs=(states.dg, fluxes.cdg)
        ... )
        >>> aides.gf = 0.0
        >>> states.dv = -10.0
        >>> aides.dveq = 0.0
        >>> test.nexts.dg = 10.0, 1.0, 0.0, -1.0, -10.0
        >>> test()
        | ex. | dg | cdg |
        -----------------------
        | 1 | 10.0 | -0.5 |
        | 2 | 1.0 | -0.5 |
        | 3 | 0.0 | -0.5 |
        | 4 | -1.0 | -0.45 |
        | 5 | -10.0 | 0.0 |

        With large-scale ponding and with smoothing:

        >>> sh(1.0)
        >>> derived.rh1.update()
        >>> test()
        | ex. | dg | cdg |
        ---------------------------
        | 1 | 10.0 | -0.5 |
        | 2 | 1.0 | -0.499891 |
        | 3 | 0.0 | -0.492458 |
        | 4 | -1.0 | -0.449891 |
        | 5 | -10.0 | 0.0 |
    """

    CONTROLPARAMETERS = (wland_control.CV,)
    DERIVEDPARAMETERS = (
        wland_derived.NUG,
        wland_derived.RH1,
    )
    REQUIREDSEQUENCES = (
        wland_fluxes.PV,
        wland_fluxes.FGS,
        wland_fluxes.FXG,
        wland_states.DG,
        wland_states.DV,
        wland_aides.DVEq,
        wland_aides.GF,
    )
    RESULTSEQUENCES = (wland_fluxes.CDG,)

    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        der = model.parameters.derived.fastaccess
        flu = model.sequences.fluxes.fastaccess
        sta = model.sequences.states.fastaccess
        aid = model.sequences.aides.fastaccess
        if der.nug:
            # Slow component: relaxation of DV towards its equilibrium, as
            # in |Calc_CDG_V1|.
            d_target = smoothutils.smooth_min1(aid.dveq, sta.dg, der.rh1)
            d_cdg_slow = (sta.dv - d_target) / con.cv
            # Fast component: fluxes that bypass the vadose zone, scaled by
            # the gain factor (see |Calc_GF_V1|).
            d_cdg_fast = aid.gf * (flu.fgs - flu.pv - flu.fxg)
            flu.cdg = d_cdg_slow + d_cdg_fast
        else:
            flu.cdg = 0.0
class Calc_FGS_V1(modeltools.Method):
    r"""Calculate the groundwater drainage or surface water infiltration.

    For large-scale ponding, |wland| and `WALRUS`_ calculate |FGS| differently
    (even for discontinuous parameterisations). The `WALRUS`_ model redistributes
    water instantaneously in such cases (see :cite:t:`ref-Brauer2014`, section 5.11),
    which relates to infinitely high flow velocities and cannot be handled by the
    numerical integration algorithm underlying |wland|. Hence, we instead introduce
    the parameter |CGF|. Setting it to a value larger zero increases the flow
    velocity with increasing large-scale ponding. The larger the value of |CGF|,
    the stronger the functional similarity of both approaches. But note that very
    high values can result in increased computation times.

    Basic equations (discontinous):

    :math:`Gradient = CD - DG - HS`

    :math:`ContactSurface = max \left( CD - DG, HS \right)`

    :math:`Excess = max \left( -DG, HS - CD, 0 \right)`

    :math:`Conductivity = \frac{1 + CGF \cdot Excess}{CG}`

    :math:`FGS = Gradient \cdot ContactSurface \cdot Conductivity`

    Examples:

        >>> from hydpy.models.wland import *
        >>> simulationstep('12h')
        >>> parameterstep('1d')
        >>> cd(600.0)
        >>> cg(10000.0)
        >>> states.hs = 300.0
        >>> from hydpy import UnitTest
        >>> test = UnitTest(model=model,
        ...                 method=model.calc_fgs_v1,
        ...                 last_example=15,
        ...                 parseqs=(states.dg, states.hs, fluxes.fgs))
        >>> test.nexts.dg = (
        ...     -100.0, -1.0, 0.0, 1.0, 100.0, 200.0, 290.0, 299.0,
        ...     300.0, 301.0, 310.0, 400.0, 500.0, 600.0, 700.0)

        Without smoothing and without increased conductivity for large-scale ponding:

        >>> cgf(0.0)
        >>> sh(0.0)
        >>> derived.rh2.update()
        >>> test()
        | ex. | dg | hs | fgs |
        ----------------------------------
        | 1 | -100.0 | 300.0 | 14.0 |
        | 2 | -1.0 | 300.0 | 9.04505 |
        | 3 | 0.0 | 300.0 | 9.0 |
        | 4 | 1.0 | 300.0 | 8.95505 |
        | 5 | 100.0 | 300.0 | 5.0 |
        | 6 | 200.0 | 300.0 | 2.0 |
        | 7 | 290.0 | 300.0 | 0.155 |
        | 8 | 299.0 | 300.0 | 0.01505 |
        | 9 | 300.0 | 300.0 | 0.0 |
        | 10 | 301.0 | 300.0 | -0.015 |
        | 11 | 310.0 | 300.0 | -0.15 |
        | 12 | 400.0 | 300.0 | -1.5 |
        | 13 | 500.0 | 300.0 | -3.0 |
        | 14 | 600.0 | 300.0 | -4.5 |
        | 15 | 700.0 | 300.0 | -6.0 |

        Without smoothing but with increased conductivity for large-scale ponding:

        >>> cgf(0.1)
        >>> test()
        | ex. | dg | hs | fgs |
        -----------------------------------
        | 1 | -100.0 | 300.0 | 294.0 |
        | 2 | -1.0 | 300.0 | 10.85406 |
        | 3 | 0.0 | 300.0 | 9.0 |
        | 4 | 1.0 | 300.0 | 8.95505 |
        | 5 | 100.0 | 300.0 | 5.0 |
        | 6 | 200.0 | 300.0 | 2.0 |
        | 7 | 290.0 | 300.0 | 0.155 |
        | 8 | 299.0 | 300.0 | 0.01505 |
        | 9 | 300.0 | 300.0 | 0.0 |
        | 10 | 301.0 | 300.0 | -0.015 |
        | 11 | 310.0 | 300.0 | -0.15 |
        | 12 | 400.0 | 300.0 | -1.5 |
        | 13 | 500.0 | 300.0 | -3.0 |
        | 14 | 600.0 | 300.0 | -4.5 |
        | 15 | 700.0 | 300.0 | -6.0 |

        With smoothing and with increased conductivity for large-scale ponding:

        >>> sh(1.0)
        >>> derived.rh2.update()
        >>> test()
        | ex. | dg | hs | fgs |
        -----------------------------------
        | 1 | -100.0 | 300.0 | 294.0 |
        | 2 | -1.0 | 300.0 | 10.87215 |
        | 3 | 0.0 | 300.0 | 9.369944 |
        | 4 | 1.0 | 300.0 | 8.97296 |
        | 5 | 100.0 | 300.0 | 5.0 |
        | 6 | 200.0 | 300.0 | 2.0 |
        | 7 | 290.0 | 300.0 | 0.155 |
        | 8 | 299.0 | 300.0 | 0.01505 |
        | 9 | 300.0 | 300.0 | 0.0 |
        | 10 | 301.0 | 300.0 | -0.015 |
        | 11 | 310.0 | 300.0 | -0.15 |
        | 12 | 400.0 | 300.0 | -1.5 |
        | 13 | 500.0 | 300.0 | -3.0 |
        | 14 | 600.0 | 300.0 | -4.5 |
        | 15 | 700.0 | 300.0 | -6.0 |
    """

    CONTROLPARAMETERS = (
        wland_control.CD,
        wland_control.CG,
        wland_control.CGF,
    )
    DERIVEDPARAMETERS = (
        wland_derived.NUG,
        wland_derived.RH2,
    )
    REQUIREDSEQUENCES = (
        wland_states.DG,
        wland_states.HS,
    )
    RESULTSEQUENCES = (wland_fluxes.FGS,)

    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        der = model.parameters.derived.fastaccess
        flu = model.sequences.fluxes.fastaccess
        sta = model.sequences.states.fastaccess
        if der.nug:
            # Hydraulic head difference between groundwater and surface
            # water; its sign decides between drainage and infiltration.
            d_gradient = con.cd - sta.dg - sta.hs
            # Smoothed maximum of groundwater level and surface water depth.
            d_contactsurface = smoothutils.smooth_max1(con.cd - sta.dg, sta.hs, der.rh2)
            # Degree of large-scale ponding; zero under normal conditions.
            d_excess = smoothutils.smooth_max2(-sta.dg, sta.hs - con.cd, 0.0, der.rh2)
            # CGF raises the conductivity with increasing ponding, mimicking
            # WALRUS' instantaneous redistribution in a continuous manner.
            d_conductivity = (1.0 + con.cgf * d_excess) / con.cg
            flu.fgs = d_gradient * d_contactsurface * d_conductivity
        else:
            flu.fgs = 0.0
class Calc_FQS_V1(modeltools.Method):
    r"""Calculate the quickflow.

    Basic equation:

    :math:`FQS = \frac{HQ}{CQ}`

    Example:

        >>> from hydpy.models.wland import *
        >>> simulationstep('12h')
        >>> parameterstep('1d')
        >>> cq(10.0)
        >>> states.hq = 100.0
        >>> model.calc_fqs_v1()
        >>> fluxes.fqs
        fqs(5.0)
    """

    CONTROLPARAMETERS = (
        wland_control.NU,
        wland_control.CQ,
    )
    REQUIREDSEQUENCES = (wland_states.HQ,)
    RESULTSEQUENCES = (wland_fluxes.FQS,)

    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        flu = model.sequences.fluxes.fastaccess
        sta = model.sequences.states.fastaccess
        # Linear-reservoir outflow; without any land response units (NU
        # zero), there is no quickflow reservoir to drain.
        if con.nu:
            flu.fqs = sta.hq / con.cq
        else:
            flu.fqs = 0.0
class Calc_RH_V1(modeltools.Method):
    r"""Calculate the runoff height.

    Basic equation (discontinuous):

    :math:`RH = CS \cdot \left( \frac{max(HS-HSMin, 0)}{CD-HSMin} \right) ^ {XS}`

    Examples:

        >>> from hydpy.models.wland import *
        >>> simulationstep('12h')
        >>> parameterstep('1d')
        >>> cs(2.0)
        >>> cd(5.0)
        >>> hsmin(2.0)
        >>> xs(2.0)
        >>> from hydpy import UnitTest
        >>> test = UnitTest(
        ...     model=model,
        ...     method=model.calc_rh_v1,
        ...     last_example=11,
        ...     parseqs=(states.hs, fluxes.rh)
        ... )
        >>> test.nexts.hs = 0.0, 1.0, 1.9, 2.0, 2.1, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0

        Without smoothing:

        >>> sh(0.0)
        >>> derived.rh2.update()
        >>> test()
        | ex. | hs | rh |
        ------------------------
        | 1 | 0.0 | 0.0 |
        | 2 | 1.0 | 0.0 |
        | 3 | 1.9 | 0.0 |
        | 4 | 2.0 | 0.0 |
        | 5 | 2.1 | 0.001111 |
        | 6 | 3.0 | 0.111111 |
        | 7 | 4.0 | 0.444444 |
        | 8 | 5.0 | 1.0 |
        | 9 | 6.0 | 1.777778 |
        | 10 | 7.0 | 2.777778 |
        | 11 | 8.0 | 4.0 |

        With smoothing:

        >>> sh(0.1)
        >>> derived.rh2.update()
        >>> test()
        | ex. | hs | rh |
        ------------------------
        | 1 | 0.0 | 0.0 |
        | 2 | 1.0 | 0.0 |
        | 3 | 1.9 | 0.000011 |
        | 4 | 2.0 | 0.000187 |
        | 5 | 2.1 | 0.001344 |
        | 6 | 3.0 | 0.111111 |
        | 7 | 4.0 | 0.444444 |
        | 8 | 5.0 | 1.0 |
        | 9 | 6.0 | 1.777778 |
        | 10 | 7.0 | 2.777778 |
        | 11 | 8.0 | 4.0 |
    """

    CONTROLPARAMETERS = (
        wland_control.CS,
        wland_control.CD,
        wland_control.HSMin,
        wland_control.XS,
    )
    DERIVEDPARAMETERS = (wland_derived.RH2,)
    REQUIREDSEQUENCES = (wland_states.HS,)
    RESULTSEQUENCES = (wland_fluxes.RH,)

    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        der = model.parameters.derived.fastaccess
        flu = model.sequences.fluxes.fastaccess
        sta = model.sequences.states.fastaccess
        # Regularised max(HS - HSMin, 0): smooths the onset of runoff around
        # the minimum surface water level.
        d_hs = smoothutils.smooth_logistic2(sta.hs - con.hsmin, der.rh2)
        flu.rh = con.cs * (d_hs / (con.cd - con.hsmin)) ** con.xs
class Update_IC_V1(modeltools.Method):
    r"""Update the interception storage.

    Basic equation:

    :math:`\frac{dIC}{dt} = PC - TF - EI`

    Example:

        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> nu(1)
        >>> fluxes.pc = 2.0
        >>> fluxes.tf = 1.0
        >>> fluxes.ei = 3.0
        >>> states.ic.old = 4.0
        >>> model.update_ic_v1()
        >>> states.ic
        ic(2.0)
    """

    CONTROLPARAMETERS = (wland_control.NU,)
    REQUIREDSEQUENCES = (
        wland_fluxes.PC,
        wland_fluxes.TF,
        wland_fluxes.EI,
    )
    UPDATEDSEQUENCES = (wland_states.IC,)

    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        flu = model.sequences.fluxes.fastaccess
        old = model.sequences.states.fastaccess_old
        new = model.sequences.states.fastaccess_new
        # Water balance per land response unit: precipitation (PC, scalar)
        # minus throughfall and interception evaporation (both 1-dimensional).
        for k in range(con.nu):
            new.ic[k] = old.ic[k] + (flu.pc - flu.tf[k] - flu.ei[k])
class Update_SP_V1(modeltools.Method):
    r"""Update the storage deficit.

    Basic equation:

    :math:`\frac{dSP}{dt} = SF - AM`

    Example:

        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> nu(1)
        >>> fluxes.sf = 1.0
        >>> fluxes.am = 2.0
        >>> states.sp.old = 3.0
        >>> model.update_sp_v1()
        >>> states.sp
        sp(2.0)
    """

    CONTROLPARAMETERS = (wland_control.NU,)
    REQUIREDSEQUENCES = (
        wland_fluxes.SF,
        wland_fluxes.AM,
    )
    UPDATEDSEQUENCES = (wland_states.SP,)

    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        con = model.parameters.control.fastaccess
        flu = model.sequences.fluxes.fastaccess
        old = model.sequences.states.fastaccess_old
        new = model.sequences.states.fastaccess_new
        # Snow balance per land response unit: snowfall minus actual melt.
        for k in range(con.nu):
            new.sp[k] = old.sp[k] + (flu.sf[k] - flu.am[k])
class Update_DV_V1(modeltools.Method):
    r"""Update the storage deficit of the vadose zone.

    Basic equation:

    :math:`\frac{dDV}{dt} = -(FXG + PV - ETV - FGS)`

    Example:

        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> fluxes.fxg = 1.0
        >>> fluxes.pv = 2.0
        >>> fluxes.etv = 3.0
        >>> fluxes.fgs = 4.0
        >>> states.dv.old = 5.0
        >>> model.update_dv_v1()
        >>> states.dv
        dv(9.0)
    """

    DERIVEDPARAMETERS = (wland_derived.NUG,)
    REQUIREDSEQUENCES = (
        wland_fluxes.FXG,
        wland_fluxes.PV,
        wland_fluxes.ETV,
        wland_fluxes.FGS,
    )
    UPDATEDSEQUENCES = (wland_states.DV,)

    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        der = model.parameters.derived.fastaccess
        flu = model.sequences.fluxes.fastaccess
        old = model.sequences.states.fastaccess_old
        new = model.sequences.states.fastaccess_new
        if der.nug:
            # DV is a deficit: inflows (seepage, percolation) decrease it,
            # outflows (evapotranspiration, drainage) increase it.
            new.dv = old.dv - (flu.fxg + flu.pv - flu.etv - flu.fgs)
        else:
            # Without groundwater-affected units, the deficit is undefined.
            new.dv = modelutils.nan
class Update_DG_V1(modeltools.Method):
    r"""Update the groundwater depth.

    Basic equation:
      :math:`\frac{dDG}{dt} = CDG`

    Example:

        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> states.dg.old = 2.0
        >>> fluxes.cdg = 3.0
        >>> model.update_dg_v1()
        >>> states.dg
        dg(5.0)
    """

    DERIVEDPARAMETERS = (wland_derived.NUG,)
    REQUIREDSEQUENCES = (wland_fluxes.CDG,)
    UPDATEDSEQUENCES = (wland_states.DG,)

    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        der = model.parameters.derived.fastaccess
        flu = model.sequences.fluxes.fastaccess
        old = model.sequences.states.fastaccess_old
        new = model.sequences.states.fastaccess_new
        # without any groundwater-affected units, the depth is undefined
        if not der.nug:
            new.dg = modelutils.nan
            return
        new.dg = old.dg + flu.cdg
class Update_HQ_V1(modeltools.Method):
    r"""Update the level of the quickflow reservoir.

    Basic equation:
      :math:`\frac{dHQ}{dt} = PQ - FQS`

    Example:

        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> states.hq.old = 2.0
        >>> fluxes.pq = 3.0
        >>> fluxes.fqs = 4.0
        >>> model.update_hq_v1()
        >>> states.hq
        hq(1.0)
    """

    REQUIREDSEQUENCES = (
        wland_fluxes.PQ,
        wland_fluxes.FQS,
    )
    UPDATEDSEQUENCES = (wland_states.HQ,)

    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        fluxes = model.sequences.fluxes.fastaccess
        states_old = model.sequences.states.fastaccess_old
        states_new = model.sequences.states.fastaccess_new
        # inflow PQ raises the reservoir level, outflow FQS lowers it
        states_new.hq = states_old.hq + (fluxes.pq - fluxes.fqs)
class Update_HS_V1(modeltools.Method):
    r"""Update the surface water level.

    Basic equation:
      :math:`\frac{dHS}{dt} = PS - ETS + FXS
      + \frac{ALR \cdot \left(AGR \cdot FGS + FQS \right) - RH}{ASR}`

    Example:

        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> derived.alr(0.8)
        >>> derived.asr(0.2)
        >>> derived.agr(1.0)
        >>> states.hs.old = 2.0
        >>> fluxes.fxs = 3.0
        >>> fluxes.ps = 4.0
        >>> fluxes.es = 5.0
        >>> fluxes.fgs = 6.0
        >>> fluxes.fqs = 7.0
        >>> fluxes.rh = 8.0
        >>> model.update_hs_v1()
        >>> states.hs
        hs(16.0)
    """

    DERIVEDPARAMETERS = (
        wland_derived.ALR,
        wland_derived.ASR,
        wland_derived.AGR,
    )
    REQUIREDSEQUENCES = (
        wland_fluxes.FXS,
        wland_fluxes.PS,
        wland_fluxes.ES,
        wland_fluxes.FGS,
        wland_fluxes.FQS,
        wland_fluxes.RH,
    )
    UPDATEDSEQUENCES = (wland_states.HS,)

    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        derived = model.parameters.derived.fastaccess
        fluxes = model.sequences.fluxes.fastaccess
        states_old = model.sequences.states.fastaccess_old
        states_new = model.sequences.states.fastaccess_new
        # local gains/losses act directly; lateral inflows and the discharge
        # RH are rescaled by the involved area ratios (ALR, AGR, ASR)
        states_new.hs = (
            states_old.hs
            + (fluxes.fxs + fluxes.ps - fluxes.es)
            - fluxes.rh / derived.asr
            + derived.alr / derived.asr * fluxes.fqs
            + (derived.alr * derived.agr) / derived.asr * fluxes.fgs
        )
class Calc_R_V1(modeltools.Method):
    r"""Calculate the runoff in m³/s.

    Basic equation:
      :math:`R = QF \cdot RH`

    Example:

        >>> from hydpy.models.wland import *
        >>> parameterstep()
        >>> derived.qf(2.0)
        >>> fluxes.rh = 3.0
        >>> model.calc_r_v1()
        >>> fluxes.r
        r(6.0)
    """

    DERIVEDPARAMETERS = (wland_derived.QF,)
    REQUIREDSEQUENCES = (wland_fluxes.RH,)
    RESULTSEQUENCES = (wland_fluxes.R,)

    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        derived = model.parameters.derived.fastaccess
        fluxes = model.sequences.fluxes.fastaccess
        # scale the runoff height RH by the derived conversion factor QF
        fluxes.r = derived.qf * fluxes.rh
class Pass_R_V1(modeltools.Method):
    r"""Update the outlet link sequence.

    Basic equation:
      :math:`Q = R`
    """

    REQUIREDSEQUENCES = (wland_fluxes.R,)
    RESULTSEQUENCES = (wland_outlets.Q,)

    @staticmethod
    def __call__(model: modeltools.Model) -> None:
        fluxes = model.sequences.fluxes.fastaccess
        outlets = model.sequences.outlets.fastaccess
        # add to (rather than overwrite) the current outlet value
        outlets.q[0] += fluxes.r
class PegasusDGEq(roottools.Pegasus):
    """Pegasus iterator for finding the equilibrium groundwater depth."""

    # root-finding target: the error function of the groundwater depth
    METHODS = (Return_ErrorDV_V1,)
class QuadDVEq_V1(quadtools.Quad):
    """Adaptive quadrature method for integrating the equilibrium storage
    deficit of the vadose zone."""

    # integrand: first variant of the depth-dependent deficit function
    METHODS = (Return_DVH_V1,)
class QuadDVEq_V2(quadtools.Quad):
    """Adaptive quadrature method for integrating the equilibrium storage
    deficit of the vadose zone."""

    # integrand: second variant of the depth-dependent deficit function
    METHODS = (Return_DVH_V2,)
class Model(modeltools.ELSModel):
    """The *HydPy-W-Land* model."""

    # tolerances and step-size bounds of the numerical (ELS) integrator
    SOLVERPARAMETERS = (
        wland_solver.AbsErrorMax,
        wland_solver.RelErrorMax,
        wland_solver.RelDTMin,
        wland_solver.RelDTMax,
    )
    SOLVERSEQUENCES = ()
    # methods applied to inlet data before each integration step
    INLET_METHODS = (
        Calc_FR_V1,
        Calc_PM_V1,
    )
    RECEIVER_METHODS = ()
    # helper methods invoked by other methods, never scheduled directly
    ADD_METHODS = (
        Return_ErrorDV_V1,
        Return_DVH_V1,
        Return_DVH_V2,
    )
    # methods calculating the right-hand-side terms of the ODE system
    PART_ODE_METHODS = (
        Calc_FXS_V1,
        Calc_FXG_V1,
        Calc_PC_V1,
        Calc_PETL_V1,
        Calc_PES_V1,
        Calc_TF_V1,
        Calc_EI_V1,
        Calc_SF_V1,
        Calc_RF_V1,
        Calc_AM_V1,
        Calc_PS_V1,
        Calc_W_V1,
        Calc_PV_V1,
        Calc_PQ_V1,
        Calc_Beta_V1,
        Calc_ETV_V1,
        Calc_ES_V1,
        Calc_FQS_V1,
        Calc_FGS_V1,
        Calc_RH_V1,
        Calc_DVEq_V1,
        Calc_DVEq_V2,
        Calc_DVEq_V3,
        Calc_DVEq_V4,
        Calc_DGEq_V1,
        Calc_GF_V1,
        Calc_CDG_V1,
        Calc_CDG_V2,
    )
    # methods integrating the state variables over a full step
    FULL_ODE_METHODS = (
        Update_IC_V1,
        Update_SP_V1,
        Update_DV_V1,
        Update_DG_V1,
        Update_HQ_V1,
        Update_HS_V1,
    )
    # methods executed after integration, including passing runoff downstream
    OUTLET_METHODS = (
        Calc_ET_V1,
        Calc_R_V1,
        Pass_R_V1,
    )
    SENDER_METHODS = ()
    # iterative submodels (root finding and adaptive quadrature)
    SUBMODELS = (
        PegasusDGEq,
        QuadDVEq_V1,
        QuadDVEq_V2,
) | PypiClean |
/BxModels-0.4.0-py3-none-any.whl/bxmodels/graph/gcn.py |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.jit as jit
import bxtorch.nn as xnn
class GCNConfig(xnn.Config):
    """
    The GCN config can be used to customize the GCN.
    """

    # The number of features attached to each node.
    num_features: int
    # The number of classes to use for a classification task.
    num_classes: int
    # The size(s) of the hidden layer(s).
    # NOTE(review): mutable class-level default ([]) - presumably xnn.Config
    # copies it per instance; confirm, otherwise instances would share the list.
    hidden_layers: list = []
    # The dropout rate to use in between any layers (apart from the output layer).
    dropout: float = 0.5
    # Whether to use ReLU activation after every layer (apart from the output layer).
    use_relu: bool = True
class GCN(xnn.Configurable, xnn.Estimator, nn.Module):
    """
    Implementation of the graph convolutional network as presented in
    "Semi-Supervised Classification with Graph Convolutional Networks"
    (Kipf & Welling, 2017).
    """

    __config__ = GCNConfig
    __engine__ = xnn.SupervisedEngine

    # MARK: Initialization
    def __init__(self, config):
        super().__init__(config)
        # one graph convolution per consecutive pair of layer sizes
        layer_sizes = [self.num_features] + self.hidden_layers + [self.num_classes]
        self.convolutions = nn.ModuleList([
            GraphConvolution(fan_in, fan_out)
            for fan_in, fan_out in zip(layer_sizes, layer_sizes[1:])
        ])

    # MARK: Instance Methods
    # pylint: disable=arguments-differ
    def forward(self, A, X):
        """
        Computes the forward pass of the GCN.

        Parameters:
        -----------
        - A: torch.sparse.FloatTensor [N, N]
            The normalized adjacency matrix of the graph with N nodes.
        - X: torch.FloatTensor [N, D]
            The normalized feature matrix where each node has D features.

        Returns:
        --------
        - torch.FloatTensor [N, C]
            Class probabilities for all C classes for each node of the graph.
        """
        hidden = X
        for index, convolution in enumerate(self.convolutions):
            # activation and dropout precede every layer except the first,
            # i.e. they are applied between convolutions but not after the last
            if index > 0:
                if self.use_relu:
                    hidden = F.relu(hidden)
                hidden = F.dropout(hidden, self.dropout, training=self.training)
            hidden = convolution(A, hidden)
        # binary task: a single sigmoid unit; otherwise a softmax over classes
        if self.num_classes == 1:
            return torch.sigmoid(hidden).view(-1)
        return F.softmax(hidden, dim=-1)
class GraphConvolution(nn.Module):
    """
    Graph convolution layer for the graph convolutional network.

    Computes ``A @ (H @ W) (+ b)`` where ``A`` is a (sparse) normalized
    adjacency matrix and ``W``/``b`` are the learned parameters.
    """

    use_bias: jit.Final[bool]

    # MARK: Initialization
    def __init__(self, in_dim, out_dim, use_bias=True):
        """
        Initializes a new graph convolution layer.

        Parameters:
        -----------
        - in_dim: int
            The dimension of the inputs.
        - out_dim: int
            The dimension of the outputs.
        - use_bias: bool
            Whether to add a learned bias after the propagation step.
        """
        super().__init__()
        self.use_bias = use_bias
        self.weight = nn.Parameter(torch.FloatTensor(in_dim, out_dim))
        if use_bias:
            self.bias = nn.Parameter(torch.FloatTensor(out_dim))
        self.reset_parameters()

    # MARK: Instance Methods
    def reset_parameters(self):
        """
        Resets the parameters of the model's weights.
        """
        # uniform initialization scaled by the layer's output dimension
        bound = 1 / math.sqrt(self.weight.size(1))
        nn.init.uniform_(self.weight, -bound, bound)
        if self.use_bias:
            nn.init.uniform_(self.bias, -bound, bound)

    # pylint: disable=arguments-differ
    def forward(self, A, H):
        """
        Computes the forward pass of the graph convolution layer.

        Parameters:
        -----------
        - A: torch.sparse.Tensor [N, N]
            The normalized adjacency matrix of the graph with N nodes.
        - H: torch.Tensor [N, K]
            The hidden node representations of the preceding layer with K
            dimensions.

        Returns:
        --------
        - torch.Tensor [N, D]
            The hidden representation of this layer with D dimensions.
        """
        # linear transform first, then (sparse) neighborhood aggregation
        propagated = torch.spmm(A, H.matmul(self.weight))
        return propagated + self.bias if self.use_bias else propagated
/KeralaPyApiV2-2.0.2020.tar.gz/KeralaPyApiV2-2.0.2020/pyrogram/client/types/user_and_chats/chat_photo.py |
from struct import pack
import pyrogram
from pyrogram.api import types
from pyrogram.client.ext import utils
from ..object import Object
from ...ext.utils import encode_file_id
class ChatPhoto(Object):
    """A chat photo.

    Parameters:
        small_file_id (``str``):
            File identifier of small (160x160) chat photo.
            This file_id can be used only for photo download and only for as long as the photo is not changed.

        big_file_id (``str``):
            File identifier of big (640x640) chat photo.
            This file_id can be used only for photo download and only for as long as the photo is not changed.
    """

    def __init__(
        self,
        *,
        client: "pyrogram.BaseClient" = None,
        small_file_id: str,
        big_file_id: str
    ):
        super().__init__(client)

        self.small_file_id = small_file_id
        self.big_file_id = big_file_id

    @staticmethod
    def _parse(client, chat_photo: types.UserProfilePhoto or types.ChatPhoto, peer_id: int, peer_access_hash: int):
        # Build downloadable file_ids for both photo sizes from a raw
        # UserProfilePhoto/ChatPhoto object; returns None for any other input
        # or when no access hash is available (required for the file_id payload).
        if not isinstance(chat_photo, (types.UserProfilePhoto, types.ChatPhoto)):
            return None

        if not peer_access_hash:
            return None

        # ChatPhoto has no photo_id field, hence the 0 fallback
        photo_id = getattr(chat_photo, "photo_id", 0)
        loc_small = chat_photo.photo_small
        loc_big = chat_photo.photo_big

        peer_type = utils.get_peer_type(peer_id)

        # ``x`` is an extra discriminator packed into the file_id that depends
        # on the peer type
        if peer_type == "user":
            x = 0
        elif peer_type == "chat":
            x = -1
        else:
            # NOTE(review): presumably converts a channel id into the marked
            # form expected inside file_id payloads - confirm against
            # Telegram's file_id layout before changing.
            peer_id += 1000727379968
            x = -234

        # The packed layout mirrors Telegram's binary file_id format; the
        # constants 2 and 3 appear to select the small/big size variant
        # (TODO confirm), 1 being the media-type tag.
        return ChatPhoto(
            small_file_id=encode_file_id(
                pack(
                    "<iiqqqiiiqi",
                    1, chat_photo.dc_id, photo_id,
                    0, loc_small.volume_id,
                    2, peer_id, x, peer_access_hash, loc_small.local_id
                )
            ),
            big_file_id=encode_file_id(
                pack(
                    "<iiqqqiiiqi",
                    1, chat_photo.dc_id, photo_id,
                    0, loc_big.volume_id,
                    3, peer_id, x, peer_access_hash, loc_big.local_id
                )
            ),
            client=client
) | PypiClean |
/FuzzyClassificator-1.3.84-py3-none-any.whl/pybrain/structure/evolvables/cheaplycopiable.py | __author__ = 'Tom Schaul, [email protected]'
from pybrain.structure.parametercontainer import ParameterContainer
from pybrain.structure.modules.module import Module
class CheaplyCopiable(ParameterContainer, Module):
    """A shallow view of a module: copying and mutating touch only the
    parameter vector, never the underlying network structure."""

    def __init__(self, module):
        self.__stored = module
        self._params = module.params.copy()
        self.paramdim = module.paramdim
        self.name = module.name + '-COPY'
        self.indim = module.indim
        self.outdim = module.outdim

    def copy(self):
        # push the (possibly mutated) parameters back before cloning
        self.__stored._params[:] = self._params
        return CheaplyCopiable(self.__stored)

    def convertToFastNetwork(self):
        self.__stored._params[:] = self._params
        return CheaplyCopiable(self.__stored.convertToFastNetwork())

    @property
    def derivs(self):
        return self.__stored.derivs

    @property
    def _derivs(self):
        # NOTE(review): delegates to ``derivs`` (not ``_derivs``) on the
        # wrapped module, mirroring the original implementation - confirm
        # this is intentional.
        return self.__stored.derivs

    @property
    def outputbuffer(self):
        return self.__stored.outputbuffer

    @property
    def inputerror(self):
        return self.__stored.inputerror

    def reset(self):
        self.__stored.reset()

    def _resetBuffers(self):
        self.__stored._resetBuffers()

    def forward(self, *args, **kwargs):
        # synchronise parameters, then delegate to the wrapped module
        self.__stored._params[:] = self._params
        return self.__stored.forward(*args, **kwargs)

    def backward(self, *args, **kwargs):
        self.__stored._params[:] = self._params
        return self.__stored.backward(*args, **kwargs)

    def activate(self, *args, **kwargs):
        self.__stored._params[:] = self._params
        return self.__stored.activate(*args, **kwargs)

    def backActivate(self, *args, **kwargs):
        self.__stored._params[:] = self._params
        return self.__stored.backActivate(*args, **kwargs)

    def randomize(self, *args, **kwargs):
        # mutate our own parameter vector, then mirror it into the module
        ParameterContainer.randomize(self, *args, **kwargs)
        self.__stored._params[:] = self._params

    def mutate(self, *args, **kwargs):
        ParameterContainer.mutate(self, *args, **kwargs)
        self.__stored._params[:] = self._params

    def getBase(self):
        # synchronise and hand back the wrapped module itself
        self.__stored._params[:] = self._params
        return self.__stored

    def resetDerivatives(self):
        self.__stored.resetDerivatives()
/NSI_Bertrand-0.5.1.tar.gz/NSI_Bertrand-0.5.1/NSI_Bertrand/tri/tri.py | __all__ = [
"est_triee",
"bulles",
"insertion",
"selection_min",
"fusion",
"pivot",
"comptage",
]
def est_triee(l):
    """Check whether a list is sorted in non-decreasing order.

    :param l: a list of numbers
    :return: True if the list is sorted, False otherwise

    :example:
    >>> est_triee([1, 2, 5, 6])
    True
    >>> est_triee([1, 2, 6, 5])
    False
    """
    return all(left <= right for left, right in zip(l, l[1:]))
def bulles(l):
    """Sort a list with the bubble-sort method.

    The input list is left untouched; a sorted copy is returned.

    :param l: a list of numbers
    :return: a sorted copy of the list

    >>> bulles([1, 2, 6, 5])
    [1, 2, 5, 6]
    """
    tab = l[:]
    n = len(tab)
    for i in range(n):
        swapped = False
        for j in range(n - i - 1):
            if tab[j] > tab[j + 1]:
                tab[j], tab[j + 1] = tab[j + 1], tab[j]
                swapped = True
        # a full pass without swaps means the list is already sorted,
        # so further passes are pointless (early-exit optimization)
        if not swapped:
            break
    return tab
def _insert(elem, tab):
if len(tab) == 0:
return [elem]
if tab[0] > elem:
return [elem] + tab
return [tab[0]] + _insert(elem, tab[1:])
def insertion(tab):
    """Sort a list with the insertion-sort method.

    :param tab: a list of numbers
    :return: a sorted copy of the list

    >>> insertion([1, 2, 6, 5])
    [1, 2, 5, 6]
    """
    ordered = []
    for value in tab:
        # find the first slot whose element exceeds ``value`` and insert
        # there (equal elements end up before the new one, keeping the
        # same placement as the original helper-based implementation)
        pos = 0
        while pos < len(ordered) and ordered[pos] <= value:
            pos += 1
        ordered.insert(pos, value)
    return ordered
def _extract_min(tab):
vmin = tab[0]
head = []
stack = []
for e in tab[1:]:
if e < vmin:
head.append(vmin)
head += stack
stack = []
vmin = e
else:
stack.append(e)
return (vmin, head + stack)
def selection_min(tab):
    """Sort a list with the selection-of-minimum method.

    :param tab: a list of numbers
    :return: a sorted copy of the list

    >>> selection_min([1, 2, 6, 5])
    [1, 2, 5, 6]
    """
    remaining = tab[:]
    result = []
    # repeatedly pull the smallest remaining element to the front
    while remaining:
        smallest, remaining = _extract_min(remaining)
        result.append(smallest)
    return result
def _fusion(tab1, tab2):
""" Fusion de deux listes ordonnées
>>> _fusion([1, 4], [2, 3, 6])
[1, 2, 3, 4, 6]
>>> _fusion([2, 3, 6], [1, 4])
[1, 2, 3, 4, 6]
>>> _fusion([1], [2])
[1, 2]
>>> _fusion([2], [1])
[1, 2]
"""
i = 0
j = 0
l_tab1 = len(tab1)
l_tab2 = len(tab2)
fusionned = []
while i < l_tab1 and j < l_tab2:
if tab1[i] < tab2[j]:
fusionned.append(tab1[i])
i += 1
else:
fusionned.append(tab2[j])
j += 1
if i == len(tab1):
fusionned += tab2[j:]
if j == len(tab2):
fusionned += tab1[i:]
return fusionned
def fusion(tab):
    """Sort a list with the merge-sort method.

    :param tab: a list of numbers
    :return: a sorted copy of the list

    >>> fusion([1, 2, 6, 5])
    [1, 2, 5, 6]
    """
    # ``<= 1`` also covers the empty list: the previous ``== 1`` test let
    # ``fusion([])`` recurse on itself forever (RecursionError)
    if len(tab) <= 1:
        return tab
    middle = len(tab) // 2
    return _fusion(fusion(tab[middle:]), fusion(tab[:middle]))
def pivot(tab):
    """Sort a list with the pivot (quicksort) method.

    :param tab: a list of numbers
    :return: a sorted copy of the list

    >>> pivot([1, 2, 6, 5])
    [1, 2, 5, 6]
    """
    if len(tab) <= 1:
        return tab
    # partition around the last element, then sort both halves recursively
    pivot_value = tab[-1]
    rest = tab[:-1]
    smaller = [x for x in rest if x <= pivot_value]
    bigger = [x for x in rest if x > pivot_value]
    return pivot(smaller) + [pivot_value] + pivot(bigger)
def comptage(L):
    """Sort a list of integers with the counting-sort method.

    :param L: a list of integers (the histogram indexing requires ints)
    :return: a sorted copy of the list; ``[]`` for an empty input

    >>> comptage([1, 2, 6, 5])
    [1, 2, 5, 6]
    """
    # guard the empty list: min()/max() raise ValueError otherwise
    if not L:
        return []
    vmin = min(L)
    vmax = max(L)
    # one counter per value in [vmin, vmax]
    counts = [0] * (vmax - vmin + 1)
    for value in L:
        counts[value - vmin] += 1
    ans = []
    for offset, count in enumerate(counts):
        ans += [offset + vmin] * count
    return ans
/D365BCAPI_GEDASB-0.0.1b1-py3-none-any.whl/D365BCAPI/sample.py | from D365BCAPI.D365BCv1API import Connect
"""
This is sample usage of D365BCv1API. Used standard Dynamics 365 Business Central platform 16.xx(17.xx) objects
API v1.0 pages.
Existing API pages can be get http://{server}:{port}/{tenant}/api/v1.0/
metadata can be get http://{server}:{port}/{tenant}/api/v1.0/$metadata
Sample flow is:
1. Looking for customer name starting by "Cronus". If do not exists - create. "Recordset read & record create" example
2. Looking for items '1996-S' '2000-S' and g/l account '6610'. Get it id - requires for sales order creation.
"Get field value" example
3. Create sales order with 4 lines: 2 item lines, g/l account line, comment line. Get created sales order documentid.
Few related "records creation" example
4. Add 2 more lines to the existing order. "Add lines to existing order" example.
5. Modify description in comment line in existing order. "Modify existing line" example
6. Delete one line from existing order. "Record delete" example
7. Execute action Microsoft.NAV.shipAndInvoice on just created sales order. "Execute action" example
8. Check is invoice created and what are Total Amount and Remaining Amount in it.
"""
# --- 1. Ensure a customer exists: read by name prefix, create if missing ---
user = psw = "a"  # basic authentication
# customers
url_customers = "http://bs17:7048/BC/api/v1.0/customers"  # page 5471
custname = 'Cronus'  # begin of customer name
# create connection object: url, basic authentication, headers recommended by MS
customers = Connect(url_customers, (user, psw), {"Accept-Language": "en-us"})
# we can only find customer by begin of name
customers.filter_text = f"displayName ge '{custname}' and displayName le '{custname}Z'"
# filter is: where displayName is greater or equal to Cronus and less or equal CronusZ
response_list = customers.read()  # read filtered customers
if len(response_list) > 0:
    print("Read customers", response_list[0].get("displayName"))  # 1st customer name
if customers.except_error is not None:
    raise Exception(customers.except_error)
if len(response_list) > 0:  # customer exists
    custno = response_list[0].get("number")  # if customer exists then get it No.
else:  # create customer if not found
    custno = "91000"
    new_customer = {
        "number": custno,
        "displayName": "Cronusb Ski House",
        "type": "Company",
        "phoneNumber": "256 123456",
        "email": "[email protected]",
        "website": "cronus.co.uk",
        "taxLiable": False,
        "currencyCode": "EUR",
        "blocked": " ",
        "address": {
            "street": "Paco str 2",
            "city": "Vilnius",
            "state": "",
            "countryLetterCode": "LT",
            "postalCode": "LT-25126"}
    }
    response_list = customers.insert(new_customer)  # new customer is created
print("Sales order Customer No", custno)
# --- 2. Resolve the ids of two items and one G/L account for the order lines ---
# find item and itemId - it requires for sales document lines creation
url_item = "http://bs17:7048/BC/api/v1.0/items"  # page 5470
item = Connect(url_item, (user, psw), {"Accept-Language": "en-us"})
item.filter_text = "number eq '1996-S'"
item_response = item.read()
item_1_id = None
if len(item_response) > 0:  # item exists
    item_1_id = item_response[0].get("id")  # get item1 id
item.filter_text = "number eq '2000-S'"  # change filter and call for another item
item_response = item.read()
item_2_id = None
if len(item_response) > 0:  # item exists
    item_2_id = item_response[0].get("id")  # get item2 id
# find g/l account and itemId - it requires for sales document lines
url_account = "http://bs17:7048/BC/api/v1.0/accounts"
account = Connect(url_account, (user, psw), {"Accept-Language": "en-us"})
account.filter_text = "number eq '6610'"  # g/l account no is 6610
account_response = account.read()
account_id = None
if len(account_response) > 0:  # account exists
    account_id = account_response[0].get("id")  # get account id, will be used in order line
# --- 3. Create (or find) the sales order, identified by its external doc no ---
# create sales order
# new order dictionary NAV page 5495 and lines NAV page 5496
ext_doc_no = "FDA 17598"  # Only by external document no we can find sales order,
# as for document no is used No. Series
new_order = {
    "externalDocumentNumber": ext_doc_no,  # this is number we'll search created document and get it No.
    "orderDate": "2020-02-06",  # limited by CRONUS demo license date range
    "customerNumber": custno,  # customer number taken/created in previous step
    "currencyCode": "EUR",
    "pricesIncludeTax": False,
    "salesperson": "PS",
    "requestedDeliveryDate": "2020-02-15",
    "status": "Draft",
    "phoneNumber": "370 698 13123",
    "email": "[email protected]",
    "shippingPostalAddress": {  # Changing shipping address just for fun
        "street": "Another Str. 77",
        "city": "Hamburg",
        "state": "",
        "countryLetterCode": "DE",
        "postalCode": "DE-20097"
    },
    "salesOrderLines": [  # Navigation property to "Collection(Microsoft.NAV.salesOrderLine)"
        {
            "sequence": "10000",  # mandatory line number
            "lineType": "Item",
            # line type (Comment, Accounts, Item, Resource, Fixed Asset, Charge) can be found at table 5476 >
            # field(9029; "API Type"; Enum "Invoice Line Agg. Line Type")
            "itemId": item_1_id,  # mandatory item_Id (or blank if account_id is used)
            "description": "Customized item description in line",
            "quantity": 2.0,
            "discountPercent": 5
        },
        {
            "sequence": "20000",  # 2nd line
            "lineType": "Item",
            "itemId": item_2_id,
            "quantity": 1.0
        },
        {
            "sequence": "30000",  # 3rd line comments
            "lineType": "Comment",
            "description": "This is Comments line"
        },
        {
            "sequence": "40000",  # 4th line g/l account
            "lineType": "Account",
            "accountId": account_id,  # mandatory account id
            "quantity": 1.0,
            "unitPrice": 100
        }
    ]
}
url_so = "http://bs17:7048/BC/api/v1.0/salesOrders"  # NAV page 5495
so = Connect(url_so, (user, psw), {"Accept-Language": "en-us"})  # create sales order header object
so.filter_text = f"externalDocumentNumber eq '{ext_doc_no}'"
response_list = so.read()  # looking for Sales Order with known external doc no
if len(response_list) > 0:  # order exists and we take order id
    so_number = response_list[0].get("number")  # get order No.
    so_id = response_list[0].get("id")  # SO id we need later for lines edit
else:  # no order with specified external document No. exists
    response_list = so.insert(new_order)  # create new order with specified external doc no
    if (len(response_list) > 0) and so.except_error is None:
        print("Sales order is created", response_list)  # [201, 'Created'] if everything is OK
    else:
        raise Exception(so.except_error)
    # re-read to pick up the No. Series number assigned on the server side
    so.filter_text = f"externalDocumentNumber eq '{ext_doc_no}'"
    response_list = so.read()  # looking for Sales Order with known external doc no
    if len(response_list) > 0:
        so_number = response_list[0].get("number")  # get just created order No. - later will use it to find invoice
        so_id = response_list[0].get("id")  # SO id we need later for lines edit
print("SO No", so_number)
# --- 4. Line management: add two lines, modify one, delete one, count lines ---
# created order lines management
# we need sales order document_id to add it to endpoint url for record editing
if len(so_id) > 0:  # if doc id exists then we go to read lines of this doc
    url_sol = f"http://bs17:7048/BC/api/v1.0/salesOrders({so_id})/salesOrderLines"
else:
    raise Exception('Critical error - Can not find document')
sol = Connect(url_sol, (user, psw), {"Accept-Language": "en-us"})  # new connection to lines
response_list = sol.read()  # read all lines as we need only few lines for editing
if (len(response_list) > 0) and sol.except_error is None:
    print(f"SO has {len(response_list)} lines")  # number of lines in the document
else:
    raise Exception(sol.except_error)
# add new line in order
line_no = 35000  # line No or sequence
line2_no = 37500  # line No or sequence
line_insert = {
    "sequence": line_no,  # after 3rd line
    "lineType": "Item",
    "itemId": item_2_id,
    "quantity": 3.0
}
response_list = sol.insert(line_insert)  # insert line
if (len(response_list) > 0) and sol.except_error is None:
    print("Added line 35000: Item - 1996-S", response_list)  # [201, 'Created'] if everything is OK
else:
    raise Exception(sol.except_error)
# add one more line and later delete it
line_insert = {
    "sequence": line2_no,  # after 3rd line
    "lineType": "Item",
    "itemId": item_1_id,
    "quantity": 1.0
}
response_list = sol.insert(line_insert)  # insert fake line
if (len(response_list) > 0) and sol.except_error is None:
    print("Added line 37500: Item - '2000-S'", response_list)  # [201, 'Created'] if everything is OK
else:
    raise Exception(sol.except_error)
# count lines
sol.url = f"http://bs17:7048/BC/api/v1.0/salesOrders({so_id})/salesOrderLines"
response_list = sol.read()  # read all lines again to count them
if (len(response_list) > 0) and sol.except_error is None:
    print(f"SO has {len(response_list)} lines after added 2")  # number of lines in the document
else:
    raise Exception(sol.except_error)
# for line editing we need to have line id, find it by search line with line no 30000
line_no = 30000
sol.filter_text = f"sequence eq {line_no}"  # add filter ?$filter=sequence eq 30000
response_list = sol.read()  # get line from document; line No is 30000
if (len(response_list) > 0) and sol.except_error is None:
    line_id = response_list[0].get('id')  # get line id
    print(f"Line id is {line_id}")
    print("description before update is", response_list[0].get("description"))
else:
    raise Exception(sol.except_error)
# modify existing line: it is line no 30000
line_update = {"description": "This is updated Comments line"}  # new info to update line
sol.url = f"http://bs17:7048/BC/api/v1.0/salesOrderLines('{line_id}')"  # adding line_id to url
sol.filter_text = ''
# for beta api document key was document id and sequence
# sol.url = f"http://bs17:7048/BC/api/v1.0/salesOrderLines({so_id},{line_no})"
response_list = sol.modify(line_update)  # update line with the new info dict
if (len(response_list) > 0) and sol.except_error is None:
    print("Modified line 30000 description now is 'This is updated Comments line'", response_list)
else:
    raise Exception(sol.except_error)
# delete line
line_no = 37500  # line No (sequence in response json)
# in API beta order line url includes document id and line no (line primary key)
# sol.url = f"http://bs17:7048/BC/api/v1.0/salesOrderLines({so_id},{line_no})"
##
# in API v1.0 we need to find sales line id and identify it by id
# we looking for line with sequence 37500
sol.url = f"http://bs17:7048/BC/api/v1.0/salesOrders({so_id})/salesOrderLines"
sol.filter_text = f"sequence eq {line_no}"  # add filter ?$filter=sequence eq 37500
response_list = sol.read()  # get line with filtered sequence 37500
if (len(response_list) > 0) and sol.except_error is None:
    line_id = response_list[0].get('id')
    # NOTE(review): the message text says "3750" although the sequence is 37500
    print(f"Line 3750 id is {line_id}")
else:
    raise Exception(sol.except_error)
sol.url = f"http://bs17:7048/BC/api/v1.0/salesOrderLines('{line_id}')"  # adding line_id to URL
sol.filter_text = ''  # remove any filters
response_list = sol.delete()  # delete the line found above
if (len(response_list) > 0) and sol.except_error is None:
    print("Deleted fake line 37500", response_list)
else:
    raise Exception(sol.except_error)
# count lines
sol.url = f"http://bs17:7048/BC/api/v1.0/salesOrders({so_id})/salesOrderLines"
response_list = sol.read()  # read all lines once more to count them
print(f"SO has {len(response_list)} lines after deleted one")  # number of lines in the document
# --- 5. Execute the bound action Microsoft.NAV.shipAndInvoice on the order ---
# execute action - order ship and invoice
# http://bs17:7048/BC/api/v1.0/salesOrders({so_id})/Microsoft.NAV.shipAndInvoice
if len(so_id) > 0:  # if doc id not blank (we found it earlier) then we can ship and invoice it
    so.url = f"http://bs17:7048/BC/api/v1.0/salesOrders({so_id})/Microsoft.NAV.shipAndInvoice"  # create URL
else:
    raise Exception('Critical error - Can not find document')
response_list = so.exe()  # execute sales order action "ship and invoice"
if (len(response_list) > 0) and so.except_error is None:
    print("Sales order is shipped and invoiced; response is ", response_list)
else:
    raise Exception(so.except_error)
# --- 6. Find the posted sales invoice created from the order, report totals ---
# find just created invoice by "orderNumber" = so_number
url_salesInvoices = "http://bs17:7048/BC/api/v1.0/salesInvoices"
si = Connect(url_salesInvoices, (user, psw), {"Accept-Language": "en-us"})
# NOTE: a leftover debug line previously overwrote so_number with the
# hard-coded value '1001' here, so the script reported the invoice of an
# unrelated order; the order number captured after creation is used instead.
si.filter_text = f"orderNumber eq '{so_number}'"
response_list = si.read()
if (len(response_list) > 0) and si.except_error is None:
    si_number = response_list[0].get('number')
    si_totalAmount = response_list[0].get('totalAmountIncludingTax')
    si_remainingAmount = response_list[0].get('remainingAmount')
    print(f'Sales Invoice {si_number} is created \n Total Amount {si_totalAmount} \n '
          f'Remaining Amount {si_remainingAmount}')
else:
    raise Exception(si.except_error)
# Response from execution sample is
# Read customers Cronus Cardoxy Procurement
# Sales order Customer No IC1030
# Sales order is created [201, 'Created']
# SO No 1004
# SO has 4 lines
# Added line 35000: Item - 1996-S [201, 'Created']
# Added line 37500: Item - '2000-S' [201, 'Created']
# SO has 6 lines after added 2
# Line id is 2043b1e7-811e-eb11-b334-ef426b246304-30000
# description before update is This is Comments line
# Modified line 30000 description now is 'This is updated Comments line' [200, 'OK']
# Line 3750 id is 2043b1e7-811e-eb11-b334-ef426b246304-37500
# Deleted fake line 37500 [204, 'No Content']
# SO has 5 lines after deleted one
# Sales order is shipped and invoiced; response is [204, 'No Content']
# Sales Invoice 103032 is created
# Total Amount 3531.3
# Remaining Amount 3531.3 | PypiClean |
/Djblets-3.3.tar.gz/Djblets-3.3/djblets/htdocs/static/djblets/js/forms/models/conditionChoiceModel.es6.js | Djblets.Forms.ConditionChoice = Backbone.Model.extend({
defaults: {
name: null,
valueField: null
},
/**
* Initialize the choice.
*
* Attributes:
* operators (Array):
* The list of operators to populate the operators collection.
*/
initialize(attributes) {
this.operators = new Backbone.Collection(attributes.operators, {
model: Djblets.Forms.ConditionOperator,
parse: true
});
},
/**
* Create the value field for the choice.
*
* This will construct a new instance of the view used to take values for
* this choice.
*
* Args:
* fieldName (string):
* The name for the form field.
*
* Returns:
* Djblets.Forms.BaseConditionValueFieldView:
* The view for the field.
*/
createValueField(fieldName) {
const valueField = this.get('valueField');
return new valueField.viewClass(_.defaults({
model: new valueField.modelClass(_.defaults({
fieldName: fieldName
}, valueField.modelData))
}, valueField.viewData));
},
/**
* Parse the attribute data passed to the model.
*
* Args:
* data (object):
* The attribute data passed to the model.
*
* Returns:
* object:
* The parsed attributes.
*/
parse(data) {
return {
id: data.id,
name: data.name,
valueField: Djblets.Forms.ConditionChoice.parseValueFieldData(
data.valueField)
};
}
}, {
/**
* Parse value field data into a standard structure.
*
* This can be used by any choice-related class that needs to deal with
* value fields. It's intended for internal use only.
*
* Args:
* data (object):
* The value field data to parse.
*
* Returns:
* dict:
* The resulting value field information, or ``null`` if the data
* provided is ``undefined``.
*/
parseValueFieldData(data) {
let valueField = null;
if (data !== undefined) {
const fieldModelInfo = data.model;
const fieldViewInfo = data.view;
valueField = {
modelClass: Djblets.getObjectByName(fieldModelInfo.className),
modelData: fieldModelInfo.data,
viewClass: Djblets.getObjectByName(fieldViewInfo.className),
viewData: fieldViewInfo.data
};
}
return valueField;
}
}); | PypiClean |
/IPChecker-1.2.tar.gz/IPChecker-1.2/README.txt | ##################################
Program: IPChecker
Author: Boumediene Kaddour
Country: Algeria
##################################
IPChecker is a tiny Python library that is used to check whether an IPv4 address is Private, Public or Invalid.
The library returns Booleans and contains a couple of methods, summarized as follows:
isValid(): This method returns True if a valid IPv4 address is given; otherwise, it returns False.
isPrivate(): This method returns True if the given IP
is private; otherwise, False is returned.
isMCast(): This method returns True if a valid IPv4 address is given and it is within the multicast IP range; otherwise, it returns False.
isPublic(): This method returns True if the given IP is a valid IPv4 address that is neither private nor multicast; otherwise, False is returned.
Usage:
>>> from IPChecker import IPChecker
>>> obj = IPChecker("172.16.122.254")
>>> obj.isValid()
True
>>> obj.isPrivate()
True
>>> obj.isPublic()
False
Here is an example of how you can check for a valid IPv4 address using regular expressions;
in this example, I'm going to be using the built-in re module in Python:
>>> from re import findall
>>> def isValid(ip):
>>>     if findall(r"^(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)$", ip):
>>>         return True
>>>     else:
>>>         return False
| PypiClean |
/Kr0nOs-3.4.1.tar.gz/Kr0nOs-3.4.1/kronbot/cogs/downloader/errors.py | from __future__ import annotations
from typing import TYPE_CHECKING, List
if TYPE_CHECKING:
from .repo_manager import Candidate
# Public exception hierarchy exported by this module.
__all__ = [
    "DownloaderException",
    "GitException",
    "InvalidRepoName",
    "CopyingError",
    "ExistingGitRepo",
    "MissingGitRepo",
    "CloningError",
    "CurrentHashError",
    "HardResetError",
    "UpdateError",
    "GitDiffError",
    "NoRemoteURL",
    "UnknownRevision",
    "AmbiguousRevision",
    "PipError",
]
class DownloaderException(Exception):
    """Root of the Downloader exception hierarchy."""
class GitException(DownloaderException):
    """Generic error raised when a git command fails.

    The offending command line is kept on ``git_command``.
    """

    def __init__(self, message: str, git_command: str) -> None:
        self.git_command = git_command
        details = f"Git command failed: {git_command}\nError message: {message}"
        super().__init__(details)
class InvalidRepoName(DownloaderException):
    """Raised when a repo name is invalid; the message explains why."""
class CopyingError(DownloaderException):
    """Raised when there was an issue while copying a module's files."""
class ExistingGitRepo(DownloaderException):
    """Raised when trying to clone into a folder that already holds a git repo."""
class MissingGitRepo(DownloaderException):
    """Raised when a git repo is expected to exist but does not."""
class CloningError(GitException):
    """Raised when ``git clone`` exits with a non-zero status."""
class CurrentHashError(GitException):
    """Raised when git fails while determining the current commit hash."""
class HardResetError(GitException):
    """Raised when a hard reset fails (usually prior to a repo update)."""
class UpdateError(GitException):
    """Raised when ``git pull`` exits with a non-zero status."""
class GitDiffError(GitException):
    """Raised when a ``git diff`` fails."""
class NoRemoteURL(GitException):
    """Raised when no remote URL exists for a repo."""
class UnknownRevision(GitException):
    """Raised when the specified revision cannot be found."""
class AmbiguousRevision(GitException):
    """Raised when the specified revision matches more than one candidate.

    The matching candidates are kept on ``candidates``.
    """

    def __init__(self, message: str, git_command: str, candidates: List[Candidate]) -> None:
        super().__init__(message, git_command)
        self.candidates = candidates
class PipError(DownloaderException):
    """Raised when pip exits with a non-zero return code."""
/Draugr-1.0.9.tar.gz/Draugr-1.0.9/draugr/opencv_utilities/color_space/threshold.py | from enum import Enum
from typing import MutableMapping
import cv2
import numpy
from sorcery import assigned_names
__all__ = ["threshold_channel", "ThresholdMethodEnum", "hsv_min_max_clip_mask"]
from draugr.opencv_utilities.namespaces.flags import ThresholdTypeFlag
from draugr.opencv_utilities.namespaces.color_conversion_enum import ColorConversionEnum
class ThresholdMethodEnum(Enum):
    """Supported thresholding strategies.

    Values equal the member names (the same result the previous
    ``sorcery.assigned_names()`` magic produced), so lookups like
    ``ThresholdMethodEnum("simple")`` keep working; being explicit
    removes the dependence on sorcery's call-site introspection.
    """

    simple = "simple"
    adaptive = "adaptive"
def threshold_channel(
    gray: numpy.ndarray,
    method: ThresholdMethodEnum = ThresholdMethodEnum.simple,
    **kwargs: MutableMapping
) -> numpy.ndarray:
    """Threshold a single-channel (grayscale) image.

    :param gray: single-channel input image.
    :param method: thresholding strategy (coerced through ThresholdMethodEnum).
    :param kwargs: per-method overrides:
        ``simple``   -- ``thresh`` (default 120), ``maxval`` (default 255);
        ``adaptive`` -- ``maxValue`` (default 255), ``adaptiveMethod``,
                        ``thresholdType``, ``blockSize`` (default 11),
                        ``C`` (default 2).
    :return: the binarized image produced by the selected method.
    :raises NotImplementedError: safety net for unhandled enum members.
    """
    method = ThresholdMethodEnum(method)
    if method == ThresholdMethodEnum.simple:
        return cv2.threshold(
            gray,
            thresh=kwargs.get("thresh", 120),
            maxval=kwargs.get("maxval", 255),
            type=ThresholdTypeFlag.otsu.value
            + ThresholdTypeFlag.binary.value,  # +ThresholdTypeFlag.to_zero.value,
        )[1]
    elif method == ThresholdMethodEnum.adaptive:
        # Generalized: the adaptive parameters used to be hard-coded and
        # silently ignored kwargs; they now honor kwargs while keeping the
        # original values as backward-compatible defaults.
        return cv2.adaptiveThreshold(
            gray,
            maxValue=kwargs.get("maxValue", 255),
            adaptiveMethod=kwargs.get("adaptiveMethod", cv2.ADAPTIVE_THRESH_GAUSSIAN_C),
            thresholdType=kwargs.get("thresholdType", cv2.THRESH_BINARY),
            blockSize=kwargs.get("blockSize", 11),
            C=kwargs.get("C", 2),
        )
    raise NotImplementedError
def hsv_min_max_clip_mask(
    input_image: numpy.ndarray,
    lower_bound: numpy.ndarray = numpy.array([0, 0, 0]),
    upper_bound: numpy.ndarray = numpy.array(
        [179, 255, 255]
    ),  # Hue is from 0-179 for Opencv
) -> numpy.ndarray:
    """Build a binary mask of the pixels whose HSV values fall inside
    ``[lower_bound, upper_bound]``.

    :param input_image: BGR image to mask.
    :param lower_bound: inclusive lower HSV bound (H, S, V).
    :param upper_bound: inclusive upper HSV bound; hue tops out at 179 in OpenCV.
    :return: single-channel mask, nonzero where the pixel is inside the range.
    """
    hsv_image = cv2.cvtColor(input_image, ColorConversionEnum.bgr2hsv.value)
    return cv2.inRange(hsv_image, lowerb=lower_bound, upperb=upper_bound)
/Glances-3.4.0.3.tar.gz/Glances-3.4.0.3/glances/exports/glances_mongodb.py | import sys
from glances.logger import logger
from glances.exports.glances_export import GlancesExport
import pymongo
from urllib.parse import quote_plus
class Export(GlancesExport):
    """This class manages the MongoDB export module."""

    def __init__(self, config=None, args=None):
        """Init the MongoDB export IF."""
        super(Export, self).__init__(config=config, args=args)

        # Mandatory configuration keys (additional to host and port)
        self.db = None

        # Optional configuration keys
        self.user = None
        self.password = None

        # Load the MongoDB configuration file section
        self.export_enable = self.load_conf('mongodb', mandatories=['host', 'port', 'db'], options=['user', 'password'])
        if not self.export_enable:
            sys.exit(2)

        # Init the MongoDB client
        self.client = self.init()

    def init(self):
        """Init the connection to the MongoDB server.

        Exits the process (status 2) if the server cannot be reached.
        """
        if not self.export_enable:
            return None

        # BUG FIX: user/password are optional config keys and may be None;
        # quote_plus(None) raises TypeError.  Only embed credentials in the
        # URI when both are actually configured.
        if self.user is None or self.password is None:
            server_uri = 'mongodb://%s:%s' % (self.host, self.port)
        else:
            server_uri = 'mongodb://%s:%s@%s:%s' % (
                quote_plus(self.user),
                quote_plus(self.password),
                self.host,
                self.port,
            )

        try:
            client = pymongo.MongoClient(server_uri)
            # 'ping' forces a round-trip so bad credentials/hosts fail here.
            client.admin.command('ping')
        except Exception as e:
            logger.critical("Cannot connect to MongoDB server %s:%s (%s)" % (self.host, self.port, e))
            sys.exit(2)
        else:
            logger.info("Connected to the MongoDB server")
        return client

    def database(self):
        """Return the MongoDB database object"""
        return self.client[self.db]

    def export(self, name, columns, points):
        """Write the points to the MongoDB server."""
        logger.debug("Export {} stats to MongoDB".format(name))

        # Create DB input
        data = dict(zip(columns, points))

        # Write data to the MongoDB database
        try:
            self.database()[name].insert_one(data)
        except Exception as e:
            logger.error("Cannot export {} stats to MongoDB ({})".format(name, e))
/Nuitka-1.8.tar.gz/Nuitka-1.8/nuitka/build/inline_copy/lib/scons-4.3.0/SCons/Subst.py | import collections
import re
from inspect import signature, Parameter
import SCons.Errors
from SCons.Util import is_String, is_Sequence
# Indexed by the SUBST_* constants below.
_strconv = [
SCons.Util.to_String_for_subst,
SCons.Util.to_String_for_subst,
SCons.Util.to_String_for_signature,
]
AllowableExceptions = (IndexError, NameError)
def SetAllowableExceptions(*excepts):
    """Replace the module-wide list of exceptions tolerated during
    variable expansion; falsy entries (e.g. None) are dropped."""
    global AllowableExceptions
    AllowableExceptions = [exc for exc in excepts if exc]
def raise_exception(exception, target, s):
    """Re-raise an expansion failure as a SCons error.

    Reports a BuildError against the first target when one is available,
    otherwise a UserError.
    """
    msg = "%s `%s' trying to evaluate `%s'" % (
        exception.__class__.__name__, exception, s)
    if target:
        raise SCons.Errors.BuildError(target[0], msg)
    raise SCons.Errors.UserError(msg)
class Literal:
    """A wrapper for a string.  If you use this object wrapped
    around a string, then it will be interpreted as literal.
    When passed to the command interpreter, all special
    characters will be escaped."""

    def __init__(self, lstr):
        self.lstr = lstr

    def __str__(self):
        return self.lstr

    def escape(self, escape_func):
        return escape_func(self.lstr)

    def for_signature(self):
        return self.lstr

    def is_literal(self):
        return 1

    def __eq__(self, other):
        if not isinstance(other, Literal):
            return False
        return self.lstr == other.lstr

    def __ne__(self, other):
        # BUG FIX: this was misspelled ``__neq__``, which Python never
        # invokes for ``!=`` (it only worked by accident via the default
        # __ne__ derived from __eq__).
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.lstr)
class SpecialAttrWrapper:
    """Wrapper for a Node "special attribute" such as $TARGET.abspath or
    $SOURCES[1].filebase.

    Implements the same interface as Literal (so special characters can
    be escaped) plus a for_signature() method that can return a stable
    canonical string for signature calculation, avoiding needless
    rebuilds.
    """

    def __init__(self, lstr, for_signature=None):
        """Wrap *lstr*; when *for_signature* is given it becomes the
        canonical string returned by for_signature(), otherwise *lstr*
        itself is used."""
        self.lstr = lstr
        self.forsig = for_signature if for_signature else lstr

    def __str__(self):
        return self.lstr

    def escape(self, escape_func):
        return escape_func(self.lstr)

    def for_signature(self):
        return self.forsig

    def is_literal(self):
        return 1
def quote_spaces(arg):
    """Return *arg* as a string, wrapped in double quotes when it
    contains a space or tab so shells treat it as a single word."""
    needs_quoting = ' ' in arg or '\t' in arg
    return '"%s"' % arg if needs_quoting else str(arg)
class CmdStringHolder(collections.UserString):
    """A string produced by scons_subst()/scons_subst_list() that
    remembers whether it is a literal.

    escape() applies a platform-specific escape function (or the quote
    function for strings containing whitespace) and returns the string
    ready for the command interpreter.
    """

    def __init__(self, cmd, literal=None):
        collections.UserString.__init__(self, cmd)
        self.literal = literal

    def is_literal(self):
        return self.literal

    def escape(self, escape_func, quote_func=quote_spaces):
        """Return the held string escaped with *escape_func* (for
        literals), quoted with *quote_func* (when it contains
        whitespace), or unchanged otherwise."""
        if self.is_literal():
            return escape_func(self.data)
        if ' ' in self.data or '\t' in self.data:
            return quote_func(self.data)
        return self.data
def escape_list(mylist, escape_func):
    """Escape the elements of *mylist* by invoking each element's
    escape() method with *escape_func*; elements without one pass
    through unchanged."""
    def _escape_one(obj):
        try:
            escape_method = obj.escape
        except AttributeError:
            return obj
        return escape_method(escape_func)

    return [_escape_one(item) for item in mylist]
class NLWrapper:
    """A wrapper class that delays turning a list of sources or targets
    into a NodeList until it's needed.  The specified function supplied
    when the object is initialized is responsible for turning raw nodes
    into proxies that implement the special attributes like .abspath,
    .source, etc.  This way, we avoid creating those proxies just
    "in case" someone is going to use $TARGET or the like, and only
    go through the trouble if we really have to.

    In practice, this might be a wash performance-wise, but it's a little
    cleaner conceptually...
    """
    def __init__(self, list, func):
        # NOTE(review): parameter name shadows the list builtin; kept
        # for interface compatibility.
        self.list = list
        self.func = func
    def _return_nodelist(self):
        # Fast path used once the NodeList has been built (see below).
        return self.nodelist
    def _gen_nodelist(self):
        mylist = self.list
        if mylist is None:
            mylist = []
        elif not is_Sequence(mylist):
            mylist = [mylist]
        # The map(self.func) call is what actually turns
        # a list into appropriate proxies.
        self.nodelist = SCons.Util.NodeList(list(map(self.func, mylist)))
        # Memoize: rebind the per-instance hook so later calls skip the
        # build and return the cached NodeList directly.
        self._create_nodelist = self._return_nodelist
        return self.nodelist
    _create_nodelist = _gen_nodelist
class Targets_or_Sources(collections.UserList):
    """A class that implements $TARGETS or $SOURCES expansions by in turn
    wrapping a NLWrapper.  This class handles the different methods used
    to access the list, calling the NLWrapper to create proxies on demand.

    Note that we subclass collections.UserList purely so that the
    is_Sequence() function will identify an object of this class as
    a list during variable expansion.  We're not really using any
    collections.UserList methods in practice.
    """
    def __init__(self, nl):
        # Deliberately does NOT call UserList.__init__; every access is
        # delegated to the wrapped NLWrapper below.
        self.nl = nl
    def __getattr__(self, attr):
        nl = self.nl._create_nodelist()
        return getattr(nl, attr)
    def __getitem__(self, i):
        nl = self.nl._create_nodelist()
        return nl[i]
    def __str__(self):
        nl = self.nl._create_nodelist()
        return str(nl)
    def __repr__(self):
        nl = self.nl._create_nodelist()
        return repr(nl)
class Target_or_Source:
    """A class that implements $TARGET or $SOURCE expansions by in turn
    wrapping a NLWrapper.  This class handles the different methods used
    to access an individual proxy Node, calling the NLWrapper to create
    a proxy on demand.
    """
    def __init__(self, nl):
        self.nl = nl
    def __getattr__(self, attr):
        nl = self.nl._create_nodelist()
        try:
            nl0 = nl[0]
        except IndexError:
            # If there is nothing in the list, then we have no attributes to
            # pass through, so raise AttributeError for everything.
            raise AttributeError("NodeList has no attribute: %s" % attr)
        return getattr(nl0, attr)
    def __str__(self):
        # An empty target/source list renders as the empty string.
        nl = self.nl._create_nodelist()
        if nl:
            return str(nl[0])
        return ''
    def __repr__(self):
        nl = self.nl._create_nodelist()
        if nl:
            return repr(nl[0])
        return ''
class NullNodeList(SCons.Util.NullSeq):
    """Null-object stand-in for a NodeList: calling or stringifying it
    yields '' so $TARGET/$SOURCES expand to nothing when absent."""
    def __call__(self, *args, **kwargs): return ''
    def __str__(self): return ''

# Shared singleton bound by subst_dict() when no target/source was given.
NullNodesList = NullNodeList()
def subst_dict(target, source):
    """Create a dictionary for substitution of special construction
    variables.

    target and source may each be a single node or a sequence of nodes;
    they feed the TARGET/TARGETS and SOURCE/SOURCES construction
    variables.  When absent, the variables are bound to NullNodesList so
    they expand to ''.
    """
    dict = {}  # NOTE(review): shadows the dict builtin; kept as-is.
    if target:
        def get_tgt_subst_proxy(thing):
            try:
                subst_proxy = thing.get_subst_proxy()
            except AttributeError:
                subst_proxy = thing # probably a string, just return it
            return subst_proxy
        tnl = NLWrapper(target, get_tgt_subst_proxy)
        dict['TARGETS'] = Targets_or_Sources(tnl)
        dict['TARGET'] = Target_or_Source(tnl)
        # This is a total cheat, but hopefully this dictionary goes
        # away soon anyway.  We just let these expand to $TARGETS
        # because that's "good enough" for the use of ToolSurrogates
        # (see test/ToolSurrogate.py) to generate documentation.
        dict['CHANGED_TARGETS'] = '$TARGETS'
        dict['UNCHANGED_TARGETS'] = '$TARGETS'
    else:
        dict['TARGETS'] = NullNodesList
        dict['TARGET'] = NullNodesList
    if source:
        def get_src_subst_proxy(node):
            # Prefer the node's repository file (rfile) when available.
            try:
                rfile = node.rfile
            except AttributeError:
                pass
            else:
                node = rfile()
            try:
                return node.get_subst_proxy()
            except AttributeError:
                return node # probably a String, just return it
        snl = NLWrapper(source, get_src_subst_proxy)
        dict['SOURCES'] = Targets_or_Sources(snl)
        dict['SOURCE'] = Target_or_Source(snl)
        # Same "cheat" as the CHANGED/UNCHANGED_TARGETS entries above.
        dict['CHANGED_SOURCES'] = '$SOURCES'
        dict['UNCHANGED_SOURCES'] = '$SOURCES'
    else:
        dict['SOURCES'] = NullNodesList
        dict['SOURCE'] = NullNodesList
    return dict
_callable_args_set = {'target', 'source', 'env', 'for_signature'}
class StringSubber:
    """A class to construct the results of a scons_subst() call.

    This binds a specific construction environment, mode, target and
    source with two methods (substitute() and expand()) that handle
    the expansion.
    """
    def __init__(self, env, mode, conv, gvars):
        """Bind the environment, SUBST_* mode, conversion function and
        global-variables dict used for every expansion."""
        self.env = env
        self.mode = mode
        self.conv = conv
        self.gvars = gvars
    def expand(self, s, lvars):
        """Expand a single "token" as necessary, returning an
        appropriate string containing the expansion.

        This handles expanding different types of things (strings,
        lists, callables) appropriately.  It calls the wrapper
        substitute() method to re-expand things as necessary, so that
        the results of expansions of side-by-side strings still get
        re-evaluated separately, not smushed together.
        """
        if is_String(s):
            try:
                s0, s1 = s[:2]
            except (IndexError, ValueError):
                # Zero- or one-character string: nothing to expand.
                return s
            if s0 != '$':
                return s
            if s1 == '$':
                # In this case keep the double $'s which we'll later
                # swap for a single dollar sign as we need to retain
                # this information to properly avoid matching "$("" when
                # the actual text was "$$("" (or "$)"" when "$$)"" )
                return '$$'
            elif s1 in '()':
                return s
            else:
                key = s[1:]
                if key[0] == '{' or '.' in key:
                    if key[0] == '{':
                        key = key[1:-1]
                # Store for error messages if we fail to expand the
                # value
                old_s = s
                s = None
                if key in lvars:
                    s = lvars[key]
                elif key in self.gvars:
                    s = self.gvars[key]
                else:
                    # Dotted/braced keys are evaluated as Python
                    # expressions against gvars/lvars.
                    try:
                        s = eval(key, self.gvars, lvars)
                    except KeyboardInterrupt:
                        raise
                    except Exception as e:
                        if e.__class__ in AllowableExceptions:
                            return ''
                        raise_exception(e, lvars['TARGETS'], old_s)
                if s is None and NameError not in AllowableExceptions:
                    raise_exception(NameError(key), lvars['TARGETS'], old_s)
                elif s is None:
                    return ''
                # Before re-expanding the result, handle
                # recursive expansion by copying the local
                # variable dictionary and overwriting a null
                # string for the value of the variable name
                # we just expanded.
                #
                # This could potentially be optimized by only
                # copying lvars when s contains more expansions,
                # but lvars is usually supposed to be pretty
                # small, and deeply nested variable expansions
                # are probably more the exception than the norm,
                # so it should be tolerable for now.
                lv = lvars.copy()
                var = key.split('.')[0]
                lv[var] = ''
                return self.substitute(s, lv)
        elif is_Sequence(s):
            def func(l, conv=self.conv, substitute=self.substitute, lvars=lvars):
                return conv(substitute(l, lvars))
            return list(map(func, s))
        elif callable(s):
            # SCons has the unusual Null class where any __getattr__ call returns its self,
            # which does not work with the signature module, and the Null class returns an empty
            # string if called on, so we make an exception in this condition for Null class
            # Also allow callables where the only non default valued args match the expected defaults
            # this should also allow functools.partial's to work.
            if isinstance(s, SCons.Util.Null) or {k for k, v in signature(s).parameters.items() if
                k in _callable_args_set or v.default == Parameter.empty} == _callable_args_set:
                s = s(target=lvars['TARGETS'],
                      source=lvars['SOURCES'],
                      env=self.env,
                      for_signature=(self.mode == SUBST_SIG))
            else:
                # This probably indicates that it's a callable
                # object that doesn't match our calling arguments
                # (like an Action).
                if self.mode == SUBST_RAW:
                    return s
                s = self.conv(s)
            return self.substitute(s, lvars)
        elif s is None:
            return ''
        else:
            return s
    def substitute(self, args, lvars):
        """Substitute expansions in an argument or list of arguments.

        This serves as a wrapper for splitting up a string into
        separate tokens.
        """
        if is_String(args) and not isinstance(args, CmdStringHolder):
            args = str(args)  # In case it's a UserString.
            try:
                def sub_match(match):
                    return self.conv(self.expand(match.group(1), lvars))
                result = _dollar_exps.sub(sub_match, args)
            except TypeError:
                # If the internal conversion routine doesn't return
                # strings (it could be overridden to return Nodes, for
                # example), then the 1.5.2 re module will throw this
                # exception.  Back off to a slower, general-purpose
                # algorithm that works for all data types.
                args = _separate_args.findall(args)
                result = []
                for a in args:
                    result.append(self.conv(self.expand(a, lvars)))
                if len(result) == 1:
                    result = result[0]
                else:
                    result = ''.join(map(str, result))
            return result
        else:
            return self.expand(args, lvars)
class ListSubber(collections.UserList):
    """A class to construct the results of a scons_subst_list() call.

    Like StringSubber, this class binds a specific construction
    environment, mode, target and source with two methods
    (substitute() and expand()) that handle the expansion.

    In addition, however, this class is used to track the state of
    the result(s) we're gathering so we can do the appropriate thing
    whenever we have to append another word to the result--start a new
    line, start a new word, append to the current word, etc.  We do
    this by setting the "append" attribute to the right method so
    that our wrapper methods only need ever call ListSubber.append(),
    and the rest of the object takes care of doing the right thing
    internally.
    """
    def __init__(self, env, mode, conv, gvars):
        """Bind expansion parameters and start with one empty result line."""
        collections.UserList.__init__(self, [])
        self.env = env
        self.mode = mode
        self.conv = conv
        self.gvars = gvars
        if self.mode == SUBST_RAW:
            # Raw mode keeps the $( / $) markers in the output.
            self.add_strip = lambda x: self.append(x)
        else:
            self.add_strip = lambda x: None
        self.in_strip = None
        self.next_line()
    def expanded(self, s):
        """Determines if the string s requires further expansion.

        Due to the implementation of ListSubber expand will call
        itself 2 additional times for an already expanded string. This
        method is used to determine if a string is already fully
        expanded and if so exit the loop early to prevent these
        recursive calls.
        """
        if not is_String(s) or isinstance(s, CmdStringHolder):
            return False
        s = str(s)  # in case it's a UserString
        return _separate_args.findall(s) is None
    def expand(self, s, lvars, within_list):
        """Expand a single "token" as necessary, appending the
        expansion to the current result.

        This handles expanding different types of things (strings,
        lists, callables) appropriately.  It calls the wrapper
        substitute() method to re-expand things as necessary, so that
        the results of expansions of side-by-side strings still get
        re-evaluated separately, not smushed together.
        """
        if is_String(s):
            try:
                s0, s1 = s[:2]
            except (IndexError, ValueError):
                self.append(s)
                return
            if s0 != '$':
                self.append(s)
                return
            if s1 == '$':
                self.append('$')
            elif s1 == '(':
                self.open_strip('$(')
            elif s1 == ')':
                self.close_strip('$)')
            else:
                key = s[1:]
                if key[0] == '{' or key.find('.') >= 0:
                    if key[0] == '{':
                        key = key[1:-1]
                # Store for error messages if we fail to expand the
                # value
                old_s = s
                s = None
                if key in lvars:
                    s = lvars[key]
                elif key in self.gvars:
                    s = self.gvars[key]
                else:
                    # Dotted/braced keys are evaluated as Python
                    # expressions against gvars/lvars.
                    try:
                        s = eval(key, self.gvars, lvars)
                    except KeyboardInterrupt:
                        raise
                    except Exception as e:
                        if e.__class__ in AllowableExceptions:
                            return
                        raise_exception(e, lvars['TARGETS'], old_s)
                if s is None and NameError not in AllowableExceptions:
                    raise_exception(NameError(), lvars['TARGETS'], old_s)
                elif s is None:
                    return
                # If the string is already full expanded there's no
                # need to continue recursion.
                if self.expanded(s):
                    self.append(s)
                    return
                # Before re-expanding the result, handle
                # recursive expansion by copying the local
                # variable dictionary and overwriting a null
                # string for the value of the variable name
                # we just expanded.
                lv = lvars.copy()
                var = key.split('.')[0]
                lv[var] = ''
                self.substitute(s, lv, 0)
                self.this_word()
        elif is_Sequence(s):
            for a in s:
                self.substitute(a, lvars, 1)
                self.next_word()
        elif callable(s):
            # SCons has the unusual Null class where any __getattr__ call returns its self,
            # which does not work with the signature module, and the Null class returns an empty
            # string if called on, so we make an exception in this condition for Null class
            # Also allow callables where the only non default valued args match the expected defaults
            # this should also allow functools.partial's to work.
            if isinstance(s, SCons.Util.Null) or {k for k, v in signature(s).parameters.items() if
                k in _callable_args_set or v.default == Parameter.empty} == _callable_args_set:
                # NOTE(review): unlike StringSubber, for_signature is True
                # for every mode except SUBST_CMD here.
                s = s(target=lvars['TARGETS'],
                      source=lvars['SOURCES'],
                      env=self.env,
                      for_signature=(self.mode != SUBST_CMD))
            else:
                # This probably indicates that it's a callable
                # object that doesn't match our calling arguments
                # (like an Action).
                if self.mode == SUBST_RAW:
                    self.append(s)
                    return
                s = self.conv(s)
            self.substitute(s, lvars, within_list)
        elif s is None:
            self.this_word()
        else:
            self.append(s)
    def substitute(self, args, lvars, within_list):
        """Substitute expansions in an argument or list of arguments.

        This serves as a wrapper for splitting up a string into
        separate tokens.
        """
        if is_String(args) and not isinstance(args, CmdStringHolder):
            args = str(args)  # In case it's a UserString.
            args = _separate_args.findall(args)
            for a in args:
                if a[0] in ' \t\n\r\f\v':
                    # Whitespace token: newline starts a new result line;
                    # otherwise it separates words (or is kept verbatim
                    # when expanding inside a list).
                    if '\n' in a:
                        self.next_line()
                    elif within_list:
                        self.append(a)
                    else:
                        self.next_word()
                else:
                    self.expand(a, lvars, within_list)
        else:
            self.expand(args, lvars, within_list)
    def next_line(self):
        """Arrange for the next word to start a new line.  This
        is like starting a new word, except that we have to append
        another line to the result."""
        collections.UserList.append(self, [])
        self.next_word()
    def this_word(self):
        """Arrange for the next word to append to the end of the
        current last word in the result."""
        self.append = self.add_to_current_word
    def next_word(self):
        """Arrange for the next word to start a new word."""
        self.append = self.add_new_word
    def add_to_current_word(self, x):
        """Append the string x to the end of the current last word
        in the result.  If that is not possible, then just add
        it as a new word.  Make sure the entire concatenated string
        inherits the object attributes of x (in particular, the
        escape function) by wrapping it as CmdStringHolder."""
        if not self.in_strip or self.mode != SUBST_SIG:
            try:
                current_word = self[-1][-1]
            except IndexError:
                self.add_new_word(x)
            else:
                # All right, this is a hack and it should probably
                # be refactored out of existence in the future.
                # The issue is that we want to smoosh words together
                # and make one file name that gets escaped if
                # we're expanding something like foo$EXTENSION,
                # but we don't want to smoosh them together if
                # it's something like >$TARGET, because then we'll
                # treat the '>' like it's part of the file name.
                # So for now, just hard-code looking for the special
                # command-line redirection characters...
                try:
                    last_char = str(current_word)[-1]
                except IndexError:
                    last_char = '\0'
                if last_char in '<>|':
                    self.add_new_word(x)
                else:
                    y = current_word + x
                    # We used to treat a word appended to a literal
                    # as a literal itself, but this caused problems
                    # with interpreting quotes around space-separated
                    # targets on command lines.  Removing this makes
                    # none of the "substantive" end-to-end tests fail,
                    # so we'll take this out but leave it commented
                    # for now in case there's a problem not covered
                    # by the test cases and we need to resurrect this.
                    #literal1 = self.literal(self[-1][-1])
                    #literal2 = self.literal(x)
                    y = self.conv(y)
                    if is_String(y):
                        #y = CmdStringHolder(y, literal1 or literal2)
                        y = CmdStringHolder(y, None)
                    self[-1][-1] = y
    def add_new_word(self, x):
        """Start a new word in the current line with x (discarded when
        inside a $( ... $) strip in signature mode)."""
        if not self.in_strip or self.mode != SUBST_SIG:
            literal = self.literal(x)
            x = self.conv(x)
            if is_String(x):
                x = CmdStringHolder(x, literal)
            self[-1].append(x)
        self.append = self.add_to_current_word
    def literal(self, x):
        """Return x's literal flag via its is_literal() method, or None
        when x has no such method."""
        try:
            l = x.is_literal
        except AttributeError:
            return None
        else:
            return l()
    def open_strip(self, x):
        """Handle the "open strip" $( token."""
        self.add_strip(x)
        self.in_strip = 1
    def close_strip(self, x):
        """Handle the "close strip" $) token."""
        self.add_strip(x)
        self.in_strip = None
# Constants for the "mode" parameter to scons_subst_list() and
# scons_subst().  SUBST_RAW gives the raw command line.  SUBST_CMD
# gives a command line suitable for passing to a shell.  SUBST_SIG
# gives a command line appropriate for calculating the signature
# of a command line...if this changes, we should rebuild.
SUBST_CMD = 0
SUBST_RAW = 1
SUBST_SIG = 2

# Matches any single $( or $) marker (used to simply delete them).
_rm = re.compile(r'\$[()]')

# Note the pattern below only matches $( or $) when there is no
# preceding $. (Thus the (?<!\$))
_rm_split = re.compile(r'(?<!\$)(\$[()])')

# Indexed by the SUBST_* constants above.
_regex_remove = [ _rm, None, _rm_split ]
def _rm_list(list):
return [l for l in list if l not in ('$(', '$)')]
def _remove_list(list):
result = []
depth = 0
for l in list:
if l == '$(':
depth += 1
elif l == '$)':
depth -= 1
if depth < 0:
break
elif depth == 0:
result.append(l)
if depth != 0:
return None
return result
# Indexed by the SUBST_* constants above.
_list_remove = [ _rm_list, None, _remove_list ]

# Regular expressions for splitting strings and handling substitutions,
# for use by the scons_subst() and scons_subst_list() functions:
#
# The first expression compiled matches all of the $-introduced tokens
# that we need to process in some way, and is used for substitutions.
# The expressions it matches are:
#
#       "$$"
#       "$("
#       "$)"
#       "$variable"             [must begin with alphabetic or underscore]
#       "${any stuff}"
#
# The second expression compiled is used for splitting strings into tokens
# to be processed, and it matches all of the tokens listed above, plus
# the following that affect how arguments do or don't get joined together:
#
#       " "                     [white space]
#       "non-white-space"       [without any dollar signs]
#       "$"                     [single dollar sign]
#
_dollar_exps_str = r'\$[\$\(\)]|\$[_a-zA-Z][\.\w]*|\${[^}]*}'
_dollar_exps = re.compile(r'(%s)' % _dollar_exps_str)
_separate_args = re.compile(r'(%s|\s+|[^\s$]+|\$)' % _dollar_exps_str)

# This regular expression is used to replace strings of multiple white
# space characters in the string result from the scons_subst() function.
_space_sep = re.compile(r'[\t ]+(?![^{]*})')
def scons_subst(strSubst, env, mode=SUBST_RAW, target=None, source=None, gvars=None, lvars=None, conv=None):
    """Expand a string or list containing construction variable
    substitutions.

    This is the work-horse function for substitutions in file names
    and the like.  The companion scons_subst_list() function (below)
    handles separating command lines into lists of arguments, so see
    that function if that's what you're looking for.

    gvars/lvars default to fresh empty dicts.  (BUG FIX: these used to
    be mutable default arguments; gvars is mutated below, so state could
    leak across calls through the shared default dict.)
    """
    if (isinstance(strSubst, str) and '$' not in strSubst) or isinstance(strSubst, CmdStringHolder):
        return strSubst
    if gvars is None:
        gvars = {}
    if lvars is None:
        lvars = {}
    if conv is None:
        conv = _strconv[mode]
    # Doing this every time is a bit of a waste, since the Executor
    # has typically already populated the OverrideEnvironment with
    # $TARGET/$SOURCE variables.  We're keeping this (for now), though,
    # because it supports existing behavior that allows us to call
    # an Action directly with an arbitrary target+source pair, which
    # we use in Tool/tex.py to handle calling $BIBTEX when necessary.
    # If we dropped that behavior (or found another way to cover it),
    # we could get rid of this call completely and just rely on the
    # Executor setting the variables.
    if 'TARGET' not in lvars:
        d = subst_dict(target, source)
        if d:
            lvars = lvars.copy()
            lvars.update(d)
    # We're (most likely) going to eval() things.  If Python doesn't
    # find a __builtins__ value in the global dictionary used for eval(),
    # it copies the current global values for you.  Avoid this by
    # setting it explicitly and then deleting, so we don't pollute the
    # construction environment Dictionary(ies) that are typically used
    # for expansion.
    gvars['__builtins__'] = __builtins__
    ss = StringSubber(env, mode, conv, gvars)
    try:
        result = ss.substitute(strSubst, lvars)
    finally:
        # BUG FIX: use finally so a caller-supplied gvars dict is cleaned
        # up even when substitution raises.
        gvars.pop('__builtins__', None)
    res = result
    if is_String(result):
        # Remove $(-$) pairs and any stuff in between,
        # if that's appropriate.
        remove = _regex_remove[mode]
        if remove:
            if mode == SUBST_SIG:
                result = _list_remove[mode](remove.split(result))
                if result is None:
                    raise SCons.Errors.UserError("Unbalanced $(/$) in: " + res)
                result = ' '.join(result)
            else:
                result = remove.sub('', result)
        if mode != SUBST_RAW:
            # Compress strings of white space characters into
            # a single space.
            result = _space_sep.sub(' ', result).strip()
            # Now replace escaped $'s currently "$$"
            # This is needed because we now retain $$ instead of
            # replacing them during substitution to avoid
            # improperly trying to escape "$$(" as being "$("
            result = result.replace('$$','$')
    elif is_Sequence(result):
        remove = _list_remove[mode]
        if remove:
            result = remove(result)
            if result is None:
                raise SCons.Errors.UserError("Unbalanced $(/$) in: " + str(res))
    return result
def scons_subst_list(strSubst, env, mode=SUBST_RAW, target=None, source=None, gvars=None, lvars=None, conv=None):
    """Substitute construction variables in a string (or list or other
    object) and separate the arguments into a command list.

    The companion scons_subst() function (above) handles basic
    substitutions within strings, so see that function instead
    if that's what you're looking for.

    gvars/lvars default to fresh empty dicts.  (BUG FIX: these used to
    be mutable default arguments; gvars is mutated below, so state could
    leak across calls through the shared default dict.)
    """
    if gvars is None:
        gvars = {}
    if lvars is None:
        lvars = {}
    if conv is None:
        conv = _strconv[mode]
    # Doing this every time is a bit of a waste, since the Executor
    # has typically already populated the OverrideEnvironment with
    # $TARGET/$SOURCE variables.  We're keeping this (for now), though,
    # because it supports existing behavior that allows us to call
    # an Action directly with an arbitrary target+source pair, which
    # we use in Tool/tex.py to handle calling $BIBTEX when necessary.
    # If we dropped that behavior (or found another way to cover it),
    # we could get rid of this call completely and just rely on the
    # Executor setting the variables.
    if 'TARGET' not in lvars:
        d = subst_dict(target, source)
        if d:
            lvars = lvars.copy()
            lvars.update(d)
    # We're (most likely) going to eval() things.  If Python doesn't
    # find a __builtins__ value in the global dictionary used for eval(),
    # it copies the current global values for you.  Avoid this by
    # setting it explicitly and then deleting, so we don't pollute the
    # construction environment Dictionary(ies) that are typically used
    # for expansion.
    gvars['__builtins__'] = __builtins__
    ls = ListSubber(env, mode, conv, gvars)
    try:
        ls.substitute(strSubst, lvars, 0)
    finally:
        # BUG FIX: use finally so a caller-supplied gvars dict is cleaned
        # up even when substitution raises.
        gvars.pop('__builtins__', None)
    return ls.data
def scons_subst_once(strSubst, env, key):
    """Perform a single, non-recursive substitution of one construction
    variable keyword.

    This is used when setting a variable when copying or overriding values
    in an Environment.  We want to capture (expand) the old value before
    we override it, so people can do things like:

        env2 = env.Clone(CCFLAGS = '$CCFLAGS -g')

    We do this with some straightforward, brute-force code here...
    """
    # Fast path: a plain string with no '$' needs no expansion at all.
    if isinstance(strSubst, str) and '$' not in strSubst:
        return strSubst

    # The two spellings that refer to this key: $KEY and ${KEY}.
    patterns = ['$' + key, '${' + key + '}']
    replacement = env.get(key, '')

    def expand_match(match, replacement=replacement, patterns=patterns):
        # Regex callback: expand a matched $-expression if it names our key,
        # otherwise leave the matched text as-is (stringified).
        token = match.group(1)
        if token in patterns:
            token = replacement
        if is_Sequence(token):
            return ' '.join(map(str, token))
        return str(token)

    if is_Sequence(strSubst):
        expanded = []
        for element in strSubst:
            if not is_String(element):
                # Non-string list elements pass through untouched.
                expanded.append(element)
            elif element in patterns:
                # A whole-element match is replaced by the value; sequence
                # values are spliced in rather than nested.
                if is_Sequence(replacement):
                    expanded.extend(replacement)
                else:
                    expanded.append(replacement)
            else:
                expanded.append(_dollar_exps.sub(expand_match, element))
        return expanded
    if is_String(strSubst):
        return _dollar_exps.sub(expand_match, strSubst)
    return strSubst
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | PypiClean |
/Jupyter_Video_Widget-0.3.1.tar.gz/Jupyter_Video_Widget-0.3.1/jpy_video/static/index.js | define(["@jupyter-widgets/base"], function(__WEBPACK_EXTERNAL_MODULE_2__) { return /******/ (function(modules) { // webpackBootstrap
/******/ // The module cache
/******/ var installedModules = {};
/******/
/******/ // The require function
/******/ function __webpack_require__(moduleId) {
/******/
/******/ // Check if module is in cache
/******/ if(installedModules[moduleId])
/******/ return installedModules[moduleId].exports;
/******/
/******/ // Create a new module (and put it into the cache)
/******/ var module = installedModules[moduleId] = {
/******/ exports: {},
/******/ id: moduleId,
/******/ loaded: false
/******/ };
/******/
/******/ // Execute the module function
/******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__);
/******/
/******/ // Flag the module as loaded
/******/ module.loaded = true;
/******/
/******/ // Return the exports of the module
/******/ return module.exports;
/******/ }
/******/
/******/
/******/ // expose the modules object (__webpack_modules__)
/******/ __webpack_require__.m = modules;
/******/
/******/ // expose the module cache
/******/ __webpack_require__.c = installedModules;
/******/
/******/ // __webpack_public_path__
/******/ __webpack_require__.p = "";
/******/
/******/ // Load entry module and return exports
/******/ return __webpack_require__(0);
/******/ })
/************************************************************************/
/******/ ([
/* 0 */
/***/ (function(module, exports, __webpack_require__) {
// Entry point for the notebook bundle containing custom model definitions.
//
// Setup notebook base URL
//
// Some static assets may be required by the custom widget javascript. The base
// url for the notebook is not known at build time and is therefore computed
// dynamically here from the <body> tag's data-base-url attribute.
__webpack_require__.p = document.querySelector('body').getAttribute('data-base-url') + 'nbextensions/jupyter-video/';
// Export widget models and views, and the npm package version number.
// NOTE(review): module 4 is outside this chunk -- presumably package.json
// supplying `version`; confirm against the full bundle.
module.exports = __webpack_require__(1);
module.exports['version'] = __webpack_require__(4).version;
/***/ }),
/* 1 */
/***/ (function(module, exports, __webpack_require__) {
var widgets = __webpack_require__(2);
var _ = __webpack_require__(3);
// https://remysharp.com/2010/07/21/throttling-function-calls
// See updated version in above article's comments
//
// Return a wrapper around `fn` that invokes it at most once per `threshhold`
// milliseconds.  The first call in a window runs immediately; calls arriving
// inside the window are deferred via setTimeout (with the latest arguments),
// so the trailing call is not lost.  `scope`, when given, fixes the `this`
// used to call `fn`.
function throttle(fn, threshhold, scope) {
    // threshhold || (threshhold = 250);
    // `last` is the time of the most recent (actual or scheduled) invocation;
    // `deferTimer` holds the pending trailing-call timer, if any.
    var last, deferTimer;
    return function () {
        var context = scope || this;
        var now = +new Date, args = arguments;
        if (last && now < last + threshhold) {
            // hold on to it: replace any previously deferred call and
            // re-schedule for the end of the current throttle window.
            clearTimeout(deferTimer);
            deferTimer = setTimeout(function () {
                last = now;
                fn.apply(context, args);
            }, threshhold + last - now);
        } else {
            last = now;
            fn.apply(context, args);
        }
    };
}
// Format `number` as exactly two characters, left-padded with zeros:
// 0 -> "00", 7 -> "07", 42 -> "42".  Numbers with three or more digits are
// truncated to their last two characters (123 -> "23"), matching the
// original substr-from-the-end behavior.
function zero_pad_two_digits(number) {
    return ("00" + number).slice(-2);
}
//-----------------------------------------------
// Widget models must provide default values for the model attributes that are
// different from the base class. These include at least `_model_name`, `_view_name`,
// `_model_module`, and `_view_module`. When serialiazing entire widget state for embedding,
// only values different from default will be specified.
// Backbone/ipywidgets model for the time-code readout widget.
//
// Bug fix: the original computed
//     _.extend(_.result(this, 'widgets.HTMLModel.prototype.defaults'), {...})
// but Underscore's _.result() does not resolve dotted-path strings, so it
// returned undefined and _.extend(undefined, {...}) returned undefined --
// leaving the model with no default attributes at all.  Use the parent
// prototype's defaults explicitly; _.result() handles `defaults` being
// either a plain object or a function.
var TimeCodeModel = widgets.HTMLModel.extend({
    defaults: function() {
        return _.extend({}, _.result(widgets.HTMLModel.prototype, 'defaults'), {
            _model_name: 'TimeCodeModel',
            _model_module: 'jupyter-video',
            _view_name: 'TimeCodeView',
            _view_module: 'jupyter-video',
        });
    }
});
// Backbone/ipywidgets model for the video widget.
//
// Bug fix (same as TimeCodeModel): _.result(this, 'widgets...') cannot
// resolve a dotted path and yielded undefined defaults.  Extend the parent
// prototype's defaults instead; _.result() tolerates `defaults` being
// either an object or a function.
var VideoModel = widgets.DOMWidgetModel.extend({
    defaults: function() {
        return _.extend({}, _.result(widgets.DOMWidgetModel.prototype, 'defaults'), {
            _model_name: 'VideoModel',
            _model_module: 'jupyter-video',
            _view_name: 'VideoView',
            _view_module: 'jupyter-video',
        });
    }
});
//-----------------------------------------------
// Widget View renders the model to the DOM
var TimeCodeView = widgets.HTMLView.extend({
    // View: renders the model's numeric `timecode` (seconds) as an
    // "HH:MM:SS;FF" string, written back into the model's `value`
    // attribute as styled HTML for the underlying HTML widget to display.
    // https://codereview.stackexchange.com/questions/49524/updating-single-view-on-change-of-a-model-in-backbone
    render: function() {
        this.listenTo(this.model, 'change:timecode', this.timecode_changed);
        TimeCodeView.__super__.render.apply(this);
        this.timecode_changed();
        this.update();
        return this;
    },
    timecode_changed: function() {
        var time_base = this.model.get('timebase');  // seconds per frame
        var t = this.model.get('timecode');  // current video time in seconds
        var h = Math.floor((t/3600));
        var m = Math.floor((t % 3600)/60);
        var s = Math.floor((t % 60));
        var f = Math.floor((t % 1)/time_base);  // frame within the current second
        // var f = Math.round((t % 1)/time_base);
        // Pretty timecode string
        var time_string = zero_pad_two_digits(h) + ':' +
                          zero_pad_two_digits(m) + ':' +
                          zero_pad_two_digits(s) + ';' +
                          zero_pad_two_digits(f);
        // Bug fix: the original template contained a stray apostrophe after
        // "Monospace;'" which corrupted the inline style attribute.
        var html = `<p style="font-family: DejaVu Sans Mono, Consolas, Monospace;
                              font-variant: normal;
                              font-weight: bold;
                              font-style: normal;
                              margin-left: 3pt;
                              margin-right: 3pt;
                              margin-top: 3pt;
                              margin-bottom: 3pt;
                              font-size: 11pt;
                              line-height: 13pt;
                              ">${time_string}</p>`;
        this.model.set('value', html);
        this.touch();
    },
});
//-----------------------------------------------
//-----------------------------------------------
var VideoView = widgets.DOMWidgetView.extend({
    // View for VideoModel: a single HTML5 <video> element wired for two-way
    // synchronization with the backbone model (remote method invocation via
    // `_method`, property setting via `_property`, src/current_time changes)
    // plus keyboard and mouse transport control.
    render: function() {
        // This project's view is a single <video/> element.
        this.video = document.createElement('video');
        this.setElement(this.video);
        this.video.preload = 'metadata';
        this.video.autoplay = false;
        this.video.controls = true;
        this.src_changed();
        // .listenTo() is better than .on()
        // http://backbonejs.org/#Events-listenTo
        // https://coderwall.com/p/fpxt4w/using-backbone-s-new-listento
        this.listenTo(this.model, 'change:_method', this.invoke_method);
        this.listenTo(this.model, 'change:_property', this.set_property);
        this.listenTo(this.model, 'change:_play_pause', this.play_pause_changed);
        this.listenTo(this.model, 'change:src', this.src_changed);
        this.listenTo(this.model, 'change:current_time', this.current_time_changed);
        //-------------------------------------------------
        // Video element event handlers: forward each media event's state
        // snapshot to the backbone model (see handle_event below).
        // https://developer.mozilla.org/en-US/docs/Web/Reference/Events
        // https://developer.mozilla.org/en-US/docs/Web/API/EventTarget/addEventListener
        this.video.addEventListener('durationchange', this.handle_event.bind(this));
        this.video.addEventListener('ended', this.handle_event.bind(this));
        this.video.addEventListener('loadedmetadata', this.handle_event.bind(this));
        this.video.addEventListener('pause', this.handle_event.bind(this));
        this.video.addEventListener('play', this.handle_event.bind(this));
        this.video.addEventListener('playing', this.handle_event.bind(this));
        this.video.addEventListener('ratechange', this.handle_event.bind(this));
        this.video.addEventListener('seeked', this.handle_event.bind(this));
        this.video.addEventListener('seeking', this.handle_event.bind(this));
        this.video.addEventListener('timeupdate', this.handle_event.bind(this));
        this.video.addEventListener('volumechange', this.handle_event.bind(this));
        // Special handling for play and pause events
        this.enable_fast_time_update = false
        this.video.addEventListener('play', this.handle_play.bind(this));
        this.video.addEventListener('pause', this.handle_pause.bind(this));
        // Define throttled event handlers for mouse wheel and mouse click
        var dt = 10; // milliseconds
        var throttled_mouse_wheel = throttle(this.handle_mouse_wheel, dt, this);
        this.video.addEventListener('wheel', throttled_mouse_wheel);
        var throttled_mouse_click = throttle(this.handle_mouse_click, dt, this);
        this.video.addEventListener('click', throttled_mouse_click);
        //-------------------------------------------------
        // Handle keyboard event via containing div element.
        this.video.onloadedmetadata = function(ev) {
            // Parent element only knowable after DOM is rendered
            // NOTE(review): 'div.output_area' is a classic-notebook DOM
            // class; confirm against the notebook front-end in use.
            var container = ev.target.closest('div.output_area');
            container.tabIndex = 0
            function div_focus() {
                if (this.model.get('_enable_keyboard')) {
                    container.focus();
                };
            }
            container.addEventListener('mouseover', div_focus.bind(this));
            container.addEventListener('keydown', this.handle_keypress.bind(this));
        }.bind(this);
        //-------------------------------------------------
        // Prevent page from scrolling with mouse wheel when hovering over video element
        this.video.onwheel = function(ev) {
            ev.preventDefault();
        };
        // Prevent context menu popup from right-click on video element
        this.video.oncontextmenu = function(ev) {
            ev.preventDefault();
        };
        return this;
    },
    //------------------------------------------------------------------------------------------
    //------------------------------------------------------------------------------------------
    // Functions defined below generally are called in response to changes in the backbone model.
    // Typical outcome is to make changes to some front-end components, or to make changes to other
    // model components.
    invoke_method: function() {
        // Call a named method on the <video> element with the supplied args.
        // https://developer.mozilla.org/en-US/docs/Web/API/HTMLMediaElement
        // stamp is a timestamp generated at back end.  It's used here to guarantee a unique data
        // Backbone event.
        var parts = this.model.get('_method');
        var name = parts[0];
        var stamp = parts[1];
        var args = parts[2];
        this.video[name](...args);
    },
    set_property: function() {
        // Assign a named property on the <video> element.
        // https://developer.mozilla.org/en-US/docs/Web/API/HTMLMediaElement
        var parts = this.model.get('_property');
        var name = parts[0];
        var stamp = parts[1];
        var value = parts[2];
        this.video[name] = value;
    },
    src_changed: function() {
        // backend --> frontend: copy the model's src onto the element.
        var field = 'src';
        this.video[field] = this.model.get(field);
    },
    current_time_changed: function() {
        // HTML5 video element responds to backbone model changes.
        this.video['currentTime'] = this.model.get('current_time');
        // }
    },
    play_pause_changed: function() {
        // Toggle playback state based on the element's current paused flag.
        if (this.video.paused) {
            this.play();
        } else {
            this.pause();
        }
    },
    play: function() {
        // Start video playback, handled through backbone system.
        this.model.set('_method', ['play', Date.now(), '']);
        this.touch();
    },
    pause: function() {
        // Stop video playback, handled through backbone system.
        this.model.set('_method', ['pause', Date.now(), '']);
        this.touch();
    },
    jump_frames: function(num_frames) {
        // Jump fractional number of frames, positive or negative
        // (frame duration comes from the model's `timebase`).
        var dt_frame = this.model.get('timebase');
        this.jump_seconds(num_frames*dt_frame);
    },
    jump_seconds: function(dt_seconds) {
        // Jump fractional number of seconds, positive or negative.
        // Playback is paused first so the seek lands precisely.
        if (!this.video.paused) {
            this.video.pause();
        }
        this.video.currentTime += dt_seconds;
        // if (paused) {
        //     this.video.play();
        // }
    },
    //-------------------------------------------------------------------------------------------
    //-------------------------------------------------------------------------------------------
    // The various handle_<something> functions are written to respond to front-end
    // JavaScript-generated events.  The usual outcome is either changing a parameter in the
    // backbone model or changing some other front-end component.
    handle_event: function(ev) {
        // General video-element event handler: snapshot a fixed set of media
        // properties into the model's `_event` dict, plus current_time.
        // https://developer.mozilla.org/en-US/docs/Web/API/HTMLMediaElement
        var fields = ['clientHeight', 'clientWidth', 'controls', 'currentTime', 'currentSrc',
                      'duration', 'ended', 'muted', 'paused', 'playbackRate',
                      'readyState', 'seeking', 'videoHeight', 'videoWidth', 'volume'];
        var pev = {'type': ev.type};
        for (let f of fields) {
            pev[f] = ev.target[f];
        }
        this.model.set('_event', pev);
        // https://developer.mozilla.org/en-US/docs/Web/Events/timeupdate
        this.model.set('current_time', ev.target['currentTime']);
        this.touch();
    },
    fast_time_update: function() {
        // Push current playback time to the model on a self-rescheduling
        // ~100 ms timer while enable_fast_time_update is true (playback).
        this.model.set('current_time', this.video['currentTime']);
        this.touch();
        var delta_time_fast = 100; // milliseconds
        if (this.enable_fast_time_update) {
            setTimeout(this.fast_time_update.bind(this), delta_time_fast);
        }
    },
    handle_play: function(ev) {
        // console.log(ev);
        // Don't respond to current_time events while playing.  The video itself is the source of
        // those events, and responding to them will only cause hard-to-debug timing trouble.
        this.stopListening(this.model, 'change:current_time');
        // Emit time updates in background at faster rate
        this.enable_fast_time_update = true;
        this.fast_time_update();
    },
    handle_pause: function(ev) {
        // Once no longer playing it is safe again to listen for current_time events.
        this.listenTo(this.model, 'change:current_time', this.current_time_changed);
        // Stop emitting time updates at faster rate
        this.enable_fast_time_update = false;
    },
    handle_keypress: function(ev) {
        // Keyboard transport control: space toggles play/pause; arrow keys
        // step one frame, or one second when Ctrl is held.
        if (this.model.get('_enable_keyboard')) {
            // console.log(ev.key)
            // 'altKey'
            // 'metaKey'
            // 'ctrlKey'
            ev.stopImmediatePropagation();
            ev.stopPropagation();
            ev.preventDefault();
            if (ev.key == ' ') {
                // space bar toggle play/pause
                this.play_pause_changed();
            } else if (ev.key == 'ArrowLeft') {
                if (ev.ctrlKey) {
                    this.jump_seconds(-1);
                } else {
                    this.jump_frames(-1);
                }
            } else if (ev.key == 'ArrowRight') {
                if (ev.ctrlKey) {
                    this.jump_seconds(1);
                } else {
                    this.jump_frames(1);
                }
            }
        }
    },
    handle_mouse_wheel: function(ev) {
        // Wheel steps one frame (or one second with Ctrl); sign follows the
        // wheel's vertical delta.
        var increment;
        if (ev.deltaY < 0) {
            // Forwards
            increment = 1
        } else {
            // Backwards
            increment = -1
        }
        if (ev.ctrlKey) {
            // ctrl --> skip one second
            this.jump_seconds(increment);
        } else {
            // skip a single frame
            // e.g. 1/30 or 1/60 sec
            this.jump_frames(increment);
        }
    },
    handle_mouse_click: function(ev) {
        // Click toggles play/pause (throttled in render()).
        this.play_pause_changed();
    },
});
//-----------------------------------------------
// Public exports of the jupyter-video widget module: the model/view pairs
// registered with the @jupyter-widgets framework.
module.exports = {
    TimeCodeModel: TimeCodeModel,
    TimeCodeView: TimeCodeView,
    VideoModel: VideoModel,
    VideoView: VideoView
};
/***/ }),
/* 2 */
/***/ (function(module, exports) {
// Webpack external module: re-exports the host-provided
// "@jupyter-widgets/base" package, passed into the bundle's AMD define()
// wrapper as __WEBPACK_EXTERNAL_MODULE_2__.
module.exports = __WEBPACK_EXTERNAL_MODULE_2__;
/***/ }),
/* 3 */
/***/ (function(module, exports, __webpack_require__) {
var __WEBPACK_AMD_DEFINE_ARRAY__, __WEBPACK_AMD_DEFINE_RESULT__;// Underscore.js 1.8.3
// http://underscorejs.org
// (c) 2009-2015 Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors
// Underscore may be freely distributed under the MIT license.
(function() {
// Baseline setup
// --------------
// Establish the root object, `window` in the browser, or `exports` on the server.
var root = this;
// Save the previous value of the `_` variable.
var previousUnderscore = root._;
// Save bytes in the minified (but not gzipped) version:
var ArrayProto = Array.prototype, ObjProto = Object.prototype, FuncProto = Function.prototype;
// Create quick reference variables for speed access to core prototypes.
var
push = ArrayProto.push,
slice = ArrayProto.slice,
toString = ObjProto.toString,
hasOwnProperty = ObjProto.hasOwnProperty;
// All **ECMAScript 5** native function implementations that we hope to use
// are declared here.
var
nativeIsArray = Array.isArray,
nativeKeys = Object.keys,
nativeBind = FuncProto.bind,
nativeCreate = Object.create;
// Naked function reference for surrogate-prototype-swapping.
var Ctor = function(){};
// Create a safe reference to the Underscore object for use below.
var _ = function(obj) {
if (obj instanceof _) return obj;
if (!(this instanceof _)) return new _(obj);
this._wrapped = obj;
};
// Export the Underscore object for **Node.js**, with
// backwards-compatibility for the old `require()` API. If we're in
// the browser, add `_` as a global object.
if (true) {
if (typeof module !== 'undefined' && module.exports) {
exports = module.exports = _;
}
exports._ = _;
} else {
root._ = _;
}
// Current version.
_.VERSION = '1.8.3';
// Internal function that returns an efficient (for current engines) version
// of the passed-in callback, to be repeatedly applied in other Underscore
// functions.
var optimizeCb = function(func, context, argCount) {
if (context === void 0) return func;
switch (argCount == null ? 3 : argCount) {
case 1: return function(value) {
return func.call(context, value);
};
case 2: return function(value, other) {
return func.call(context, value, other);
};
case 3: return function(value, index, collection) {
return func.call(context, value, index, collection);
};
case 4: return function(accumulator, value, index, collection) {
return func.call(context, accumulator, value, index, collection);
};
}
return function() {
return func.apply(context, arguments);
};
};
// A mostly-internal function to generate callbacks that can be applied
// to each element in a collection, returning the desired result — either
// identity, an arbitrary callback, a property matcher, or a property accessor.
var cb = function(value, context, argCount) {
if (value == null) return _.identity;
if (_.isFunction(value)) return optimizeCb(value, context, argCount);
if (_.isObject(value)) return _.matcher(value);
return _.property(value);
};
_.iteratee = function(value, context) {
return cb(value, context, Infinity);
};
// An internal function for creating assigner functions.
var createAssigner = function(keysFunc, undefinedOnly) {
return function(obj) {
var length = arguments.length;
if (length < 2 || obj == null) return obj;
for (var index = 1; index < length; index++) {
var source = arguments[index],
keys = keysFunc(source),
l = keys.length;
for (var i = 0; i < l; i++) {
var key = keys[i];
if (!undefinedOnly || obj[key] === void 0) obj[key] = source[key];
}
}
return obj;
};
};
// An internal function for creating a new object that inherits from another.
var baseCreate = function(prototype) {
if (!_.isObject(prototype)) return {};
if (nativeCreate) return nativeCreate(prototype);
Ctor.prototype = prototype;
var result = new Ctor;
Ctor.prototype = null;
return result;
};
var property = function(key) {
return function(obj) {
return obj == null ? void 0 : obj[key];
};
};
// Helper for collection methods to determine whether a collection
// should be iterated as an array or as an object
// Related: http://people.mozilla.org/~jorendorff/es6-draft.html#sec-tolength
// Avoids a very nasty iOS 8 JIT bug on ARM-64. #2094
var MAX_ARRAY_INDEX = Math.pow(2, 53) - 1;
var getLength = property('length');
var isArrayLike = function(collection) {
var length = getLength(collection);
return typeof length == 'number' && length >= 0 && length <= MAX_ARRAY_INDEX;
};
// Collection Functions
// --------------------
// The cornerstone, an `each` implementation, aka `forEach`.
// Handles raw objects in addition to array-likes. Treats all
// sparse array-likes as if they were dense.
_.each = _.forEach = function(obj, iteratee, context) {
iteratee = optimizeCb(iteratee, context);
var i, length;
if (isArrayLike(obj)) {
for (i = 0, length = obj.length; i < length; i++) {
iteratee(obj[i], i, obj);
}
} else {
var keys = _.keys(obj);
for (i = 0, length = keys.length; i < length; i++) {
iteratee(obj[keys[i]], keys[i], obj);
}
}
return obj;
};
// Return the results of applying the iteratee to each element.
_.map = _.collect = function(obj, iteratee, context) {
iteratee = cb(iteratee, context);
var keys = !isArrayLike(obj) && _.keys(obj),
length = (keys || obj).length,
results = Array(length);
for (var index = 0; index < length; index++) {
var currentKey = keys ? keys[index] : index;
results[index] = iteratee(obj[currentKey], currentKey, obj);
}
return results;
};
// Create a reducing function iterating left or right.
function createReduce(dir) {
// Optimized iterator function as using arguments.length
// in the main function will deoptimize the, see #1991.
function iterator(obj, iteratee, memo, keys, index, length) {
for (; index >= 0 && index < length; index += dir) {
var currentKey = keys ? keys[index] : index;
memo = iteratee(memo, obj[currentKey], currentKey, obj);
}
return memo;
}
return function(obj, iteratee, memo, context) {
iteratee = optimizeCb(iteratee, context, 4);
var keys = !isArrayLike(obj) && _.keys(obj),
length = (keys || obj).length,
index = dir > 0 ? 0 : length - 1;
// Determine the initial value if none is provided.
if (arguments.length < 3) {
memo = obj[keys ? keys[index] : index];
index += dir;
}
return iterator(obj, iteratee, memo, keys, index, length);
};
}
// **Reduce** builds up a single result from a list of values, aka `inject`,
// or `foldl`.
_.reduce = _.foldl = _.inject = createReduce(1);
// The right-associative version of reduce, also known as `foldr`.
_.reduceRight = _.foldr = createReduce(-1);
// Return the first value which passes a truth test. Aliased as `detect`.
_.find = _.detect = function(obj, predicate, context) {
var key;
if (isArrayLike(obj)) {
key = _.findIndex(obj, predicate, context);
} else {
key = _.findKey(obj, predicate, context);
}
if (key !== void 0 && key !== -1) return obj[key];
};
// Return all the elements that pass a truth test.
// Aliased as `select`.
_.filter = _.select = function(obj, predicate, context) {
var results = [];
predicate = cb(predicate, context);
_.each(obj, function(value, index, list) {
if (predicate(value, index, list)) results.push(value);
});
return results;
};
// Return all the elements for which a truth test fails.
_.reject = function(obj, predicate, context) {
return _.filter(obj, _.negate(cb(predicate)), context);
};
// Determine whether all of the elements match a truth test.
// Aliased as `all`.
_.every = _.all = function(obj, predicate, context) {
predicate = cb(predicate, context);
var keys = !isArrayLike(obj) && _.keys(obj),
length = (keys || obj).length;
for (var index = 0; index < length; index++) {
var currentKey = keys ? keys[index] : index;
if (!predicate(obj[currentKey], currentKey, obj)) return false;
}
return true;
};
// Determine if at least one element in the object matches a truth test.
// Aliased as `any`.
_.some = _.any = function(obj, predicate, context) {
predicate = cb(predicate, context);
var keys = !isArrayLike(obj) && _.keys(obj),
length = (keys || obj).length;
for (var index = 0; index < length; index++) {
var currentKey = keys ? keys[index] : index;
if (predicate(obj[currentKey], currentKey, obj)) return true;
}
return false;
};
// Determine if the array or object contains a given item (using `===`).
// Aliased as `includes` and `include`.
_.contains = _.includes = _.include = function(obj, item, fromIndex, guard) {
if (!isArrayLike(obj)) obj = _.values(obj);
if (typeof fromIndex != 'number' || guard) fromIndex = 0;
return _.indexOf(obj, item, fromIndex) >= 0;
};
// Invoke a method (with arguments) on every item in a collection.
_.invoke = function(obj, method) {
var args = slice.call(arguments, 2);
var isFunc = _.isFunction(method);
return _.map(obj, function(value) {
var func = isFunc ? method : value[method];
return func == null ? func : func.apply(value, args);
});
};
// Convenience version of a common use case of `map`: fetching a property.
_.pluck = function(obj, key) {
return _.map(obj, _.property(key));
};
// Convenience version of a common use case of `filter`: selecting only objects
// containing specific `key:value` pairs.
_.where = function(obj, attrs) {
return _.filter(obj, _.matcher(attrs));
};
// Convenience version of a common use case of `find`: getting the first object
// containing specific `key:value` pairs.
_.findWhere = function(obj, attrs) {
return _.find(obj, _.matcher(attrs));
};
// Return the maximum element (or element-based computation).
_.max = function(obj, iteratee, context) {
var result = -Infinity, lastComputed = -Infinity,
value, computed;
if (iteratee == null && obj != null) {
obj = isArrayLike(obj) ? obj : _.values(obj);
for (var i = 0, length = obj.length; i < length; i++) {
value = obj[i];
if (value > result) {
result = value;
}
}
} else {
iteratee = cb(iteratee, context);
_.each(obj, function(value, index, list) {
computed = iteratee(value, index, list);
if (computed > lastComputed || computed === -Infinity && result === -Infinity) {
result = value;
lastComputed = computed;
}
});
}
return result;
};
// Return the minimum element (or element-based computation).
_.min = function(obj, iteratee, context) {
var result = Infinity, lastComputed = Infinity,
value, computed;
if (iteratee == null && obj != null) {
obj = isArrayLike(obj) ? obj : _.values(obj);
for (var i = 0, length = obj.length; i < length; i++) {
value = obj[i];
if (value < result) {
result = value;
}
}
} else {
iteratee = cb(iteratee, context);
_.each(obj, function(value, index, list) {
computed = iteratee(value, index, list);
if (computed < lastComputed || computed === Infinity && result === Infinity) {
result = value;
lastComputed = computed;
}
});
}
return result;
};
// Shuffle a collection, using the modern version of the
// [Fisher-Yates shuffle](http://en.wikipedia.org/wiki/Fisher–Yates_shuffle).
_.shuffle = function(obj) {
var set = isArrayLike(obj) ? obj : _.values(obj);
var length = set.length;
var shuffled = Array(length);
for (var index = 0, rand; index < length; index++) {
rand = _.random(0, index);
if (rand !== index) shuffled[index] = shuffled[rand];
shuffled[rand] = set[index];
}
return shuffled;
};
// Sample **n** random values from a collection.
// If **n** is not specified, returns a single random element.
// The internal `guard` argument allows it to work with `map`.
_.sample = function(obj, n, guard) {
if (n == null || guard) {
if (!isArrayLike(obj)) obj = _.values(obj);
return obj[_.random(obj.length - 1)];
}
return _.shuffle(obj).slice(0, Math.max(0, n));
};
// Sort the object's values by a criterion produced by an iteratee.
_.sortBy = function(obj, iteratee, context) {
iteratee = cb(iteratee, context);
return _.pluck(_.map(obj, function(value, index, list) {
return {
value: value,
index: index,
criteria: iteratee(value, index, list)
};
}).sort(function(left, right) {
var a = left.criteria;
var b = right.criteria;
if (a !== b) {
if (a > b || a === void 0) return 1;
if (a < b || b === void 0) return -1;
}
return left.index - right.index;
}), 'value');
};
// An internal function used for aggregate "group by" operations.
var group = function(behavior) {
return function(obj, iteratee, context) {
var result = {};
iteratee = cb(iteratee, context);
_.each(obj, function(value, index) {
var key = iteratee(value, index, obj);
behavior(result, value, key);
});
return result;
};
};
// Groups the object's values by a criterion. Pass either a string attribute
// to group by, or a function that returns the criterion.
_.groupBy = group(function(result, value, key) {
if (_.has(result, key)) result[key].push(value); else result[key] = [value];
});
// Indexes the object's values by a criterion, similar to `groupBy`, but for
// when you know that your index values will be unique.
_.indexBy = group(function(result, value, key) {
result[key] = value;
});
// Counts instances of an object that group by a certain criterion. Pass
// either a string attribute to count by, or a function that returns the
// criterion.
_.countBy = group(function(result, value, key) {
if (_.has(result, key)) result[key]++; else result[key] = 1;
});
// Safely create a real, live array from anything iterable.
_.toArray = function(obj) {
if (!obj) return [];
if (_.isArray(obj)) return slice.call(obj);
if (isArrayLike(obj)) return _.map(obj, _.identity);
return _.values(obj);
};
// Return the number of elements in an object.
_.size = function(obj) {
if (obj == null) return 0;
return isArrayLike(obj) ? obj.length : _.keys(obj).length;
};
// Split a collection into two arrays: one whose elements all satisfy the given
// predicate, and one whose elements all do not satisfy the predicate.
_.partition = function(obj, predicate, context) {
predicate = cb(predicate, context);
var pass = [], fail = [];
_.each(obj, function(value, key, obj) {
(predicate(value, key, obj) ? pass : fail).push(value);
});
return [pass, fail];
};
// Array Functions
// ---------------
// Get the first element of an array. Passing **n** will return the first N
// values in the array. Aliased as `head` and `take`. The **guard** check
// allows it to work with `_.map`.
_.first = _.head = _.take = function(array, n, guard) {
if (array == null) return void 0;
if (n == null || guard) return array[0];
return _.initial(array, array.length - n);
};
// Returns everything but the last entry of the array. Especially useful on
// the arguments object. Passing **n** will return all the values in
// the array, excluding the last N.
_.initial = function(array, n, guard) {
return slice.call(array, 0, Math.max(0, array.length - (n == null || guard ? 1 : n)));
};
// Get the last element of an array. Passing **n** will return the last N
// values in the array.
_.last = function(array, n, guard) {
if (array == null) return void 0;
if (n == null || guard) return array[array.length - 1];
return _.rest(array, Math.max(0, array.length - n));
};
// Returns everything but the first entry of the array. Aliased as `tail` and `drop`.
// Especially useful on the arguments object. Passing an **n** will return
// the rest N values in the array.
_.rest = _.tail = _.drop = function(array, n, guard) {
return slice.call(array, n == null || guard ? 1 : n);
};
// Trim out all falsy values from an array.
_.compact = function(array) {
return _.filter(array, _.identity);
};
// Internal implementation of a recursive `flatten` function.
var flatten = function(input, shallow, strict, startIndex) {
var output = [], idx = 0;
for (var i = startIndex || 0, length = getLength(input); i < length; i++) {
var value = input[i];
if (isArrayLike(value) && (_.isArray(value) || _.isArguments(value))) {
//flatten current level of array or arguments object
if (!shallow) value = flatten(value, shallow, strict);
var j = 0, len = value.length;
output.length += len;
while (j < len) {
output[idx++] = value[j++];
}
} else if (!strict) {
output[idx++] = value;
}
}
return output;
};
// Flatten out an array, either recursively (by default), or just one level.
_.flatten = function(array, shallow) {
return flatten(array, shallow, false);
};
// Return a version of the array that does not contain the specified value(s).
_.without = function(array) {
return _.difference(array, slice.call(arguments, 1));
};
// Produce a duplicate-free version of the array. If the array has already
// been sorted, you have the option of using a faster algorithm.
// Aliased as `unique`.
_.uniq = _.unique = function(array, isSorted, iteratee, context) {
if (!_.isBoolean(isSorted)) {
context = iteratee;
iteratee = isSorted;
isSorted = false;
}
if (iteratee != null) iteratee = cb(iteratee, context);
var result = [];
var seen = [];
for (var i = 0, length = getLength(array); i < length; i++) {
var value = array[i],
computed = iteratee ? iteratee(value, i, array) : value;
if (isSorted) {
if (!i || seen !== computed) result.push(value);
seen = computed;
} else if (iteratee) {
if (!_.contains(seen, computed)) {
seen.push(computed);
result.push(value);
}
} else if (!_.contains(result, value)) {
result.push(value);
}
}
return result;
};
// Produce an array that contains the union: each distinct element from all of
// the passed-in arrays.
_.union = function() {
return _.uniq(flatten(arguments, true, true));
};
// Produce an array that contains every item shared between all the
// passed-in arrays.
_.intersection = function(array) {
var result = [];
var argsLength = arguments.length;
for (var i = 0, length = getLength(array); i < length; i++) {
var item = array[i];
if (_.contains(result, item)) continue;
for (var j = 1; j < argsLength; j++) {
if (!_.contains(arguments[j], item)) break;
}
// Relies on `var` hoisting: `j` survives the inner loop and equals
// argsLength only when no `break` fired (item found in every array).
if (j === argsLength) result.push(item);
}
return result;
};
// Take the difference between one array and a number of other arrays.
// Only the elements present in just the first array will remain.
_.difference = function(array) {
// startIndex 1 skips `array` itself when flattening the rest arguments.
var rest = flatten(arguments, true, true, 1);
return _.filter(array, function(value){
return !_.contains(rest, value);
});
};
// Zip together multiple lists into a single array -- elements that share
// an index go together.
_.zip = function() {
return _.unzip(arguments);
};
// Complement of _.zip. Unzip accepts an array of arrays and groups
// each array's elements on shared indices
_.unzip = function(array) {
// The result length is the length of the longest inner array.
var length = array && _.max(array, getLength).length || 0;
var result = Array(length);
for (var index = 0; index < length; index++) {
result[index] = _.pluck(array, index);
}
return result;
};
// Converts lists into objects. Pass either a single array of `[key, value]`
// pairs, or two parallel arrays of the same length -- one of keys, and one of
// the corresponding values.
_.object = function(list, values) {
var result = {};
for (var i = 0, length = getLength(list); i < length; i++) {
if (values) {
result[list[i]] = values[i];
} else {
result[list[i][0]] = list[i][1];
}
}
return result;
};
// Generator function to create the findIndex and findLastIndex functions.
// dir: +1 scans forward from index 0, -1 scans backward from the end.
function createPredicateIndexFinder(dir) {
return function(array, predicate, context) {
predicate = cb(predicate, context);
var length = getLength(array);
var index = dir > 0 ? 0 : length - 1;
for (; index >= 0 && index < length; index += dir) {
if (predicate(array[index], index, array)) return index;
}
// No element satisfied the predicate.
return -1;
};
}
// Returns the first index on an array-like that passes a predicate test
_.findIndex = createPredicateIndexFinder(1);
_.findLastIndex = createPredicateIndexFinder(-1);
// Use a comparator function to figure out the smallest index at which
// an object should be inserted so as to maintain order. Uses binary search.
_.sortedIndex = function(array, obj, iteratee, context) {
iteratee = cb(iteratee, context, 1);
var value = iteratee(obj);
var low = 0, high = getLength(array);
// Classic binary search over the half-open range [low, high).
while (low < high) {
var mid = Math.floor((low + high) / 2);
if (iteratee(array[mid]) < value) low = mid + 1; else high = mid;
}
return low;
};
// Generator function to create the indexOf and lastIndexOf functions.
// dir: scan direction; predicateFind: fallback finder used for NaN lookups;
// sortedIndex: optional binary-search accelerator for sorted arrays.
function createIndexFinder(dir, predicateFind, sortedIndex) {
return function(array, item, idx) {
var i = 0, length = getLength(array);
if (typeof idx == 'number') {
// A numeric third argument is a start offset (forward scan) or an
// end bound (backward scan); negative values count from the end.
if (dir > 0) {
i = idx >= 0 ? idx : Math.max(idx + length, i);
} else {
length = idx >= 0 ? Math.min(idx + 1, length) : idx + length + 1;
}
} else if (sortedIndex && idx && length) {
// A truthy non-numeric idx means "the array is sorted": binary search.
idx = sortedIndex(array, item);
return array[idx] === item ? idx : -1;
}
// NaN never strict-equals itself, so delegate NaN lookups to the
// predicate-based finder with _.isNaN.
if (item !== item) {
idx = predicateFind(slice.call(array, i, length), _.isNaN);
return idx >= 0 ? idx + i : -1;
}
for (idx = dir > 0 ? i : length - 1; idx >= 0 && idx < length; idx += dir) {
if (array[idx] === item) return idx;
}
return -1;
};
}
// Return the position of the first occurrence of an item in an array,
// or -1 if the item is not included in the array.
// If the array is large and already in sort order, pass `true`
// for **isSorted** to use binary search.
_.indexOf = createIndexFinder(1, _.findIndex, _.sortedIndex);
_.lastIndexOf = createIndexFinder(-1, _.findLastIndex);
// Generate an integer Array containing an arithmetic progression. A port of
// the native Python `range()` function. See
// [the Python documentation](http://docs.python.org/library/functions.html#range).
_.range = function(start, stop, step) {
// Called with a single argument: treat it as the stop value, from zero.
if (stop == null) {
stop = start || 0;
start = 0;
}
if (!step) step = 1;
// Number of entries in the progression (clamped to be non-negative).
var count = Math.max(Math.ceil((stop - start) / step), 0);
var result = Array(count);
var current = start;
for (var index = 0; index < count; index++) {
result[index] = current;
current += step;
}
return result;
};
// Function (ahem) Functions
// ------------------
// Determines whether to execute a function as a constructor
// or a normal function with the provided arguments
var executeBound = function(sourceFunc, boundFunc, context, callingContext, args) {
// Ordinary call: not invoked via `new boundFunc(...)`.
if (!(callingContext instanceof boundFunc)) return sourceFunc.apply(context, args);
// `new` call: emulate constructor semantics using the source prototype.
var self = baseCreate(sourceFunc.prototype);
var result = sourceFunc.apply(self, args);
// A constructor that explicitly returns an object overrides `this`.
if (_.isObject(result)) return result;
return self;
};
// Create a function bound to a given object (assigning `this`, and arguments,
// optionally). Delegates to **ECMAScript 5**'s native `Function.bind` if
// available.
_.bind = function(func, context) {
if (nativeBind && func.bind === nativeBind) return nativeBind.apply(func, slice.call(arguments, 1));
if (!_.isFunction(func)) throw new TypeError('Bind must be called on a function');
var args = slice.call(arguments, 2);
var bound = function() {
return executeBound(func, bound, context, this, args.concat(slice.call(arguments)));
};
return bound;
};
// Partially apply a function by creating a version that has had some of its
// arguments pre-filled, without changing its dynamic `this` context. _ acts
// as a placeholder, allowing any combination of arguments to be pre-filled.
_.partial = function(func) {
var boundArgs = slice.call(arguments, 1);
var bound = function() {
var position = 0, length = boundArgs.length;
var args = Array(length);
for (var i = 0; i < length; i++) {
// `_` placeholders are filled left-to-right from the call arguments.
args[i] = boundArgs[i] === _ ? arguments[position++] : boundArgs[i];
}
// Append any call arguments not consumed by placeholders.
while (position < arguments.length) args.push(arguments[position++]);
return executeBound(func, bound, this, this, args);
};
return bound;
};
// Bind a number of an object's methods to that object. Remaining arguments
// are the method names to be bound. Useful for ensuring that all callbacks
// defined on an object belong to it.
_.bindAll = function(obj) {
var i, length = arguments.length, key;
if (length <= 1) throw new Error('bindAll must be passed function names');
for (i = 1; i < length; i++) {
key = arguments[i];
obj[key] = _.bind(obj[key], obj);
}
return obj;
};
// Memoize an expensive function by storing its results.
_.memoize = function(func, hasher) {
var memoize = function(key) {
var cache = memoize.cache;
// Cache key: the hasher result if provided, else the first argument,
// coerced to a string for use as a plain-object property.
var address = '' + (hasher ? hasher.apply(this, arguments) : key);
if (!_.has(cache, address)) cache[address] = func.apply(this, arguments);
return cache[address];
};
// The cache is exposed so callers can inspect, prime, or clear it.
memoize.cache = {};
return memoize;
};
// Delays a function for the given number of milliseconds, and then calls
// it with the arguments supplied.
_.delay = function(func, wait) {
var args = slice.call(arguments, 2);
return setTimeout(function(){
return func.apply(null, args);
}, wait);
};
// Defers a function, scheduling it to run after the current call stack has
// cleared.
_.defer = _.partial(_.delay, _, 1);
// Returns a function, that, when invoked, will only be triggered at most once
// during a given window of time. Normally, the throttled function will run
// as much as it can, without ever going more than once per `wait` duration;
// but if you'd like to disable the execution on the leading edge, pass
// `{leading: false}`. To disable execution on the trailing edge, ditto.
_.throttle = function(func, wait, options) {
var context, args, result;
var timeout = null;
// Timestamp of the last invocation; 0 means "has never run".
var previous = 0;
if (!options) options = {};
// Trailing-edge invocation, fired from setTimeout.
var later = function() {
previous = options.leading === false ? 0 : _.now();
timeout = null;
result = func.apply(context, args);
// Release references unless func itself scheduled a new timer.
if (!timeout) context = args = null;
};
return function() {
var now = _.now();
// With leading disabled, pretend the previous run just happened.
if (!previous && options.leading === false) previous = now;
var remaining = wait - (now - previous);
context = this;
args = arguments;
// `remaining > wait` guards against a system clock that moved backwards.
if (remaining <= 0 || remaining > wait) {
if (timeout) {
clearTimeout(timeout);
timeout = null;
}
previous = now;
result = func.apply(context, args);
if (!timeout) context = args = null;
} else if (!timeout && options.trailing !== false) {
timeout = setTimeout(later, remaining);
}
return result;
};
};
// Returns a function, that, as long as it continues to be invoked, will not
// be triggered. The function will be called after it stops being called for
// N milliseconds. If `immediate` is passed, trigger the function on the
// leading edge, instead of the trailing.
_.debounce = function(func, wait, immediate) {
var timeout, args, context, timestamp, result;
var later = function() {
var last = _.now() - timestamp;
// Not enough quiet time yet: re-arm the timer for the remainder.
if (last < wait && last >= 0) {
timeout = setTimeout(later, wait - last);
} else {
timeout = null;
if (!immediate) {
result = func.apply(context, args);
if (!timeout) context = args = null;
}
}
};
return function() {
context = this;
args = arguments;
// Record the most recent call; `later` measures quiet time against it.
timestamp = _.now();
var callNow = immediate && !timeout;
if (!timeout) timeout = setTimeout(later, wait);
if (callNow) {
result = func.apply(context, args);
context = args = null;
}
return result;
};
};
// Returns the first function passed as an argument to the second,
// allowing you to adjust arguments, run code before and after, and
// conditionally execute the original function.
_.wrap = function(func, wrapper) {
return _.partial(wrapper, func);
};
// Returns a negated version of the passed-in predicate.
_.negate = function(predicate) {
return function() {
return !predicate.apply(this, arguments);
};
};
// Returns a function that is the composition of a list of functions, each
// consuming the return value of the function that follows.
_.compose = function() {
var args = arguments;
var start = args.length - 1;
return function() {
// Apply right-to-left: the last function receives the call arguments.
var i = start;
var result = args[start].apply(this, arguments);
while (i--) result = args[i].call(this, result);
return result;
};
};
// Returns a function that will only be executed on and after the Nth call.
_.after = function(times, func) {
return function() {
if (--times < 1) {
return func.apply(this, arguments);
}
};
};
// Returns a function that will only be executed up to (but not including) the Nth call.
_.before = function(times, func) {
var memo;
return function() {
if (--times > 0) {
memo = func.apply(this, arguments);
}
// Release `func` once exhausted; further calls return the cached result.
if (times <= 1) func = null;
return memo;
};
};
// Returns a function that will be executed at most one time, no matter how
// often you call it. Useful for lazy initialization.
_.once = _.partial(_.before, 2);
// Object Functions
// ----------------
// Keys in IE < 9 that won't be iterated by `for key in ...` and thus missed.
var hasEnumBug = !{toString: null}.propertyIsEnumerable('toString');
var nonEnumerableProps = ['valueOf', 'isPrototypeOf', 'toString',
'propertyIsEnumerable', 'hasOwnProperty', 'toLocaleString'];
// Append own shadowing properties from the non-enumerable list to `keys`.
function collectNonEnumProps(obj, keys) {
var nonEnumIdx = nonEnumerableProps.length;
var constructor = obj.constructor;
var proto = (_.isFunction(constructor) && constructor.prototype) || ObjProto;
// Constructor is a special case.
var prop = 'constructor';
if (_.has(obj, prop) && !_.contains(keys, prop)) keys.push(prop);
while (nonEnumIdx--) {
prop = nonEnumerableProps[nonEnumIdx];
// Only count the property when the object shadows its prototype's value.
if (prop in obj && obj[prop] !== proto[prop] && !_.contains(keys, prop)) {
keys.push(prop);
}
}
}
// Retrieve the names of an object's own properties.
// Delegates to **ECMAScript 5**'s native `Object.keys`
_.keys = function(obj) {
if (!_.isObject(obj)) return [];
if (nativeKeys) return nativeKeys(obj);
var keys = [];
for (var key in obj) if (_.has(obj, key)) keys.push(key);
// Ahem, IE < 9.
if (hasEnumBug) collectNonEnumProps(obj, keys);
return keys;
};
// Retrieve all the property names of an object.
_.allKeys = function(obj) {
if (!_.isObject(obj)) return [];
var keys = [];
// Unlike _.keys, inherited enumerable properties are included here.
for (var key in obj) keys.push(key);
// Ahem, IE < 9.
if (hasEnumBug) collectNonEnumProps(obj, keys);
return keys;
};
// Retrieve the values of an object's properties, in _.keys order.
_.values = function(obj) {
var names = _.keys(obj);
var result = Array(names.length);
// Fill from the back; order of assignment does not affect the result.
for (var idx = names.length - 1; idx >= 0; idx--) {
result[idx] = obj[names[idx]];
}
return result;
};
// Returns the results of applying the iteratee to each element of the object
// In contrast to _.map it returns an object
_.mapObject = function(obj, iteratee, context) {
iteratee = cb(iteratee, context);
var keys = _.keys(obj),
length = keys.length,
results = {},
currentKey;
for (var index = 0; index < length; index++) {
currentKey = keys[index];
results[currentKey] = iteratee(obj[currentKey], currentKey, obj);
}
return results;
};
// Convert an object into a list of `[key, value]` pairs.
_.pairs = function(obj) {
var keys = _.keys(obj);
var length = keys.length;
var pairs = Array(length);
for (var i = 0; i < length; i++) {
pairs[i] = [keys[i], obj[keys[i]]];
}
return pairs;
};
// Invert the keys and values of an object. The values must be serializable.
// (Values become property names, so they are coerced to strings; duplicate
// values overwrite earlier entries.)
_.invert = function(obj) {
var result = {};
var keys = _.keys(obj);
for (var i = 0, length = keys.length; i < length; i++) {
result[obj[keys[i]]] = keys[i];
}
return result;
};
// Return a sorted list of the function names available on the object.
// Aliased as `methods`
_.functions = _.methods = function(obj) {
var names = [];
// Deliberately walks the prototype chain (for..in without a has-own check).
for (var key in obj) {
if (_.isFunction(obj[key])) names.push(key);
}
return names.sort();
};
// Extend a given object with all the properties in passed-in object(s).
_.extend = createAssigner(_.allKeys);
// Assigns a given object with all the own properties in the passed-in object(s)
// (https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object/assign)
_.extendOwn = _.assign = createAssigner(_.keys);
// Returns the first key on an object that passes a predicate test
_.findKey = function(obj, predicate, context) {
predicate = cb(predicate, context);
var keys = _.keys(obj), key;
for (var i = 0, length = keys.length; i < length; i++) {
key = keys[i];
if (predicate(obj[key], key, obj)) return key;
}
// Implicitly returns undefined when no key matches.
};
// Return a copy of the object only containing the whitelisted properties.
_.pick = function(object, oiteratee, context) {
var result = {}, obj = object, iteratee, keys;
if (obj == null) return result;
if (_.isFunction(oiteratee)) {
// Predicate form: test every property, including inherited ones.
keys = _.allKeys(obj);
iteratee = optimizeCb(oiteratee, context);
} else {
// Key-list form: flatten the remaining arguments into a key list and
// keep only keys actually present on (or inherited by) the object.
keys = flatten(arguments, false, false, 1);
iteratee = function(value, key, obj) { return key in obj; };
obj = Object(obj);
}
for (var i = 0, length = keys.length; i < length; i++) {
var key = keys[i];
var value = obj[key];
if (iteratee(value, key, obj)) result[key] = value;
}
return result;
};
// Return a copy of the object without the blacklisted properties.
_.omit = function(obj, iteratee, context) {
if (_.isFunction(iteratee)) {
// Predicate form: keep whatever the negated predicate accepts.
iteratee = _.negate(iteratee);
} else {
// Key-list form: coerce keys to strings for the contains check below.
var keys = _.map(flatten(arguments, false, false, 1), String);
iteratee = function(value, key) {
return !_.contains(keys, key);
};
}
return _.pick(obj, iteratee, context);
};
// Fill in a given object with default properties.
// The second flag switches createAssigner (defined earlier in this file,
// outside this excerpt) into defaults mode -- TODO confirm against createAssigner.
_.defaults = createAssigner(_.allKeys, true);
// Creates an object that inherits from the given prototype object.
// If additional properties are provided then they will be added to the
// created object.
_.create = function(prototype, props) {
var result = baseCreate(prototype);
if (props) _.extendOwn(result, props);
return result;
};
// Create a (shallow-cloned) duplicate of an object.
_.clone = function(obj) {
// Primitives (and functions) are returned as-is.
if (!_.isObject(obj)) return obj;
return _.isArray(obj) ? obj.slice() : _.extend({}, obj);
};
// Invokes interceptor with the obj, and then returns obj.
// The primary purpose of this method is to "tap into" a method chain, in
// order to perform operations on intermediate results within the chain.
_.tap = function(obj, interceptor) {
interceptor(obj);
return obj;
};
// Returns whether an object has a given set of `key:value` pairs.
_.isMatch = function(object, attrs) {
var names = _.keys(attrs);
var total = names.length;
// A null/undefined object only matches an empty attrs set.
if (object == null) return !total;
var subject = Object(object);
for (var idx = 0; idx < total; idx++) {
var name = names[idx];
// Values must strict-match and the key must actually exist on the subject.
if (attrs[name] !== subject[name] || !(name in subject)) return false;
}
return true;
};
// Internal recursive comparison function for `isEqual`.
var eq = function(a, b, aStack, bStack) {
// Identical objects are equal. `0 === -0`, but they aren't identical.
// See the [Harmony `egal` proposal](http://wiki.ecmascript.org/doku.php?id=harmony:egal).
if (a === b) return a !== 0 || 1 / a === 1 / b;
// A strict comparison is necessary because `null == undefined`.
if (a == null || b == null) return a === b;
// Unwrap any wrapped objects.
if (a instanceof _) a = a._wrapped;
if (b instanceof _) b = b._wrapped;
// Compare `[[Class]]` names.
var className = toString.call(a);
if (className !== toString.call(b)) return false;
switch (className) {
// Strings, numbers, regular expressions, dates, and booleans are compared by value.
case '[object RegExp]':
// RegExps are coerced to strings for comparison (Note: '' + /a/i === '/a/i')
case '[object String]':
// Primitives and their corresponding object wrappers are equivalent; thus, `"5"` is
// equivalent to `new String("5")`.
return '' + a === '' + b;
case '[object Number]':
// `NaN`s are equivalent, but non-reflexive.
// Object(NaN) is equivalent to NaN
if (+a !== +a) return +b !== +b;
// An `egal` comparison is performed for other numeric values.
return +a === 0 ? 1 / +a === 1 / b : +a === +b;
case '[object Date]':
case '[object Boolean]':
// Coerce dates and booleans to numeric primitive values. Dates are compared by their
// millisecond representations. Note that invalid dates with millisecond representations
// of `NaN` are not equivalent.
return +a === +b;
}
var areArrays = className === '[object Array]';
if (!areArrays) {
if (typeof a != 'object' || typeof b != 'object') return false;
// Objects with different constructors are not equivalent, but `Object`s or `Array`s
// from different frames are.
var aCtor = a.constructor, bCtor = b.constructor;
if (aCtor !== bCtor && !(_.isFunction(aCtor) && aCtor instanceof aCtor &&
_.isFunction(bCtor) && bCtor instanceof bCtor)
&& ('constructor' in a && 'constructor' in b)) {
return false;
}
}
// Assume equality for cyclic structures. The algorithm for detecting cyclic
// structures is adapted from ES 5.1 section 15.12.3, abstract operation `JO`.
// Initializing stack of traversed objects.
// It's done here since we only need them for objects and arrays comparison.
aStack = aStack || [];
bStack = bStack || [];
var length = aStack.length;
while (length--) {
// Linear search. Performance is inversely proportional to the number of
// unique nested structures.
if (aStack[length] === a) return bStack[length] === b;
}
// Add the first object to the stack of traversed objects.
aStack.push(a);
bStack.push(b);
// Recursively compare objects and arrays.
if (areArrays) {
// Compare array lengths to determine if a deep comparison is necessary.
length = a.length;
if (length !== b.length) return false;
// Deep compare the contents, ignoring non-numeric properties.
while (length--) {
if (!eq(a[length], b[length], aStack, bStack)) return false;
}
} else {
// Deep compare objects.
var keys = _.keys(a), key;
length = keys.length;
// Ensure that both objects contain the same number of properties before comparing deep equality.
if (_.keys(b).length !== length) return false;
while (length--) {
// Deep compare each member
key = keys[length];
if (!(_.has(b, key) && eq(a[key], b[key], aStack, bStack))) return false;
}
}
// Remove the first object from the stack of traversed objects.
// Early `return false` paths skip this pop, which is safe: the stacks are
// created fresh per top-level isEqual call and a false result unwinds
// the whole recursion immediately.
aStack.pop();
bStack.pop();
return true;
};
// Perform a deep comparison to check if two objects are equal.
_.isEqual = function(a, b) {
return eq(a, b);
};
// Is a given array, string, or object empty?
// An "empty" object has no enumerable own-properties.
_.isEmpty = function(obj) {
if (obj == null) return true;
if (isArrayLike(obj) && (_.isArray(obj) || _.isString(obj) || _.isArguments(obj))) return obj.length === 0;
return _.keys(obj).length === 0;
};
// Is a given value a DOM element?
_.isElement = function(obj) {
// nodeType 1 is ELEMENT_NODE.
return !!(obj && obj.nodeType === 1);
};
// Is a given value an array?
// Delegates to ECMA5's native Array.isArray
_.isArray = nativeIsArray || function(obj) {
return toString.call(obj) === '[object Array]';
};
// Is a given variable an object?
_.isObject = function(obj) {
var type = typeof obj;
// Functions count as objects; `null` (typeof 'object') is excluded by !!obj.
return type === 'function' || type === 'object' && !!obj;
};
// Add some isType methods: isArguments, isFunction, isString, isNumber, isDate, isRegExp, isError.
_.each(['Arguments', 'Function', 'String', 'Number', 'Date', 'RegExp', 'Error'], function(name) {
_['is' + name] = function(obj) {
return toString.call(obj) === '[object ' + name + ']';
};
});
// Define a fallback version of the method in browsers (ahem, IE < 9), where
// there isn't any inspectable "Arguments" type.
if (!_.isArguments(arguments)) {
_.isArguments = function(obj) {
return _.has(obj, 'callee');
};
}
// Optimize `isFunction` if appropriate. Work around some typeof bugs in old v8,
// IE 11 (#1621), and in Safari 8 (#1929).
if (typeof /./ != 'function' && typeof Int8Array != 'object') {
_.isFunction = function(obj) {
return typeof obj == 'function' || false;
};
}
// Is a given object a finite number?
// NOTE(review): numeric strings such as '5' also pass this test -- this is
// the documented Underscore 1.8 behavior, kept as-is.
_.isFinite = function(obj) {
return isFinite(obj) && !isNaN(parseFloat(obj));
};
// Is the given value `NaN`? (NaN is the only number which does not equal itself).
_.isNaN = function(obj) {
return _.isNumber(obj) && obj !== +obj;
};
// Is a given value a boolean?
_.isBoolean = function(obj) {
return obj === true || obj === false || toString.call(obj) === '[object Boolean]';
};
// Is a given value equal to null?
_.isNull = function(obj) {
return obj === null;
};
// Is a given variable undefined?
_.isUndefined = function(obj) {
return obj === void 0;
};
// Shortcut function for checking if an object has a given property directly
// on itself (in other words, not on a prototype).
_.has = function(obj, key) {
return obj != null && hasOwnProperty.call(obj, key);
};
// Utility Functions
// -----------------
// Run Underscore.js in *noConflict* mode, returning the `_` variable to its
// previous owner. Returns a reference to the Underscore object.
_.noConflict = function() {
root._ = previousUnderscore;
return this;
};
// Keep the identity function around for default iteratees.
_.identity = function(value) {
return value;
};
// Predicate-generating functions. Often useful outside of Underscore.
_.constant = function(value) {
return function() {
return value;
};
};
_.noop = function(){};
_.property = property;
// Generates a function for a given object that returns a given property.
_.propertyOf = function(obj) {
// For null input, return a function that always yields undefined.
return obj == null ? function(){} : function(key) {
return obj[key];
};
};
// Returns a predicate for checking whether an object has a given set of
// `key:value` pairs.
_.matcher = _.matches = function(attrs) {
// Copy attrs so later mutation by the caller cannot change the predicate.
attrs = _.extendOwn({}, attrs);
return function(obj) {
return _.isMatch(obj, attrs);
};
};
// Run a function **n** times.
_.times = function(n, iteratee, context) {
var accum = Array(Math.max(0, n));
iteratee = optimizeCb(iteratee, context, 1);
for (var i = 0; i < n; i++) accum[i] = iteratee(i);
return accum;
};
// Return a random integer between min and max (inclusive).
_.random = function(min, max) {
// With a single argument, the range is [0, min].
if (max == null) {
max = min;
min = 0;
}
return min + Math.floor(Math.random() * (max - min + 1));
};
// A (possibly faster) way to get the current timestamp as an integer.
_.now = Date.now || function() {
return new Date().getTime();
};
// List of HTML entities for escaping.
// FIX: the replacement values had themselves been HTML-unescaped at some
// point (e.g. '&': '&'), which made _.escape an identity function and
// silently disabled XSS escaping for template `<%- %>` interpolation.
// Restored the canonical entity strings from Underscore 1.8.x.
var escapeMap = {
'&': '&amp;',
'<': '&lt;',
'>': '&gt;',
'"': '&quot;',
"'": '&#x27;',
'`': '&#96;'
};
// Reverse mapping drives _.unescape.
var unescapeMap = _.invert(escapeMap);
// Functions for escaping and unescaping strings to/from HTML interpolation.
var createEscaper = function(map) {
// Replace a single matched key with its mapped value.
var escaper = function(match) {
return map[match];
};
// Regexes for identifying a key that needs to be escaped
var source = '(?:' + _.keys(map).join('|') + ')';
var testRegexp = RegExp(source);
var replaceRegexp = RegExp(source, 'g');
return function(string) {
string = string == null ? '' : '' + string;
// test() first: skips the cost of replace() for strings with no matches.
return testRegexp.test(string) ? string.replace(replaceRegexp, escaper) : string;
};
};
_.escape = createEscaper(escapeMap);
_.unescape = createEscaper(unescapeMap);
// If the value of the named `property` is a function then invoke it with the
// `object` as context; otherwise, return it.
_.result = function(object, property, fallback) {
var value = object == null ? void 0 : object[property];
if (value === void 0) {
value = fallback;
}
// A function fallback is also invoked, with `object` as its context.
return _.isFunction(value) ? value.call(object) : value;
};
// Generate a unique integer id (unique within the entire client session).
// Useful for temporary DOM ids.
var idCounter = 0;
_.uniqueId = function(prefix) {
var id = ++idCounter + '';
return prefix ? prefix + id : id;
};
// By default, Underscore uses ERB-style template delimiters, change the
// following template settings to use alternative delimiters.
_.templateSettings = {
evaluate : /<%([\s\S]+?)%>/g,
interpolate : /<%=([\s\S]+?)%>/g,
escape : /<%-([\s\S]+?)%>/g
};
// When customizing `templateSettings`, if you don't want to define an
// interpolation, evaluation or escaping regex, we need one that is
// guaranteed not to match.
var noMatch = /(.)^/;
// Certain characters need to be escaped so that they can be put into a
// string literal.
var escapes = {
"'": "'",
'\\': '\\',
'\r': 'r',
'\n': 'n',
'\u2028': 'u2028',
'\u2029': 'u2029'
};
// Matches any character that must be escaped inside the generated source.
var escaper = /\\|'|\r|\n|\u2028|\u2029/g;
// Map a matched character to its backslash-escaped form (escapeChar
// prepends the backslash, so quote and backslash map to themselves).
var escapeChar = function(match) {
return '\\' + escapes[match];
};
// JavaScript micro-templating, similar to John Resig's implementation.
// Underscore templating handles arbitrary delimiters, preserves whitespace,
// and correctly escapes quotes within interpolated code.
// NB: `oldSettings` only exists for backwards compatibility.
_.template = function(text, settings, oldSettings) {
if (!settings && oldSettings) settings = oldSettings;
settings = _.defaults({}, settings, _.templateSettings);
// Combine delimiters into one regular expression via alternation.
var matcher = RegExp([
(settings.escape || noMatch).source,
(settings.interpolate || noMatch).source,
(settings.evaluate || noMatch).source
].join('|') + '|$', 'g');
// Compile the template source, escaping string literals appropriately.
var index = 0;
var source = "__p+='";
text.replace(matcher, function(match, escape, interpolate, evaluate, offset) {
// Copy the literal text between matches, escaped for the source string.
source += text.slice(index, offset).replace(escaper, escapeChar);
index = offset + match.length;
if (escape) {
source += "'+\n((__t=(" + escape + "))==null?'':_.escape(__t))+\n'";
} else if (interpolate) {
source += "'+\n((__t=(" + interpolate + "))==null?'':__t)+\n'";
} else if (evaluate) {
source += "';\n" + evaluate + "\n__p+='";
}
// Adobe VMs need the match returned to produce the correct offest.
return match;
});
source += "';\n";
// If a variable is not specified, place data values in local scope.
if (!settings.variable) source = 'with(obj||{}){\n' + source + '}\n';
source = "var __t,__p='',__j=Array.prototype.join," +
"print=function(){__p+=__j.call(arguments,'');};\n" +
source + 'return __p;\n';
try {
var render = new Function(settings.variable || 'obj', '_', source);
} catch (e) {
// Attach the generated source to help debug template syntax errors.
e.source = source;
throw e;
}
var template = function(data) {
return render.call(this, data, _);
};
// Provide the compiled source as a convenience for precompilation.
var argument = settings.variable || 'obj';
template.source = 'function(' + argument + '){\n' + source + '}';
return template;
};
// Add a "chain" function. Start chaining a wrapped Underscore object.
_.chain = function(obj) {
var instance = _(obj);
instance._chain = true;
return instance;
};
// OOP
// ---------------
// If Underscore is called as a function, it returns a wrapped object that
// can be used OO-style. This wrapper holds altered versions of all the
// underscore functions. Wrapped objects may be chained.
// Helper function to continue chaining intermediate results.
var result = function(instance, obj) {
return instance._chain ? _(obj).chain() : obj;
};
// Add your own custom functions to the Underscore object.
_.mixin = function(obj) {
_.each(_.functions(obj), function(name) {
var func = _[name] = obj[name];
// Wrapper version: prepend the wrapped value to the call arguments.
_.prototype[name] = function() {
var args = [this._wrapped];
push.apply(args, arguments);
return result(this, func.apply(_, args));
};
});
};
// Add all of the Underscore functions to the wrapper object.
_.mixin(_);
// Add all mutator Array functions to the wrapper.
_.each(['pop', 'push', 'reverse', 'shift', 'sort', 'splice', 'unshift'], function(name) {
var method = ArrayProto[name];
_.prototype[name] = function() {
var obj = this._wrapped;
method.apply(obj, arguments);
// Removes a leftover 0th entry after emptying via shift/splice --
// presumably an old-browser quirk workaround kept from upstream.
if ((name === 'shift' || name === 'splice') && obj.length === 0) delete obj[0];
return result(this, obj);
};
});
// Add all accessor Array functions to the wrapper.
_.each(['concat', 'join', 'slice'], function(name) {
var method = ArrayProto[name];
_.prototype[name] = function() {
return result(this, method.apply(this._wrapped, arguments));
};
});
// Extracts the result from a wrapped and chained object.
_.prototype.value = function() {
return this._wrapped;
};
// Provide unwrapping proxy for some methods used in engine operations
// such as arithmetic and JSON stringification.
_.prototype.valueOf = _.prototype.toJSON = _.prototype.value;
_.prototype.toString = function() {
return '' + this._wrapped;
};
// AMD registration happens at the end for compatibility with AMD loaders
// that may not enforce next-turn semantics on modules. Even though general
// practice for AMD registration is to be anonymous, underscore registers
// as a named module because, like jQuery, it is a base library that is
// popular enough to be bundled in a third party lib, but not be part of
// an AMD load request. Those cases could generate an error when an
// anonymous define() is called outside of a loader request.
if (true) {
!(__WEBPACK_AMD_DEFINE_ARRAY__ = [], __WEBPACK_AMD_DEFINE_RESULT__ = function() {
return _;
}.apply(exports, __WEBPACK_AMD_DEFINE_ARRAY__), __WEBPACK_AMD_DEFINE_RESULT__ !== undefined && (module.exports = __WEBPACK_AMD_DEFINE_RESULT__));
}
}.call(this));
/***/ }),
/* 4 */
/***/ (function(module, exports) {
// Inlined package.json for the jupyter-video widget, bundled as a webpack
// module (via json-loader, listed below in devDependencies); presumably
// consumed at runtime for metadata such as `version` -- confirm in src/index.js.
module.exports = {
"name": "jupyter-video",
"version": "0.2.0",
"description": "HTML5 video player widget",
"author": "Pierre V. Villeneuve",
"license": "MIT",
"main": "src/index.js",
"repository": {
"type": "git",
"url": "https://github.com/who8mylunch/Jupyter_Video_Widget.git"
},
"keywords": [
"jupyter",
"widgets",
"ipython",
"ipywidgets",
"html5",
"video"
],
"scripts": {
"prepublish": "webpack",
"test": "echo \"Error: no test specified\" && exit 1"
},
"devDependencies": {
"json-loader": "^0.5.4",
"webpack": "^1.12.14"
},
"dependencies": {
"@jupyter-widgets/base": "^1.0.1",
"@jupyter-widgets/controls": "^1.0.1",
"underscore": "^1.8.3"
}
};
/***/ })
/******/ ])});;
//# sourceMappingURL=index.js.map | PypiClean |
/Automancy-0.5.12.tar.gz/Automancy-0.5.12/automancy/elementals/organisms/dropdown/dropdown.py | from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as wait
from selenium.webdriver.support.ui import Select, WebDriverWait
from automancy.elementals.atoms import Label
from automancy.core import Elemental
from .dropdown_options import DropdownOptions
class Dropdown(Elemental):
    """ Represents a dropdown based element.

    Discovers its option rows at runtime (or accepts statically declared
    ones), stores them in ``self.options`` keyed by their visible label,
    and knows how to open/close the menu and select options both for
    native <select> elements and for custom dropdown widgets.
    """

    def __init__(self, locator, human_name, system_name, options=DropdownOptions()):
        """
        Args:
            locator (str): xpath string for the lookup
            human_name (str): human-readable name
            system_name (str): system-readable name
            options (DropdownOptions): "Options" object which simplifies the instantiation of a Dropdown object

        Notes:
            For details about the 'options' argument, see the "Notes" section in the constructor definition
            of the DropdownOptions class

            NOTE(review): ``options=DropdownOptions()`` is a mutable default
            argument evaluated once at import time, so every Dropdown built
            without an explicit ``options`` shares the same instance — confirm
            DropdownOptions is never mutated after construction.
        """
        super().__init__(locator, human_name, system_name)

        if not isinstance(options, DropdownOptions):
            raise TypeError('The value for the parameter "options" must be an instance of DropdownOptions, found: {}'.format(type(options)))

        # The container for the element id's for each of the drop down options
        # We want to store the ID's instead of the WebElement objects because
        # the Selenium WebElements often go stale and storing the obfuscated ID's
        # allows for the elements to be looked up at any time more freely.
        self.option_ids = []

        # The container for options that can be iterated over to be selected or clicked
        self.options = {}

        # The xpath that will look up all options within the dropdown
        self.options_locator = options.option_locator

        # An optional xpath extension that can be concatenated at the end of an ID lookup
        # for each of the option rows within the dropdown.
        # This xpath extension is used to locate an element on a per row basis that can be
        # clicked or selected (e.g, a checkbox, link, etc)
        self.option_selector_extension = options.option_selector_extension

        # Same general idea as "option_selector_extension", this property is meant to be
        # concatenated to an ID based xpath search in order to find the label that is
        # associated with each option row within a dropdown.
        # In some cases, you may want to look up / choose an option before selecting it
        # based on the text of the label visible as it is to the end user.
        self.option_label_extension = options.option_label_extension

        # Class (e.g. a Checkbox-like Elemental) instantiated for each discovered option row.
        self.options_type = options.option_type

        # When True, get_options() is skipped by open() unless explicitly requested.
        self.static_options = options.static_options

        # When True, option rows live outside the dropdown's own DOM subtree,
        # so option xpaths are NOT prefixed with this dropdown's locator.
        self.disconnected_options = options.disconnected_options

    def open(self, get_options=False):
        """
        Opens the dropdown menu. By default, inspects the element for available options.

        Args:
            get_options (bool): Optional (default: False), You might want the dropdown to be inspected for available options even when static_options is True.

        Returns:
            None
        """
        self.click()

        # By default, look up the options available in the dropdown.
        if not self.static_options or get_options:
            self.get_options()

    def close(self):
        """
        Closes the dropdown by using the escape key.

        Notes:
            Uses the escape key as input in order to close the dropdown because
            trying to select the dropdown to click it again isn't easy/possible.

            IMPORTANT: SafariDriver handles switching to the active element differently
            than other drivers (I.E, ChromeDriver and GeckoDriver) such that if the
            command driver.switch_to.active_element is used, Safari experiences issues
            interacting with other elements after the fact.

            The last point appears to be connected to switching to the active element
            after clicking a checkbox if the driver is already within a child iframe.
            (Needs further testing to be exactly sure)

            Simply using send_keys(Keys.ESCAPE) with SafariDriver doesn't have any issues however,
            with ChromeDriver (for example), if you don't use driver.switch_to.active_element before
            using send_keys(Keys.Escape), things fall apart.

            A simple try except block for an "ElementNotInteractableException" isn't good enough
            here (tested).  Without a specific switch for 'Safari' there are too many unknowable
            situations that fail.  Besides, the need for the logic switch only came about due to
            SafariDriver (and no other) requiring a special rule.

            I know, I know, I hate it too.  It feels super hacky but I've tested several more
            generic ways of handling this but this really does seem to be a Safari only concern.
            Either SafariDriver needs to be fixed (likely) or the other drivers need to conform
            to some standard that they're not currently (unlikely).

        Examples:
            Dropdown is opened with Dropdown.open() -> options are dynamically discovered ->
            Options are checkbox based -> Checkbox is selected for the targeted option ->
            Dropdown.close() is performed -> driver.switch_to.parent_content is used to move
            up from a child iframe/context -> elements within the parent context fail to be
            interacted with inexplicably.

        Returns:
            None
        """
        if self.browser_used == 'Safari':
            # Safari: send ESC straight to the dropdown element (see Notes).
            self.element().send_keys(Keys.ESCAPE)
        else:
            # All other drivers: ESC must go to the currently active element.
            element = self.browser.switch_to.active_element
            element.send_keys(Keys.ESCAPE)

    def deselect_all(self):
        """
        Use if you want to make sure all selectable elements in the dropdown
        are unselected before proceeding with some sequence of events.

        Returns:
            None
        """
        if not self.options:
            self.get_options()

        for option in self.options.values():
            # 'checkboxTrue' in the option's CSS classes marks a selected checkbox row.
            if 'checkboxTrue' in option.classes():
                option.click()

    def get_options(self):
        """
        Inspects the dropdowns' internal elements to discover which options exist based
        on the options row locator.

        Notes:
            Current implementation is only designed to work with dropdowns that have a
            selectable element (i.e., Checkbox) and a corresponding label for that element.

        Returns:
            None
        """
        # Disconnected options live outside this dropdown's DOM subtree, so
        # their locator is NOT prefixed with the dropdown's own xpath.
        if not self.disconnected_options:
            row_locator = self.locator + self.options_locator
        else:
            row_locator = self.options_locator

        option_rows = WebDriverWait(self.browser, 30).until(wait.presence_of_all_elements_located((By.XPATH, row_locator)))

        # xpath positions are 1-based, hence start=1.
        for index, option in enumerate(option_rows, start=1):
            # Define the option locator and the Automancy object that will eventually be added to the options dictionary.
            option_locator = '{base_path}[{index}]{selector_ext}'.format(base_path=row_locator, index=index, selector_ext=self.option_selector_extension)
            new_option = self.options_type(option_locator, name='')

            try:
                # This first check is to see if the element actually exists or not.
                # The Firefox webdriver acts really weird sometimes and will find an extra element that doesn't exist.
                if self.browser.find_elements(By.XPATH, option_locator):
                    # NOTE: Dropdowns which have complex sets of options which don't conform to W3C standards should skip the wait for visibility step.
                    # In real world experiments, dropdowns with disconnected options have issues with visibility since not all options are always loaded in).
                    # if not self.disconnected_options:
                    # Now wait for the element to gain existence and visibility.
                    # assertGainsExistence(new_option)
                    # assertGainsVisibility(new_option)

                    # If the dropdown object has the option_label_extension defined, we need to target that object for the option name to be stored as the key.
                    if self.option_label_extension:
                        option_label_locator = '{base_path}[{index}]{selector_ext}'.format(base_path=row_locator, index=index, selector_ext=self.option_label_extension)
                        option_label = Label(option_label_locator, 'Option Label', 'option_label')
                        new_option.name = option_label.text
                        del option_label
                    else:
                        # Add the text from the element to the name of the option object.
                        new_option.name = new_option.text

                    # Add the new option to the dictionary of options for this dropdown.
                    self.options[new_option.name] = new_option
            except NoSuchElementException:
                # This exception handling is for situations where different browser drivers unexpectedly handle
                # presence_of_all_elements_located by finding non-existent element.  Firefox sometimes acts in
                # this way verses the Chrome webdriver.  No hypothesis exists for why this occurs at this time
                # so we're putting blind handling here to prevent edge case false negatives from stopping tests.
                pass

    def include(self, elemental):
        """
        Adds a Elemental to the Dropdowns' options dictionary.  A KeyError is raised if an option with an
        existing name is attempting to be included.

        Notes:
            This is derivative of the Grid class method "include(...)".  The difference is that
            the user needs to take special care when defining the option xpath locator and the
            option name since there is not any special magic to do the work of ensuring that
            things are done properly implicitly.  Using Dropdown.include(...) is completely manual.

        Args:
            elemental (Elemental): The object that is being added as a component
        """
        # If an option of the same name already exists, exclude it from the list of options.
        if elemental.name in self.options.keys():
            self.exclude(elemental.name)

        # Check that the new option locator doesn't already include the base dropdown locator as a substring and that they aren't the same.
        # (This would indicate that the user is attempting to use the base object xpath and the option xpath, which is fine, just keep swimming)
        if self.locator not in elemental.locator and self.locator != elemental.locator:
            # An empty or other negative value use the pre-defined options locator xpath.  If not, go ahead and use what we get.
            if not elemental.locator:
                # Since we're not collecting options on the fly we need to use the name of the object as an xpath contains text lookup
                elemental.locator = self.locator + '//*[contains(text(), "{0}")]'.format(elemental.name)
            else:
                if not self.disconnected_options:
                    elemental.locator = self.locator + elemental.locator

        # Add the new option to the options dictionary, overwriting whatever exists by design (might changes this later)
        self.options[elemental.name] = elemental

    def exclude(self, option_name):
        """
        Removes a dropdown option by name.

        Args:
            option_name (str): The name of the option wished to be removed.

        Returns:
            None
        """
        if option_name in self.options.keys():
            self.options.pop(option_name)
        else:
            raise KeyError('Option named "{0}" not found, cannot exclude from options'.format(option_name))

    def select(self, option):
        """
        A direct way of selecting an option stored within a dropdown instance.  Opens the dropdown menu if
        the option isn't visible.

        Notes:
            This method is designed to select by visible text when the dropdown object is a <select> element.

            If the option isn't visible or if the option string doesn't exist in self.options (yet), open the
            dropdown in order to trigger the options to become visible and also for the self.options dict to be
            populated with values.

        Args:
            option (str): The visible text for the option that you want to select.

        Returns:
            None
        """
        # Inspect the dropdown locator trying to determine if the dropdown is a <select> element.
        if 'select' in self.locator:
            select = Select(self.element())
            select.select_by_visible_text(option)
        else:
            try:
                # Check to see if the desired option exists at this time.  If not, open the dropdown.
                if not self.options[option].exists:
                    self.open()

                # Once open if the option is visible click it -OR- if the option happened to exist but not be visible, open the dropdown.
                if self.options[option].visible:
                    self.options[option].click()
                else:
                    self.open()
            except KeyError:
                # Also open the dropdown if the key isn't known to exist (yet... usually because it hasn't been opened before)
                self.open()

            # TODO -> Re-implement when Safari testing can be performed again
            # Safari Sucks
            # if not self.browser_used == 'Safari':
            #     assertGainsClickability(self.options[option])

            # Select the option by it's name through __getitem__
            # TODO -> Remove commented out line if dropdowns in Safari don't have a problem with it removed.
            # assertBecomesIncludedIn(option, list(self.options.keys()))
            self.options[option].click()
            self.close()

    def select_options(self, desired_options):
        """
        Selects the options that are desired based on the string that should match them.

        Args:
            desired_options (list): A list of the names of the options that you want to select

        Returns:
            None
        """
        for option in desired_options:
            # Silently skips names that were never discovered/included.
            if option in self.options:
                self.select(option)
/Another_One_Messenger_Client-0.9.1.tar.gz/Another_One_Messenger_Client-0.9.1/src/client/add_contact.py | from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QDialog, QLabel, QComboBox, QPushButton
import logging
logger = logging.getLogger('client')
class AddContactDialog(QDialog):
    '''
    Dialog for adding a user to the contact list.
    Offers the user a list of possible contacts and adds the
    selected one to the contacts.
    '''

    def __init__(self, transport, database):
        super().__init__()
        # transport: client network object (provides username and server calls)
        # database: local client database (known users and contacts)
        self.transport = transport
        self.database = database

        # Fixed-size modal dialog, destroyed on close.
        self.setFixedSize(350, 120)
        self.setWindowTitle('Выберите контакт для добавления:')
        self.setAttribute(Qt.WA_DeleteOnClose)
        self.setModal(True)

        self.selector_label = QLabel('Выберите контакт для добавления:', self)
        self.selector_label.setFixedSize(200, 20)
        self.selector_label.move(10, 0)

        # Drop-down with the candidate contacts.
        self.selector = QComboBox(self)
        self.selector.setFixedSize(200, 20)
        self.selector.move(10, 30)

        self.btn_refresh = QPushButton('Обновить список', self)
        self.btn_refresh.setFixedSize(100, 30)
        self.btn_refresh.move(60, 60)

        self.btn_ok = QPushButton('Добавить', self)
        self.btn_ok.setFixedSize(100, 30)
        self.btn_ok.move(230, 20)

        self.btn_cancel = QPushButton('Отмена', self)
        self.btn_cancel.setFixedSize(100, 30)
        self.btn_cancel.move(230, 60)
        self.btn_cancel.clicked.connect(self.close)

        # Fill the list of possible contacts.
        self.possible_contacts_update()
        # Wire up the refresh button.
        self.btn_refresh.clicked.connect(self.update_possible_contacts)

    def possible_contacts_update(self):
        '''
        Populate the selector with candidate contacts.
        Builds the list of all registered users minus the ones already
        in contacts and minus the current user themselves.
        '''
        self.selector.clear()
        # Sets of all known users and of the client's current contacts.
        contacts_list = set(self.database.get_contacts())
        users_list = set(self.database.get_users())
        # Remove ourselves from the user list so the user cannot add
        # themselves as a contact.
        users_list.remove(self.transport.username)
        # Offer everything that is known but not yet a contact.
        self.selector.addItems(users_list - contacts_list)

    def update_possible_contacts(self):
        '''
        Refresh the list of possible contacts.  Requests the list of known
        users from the server and updates the dialog contents.
        Network errors (OSError) are silently ignored.
        '''
        try:
            self.transport.user_list_update()
        except OSError:
            pass
        else:
            logger.debug('Обновление списка пользователей с сервера выполнено')
            self.possible_contacts_update()
/BiblioPixel-3.4.46.tar.gz/BiblioPixel-3.4.46/bibliopixel/colors/palette.py | import functools
import numpy as np
from . classic import Black
class Palette(list):
    """
    Palette is a list of one or more colors used to render Animations.

    The method ``Palette.get()`` takes a position in the palette and returns
    a color.  ``batch_apply_palette()`` applies the palette to a whole numpy
    array at once through a precomputed 256-entry lookup table.
    """

    def __init__(self, colors=(), continuous=False, serpentine=False, scale=1,
                 offset=0, autoscale=False, length=None):
        """
        Arguments:
            colors: an iterable of colors

            continuous: if True, interpolate linearly between colors; if False,
                use the nearest color from the original list

            serpentine: if True, palette colors are used in reverse order every
                other iteration, giving a back-and-forth effect.  If False,
                palette colors always restart on each iteration

            scale: Scales the incoming index ``i``.  As ``i`` moves from 0
                to ``len(colors) - 1``, the whole palette repeats itself
                ``self.scale`` times

            offset: offset to the incoming index ``i``, applied after scaling

            autoscale: If True, automatically rescale the Palette size to
                match the length of the output.  ``autoscale`` happens before
                ``scale``, so the two work well together to give banding or
                striping effects across your display

            length: The length of the output color_list.  If None, use the
                length of the palette itself.  If autoscale=True, ``length``
                is used to scale the palette to match the output.
        """
        super().__init__(colors)
        if not self:
            # Never allow an empty palette; fall back to a single black entry.
            self.append(Black)

        self.continuous = continuous
        self.serpentine = serpentine
        self.scale = scale
        self.offset = offset
        self.autoscale = autoscale
        self.length = length
        self._rebuild_table()

    def _rebuild_table(self):
        """(Re)compute the 256-entry RGB lookup table used by
        ``batch_apply_palette``.

        Must be called whenever the palette's colors or sampling
        parameters change, otherwise batch application returns stale
        colors.
        """
        self.np_table = np.array([self.get(i) for i in range(256)])

    def __call__(self, position=0):
        # Calling the palette is shorthand for Palette.get().
        return self.get(position)

    def get(self, position=0):
        """
        Return a color interpolated from the Palette.

        In the case where continuous=False, serpentine=False, scale=1,
        autoscale=False, and offset=0, this is exactly the same as plain old []
        indexing, but with a wrap-around.

        The constructor parameters affect this result as documented in the
        constructor.

        Arguments:
            ``position``:
                May be any integer or floating point number
        """
        n = len(self)
        if n == 1:
            return self[0]

        pos = position

        # autoscale: map [0, length) onto [0, n) before applying scale/offset.
        if self.length and self.autoscale:
            pos *= len(self)
            pos /= self.length

        pos *= self.scale
        pos += self.offset

        if not self.continuous:
            if not self.serpentine:
                return self[int(pos % n)]

            # We want a color sequence of length 2n-2
            # e.g. for n=5: a b c d | e d c b | a b c d ...
            m = (2 * n) - 2
            pos %= m
            if pos < n:
                return self[int(pos)]
            else:
                return self[int(m - pos)]

        if self.serpentine:
            pos %= (2 * n)
            if pos > n:
                pos = (2 * n) - pos
        else:
            pos %= n

        # p is a number in [0, n): scale it to be in [0, n-1)
        pos *= n - 1
        pos /= n

        index = int(pos)
        fade = pos - index
        if not fade:
            return self[index]

        # Linear interpolation between the two neighboring palette entries.
        r1, g1, b1 = self[index]
        r2, g2, b2 = self[(index + 1) % len(self)]
        dr, dg, db = r2 - r1, g2 - g1, b2 - b1

        return r1 + fade * dr, g1 + fade * dg, b1 + fade * db

    def batch_apply_palette(self, np_vals_256):
        """Batch apply palette to an input numpy array.

        :param np_vals_256: numpy array of arbitrary shape, with values in [0, 255]
        :returns: numpy array of shape (..., 3) with RGB values for each corresponding entry in the input.
        """
        return self.np_table[np_vals_256.astype('uint8')]

    def __setitem__(self, i, color):
        from . import make
        super().__setitem__(i, make.color(color))
        # BUG FIX: the lookup table previously went stale after in-place
        # color changes, so batch_apply_palette kept returning old colors.
        self._rebuild_table()

    def __eq__(self, other):
        # np_table is excluded: it's a computed attribute derived from the
        # colors and parameters compared below.
        return (isinstance(other, Palette) and
                super().__eq__(other) and
                self.continuous == other.continuous and
                self.serpentine == other.serpentine and
                self.scale == other.scale and
                self.offset == other.offset and
                self.autoscale == other.autoscale and
                self.length == other.length)

    def __ne__(self, other):
        return not (self == other)
/OMS_distributions-0.1.tar.gz/OMS_distributions-0.1/OMS_distributions/Binomialdistribution.py | import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
    """ Binomial distribution class for calculating and
    visualizing a Binomial distribution.

    Attributes:
        mean (float): mean of the distribution (n * p)
        stdev (float): standard deviation, sqrt(n * p * (1 - p))
        data_list (list of floats): 0/1 outcomes extracted from a data file
        p (float): probability of a single trial succeeding
        n (int): number of trials
    """

    def __init__(self, prob=.5, size=20):
        self.n = size
        self.p = prob
        Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())

    def calculate_mean(self):
        """Compute (and cache on self.mean) the mean n * p.

        Returns:
            float: mean of the distribution
        """
        self.mean = self.p * self.n
        return self.mean

    def calculate_stdev(self):
        """Compute (and cache on self.stdev) the standard deviation
        sqrt(n * p * (1 - p)).

        Returns:
            float: standard deviation of the distribution
        """
        self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
        return self.stdev

    def replace_stats_with_data(self):
        """Estimate p and n from ``self.data`` (a list of 0/1 outcomes) and
        refresh the cached mean and standard deviation.

        Raises:
            ZeroDivisionError: if ``self.data`` is empty.

        Returns:
            float: the estimated p value
            float: the n value
        """
        self.n = len(self.data)
        # Fraction of successful (1) trials.
        self.p = 1.0 * sum(self.data) / len(self.data)
        self.mean = self.calculate_mean()
        self.stdev = self.calculate_stdev()
        # BUG FIX: the docstring always promised p and n, but the original
        # implementation silently returned None.
        return self.p, self.n

    def plot_bar(self):
        """Output a two-bar chart of the expected 0/1 outcome counts using
        the matplotlib pyplot library (does not call plt.show()).

        Returns:
            None
        """
        plt.bar(x=['0', '1'], height=[(1 - self.p) * self.n, self.p * self.n])
        plt.title('Bar Chart of Data')
        plt.xlabel('outcome')
        plt.ylabel('count')

    def pdf(self, k):
        """Probability mass function of the binomial distribution.

        (The original docstring wrongly described a gaussian pdf with a
        parameter ``x``.)

        Args:
            k (int): number of successes, 0 <= k <= n

        Returns:
            float: probability of exactly k successes in n trials
        """
        # Binomial coefficient n-choose-k, times the success/failure powers.
        a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))
        b = (self.p ** k) * (1 - self.p) ** (self.n - k)
        return a * b

    def plot_bar_pdf(self):
        """Plot the full pmf of the binomial distribution for k = 0..n.

        Returns:
            list: x values (k) for the pdf plot
            list: y values (pdf(k)) for the pdf plot
        """
        x = []
        y = []

        # calculate the x values to visualize
        for k in range(self.n + 1):
            x.append(k)
            y.append(self.pdf(k))

        # make the plots
        plt.bar(x, y)
        plt.title('Distribution of Outcomes')
        plt.ylabel('Probability')
        plt.xlabel('Outcome')
        plt.show()

        return x, y

    def __add__(self, other):
        """Add together two Binomial distributions with equal p.

        Args:
            other (Binomial): Binomial instance with the same p

        Raises:
            AssertionError: if the two p values differ.  Kept as
                AssertionError for backward compatibility with existing
                callers, but raised explicitly so the check is not
                stripped under ``python -O`` (a bare ``assert`` would be).

        Returns:
            Binomial: distribution with n = self.n + other.n and the same p
        """
        if self.p != other.p:
            raise AssertionError('p values are not equal')

        result = Binomial()
        result.n = self.n + other.n
        result.p = self.p
        result.calculate_mean()
        result.calculate_stdev()

        return result

    def __repr__(self):
        """Return a summary string with the mean, stdev, p and n of the
        Binomial instance.
        """
        return "mean {}, standard deviation {}, p {}, n {}".\
            format(self.mean, self.stdev, self.p, self.n)
/Flask_CBlueprint-1.0.0-py3-none-any.whl/flask_cblueprint/utils/filesystem.py | import os
import glob
import shutil
from pathlib import Path
from string import Template
def create_folder_if_not(folder_path):
    """
    Ensure the *parent* directory of ``folder_path`` exists, creating
    intermediate directories as needed.

    NOTE(review): because of ``os.path.dirname``, this creates the parent
    of the given path, not the path itself — for "a/b/c" it creates "a/b"
    (only a trailing separator makes it create the full path).  Callers
    appear to pass file paths here; confirm before "fixing".

    :param folder_path: path whose parent directory should exist
    :return: None (the return value of ``os.makedirs``)
    """
    parent = os.path.dirname(folder_path)
    return os.makedirs(parent, exist_ok=True)
def list_files(directory, **kwargs):
    """
    Return the names (not paths) of the regular files directly inside
    ``directory``.

    Keyword Args:
        ignore (list): file names to skip (default: [""])
        file_extension (str): when given and non-empty, only names ending
            with this suffix are returned

    :param directory: directory to inspect
    :return: list of matching file names, in ``os.listdir`` order
    """
    skipped = kwargs.get("ignore", [""])
    extension = kwargs.get("file_extension")

    matches = []
    for name in os.listdir(directory):
        # Skip subdirectories and anything explicitly ignored.
        if not os.path.isfile(os.path.join(directory, name)):
            continue
        if name in skipped:
            continue
        # No (or empty) extension filter means "take everything".
        if not extension or name.endswith(extension):
            matches.append(name)
    return matches
def list_directories(directory, ignore=None):
    """
    Return the names of the subdirectories of ``directory``, skipping any
    whose name appears in ``ignore``.

    :param directory: directory to inspect
    :param ignore: directory names to exclude; any falsy value (the
        default) is replaced with ["__pycache__"]
    :return: list of matching directory names, in ``os.listdir`` order
    """
    excluded = ignore if ignore else ["__pycache__"]
    return [
        entry
        for entry in os.listdir(directory)
        if os.path.isdir(os.path.join(directory, entry)) and entry not in excluded
    ]
def set_file(file_path, file_content):
    """
    Overwrite ``file_path`` with ``file_content`` (text mode).  The file is
    created if missing and truncated otherwise.

    Fix: the original called ``f.close()`` inside the ``with`` block — the
    context manager already closes the file on exit, so the explicit close
    was redundant and has been removed.

    :param file_path: path of the file to write
    :param file_content: text content to write
    :return: None
    """
    with open(file_path, 'w') as f:
        f.write(file_content)
def has_file(file_path):
    """
    Return True when ``file_path`` exists and is a regular file
    (directories and missing paths return False).

    :param file_path: path to check
    :return: bool
    """
    return Path(file_path).is_file()
def copy_file(src, dest):
    """
    Copy the file at ``src`` to ``dest`` (a file path or a directory) and
    return the destination path, exactly as ``shutil.copy`` does.
    Permission bits are copied; other metadata is not.

    :param src: source file path
    :param dest: destination path (file or directory)
    :return: path of the newly created file
    """
    destination = shutil.copy(src, dest)
    return destination
def read_file(file_path):
    """
    Open ``file_path`` for text reading and return the open file object.

    NOTE: the caller owns the returned handle and is responsible for
    closing it (ideally by using it as a context manager).

    :param file_path: path of the file to open
    :return: the open file object
    """
    return open(file_path, 'r')
def replace_templates_in_files(lookup_path, file_extension, template_vars, ignore=None):
    """
    Substitute ``string.Template`` placeholders in every matching file,
    rewriting each file in place.

    Recursively finds files under ``lookup_path`` whose name ends with
    ``file_extension`` and replaces their content with
    ``Template(content).substitute(template_vars)``.

    Fixes over the original:
      * files are opened with ``with`` so handles are closed even when
        substitution raises;
      * the ignore check uses ``os.path.basename`` instead of splitting on
        "/", which failed to isolate the file name on Windows paths.

    :param lookup_path: root directory to search
    :param file_extension: extension filter, e.g. ".py"
    :param template_vars: mapping of template variable names to values
    :param ignore: base file names to leave untouched
    :raises KeyError: if a file references a variable missing from
        ``template_vars`` (``Template.substitute`` semantics)
    """
    if not ignore:
        ignore = []

    # glob's "**" with recursive=True descends into subdirectories.
    pattern = lookup_path + "/**/*%s" % file_extension
    for path in glob.glob(pattern, recursive=True):
        if os.path.basename(path) in ignore:
            continue
        with open(path, 'r') as handle:
            content = Template(handle.read()).substitute(template_vars)
        with open(path, 'w') as handle:
            handle.write(content)
/102017059_Aakanksha_Topsis-0.0.0.tar.gz/102017059_Aakanksha_Topsis-0.0.0/README.md | # 102017059_Aakanksha_Topsis
This package is an implementation of multi-criteria decision analysis using TOPSIS. The script accepts three arguments at execution time:

1. `dataset.csv` — the file containing the models (rows) and their parameters (columns).
2. A string of weights separated by commas (`,`).
3. A string of impacts (`+`/`-`) separated by commas (`,`).

Important: the `pandas`, `sys`, `operator`, and `math` libraries must be installed before use. You can install this package with the following command:

    pip install 102017059_Aakanksha_Topsis
| PypiClean |
/FRCUploader-3.6.2.tar.gz/FRCUploader-3.6.2/frcuploader/utils.py |
import os
import re
import json
import time
import shutil
import hashlib
import datetime as dt
import requests
from googleapiclient.errors import HttpError
from . import consts
from .youtube import upload, RETRIABLE_EXCEPTIONS, RETRIABLE_STATUS_CODES
from cachecontrol import CacheControl
from cachecontrol.heuristics import ExpiresAfter
# Default header sets for The Blue Alliance (TBA) API.  The blank values are
# filled in by the application before authenticated requests are made.
app_id = {"X-TBA-App-Id": ""}
trusted_auth = {"X-TBA-Auth-Id": "", "X-TBA-Auth-Sig": ""}

# Shared HTTP session wrapped in a one-minute response cache so repeated TBA
# polls (e.g. while waiting for scores) don't hammer the API.
s = requests.Session()
s = CacheControl(s, heuristic=ExpiresAfter(minutes=1))
s.headers.update(app_id)
"""Utility Functions"""
def convert_bytes(num):
    """Render a byte count as a human-readable string using the unit names
    configured in ``consts.sizes`` (e.g. "3.1 MB").

    Returns None implicitly when *num* exceeds the largest configured unit.
    """
    remaining = num
    for unit in consts.sizes:
        if remaining < 1024.0:
            return "%3.1f %s" % (remaining, unit)
        remaining /= 1024.0
def file_size(path):
    """Return the human-readable size of the file at *path*."""
    return convert_bytes(os.stat(path).st_size)
"""YouTube Title Generators"""
def quals_yt_title(options):
    """Return ``options.title``, with " Replay" appended when this upload
    is flagged as a replay."""
    if options.replay:
        return f"{options.title} Replay"
    return options.title
def quarters_yt_title(options):
    """Build the YouTube title for a quarterfinal match video.

    Args:
        options: object with ``ename`` (event name, str), ``mnum`` (match
            number, int, 1-12) and ``replay`` (bool) attributes.

    Returns:
        str: "<event> - Quarterfinal Match N" for matches 1-8, or
        "<event> - Quarterfinal Tiebreaker N" for matches 9-12, with a
        " Replay" suffix when ``options.replay`` is set.

    Raises:
        ValueError: if ``options.mnum`` is outside 1..12.  (The original
            only checked the upper bound, so mnum=0 silently produced
            "Quarterfinal Match 0" despite the documented range.)
    """
    mnum = options.mnum
    if 1 <= mnum <= 8:
        title = f"{options.ename} - Quarterfinal Match {mnum}"
    elif 9 <= mnum <= 12:
        title = f"{options.ename} - Quarterfinal Tiebreaker {mnum - 8}"
    else:
        raise ValueError("options.mnum must be within 1 and 12")
    return f"{title} Replay" if options.replay else title
def semis_yt_title(options):
    """Build the YouTube title for a semifinal match video.

    Args:
        options: object with ``ename`` (event name, str), ``mnum`` (match
            number, int, 1-6) and ``replay`` (bool) attributes.

    Returns:
        str: "<event> - Semifinal Match N" for matches 1-4, or
        "<event> - Semifinal Tiebreaker N" for matches 5-6, with a
        " Replay" suffix when ``options.replay`` is set.

    Raises:
        ValueError: if ``options.mnum`` is outside 1..6.  (The original
            only checked the upper bound, so mnum=0 silently produced
            "Semifinal Match 0" despite the documented range.)
    """
    mnum = options.mnum
    if 1 <= mnum <= 4:
        title = f"{options.ename} - Semifinal Match {mnum}"
    elif 5 <= mnum <= 6:
        title = f"{options.ename} - Semifinal Tiebreaker {mnum - 4}"
    else:
        raise ValueError("options.mnum must be within 1 and 6")
    return f"{title} Replay" if options.replay else title
def finals_yt_title(options):
    """Return "<event> - Final Match N", with a " Replay" suffix when
    ``options.replay`` is set."""
    base = f"{options.ename} - Final Match {options.mnum}"
    return f"{base} Replay" if options.replay else base
def ceremonies_yt_title(options):
    """Build the YouTube title for a ceremony video.

    ``options.ceremonies`` selects the type:
        1 - Opening Ceremonies (day-specific when ``options.eday`` is set)
        2 - Alliance Selection
        3 - Closing Ceremonies (day-specific when ``options.eday`` is set)
        4 - Highlight Reel

    Returns:
        str or None: the title, or None for any other ceremonies value.
    """
    title = None
    # Fix: comparisons use == — the original `is 1` compared object
    # identity, which only worked via CPython's small-int caching and
    # emits a SyntaxWarning on Python 3.8+.
    if options.ceremonies == 1:
        if not options.eday:
            title = f"{options.ename} - {options.day} Opening Ceremonies"
        else:
            title = f"{options.ename} - Day {options.eday} Opening Ceremonies"
    elif options.ceremonies == 2:
        title = f"{options.ename} - Alliance Selection"
    elif options.ceremonies == 3:
        if not options.eday:
            title = f"{options.ename} - Closing Ceremonies"
        else:
            title = f"{options.ename} - Day {options.eday} Closing Ceremonies"
    elif options.ceremonies == 4:
        title = f"{options.ename} - Highlight Reel"
    return title
"""File Location Functions"""
def quals_filename(options):
    """Return the first entry of ``options.files`` that looks like the
    recording of qualification match ``options.mnum``.

    Matching is case-insensitive: the name must contain " <mnum>." plus one
    of the qualification keywords, and must contain/omit "replay" in
    agreement with ``options.replay``.  Returns None when nothing matches.
    """
    needle = " {}.".format(options.mnum)
    for candidate in options.files:
        lowered = candidate.lower()
        if needle not in lowered:
            continue
        if not any(keyword in lowered for keyword in ("qual", "qualification", "qm")):
            continue
        if bool(options.replay) == ("replay" in lowered):
            return candidate
    return None
def quarters_filename(options):
    """Find the local video file for quarterfinal match ``options.mnum``.

    Matches 1-8 look for names containing "quarter" and "final" plus
    " <mnum>." while excluding tiebreakers; matches 9-12 look for
    tiebreaker names numbered by set (mnum - 8).  In both cases the
    presence of "replay" in the lower-cased name must agree with
    ``options.replay``.

    Returns:
        str or None: the first matching entry of ``options.files``.
    """
    file = None
    if 1 <= options.mnum <= 8:
        for f in options.files:
            fl = f.lower()
            if all(
                k in fl for k in ("quarter", "final", " " + str(options.mnum) + ".")
            ):
                # Plain quarterfinals must NOT be tiebreaker recordings.
                if "tiebreak" not in fl:
                    if options.replay:
                        if "replay" in fl:
                            file = f
                            break
                    else:
                        if "replay" not in fl:
                            file = f
                            break
    elif 9 <= options.mnum <= 12:
        # Tiebreakers are numbered 1-4 within their own set.
        mnum = options.mnum - 8
        for f in options.files:
            fl = f.lower()
            if all(
                k in fl for k in ("quarter", "tiebreak", "final", " " + str(mnum) + ".")
            ):
                if options.replay:
                    if "replay" in fl:
                        file = f
                        break
                else:
                    if "replay" not in fl:
                        file = f
                        break
    return file
def semis_filename(options):
    """Find the local video file for semifinal match ``options.mnum``.

    Matches 1-4 look for names containing "semi" and "final" plus
    " <mnum>." while excluding tiebreakers; matches 5-6 look for
    tiebreaker names numbered by set (mnum - 4).  The presence of
    "replay" in the lower-cased name must agree with ``options.replay``.

    Returns:
        str or None: the first matching entry of ``options.files``.
    """
    file = None
    if options.mnum <= 4:
        for f in options.files:
            fl = f.lower()
            if all(k in fl for k in ("semi", "final", " " + str(options.mnum) + ".")):
                # Plain semifinals must NOT be tiebreaker recordings.
                if "tiebreak" not in fl:
                    if options.replay:
                        if "replay" in fl:
                            file = f
                            break
                    else:
                        if "replay" not in fl:
                            file = f
                            break
    elif options.mnum <= 6:
        # Tiebreakers are numbered 1-2 within their own set.
        mnum = options.mnum - 4
        for f in options.files:
            fl = f.lower()
            if all(
                k in fl for k in ("semi", "tiebreak", "final", " " + str(mnum) + ".")
            ):
                if options.replay:
                    if "replay" in fl:
                        file = f
                        break
                else:
                    if "replay" not in fl:
                        file = f
                        break
    return file
def finals_filename(options):
    """Find the local video file for finals match ``options.mnum``.

    Matches 1-2 look for "final" plus " <mnum>." while excluding
    quarterfinal/semifinal and tiebreaker names; match 3 and beyond also
    accepts names marked "tiebreak".  The presence of "replay" in the
    lower-cased name must agree with ``options.replay``.

    Returns:
        str or None: the first matching entry of ``options.files``.
    """
    file = None
    if options.mnum <= 2:
        for f in options.files:
            fl = f.lower()
            if all(k in fl for k in ("final", " " + str(options.mnum) + ".")):
                # Exclude earlier elimination rounds and tiebreakers, which
                # also contain the word "final".
                if (
                    all(k not in fl for k in ("quarter", "semi"))
                    and "tiebreak" not in fl
                ):
                    if options.replay:
                        if "replay" in fl:
                            file = f
                            break
                    else:
                        if "replay" not in fl:
                            file = f
                            break
    elif options.mnum >= 3:
        for f in options.files:
            fl = f.lower()
            # A third final is the tiebreaker, so either marker is accepted.
            if "final" in fl and any(
                k in fl for k in ("tiebreak", " " + str(options.mnum) + ".")
            ):
                if all(k not in fl for k in ("quarter", "semi")):
                    if options.replay:
                        if "replay" in fl:
                            file = f
                            break
                    else:
                        if "replay" not in fl:
                            file = f
                            break
    return file
def ceremonies_filename(options):
    """Find the local video file for the ceremony selected by
    ``options.ceremonies`` (1 opening, 2 alliance selection, 3 closing /
    awards, 4 highlight reel).

    Matching is case-insensitive; opening/closing ceremonies are further
    narrowed by day name (``options.day``) or "day <options.eday>", with
    closing ceremonies also accepting any match when ``options.eday`` is 0.

    Returns:
        str or None: the first matching entry of ``options.files``.
    """
    file = None
    # Fix: comparisons use == — the original `is 1` compared object
    # identity, which only worked via CPython's small-int caching and
    # emits a SyntaxWarning on Python 3.8+.
    if options.ceremonies == 1:
        for f in options.files:
            fl = f.lower()
            if all(k in fl for k in ("opening", "ceremon")):
                if any(k in fl for k in (options.day.lower(), f"day {options.eday}")):
                    file = f
                    break
    elif options.ceremonies == 2:
        for f in options.files:
            fl = f.lower()
            if all(k in fl for k in ("alliance", "selection")):
                file = f
                break
    elif options.ceremonies == 3:
        for f in options.files:
            fl = f.lower()
            if any(k in fl for k in ("closing", "award")) and "ceremon" in fl:
                if any(k in fl for k in (options.day.lower(), f"day {options.eday}")):
                    file = f
                    break
                elif options.eday == 0:
                    file = f
                    break
    elif options.ceremonies == 4:
        for f in options.files:
            fl = f.lower()
            if any(k in fl for k in ("highlight", "wrapup", "recap")):
                file = f
                break
    return file
def create_names(options):
    """Resolve the local file name and YouTube title for this upload.

    For regular matches (``options.ceremonies == 0``) dispatch on
    ``options.mtype`` ("qm", "qf", "sf", "f1m"); for ceremonies use the
    ceremony helpers.

    Returns:
        str or tuple(str, str) or None: just the YouTube title when
        ``options.newest`` is set, otherwise (filename, title).  Returns
        None for an unknown mtype — the KeyError is swallowed after
        printing the offending value (kept for backward compatibility,
        though callers should treat a None result as an error).
    """
    # Fix: == instead of the original `is 0`, which compared object
    # identity and only worked thanks to CPython's small-int caching.
    if options.ceremonies == 0:
        fname = {
            "qm": quals_filename,
            "qf": quarters_filename,
            "sf": semis_filename,
            "f1m": finals_filename,
        }
        yt = {
            "qm": quals_yt_title,
            "qf": quarters_yt_title,
            "sf": semis_yt_title,
            "f1m": finals_yt_title,
        }
        try:
            if options.newest:
                return yt[options.mtype](options)
            else:
                return fname[options.mtype](options), yt[options.mtype](options)
        except KeyError:
            print(options.mtype)
    else:
        return ceremonies_filename(options), ceremonies_yt_title(options)
"""Match Code Generators"""
def quals_match_code(mtype, mnum):
    """Build a TBA match code for a qualification match, e.g. "qm12"."""
    return f"{mtype}{mnum}"
def quarters_match_code(mtype, mnum):
    """Build a TBA match code for a quarterfinal (4 sets, rounds m1-m3)."""
    set_no = mnum % 4 or 4  # sets cycle 1-4
    if mnum <= 4:
        round_tag = "m1"
    elif mnum <= 8:
        round_tag = "m2"
    elif mnum <= 12:
        round_tag = "m3"
    else:
        raise ValueError("Match Number can't be larger than 12")
    return mtype + str(set_no) + round_tag
def semis_match_code(mtype, mnum):
    """Build a TBA match code for a semifinal (2 sets, rounds m1-m3)."""
    set_no = mnum % 2 or 2  # odd -> set 1, even -> set 2
    if mnum <= 2:
        round_tag = "m1"
    elif mnum <= 4:
        round_tag = "m2"
    elif mnum <= 6:
        round_tag = "m3"
    else:
        raise ValueError("Match Number can't be larger than 6")
    return mtype + str(set_no) + round_tag
def finals_match_code(mtype, mnum):
    """Build a TBA match code for a finals match, e.g. "f1m2"."""
    return mtype + f"{mnum}"
def get_match_code(mtype, mnum, mcode):
    """Return the TBA match code, generating one when mcode is unset.

    An empty or "0" mcode means "generate from mtype/mnum"; anything else
    is an explicit user override and is used verbatim, lowercased.
    """
    # FIX: replaced the roundabout any(k == mcode ...) with a plain
    # membership test.
    if mcode in ("", "0"):
        switcher = {
            "qm": quals_match_code,
            "qf": quarters_match_code,
            "sf": semis_match_code,
            "f1m": finals_match_code,
        }
        return switcher[mtype](mtype, mnum)
    print(f"Uploading as {mcode}")
    return mcode.lower()
"""Data Compliation and Adjustment Functions"""
def get_match_results(event_key, match_key):
    """Fetch final alliance [score, teams...] data for one match from TBA.

    Blocks (polling once per minute) until TBA reports real scores — a
    score of -1 means the match has not been scored yet.

    Raises:
        ValueError: if the match does not exist on TBA.
    """
    match_data = consts.tba.match(f"{event_key}_{match_key}")
    if match_data is None:
        raise ValueError(
            f"{event_key} {match_key} does not exist on TBA. Please use a match that exists"
        )
    blue_data, red_data = parse_data(match_data)
    while blue_data[0] == -1 or red_data[0] == -1:
        print("Waiting 1 minute for TBA to update scores")
        time.sleep(60)
        match_data = consts.tba.match(f"{event_key}_{match_key}")
        blue_data, red_data = parse_data(match_data)
    return blue_data, red_data
def parse_data(match_data):
    """Split a TBA match record into per-alliance [score, team, ...] lists.

    Team keys arrive as "frcNNNN"; the "frc" prefix is stripped so only the
    team number string remains.
    """
    def alliance(color):
        info = match_data["alliances"][color]
        result = [info["score"]]
        result.extend(team[3:] for team in info["team_keys"])
        return result
    return alliance("blue"), alliance("red")
def tba_results(options):
    """Resolve the match code and fetch its alliance results from TBA."""
    mcode = get_match_code(options.mtype, options.mnum, options.mcode)
    blue_data, red_data = get_match_results(options.ecode, mcode)
    return blue_data, red_data, mcode
def create_description(
    options, blueScore, blue1, blue2, blue3, redScore, red1, red2, red3
):
    """Build the YouTube description, filling team/score placeholders.

    All scores/teams equal to -1 means no TBA data was available, in which
    case the generic no-TBA description is used instead.
    """
    if all(
        x == -1 for x in (red1, red2, red3, redScore, blue1, blue2, blue3, blueScore)
    ):
        return consts.NO_TBA_DESCRIPTION.format(
            ename=options.ename,
            team=options.prodteam,
            twit=options.twit,
            fb=options.fb,
            weblink=options.weblink,
        )
    try:
        return options.description.format(
            ename=options.ename,
            team=options.prodteam,
            red1=red1,
            red2=red2,
            red3=red3,
            redscore=redScore,
            blue1=blue1,
            blue2=blue2,
            blue3=blue3,
            bluescore=blueScore,
            ecode=options.ecode,
            twit=options.twit,
            fb=options.fb,
            weblink=options.weblink,
        )
    except TypeError as e:
        # NOTE(review): TypeError fires e.g. when description is not a
        # plain str; unknown placeholders would raise KeyError instead —
        # confirm which failure mode this is meant to cover.
        print(e)
        return options.description
def tiebreak_mnum(mnum, mtype):
    """Map a tiebreaker match number onto its real match-number slot."""
    if mtype == "qm":
        return mnum
    if mtype == "qf":
        return mnum + 8
    if mtype == "sf":
        return mnum + 4
    if mtype == "f1m":
        return 3
    raise KeyError(mtype)
"""Additional YouTube Functions"""
def update_thumbnail(video_id, thumbnail):
    """Set *thumbnail* (a local image file path) as the video's thumbnail."""
    consts.youtube.thumbnails().set(videoId=video_id, media_body=thumbnail).execute()
    print(f"Thumbnail added to video {video_id}")
def add_to_playlist(videoID, playlistID):
    """Add one video (or every video in a list) to a YouTube playlist."""
    # FIX: isinstance() instead of "type(x) is list" — the idiomatic (and
    # subclass-friendly) type check.
    if isinstance(videoID, list):  # recursively add videos if videoID is list
        for vid in videoID:
            add_to_playlist(vid, playlistID)
    else:
        consts.youtube.playlistItems().insert(
            part="snippet",
            body={
                "snippet": {
                    "playlistId": playlistID,
                    "resourceId": {"kind": "youtube#video", "videoId": videoID},
                }
            },
        ).execute()
        print("Added to playlist")
def post_video(token, secret, match_video, event_key, loc="match_videos"):
    """POST a video mapping to The Blue Alliance trusted API.

    token/secret are the TBA trusted-API credentials, match_video the
    already-JSON-encoded request body, and loc the endpoint suffix
    ("match_videos" or "media").
    """
    if not token or not secret:
        print("TBA ID and/or TBA secret missing. Please set them in the UI")
        return
    request_path = f"/api/trusted/v1/event/{event_key}/{loc}/add"
    # Per the TBA trusted API, the auth signature is the MD5 hex digest of
    # secret + request path + request body.
    md5 = hashlib.md5(
        (secret + request_path + str(match_video)).encode("utf-8")
    ).hexdigest()
    trusted_auth = {"X-TBA-Auth-Id": token, "X-TBA-Auth-Sig": str(md5)}
    url_str = f"https://www.thebluealliance.com{request_path}"
    if consts.DEBUG:
        url_str = f"http://localhost:8080{request_path}"
    r = s.post(url_str, data=match_video, headers=trusted_auth)
    # FIX: the original retried forever on 405; bound the retries so a
    # persistent 405 cannot hang the upload.
    attempts = 0
    while r.status_code == 405 and attempts < 5:
        print("Failed to POST to TBA")
        print("Attempting to POST to TBA again")
        r = s.post(url_str, data=match_video, headers=trusted_auth)
        attempts += 1
    if r.status_code > 299:
        print(r.text)
    elif "Success" in r.text or r.status_code == 200:
        print("Successfully added to TBA")
    else:
        print(r.text)
        print("Something went wrong")
def upload_multiple_videos(options):
    """Upload every match from options.mnum through options.end inclusive."""
    for mnum in range(options.mnum, options.end + 1):
        # options is mutated in place so the name/title helpers see each match
        options.mnum = mnum
        options.file, options.yttitle = create_names(options)
        try:
            print(pre_upload(options))
        except HttpError as e:
            print(f"An HTTP error {e.resp.status} occurred:\n{e.content}")
def init(options):
    """The program starts here, options is a Namespace() object.

    Normalizes option types, fills in default title/description/tags,
    locates the file to upload and dispatches to pre_upload (or to the
    multi-upload loop when an end match number is given).
    """
    options.day = dt.datetime.now().strftime("%A") # weekday in english ex: "Monday"
    options.files = list(
        reversed(
            [
                f
                for f in os.listdir(options.where)
                if os.path.isfile(os.path.join(options.where, f))
                and not f.startswith(".")
            ]
        )
    )
    try:
        options.tags = consts.DEFAULT_TAGS.format(
            options.ecode, game=consts.GAMES[options.ecode[:4]]
        ) # add the ecode and game to default tags
    except KeyError as e:
        options.tags = consts.DEFAULT_TAGS.format(
            options.ecode, game=""
        ) # new year so just use empty string for game
        print(
            "This must be a new year and frcuploader doesn't know the game name, please message Nikki or whoever runs this repo at that point"
        )
    # default category is science & technology
    options.category = 28
    options.title = (
        options.ename + f" - Qualification Match {options.mnum}"
    ) # default title
    if any(
        k == options.description
        for k in ("Add alternate description here.", "", consts.DEFAULT_DESCRIPTION)
    ):
        options.description = consts.DEFAULT_DESCRIPTION + consts.CREDITS
    else:
        options.description += consts.CREDITS
    # fix types except options.end
    options.ceremonies = int(options.ceremonies)
    options.mnum = int(options.mnum)
    options.eday = int(options.eday)
    # seperate case to push to TBA
    if options.ceremonies != 0:
        if options.tba:
            options.post = True
        else:
            options.post = False
        options.tba = False # stupid hack to avoid grabbing match data
    if options.tiebreak:
        options.mnum = tiebreak_mnum(options.mnum, options.mtype)
    if options.newest:
        # NOTE(review): this branch only sets yttitle; options.file is
        # presumably assigned elsewhere before the check below — confirm.
        options.yttitle = create_names(options)
    else:
        options.file, options.yttitle = create_names(options)
    if options.file is not None:
        print(f"Found {options.file} to upload")
        if options.end > options.mnum:
            upload_multiple_videos(options)
        else:
            try:
                print(pre_upload(options))
            except HttpError as e:
                print(f"An HTTP error {e.resp.status} occurred:\n{e.content}")
    else:
        print("No file found")
        return
def pre_upload(options):
    """Validate the title, build the YouTube metadata body, and upload.

    Returns a status message string; on success delegates to post_upload().
    """
    if not options.ceremonies:
        print(f"Initializing upload for {options.mtype} match {options.mnum}")
    else:
        print(f"Initializing upload for: {ceremonies_yt_title(options)}")
    print(f"YouTube title is {options.yttitle}")
    if len(options.yttitle) > 100:
        print(f"Title Length: {len(options.yttitle)} characters")
        return "Title cannot exceed 100 characters, please adjust your settings to reduce the title length"
    tags = options.tags.split(",")
    if options.tba:
        blue_data, red_data, mcode = tba_results(options)
        for team in blue_data[1:] + red_data[1:]:
            tags.append(f"frc{team}")
        tags.extend(options.ename.split(" "))
        description = create_description(options, *blue_data, *red_data)
    else:
        mcode = get_match_code(options.mtype, options.mnum, options.mcode)
        # -1 sentinels tell create_description there is no TBA data
        description = create_description(options, -1, -1, -1, -1, -1, -1, -1, -1)
    # Tag the event's location code (the letters of the event code).
    # FIX: use a raw string so "\D" is a regex class, not an invalid
    # string escape (the TBA branch previously used a non-raw "\D+").
    tags.append("frc" + re.search(r"\D+", options.ecode).group())
    # The two branches previously duplicated this body construction.
    body = dict(
        snippet=dict(
            title=options.yttitle,
            description=description,
            tags=tags,
            categoryId=options.category,
        ),
        status=dict(privacyStatus=options.privacy),
    )
    if options.newest:
        ret, options.vid = upload(consts.youtube, body, options.file)
    else:
        ret, options.vid = upload(
            consts.youtube, body, os.path.join(options.where, options.file)
        )
    return post_upload(options, mcode) if ret else "Failed to Upload\n"
def post_upload(options, mcode):
    """Finish an upload: thumbnail, playlist, TBA posting, and file move."""
    try:
        if "thumbnail.png" in options.files:
            update_thumbnail(options.vid, os.path.join(options.where, "thumbnail.png"))
        else:
            print("thumbnail.png does not exist")
    except HttpError as e:
        if e.resp.status in RETRIABLE_STATUS_CODES:
            # FIX: the message was assigned to a local and silently
            # discarded; surface it instead.
            print(f"A retriable HTTP error {e.resp.status} occurred:\n{e.content}")
    except RETRIABLE_EXCEPTIONS as e:
        print(f"A retriable error occurred: {e}")
    try:
        add_to_playlist(options.vid, options.pID)
    except Exception as e:
        print(e)
        print("Failed to post to playlist")
    if options.tba:
        request_body = json.dumps({mcode: options.vid})
        post_video(options.tbaID, options.tbaSecret, request_body, options.ecode)
    elif options.ceremonies and options.post:
        request_body = json.dumps([options.vid])
        post_video(
            options.tbaID, options.tbaSecret, request_body, options.ecode, "media"
        )
    if options.sendto:
        # filebasename is used when uploading the newest file; otherwise
        # the located file name.  (The two branches were duplicated.)
        fname = options.filebasename if options.newest else options.file
        try:
            print("Moving file")
            shutil.move(
                os.path.join(options.where, fname),
                os.path.join(options.sendto, fname),
            )
        except Exception as e:
            print(f"Could not copy to {options.sendto}")
            print(e)
    return f"DONE UPLOADING {options.file}\n"
/Gammalearn-0.11.0.tar.gz/Gammalearn-0.11.0/gammalearn/data/example_settings/experiment_settings_pointing.py | import collections
import os
import importlib
from pathlib import Path
import math
import numpy as np
import torch
from torch.optim import lr_scheduler
from torchvision import transforms
from torchmetrics.classification import Accuracy, AUROC
from pytorch_lightning.profiler import SimpleProfiler, AdvancedProfiler, PyTorchProfiler
import gammalearn.criterions as criterions
import gammalearn.optimizers as optimizers
import gammalearn.steps as steps
from gammalearn.callbacks import (LogGradientNorm, LogModelWeightNorm, LogModelParameters,
LogUncertaintyLogVars, LogUncertaintyPrecisions, LogGradNormWeights,
LogReLUActivations, LogLinearGradient, LogFeatures, WriteDL2Files)
import gammalearn.utils as utils
import gammalearn.datasets as dsets
from gammalearn.data_handlers import GLearnDataModule
from gammalearn.constants import GAMMA_ID, PROTON_ID, ELECTRON_ID
import gammalearn.data.nets as nets
# Experiment settings
# TODO change directory if needed
main_directory = str(Path.home()) + '/gammalearn_experiments/'
"""str: mandatory, where the experiments are stored"""
experiment_name = 'test_install'
"""str: mandatory, the name of the experiment. Should be different
for each experiment, except if one wants to resume an old experiment
"""
info = ''
"""str: optional"""
gpus = 1
"""int or list: mandatory, the number of gpus to use. If -1, run on all GPUS,
if None/0 run on CPU. If list, run on GPUS of list.
"""
log_every_n_steps = 1
"""int: optional, the interval in term of iterations for on screen
data printing during experiment. A small value may lead to a very large log file size.
"""
window_size = 100
"""int: optional, the interval in term of stored values for metric moving computation"""
checkpointing_options = dict(every_n_epochs=1, save_top_k=-1, save_last=True)
"""dict: optional, specific options for model checkpointing.
See https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.callbacks.ModelCheckpoint.html
for details.
"""
random_seed = 1
"""int: optional, the manual seed to make experiments more reproducible"""
monitor_device = True
"""bool: optional, whether or not monitoring the gpu utilization"""
particle_dict = {GAMMA_ID: 0,
PROTON_ID: 1,
# ELECTRON_ID: 2,
}
"""particle_dict is mandatory and maps cta particle types with class id. e.g. gamma (0) is class 0"""
targets = collections.OrderedDict({
    # Regression heads use per-sample L1 losses (reduction='none') so the
    # multi-task loss balancing (see loss_balancing_options) can weight them.
    'energy': {
        'output_shape': 1,
        'loss': torch.nn.L1Loss(reduction='none'),
        'loss_weight': 1,
        'metrics': {
            # 'functions': ,
        },
        'mt_balancing': True
    },
    'impact': {
        'output_shape': 2,
        'loss': torch.nn.L1Loss(reduction='none'),
        'loss_weight': 1,
        'metrics': {},
        'mt_balancing': True
    },
    'direction': {
        'output_shape': 2,
        'loss': torch.nn.L1Loss(reduction='none'),
        'loss_weight': 1,
        'metrics': {},
        'mt_balancing': True
    },
    # Classification head: one output per particle type in particle_dict.
    'class': {
        'label_shape': 1,
        'output_shape': len(particle_dict),
        'loss': torch.nn.CrossEntropyLoss(),
        'loss_weight': 1,
        'metrics': {
            'Accuracy_particle': Accuracy(threshold=0.5),
            'AUC_particle':AUROC(pos_label=particle_dict[GAMMA_ID],
                                 num_classes=len(particle_dict),
                                 compute_on_step=True
                                 )
        },
        'mt_balancing': True
    }
})
"""dict: mandatory, defines for every objectives of the experiment
the loss function and its weight
"""
dataset_class = dsets.MemoryLSTDataset
# dataset_class = dsets.FileLSTDataset
"""Dataset: mandatory, the Dataset class to load the data. Currently 2 classes are available, MemoryLSTDataset that
loads images in memory, and FileLSTDataset that loads images from files during training.
"""
dataset_parameters = {
'camera_type': 'LST_LSTCam',
'group_by': 'image',
'use_time': True,
'particle_dict': particle_dict,
'targets': list(targets.keys()),
# 'subarray': [1],
}
"""dict: mandatory, the parameters of the dataset.
camera_type is mandatory and can be:
'LST_LSTCam', 'MST_NectarCam', 'MST_FlashCam', 'SST_ASTRICam', 'SST1M_DigiCam', 'SST_CHEC', 'MST-SCT_SCTCam'.
group_by is mandatory and can be 'image', 'event_all_tels', 'event_triggered_tels'.
particle_dict is mandatory and maps cta particle types with class id. e.g. gamma (0) is class 0,
proton (101) is class 1 and electron (1) is class 2.
use_time (optional): whether or not to use time information
subarray (optional): the list of telescope ids to select as a subarray
"""
preprocessing_workers = 4
"""int: optional, the max number of workers to create dataset."""
dataloader_workers = 4
"""int: optional, the max number of workers for the data loaders. If 0, data are loaded from the main thread."""
mp_start_method = 'fork'
"""str: optional, the method to start new process in [fork, spawn]"""
# Net settings
# Uncomment following lines to import your network from an external file
# net_definition_file = utils.nets_definition_path()
# """str: mandatory, the file where to find the net definition to use"""
# # Load the network definitions module #
# spec = importlib.util.spec_from_file_location("nets", net_definition_file)
# nets = importlib.util.module_from_spec(spec)
# spec.loader.exec_module(nets)
net_parameters_dic = {
'model': nets.GammaPhysNetPointing,
'parameters': {
'backbone': {
'model': nets.ResNetAttention,
'parameters': {
'num_layers': 3,
'init': 'kaiming',
'batch_norm': True,
# 'init': 'orthogonal',
'num_channels': 2,
'block_features': [16, 32, 64],
'attention_layer': (nets.DualAttention, {'ratio': 16}),
# 'attention_layer': (nets.SqueezeExcite, {'ratio': 4}),
# 'attention_layer': None,
'non_linearity': torch.nn.ReLU,
'output_size': (14, 14)
}
},
'fc_width': 256,
'non_linearity': torch.nn.ReLU,
'last_bias_init': None,
'targets': {k: v.get('output_shape', 0) for k, v in targets.items()}
},
}
"""dict: mandatory, the parameters of the network. Depends on the
network chosen. Must include at least a model and a parameters field.
"""
# checkpoint_path = main_directory + '/test_install/checkpoint_epoch=1.ckpt'
"""str: optional, the path where to find the backup of the model to resume"""
profiler = None
# profiler = {'profiler': SimpleProfiler,
# 'options': dict(extended=True)
# }
"""str: optional, the profiler to use"""
######################################################################################################################
train = True
"""bool: mandatory, whether or not to train the model"""
# Data settings
data_module_train = {
'module': GLearnDataModule,
'paths': [
Path(__file__).parent.absolute().joinpath(
'../../../share/data/MC_data').resolve().as_posix(),
], # TODO fill your folder path
'image_filter': {
# utils.intensity_filter: {'intensity': [50, np.inf]},
# utils.cleaning_filter: {'picture_thresh': 6, 'boundary_thresh': 3,
# 'keep_isolated_pixels': False, 'min_number_picture_neighbors': 2},
# utils.leakage_filter: {'leakage2_cut': 0.2, 'picture_thresh': 6, 'boundary_thresh': 3,
# 'keep_isolated_pixels': False, 'min_number_picture_neighbors': 2},
},
'event_filter': {
# utils.energyband_filter: {'energy': [0.02, 2], 'filter_only_gammas': True}, # in TeV
# utils.emission_cone_filter: {'max_angle': 0.0698},
# utils.impact_distance_filter: {'max_distance': 200},
# utils.telescope_multiplicity_filter: {'multiplicity': 2},
},
'transform': dsets.ResampleImage('bilinear_interpolation', (55, 55, 1)),
'target_transform': None
}
"""paths->list: mandatory, the folders where to find the hdf5 data files"""
"""image_filter->dict: optional, the filter(s) to apply to the dataset at image level"""
"""event_filter->dict: optional, the filter(s) to apply to the dataset"""
validating_ratio = 0.2
"""float: mandatory, the ratio of data to create the validating set"""
max_epochs = 2
"""int: mandatory, the maximum number of epochs for the experiment"""
batch_size = 4
"""int: mandatory, the size of the mini-batch"""
# train_files_max_number = 1
"""int: optional, the max number of files to use for the dataset"""
pin_memory = True
"""bool: optional, whether or not to pin memory in dataloader"""
# Training settings
loss_options = {
'conditional': True,
'gamma_class': dataset_parameters['particle_dict'][0],
}
loss_balancing_options = {
'logvar_coeff': [2, 2, 2, 0.5], # for uncertainty
'penalty': 0, # for uncertainty
}
"""dict: mandatory, defines for every objectives of the experiment
the loss function and its weight
"""
loss_balancing = criterions.MultilossBalancing(targets, **loss_balancing_options)
"""function: mandatory, the function to compute the loss"""
optimizer_dic = {
'network': optimizers.load_adam,
'loss_balancing': optimizers.load_adam
}
"""dict: mandatory, the optimizers to use for the experiment.
One may want to use several optimizers in case of GAN for example
"""
optimizer_parameters = {
'network': {
'lr': 1e-3,
'weight_decay': 1e-4,
},
'loss_balancing': {
'lr': 0.025,
},
}
"""dict: mandatory, defines the parameters for every optimizers to use"""
# regularization = {'function': 'gradient_penalty',
# 'weight': 10}
"""dict: optional, regularization to use during the training process. See in optimizers.py for
available regularization functions. If `function` is set to 'gradient_penalty', the training step must be
`training_step_mt_gradient_penalty`."""
experiment_hparams = {
'add_pointing': True
}
training_step = steps.get_training_step_mt(**experiment_hparams)
# training_step = steps.training_step_gradnorm
# training_step = steps.training_step_mt_gradient_penalty
"""function: mandatory, the function to compute the training step"""
eval_step = steps.get_eval_step_mt(**experiment_hparams)
"""function: mandatory, the function to compute the validating step"""
check_val_every_n_epoch = 1
"""int: optional, the interval in term of epoch for validating the model"""
lr_schedulers = {
'network': {
lr_scheduler.StepLR: {
'gamma': 0.1,
'step_size': 10,
}
},
# 'network': {
# lr_scheduler.ReduceLROnPlateau: {
# 'factor': 0.1,
# 'patience': 30,
# }
# },
# 'network': {
# lr_scheduler.MultiStepLR: {
# 'gamma': 0.1,
# 'milestones': [10, 15, 18],
# }
# },
# 'network': {
# lr_scheduler.ExponentialLR: {
# 'gamma': 0.9,
# }
# },
}
"""dict: optional, defines the learning rate schedulers"""
# callbacks
training_callbacks = [
LogGradientNorm(),
LogModelWeightNorm(),
LogModelParameters(),
LogUncertaintyLogVars(),
LogUncertaintyPrecisions(),
# LogGradNormWeights(),
LogReLUActivations(),
LogLinearGradient(),
# LogFeatures(), # Do not use during training !! Very costly !!
]
"""dict: list of callbacks
"""
######################################################################################################################
# Testing settings
test = True
"""bool: mandatory, whether or not to test the model at the end of training"""
merge_test_datasets = False
"""bool: optional, whether or not to merge test datasets"""
data_module_test = {
'module': GLearnDataModule,
'paths': [
Path(__file__).parent.absolute().joinpath(
'../../../share/data/MC_data').resolve().as_posix(),
],
'image_filter': {
utils.intensity_filter: {'intensity': [10, np.inf]},
# # utils.cleaning_filter: {'picture_thresh': 6, 'boundary_thresh': 3,
# # 'keep_isolated_pixels': False, 'min_number_picture_neighbors': 2},
# utils.leakage_filter: {'leakage2_cut': 0.2, 'picture_thresh': 6, 'boundary_thresh': 3,
# 'keep_isolated_pixels': False, 'min_number_picture_neighbors': 2},
},
'event_filter': {
# utils.energyband_filter: {'energy': [0.02, 2], 'filter_only_gammas': True}, # in TeV
# utils.emission_cone_filter: {'max_angle': 0.0698},
# utils.impact_distance_filter: {'max_distance': 200},
# utils.telescope_multiplicity_filter: {'multiplicity': 2},
},
'transform': dsets.ResampleImage('bilinear_interpolation', (55, 55, 1))
}
"""
dict: optional, must at least contain a non-empty 'source':{'paths:[]'}
path->list of str: optional, the folders containing the hdf5 data files for the test
image_filter->dict: optional, filter(s) to apply to the test set at image level
event_filter->dict: optional, filter(s) to apply to the test set
"""
test_step = steps.get_test_step_mt(**experiment_hparams)
"""function: mandatory, the function to compute the validating step"""
dl2_path = ''
"""str: optional, path to store dl2 files"""
test_dataset_parameters = {
# 'subarray': [1],
}
"""dict: optional, the parameters of the dataset specific to the test operation."""
test_batch_size = 10
"""int: optional, the size of the mini-batch for the test"""
test_callbacks = [
WriteDL2Files()
]
"""dict: list of callbacks""" | PypiClean |
/CleanAdminDjango-1.5.3.1.tar.gz/CleanAdminDjango-1.5.3.1/django/contrib/localflavor/cz/forms.py | from __future__ import absolute_import, unicode_literals
import re
from django.contrib.localflavor.cz.cz_regions import REGION_CHOICES
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Select, RegexField, Field
from django.utils.translation import ugettext_lazy as _
birth_number = re.compile(r'^(?P<birth>\d{6})/?(?P<id>\d{3,4})$')
ic_number = re.compile(r'^(?P<number>\d{7})(?P<check>\d)$')
class CZRegionSelect(Select):
    """
    A select widget with a list of Czech regions as choices.
    """
    def __init__(self, attrs=None):
        super(CZRegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class CZPostalCodeField(RegexField):
    """
    A form field that validates its input as a Czech postal code.
    Valid forms are XXXXX or XXX XX, where X represents a digit.
    """
    default_error_messages = {
        'invalid': _('Enter a postal code in the format XXXXX or XXX XX.'),
    }
    def __init__(self, max_length=None, min_length=None, *args, **kwargs):
        super(CZPostalCodeField, self).__init__(r'^\d{5}$|^\d{3} \d{2}$',
            max_length, min_length, *args, **kwargs)
    def clean(self, value):
        """
        Validates the input and returns a string that contains only numbers.
        Returns an empty string for empty values.
        """
        v = super(CZPostalCodeField, self).clean(value)
        # normalise "XXX XX" to "XXXXX" by dropping the optional space
        return v.replace(' ', '')
class CZBirthNumberField(Field):
    """
    Czech birth number field (rodné číslo): YYMMDD plus a 3- or 4-digit
    verification number, optionally separated by a slash.
    """
    default_error_messages = {
        'invalid_format': _('Enter a birth number in the format XXXXXX/XXXX or XXXXXXXXXX.'),
        'invalid': _('Enter a valid birth number.'),
    }
    def clean(self, value, gender=None):
        # NOTE(review): ``gender`` is accepted but never used below —
        # confirm whether any caller still relies on it.
        super(CZBirthNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        match = re.match(birth_number, value)
        if not match:
            raise ValidationError(self.error_messages['invalid_format'])
        birth, id = match.groupdict()['birth'], match.groupdict()['id']
        # Three digits for verification number were used until 1. january 1954
        if len(id) == 3:
            return '%s' % value
        # Birth number is in format YYMMDD. Females have month value raised by 50.
        # In case that all possible number are already used (for given date),
        # the month field is raised by 20.
        month = int(birth[2:4])
        if (not 1 <= month <= 12) and (not 21 <= month <= 32) and \
            (not 51 <= month <= 62) and (not 71 <= month <= 82):
            raise ValidationError(self.error_messages['invalid'])
        day = int(birth[4:6])
        if not (1 <= day <= 31):
            raise ValidationError(self.error_messages['invalid'])
        # Fourth digit has been added since 1. January 1954.
        # It is modulo of dividing birth number and verification number by 11.
        # If the modulo were 10, the last number was 0 (and therefore, the whole
        # birth number wasn't divisable by 11. These number are no longer used (since 1985)
        # and the condition 'modulo == 10' can be removed in 2085.
        modulo = int(birth + id[:3]) % 11
        if (modulo == int(id[-1])) or (modulo == 10 and id[-1] == '0'):
            return '%s' % value
        else:
            raise ValidationError(self.error_messages['invalid'])
class CZICNumberField(Field):
    """
    Czech IC number field (IČO): 7 digits plus a mod-11 check digit.
    """
    default_error_messages = {
        'invalid': _('Enter a valid IC number.'),
    }
    def clean(self, value):
        super(CZICNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        match = re.match(ic_number, value)
        if not match:
            raise ValidationError(self.error_messages['invalid'])
        number, check = match.groupdict()['number'], int(match.groupdict()['check'])
        # weighted digit sum: weights run 8 down to 2 over the 7 digits
        sum = 0
        weight = 8
        for digit in number:
            sum += int(digit)*weight
            weight -= 1
        remainder = sum % 11
        # remainder is equal:
        # 0 or 10: last digit is 1
        # 1: last digit is 0
        # in other case, last digit is 11 - remainder
        if (not remainder % 10 and check == 1) or \
            (remainder == 1 and check == 0) or \
            (check == (11 - remainder)):
            return '%s' % value
        raise ValidationError(self.error_messages['invalid'])
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/materialdjango/static/materialdjango/components/bower_components/app-layout/app-header-layout/README.md | ##<app-header-layout>
app-header-layout is a wrapper element that positions an app-header and other content. This
element uses the document scroll by default, but it can also define its own scrolling region.
Using the document scroll:
```html
<app-header-layout>
<app-header fixed condenses effects="waterfall">
<app-toolbar>
<div main-title>App name</div>
</app-toolbar>
</app-header>
<div>
main content
</div>
</app-header-layout>
```
Using an own scrolling region:
```html
<app-header-layout has-scrolling-region style="width: 300px; height: 400px;">
<app-header fixed condenses effects="waterfall">
<app-toolbar>
<div main-title>App name</div>
</app-toolbar>
</app-header>
<div>
main content
</div>
</app-header-layout>
```
Add the `fullbleed` attribute to app-header-layout to make it fit the size of its container:
```html
<app-header-layout fullbleed>
...
</app-header-layout>
``` | PypiClean |
/ESMValTool-2.9.0-py3-none-any.whl/esmvaltool/cmorizers/data/formatters/datasets/gpcp_sg.py | import logging
import warnings
from pathlib import Path
import iris
from iris import NameConstraint
from esmvaltool.cmorizers.data import utilities as utils
logger = logging.getLogger(__name__)
def _fix_var_metadata(var_info, cmor_info, cube):
    """Fix the cube's units/metadata so they match the CMOR definition."""
    if 'raw_units' in var_info:
        cube.units = var_info['raw_units']
    # 'mm/day' (precipitation of water) is numerically identical to the
    # mass-flux unit below, which can then be converted properly.
    if cube.units == 'mm/day':
        cube.units = 'kg m-2 day-1'
    cube.convert_units(cmor_info.units)
    utils.fix_var_metadata(cube, cmor_info)
    return cube
def _fix_coords(cube, filepath):
    """Fix coordinate names and attach bounds from the companion variables."""
    utils.fix_dim_coordnames(cube)
    # Each coordinate's bounds live in a separate *_bnds variable in the
    # source file; load each one and attach it to the matching coordinate.
    for coord_name, bnds_var in (
            ('time', 'time_bnds'),
            ('latitude', 'lat_bnds'),
            ('longitude', 'lon_bnds'),
    ):
        bnds_cube = iris.load_cube(filepath, NameConstraint(var_name=bnds_var))
        cube.coord(coord_name).bounds = bnds_cube.core_data()
def _extract_variable(var_info, cmor_info, attrs, filepath, out_dir):
    """Extract one variable from *filepath*, fix it and save it CMORized."""
    var = cmor_info.short_name
    raw_var = var_info.get('raw_name', var)
    # Load data
    with warnings.catch_warnings():
        # iris complains about the (non-CF) global 'units' attribute in the
        # source files; silence just that specific warning.
        warnings.filterwarnings(
            action='ignore',
            message="Skipping global attribute 'units': 'units' is not a "
            "permitted attribute",
            category=UserWarning,
            module='iris',
        )
        cube = iris.load_cube(filepath, NameConstraint(var_name=raw_var))
    # Fix variable metadata
    cube = _fix_var_metadata(var_info, cmor_info, cube)
    # Fix coordinates
    _fix_coords(cube, filepath)
    # Fix global metadata
    utils.set_global_atts(cube, attrs)
    # Save variable
    utils.save_variable(
        cube,
        var,
        out_dir,
        attrs,
        unlimited_dimensions=['time'],
    )
def cmorization(in_dir, out_dir, cfg, cfg_user, start_date, end_date):
    """Cmorization func call.

    NOTE(review): cfg_user, start_date and end_date are unused here;
    presumably the signature is fixed by the common cmorizer entry-point
    interface — confirm before changing it.
    """
    cmor_table = cfg['cmor_table']
    glob_attrs = cfg['attributes']
    # Run the cmorization
    for (var, var_info) in cfg['variables'].items():
        filepath = Path(in_dir) / var_info['filename']
        logger.info("CMORizing variable '%s' from file %s", var, filepath)
        glob_attrs['mip'] = var_info['mip']
        cmor_info = cmor_table.get_variable(var_info['mip'], var)
        _extract_variable(var_info, cmor_info, glob_attrs, filepath, out_dir)
/JoUtil-1.3.3-py3-none-any.whl/JoTools/txkjRes/deteXml.py |
from ..utils.XmlUtil import XmlUtil
from collections import OrderedDict
# fixme 重写这个函数,速度更快
class ParseXml(object):
    """Parse info from an xml annotation file and export info back to xml."""
    def __init__(self):
        self.__attrs = {"folder", "filename", "path", "segmented", "size", "source", "object", "des"} # all recognized top-level attributes
        self.__xml_info_dict = OrderedDict() # collected xml info
        self.__objects_info = []
        self.__size_info = {}
        self.__source_info = {}
    def _parse_node(self, assign_node):
        """Parse a simple node and store its text value under its tag name."""
        node_name = assign_node.nodeName
        element_info = XmlUtil.get_info_from_node(assign_node)
        self.__xml_info_dict[node_name] = element_info['value']
    def _parse_object(self, assign_node):
        """Parse one <object> node: label fields plus bndbox/robndbox geometry."""
        object_info = {}
        for each_node in assign_node.childNodes:
            node_name = each_node.nodeName
            if node_name not in ["bndbox", "robndbox", "#text"]:
                object_info[node_name] = XmlUtil.get_info_from_node(each_node)['value']
            elif node_name == "bndbox":
                # axis-aligned box: xmin/ymin/xmax/ymax
                bndbox_info = {}
                for each_node_2 in each_node.childNodes:
                    each_node_name = each_node_2.nodeName
                    if each_node_name in ["xmin", "ymin", "xmax", "ymax"]:
                        bndbox_info[each_node_name] = XmlUtil.get_info_from_node(each_node_2)['value']
                object_info['bndbox'] = bndbox_info
            elif node_name == "robndbox":
                # rotated box: center (cx, cy), size (w, h) and angle
                robndbox_info = {}
                for each_node_2 in each_node.childNodes:
                    each_node_name = each_node_2.nodeName
                    if each_node_name in ["cx", "cy", "w", "h", "angle"]:
                        robndbox_info[each_node_name] = XmlUtil.get_info_from_node(each_node_2)['value']
                object_info['robndbox'] = robndbox_info
        self.__objects_info.append(object_info)
    def _parse_size(self, assign_node):
        """Parse the <size> node (width/height/depth)."""
        for each_node in assign_node.childNodes:
            node_name = each_node.nodeName
            if node_name in ["width", "height", "depth"]:
                self.__size_info[node_name] = XmlUtil.get_info_from_node(each_node)['value']
    def _parse_source(self, assign_node):
        """Parse the <source> node (database)."""
        for each_node in assign_node.childNodes:
            node_name = each_node.nodeName
            if node_name in ["database"]:
                self.__source_info[node_name] = XmlUtil.get_info_from_node(each_node)['value']
    def _parse_xml(self, xml_path):
        """Walk the xml root node and dispatch each child to its parser."""
        root_node = XmlUtil.get_root_node(xml_path) # get the root node
        # iterate over the children of the root node
        for each_node in root_node.childNodes:
            node_name = each_node.nodeName
            if node_name in ["folder", "filename", "path", "segmented", "des"]:
                self._parse_node(each_node)
            elif node_name == "source":
                self._parse_source(each_node)
            elif node_name == "size":
                self._parse_size(each_node)
            elif node_name == "object":
                self._parse_object(each_node)
def set_attr_info(self, attr, info):
"""设置属性值"""
if attr not in self.__attrs:
raise ValueError("""attr should in folder, filename, path, segmented, size, source, object""")
self.__xml_info_dict[attr] = info
def update_xml_info(self, up_info):
"""更新 xml 字典信息,up_info: dict"""
for each_attr in up_info:
if each_attr not in self.__attrs:
raise ValueError("""attr should in folder, filename, path, segmented, size, source, object""")
else:
self.__xml_info_dict[each_attr] = up_info[each_attr]
def get_xml_info(self, xml_path):
# 解析 xml
self.__xml_info_dict = {"folder": None, "filename": None, "path": None, "segmented": None, "des": None}
self._parse_xml(xml_path)
# 将 xml 中的信息整理输出
self.__xml_info_dict['size'] = self.__size_info
self.__xml_info_dict['source'] = self.__source_info
self.__xml_info_dict['object'] = self.__objects_info
return self.__xml_info_dict
def save_to_xml(self, save_path, assign_xml_info=None):
"""将 xml_info 保存为 xml 形式"""
if assign_xml_info is None:
assign_xml_info = self.__xml_info_dict.copy()
# 没有值
if not assign_xml_info:
raise ValueError("xml info is empty")
# 写 xml
root = XmlUtil.get_document()
xml_calss_1 = XmlUtil.add_sub_node(root, root, 'annotation', '')
# 增加 "folder", "filename", "path", "segmented"
for attr_name in ["folder", "filename", "path", "segmented", "des"]:
XmlUtil.add_sub_node(root, xml_calss_1, attr_name, assign_xml_info[attr_name])
# 增加 source
source_node = XmlUtil.add_sub_node(root, xml_calss_1, "source", '')
for each_node in assign_xml_info["source"]:
XmlUtil.add_sub_node(root, source_node, each_node, assign_xml_info["source"][each_node])
# 增加 size
size_node = XmlUtil.add_sub_node(root, xml_calss_1, "size", '')
for each_node in assign_xml_info["size"]:
XmlUtil.add_sub_node(root, size_node, each_node, assign_xml_info["size"][each_node])
# 增加 object
for each_object in assign_xml_info["object"]:
object_node = XmlUtil.add_sub_node(root, xml_calss_1, "object", '')
for each_node in each_object:
if (each_node != "bndbox") and (each_node != "robndbox"):
XmlUtil.add_sub_node(root, object_node, each_node, each_object[each_node])
elif each_node == "bndbox":
bndbox_node = XmlUtil.add_sub_node(root, object_node, "bndbox", "")
for each_bndbox in each_object["bndbox"]:
XmlUtil.add_sub_node(root, bndbox_node, each_bndbox, each_object["bndbox"][each_bndbox])
else:
bndbox_node = XmlUtil.add_sub_node(root, object_node, "robndbox", "")
for each_bndbox in each_object["robndbox"]:
XmlUtil.add_sub_node(root, bndbox_node, each_bndbox, each_object["robndbox"][each_bndbox])
# 保存 xml 到文件
XmlUtil.save_xml(root, save_path)
def save_to_xml_wh_format(self, save_path, assign_xml_info):
"""将 xml 保存为武汉提供的格式"""
if assign_xml_info is None:
assign_xml_info = self.__xml_info_dict.copy()
# 没有值
if not assign_xml_info:
raise ValueError("xml info is empty")
# 写 xml
root = XmlUtil.get_document()
xml_calss_1 = XmlUtil.add_sub_node(root, root, 'annotation', '')
# 增加 "folder", "filename", "path", "segmented"
for attr_name in ["filename"]:
XmlUtil.add_sub_node(root, xml_calss_1, attr_name, assign_xml_info[attr_name])
# 增加 size
size_node = XmlUtil.add_sub_node(root, xml_calss_1, "size", '')
XmlUtil.add_sub_node(root, xml_calss_1, "objectsum", str(len(assign_xml_info["object"])))
XmlUtil.add_sub_node(root, size_node, "width", str(int(float(assign_xml_info["size"]["width"]))))
XmlUtil.add_sub_node(root, size_node, "height", assign_xml_info["size"]["height"])
XmlUtil.add_sub_node(root, size_node, "depth", assign_xml_info["size"]["depth"])
# 增加 object
index = 0
for each_object in assign_xml_info["object"]:
index += 1
object_node = XmlUtil.add_sub_node(root, xml_calss_1, "object", '')
XmlUtil.add_sub_node(root, object_node, "Serial", str(index))
XmlUtil.add_sub_node(root, object_node, "code", each_object["name"])
bndbox_node = XmlUtil.add_sub_node(root, object_node, "bndbox", "")
XmlUtil.add_sub_node(root, bndbox_node, "xmin", each_object["bndbox"]["xmin"])
XmlUtil.add_sub_node(root, bndbox_node, "ymin", each_object["bndbox"]["ymin"])
XmlUtil.add_sub_node(root, bndbox_node, "xmax", each_object["bndbox"]["xmax"])
XmlUtil.add_sub_node(root, bndbox_node, "ymax", each_object["bndbox"]["ymax"])
# 保存 xml 到文件
XmlUtil.save_xml(root, save_path)
def parse_xml(xml_path):
    """Convenience wrapper: parse ``xml_path`` with a throwaway ParseXml instance."""
    parser = ParseXml()
    return parser.get_xml_info(xml_path)
def save_to_xml(xml_info, xml_path):
    """Convenience wrapper: write ``xml_info`` to ``xml_path`` as xml."""
    ParseXml().save_to_xml(save_path=xml_path, assign_xml_info=xml_info)
def save_to_xml_wh_format(xml_info, xml_path):
    """Convenience wrapper: write ``xml_info`` to ``xml_path`` in the WH format."""
    ParseXml().save_to_xml_wh_format(save_path=xml_path, assign_xml_info=xml_info)
def parse_xml_as_txt(xml_path):
    """Parse a VOC-style xml by plain line scanning instead of a DOM parse.

    Assumes one tag per line (the format written by ``save_to_xml``).  Returns
    a dict with the same overall shape as ``ParseXml.get_xml_info``.

    Robustness fix: iteration now uses ``for`` loops over the file iterator,
    so a truncated file missing ``</object>`` or ``</annotation>`` no longer
    escapes with an uncaught ``StopIteration``; whatever was parsed so far is
    returned instead.
    """

    def parse_assign_line(each_xml_line, assign_tag):
        """Return the text between ``<assign_tag>`` and ``</assign_tag>`` on one line."""
        return each_xml_line.strip()[len(assign_tag) + 2: -len(assign_tag) - 3]

    xml_info = {'size': {'height': -1, 'width': -1, 'depth': -1},
                'filename': '', 'path': '', 'object': [], 'folder': '',
                'segmented': '', 'source': ''}
    with open(xml_path, 'r', encoding='utf-8') as xml_file:
        lines = iter(xml_file)
        for each_line in lines:
            each_line = each_line.strip()
            if each_line.startswith('<filename>'):
                xml_info['filename'] = parse_assign_line(each_line, 'filename')
            elif each_line.startswith('<folder>'):
                xml_info['folder'] = parse_assign_line(each_line, 'folder')
            elif each_line.startswith('<height>'):
                xml_info['size']['height'] = float(parse_assign_line(each_line, 'height'))
            elif each_line.startswith('<width>'):
                xml_info['size']['width'] = float(parse_assign_line(each_line, 'width'))
            elif each_line.startswith('<depth>'):
                # depth is forced to 3 regardless of the stored value
                xml_info['size']['depth'] = 3
            elif each_line.startswith('<path>'):
                xml_info['path'] = parse_assign_line(each_line, 'path')
            elif each_line.startswith('<segmented>'):
                xml_info['segmented'] = parse_assign_line(each_line, 'segmented')
            elif each_line.startswith('<source>'):
                xml_info['source'] = parse_assign_line(each_line, 'source')
            elif each_line.startswith('<object>'):
                # consume lines until </object>, filling one object record
                each_obj = {'name': '', 'prob': -1, 'id': -1, 'des': '', 'crop_path': '',
                            'bndbox': {'xmin': -1, 'xmax': -1, 'ymin': -1, 'ymax': -1}}
                for each_line in lines:
                    each_line = each_line.strip()
                    if each_line.startswith('</object>'):
                        xml_info['object'].append(each_obj)
                        break
                    elif each_line.startswith('<name>'):
                        each_obj['name'] = parse_assign_line(each_line, 'name')
                    elif each_line.startswith('<prob>'):
                        each_obj['prob'] = float(parse_assign_line(each_line, 'prob'))
                    elif each_line.startswith('<id>'):
                        each_obj['id'] = float(parse_assign_line(each_line, 'id'))
                    elif each_line.startswith('<des>'):
                        each_obj['des'] = parse_assign_line(each_line, 'des')
                    elif each_line.startswith('<crop_path>'):
                        each_obj['crop_path'] = parse_assign_line(each_line, 'crop_path')
                    elif each_line.startswith('<xmin>'):
                        each_obj['bndbox']['xmin'] = float(parse_assign_line(each_line, 'xmin'))
                    elif each_line.startswith('<xmax>'):
                        each_obj['bndbox']['xmax'] = float(parse_assign_line(each_line, 'xmax'))
                    elif each_line.startswith('<ymin>'):
                        each_obj['bndbox']['ymin'] = float(parse_assign_line(each_line, 'ymin'))
                    elif each_line.startswith('<ymax>'):
                        each_obj['bndbox']['ymax'] = float(parse_assign_line(each_line, 'ymax'))
            elif each_line.startswith('</annotation>'):
                return xml_info
    # fallback for files missing </annotation>
    return xml_info
/ESMValCore-2.9.0rc1.tar.gz/ESMValCore-2.9.0rc1/esmvalcore/_recipe/check.py | from __future__ import annotations
import logging
import os
import re
import subprocess
from pprint import pformat
from shutil import which
from typing import Any, Iterable
import isodate
import yamale
from esmvalcore.exceptions import InputFilesNotFound, RecipeError
from esmvalcore.local import _get_start_end_year, _parse_period
from esmvalcore.preprocessor import TIME_PREPROCESSORS, PreprocessingTask
from esmvalcore.preprocessor._multimodel import STATISTIC_MAPPING
from esmvalcore.preprocessor._supplementary_vars import (
PREPROCESSOR_SUPPLEMENTARIES,
)
logger = logging.getLogger(__name__)
def ncl_version():
    """Check that NCL is installed and at least version 6.4.

    Raises
    ------
    RecipeError
        If NCL is missing, broken, or older than version 6.4.
    """
    ncl = which('ncl')
    if not ncl:
        raise RecipeError("Recipe contains NCL scripts, but cannot find "
                          "an NCL installation.")
    cmd = [ncl, '-V']
    try:
        version = subprocess.check_output(cmd, universal_newlines=True)
    except subprocess.CalledProcessError as exc:
        # Bug fix: the old code logged ' '.join(' '.join(cmd)), which joins
        # the *characters* of the already-joined string with spaces.
        logger.error("Failed to execute '%s'", ' '.join(cmd))
        raise RecipeError("Recipe contains NCL scripts, but your NCL "
                          "installation appears to be broken.") from exc
    version = version.strip()
    logger.info("Found NCL version %s", version)
    major, minor = (int(i) for i in version.split('.')[:2])
    if major < 6 or (major == 6 and minor < 4):
        raise RecipeError("NCL version 6.4 or higher is required to run "
                          "a recipe containing NCL scripts.")
def recipe_with_schema(filename):
    """Validate the recipe file against the bundled yamale schema."""
    schema_file = os.path.join(os.path.dirname(__file__), 'recipe_schema.yml')
    logger.debug("Checking recipe against schema %s", schema_file)
    data = yamale.make_data(filename)
    schema = yamale.make_schema(schema_file)
    yamale.validate(schema, data, strict=False)
def diagnostics(diags):
    """Check the diagnostics section of a recipe for structural problems."""
    if diags is None:
        raise RecipeError('The given recipe does not have any diagnostic.')
    for name, diagnostic in diags.items():
        if 'scripts' not in diagnostic:
            raise RecipeError(
                "Missing scripts section in diagnostic {}".format(name))
        variable_names = tuple(diagnostic.get('variables', {}))
        # a scripts section may be present but empty (None)
        scripts = diagnostic.get('scripts') or {}
        for script_name, script in scripts.items():
            # script names must not shadow variable names
            if script_name in variable_names:
                raise RecipeError(
                    "Invalid script name {} encountered in diagnostic {}: "
                    "scripts cannot have the same name as variables.".format(
                        script_name, name))
            if not script.get('script'):
                raise RecipeError(
                    "No script defined for script {} in diagnostic {}".format(
                        script_name, name))
def duplicate_datasets(
    datasets: list[dict[str, Any]],
    diagnostic: str,
    variable_group: str,
) -> None:
    """Check that no dataset is listed twice for a variable group."""
    if not datasets:
        raise RecipeError(
            "You have not specified any dataset or additional_dataset groups "
            f"for variable {variable_group} in diagnostic {diagnostic}.")
    seen: list[dict[str, Any]] = []
    for dataset in datasets:
        if dataset in seen:
            raise RecipeError(
                f"Duplicate dataset {dataset} for variable {variable_group} "
                f"in diagnostic {diagnostic}.")
        seen.append(dataset)
def variable(var: dict[str, Any], required_keys: Iterable[str]):
    """Check that ``var`` defines all ``required_keys``.

    Raises a RecipeError listing the missing keys.  Bug fix: the fragment
    naming the variable group lacked an ``f`` prefix, so
    ``{var['variable_group']}`` was emitted literally instead of being
    interpolated.
    """
    required = set(required_keys)
    missing = required - set(var)
    if missing:
        raise RecipeError(
            f"Missing keys {missing} in\n"
            f"{pformat(var)}\n"
            f"for variable {var['variable_group']} in diagnostic "
            f"{var['diagnostic']}")
def _log_data_availability_errors(dataset):
"""Check if the required input data is available."""
input_files = dataset.files
patterns = dataset._file_globs
if not input_files:
logger.error("No input files found for %s", dataset)
if patterns:
if len(patterns) == 1:
msg = f': {patterns[0]}'
else:
msg = '\n{}'.format('\n'.join(str(p) for p in patterns))
logger.error("Looked for files matching%s", msg)
logger.error("Set 'log_level' to 'debug' to get more information")
def _group_years(years):
"""Group an iterable of years into easy to read text.
Example
-------
[1990, 1991, 1992, 1993, 2000] -> "1990-1993, 2000"
"""
years = sorted(years)
year = years[0]
previous_year = year
starts = [year]
ends = []
for year in years[1:]:
if year != previous_year + 1:
starts.append(year)
ends.append(previous_year)
previous_year = year
ends.append(year)
ranges = []
for start, end in zip(starts, ends):
ranges.append(f"{start}" if start == end else f"{start}-{end}")
return ", ".join(ranges)
def data_availability(dataset, log=True):
    """Check that the dataset's files cover the years of its timerange."""
    input_files = dataset.files
    facets = dataset.facets
    if log:
        _log_data_availability_errors(dataset)
    if not input_files:
        raise InputFilesNotFound(f"Missing data for {dataset.summary(True)}")
    # without a timerange there is nothing further to verify
    if 'timerange' not in facets:
        return
    start_date, end_date = _parse_period(facets['timerange'])
    required_years = set(range(int(start_date[0:4]), int(end_date[0:4]) + 1))
    available_years = set()
    for file in input_files:
        first, last = _get_start_end_year(file)
        available_years.update(range(first, last + 1))
    missing_years = required_years - available_years
    if missing_years:
        raise InputFilesNotFound(
            "No input data available for years {} in files:\n{}".format(
                _group_years(missing_years),
                "\n".join(str(f) for f in input_files)))
def preprocessor_supplementaries(dataset, settings):
    """Check that the supplementary variables required by the settings exist."""
    steps = [step for step in settings if step in PREPROCESSOR_SUPPLEMENTARIES]
    available = {d.facets['short_name'] for d in dataset.supplementaries}
    for step in steps:
        ancs = PREPROCESSOR_SUPPLEMENTARIES[step]
        # any single matching supplementary variable satisfies the step
        if any(short_name in available for short_name in ancs['variables']):
            continue
        if ancs['required'] == "require_at_least_one":
            raise RecipeError(
                f"Preprocessor function {step} requires that at least "
                f"one supplementary variable of {ancs['variables']} is "
                f"defined in the recipe for {dataset}.")
        if ancs['required'] == "prefer_at_least_one":
            logger.warning(
                "Preprocessor function %s works best when at least "
                "one supplementary variable of %s is defined in the "
                "recipe for %s.",
                step,
                ancs['variables'],
                dataset,
            )
def tasks_valid(tasks):
    """Check that no two preprocessing products share an output filename."""
    seen = set()
    for task in tasks.flatten():
        if not isinstance(task, PreprocessingTask):
            continue
        for product in task.products:
            if product.filename in seen:
                raise ValueError(
                    "Duplicate preprocessor filename {}, please file a bug "
                    "report.".format(product.filename))
            seen.add(product.filename)
def check_for_temporal_preprocs(profile):
    """Check for temporal operations on fx variables."""
    temp_preprocs = [
        name for name, settings in profile.items()
        if settings and name in TIME_PREPROCESSORS
    ]
    if temp_preprocs:
        raise RecipeError(
            "Time coordinate preprocessor step(s) {} not permitted on fx "
            "vars, please remove them from recipe".format(temp_preprocs))
def extract_shape(settings):
    """Check that `extract_shape` arguments are valid."""
    shapefile = settings.get('shapefile', '')
    if not os.path.exists(shapefile):
        raise RecipeError("In preprocessor function `extract_shape`: "
                          f"Unable to find 'shapefile: {shapefile}'")
    valid = {
        'method': {'contains', 'representative'},
        'crop': {True, False},
        'decomposed': {True, False},
    }
    for key, choices in valid.items():
        value = settings.get(key)
        # absent keys (None) fall back to the preprocessor's defaults
        if value is not None and value not in choices:
            raise RecipeError(
                f"In preprocessor function `extract_shape`: Invalid value "
                f"'{value}' for argument '{key}', choose from "
                "{}".format(', '.join(f"'{k}'".lower() for k in choices)))
def _verify_statistics(statistics, step):
    """Raise error if multi-model statistics cannot be verified."""
    valid_names = ['std'] + list(STATISTIC_MAPPING.keys())
    valid_patterns = [r"^(p\d{1,2})(\.\d*)?$"]
    pattern = re.compile(r'|'.join(valid_patterns))
    for statistic in statistics:
        # a statistic is valid if it is a known name or a percentile pattern
        if statistic in valid_names or pattern.match(statistic):
            continue
        raise RecipeError(
            "Invalid value encountered for `statistic` in preprocessor "
            f"{step}. Valid values are {valid_names} "
            f"or patterns matching {valid_patterns}. Got '{statistic}'.")
def _verify_span_value(span):
"""Raise error if span argument cannot be verified."""
valid_names = ('overlap', 'full')
if span not in valid_names:
raise RecipeError(
"Invalid value encountered for `span` in preprocessor "
f"`multi_model_statistics`. Valid values are {valid_names}."
f"Got {span}.")
def _verify_groupby(groupby):
"""Raise error if groupby arguments cannot be verified."""
if not isinstance(groupby, list):
raise RecipeError(
"Invalid value encountered for `groupby` in preprocessor "
"`multi_model_statistics`.`groupby` must be defined as a "
f"list. Got {groupby}.")
def _verify_keep_input_datasets(keep_input_datasets):
if not isinstance(keep_input_datasets, bool):
raise RecipeError(
f"Invalid value encountered for `keep_input_datasets`."
f"Must be defined as a boolean (true or false). "
f"Got {keep_input_datasets}.")
def _verify_ignore_scalar_coords(ignore_scalar_coords):
if not isinstance(ignore_scalar_coords, bool):
raise RecipeError(
f"Invalid value encountered for `ignore_scalar_coords`."
f"Must be defined as a boolean (true or false). Got "
f"{ignore_scalar_coords}.")
def _verify_arguments(given, expected):
"""Raise error if arguments cannot be verified."""
for key in given:
if key not in expected:
raise RecipeError(
f"Unexpected keyword argument encountered: {key}. Valid "
f"keywords are: {expected}.")
def multimodel_statistics_preproc(settings):
    """Check that the multi-model statistics settings are valid."""
    valid_keys = [
        'groupby',
        'ignore_scalar_coords',
        'keep_input_datasets',
        'span',
        'statistics',
    ]
    _verify_arguments(settings.keys(), valid_keys)
    span = settings.get('span')  # optional, default: overlap
    if span:
        _verify_span_value(span)
    groupby = settings.get('groupby')  # optional, default: None
    if groupby:
        _verify_groupby(groupby)
    statistics = settings.get('statistics')  # required
    if statistics:
        _verify_statistics(statistics, 'multi_model_statistics')
    _verify_keep_input_datasets(settings.get('keep_input_datasets', True))
    _verify_ignore_scalar_coords(settings.get('ignore_scalar_coords', False))
def ensemble_statistics_preproc(settings):
    """Check that the ensemble statistics settings are valid."""
    valid_keys = [
        'ignore_scalar_coords',
        'span',
        'statistics',
    ]
    _verify_arguments(settings.keys(), valid_keys)
    span = settings.get('span', 'overlap')  # optional, default: overlap
    if span:
        _verify_span_value(span)
    statistics = settings.get('statistics')
    if statistics:
        _verify_statistics(statistics, 'ensemble_statistics')
    _verify_ignore_scalar_coords(settings.get('ignore_scalar_coords', False))
def _check_delimiter(timerange):
if len(timerange) != 2:
raise RecipeError("Invalid value encountered for `timerange`. "
"Valid values must be separated by `/`. "
f"Got {timerange} instead.")
def _check_duration_periods(timerange):
    """Raise if *both* ends of the timerange are ISO 8601 duration periods."""

    def _is_duration(value):
        try:
            isodate.parse_duration(value)
        except ValueError:
            return False
        return True

    # short-circuits like the original nested try/else: the second end is
    # only parsed when the first one is a duration
    if _is_duration(timerange[0]) and _is_duration(timerange[1]):
        raise RecipeError("Invalid value encountered for `timerange`. "
                          "Cannot set both the beginning and the end "
                          "as duration periods.")
def _check_format_years(date):
if date != '*' and not date.startswith('P'):
if len(date) < 4:
date = date.zfill(4)
return date
def _check_timerange_values(date, timerange):
    """Raise if `date` is neither an ISO 8601 date, a duration, nor `'*'`."""
    try:
        isodate.parse_date(date)
        return
    except ValueError:
        pass
    try:
        isodate.parse_duration(date)
    except ValueError as exc:
        # '*' is the wildcard for "all available years" and is always allowed
        if date != '*':
            raise RecipeError("Invalid value encountered for `timerange`. "
                              "Valid value must follow ISO 8601 standard "
                              "for dates and duration periods, or be "
                              "set to '*' to load available years. "
                              f"Got {timerange} instead.") from exc
def valid_time_selection(timerange):
    """Check that `timerange` tag is well defined."""
    if timerange == '*':
        return
    parts = timerange.split('/')
    _check_delimiter(parts)
    _check_duration_periods(parts)
    for date in parts:
        _check_timerange_values(_check_format_years(date), parts)
def differing_timeranges(timeranges, required_vars):
    """Raise if the required variables do not share a single timerange."""
    if len(timeranges) <= 1:
        return
    raise ValueError(
        f"Differing timeranges with values {timeranges} "
        f"found for required variables {required_vars}. "
        "Set `timerange` to a common value.")
def reference_for_bias_preproc(products):
    """Check that exactly one reference dataset for bias preproc is given."""
    step = 'bias'
    bias_products = {p for p in products if step in p.settings}
    if not bias_products:
        return
    # collect the products flagged with ``reference_for_bias: true``
    references = [
        p for p in bias_products
        if p.attributes.get('reference_for_bias', False)
    ]
    if len(references) == 1:
        return
    products_str = [p.filename for p in bias_products]
    if references:
        ref_products_str = f":\n{pformat([p.filename for p in references])}.\n"
    else:
        ref_products_str = ". "
    raise RecipeError(
        f"Expected exactly 1 dataset with 'reference_for_bias: true' in "
        f"products\n{pformat(products_str)},\nfound "
        f"{len(references):d}{ref_products_str}Please also "
        f"ensure that the reference dataset is not excluded with the "
        f"'exclude' option")
/CsuPMTD-1.0.27.tar.gz/CsuPMTD-1.0.27/PMTD/maskrcnn_benchmark/apex/apex/pyprof/prof/softmax.py | from collections import OrderedDict
from .utility import Utility
from .base import OperatorLayerBase
class Softmax(OperatorLayerBase):
    """Describes one captured torch.nn.functional.softmax call."""

    def __init__(self, d):
        marker = eval(d.argMarker[0])
        self.marker = marker
        self.mod_ = marker['mod']
        self.op_ = marker['op']
        self.args = marker['args']

        assert self.mod_ == "torch.nn.functional"
        assert self.op_ == "softmax"

        # Only positional (unnamed) arguments carry the input tensor.
        positional = [a for a in self.args if a['name'] == '']
        assert len(positional) <= 2

        self.shape = positional[0]['shape']
        self.type = positional[0]['dtype']
        self.dir = d.dir

    def op(self):
        return self.op_

    def mod(self):
        return self.mod_

    def tc(self):
        return "-"

    def params(self):
        return OrderedDict([('T', self.shape), ('type', self.type)])

    def elems(self):
        return Utility.numElems(self.shape)

    def flops(self):
        # Note: exp, sum-reduce, divide would be elems * 3, but flops are not
        # counted for this op.
        return 0

    def bytes(self):
        factor = 3 if self.dir == "fprop" else 5  # verify
        return self.elems() * Utility.typeToBytes(self.type) * factor
class LogSoftmax(OperatorLayerBase):
    """Describes one captured torch.nn.functional.log_softmax call."""

    def __init__(self, d):
        marker = eval(d.argMarker[0])
        mod = marker['mod']
        op = marker['op']
        args = marker['args']

        self.marker = marker
        self.mod_ = mod
        self.op_ = op
        self.args = args

        assert (mod == "torch.nn.functional")
        assert (op == "log_softmax")

        # Filter out named parameters; afterwards only positional args remain
        # and the first one is the input tensor.  (The old code re-filtered
        # the already-filtered list for name == "input" in an unreachable
        # branch and bound an unused local ``t``.)
        args = list(filter(lambda x: x['name'] == '', args))
        assert (len(args) <= 2)

        i = args[0]
        self.shape = i['shape']
        self.type = i['dtype']
        self.dir = d.dir

    def op(self):
        return self.op_

    def mod(self):
        return self.mod_

    def tc(self):
        return "-"

    def params(self):
        return OrderedDict([('T', self.shape), ('type', self.type)])

    def elems(self):
        return Utility.numElems(self.shape)

    def flops(self):
        # Note: exp, sum-reduce, divide, log would be elems * 4, but flops
        # are not counted for this op.
        return 0

    def bytes(self):
        b = self.elems() * Utility.typeToBytes(self.type)
        b *= 3 if self.dir == "fprop" else 5  # verify
        return b
/BlueWhale3-Timeseries-0.3.13.tar.gz/BlueWhale3-Timeseries-0.3.13/doc/widgets/var.md | VAR Model
=========
Model the time series using a [vector autoregression (VAR) model](https://en.wikipedia.org/wiki/Vector_autoregression).
**Inputs**
- Time series: Time series as output by [As Timeseries](as_timeseries.md) widget.
**Outputs**
- Time series model: The VAR model fitted to input time series.
- Forecast: The forecast time series.
- Fitted values: The values that the model was actually fitted to, equals to *original values - residuals*.
- Residuals: The errors the model made at each step.
Using this widget, you can model the time series with a VAR model.
![](images/var-model-stamped.png)
1. Model's name. By default, the name is derived from the model and its parameters.
2. Desired model order (number of parameters).
3. If other than *None*, optimize the number of model parameters (up to the value selected in (2)) with the selected information criterion (one of: AIC, BIC, HQIC, FPE, or a mix thereof).
4. Choose this option to add additional "trend" columns to the data:
- *Constant*: a single column of ones is added
- *Constant and linear*: a column of ones and a column of linearly increasing numbers are added
- *Constant, linear and quadratic*: an additional column of quadratics is added
5. Number of forecast steps the model should output, along with the desired confidence interval values at each step.
Example
-------
![](images/line-chart-ex1.png)
#### See also
[ARIMA Model](arima.md), [Model Evaluation](model_evaluation_w.md)
| PypiClean |
/M-LOOP-3.3.3.tar.gz/M-LOOP-3.3.3/mloop/launchers.py | from __future__ import absolute_import, division, print_function
__metaclass__ = type
import logging
import mloop.utilities as mlu
import mloop.controllers as mlc
import mloop.interfaces as mli
import mloop.visualizations as mlv
def launch_from_file(config_filename,
                     **kwargs):
    '''
    Launch M-LOOP using a configuration file. See configuration file documentation.

    Args:
        config_filename (str): Filename of configuration file
        **kwargs : keywords that override the keywords in the file.

    Returns:
        controller (Controller): Controller for optimization.

    Raises:
        ValueError: If the configuration contains options that no component
            consumed.
    '''
    try:
        file_kwargs = mlu.get_dict_from_file(config_filename, 'txt')
    except (IOError, OSError):
        print('Unable to open M-LOOP configuration file:' + repr(config_filename))
        raise
    # explicit keyword arguments take precedence over the file contents
    file_kwargs.update(kwargs)
    # Main run sequence
    # Create interface and extract unused keywords
    interface = mli.create_interface(**file_kwargs)
    file_kwargs = interface.remaining_kwargs
    # Create controller and extract unused keywords
    controller = mlc.create_controller(interface, **file_kwargs)
    file_kwargs = controller.remaining_kwargs
    # Extract keywords for post processing extras, and raise an error if any keywords were unused.
    extras_kwargs = _pop_extras_kwargs(file_kwargs)
    if file_kwargs:
        logging.getLogger(__name__).error('Unused extra options provided:' + repr(file_kwargs))
        # Include the offending options in the exception (previously a bare
        # `raise ValueError` carried no information).
        raise ValueError('Unused extra options provided:' + repr(file_kwargs))
    # Run the actual optimization
    controller.optimize()
    # Launch post processing extras
    launch_extras(controller, **extras_kwargs)
    return controller
def launch_extras(controller,visualizations=True, **kwargs):
    '''
    Launch post optimization extras. Including visualizations.

    Keyword Args:
        visualizations (Optional [bool]): If true run default visualizations for the controller. Default True.
        **kwargs: Additional keywords are accepted and silently ignored.
    '''
    if visualizations:
        mlv.show_all_default_visualizations(controller)
def _pop_extras_kwargs(kwargs):
'''
Remove the keywords used in the extras section (if present), and return them.
Returns:
tuple made of (extras_kwargs, kwargs), where extras_kwargs are keywords for the extras and kwargs are the others that were provided.
'''
extras_kwargs={}
if 'visualizations' in kwargs:
extras_kwargs['visualizations'] = kwargs.pop('visualizations')
return extras_kwargs | PypiClean |
/DTMC/spatialModel/PeriodicMovement/periodicSEIRDV.py | import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from math import cos, sin, pi
from ..randomMovement.randMoveSEIRDV import RandMoveSEIRDV
from Eir.utility import Person2 as Person
from Eir.utility import randEvent, dist
from Eir.DTMC.spatialModel.simul_details import Simul_Details
class PeriodicSEIRDV(RandMoveSEIRDV):
    """SEIRDV compartment model whose people move on periodic (circular) paths.

    Each person orbits a fixed centre ``(h, k)`` with radius ``R``; every day
    the orbital angle theta advances by a draw from ``Normal(2*pi/k, std)``.

    Bug fix in ``_move``: the first collection's theta used to be advanced
    twice per step (once before the position computation and once again in the
    per-collection loop), so ``collects[0]`` drifted out of sync with the
    copies of the same person in the other compartment lists.  The angle is
    now computed once per person and assigned to every copy.
    """

    def __init__(self, S0: int, E0: int, I0: int, R0: int, V0: int, rho: float, gamma: float, mu: float, eta: float, planeSize: float, move_r: float, sigma_R: float,
    spread_r: float, sigma_r: float, days: int, w0=1.0, alpha=2.0, timeDelay=-1, k=5, std=pi/2):
        self.floatCheck(k, std)
        self.negValCheck(k, std)
        super().__init__(S0=S0, E0=E0, I0=I0, R0=R0, V0=V0, rho=rho, gamma=gamma, mu=mu, eta=eta, planeSize=planeSize, move_r=move_r, sigma_R=sigma_R, spread_r=spread_r, sigma_r=sigma_r,
        days=days, w0=w0, alpha=alpha, timeDelay=timeDelay)
        # k: mean number of angular steps per revolution; std: angular spread
        self.k, self.std = k, std
        # one list of Person copies per compartment (S, E, I, R, V, D)
        self.Scollect, self.Ecollect, self.Icollect, self.Rcollect, self.Vcollect, self.Dcollect = [], [], [], [], [], []
        # NOTE(review): mvnt_r is drawn from the *angular* distribution
        # Normal(2*pi/k, std) yet is passed as the Person's movement radius,
        # while move_r/sigma_R only reach the parent class -- confirm this is
        # intentional.
        loc_x, loc_y, spreading_r, mvnt_r = np.random.random(self.popsize)*planeSize, np.random.random(self.popsize)*planeSize, np.random.normal(spread_r, sigma_r, self.popsize), np.random.normal(2*pi/k, std, self.popsize)
        # create the Simul_Details object
        self.details = Simul_Details(days=self.days, popsize=self.popsize)
        for i in range(self.popsize):
            # one shared starting angle for all six copies of this person
            theta = np.random.normal(2*pi/k, std)
            persons = []
            for j in range(6):
                persons.append(Person(loc_x[i], loc_y[i], mvnt_r[i], spreading_r[i], theta=theta))
            # mark which compartment person i starts in
            if i < S0:
                persons[0].isIncluded = True
                self.details.addStateChange(i, "S", 0)
            elif i < S0 + E0:
                persons[1].isIncluded = True
                self.details.addStateChange(i, "E", 0)
            elif i < S0 + E0 + I0:
                persons[2].isIncluded = True
                self.details.addStateChange(i, "I", 0)
            elif i < S0 + E0 + I0 + R0:
                persons[3].isIncluded = True
                self.details.addStateChange(i, "R", 0)
            else:
                persons[4].isIncluded = True
                self.details.addStateChange(i, "V", 0)
            self.Scollect.append(persons[0])
            self.Ecollect.append(persons[1])
            self.Icollect.append(persons[2])
            self.Rcollect.append(persons[3])
            self.Vcollect.append(persons[4])
            self.Dcollect.append(persons[5])
            self.details.addLocation(0, (persons[0].x, persons[0].y))

    def _move(self, day: int, collects: list):
        """
        Advance every person one step along their circular path, in place.

        Parameters
        ----------
        day: int
            The current day; used so the Simul_Details object can keep track
            of the movement patterns each day.
        collects: list
            The collection data structures whose person copies are moved.
        """
        # per-person angular increments for this step
        thetas = np.random.normal(2*pi/self.k, self.std, self.popsize)
        for index, person in enumerate(collects[0]):
            # compute the new angle exactly once per person
            new_theta = person.theta + thetas[index]
            # new position on the circle centred at (h, k), clamped to the plane
            x = self._boundaryCheck(person.h + person.R * cos(new_theta))
            y = self._boundaryCheck(person.k + person.R * sin(new_theta))
            self.details.addLocation(day, (x, y))
            # propagate the identical state to every copy of this person so
            # all compartment lists stay in sync
            for collect in collects:
                collect[index].x = x
                collect[index].y = y
                collect[index].theta = new_theta

    def plot(self):
        """Plot the S/E/I/R/D/V time series on six stacked subplots."""
        t = np.linspace(0, self.days, self.days+1)
        fig, (ax1, ax2, ax3, ax4, ax5, ax6) = plt.subplots(nrows=6, sharex="all")
        ax1.set_title("Periodic Movement SEIRDV")
        ax6.set_xlabel("Days")
        ax1.set_ylabel('# Susceptibles')
        ax1.plot(t, self.S, label="Susceptibles")
        ax2.set_ylabel("# Exposed")
        ax2.plot(t, self.E, label="Exposed")
        ax3.set_ylabel("# Infected")
        ax3.plot(t, self.I, label="Infected")
        ax4.set_ylabel("# Recovered")
        ax4.plot(t, self.R, label="Recovered")
        ax5.set_ylabel("# Dead")
        ax5.plot(t, self.D, label='Dead')
        ax6.set_ylabel("# Vaccinated")
        ax6.plot(t, self.V, label="Vaccinated")
        for ax in (ax1, ax2, ax3, ax4, ax5, ax6):
            ax.legend()
        plt.show()
/2Keys-0.5.1.tar.gz/2Keys-0.5.1/twokeys/cli/cli.py | # CLI for 2Keys
# I'm just making my own since that's easier for me to understand
import click
import sys
from ..watcher import Keyboard
from ..util import Logger, load_config, constants
from ..add_keyboard import gen_async_handler, add_keyboard
from ..init import init as init_cli
from ..sync import sync_config
from ..daemon import generate_daemon
logger = Logger("cli")
@click.group()
@click.option("--debug", is_flag=True, help="Enable debugging")
@click.option("--silent", is_flag=True, help="Don't log")
def cli(debug, silent):
return
@cli.command()
@click.option("--address", "-a", help="Specify the IPv4 address of the server")
@click.option("--port", "-p", help="Specify the port the server is running on")
@click.option("--no-path-request", is_flag=True, help="Don't run the interactive keyboard detector (assumes all /dev/input/ paths have already been put into the config on the server)")
def init(address, port, no_path_request):
init_cli(address=address, port=port, no_path_request=no_path_request)
@cli.command()
@click.option("-y", "--yes", is_flag=True, help="Don't ask for prompts")
def sync(yes):
    # Overwrite this detector's local copy of the config with the server's.
    logger.warn("This will overwrite the copy of the config on the detector. Proceed? [Y/n]")
    if yes:
        logger.warn("-y flag was given. Proceeding...")
        proceed = "y"
    else:
        # Ask interactively; normalise whitespace and case.
        proceed = input("").strip().lower()
    # BUG FIX: the prompt advertises yes as the default ("[Y/n]"), but an
    # empty answer previously fell through and aborted; treat Enter as consent.
    if proceed in ("y", ""):
        sync_config()
@cli.command()
@click.argument("keyboard", default="")
@click.option(
    "--inputs-path",
    "-i",
    help="Provide an alternative path to use as the source of keyboard input 'files' (default: /dev/input/by-id)",
    default=constants.KEYBOARDS_PATH_BASE
)
def add(keyboard, inputs_path):
    # Register a keyboard with the server. KEYBOARD defaults to "" —
    # presumably an empty name triggers interactive detection inside
    # add_keyboard (TODO confirm); gen_async_handler supplies the async
    # callback used once a device is chosen.
    add_keyboard(keyboard, gen_async_handler, inputs_path)
@cli.command()
@click.argument("keyboard")
@click.option("-n", "--no-lock", is_flag=True, help="Don't lock the keyboard")
def watch(keyboard, no_lock):
    # Watch one configured keyboard for hotkey events, optionally grabbing
    # (locking) the device so no other process receives its input.
    if keyboard == "":
        logger.err("Please provide a keyboard to watch.")
        # BUG FIX: bare exit() is the interactive-session helper and exited
        # with status 0; an error should exit non-zero via sys.exit.
        sys.exit(1)
    config = load_config()
    if keyboard not in config["keyboards"]:
        # Friendlier than the bare KeyError the lookup below would raise.
        logger.err("Keyboard '" + keyboard + "' not found in the config.")
        sys.exit(1)
    # Distinct name for the watcher: the original rebound `keyboard` itself,
    # shadowing the CLI argument.
    kbd = Keyboard(config["keyboards"][keyboard], keyboard)
    if no_lock:
        kbd.watch_keyboard()
        return
    try:
        kbd.lock()  # Grabs keyboard
        kbd.watch_keyboard()
    except (KeyboardInterrupt, SystemExit, OSError):
        kbd.unlock()  # always release the grab before exiting
        sys.exit(0)
# Command to generate daemons
@cli.command()
@click.argument("keyboards", nargs=-1, required=False)
def daemon_gen(keyboards):
    # Generate daemon files for the given keyboards
    # (default: every keyboard present in the config).
    logger.info("Generating daemon files...")
    config = load_config()
    keyboard_list = config["keyboards"].keys()
    if keyboards != ():
        # Explicit CLI arguments override the configured set.
        keyboard_list = keyboards
    # BUG FIX: the original passed config["keyboards"].keys() here, so any
    # keyboards given on the command line were silently ignored.
    generate_daemon(config["name"], keyboard_list)
/DI_engine-0.4.9-py3-none-any.whl/dizoo/rocket/entry/rocket_landing_onppo_main_v2.py | import os
import torch
import gym
import numpy as np
from tensorboardX import SummaryWriter
from rocket_recycling.rocket import Rocket
from ditk import logging
from ding.model import VAC
from ding.policy import PPOPolicy
from ding.envs import DingEnvWrapper, BaseEnvManagerV2, EvalEpisodeReturnEnv
from ding.config import compile_config
from ding.framework import task
from ding.framework.context import OnlineRLContext
from ding.framework.middleware import multistep_trainer, StepCollector, interaction_evaluator, CkptSaver, \
gae_estimator, termination_checker
from ding.utils import set_pkg_seed
from dizoo.rocket.config.rocket_landing_ppo_config import main_config, create_config
class RocketLandingWrapper(gym.Wrapper):
    """Gym wrapper declaring the spaces the raw Rocket env does not expose.

    Observations: unbounded 8-dimensional float32 vectors; actions: 9
    discrete choices; reward range left unbounded.
    """

    def __init__(self, env):
        super().__init__(env)
        # Unbounded Box matching the env's 8-element state vector.
        self._observation_space = gym.spaces.Box(low=float("-inf"), high=float("inf"), shape=(8, ), dtype=np.float32)
        # 9 discrete actions (see the rocket-recycling env for their meaning).
        self._action_space = gym.spaces.Discrete(9)
        self._action_space.seed(0)  # default seed
        self.reward_range = (float('-inf'), float('inf'))
def wrapped_rocket_env(task, max_steps):
    # Build a Rocket env adapted for DI-engine: declare the spaces
    # (RocketLandingWrapper) and track episode return for evaluation.
    wrapper_cfg = {
        'env_wrapper': [
            lambda env: RocketLandingWrapper(env),
            lambda env: EvalEpisodeReturnEnv(env),
        ]
    }
    base_env = Rocket(task=task, max_steps=max_steps)
    return DingEnvWrapper(base_env, cfg=wrapper_cfg)
def main():
    """Train PPO on rocket landing over several seeds, logging each seed to TensorBoard."""
    logging.getLogger().setLevel(logging.INFO)
    main_config.exp_name = 'rocket_landing_ppo_nseed'
    main_config.policy.cuda = True
    print('torch.cuda.is_available(): ', torch.cuda.is_available())
    cfg = compile_config(main_config, create_cfg=create_config, auto=True)
    num_seed = 4
    for seed_i in range(num_seed):
        # One TensorBoard writer per seed, under <exp_name>/log/seed<i>.
        tb_logger = SummaryWriter(os.path.join('./{}/log/'.format(cfg.exp_name), 'seed' + str(seed_i)))
        # Fresh task context per seed; middleware registered below runs per iteration.
        with task.start(async_mode=False, ctx=OnlineRLContext()):
            # Vectorized env managers; the lambdas close over cfg only (fixed
            # here), so every worker builds an identical env.
            collector_env = BaseEnvManagerV2(
                env_fn=[
                    lambda: wrapped_rocket_env(cfg.env.task, cfg.env.max_steps)
                    for _ in range(cfg.env.collector_env_num)
                ],
                cfg=cfg.env.manager
            )
            evaluator_env = BaseEnvManagerV2(
                env_fn=[
                    lambda: wrapped_rocket_env(cfg.env.task, cfg.env.max_steps)
                    for _ in range(cfg.env.evaluator_env_num)
                ],
                cfg=cfg.env.manager
            )
            # evaluator_env.enable_save_replay()
            set_pkg_seed(seed_i, use_cuda=cfg.policy.cuda)
            model = VAC(**cfg.policy.model)
            policy = PPOPolicy(cfg.policy, model=model)

            def _add_scalar(ctx):
                # Optional extra TensorBoard scalars (eval reward + collector
                # reward stats). Currently unused — see the commented
                # task.use(_add_scalar) below.
                # NOTE(review): the "collecter_step" tag typo is a runtime
                # string; renaming it would break continuity of existing logs.
                if ctx.eval_value != -np.inf:
                    tb_logger.add_scalar('evaluator_step/reward', ctx.eval_value, global_step=ctx.env_step)
                collector_rewards = [ctx.trajectories[i]['reward'] for i in range(len(ctx.trajectories))]
                collector_mean_reward = sum(collector_rewards) / len(ctx.trajectories)
                collector_max_reward = max(collector_rewards)
                collector_min_reward = min(collector_rewards)
                tb_logger.add_scalar('collecter_step/mean_reward', collector_mean_reward, global_step=ctx.env_step)
                tb_logger.add_scalar('collecter_step/max_reward', collector_max_reward, global_step=ctx.env_step)
                tb_logger.add_scalar('collecter_step/min_reward', collector_min_reward, global_step=ctx.env_step)

            # Middleware pipeline: evaluate -> collect -> GAE -> train -> checkpoint.
            task.use(interaction_evaluator(cfg, policy.eval_mode, evaluator_env))
            task.use(StepCollector(cfg, policy.collect_mode, collector_env))
            task.use(gae_estimator(cfg, policy.collect_mode))
            task.use(multistep_trainer(cfg, policy.learn_mode))
            task.use(CkptSaver(policy, cfg.exp_name, train_freq=100))
            # task.use(_add_scalar)
            task.use(termination_checker(max_env_step=int(3e6)))
            task.run()
# Script entry point: launch the multi-seed training runs.
if __name__ == "__main__":
    main()
/ChadBot6-0.1-py3-none-any.whl/ChadBot/generation/rajat_work/qgen/generator/eda.py |
import random
import re
from random import shuffle
from nltk.corpus import wordnet
from tqdm import tqdm
from .base import BaseGenerator
# Fix the module-level RNG so augmentations are reproducible across runs.
random.seed(42)

# English stop words that are never picked for synonym replacement
# ('' is included so empty tokens are also skipped).
STOP_WORDS = ['i', 'me', 'my', 'myself', 'we', 'our',
              'ours', 'ourselves', 'you', 'your', 'yours',
              'yourself', 'yourselves', 'he', 'him', 'his',
              'himself', 'she', 'her', 'hers', 'herself',
              'it', 'its', 'itself', 'they', 'them', 'their',
              'theirs', 'themselves', 'what', 'which', 'who',
              'whom', 'this', 'that', 'these', 'those', 'am',
              'is', 'are', 'was', 'were', 'be', 'been', 'being',
              'have', 'has', 'had', 'having', 'do', 'does', 'did',
              'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or',
              'because', 'as', 'until', 'while', 'of', 'at',
              'by', 'for', 'with', 'about', 'against', 'between',
              'into', 'through', 'during', 'before', 'after',
              'above', 'below', 'to', 'from', 'up', 'down', 'in',
              'out', 'on', 'off', 'over', 'under', 'again',
              'further', 'then', 'once', 'here', 'there', 'when',
              'where', 'why', 'how', 'all', 'any', 'both', 'each',
              'few', 'more', 'most', 'other', 'some', 'such', 'no',
              'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too',
              'very', 's', 't', 'can', 'will', 'just', 'don',
              'should', 'now', '']
class EDAGenerator(BaseGenerator):
    """ Generate questions via Easy Data Augmentation Techniques (Reference: https://arxiv.org/abs/1901.11196). """

    def __init__(self, alpha_sr=0.1, alpha_ri=0.1, alpha_rs=0.1, p_rd=0.1, num_aug=9):
        """
        :param alpha_sr: ratio of words to be replaced by synonyms
        :param alpha_ri: ratio of words to be inserted
        :param alpha_rs: ratio of words to be swapped
        :param p_rd: probability that a word will be deleted
        :param num_aug: number of augmentations produced per input sentence
        """
        super().__init__("Easy Data Augmentation Techniques")
        self.alpha_sr = alpha_sr
        self.alpha_ri = alpha_ri
        self.alpha_rs = alpha_rs
        self.p_rd = p_rd
        self.num_aug = num_aug

    @staticmethod
    def _get_only_chars(line):
        """Lower-case *line*, keeping only ascii letters separated by single spaces."""
        clean_line = ""

        line = line.replace("’", "")
        line = line.replace("'", "")
        line = line.replace("-", " ")  # replace hyphens with spaces
        line = line.replace("\t", " ")
        line = line.replace("\n", " ")
        line = line.lower()

        for char in line:
            if char in 'qwertyuiopasdfghjklzxcvbnm ':
                clean_line += char
            else:
                clean_line += ' '

        clean_line = re.sub(' +', ' ', clean_line)  # delete extra spaces
        # BUG FIX: the original unconditionally indexed clean_line[0], which
        # raised IndexError for input containing no letters at all; after the
        # re.sub there is at most one leading space, so lstrip is equivalent
        # for non-empty results and safe on "".
        return clean_line.lstrip(' ')

    @staticmethod
    def _get_synonyms(word):
        """Return wordnet synonyms of *word* (lower-cased, letters/spaces only, *word* itself excluded)."""
        synonyms = set()
        for syn in wordnet.synsets(word):
            for l in syn.lemmas():
                synonym = l.name().replace("_", " ").replace("-", " ").lower()
                synonym = "".join([char for char in synonym if char in ' qwertyuiopasdfghjklzxcvbnm'])
                synonyms.add(synonym)
        if word in synonyms:
            synonyms.remove(word)
        return list(synonyms)

    @staticmethod
    def _synonym_replacement(words, n):
        """ Replace up to n non-stop-words in the sentence with synonyms from wordnet.
        """
        new_words = words.copy()
        random_word_list = list(set([word for word in words if word not in STOP_WORDS]))
        random.shuffle(random_word_list)
        num_replaced = 0
        for random_word in random_word_list:
            synonyms = EDAGenerator._get_synonyms(random_word)
            if len(synonyms) >= 1:
                synonym = random.choice(list(synonyms))
                new_words = [synonym if word == random_word else word for word in new_words]
                num_replaced += 1
            if num_replaced >= n:  # only replace up to n words
                break
        # Re-split on spaces so multi-word synonyms become separate tokens.
        sentence = ' '.join(new_words)
        new_words = sentence.split(' ')
        return new_words

    @staticmethod
    def _random_deletion(words, p):
        """ Randomly delete words from the sentence with probability p.
        """
        # obviously, if there's only one word, don't delete it
        if len(words) == 1:
            return words

        # randomly delete words with probability p
        new_words = []
        for word in words:
            r = random.uniform(0, 1)
            if r > p:
                new_words.append(word)

        # if you end up deleting all words, just return a random word
        if len(new_words) == 0:
            rand_int = random.randint(0, len(words) - 1)
            return [words[rand_int]]

        return new_words

    @staticmethod
    def _swap_word(new_words):
        """Swap two distinct random positions in *new_words* in place (best effort)."""
        random_idx_1 = random.randint(0, len(new_words) - 1)
        random_idx_2 = random_idx_1
        counter = 0
        # Retry a few times to find a second, different index; give up after 3
        # attempts (e.g. for one-word sentences) and return unchanged.
        while random_idx_2 == random_idx_1:
            random_idx_2 = random.randint(0, len(new_words) - 1)
            counter += 1
            if counter > 3:
                return new_words
        new_words[random_idx_1], new_words[random_idx_2] = new_words[random_idx_2], new_words[random_idx_1]
        return new_words

    @staticmethod
    def _random_swap(words, n):
        """ Randomly swap two words in the sentence n times
        """
        new_words = words.copy()
        for _ in range(n):
            new_words = EDAGenerator._swap_word(new_words)
        return new_words

    @staticmethod
    def _add_word(new_words):
        """Insert a synonym of a random word at a random position (in place; gives up after 10 tries)."""
        synonyms = []
        counter = 0
        while len(synonyms) < 1:
            random_word = new_words[random.randint(0, len(new_words) - 1)]
            synonyms = EDAGenerator._get_synonyms(random_word)
            counter += 1
            if counter >= 10:
                return
        random_synonym = synonyms[0]
        random_idx = random.randint(0, len(new_words) - 1)
        new_words.insert(random_idx, random_synonym)

    @staticmethod
    def _random_insertion(words, n):
        """ Randomly insert n words into the sentence
        """
        new_words = words.copy()
        for _ in range(n):
            EDAGenerator._add_word(new_words)
        return new_words

    def generate(self, sentence):
        """Return a shuffled list of up to num_aug variants of *sentence*, with the cleaned original appended last."""
        sentence = self._get_only_chars(sentence)
        words = sentence.split(' ')
        # BUG FIX: the original filtered with `word is not ''` — an identity
        # test that only works by accident of CPython string interning and is
        # a SyntaxWarning on Python 3.8+; compare by value instead.
        words = [word for word in words if word != '']
        num_words = len(words)

        augmented_sentences = []
        num_new_per_technique = int(self.num_aug / 4) + 1
        n_sr = max(1, int(self.alpha_sr * num_words))
        n_ri = max(1, int(self.alpha_ri * num_words))
        n_rs = max(1, int(self.alpha_rs * num_words))

        # sr
        for _ in range(num_new_per_technique):
            a_words = self._synonym_replacement(words, n_sr)
            augmented_sentences.append(' '.join(a_words))

        # ri
        for _ in range(num_new_per_technique):
            a_words = self._random_insertion(words, n_ri)
            augmented_sentences.append(' '.join(a_words))

        # rs
        for _ in range(num_new_per_technique):
            a_words = self._random_swap(words, n_rs)
            augmented_sentences.append(' '.join(a_words))

        # rd
        for _ in range(num_new_per_technique):
            a_words = self._random_deletion(words, self.p_rd)
            augmented_sentences.append(' '.join(a_words))

        augmented_sentences = [self._get_only_chars(sentence) for sentence in augmented_sentences]
        shuffle(augmented_sentences)

        # trim so that we have the desired number of augmented sentences
        if self.num_aug >= 1:
            augmented_sentences = augmented_sentences[:self.num_aug]
        else:
            keep_prob = self.num_aug / len(augmented_sentences)
            augmented_sentences = [s for s in augmented_sentences if random.uniform(0, 1) < keep_prob]

        # append the original sentence
        augmented_sentences.append(sentence)

        return augmented_sentences

    def batch_generate(self, sentences):
        """Map each input sentence to its list of augmentations (progress shown via tqdm)."""
        results = dict()
        for sentence in tqdm(sentences):
            results[sentence] = self.generate(sentence)
        return results
/HDDM-0.9.9.tar.gz/HDDM-0.9.9/hddm/MPLTraits.py | import wx
import matplotlib
# We want matplotlib to use a wxPython backend
if __name__ == "__main__":
matplotlib.use("WXAgg")
import matplotlib.pyplot
import pylab as pl
pl.ion()
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from traits.api import (
HasTraits,
Instance,
Range,
Array,
on_trait_change,
Property,
cached_property,
Bool,
)
from traits.api import Any, Instance
from traitsui.wx.editor import Editor
from traitsui.basic_editor_factory import BasicEditorFactory
# Optional debugging hook: under a (legacy) IPython session, calling
# debug_here() drops into the interactive debugger; the name is simply
# absent when IPython (or this legacy module path) is unavailable.
try:
    from IPython.Debugger import Tracer

    debug_here = Tracer()
except Exception:
    # Narrowed from a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; any failure here just means no debug hook.
    pass
class _MPLFigureEditor(Editor):
    """TraitsUI (wx backend) editor that renders a matplotlib Figure trait."""

    # Allow the editor to live inside a scrollable TraitsUI layout.
    scrollable = True

    def init(self, parent):
        # Build the wx control once; TraitsUI keeps it on self.control.
        self.control = self._create_canvas(parent)
        self.set_tooltip()

    def update_editor(self):
        # The figure redraws via its own canvas; nothing to sync from the trait.
        pass

    def _create_canvas(self, parent):
        """Create the MPL canvas."""
        # The panel lets us add additional controls.
        panel = wx.Panel(parent, -1, style=wx.CLIP_CHILDREN)
        sizer = wx.BoxSizer(wx.VERTICAL)
        panel.SetSizer(sizer)
        # matplotlib commands to create a canvas
        # (self.value is the edited Figure instance)
        mpl_control = FigureCanvas(panel, -1, self.value)
        sizer.Add(mpl_control, 1, wx.LEFT | wx.TOP | wx.GROW)
        # Standard matplotlib navigation toolbar (pan/zoom/save) under the canvas.
        toolbar = NavigationToolbar2Wx(mpl_control)
        sizer.Add(toolbar, 0, wx.EXPAND)
        self.value.canvas.SetMinSize((10, 10))
        return panel
class MPLFigureEditor(BasicEditorFactory):
    """Editor factory: use as `Item("figure", editor=MPLFigureEditor())` in a View."""

    klass = _MPLFigureEditor
def main():
    """Demo: open a TraitsUI window whose figure redraws as the slider moves."""
    # Create a window to demo the editor
    # NOTE(review): these use the legacy `enthought.*` namespace, while the
    # module header imports from `traits.api` — confirm which is installed.
    from enthought.traits.api import HasTraits
    from enthought.traits.ui.api import View, Item
    from numpy import sin, cos, linspace, pi

    class Test(HasTraits):
        # The edited matplotlib figure and a 0..5 slider (default 1).
        figure = Instance(Figure, ())
        test = Range(0, 5, 1)
        view = View(
            Item("figure", editor=MPLFigureEditor(), show_label=False),
            Item("test"),
            width=400,
            height=300,
            resizable=True,
        )

        def __init__(self):
            super(Test, self).__init__()
            # Initial parametric curve; `test` modulates the first factor.
            self.axes = self.figure.add_subplot(111)
            self.t = linspace(0, 2 * pi, 200)
            # param = self.test
            (self.line,) = self.axes.plot(
                sin(self.t) * (1 + 0.5 * cos(self.test * self.t)),
                cos(self.t) * (1 + 0.5 * cos(11 * self.t)),
            )

        def update_plot(self):
            # Clear and re-plot with the current slider value, then schedule a
            # redraw on the wx event loop (we may not be on the GUI thread).
            self.figure.axes[0].clear()
            self.figure.axes[0].plot(
                sin(self.t) * (1 + 0.5 * cos(self.test * self.t)),
                cos(self.t) * (1 + 0.5 * cos(11 * self.t)),
            )
            # self.axes.plot(sin(self.t)*(1+0.5*cos(self.test*self.t)), cos(self.t)*(1+0.5*cos(11*self.t)))
            # self.axes.redraw_in_frame()
            wx.CallAfter(self.figure.canvas.draw)

        def _test_changed(self):
            # Traits static notification handler: fires when `test` changes.
            self.update_plot()

    # wx.EVT_IDLE(wx.GetApp(), callback)
    Test().configure_traits()
# if __name__ == "__main__":
# main() | PypiClean |
/BIA_OBS-1.0.3.tar.gz/BIA_OBS-1.0.3/BIA/static/dist/node_modules/color-name/index.js | 'use strict'
// CSS color keywords mapped to their [red, green, blue] components (0-255).
// Includes "rebeccapurple" and both spellings of the gray/grey names, and
// the aqua/cyan, fuchsia/magenta aliases, which share identical triples.
module.exports = {
	"aliceblue": [240, 248, 255],
	"antiquewhite": [250, 235, 215],
	"aqua": [0, 255, 255],
	"aquamarine": [127, 255, 212],
	"azure": [240, 255, 255],
	"beige": [245, 245, 220],
	"bisque": [255, 228, 196],
	"black": [0, 0, 0],
	"blanchedalmond": [255, 235, 205],
	"blue": [0, 0, 255],
	"blueviolet": [138, 43, 226],
	"brown": [165, 42, 42],
	"burlywood": [222, 184, 135],
	"cadetblue": [95, 158, 160],
	"chartreuse": [127, 255, 0],
	"chocolate": [210, 105, 30],
	"coral": [255, 127, 80],
	"cornflowerblue": [100, 149, 237],
	"cornsilk": [255, 248, 220],
	"crimson": [220, 20, 60],
	"cyan": [0, 255, 255],
	"darkblue": [0, 0, 139],
	"darkcyan": [0, 139, 139],
	"darkgoldenrod": [184, 134, 11],
	"darkgray": [169, 169, 169],
	"darkgreen": [0, 100, 0],
	"darkgrey": [169, 169, 169],
	"darkkhaki": [189, 183, 107],
	"darkmagenta": [139, 0, 139],
	"darkolivegreen": [85, 107, 47],
	"darkorange": [255, 140, 0],
	"darkorchid": [153, 50, 204],
	"darkred": [139, 0, 0],
	"darksalmon": [233, 150, 122],
	"darkseagreen": [143, 188, 143],
	"darkslateblue": [72, 61, 139],
	"darkslategray": [47, 79, 79],
	"darkslategrey": [47, 79, 79],
	"darkturquoise": [0, 206, 209],
	"darkviolet": [148, 0, 211],
	"deeppink": [255, 20, 147],
	"deepskyblue": [0, 191, 255],
	"dimgray": [105, 105, 105],
	"dimgrey": [105, 105, 105],
	"dodgerblue": [30, 144, 255],
	"firebrick": [178, 34, 34],
	"floralwhite": [255, 250, 240],
	"forestgreen": [34, 139, 34],
	"fuchsia": [255, 0, 255],
	"gainsboro": [220, 220, 220],
	"ghostwhite": [248, 248, 255],
	"gold": [255, 215, 0],
	"goldenrod": [218, 165, 32],
	"gray": [128, 128, 128],
	"green": [0, 128, 0],
	"greenyellow": [173, 255, 47],
	"grey": [128, 128, 128],
	"honeydew": [240, 255, 240],
	"hotpink": [255, 105, 180],
	"indianred": [205, 92, 92],
	"indigo": [75, 0, 130],
	"ivory": [255, 255, 240],
	"khaki": [240, 230, 140],
	"lavender": [230, 230, 250],
	"lavenderblush": [255, 240, 245],
	"lawngreen": [124, 252, 0],
	"lemonchiffon": [255, 250, 205],
	"lightblue": [173, 216, 230],
	"lightcoral": [240, 128, 128],
	"lightcyan": [224, 255, 255],
	"lightgoldenrodyellow": [250, 250, 210],
	"lightgray": [211, 211, 211],
	"lightgreen": [144, 238, 144],
	"lightgrey": [211, 211, 211],
	"lightpink": [255, 182, 193],
	"lightsalmon": [255, 160, 122],
	"lightseagreen": [32, 178, 170],
	"lightskyblue": [135, 206, 250],
	"lightslategray": [119, 136, 153],
	"lightslategrey": [119, 136, 153],
	"lightsteelblue": [176, 196, 222],
	"lightyellow": [255, 255, 224],
	"lime": [0, 255, 0],
	"limegreen": [50, 205, 50],
	"linen": [250, 240, 230],
	"magenta": [255, 0, 255],
	"maroon": [128, 0, 0],
	"mediumaquamarine": [102, 205, 170],
	"mediumblue": [0, 0, 205],
	"mediumorchid": [186, 85, 211],
	"mediumpurple": [147, 112, 219],
	"mediumseagreen": [60, 179, 113],
	"mediumslateblue": [123, 104, 238],
	"mediumspringgreen": [0, 250, 154],
	"mediumturquoise": [72, 209, 204],
	"mediumvioletred": [199, 21, 133],
	"midnightblue": [25, 25, 112],
	"mintcream": [245, 255, 250],
	"mistyrose": [255, 228, 225],
	"moccasin": [255, 228, 181],
	"navajowhite": [255, 222, 173],
	"navy": [0, 0, 128],
	"oldlace": [253, 245, 230],
	"olive": [128, 128, 0],
	"olivedrab": [107, 142, 35],
	"orange": [255, 165, 0],
	"orangered": [255, 69, 0],
	"orchid": [218, 112, 214],
	"palegoldenrod": [238, 232, 170],
	"palegreen": [152, 251, 152],
	"paleturquoise": [175, 238, 238],
	"palevioletred": [219, 112, 147],
	"papayawhip": [255, 239, 213],
	"peachpuff": [255, 218, 185],
	"peru": [205, 133, 63],
	"pink": [255, 192, 203],
	"plum": [221, 160, 221],
	"powderblue": [176, 224, 230],
	"purple": [128, 0, 128],
	"rebeccapurple": [102, 51, 153],
	"red": [255, 0, 0],
	"rosybrown": [188, 143, 143],
	"royalblue": [65, 105, 225],
	"saddlebrown": [139, 69, 19],
	"salmon": [250, 128, 114],
	"sandybrown": [244, 164, 96],
	"seagreen": [46, 139, 87],
	"seashell": [255, 245, 238],
	"sienna": [160, 82, 45],
	"silver": [192, 192, 192],
	"skyblue": [135, 206, 235],
	"slateblue": [106, 90, 205],
	"slategray": [112, 128, 144],
	"slategrey": [112, 128, 144],
	"snow": [255, 250, 250],
	"springgreen": [0, 255, 127],
	"steelblue": [70, 130, 180],
	"tan": [210, 180, 140],
	"teal": [0, 128, 128],
	"thistle": [216, 191, 216],
	"tomato": [255, 99, 71],
	"turquoise": [64, 224, 208],
	"violet": [238, 130, 238],
	"wheat": [245, 222, 179],
	"white": [255, 255, 255],
	"whitesmoke": [245, 245, 245],
	"yellow": [255, 255, 0],
	"yellowgreen": [154, 205, 50]
};