blob_id (stringlengths 40–40) | directory_id (stringlengths 40–40) | path (stringlengths 3–616) | content_id (stringlengths 40–40) | detected_licenses (sequencelengths 0–112) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5–115) | snapshot_id (stringlengths 40–40) | revision_id (stringlengths 40–40) | branch_name (stringclasses, 777 values) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable ⌀) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (stringclasses, 22 values) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable ⌀) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable ⌀) | gha_language (stringclasses, 149 values) | src_encoding (stringclasses, 26 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3 to 10.2M) | extension (stringclasses, 188 values) | content (stringlengths 3–10.2M) | authors (sequencelengths 1–1) | author_id (stringlengths 1–132)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cbb1f4d634b5c2931e92070446640ee4d89d33d8 | efd55bc63da8ab6ee964ec82bd0b761fd36107cc | /leetcode/number-of-enclaves.py | ee0c5737871c26a66fb05b99c26e3ff45578e1bb | [] | no_license | gsantam/competitive-programming | f9a2c9999470eeae9ef4aada6af43b91a65fcb50 | 0b208516a6ae3e72bc7b79ef0ac83dcbfa100496 | refs/heads/master | 2021-06-20T23:27:30.274275 | 2021-06-20T19:44:51 | 2021-06-20T19:44:51 | 162,201,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,569 | py | from typing import List


class Solution:
def numEnclaves(self, A: List[List[int]]) -> int:
visited = set()
numEnclaves = 0
for i in range(len(A)):
for j in range(len(A[0])):
if (i,j) not in visited and A[i][j]==1:
can_visit_boundary = False
stack = [(i,j)]
total_lands = 0
while len(stack)>0:
element = stack.pop()
x = element[0]
y = element[1]
if element not in visited and A[x][y]==1:
total_lands+=1
visited.add(element)
if x+1>=len(A):
can_visit_boundary = True
else:
stack.append((x+1,y))
if y+1>=len(A[0]):
can_visit_boundary = True
else:
stack.append((x,y+1))
if x-1<0:
can_visit_boundary = True
else:
stack.append((x-1,y))
if y-1<0:
can_visit_boundary = True
else:
stack.append((x,y-1))
if not can_visit_boundary:
numEnclaves+=total_lands
return numEnclaves
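
# Example usage (a minimal sanity check on a hypothetical grid; land connected
# to the border is excluded, leaving the 3 enclosed land cells):
# Solution().numEnclaves([[0, 0, 0, 0],
#                         [1, 0, 1, 0],
#                         [0, 1, 1, 0],
#                         [0, 0, 0, 0]])  # -> 3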
| [
"[email protected]"
] | |
f472551f1d884a042278fd5068b8812e440a9674 | f73bcada5ab8432d2af07b5cb7fd7a38109d3e3a | /.history/parser_20201108183309.py | ccfb07d5c586abb5736efc84264c3f2c979c39ba | [] | no_license | mariajbp/gedcomparser | 837bf4ae5628a81e535d233c7c35313c6d86d78c | 6fc55899e5a82c4071991ab94a344b64c014b84d | refs/heads/master | 2023-01-23T09:01:27.459597 | 2020-11-19T23:58:53 | 2020-11-19T23:58:53 | 310,900,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,988 | py | #!/usr/bin/python3
#python3 parser.py input/bible.gedcom > test.txt
import sys
from re import *
filename = sys.argv[1].split('/')[1]
assetPath = "assets"
indPath = "individuals"
famPath = "families"
cssPath = "assets/gedcom.css"
def createIndi(ik,iv):
f = open('assets/individuals/'+ik+'.html', 'w')
f.write('<h4> <a href=\"../index.html\"> return to index </a> </h4>')
f.write('<!DOCTYPE html><html><head> <link rel="stylesheet" type="text/css" href="../index.css"></head>\n')
f.write('<h1> Código do individuo: ' + ik + '</h1>')
for k, v in iv.items():
f.write('<b>'+str(k) + ':</b> '+ str(v) + '\n')
f.close()
def createFamily(fk,fi):
f = open('assets/families/'+fk+'.html', 'w')
f.write('<h4> <a href=\"../index.html\"> return to index </a> </h4>')
f.write('<!DOCTYPE html><html><head> <link rel="stylesheet" type="text/css" href="../index.css"></head>\n')
f.write('<h1> Código da familia: ' + fk + '</h1>')
for k, v in fi.items():
f.write('<b>'+str(k) + ':</b> '+ str(v) +'\r\n')
f.close()
def createIndex(fam,indi):
f = open("assets/index.html", 'w')
f.write('<!DOCTYPE html><html><head> <link rel="stylesheet" type="text/css" href="index.css"></head>\n')
f.write('<h1> Ficheiro: ' + filename + '</h1>')
f.write('<div class="row"><div class="column"><h2>Familias</h2>')
for keyf in fam:
f.write('<li> <a href=\"'+famPath+'/'+keyf+'.html\">'+keyf+'</a></li>\n')
f.write('</ul> </div>')
f.write('<div class="column"><h2>Individuos</h2>')
for keyi in indi:
f.write('<li> <a href=\"'+indPath+'/'+keyi+'.html\">'+keyi+'</a></li>\n')
f.write('</ul></div></div>')
f.close()
# build an individual and its characteristics
BG = {}
def procIndi(s,i):
indi = {}
name = search(r'\bNAME\s+(.*)', i)
title = search(r'\bTITL\s+(.*)', i)
gender = search(r'\bSEX\s+(.*)', i)
if name:
indi['Name']= name.group(1)
name = findall (r'\bFAMS\s+@(.*)@',i)
if title:
indi['Title'] = title.group(1)
if gender:
indi['Gender'] = gender.group(1)
BG[s] = indi
BF = {}
def procFam(f,i):
fam={}
h = search(r'\bHUSB\s+@(.*)@',i)
if h:
fam['Husband'] = h.group(1)
w = search(r'\bWIFE\s+@(.*)@',i)
if w:
fam['Wife'] = w.group(1)
fam['Children'] = findall (r'\bCHIL\s+@(.*)@',i)
BF[f] = fam
    print(fam.get('Husband'))
def process(t):
items = split(r'\n0',t)
for i in items:
        z = search(r'@(I\d+)@ *INDI', i)  # find all individuals
if z:
procIndi(z.group(1),i)
        f = search(r'@(F\d+)@ *FAM', i)  # find all families
if f:
procFam(f.group(1),i)
with open(sys.argv[1], 'r') as f :
gedcom = f.read()
process(gedcom)
createIndex(BF.keys(), BG.keys())
for k,v in BF.items():
createFamily(k,v)
for k,v in BG.items():
createIndi(k,v)
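
# Note: the output directories 'assets/', 'assets/individuals/' and
# 'assets/families/' are assumed to already exist; open(..., 'w') does not
# create them.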
| [
"[email protected]"
] | |
bb727947846d46686b610f372e783a5b27bcc74b | 434cdc852799abf4377effdf6e4bd810b4af649a | /python2/pracmln/mln/mrf.py | 2c9db874e907742ff9e98859d2dda0ed5d2ca7b2 | [
"BSD-2-Clause"
] | permissive | yawenEholder/pracmln | 576f1ffc5f046cd9b0540d4f8799dadc5b1f4e22 | 65287963cc0df86ae0a0d5323ffa6a1a8a9b92cb | refs/heads/master | 2020-04-07T22:41:12.006506 | 2018-02-05T17:39:51 | 2018-02-05T17:39:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,017 | py | # -*- coding: utf-8 -*-
#
# Ground Markov Random Fields
#
# (C) 2012-2013 by Daniel Nyga ([email protected])
# (C) 2006-2011 by Dominik Jain ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from dnutils import logs, out
from pracmln.mln.database import Database
from util import mergedom
import copy
import sys
import re
from util import fstr
from pracmln.logic import FirstOrderLogic
from util import logx
import time
from grounding import *
from pracmln.logic.common import Logic
from pracmln.mln.constants import HARD, nan
from pracmln.logic.fuzzy import FuzzyLogic
from pracmln.mln.mrfvars import MutexVariable, SoftMutexVariable, FuzzyVariable,\
BinaryVariable
from pracmln.mln.errors import MRFValueException, NoSuchDomainError,\
NoSuchPredicateError
from pracmln.mln.util import CallByRef, Interval, temporary_evidence, tty
from pracmln.mln.methods import InferenceMethods
from math import *
import traceback
logger = logs.getlogger(__name__)
class MRF(object):
'''
Represents a ground Markov random field.
:member _gndatoms: dict mapping a string representation of a ground atom to its Logic.GroundAtom object
    :member _gndatoms_by_idx: dict mapping ground atom index to Logic.GroundAtom object
:member _evidence: vector of evidence truth values of all ground atoms
:member _variables: dict mapping variable names to their :class:`mln.mrfvars.MRFVariable` instance.
:param mln: the MLN tied to this MRF.
:param db: the database that the MRF shall be grounded with.
'''
def __init__(self, mln, db):
self.mln = mln.materialize(db)
self._evidence = []
# self.evidenceBackup = {}
self._variables = {}
self._variables_by_idx = {} # gnd atom idx -> variable
self._variables_by_gndatomidx = {} # gnd atom idx
self._gndatoms = {}
self._gndatoms_by_idx = {}
# get combined domain
self.domains = mergedom(self.mln.domains, db.domains)
# self.softEvidence = list(mln.posteriorProbReqs) # constraints on posterior
# probabilities are nothing but
# soft evidence and can be handled in exactly the same way
# ground members
self.formulas = list(self.mln.formulas)
# self.gndAtoms = {}
# self.gndBlockLookup = {}
# self.gndBlocks = {}
# self.gndAtomsByIdx = {}
# self.gndFormulas = []
# self.gndAtomOccurrencesInGFs = []
if isinstance(db, basestring):
db = Database.load(self.mln, dbfile=db)
elif isinstance(db, Database):
pass
elif db is None:
db = Database(self.mln)
else:
raise Exception("Not a valid database argument (type %s)" % (str(type(db))))
self.db = db
# materialize formula weights
self._materialize_weights()
return
# self.closedWorldPreds = list(self.mln.closedWorldPreds)
# self.posteriorProbReqs = list(self.mln.posteriorProbReqs)
# self.predicates = copy.deepcopy(self.mln.predicates)
# self.templateIdx2GroupIdx = self.mln.templateIdx2GroupIdx
# # grounding
# log.info('Loading %s...' % groundingMethod)
# groundingMethod = eval('%s(self, db, **params)' % groundingMethod)
# self.groundingMethod = groundingMethod
# groundingMethod.groundMRF(cwAssumption=cwAssumption, simplify=simplify)
# assert len(self.gndAtoms) == len(self.evidence)
@property
def probreqs(self):
return self.mln.probreqs
@property
def variables(self):
return sorted(self._variables.values(), key=lambda v: v.idx)
@property
def gndatoms(self):
return self._gndatoms.values()
@property
def evidence(self):
return self._evidence
@evidence.setter
def evidence(self, evidence):
self._evidence = evidence
self.consistent()
@property
def predicates(self):
return self.mln.predicates
@property
def hardformulas(self):
'''
Returns a list of all hard formulas in this MRF.
'''
return [f for f in self.formulas if f.weight == HARD]
def _getPredGroundings(self, predName):
'''
Gets the names of all ground atoms of the given predicate.
'''
        # get the string representation of the first grounding of the predicate
if predName not in self.predicates:
raise Exception('Unknown predicate "%s" (%s)' % (predName, map(str, self.predicates)))
domNames = self.predicates[predName]
params = []
for domName in domNames:
params.append(self.domains[domName][0])
gndAtom = "%s(%s)" % (predName, ",".join(params))
# get all subsequent groundings (by index) until the predicate name changes
groundings = []
idx = self.gndAtoms[gndAtom].idx
while True:
groundings.append(gndAtom)
idx += 1
if idx >= len(self.gndAtoms):
break
gndAtom = str(self.gndAtomsByIdx[idx])
if self.mln.logic.parseAtom(gndAtom)[0] != predName:
break
return groundings
def _getPredGroundingsAsIndices(self, predName):
'''
Get a list of all the indices of all groundings of the given predicate
'''
# get the index of the first grounding of the predicate and the number of groundings
domNames = self.predicates[predName]
params = []
numGroundings = 1
for domName in domNames:
params.append(self.domains[domName][0])
numGroundings *= len(self.domains[domName])
gndAtom = "%s(%s)" % (predName, ",".join(params))
if gndAtom not in self.gndAtoms: return []
idxFirst = self.gndAtoms[gndAtom].idx
return range(idxFirst, idxFirst + numGroundings)
def domsize(self, domname):
if not domname in self.domains:
raise NoSuchDomainError(domname)
return len(self.domains[domname])
def _materialize_weights(self, verbose=False):
'''
materialize all formula weights.
'''
max_weight = 0
for f in self.formulas:
if f.weight is not None and f.weight != HARD:
w = str(f.weight)
variables = re.findall(r'\$\w+', w)
for var in variables:
try:
w, numReplacements = re.subn(r'\%s' % var, self.mln.vars[var], w)
except:
raise Exception("Error substituting variable references in '%s'\n" % w)
if numReplacements == 0:
raise Exception("Undefined variable(s) referenced in '%s'" % w)
w = re.sub(r'domSize\((.*?)\)', r'self.domsize("\1")', w)
try:
f.weight = float(eval(w))
except:
sys.stderr.write("Evaluation error while trying to compute '%s'\n" % w)
raise
max_weight = max(abs(f.weight), max_weight)
def __getitem__(self, key):
return self.evidence[self.gndatom(key).idx]
def __setitem__(self, key, value):
self.set_evidence({key: value}, erase=False)
def prior(self, f, p):
self._probreqs.append(FirstOrderLogic.PriorConstraint(formula=f, p=p))
def posterior(self, f, p):
self._probreqs.append(FirstOrderLogic.PosteriorConstraint(formula=f, p=p))
def set_evidence(self, atomvalues, erase=False, cw=False):
'''
Sets the evidence of variables in this MRF.
If erase is `True`, for every ground atom appearing in atomvalues, the truth values of all ground
ground atom in the respective MRF variable are erased before the evidences
are set. All other ground atoms stay untouched.
:param atomvalues: a dict mapping ground atom strings/objects/indices to their truth
values.
:param erase: specifies whether or not variables shall be erased before asserting the evidences.
Only affects the variables that are present in `atomvalues`.
:param cw: applies the closed-world assumption for all non evidence atoms.
'''
# check validity of evidence values
atomvalues_ = {}
for key, value in dict(atomvalues).iteritems():
# convert boolean to numeric values
if value in (True, False):
atomvalues[key] = {True: 1, False: 0}[value]
value = atomvalues[key]
gndatom = self.gndatom(key)
if gndatom is None:
self.print_gndatoms()
raise MRFValueException('"%s" is not among the ground atoms.' % key)
atomvalues_[str(gndatom)] = value
var = self.variable(gndatom)
if isinstance(self.mln.logic, FuzzyLogic):
if (isinstance(var, MutexVariable) or isinstance(var, SoftMutexVariable) or isinstance(var, BinaryVariable)) and value is not None and value in Interval(']0,1['):
raise MRFValueException('Illegal value for the (soft-) mutex or binary variable "%s": %s' % (str(var), value))
atomvalues = atomvalues_
if erase: # erase all variable assignments appearing in atomvalues
for key, _ in atomvalues.iteritems():
var = self.variable(self.gndatom(key))
# unset all atoms in this variable
for atom in var.gndatoms:
self._evidence[atom.idx] = None
for key, value in atomvalues.iteritems():
gndatom = self.gndatom(key)
var = self.variable(gndatom)
# create a template with admissible truth values for all
# ground atoms in this variable
values = [-1] * len(var.gndatoms)
if isinstance(var, FuzzyVariable):
self._evidence[gndatom.idx] = value
continue
elif isinstance(var, BinaryVariable):
self._evidence[gndatom.idx] = value
continue
for _, val in var.itervalues(evidence={gndatom.idx: value}):
for i, (v, v_) in enumerate(zip(values, val)):
if v == -1: values[i] = v_
elif v is not None and v != v_:
values[i] = None
for atom, val in zip(var.gndatoms, values):
curval = self._evidence[atom.idx]
if curval is not None and val is not None and curval != val:
raise MRFValueException('Contradictory evidence in variable %s: %s = %s vs. %s' % (var.name, str(gndatom), curval, val))
elif curval is None and val is not None:
self._evidence[atom.idx] = val
if cw: self.apply_cw()
def erase(self):
'''
Erases all evidence in the MRF.
'''
self._evidence = [None] * len(self.gndatoms)
def apply_cw(self, *prednames):
'''
Applies the closed world assumption to this MRF.
Sets all evidences to 0 if they don't have truth value yet.
:param prednames: a list of predicate names the cw assumption shall be applied to.
If empty, it is applied to all predicates.
'''
for i, v in enumerate(self._evidence):
if prednames and self.gndatom(i).predname not in prednames:
continue
if v is None: self._evidence[i] = 0
def consistent(self, strict=False):
'''
Performs a consistency check on this MRF wrt. to the variable value assignments.
Raises an MRFValueException if the MRF is inconsistent.
'''
for variable in self.variables:
variable.consistent(self.evidence_dicti(), strict=strict)
def gndatom(self, identifier, *args):
'''
        Returns the ground atom instance associated with the given identifier, or adds
        a new ground atom.
:param identifier: Either the string representation of the ground atom or its index (int)
:returns: the :class:`logic.common.Logic.GroundAtom` instance or None, if the ground
atom doesn't exist.
:Example:
>>> mrf = MRF(mln)
>>> mrf.gndatom('foo', 'x', 'y') # add the ground atom 'foo(x,y)'
>>> mrf.gndatom('foo(x,y)') # get the ground atom
foo(x,y)
>>> mrf.gndatom(0) # get the ground atom
foo(x,y)
'''
if not args:
if isinstance(identifier, basestring):
atom = self._gndatoms.get(identifier)
if atom is None:
try:
_, predname, args = self.mln.logic.parse_literal(identifier)
except NoSuchPredicateError: return None
atomstr = str(self.mln.logic.gnd_atom(predname, args, self.mln))
return self._gndatoms.get(atomstr)
else:
return atom
elif type(identifier) is int:
return self._gndatoms_by_idx.get(identifier)
elif isinstance(identifier, Logic.GroundAtom):
return self._gndatoms.get(str(identifier))
# else:
# return self.new_gndatom(identifier.predname, *identifier.args)
else: raise Exception('Illegal identifier type: %s' % type(identifier))
else:
return self.new_gndatom(identifier, *args)
def variable(self, identifier):
'''
Returns the :class:`mln.mrfvars.MRFVariable` instance of the variable with the name or index `var`,
or None, if no such variable exists.
:param identifier: (string/int/:class:`logic.common.Logic.GroundAtom`) the name or index of the variable,
or the instance of a ground atom that is part of the desired variable.
'''
if type(identifier) is int:
return self._variables_by_idx.get(identifier)
elif isinstance(identifier, Logic.GroundAtom):
return self._variables_by_gndatomidx[identifier.idx]
elif isinstance(identifier, basestring):
return self._variables.get(identifier)
def new_gndatom(self, predname, *args):
'''
Adds a ground atom to the set (actually it's a dict) of ground atoms.
        If the ground atom is already in the MRF, it does nothing but return the existing
        ground atom instance. Also updates/adds the variables of the MRF.
        :param predname: the predicate name of the ground atom
        :param *args: the list of predicate arguments
        :returns: the :class:`logic.common.Logic.GroundAtom` object
'''
# create and add the ground atom
gndatom = self.mln.logic.gnd_atom(predname, args, self.mln)
if str(gndatom) in self._gndatoms:
return self._gndatoms[str(gndatom)]
self._evidence.append(None)
gndatom.idx = len(self._gndatoms)
self._gndatoms[str(gndatom)] = gndatom
self._gndatoms_by_idx[gndatom.idx] = gndatom
        # add the ground atom to the variable it belongs
        # to, or create a new one if it doesn't exist.
predicate = self.mln.predicate(gndatom.predname)
varname = predicate.varname(gndatom)
variable = self.variable(varname)
if variable is None:
variable = predicate.tovariable(self, varname)
self._variables[variable.name] = variable
self._variables_by_idx[variable.idx] = variable
variable.gndatoms.append(gndatom)
self._variables_by_gndatomidx[gndatom.idx] = variable
return gndatom
def print_variables(self):
for var in self.variables:
print str(var)
def print_world_atoms(self, world, stream=sys.stdout):
'''
Prints the given world `world` as a readable string of the plain gnd atoms to the given stream.
'''
for gndatom in self.gndatoms:
v = world[gndatom.idx]
vstr = '%.3f' % v if v is not None else '? '
stream.write('%s %s\n' % (vstr, str(gndatom)))
def print_world_vars(self, world, stream=sys.stdout, tb=2):
'''
Prints the given world `world` as a readable string of the MRF variables to the given stream.
'''
out('=== WORLD VARIABLES ===', tb=tb)
for var in self.variables:
stream.write(repr(var) + '\n')
for i, v in enumerate(var.evidence_value(world)):
vstr = '%.3f' % v if v is not None else '? '
stream.write(' %s %s\n' % (vstr, var.gndatoms[i]))
def print_domains(self):
out('=== MRF DOMAINS ==', tb=2)
for dom, values in self.domains.iteritems():
print dom, '=', ','.join(values)
def evidence_dicts(self):
'''
Returns, from the current evidence list, a dictionary that maps ground atom names to truth values
'''
d = {}
for idx, tv in enumerate(self._evidence):
d[str(self._gndatoms_by_idx[idx])] = tv
return d
def evidence_dicti(self):
'''
Returns, from the current evidence list, a dictionary that maps ground atom indices to truth values
'''
d = {}
for idx, tv in enumerate(self._evidence):
d[idx] = tv
return d
def countworlds(self, withevidence=False):
'''
Computes the number of possible worlds this MRF can take.
:param withevidence: (bool) if True, takes into account the evidence which is currently set in the MRF.
if False, computes the total number of possible worlds.
.. note:: this method does not enumerate the possible worlds.
'''
worlds = 1
        ev = self.evidence_dicti() if withevidence else {}
for var in self.variables:
worlds *= var.valuecount(ev)
return worlds
def iterworlds(self):
'''
Iterates over the possible worlds of this MRF taking into account the evidence vector of truth values.
:returns: a generator of (idx, possible world) tuples.
'''
for res in self._iterworlds([v for v in self.variables if v.valuecount(self.evidence) > 1], list(self.evidence), CallByRef(0), self.evidence_dicti()):
yield res
def _iterworlds(self, variables, world, worldidx, evidence):
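        # Recursively enumerate possible worlds: fuzzy variables are fixed to
        # their evidence value, all other variables enumerate their admissible
        # values; a complete world is yielded once no variables remain.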
if not variables:
yield worldidx.value, world
worldidx.value += 1
return
variable = variables[0]
if isinstance(variable, FuzzyVariable):
world_ = list(world)
value = variable.evidence_value(evidence)
for res in self._iterworlds(variables[1:], variable.setval(value, world_), worldidx, evidence):
yield res
else:
for _, value in variable.itervalues(evidence):
world_ = list(world)
for res in self._iterworlds(variables[1:], variable.setval(value, world_), worldidx, evidence):
yield res
def worlds(self):
'''
Iterates over all possible worlds (taking evidence into account).
:returns: a generator of possible worlds.
'''
for _, world in self.iterworlds():
yield world
def iterallworlds(self):
'''
        Iterates over all possible worlds (without taking evidence into account).
:returns: a generator of possible worlds.
'''
world = [None] * len(self.evidence)
for i, w in self._iterworlds(self.variables, world, CallByRef(0), {}):
yield i, w
def itergroundings(self, simplify=False, grounding_factory='DefaultGroundingFactory'):
'''
Iterates over all groundings of all formulas of this MRF.
:param simplify: if True, the ground formulas are simplified wrt to the evidence in the MRF.
:param grounding_factory: the grounding factory to be used.
:returns: a generator yielding ground formulas
'''
grounder = eval('%s(self, simplify=simplify)' % grounding_factory)
for gndf in grounder.itergroundings():
yield gndf
def print_evidence_atoms(self, stream=sys.stdout):
'''
Prints the evidence truth values of plain ground atoms to the given `stream`.
'''
self.print_world_atoms(self.evidence, stream)
def print_evidence_vars(self, stream=sys.stdout):
'''
Prints the evidence truth values of the variables of this MRF to the given `stream`.
'''
self.print_world_vars(self.evidence, stream, tb=3)
def getTruthDegreeGivenSoftEvidence(self, gf, world):
cnf = gf.cnf()
prod = 1.0
if isinstance(cnf, FirstOrderLogic.Conjunction):
for disj in cnf.children:
prod *= self._noisyOr(world, disj)
else:
prod *= self._noisyOr(world, cnf)
return prod
def _getEvidenceTruthDegreeCW(self, gndAtom, worldValues):
'''
        Gets (soft or hard) evidence as a degree of belief from 0 to 1, making the closed-world
        assumption. Soft evidence takes precedence over hard evidence.
'''
se = self._getSoftEvidence(gndAtom)
if se is not None:
return se if (True == worldValues[gndAtom.idx] or None == worldValues[gndAtom.idx]) else 1.0 - se # TODO allSoft currently unsupported
return 1.0 if worldValues[gndAtom.idx] else 0.0
def print_gndatoms(self, stream=sys.stdout):
'''
Prints the alphabetically sorted list of ground atoms in this MRF to the given `stream`.
'''
out('=== GROUND ATOMS ===', tb=2)
l = self._gndatoms.keys()
for ga in sorted(l):
stream.write(str(ga) + '\n')
def apply_prob_constraints(self, constraints, method=InferenceMethods.EnumerationAsk,
thr=1.0e-3, steps=20, fittingMCSATSteps=5000,
fittingParams=None, given=None, queries=None,
maxThreshold=None, greedy=False, probabilityFittingResultFileName=None, **args):
'''
Applies the given probability constraints (if any), dynamically
modifying weights of the underlying MLN by applying iterative proportional fitting
:param constraints: list of constraints
:param method: one of the inference methods defined in InferenceMethods
inferenceParams: parameters to pass on to the inference method
:param given: if not None, fit parameters of posterior (given the evidence) rather than prior
:param querie queries to compute along the way, results for which will be returned
:param thr: when maximum absolute difference between desired and actual probability drops below this value, then stop (convergence)
maxThreshold:
if not None, then convergence is relaxed, and we stop when the *mean* absolute difference between desired and
actual probability drops below "threshold" *and* the maximum is below "maxThreshold"
'''
if fittingParams is None:
fittingParams = {}
inferenceParams = fittingParams
inferenceParams["doProbabilityFitting"] = False # avoid recursive fitting calls when calling embedded inference method
if given == None:
given = ""
if queries is None:
queries = []
if inferenceParams is None:
inferenceParams = {}
if not constraints:
if queries: pass # TODO !!!! because this is called from inferIPFPM, should perform inference anyhow
return
t_start = time.time()
# determine relevant formulas
for req in constraints:
# if we don't yet have a ground formula to fit, create one
if not "gndFormula" in req:
# if we don't yet have a formula to use, search for one that matches the expression to fit
if not "idxFormula" in req:
idxFormula = None
for idxF, formula in enumerate(self.formulas):
#print strFormula(formula), req["expr"]
if fstr(formula).replace(" ", "") == req["expr"]:
idxFormula = idxF
break
if idxFormula is None:
raise Exception("Probability constraint on '%s' cannot be applied because the formula is not part of the MLN!" % req["expr"])
req["idxFormula"] = idxFormula
# instantiate a ground formula
formula = self.formulas[req["idxFormula"]]
variables = formula.getVariables(self)
groundVars = {}
for varName, domName in variables.iteritems(): # instantiate vars arbitrarily (just use first element of domain)
groundVars[varName] = self.domains[domName][0]
gndFormula = formula.ground(self, groundVars)
req["gndExpr"] = str(gndFormula)
req["gndFormula"] = gndFormula
# iterative fitting algorithm
step = 1 # fitting round
fittingStep = 1 # actual IPFP iteration
#print "probConstraints", probConstraints, "queries", queries
what = [r["gndFormula"] for r in constraints] + queries
done = False
while step <= steps and not done:
# calculate probabilities of the constrained formulas (ground formula)
if method is InferenceMethods.Exact:
if not hasattr(self, "worlds"):
self._getWorlds()
else:
self._calculateWorldValues()
results = self.inferExact(what, given=given, verbose=False, **inferenceParams)
elif method == InferenceMethods.EnumerationAsk:
results = self.inferEnumerationAsk(what, given=given, verbose=False, **inferenceParams)
#elif inferenceMethod == InferenceMethods.ExactLazy:
# results = self.inferExactLazy(what, given=given, verbose=False, **inferenceParams)
elif method == InferenceMethods.MCSAT:
results = self.inferMCSAT(what, given=given, verbose=False, maxSteps = fittingMCSATSteps, **inferenceParams)
else:
raise Exception("Requested inference method (%s) not supported by probability constraint fitting" % InferenceMethods.getName(method))
if type(results) != list:
results = [results]
# compute deviations
diffs = [abs(r["p"] - results[i]) for (i, r) in enumerate(constraints)]
maxdiff = max(diffs)
meandiff = sum(diffs) / len(diffs)
# are we done?
done = maxdiff <= thr
if not done and maxThreshold is not None: # relaxed convergence criterion
done = (meandiff <= thr) and (maxdiff <= maxThreshold)
if done: break
# select constraint to fit
if greedy:
idxConstraint = diffs.index(maxdiff)
strStep = "%d;%d" % (step, fittingStep)
else:
idxConstraint = (fittingStep - 1) % len(constraints)
strStep = "%d;%d/%d" % (step, idxConstraint + 1, len(constraints))
req = constraints[idxConstraint]
# get the scaling factor and apply it
formula = self.formulas[req["idxFormula"]]
p = results[idxConstraint]
#print "p", p, "results", results, "idxConstraint", idxConstraint
pnew = req["p"]
precision = 1e-3
if p == 0.0: p = precision
if p == 1.0: p = 1 - precision
f = pnew * (1 - p) / p / (1 - pnew)
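            # IPFP-style update: f is the ratio of the desired odds pnew/(1-pnew)
            # to the current odds p/(1-p); adding log(f) to the formula weight
            # shifts the formula's probability toward the target pnew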
old_weight = formula.weight
formula.weight += float(logx(f)) #make sure to set the weight to a native float and not an mpmath value
diff = diffs[idxConstraint]
# print status
logger.debug(" [%s] p=%f vs. %f (diff = %f), weight %s: %f -> %f, dev max %f mean %f, elapsed: %.3fs" % (strStep, p, pnew, diff, str(formula), old_weight, formula.weight, maxdiff, meandiff, time.time() - t_start))
if fittingStep % len(constraints) == 0:
step += 1
fittingStep += 1
#write resulting mln:
if probabilityFittingResultFileName != None:
mlnFile = file(probabilityFittingResultFileName, "w")
self.mln.write(mlnFile)
mlnFile.close()
print "written MLN with probability constraints to:", probabilityFittingResultFileName
return (results[len(constraints):], {"steps": min(step, steps), "fittingSteps": fittingStep, "maxdiff": maxdiff, "meandiff": meandiff, "time": time.time() - t_start})
def _weights(self):
''' returns the weight vector as a list '''
return [f.weight for f in self.formulas]
def dotfile(self, filename):
'''
write a .dot file for use with GraphViz (in order to visualize the current ground Markov network)
'''
        if not self._gndatoms:
raise Exception("Error: cannot create graph because the MLN was not combined with a concrete domain")
with open(filename, "wb") as f:
f.write("graph G {\n")
graph = {}
            for gf in self.itergroundings():
atomindices = gf.gndatom_indices()
for i in range(len(atomindices)):
for j in range(i + 1, len(atomindices)):
edge = [atomindices[i], atomindices[j]]
edge.sort()
edge = tuple(edge)
if not edge in graph:
f.write(" ga%d -- ga%d\n" % edge)
graph[edge] = True
            for atom in self.gndatoms:
f.write(' ga%d [label="%s"]\n' % (atom.idx, str(atom)))
f.write("}\n")
def graphml(self, filename):
import graphml # @UnresolvedImport
G = graphml.Graph()
nodes = []
        for i in xrange(len(self._gndatoms_by_idx)):
            ga = self._gndatoms_by_idx[i]
            nodes.append(graphml.Node(G, label=str(ga), shape="ellipse", color=graphml.randomVariableColor))
        links = {}
        for gf in self.itergroundings():
            print gf
            idxGAs = sorted(gf.gndatom_indices())
for idx, i in enumerate(idxGAs):
for j in idxGAs[idx+1:]:
t = (i,j)
if not t in links:
print " %s -- %s" % (nodes[i], nodes[j])
graphml.UndirectedEdge(G, nodes[i], nodes[j])
links[t] = True
f = open(filename, "w")
G.write(f)
f.close()
| [
"[email protected]"
] | |
a9710d3e6cebed174b49ca7389a2ff5cedf15dbf | 33c23cb18917d6b1255fa45a4f1944f1774fdb99 | /scripts/local_lcs_pam_250.py | 0b29540cf2ff0ac610c6d0b89c4448edfd3265f0 | [] | no_license | sjuvekar/Bioinformatics | ff0c0f4d4b77c322ce59cd98ae0036d71305710f | 97bf341f2b8b63b7eba78e736be6703a2f651e90 | refs/heads/master | 2020-05-17T00:18:18.056611 | 2013-12-11T02:46:34 | 2013-12-11T02:46:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | #!/usr/bin/env python
from util.lcs_util import LCSUtil
import sys
if __name__ == "__main__":
f = open(sys.argv[1])
dna = LCSUtil(f.readline().strip())
dna.parse_score_matrix("../matrices/PAM250.txt")
other_dna = LCSUtil(f.readline().strip())
(best_score, seq1, seq2) = dna.graph_based_local_alignment(other_dna, 5, True)
print best_score
print seq1
print seq2
| [
"[email protected]"
] | |
a2785bd0b41bd5bb7bb9d3b20b2e2922d476bae4 | a9f38bb28ff9bd04b151d86c653cde9f46768c7c | /medium/validateBST.py | ce4596ddfbf18317b96c151cb041324fd72f9669 | [] | no_license | Xynoclafe/leetcode | 02388516b10b8ee6bec6ee1b91ab5681c3254d33 | 4a80f02683e7fc14cb49c07170651ea3eeb280ac | refs/heads/master | 2020-12-01T21:05:44.656581 | 2020-02-02T09:05:32 | 2020-02-02T09:05:32 | 230,770,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def isValidBST(self, root: TreeNode) -> bool:
def inOrderT(root, inOrder):
if root == None:
return
inOrderT(root.left, inOrder)
inOrder.append(root.val)
inOrderT(root.right, inOrder)
inOrder = []
inOrderT(root, inOrder)
for i in range(len(inOrder) - 1):
if inOrder[i] >= inOrder[i + 1]:
return False
return True
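
# Example usage (a minimal sketch; TreeNode is assumed to be provided by the
# judging environment, as in the header comment above):
# root = TreeNode(2)
# root.left, root.right = TreeNode(1), TreeNode(3)
# Solution().isValidBST(root)  # -> True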
| [
"[email protected]"
] | |
1323bf6ec05675b2462147830b71f4051ac71fc7 | 1dc67a30f9af553243088668d51bc4e75e87d83d | /pythonNet/udp_client.py | 2d2eeb35ecd220d4b54e3db4f2dd9be943735345 | [] | no_license | houyinhu/AID1812 | 00db45b3e8905bd069b31f2e7689f83bca3fa61f | 8eeb9f06ed9f4e742d480354ef0e336dfe8c2f17 | refs/heads/master | 2020-04-27T16:33:57.275890 | 2019-04-10T01:09:51 | 2019-04-10T01:09:51 | 174,486,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | from socket import *
#服务器地址
host = '127.0.0.1'
port = 9999
addr = (host,port)
#创建套接字
sockfd = socket(AF_INET,SOCK_DGRAM)
#收发消息
while True:
#发送消息
data = input("Msg>>")
if not data:
break
sockfd.sendto(data.encode(),addr)
#接受消息
msg,addr = sockfd.recvfrom(1024)
print("Receive from server:",msg.decode())
sockfd.close()
| [
"[email protected]"
] | |
a39c4901cd3ea3b970eac16df6e9449edf83b9bf | ed0780889408c9968f3c987fbace61aa11770ba1 | /rythmize/__init__.py | 7fbe776402cfcb70ac2e582492b9d2840232a719 | [] | no_license | maleksal/rythmize-api | b45af58c594e882dbbe248a479d6f88064332cf4 | 6d538a7eae617c32b5405c8c92f1cd4f7f42ce3c | refs/heads/main | 2023-07-18T18:40:54.718307 | 2021-09-13T14:14:17 | 2021-09-13T14:14:17 | 304,356,537 | 5 | 1 | null | 2020-11-04T13:43:10 | 2020-10-15T14:43:15 | Python | UTF-8 | Python | false | false | 875 | py | """
Create flask application.
"""
import os
from flask import Flask
from .admin import admin_settings
from .api.v1.views import api_views
from .extensions import cors, db, guard, ma, mail
from .models.user import User
def create_app(config_env):
"""Initiate app using Flask Factory Pattern."""
app = Flask(__name__)
app.config.from_object(config_env)
# Initialize extentions
db.init_app(app) # Database
ma.init_app(app) # Serilizer && Deserializer extension
guard.init_app(app, User) # Flask-praetorian
cors.init_app(app) # Flask-cors
mail.init_app(app) # Flask-Mail
# setup admin panel
admin_settings.init_app(app)
admin_settings.name = 'rythmize-panel'
admin_settings.template_mode = 'bootstrap3'
# register routes
app.register_blueprint(api_views)
return app
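
# Minimal usage sketch of the factory (the config path is an assumption, not
# part of this package):
#
# app = create_app('rythmize.config.DevConfig')
# app.run(debug=True)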
| [
"[email protected]"
] | |
433a20c2b321ceee12bcc5ba041c9f6638a6c4b4 | e2a6cc522daca1a0060644fcc487b684a0849c34 | /ecommerce_project/login_app/forms.py | ad20e0f8b1d8ef135c0e16ae8659bb72fb9fe0b8 | [] | no_license | Tanzin-Ul-Islam/Django_Ecommerce_sslcommerz | 0c341ab0f045479a8cd7fce6a736ebfa62db55c4 | fdec49a5c4ed7e943d4e7c8778c8f254117f87cd | refs/heads/main | 2023-02-08T17:19:11.283296 | 2020-12-29T22:02:31 | 2020-12-29T22:02:31 | 321,987,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | from django.forms import ModelForm
from login_app.models import User, Profile
from django.contrib.auth.forms import UserCreationForm
class ProfileForm(ModelForm):
class Meta:
model = Profile
exclude = ('user',)
class SignUpForm(UserCreationForm):
class Meta:
model = User
fields = ('email', 'password1', 'password2',) | [
"[email protected]"
] | |
dd8412c8a6aece59b5d524a5c1c11c537bc38c52 | fc29ccdcf9983a54ae2bbcba3c994a77282ae52e | /Leetcode/325-presum.py | c3e5f9dbbe27aeaad335dcc81cdb202360b980a3 | [] | no_license | linnndachen/coding-practice | d0267b197d9789ab4bcfc9eec5fb09b14c24f882 | 5e77c3d7a0632882d16dd064f0aad2667237ef37 | refs/heads/master | 2023-09-03T19:26:25.545006 | 2021-10-16T16:29:50 | 2021-10-16T16:29:50 | 299,794,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | from typing import List
class Solution:
def maxSubArrayLen(self, nums: List[int], k: int) -> int:
memo = {}
res = 0
cur_sum = 0
memo[cur_sum] = -1
for idx, val in enumerate(nums):
cur_sum += val
if cur_sum - k in memo:
res = max(res, idx - memo[cur_sum - k])
if cur_sum not in memo:
memo[cur_sum] = idx
return res | [
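
# Example usage (a quick sanity check): in [1, -1, 5, -2, 3] the longest
# subarray summing to k = 3 is [1, -1, 5, -2], so the expected answer is 4.
# Solution().maxSubArrayLen([1, -1, 5, -2, 3], 3)  # -> 4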
"[email protected]"
] | |
b7a63f283bc4352d3165fd8aae7a005711aa608d | c97b9ae1bf06757ba61f90905e4d9b9dd6498700 | /venv/Lib/site-packages/tensorflow/core/protobuf/data/experimental/snapshot_pb2.py | 6ff5b17b88d56e9937049ac3adbb5748da83bf01 | [] | no_license | Rahulk1p/image-processor | f7ceee2e3f66d10b2889b937cdfd66a118df8b5d | 385f172f7444bdbf361901108552a54979318a2d | refs/heads/main | 2023-03-27T10:09:46.080935 | 2021-03-16T13:04:02 | 2021-03-16T13:04:02 | 348,115,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:6ddffc53da063702a6f6211385bdd679c42227bb1dd87be76feeacb69fc30bb6
size 10684
| [
"[email protected]"
] | |
583cfa8145a469e7dbf7f5fa01e42d36462ea762 | c5d6e21744f10c6e57d58b57bba2763b82a9726b | /Bimestre_04_Aula_04/02_letras.py | bed4bf61752730b5ca1c49735df58909d70d1e6b | [] | no_license | valeriacavalcanti/ALP-2020-R | bf32af707d49db650deb6d122a1abdf58d94ae4f | 62e0be861ad7439b99ae5d0b0e14d97c887424c7 | refs/heads/main | 2023-05-05T02:05:00.128872 | 2021-06-04T10:30:05 | 2021-06-04T10:30:05 | 316,784,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | frase = input('Frase: ')
letras = []
for s in frase:
if ((s >= 'a' and s <= 'z') or (s >= 'A' and s <= 'Z')) and (s not in letras):
letras.append(s)
print(letras)
| [
"[email protected]"
] | |
e922ca8c459ceb36f136a75e1fe68b947faf0553 | 90d3af65fc9900f2abb7eaa7631646856e115da3 | /COMP9021/challenge/merge_strings.py | 09d6d075032614f7aafc9a7756415ff50498952b | [] | no_license | Tim-hyx/UNSW-Courses | d414b79b6c5b428be12456ba85e1757ac871535b | b7031ea9ac833b5a396e7938ef73cc335a2e37b7 | refs/heads/main | 2023-07-10T19:48:34.731340 | 2021-08-10T02:39:14 | 2021-08-10T02:39:14 | 300,894,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | # Written by Eric Martin for COMP9021
def can_merge(string_1, string_2, string_3):
if not string_1:
return string_2 == string_3
if not string_2:
return string_1 == string_3
if string_1[0] == string_3[0]\
and can_merge(string_1[1 :], string_2, string_3[1 :]):
return True
if string_2[0] == string_3[0]:
return can_merge(string_1, string_2[1 :], string_3[1 :])
return False
def report_failure():
print('No string can be merged from the other two.')
ranks = 'first', 'second', 'third'
shortest, in_between, longest =\
sorted(zip(ranks,
(input(f'Please input the {rank} string: ') for rank in ranks)
), key=lambda x: len(x[1])
)
if not longest[1]:
print('Any string can be obtained from the other two.')
elif not shortest[1]:
if in_between[1] == longest[1]:
print(f'The {in_between[0]} and {longest[0]} strings can be obtained '
'by merging the other two.'
)
else:
report_failure()
elif len(longest[1]) != len(shortest[1]) + len(in_between[1])\
or not can_merge(shortest[1], in_between[1], longest[1]):
report_failure()
else:
print(f'The {longest[0]} string can be obtained by merging the other two.')
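
# For example, entering 'ab', 'cd' and 'acbd' reports that the third string
# can be obtained by merging the other two, since can_merge('ab', 'cd', 'acbd')
# evaluates to True.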
| [
"[email protected]"
] | |
510d52809fb94163286af1dd16c5f0d892dc29df | 3b98ee18977177e10b57e6162a03204e3774d3b8 | /Kirk_Byers_Nornir_Automation/env/lib/python3.8/site-packages/nornir_napalm/plugins/tasks/__init__.py | f48b5b83218e80cf28db8c5fb893eb17186ed306 | [] | no_license | mattmiller87/practice | 0a3d1cae1283abb683dfab0af86e6c569a6104e1 | 9655a8020038e0f6dfe8df842867debac0fcb1e3 | refs/heads/master | 2022-06-23T23:47:50.350379 | 2022-06-14T13:30:51 | 2022-06-14T13:38:56 | 51,970,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | from .napalm_cli import napalm_cli
from .napalm_configure import napalm_configure
from .napalm_get import napalm_get
from .napalm_ping import napalm_ping
from .napalm_validate import napalm_validate
__all__ = (
"napalm_cli",
"napalm_configure",
"napalm_get",
"napalm_ping",
"napalm_validate",
)
| [
"[email protected]"
] | |
1684baab9978a790c1a1abaa5ba07d46c9297150 | 1798bed996931a9e7b6c9a469f86e24589fa9cf0 | /huxley/api/tests/test_committee.py | 14041809495b2be4aecd91ede104035ddcb95cdd | [
"BSD-3-Clause"
] | permissive | joannejqi/huxley | 8eae38af706b4f5d714736c99741541d2c3aae73 | b4b5cac213c9605599900eca8ed0225086a5cf4c | refs/heads/master | 2020-05-29T08:41:22.712756 | 2016-10-02T22:18:12 | 2016-10-02T22:18:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,822 | py | # Copyright (c) 2011-2015 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
from huxley.api import tests
from huxley.api.tests import auto
from huxley.utils.test import TestCommittees, TestUsers
class CommitteeDetailGetTestCase(auto.RetrieveAPIAutoTestCase):
url_name = 'api:committee_detail'
@classmethod
def get_test_object(cls):
return TestCommittees.new_committee()
def test_anonymous_user(self):
self.do_test()
class CommitteeDetailPutTestCase(tests.UpdateAPITestCase):
url_name = 'api:committee_detail'
params = {'name':'DISC',
'special':True}
def setUp(self):
self.committee = TestCommittees.new_committee()
def test_anonymous_user(self):
'''Unauthenticated users shouldn't be able to update committees.'''
response = self.get_response(self.committee.id, params=self.params)
self.assertMethodNotAllowed(response, 'PUT')
def test_authenticated_user(self):
'''Authenticated users shouldn't be able to update committees.'''
TestUsers.new_user(username='user', password='user')
self.client.login(username='user', password='user')
response = self.get_response(self.committee.id, params=self.params)
self.assertMethodNotAllowed(response, 'PUT')
def test_superuser(self):
'''Superusers shouldn't be able to update committees.'''
TestUsers.new_superuser(username='user', password='user')
self.client.login(username='user', password='user')
response = self.get_response(self.committee.id, params=self.params)
self.assertMethodNotAllowed(response, 'PUT')
class CommitteeDetailPatchTestCase(tests.PartialUpdateAPITestCase):
url_name = 'api:committee_detail'
params = {'name':'DISC',
'special':True}
def setUp(self):
self.committee = TestCommittees.new_committee()
def test_anonymous_user(self):
'''Unauthenticated users shouldn't be able to update committees.'''
response = self.get_response(self.committee.id, params=self.params)
self.assertMethodNotAllowed(response, 'PATCH')
def test_authenticated_user(self):
'''Authenticated users shouldn't be able to update committees.'''
TestUsers.new_user(username='user', password='user')
self.client.login(username='user', password='user')
response = self.get_response(self.committee.id, params=self.params)
self.assertMethodNotAllowed(response, 'PATCH')
def test_superuser(self):
'''Superusers shouldn't be able to update committees.'''
TestUsers.new_superuser(username='user', password='user')
self.client.login(username='user', password='user')
response = self.get_response(self.committee.id, params=self.params)
self.assertMethodNotAllowed(response, 'PATCH')
class CommitteeDetailDeleteTestCase(auto.DestroyAPIAutoTestCase):
url_name = 'api:committee_detail'
@classmethod
def get_test_object(cls):
return TestCommittees.new_committee()
def test_anonymous_user(self):
'''Anonymous users cannot delete committees.'''
self.do_test(expected_error=auto.EXP_DELETE_NOT_ALLOWED)
def test_authenticated_user(self):
'''Authenticated users cannot delete committees.'''
TestUsers.new_user(username='user', password='user')
self.do_test(
username='user', password='user',
expected_error=auto.EXP_DELETE_NOT_ALLOWED)
def test_superuser(self):
'''Superusers cannot delete committees.'''
TestUsers.new_superuser(username='user', password='user')
self.do_test(
username='user', password='user',
expected_error=auto.EXP_DELETE_NOT_ALLOWED)
class CommitteeListGetTestCase(tests.ListAPITestCase):
url_name = 'api:committee_list'
def test_anonymous_user(self):
'''Anyone should be able to access a list of all the committees.'''
c1 = TestCommittees.new_committee(name='DISC', delegation_size=100)
c2 = TestCommittees.new_committee(name='JCC', special=True,
delegation_size=30)
response = self.get_response()
self.assertEqual(response.data, [
{'delegation_size': c1.delegation_size,
'special': c1.special,
'id': c1.id,
'full_name': c1.full_name,
'name': c1.name},
{'delegation_size': c2.delegation_size,
'special': c2.special,
'id': c2.id,
'full_name': c2.full_name,
'name': c2.name}])
class CommitteeListPostTestCase(tests.CreateAPITestCase):
url_name = 'api:committee_list'
params = {'name': 'DISC',
'full_name': 'Disarmament and International Security',
'delegation_size': 100}
def test_anonymous_user(self):
'''Unauthenticated users shouldn't be able to create committees.'''
response = self.get_response(self.params)
self.assertMethodNotAllowed(response, 'POST')
def test_authenticated_user(self):
'''Authenticated users shouldn't be able to create committees.'''
TestUsers.new_user(username='user', password='user')
self.client.login(username='user', password='user')
response = self.get_response(self.params)
self.assertMethodNotAllowed(response, 'POST')
def test_superuser(self):
'''Superusers shouldn't be able to create committees.'''
TestUsers.new_superuser(username='user', password='user')
self.client.login(username='user', password='user')
response = self.get_response(self.params)
self.assertMethodNotAllowed(response, 'POST')
| [
"[email protected]"
] | |
acf4f07c5b846474dd3390e18f33eb6453daf203 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/dominator_20200827125530.py | 4033c37b5502fead050650b44dcd3b8bd2988b5c | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | def leader(A):
# create a dictionary the element that occurs more than n//2 times
# once I find it I return the index
store = {}
candidate = -1
for i in A:
if i in store:
store[i] +=1
else:
store[i] = 1
for i in store:
if store[i] > (len(A) // 2):
candidate = i
for k in range(len(A)):
leader([3,4,3,2,3,-1,3,3]) | [
"[email protected]"
] | |
e435b233f1d458a10d2400e3ce0e0c5739b80ec8 | d687773b56813a684454542b55953289fd2d6fb9 | /tensorflow/python/ops/linalg/linear_operator_block_lower_triangular.py | 43107c092e36d13b03290c38145b7b62a0df0dde | [
"Apache-2.0"
] | permissive | danielyou0230/tensorflow | 988cc69f47153b5d1c8045e6cc5d0852091977f5 | c7c3c5c9e602c5670c15d7a81d440ed6cc1d13f6 | refs/heads/master | 2022-12-13T04:57:08.347868 | 2020-08-31T19:10:52 | 2020-08-31T19:16:12 | 279,970,720 | 2 | 1 | Apache-2.0 | 2020-07-17T18:29:15 | 2020-07-15T20:26:27 | C++ | UTF-8 | Python | false | false | 37,270 | py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Create a blockwise lower-triangular operator from `LinearOperators`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_algebra
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorBlockLowerTriangular"]
@tf_export("linalg.LinearOperatorBlockLowerTriangular")
class LinearOperatorBlockLowerTriangular(linear_operator.LinearOperator):
"""Combines `LinearOperators` into a blockwise lower-triangular matrix.
This operator is initialized with a nested list of linear operators, which
are combined into a new `LinearOperator` whose underlying matrix
representation is square and has each operator on or below the main diagonal,
and zero's elsewhere. Each element of the outer list is a list of
`LinearOperators` corresponding to a row-partition of the blockwise structure.
The number of `LinearOperator`s in row-partition `i` must be equal to `i`.
For example, a blockwise `3 x 3` `LinearOperatorBlockLowerTriangular` is
initialized with the list `[[op_00], [op_10, op_11], [op_20, op_21, op_22]]`,
where the `op_ij`, `i < 3, j <= i`, are `LinearOperator` instances. The
`LinearOperatorBlockLowerTriangular` behaves as the following blockwise
matrix, where `0` represents appropriately-sized [batch] matrices of zeros:
```none
[[op_00, 0, 0],
[op_10, op_11, 0],
[op_20, op_21, op_22]]
```
Each `op_jj` on the diagonal is required to represent a square matrix, and
hence will have shape `batch_shape_j + [M_j, M_j]`. `LinearOperator`s in row
`j` of the blockwise structure must have `range_dimension` equal to that of
`op_jj`, and `LinearOperators` in column `j` must have `domain_dimension`
equal to that of `op_jj`.
If each `op_jj` on the diagonal has shape `batch_shape_j + [M_j, M_j]`, then
the combined operator has shape `broadcast_batch_shape + [sum M_j, sum M_j]`,
where `broadcast_batch_shape` is the mutual broadcast of `batch_shape_j`,
`j = 0, 1, ..., J`, assuming the intermediate batch shapes broadcast.
Even if the combined shape is well defined, the combined operator's
methods may fail due to lack of broadcasting ability in the defining
operators' methods.
For example, to create a 4 x 4 linear operator combined of three 2 x 2
operators:
>>> operator_0 = tf.linalg.LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
>>> operator_1 = tf.linalg.LinearOperatorFullMatrix([[1., 0.], [0., 1.]])
>>> operator_2 = tf.linalg.LinearOperatorLowerTriangular([[5., 6.], [7., 8]])
>>> operator = LinearOperatorBlockLowerTriangular(
... [[operator_0], [operator_1, operator_2]])
>>> operator.to_dense()
<tf.Tensor: shape=(4, 4), dtype=float32, numpy=
array([[1., 2., 0., 0.],
[3., 4., 0., 0.],
[1., 0., 5., 0.],
[0., 1., 7., 8.]], dtype=float32)>
>>> operator.shape
TensorShape([4, 4])
>>> operator.log_abs_determinant()
<tf.Tensor: shape=(), dtype=float32, numpy=4.3820267>
>>> x0 = [[1., 6.], [-3., 4.]]
>>> x1 = [[0., 2.], [4., 0.]]
>>> x = tf.concat([x0, x1], 0) # Shape [2, 4] Tensor
>>> operator.matmul(x)
<tf.Tensor: shape=(4, 2), dtype=float32, numpy=
array([[-5., 14.],
[-9., 34.],
[ 1., 16.],
[29., 18.]], dtype=float32)>
The above `matmul` is equivalent to:
>>> tf.concat([operator_0.matmul(x0),
... operator_1.matmul(x0) + operator_2.matmul(x1)], axis=0)
<tf.Tensor: shape=(4, 2), dtype=float32, numpy=
array([[-5., 14.],
[-9., 34.],
[ 1., 16.],
[29., 18.]], dtype=float32)>
#### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [M, N], with b >= 0
x.shape = [B1,...,Bb] + [N, R], with R >= 0.
```
For example:
Create a [2, 3] batch of 4 x 4 linear operators:
>>> matrix_44 = tf.random.normal(shape=[2, 3, 4, 4])
>>> operator_44 = tf.linalg.LinearOperatorFullMatrix(matrix_44)
Create a [1, 3] batch of 5 x 4 linear operators:
>>> matrix_54 = tf.random.normal(shape=[1, 3, 5, 4])
>>> operator_54 = tf.linalg.LinearOperatorFullMatrix(matrix_54)
Create a [1, 3] batch of 5 x 5 linear operators:
>>> matrix_55 = tf.random.normal(shape=[1, 3, 5, 5])
>>> operator_55 = tf.linalg.LinearOperatorFullMatrix(matrix_55)
Combine to create a [2, 3] batch of 9 x 9 operators:
>>> operator_99 = LinearOperatorBlockLowerTriangular(
... [[operator_44], [operator_54, operator_55]])
>>> operator_99.shape
TensorShape([2, 3, 9, 9])
Create a shape [2, 1, 9] batch of vectors and apply the operator to it.
>>> x = tf.random.normal(shape=[2, 1, 9])
>>> y = operator_99.matvec(x)
>>> y.shape
TensorShape([2, 3, 9])
Create a blockwise list of vectors and apply the operator to it. A blockwise
list is returned.
>>> x4 = tf.random.normal(shape=[2, 1, 4])
>>> x5 = tf.random.normal(shape=[2, 3, 5])
>>> y_blockwise = operator_99.matvec([x4, x5])
>>> y_blockwise[0].shape
TensorShape([2, 3, 4])
>>> y_blockwise[1].shape
TensorShape([2, 3, 5])
#### Performance
Suppose `operator` is a `LinearOperatorBlockLowerTriangular` consisting of `D`
row-partitions and `D` column-partitions, such that the total number of
operators is `N = D * (D + 1) // 2`.
* `operator.matmul` has complexity equal to the sum of the `matmul`
complexities of the individual operators.
* `operator.solve` has complexity equal to the sum of the `solve` complexities
of the operators on the diagonal and the `matmul` complexities of the
operators off the diagonal.
* `operator.determinant` has complexity equal to the sum of the `determinant`
complexities of the operators on the diagonal.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
operators,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorBlockLowerTriangular"):
r"""Initialize a `LinearOperatorBlockLowerTriangular`.
`LinearOperatorBlockLowerTriangular` is initialized with a list of lists of
operators `[[op_0], [op_1, op_2], [op_3, op_4, op_5],...]`.
Args:
operators: Iterable of iterables of `LinearOperator` objects, each with
the same `dtype`. Each element of `operators` corresponds to a row-
partition, in top-to-bottom order. The operators in each row-partition
are filled in left-to-right. For example,
`operators = [[op_0], [op_1, op_2], [op_3, op_4, op_5]]` creates a
`LinearOperatorBlockLowerTriangular` with full block structure
`[[op_0, 0, 0], [op_1, op_2, 0], [op_3, op_4, op_5]]`. The number of
operators in the `i`th row must be equal to `i`, such that each operator
falls on or below the diagonal of the blockwise structure.
`LinearOperator`s that fall on the diagonal (the last elements of each
row) must be square. The other `LinearOperator`s must have domain
dimension equal to the domain dimension of the `LinearOperator`s in the
same column-partition, and range dimension equal to the range dimension
of the `LinearOperator`s in the same row-partition.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
This will raise a `ValueError` if set to `False`.
name: A name for this `LinearOperator`.
Raises:
TypeError: If all operators do not have the same `dtype`.
ValueError: If `operators` is empty, contains an erroneous number of
elements, or contains operators with incompatible shapes.
"""
parameters = dict(
operators=operators,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name
)
# Validate operators.
check_ops.assert_proper_iterable(operators)
for row in operators:
check_ops.assert_proper_iterable(row)
operators = [list(row) for row in operators]
if not operators:
raise ValueError(
"Expected a non-empty list of operators. Found: {}".format(operators))
self._operators = operators
self._diagonal_operators = [row[-1] for row in operators]
dtype = operators[0][0].dtype
self._validate_dtype(dtype)
is_non_singular = self._validate_non_singular(is_non_singular)
self._validate_num_operators()
self._validate_operator_dimensions()
is_square = self._validate_square(is_square)
with ops.name_scope(name):
super(LinearOperatorBlockLowerTriangular, self).__init__(
dtype=dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
parameters=parameters,
name=name)
def _validate_num_operators(self):
for i, row in enumerate(self.operators):
if len(row) != i + 1:
raise ValueError(
"The `i`th row-partition (`i`th element of `operators`) must "
"contain `i` blocks (`LinearOperator` instances). Row {} contains "
"{} blocks.".format(i + 1, len(row)))
def _validate_operator_dimensions(self):
"""Check that `operators` have compatible dimensions."""
for i in range(1, len(self.operators)):
for j in range(i):
op = self.operators[i][j]
# `above_op` is the operator directly above `op` in the blockwise
# structure, in row partition `i-1`, column partition `j`. `op` should
# have the same `domain_dimension` as `above_op`.
above_op = self.operators[i - 1][j]
# `right_op` is the operator to the right of `op` in the blockwise
# structure, in row partition `i`, column partition `j+1`. `op` should
# have the same `range_dimension` as `right_op`.
right_op = self.operators[i][j + 1]
if (op.domain_dimension is not None and
above_op.domain_dimension is not None):
if op.domain_dimension != above_op.domain_dimension:
raise ValueError(
"Operator domain dimensions {} and {} must be equal to fit a "
"blockwise structure.".format(
op.domain_dimension, above_op.domain_dimension))
if (op.range_dimension is not None and
right_op.range_dimension is not None):
if op.range_dimension != right_op.range_dimension:
raise ValueError(
"Operator range dimensions {} and {} must be equal to fit a "
"blockwise structure.".format(
op.range_dimension, right_op.range_dimension))
# pylint: disable=g-bool-id-comparison
def _validate_non_singular(self, is_non_singular):
if all(op.is_non_singular for op in self._diagonal_operators):
if is_non_singular is False:
raise ValueError(
"A blockwise lower-triangular operator with non-singular operators "
" on the main diagonal is always non-singular.")
return True
if any(op.is_non_singular is False for op in self._diagonal_operators):
if is_non_singular is True:
raise ValueError(
"A blockwise lower-triangular operator with a singular operator on "
"the main diagonal is always singular.")
return False
def _validate_square(self, is_square):
if is_square is False:
raise ValueError("`LinearOperatorBlockLowerTriangular` must be square.")
if any(op.is_square is False for op in self._diagonal_operators):
raise ValueError(
"Matrices on the diagonal (the final elements of each row-partition "
"in the `operators` list) must be square.")
return True
# pylint: enable=g-bool-id-comparison
def _validate_dtype(self, dtype):
for i, row in enumerate(self.operators):
for operator in row:
if operator.dtype != dtype:
          name_type = [str((o.name, o.dtype)) for o in row]
raise TypeError(
"Expected all operators to have the same dtype. Found {} in row "
"{} and {} in row 0.".format(name_type, i, str(dtype)))
@property
def operators(self):
return self._operators
def _block_range_dimensions(self):
return [op.range_dimension for op in self._diagonal_operators]
def _block_domain_dimensions(self):
return [op.domain_dimension for op in self._diagonal_operators]
def _block_range_dimension_tensors(self):
return [op.range_dimension_tensor() for op in self._diagonal_operators]
def _block_domain_dimension_tensors(self):
return [op.domain_dimension_tensor() for op in self._diagonal_operators]
def _shape(self):
# Get final matrix shape.
domain_dimension = sum(self._block_domain_dimensions())
range_dimension = sum(self._block_range_dimensions())
matrix_shape = tensor_shape.TensorShape([domain_dimension, range_dimension])
# Get broadcast batch shape.
# broadcast_shape checks for compatibility.
batch_shape = self.operators[0][0].batch_shape
for row in self.operators[1:]:
for operator in row:
batch_shape = common_shapes.broadcast_shape(
batch_shape, operator.batch_shape)
return batch_shape.concatenate(matrix_shape)
def _shape_tensor(self):
# Avoid messy broadcasting if possible.
if self.shape.is_fully_defined():
return ops.convert_to_tensor_v2_with_dispatch(
self.shape.as_list(), dtype=dtypes.int32, name="shape")
domain_dimension = sum(self._block_domain_dimension_tensors())
range_dimension = sum(self._block_range_dimension_tensors())
matrix_shape = array_ops.stack([domain_dimension, range_dimension])
batch_shape = self.operators[0][0].batch_shape_tensor()
for row in self.operators[1:]:
for operator in row:
batch_shape = array_ops.broadcast_dynamic_shape(
batch_shape, operator.batch_shape_tensor())
return array_ops.concat((batch_shape, matrix_shape), 0)
def matmul(self, x, adjoint=False, adjoint_arg=False, name="matmul"):
"""Transform [batch] matrix `x` with left multiplication: `x --> Ax`.
```python
# Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
operator = LinearOperator(...)
operator.shape = [..., M, N]
X = ... # shape [..., N, R], batch matrix, R > 0.
Y = operator.matmul(X)
Y.shape
==> [..., M, R]
Y[..., :, r] = sum_j A[..., :, j] X[j, r]
```
Args:
x: `LinearOperator`, `Tensor` with compatible shape and same `dtype` as
`self`, or a blockwise iterable of `LinearOperator`s or `Tensor`s. See
class docstring for definition of shape compatibility.
adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.
adjoint_arg: Python `bool`. If `True`, compute `A x^H` where `x^H` is
the hermitian transpose (transposition and complex conjugation).
name: A name for this `Op`.
Returns:
A `LinearOperator` or `Tensor` with shape `[..., M, R]` and same `dtype`
as `self`, or if `x` is blockwise, a list of `Tensor`s with shapes that
concatenate to `[..., M, R]`.
"""
if isinstance(x, linear_operator.LinearOperator):
left_operator = self.adjoint() if adjoint else self
right_operator = x.adjoint() if adjoint_arg else x
if (right_operator.range_dimension is not None and
left_operator.domain_dimension is not None and
right_operator.range_dimension != left_operator.domain_dimension):
raise ValueError(
"Operators are incompatible. Expected `x` to have dimension"
" {} but got {}.".format(
left_operator.domain_dimension, right_operator.range_dimension))
with self._name_scope(name):
return linear_operator_algebra.matmul(left_operator, right_operator)
with self._name_scope(name):
arg_dim = -1 if adjoint_arg else -2
block_dimensions = (self._block_range_dimensions() if adjoint
else self._block_domain_dimensions())
if linear_operator_util.arg_is_blockwise(block_dimensions, x, arg_dim):
for i, block in enumerate(x):
if not isinstance(block, linear_operator.LinearOperator):
block = ops.convert_to_tensor_v2_with_dispatch(block)
self._check_input_dtype(block)
block_dimensions[i].assert_is_compatible_with(block.shape[arg_dim])
x[i] = block
else:
x = ops.convert_to_tensor_v2_with_dispatch(x, name="x")
self._check_input_dtype(x)
op_dimension = (self.range_dimension if adjoint
else self.domain_dimension)
op_dimension.assert_is_compatible_with(x.shape[arg_dim])
return self._matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
arg_dim = -1 if adjoint_arg else -2
block_dimensions = (self._block_range_dimensions() if adjoint
else self._block_domain_dimensions())
blockwise_arg = linear_operator_util.arg_is_blockwise(
block_dimensions, x, arg_dim)
if blockwise_arg:
split_x = x
else:
split_dim = -1 if adjoint_arg else -2
# Split input by columns if adjoint_arg is True, else rows
split_x = linear_operator_util.split_arg_into_blocks(
self._block_domain_dimensions(),
self._block_domain_dimension_tensors,
x, axis=split_dim)
result_list = []
# Iterate over row-partitions (i.e. column-partitions of the adjoint).
if adjoint:
for index in range(len(self.operators)):
# Begin with the operator on the diagonal and apply it to the
# respective `rhs` block.
result = self.operators[index][index].matmul(
split_x[index], adjoint=adjoint, adjoint_arg=adjoint_arg)
# Iterate top to bottom over the operators in the remainder of the
# column-partition (i.e. left to right over the row-partition of the
# adjoint), apply the operator to the respective `rhs` block and
# accumulate the sum. For example, given the
# `LinearOperatorBlockLowerTriangular`:
#
# op = [[A, 0, 0],
# [B, C, 0],
# [D, E, F]]
#
# if `index = 1`, the following loop calculates:
# `y_1 = (C.matmul(x_1, adjoint=adjoint) +
# E.matmul(x_2, adjoint=adjoint)`,
# where `x_1` and `x_2` are splits of `x`.
for j in range(index + 1, len(self.operators)):
result += self.operators[j][index].matmul(
split_x[j], adjoint=adjoint, adjoint_arg=adjoint_arg)
result_list.append(result)
else:
for row in self.operators:
# Begin with the left-most operator in the row-partition and apply it
# to the first `rhs` block.
result = row[0].matmul(
split_x[0], adjoint=adjoint, adjoint_arg=adjoint_arg)
# Iterate left to right over the operators in the remainder of the row
# partition, apply the operator to the respective `rhs` block, and
# accumulate the sum.
for j, operator in enumerate(row[1:]):
result += operator.matmul(
split_x[j + 1], adjoint=adjoint, adjoint_arg=adjoint_arg)
result_list.append(result)
if blockwise_arg:
return result_list
result_list = linear_operator_util.broadcast_matrix_batch_dims(
result_list)
return array_ops.concat(result_list, axis=-2)
def matvec(self, x, adjoint=False, name="matvec"):
"""Transform [batch] vector `x` with left multiplication: `x --> Ax`.
```python
# Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
operator = LinearOperator(...)
X = ... # shape [..., N], batch vector
Y = operator.matvec(X)
Y.shape
==> [..., M]
Y[..., :] = sum_j A[..., :, j] X[..., j]
```
Args:
x: `Tensor` with compatible shape and same `dtype` as `self`, or an
iterable of `Tensor`s. `Tensor`s are treated a [batch] vectors, meaning
for every set of leading dimensions, the last dimension defines a
vector.
See class docstring for definition of compatibility.
adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.
name: A name for this `Op`.
Returns:
A `Tensor` with shape `[..., M]` and same `dtype` as `self`.
"""
with self._name_scope(name):
block_dimensions = (self._block_range_dimensions() if adjoint
else self._block_domain_dimensions())
if linear_operator_util.arg_is_blockwise(block_dimensions, x, -1):
for i, block in enumerate(x):
if not isinstance(block, linear_operator.LinearOperator):
block = ops.convert_to_tensor_v2_with_dispatch(block)
self._check_input_dtype(block)
block_dimensions[i].assert_is_compatible_with(block.shape[-1])
x[i] = block
x_mat = [block[..., array_ops.newaxis] for block in x]
y_mat = self.matmul(x_mat, adjoint=adjoint)
return [array_ops.squeeze(y, axis=-1) for y in y_mat]
x = ops.convert_to_tensor_v2_with_dispatch(x, name="x")
self._check_input_dtype(x)
op_dimension = (self.range_dimension if adjoint
else self.domain_dimension)
op_dimension.assert_is_compatible_with(x.shape[-1])
x_mat = x[..., array_ops.newaxis]
y_mat = self.matmul(x_mat, adjoint=adjoint)
return array_ops.squeeze(y_mat, axis=-1)
def _determinant(self):
if all(op.is_positive_definite for op in self._diagonal_operators):
return math_ops.exp(self._log_abs_determinant())
result = self._diagonal_operators[0].determinant()
for op in self._diagonal_operators[1:]:
result *= op.determinant()
return result
def _log_abs_determinant(self):
result = self._diagonal_operators[0].log_abs_determinant()
for op in self._diagonal_operators[1:]:
result += op.log_abs_determinant()
return result
def solve(self, rhs, adjoint=False, adjoint_arg=False, name="solve"):
"""Solve (exact or approx) `R` (batch) systems of equations: `A X = rhs`.
The returned `Tensor` will be close to an exact solution if `A` is well
conditioned. Otherwise closeness will vary. See class docstring for details.
Given the blockwise `n + 1`-by-`n + 1` linear operator:
op = [[A_00 0 ... 0 ... 0],
[A_10 A_11 ... 0 ... 0],
...
[A_k0 A_k1 ... A_kk ... 0],
...
[A_n0 A_n1 ... A_nk ... A_nn]]
we find `x = op.solve(y)` by observing that
`y_k = A_k0.matmul(x_0) + A_k1.matmul(x_1) + ... + A_kk.matmul(x_k)`
and therefore
`x_k = A_kk.solve(y_k -
A_k0.matmul(x_0) - ... - A_k(k-1).matmul(x_(k-1)))`
where `x_k` and `y_k` are the `k`th blocks obtained by decomposing `x`
and `y` along their appropriate axes.
We first solve `x_0 = A_00.solve(y_0)`. Proceeding inductively, we solve
for `x_k`, `k = 1..n`, given `x_0..x_(k-1)`.
The adjoint case is solved similarly, beginning with
`x_n = A_nn.solve(y_n, adjoint=True)` and proceeding backwards.
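For example, with two partitions, `op = [[A, 0], [B, C]]` and
`y = [y_0, y_1]`, this gives `x_0 = A.solve(y_0)` followed by
`x_1 = C.solve(y_1 - B.matmul(x_0))`.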
Examples:
```python
# Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
operator = LinearOperator(...)
operator.shape = [..., M, N]
# Solve R > 0 linear systems for every member of the batch.
RHS = ... # shape [..., M, R]
X = operator.solve(RHS)
# X[..., :, r] is the solution to the r'th linear system
# sum_j A[..., :, j] X[..., j, r] = RHS[..., :, r]
operator.matmul(X)
==> RHS
```
Args:
rhs: `Tensor` with same `dtype` as this operator and compatible shape,
or a list of `Tensor`s. `Tensor`s are treated like a [batch] matrices
meaning for every set of leading dimensions, the last two dimensions
defines a matrix.
See class docstring for definition of compatibility.
adjoint: Python `bool`. If `True`, solve the system involving the adjoint
of this `LinearOperator`: `A^H X = rhs`.
adjoint_arg: Python `bool`. If `True`, solve `A X = rhs^H` where `rhs^H`
is the hermitian transpose (transposition and complex conjugation).
name: A name scope to use for ops added by this method.
Returns:
`Tensor` with shape `[...,N, R]` and same `dtype` as `rhs`.
Raises:
NotImplementedError: If `self.is_non_singular` or `is_square` is False.
"""
if self.is_non_singular is False:
raise NotImplementedError(
"Exact solve not implemented for an operator that is expected to "
"be singular.")
if self.is_square is False:
raise NotImplementedError(
"Exact solve not implemented for an operator that is expected to "
"not be square.")
if isinstance(rhs, linear_operator.LinearOperator):
left_operator = self.adjoint() if adjoint else self
right_operator = rhs.adjoint() if adjoint_arg else rhs
if (right_operator.range_dimension is not None and
left_operator.domain_dimension is not None and
right_operator.range_dimension != left_operator.domain_dimension):
raise ValueError(
"Operators are incompatible. Expected `rhs` to have dimension"
" {} but got {}.".format(
left_operator.domain_dimension, right_operator.range_dimension))
with self._name_scope(name):
return linear_operator_algebra.solve(left_operator, right_operator)
with self._name_scope(name):
block_dimensions = (self._block_domain_dimensions() if adjoint
else self._block_range_dimensions())
arg_dim = -1 if adjoint_arg else -2
blockwise_arg = linear_operator_util.arg_is_blockwise(
block_dimensions, rhs, arg_dim)
if blockwise_arg:
for i, block in enumerate(rhs):
if not isinstance(block, linear_operator.LinearOperator):
block = ops.convert_to_tensor_v2_with_dispatch(block)
self._check_input_dtype(block)
block_dimensions[i].assert_is_compatible_with(block.shape[arg_dim])
rhs[i] = block
if adjoint_arg:
split_rhs = [linalg.adjoint(y) for y in rhs]
else:
split_rhs = rhs
else:
rhs = ops.convert_to_tensor_v2_with_dispatch(rhs, name="rhs")
self._check_input_dtype(rhs)
op_dimension = (self.domain_dimension if adjoint
else self.range_dimension)
op_dimension.assert_is_compatible_with(rhs.shape[arg_dim])
rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
split_rhs = linear_operator_util.split_arg_into_blocks(
self._block_domain_dimensions(),
self._block_domain_dimension_tensors,
rhs, axis=-2)
solution_list = []
if adjoint:
# For an adjoint blockwise lower-triangular linear operator, the system
# must be solved bottom to top. Iterate backwards over rows of the
# adjoint (i.e. columns of the non-adjoint operator).
for index in reversed(range(len(self.operators))):
y = split_rhs[index]
# Iterate top to bottom over the operators in the off-diagonal portion
# of the column-partition (i.e. row-partition of the adjoint), apply
# the operator to the respective block of the solution found in
# previous iterations, and subtract the result from the `rhs` block.
# For example,let `A`, `B`, and `D` be the linear operators in the top
# row-partition of the adjoint of
# `LinearOperatorBlockLowerTriangular([[A], [B, C], [D, E, F]])`,
# and `x_1` and `x_2` be blocks of the solution found in previous
# iterations of the outer loop. The following loop (when `index == 0`)
# expresses
# `Ax_0 + Bx_1 + Dx_2 = y_0` as `Ax_0 = y_0*`, where
# `y_0* = y_0 - Bx_1 - Dx_2`.
for j in reversed(range(index + 1, len(self.operators))):
y -= self.operators[j][index].matmul(
solution_list[len(self.operators) - 1 - j],
adjoint=adjoint)
# Continuing the example above, solve `Ax_0 = y_0*` for `x_0`.
solution_list.append(
self._diagonal_operators[index].solve(y, adjoint=adjoint))
solution_list.reverse()
else:
# Iterate top to bottom over the row-partitions.
for row, y in zip(self.operators, split_rhs):
# Iterate left to right over the operators in the off-diagonal portion
# of the row-partition, apply the operator to the block of the
# solution found in previous iterations, and subtract the result from
# the `rhs` block. For example, let `D`, `E`, and `F` be the linear
# operators in the bottom row-partition of
# `LinearOperatorBlockLowerTriangular([[A], [B, C], [D, E, F]])` and
# `x_0` and `x_1` be blocks of the solution found in previous
# iterations of the outer loop. The following loop
# (when `index == 2`), expresses
# `Dx_0 + Ex_1 + Fx_2 = y_2` as `Fx_2 = y_2*`, where
# `y_2* = y_2 - D_x0 - Ex_1`.
for i, operator in enumerate(row[:-1]):
y -= operator.matmul(solution_list[i], adjoint=adjoint)
# Continuing the example above, solve `Fx_2 = y_2*` for `x_2`.
solution_list.append(row[-1].solve(y, adjoint=adjoint))
if blockwise_arg:
return solution_list
solution_list = linear_operator_util.broadcast_matrix_batch_dims(
solution_list)
return array_ops.concat(solution_list, axis=-2)
def solvevec(self, rhs, adjoint=False, name="solve"):
"""Solve single equation with best effort: `A X = rhs`.
The returned `Tensor` will be close to an exact solution if `A` is well
conditioned. Otherwise closeness will vary. See class docstring for details.
Examples:
```python
# Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
operator = LinearOperator(...)
operator.shape = [..., M, N]
# Solve one linear system for every member of the batch.
RHS = ... # shape [..., M]
X = operator.solvevec(RHS)
# X is the solution to the linear system
# sum_j A[..., :, j] X[..., j] = RHS[..., :]
operator.matvec(X)
==> RHS
```
Args:
rhs: `Tensor` with same `dtype` as this operator, or list of `Tensor`s
(for blockwise operators). `Tensor`s are treated as [batch] vectors,
meaning for every set of leading dimensions, the last dimension defines
a vector. See class docstring for definition of compatibility regarding
batch dimensions.
adjoint: Python `bool`. If `True`, solve the system involving the adjoint
of this `LinearOperator`: `A^H X = rhs`.
name: A name scope to use for ops added by this method.
Returns:
`Tensor` with shape `[...,N]` and same `dtype` as `rhs`.
Raises:
NotImplementedError: If `self.is_non_singular` or `is_square` is False.
"""
with self._name_scope(name):
block_dimensions = (self._block_domain_dimensions() if adjoint
else self._block_range_dimensions())
if linear_operator_util.arg_is_blockwise(block_dimensions, rhs, -1):
for i, block in enumerate(rhs):
if not isinstance(block, linear_operator.LinearOperator):
block = ops.convert_to_tensor_v2_with_dispatch(block)
self._check_input_dtype(block)
block_dimensions[i].assert_is_compatible_with(block.shape[-1])
rhs[i] = block
rhs_mat = [array_ops.expand_dims(block, axis=-1) for block in rhs]
solution_mat = self.solve(rhs_mat, adjoint=adjoint)
return [array_ops.squeeze(x, axis=-1) for x in solution_mat]
rhs = ops.convert_to_tensor_v2_with_dispatch(rhs, name="rhs")
self._check_input_dtype(rhs)
op_dimension = (self.domain_dimension if adjoint
else self.range_dimension)
op_dimension.assert_is_compatible_with(rhs.shape[-1])
rhs_mat = array_ops.expand_dims(rhs, axis=-1)
solution_mat = self.solve(rhs_mat, adjoint=adjoint)
return array_ops.squeeze(solution_mat, axis=-1)
def _diag_part(self):
diag_list = []
for op in self._diagonal_operators:
# Extend the axis, since `broadcast_matrix_batch_dims` treats all but the
# final two dimensions as batch dimensions.
diag_list.append(op.diag_part()[..., array_ops.newaxis])
diag_list = linear_operator_util.broadcast_matrix_batch_dims(diag_list)
diagonal = array_ops.concat(diag_list, axis=-2)
return array_ops.squeeze(diagonal, axis=-1)
def _trace(self):
result = self._diagonal_operators[0].trace()
for op in self._diagonal_operators[1:]:
result += op.trace()
return result
def _to_dense(self):
num_cols = 0
dense_rows = []
flat_broadcast_operators = linear_operator_util.broadcast_matrix_batch_dims(
[op.to_dense() for row in self.operators for op in row]) # pylint: disable=g-complex-comprehension
broadcast_operators = [
flat_broadcast_operators[i * (i + 1) // 2:(i + 1) * (i + 2) // 2]
for i in range(len(self.operators))]
for row_blocks in broadcast_operators:
batch_row_shape = array_ops.shape(row_blocks[0])[:-1]
num_cols += array_ops.shape(row_blocks[-1])[-1]
zeros_to_pad_after_shape = array_ops.concat(
[batch_row_shape,
[self.domain_dimension_tensor() - num_cols]], axis=-1)
zeros_to_pad_after = array_ops.zeros(
shape=zeros_to_pad_after_shape, dtype=self.dtype)
row_blocks.append(zeros_to_pad_after)
dense_rows.append(array_ops.concat(row_blocks, axis=-1))
mat = array_ops.concat(dense_rows, axis=-2)
mat.set_shape(self.shape)
return mat
def _assert_non_singular(self):
return control_flow_ops.group([
op.assert_non_singular() for op in self._diagonal_operators])
def _eigvals(self):
eig_list = []
for op in self._diagonal_operators:
# Extend the axis for broadcasting.
eig_list.append(op.eigvals()[..., array_ops.newaxis])
eig_list = linear_operator_util.broadcast_matrix_batch_dims(eig_list)
eigs = array_ops.concat(eig_list, axis=-2)
return array_ops.squeeze(eigs, axis=-1)
| [
"[email protected]"
] | |
0fe855a596925a8feaac09b26bd6830c252b6375 | d996edcd595c565c5725a16286ce8d338af67246 | /src/rl/environments/bandit.py | 40ded8fedcd9633048d32e37660a86d830ceaa5b | [] | no_license | preddy5/dltemplate | fbbfce7660c451495e255cf8d8437e4b4e207f9c | 77b04b767cbd4914e0a3d3609c645e475aabcc43 | refs/heads/master | 2020-04-28T19:37:04.893001 | 2019-03-13T13:35:04 | 2019-03-13T13:35:04 | 175,517,056 | 1 | 1 | null | 2019-03-13T23:59:40 | 2019-03-13T23:59:39 | null | UTF-8 | Python | false | false | 3,429 | py | """
From https://courses.edx.org/courses/course-v1:Microsoft+DAT257x+2T2018/course/ (errors and redundant code included ;)
"""
import numpy as np
import sys
# Interface
class Environment(object):
def reset(self):
raise NotImplementedError('Subclasses must override reset.')
def actions(self):
raise NotImplementedError('Subclasses must override actions.')
def step(self, action):
raise NotImplementedError('Subclasses must override step.')
class ActionSpace(object):
def __init__(self, actions):
self.actions = actions
self.n = len(actions)
# BanditEnv Environment
class BanditEnv(Environment):
    def __init__(self, n_actions=10, distribution='bernoulli', evaluation_seed=387):
super(BanditEnv, self).__init__()
self.action_space = ActionSpace(range(n_actions))
self.distribution = distribution
        np.random.seed(evaluation_seed)
self.is_reset = False
self.reward_parameters = None
if distribution == 'bernoulli':
self.reward_parameters = np.random.rand(n_actions)
elif distribution == 'normal':
self.reward_parameters = (np.random.randn(n_actions), np.random.rand(n_actions))
elif distribution == 'heavy-tail':
self.reward_parameters = np.random.rand(n_actions)
else:
print('Please use a supported reward distribution', flush=True)
sys.exit(0)
        if distribution == 'normal':
            # For 'normal', reward_parameters is a (means, stddevs) tuple.
            self.optimal_arm = np.argmax(self.reward_parameters[0])
        else:
            self.optimal_arm = np.argmax(self.reward_parameters)
def reset(self):
self.is_reset = True
def actions(self):
return range(self.action_space.n)
def compute_gap(self, action):
if self.distribution == 'normal':
gap = np.abs(self.reward_parameters[0][self.optimal_arm] - self.reward_parameters[0][action])
else:
gap = np.abs(self.reward_parameters[self.optimal_arm] - self.reward_parameters[action])
return gap
def step(self, action):
self.is_reset = False
valid_action = True
reward = 0
# gap = 0
if action is None or action < 0 or action >= self.action_space.n:
print('Algorithm chose an invalid action; reset reward to -inf', flush=True)
reward = float('-inf')
# gap = float('inf')
valid_action = False
if self.distribution == 'bernoulli':
if valid_action:
reward = np.random.binomial(1, self.reward_parameters[action])
# gap = self.reward_parameters[self.optimal_arm] - self.reward_parameters[action]
elif self.distribution == 'normal':
if valid_action:
reward = self.reward_parameters[0][action] + self.reward_parameters[1][action] * np.random.randn()
# gap = self.reward_parameters[0][self.optimal_arm] - self.reward_parameters[0][action]
        elif self.distribution == 'heavy-tail':
if valid_action:
reward = self.reward_parameters[action] + np.random.standard_cauchy()
# gap = self.reward_parameters[self.optimal_arm] - self.reward_parameters[action]
else:
print('Please use a supported reward distribution', flush=True)
sys.exit(0)
return None, reward, self.is_reset, ''
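# Illustrative usage sketch (argument values below are arbitrary examples):
if __name__ == '__main__':
    env = BanditEnv(n_actions=5, distribution='bernoulli')
    env.reset()
    for action in env.actions():
        _, reward, _, _ = env.step(action)
        print('action:', action, 'reward:', reward)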
| [
"[email protected]"
] | |
a89a60b286af90a01447471f18579f1512a3c20b | a8314fb4e71a229f2288ca0588bbb3ebd58b7db0 | /leet/merge_two_sorted_lists/main.py | 810455adabdede95f2ff4ba1f28c025393049b90 | [] | no_license | blhwong/algos_py | 6fc72f1c15fe04f760a199535a0df7769f6abbe6 | 9b54ad6512cf0464ecdd084d899454a99abd17b2 | refs/heads/master | 2023-08-30T17:45:51.862913 | 2023-07-24T18:56:38 | 2023-07-24T18:56:38 | 264,782,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py | from data_structures.list_node import ListNode
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
curr1 = l1
curr2 = l2
head = None
tail = None
        def add_to_tail(val):
            nonlocal head
            nonlocal tail
            if not head:
                head = ListNode(val)
                tail = head
            else:
                tail.next = ListNode(val)
                tail = tail.next
while curr1 and curr2:
if curr1.val < curr2.val:
add_to_tail(curr1.val)
curr1 = curr1.next
else:
add_to_tail(curr2.val)
curr2 = curr2.next
if curr1:
if tail:
tail.next = curr1
else:
return curr1
elif curr2:
if tail:
tail.next = curr2
else:
return curr2
return head
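# Illustrative check (assumes ListNode(val) builds a node whose .next defaults
# to None, matching the usage above):
def _build(vals):
    head, tail = None, None
    for v in vals:
        node = ListNode(v)
        if head is None:
            head = node
        else:
            tail.next = node
        tail = node
    return head
# Merging 1->2->4 with 1->3->4 yields 1->1->2->3->4->4:
# merged = Solution().mergeTwoLists(_build([1, 2, 4]), _build([1, 3, 4]))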
| [
"[email protected]"
] | |
29262b4bc686b91c5cee62b93f08b580f4d31284 | e838076bc1c8aedbb8c77710b1a1a32efc3a4da1 | /site_selection/apps.py | b4f50ec62e3c821e21b1a8a29ac8fa68633ac19f | [] | no_license | abbasgis/ferrp | 5f2f7768f0e38e299498c2e74379311698b6321f | 77736c33e7ec82b6adf247a1bf30ccbc4897f02e | refs/heads/master | 2023-05-25T09:59:45.185025 | 2021-06-12T09:15:07 | 2021-06-12T09:15:07 | 376,236,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | from django.apps import AppConfig
class SiteSelectionConfig(AppConfig):
name = 'site_selection'
| [
"abbas123@abc"
] | abbas123@abc |
88748958f155fdf6f5309640b3a89f748bad225e | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_MovingAverage_Seasonal_Second_SVR.py | 14fcfd600f9d3e0f16af66b34160eb3958b35e58 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 163 | py | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Logit'] , ['MovingAverage'] , ['Seasonal_Second'] , ['SVR'] ); | [
"[email protected]"
] | |
712e14f4ca6830112d3e199b7a2dddaf97f50512 | 9cc1b58d0319308da98187d071295b2fabf1f080 | /0608/a0608_03_matplotlib模組試用2.py | 97333bfaef57ef352a284ad83d82ab346c07fd2f | [
"MIT"
] | permissive | Arwen0905/Python_Test | 60d1dee383c9cf27df6b93cfde7884c91092229c | c75357e4354a684a9fae41f751dae60d4cf0716c | refs/heads/master | 2023-01-13T13:14:55.355898 | 2020-10-31T18:52:07 | 2020-10-31T18:52:07 | 265,150,874 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | import matplotlib.pyplot as plt
import random
# list_x1 = [1,5,7,9,13,16]
# list_y1 = [15,50,80,40,70,50]
# list_x2 = [2,6,8,11,14,16]
# list_y2 = [10,40,30,50,80,60]
list_x1 = []
list_y1 = []
list_x2 = []
list_y2 = []
for i in range(4):
list_x1.append(random.randint(1,50))
list_y1.append(random.randint(1,50))
for i in range(6):
list_x2.append(random.randint(1,50))
list_y2.append(random.randint(1,50))
plt.plot(list_x1, list_y1,color="#ffff55",linewidth="5",\
linestyle="-.",label=list_x2)
plt.plot(list_x2, list_y2, color="#ff2244",linewidth=5,\
linestyle=":",label=list_y2)
# Show the legend
plt.legend()
# plt.xlim(0,18)  # set the visible range of the x axis
# plt.ylim(0,120)  # set the visible range of the y axis
plt.title("Pocket Money")  # chart title (shown at the top)
plt.xlabel("Age")  # x-axis label
plt.ylabel("Money")  # y-axis label
plt.gca().set_facecolor('black')  # background color
plt.show()
| [
"[email protected]"
] | |
3f9e0de70db659c1bc01acd9909894dc74a0c1a2 | a87eed5b49858ee547c2363a9d29a5c625db254f | /examples/log_requests.py | 035cc4e95d5243acbcfef0c810012b8089ee290d | [
"BSD-2-Clause"
] | permissive | parkerhancock/requests-cache | 3a58d8829eba27796dd551d98d93237f24fd2179 | e3ae526cba37a4ea2d8a48b05aaeff062847c644 | refs/heads/master | 2023-05-31T11:59:29.964100 | 2021-07-09T21:01:11 | 2021-07-09T21:01:11 | 371,452,485 | 1 | 0 | BSD-2-Clause | 2021-07-09T14:03:50 | 2021-05-27T17:25:43 | Python | UTF-8 | Python | false | false | 1,522 | py | #!/usr/bin/env python3
"""
An example of testing the cache to prove that it's not making more requests than expected.
"""
from contextlib import contextmanager
from logging import basicConfig, getLogger
from unittest.mock import patch
import requests
from requests_cache import CachedSession
from requests_cache.session import OriginalSession, set_response_defaults
basicConfig(level='INFO')
logger = getLogger('requests_cache.examples')
# Uncomment for more verbose debug output
# getLogger('requests_cache').setLevel('DEBUG')
@contextmanager
def log_requests():
"""Context manager that mocks and logs all non-cached requests"""
real_response = set_response_defaults(requests.get('http://httpbin.org/get'))
with patch.object(OriginalSession, 'send', return_value=real_response) as mock_send:
session = CachedSession('cache-test', backend='sqlite')
session.cache.clear()
yield session
cached_responses = session.cache.responses.values()
logger.debug('All calls to Session._request():')
logger.debug(mock_send.mock_calls)
logger.info(f'Responses cached: {len(cached_responses)}')
logger.info(f'Requests sent: {mock_send.call_count}')
def main():
"""Example usage; replace with any other requests you want to test"""
with log_requests() as session:
for i in range(10):
response = session.get('http://httpbin.org/get')
logger.debug(f'Response {i}: {type(response).__name__}')
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
85ed8f1963ad348e607ad90fca7242976a2638a6 | eb99769b7c9e0eb1cf3b88878934a400ba42f0bf | /users/migrations/0002_auto_20180614_1023.py | 7649e59641c40a952777371d5d47701aa6f2a3bf | [] | no_license | Levalife/petsterr2.0 | 3657b200b9e236b81896f4ac104932e85517ceb3 | 43d20e65362596d72942fe624c29fd4f84d90f9a | refs/heads/master | 2023-01-13T04:58:23.496527 | 2018-09-13T09:50:48 | 2018-09-13T09:50:48 | 203,134,329 | 0 | 0 | null | 2023-01-05T21:55:18 | 2019-08-19T08:48:32 | Python | UTF-8 | Python | false | false | 3,743 | py | # Generated by Django 2.0.6 on 2018-06-14 10:23
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('countries', '0003_auto_20180614_1023'),
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='api_key',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='userprofile',
name='country',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='countries.Country'),
),
migrations.AddField(
model_name='userprofile',
name='date_of_birth',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='userprofile',
name='facebook_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AddField(
model_name='userprofile',
name='gender',
field=models.CharField(blank=True, choices=[('male', 'male'), ('female', 'female')], max_length=255, null=True),
),
migrations.AddField(
model_name='userprofile',
name='google_access_key',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AddField(
model_name='userprofile',
name='locale',
field=models.CharField(blank=True, default='en', max_length=10, null=True),
),
migrations.AddField(
model_name='userprofile',
name='phone_number',
field=models.CharField(blank=True, max_length=17, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')]),
),
migrations.AddField(
model_name='userprofile',
name='picture',
field=models.ImageField(blank=True, null=True, upload_to='users/pictures/%Y/%m/%d'),
),
migrations.AddField(
model_name='userprofile',
name='premium',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='userprofile',
name='referral_code',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AddField(
model_name='userprofile',
name='timezone',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='userprofile',
name='twitter_access_key',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AddField(
model_name='userprofile',
name='twitter_access_secret',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AddField(
model_name='userprofile',
name='twitter_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AddField(
model_name='userprofile',
name='user_ip',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterModelTable(
name='userprofile',
table='user_profiles',
),
]
| [
"[email protected]"
] | |
b58bc1f53f284b177d9a168c3cd8522e9ce5c134 | 30d02ec6dd309dced011d266ca40bace293fb23e | /20210315/swapping_nodes_in_a_linked_list.py | 6c72e9f8fb965b8d2a1629cd4d2d42b7e2144379 | [] | no_license | jyeoniii/algorithm | b72f5e9f7fe63098c251bcc1585787ba39ca750c | 7d80e27aec8fbac936911ee78a92c47b00daa3ba | refs/heads/master | 2023-04-15T01:39:41.149528 | 2021-04-22T13:55:58 | 2021-04-22T13:55:58 | 316,533,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,747 | py | # https://leetcode.com/explore/challenge/card/march-leetcoding-challenge-2021/589/week-2-march-8th-march-14th/3671/
from common.common_data import ListNode
class Solution:
    # Two passes: record the k-th node from the front while measuring the
    # length, then walk length - k steps to the k-th node from the end and
    # swap the two values.
    def swapNodes(self, head: ListNode, k: int) -> ListNode:
length = 0
node, node1 = head, None
while node:
length += 1
if length == k:
node1 = node
node = node.next
node = head
while length - k > 0:
node = node.next
length -= 1
node2 = node
node1.val, node2.val = node2.val, node1.val
return head
class Solution:
    # One pass with two pointers: `fast` leads by k - 1 steps, so when it
    # reaches the tail, `slow` sits on the k-th node from the end.
    def swapNodes(self, head: ListNode, k: int) -> ListNode:
slow, fast = head, head
while k > 1:
fast = fast.next
k -= 1
node1 = fast
while fast.next:
slow, fast = slow.next, fast.next
node2 = slow
node1.val, node2.val = node2.val, node1.val
return head
# Swapping node itself, not just a value
class Solution:
    # Relinks the nodes themselves (not just their values), using a
    # predecessor map to repair the links around both swapped nodes.
    def swapNodes(self, head: ListNode, k: int) -> ListNode:
prev, nodes = {}, []
node = head
while node.next:
prev[node.next] = node
nodes.append(node)
node = node.next
nodes.append(node)
node1, node2 = (nodes[k - 1], nodes[len(nodes) - k]) if k - 1 <= len(nodes) - k else (nodes[len(nodes) - k], nodes[k - 1])
node1.next, node2.next = node2.next, node1.next if node1.next != node2 else node1
if node1 in prev and prev[node1] != node2:
prev[node1].next = node2
if node2 in prev and prev[node2] != node1:
prev[node2].next = node1
return head if node1 != head else node2
| [
"[email protected]"
] | |
745625739eb2a5d142639ae759a2c01bc73b0535 | 69da8d0f4d5d50b40019959a83dda09aa75f6dd3 | /test/test_columndatatypegetter.py | 19b486d3dce2c01e583932793a1daac2d5f241ce | [
"MIT"
] | permissive | Peter-32/neatdata | 62e8fbccd28257ec7e533eeec1cd5f579ae93247 | 8796ca9f027ad727440b2f11479ad5ab22aa8e09 | refs/heads/master | 2021-05-10T09:52:34.488575 | 2018-03-31T23:12:29 | 2018-03-31T23:12:29 | 118,937,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,276 | py | import unittest
import pandas as pd
import numpy as np
from neatdata.neatdata import *
class TestColumnDataTypeGetter(unittest.TestCase):
def testColumnDataTypeGetter_Execute(self):
# Assemble
now = pd.datetime.now()
trainX = pd.DataFrame({'col1': [1,1,1,1,1,1,1],
'col2': ['a','a','a','b','a','b','b'],
'col3': ['a','a','a','b','a','b','b'],
'col4': ['a','a','a','b','a','b','b'],
'col5': ['a','a','a','b','a','b','b'],
'col6': [now,now,now,now,now,now,now],
'col7': [1,None,None,None,None,None,None],
'col8': ['a',None,None,None,None,None,None],
'col9': [now,None,None,None,None,None,None],
'col10': [np.nan,None,None,None,None,None,None],
'col11': [np.inf,None,None,None,None,None,None],
'col12': [-np.inf,1,None,None,None,None,None]})
indexColumns = ['col3','col4']
skipColumns = ['col5']
# Act
numberColumns, categoryColumns, datetimeColumns = ColumnDataTypeGetter().execute(trainX, indexColumns, skipColumns)
# Assert
self.assertTrue('col1' in numberColumns)
self.assertTrue('col2' in categoryColumns)
self.assertTrue('col3' not in numberColumns)
self.assertTrue('col3' not in categoryColumns)
self.assertTrue('col3' not in datetimeColumns)
self.assertTrue('col4' not in numberColumns)
self.assertTrue('col4' not in categoryColumns)
self.assertTrue('col4' not in datetimeColumns)
self.assertTrue('col5' not in numberColumns)
self.assertTrue('col5' not in categoryColumns)
self.assertTrue('col5' not in datetimeColumns)
self.assertTrue('col6' in datetimeColumns)
self.assertTrue('col7' in numberColumns)
self.assertTrue('col8' in categoryColumns)
self.assertTrue('col9' in datetimeColumns)
self.assertTrue('col10' in numberColumns)
self.assertTrue('col11' in numberColumns)
self.assertTrue('col12' in numberColumns)
| [
"[email protected]"
] | |
25d958bd368dc46714b98ad965f5d05421db8589 | 190aad44ef1892d413ce29ee3b3bf08d78622181 | /notebooks/Easily_creating_MAB_problems.py | d63694f86e4ed918545d1903e9b03048a216a8c1 | [
"MIT"
] | permissive | choltz95/SMPyBandits | 0ea40ed50a0d4db1833cba028b3f1cf779137a0c | 04bc2b2bf10f8043afa5cac6589c191745735d9c | refs/heads/master | 2021-01-04T13:10:46.724834 | 2020-02-14T15:04:07 | 2020-02-14T15:04:07 | 240,565,363 | 1 | 0 | MIT | 2020-02-14T17:39:19 | 2020-02-14T17:39:19 | null | UTF-8 | Python | false | false | 11,872 | py |
# coding: utf-8
# # Table of Contents
# - 1. Easily creating MAB problems
#   - 1.1 Constant arms
#   - 1.2 Bernoulli arms
#   - 1.3 Gaussian arms
#     - 1.3.1 Wrong means for Gaussian arms?
#     - 1.3.2 Closed form formula
#     - 1.3.3 With a larger variance?
#   - 1.4 Exponential arms
#   - 1.5 Uniform arms
#   - 1.6 Arms with rewards outside of [0, 1]
#   - 1.7 Gamma arms
#   - 1.8 Non-truncated Gaussian and Gamma arms
#   - 1.9 Conclusion
# ---
# # Easily creating MAB problems
# First, be sure to be in the main folder, or to have installed [`SMPyBandits`](https://github.com/SMPyBandits/SMPyBandits), and import `MAB` from `Environment` package:
# In[1]:
get_ipython().system('pip install SMPyBandits watermark')
get_ipython().run_line_magic('load_ext', 'watermark')
get_ipython().run_line_magic('watermark', '-v -m -p SMPyBandits -a "Lilian Besson"')
# In[2]:
from SMPyBandits.Environment import MAB
# And also, import all the types of arms.
# In[3]:
from SMPyBandits.Arms import *
# Check it exists:
Constant, Bernoulli, Gaussian, Exponential, ExponentialFromMean, Poisson, UniformArm, Gamma, GammaFromMean
# In[4]:
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (12.4, 7)
# ## Constant arms
#
# This is the simpler example of arms : rewards are constant, and not randomly drawn from a distribution.
# Let consider an example with $K = 3$ arms.
# In[5]:
M_C = MAB([Constant(mu) for mu in [0.1, 0.5, 0.9]])
# The `plotHistogram()` method draws samples from each arm, and plot a histogram of their repartition.
# For constant arms, no need to take a lot of samples as they are constant.
# In[6]:
_ = M_C.plotHistogram(10)
# ## Bernoulli arms
# Then it's easy to create a Multi-Armed Bandit problem, instance of `MAB` class, either from a list of `Arm` objects:
# In[7]:
M_B = MAB([Bernoulli(mu) for mu in [0.1, 0.5, 0.9]])
# Or from a dictionary, with keys `"arm_type"` and `"params"`:
# In[8]:
M_B = MAB({
"arm_type": Bernoulli,
"params": [0.1, 0.5, 0.9]
})
# The `plotHistogram()` method draws a lot of samples from each arm, and plot a histogram of their repartition:
# In[9]:
_ = M_B.plotHistogram()
# ## Gaussian arms
# And with Gaussian arms, with a small variance of $\sigma^2 = 0.05$, for rewards truncated into $[0, 1]$:
# In[10]:
M_G = MAB([Gaussian(mu, sigma=0.05) for mu in [0.1, 0.5, 0.9]])
# The histogram clearly shows that low-variance Gaussian arms are easy to separate:
# In[11]:
_ = M_G.plotHistogram(100000)
# ### Wrong means for Gaussian arms ?
# The truncation seems to change the means.
#
# > For instance, the first arm (in <span style="color:red;">red</span>) has a small mass on the special value $0$, so it probably reduces its mean.
#
# Let's estimate it empirically, and then check with the closed form solution.
# In[12]:
arm = Gaussian(0.1, sigma=0.05)
# In[13]:
mean = arm.mean
estimated_mean = np.mean(arm.draw_nparray((10000000,)))
# In[14]:
mean, estimated_mean
# In[15]:
def relative_error(x, y):
return abs(x - y) / x
relative_error(mean, estimated_mean)
# $\implies$ That's a relative difference of $0.4\%$, really negligible!
#
# And for other values for $(\mu, \sigma)$:
# In[16]:
arm = Gaussian(0.7, sigma=3)
# In[17]:
mean = arm.mean
estimated_mean = np.mean(arm.draw_nparray((10000000,)))
# In[18]:
mean, estimated_mean
# In[19]:
relative_error(mean, estimated_mean)
# $\implies$ That's a relative difference of $25\%$!
#
# > Clearly, this effect cannot be neglected!
# ### Closed form formula
# Apparently, the closed form formula for the mean of a Gaussian arm $\mathcal{N}(\mu, \sigma)$, **truncated to $[a,b]$** is :
# $$\mathbb{E} (X\mid a<X<b)=\mu +\sigma {\frac {\phi ({\frac {a-\mu }{\sigma }})-\phi ({\frac {b-\mu }{\sigma }})}{\Phi ({\frac {b-\mu }{\sigma }})-\Phi ({\frac {a-\mu }{\sigma }})}}\!=\mu +\sigma {\frac {\phi (\alpha )-\phi (\beta )}{\Phi (\beta )-\Phi (\alpha )}}.$$
#
# Let's compute that.
# In[20]:
import numpy as np
from scipy.special import erf
# The fonction
# $$\phi(x) := \frac{1}{\sqrt{2 \pi}} \exp\left(- \frac{1}{2} x^2 \right).$$
# In[21]:
def phi(xi):
r"""The :math:`\phi(\xi)` function, defined by:
.. math:: \phi(\xi) := \frac{1}{\sqrt{2 \pi}} \exp\left(- \frac12 \xi^2 \right)
It is the probability density function of the standard normal distribution, see https://en.wikipedia.org/wiki/Standard_normal_distribution.
"""
return np.exp(- 0.5 * xi**2) / np.sqrt(2. * np.pi)
# The fonction
# $$\Phi(x) := \frac{1}{2} \left(1 + \mathrm{erf}\left( \frac{x}{\sqrt{2}} \right) \right).$$
# In[22]:
def Phi(x):
r"""The :math:`\Phi(x)` function, defined by:
.. math:: \Phi(x) := \frac{1}{2} \left(1 + \mathrm{erf}\left( \frac{x}{\sqrt{2}} \right) \right).
It is the probability density function of the standard normal distribution, see https://en.wikipedia.org/wiki/Cumulative_distribution_function
"""
return (1. + erf(x / np.sqrt(2.))) / 2.
# In[23]:
mu, sigma, mini, maxi = arm.mu, arm.sigma, arm.min, arm.max
mu, sigma, mini, maxi
# In[24]:
alpha, beta = (mini - mu) / sigma, (maxi - mu) / sigma
other_mean = mu + sigma * (phi(alpha) - phi(beta)) / (Phi(beta) - Phi(alpha))
# In[25]:
mean, estimated_mean, other_mean
# Well, the [theoretical formula](https://en.wikipedia.org/wiki/Truncated_normal_distribution#Moments) still does not match the empirical mean.
# This is expected: the closed form gives the mean *conditional* on $a < X < b$, while these arms *clip* samples to $[a, b]$, which leaves point masses at the bounds.
#
# Let's forget about this possible issue, and consider that the mean $\mu$ of a Gaussian arm $\mathcal{N}(\mu, \sigma)$ truncated to $[0,1]$ is indeed $\mu$.
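# As a quick sanity check (an illustrative added cell), the parameter $\mu$ and the empirical mean of the clipped samples agree closely when $\sigma$ is small:
# In[ ]:
for mu_check in [0.1, 0.5, 0.9]:
    arm_check = Gaussian(mu_check, sigma=0.05)
    print(mu_check, np.mean(arm_check.draw_nparray((100000,))))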
# ### With a larger variance ?
# But if the variance is larger, it can be very hard to differentiate between arms, and so MAB learning will be harder.
# With a big variance of $\sigma^2 = 0.5$, for rewards truncated into $[0, 1]$:
# In[26]:
M_G = MAB([Gaussian(mu, sigma=0.10) for mu in [0.1, 0.5, 0.9]])
_ = M_G.plotHistogram(100000)
# We see that due to the truncation, if mean of the Gaussian is too close to $0$ or $1$, then actual mean rewards is pushed to $0$ or $1$ (here the blue arm clearly has a mean higher than $0.9$).
#
# And for larger variances, it is even stronger:
# In[27]:
M_G = MAB([Gaussian(mu, sigma=0.25) for mu in [0.1, 0.5, 0.9]])
_ = M_G.plotHistogram()
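# The spikes at the bounds of these histograms come from clipping; the clipped mass can be measured directly (illustrative added cell, assuming draws are clipped exactly to the bounds):
# In[ ]:
samples = Gaussian(0.9, sigma=0.25).draw_nparray((100000,))
print(np.mean(samples == 0.0), np.mean(samples == 1.0), np.mean(samples))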
# ## Exponential arms
# We can do the same with (truncated) Exponential arms, and as a convenience I prefer to work with `ExponentialFromMean`, to use the mean and not the $\lambda$ parameter to create the arm.
# In[28]:
M_E = MAB({ "arm_type": ExponentialFromMean, "params": [0.1, 0.5, 0.9]})
# In[29]:
_ = M_E.plotHistogram()
# ## Uniform arms
# Arms with rewards uniform in $[0,1]$, are continuous versions of Bernoulli$(0.5)$.
# They can also be uniform in other intervals.
# In[30]:
UniformArm(0, 1).lower_amplitude
UniformArm(0, 0.1).lower_amplitude
UniformArm(0.4, 0.5).lower_amplitude
UniformArm(0.8, 0.9).lower_amplitude
# In[31]:
M_U = MAB([UniformArm(0, 1), UniformArm(0, 0.1), UniformArm(0.4, 0.5), UniformArm(0.8, 0.9)])
# In[32]:
_ = M_U.plotHistogram(100000)
# ----
# ## Arms with rewards outside of $[0, 1]$
#
# Of course, everything work similarly if rewards are not in $[0, 1]$ but in any interval $[a, b]$.
#
# Note that all my algorithms assume $a = \text{lower} = 0$ and $b = 1$ (and use
# $\text{amplitude} = b - a$ instead of $b$).
# They just need to be specified if we stop using the default choice $[0, 1]$.
#
# For example, Gaussian arms can be truncated into $[-10, 10]$ instead of $[0, 1]$.
# Let define some Gaussian arms, with means $-5, 0, 5$ and a variance of $\sigma^2 = 2$.
# In[33]:
M_G = MAB([Gaussian(mu, sigma=2, mini=-10, maxi=10) for mu in [-5, 0, 5]])
# In[34]:
_ = M_G.plotHistogram(100000)
# In[35]:
M_G = MAB([Gaussian(mu, sigma=0.1, mini=-10, maxi=10) for mu in [-5, 0, 5]])
# In[36]:
_ = M_G.plotHistogram()
# ## Gamma arms
#
# We can do the same with (truncated) Gamma arms, and as a convenience I prefer to work with `GammaFromMean`, to use the mean and not the $k$ shape parameter to create the arm.
# The scale $\theta$ is fixed to $1$ by default, and here the rewards will be in $[0, 10]$.
# In[37]:
M_Gamma = MAB([GammaFromMean(shape, scale=1, mini=0, maxi=10) for shape in [1, 2, 3, 4, 5]])
# In[38]:
_ = M_Gamma.plotHistogram(100000)
# As for Gaussian arms, the truncation is strongly changing the means of the arm rewards.
# Here the arm with mean parameter $5$ has an empirical mean close to $10$ due to truncation.
# ## Non-truncated Gaussian and Gamma arms
#
# Let try with non-truncated rewards.
# In[39]:
M_G = MAB([Gaussian(mu, sigma=3, mini=float('-inf'), maxi=float('+inf')) for mu in [-10, 0, 10]])
# In[40]:
_ = M_G.plotHistogram(100000)
# And with non-truncated Gamma arms ?
# In[41]:
M_Gamma = MAB([GammaFromMean(shape, scale=1, mini=float('-inf'), maxi=float('+inf')) for shape in [1, 2, 3, 4, 5]])
_ = M_Gamma.plotHistogram(100000)
# In[42]:
M_Gamma = MAB([GammaFromMean(shape, scale=1, mini=float('-inf'), maxi=float('+inf')) for shape in [10, 20, 30, 40, 50]])
_ = M_Gamma.plotHistogram(1000000)
# ----
# ## Conclusion
#
# This small notebook demonstrated how to define arms and Multi-Armed Bandit problems in my framework, [SMPyBandits](https://github.com/SMPyBandits/SMPyBandits).
| [
"[email protected]"
] | |
634bc45cb7e7a4fac71119db55fdd5b876c9f2c1 | d2a2546165b3db6295a3f21972dda8ab9aab7846 | /src/vehicles/witch_hill_dump.py | 1c97fe495c9bea288903b47daf6afdd2b767a4a8 | [] | no_license | andythenorth/road-hog | bab12b133dd674f0e6d7ae87498675f8da96b982 | 1800d57d4ce904e7041f24646c393b37903d9466 | refs/heads/main | 2022-09-26T19:57:31.006800 | 2022-09-17T10:09:37 | 2022-09-17T10:09:37 | 214,848,659 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | from road_vehicle import DumpHauler, DieselRoadVehicle
consist = DumpHauler(id='witch_hill_dump',
base_numeric_id=500,
name='Witch Hill',
road_type='HAUL',
power=900,
speed=50, # dibbled up above RL for game balance
type_base_running_cost_points=30, # dibble running costs for game balance
vehicle_life=40,
intro_date=2007)
consist.add_unit(type=DieselRoadVehicle,
capacity=85, # much bigger is not much better here
vehicle_length=7,
effects=['EFFECT_SPRITE_AIRCRAFT_BREAKDOWN_SMOKE, -2, 1, 10', 'EFFECT_SPRITE_AIRCRAFT_BREAKDOWN_SMOKE, -2, -1, 10'])
| [
"[email protected]"
] | |
79ad6dc22c43fed47a393c0aff8caff6d7af35e4 | f93ecb6738037629d6a7f81ccdc278a0e6051859 | /backend/users/migrations/0002_auto_20210107_1422.py | 0fcc43097aae6dd692bacb76114aaf83f58efae8 | [] | no_license | crowdbotics-apps/rntest-23713 | 5b30bda09e0023387c8f831655fc2c61178e54e9 | 1e08218f2b705815a63bba73a00590c439543e0d | refs/heads/master | 2023-02-10T01:12:56.843111 | 2021-01-07T15:52:33 | 2021-01-07T15:52:33 | 327,614,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | # Generated by Django 2.2.17 on 2021-01-07 14:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course', '0001_initial'),
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='group',
field=models.ManyToManyField(blank=True, related_name='user_group', to='course.Group'),
),
migrations.AlterField(
model_name='user',
name='name',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
| [
"[email protected]"
] | |
e0576df71c2522cdb2051d37f75b5bdada967a89 | 9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56 | /google/ads/googleads/v10/services/services/audience_service/transports/base.py | ff56552e7c1a9f7d86489c08a7f4d73273e0a941 | [
"Apache-2.0"
] | permissive | GerhardusM/google-ads-python | 73b275a06e5401e6b951a6cd99af98c247e34aa3 | 676ac5fcb5bec0d9b5897f4c950049dac5647555 | refs/heads/master | 2022-07-06T19:05:50.932553 | 2022-06-17T20:41:17 | 2022-06-17T20:41:17 | 207,535,443 | 0 | 0 | Apache-2.0 | 2019-09-10T10:58:55 | 2019-09-10T10:58:55 | null | UTF-8 | Python | false | false | 5,929 | py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v10.services.types import audience_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-ads",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class AudienceServiceTransport(abc.ABC):
"""Abstract transport class for AudienceService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
DEFAULT_HOST: str = "googleads.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file,
**scopes_kwargs,
quota_project_id=quota_project_id,
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(
service_account.Credentials, "with_always_use_jwt_access"
)
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.mutate_audiences: gapic_v1.method.wrap_method(
self.mutate_audiences,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def mutate_audiences(
self,
) -> Callable[
[audience_service.MutateAudiencesRequest],
Union[
audience_service.MutateAudiencesResponse,
Awaitable[audience_service.MutateAudiencesResponse],
],
]:
raise NotImplementedError()
__all__ = ("AudienceServiceTransport",)
| [
"[email protected]"
] | |
e7d8a1682099ac7153d8ad000c1e50c2359043a1 | c87397b08516625c178040e736cf87e61b227fa5 | /inversioncount.py | 73a7f1595a4acf8fc7b6152a498cb1a0cc991c25 | [] | no_license | sainihimanshu1999/HackerRank-Solution | 26cb839aeb46c373643d5ad347a348103c1a147e | ad1e9e450474782b06add3c0c66108e3890d56ec | refs/heads/master | 2022-12-24T07:16:48.576461 | 2020-09-11T15:16:34 | 2020-09-11T15:16:34 | 271,944,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,358 | py | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the countInversions function below.
def countInversions(arr):
n = len(arr)
temp = [0]*n
return _mergeSort(arr,temp,0,n-1)
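# Quick illustrative check (hypothetical input, not part of the HackerRank
# harness): countInversions([2, 4, 1]) returns 2, for the out-of-order pairs
# (2, 1) and (4, 1). Note the helper sorts arr in place as a side effect.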
def _mergeSort(arr,temp,left,right):
count = 0
if left<right:
mid = (left+right)//2
count += _mergeSort(arr,temp,left,mid)
count += _mergeSort(arr,temp,mid+1,right)
count += merge(arr, temp, left, mid, right)
return count
def merge(arr,temp,left,mid,right):
    i = left
j = mid+1
k = left
count = 0
while i<=mid and j <=right:
if arr[i]<=arr[j]:
temp[k] = arr[i]
k+=1
i+=1
else:
temp[k] = arr[j]
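            # arr[j] precedes the remaining left-half elements arr[i..mid],
            # so each of them forms one inversion with arr[j]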
count += (mid-i+1)
k+= 1
j += 1
while i<=mid:
temp[k]=arr[i]
k += 1
i += 1
while j<= right:
temp[k] = arr[j]
k+= 1
j+= 1
for x in range(left,right+1):
arr[x] = temp[x]
return count
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
n = int(input())
arr = list(map(int, input().rstrip().split()))
result = countInversions(arr)
fptr.write(str(result) + '\n')
fptr.close()
| [
"[email protected]"
] | |
69cc6cdd46138f0cab03ad3c1137e1b4b13e2da9 | b3d552675b36cb88a1388fcfc531e497ad7cbee9 | /day2/filter_demo/filter_demo/views.py | 096b4b76668d64379b9b12eb6c60b5e53333f08f | [] | no_license | gaohj/1902_django | 3cea1f0935fd983f25c6fd832b103ac5165a2e30 | 822af7b42120c6edc699bf97c800887ff84f5621 | refs/heads/master | 2022-12-11T10:02:50.233398 | 2019-11-26T08:33:38 | 2019-11-26T08:33:38 | 209,241,390 | 2 | 0 | null | 2022-12-08T07:28:24 | 2019-09-18T07:05:48 | Python | UTF-8 | Python | false | false | 733 | py | from django.shortcuts import render
from datetime import datetime
def greet(word):
return "hello world %s" % word
def index(request):
context = {
'greet':greet
}
return render(request,'index.html',context=context)
def add_view(request):
context = {
'value1': ['1','2','3','4'],
'value2':[5,'6',7]
}
return render(request, 'add.html', context=context)
def cut_view(request):
return render(request, 'cut.html')
def date_view(request):
context = {
'today':datetime.now()
}
return render(request, 'date.html',context=context)
def default_view(request):
context = {
'value':'haha'
}
return render(request, 'default.html',context=context) | [
"[email protected]"
] | |
fac49fce5a9dca3eb4fba19fc3f0b99240d3b0d7 | 4bf344f5069a0048b7ee4fb49dc9a1126256f2ee | /fotalora_project/settings.py | 48ec2c322f17cbee166e3cbc7f38246c16bcca96 | [] | no_license | rikicop/fotalora | 1377881f866990ad96a90b3d3add04583c6a9175 | 4313bfce6423bcd6cdd79850e5c3975ae42b9de3 | refs/heads/main | 2023-04-12T02:22:05.124849 | 2021-05-15T23:07:32 | 2021-05-15T23:07:32 | 345,458,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,870 | py |
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'j1v0z5+s9%9_iaczr^8#!y%!xcmta93p3y_afjyor7w=^pf^%9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'website',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'fotalora_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'fotalora_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
MEDIA_URL = '/media/' #NEW
MEDIA_ROOT = os.path.join(BASE_DIR, 'media') #NEW
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
| [
"[email protected]"
] | |
9f430d239d4708d95d76a6d4db2165837fcbc7e6 | 94bfb1346a9ce4cf6ca8bfeeb5194b7a467731a6 | /aclark/db/migrations/0017_profile_twitter_username.py | b51796a7afd925ff83b1d5063aa2f2333263afca | [
"MIT"
] | permissive | aclark4life/aclarknet-best-pro | 4006cad37c2eec166a98a73e988b9b490a10e5cb | e256bfdd63ad4445bf0a75ef0b91f6e1fd2479ea | refs/heads/master | 2023-03-01T09:10:04.041913 | 2020-12-01T18:40:07 | 2020-12-01T18:40:07 | 140,634,961 | 0 | 0 | MIT | 2021-02-10T01:57:38 | 2018-07-11T22:49:33 | CSS | UTF-8 | Python | false | false | 398 | py | # Generated by Django 2.1.9 on 2019-06-14 15:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("db", "0016_auto_20190614_1208")]
operations = [
migrations.AddField(
model_name="profile",
name="twitter_username",
field=models.CharField(blank=True, max_length=150, null=True),
)
]
| [
"[email protected]"
] | |
7301b9b8125559dd52eb5fc208f1086b7c2c123a | 169e75df163bb311198562d286d37aad14677101 | /tensorflow/tensorflow/contrib/ffmpeg/__init__.py | 484ffee3e7afe55c63cab2a463454353b2663e18 | [
"Apache-2.0"
] | permissive | zylo117/tensorflow-gpu-macosx | e553d17b769c67dfda0440df8ac1314405e4a10a | 181bc2b37aa8a3eeb11a942d8f330b04abc804b3 | refs/heads/master | 2022-10-19T21:35:18.148271 | 2020-10-15T02:33:20 | 2020-10-15T02:33:20 | 134,240,831 | 116 | 26 | Apache-2.0 | 2022-10-04T23:36:22 | 2018-05-21T08:29:12 | C++ | UTF-8 | Python | false | false | 1,329 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Working with audio using FFmpeg.
See the @{$python/contrib.ffmpeg} guide.
@@decode_audio
@@encode_audio
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.ffmpeg.ffmpeg_ops import decode_audio
from tensorflow.contrib.ffmpeg.ffmpeg_ops import decode_video
from tensorflow.contrib.ffmpeg.ffmpeg_ops import encode_audio
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['decode_audio', 'encode_audio', 'decode_video']
remove_undocumented(__name__, _allowed_symbols)
| [
"[email protected]"
] | |
9da5dce08297733c59ac76e87bfeff418f8cd12d | 3a9379132ef3ebb5ab9ae67a3baea146006381e6 | /Pc_06_Beautiful Soup/pc_02_基本用法.py | 328e49540567d5e34da008278433b7fa3527d567 | [] | no_license | ahaoao/PySpider | 9c8280affcee27985105a09ea354ac77773d77a6 | 9c32bd56a8b198050f3b467fe233a3699de73ecf | refs/heads/master | 2020-08-09T21:29:37.666947 | 2019-10-10T12:09:03 | 2019-10-10T12:09:03 | 214,172,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | from bs4 import BeautifulSoup
import requests
url = 'http://www.baidu.com/'
html = requests.get(url)
soup = BeautifulSoup(html.text, 'lxml')
# 调用prettify()方法,这个方法可以 把解析的字符串以标准的缩进格式输出
# 对于不标准的HTML字符串BeautifulSoup可以自动更正格式。这一步不是由prettify做的,而是由BeautifulSoup初始化时完成的
print(soup.prettify())
print(soup.script.string)
# soup.script.string 输出HTML中的script节点的文本内容 | [
"[email protected]"
] | |
dfe78435554de2c451f2c5930ffba74f7560af9e | e8fe313e5598fd123f8055b811b1a09f6224bd00 | /service/actions.py | 2d24e26c5fac500e6b0656499ab358da363f26db | [
"MIT"
] | permissive | getcircle/python-soa | 2a6a7cfd6a3ccaa92b6f57b82debe48434a9fa1c | 590b53691ff959713e331c25353d8c9280e10100 | refs/heads/master | 2021-04-30T23:35:04.385252 | 2016-12-05T04:50:12 | 2016-12-05T04:50:12 | 27,792,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,994 | py | import traceback
import service.control
from . import settings
from .paginator import Paginator
class Action(object):
class ActionError(Exception):
def __init__(self, error, details=None, *args, **kwargs):
self.error = error
self.details = details
super(Action.ActionError, self).__init__(*args, **kwargs)
class ActionFieldError(Exception):
def __init__(self, field_name, error_message, *args, **kwargs):
self.field_name = field_name
self.error_message = error_message
super(Action.ActionFieldError, self).__init__(*args, **kwargs)
class PermissionDenied(ActionError):
def __init__(self, *args, **kwargs):
error = 'PERMISSION_DENIED'
details = ('PERMISSION_DENIED', 'permission denied')
super(Action.PermissionDenied, self).__init__(error, details, *args, **kwargs)
type_validators = None
field_validators = None
exception_to_error_map = None
required_fields = None
def __init__(self, service_control, action_request, action_response):
self.service_control = service_control
self.token = service_control.token
self.service_name = service_control.service
self._action_request = action_request
self._action_response = action_response
self._errors = action_response.result.errors
self._error_details = action_response.result.error_details
self.control = self._action_response.control
self.request = service.control.get_request_extension(action_request)
self.response = service.control.get_response_extension(action_response)
if self.field_validators is None:
self.field_validators = {}
if self.type_validators is None:
self.type_validators = {}
if self.exception_to_error_map is None:
self.exception_to_error_map = {}
if self.required_fields is None:
self.required_fields = tuple()
def note_error(self, error, details=None):
if details and len(details) != 2:
raise ValueError(
'`details` must be a list or tuple of (key, value)'
' with a max of 2'
)
if error not in self._errors:
self._errors.append(error)
if details:
error_detail = self._error_details.add()
error_detail.error = error
error_detail.key = details[0]
error_detail.detail = details[1]
def note_field_error(self, field_name, error_message):
self.note_error('FIELD_ERROR', (field_name, error_message))
def is_error(self):
return bool(self._errors)
def check_type_validators(self, field_name, value):
valid = True
validators = self.type_validators.get(field_name, [])
for validator in validators:
if not validator(value):
self.note_field_error(field_name, 'INVALID')
valid = False
return valid
def check_field_validators(self, field_name, value):
validators = self.field_validators.get(field_name, {})
for validator, error_message in validators.iteritems():
if not validator(value):
self.note_field_error(field_name, error_message)
def add_prefix(self, prefix, name):
return prefix + '.' + name if prefix else name
def validate_control(self):
if not self.control.paginator.page_size:
self.control.paginator.page_size = settings.DEFAULT_PAGE_SIZE
if self.control.paginator.page_size > settings.MAX_PAGE_SIZE:
self.note_field_error('paginator.page_size', 'OVER_MAXIMUM')
def _validate_required_fields(self, message, required_fields=None, prefix=''):
field_dict = dict((field.name, value) for field, value in message.ListFields())
if required_fields is None:
required_fields = self.required_fields
for field_name in required_fields:
full_name = field_name
if prefix:
full_name = '%s.%s' % (prefix, field_name)
if '.' in field_name:
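                # Dotted names like 'container.field' are resolved one level
                # at a time by recursing into the nested message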
container_name, path = field_name.split('.', 1)
container = field_dict.get(container_name)
if not container:
self.note_field_error(full_name, 'MISSING')
continue
else:
self._validate_required_fields(
container,
required_fields=[path],
prefix=container_name,
)
continue
descriptor = message.DESCRIPTOR.fields_by_name[field_name]
if (
descriptor.type != descriptor.TYPE_ENUM and
descriptor.type != descriptor.TYPE_MESSAGE and
descriptor.label != descriptor.LABEL_REPEATED and
getattr(message, field_name) == descriptor.default_value
):
self.note_field_error(full_name, 'MISSING')
elif descriptor.label == descriptor.LABEL_REPEATED:
try:
if not len(getattr(message, field_name, [])):
self.note_field_error(full_name, 'MISSING')
except TypeError:
self.note_field_error(full_name, 'MISSING')
elif (
descriptor.type == descriptor.TYPE_MESSAGE and
not getattr(message, field_name).ByteSize()
):
self.note_field_error(full_name, 'MISSING')
def validate_message(self, message, prefix=''):
if not prefix:
self._validate_required_fields(message)
# don't run type or field validators if we're missing required fields
if self.is_error():
return
for field, value in message.ListFields():
field_name = self.add_prefix(prefix, field.name)
valid = self.check_type_validators(field_name, value)
# only run field validators if type_validators passed
if valid:
self.check_field_validators(field_name, value)
if hasattr(value, 'ListFields'):
self.validate_message(value, prefix=field_name)
def validate(self, *args, **kwargs):
self.validate_control()
self.validate_message(self.request)
def pre_run(self):
# TODO add test case to ensure called
pass
def post_run(self):
# TODO add test case to ensure called
pass
def execute(self, *args, **kwargs):
try:
self.validate()
if not self.is_error():
self.pre_run()
self.run(*args, **kwargs)
self.post_run()
except self.ActionFieldError as e:
self.note_field_error(e.field_name, e.error_message)
except self.ActionError as e:
self.note_error(e.error, e.details)
except Exception as e:
mapped_error = self.exception_to_error_map.get(e.__class__)
if mapped_error:
self.note_error(mapped_error, (mapped_error, str(e)))
else:
self.note_error('SERVER_ERROR', ('SERVER_ERROR', traceback.format_exc()))
self._action_response.result.success = not self.is_error()
def get_paginator(self, objects, count=None):
return Paginator(
objects,
self.control.paginator.page_size,
count=count,
disabled=self.control.paginator.disabled,
)
def get_page(self, paginator):
return paginator.page(self.control.paginator.page)
def get_pagination_offset_and_limit(self, total_count):
paginator = Paginator([], self.control.paginator.page_size)
paginator._count = total_count
bottom, _ = paginator.get_page_bottom_top(self.control.paginator.page)
return bottom, self.control.paginator.page_size
def get_paginated_objects(self, objects, paginator=None, page=None, count=None):
if paginator is None:
paginator = self.get_paginator(objects, count)
if page is None:
page = self.get_page(paginator)
service.control.update_paginator_protobuf(self.control.paginator, paginator, page)
return page.object_list
def paginated_response(
self,
repeated_container,
objects,
transport_func,
paginator=None,
page=None,
count=None,
):
paginated_objects = self.get_paginated_objects(
objects,
paginator=paginator,
page=page,
count=count,
)
for item in paginated_objects:
transport_func(item, repeated_container)
def run(self, *args, **kwargs):
raise NotImplementedError('Action must define `run` method')
| [
"[email protected]"
] | |
5f0d9d471cca84f57ebe03ec9ef0319c27f74a5a | 5f64c8189e195f6b1f530e8c6acd0e843c72c02a | /feets/libs/ls_fap.py | 9493f4c88361313d2aef2185e49c194c74a04c2e | [
"MIT"
] | permissive | LVFerrero/feets | 388967873fabb592c89f04dcd758a402d99dd8b9 | 9938db85c80721eeae621c0a41630e757c23da68 | refs/heads/master | 2022-10-03T22:24:20.955364 | 2020-06-03T02:47:01 | 2020-06-03T02:47:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,374 | py | """
Utilities for computing periodogram statistics.
"""
import numpy as np
from scipy.special import gammaln
from astropy.stats.lombscargle import LombScargle
def _weighted_sum(val, dy):
return (val / dy ** 2).sum()
def _weighted_mean(val, dy):
return _weighted_sum(val, dy) / _weighted_sum(np.ones_like(val), dy)
def _weighted_var(val, dy):
return _weighted_mean(val ** 2, dy) - _weighted_mean(val, dy) ** 2
def _gamma(N):
# Note: this is closely approximated by (1 - 0.75 / N) for large N
return np.sqrt(2 / N) * np.exp(gammaln(N / 2) - gammaln((N - 1) / 2))
def _log_gamma(N):
return 0.5 * np.log(2 / N) + gammaln(N / 2) - gammaln((N - 1) / 2)
def pdf_single(z, N, normalization, dH=1, dK=3):
"""Probability density function for Lomb-Scargle periodogram
Compute the expected probability density function of the periodogram
for the null hypothesis - i.e. data consisting of Gaussian noise.
Parameters
----------
z : array-like
the periodogram value
N : int
the number of data points from which the periodogram was computed
normalization : string
The periodogram normalization. Must be one of
['standard', 'model', 'log', 'psd']
dH, dK : integers (optional)
The number of parameters in the null hypothesis and the model
Returns
-------
pdf : np.ndarray
The expected probability density function
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
if dK - dH != 2:
raise NotImplementedError("Degrees of freedom != 2")
Nk = N - dK
if normalization == "psd":
return np.exp(-z)
elif normalization == "standard":
return 0.5 * Nk * (1 - z) ** (0.5 * Nk - 1)
elif normalization == "model":
return 0.5 * Nk * (1 + z) ** (-0.5 * Nk - 1)
elif normalization == "log":
return 0.5 * Nk * np.exp(-0.5 * Nk * z)
else:
raise ValueError(
"normalization='{0}' is not recognized" "".format(normalization)
)
def fap_single(z, N, normalization, dH=1, dK=3):
"""Single-frequency false alarm probability for the Lomb-Scargle periodogram
This is equal to 1 - cdf, where cdf is the cumulative distribution.
The single-frequency false alarm probability should not be confused with
the false alarm probability for the largest peak.
Parameters
----------
z : array-like
the periodogram value
N : int
the number of data points from which the periodogram was computed
normalization : string
The periodogram normalization. Must be one of
['standard', 'model', 'log', 'psd']
dH, dK : integers (optional)
The number of parameters in the null hypothesis and the model
Returns
-------
fap : np.ndarray
The expected cumulative distribution function
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
if dK - dH != 2:
raise NotImplementedError("Degrees of freedom != 2")
Nk = N - dK
if normalization == "psd":
return np.exp(-z)
elif normalization == "standard":
return (1 - z) ** (0.5 * Nk)
elif normalization == "model":
return (1 + z) ** (-0.5 * Nk)
elif normalization == "log":
return np.exp(-0.5 * Nk * z)
else:
raise ValueError(
"normalization='{0}' is not recognized" "".format(normalization)
)
def cdf_single(z, N, normalization, dH=1, dK=3):
"""Cumulative distribution for the Lomb-Scargle periodogram
Compute the expected cumulative distribution of the periodogram
for the null hypothesis - i.e. data consisting of Gaussian noise.
Parameters
----------
z : array-like
the periodogram value
N : int
the number of data points from which the periodogram was computed
normalization : string
The periodogram normalization. Must be one of
['standard', 'model', 'log', 'psd']
dH, dK : integers (optional)
The number of parameters in the null hypothesis and the model
Returns
-------
cdf : np.ndarray
The expected cumulative distribution function
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
return 1 - fap_single(z, N, normalization=normalization, dH=dH, dK=dK)
def tau_davies(Z, fmax, t, y, dy, normalization="standard", dH=1, dK=3):
"""tau factor for estimating Davies bound (Baluev 2008, Table 1)"""
N = len(t)
NH = N - dH # DOF for null hypothesis
NK = N - dK # DOF for periodic hypothesis
Dt = _weighted_var(t, dy)
Teff = np.sqrt(4 * np.pi * Dt)
W = fmax * Teff
if normalization == "psd":
# 'psd' normalization is same as Baluev's z
return W * np.exp(-Z) * np.sqrt(Z)
elif normalization == "standard":
# 'standard' normalization is Z = 2/NH * z_1
return (
_gamma(NH)
* W
* (1 - Z) ** (0.5 * (NK - 1))
* np.sqrt(0.5 * NH * Z)
)
elif normalization == "model":
# 'model' normalization is Z = 2/NK * z_2
return _gamma(NK) * W * (1 + Z) ** (-0.5 * NK) * np.sqrt(0.5 * NK * Z)
elif normalization == "log":
# 'log' normalization is Z = 2/NK * z_3
return (
_gamma(NK)
* W
* np.exp(-0.5 * Z * (NK - 0.5))
* np.sqrt(NK * np.sinh(0.5 * Z))
)
else:
raise NotImplementedError("normalization={0}".format(normalization))
def fap_simple(Z, fmax, t, y, dy, normalization="standard"):
"""False Alarm Probability based on estimated number of indep frequencies
"""
N = len(t)
T = max(t) - min(t)
N_eff = fmax * T
p_s = cdf_single(Z, N, normalization=normalization)
return 1 - p_s ** N_eff
def fap_davies(Z, fmax, t, y, dy, normalization="standard"):
"""Davies upper-bound to the false alarm probability
(Eqn 5 of Baluev 2008)
"""
N = len(t)
fap_s = fap_single(Z, N, normalization=normalization)
tau = tau_davies(Z, fmax, t, y, dy, normalization=normalization)
return fap_s + tau
def fap_baluev(Z, fmax, t, y, dy, normalization="standard"):
"""Alias-free approximation to false alarm probability
(Eqn 6 of Baluev 2008)
"""
cdf = cdf_single(Z, len(t), normalization)
tau = tau_davies(Z, fmax, t, y, dy, normalization=normalization)
return 1 - cdf * np.exp(-tau)
def fap_bootstrap(
Z,
fmax,
t,
y,
dy,
normalization="standard",
n_bootstraps=1000,
random_seed=None,
):
rng = np.random.RandomState(random_seed)
def bootstrapped_power():
resample = rng.randint(0, len(y), len(y)) # sample with replacement
ls_boot = LombScargle(t, y[resample], dy[resample])
freq, power = ls_boot.autopower(
normalization=normalization, maximum_frequency=fmax
)
return power.max()
pmax = np.array([bootstrapped_power() for i in range(n_bootstraps)])
pmax.sort()
return 1 - np.searchsorted(pmax, Z) / len(pmax)
METHODS = {
"simple": fap_simple,
"davies": fap_davies,
"baluev": fap_baluev,
"bootstrap": fap_bootstrap,
}
def false_alarm_probability(
Z, fmax, t, y, dy, normalization, method="baluev", method_kwds=None
):
"""Approximate the False Alarm Probability
Parameters
----------
TODO
Returns
-------
TODO
"""
if method not in METHODS:
raise ValueError("Unrecognized method: {0}".format(method))
method = METHODS[method]
method_kwds = method_kwds or {}
return method(Z, fmax, t, y, dy, normalization, **method_kwds)
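# A minimal usage sketch (illustrative only; synthetic data, using the
# LombScargle import above and the functions defined in this module):
#   rng = np.random.RandomState(0)
#   t = 100 * rng.rand(200)
#   y = np.sin(2 * np.pi * t) + 0.1 * rng.randn(200)
#   dy = 0.1 * np.ones_like(y)
#   freq, power = LombScargle(t, y, dy).autopower(normalization='standard',
#                                                 maximum_frequency=2.0)
#   fap = false_alarm_probability(power.max(), 2.0, t, y, dy,
#                                 'standard', method='baluev')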
| [
"[email protected]"
] | |
565efacfb179f927cd8af26c61ae0c3ba3ef8487 | 81d0bfe1262008587ddf5ac12ae034d6922b9747 | /.history/Smart/__init___20201119002524.py | cbb9b10af0ff19c067a3b2a07463887dced7e82c | [] | no_license | elvinyeka/Smart-Mobile | 525fffac14b8c460e85002bbf154bf54b4a341fe | a32f557306ae1bfe3ae01f5a8beef93727cfbc47 | refs/heads/master | 2023-06-09T09:52:18.446572 | 2021-07-06T11:35:34 | 2021-07-06T11:35:34 | 313,988,596 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SECRET_KEY'] = 'mysecretkey'
app.config['SQLALCHEMY_FATABASE_URI'] = 'sqlite:///smart.db'
db = SQLAlchemy(app)
from Smart.admin import routes | [
"[email protected]"
] | |
db6fcccd53af36e20f21c598728bfabd1fd1670b | 07504838d12c6328da093dce3726e8ed096cecdb | /pylon/resources/datapoints/count_f.py | bb3f91dd8f3a573a6949efe5dc7b8e474bfb55b5 | [] | no_license | lcoppa/fiat-lux | 9caaa7f3105e692a149fdd384ec590676f06bf00 | 7c166bcc08768da67c241078b397570de159e240 | refs/heads/master | 2020-04-04T02:47:19.917668 | 2013-10-10T10:22:51 | 2013-10-10T10:22:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,182 | py | """count_f standard datapoint type, originally defined in resource file set
standard 00:00:00:00:00:00:00:00-0. """
# Copyright (C) 2013 Echelon Corporation. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software" to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# This file is generated from device resource files using an automated
# database to source code conversion process. Grammar and punctuation within
# the embedded documentation may not be correct, as this data is gathered and
# combined from several sources. The machine-generated code may not meet
# compliance with PEP-8 and PEP-257 recommendations at all times.
# Generated at 23-Sep-2013 09:14.
import pylon.resources.base
from pylon.resources.standard import standard
class count_f(pylon.resources.base.Float):
"""count_f standard datapoint type. Absolute count. (units)."""
def __init__(self):
super().__init__(
single=True,
minimum=0,
maximum=3.40282E+038,
scope=0,
key=51
)
self._original_name = 'SNVT_count_f'
self._definition = standard.add(self)
if __name__ == '__main__':
# unit test code.
item = count_f()
pass
| [
"[email protected]"
] | |
144be98081d6a3fcf808759973235a49fa3db8bf | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/7/r2h.py | fac514c85439c28023704dae8965d97ca969d91f | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'r2H':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
5d3c950baf2810efddb7193b9a250d54b794cb01 | 6a6d8c0c8ddd6f5a1c03788f35320dd4b82314ea | /yamtbx/dataproc/cbf.py | f0cfd76d63abd9e9a3ee424e9184619b4ec59abc | [
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause",
"MIT"
] | permissive | nsls-ii-mx/yamtbx | b817a131a8f6f515db99bc1743f81218997ac4ed | 311cf5a20e27a035a9e89c2abcb3c7d5e3684d67 | refs/heads/master | 2021-01-11T12:05:38.166937 | 2017-01-24T16:26:44 | 2017-01-24T16:26:44 | 76,574,177 | 1 | 0 | null | 2016-12-15T16:00:06 | 2016-12-15T16:00:06 | null | UTF-8 | Python | false | false | 3,840 | py | """
(c) RIKEN 2015. All rights reserved.
Author: Keitaro Yamashita
This software is released under the new BSD License; see LICENSE.
"""
import os
import pycbf
import numpy
from cbflib_adaptbx import cbf_binary_adaptor, CBFWriteAdaptor
def load_cbf_as_numpy(filein, quiet=True):
assert os.path.isfile(filein)
if not quiet:
print "reading", filein, "as cbf"
h = pycbf.cbf_handle_struct()
h.read_file(filein, pycbf.MSG_DIGEST)
ndimfast, ndimslow = h.get_image_size_fs(0)
arr = numpy.fromstring(h.get_image_fs_as_string(0, 4, 1, ndimfast, ndimslow), dtype=numpy.int32)
return arr, ndimfast, ndimslow
# load_cbf_as_numpy()
def load_minicbf_as_numpy(filein, quiet=True): # This can also read XDS special cbf
assert os.path.isfile(filein)
if not quiet:
print "reading", filein, "as minicbf"
h = pycbf.cbf_handle_struct()
h.read_file(filein, pycbf.MSG_DIGEST)
h.require_category("array_data")
h.find_column("data")
compression, binary_id, elsize, elsigned, elunsigned, elements, minelement, maxelement, bo, ndimfast, ndimmid, ndimslow, padding = h.get_integerarrayparameters_wdims()
assert elsize == 4 or elsize == 8
assert elsigned == 1
assert ndimslow <= 1
arr = numpy.fromstring(h.get_integerarray_as_string(), dtype=numpy.int32 if elsize==4 else numpy.int64)
return arr, ndimfast, ndimmid
# load_minicbf_as_numpy()
def load_cbf_as_flex(filein): # This can also read XDS special cbf
M = cbf_binary_adaptor(filein)
data = M.uncompress_implementation("buffer_based").uncompress_data()
nslow, nfast = M.dim_slow(), M.dim_fast() # can be obtained after getting data
return data, nfast, nslow
# load_cbf_as_flex()
def load_xds_special(cbfin):
h = pycbf.cbf_handle_struct()
h.read_file(cbfin, pycbf.MSG_DIGEST)
h.require_category("array_data")
h.find_column("header_contents")
header = h.get_value()
M = cbf_binary_adaptor(cbfin)
data = M.uncompress_implementation("buffer_based").uncompress_data()
#print "slow, fast=", M.dim_slow(), M.dim_fast() # can be obtained after getting data
return header, data, M.dim_slow(), M.dim_fast()
# load_xds_special()
def save_numpy_data_as_cbf(data, size1, size2, title, cbfout, pilatus_header=None):
h = pycbf.cbf_handle_struct()
h.new_datablock(title)
h.require_category('array_data')
if pilatus_header is not None:
h.require_column('header_convention')
h.set_value('"PILATUS_1.2"')
h.require_column('header_contents')
h.set_value(pilatus_header)
h.require_category('array_data')
h.require_column('data')
elsigned = 1
if data.dtype in (numpy.uint8, numpy.uint16, numpy.uint32, numpy.uint64):
elsigned = 0
h.set_integerarray_wdims_fs(pycbf.CBF_BYTE_OFFSET, 1, data.tostring(), data.dtype.itemsize,
elsigned, len(data), "little_endian",
size1, size2, 1, 0)
h.write_file(cbfout, pycbf.CBF,
pycbf.MIME_HEADERS|pycbf.MSG_DIGEST|pycbf.PAD_4K, pycbf.ENC_NONE)
# save_numpy_data_as_cbf()
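# Minimal usage sketch (illustrative; writes a small synthetic frame, and the
# path and title are made up):
#   frame = numpy.zeros(100 * 100, dtype=numpy.int32)
#   save_numpy_data_as_cbf(frame, 100, 100, "test", "/tmp/test.cbf")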
def save_flex_int_as_cbf(data, cbfout):
writer = CBFWriteAdaptor(cbfout)
writer.write_data(data)
# save_flex_int_as_cbf()
def get_pilatus_header(cbfin):
h = pycbf.cbf_handle_struct()
if cbfin.endswith(".bz2"):
# TODO to speed up, better only bunzip2 the first part of file..
import tempfile
import bz2
junk, tmpf = tempfile.mkstemp()
open(tmpf, "wb").write(bz2.BZ2File(cbfin).read())
h.read_file(tmpf, pycbf.MSG_DIGEST)
os.remove(tmpf)
else:
h.read_file(cbfin, pycbf.MSG_DIGEST)
h.require_category("array_data")
h.find_column("header_contents")
header = h.get_value()
return header
# get_pilatus_header()
| [
"[email protected]"
] | |
d1551b299452133a0b0cbed2ff1bbd265b8010d5 | 999a0db734e9fc858a3708831fa9a55d7672ebf3 | /Code/EnhanceCWE-master/muo/admin.py | b2c20e11f2828ccd88165fb9605a84e0f8f9ac25 | [] | no_license | coredamage/MORE | 45acc0499ec9af5ddca1783459966e58fdd83fb2 | 72d3464cf4e737fd1c0cb67f73f1bcf0ffbc98f8 | refs/heads/master | 2020-05-23T18:32:37.769720 | 2016-06-14T13:35:52 | 2016-06-14T13:35:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,133 | py | # @OPENSOURCE_HEADER_START@
# MORE Tool
# Copyright 2016 Carnegie Mellon University.
# All Rights Reserved.
#
# THIS SOFTWARE IS PROVIDED "AS IS," WITH NO WARRANTIES WHATSOEVER.
# CARNEGIE MELLON UNIVERSITY EXPRESSLY DISCLAIMS TO THE FULLEST EXTENT
# PERMITTEDBY LAW ALL EXPRESS, IMPLIED, AND STATUTORY WARRANTIES,
# INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT OF PROPRIETARY
# RIGHTS.
#
# Released under a modified BSD license, please see license.txt for full
# terms. DM-0003473
# @OPENSOURCE_HEADER_END@
from django.db.models import Q
from django.contrib import admin
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.shortcuts import render, get_object_or_404
from django.http import Http404
from django.template.response import TemplateResponse
from django.conf.urls import url
import autocomplete_light
from models import *
from django.utils.safestring import mark_safe
from django.http import HttpResponseRedirect
from base.admin import BaseAdmin
# Tags are not used anywhere for now
# @admin.register(Tag)
# class TagAdmin(BaseAdmin):
# fields = ['name']
# search_fields = ['name']
@admin.register(UseCase)
class UseCaseAdmin(BaseAdmin):
fields = ['name', 'misuse_case', 'use_case_description', 'osr', 'tags']
readonly_fields = ['name']
list_display = ['name']
search_fields = ['name', 'use_case_description', 'tags__name']
def get_model_perms(self, request):
"""
Return empty perms dict thus hiding the model from admin index.
"""
return {}
class UseCaseAdminInLine(admin.StackedInline):
model = UseCase
extra = 0
fields = ['name', 'use_case_description', 'use_case_primary_actor',
'use_case_secondary_actor', 'use_case_precondition', 'use_case_flow_of_events',
'use_case_postcondition', 'use_case_assumption', 'use_case_source',
'osr_pattern_type', 'osr']
readonly_fields = ['name']
def has_delete_permission(self, request, obj=None):
"""
Overriding the method such that the delete option on the UseCaseAdminInline form on change form
is not available for the users except the original author or users with 'can_edit_all' permission.
The delete option is only available to the original author or users with 'can_edit_all' permission
if the related MUOContainer is in draft or rejected state
"""
if obj is None:
# This is add form, let super handle this
return super(UseCaseAdminInLine, self).has_delete_permission(request, obj=None)
else:
# This is change form. Only original author or users with 'can_edit_all' permission are allowed
# to delete the UseCase from the related MUOContainer if it is in 'draft' or 'rejected' state
if (request.user == obj.created_by or request.user.has_perm('muo.can_edit_all')) and obj.status in ('draft', 'rejected'):
return super(UseCaseAdminInLine, self).has_delete_permission(request, obj=None)
else:
# Set deletion permission to False
return False
def get_readonly_fields(self, request, obj=None):
"""
Overriding the method such that all the fields on the UseCaseAdminInline form on change form
are read-only for all the users except the original author or users with 'can_edit_all' permission.
Only the original author or users with 'can_edit_all' permission can edit the fields that too
when the related MUOContainer is in the 'draft' state
"""
if obj is None:
# This is add form, let super handle this
return super(UseCaseAdminInLine, self).get_readonly_fields(request, obj)
else:
# This is the change form. Only the original author or users with 'can_edit_all' permission
# are allowed to edit the UseCase if the related MUOContainer is in the 'draft' state
if (request.user == obj.created_by or request.user.has_perm('muo.can_edit_all')) and obj.status == 'draft':
return super(UseCaseAdminInLine, self).get_readonly_fields(request, obj)
else:
# Set all the fields as read-only
return list(set(
[field.name for field in self.opts.local_fields] +
[field.name for field in self.opts.local_many_to_many]
))
def get_max_num(self, request, obj=None, **kwargs):
"""
Overriding the method such that the 'Add another Use Case' option on the UseCaseAdminInline form
on change form is not available for the users except the original author or users with 'can_edit_all'
permission. The 'Add another UseCase' option is only available to the original author or users
with 'can_edit_all' permission if the related MUOContainer is in draft state
"""
if obj is None:
# This is add form, let super handle this
return super(UseCaseAdminInLine, self).get_max_num(request, obj=None, **kwargs)
else:
# This is change form. Only original author is allowed to add another Use Case in the
# MUOContainer if it is in 'draft' state
if (request.user == obj.created_by or request.user.has_perm('muo.can_edit_all')) and obj.status == 'draft':
return super(UseCaseAdminInLine, self).get_max_num(request, obj=None, **kwargs)
else:
# No 'Add another Use Case' button
return 0
# This class is to implement an ENUM for PUBLISH and UNPUBLISH. Used in response_change method.
class PublishUnpublishValues:
UNPUBLISH, PUBLISH = range(2)
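    # Used below as e.g. obj.action_set_publish(PublishUnpublishValues.PUBLISH)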
@admin.register(MUOContainer)
class MUOContainerAdmin(BaseAdmin):
form = autocomplete_light.modelform_factory(MUOContainer, fields="__all__")
fields = ['name', 'cwes', 'misuse_case_type', 'misuse_case', 'misuse_case_description',
'misuse_case_primary_actor', 'misuse_case_secondary_actor', 'misuse_case_precondition',
'misuse_case_flow_of_events', 'misuse_case_postcondition', 'misuse_case_assumption',
'misuse_case_source', 'status']
list_display = ['name', 'status']
readonly_fields = ['name', 'status']
search_fields = ['name', 'status']
date_hierarchy = 'created_at'
list_filter = ['status', 'is_published', ('created_by', admin.RelatedOnlyFieldListFilter)]
inlines = [UseCaseAdminInLine]
def get_actions(self, request):
"""
Overriding the method in order to disable the delete selected (and bulk delete) option the
changelist form
"""
actions = super(MUOContainerAdmin, self).get_actions(request)
if 'delete_selected' in actions:
del actions['delete_selected']
return actions
def get_queryset(self, request):
"""
If user doesn't have access to view all MUO Containers (review), then limit to his own MUOs
or to approved MUOs written by other contributors
"""
qs = super(MUOContainerAdmin, self).get_queryset(request)
if request.user.has_perm('muo.can_view_all') or request.user.has_perm('muo.can_edit_all'):
return qs
return qs.filter(Q(created_by=request.user) | Q(status='approved'))
def get_readonly_fields(self, request, obj=None):
"""
Overriding the method such that the change form is read-only for all the users. Only the original
author of the MUOContainer or the users with 'can_edit_all' permission can edit it that too only
when MUOContainer is in 'draft' state
"""
if obj is None:
# This is add form, let super handle this
return super(MUOContainerAdmin, self).get_readonly_fields(request, obj)
else:
# This is change form. Only original author or users with 'can_edit_all' permission are allowed
# to edit the MUOContainer in draft state
if (request.user == obj.created_by or request.user.has_perm('muo.can_edit_all')) and obj.status == 'draft':
return super(MUOContainerAdmin, self).get_readonly_fields(request, obj)
else:
# Set all the fields as read-only
return list(set(
[field.name for field in self.opts.local_fields] +
[field.name for field in self.opts.local_many_to_many]
))
def has_delete_permission(self, request, obj=None):
"""
Overriding the method such that the delete option on the change form is not available
for the users except the original author or users with permission 'can_edit_all'.
The delete option is only available to the original author or users with permission 'can_edit_all'
if the related MUOContainer is in draft or rejected state
"""
if obj is None:
# This is add form, let super handle this
return super(MUOContainerAdmin, self).has_delete_permission(request, obj=None)
else:
# This is change form. Only original author or users with 'can_edit_all' are allowed
# to delete the MUOContainer and that too if it is in 'draft' state
if (request.user == obj.created_by or request.user.has_perm('muo.can_edit_all')) and obj.status in ('draft', 'rejected'):
return super(MUOContainerAdmin, self).has_delete_permission(request, obj=None)
else:
# Set deletion permission to False
return False
def response_change(self, request, obj, *args, **kwargs):
'''
Override response_change method of admin/options.py to handle the click of
newly added buttons
'''
# Get the metadata about self (it tells you app and current model)
opts = self.model._meta
# Get the primary key of the model object i.e. MUO Container
pk_value = obj._get_pk_val()
preserved_filters = self.get_preserved_filters(request)
redirect_url = reverse('admin:%s_%s_change' %
(opts.app_label, opts.model_name),
args=(pk_value,))
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
# Check which button is clicked, handle accordingly.
try:
if "_approve" in request.POST:
obj.action_approve(request.user)
msg = "You have approved the submission"
elif "_reject" in request.POST:
reject_reason = request.POST.get('reject_reason_text', '')
obj.action_reject(reject_reason, request.user)
msg = "The submission has been sent back to the author for review"
elif "_submit_for_review" in request.POST:
obj.action_submit()
msg = "Your review request has been successfully submitted"
elif "_edit" in request.POST:
obj.action_save_in_draft()
msg = "You can now edit the MUO"
elif "_promote" in request.POST:
obj.action_promote(request.user)
msg = "This MUO has been promoted and now everyone will have access to it."
elif "_unpublish" in request.POST:
obj.action_set_publish(PublishUnpublishValues.UNPUBLISH)
msg = "This MUO has been unpublished."
elif "_publish" in request.POST:
obj.action_set_publish(PublishUnpublishValues.PUBLISH)
msg = "This MUO has been published."
else:
# Let super class 'ModelAdmin' handle rest of the button clicks i.e. 'save' 'save and continue' etc.
return super(MUOContainerAdmin, self).response_change(request, obj, *args, **kwargs)
except ValueError as e:
# In case the state of the object is not suitable for the corresponding action,
# model will raise the value exception with the appropriate message. Catch the
# exception and show the error message to the user
msg = e.message
self.message_user(request, msg, messages.ERROR)
return HttpResponseRedirect(redirect_url)
except ValidationError as e:
# If incomplete MUO Container is attempted to be approved or submitted for review, a validation error will
# be raised with an appropriate message
msg = e.message
self.message_user(request, msg, messages.ERROR)
return HttpResponseRedirect(redirect_url)
self.message_user(request, msg, messages.SUCCESS)
return HttpResponseRedirect(redirect_url)
@admin.register(IssueReport)
class IssueReportAdmin(BaseAdmin):
form = autocomplete_light.modelform_factory(IssueReport, fields="__all__")
fields = [('name', 'status'), 'type', 'usecase', 'usecase_duplicate', 'description',
('created_by', 'created_at'), ('reviewed_by', 'reviewed_at'), 'resolve_reason']
readonly_fields = ['name', 'status', 'created_by', 'created_at', 'reviewed_by', 'reviewed_at', 'resolve_reason']
list_display = ['name', 'type', 'created_by', 'created_at', 'status',]
search_fields = ['name', 'usecase__id', 'usecase__name', 'created_by__name']
list_filter = ['type', 'status']
date_hierarchy = 'created_at'
def get_fields(self, request, obj=None):
""" Override to hide the 'usecase_duplicate' if type is not 'duplicate' """
fields = super(IssueReportAdmin, self).get_fields(request, obj)
if obj and obj.type != 'duplicate' and 'usecase_duplicate' in fields:
fields.remove('usecase_duplicate')
return fields
def response_change(self, request, obj, *args, **kwargs):
'''
Override response_change method of admin/options.py to handle the click of
newly added buttons
'''
# Get the metadata about self (it tells you app and current model)
opts = self.model._meta
# Get the primary key of the model object i.e. Issue Report
pk_value = obj._get_pk_val()
preserved_filters = self.get_preserved_filters(request)
redirect_url = reverse('admin:%s_%s_change' %
(opts.app_label, opts.model_name),
args=(pk_value,))
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
# Check which button is clicked, handle accordingly.
try:
if "_investigate" in request.POST:
obj.action_investigate(request.user)
msg = "The issue is now being investigated."
elif "_resolve" in request.POST:
resolve_reason = request.POST.get('resolve_reason_text', '')
obj.action_resolve(resolve_reason,request.user)
msg = "The issue is now resolved because " + resolve_reason
elif "_reopen" in request.POST:
obj.action_reopen(request.user)
msg = "The issue has been re-opened."
elif "_open" in request.POST:
obj.action_open(request.user)
msg = "The issue is now opened."
except ValueError as e:
# In case the state of the object is not suitable for the corresponding action,
# model will raise the value exception with the appropriate message. Catch the
# exception and show the error message to the user
msg = e.message
self.message_user(request, msg, messages.ERROR)
self.message_user(request, msg, messages.SUCCESS)
return HttpResponseRedirect(redirect_url)
| [
"[email protected]"
] | |
0ade154f6e8c21659fa6a191193b26eca83f5fed | d439cfe7ae0b01026ba1a821fa2ab853ccee9600 | /bi_eval/negativePointer1.py | 5164723a9738a64080ea3830beee70725268479a | [] | no_license | luofang0212/synyi_test | 6e16a7d52aab8aba39605e09df1a4115bd7af39e | 386d1c7a72bd7eae8d16c64492cd0ca3bc6cd775 | refs/heads/master | 2023-08-29T17:30:23.307048 | 2021-11-03T08:10:01 | 2021-11-03T08:10:01 | 411,960,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,168 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from bi_eval.score_color import to_color
'''
负向指标-情况1
当(参考值/实际值)>=150% ,
则指标得分=分值权重满分*150%
否则指标得分= (参考值/实际值)*分值权重满分
'''
# 实际值
actual_value = 200.76
# 参考值
reference_value = 32
# 分值
score = 100
# 分值权重
score_weight = 0.4
# 分值权重满分:指标分值
full_score = score * score_weight
print("实际值:{0} 参考值:{1} 分值权重满分(指标分值):{2}".format(actual_value, reference_value, full_score))
# 浮动范围
domain_of_walker = 0
# 指标得分
index_score = 0
result = reference_value / actual_value
print("比率:{0}".format(result))
if (result >= 1.5):
# 指标得分=分值权重满分*150%
index_score = full_score * 1.5
print("1指标得分= {0}".format(index_score))
to_color(index_score,score,full_score)
else:
# 则指标得分= (参考值/实际值)*分值权重满分
index_score = (reference_value / actual_value) * full_score
print("2指标得分= {0}".format(index_score))
to_color(index_score,score,full_score)
| [
"[email protected]"
] | |
2a5399b7ae8b60b9ef91e4857e333b52637e45aa | d305e9667f18127e4a1d4d65e5370cf60df30102 | /mindspore/nn/layer/activation.py | 2ff36e3771ea3bb61485c355d5c49b02dd9ac886 | [
"Apache-2.0",
"MIT",
"Libpng",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"AGPL-3.0-only",
"MPL-2.0-no-copyleft-exception",
"IJG",
"Zlib",
"MPL-1.1",
"BSD-3-Clause",
"BSD-3-Clause-Open-MPI",
"MPL-1.0",
"GPL-2.0-only",
"MPL-2.0",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] | permissive | imyzx2017/mindspore_pcl | d8e5bd1f80458538d07ef0a8fc447b552bd87420 | f548c9dae106879d1a83377dd06b10d96427fd2d | refs/heads/master | 2023-01-13T22:28:42.064535 | 2020-11-18T11:15:41 | 2020-11-18T11:15:41 | 313,906,414 | 6 | 1 | Apache-2.0 | 2020-11-18T11:25:08 | 2020-11-18T10:57:26 | null | UTF-8 | Python | false | false | 15,984 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""activation"""
import numpy as np
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops import _selected_ops
from mindspore.common.parameter import Parameter
from mindspore.common.initializer import initializer
from mindspore.common.tensor import Tensor
from mindspore._extends import cell_attr_register
from mindspore._checkparam import Validator as validator
from ..cell import Cell
__all__ = ['Softmax',
'LogSoftmax',
'ReLU',
'ReLU6',
'Tanh',
'GELU',
'Sigmoid',
'PReLU',
'get_activation',
'LeakyReLU',
'HSigmoid',
'HSwish',
'ELU',
'LogSigmoid',
]
class Softmax(Cell):
r"""
Softmax activation function.
Applies the Softmax function to an n-dimensional input Tensor.
The input is a Tensor of logits transformed with exponential function and then
normalized to lie in range [0, 1] and sum up to 1.
Softmax is defined as:
.. math::
\text{softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_{j=0}^{n-1}\exp(x_j)},
where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.
Args:
axis (Union[int, tuple[int]]): The axis to apply Softmax operation, -1 means the last dimension. Default: -1.
Inputs:
- **x** (Tensor) - The input of Softmax.
Outputs:
Tensor, which has the same type and shape as `x` with values in the range[0,1].
Examples:
>>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
>>> softmax = nn.Softmax()
>>> softmax(input_x)
[0.03168 0.01166 0.0861 0.636 0.2341]
"""
def __init__(self, axis=-1):
super(Softmax, self).__init__()
self.softmax = _selected_ops.Softmax(axis)
def construct(self, x):
return self.softmax(x)
class LogSoftmax(Cell):
r"""
LogSoftmax activation function.
Applies the LogSoftmax function to n-dimensional input tensor.
The input is transformed by the Softmax function and then by the log function to lie in range[-inf,0).
Logsoftmax is defined as:
:math:`\text{logsoftmax}(x_i) = \log \left(\frac{\exp(x_i)}{\sum_{j=0}^{n-1} \exp(x_j)}\right)`,
where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.
Args:
axis (int): The axis to apply LogSoftmax operation, -1 means the last dimension. Default: -1.
Inputs:
- **x** (Tensor) - The input of LogSoftmax.
Outputs:
        Tensor, which has the same type and shape as the input `x`, with values in the range [-inf, 0).
Examples:
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> log_softmax = nn.LogSoftmax()
>>> log_softmax(input_x)
[[-5.00672150e+00 -6.72150636e-03 -1.20067215e+01]
[-7.00091219e+00 -1.40009127e+01 -9.12250078e-04]]
"""
def __init__(self, axis=-1):
super(LogSoftmax, self).__init__()
self.log_softmax = _selected_ops.LogSoftmax(axis)
def construct(self, x):
return self.log_softmax(x)
class ELU(Cell):
r"""
    Exponential Linear Unit activation function.
Applies the exponential linear unit function element-wise.
The activation function is defined as:
.. math::
E_{i} =
\begin{cases}
x, &\text{if } x \geq 0; \cr
\text{alpha} * (\exp(x_i) - 1), &\text{otherwise.}
\end{cases}
Args:
alpha (float): The coefficient of negative factor whose type is float. Default: 1.0.
Inputs:
- **input_data** (Tensor) - The input of ELU.
Outputs:
Tensor, with the same type and shape as the `input_data`.
Examples:
>>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float32)
>>> elu = nn.ELU()
>>> elu(input_x)
"""
def __init__(self, alpha=1.0):
super(ELU, self).__init__()
self.elu = P.Elu(alpha)
def construct(self, x):
return self.elu(x)
class ReLU(Cell):
r"""
Rectified Linear Unit activation function.
Applies the rectified linear unit function element-wise. It returns
    element-wise :math:`\max(0, x)`; specifically, neurons with negative output
    are suppressed while active neurons stay unchanged.
Inputs:
- **input_data** (Tensor) - The input of ReLU.
Outputs:
Tensor, with the same type and shape as the `input_data`.
Examples:
>>> input_x = Tensor(np.array([-1, 2, -3, 2, -1]), mindspore.float16)
>>> relu = nn.ReLU()
>>> relu(input_x)
[0. 2. 0. 2. 0.]
"""
def __init__(self):
super(ReLU, self).__init__()
self.relu = P.ReLU()
def construct(self, x):
return self.relu(x)
class ReLU6(Cell):
r"""
Compute ReLU6 activation function.
    ReLU6 is similar to ReLU, but with an upper limit of 6: inputs greater than 6
    have their outputs clipped to 6.
It computes element-wise as :math:`\min(\max(0, x), 6)`. The input is a Tensor of any valid shape.
Inputs:
- **input_data** (Tensor) - The input of ReLU6.
Outputs:
Tensor, which has the same type as `input_data`.
Examples:
>>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
>>> relu6 = nn.ReLU6()
>>> relu6(input_x)
[0. 0. 0. 2. 1.]
"""
def __init__(self):
super(ReLU6, self).__init__()
self.relu6 = P.ReLU6()
def construct(self, x):
return self.relu6(x)
class LeakyReLU(Cell):
r"""
Leaky ReLU activation function.
    LeakyReLU is similar to ReLU, but has a small nonzero slope for x < 0 rather than being identically 0 there.
The activation function is defined as:
.. math::
\text{leaky_relu}(x) = \begin{cases}x, &\text{if } x \geq 0; \cr
\text{alpha} * x, &\text{otherwise.}\end{cases}
See https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf
Args:
alpha (Union[int, float]): Slope of the activation function at x < 0. Default: 0.2.
Inputs:
- **input_x** (Tensor) - The input of LeakyReLU.
Outputs:
Tensor, has the same type and shape as the `input_x`.
Examples:
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> leaky_relu = nn.LeakyReLU()
>>> leaky_relu(input_x)
[[-0.2 4. -1.6]
[ 2 -1. 9.]]
"""
def __init__(self, alpha=0.2):
super(LeakyReLU, self).__init__()
validator.check_value_type('alpha', alpha, [float, int], self.cls_name)
self.greater_equal = P.GreaterEqual()
self.mul = P.Mul()
self.alpha = alpha
def construct(self, x):
alpha_array = P.Cast()(F.scalar_to_array(self.alpha), P.DType()(x))
if self.alpha <= 1:
out = P.Maximum()(alpha_array * x, x)
else:
out = P.Minimum()(alpha_array * x, x)
return out
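# Sanity sketch (NumPy, illustrative only — not part of MindSpore): for
# alpha <= 1, max(alpha*x, x) reproduces leaky_relu because x >= alpha*x when
# x >= 0 and x < alpha*x when x < 0; for alpha > 1 the inequalities flip,
# hence the Minimum branch above.
#     >>> import numpy as np
#     >>> x = np.array([-1.0, 4.0, -8.0]); a = 0.2
#     >>> bool(np.allclose(np.maximum(a*x, x), np.where(x >= 0, x, a*x)))
#     True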
class Tanh(Cell):
r"""
Tanh activation function.
    Applies the Tanh function element-wise, returning a new tensor with the
    hyperbolic tangent of the elements of the input. The input is a Tensor
    with any valid shape.
Tanh function is defined as:
.. math::
tanh(x_i) = \frac{\exp(x_i) - \exp(-x_i)}{\exp(x_i) + \exp(-x_i)} = \frac{\exp(2x_i) - 1}{\exp(2x_i) + 1},
where :math:`x_i` is an element of the input Tensor.
Inputs:
- **input_data** (Tensor) - The input of Tanh.
Outputs:
Tensor, with the same type and shape as the `input_data`.
Examples:
>>> input_x = Tensor(np.array([1, 2, 3, 2, 1]), mindspore.float16)
>>> tanh = nn.Tanh()
>>> tanh(input_x)
[0.7617 0.964 0.995 0.964 0.7617]
"""
def __init__(self):
super(Tanh, self).__init__()
self.tanh = _selected_ops.Tanh()
def construct(self, x):
return self.tanh(x)
class GELU(Cell):
r"""
Gaussian error linear unit activation function.
Applies GELU function to each element of the input. The input is a Tensor with any valid shape.
GELU is defined as:
:math:`GELU(x_i) = x_i*P(X < x_i)`, where :math:`P` is the cumulative distribution function
    of the standard Gaussian distribution and :math:`x_i` is an element of the input.
Inputs:
- **input_data** (Tensor) - The input of GELU.
Outputs:
Tensor, with the same type and shape as the `input_data`.
Examples:
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> gelu = nn.GELU()
>>> gelu(input_x)
[[-1.5880802e-01 3.9999299e+00 -3.1077917e-21]
[ 1.9545976e+00 -2.2918017e-07 9.0000000e+00]]
"""
def __init__(self):
super(GELU, self).__init__()
self.gelu = _selected_ops.Gelu()
def construct(self, x):
return self.gelu(x)
class Sigmoid(Cell):
r"""
Sigmoid activation function.
Applies sigmoid-type activation element-wise.
Sigmoid function is defined as:
:math:`\text{sigmoid}(x_i) = \frac{1}{1 + \exp(-x_i)}`, where :math:`x_i` is the element of the input.
Inputs:
    - **input_data** (Tensor) - The input of Sigmoid.
Outputs:
Tensor, with the same type and shape as the `input_data`.
Examples:
>>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
>>> sigmoid = nn.Sigmoid()
>>> sigmoid(input_x)
[0.2688 0.11914 0.5 0.881 0.7305]
"""
def __init__(self):
super(Sigmoid, self).__init__()
self.sigmoid = P.Sigmoid()
def construct(self, x):
return self.sigmoid(x)
class PReLU(Cell):
r"""
PReLU activation function.
Applies the PReLU function element-wise.
PReLU is defined as: :math:`prelu(x_i)= \max(0, x_i) + w * \min(0, x_i)`, where :math:`x_i`
    is an element of a channel of the input.
Here :math:`w` is a learnable parameter with a default initial value 0.25.
Parameter :math:`w` has dimensionality of the argument channel. If called without argument
channel, a single parameter :math:`w` will be shared across all channels.
Args:
channel (int): The dimension of input. Default: 1.
w (float): The initial value of w. Default: 0.25.
Inputs:
- **input_data** (Tensor) - The input of PReLU.
Outputs:
Tensor, with the same type and shape as the `input_data`.
Examples:
>>> input_x = Tensor(np.array([[[[0.1, 0.6], [0.9, 0.9]]]]), mindspore.float32)
>>> prelu = nn.PReLU()
>>> prelu(input_x)
[[[[0.1 0.6]
[0.9 0.9]]]]
"""
@cell_attr_register(attrs="")
def __init__(self, channel=1, w=0.25):
super(PReLU, self).__init__()
if isinstance(w, (np.float32, float)):
tmp = np.empty((channel,), dtype=np.float32)
tmp.fill(w)
w = Tensor(tmp)
elif isinstance(w, list):
w = Tensor(w)
if not isinstance(w, Tensor):
raise TypeError("w only support np.float32, float or Tensor type.")
self.w = Parameter(initializer(w, [channel]), name='a')
self.prelu = P.PReLU()
self.relu = P.ReLU()
self.assign = P.Assign()
def construct(self, x):
u = self.relu(self.w)
v = self.prelu(x, u)
if self.training:
self.assign(self.w, u)
return v
class HSwish(Cell):
r"""
Hard swish activation function.
Applies hswish-type activation element-wise. The input is a Tensor with any valid shape.
Hard swish is defined as:
.. math::
\text{hswish}(x_{i}) = x_{i} * \frac{ReLU6(x_{i} + 3)}{6},
where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.
Inputs:
- **input_data** (Tensor) - The input of HSwish.
Outputs:
Tensor, with the same type and shape as the `input_data`.
Examples:
>>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
>>> hswish = nn.HSwish()
>>> hswish(input_x)
"""
def __init__(self):
super(HSwish, self).__init__()
self.hswish = P.HSwish()
def construct(self, x):
return self.hswish(x)
class HSigmoid(Cell):
r"""
Hard sigmoid activation function.
Applies hard sigmoid activation element-wise. The input is a Tensor with any valid shape.
Hard sigmoid is defined as:
.. math::
        \text{hsigmoid}(x_{i}) = \max(0, \min(1, \frac{x_{i} + 3}{6})),
where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.
Inputs:
- **input_data** (Tensor) - The input of HSigmoid.
Outputs:
Tensor, with the same type and shape as the `input_data`.
Examples:
>>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
>>> hsigmoid = nn.HSigmoid()
>>> hsigmoid(input_x)
"""
def __init__(self):
super(HSigmoid, self).__init__()
self.hsigmoid = P.HSigmoid()
def construct(self, x):
return self.hsigmoid(x)
class LogSigmoid(Cell):
r"""
Logsigmoid activation function.
Applies logsigmoid activation element-wise. The input is a Tensor with any valid shape.
Logsigmoid is defined as:
.. math::
        \text{logsigmoid}(x_{i}) = \log\left(\frac{1}{1 + \exp(-x_i)}\right),
where :math:`x_{i}` is the element of the input.
Inputs:
- **input_data** (Tensor) - The input of LogSigmoid.
Outputs:
Tensor, with the same type and shape as the `input_data`.
Examples:
>>> net = nn.LogSigmoid()
>>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> logsigmoid = net(input_x)
[-3.1326166e-01, -1.2692806e-01, -4.8587345e-02]
"""
def __init__(self):
super(LogSigmoid, self).__init__()
self.mul = P.Mul()
self.exp = P.Exp()
self.add = P.TensorAdd()
self.rec = P.Reciprocal()
self.log = P.Log()
def construct(self, input_x):
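        # Compute log(1 / (1 + exp(-x))) step by step from primitive ops.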
neg_input = self.mul(input_x, -1)
exp_neg_input = self.exp(neg_input)
exp_neg_input_1 = self.add(exp_neg_input, 1)
rec_exp_neg_input_1 = self.rec(exp_neg_input_1)
ret = self.log(rec_exp_neg_input_1)
return ret
_activation = {
'softmax': Softmax,
'logsoftmax': LogSoftmax,
'relu': ReLU,
'relu6': ReLU6,
'tanh': Tanh,
'gelu': GELU,
'elu': ELU,
'sigmoid': Sigmoid,
'prelu': PReLU,
'leakyrelu': LeakyReLU,
'hswish': HSwish,
'hsigmoid': HSigmoid,
'logsigmoid': LogSigmoid,
}
def get_activation(name):
"""
Gets the activation function.
Args:
name (str): The name of the activation function.
Returns:
Function, the activation function.
Examples:
>>> sigmoid = nn.get_activation('sigmoid')
"""
if name is None:
return None
if name not in _activation:
raise KeyError(f"Unknown activation type '{name}'")
return _activation[name]()
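# Usage sketch (assumes a MindSpore environment):
#     act = get_activation('relu')   # an instantiated nn.ReLU() Cell
#     y = act(x)                     # apply to a Tensor like any other Cell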
| [
"[email protected]"
] | |
5787255ed323fe6e376304ba1c7501341403c07f | 720668c26680d91db9e19cca9a9e348ec8f615ee | /app/snippets/serializers/users.py | a79228232b8b1679fbb934783f51016ae39dbbca | [] | no_license | orca9s/drf-tutorial | a619f4669d5cf38d5450e19f27491ddaa0fbe4b3 | 4a214a51b94e7449ad16e061e3b799e215059955 | refs/heads/master | 2020-03-23T07:41:01.865456 | 2018-08-22T12:18:04 | 2018-08-22T12:18:04 | 141,285,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | from django.contrib.auth import get_user_model
from rest_framework import serializers
User = get_user_model()
__all__ = (
'UserListSerializer',
)
class UserBaseSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = (
'pk',
'username',
)
class UserListSerializer(UserBaseSerializer):
pass | [
"[email protected]"
] | |
66f36110ab14cc56a6425df036cd827d82a1dd07 | 81c85850747f97ccc6ed36e3e0a859b99ef38fe8 | /agesprot/settings.py | 6630d034f511e8d362388af3c9ce8257d623e610 | [] | no_license | agesprot1/agesprot | f5047447a37ea8e92b4ffa2d72ae7814d0af8950 | 34c14a176bca5523999d27d5b9f695a6fac9df96 | refs/heads/master | 2021-01-20T22:11:18.686295 | 2016-08-23T22:38:57 | 2016-08-23T22:38:57 | 61,495,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,306 | py | """
Django settings for agesprot project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*@a%42&a%_4$uibdzen_^!f+gy)su!3m4anho4%vwpl1^n@b3c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'mail_templated',
'agesprot.apps.base',
'agesprot.apps.users',
'agesprot.apps.project',
'agesprot.apps.activity',
'agesprot.apps.task',
'agesprot.apps.audit',
'agesprot.apps.notification',
'agesprot.apps.project.templatetags',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'agesprot.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(__file__), 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
AUTHENTICATION_BACKENDS = (
'agesprot.backends.EmailBackend',
'django.contrib.auth.backends.ModelBackend'
)
WSGI_APPLICATION = 'agesprot.wsgi.application'
# CONFIGURATION RESET PASSWORD
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = 'ftvoyvddltwpylyl'
EMAIL_PORT = 587
# END
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
"""
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# LOCAL
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'agesprot_db',
'USER': 'root',
'PASSWORD': 'root',
'HOST': 'localhost',
'PORT': '',
}
}
"""
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'd263b094gddtj',
'USER': 'lprhehqrpyfzrs',
'PASSWORD': 'BpKbhvVzip_5LaJP1kLlXVvyy7',
'HOST': 'ec2-50-19-219-148.compute-1.amazonaws.com',
'PORT': '5432'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LOGIN_REDIRECT_URL = '/project/my-list-project/'
LOGIN_URL = '/users/login/'
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'es'
TIME_ZONE = 'America/Bogota'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = 'static'
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR,'agesprot/static')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'agesprot/static'),
) | [
"[email protected]"
] | |
ee7b9a746e26d3d7ca43bc6c61e95f16d6ebf222 | e9c3e8f6ae05b0144237d01671f9a02404c43154 | /miltiple_leds_blink.py | 82014aa46d4c8b36a31d58522a17bd32d2b2ee2b | [] | no_license | vtt-info/micropython-stm32-examples | b836fe8a54119fcfdd95046d4edae043a156b695 | b6bbcb98a23b615914a015c7cbdedd550f5807ed | refs/heads/master | 2022-07-27T17:38:31.202661 | 2020-05-13T10:57:20 | 2020-05-13T10:57:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | # File: miltiple_leds_blink.py
# Date: 2020-05-12
import utime as time
from machine import Pin, Timer
from micropython import const
import pyb
LED_ON = const(0)
LED_OFF = const(1)
pin_names = ['PB7', 'PB8', 'PB9']
leds = []
timers = []
def timer_cb(t):
for i in range(len(leds)):
if t is timers[i]:
# toggle: read-modify-write
x = leds[i].value()
leds[i].value( not x )
break
for pin in pin_names:
leds.append( Pin(pin,mode=Pin.OUT_PP,value=LED_OFF) )
for i in range(len(leds)):
timers.append( Timer(-1, freq=(1<<i), callback=timer_cb) )
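# Timer i fires at (1 << i) Hz, so the three LEDs toggle at 1, 2 and 4 Hz;
# two toggles make one full blink, i.e. 0.5, 1 and 2 blinks per second.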
try:
while True:
pass
except KeyboardInterrupt:
pass
finally:
for led in leds:
led.value(LED_OFF)
for tim in timers:
tim.deinit()
print('Done')
| [
"[email protected]"
] | |
0546778d3f2fa010ce9c2c93f6bc71b9f51e646d | 28dbe47aba287ed94ef7bba734203736bcc06249 | /.history/dmac_20200622205638.py | 77543d6f6b94646db2f428cbd9cca2864dcb43d5 | [] | no_license | ntung88/Trading_Algorithms | 242fd816b19df95e02e9fcd8c5c91c862d2ede40 | d96488b1754e3751f739d9c3f094a8f8dc54a0a9 | refs/heads/master | 2022-11-19T16:04:07.800344 | 2020-07-17T21:14:10 | 2020-07-17T21:14:10 | 276,239,640 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,030 | py | import yfinance as yf
import numpy as np
from scipy.stats import norm
import pandas as pd
from pandasgui import show
from scipy.optimize import minimize, LinearConstraint
def clean_data(data):
incomplete_idxs = False
for col in data.columns:
incomplete_idxs |= np.isnan(data[col])
return data[~incomplete_idxs]
def calc_crossovers(sma, lma):
num_points = len(clean_data(lma))
#Currently using only closing prices
sma = sma['Close']
lma = lma['Close']
high = (sma > lma)[-num_points:]
crossovers = high.astype(int).diff()[1:]
return crossovers[crossovers != 0]
def profit(data, crossovers):
if len(crossovers) == 0:
return 0
total = 0
if crossovers.iloc[0] == -1:
total += data.loc[crossovers.index[0]] - data.iloc[0]
for i in range(1,len(crossovers)):
left_bound = crossovers.index[i-1]
if crossovers.loc[left_bound] == 1:
right_bound = crossovers.index[i]
total += data.loc[right_bound] - data.loc[left_bound]
if crossovers.iloc[-1] == 1:
total += data.iloc[-1] - data.loc[crossovers.index[-1]]
return total
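# Bookkeeping sketch (hypothetical series): a +1 crossover (short MA rising
# above the long MA) opens a position that the next -1 closes, adding
# data[sell] - data[buy]; a leading -1 credits gains from the first sample,
# and a trailing +1 is marked to market at the final price.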
def optimize(data):
# short_period = cp.Variable(integer=True, nonneg=True)
# long_period = cp.Variable(integer=True, nonneg=True)
# constraints = [short_period >= 1, long_period >= short_period]
# obj = cp.Maximize(run_analysis(short_period, long_period, data))
# # Form and solve problem.
# prob = cp.Problem(obj, constraints)
# prob.solve() # Returns the optimal value.
# return (short_period.value, long_period.value, prob.value, prob.status)
    # COBYLA supports only inequality constraints; integrality is handled by
    # rounding inside run_analysis. Each constraint is a separate dict, since
    # a single dict with repeated keys would keep only the last one.
    cons = [{'type': 'ineq', 'fun': lambda x: x[1] - x[0]},
            {'type': 'ineq', 'fun': lambda x: x[0] - 1}]
short_seeds = range(5, 200, 20)
long_seeds = range(20, 800, 25)
minimum = float('inf')
best_short = 0
best_long = 0
for short_seed in short_seeds:
for long_seed in long_seeds:
if long_seed > short_seed:
res = minimize(run_analysis, [short_seed, long_seed], args=data, method='COBYLA', constraints=cons, options={'rhobeg': 10.0})
if res.fun < minimum:
best_short = res.x[0]
best_long = res.x[1]
minimum = res.fun
return (best_short, best_long, minimum)
def run_analysis(periods, data):
short_period = int(round(periods[0]))
long_period = int(round(periods[1]))
sma = data.rolling(short_period).mean()
lma = data.rolling(long_period).mean()
crossovers = calc_crossovers(sma, lma)
result = -1 * profit(data['Close'], crossovers)
# print(short_period, long_period, result)
return result
def main():
tickers = 'SPY AAPL MRNA TSLA'
data = yf.download(tickers, period='max', group_by='ticker')
dirty = pd.DataFrame(data['TSLA'])
frame = clean_data(dirty)
print(optimize(frame))
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
c7d3a8082fd97115e80cd4558e5ee03d7cbd7f89 | 64b8f6bdb761f9f278599f43ebc5207b9dc92089 | /RVFollowupCalculator.py | e29841d72dc78326eaea1252c1e9dbf71885d2d2 | [] | no_license | r-cloutier/RVFollowupCalculator | 86f20dcf3ae95dcf9b38b170ff5d6ff4fc84b9a7 | 001742eec53d14854e89b372f4d5ae52faf89561 | refs/heads/master | 2023-01-07T23:48:41.093636 | 2020-11-16T22:01:39 | 2020-11-16T22:01:39 | 114,957,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,996 | py | from imports import *
from compute_sigmaRV import *
from sigmaRV_activity import *
from sigmaRV_planets import *
from compute_nRV_GP import *
from Teff2color import *
global G
G = 6.67e-11
def nRV_calculator(Kdetsig,
input_planet_fname='InputFiles/user_planet.in',
input_star_fname='InputFiles/user_star.in',
input_spectrograph_fname='InputFiles/user_spectrograph.in',
input_sigRV_fname='InputFiles/user_sigRV.in',
output_fname='RVFollowupCalculator',
duration=100, NGPtrials=1, runGP=True, verbose_results=True):
'''
Compute the number of RV measurements required to detect an input
transiting planet around an input star with an input spectrograph at a
given significance.
Parameters
----------
`Kdetsig': scalar
The desired RV semi-amplitude detection significance measured as
the semi-amplitude over its measurement uncertainty
(i.e. Kdetsig = K / sigmaK)
`duration': scalar
Duration (in days) of the assumed uniform time-series for calculations
of nRV with a GP.
`NGPtrials': scalar
Number of times to compute Nrv with a GP as these results can vary
        during repeated trials. The returned result is the median over the
        trials (e.g. median(Nrv_GP)); using multiple trials is recommended.
`runGP': boolean
If True, compute nRV with a GP. Significantly faster if False.
'''
# get inputs
texp, sigRV_phot, sigRV_act, sigRV_planet, sigRV_eff = \
_read_sigRV_input(input_sigRV_fname)
wlmin, wlmax, R, aperture, throughput, RVnoisefloor, maxtelluric,\
toverhead = _read_spectrograph_input(input_spectrograph_fname)
P, rp, mp = _read_planet_input(input_planet_fname)
mag, Ms, Rs, Teff, Z, vsini, Prot = _read_star_input(input_star_fname)
# get central band
Vcen, Jcen = False, False
if (wlmin <= .555 <= wlmax):
centralwl_nm = 555
Vcen = True
elif (wlmin <= 1.250 <= wlmax):
centralwl_nm = 1250
Jcen = True
else:
raise ValueError('Spectral coverage does not include the V or J-band.')
# get spectral bands corresponding to the wavelength range
band_strs = _get_spectral_bands(wlmin, wlmax)
# get mags for each spectral bin based on reference magnitude and Teff
logg = float(unp.nominal_values(_compute_logg(Ms, Rs)))
mags = V2all(mag, Teff, logg, Z) if Vcen else J2all(mag, Teff, logg, Z)
all_band_strs = np.array(['U','B','V','R','I','Y','J','H','K'])
mags = mags[np.in1d(all_band_strs, band_strs)]
g = np.isnan(mags)
if g.sum() > 0:
mags[g] = float(.5 * (mags[np.where(g)[0]-1] + mags[np.where(g)[0]+1]))
# checks
if mags.size != band_strs.size:
raise ValueError('Must have the same number of magnitudes as ' + \
'bands: %s'%(''.join(band_strs)))
if (maxtelluric < 0) or (maxtelluric >= 1):
raise ValueError('Invalid telluric transmittance value.')
if (throughput <= 0) or (throughput >= 1):
raise ValueError('Invalid throughput value.')
if runGP and (duration < P):
raise ValueError('Time-series duration must be longer than the' + \
"planet's orbital period.")
# compute sigRV_eff from other sources if not specified
if sigRV_eff < 0:
# compute sigRV_phot once if needed
if sigRV_phot <= 0:
print('Computing the photon-noise limited RV precision...\n')
transmission_fname = 'tapas_000001.ipac'
wlTAPAS, transTAPAS = np.loadtxt('/Users/ryancloutier/Research/RVInformation/RVFollowupCalculator/InputData/%s'%transmission_fname,
skiprows=23).T
wlTAPAS *= 1e-3 # microns
SNRtarget, sigRV_phot = _compute_sigRV_phot(band_strs, mags, Teff, logg,
Z, vsini, texp, R, aperture,
throughput, RVnoisefloor,
centralwl_nm, maxtelluric,
wlTAPAS, transTAPAS)
# get RV noise sources
Bmag, Vmag = _get_magnitudes(band_strs, mags, Ms)
B_V = Bmag - Vmag
if sigRV_act < 0:
sigRV_act = get_sigmaRV_activity(Teff, Ms, Prot, B_V)
if sigRV_planet < 0:
sigRV_planet = get_sigmaRV_planets(P,rp,Teff,Ms,sigRV_phot)
# compute sigRV_eff
sigRV_eff = np.sqrt(RVnoisefloor**2 + \
sigRV_phot**2 + \
sigRV_act**2 + \
sigRV_planet**2)
# get target K measurement uncertainty
mp = float(_get_planet_mass(rp)) if mp == 0 else mp
K, sigK_target = _get_sigK(Kdetsig, P, Ms, mp)
# compute number of RVs required for a white and red noise model
print('Computing nRVs...')
nRV = 2. * (sigRV_eff / sigK_target)**2
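    # White-noise scaling sketch (illustrative numbers, not defaults):
    # sigRV_eff = 2 m/s and sigK_target = 0.5 m/s give
    # nRV = 2*(2/0.5)**2 = 32 measurements.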
if runGP:
nRVGPs = np.zeros(NGPtrials)
for i in range(NGPtrials):
aGP = sigRV_act if sigRV_act != 0 else sigRV_eff
lambdaGP = Prot * (3 + np.random.randn() * .1)
GammaGP = 2 + np.random.randn() * .1
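            # GP hyperparameters: activity amplitude aGP, an evolutionary
            # timescale ~3x the rotation period, coherence Gamma ~2, and the
            # stellar rotation period itself (values jittered by ~10%).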
GPtheta = aGP, lambdaGP, GammaGP, Prot, sigRV_planet
keptheta = P, K
nRVGPs[i] = compute_nRV_GP(GPtheta, keptheta, sigRV_phot,
sigK_target, duration=duration)
nRVGP = np.median(nRVGPs)
else:
nRVGP = 0.
# compute total observing time in hours
tobs = nRV * (texp + toverhead) / 60.
tobsGP = nRVGP * (texp + toverhead) / 60.
# is SNRtarget set?
try:
_ = SNRtarget
except NameError:
SNRtarget = np.nan
# write results to file
NGPtrials = int(NGPtrials) if runGP else 0
output_arr = [P, rp, mp, K,
mags, Ms, Rs, Teff, Z, vsini, Prot,
band_strs, R, aperture, throughput, RVnoisefloor,
centralwl_nm*1e-3, maxtelluric, toverhead, texp,
SNRtarget, sigRV_phot, sigRV_act, sigRV_planet, sigRV_eff,
sigK_target, nRV, nRVGP, NGPtrials, tobs, tobsGP]
self = RVFC(output_fname, output_arr)
##_write_results2file(output_fname, output)
##create_pdf(output_fname, output)
if verbose_results:
_print_results(output_arr)
return self
def _read_planet_input(input_planet_fname):
'''
Read-in planetary data from the input file.
'''
f = open('%s'%input_planet_fname, 'r')
g = f.readlines()
f.close()
return float(g[3]), float(g[5]), float(g[7])
def _read_star_input(input_star_fname):
'''
Read-in stellar data from the input file.
'''
f = open('%s'%input_star_fname, 'r')
g = f.readlines()
f.close()
return float(g[3]), float(g[5]), float(g[7]), float(g[9]), \
float(g[11]), float(g[13]), float(g[15])
def _read_spectrograph_input(input_spectrograph_fname):
'''
Read-in spectrograph data from the input file.
'''
f = open('%s'%input_spectrograph_fname, 'r')
g = f.readlines()
f.close()
return float(g[3])*1e-3, float(g[5])*1e-3, float(g[7]), \
float(g[9]), float(g[11]), float(g[13]), float(g[15]), float(g[17])
def _read_sigRV_input(input_sigRV_fname):
'''
Read-in RV noise source data from the input file.
'''
f = open('%s'%input_sigRV_fname, 'r')
g = f.readlines()
f.close()
return float(g[3]), float(g[5]), float(g[7]), float(g[9]), float(g[11])
def _get_spectral_bands(wlmin, wlmax):
band_strs = np.array(['U','B','V','R','I','Y','J','H','K'])
wlcens = np.array([.3531, .4430, .5537, .694, .8781, 1.0259, 1.2545,
1.631, 2.1498])
wlwidth = np.array([.0657, .0973, .089, .207, .2316, .1084, .1548,
.2886, .3209])
# define boundaries
tolerance = .0
lower_bnds, upper_bnds = (wlcens - wlwidth) * (1-tolerance), \
(wlcens + wlwidth) * (1+tolerance)
# get bands
bnds = np.append(np.where(abs(lower_bnds-wlmin) == \
np.min(abs(lower_bnds-wlmin))),
np.where(abs(upper_bnds-wlmax) == \
np.min(abs(upper_bnds-wlmax))))
# expand if necessary
if (lower_bnds[bnds[0]] > wlmin) and (bnds[0] != 0):
bnds[0] -= 1
if (upper_bnds[bnds[1]] < wlmax) and (bnds[1] != band_strs.size-1):
bnds[1] += 1
inds = np.arange(bnds.min(), bnds.max()+1)
if inds.size == 0:
raise ValueError('No spectral bands found.')
return band_strs[inds]
def _compute_logg(Ms, Rs):
'''
Compute stellar logg in cgs units.
'''
Ms, Rs = rvs.Msun2kg(Ms), rvs.Rsun2m(Rs)
return unp.log10(G * Ms / Rs**2 * 1e2)
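# The factor of 1e2 converts g from m/s^2 to cm/s^2, giving logg in the cgs
# convention expected by the PHOENIX model grids used below.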
def _compute_sigRV_phot(band_strs, mags, Teff, logg, Z, vsini, texp, R,
aperture, throughput, RVnoisefloor, centralwl_nm,
transmission_threshold, wl_telluric, trans_telluric):
'''
Calculate the photon-noise limited RV precision over the spectrograph's
full spectral domain.
'''
# get round values for PHOENIX stellar models
Teffs = np.append(np.arange(23e2,7e3,1e2), np.arange(7e3,121e2,2e2))
Teff_round = Teffs[abs(Teffs-Teff) == np.min(abs(Teffs-Teff))][0]
loggs = np.arange(0, 6.1, .5)
logg_round = loggs[abs(loggs-logg) == np.min(abs(loggs-logg))][0]
Zs = np.append(np.arange(-4,-1,dtype=float), np.arange(-1.5,1.5,.5))
Z_round = Zs[abs(Zs-Z) == np.min(abs(Zs-Z))][0]
# compute sigmaRV in each band for a fixed texp
sigmaRVs, SNRtargets = np.zeros(mags.size), np.zeros(mags.size)
for i in range(sigmaRVs.size):
t0 = time.time()
SNRtargets[i] = get_snr(mags[i], band_strs[i], texp, aperture, throughput, R)
wl, spec = get_reduced_spectrum(Teff_round, logg_round, Z_round, vsini,
band_strs[i], R, centralwl_nm*1e-3,
SNRtargets[i])
sigmaRVs[i] = compute_sigmaRV(wl, spec, mags[i], band_strs[i], texp,
aperture, throughput, R,
transmission_threshold, wl_telluric,
trans_telluric, SNRtargets[i])
print('Took %.1f seconds\n'%(time.time()-t0))
# compute SNRtarget
SNRtarget = SNRtargets.mean()
# compute sigmaRV over all bands
sigRV_phot = 1 / np.sqrt(np.sum(1. / sigmaRVs**2))
sigRV_phot = sigRV_phot if sigRV_phot > RVnoisefloor \
else float(RVnoisefloor)
return SNRtarget, sigRV_phot
def _get_planet_mass(rps, Fs=336.5):
'''
    '''
    Estimate planet masses (Earth masses) from radii (Earth radii) and incident
    flux using piecewise mass-radius relations for rocky, sub-Neptune, Neptune
    and giant regimes.
    '''
assert rps.size == Fs.size
# isolate different regimes
Fs = Fs*1367*1e7*1e-4 # erg/s/cm^2
rocky = rps < 1.5
small = (1.5 <= rps) & (rps < 4)
neptune = (4 <= rps) & (rps < 13.668)
giant = rps >= 13.668
# compute mean mass in each regime
mps = np.zeros(rps.size)
mps[rocky] = .44*rps[rocky]**3 + .614*rps[rocky]**4
mps[small] = 2.69 * rps[small]**(.93)
mps[neptune] = (rps[neptune]*Fs[neptune]**.03 / 1.78)**(1/.53)
mps[giant] = np.random.uniform(150,2e3)
return mps
def _get_sigK(Kdetsig, P, Ms, mp):
'''
Compute the desired semi-amplitude detection measurement uncertainty.
'''
K = rvs.RV_K(P, Ms, mp)
return K, K / float(Kdetsig)
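# Hand-computed example: a 3-sigma detection (Kdetsig=3) of K = 3 m/s requires
# a target semi-amplitude uncertainty of sigK = 1 m/s.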
def _get_magnitudes(band_strs, mags, Ms):
# Use isochrone colours to compute mags in each band of interest
# solar metallicity at a fixed age of 10^9 yrs
MU, MB, MV, MR, MI, MY, MJ, MH, MK = _get_absolute_stellar_magnitudes(Ms)
# only consider V or J as reference bands. could use more...
assert ('V' in band_strs) or ('J' in band_strs)
if 'V' in band_strs:
ref_band, ref_mag, ref_absmag = 'V', mags[band_strs == 'V'], MV
else:
ref_band, ref_mag, ref_absmag = 'J', mags[band_strs == 'J'], MJ
Bmag = MB - ref_absmag + ref_mag
Vmag = MV - ref_absmag + ref_mag
return Bmag, Vmag
def _get_absolute_stellar_magnitudes(Ms):
'''
Get the absolute magnitudes of a star with a given stellar mass at a
given age using the isochrones from 2005A&A...436..895G
Parameters
----------
`Ms': scalar
The stellar mass in MSun
Returns
-------
`mags': numpy.array
The absolute magnitudes of the star
'''
# Appoximate MS lifetime to see at what age to obtain colours
logtMS_yrs = np.log10(1e10 * (1./Ms)**(2.5))
logage = round(logtMS_yrs*.77 / 5e-2) * 5e-2 # get ~3/4 through MS
# First set of isochrones (ubvri)
logages,Mss,Mus,Mbs,Mvs,Mrs,Mis,Mjs,Mhs,Mks = \
np.loadtxt('/Users/ryancloutier/Research/RVInformation/RVFollowupCalculator/InputData/isoc_z019_ubvrijhk.dat',
usecols=(0,1,7,8,9,10,11,12,13,14)).T
g = abs(logages-logage) == np.min(abs(logages-logage))
Mss,Mus,Mbs,Mvs,Mrs,Mis,Mjs,Mhs,Mks = Mss[g],Mus[g],Mbs[g],Mvs[g],Mrs[g], \
Mis[g],Mjs[g],Mhs[g],Mks[g]
g = abs(Mss-Ms) == np.min(abs(Mss-Ms))
if g.sum() > 1:
g = np.where(g)[0][0]
Mu,Mb,Mv,Mr,Mi,Mj,Mh,Mk = Mus[g],Mbs[g],Mvs[g],Mrs[g],Mis[g],Mjs[g], \
Mhs[g],Mks[g]
# Second set of isochrones (ZYJHK)
logages2,Mss2,MZs,MYs,MJs,MHs,MKs = \
np.loadtxt('/Users/ryancloutier/Research/RVInformation/RVFollowupCalculator/InputData/isoc_z019_ZYJHK.dat',
usecols=(0,1,7,8,9,10,11)).T
g = abs(logages2-logage) == np.min(abs(logages2-logage))
Mss2,MZs,MYs,MJs,MHs,MKs = Mss2[g],MZs[g],MYs[g],MJs[g],MHs[g],MKs[g]
g = abs(Mss2-Ms) == np.min(abs(Mss2-Ms))
if g.sum() > 1:
g = np.where(g)[0][0]
MZ,MY,MJ,MH,MK = MZs[g],MYs[g],MJs[g],MHs[g],MKs[g]
return Mu, Mb, Mv, Mr, Mi, MY, MJ, MH, MK
def _scale_sigmaRV_phot(sigRV_phot, texp_old, texp_new):
return sigRV_phot * np.sqrt(float(texp_old) / texp_new)
def _write_results2file(output_fname, magiclistofstuff2write):
'''
Write the resulting parameters to a .dat file.
'''
# create header with indices
maglabels = ''
for i in range(magiclistofstuff2write[3].size):
maglabels += '%s magnitude\n'%magiclistofstuff2write[9][i]
hdr = 'Orbital period (days)\nPlanetary radius (Earth radii)\nPlanetary mass(Earth masses)\nRV semi-amplitude(m/s)\n%sStellar mass (Solar masses)\nStellar Radius (Solar radii)\nEffective temperature (K)\n[Fe/H] (Solar units)\nProjected rotation velocity (km/s)\nRotation period (days)\nSpectral resolution\nAperture (meters)\nThroughput\nRV noise floor (m/s)\nReference wavelength (microns)\nMaximum fractional telluric absorption\nMinimum exposure time (minutes)\nMaximum exposure time (minutes)\nOverhead (minutes)\nPhoton noise limited RV (m/s)\nRV activity rms (m/s)\nAdditional planet RV rms (m/s)\nEffective RV rms (m/s)\nTarget K measurement uncertainty (m/s)\nExposure time (minutes)\nNumber of RV measurements\nNumber of RV measurements with GP\nTotal observing time (hours)\nTotal observing time with GP (hours)'%maglabels
hdr, hdrv2 = hdr.split('\n'), ''
for i in range(len(hdr)):
hdrv2 += '# %i: %s\n'%(i, hdr[i])
g = hdrv2
for i in range(len(magiclistofstuff2write)):
if i == 3:
for j in range(magiclistofstuff2write[i].size):
g += '%.4e\n'%magiclistofstuff2write[i][j]
elif i == 9:
pass
else:
g += '%.4e\n'%magiclistofstuff2write[i]
# write .dat file
try:
os.mkdir('Results')
except OSError:
pass
f = open('Results/%s.dat'%output_fname, 'w')
f.write(g)
f.close()
def _print_results(output, output_fname=''):
# get data
P, rp, mp, K, mags, Ms, Rs, Teff, Z, vsini, Prot, band_strs, R, aperture, throughput, RVnoisefloor, centralwl_microns, maxtelluric, toverhead, texp, SNRtarget, sigRV_phot, sigRV_act, sigRV_planet, sigRV_eff, sigK_target, nRV, nRVGP, NGPtrials, tobs, tobsGP = output
mags = np.array(['%.3f'%m for m in mags])
# get string to print
g = '\n' + '#'*50
g += '\n#\tPlanet parameters:\n'
g += '# P = %.3f days\n# rp = %.2f REarth\n# mp = %.2f MEarth\n# K = %.2f m/s'%(P,rp,mp,K)
g += '\n\n#\tStellar parameters:\n'
g += '# mags = %s (%s)\n# Ms = %.2f MSun\n# Rs = %.2f RSun\n# Teff = %i K\n# vsini = %.1f km/s'%(', '.join(mags.astype(str)),''.join(band_strs),Ms,Rs,Teff,vsini)
g += '\n\n#\tSpectrograph parameters:\n'
g += '# R = %i\n# Aperture = %.1f m\n# Throughput = %.2f\n# Noise floor = %.2f m/s'%(R,aperture,throughput,RVnoisefloor)
g += '\n\n#\tRV noise parameters:\n'
g += '# texp = %.1f min\n# toverhead = %.1f min\n# SNRtarget = %.1f \n# sigRV_photon = %.2f m/s\n# sigRV_activity = %.2f m/s\n# sigRV_planets = %.2f m/s\n# sigRV_eff = %.2f m/s'%(texp,toverhead,SNRtarget,sigRV_phot,sigRV_act,sigRV_planet,sigRV_eff)
g += '\n' + '#'*50
g += '\n\n#\tResults: (NGPtrials = %i)\n'%NGPtrials
g += '# Desired sigK = %.2f m/s (%.1f sigma K detection)\n# Nrv = %.1f\n# tobs = %.2f hours\n# tobs = %.2f nights\n# Nrv_GP = %.1f\n# tobs_GP = %.2f hours\n# tobs_GP = %.2f nights\n'%(sigK_target,K/sigK_target,nRV,tobs,tobs/7.,nRVGP,tobsGP,tobsGP/7.)
print(g)
# save text file if desired
if output_fname != '':
h = open('Results/%s.txt'%output_fname, 'w')
h.write(g)
h.close()
def _save_results(output):
# get data
P, rp, mp, K, \
mags, Ms, Rs, Teff, Z, vsini, Prot, \
band_strs, R, aperture, throughput, RVnoisefloor, \
centralwl_microns, maxtelluric, toverhead, \
    texp, SNRtarget, sigRV_phot, sigRV_act, sigRV_planet, sigRV_eff, \
sigK_target, nRV, nRVGP, NGPtrials, tobs, tobsGP = output
# save mags to csv file first
tocsv = np.zeros(9, dtype='str')
all_band_strs = np.array(['U','B','V','R','I','Y','J','H','K'])
tocsv[np.in1d(all_band_strs, band_strs)] = mags
tocsv = ','.join(tocsv.astype(str))
# add remaining parameters
tocsv = tocsv + ',' + ','.join(np.array(output[:4]).astype(str))
tocsv = tocsv + ',' + ','.join(np.array(output[5:11]).astype(str))
tocsv = tocsv + ',' + ','.join(np.array(output[12:]).astype(str))
# save to csv file for uploading into the RVFC for repeated calculations
fname = ('%.6f_%.6f'%(time.time(), np.random.rand())).replace('.','d') + '.csv'
f = open(fname, 'w')
f.write(tocsv)
f.close()
class RVFC:
def __init__(self, output_fname, params):
self.output_fname = output_fname
assert len(params) == 31
self.P, self.rp, self.mp, self.K = params[:4]
self.mags, self.band_strs = params[4], params[11]
self.Ms, self.Rs, self.Teff, self.Z, self.vsini, self.Prot = params[5:11]
self.R, self.aperture, self.throughput, self.RVfloor, self.wl_cen, self.maxtelluric = params[12:18]
self.toverhead, self.texp = np.array(params[18:20])*60 # min -> sec
self.sigRV_phot, self.sigRV_act, self.sigRV_planets, self.sigRV_eff = params[21:25]
self.sigK_target, self.nRV, self.nRVGP, self.NGtrials, self.tobs, self.tobsGP = params[25:]
self._pickleobject()
def _pickleobject(self):
fObj = open(self.output_fname, 'wb')
pickle.dump(self, fObj)
fObj.close()
def loadpickle(fname):
fObj = open(fname, 'rb')
self = pickle.load(fObj)
fObj.close()
return self
| [
"[email protected]"
] | |
9d125524877cbebcf436f96886124726b5d7b8dc | f89be43b609ba7adbd810d9e6be9a86535a5534b | /backend/manage.py | 3e49221ec710bb29434a7e4c802f8bcf2662d32c | [] | no_license | crowdbotics-apps/job10-1-21205 | 1bbe9e590bb56f0d9b79dd880fdc09eddb35365a | 03fecd5de5044b5cebd251db956fe488a2c5ddae | refs/heads/master | 2022-12-24T17:43:08.725348 | 2020-10-07T02:34:28 | 2020-10-07T02:34:28 | 301,907,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'job10_1_21205.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
3a66b93859d7e2856ed8ab9d2d6816eb7ec770a5 | 6437a3a4a31ab9ad233d6b2d985beb50ed50de23 | /PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/sympy/core/singleton.py | 5c6a0cf0ef5674d9b1f789df47abc4b97246a080 | [] | no_license | sreyemnayr/jss-lost-mode-app | 03ddc472decde3c17a11294d8ee48b02f83b71e7 | 3ff4ba6fb13f4f3a4a98bfc824eace137f6aabaa | refs/heads/master | 2021-05-02T08:50:10.580091 | 2018-02-08T20:32:29 | 2018-02-08T20:32:29 | 120,813,623 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,396 | py | #\input texinfo
from __future__ import print_function, division
from .core import Registry
from .assumptions import ManagedProperties
from .sympify import sympify
class SingletonRegistry(Registry):
"""
A map between singleton classes and the corresponding instances.
E.g. S.Exp == C.Exp()
"""
__slots__ = []
__call__ = staticmethod(sympify)
def __repr__(self):
return "S"
S = SingletonRegistry()
class Singleton(ManagedProperties):
"""
Metaclass for singleton classes.
A singleton class has only one instance which is returned every time the
class is instantiated. Additionally, this instance can be accessed through
the global registry object S as S.<class_name>.
Examples
========
>>> from sympy import S, Basic
>>> from sympy.core.singleton import Singleton
>>> from sympy.core.compatibility import with_metaclass
>>> class MySingleton(with_metaclass(Singleton, Basic)):
... pass
>>> Basic() is Basic()
False
>>> MySingleton() is MySingleton()
True
>>> S.MySingleton is MySingleton()
True
** Developer notes **
The class is instantiated immediately at the point where it is defined
by calling cls.__new__(cls). This instance is cached and cls.__new__ is
rebound to return it directly.
The original constructor is also cached to allow subclasses to access it
and have their own instance.
"""
def __init__(cls, name, bases, dict_):
super(Singleton, cls).__init__(cls, name, bases, dict_)
for ancestor in cls.mro():
if '__new__' in ancestor.__dict__:
break
if isinstance(ancestor, Singleton) and ancestor is not cls:
ctor = ancestor._new_instance
else:
ctor = cls.__new__
cls._new_instance = staticmethod(ctor)
the_instance = ctor(cls)
def __new__(cls):
return the_instance
cls.__new__ = staticmethod(__new__)
setattr(S, name, the_instance)
# Inject pickling support.
def __getnewargs__(self):
return ()
cls.__getnewargs__ = __getnewargs__
| [
"[email protected]"
] | |
c9d96c82ef607caa0e93a5186ec8e99a4d87c8f7 | 22279487bee5c983c13887ba11e6a4cd40e8bbe3 | /PreprocessData/all_class_files/MediaObject.py | 1b3bdb85b4d8431bc6d31f3557fa26747e9361d0 | [
"MIT"
] | permissive | DylanNEU/Schema | 018c9f683c683068422ed7b6392dcebd4ab4d4cd | 4854720a15894dd814691a55e03329ecbbb6f558 | refs/heads/main | 2023-08-30T01:50:20.541634 | 2021-11-01T15:30:41 | 2021-11-01T15:30:41 | 425,238,713 | 1 | 0 | MIT | 2021-11-06T12:29:12 | 2021-11-06T12:29:11 | null | UTF-8 | Python | false | false | 6,727 | py | from PreprocessData.all_class_files.CreativeWork import CreativeWork
import global_data
class MediaObject(CreativeWork):
def __init__(self, additionalType=None, alternateName=None, description=None, disambiguatingDescription=None, identifier=None, image=None, mainEntityOfPage=None, name=None, potentialAction=None, sameAs=None, url=None, about=None, accessMode=None, accessModeSufficient=None, accessibilityAPI=None, accessibilityControl=None, accessibilityFeature=None, accessibilityHazard=None, accessibilitySummary=None, accountablePerson=None, aggregateRating=None, alternativeHeadline=None, associatedMedia=None, audience=None, audio=None, author=None, award=None, character=None, citation=None, comment=None, commentCount=None, contentLocation=None, contentRating=None, contributor=None, copyrightHolder=None, copyrightYear=None, creator=None, dateCreated=None, dateModified=None, datePublished=None, discussionUrl=None, editor=None, educationalAlignment=None, educationalUse=None, encoding=None, encodingFormat=None, exampleOfWork=None, expires=None, funder=None, genre=None, hasPart=None, headline=None, inLanguage=None, interactionStatistic=None, interactivityType=None, isAccessibleForFree=None, isBasedOn=None, isFamilyFriendly=None, isPartOf=None, keywords=None, learningResourceType=None, license=None, locationCreated=None, mainEntity=None, material=None, mentions=None, offers=None, position=None, producer=None, provider=None, publication=None, publisher=None, publishingPrinciples=None, recordedAt=None, releasedEvent=None, review=None, schemaVersion=None, sourceOrganization=None, spatialCoverage=None, sponsor=None, temporalCoverage=None, text=None, thumbnailUrl=None, timeRequired=None, translator=None, typicalAgeRange=None, version=None, video=None, workExample=None, associatedArticle=None, bitrate=None, contentSize=None, contentUrl=None, duration=None, embedUrl=None, encodesCreativeWork=None, height=None, playerType=None, productionCompany=None, regionsAllowed=None, requiresSubscription=None, uploadDate=None, width=None):
CreativeWork.__init__(self, additionalType, alternateName, description, disambiguatingDescription, identifier, image, mainEntityOfPage, name, potentialAction, sameAs, url, about, accessMode, accessModeSufficient, accessibilityAPI, accessibilityControl, accessibilityFeature, accessibilityHazard, accessibilitySummary, accountablePerson, aggregateRating, alternativeHeadline, associatedMedia, audience, audio, author, award, character, citation, comment, commentCount, contentLocation, contentRating, contributor, copyrightHolder, copyrightYear, creator, dateCreated, dateModified, datePublished, discussionUrl, editor, educationalAlignment, educationalUse, encoding, encodingFormat, exampleOfWork, expires, funder, genre, hasPart, headline, inLanguage, interactionStatistic, interactivityType, isAccessibleForFree, isBasedOn, isFamilyFriendly, isPartOf, keywords, learningResourceType, license, locationCreated, mainEntity, material, mentions, offers, position, producer, provider, publication, publisher, publishingPrinciples, recordedAt, releasedEvent, review, schemaVersion, sourceOrganization, spatialCoverage, sponsor, temporalCoverage, text, thumbnailUrl, timeRequired, translator, typicalAgeRange, version, video, workExample)
self.associatedArticle = associatedArticle
self.bitrate = bitrate
self.contentSize = contentSize
self.contentUrl = contentUrl
self.duration = duration
self.embedUrl = embedUrl
self.encodesCreativeWork = encodesCreativeWork
self.encodingFormat = encodingFormat
self.height = height
self.playerType = playerType
self.productionCompany = productionCompany
self.regionsAllowed = regionsAllowed
self.requiresSubscription = requiresSubscription
self.uploadDate = uploadDate
self.width = width
def set_associatedArticle(self, associatedArticle):
self.associatedArticle = associatedArticle
def get_associatedArticle(self):
return self.associatedArticle
def set_bitrate(self, bitrate):
self.bitrate = bitrate
def get_bitrate(self):
return self.bitrate
def set_contentSize(self, contentSize):
self.contentSize = contentSize
def get_contentSize(self):
return self.contentSize
def set_contentUrl(self, contentUrl):
self.contentUrl = contentUrl
def get_contentUrl(self):
return self.contentUrl
def set_duration(self, duration):
self.duration = duration
def get_duration(self):
return self.duration
def set_embedUrl(self, embedUrl):
self.embedUrl = embedUrl
def get_embedUrl(self):
return self.embedUrl
def set_encodesCreativeWork(self, encodesCreativeWork):
self.encodesCreativeWork = encodesCreativeWork
def get_encodesCreativeWork(self):
return self.encodesCreativeWork
def set_encodingFormat(self, encodingFormat):
self.encodingFormat = encodingFormat
def get_encodingFormat(self):
return self.encodingFormat
def set_height(self, height):
self.height = height
def get_height(self):
return self.height
def set_playerType(self, playerType):
self.playerType = playerType
def get_playerType(self):
return self.playerType
def set_productionCompany(self, productionCompany):
self.productionCompany = productionCompany
def get_productionCompany(self):
return self.productionCompany
def set_regionsAllowed(self, regionsAllowed):
self.regionsAllowed = regionsAllowed
def get_regionsAllowed(self):
return self.regionsAllowed
def set_requiresSubscription(self, requiresSubscription):
self.requiresSubscription = requiresSubscription
def get_requiresSubscription(self):
return self.requiresSubscription
def set_uploadDate(self, uploadDate):
self.uploadDate = uploadDate
def get_uploadDate(self):
return self.uploadDate
def set_width(self, width):
self.width = width
def get_width(self):
return self.width
def __setattr__(self, key, value_list):
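        # None values and the bookkeeping key 'node_id' bypass validation;
        # otherwise every element's type must appear in global_data's table
        # for this attribute.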
if type(value_list).__name__ == "NoneType" or key == "node_id":
self.__dict__[key] = value_list
return
for value in value_list:
str_value = type(value).__name__
if str_value not in global_data.get_table()[key]:
raise ValueError("Illegal type!")
self.__dict__[key] = value_list
| [
"[email protected]"
] | |
860f69580f038a5ee95e9bb3716a5de3706cd5e9 | 22b93005b05aa4cbfa6287c42e07244b9bf83be9 | /examples/evaluation/evaluate_on_binary_classifier.py | 401e318d6e536120fd59d628d4797c48f373b8c1 | [
"Apache-2.0"
] | permissive | dbczumar/mlflow | 63ede1f21966def17ded0da9c8e92a207b34b90d | e293a73b510c924cbca50b6337b6d6f9fd9f8f1b | refs/heads/master | 2023-08-31T23:40:55.475707 | 2023-07-15T04:22:18 | 2023-07-15T04:22:18 | 138,797,518 | 1 | 3 | Apache-2.0 | 2023-08-23T23:01:08 | 2018-06-26T21:51:19 | Python | UTF-8 | Python | false | false | 1,138 | py | import xgboost
import shap
import mlflow
from mlflow.models import infer_signature
from sklearn.model_selection import train_test_split
# Load the UCI Adult Dataset
X, y = shap.datasets.adult()
# Split the data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
# Fit an XGBoost binary classifier on the training data split
model = xgboost.XGBClassifier().fit(X_train, y_train)
# Infer model signature
predictions = model.predict(X_train)
signature = infer_signature(X_train, predictions)
# Build the Evaluation Dataset from the test set
eval_data = X_test
eval_data["label"] = y_test
with mlflow.start_run() as run:
# Log the XGBoost binary classifier model to MLflow
mlflow.sklearn.log_model(model, "model", signature=signature)
model_uri = mlflow.get_artifact_uri("model")
# Evaluate the logged model
result = mlflow.evaluate(
model_uri,
eval_data,
targets="label",
model_type="classifier",
evaluators=["default"],
)
print(f"metrics:\n{result.metrics}")
print(f"artifacts:\n{result.artifacts}")
| [
"[email protected]"
] | |
86f7dca124eb48e1a49eadf8555a03606c97d20a | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_206/1234.py | 642dcec803933335fa83ea5efbed092fd01d0f03 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,236 | py | import sys
def compute_velocity(destination, horses, case):
horses.sort(reverse=True)
worst_time = (destination - horses[0][0]) / horses[0][1] # time = space / velocity
for horse in horses:
todo = destination - horse[0]
time = todo / horse[1]
if time > worst_time:
worst_time = time
curise_velocity = destination / worst_time
return "Case #" + str(case) + ": " + str(curise_velocity) + "\n"
def solve(input, output):
# Read input
with open(output, "w") as o:
with open(input, "r") as f:
f.readline() # Read number of examples
# Process examples
case = 1
while True:
line = f.readline()
if not line:
break
destination, num_horses = line.split(" ")
horses = []
for i in range(int(num_horses)):
row = f.readline()
pos, velocity = row.split(" ")
horses.append([int(pos), int(velocity)])
o.write(compute_velocity(int(destination), horses, case))
case += 1
if __name__ == '__main__':
solve(sys.argv[1], sys.argv[2])
| [
"[email protected]"
] | |
fa991b25b0077691bcdb0f4aebfdd7fbd9cfffa7 | 247d58856bb5f3ac70d115e78243af24dd9afec1 | /examples/riscv/llbin/macro_verilog_pp.py | 92dc3e6d3b92bbd421a4a9822bcc85546671297b | [] | no_license | greenblat/sv2v | f69d9b9c6312b770aadac7d88dc145cfd4f661ae | 7b69e7675b0f4f04fe7088f4223578c9e9c451d6 | refs/heads/master | 2020-03-28T15:47:29.133374 | 2018-11-14T11:12:10 | 2018-11-14T11:12:10 | 148,626,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,856 | py | #! /usr/bin/python
'''
help: invocation: macro_verilog_pp.py [-h] [-y Dir] [-I Dir] [-o <filename>] [-split [dir]] File1 File2 ....
this script massages verilog files:
it interprets ifdef/elsif/else/endif directives
it includes `include files
it replaces all macro definitions
it computes constant expressions of the form "16-1"
-y Dir       adds a search directory to the includes list
-I Dir       identical to -y
-o Filename  [default: file3.v] the script creates one output file with all substitutions applied
-split [dir] create directory "tmp" where each module gets its own "module".v file; if -split is given a parameter, that is used as the directory name
-h           print these instructions
verilog style params:
+incdir+DIR+
+libext+EXT+
-v LIBFILE
'''
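# Illustrative invocations (the directory and file names are hypothetical):
#   macro_verilog_pp.py -y rtl/include -I ip/include -o merged.v top.v core.v
#   macro_verilog_pp.py +incdir+rtl/include+ +libext+.sv+ -v lib/cells.v -split out top.v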
import os,sys,string
import re
from logs import parse_args
Lines2=[]
Lines3=[]
Defined={}
IncDirs=[]
LibExt = ['.v']
def main():
Params = parse_args()
run_main(Params)
def run_main(Params):
global Lines2,Lines3
Fnames = Params['fnames']
if Fnames==[]:
print __doc__
print 'no files given'
return
if ('-h' in Params):
print __doc__
return
if ('-d' in Params):
Defs = Params['-d']
for Def in Defs:
print 'defined',Def
Defined[Def]=1
if ('-y' in Params):
IncDirs.extend(Params['-y'])
if ('-v' in Params):
Fnames.extend(Params['-v'])
if ('incdir' in Params):
IncDirs.extend(Params['incdir'])
if ('libext' in Params):
LibExt.extend(Params['libext'])
if ('-I' in Params):
IncDirs.extend(Params['-I'])
if ('-o' in Params):
OutFileName = Params['-o'][0]
else:
OutFileName = 'file3.v'
Synopsys = ('-synopsys' in Params)
Lines = []
for Fname in Fnames:
File = open(Fname)
Lines.extend(readfile(File))
File.close()
scan0(Lines)
File2 = open('filex0.v','w')
for line in Lines:
File2.write(line)
File2.close()
scan1(Lines)
File2 = open('filex1.v','w')
for line in Lines2:
File2.write(line)
File2.close()
if (Synopsys):
print 'removing translates'
Lines2= remove_synopsys_on_off(Lines2)
Lines3= remove_synopsys_on_off(Lines3)
File2 = open('file2.v','w')
for line in Lines2:
File2.write(line)
File2.close()
print 'preparing defines...'
Def2=[]
for Key in Defined:
Def2.append((len(Key),Key,Defined[Key]))
Def2.sort()
Def2.reverse()
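    # longest macro names first: use_defines does plain substring replacement
    # on "`"+name, so e.g. `WIDTH2 must be tried before `WIDTH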
print 'computing...'
dones=0
for i,line in enumerate(Lines3):
line = use_defines(line,Def2)
# linex = compute_constants(line)
linex = line
Lines3[i]=linex
# print line,linex
dones += 1
if (dones % 10000)==0:
print '%d lines'%dones
print 'done computing...'
File3 = open(OutFileName,'w')
Lines4=[]
for line in Lines3:
File3.write(line)
wrds = string.split(line)
if len(wrds)!=0:
Lines4.append(line)
File3.close()
print 'lines in the end %d'%len(Lines3)
if ('-split' in Params):
Dir = Params['-split'][0]
if (Dir=='y'):
Dir='tmp'
do_the_split(Lines4,Dir)
File4 = open('file4.v','w')
state='idle'
for line in Lines3:
if len(line)==0:
pass
elif line[-1]=='\n':
line=line[:-1]
if '//' in line:
line = line[:line.index('//')]
if ('/*' in line)and('*/' in line):
ok=True
ind0 = line.index('/*')
ind1 = line.index('*/')
while ok:
if ind0>ind1: ok=False
if ind0<ind1:
part1 = line[:line.index('/*')]
part2 = line[line.index('*/')+2:]
line=part1+part2
ok = ('/*' in line)and('*/' in line)
if ok:
ind0 = line.index('/*')
ind1 = line.index('*/')
if state=='idle':
if ('/*' in line):
line=line[:line.index('/*')]
state='work'
elif state=='work':
if ('*/' in line):
line=line[line.index('*/')+2:]
state='idle'
else:
line=''
wrds = string.split(line)
if len(wrds)>0:
File4.write('%s\n'%line)
File4.close()
Pat0 = re.compile('[{:\-+ \[][0-9]+ *[\*] *[0-9]+')
Pat1 = re.compile('[{:\-+ \[][0-9]+ *[+-] *[0-9]+')
Pat2 = re.compile('\( *[0-9]+ *\)')
Pat3 = re.compile(' 0[0-9]+')
Pat4 = re.compile('[-+*/]0[0-9]+')
def stupid1(Str):
    # strip leading zeros so eval() does not treat literals like "012" as octal
    while (len(Str)>1)and(Str[0]=='0')and(Str[1] in '0123456789'):
        Str = Str[1:]
    Mtch = Pat3.search(Str)
    while (Mtch):
        (A,B) = Mtch.span()
        P1 = Str[A:B]
        Bef = Str[:A]
        Aft = Str[B:]
        Str = Bef+P1[0]+P1[2:]+Aft  # keep the separating space, drop the zero
        Mtch = Pat3.search(Str)
    Mtch = Pat4.search(Str)
    while (Mtch):
        (A,B) = Mtch.span()
        P1 = Str[A:B]
        Bef = Str[:A]
        Aft = Str[B:]
        Str = Bef+P1[0]+P1[2:]+Aft  # keep the operator, drop the leading zero
        Mtch = Pat4.search(Str)
    return Str
def computex(Str,Pat,Where):
Mtch = Pat.search(Str)
while (Mtch):
(A,B) = Mtch.span()
if Where in [0,1]:
Bef = Str[:A+1]
Aft = Str[B:]
P1 = Str[A+1:B]
else:
Bef = Str[:A]
Aft = Str[B:]
P1 = Str[A:B]
P1=stupid1(P1)
P1 = string.replace(P1,' ','')
try:
Eval = str(eval(P1))
Str= Bef+Eval+Aft
Mtch = Pat.search(Str)
except:
print 'error! failed %d eval on "%s" "%s" %s'%(Where,P1,Str[A:B],map(ord,list(Str[A:B])))
Mtch = False
return Str
def compute_constants(line):
done = False
while (not done):
done=True
linex = computex(line,Pat0,0)
linex = computex(linex,Pat1,1)
if linex!=line:
linex = computex(linex,Pat2,2)
done = linex==line
line = linex
return line
def use_defines(line,Def2):
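    # Substitute the longest matching macro first and loop until no `NAME is
    # left, so macros whose bodies reference other macros expand transitively.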
dones=True
while ('`' in line)and(dones):
dones =False
i = 0
while i<len(Def2):
(Len,Key,Val)=Def2[i]
if '`'+Key in line:
line = string.replace(line,'`'+Key,Val)
i=len(Def2)+5
dones =True
i +=1
return line
def readfile(File):
Lines = []
num=0
while 1:
line = File.readline()
num+=1
if line=='':
return Lines
if line[-1]==13:
line = line[:-1]+'\n'
if (len(line)>1)and(ord(line[-2])==13):
line = line[:-2]+'\n'
if needs_work(line):
more = work(line)
Lines.extend(more)
else:
Lines.append(line)
return Lines
def work(line):
line = remove_comment1(line)
line = clear_defs(line)
if len(line)<2:
return []
wrds = string.split(line)
if '`ifdef' in wrds:
lines=rework(line,'`ifdef')
return lines
if '`ifndef' in wrds:
lines=rework(line,'`ifndef')
return lines
if '`elsif' in wrds:
lines=rework(line,'`elsif')
return lines
if '`else' in wrds:
lines=rework(line,'`else',False)
return lines
if '`endif' in wrds:
lines=rework(line,'`endif',False)
return lines
if '`include' in wrds:
lines = include_file(wrds[1])
return lines
if line[-1]!='\n':
line=line+'\n'
return [line]
def include_file(Fname):
if Fname[0]=='`': return ['`include "%s"\n'%Fname]
Fname = string.replace(Fname,'"','')
if os.path.exists(Fname):
File = open(Fname)
lines = readfile(File)
return lines
for Dir in IncDirs:
Fname1 = '%s/%s'%(Dir,Fname)
if os.path.exists(Fname1):
return include_file(Fname1)
print 'file "%s" cannot be included'%Fname
return []
def rework(line,Ifdef,Two=True):
wrds = string.split(line)
Ind = wrds.index(Ifdef)
Bef = wrds[:Ind]
if Two:
Aft = wrds[Ind+2:]
This = '%s %s\n'%(wrds[Ind],wrds[Ind+1])
else:
Aft = wrds[Ind+1:]
This = '%s\n'%(Ifdef)
Line0= string.join(Bef,' ')+'\n'
Line2= string.join(Aft,' ')+'\n'
L0 = work(Line0)
L2 = work(Line2)
return L0+[This]+L2
def clear_defs(line):
line = string.replace(line,'`elsif',' `elsif ')
line = string.replace(line,'`ifdef',' `ifdef ')
line = string.replace(line,'`ifndef',' `ifndef ')
line = string.replace(line,'`else',' `else ')
line = string.replace(line,'`endif',' `endif ')
return line
def remove_comment1(line):
if '//' in line:
ind = line.index('//')
return line[:ind]
return line
def needs_work(line):
return ('`ifdef' in line)or('`else' in line)or('`endif' in line)or('`define' in line)or('`ifndef' in line)or('`elsif' in line)or('`include' in line)
def scan0(Lines):
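    # Pass 0 over the raw lines (mirrors the loops below): strip //-comments,
    # join backslash-continued lines, normalize spacing around 'b/'d/'h
    # literals, and blank out /* ... */ block comments.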
ind=0
while ind<len(Lines):
line = Lines[ind]
if '//' in line:
line = line[:line.index('//')]+'\n'
Lines[ind]=line
ind += 1
ind=0
while ind<len(Lines):
line = Lines[ind]
if (len(line)>1)and(line[-2]=='\\'):
line = line[:-2]+Lines[ind+1]
Lines[ind]=line
Lines.pop(ind+1)
else:
ind += 1
ind=0
while ind<len(Lines):
line = Lines[ind]
if (" 'b" in line): line = string.replace(line," 'b","'b")
if (" 'd" in line): line = string.replace(line," 'd","'d")
if (" 'h" in line): line = string.replace(line," 'h","'h")
if ("'b " in line): line = string.replace(line,"'b ","'b")
if ("'d " in line): line = string.replace(line,"'d ","'d")
if ("'h " in line): line = string.replace(line,"'h ","'h")
Lines[ind]=line
ind += 1
state = 'idle'
ind=0
while ind<len(Lines):
line = Lines[ind]
if state=='idle':
if '/*' in line:
Bef = line[:line.index('/*')]
Aft = line[line.index('/*')+2:]
if '*/' in Aft:
Aft2 = Aft[Aft.index('*/')+2:]
line = Bef+Aft2
Lines[ind]=line
else:
Lines[ind]=Bef
state = 'inside'
ind += 1
else:
ind += 1
elif state=='inside':
if '*/' in line:
line = line[line.index('*/')+2:]
Lines[ind]=line
state='idle'
else:
Lines[ind]=''
ind += 1
def scan1(Lines):
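    # State machine sketch (inferred from the transitions below):
    #   idle         - outside any conditional: record `define, emit lines, expand `include
    #   ifdef_true   - condition held: emit lines until `else / `elsif / `endif
    #   ifdef_false  - condition failed: skip lines; `else or a matching `elsif re-activates
    #   active_endif - the `else branch of a failed `ifdef: emit lines until `endif
    #   wait_endif   - a branch was already taken: skip everything until `endif
    # Nested conditionals push the current state onto `queue` and pop it on `endif.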
state='idle'
queue=[]
indx=0
while indx<len(Lines):
line = Lines[indx]
indx+=1
# print '>>',indx,state,line,
wrds = string.split(line)
if state=='idle':
if ('`define' in line):
Defined[wrds[1]]=string.join(wrds[2:],' ')
Lines2.append(line)
elif ('`ifndef' in line):
if wrds[1] in Defined:
state='ifdef_false'
else:
state='ifdef_true'
elif ('`ifdef' in line):
if wrds[1] in Defined:
state='ifdef_true'
else:
state='ifdef_false'
elif ('`include' in line):
Def2=[]
for Key in Defined:
Def2.append((len(Key),Key,Defined[Key]))
Def2.sort()
Def2.reverse()
wrds1 = use_defines(wrds[1],Def2)
lines = include_file(wrds1)
                Lines = Lines[:indx-1]+lines+Lines[indx:]  # splice in place of the `include line itself
                indx -= 1  # re-scan from the first included line
elif needs_work(line):
print 'error! kind=1',state,indx,line,
else:
Lines2.append(line)
Lines3.append(line)
elif state=='ifdef_true':
if ('`define' in line):
Defined[wrds[1]]=string.join(wrds[2:],' ')
Lines2.append(line)
elif ('`ifdef' in line):
queue = [state]+queue
if wrds[1] in Defined:
state='ifdef_true'
else:
state='ifdef_false'
elif ('`ifndef' in line):
queue = [state]+queue
if wrds[1] in Defined:
state='ifdef_false'
else:
state='ifdef_true'
elif ('`else' in line):
state='wait_endif'
elif ('`elsif' in line):
state='wait_endif'
elif ('`endif' in line):
if queue==[]:
state='idle'
else:
state=queue.pop(0)
elif needs_work(line):
print 'error! kind=2',state,line,
else:
Lines2.append(line)
Lines3.append(line)
elif state=='ifdef_false':
if ('`else' in line):
state='active_endif'
elif ('`elsif' in line):
if wrds[1] in Defined:
state='ifdef_true'
else:
state='ifdef_false'
elif ('`ifdef' in line):
queue = [state]+queue
state='wait_endif'
elif ('`ifndef' in line):
queue = [state]+queue
state='wait_endif'
elif ('`endif' in line):
if queue==[]:
state='idle'
else:
state=queue.pop(0)
elif ('`define' in line):
pass
elif ('`include' in line):
pass
elif needs_work(line):
print 'error! kind=3',state,line,
elif state=='active_endif':
if ('`define' in line):
Defined[wrds[1]]=string.join(wrds[2:],' ')
Lines2.append(line)
elif ('`ifdef' in line):
queue = [state]+queue
if wrds[1] in Defined:
state='ifdef_true'
else:
state='ifdef_false'
elif ('`ifndef' in line):
queue = [state]+queue
if wrds[1] in Defined:
state='ifdef_false'
else:
state='ifdef_true'
elif ('`else' in line):
state='wait_endif'
elif ('`elsif' in line):
state='wait_endif'
elif ('`endif' in line):
if queue==[]:
state='idle'
else:
state=queue.pop(0)
elif ('`include' in line):
Def2=[]
for Key in Defined:
Def2.append((len(Key),Key,Defined[Key]))
Def2.sort()
Def2.reverse()
wrds1 = use_defines(wrds[1],Def2)
lines = include_file(wrds1)
                Lines = Lines[:indx-1]+lines+Lines[indx:]  # splice in place of the `include line itself
                indx -= 1  # re-scan from the first included line
elif needs_work(line):
print 'error! kind=4',state,line,
else:
Lines2.append(line)
Lines3.append(line)
elif state=='wait_endif':
if ('`ifdef' in line):
queue = [state]+queue
state='wait_endif'
elif ('`ifndef' in line):
queue = [state]+queue
state='wait_endif'
elif ('`endif' in line):
if queue==[]:
state='idle'
else:
state=queue.pop(0)
elif ('`else' in line):
pass
elif ('`elsif' in line):
pass
elif ('`define' in line):
pass
elif ('`include' in line):
pass
elif needs_work(line):
print 'error! kind=5',state,line,
def do_the_split(wholelib,dir='tmp'):
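    # Write one <dir>/<name>.v file per module/primitive/interface/package
    # found in the flattened source (dir defaults to "tmp").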
modules=[]
state=0
if not os.path.exists(dir):
os.system('mkdir %s'%dir)
for ind,line1 in enumerate(wholelib):
line = fix_stupid_problems(line1)
wrds = string.split(line)
if (state==0):
if (len(wrds)>0)and(wrds[0] in ['module','primitive','interface','package']):
line2 = string.replace(line,';',' ; ')
line2 = string.replace(line2,'(',' ( ')
wrds = string.split(line2)
if len(wrds)==1:
line2 = wholelib[ind+1]
line2 = string.replace(line2,';',' ; ')
line2 = string.replace(line2,'(',' ( ')
wrds = string.split(line2)
Module = wrds[0]
File=open(dir+'/'+Module+'.v','w')
modules = [Module]+modules
File.write(line)
print 'opening ',Module
state=1
elif (state==1):
if (len(wrds)>0)and(has_end(wrds[0])):
File.write(line)
File.close()
state=0
else:
File.write(line)
def has_end(word):
if word in ['endmodule','endprimitive','endinterface','endpackage']:
return 1
x =string.find(word,'//')
if (x>0):
word = word[0:x]
return has_end(word)
x =string.find(word,'/*')
if (x>0):
word = word[0:x]
return has_end(word)
return 0
def fix_stupid_problems(inline):
for pattern in ["'b ","'h ","'d "]:
ind = 1
while (ind>0):
ind = string.find(inline,pattern)
if (ind>=0):
inline = inline[0:ind+2]+inline[ind+3:]
return inline
# // synopsys translate_off
# // synopsys translate_on
def remove_synopsys_on_off(Lines):
Linesx = []
state='on'
for line in Lines:
wrds = string.split(line)
if 'synopsys' in wrds:
ind = wrds.index('synopsys')
if (state=='on'):
                if (len(wrds)>ind+1)and(wrds[ind+1]=='translate_off'):
state='off'
else:
Linesx.append(line)
elif (state=='off'):
ind = wrds.index('synopsys')
                if (len(wrds)>ind+1)and(wrds[ind+1]=='translate_on'):
state='on'
elif (state=='on'):
Linesx.append(line)
return Linesx
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
486fee25f44daefb4a45ef3fdc8399fa53757948 | 26d6c34df00a229dc85ad7326de6cb5672be7acc | /msgraph-cli-extensions/beta/teams_beta/azext_teams_beta/vendored_sdks/teams/operations/_chats_tabs_operations.py | 5c7bf927ea10d271873b7c5d09f4c2b248533be7 | [
"MIT"
] | permissive | BrianTJackett/msgraph-cli | 87f92471f68f85e44872939d876b9ff5f0ae6b2c | 78a4b1c73a23b85c070fed2fbca93758733f620e | refs/heads/main | 2023-06-23T21:31:53.306655 | 2021-07-09T07:58:56 | 2021-07-09T07:58:56 | 386,993,555 | 0 | 0 | NOASSERTION | 2021-07-17T16:56:05 | 2021-07-17T16:56:05 | null | UTF-8 | Python | false | false | 12,692 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ChatsTabsOperations(object):
"""ChatsTabsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~teams.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get_teams_app(
self,
chat_id, # type: str
teams_tab_id, # type: str
select=None, # type: Optional[List[Union[str, "models.Enum51"]]]
expand=None, # type: Optional[List[Union[str, "models.Enum52"]]]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphTeamsApp"
"""Get teamsApp from chats.
Get teamsApp from chats.
:param chat_id: key: id of chat.
:type chat_id: str
:param teams_tab_id: key: id of teamsTab.
:type teams_tab_id: str
:param select: Select properties to be returned.
:type select: list[str or ~teams.models.Enum51]
:param expand: Expand related entities.
:type expand: list[str or ~teams.models.Enum52]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphTeamsApp, or the result of cls(response)
:rtype: ~teams.models.MicrosoftGraphTeamsApp
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphTeamsApp"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_teams_app.metadata['url'] # type: ignore
path_format_arguments = {
'chat-id': self._serialize.url("chat_id", chat_id, 'str'),
'teamsTab-id': self._serialize.url("teams_tab_id", teams_tab_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphTeamsApp', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_teams_app.metadata = {'url': '/chats/{chat-id}/tabs/{teamsTab-id}/teamsApp'} # type: ignore
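    # Illustrative call (the ids and select values below are hypothetical):
    #   app = chats_tabs_ops.get_teams_app('19:chat-id', 'tab-1', select=['id'])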
def get_ref_teams_app(
self,
chat_id, # type: str
teams_tab_id, # type: str
**kwargs # type: Any
):
# type: (...) -> str
"""Get ref of teamsApp from chats.
Get ref of teamsApp from chats.
:param chat_id: key: id of chat.
:type chat_id: str
:param teams_tab_id: key: id of teamsTab.
:type teams_tab_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_ref_teams_app.metadata['url'] # type: ignore
path_format_arguments = {
'chat-id': self._serialize.url("chat_id", chat_id, 'str'),
'teamsTab-id': self._serialize.url("teams_tab_id", teams_tab_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_ref_teams_app.metadata = {'url': '/chats/{chat-id}/tabs/{teamsTab-id}/teamsApp/$ref'} # type: ignore
def set_ref_teams_app(
self,
chat_id, # type: str
teams_tab_id, # type: str
body, # type: Dict[str, object]
**kwargs # type: Any
):
# type: (...) -> None
"""Update the ref of navigation property teamsApp in chats.
Update the ref of navigation property teamsApp in chats.
:param chat_id: key: id of chat.
:type chat_id: str
:param teams_tab_id: key: id of teamsTab.
:type teams_tab_id: str
:param body: New navigation property ref values.
:type body: dict[str, object]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.set_ref_teams_app.metadata['url'] # type: ignore
path_format_arguments = {
'chat-id': self._serialize.url("chat_id", chat_id, 'str'),
'teamsTab-id': self._serialize.url("teams_tab_id", teams_tab_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, '{object}')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
set_ref_teams_app.metadata = {'url': '/chats/{chat-id}/tabs/{teamsTab-id}/teamsApp/$ref'} # type: ignore
def delete_ref_teams_app(
self,
chat_id, # type: str
teams_tab_id, # type: str
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Delete ref of navigation property teamsApp for chats.
Delete ref of navigation property teamsApp for chats.
:param chat_id: key: id of chat.
:type chat_id: str
:param teams_tab_id: key: id of teamsTab.
:type teams_tab_id: str
:param if_match: ETag.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_ref_teams_app.metadata['url'] # type: ignore
path_format_arguments = {
'chat-id': self._serialize.url("chat_id", chat_id, 'str'),
'teamsTab-id': self._serialize.url("teams_tab_id", teams_tab_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_ref_teams_app.metadata = {'url': '/chats/{chat-id}/tabs/{teamsTab-id}/teamsApp/$ref'} # type: ignore
| [
"[email protected]"
] | |
67f5408bdbe2f902601c09601cf989801f06e51d | 80b7f2a10506f70477d8720e229d7530da2eff5d | /uhd_restpy/testplatform/sessions/ixnetwork/topology/dhcp6serversessions_cca5dcfe9b97a6a418479e240dcecfb7.py | c5b265aae20c3442542996d86f4810141e245cd3 | [
"MIT"
] | permissive | OpenIxia/ixnetwork_restpy | 00fdc305901aa7e4b26e4000b133655e2d0e346a | c8ecc779421bffbc27c906c1ea51af3756d83398 | refs/heads/master | 2023-08-10T02:21:38.207252 | 2023-07-19T14:14:57 | 2023-07-19T14:14:57 | 174,170,555 | 26 | 16 | MIT | 2023-02-02T07:02:43 | 2019-03-06T15:27:20 | Python | UTF-8 | Python | false | false | 20,885 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from uhd_restpy.base import Base
from uhd_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class Dhcp6ServerSessions(Base):
"""DHCPv6 Leases.
The Dhcp6ServerSessions class encapsulates a required dhcp6ServerSessions resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'dhcp6ServerSessions'
_SDM_ATT_MAP = {
'AddressDuidMask': 'addressDuidMask',
'AddressDuidPattern': 'addressDuidPattern',
'AddressesPerIA': 'addressesPerIA',
'Count': 'count',
'CustomRebindTime': 'customRebindTime',
'CustomRenewTime': 'customRenewTime',
'DefaultLeaseTime': 'defaultLeaseTime',
'DescriptiveName': 'descriptiveName',
'EnableAddressMatchDuid': 'enableAddressMatchDuid',
'EnablePrefixMatchDuid': 'enablePrefixMatchDuid',
'IaType': 'iaType',
'Ignore': 'ignore',
'IgnoreMask': 'ignoreMask',
'IgnorePattern': 'ignorePattern',
'IpAddress': 'ipAddress',
'IpAddressIncrement': 'ipAddressIncrement',
'IpAddressPD': 'ipAddressPD',
'IpPrefix': 'ipPrefix',
'IpPrefixIncrement': 'ipPrefixIncrement',
'LeaseTimeIncrement': 'leaseTimeIncrement',
'Nak': 'nak',
'NakMask': 'nakMask',
'NakPattern': 'nakPattern',
'Name': 'name',
'PoolPrefixSize': 'poolPrefixSize',
'PoolSize': 'poolSize',
'PrefixDuidIncrement': 'prefixDuidIncrement',
'PrefixDuidStart': 'prefixDuidStart',
'PrefixLength': 'prefixLength',
'PrefixesPerIA': 'prefixesPerIA',
'UseCustomTimes': 'useCustomTimes',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(Dhcp6ServerSessions, self).__init__(parent, list_op)
@property
def AddressDuidMask(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The mask based on which the DUIDs are chosen for address assignment.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AddressDuidMask']))
@property
def AddressDuidPattern(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The pattern based on which the DUIDs are chosen for address assignment.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AddressDuidPattern']))
@property
def AddressesPerIA(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Number of addresses to be advertised in a single IANA option.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AddressesPerIA']))
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def CustomRebindTime(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The Time (in seconds) after the client will start rebinding the leases from the server.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CustomRebindTime']))
@property
def CustomRenewTime(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The Time (in seconds) after the client will start renewing the leases from the server.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CustomRenewTime']))
@property
def DefaultLeaseTime(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The Life Time length in seconds that will be assigned to a lease if the requesting DHCP Client does not specify a specific expiration time.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DefaultLeaseTime']))
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def EnableAddressMatchDuid(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): If enabled, the requests with DUIDs matching the mask and pattern will be assigned addresses from this pool.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableAddressMatchDuid']))
@property
def EnablePrefixMatchDuid(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): If enabled, the requests with DUIDs matching DUID start and increment will be given a specific prefix from this pool.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnablePrefixMatchDuid']))
@property
def IaType(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The Identity Association type supported by IPv6 address pools .
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IaType']))
@property
def Ignore(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): If enabled, the requests with DUIDs matching the mask and pattern will be ignored by the Server.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ignore']))
@property
def IgnoreMask(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The mask based on which the DUIDs of ignored addresses are chosen.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IgnoreMask']))
@property
def IgnorePattern(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The pattern based on which the DUIDs of ignored addresses are chosen.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IgnorePattern']))
@property
def IpAddress(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The IP address of the first lease pool.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IpAddress']))
@property
def IpAddressIncrement(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The increment value for the lease address within the lease pool.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IpAddressIncrement']))
@property
def IpAddressPD(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The prefix of the first lease pool.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IpAddressPD']))
@property
def IpPrefix(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The Subnet Address length used to compute the subnetwork the advertised lease is part of.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IpPrefix']))
@property
def IpPrefixIncrement(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The increment value for the lease prefix within the lease pool.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IpPrefixIncrement']))
@property
def LeaseTimeIncrement(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Increment step for Lease Time.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LeaseTimeIncrement']))
@property
def Nak(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): If enabled, the requests with DUIDs matching the mask and pattern will be NAKed by the Server.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Nak']))
@property
def NakMask(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The mask based on which the DUIDs of NAKed addresses are chosen.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NakMask']))
@property
def NakPattern(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The pattern based on which the DUIDs of NAKed addresses are chosen.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NakPattern']))
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def PoolPrefixSize(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The number of leases to be allocated per each server prefix.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PoolPrefixSize']))
@property
def PoolSize(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The number of leases to be allocated per each server address.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PoolSize']))
@property
def PrefixDuidIncrement(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The increment used to generate the DUIDs which will be chosen for prefix assignment.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PrefixDuidIncrement']))
@property
def PrefixDuidStart(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The first DUID which will be chosen for prefix assignment.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PrefixDuidStart']))
@property
def PrefixLength(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The subnet address length advertised in DHCP Offer and Reply messages.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PrefixLength']))
@property
def PrefixesPerIA(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Number of prefixes to be advertised in a single IANA option.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PrefixesPerIA']))
@property
def UseCustomTimes(self):
# type: () -> 'Multivalue'
"""
Returns
-------
        - obj(uhd_restpy.multivalue.Multivalue): Use Custom Renew/Rebind Times instead of the ones computed from the validity times of the leases.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UseCustomTimes']))
def update(self, Name=None):
# type: (str) -> Dhcp6ServerSessions
"""Updates dhcp6ServerSessions resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(self, Count=None, DescriptiveName=None, Name=None):
# type: (int, str, str) -> Dhcp6ServerSessions
"""Finds and retrieves dhcp6ServerSessions resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve dhcp6ServerSessions resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all dhcp6ServerSessions resources from the server.
Args
----
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Returns
-------
- self: This instance with matching dhcp6ServerSessions resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of dhcp6ServerSessions data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the dhcp6ServerSessions resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, AddressDuidMask=None, AddressDuidPattern=None, AddressesPerIA=None, CustomRebindTime=None, CustomRenewTime=None, DefaultLeaseTime=None, EnableAddressMatchDuid=None, EnablePrefixMatchDuid=None, IaType=None, Ignore=None, IgnoreMask=None, IgnorePattern=None, IpAddress=None, IpAddressIncrement=None, IpAddressPD=None, IpPrefix=None, IpPrefixIncrement=None, LeaseTimeIncrement=None, Nak=None, NakMask=None, NakPattern=None, PoolPrefixSize=None, PoolSize=None, PrefixDuidIncrement=None, PrefixDuidStart=None, PrefixLength=None, PrefixesPerIA=None, UseCustomTimes=None):
"""Base class infrastructure that gets a list of dhcp6ServerSessions device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- AddressDuidMask (str): optional regex of addressDuidMask
- AddressDuidPattern (str): optional regex of addressDuidPattern
- AddressesPerIA (str): optional regex of addressesPerIA
- CustomRebindTime (str): optional regex of customRebindTime
- CustomRenewTime (str): optional regex of customRenewTime
- DefaultLeaseTime (str): optional regex of defaultLeaseTime
- EnableAddressMatchDuid (str): optional regex of enableAddressMatchDuid
- EnablePrefixMatchDuid (str): optional regex of enablePrefixMatchDuid
- IaType (str): optional regex of iaType
- Ignore (str): optional regex of ignore
- IgnoreMask (str): optional regex of ignoreMask
- IgnorePattern (str): optional regex of ignorePattern
- IpAddress (str): optional regex of ipAddress
- IpAddressIncrement (str): optional regex of ipAddressIncrement
- IpAddressPD (str): optional regex of ipAddressPD
- IpPrefix (str): optional regex of ipPrefix
- IpPrefixIncrement (str): optional regex of ipPrefixIncrement
- LeaseTimeIncrement (str): optional regex of leaseTimeIncrement
- Nak (str): optional regex of nak
- NakMask (str): optional regex of nakMask
- NakPattern (str): optional regex of nakPattern
- PoolPrefixSize (str): optional regex of poolPrefixSize
- PoolSize (str): optional regex of poolSize
- PrefixDuidIncrement (str): optional regex of prefixDuidIncrement
- PrefixDuidStart (str): optional regex of prefixDuidStart
- PrefixLength (str): optional regex of prefixLength
- PrefixesPerIA (str): optional regex of prefixesPerIA
- UseCustomTimes (str): optional regex of useCustomTimes
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
| [
"[email protected]"
] | |
8fcc98ef1e17399e656b2982f4f3df049be9a227 | 4fd7e936dd38213a74f19abd760cc2b5f2c9be3f | /119-guild-incorrectly-importing-flags-from-other-modules/test.py | ad3831b26d04ee89bf89cde77a645ab19b3aed9c | [
"Apache-2.0"
] | permissive | guildai/issue-resolution | 8eae7c74ffd71f018e62d7374ac173671e81c0be | 7fc5f6dac9090c7a7838715e99cef2e8d9867729 | refs/heads/master | 2023-08-04T00:44:29.549711 | 2023-07-31T18:42:46 | 2023-07-31T18:43:44 | 200,896,019 | 0 | 3 | null | 2023-07-03T08:13:20 | 2019-08-06T17:29:37 | Python | UTF-8 | Python | false | false | 231 | py | import argparse
import submod # Unused but triggers the bug. See submod.py
p = argparse.ArgumentParser()
p.add_argument("--foo", default=123)
if __name__ == "__main__":
args = p.parse_args()
print("foo: %s" % args.foo)
| [
"[email protected]"
] | |
0fbb084607b6a8f4c9a5e8d59df82a86c66aefe8 | 5b58a332c6bea0688d196aabedfc8ccc49bdd134 | /experiments/models_angles_10s/train.py | ed7fba7dc84e9805e65f5d67ac01dc274ec22035 | [] | no_license | ver228/classify_strains | 5420c2b3ea8e93b6ba46900c385f52f664f1cbd7 | dc61e7431410e25ab7c2da0acb6d090cc2ebaabb | refs/heads/master | 2021-09-20T08:52:14.505868 | 2018-08-07T12:26:22 | 2018-08-07T12:26:22 | 108,448,619 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,703 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 17 10:43:03 2017
@author: ajaver
"""
import os
import sys
import time
import torch
from torch import nn
#Be sure to use abspath linux does not give the path if one uses __file__
_BASEDIR = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.join(_BASEDIR, os.pardir, os.pardir, 'src')
sys.path.append(src_dir)
from classify.trainer import init_generator, Trainer
import models
def main(
model_name = 'resnet18',
dataset = 'SWDB',
data_file = None, #get defaults
is_reduced = True,
sample_size_seconds = 10,
sample_frequency_s = 0.04,
n_batch = 32,
transform_type = 'eigenworms_full',
is_normalized = False,
n_epochs = 200,
):
if sys.platform == 'linux':
log_dir_root = '/work/ajaver/classify_strains/results'
else:
log_dir_root = '/Users/ajaver/OneDrive - Imperial College London/classify_strains/logs/'
#flag to check if cuda is available
is_cuda = torch.cuda.is_available()
#add the parent directory to the log results
pdir = os.path.split(_BASEDIR)[-1]
log_dir_root = os.path.join(log_dir_root, pdir)
params = dict(
is_reduced = is_reduced,
dataset = dataset,
data_file = data_file,
sample_size_seconds = sample_size_seconds,
sample_frequency_s = sample_frequency_s,
n_batch = n_batch,
transform_type = transform_type,
is_normalized = is_normalized,
is_cuda = is_cuda
)
gen_details, train_generator, test_generator = init_generator(**params)
assert model_name in dir(models)
get_model_func = getattr(models, model_name)
model = get_model_func(train_generator)
log_dir = os.path.join(log_dir_root, '{}_{}_{}'.format(model_name, gen_details, time.strftime('%Y%m%d_%H%M%S')))
#show some data for debugging purposes
print(model)
print(test_generator.valid_strains)
print(log_dir)
#maybe i should include the criterion and optimizer as input parameters
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
if is_cuda:
print('This is CUDA!!!!')
        torch.backends.cudnn.benchmark = True  # useful for inputs of fixed dimensions
model = model.cuda()
criterion = criterion.cuda()
t = Trainer(model,
optimizer,
criterion,
train_generator,
test_generator,
n_epochs,
log_dir
)
t.fit()
if __name__ == '__main__':
import fire
fire.Fire(main)
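# Illustrative CLI use via python-fire (the flag values are hypothetical):
#   python train.py --model_name resnet18 --dataset SWDB --n_batch 32 --n_epochs 100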
| [
"[email protected]"
] | |
df90a8b291201afd2ac6f43b22b4e233d8ae03ba | 185f30795be9a8fec6539fe17753fb909e258e4c | /ljy_16并发编程/ljy_06守护进程.py | 4bc8faf01e00183b143fd3a907f0a7d528374fa2 | [] | no_license | OPBrother/LearningPython | bd375430ce013abd9a4279f60e5f9457e965bdf7 | 9d264acb269a6191f7ec49abba25c98002f4fcd1 | refs/heads/main | 2023-03-31T06:47:43.071370 | 2021-04-12T07:09:16 | 2021-04-12T07:09:16 | 350,307,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | """
Daemon process: the child process lives while the parent lives and dies when the parent dies; such a child process is called a daemon process.
"""
from multiprocessing import Process
import time
def task(name):
print("%s总管正在活着" % name)
time.sleep(3)
print("%s总管正在死亡" % name)
if __name__ == '__main__':
p = Process(target=task, args=("egon", ))
    p.daemon = True  # mark p as a daemon process; this line must come before start(), otherwise an error is raised
p.start()
time.sleep(2)
print("皇帝寿终正寝") | [
"[email protected]"
] | |
e3bf33f81a6bf24febb67c78fecdd9915a355ad3 | b6d475893a3d5a83d17c4219eaa2c154d1f77ec6 | /app/auth/views.py | af6e8155eb39d7642ebc5d16fafcbb90bec1f4ba | [
"MIT"
] | permissive | MungaiKeren/Pitch-It | 6de28bac0ef7392952bfe2e9df6ec40b2a4962a8 | ae0d85ea9437da4aacadc297e9e0a20ae955debf | refs/heads/master | 2020-07-25T10:09:52.369506 | 2019-10-02T06:46:20 | 2019-10-02T06:46:20 | 208,254,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,527 | py | from flask import render_template,request,redirect, url_for, flash
from . import auth
from ..models import User
from .forms import RegistrationForm,LoginForm
from .. import db
from flask_login import login_user,logout_user,login_required
# from ..email import mail_message
@auth.route('/login',methods=['GET','POST'])
def login():
login_form = LoginForm()
if login_form.validate_on_submit():
user = User.query.filter_by(email = login_form.email.data).first()
if user is not None and user.verify_password(login_form.password.data):
login_user(user,login_form.remember.data)
return redirect(request.args.get('next') or url_for('main.index'))
        flash('Invalid username or password')
title = "Pitch-It Login"
return render_template('auth/login.html',login_form = login_form,title=title)
@auth.route('/register',methods = ["GET","POST"])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email = form.email.data, username = form.username.data,password = form.password.data)
db.session.add(user)
db.session.commit()
#mail_message("We are glad to recieve you from Pitch it","email/welcome_user",user.email,user=user)
return redirect(url_for('auth.login'))
title = "New account"
    return render_template('auth/register.html', registration_form=form, title=title)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for("main.index")) | [
"[email protected]"
] | |
4c383fb4bc9641f2ae9c5ba17ecc3744790fd8fd | 19c6e23d87d7b304f5674ebcdb0f56e602663446 | /amqpstorm/tests/connection_tests.py | 008fa1300af618af9e16bfff8c5984dfc2deb678 | [
"MIT"
] | permissive | gitter-badger/amqpstorm | 088bf0d697bd545ec5a5a497315acce0b701bb8f | 47023ab652a63487dcbcd89ebf30768217aef113 | refs/heads/master | 2020-12-11T01:50:55.828417 | 2016-07-14T23:11:34 | 2016-07-14T23:11:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,807 | py | import logging
import socket
import ssl
import threading
from mock import MagicMock
try:
import unittest2 as unittest
except ImportError:
import unittest
from amqpstorm.io import IO
from amqpstorm import Connection
from amqpstorm.exception import *
from pamqp.specification import Basic as spec_basic
from pamqp import specification as pamqp_spec
from pamqp import frame as pamqp_frame
from amqpstorm.tests.utility import FakeChannel
from amqpstorm.tests.utility import MockLoggingHandler
logging.basicConfig(level=logging.DEBUG)
class ConnectionTests(unittest.TestCase):
def setUp(self):
self.logging_handler = MockLoggingHandler()
logging.root.addHandler(self.logging_handler)
def test_connection_with_statement(self):
with Connection('127.0.0.1', 'guest', 'guest', lazy=True) as con:
self.assertIsInstance(con, Connection)
def test_connection_with_statement_when_failing(self):
try:
with Connection('127.0.0.1', 'guest', 'guest', lazy=True) as con:
con.exceptions.append(AMQPConnectionError('error'))
con.check_for_errors()
except AMQPConnectionError as why:
self.assertIsInstance(why, AMQPConnectionError)
self.assertEqual(self.logging_handler.messages['warning'][0],
'Closing connection due to an unhandled exception: '
'error')
def test_connection_server_is_blocked_default_value(self):
connection = Connection('127.0.0.1', 'guest', 'guest', lazy=True)
self.assertEqual(connection.is_blocked, False)
def test_connection_server_properties_default_value(self):
connection = Connection('127.0.0.1', 'guest', 'guest', lazy=True)
self.assertEqual(connection.server_properties, {})
def test_connection_socket_property(self):
connection = Connection('127.0.0.1', 'guest', 'guest', lazy=True)
connection._io.socket = 'FakeSocket'
self.assertEqual(connection.socket, 'FakeSocket')
def test_connection_socket_none_when_closed(self):
connection = Connection('127.0.0.1', 'guest', 'guest', lazy=True)
self.assertFalse(connection.socket)
def test_connection_fileno_property(self):
connection = Connection('127.0.0.1', 'guest', 'guest', lazy=True)
connection.set_state(connection.OPENING)
io = IO(connection.parameters, [])
io.socket = MagicMock(name='socket', spec=socket.socket)
connection._io = io
io.socket.fileno.return_value = 5
self.assertEqual(connection.fileno, 5)
def test_connection_fileno_none_when_closed(self):
connection = Connection('127.0.0.1', 'guest', 'guest', lazy=True)
self.assertIsNone(connection.fileno)
def test_connection_close_state(self):
connection = Connection('127.0.0.1', 'guest', 'guest', lazy=True)
connection.set_state(Connection.OPEN)
connection.close()
self.assertTrue(connection.is_closed)
def test_connection_open_channel_on_closed_connection(self):
connection = Connection('127.0.0.1', 'guest', 'guest', lazy=True)
self.assertRaises(AMQPConnectionError, connection.channel)
def test_connection_basic_read_buffer(self):
connection = Connection('127.0.0.1', 'guest', 'guest', lazy=True)
cancel_ok_frame = spec_basic.CancelOk().marshal()
self.assertEqual(connection._read_buffer(cancel_ok_frame), b'\x00')
def test_connection_handle_read_buffer_none_returns_none(self):
connection = Connection('127.0.0.1', 'guest', 'guest', lazy=True)
self.assertIsNone(connection._read_buffer(None))
def test_connection_basic_handle_amqp_frame(self):
connection = Connection('127.0.0.1', 'guest', 'guest', lazy=True)
cancel_ok_frame = spec_basic.CancelOk().marshal()
self.assertEqual(connection._handle_amqp_frame(cancel_ok_frame),
(b'\x00', None, None))
def test_connection_handle_amqp_frame_none_returns_none(self):
connection = Connection('127.0.0.1', 'guest', 'guest', lazy=True)
result = connection._handle_amqp_frame('')
self.assertEqual(result[0], '')
self.assertIsNone(result[1])
self.assertIsNone(result[2])
def test_connection_handle_amqp_frame_error(self):
connection = Connection('127.0.0.1', 'guest', 'guest', lazy=True)
def throw_error(*_):
raise pamqp_spec.AMQPFrameError()
restore_func = pamqp_frame.unmarshal
try:
pamqp_frame.unmarshal = throw_error
result = connection._handle_amqp_frame('error')
self.assertEqual(result[0], 'error')
self.assertIsNone(result[1])
self.assertIsNone(result[2])
finally:
pamqp_frame.unmarshal = restore_func
def test_connection_handle_unicode_error(self):
"""This test covers an unlikely issue triggered by network corruption.
pamqp.decode._maybe_utf8 raises:
UnicodeDecodeError: 'utf8' codec can't
decode byte 0xc5 in position 1: invalid continuation byte
The goal here is not to fix issues caused by network corruption,
but rather to make sure that the exceptions raised when
connections do fail are always predictable.
Fail fast and reliably!
:return:
"""
connection = Connection('127.0.0.1', 'guest', 'guest', lazy=True)
def throw_error(_):
raise UnicodeDecodeError(str(), bytes(), 1, 1, str())
restore_func = pamqp_frame.unmarshal
try:
pamqp_frame.unmarshal = throw_error
result = connection._handle_amqp_frame('error')
self.assertEqual(result[0], 'error')
self.assertIsNone(result[1])
self.assertIsNone(result[2])
finally:
pamqp_frame.unmarshal = restore_func
def test_connection_handle_value_error(self):
"""This test covers an unlikely issue triggered by network corruption.
pamqp.decode._embedded_value raises:
ValueError: Unknown type: b'\x13'
The goal here is not to fix issues caused by network corruption,
but rather to make sure that the exceptions raised when
connections do fail are always predictable.
Fail fast and reliably!
:return:
"""
connection = Connection('127.0.0.1', 'guest', 'guest', lazy=True)
def throw_error(_):
raise ValueError("Unknown type: b'\x13'")
restore_func = pamqp_frame.unmarshal
try:
pamqp_frame.unmarshal = throw_error
result = connection._handle_amqp_frame('error')
self.assertEqual(result[0], 'error')
self.assertIsNone(result[1])
self.assertIsNone(result[2])
finally:
pamqp_frame.unmarshal = restore_func
def test_connection_wait_for_connection(self):
connection = Connection('127.0.0.1', 'guest', 'guest', timeout=5,
lazy=True)
connection.set_state(connection.OPENING)
io = IO(connection.parameters, [])
io.socket = MagicMock(name='socket', spec=socket.socket)
connection._io = io
self.assertFalse(connection.is_open)
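        # flip the state to OPEN from a timer thread after ~1s while the main
        # thread blocks in _wait_for_connection_to_open()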
def func(conn):
conn.set_state(conn.OPEN)
threading.Timer(function=func, interval=1, args=(connection,)).start()
connection._wait_for_connection_to_open()
self.assertTrue(connection.is_open)
def test_connection_wait_for_connection_raises_on_timeout(self):
connection = Connection('127.0.0.1', 'guest', 'guest', timeout=0.1,
lazy=True)
connection.set_state(connection.OPENING)
io = IO(connection.parameters, [])
io.socket = MagicMock(name='socket', spec=socket.socket)
connection._io = io
self.assertRaises(AMQPConnectionError,
connection._wait_for_connection_to_open)
def test_connection_close_channels(self):
connection = Connection('127.0.0.1', 'guest', 'guest', timeout=1,
lazy=True)
connection._channels[0] = FakeChannel()
connection._channels[1] = FakeChannel()
connection._channels[2] = FakeChannel(FakeChannel.CLOSED)
self.assertTrue(connection._channels[0].is_open)
self.assertTrue(connection._channels[1].is_open)
self.assertTrue(connection._channels[2].is_closed)
connection._close_channels()
self.assertTrue(connection._channels[0].is_closed)
self.assertTrue(connection._channels[1].is_closed)
self.assertTrue(connection._channels[2].is_closed)
def test_connection_closed_on_exception(self):
connection = Connection('127.0.0.1', 'guest', 'guest', timeout=1,
lazy=True)
connection.set_state(connection.OPEN)
connection.exceptions.append(AMQPConnectionError('error'))
self.assertTrue(connection.is_open)
self.assertRaises(AMQPConnectionError, connection.check_for_errors)
self.assertTrue(connection.is_closed)
def test_connection_heartbeat_stopped_on_close(self):
connection = Connection('127.0.0.1', 'guest', 'guest', timeout=1,
lazy=True)
connection.set_state(connection.OPEN)
connection.heartbeat.start(connection.exceptions)
connection.exceptions.append(AMQPConnectionError('error'))
self.assertTrue(connection.heartbeat._running.is_set())
self.assertRaises(AMQPConnectionError, connection.check_for_errors)
self.assertFalse(connection.heartbeat._running.is_set())
class ConnectionParameterTests(unittest.TestCase):
def test_connection_set_hostname(self):
connection = Connection('127.0.0.1', 'guest', 'guest', lazy=True)
        self.assertEqual(connection.parameters['hostname'], '127.0.0.1')
def test_connection_set_username(self):
connection = Connection('127.0.0.1', 'guest', 'guest', lazy=True)
self.assertEqual(connection.parameters['username'], 'guest')
def test_connection_set_password(self):
connection = Connection('127.0.0.1', 'guest', 'guest', lazy=True)
        self.assertEqual(connection.parameters['password'], 'guest')
def test_connection_set_parameters(self):
connection = Connection('127.0.0.1', 'guest', 'guest',
virtual_host='travis',
heartbeat=120,
timeout=180,
ssl=True,
ssl_options={
'ssl_version': ssl.PROTOCOL_TLSv1
},
lazy=True)
self.assertEqual(connection.parameters['virtual_host'], 'travis')
self.assertEqual(connection.parameters['heartbeat'], 120)
self.assertEqual(connection.parameters['timeout'], 180)
self.assertEqual(connection.parameters['ssl'], True)
self.assertEqual(connection.parameters['ssl_options']['ssl_version'],
ssl.PROTOCOL_TLSv1)
def test_connection_invalid_hostname(self):
self.assertRaises(AMQPInvalidArgument, Connection, 1,
'guest', 'guest', lazy=True)
def test_connection_invalid_username(self):
self.assertRaises(AMQPInvalidArgument, Connection, '127.0.0.1',
2, 'guest', lazy=True)
self.assertRaises(AMQPInvalidArgument, Connection, '127.0.0.1',
None, 'guest', lazy=True)
def test_connection_invalid_password(self):
self.assertRaises(AMQPInvalidArgument, Connection, '127.0.0.1',
'guest', 3, lazy=True)
self.assertRaises(AMQPInvalidArgument, Connection, '127.0.0.1',
'guest', None, lazy=True)
def test_connection_invalid_virtual_host(self):
self.assertRaises(AMQPInvalidArgument, Connection, '127.0.0.1',
'guest', 'guest', virtual_host=4, lazy=True)
self.assertRaises(AMQPInvalidArgument, Connection, '127.0.0.1',
'guest', 'guest', virtual_host=None, lazy=True)
def test_connection_invalid_port(self):
self.assertRaises(AMQPInvalidArgument, Connection, '127.0.0.1',
'guest', 'guest', port='', lazy=True)
self.assertRaises(AMQPInvalidArgument, Connection, '127.0.0.1',
'guest', 'guest', port=None, lazy=True)
def test_connection_invalid_heartbeat(self):
self.assertRaises(AMQPInvalidArgument, Connection, '127.0.0.1',
'guest', 'guest', heartbeat='5', lazy=True)
self.assertRaises(AMQPInvalidArgument, Connection, '127.0.0.1',
'guest', 'guest', heartbeat=None, lazy=True)
def test_connection_invalid_timeout(self):
self.assertRaises(AMQPInvalidArgument, Connection, '127.0.0.1',
'guest', 'guest', timeout='6', lazy=True)
self.assertRaises(AMQPInvalidArgument, Connection, '127.0.0.1',
'guest', 'guest', timeout=None, lazy=True)
def test_connection_invalid_timeout_on_channel(self):
connection = Connection('127.0.0.1', 'guest', 'guest', timeout=1,
lazy=True)
self.assertRaisesRegexp(AMQPInvalidArgument,
'rpc_timeout should be an integer',
connection.channel, None)
| [
"[email protected]"
] | |
7a7f0c99c26ddd39486ccc9e7cac0ca8934dce27 | e41651d8f9b5d260b800136672c70cb85c3b80ff | /Notification_System/temboo/Library/Facebook/Actions/General/Follows/ReadFollows.py | 91b6517f195a642d85b17b6b03768d222d7f5d93 | [] | no_license | shriswissfed/GPS-tracking-system | 43e667fe3d00aa8e65e86d50a4f776fcb06e8c5c | 1c5e90a483386bd2e5c5f48f7c5b306cd5f17965 | refs/heads/master | 2020-05-23T03:06:46.484473 | 2018-10-03T08:50:00 | 2018-10-03T08:50:00 | 55,578,217 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,265 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# ReadFollows
# Retrieves one or more follow actions.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ReadFollows(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ReadFollows Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ReadFollows, self).__init__(temboo_session, '/Library/Facebook/Actions/General/Follows/ReadFollows')
def new_input_set(self):
return ReadFollowsInputSet()
def _make_result_set(self, result, path):
return ReadFollowsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ReadFollowsChoreographyExecution(session, exec_id, path)
class ReadFollowsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ReadFollows
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The access token retrieved from the final step of the OAuth process.)
"""
super(ReadFollowsInputSet, self)._set_input('AccessToken', value)
def set_ActionID(self, value):
"""
Set the value of the ActionID input for this Choreo. ((optional, string) The id of an action to retrieve. If an id is not provided, a list of all follow actions will be returned.)
"""
super(ReadFollowsInputSet, self)._set_input('ActionID', value)
def set_Fields(self, value):
"""
Set the value of the Fields input for this Choreo. ((optional, string) A comma separated list of fields to return (i.e. id,name).)
"""
super(ReadFollowsInputSet, self)._set_input('Fields', value)
def set_Limit(self, value):
"""
Set the value of the Limit input for this Choreo. ((optional, integer) Used to page through results. Limits the number of records returned in the response.)
"""
super(ReadFollowsInputSet, self)._set_input('Limit', value)
def set_Offset(self, value):
"""
Set the value of the Offset input for this Choreo. ((optional, integer) Used to page through results. Returns results starting from the specified number.)
"""
super(ReadFollowsInputSet, self)._set_input('Offset', value)
def set_ProfileID(self, value):
"""
Set the value of the ProfileID input for this Choreo. ((optional, string) The id of the user's profile. Defaults to "me" indicating the authenticated user.)
"""
super(ReadFollowsInputSet, self)._set_input('ProfileID', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Can be set to xml or json. Defaults to json.)
"""
super(ReadFollowsInputSet, self)._set_input('ResponseFormat', value)
class ReadFollowsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ReadFollows Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_HasNext(self):
"""
Retrieve the value for the "HasNext" output from this Choreo execution. ((boolean) A boolean flag indicating that a next page exists.)
"""
return self._output.get('HasNext', None)
def get_HasPrevious(self):
"""
Retrieve the value for the "HasPrevious" output from this Choreo execution. ((boolean) A boolean flag indicating that a previous page exists.)
"""
return self._output.get('HasPrevious', None)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Facebook. Corresponds to the ResponseFormat input. Defaults to JSON.)
"""
return self._output.get('Response', None)
class ReadFollowsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ReadFollowsResultSet(response, path)
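# Hypothetical usage sketch (not part of the generated SDK file; session setup
# is omitted and the execute method name follows the Temboo Python SDK pattern):
#     choreo = ReadFollows(session)
#     inputs = choreo.new_input_set()
#     inputs.set_AccessToken(access_token)
#     results = choreo.execute_with_results(inputs)
#     print(results.get_Response())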
| [
"[email protected]"
] | |
3d592a84a2929f37cce6ce6273455084be52287f | f80ef3a3cf859b13e8af8433af549b6b1043bf6e | /pyobjc-framework-Cocoa/PyObjCTest/test_nsdebug.py | a28db7783a27caf49e4e9f84f4f5836d944f21a2 | [
"MIT"
] | permissive | ronaldoussoren/pyobjc | 29dc9ca0af838a56105a9ddd62fb38ec415f0b86 | 77b98382e52818690449111cd2e23cd469b53cf5 | refs/heads/master | 2023-09-01T05:15:21.814504 | 2023-06-13T20:00:17 | 2023-06-13T20:00:17 | 243,933,900 | 439 | 49 | null | 2023-06-25T02:49:07 | 2020-02-29T08:43:12 | Python | UTF-8 | Python | false | false | 1,110 | py | import Foundation
from PyObjCTools.TestSupport import TestCase
class TestNSDebug(TestCase):
def testFunctions(self):
self.assertResultIsBOOL(Foundation.NSIsFreedObject)
Foundation.NSRecordAllocationEvent
Foundation.NSFrameAddress
Foundation.NSReturnAddress
Foundation.NSCountFrames
Foundation.NSRecordAllocationEvent
def testConstants(self):
self.assertEqual(Foundation.NSObjectAutoreleasedEvent, 3)
self.assertEqual(Foundation.NSObjectExtraRefIncrementedEvent, 4)
self.assertEqual(Foundation.NSObjectExtraRefDecrementedEvent, 5)
self.assertEqual(Foundation.NSObjectInternalRefIncrementedEvent, 6)
self.assertEqual(Foundation.NSObjectInternalRefDecrementedEvent, 7)
self.assertIsInstance(Foundation.NSDebugEnabled, bool)
self.assertIsInstance(Foundation.NSZombieEnabled, bool)
self.assertIsInstance(Foundation.NSDeallocateZombies, bool)
self.assertIsInstance(Foundation.NSHangOnUncaughtException, bool)
self.assertIsInstance(Foundation.NSKeepAllocationStatistics, bool)
| [
"[email protected]"
] | |
4d0e2b1e5bf9f1ce7f7948c69c1941894264bfbf | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/dhcp/lbldef.py | 41ea0cb28d90949a7936280f0ea202625be425e1 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,448 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class LblDef(Mo):
"""
Definition of the label.
"""
meta = ClassMeta("cobra.model.dhcp.LblDef")
meta.moClassName = "dhcpLblDef"
meta.rnFormat = "dhcplbldef-%(name)s"
meta.category = MoCategory.REGULAR
meta.label = "DHCP Relay Label"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.dhcp.OptionDef")
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.health.Inst")
meta.childClasses.add("cobra.model.dhcp.RsLblDefToRelayP")
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.dhcp.RsLblDefToRelayP", "rsLblDefToRelayP"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.childNamesAndRnPrefix.append(("cobra.model.dhcp.OptionDef", "opt-"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.parentClasses.add("cobra.model.fv.IfConn")
meta.parentClasses.add("cobra.model.l3ext.LIfPDef")
meta.parentClasses.add("cobra.model.fv.BDDef")
meta.superClasses.add("cobra.model.pol.ConsElem")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.pol.Lbl")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Def")
meta.superClasses.add("cobra.model.dhcp.ALbl")
meta.rnPrefixes = [
('dhcplbldef-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "monPolDn", "monPolDn", 14122, PropCategory.REGULAR)
prop.label = "Monitoring policy attached to this observable object"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("monPolDn", prop)
prop = PropMeta("str", "name", "name", 6138, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.range = [(1, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "owner", "owner", 1122, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "infra"
prop._addConstant("infra", "infra", 0)
prop._addConstant("tenant", "tenant", 1)
meta.props.add("owner", prop)
prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerKey", prop)
prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerTag", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tag", "tag", 4992, PropCategory.REGULAR)
prop.label = "Tag"
prop.isConfig = True
prop.isAdmin = True
prop._addConstant("alice-blue", "alice-blue", 15792383)
prop._addConstant("antique-white", "antique-white", 16444375)
prop._addConstant("aqua", "aqua", 65535)
prop._addConstant("aquamarine", "aquamarine", 8388564)
prop._addConstant("azure", "azure", 15794175)
prop._addConstant("beige", "beige", 16119260)
prop._addConstant("bisque", "bisque", 16770244)
prop._addConstant("black", "black", 0)
prop._addConstant("blanched-almond", "blanched-almond", 16772045)
prop._addConstant("blue", "blue", 255)
prop._addConstant("blue-violet", "blueviolet", 9055202)
prop._addConstant("brown", "brown", 10824234)
prop._addConstant("burlywood", "burlywood", 14596231)
prop._addConstant("cadet-blue", "cadet-blue", 6266528)
prop._addConstant("chartreuse", "chartreuse", 8388352)
prop._addConstant("chocolate", "chocolate", 13789470)
prop._addConstant("coral", "coral", 16744272)
prop._addConstant("cornflower-blue", "cornflower-blue", 6591981)
prop._addConstant("cornsilk", "cornsilk", 16775388)
prop._addConstant("crimson", "crimson", 14423100)
prop._addConstant("cyan", "cyan", 65535)
prop._addConstant("dark-blue", "dark-blue", 139)
prop._addConstant("dark-cyan", "dark-cyan", 35723)
prop._addConstant("dark-goldenrod", "dark-goldenrod", 12092939)
prop._addConstant("dark-gray", "dark-gray", 11119017)
prop._addConstant("dark-green", "dark-green", 25600)
prop._addConstant("dark-khaki", "dark-khaki", 12433259)
prop._addConstant("dark-magenta", "dark-magenta", 9109643)
prop._addConstant("dark-olive-green", "dark-olive-green", 5597999)
prop._addConstant("dark-orange", "dark-orange", 16747520)
prop._addConstant("dark-orchid", "dark-orchid", 10040012)
prop._addConstant("dark-red", "dark-red", 9109504)
prop._addConstant("dark-salmon", "dark-salmon", 15308410)
prop._addConstant("dark-sea-green", "dark-sea-green", 9419919)
prop._addConstant("dark-slate-blue", "dark-slate-blue", 4734347)
prop._addConstant("dark-slate-gray", "dark-slate-gray", 3100495)
prop._addConstant("dark-turquoise", "dark-turquoise", 52945)
prop._addConstant("dark-violet", "dark-violet", 9699539)
prop._addConstant("deep-pink", "deep-pink", 16716947)
prop._addConstant("deep-sky-blue", "deep-sky-blue", 49151)
prop._addConstant("dim-gray", "dim-gray", 6908265)
prop._addConstant("dodger-blue", "dodger-blue", 2003199)
prop._addConstant("fire-brick", "fire-brick", 11674146)
prop._addConstant("floral-white", "floral-white", 16775920)
prop._addConstant("forest-green", "forest-green", 2263842)
prop._addConstant("fuchsia", "fuchsia", 16711935)
prop._addConstant("gainsboro", "gainsboro", 14474460)
prop._addConstant("ghost-white", "ghost-white", 16316671)
prop._addConstant("gold", "gold", 16766720)
prop._addConstant("goldenrod", "goldenrod", 14329120)
prop._addConstant("gray", "gray", 8421504)
prop._addConstant("green", "green", 32768)
prop._addConstant("green-yellow", "green-yellow", 11403055)
prop._addConstant("honeydew", "honeydew", 15794160)
prop._addConstant("hot-pink", "hot-pink", 16738740)
prop._addConstant("indian-red", "indian-red", 13458524)
prop._addConstant("indigo", "indigo", 4915330)
prop._addConstant("ivory", "ivory", 16777200)
prop._addConstant("khaki", "khaki", 15787660)
prop._addConstant("lavender", "lavender", 15132410)
prop._addConstant("lavender-blush", "lavender-blush", 16773365)
prop._addConstant("lawn-green", "lawn-green", 8190976)
prop._addConstant("lemon-chiffon", "lemon-chiffon", 16775885)
prop._addConstant("light-blue", "light-blue", 11393254)
prop._addConstant("light-coral", "light-coral", 15761536)
prop._addConstant("light-cyan", "light-cyan", 14745599)
prop._addConstant("light-goldenrod-yellow", "light-goldenrod-yellow", 16448210)
prop._addConstant("light-gray", "light-gray", 13882323)
prop._addConstant("light-green", "light-green", 9498256)
prop._addConstant("light-pink", "light-pink", 16758465)
prop._addConstant("light-salmon", "light-salmon", 16752762)
prop._addConstant("light-sea-green", "light-sea-green", 2142890)
prop._addConstant("light-sky-blue", "light-sky-blue", 8900346)
prop._addConstant("light-slate-gray", "light-slate-gray", 7833753)
prop._addConstant("light-steel-blue", "light-steel-blue", 11584734)
prop._addConstant("light-yellow", "light-yellow", 16777184)
prop._addConstant("lime", "lime", 65280)
prop._addConstant("lime-green", "lime-green", 3329330)
prop._addConstant("linen", "linen", 16445670)
prop._addConstant("magenta", "magenta", 16711935)
prop._addConstant("maroon", "maroon", 8388608)
prop._addConstant("medium-aquamarine", "medium-aquamarine", 6737322)
prop._addConstant("medium-blue", "medium-blue", 205)
prop._addConstant("medium-orchid", "medium-orchid", 12211667)
prop._addConstant("medium-purple", "medium-purple", 9662683)
prop._addConstant("medium-sea-green", "medium-sea-green", 3978097)
prop._addConstant("medium-slate-blue", "medium-slate-blue", 8087790)
prop._addConstant("medium-spring-green", "medium-spring-green", 64154)
prop._addConstant("medium-turquoise", "medium-turquoise", 4772300)
prop._addConstant("medium-violet-red", "medium-violet-red", 13047173)
prop._addConstant("midnight-blue", "midnight-blue", 1644912)
prop._addConstant("mint-cream", "mint-cream", 16121850)
prop._addConstant("misty-rose", "misty-rose", 16770273)
prop._addConstant("moccasin", "moccasin", 16770229)
prop._addConstant("navajo-white", "navajo-white", 16768685)
prop._addConstant("navy", "navy", 128)
prop._addConstant("old-lace", "old-lace", 16643558)
prop._addConstant("olive", "olive", 8421376)
prop._addConstant("olive-drab", "olive-drab", 7048739)
prop._addConstant("orange", "orange", 16753920)
prop._addConstant("orange-red", "orange-red", 16729344)
prop._addConstant("orchid", "orchid", 14315734)
prop._addConstant("pale-goldenrod", "pale-goldenrod", 15657130)
prop._addConstant("pale-green", "pale-green", 10025880)
prop._addConstant("pale-turquoise", "pale-turquoise", 11529966)
prop._addConstant("pale-violet-red", "pale-violet-red", 14381203)
prop._addConstant("papaya-whip", "papaya-whip", 16773077)
prop._addConstant("peachpuff", "peachpuff", 16767673)
prop._addConstant("peru", "peru", 13468991)
prop._addConstant("pink", "pink", 16761035)
prop._addConstant("plum", "plum", 14524637)
prop._addConstant("powder-blue", "powder-blue", 11591910)
prop._addConstant("purple", "purple", 8388736)
prop._addConstant("red", "red", 16711680)
prop._addConstant("rosy-brown", "rosy-brown", 12357519)
prop._addConstant("royal-blue", "royal-blue", 4286945)
prop._addConstant("saddle-brown", "saddle-brown", 9127187)
prop._addConstant("salmon", "salmon", 16416882)
prop._addConstant("sandy-brown", "sandy-brown", 16032864)
prop._addConstant("sea-green", "sea-green", 3050327)
prop._addConstant("seashell", "seashell", 16774638)
prop._addConstant("sienna", "sienna", 10506797)
prop._addConstant("silver", "silver", 12632256)
prop._addConstant("sky-blue", "sky-blue", 8900331)
prop._addConstant("slate-blue", "slate-blue", 6970061)
prop._addConstant("slate-gray", "slate-gray", 7372944)
prop._addConstant("snow", "snow", 16775930)
prop._addConstant("spring-green", "spring-green", 65407)
prop._addConstant("steel-blue", "steel-blue", 4620980)
prop._addConstant("tan", "tan", 13808780)
prop._addConstant("teal", "teal", 32896)
prop._addConstant("thistle", "thistle", 14204888)
prop._addConstant("tomato", "tomato", 16737095)
prop._addConstant("turquoise", "turquoise", 4251856)
prop._addConstant("violet", "violet", 15631086)
prop._addConstant("wheat", "wheat", 16113331)
prop._addConstant("white", "white", 16777215)
prop._addConstant("white-smoke", "white-smoke", 16119285)
prop._addConstant("yellow", "yellow", 16776960)
prop._addConstant("yellow-green", "yellow-green", 10145074)
meta.props.add("tag", prop)
meta.namingProps.append(getattr(meta.props, "name"))
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvTenantToHcloudIgw", "Tenant to IGW", "cobra.model.hcloud.SecurityGroup"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvTenantToHcloudSecurityGroup", "Tenant to Security Group", "cobra.model.hcloud.SecurityGroup"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvTenantToVzCPIf", "Tenant to vzCPIf", "cobra.model.vz.CPIf"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvTenantToVzFilter", "From fvTenant to vzFilter", "cobra.model.vz.Filter"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvTenantToVnsAbsGraph", "From fvTenant to vnsAbsGraph", "cobra.model.vns.AbsGraph"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvTenantToCloudLB", "From fvTenant to cloudLB", "cobra.model.cloud.LB"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvTenantToCloudZone", "From fvTenant to cloudZone", "cobra.model.cloud.Zone"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("TenantToCloudCtxProfile", "Tenant to cloudCtxProfile", "cobra.model.cloud.CtxProfile"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("TenantToVzBrCP", "Tenant to vzBrCP", "cobra.model.vz.BrCP"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("TenantToHcloudCsr", "Tenant to hcloudCsr", "cobra.model.hcloud.Csr"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("TenantToCloudExtEPg", "fv:Tenant to cloud:ExtEPg", "cobra.model.cloud.ExtEPg"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("TenantToCloudRegion", "From fvTenant to cloudRegion", "cobra.model.cloud.Region"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("TenantToHcloudRegion", "Tenant to hcloudRegion", "cobra.model.hcloud.Region"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvTenantToFvCtx", "fvTenant to fvCtx", "cobra.model.fv.Ctx"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("TenantToHcloudCtx", "Tenant to Hcloud context", "cobra.model.hcloud.Ctx"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("TenantToHCloudEndPoint", "Tenant to hcloudEndPoint", "cobra.model.hcloud.EndPoint"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("TenantToCloudApp", "Tenant to Application profile", "cobra.model.cloud.App"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("TenantToCloudEPg", "Tenant to cloud EPg", "cobra.model.cloud.EPg"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("LIfCtxToNwIf", "Physical Interfaces", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("LIfCtxToCompVNic", "Virtual Nics", "cobra.model.comp.VNic"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcIpEpgPolToLocale3", "Fabric Nodes(EP)", "cobra.model.fabric.Node"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcIpEpgPolToLocale2", "Fabric Nodes(Service EPg)", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcIpEpgPolToLocale1", "Fabric Nodes(EPg)", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpgIpPolToLocale3", "Fabric Nodes(EP)", "cobra.model.fabric.Node"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpgIpPolToLocale2", "Fabric Nodes(Service EPg)", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpgIpPolToLocale1", "Fabric Nodes(EPg)", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpEpgPolToLocale3", "Fabric Nodes(EP)", "cobra.model.fabric.Node"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpEpgPolToLocale2", "Fabric Nodes(Service EPg)", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpEpgPolToLocale1", "Fabric Nodes(EPg)", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpgEpPolToLocale3", "Fabric Nodes(EP)", "cobra.model.fabric.Node"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpgEpPolToLocale2", "Fabric Nodes(Service EPg)", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpgEpPolToLocale1", "Fabric Nodes(EPg)", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpgEpgPolToLocale", "Fabric Nodes", "cobra.model.nw.If"))
def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):
namingVals = [name]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
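# Hypothetical usage sketch (not part of the generated model): LblDef(parentDn,
# 'relay1') creates an MO whose RN is 'dhcplbldef-relay1', per meta.rnFormat.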
# End of package file
# ##################################################
| [
"[email protected]"
] | |
82fe38e752875c45d8e077fbd4e2bd5dd55b4f04 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_middleweights.py | 56d926d6a9b798ce9d565d446c1f67605078abf6 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py |
from xai.brain.wordbase.nouns._middleweight import _MIDDLEWEIGHT
#class header
class _MIDDLEWEIGHTS(_MIDDLEWEIGHT):
def __init__(self,):
_MIDDLEWEIGHT.__init__(self)
self.name = "MIDDLEWEIGHTS"
self.specie = 'nouns'
self.basic = "middleweight"
self.jsondata = {}
| [
"[email protected]"
] | |
2fba743f49ec0a3a286e945e1e913510556c9323 | d7f2007f2f9d87b314f59027d591226152a8aa8b | /pcaps/dnsreduce.py | 3451f328624af2ebbde081bac998cc094e0876a0 | [] | no_license | jwde/comp116-jdestories | ad824d44c54cabfe4546113dfac113338e329a1f | 8b0756f13e79c73b8da9e345788017dbda6a6a70 | refs/heads/master | 2021-01-18T13:20:36.410966 | 2015-12-15T22:26:49 | 2015-12-15T22:26:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | import fileinput
import re
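# Assumed input format (not documented in the original): line-oriented DNS log
# where a line containing an IPv4 address follows the line describing the
# query; for each such pair it prints "<ip>\t<previous line, from column 19 on>".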
lines = []
for line in fileinput.input():
lines.append(line)
for i in range(len(lines)):
    if i != 0:
        match = re.search(r'.* (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}).*', lines[i])
if match:
ip = match.group(1)
print "\t".join([ip, lines[i - 1][18:]])
| [
"[email protected]"
] | |
250f0548ad501d762e33ffc702fc874c85f97b85 | fdec477002fb0c5f013faf369d2a1e782172a1d6 | /COVID19/Vaccine/views.py | 4ac499c5b50e3f84443b7b97adaadf63287c01d4 | [] | no_license | aimiranarzhigitova/API_projects | 19fb416479e5a76dab760f38621e643e2db609cb | 8256cc1bc8dc939453c61a39215e89dbd96fecb1 | refs/heads/master | 2023-05-16T08:52:51.209458 | 2021-06-06T09:44:53 | 2021-06-06T09:44:53 | 374,322,074 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,644 | py | from collections import OrderedDict
from django.http import JsonResponse
from rest_framework.response import Response
from rest_framework.generics import ListAPIView, ListCreateAPIView, RetrieveUpdateDestroyAPIView
from rest_framework.filters import SearchFilter
from rest_framework.pagination import PageNumberPagination
from .serializers import CategorySerializer, BaseVaccineSerializer, CustomerSerializer, \
ReviewCreateSerializers, MadeInSerializers, StatisticsSerializer, VoiceSerializer
from .models import Category, Vaccine, Customer, Review, MadeIn, Voice, Statistics
class VaccinePagination(PageNumberPagination):
page_size = 5
page_size_query_param = 'page_size'
max_page_size = 10
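    # Wrap each page in a stable envelope: total object count, next/previous
    # links, and the serialized items.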
def get_paginated_response(self, data):
return Response(OrderedDict([
('objects_count', self.page.paginator.count),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('items', data)
]))
class MadeInListApiView(ListAPIView):
serializer_class = MadeInSerializers
queryset = MadeIn.objects.all()
class CategoryListApiView(ListCreateAPIView):
serializer_class = CategorySerializer
queryset = Category.objects.all()
class CategoryApiView(RetrieveUpdateDestroyAPIView):
serializer_class = CategorySerializer
queryset = Category.objects.all()
class VaccineListApiView(ListCreateAPIView):
serializer_class = BaseVaccineSerializer
pagination_class = VaccinePagination
queryset = Vaccine.objects.all()
filter_backends = [SearchFilter]
search_fields = ['price', 'title', 'ip']
class VaccineDetailApiView(RetrieveUpdateDestroyAPIView):
serializer_class = BaseVaccineSerializer
queryset = Vaccine.objects.all()
class CustomersListApiView(ListAPIView):
serializer_class = CustomerSerializer
queryset = Customer.objects.all()
class ReviewCreateView(ListAPIView):
queryset = Review.objects.all()
serializer_class = ReviewCreateSerializers
def post(self, request):
review = ReviewCreateSerializers(data=request.data)
        if review.is_valid():  # is_valid is a method; it must be called to run validation
            review.save()
            return Response(review.data)
        return Response(review.errors, status=400)
class StatisticsListAPiView(ListAPIView):
queryset = Statistics.objects.all()
serializer_class = StatisticsSerializer
class SaveAudioListApiView(ListAPIView):
queryset = Review.objects.all()
serializer_class = ReviewCreateSerializers
def post(self, request):
audio_file = request.FILES.get('recorded_audio')
        myObj = Voice()  # instantiate the model; assigning the class itself would never be saved
myObj.voice_record = audio_file
myObj.save()
return JsonResponse({
'success': True,
})
| [
"[email protected]"
] | |
9e7b09a6a4c2d1b2618300fb8c999147a2987994 | 700f9f9e319ebd26d2557d64ea3827808dfad2f5 | /tests/fixtures/test_references_json/content_16_expected.py | 44ae2665689e10ebe81b405d34d6a9fb9c33332b | [
"MIT"
] | permissive | elifesciences/elife-tools | 1b44e660e916a82ef8ff64dd5a6ee5506e517359 | bc16e7dd5d6245077e39f8561b99c9acd510ddf7 | refs/heads/develop | 2023-03-06T08:37:47.424282 | 2023-02-20T20:40:49 | 2023-02-20T20:40:49 | 30,274,058 | 13 | 11 | MIT | 2023-02-20T20:40:50 | 2015-02-04T01:14:41 | Python | UTF-8 | Python | false | false | 2,083 | py | from collections import OrderedDict
expected = [
OrderedDict(
[
("type", "unknown"),
("id", u"bib11"),
("date", u"2006"),
(
"authors",
[
OrderedDict(
[
("type", "person"),
(
"name",
OrderedDict(
[
("preferred", u"Cutler DM"),
("index", u"Cutler, DM"),
]
),
),
]
),
OrderedDict(
[
("type", "person"),
(
"name",
OrderedDict(
[
("preferred", u"Deaton AS"),
("index", u"Deaton, AS"),
]
),
),
]
),
OrderedDict(
[
("type", "person"),
(
"name",
OrderedDict(
[
("preferred", u"Lleras-Muney A"),
("index", u"Lleras-Muney, A"),
]
),
),
]
),
],
),
("title", u"The determinants of mortality (No. w11963)"),
("details", u"Cambridge, National Bureau of Economic Research"),
]
)
]
| [
"[email protected]"
] | |
9beb538b5dcf7efc3b834e8a9a8cf283d7bb8f56 | f3d7aad9fae3275f232cdfd6417f1c9c8a610cc1 | /titlesFotos.py | 33d0eeb575494eab64b8877cc711de14be6bc006 | [] | no_license | LKingJ23/Python_Flask_Headlines | 7b4a8c0f675e7e92cab29e99737ef487239a276e | 886ca997846dcc82ed31033468e35d9958b1061b | refs/heads/master | 2020-03-17T12:21:07.545431 | 2018-05-15T23:56:58 | 2018-05-15T23:56:58 | 133,584,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | from lxml import etree
import urllib2
ns={"Atom" : "http://www.w3.org/2005/Atom"}
parser=etree.XMLParser()
tree=etree.parse(urllib2.urlopen('https://api.flickr.com/services/feeds/photos_public.gne?tags=sevilla'),parser)
for node in tree.xpath('//Atom:entry/Atom:title', namespaces=ns) :
print node.text
| [
"[email protected]"
] | |
bece8694dfc647bf047112a1a9968ab83d6c43a9 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03860/s734303213.py | 5c5cec8efcb1a3649bc607fcd3b9df520b3850e0 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | inp=list(input().split())
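# Abbreviate "AtCoder <s> Contest" as "A<first letter of s>C".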
index=inp[1][0]
print("A"+index+"C") | [
"[email protected]"
] | |
87ff49ec6b62039abc4b66959c36345ef52853ab | 19bc4d44dc7303e23a6949b1bc7b98b65bcf80e9 | /python/Hypothesis_Testing_with_Python/Experimental_Design/Sample_Size_Determination_with_Simulation/introduction.py | c3366c4c4a904b84707b4f9e2198df81eb5a06b7 | [] | no_license | henry1034/Challenge-Project-of-CodeCademy | c66190ff3a318e22f263fcf78344632773065c24 | 61ebe84696cec120393acca62b4fce4bdea0fb30 | refs/heads/master | 2023-07-04T01:04:16.978374 | 2021-07-29T17:27:56 | 2021-07-29T17:27:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | import pandas as pd
from scipy.stats import chi2_contingency
data = pd.read_csv("ab_data.csv")
print(data.head())
# calculate contingency table here
ab_contingency = pd.crosstab(data.Web_Version, data.Purchased)
print(ab_contingency)
# run your chi square test here
pval = chi2_contingency(ab_contingency)[1]
print(pval)
| [
"[email protected]"
] | |
d4f4358369b0e45816e7faef51d89207e706197f | 6ed86bcacca9d065251171a0b53498d630a3b340 | /src/edrn/labcas/ui/views/_metadata.py | d2541bd601b050ff06a6233d1794ae1919ebb079 | [] | no_license | EDRN/edrn.labcas.ui | 595f56b60a72632d1b816d2414a6887f0099e70a | 54a5c947d1a93d73c13b33517e3f1fd0acaec3b5 | refs/heads/master | 2021-01-24T07:12:40.324545 | 2020-03-12T19:49:45 | 2020-03-12T19:49:45 | 38,981,139 | 1 | 0 | null | 2018-02-22T20:18:12 | 2015-07-12T22:55:12 | Python | UTF-8 | Python | false | false | 4,148 | py | # encoding: utf-8
from edrn.labcas.ui import PACKAGE_NAME
from edrn.labcas.ui.interfaces import IBackend
from edrn.labcas.ui.utils import (
LabCASWorkflow, re_python_rfc3986_URI_reference, LabCASCollection, createSchema, addIdentifiersForStringFields,
ID_NUMBER_HUNTER
)
from pyramid.httpexceptions import HTTPFound
from pyramid.view import view_config, view_defaults
from zope.component import getUtility
import deform, os, os.path, logging, uuid
# Logging
_logger = logging.getLogger(__name__)
# Metadata fields for NIST pipelines that generate dataset IDs
_nistMetadataFields = frozenset((u'LabNumber', u'ProtocolName', u'SampleId'))
@view_defaults(renderer=PACKAGE_NAME + ':templates/metadata.pt')
class MetadataView(object):
def __init__(self, request):
self.request = request
def _getDatasetDir(self, metadata, dir, collectionName):
u'''Create and return the path to the dataset directory.'''
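        # e.g. DatasetName u'My Data' in collection u'Lung Study' maps to
        # <staging>/Lung_Study/My_Data (spaces become underscores).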
if u'DatasetName' not in metadata:
raise ValueError(u'DatasetName is a required metadata')
datasetName = metadata[u'DatasetName'].replace(u' ', u'_')
collectionName = collectionName.replace(u' ', u'_')
datasetDir = os.path.join(dir, collectionName, datasetName)
if not os.path.isdir(datasetDir):
os.makedirs(datasetDir, 0775)
return datasetDir
@view_config(route_name='metadata', permission='upload')
def __call__(self):
backend = getUtility(IBackend)
workflowID = self.request.matchdict['workflowID']
wfInfo = backend.getWorkflowMgr().getWorkflowById(workflowID)
workflow = LabCASWorkflow(
wfInfo.get('id', u'unknown'),
wfInfo.get('name', u'unknown'),
wfInfo.get('conditions', []),
wfInfo.get('tasks', [])
)
form = deform.Form(createSchema(workflow, self.request), buttons=('submit',))
if 'submit' in self.request.params:
try:
metadataAppstruct = form.validate(self.request.POST.items())
# CA-1382 ugly kludge, CA-1540 reformat
if _nistMetadataFields <= frozenset(metadataAppstruct.keys()):
ln = metadataAppstruct[u'LabNumber']
pn = metadataAppstruct[u'ProtocolName']
si = metadataAppstruct[u'SampleId']
metadataAppstruct[u'DatasetName'] = metadataAppstruct[u'DatasetId'] = u'{}_{}_{}'.format(ln, pn, si)
elif u'DatasetName' in metadataAppstruct.keys():
metadataAppstruct[u'DatasetId'] = metadataAppstruct[u'DatasetName'].replace(u' ', u'_')
else:
metadataAppstruct[u'DatasetId'] = unicode(uuid.uuid4())
metadataAppstruct[u'DatasetName'] = metadataAppstruct[u'DatasetId']
addIdentifiersForStringFields(metadataAppstruct)
collectionName = workflow.collectionName
if not collectionName:
collectionName = metadataAppstruct[u'CollectionName']
datasetDir = self._getDatasetDir(metadataAppstruct, backend.getStagingDirectory(), collectionName)
if not os.path.isdir(datasetDir):
os.makedirs(datasetDir)
self.request.session['metadata'] = metadataAppstruct
self.request.session['metadataForm'] = form.render(metadataAppstruct, readonly=True)
self.request.session['datasetDir'] = datasetDir
self.request.session['workflow'] = workflow
return HTTPFound(self.request.url + u'/accept')
except deform.ValidationFailure as ex:
return {
u'message': u"Some required metadata don't make sense or are missing.",
u'form': ex.render(),
u'widgetResources': form.get_widget_resources(),
u'pageTitle': u'Upload Metadata'
}
return {
u'form': form.render(),
u'widgetResources': form.get_widget_resources(),
u'pageTitle': u'Upload Metadata'
}
| [
"[email protected]"
] | |
41d226f467c29b6749b5ff10392a729eccb01326 | 015efe8cf8e2740d76a8d0b378f1e75de182103a | /test/unitTestSuite.py | c3c6bfc137471219de330689fae11f42a4c88238 | [
"MIT"
] | permissive | Samakwa/PyGeodesy | d6dbe7f825ee1858cd58e677aae37fd6c60570ee | 4a5b6ac584c12bafc243d08dfc18d872707126d0 | refs/heads/master | 2020-03-28T04:11:51.548133 | 2018-09-05T17:35:01 | 2018-09-05T17:35:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,680 | py |
# -*- coding: utf-8 -*-
# Module to run all PyGeodesy tests as python setup.py test
from glob import glob
from os.path import abspath, dirname, join
import sys
import unittest
_test_dir = dirname(abspath(__file__))
# extend sys.path to include the ../.. directory
if _test_dir not in sys.path: # Python 3+ ModuleNotFoundError
sys.path.insert(0, _test_dir)
from base import runner
__all__ = ('TestSuite',)
__version__ = '18.08.21'
class TestSuite(unittest.TestCase):
'''Combine all test modules into a test suite/case
and run each test module as a separate test.
'''
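    # Each test_* method shells out to the matching test<Name>.py via runner()
    # and asserts a zero exit status; test_Ztotal then verifies that every
    # test module in this directory was exercised.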
_runs = 0 # pseudo global
def _run(self, test):
TestSuite._runs += 1 # pseudo global
x, _ = runner(join(_test_dir, test + '.py'))
self.assertEqual(x, 0)
def test_Bases(self):
self._run('testBases')
def test_Classes(self):
self._run('testClasses')
def test_Datum(self):
self._run('testDatum')
def test_Dms(self):
self._run('testDms')
def test_Elevations(self):
self._run('testElevations')
def test_Ellipsoidal(self):
self._run('testEllipsoidal')
def test_Fmath(self):
self._run('testFmath')
def test_Geohash(self):
self._run('testGeohash')
def test_GreatCircle(self):
self._run('testGreatCircle')
def test_LatLon(self):
self._run('testLatLon')
def test_Lcc(self):
self._run('testLcc')
def test_Mgrs(self):
self._run('testMgrs')
def test_Modules(self):
self._run('testModules')
def test_NavlabExamples(self):
self._run('testNavlabExamples')
def test_Osgr(self):
self._run('testOsgr')
def test_Points(self):
self._run('testPoints')
def test_Routes(self):
self._run('testRoutes')
def test_Simplify(self):
self._run('testSimplify')
def test_Spherical(self):
self._run('testSpherical')
def test_Utils(self):
self._run('testUtils')
def test_Utm(self):
self._run('testUtm')
def test_UtmTMcoords(self):
self._run('testUtmTMcoords')
def test_Vectorial(self):
self._run('testVectorial')
def test_WebMercator(self):
self._run('testWebMercator')
def test_Ztotal(self):
# final test to make sure all tests were run
t = len(glob(join(_test_dir, 'test[A-Z]*.py')))
self.assertEqual(TestSuite._runs, t)
# t = sum(1 for t in dir(TestSuite) if t.startswith('test_'))
# self.assertEqual(TestSuite._runs, t)
if __name__ == '__main__':
unittest.main(argv=sys.argv) # catchbreak=None, failfast=None, verbosity=2
| [
"[email protected]"
] | |
bb4072ebe6c3e4a99fb1a57b8d0f722c97f38521 | 3e276ce46afcdaf365fd62b45ceba19327535f14 | /src/libs/github/request.py | 183377038e90b8e73a10a612875176a72e106be5 | [
"MIT"
] | permissive | 17Y9E81/QQ-GitHub-Bot | 1ca28ccc4b1a2bbbbb24419271389599dcd8ceb4 | 35c20d28aafaedc1813c6213ede9f2f51e56d5a2 | refs/heads/master | 2023-07-13T12:26:33.201661 | 2021-08-25T09:17:20 | 2021-08-25T09:17:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,030 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@Author : yanyongyu
@Date : 2021-03-09 17:34:53
@LastEditors : yanyongyu
@LastEditTime : 2021-06-15 22:14:45
@Description : None
@GitHub : https://github.com/yanyongyu
"""
__author__ = "yanyongyu"
import base64
import urllib.parse
from typing import Any, Optional
import httpx
class Requester:
def __init__(self, token_or_client_id: Optional[str],
client_secret: Optional[str], base_url: str, timeout: int,
user_agent: str, per_page: int, verify: bool):
if client_secret:
b64 = base64.b64encode(
f"{token_or_client_id}:{client_secret}".encode()).decode()
self._authorization: str = f"Basic {b64}"
elif token_or_client_id:
self._authorization: str = f"token {token_or_client_id}"
else:
self._authorization: str = ""
self._base_url = base_url
self._timeout = timeout
self._user_agent = user_agent
self._per_page = per_page
self._verify = verify
self._client: Optional[httpx.AsyncClient] = None
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
@property
def client(self) -> httpx.AsyncClient:
if not self._client:
headers = {
"User-Agent": self._user_agent,
"Authorization": self._authorization,
"Accept": "application/vnd.github.v3+json"
}
self._client = httpx.AsyncClient(headers=headers,
verify=self._verify,
timeout=self._timeout)
return self._client
async def request_json(self,
method: str,
url: str,
params: Optional[dict] = None,
headers: Optional[dict] = None,
json: Any = None):
return await self.request(method, url, params, headers, None, json)
async def request(self,
method: str,
url: str,
params: Optional[dict] = None,
headers: Optional[dict] = None,
data: Optional[dict] = None,
json: Any = None):
url = urllib.parse.urljoin(self._base_url, url)
response = await self.client.request(method,
url,
params=params,
headers=headers,
data=data,
json=json)
response.raise_for_status()
return response
async def close(self):
if self._client:
await self._client.aclose()
self._client = None
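# Hypothetical usage sketch (placeholder values, not part of this module):
#     async with Requester('<token>', None, 'https://api.github.com/', 30,
#                          'my-bot', 30, True) as req:
#         resp = await req.request_json('GET', 'repos/owner/repo')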
| [
"[email protected]"
] | |
3def651405ae307c54ab931f32c357ee7db71021 | fae559bba4c7b7818628c808e909acfe0856662a | /java-debug-class | d99d620d93a5bca08b66895621e8697000081f69 | [] | no_license | pixzels69/cmd-tools | 01811536b04603810a9547e827673d9569bcc655 | 91e9f3647379b30899d94daf5812f279014f7878 | refs/heads/master | 2020-05-01T12:11:04.313317 | 2017-09-30T07:20:10 | 2017-09-30T07:20:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,575 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os as mod_os
import os.path as mod_path
import sys as mod_sys
import utils.optparser as mod_optparser
opt_parser = mod_optparser.OptParser()
delete = opt_parser.has_param( short_name = 'd', long_name = 'delete', comment = 'Delete' )
no_compress = opt_parser.has_param( short_name = 'n', long_name = 'no-compress', comment = 'Compress class' )
path = None
src_paths = [ 'src', 'src/main/java' ]
def compress( java_code ):
""" SO that with git diff, the diff will be smaller """
result = ''
for line in java_code.split( '\n' ):
line = line.strip()
if line.startswith( '//' ):
pass
elif line.startswith( '/*' ) and line.endswith( '*/' ):
pass
elif line.startswith( 'public ' ):
result += '\n' + line.strip()
else:
result += line.strip()
return result
for src_path in src_paths:
if mod_path.exists( src_path ):
path = '%s/temp' % src_path
try:
mod_os.makedirs( path )
except Exception, e:
pass
if not path:
print 'Path not found'
mod_sys.exit( 1 )
java_file_name = 'DBG.java'
file_name = '%s/%s' % ( path, java_file_name )
if delete:
try:
mod_os.remove( file_name )
print 'Removed {0}'.format( file_name )
except:
print 'Error removing {0}'.format( file_name )
try:
mod_os.rmdir( path )
print 'Removed {0}'.format( path )
except:
print 'Error removing {0}'.format( path )
else:
debug_class = """package temp;
import java.beans.BeanInfo;
import java.beans.Introspector;
import java.beans.PropertyDescriptor;
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.lang.reflect.Array;
import java.util.ArrayList;
import java.util.HashMap;
import javax.swing.JOptionPane;
/** Temporary, do not commit! */
public class DBG {
public static final boolean TRUE = true;
private static final String JAVA_HOME = System.getProperty( "java.home" );
private static final String FILE_SEPARATOR = System.getProperty( "file.separator" );
private static final String CLASS_PATH = System.getProperty( "java.class.path" );
static class TimerData {
private double time;
private String name;
private TimerData( double time, String name ) {
this.time = time;
this.name = name;
}
double getTime() {
return time;
}
String getName() {
return name;
}
}
private static ThreadLocal<ArrayList<TimerData>> THREAD_LOCAL = new ThreadLocal<ArrayList<TimerData>>() {
protected synchronized ArrayList<TimerData> initialValue() {
return new ArrayList<TimerData>();
}
};
public static final void alert( Object... objects ) {
JOptionPane.showMessageDialog( null, joinObjects( "", objects ) );
}
public static final void printWithLocation( Object... objects ) {
StackTraceElement stackTraceElement = Thread.currentThread().getStackTrace()[ 2 ];
print( stackTraceElement.getClassName() + "." + stackTraceElement.getMethodName() + "(), line:" + stackTraceElement.getLineNumber() );
String result = joinObjects( "", objects );
if( result != null && result.trim().length() > 0 ) {
printLine( "TEMPDEBUG> ", objects );
}
}
public static final boolean ask( Object... objects ) {
return JOptionPane.YES_OPTION == JOptionPane.showConfirmDialog( null, joinObjects( "", objects ), "?", JOptionPane.YES_NO_OPTION );
}
public static final void print( Object... objects ) {
printLine( "TEMPDEBUG>", objects );
}
public static final void printBinaryRepresentation( byte[] bytes ) {
print( getBinaryRepresentation( bytes ) );
}
public static final String getBinaryRepresentation( byte[] bytes ) {
if( bytes == null ) {
return "null";
}
StringBuilder result = new StringBuilder();
for( byte b : bytes ) {
String binary = Integer.toBinaryString( ( 256 + (int) b ) % 256 );
for( int i = 0; i < 8 - binary.length(); i++ ) {
result.append( "0" );
}
result.append( binary );
result.append( " " );
}
return result.toString();
}
public static final void todo( Object... objects ) {}
public static final void comment( Object... objects ) {}
public static final <T extends Object> T get( T object ) {
return object;
}
private static final void printLine( String prefix, Object[] objects ) {
String result = joinObjects( prefix, objects );
System.out.println( result );
}
private static String joinObjects( String prefix, Object[] objects ) {
if( objects == null || objects.length == 0 )
return "";
StringBuilder result = new StringBuilder( prefix );
for( Object object : objects ) {
result.append( objectToString( object ) );
}
String string = result.toString();
if( prefix != null && prefix.length() > 0 ) {
return string.replace( "\\n", "\\n ".substring( 0, prefix.length() + 1 ) );
}
return string;
}
public static final StackTraceElement getCurrentStackTraceElement() {
return Thread.currentThread().getStackTrace()[ 2 ];
}
public static final void printStackTrace() {
StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace();
for( int i = 2; i < stackTrace.length; i++ ) {
StackTraceElement stackTraceElement = stackTrace[ i ];
print( " at " + stackTraceElement.getClassName() + "." + stackTraceElement.getMethodName() + "(" + stackTraceElement.getLineNumber() + ")" );
}
}
private static String objectToString( Object object ) {
if( object == null ) {
return "null";
} else if( object.getClass().isArray() ) {
StringBuilder result = new StringBuilder();
result.append( "[" );
for( int i = 0 ; i < Array.getLength( object ); i++ ) {
result.append( objectToString( Array.get( object, i ) ) );
result.append( ", " );
}
result.append( "]" );
return result.toString();
} else if( object instanceof Throwable ) {
Throwable throwable = (Throwable) object;
ByteArrayOutputStream out = new ByteArrayOutputStream();
throwable.printStackTrace( new PrintStream( out ) );
try {
out.close();
} catch( Exception ignore ) {}
return new String( out.toByteArray() );
}
return String.valueOf( object );
}
public static final String propertiesToString( Object object ) {
if( object == null ) {
return null;
}
HashMap<String, Object> result = new HashMap<String, Object>();
try {
BeanInfo beanInfo = Introspector.getBeanInfo( object.getClass() );
PropertyDescriptor[] propertyDescriptors = beanInfo.getPropertyDescriptors();
for( PropertyDescriptor propertyDescriptor : propertyDescriptors ) {
String name = propertyDescriptor.getName();
java.lang.reflect.Method readMethod = propertyDescriptor.getReadMethod();
try {
Object value = readMethod.invoke( object, null );
result.put( name, objectToString( value ) );
} catch( Exception ignore ) {}
}
}
catch( Exception e ) {
e.printStackTrace();
}
return result.toString();
}
public static final String getCommandLine( Class<?> classWithMainMethod, String... additionalArguments ) {
String javaCommand = "java";
if( FILE_SEPARATOR.equals( "\\\\" ) ) {
// Probably windows:
javaCommand += ".exe";
}
String arguments = " ";
if( additionalArguments != null ) {
for( String arg : additionalArguments ) {
if( arg != null ) {
arguments += arg + " ";
}
}
}
String commandLine = JAVA_HOME + FILE_SEPARATOR + "bin" + FILE_SEPARATOR + javaCommand + CLASS_PATH + " " + classWithMainMethod.getName() + " " + arguments;
return commandLine;
}
public static final String saveTime( Object... name ) {
ArrayList<TimerData> times = THREAD_LOCAL.get();
times.add( new TimerData( System.nanoTime() / 1000000., joinObjects( "", name ) ) );
return getTimerTimes( 1 );
}
public static final void resetTimer() {
ArrayList<TimerData> times = THREAD_LOCAL.get();
times.clear();
}
/** Reset and start saving times again */
public static final void resetTimer( Object... name ) {
resetTimer();
saveTime( name );
}
public static final String getTimerTimes() {
return getTimerTimes( 1000 );
}
public static final String getTimerTimes( int n ) {
StringBuilder result = new StringBuilder();
ArrayList<TimerData> times = THREAD_LOCAL.get();
int startIndex = Math.max( 0, times.size() - n );
if( times.size() == 0 ) {
return "No timer data";
}
double previous = -1;
double start = times.get( 0 ).getTime();
for( int i = startIndex; i < times.size(); i ++ ) {
TimerData timerData = times.get( i );
if( i > 0 ) {
previous = times.get( i - 1 ).getTime();
} else {
previous = timerData.getTime();
}
String fromPrevious = String.format( "%10.4f", timerData.getTime() - previous );
String fromStart = String.format( "%10.4f", timerData.getTime() - start );
result.append( fromPrevious ).append( "ms" ).append( fromStart ).append( "ms" ).append( " - " ).append( timerData.getName() ).append( '\\n' );
}
return result.toString();
}
public static final void printTimer() {
print( getTimerTimes() );
}
public static final void printTimer( int n ) {
print( getTimerTimes( n ) );
}
public static final void sleep( long milliseconds ) {
try {
Thread.sleep( milliseconds );
} catch( Throwable ignore ) {}
}
}"""
if no_compress:
code = debug_class
else:
code = compress( debug_class )
f = open( '%s/%s' % ( path, java_file_name ), 'w' )
f.write( code )
f.close()
print 'Written temp class: %s' % file_name
| [
"[email protected]"
] | ||
2759d8c95ba6f43470bfd97c99a7dbf69b9fdb76 | 077de1b3c5b1e5531e96f999be95a63c02a0208a | /yabgp/config.py | 6db8b9fa05bb91bc46dd9943570b1090f96d741a | [
"Apache-2.0"
] | permissive | unixian97/yabgp | 5171f3dfa9070fbf91bd2a34fad6d4f32b0b9534 | 1b6752376a43f3c2958ead0afbf3f33ec311ddbd | refs/heads/master | 2021-01-24T18:12:31.133442 | 2017-02-10T08:10:14 | 2017-02-10T08:10:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,436 | py | # Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" basic config """
import logging
import sys
import os
from oslo_config import cfg
CONF = cfg.CONF
CONF.register_cli_opts([
cfg.BoolOpt('standalone', default=True, help='The BGP Agent running mode'),
cfg.StrOpt('pid-file', default=None, help='pid file name')
])
msg_process_opts = [
cfg.BoolOpt('write_disk',
default=True,
help='Whether the BGP message is written to disk'),
cfg.StrOpt('write_dir',
default=os.path.join(os.environ['HOME'], 'data/bgp/'),
help='The BGP messages storage path'),
cfg.IntOpt('write_msg_max_size',
default=500,
help='The Max size of one BGP message file, the unit is MB'),
cfg.BoolOpt('write_keepalive',
default=False,
help='Whether write keepalive message to disk'),
cfg.StrOpt('format',
default='json',
choices=['json', 'list'],
               help='The output format of BGP messages.')
]
CONF.register_opts(msg_process_opts, group='message')
bgp_config_opts = [
cfg.IntOpt('peer_start_interval',
default=10,
help='The interval to start each BGP peer'),
cfg.BoolOpt('four_bytes_as',
default=True,
                help='Whether to support 4-byte AS numbers'),
cfg.BoolOpt('route_refresh',
default=True,
                help='Whether to support sending and receiving route refresh messages'),
cfg.BoolOpt('cisco_route_refresh',
default=True,
                help='Whether to support sending and receiving Cisco route refresh messages'),
cfg.BoolOpt('enhanced_route_refresh',
default=True,
                help='Whether to support enhanced route refresh'),
cfg.StrOpt('add_path',
choices=['ipv4_send', 'ipv4_receive', 'ipv4_both'],
help='BGP additional path feature and supported address family'),
cfg.BoolOpt('graceful_restart',
default=True,
                help='Whether to support graceful restart'),
cfg.BoolOpt('cisco_multi_session',
default=True,
                help='Whether to support Cisco multi-session'),
cfg.DictOpt('running_config',
default={},
help='The running configuration for BGP'),
cfg.StrOpt('config_file',
help='BGP peers configuration file')
]
CONF.register_opts(bgp_config_opts, group='bgp')
bgp_peer_conf_cli_opts = [
cfg.IntOpt('remote_as',
help='The remote BGP peer AS number'),
cfg.IntOpt('local_as',
help='The Local BGP AS number'),
cfg.IPOpt('remote_addr',
help='The remote address of the peer'),
cfg.IPOpt('local_addr',
default='0.0.0.0',
help='The local address of the BGP'),
cfg.StrOpt('md5',
help='The MD5 string use to auth',
secret=True),
cfg.BoolOpt('rib',
default=False,
help='Whether maintain BGP rib table'),
cfg.StrOpt('tag',
choices=['SRC', 'DST', 'BOTH', 'MON'],
help='The agent role tag'
),
cfg.ListOpt('afi_safi',
default=['ipv4'],
help='The Global config for address family and sub address family')
]
CONF.register_cli_opts(bgp_peer_conf_cli_opts, group='bgp')
LOG = logging.getLogger(__name__)
def get_bgp_config():
"""
Get BGP running config
:return:
"""
# check bgp_conf_file
if CONF.bgp.config_file:
LOG.info('Try to load BGP configuration from %s', CONF.bgp.config_file)
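        # Parsing a configuration file is not implemented yet, so this branch always reports failure and exits.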
LOG.error('Failed to load BGP configuration')
# TODO parse xml config file to get multi bgp config
# will be supported in future
sys.exit()
else:
# check bgp configuration from CLI input
LOG.info('Try to load BGP configuration from CLI input')
if CONF.bgp.local_as and CONF.bgp.remote_as and CONF.bgp.local_addr and CONF.bgp.remote_addr:
CONF.bgp.running_config[CONF.bgp.remote_addr] = {
'remote_as': CONF.bgp.remote_as,
'remote_addr': CONF.bgp.remote_addr,
'local_as': CONF.bgp.local_as,
'local_addr': CONF.bgp.local_addr,
'md5': CONF.bgp.md5,
'afi_safi': CONF.bgp.afi_safi,
'capability': {
'local': {
'four_bytes_as': CONF.bgp.four_bytes_as,
'route_refresh': CONF.bgp.route_refresh,
'cisco_route_refresh': CONF.bgp.cisco_route_refresh,
'enhanced_route_refresh': CONF.bgp.enhanced_route_refresh,
'graceful_restart': CONF.bgp.graceful_restart,
'cisco_multi_session': CONF.bgp.cisco_multi_session,
'add_path': CONF.bgp.add_path},
'remote': {}
},
'tag': CONF.bgp.tag
}
LOG.info('Get BGP running configuration for peer %s', CONF.bgp.remote_addr)
for item in CONF.bgp.running_config[CONF.bgp.remote_addr]:
if item == 'capability':
LOG.info('capability local:')
for capa in CONF.bgp.running_config[CONF.bgp.remote_addr][item]['local']:
LOG.info('-- %s: %s' % (
capa,
CONF.bgp.running_config[CONF.bgp.remote_addr][item]['local'][capa]
))
continue
LOG.info("%s = %s", item, CONF.bgp.running_config[CONF.bgp.remote_addr][item])
return
else:
LOG.error('Please provide enough parameters!')
sys.exit()

# ===== /indigo_pl/toc.py (epforgpl/pl-indigo, no license) =====
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from indigo.analysis.toc.base import TOCBuilderBase
from indigo.plugins import plugins
@plugins.register('toc')
class TOCBuilderPL(TOCBuilderBase):
locale = ('pl', 'pol', None)
toc_elements = ["article", "chapter", "conclusions", "coverpage", "division", "paragraph", "preamble", "preface", "section", "subdivision"]
toc_non_unique_components = ['chapter', 'subdivision', 'paragraph']
titles = {
'article': lambda t: 'Art. %s' % t.num + (' - %s' % t.heading if t.heading else ''),
'chapter': lambda t: 'Rozdział %s' % t.num + (' - %s' % t.heading if t.heading else ''),
'division': lambda t: 'Dział %s' % t.num + (' - %s' % t.heading if t.heading else ''),
'paragraph': lambda t: t.num,
'section': lambda t: '§ %s' % t.num,
}

# ===== /dnce/synth8.py (sarahboufelja54/galatea, no license) =====
#An experiment with synthetic data to test whether DNCE works.
#For the data-dependent noise model we use additive gaussian noise
#The data is just samples from a zero mean, unit precision univariate
#gaussian. We initialize the model with the wrong precision and see
#how close we can come to recovering the correct precision, and which
#noise precisions are the best.
#Imports
from matplotlib import pyplot as plt
from pylearn2.optimization.batch_gradient_descent import BatchGradientDescent
from pylearn2.models.mnd import DiagonalMND
from pylearn2.models.mnd import kl_divergence
from pylearn2.distributions.mnd import MND
from pylearn2.distributions.mnd import AdditiveDiagonalMND
import numpy as np
from pylearn2.utils import sharedX
from theano import function
from pylearn2.costs.ebm_estimation import NCE
from galatea.dnce.dnce import DNCE
import theano.tensor as T
#====Options controlling the experiment=========
#the dimension of the data
dim = 100
#number of training examples
m = 20
#number of noise examples per training example
noise_per_clean = 30
#the parameters of the data distribution
true_mu = 1.
true_beta = 1.
#for each of the noise components, we try
#num_beta different values of beta, spaced
#uniformly in log space from 10^min_exp
#to 10^max_exp
num_beta = 5
min_exp = -.5
max_exp = 0.1
#number of trials to run
trials = 3
#Generate the values of beta to consider
idxs = np.arange(num_beta)
pos = idxs / float(num_beta-1)
scaled_shifted = pos * (max_exp-min_exp) + min_exp
betas = 10 ** scaled_shifted
kls = np.zeros((trials,num_beta))
ml_kls = np.zeros((trials,))
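#kls[trial, i] holds KL(true || model) for the i-th candidate noise precision;
#ml_kls holds the maximum-likelihood baseline for each trial.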
for trial in xrange(trials):
#generate the data
data_distribution = MND( sigma = np.identity(dim) / true_beta,
mu = np.zeros((dim,)), seed = 17 * (trial+1) )
true = DiagonalMND( nvis = dim, init_beta = true_beta, init_mu = 0.,
min_beta = .1, max_beta = 10.)
X = sharedX(function([],data_distribution.random_design_matrix(m))())
Xv = X.get_value()
mu = Xv.mean(axis=0)
print 'maximum likelihood mu: ',mu
diff = Xv - mu
var = np.square(diff).mean(axis=0)
mlbeta = 1./var
print 'maximum likelihood beta: ',mlbeta
ml_model = DiagonalMND( nvis = dim, init_mu = mu, init_beta = mlbeta,
min_beta = 0.0,
max_beta = 1e6)
ml_kl = kl_divergence( true, ml_model)
ml_kl = function([],ml_kl)()
assert ml_kl >= 0.0
ml_kls[trial] = ml_kl
print 'maximum likelihood kl divergence:',ml_kl
best_mse = None
#Try each noise beta
for idx1 in xrange(num_beta):
beta = betas[idx1]
print 'Running experiment for ',beta
#Allocate a fresh model
model = DiagonalMND(
nvis = dim,
init_mu = 0.,
init_beta = .1,
min_beta = .001,
max_beta = 1e30)
#Make the noise distribution
noise_distribution = AdditiveDiagonalMND(
init_beta = beta,
nvis = dim
)
#generate the noise samples
noise_func = function([], noise_distribution.random_design_matrix(T.zeros_like(X)))
Y = []
for i in xrange(noise_per_clean):
Y.append(sharedX(noise_func()))
#Get the objective function
nce = NCE( DiagonalMND( nvis = dim,
init_beta = beta, init_mu = 0.,
min_beta = beta,
max_beta = beta),-1)
J = nce(model,X,T.concatenate(Y,axis=0))
#Add DNCE
#Make the noise distribution
noise_distribution = AdditiveDiagonalMND(
init_beta = 100.,
nvis = dim
)
#generate the noise samples
noise_func = function([], noise_distribution.random_design_matrix(X))
Y = []
for i in xrange(noise_per_clean):
Y.append(sharedX(noise_func()))
#Get the objective function
dnce = DNCE(noise_distribution)
J = J + dnce(model,X,Y)
#Minimize the objective function with batch gradient descent
minimizer = BatchGradientDescent( objective = J,
params = model.get_params(),
param_constrainers = [ model.censor_updates ])
print '\tinit obj:',minimizer.obj()
#minimizer.verbose = True
minimizer.minimize()
print '\tfinal obj:',minimizer.obj()
recovered_beta = model.beta.get_value()
recovered_mu = model.mu.get_value()
print '\trecovered beta:',recovered_beta
print '\trecovered mu:',recovered_mu
kl = kl_divergence(true, model)
kl = function([],kl)()
assert kl >= 0.0
print '\tkl was ',kl
kls[trial,idx1] = kl
plt.hold(True)
plt.plot(betas, kls.mean(axis=0),'b')
plt.plot(betas, kls.mean(axis=0)+kls.std(axis=0),'b--')
plt.plot(betas, kls.mean(axis=0)-kls.std(axis=0),'b--')
plt.plot(betas, ml_kls.mean() *np.ones((num_beta,)),'g')
plt.plot(betas, (ml_kls.mean()+ml_kls.std()) *np.ones((num_beta,)),'g--')
plt.plot(betas, (ml_kls.mean()-ml_kls.std()) *np.ones((num_beta,)),'g--')
plt.ylabel('KL divergence')
plt.xlabel('Noise precision')
ax = plt.gca()
ax.set_xscale('log')
plt.show()

# ===== /py/leetcode/405.py (wfeng1991/learnpy, no license) =====
class Solution(object):
def toHex(self, num):
"""
:type num: int
:rtype: str
"""
if num==0:
return '0'
d = {'0000':'0',
'0001':'1',
'0010':'2',
'0011':'3',
'0100':'4',
'0101':'5',
'0110':'6',
'0111':'7',
'1000':'8',
'1001':'9',
'1010':'a',
'1011':'b',
'1100':'c',
'1101':'d',
'1110':'e',
'1111':'f'}
i=0
r=''
m=1
t=''
while i<32:
t=str((num>>i) & m)+t
i+=1
if i%4==0:
r+=d[t]
t=''
r = r[::-1]
n0=0
for c in r:
if c=='0':
n0+=1
else:
break
return r[n0:]
def toHex1(self, num):
"""
:type num: int
:rtype: str
"""
if num == 0:
return '0'
HEX = '0123456789abcdef'
cnt = 0
output = ''
while num != 0 and cnt < 8:
output = HEX[num & 0xF] + output
num = num >> 4
cnt += 1
return output
print(Solution().toHex(0))

# ===== /services/common/msg_service.py (freedream520/loonblog, MIT) =====
import os
import multiprocessing
from multiprocessing import Process
from django.core.mail import send_mail,EmailMultiAlternatives
from services.base_service import BaseService
from services.common.auto_log_service import auto_log
import logging
logger = logging.getLogger('default')
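# Emails are sent from a separate OS process so the calling request does not block on SMTP.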
class MsgService(BaseService):
"""
    Message service
"""
def __init__(self):
pass
@staticmethod
@auto_log
def send_email_by_process(subject, content, mail_to_list):
"""
        Send an email.
:param subject:
:param content:
        :param mail_to_list: list of recipient addresses
:return:
"""
        # logger.info('synchronous send')
# a = send_mail(subject, content, 'LOONAPP<[email protected]>', mail_to_list)
# logger.info(a)
        # logger.info('send in the background')
        logger.info('Sending email: {}-{}-{}'.format(subject, content, mail_to_list))
p = multiprocessing.Process(target=send_mail, args=(subject, content, 'LOONAPP<[email protected]>', mail_to_list))
p.start()
return True, ''
@staticmethod
@auto_log
def send_multi_email_by_process(subject, content, mail_to_list):
        logger.info('Sending HTML email: {}-{}-{}'.format(subject, content, mail_to_list))
msg = EmailMultiAlternatives(subject, content,from_email='LOONAPP<[email protected]>',to=mail_to_list)
msg.content_subtype = "html"
p = multiprocessing.Process(target=msg.send, args=())
p.start()
if __name__ == '__main__':
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.dev")
MsgService().send_email_by_process('test', 'testt',['[email protected]'])
    MsgService().send_multi_email_by_process('test', '<a href="http://www.baidu.com">Baidu</a>', ['[email protected]'])

# ===== /python/ql/test/query-tests/analysis/suppression/test.py (lecoursen/codeql, MIT) =====
# Formatting tests:
"" # lgtm
"" # lgtm[py/line-too-long]
"" # lgtm[py/line-too-long, py/non-callable-called]
"" # lgtm[@tag:security]
"" # lgtm[@tag:security,py/line-too-long]
"" # lgtm[@expires:2017-06-11]
"" # lgtm[py/non-callable-called] because I know better than lgtm
"" # lgtm: blah blah
"" # lgtm blah blah #falsepositive
"" # lgtm blah blah -- falsepositive
"" #lgtm [py/non-callable-called]
"" # lgtm[]
"" # lgtmfoo
"" #lgtm
"" # lgtm
"" # lgtm [py/line-too-long]
"" # lgtm lgtm
#lgtm -- Ignore this -- No line or scope.
#On real code:
def foo(): #lgtm [func]
# lgtm -- Blank line (ignore for now, maybe scope wide in future).
"docstring" # lgtm on docstring
return { #lgtm [py/duplicate-key-in-dict]
"a": 1,
"a": 2
}
class C: # lgtm class
def meth(self): # lgtm method
pass
"" #noqa
"" # noqa
"The following should be ignored"
"" # flake8: noqa
"" # noqa: F401
"" # noqa -- Some extra detail.
"" #Ignore
#Suppression for multiple tools
#LGTM-1929
class frozenbidict(BidictBase): # noqa: E501; (line too long) pylint: disable=invalid-name; lgtm [py/missing-equals]
pass
"" # noqa: E501; (line too long) pylint: disable=invalid-name; lgtm
"" # random nonsense lgtm [py/missing-equals] and then some more commentary...
# Case insensitive comments
"" # LGTM
"" # LGTM[py/line-too-long]
#Avoid some erroneous matches
"" # foolgtm[py/missing-equals]
"" # foolgtm
"" # lgtm[py/line-too-long] and lgtm[py/non-callable-called]
"" # lgtm[py/line-too-long]; lgtm

# ===== /bin/sshtool.py (fsxchen/ssh-tool, no license) =====
#!/usr/bin/env python
#coding:utf-8
import os
import sys
import pwd
import getpass
from paramiko import SSHClient
from paramiko import AutoAddPolicy, SSHException
client = SSHClient()
# client.load_system_host_keys()
LOCAL_USER_NAME = pwd.getpwuid(os.getuid()).pw_name
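# Push the local user's public key to the remote host and append it to authorized_keys (the same idea as ssh-copy-id).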
def sync_public_key(host, port=22, username=None, password=None):
try:
client = SSHClient()
client.connect(hostname=host, username=username, password=password)
except SSHException, e:
client.close()
client = SSHClient()
client.set_missing_host_key_policy(AutoAddPolicy())
client.connect(hostname=host, username=username, password=password)
sftp_client = client.open_sftp()
id_rsa_pub = "/home/%s/.ssh/id_rsa.pub" % LOCAL_USER_NAME
if username == "root":
remote_rsa_pub = "/root/.ssh/%s.pub" % (LOCAL_USER_NAME)
else:
remote_rsa_pub = "/home/%s/.ssh/%s.pub" % (username, LOCAL_USER_NAME)
print remote_rsa_pub
try:
sftp_client.put(id_rsa_pub , remote_rsa_pub)
except Exception, e:
"""
        if the remote host didn't have a .ssh directory
"""
print e
remote_authorized_keys = os.path.join(os.path.dirname(remote_rsa_pub), "authorized_keys")
remote_cmd = "cat %s >> %s && echo OK" % (remote_rsa_pub, remote_authorized_keys)
stdin, stdout, stderr = client.exec_command(remote_cmd)
        # print stdin
else:
print("OK!")
def main():
username, ip = None, None
if len(sys.argv) < 2:
print("usage: %s <ipaddress>" % sys.argv[0])
sys.exit(-1)
if "@" in sys.argv[1]:
username, ip = sys.argv[1].split("@")
else:
ip = sys.argv[1]
if not username:
username = raw_input("Input username:")
pwd = getpass.getpass("password:")
sync_public_key(ip, 22, username, pwd)
if __name__ == '__main__':
main()

# ===== /income/urls.py (taeheechoi/python-incomeexpense-api, no license) =====
from django.urls import path
from . import views
urlpatterns = [
path('', views.IncomeListAPIView.as_view(), name='incomes'),
path('<int:id>', views.IncomeDetailAPIView.as_view(), name='income'),
]

# ===== /Importing-data-in-python-part-2_course6/importing-data-from-the-internet_lesson-1/Turning_a_webpage_into_data_using_BeautifulSoup_getting_the_hyperlinks.py (anujaraj10/DataCampPythonCourses, no license) =====
#In this exercise, you'll figure out how to extract the URLs of the hyperlinks from the BDFL's webpage. In the process, you'll become close
#friends with the soup method find_all().
#Instructions
#Use the method find_all() to find all hyperlinks in soup, remembering that hyperlinks are defined by the HTML tag <a>; store the result in the
#variable a_tags.
#The variable a_tags is a results set: your job now is to enumerate over it, using a for loop and to print the actual URLs of the hyperlinks; to
#do this, for every element link in a_tags, you want to print() link.get('href').
# Import packages
import requests
from bs4 import BeautifulSoup
# Specify url
url = 'https://www.python.org/~guido/'
# Package the request, send the request and catch the response: r
r = requests.get(url)
# Extracts the response as html: html_doc
html_doc = r.text
# create a BeautifulSoup object from the HTML: soup
soup = BeautifulSoup(html_doc)
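# No parser is passed, so BeautifulSoup falls back to the best parser installed.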
# Print the title of Guido's webpage
print(soup.title)
# Find all 'a' tags (which define hyperlinks): a_tags
a_tags = soup.find_all('a')
# Print the URLs to the shell
for link in a_tags:
print(link.get('href'))

# ===== /Python_codes/p02394/s756995345.py (Aasthaengg/IBMdataset, no license) =====
w,h,x,y,r=map(int,input().split())
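# The circle of radius r centered at (x, y) fits inside the w-by-h rectangle iff it stays at least r away from every side.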
print("Yes" if x-r>=0 and x+r<=w and y-r>=0 and y+r<=h else "No") | [
"[email protected]"
] | |

# ===== /django/search2/sample_app/sample_app/settings.py (vavilon/Python3, no license) =====
"""
Django settings for sample_app project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
lib_path = os.path.abspath(os.path.join(BASE_DIR, '../'))
sys.path.append(lib_path)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*&z9_n^drew!pd)znnyfvhp9#uk)i&)di=7^8buvjktc243*q0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'actors',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sample_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sample_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'

# ===== /tests/test_array.py (mgeier/sfs-python, MIT) =====
import numpy as np
from numpy.testing import assert_array_equal
import pytest
import sfs
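# Helper: the same coefficients expressed in every vector-like form the API should accept.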
def vectortypes(*coeffs):
return [
list(coeffs),
tuple(coeffs),
np.array(coeffs),
np.array(coeffs).reshape(1, -1),
np.array(coeffs).reshape(-1, 1),
]
def vector_id(vector):
if isinstance(vector, np.ndarray):
return 'array, shape=' + repr(vector.shape)
return type(vector).__name__
@pytest.mark.parametrize('N, spacing, result', [
(2, 1, sfs.array.SecondarySourceDistribution(
x=[[0, -0.5, 0], [0, 0.5, 0]],
n=[[1, 0, 0], [1, 0, 0]],
a=[1, 1],
)),
(3, 1, sfs.array.SecondarySourceDistribution(
x=[[0, -1, 0], [0, 0, 0], [0, 1, 0]],
n=[[1, 0, 0], [1, 0, 0], [1, 0, 0]],
a=[1, 1, 1],
)),
(3, 0.5, sfs.array.SecondarySourceDistribution(
x=[[0, -0.5, 0], [0, 0, 0], [0, 0.5, 0]],
n=[[1, 0, 0], [1, 0, 0], [1, 0, 0]],
a=[0.5, 0.5, 0.5],
)),
])
def test_linear_with_defaults(N, spacing, result):
a = sfs.array.linear(N, spacing)
assert a.x.dtype == np.float64
assert a.n.dtype == np.float64
assert a.a.dtype == np.float64
assert_array_equal(a.x, result.x)
assert_array_equal(a.n, result.n)
assert_array_equal(a.a, result.a)
def test_linear_with_named_arguments():
a = sfs.array.linear(N=2, spacing=0.5)
assert_array_equal(a.x, [[0, -0.25, 0], [0, 0.25, 0]])
assert_array_equal(a.n, [[1, 0, 0], [1, 0, 0]])
assert_array_equal(a.a, [0.5, 0.5])
@pytest.mark.parametrize('center', vectortypes(-1, 0.5, 2), ids=vector_id)
def test_linear_with_center(center):
a = sfs.array.linear(2, 1, center=center)
assert_array_equal(a.x, [[-1, 0, 2], [-1, 1, 2]])
assert_array_equal(a.n, [[1, 0, 0], [1, 0, 0]])
assert_array_equal(a.a, [1, 1])
@pytest.mark.parametrize('orientation', vectortypes(0, -1, 0), ids=vector_id)
def test_linear_with_center_and_orientation(orientation):
a = sfs.array.linear(2, 1, center=[0, 1, 2], orientation=orientation)
assert_array_equal(a.x, [[-0.5, 1, 2], [0.5, 1, 2]])

# ===== /python/ray/air/train/integrations/rl/__init__.py (LuBingtan/ray, Apache-2.0/MIT/BSD-3-Clause) =====
from ray.air.train.integrations.rl.rl_trainer import RLTrainer, load_checkpoint
__all__ = ["RLTrainer", "load_checkpoint"]

# ===== /eventsourcing/application/multiprocess.py (pjvds/eventsourcing, BSD-3-Clause) =====
import multiprocessing
from multiprocessing import Manager
from time import sleep
import six
from eventsourcing.application.process import Prompt
from eventsourcing.application.system import System
from eventsourcing.domain.model.decorators import retry
from eventsourcing.domain.model.events import subscribe, unsubscribe
from eventsourcing.exceptions import CausalDependencyFailed
from eventsourcing.interface.notificationlog import RecordManagerNotificationLog
DEFAULT_POLL_INTERVAL = 5
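# One operating-system process is started per (pipeline, process application) pair; prompts published by an application fan out from its outbox to the inbox queue of every follower.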
class Multiprocess(object):
def __init__(self, system, pipeline_ids=(-1,), poll_interval=None, notification_log_section_size=5,
pool_size=1, setup_tables=False, sleep_for_setup_tables=0):
self.pool_size = pool_size
self.system = system
self.pipeline_ids = pipeline_ids
self.poll_interval = poll_interval or DEFAULT_POLL_INTERVAL
assert isinstance(system, System)
self.os_processes = None
self.notification_log_section_size = notification_log_section_size
self.setup_tables = setup_tables or system.setup_tables
self.sleep_for_setup_tables = sleep_for_setup_tables
def start(self):
assert self.os_processes is None, "Already started"
self.os_processes = []
self.manager = Manager()
self.inboxes = {}
self.outboxes = {}
# Setup queues.
for pipeline_id in self.pipeline_ids:
for process_class, upstream_classes in self.system.followings.items():
inbox_id = (pipeline_id, process_class.__name__.lower())
if inbox_id not in self.inboxes:
self.inboxes[inbox_id] = self.manager.Queue()
for upstream_class in upstream_classes:
outbox_id = (pipeline_id, upstream_class.__name__.lower())
if outbox_id not in self.outboxes:
self.outboxes[outbox_id] = Outbox()
if inbox_id not in self.outboxes[outbox_id].downstream_inboxes:
self.outboxes[outbox_id].downstream_inboxes[inbox_id] = self.inboxes[inbox_id]
# Subscribe to broadcast prompts published by a process
# application in the parent operating system process.
subscribe(handler=self.broadcast_prompt, predicate=self.is_prompt)
# Start operating system process.
for pipeline_id in self.pipeline_ids:
for process_class, upstream_classes in self.system.followings.items():
os_process = OperatingSystemProcess(
application_process_class=process_class,
upstream_names=[cls.__name__.lower() for cls in upstream_classes],
poll_interval=self.poll_interval,
pipeline_id=pipeline_id,
notification_log_section_size=self.notification_log_section_size,
pool_size=self.pool_size,
setup_tables=self.setup_tables,
inbox=self.inboxes[(pipeline_id, process_class.__name__.lower())],
outbox=self.outboxes[(pipeline_id, process_class.__name__.lower())],
)
os_process.daemon = True
os_process.start()
self.os_processes.append(os_process)
if self.setup_tables:
# Avoid conflicts when creating tables.
sleep(self.sleep_for_setup_tables)
def broadcast_prompt(self, prompt):
outbox_id = (prompt.pipeline_id, prompt.process_name)
assert outbox_id in self.outboxes, (outbox_id, self.outboxes.keys())
self.outboxes[outbox_id].put(prompt)
@staticmethod
def is_prompt(event):
return isinstance(event, Prompt)
def close(self):
unsubscribe(handler=self.broadcast_prompt, predicate=self.is_prompt)
for os_process in self.os_processes:
os_process.inbox.put('QUIT')
for os_process in self.os_processes:
os_process.join(timeout=10)
for os_process in self.os_processes:
os_process.is_alive() and os_process.terminate()
self.os_processes = None
self.manager = None
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
class Outbox(object):
def __init__(self):
self.downstream_inboxes = {}
def put(self, msg):
for q in self.downstream_inboxes.values():
q.put(msg)
class OperatingSystemProcess(multiprocessing.Process):
def __init__(self, application_process_class, upstream_names, pipeline_id=-1,
poll_interval=DEFAULT_POLL_INTERVAL, notification_log_section_size=None,
pool_size=5, setup_tables=False, inbox=None, outbox=None, *args, **kwargs):
super(OperatingSystemProcess, self).__init__(*args, **kwargs)
self.application_process_class = application_process_class
self.upstream_names = upstream_names
self.daemon = True
self.pipeline_id = pipeline_id
self.poll_interval = poll_interval
self.notification_log_section_size = notification_log_section_size
self.pool_size = pool_size
self.inbox = inbox
self.outbox = outbox
self.setup_tables = setup_tables
def run(self):
# Construct process application object.
self.process = self.application_process_class(
pipeline_id=self.pipeline_id,
notification_log_section_size=self.notification_log_section_size,
pool_size=self.pool_size,
setup_table=self.setup_tables,
)
# Follow upstream notification logs.
for upstream_name in self.upstream_names:
# Obtain a notification log object (local or remote) for the upstream process.
if upstream_name == self.process.name:
# Upstream is this process's application,
# so use own notification log.
notification_log = self.process.notification_log
else:
# For a different application, we need to construct a notification
# log with a record manager that has the upstream application ID.
# Currently assumes all applications are using the same database
                # and record manager class. If it wasn't the same database, we would
                # need to use a remote notification log, and upstream would need to provide
# an API from which we can pull. It's not unreasonable to have a fixed
# number of application processes connecting to the same database.
record_manager = self.process.event_store.record_manager
notification_log = RecordManagerNotificationLog(
record_manager=record_manager.clone(
application_name=upstream_name,
pipeline_id=self.pipeline_id
),
section_size=self.process.notification_log_section_size
)
# Todo: Support upstream partition IDs different from self.pipeline_id.
# Todo: Support combining partitions. Read from different partitions but write to the same partition,
# could be one os process that reads from many logs of the same upstream app, or many processes each
# reading one partition with contention writing to the same partition).
# Todo: Support dividing partitions Read from one but write to many. Maybe one process per
# upstream partition, round-robin to pick partition for write. Or have many processes reading
# with each taking it in turn to skip processing somehow.
# Todo: Dividing partitions would allow a stream to flow at the same rate through slower
# process applications.
# Todo: Support merging results from "replicated state machines" - could have a command
# logging process that takes client commands and presents them in a notification log.
# Then the system could be deployed in different places, running independently, receiving
# the same commands, and running the same processes. The command logging process could
# be accompanied with a result logging process that reads results from replicas as they
# are available. Not sure what to do if replicas return different things. If one replica
# goes down, then it could resume by pulling events from another? Not sure what to do.
# External systems could be modelled as commands.
# Make the process follow the upstream notification log.
self.process.follow(upstream_name, notification_log)
# Subscribe to broadcast prompts published by the process application.
subscribe(handler=self.broadcast_prompt, predicate=self.is_prompt)
try:
self.loop_on_prompts()
finally:
unsubscribe(handler=self.broadcast_prompt, predicate=self.is_prompt)
@retry(CausalDependencyFailed, max_attempts=100, wait=0.1)
def loop_on_prompts(self):
# Run once, in case prompts were missed.
self.process.run()
# Loop on getting prompts.
while True:
try:
# Todo: Make the poll interval gradually increase if there are only timeouts?
item = self.inbox.get(timeout=self.poll_interval)
self.inbox.task_done()
if item == 'QUIT':
self.process.close()
break
else:
self.process.run(item)
except six.moves.queue.Empty:
# Basically, we're polling after a timeout.
self.process.run()
def broadcast_prompt(self, prompt):
self.outbox.put(prompt)
@staticmethod
def is_prompt(event):
return isinstance(event, Prompt)

# ===== /union.py (1024Person/ProxyPool, no license) =====
# Integration module
# While extending, the entry point only invoked the scheduler module, so the
# second scheduler's checker would re-check proxy IPs that had already been
# checked, which wastes a lot of work. An IP that has been checked once does
# not need to be checked again, so there are two possible solutions:
# 1. Refactor the scheduler module so one checker can serve several crawlers.
# 2. Add an integration module: each scheduler's crawler saves the IPs it
#    fetches to its own file, its checker only checks the IPs in that file,
#    and once all schedulers have finished, the integration module merges all
#    the files into one.
import sys
import os
import pandas as pd
from scheduler import Scheduler
import extension
from setting import csv_file_path
class Union(object):
    # Parameter file_list: list of file paths to merge
    # Parameter is_del_file: whether to delete the intermediate files (default: False, keep them)
def __init__(self,file_list,is_del_file = False):
# self.save = SaveIp()
self.file_list = file_list
        self.prepare_work()
self.is_del_file = is_del_file
    # Preparation: verify that every file in the given file_list exists
    # and drop any path that does not.
    def prepare_work(self):
        # Building a new list avoids mutating the list while iterating over it.
        self.file_list = [path for path in set(self.file_list) if os.path.exists(path)]
def run(self):
# save = SaveIp(mode='a')
df = pd.DataFrame(data=[],columns=["ip","scores"])
for file_path in self.file_list:
file_ips = self.read(file_path)
if file_ips is not None:
df = df.append(file_ips)
# scores = [10 for _ in range(len(ips))]
# df = pd.DataFrame({"ip":ips,"scores":scores})
        df.to_csv(csv_file_path,index=None,mode='a',columns=None,header=False) # everything goes into the shared proxy pool file
print("文件整合成功")
if self.is_del_file:
print("正在删除临时文件。。。")
self.delete_file()
print("临时文件删除成功")
def delete_file(self):
for file_path in self.file_list:
print(f"正在删除{file_path}")
os.remove(file_path)
def read(self,file_path):
try:
dt = pd.read_csv(file_path)
dt.columns=["ip","scores"]
return dt
        except Exception:
return None
if __name__ == "__main__":
current_path = os.path.dirname(os.path.abspath(__file__))
# f_path = current_path+"\\89_ip.csv"
f_name = ["\\qingting.csv",'\\kuai.csv',"\\89_ip.csv","\\tomato.csv"]
f_path_list = [current_path+_ for _ in f_name]
    kuai_scheduler = Scheduler(ip_from="web",base_url=extension.kuai_base_url,crawler_parse_fn=extension.kuai_parse,crawler_pages=200,save_m="a",save_path=f_path_list[1],client_path=f_path_list[1],name="Kuai proxy scheduler")
kuai_scheduler.start_scheduler()
kuai_scheduler.shutdown()
    qingting_scheduler = Scheduler(ip_from="web",base_url=extension.qingting_base_url,crawler_pages=4,crawler_parse_fn=extension.qingting_parse,save_path=f_path_list[0],save_m="a",client_path=f_path_list[0],name="Qingting proxy scheduler")
qingting_scheduler.start_scheduler()
qingting_scheduler.shutdown()
    _89_scheduler = Scheduler(ip_from='web',base_url=extension._89_base_url,crawler_pages=10,crawler_parse_fn=extension._89_parse,save_m='a',save_path=f_path_list[2],client_path=f_path_list[2],name="89 proxy scheduler")
_89_scheduler.start_scheduler()
_89_scheduler.shutdown()
    tomato_scheduler = Scheduler(ip_from='web',base_url=extension._89_base_url,crawler_pages=10,crawler_parse_fn=extension._89_parse,save_m='a',save_path=f_path_list[3],client_path=f_path_list[3],name="Tomato proxy scheduler")
tomato_scheduler.start_scheduler()
tomato_scheduler.shutdown()
union = Union(f_path_list,True)
union.run()

# ===== /convert.py (manasRK/document_rating, no license) =====
__author__ = 'NLP-PC'
from load_data import load_pickle
from save_data import dump_picle
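# Re-pickle an object at protocol 2 so it can also be loaded from Python 2.7; the copy keeps the original name with a '_v2.7.p' suffix.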
def convert(source_file):
s = load_pickle(source_file)
dump_picle(s, str(source_file)[:-2] + '_v2.7.p', protocol=2)
convert('./web_api/embedding_matrix_CVAT.p')
convert('./web_api/word_idx_map_CVAT.p')

# ===== /input.py (daeken/space_game, no license) =====
import pygame
MOVE_DOWN = 1
MOVE_UP = 2
MOVE_LEFT = 4
MOVE_RIGHT = 8
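# Directions are distinct powers of two so several of them can be OR'd together into one bitmask.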
class Input:
def __init__(self, spaceship):
self.spaceship = spaceship
pygame.key.set_repeat(1, 500)
pygame.mouse.set_visible(False)
def Handler(self, event):
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP:
self.spaceship.Move(MOVE_UP)
elif event.key == pygame.K_DOWN:
self.spaceship.Move(MOVE_DOWN)
elif event.key == pygame.K_LEFT:
self.spaceship.Move(MOVE_LEFT)
elif event.key == pygame.K_RIGHT:
self.spaceship.Move(MOVE_RIGHT)
elif event.unicode == ' ':
self.spaceship.Fire()
elif event.unicode == 's':
self.spaceship.Fire()
elif event.key == pygame.K_ESCAPE:
return False
elif event.type == pygame.MOUSEMOTION:
self.spaceship.pos = list(event.pos)
elif event.type == pygame.MOUSEBUTTONDOWN:
self.spaceship.Fire()
elif event.type == pygame.QUIT:
return False
return True

# ===== /plastering/inferencers/scrabble_new.py (PeterYang21/plastering, MIT) =====
import os
import sys
import importlib.util
import pdb
from . import Inferencer
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/scrabble')
# The above line is just for the convenience of the dev.
from ..metadata_interface import *
from ..rdf_wrapper import *
from ..common import *
POINT_POSTFIXES = ['sensor', 'setpoint', 'alarm', 'command', 'meter']
from scrabble import Scrabble # This may imply incompatible imports.
from scrabble.common import *
class ScrabbleInterface(Inferencer):
"""docstring for ScrabbleInterface"""
def __init__(self,
target_building,
target_srcids,
source_buildings,
config=None
):
config['required_label_types'] = [POINT_TAGSET,
FULL_PARSING,
ALL_TAGSETS]
super(ScrabbleInterface, self).__init__(
target_building=target_building,
target_srcids=target_srcids,
source_buildings=source_buildings,
config=config,
framework_name='scrabble')
self.target_label_type = ALL_TAGSETS
if not config:
config = {}
# Prepare config for Scrabble object
if 'seed_num' in config:
seed_num = config['seed_num']
else:
seed_num = 10
if 'sample_num_list' in config:
sample_num_list = config['sample_num_list']
else:
sample_num_list = [seed_num] * len(set(source_buildings +
[target_building]))
if self.target_building not in self.source_buildings:
self.source_buildings = self.source_buildings + [self.target_building]
if len(self.source_buildings) > len(sample_num_list):
sample_num_list.append(0)
if 'use_cluster_flag' not in config:
config['use_cluster_flag'] = True
if 'use_brick_flag' not in config:
config['use_brick_flag'] = True
if 'negative_flag' not in config:
config['negative_flag'] = True
if 'tagset_classifier_type' not in config:
config['tagset_classifier_type'] = 'MLP'
if 'crfqs' not in config:
config['crfqs'] = 'confidence'
if 'entqs' not in config:
config['entqs'] = 'phrase_util'
if 'n_jobs' not in config:
config['n_jobs'] = 10
if 'use_known_tags' not in config:
config['use_known_tags'] = False
if 'apply_filter_flag' not in config:
self.apply_filter_flag = False
else:
self.apply_filter_flag = config['apply_filter_flag']
if 'apply_validating_samples' not in config:
self.apply_validating_samples = False
else:
self.apply_validating_samples = config['apply_validating_samples']
# TODO: This should be migrated into Plastering
building_sentence_dict, target_srcids, building_label_dict,\
building_tagsets_dict, known_tags_dict = load_data(target_building,
source_buildings)
self.scrabble = Scrabble(target_building,
target_srcids,
building_label_dict,
building_sentence_dict,
building_tagsets_dict,
source_buildings,
sample_num_list,
known_tags_dict,
config=config,
)
#self.update_model([])
new_srcids = deepcopy(self.scrabble.learning_srcids)
if self.hotstart:
new_srcids = [obj.srcid for obj in LabeledMetadata.objects(
building=target_building)]
self.scrabble.clear_training_samples()
self.update_model(new_srcids)
self.zodiac_good_preds = {}
def learn_auto(self, iter_num=25, inc_num=10):
for i in range(0, iter_num):
print('--------------------------')
print('{0}th iteration'.format(i))
new_srcids = self.select_informative_samples(inc_num)
self.update_model(new_srcids)
self.evaluate(self.target_srcids)
print('curr new srcids: {0}'.format(len(new_srcids)))
print('training srcids: {0}'.format(len(self.training_srcids)))
print('f1: {0}'.format(self.history[-1]['metrics']['f1']))
print('macrof1: {0}'.format(self.history[-1]['metrics']['macrof1']))
def update_model(self, new_srcids):
super(ScrabbleInterface, self).update_model(new_srcids)
self.scrabble.update_model(new_srcids)
def postprocessing_pred(self, pred):
# Currently only ingest point tagsets.
pred_g = self.new_graph(empty=True)
for srcid, tagsets in pred.items():
point_tagset = sel_point_tagset(tagsets, srcid)
point_prob = 1 # temporary
pred_g.add_pred_point_result(pred_g, srcid,
point_tagset, point_prob)
return pred_g
def predict(self, target_srcids=None, all_tagsets=False):
if not target_srcids:
target_srcids = self.target_srcids
pred = self.scrabble.predict(target_srcids)
if self.apply_filter_flag:
pred = self.apply_filter_by_zodiac(pred)
self.pred_g = self.postprocessing_pred(pred)
if all_tagsets:
return self.pred_g, pred # This should be generalized inside
# postprocessing_pred
else:
return self.pred_g
def predict_proba(self, target_srcids=None):
return self.scrabble.predict_proba(target_srcids)
def apply_prior_zodiac(self, sample_num):
if not self.prior_g:
return []
instances = get_instance_tuples(self.prior_g)
good_preds = {}
for srcid, point_tagset in instances.items():
triple = (BASE[srcid], RDF.type, BRICK[point_tagset])
if self.prior_confidences[triple] > 0.9:
good_preds[srcid] = point_tagset
pred_g = self.predict()
incorrect_srcids = []
for srcid, good_point_tagset in good_preds.items():
pred_point_tagset = get_point_type(pred_g, BASE[srcid])
if (good_point_tagset != pred_point_tagset) or\
(good_point_tagset == 'unknown' and pred_point_tagset == 'none') or\
(good_point_tagset == 'none' and pred_point_tagset == 'unknown'):
incorrect_srcids.append(srcid)
if not incorrect_srcids:
return []
new_srcids = select_random_samples(
building=self.target_building,
srcids=incorrect_srcids,
n=sample_num,
use_cluster_flag=True,
sentence_dict=self.scrabble.char2ir.sentence_dict,
unique_clusters_flag=True,
)
return new_srcids
def is_same_tagset(self, tagset1, tagset2):
if tagset1 == tagset2:
return True
elif tagset1 == 'none' and tagset2 == 'unknown':
return True
elif tagset1 == 'unknown' and tagset2 == 'none':
return True
else:
return False
def apply_filter_by_zodiac(self, pred):
if not self.prior_g:
return pred
instances = get_instance_tuples(self.prior_g)
self.zodiac_good_preds = {}
for srcid, point_tagset in instances.items():
triple = (BASE[srcid], RDF.type, BRICK[point_tagset])
if self.prior_confidences[triple] > 0.8:
self.zodiac_good_preds[srcid] = point_tagset
fixed_cnt = 0
for srcid, pred_tagsets in pred.items():
pred_point_tagset = sel_point_tagset(pred_tagsets, srcid)
good_point_tagset = self.zodiac_good_preds.get(srcid, None)
if not good_point_tagset:
continue
if not self.is_same_tagset(pred_point_tagset, good_point_tagset):
pred_tagsets = [tagset for tagset in pred_tagsets
if not is_point_tagset(tagset)]
pred_tagsets.append(good_point_tagset)
print('FIXED {0}, {1} -> {2}'.format(srcid,
pred_point_tagset,
good_point_tagset))
fixed_cnt += 1
pred[srcid] = pred_tagsets
print('TOTAL_FIXED_POINTS: {0}'.format(fixed_cnt))
return pred
def select_informative_samples(self, sample_num=10):
# Use prior (e.g., from Zodiac.)
new_srcids = []
#if self.apply_validating_samples:
# new_srcids += self.apply_prior_zodiac(sample_num)
if len(new_srcids) < sample_num:
new_srcids += self.scrabble.select_informative_samples(
sample_num - len(new_srcids))
#new_srcids = [srcid for srcid in new_srcids
# if srcid not in self.zodiac_good_preds][0:sample_num]
return new_srcids

# ===== /guild/tests/samples/projects/op-main-package/src/pkg/main_impl.py (guildai/guildai, Apache-2.0) =====
def run():
print("hello from %s in %s" % (__name__, __package__))

# ===== /src/sphinx_bulma/logo/__init__.py (pauleveritt/sphinx_bulma, Apache-2.0) =====
from sphinx_bulma.base import BaseComponent
class LogoComponent(BaseComponent):
name = 'logo'

# ===== /Basket/views.py (AshtiNematian/Book_Store_Nematian_, no license) =====
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, render
from Basket.models import Basket
from Coupon.forms import CouponApplyForm
from Product.models import Book
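# The basket lives in the session; the add/delete/update views are AJAX POST endpoints that return the basket state as JSON.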
def basket_summary(request):
basket = Basket(request)
return render(request, 'summary.html', {'basket': basket})
def basket_add(request):
basket = Basket(request)
if request.POST.get('action') == 'post':
product_id = int(request.POST.get('productid'))
product_qty = int(request.POST.get('productqty'))
product = get_object_or_404(Book, id=product_id)
        coupon_apply_forms = CouponApplyForm()
        # Only add the item to the basket when enough stock is available.
        if product.inventory != 0 and product_qty < product.inventory:
            basket.add(product=product, qty=product_qty)
            product.remove_items_from_inventory(product_qty)
        basketqty = len(basket)
response = JsonResponse({'qty': basketqty})
return response
def basket_delete(request):
basket = Basket(request)
if request.POST.get('action') == 'post':
product_id = int(request.POST.get('productid'))
basket.delete(product=product_id)
        basketqty = len(basket)
baskettotal = basket.get_total_price()
response = JsonResponse({'qty': basketqty, 'subtotal': baskettotal})
return response
def basket_update(request):
basket = Basket(request)
if request.POST.get('action') == 'post':
product_id = int(request.POST.get('productid'))
product_qty = int(request.POST.get('productqty'))
basket.update(product=product_id, qty=product_qty)
        basketqty = len(basket)
baskettotal = basket.get_total_price()
response = JsonResponse({'qty': basketqty,
'subtotal': baskettotal})
return response

# ===== /BOJ/Tree/1967.py (chorwonkim/__Algorithms__, no license) =====
from sys import stdin
from collections import deque
Read = stdin.readline
n = int(Read())
graph = [[] for _ in range(n)]
# for _ in range(n-1):
# x, y, z = map(int, Read().split())
#
# graph[x-1].extend((y-1, z))
# graph[y-1].extend((x-1, z))
#
# print(graph)
#
#
# def func_1967(start, node):
# d = deque(start)
#
# while d:
# root = d.popleft()
# length = d.popleft()
#
# if visited[root]:
# node += 1
# continue
#
# visited[root] += length
# visited_path[node].append(root)
#
# for sub in graph[root]:
# d.append(sub)
#
#
# visited = [1] + [0 for _ in range(n-1)]
# visited_path = [[] for _ in range(n)]
# func_1967(graph[0], 0)
# print(visited)
# print(visited_path)
# for _ in range(n-1):
# x, y, z = map(int, Read().split())
#
# graph[x-1].extend((y-1, z))
#
# print(graph)
#
#
# def func_1967(start):
# d = deque(start)
#
# while d:
# root = d.popleft()
# length = d.popleft()
#
# visited[root] += length
#
#
# visited = [1] + [0 for _ in range(n-1)]
# for i in range(n):
# func_1967(graph[i])
# print(visited)
# for _ in range(n-1):
# x, y, z = map(int, Read().split())
#
# graph[x-1].extend((y-1, z))
# graph[y-1].extend((x-1, z))
#
# print(graph)
#
#
# def func_1967(start, node):
# d = deque(start)
#
# while d:
# root = d.popleft()
# length = d.popleft()
#
# if root > node:
# visited[root] += length
# else:
# visited[node] += visited[root]
#
#
# visited = [0 for _ in range(n)]
#
# for i in range(n):
# func_1967(graph[i], i)
#
# print(visited)
for _ in range(n-1):
x, y, z = map(int, Read().split())
graph[x-1].extend((x-1, y-1, z))
graph[y-1].extend((y-1, x-1, z))
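# Tree diameter by double BFS: search from node 0 to find the farthest node t1,
# then search again from t1; the largest distance found the second time is the answer.
# Each adjacency entry is stored as a flattened (node, neighbor, edge weight) triple.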
def func_1967(start):
d = deque(graph[start])
visited[start] = True
while d:
node = d.popleft()
root = d.popleft()
length = d.popleft()
if visited[root]:
visited_path[node] += visited_path[root]
continue
visited[root] = True
visited_path[root] += length
for sub in graph[root]:
d.append(sub)
visited_path = [0 for _ in range(n)]
visited = [False for _ in range(n)]
func_1967(0)
temp = max(visited_path)
t1 = visited_path.index(temp)
visited_path = [0 for _ in range(n)]
visited = [False for _ in range(n)]
func_1967(t1)
print(max(visited_path))

# ===== /backup/user_373/ch5_2020_03_03_19_35_15_420500.py (gabriellaec/desoft-analise-exercicios, no license) =====
def libras_para_kg(libras):
    kg = 0.45359 * libras
    return kg

# ===== /textt/apps.py (chris-baby/test, no license) =====
from django.apps import AppConfig
class TexttConfig(AppConfig):
name = 'textt'