blob_id (string, length 40) | directory_id (string, length 40) | path (string, 3-288 chars) | content_id (string, length 40) | detected_licenses (list, 0-112 items) | license_type (string, 2 classes) | repo_name (string, 5-115 chars) | snapshot_id (string, length 40) | revision_id (string, length 40) | branch_name (string, 684 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 147 classes) | src_encoding (string, 25 classes) | language (string, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 128 to 12.7k) | extension (string, 142 classes) | content (string, 128 to 8.19k chars) | authors (list, 1 item) | author_id (string, 1-132 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
74b87ca9cb07bcf0b829fb9f8d1acca0b0fd7381
|
182dd5305aedeaa197f302c0d830ab85413cdd53
|
/plugins/Filters/Convert2Gray/Convert2Gray.py
|
68772b2605be2aa6796c95576bfe72f1a8208b5f
|
[
"MIT"
] |
permissive
|
UmSenhorQualquer/workflow-editor
|
016dbf47759b2572a811b80fc8bc79c88404c4ab
|
6f836f99e155c2f503cf59adf4e8b8b574184e6d
|
refs/heads/master
| 2021-01-24T18:58:13.224476 | 2017-07-20T10:00:10 | 2017-07-20T10:00:10 | 86,163,117 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,274 |
py
|
import core.utils.tools as tools, cv2
from core.modules.OTModulePlugin import OTModulePlugin
from core.modules.ModuleConnection import ModuleConnection
from datatypes.TypeComponentsVideoPipe import TypeComponentsVideoPipe
from datatypes.TypeColorVideoPipe import TypeColorVideoPipe
from datatypes.TypeColorVideo import TypeColorVideo
from pyforms.Controls import ControlPlayer
from pyforms.Controls import ControlCombo
from pyforms.Controls import ControlButton
class Convert2Gray(OTModulePlugin,TypeColorVideoPipe):
def __init__(self, name):
icon_path = tools.getFileInSameDirectory(__file__, 'iconsubbg.jpg')
OTModulePlugin.__init__(self, name, iconFile=icon_path)
TypeColorVideoPipe.__init__(self)
self._video = ModuleConnection("Video", connecting=TypeColorVideo)
self._player = ControlPlayer("Video player")
self._video.changed = self.newVideoInputChoosen
self._player.processFrame = self.processFrame
self._formset = [
'_video',
"_player",
]
def newVideoInputChoosen(self):
ModuleConnection.changed_event(self._video)
value = self._video.value
if value:
self.open(value)
self._player.value = value
def processFrame(self, frame):
return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
|
[
"[email protected]"
] | |
f084f2434510565b6756fe2e22ff2eee7bd9ae65
|
52272ef3de9036a1b23b35047ceb90a2027df1f3
|
/selenium_doc/TEST/test1.py
|
23254c71b70f3e7247cd31f2a4df400aa90ba122
|
[] |
no_license
|
luokeke/selenium_python
|
9f2883cc158e473902e0c4bbf9fca20ecb61bfda
|
3cc05034afd0bc0930921812393bd572db868fb3
|
refs/heads/master
| 2020-08-25T02:51:16.064938 | 2020-04-20T10:03:52 | 2020-04-20T10:03:52 | 216,950,778 | 3 | 1 | null | 2019-10-23T03:39:52 | 2019-10-23T02:29:06 | null |
UTF-8
|
Python
| false | false | 849 |
py
|
#!/usr/bin/env python
#-*- coding:utf8 -*-
#@author: 刘慧玲 2018/5/22 19:15
from selenium import webdriver
from time import sleep
from login01 import *
'''
Purpose of this script: server SSP functionality
'''
#Batch-delete storage spaces
driver = webdriver.Firefox()
driver.delete_all_cookies()
driver.maximize_window()
# Go directly to the Jing'an (景安) site storage-space link and log in. The username and password can be changed
Login().Ky_login(driver,"luokeke", "1")
sleep(3)
driver.get("https://mc.kuaiyun.cn/host/hostList")
sleep(3)
driver.find_element_by_link_text(u"管理").click()
sleep(5)
#Opening "Manage" jumps to a new page, which involves multi-window handling.
all_window_handle = driver.window_handles # get the handles of all open windows
driver.switch_to.window(all_window_handle[-1]) # activate the topmost window handle
#"Reinstall system" tab
driver.find_element_by_id("tab3_7").click()
sleep(5)
|
[
"[email protected]"
] | |
6991166ac2811bf5b5871d798c5766c22ed204be
|
32cb0be487895629ad1184ea25e0076a43abba0a
|
/LifePictorial/top/api/rest/HotelRoomImgUploadRequest.py
|
ff545c17c529a9cad6b04e9031dcf6168e228996
|
[] |
no_license
|
poorevil/LifePictorial
|
6814e447ec93ee6c4d5b0f1737335601899a6a56
|
b3cac4aa7bb5166608f4c56e5564b33249f5abef
|
refs/heads/master
| 2021-01-25T08:48:21.918663 | 2014-03-19T08:55:47 | 2014-03-19T08:55:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 407 |
py
|
'''
Created by auto_sdk on 2014-02-10 16:59:30
'''
from top.api.base import RestApi
class HotelRoomImgUploadRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.gid = None
self.pic = None
self.position = None
def getapiname(self):
return 'taobao.hotel.room.img.upload'
def getMultipartParas(self):
return ['pic']
|
[
"[email protected]"
] | |
99f94f0fc3ee9a38ec3c34db968e6e99a9ea7e86
|
f47fe8a7d8cd87b3bfa2e172b4a9fc93e3a4abc2
|
/2016/AST2/Bili/letnji/ponovo/konacno1.py
|
7482940ccb3f15b8866efb9be0b7760bf88d483d
|
[] |
no_license
|
ispastlibrary/Titan
|
a4a7e4bb56544d28b884a336db488488e81402e0
|
f60e5c6dc43876415b36ad76ab0322a1f709b14d
|
refs/heads/master
| 2021-01-17T19:23:32.839966 | 2016-06-03T13:47:44 | 2016-06-03T13:47:44 | 60,350,752 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,475 |
py
|
from scipy.special import wofz
import numpy as np
import matplotlib.pyplot as plt
import math
from scipy import interpolate
e=1.60217657e-19 #elementary charge [C]
eV=1.60217657e-19 #eV in J
AU=149597871000 #astronomical unit [m]
Na=6.02*1e+23 #Avogadro's number
M=23*1e-3 #molar mass of Na [kg/mol]
me=9.1e-31 #electron mass [kg]
Rk=400000 #radius of the coma [m] ?????????????????????????????
k=1.38*10e-23 #Boltzmann constant [J/K]
dE=3.37*1e-19 #energy difference between the 3p and 3s states, related to the wavelength [J]
R=Na*k #universal gas constant [J/(mol K)]
L0=589e-9 #central wavelength of the D2 line [m]
h=6.63*1e-34 #Planck constant [Js]
c=299792458 #speed of light [m/s]
A=6.14e+7 #Einstein coefficient for the probability of spontaneous emission [s^-1], 3p-3s
g0=1 # statistical weight of the 3s orbital (s)
g1=3 # statistical weight of the 3p orbital (px,py,pz)
V0=c/L0 #central frequency [Hz]
#Tef=5777 #effective temperature of the Sun [K]
d=0 #distance of the line (the observed direction) from the centre of the nucleus
#array of coefficients for NaI
#array of coefficients for NaII
#coefficients of the approximating polynomial function for computing the partition function
a=[-2.60507178e+3,1.71419244e+3,-4.50632658e+2,5.91751503e+1,-3.88164070e+0,1.01752936e-1]
b=[-3.68868193e-6,2.28941495e-6,-5.66767833e-7,6.99552282e-8,-4.30495956e-9,1.05668164e-10]
Ro=1000 #density of the nucleus [kg/m^3], Andrija
Rn=2.5e4 #radius of the nucleus [m]
S=1/2 #source function
AMU=1.66*10e-27 #atomic mass unit in kg
sigma=5.670373e-8 #Stefan-Boltzmann constant [Wm^-2K^-4]
mna=22.9877*AMU #atomic mass of Na in kg
mnaoh=39.998*AMU #mass of NaOH in kg
mh20=18.015*AMU #mass of H2O in kg
def voigt(x,y): #Voigt function, but only the real part of the Faddeeva function
z = x + 1j*y
w = wofz(z).real
return w
def part_funk(a,T): #computes the partition function
K=0
for i in range(6):
K += a[i]*pow(np.log(T),i)
Zp = np.exp(K)
return Zp
def rastojanje(T): #heliocentric distance [AU]
#Lt=-582*(T-273)+2.62e6 #latent heat of sublimation [J/kg], T in C, not in K
Lt=2.62e6
Sc=1361 #solar constant at 1 AU [W/m^2]
Pr=1.2*(10**12)*np.exp(-6000/T) #saturated vapour pressure at the comet's surface
Zm=Pr*np.sqrt(mh20/(2*np.pi*k*T)) #H2O sublimation flux [kg s^-1 m^-2] (mass production rate)
rasto=np.sqrt(Sc/(sigma*T**4+Lt*Zm))
return rasto
"""
def Q_H20(T):
mh20=18.015*AMU
Pr=1.2*(10e+12)*np.exp((-6000)/T)
prod=Pr*np.sqrt(1/(2*np.pi*k*T*mh20))
prod=prod*4*np.pi*Rn*Rn
return prod"""
def Q_Na(T): #total Na production rate [s^-1]
Pr=1.2*(10e+12)*np.exp((-6000)/T) #saturated vapour pressure
produ=Pr*np.sqrt(1/(2*np.pi*k*T*mna)) #Na production rate [m^-2 s^-1]
produ=produ*4*np.pi*Rn*Rn #total rate
return produ
# 207.6 K -> 1 AU, ~1e+32 total Na production rate
def brz_izb(T,masa): #outflow velocity = thermal velocity [m/s]
#brz=20*pow(rastojanje(T),-0.41)*(10**3)
brz = np.sqrt((2*k*T)/masa)
return brz
tau_p0=10**3 #time until the parent molecule is destroyed [s]
tau_d0=1.67*(10**5) #time until the daughter molecule is destroyed [s]
def lp(T): #scale length of the parent [m]
roditelj = brz_izb(T,mnaoh)*tau_p0*(rastojanje(T))**2
return roditelj
def ld(T): #scale length of the daughter [m]
cerka = brz_izb(T,mna)*tau_d0*(rastojanje(T))**2
return cerka
#plt.gca().set_yscale('log')
def kon_Na_koma(T,r): #Na number density in the coma, Haser model
Dr=r-Rn # reduced current distance, R_coma - R_nucleus
konc=(Q_Na(T)*ld(T)*(np.exp(-Dr/lp(T))-np.exp(-Dr/ld(T))))/(4*np.pi*brz_izb(T,mna)*r*r*(lp(T)-ld(T)))
#print(ld(T),lp(T))
return konc
def MaxBol(T,r): #Na density in the ground 3s state (n0) at distance r in the coma
NaI = kon_Na_koma(T,r) #total Na number from the Haser model
Zp = part_funk(a,T) #partition function
Er=5.1390767*eV #energy of the 3s state of Na [J]
#dE=h*c/L0
#Er=dE
#Er=0
n0 =(NaI*g0*np.exp(-Er/(k*T)))/Zp #Na density in the ground state
return n0
def Bol(T,r):
NaI = kon_Na_koma(T,r)
dE = h*c/L0
odnos = g1*(np.exp(-dE/(k*T)))/g0
n0 = NaI/(1+odnos)
return n0
def ajnstajnB(A): # for A (the emission coefficient, 3p-3s) returns B (the Einstein absorption coefficient for the transition probability (3s-3p) [m^3s^-2J^-1])
B = (c**2*A*g1)/(8*np.pi*h*V0**3*g0)
return B
def Dopl_sirina(T): #Doppler width as a function of temperature [Hz]
Dopler = np.sqrt(2*R*T/M)*(V0/c)
return Dopler
def koef_aps(V,T,r,av): #absorption coefficient
B = ajnstajnB(A)
konc_aps = n0 = Bol(T,r) #Maxwell-Boltzmann statistics
Dopler = Dopl_sirina(T)
apsor = ((B*n0*h*V)/(4*np.pi*Dopler))*Fojtov_profil(V,av)
return apsor
br_ljuspica = 2500
dr = Rk/br_ljuspica #the coma is divided into 50000 shells
def opt_dub(d,V,T,av): #optical depth for an inhomogeneous coma
r1 = Rk
r2 = r1-dr
suma_opt = 0
broj = br_ljuspica - 1 - math.floor(d/dr)
"""for i in range(broj):
r2 = r1 - dr
ds = np.sqrt(r1*r1 - d*d) - np.sqrt(r2*r2 - d*d)
suma_opt += koef_aps(V,T,r1,av)*ds
r1 = r2"""
while (r1>(Rn+d)) and (r2>(Rn+d)):
ds = np.sqrt(r1*r1 - d*d) - np.sqrt(r2*r2 - d*d)
suma_opt += koef_aps(V,T,r1,av)*ds
r1=r2
r2=r1-dr
ds = np.sqrt(r1*r1 - d*d)
suma_opt += koef_aps(V,T,r1,av)*ds
suma_opt *= 2
return suma_opt
"""def poc_intez(V,T): #pocetni intezitet preko plankove fje
plank=(2*h*V*V*V)/(c*c*(np.exp((h*V)/(k*T))-1))
return plank"""
N_tacaka = 150
V1 = c/(589.02e-9)
V2 = c/(588.98e-9)
dV = (V2 - V1)/N_tacaka
def izlazni(V,T,av): #emergent intensity, normalised so that I0=1 and the source function is S=1/2
tau = opt_dub(d,V,T,av) #optical depth
#2*Rk*koef_aps(V,T,Rk)
q = S*(1-np.exp(-tau))
return q
def Fojtov_profil(V,av):
F=voigt((V-V0)/Dopl_sirina(T),av)/(np.pi*Dopl_sirina(T))
return F
def E_W(x,y): #equivalent width, using the cubic spline method
EkW=0
tck=interpolate.splrep(x,y)
i=0
while(i<(N_tacaka-1)):
EkW+=interpolate.splint(x[i],x[i+1],tck)
i+=1
return EkW
"""T=207
av=A/Dopl_sirina(T)
x=np.linspace(V1,V2,N_tacaka)
y0=izlazni(x,av,0)
#y1=izlazni(x,av,1)
#plt.suptitle('Uporedjivanje dveju metoda za nalazenja broja Na u osnovnom 3s stanju u odnosu na ukupan broj Na')
plt.plot(x,y0,lw=5,label='Maksvel - Bolcmanova statistika')
#plt.plot(x,y1,label='Bolcmanova raspodela')
plt.legend(loc='best')
plt.show()"""
d=0
x=np.linspace(V1,V2,N_tacaka)
T=300
print(d)
av=A/Dopl_sirina(T)
y0=izlazni(x,T,av)
dd=Rk/6
d=d+dd
print(d)
y1=izlazni(x,T,av)
d=d+dd
print(d)
y2=izlazni(x,T,av)
d=d+dd
print(d)
y3=izlazni(x,T,av)
d=d+dd
print(d)
y4=izlazni(x,T,av)
plt.suptitle('Profil linije Na za različite preseke na T=220K')
plt.xlabel('Frekvencija[Hz]')
plt.ylabel('Relativni intenzitet')
plt.plot(x,y0,label='d=0 m')
plt.plot(x,y1,label='d=66666 m')
plt.plot(x,y2,label='d=133333 m')
plt.plot(x,y3,label='d=200000 m')
plt.plot(x,y4,label='d=266666 m')
#plt.plot(x,y5,label='0.06 AU, 260K')
plt.legend(loc='best')
plt.show()
"""xt=np.linspace(100,210,100) yt=Q_Na(xt) plt.plot(xt,yt)
plt.xlabel('Temperatura[K]') plt.ylabel('Stopa produkcije Na[s^-1]')
plt.title('Grafik zavisnosti stope produkcije od temperature')"""
#plt.yscale('log')
|
[
"[email protected]"
] | |
0ec055a25cc8a0344ce78bd9d4773178113d80f6
|
77ec9edf40b34b48477a627d149b6c2054b98a93
|
/abc_179_d.py
|
7b6ac7b449986dfe9275cb4f4fd8e0cb8b57219c
|
[] |
no_license
|
junkhp/atcorder
|
fa4eeb204e3a4ac713001ab89c205039703abc88
|
028ddf7a39534d5907232c4576a03af79feb6073
|
refs/heads/main
| 2023-04-11T02:15:10.088883 | 2021-04-22T07:06:06 | 2021-04-22T07:06:06 | 313,284,147 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 528 |
py
|
def main():
n, k = map(int, input().split())
move_set = set([])
for i in range(k):
a, b = map(int, input().split())
for j in range(a, b + 1):
move_set.add(j)
sorted_set = sorted(move_set)
# print(sorted_set)
dp = [0] * (n + 1)
dp[1] = 1
for i in range(2, n+1):
for num in sorted_set:
if num + 1 > i:
break
else:
dp[i] += dp[i - num]
print(dp[-1] % 998244353)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
281ad853afa50f156cc560eb9efde70e9130b00e
|
40e09fc848fac3bc523802e353c4e8bef9e3cf5e
|
/pyvore/pyvore/managers/sessions.py
|
9f5d60dbae366a036b85d684f0aee266d2320f5c
|
[] |
no_license
|
sontek/pycon2012
|
8ff24ce51770e0fb6a40ec9a510e958b9b9f309b
|
79d417d185030c0af247506b49903744088abe65
|
refs/heads/master
| 2016-09-05T19:56:18.702274 | 2012-03-17T05:53:46 | 2012-03-17T05:53:46 | 3,627,137 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 338 |
py
|
from pyvore.managers import BaseManager
from pyvore.models.sessions import Session
from pyvore.models.sessions import Chat
class SessionManager(BaseManager):
def get_sessions(self):
return self.session.query(Session).all()
def get_chatlog(self, pk):
return self.session.query(Chat).filter(Chat.session_pk == pk)
|
[
"[email protected]"
] | |
44e898de8b26e5a201cf475e7ab019e44ead146d
|
67379c2ae929266f303edc783c8c62edb521174b
|
/exception/TransactionException.py
|
255a542bbd984278db4669c881c1ac6ca58f723b
|
[] |
no_license
|
bbb11808/seata-python
|
d20be83093d6d084ad36d9292a8ee18ad3bfc8c6
|
c53b605be423c781d38e599e5bade8df8c81c2d9
|
refs/heads/master
| 2023-02-11T01:22:18.488881 | 2021-01-05T10:10:08 | 2021-01-05T10:10:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 257 |
py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @author jsbxyyx
# @since 1.0
class TransactionException(Exception):
def __init__(self, code, message=None, cause=None):
self.code = code
self.message = message
self.cause = cause
|
[
"[email protected]"
] | |
ef9a7c367bd1087b092f78ee9feb34f8fb220822
|
0e667a493715932d3dd45f6a59bd31c391c05b6a
|
/bin/pygmentize
|
9e0974abc30dcc7b01ffff006b0e612e8a1e5f35
|
[] |
no_license
|
Anubhav722/QR-Code-Scanner
|
84908069d6dc4082e94ce01c62085ce1ac380a62
|
455d28d5654bed3c9d3161897f7cead21d4c7f8e
|
refs/heads/master
| 2021-04-30T16:13:46.769315 | 2017-01-26T17:28:45 | 2017-01-26T17:28:45 | 79,985,166 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 248 |
#!/home/paras/Desktop/QR-Code-Scanner/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pygments.cmdline import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"[email protected]"
] | ||
85398c9153e63b9b53d9985b044400b6227c505f
|
6806bd3e24d2ec3382cce6964e817e279052f121
|
/sentry/plugins/sentry_sites/models.py
|
4e294b463ec6ef4e47deb100f73b3e68c629019e
|
[
"BSD-2-Clause"
] |
permissive
|
magicaltrevor/sentry
|
af70427a6930f555715362e8899e4269f844e57f
|
8c11b2db7f09844aa860bfe7f1c3ff23c0d30f94
|
refs/heads/master
| 2021-01-18T11:53:55.770327 | 2012-07-29T22:00:35 | 2012-07-29T22:00:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 938 |
py
|
"""
sentry.plugins.sentry_sites.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sentry
from django.utils.translation import ugettext_lazy as _
from sentry.plugins import register
from sentry.plugins.bases.tag import TagPlugin
class SitesPlugin(TagPlugin):
"""
Adds additional support for showing information about sites including:
* A panel which shows all sites a message was seen on.
* A sidebar module which shows the sites most actively seen on.
"""
slug = 'sites'
title = _('Sites')
version = sentry.VERSION
author = "Sentry Team"
author_url = "https://github.com/dcramer/sentry"
tag = 'site'
tag_label = _('Site')
def get_tag_values(self, event):
if not event.site:
return []
return [event.site]
register(SitesPlugin)
|
[
"[email protected]"
] | |
dcbb7d2c6c118a3060c64cfee2dae6fd5aa40e9d
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_2453486_1/Python/dvolgyes/solution.py
|
3336b278088a91573eff663adc3c60e306e631a1
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,667 |
py
|
#!/usr/bin/python
#
# Google codejam solution
# David Volgyes
#
#
import sys, math, os
#import mpmath as mp # see https://code.google.com/p/mpmath/
import numpy as np # see http://www.numpy.org/
#import sympy as sp # see https://code.google.com/p/sympy/
import networkx as nx # see http://networkx.github.com/
import re
import random
T=int(sys.stdin.readline())
fieldX=np.zeros( (4,4), dtype=np.uint8 )
fieldO=np.zeros( (4,4), dtype=np.uint8 )
def solve(x):
solution=False
for i in range(0,4):
subsolution1=True
subsolution2=True
for j in range(0,4):
if x[i,j]==0: subsolution1=False
if x[j,i]==0: subsolution2=False
if subsolution1 or subsolution2: return True
if x[0,0]+x[1,1]+x[2,2]+x[3,3]==4: return True
if x[0,3]+x[1,2]+x[2,1]+x[3,0]==4: return True
return False
for i in range(0,T):
fieldX.fill(0)
fieldO.fill(0)
counter=0
empty=False
while counter<4:
sline=sys.stdin.readline().strip()
if len(sline)<4:continue
for j in range(0,4):
if sline[j]=='X' or sline[j]=='T':
fieldX[counter,j]=1
if sline[j]=='O' or sline[j]=='T':
fieldO[counter,j]=1
continue
if sline[j]=='.':
empty=True
counter+=1
if solve(fieldX):
print "Case #%i: X won" % (i+1,)
continue
if solve(fieldO):
print "Case #%i: O won" % (i+1,)
continue
if empty:
print "Case #%i: Game has not completed" % (i+1,)
continue
print "Case #%i: Draw" % (i+1,)
|
[
"[email protected]"
] | |
68a41b87ce93babc8cc9ff31ee191ed3942d9e11
|
3c000380cbb7e8deb6abf9c6f3e29e8e89784830
|
/venv/Lib/site-packages/cobra/modelimpl/fv/afabricextconnp.py
|
6c8a4c7ee71ed4b11370d170b02722427f256c7d
|
[] |
no_license
|
bkhoward/aciDOM
|
91b0406f00da7aac413a81c8db2129b4bfc5497b
|
f2674456ecb19cf7299ef0c5a0887560b8b315d0
|
refs/heads/master
| 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,912 |
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class AFabricExtConnP(Mo):
meta = ClassMeta("cobra.model.fv.AFabricExtConnP")
meta.isAbstract = True
meta.moClassName = "fvAFabricExtConnP"
meta.moClassName = "fvAFabricExtConnP"
meta.rnFormat = ""
meta.category = MoCategory.REGULAR
meta.label = "Abstract Intrasite/Intersite Profile"
meta.writeAccessMask = 0x0
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Def")
meta.concreteSubClasses.add("cobra.model.fv.FabricExtConnP")
meta.concreteSubClasses.add("cobra.model.fv.FabricExtConnPDef")
meta.rnPrefixes = [
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "id", "id", 21395, PropCategory.REGULAR)
prop.label = "Fabric ID"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("id", prop)
prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerKey", prop)
prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerTag", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "rt", "rt", 21396, PropCategory.REGULAR)
prop.label = "Global EVPN Route Target"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("rt", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"[email protected]"
] | |
02d48bd2c223636e35624a38576f0a5412d9f2f8
|
2e06c0df26e3fbccc2af052301e8b486fd17d84c
|
/Line3D/line3d_rectangular_projection.py
|
66b986387a063fbb644ba6817cebe039bc9a5c45
|
[
"MIT"
] |
permissive
|
d8ye/pyecharts-gallery
|
54f44c0a78d88608ae83a678c105424113866f25
|
07995a7f2600983282eb37b1e94da9af2f1a25b5
|
refs/heads/master
| 2020-07-03T13:04:42.093830 | 2019-08-13T04:14:13 | 2019-08-13T04:14:13 | 201,913,794 | 0 | 0 |
MIT
| 2019-08-12T11:04:10 | 2019-08-12T11:04:09 | null |
UTF-8
|
Python
| false | false | 1,458 |
py
|
import math
import pyecharts.options as opts
from pyecharts.charts import Line3D
"""
Gallery 使用 pyecharts 1.1.0
参考地址: https://echarts.baidu.com/examples/editor.html?c=line3d-orthographic&gl=1
目前无法实现的功能:
1、
"""
week_en = "Saturday Friday Thursday Wednesday Tuesday Monday Sunday".split()
clock = (
"12a 1a 2a 3a 4a 5a 6a 7a 8a 9a 10a 11a 12p "
"1p 2p 3p 4p 5p 6p 7p 8p 9p 10p 11p".split()
)
data = []
for t in range(0, 25000):
_t = t / 1000
x = (1 + 0.25 * math.cos(75 * _t)) * math.cos(_t)
y = (1 + 0.25 * math.cos(75 * _t)) * math.sin(_t)
z = _t + 2.0 * math.sin(75 * _t)
data.append([x, y, z])
(
Line3D()
.add(
"",
data,
xaxis3d_opts=opts.Axis3DOpts(data=clock, type_="value"),
yaxis3d_opts=opts.Axis3DOpts(data=week_en, type_="value"),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.set_global_opts(
visualmap_opts=opts.VisualMapOpts(
dimension=2,
max_=30,
min_=0,
range_color=[
"#313695",
"#4575b4",
"#74add1",
"#abd9e9",
"#e0f3f8",
"#ffffbf",
"#fee090",
"#fdae61",
"#f46d43",
"#d73027",
"#a50026",
],
)
)
.render("line3d_rectangular_projection.html")
)
|
[
"[email protected]"
] | |
26c28d596fb8b6712cc4ba60a88c42f88de634df
|
959d6f7027a965f609a0be2885960b63c6dc97bc
|
/facebook/likers/steps.py
|
96cfda296f3d581fbb757246dd37896ae0d2495a
|
[] |
no_license
|
ameetbora/facebook-comments
|
0bf57f8e5b4a8ef7804aa999fa86d9913b7ee99c
|
7649c808164f978b147a4410795eadf374e3d3dc
|
refs/heads/master
| 2020-04-12T14:39:23.733965 | 2018-10-30T06:17:42 | 2018-10-30T06:17:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,905 |
py
|
import time
def login(driver, user_email: str, user_password: str):
driver.get("https://www.facebook.com")
email = driver.find_element_by_id("email")
password = driver.find_element_by_id("pass")
submit = driver.find_element_by_id("loginbutton")
email.send_keys(user_email)
password.send_keys(user_password)
submit.click()
def keep_scrolling(driver, times: int = 99999999999):
while times > 0:
times -= 1
results_end_notifiers = driver.find_elements_by_xpath("//div[text()='End of results']")
if len(results_end_notifiers) > 0:
print("Looks like we found all the likers.")
return True
else:
driver.execute_script("window.scrollTo(0, document.body.scrollHeight + 1000);")
time.sleep(3)
def get_likers(driver):
likers = []
links = [link.get_attribute("href") for link in driver.find_elements_by_xpath("//table[@role='presentation']//tr//td[position()=2]//a[not(@class)]")]
names = [name.text for name in driver.find_elements_by_xpath("//table[@role='presentation']//tr//td[position()=2]//a[not(@class)]/div/div")]
if len(names) > 0 and len(names) == len(links):
for i in range(len(links)):
likers.append({
"name": names[i],
"link": links[i],
})
else:
print("The names And links didn't match, something is wrong with our xpathing.")
return likers
def get_next_likers(driver):
next_page_link = driver.find_elements_by_xpath("//div[@id='see_more_pager']/a")
if len(next_page_link) > 0:
next_page_link[0].click()
return True
return False
def get_facebook_warning(driver):
warning = driver.find_elements_by_xpath("//div[contains(text(), 'It looks like you’re using this feature in a way it wasn’t meant to be used.')]")
return len(warning) > 0
|
[
"[email protected]"
] | |
3e92f309ef61231db2fa56989217b3ba6eb86326
|
275a96a33ae1f89e7b2ee0ecdbac7d78abe6d6cc
|
/swagger_client/models/conflict_error.py
|
a40924bde282c121008b4b6801a38516e4f056f1
|
[] |
no_license
|
cascadiarc/cyclos-python-client
|
8029ce07174f2fe92350a92dda9a60976b2bb6c2
|
a2e22a30e22944587293d51be2b8268bce808d70
|
refs/heads/main
| 2023-04-03T16:52:01.618444 | 2021-04-04T00:00:52 | 2021-04-04T00:00:52 | 354,419,532 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,439 |
py
|
# coding: utf-8
"""
Cyclos 4.11.5 API
The REST API for Cyclos 4.11.5 # noqa: E501
OpenAPI spec version: 4.11.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.configuration import Configuration
class ConflictError(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'code': 'ConflictErrorCode'
}
attribute_map = {
'code': 'code'
}
def __init__(self, code=None, _configuration=None): # noqa: E501
"""ConflictError - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._code = None
self.discriminator = None
if code is not None:
self.code = code
@property
def code(self):
"""Gets the code of this ConflictError. # noqa: E501
Error codes for 409 Conflict entity HTTP status Possible values are: * constraintViolatedOnRemove: An attempt to remove some entity has failed, probably because that entity is in use, that is, is being referenced by some other entity. * staleEntity: Failure in the optimistic lock. It means some entity was fetched for editing by 2 clients. Then they both saved it. The first one is successful, but the second one will fail. If you get this error, make sure the `version` field is being sent with the correct value, as fetched from the server. # noqa: E501
:return: The code of this ConflictError. # noqa: E501
:rtype: ConflictErrorCode
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this ConflictError.
Error codes for 409 Conflict entity HTTP status Possible values are: * constraintViolatedOnRemove: An attempt to remove some entity has failed, probably because that entity is in use, that is, is being referenced by some other entity. * staleEntity: Failure in the optimistic lock. It means some entity was fetched for editing by 2 clients. Then they both saved it. The first one is successful, but the second one will fail. If you get this error, make sure the `version` field is being sent with the correct value, as fetched from the server. # noqa: E501
:param code: The code of this ConflictError. # noqa: E501
:type: ConflictErrorCode
"""
self._code = code
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ConflictError, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ConflictError):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ConflictError):
return True
return self.to_dict() != other.to_dict()
|
[
"[email protected]"
] | |
6085a2cfbcde968d0ed001eb7a49d5bebfa6aa75
|
817a97680e85142634c3e7c66a3e0a0e5eceaffd
|
/sma_cross_vol.py
|
d1c0f856afe1d5f0f00c0bc6834541cf33e6a4d0
|
[] |
no_license
|
johndpope/algotrading
|
4cca78db99af8fef0d1fc57aac3104bd0e8a895c
|
f2f527f85aad6cce928f1c2e9794f9217efcce93
|
refs/heads/master
| 2021-06-24T15:24:53.136691 | 2017-08-27T16:13:55 | 2017-08-27T16:13:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,488 |
py
|
from datetime import datetime, timedelta
import backtrader as bt
class SMACrossVolumeStrategy(bt.SignalStrategy):
params = dict(
diff=0.01,
limit=0.005,
limdays=10,
limdays2=1000,
maperiod_small=30,
maperiod_big=30,
)
def __init__(self):
self.order = None
self.dataclose = self.datas[0].close
self.datavol = self.datas[0].volume
self.sma_small = bt.indicators.SimpleMovingAverage(
self.datas[0],
period=self.params.maperiod_small
)
self.sma_big = bt.indicators.SimpleMovingAverage(
self.datas[0],
period=self.params.maperiod_big
)
def log(self, txt, dt=None, doprint=False):
'''Logging function for this strategy'''
if doprint:
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def stop(self):
self.log('(MA Period Small: %2d | MA Period Big: %2d) Ending Value %.2f' %
(self.p.maperiod_small, self.p.maperiod_big, self.broker.getvalue()), doprint=True)
def next(self):
if self.order:
return
if not self.position:
if self.sma_small[0] > self.sma_big[0] and self.sma_small[-1] < self.sma_big[-1] and self.datavol[0] > 2000000:
self.order = self.buy()
else:
if self.sma_small[0] < self.sma_big[0] and self.sma_small[-1] > self.sma_big[-1] and self.datavol[0] > 2000000:
self.order = self.sell()
cerebro = bt.Cerebro()
strats = cerebro.optstrategy(
SMACrossVolumeStrategy,
maperiod_small=range(2, 10),
maperiod_big=range(10, 20),
)
data = bt.feeds.GenericCSVData(
dataname='eur_usd_1d.csv',
separator=',',
dtformat=('%Y%m%d'),
tmformat=('%H%M00'),
datetime=0,
time=1,
open=2,
high=3,
low=4,
close=5,
volume=6,
openinterest=-1
)
# data = bt.feeds.YahooFinanceData(dataname='YHOO', fromdate=datetime(2011, 1, 1),
# todate=datetime(2012, 12, 31))
cerebro.adddata(data)
cerebro.addsizer(bt.sizers.FixedSize, stake=50)
# cerebro.addstrategy(SimpleSMAStrategy)
# Print out the starting conditions
print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
# Run over everything
cerebro.run()
# Print out the final result
print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())
# cerebro.run()
# cerebro.plot()
|
[
"[email protected]"
] | |
40d4849bbc2eaf4a84128ba8c1fdc12a9548dde1
|
16450d59c820298f8803fd40a1ffa2dd5887e103
|
/baekjoon/5622.py
|
d81981f661aa57dc341a4a724cc55527ebc3158a
|
[] |
no_license
|
egyeasy/TIL_public
|
f78c11f81d159eedb420f5fa177c05d310c4a039
|
e2f40eda09cb0a65cc064d9ba9b0e2fa7cbbcb38
|
refs/heads/master
| 2021-06-21T01:22:16.516777 | 2021-02-02T13:16:21 | 2021-02-02T13:16:21 | 167,803,551 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,529 |
py
|
"""
상근이의 할머니는 아래 그림과 같이 오래된 다이얼 전화기를 사용한다.
전화를 걸고 싶은 번호가 있다면, 숫자를 하나를 누른 다음에 금속 핀이 있는 곳 까지 시계방향으로 돌려야 한다. 숫자를 하나 누르면 다이얼이 처음 위치로 돌아가고, 다음 숫자를 누르려면 다이얼을 처음 위치에서 다시 돌려야 한다.
숫자 1을 걸려면 총 2초가 필요하다. 1보다 큰 수를 거는데 걸리는 시간은 이보다 더 걸리며, 한 칸 옆에 있는 숫자를 걸기 위해선 1초씩 더 걸린다.
상근이의 할머니는 전화 번호를 각 숫자에 해당하는 문자로 외운다. 즉, 어떤 단어를 걸 때, 각 알파벳에 해당하는 숫자를 걸면 된다. 예를 들어, UNUCIC는 868242와 같다.
할머니가 외운 단어가 주어졌을 때, 이 전화를 걸기 위해서 필요한 시간을 구하는 프로그램을 작성하시오.
> 입력
첫째 줄에 알파벳 대문자로 이루어진 단어가 주어진다. 단어는 2글자~15글자로 이루어져 있다.
UNUCIC
> 출력
첫째 줄에 다이얼을 걸기 위해서 필요한 시간을 출력한다.
36
"""
num_list = [2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9]
word = input()
result = 0
for i in word:
idx = ord(i) - 65
result += num_list[idx] + 1
print(result)
# Thoughts
# 1. Could probably also find as much of the digit-to-letter pattern as possible and write it with less brute-force typing.
|
[
"[email protected]"
] | |
a3ee575e7318f6ded972fa7288d9b79b53f4f0e7
|
302442c32bacca6cde69184d3f2d7529361e4f3c
|
/cidtrsend-all/stage2-model/pytz/zoneinfo/Navajo.py
|
1b27ae20abe14d05ef0286e1b3a242389516aafd
|
[] |
no_license
|
fucknoob/WebSemantic
|
580b85563072b1c9cc1fc8755f4b09dda5a14b03
|
f2b4584a994e00e76caccce167eb04ea61afa3e0
|
refs/heads/master
| 2021-01-19T09:41:59.135927 | 2015-02-07T02:11:23 | 2015-02-07T02:11:23 | 30,441,659 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,131 |
py
|
'''tzinfo timezone information for Navajo.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Navajo(DstTzInfo):
'''Navajo timezone definition. See datetime.tzinfo for details'''
zone = 'Navajo'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1918,3,31,9,0,0),
d(1918,10,27,8,0,0),
d(1919,3,30,9,0,0),
d(1919,10,26,8,0,0),
d(1920,3,28,9,0,0),
d(1920,10,31,8,0,0),
d(1921,3,27,9,0,0),
d(1921,5,22,8,0,0),
d(1942,2,9,9,0,0),
d(1945,8,14,23,0,0),
d(1945,9,30,8,0,0),
d(1965,4,25,9,0,0),
d(1965,10,31,8,0,0),
d(1966,4,24,9,0,0),
d(1966,10,30,8,0,0),
d(1967,4,30,9,0,0),
d(1967,10,29,8,0,0),
d(1968,4,28,9,0,0),
d(1968,10,27,8,0,0),
d(1969,4,27,9,0,0),
d(1969,10,26,8,0,0),
d(1970,4,26,9,0,0),
d(1970,10,25,8,0,0),
d(1971,4,25,9,0,0),
d(1971,10,31,8,0,0),
d(1972,4,30,9,0,0),
d(1972,10,29,8,0,0),
d(1973,4,29,9,0,0),
d(1973,10,28,8,0,0),
d(1974,1,6,9,0,0),
d(1974,10,27,8,0,0),
d(1975,2,23,9,0,0),
d(1975,10,26,8,0,0),
d(1976,4,25,9,0,0),
d(1976,10,31,8,0,0),
d(1977,4,24,9,0,0),
d(1977,10,30,8,0,0),
d(1978,4,30,9,0,0),
d(1978,10,29,8,0,0),
d(1979,4,29,9,0,0),
d(1979,10,28,8,0,0),
d(1980,4,27,9,0,0),
d(1980,10,26,8,0,0),
d(1981,4,26,9,0,0),
d(1981,10,25,8,0,0),
d(1982,4,25,9,0,0),
d(1982,10,31,8,0,0),
d(1983,4,24,9,0,0),
d(1983,10,30,8,0,0),
d(1984,4,29,9,0,0),
d(1984,10,28,8,0,0),
d(1985,4,28,9,0,0),
d(1985,10,27,8,0,0),
d(1986,4,27,9,0,0),
d(1986,10,26,8,0,0),
d(1987,4,5,9,0,0),
d(1987,10,25,8,0,0),
d(1988,4,3,9,0,0),
d(1988,10,30,8,0,0),
d(1989,4,2,9,0,0),
d(1989,10,29,8,0,0),
d(1990,4,1,9,0,0),
d(1990,10,28,8,0,0),
d(1991,4,7,9,0,0),
d(1991,10,27,8,0,0),
d(1992,4,5,9,0,0),
d(1992,10,25,8,0,0),
d(1993,4,4,9,0,0),
d(1993,10,31,8,0,0),
d(1994,4,3,9,0,0),
d(1994,10,30,8,0,0),
d(1995,4,2,9,0,0),
d(1995,10,29,8,0,0),
d(1996,4,7,9,0,0),
d(1996,10,27,8,0,0),
d(1997,4,6,9,0,0),
d(1997,10,26,8,0,0),
d(1998,4,5,9,0,0),
d(1998,10,25,8,0,0),
d(1999,4,4,9,0,0),
d(1999,10,31,8,0,0),
d(2000,4,2,9,0,0),
d(2000,10,29,8,0,0),
d(2001,4,1,9,0,0),
d(2001,10,28,8,0,0),
d(2002,4,7,9,0,0),
d(2002,10,27,8,0,0),
d(2003,4,6,9,0,0),
d(2003,10,26,8,0,0),
d(2004,4,4,9,0,0),
d(2004,10,31,8,0,0),
d(2005,4,3,9,0,0),
d(2005,10,30,8,0,0),
d(2006,4,2,9,0,0),
d(2006,10,29,8,0,0),
d(2007,3,11,9,0,0),
d(2007,11,4,8,0,0),
d(2008,3,9,9,0,0),
d(2008,11,2,8,0,0),
d(2009,3,8,9,0,0),
d(2009,11,1,8,0,0),
d(2010,3,14,9,0,0),
d(2010,11,7,8,0,0),
d(2011,3,13,9,0,0),
d(2011,11,6,8,0,0),
d(2012,3,11,9,0,0),
d(2012,11,4,8,0,0),
d(2013,3,10,9,0,0),
d(2013,11,3,8,0,0),
d(2014,3,9,9,0,0),
d(2014,11,2,8,0,0),
d(2015,3,8,9,0,0),
d(2015,11,1,8,0,0),
d(2016,3,13,9,0,0),
d(2016,11,6,8,0,0),
d(2017,3,12,9,0,0),
d(2017,11,5,8,0,0),
d(2018,3,11,9,0,0),
d(2018,11,4,8,0,0),
d(2019,3,10,9,0,0),
d(2019,11,3,8,0,0),
d(2020,3,8,9,0,0),
d(2020,11,1,8,0,0),
d(2021,3,14,9,0,0),
d(2021,11,7,8,0,0),
d(2022,3,13,9,0,0),
d(2022,11,6,8,0,0),
d(2023,3,12,9,0,0),
d(2023,11,5,8,0,0),
d(2024,3,10,9,0,0),
d(2024,11,3,8,0,0),
d(2025,3,9,9,0,0),
d(2025,11,2,8,0,0),
d(2026,3,8,9,0,0),
d(2026,11,1,8,0,0),
d(2027,3,14,9,0,0),
d(2027,11,7,8,0,0),
d(2028,3,12,9,0,0),
d(2028,11,5,8,0,0),
d(2029,3,11,9,0,0),
d(2029,11,4,8,0,0),
d(2030,3,10,9,0,0),
d(2030,11,3,8,0,0),
d(2031,3,9,9,0,0),
d(2031,11,2,8,0,0),
d(2032,3,14,9,0,0),
d(2032,11,7,8,0,0),
d(2033,3,13,9,0,0),
d(2033,11,6,8,0,0),
d(2034,3,12,9,0,0),
d(2034,11,5,8,0,0),
d(2035,3,11,9,0,0),
d(2035,11,4,8,0,0),
d(2036,3,9,9,0,0),
d(2036,11,2,8,0,0),
d(2037,3,8,9,0,0),
d(2037,11,1,8,0,0),
]
_transition_info = [
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MWT'),
i(-21600,3600,'MPT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
]
Navajo = Navajo()
|
[
"[email protected]"
] | |
3729d9da023e6a5a84cc1c3bac5ff6e4ef5f87db
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stubs/redis/redis/sentinel.pyi
|
ea13ae681287fa1353217d2e6d217fe0898b122b
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 |
Apache-2.0
| 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null |
UTF-8
|
Python
| false | false | 2,820 |
pyi
|
from collections.abc import Iterable, Iterator
from typing import Any, TypeVar, overload
from typing_extensions import Literal, TypeAlias
from redis.client import Redis
from redis.commands.sentinel import SentinelCommands
from redis.connection import Connection, ConnectionPool, SSLConnection
from redis.exceptions import ConnectionError
_RedisT = TypeVar("_RedisT", bound=Redis[Any])
_AddressAndPort: TypeAlias = tuple[str, int]
_SentinelState: TypeAlias = dict[str, Any] # TODO: this can be a TypedDict
class MasterNotFoundError(ConnectionError): ...
class SlaveNotFoundError(ConnectionError): ...
class SentinelManagedConnection(Connection):
connection_pool: SentinelConnectionPool
def __init__(self, **kwargs) -> None: ...
def connect_to(self, address: _AddressAndPort) -> None: ...
def connect(self) -> None: ...
# The result can be either `str | bytes` or `list[str | bytes]`
def read_response(self, disable_decoding: bool = ...) -> Any: ...
class SentinelManagedSSLConnection(SentinelManagedConnection, SSLConnection): ...
class SentinelConnectionPool(ConnectionPool):
is_master: bool
check_connection: bool
service_name: str
sentinel_manager: Sentinel
def __init__(self, service_name: str, sentinel_manager: Sentinel, **kwargs) -> None: ...
def reset(self) -> None: ...
def owns_connection(self, connection: Connection) -> bool: ...
def get_master_address(self) -> _AddressAndPort: ...
def rotate_slaves(self) -> Iterator[_AddressAndPort]: ...
class Sentinel(SentinelCommands):
sentinel_kwargs: dict[str, Any]
sentinels: list[Redis[Any]]
min_other_sentinels: int
connection_kwargs: dict[str, Any]
def __init__(
self,
sentinels: Iterable[_AddressAndPort],
min_other_sentinels: int = ...,
sentinel_kwargs: dict[str, Any] | None = ...,
**connection_kwargs,
) -> None: ...
def check_master_state(self, state: _SentinelState, service_name: str) -> bool: ...
def discover_master(self, service_name: str) -> _AddressAndPort: ...
def filter_slaves(self, slaves: Iterable[_SentinelState]) -> list[_AddressAndPort]: ...
def discover_slaves(self, service_name: str) -> list[_AddressAndPort]: ...
@overload
def master_for(self, service_name: str, *, connection_pool_class=..., **kwargs) -> Redis[Any]: ...
@overload
def master_for(self, service_name: str, redis_class: type[_RedisT], connection_pool_class=..., **kwargs) -> _RedisT: ...
@overload
def slave_for(self, service_name: str, *, connection_pool_class=..., **kwargs) -> Redis[Any]: ...
@overload
def slave_for(self, service_name: str, redis_class: type[_RedisT], connection_pool_class=..., **kwargs) -> _RedisT: ...
def execute_command(self, *args, **kwargs) -> Literal[True]: ...
|
[
"[email protected]"
] | |
d58cb7de2dbd4f821d0407b7ef618003f1f9fc9b
|
cb4db25a0b13f058f1a31b38d80d76a118d1e2dc
|
/venv/lib/python3.6/site-packages/google/api/usage_pb2.py
|
efe4f7945d109abc9613cb147c11bb4917bdf030
|
[
"MIT"
] |
permissive
|
Hackaton-Dragons/Never-Boils
|
73df2b65f54a77d961ce53dea350b7d2a4261154
|
2d43e6e07fb18409d5a964f44f481d28d2352531
|
refs/heads/master
| 2020-03-09T20:27:54.554616 | 2018-10-08T05:52:33 | 2018-10-08T05:52:33 | 128,985,616 | 1 | 0 |
MIT
| 2018-04-15T13:32:45 | 2018-04-10T19:35:32 |
Python
|
UTF-8
|
Python
| false | true | 4,244 |
py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/api/usage.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/api/usage.proto',
package='google.api',
syntax='proto3',
serialized_pb=_b('\n\x16google/api/usage.proto\x12\ngoogle.api\x1a\x1cgoogle/api/annotations.proto\"C\n\x05Usage\x12\x14\n\x0crequirements\x18\x01 \x03(\t\x12$\n\x05rules\x18\x06 \x03(\x0b\x32\x15.google.api.UsageRule\"?\n\tUsageRule\x12\x10\n\x08selector\x18\x01 \x01(\t\x12 \n\x18\x61llow_unregistered_calls\x18\x02 \x01(\x08\x42%\n\x0e\x63om.google.apiB\nUsageProtoP\x01\xa2\x02\x04GAPIb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_USAGE = _descriptor.Descriptor(
name='Usage',
full_name='google.api.Usage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='requirements', full_name='google.api.Usage.requirements', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rules', full_name='google.api.Usage.rules', index=1,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=68,
serialized_end=135,
)
_USAGERULE = _descriptor.Descriptor(
name='UsageRule',
full_name='google.api.UsageRule',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='selector', full_name='google.api.UsageRule.selector', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allow_unregistered_calls', full_name='google.api.UsageRule.allow_unregistered_calls', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=137,
serialized_end=200,
)
_USAGE.fields_by_name['rules'].message_type = _USAGERULE
DESCRIPTOR.message_types_by_name['Usage'] = _USAGE
DESCRIPTOR.message_types_by_name['UsageRule'] = _USAGERULE
Usage = _reflection.GeneratedProtocolMessageType('Usage', (_message.Message,), dict(
DESCRIPTOR = _USAGE,
__module__ = 'google.api.usage_pb2'
# @@protoc_insertion_point(class_scope:google.api.Usage)
))
_sym_db.RegisterMessage(Usage)
UsageRule = _reflection.GeneratedProtocolMessageType('UsageRule', (_message.Message,), dict(
DESCRIPTOR = _USAGERULE,
__module__ = 'google.api.usage_pb2'
# @@protoc_insertion_point(class_scope:google.api.UsageRule)
))
_sym_db.RegisterMessage(UsageRule)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\016com.google.apiB\nUsageProtoP\001\242\002\004GAPI'))
# @@protoc_insertion_point(module_scope)
|
[
"[email protected]"
] | |
df11eb12d02f73346e7096e6039400e85381a2bb
|
ab5ef28065b0ad3f8d86fc894be569074a4569ea
|
/mirari/CRYE/migrations/0028_auto_20190406_1344.py
|
99b9026478cf2c6aaf96926be89b77b7d4bbecdd
|
[
"MIT"
] |
permissive
|
gcastellan0s/mirariapp
|
1b30dce3ac2ee56945951f340691d39494b55e95
|
24a9db06d10f96c894d817ef7ccfeec2a25788b7
|
refs/heads/master
| 2023-01-22T22:21:30.558809 | 2020-09-25T22:37:24 | 2020-09-25T22:37:24 | 148,203,907 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 522 |
py
|
# Generated by Django 2.0.5 on 2019-04-06 19:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('CRYE', '0027_auto_20190406_1331'),
]
operations = [
migrations.AlterField(
model_name='walletcredit',
name='walletcredit_tipo',
field=models.CharField(choices=[('ARRENDAMIENTO', 'ARRENDAMIENTO'), ('CREDITO', 'CREDITO')], default='CREDITO', max_length=250, verbose_name='Tipo de cartera'),
),
]
|
[
"[email protected]"
] | |
8df4144788164a6ec89107cc0ade23a41752bfe4
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_scheduled.py
|
8a085638d8ee5bfb1423f7fbfc4217347f5939be
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 252 |
py
|
from xai.brain.wordbase.verbs._schedule import _SCHEDULE
#calss header
class _SCHEDULED(_SCHEDULE, ):
def __init__(self,):
_SCHEDULE.__init__(self)
self.name = "SCHEDULED"
self.specie = 'verbs'
self.basic = "schedule"
self.jsondata = {}
|
[
"[email protected]"
] | |
e94f6a0ef46d77df9c8d3ece79519b0d26d16bf7
|
028d788c0fa48a8cb0cc6990a471e8cd46f6ec50
|
/Python-OOP/Exam-Preparation/16-Aug-2020/project/software/light_software.py
|
6182deaf8edbfaa898d0623ff12527b07c73dd0b
|
[] |
no_license
|
Sheko1/SoftUni
|
d6b8e79ae545116f4c0e5705ad842f12d77a9c9d
|
a9fbeec13a30231b6a97c2b22bb35257ac1481c0
|
refs/heads/main
| 2023-07-13T15:39:48.826925 | 2021-08-21T12:51:02 | 2021-08-21T12:51:02 | 317,266,200 | 2 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 454 |
py
|
from .software import Software
class LightSoftware(Software):
CAPACITY_INCREASE = 0.5
MEMORY_DECREASE = 0.5
def __init__(self, name: str, capacity_consumption: int, memory_consumption: int):
super().__init__(name, type="Light", capacity_consumption=int(
capacity_consumption + (capacity_consumption * self.CAPACITY_INCREASE)),
memory_consumption=int(memory_consumption * self.MEMORY_DECREASE))
|
[
"[email protected]"
] | |
dc06bceff161ff58ede64f0c6360bacc5fdbeee6
|
6d7678e3d79c97ddea2e2d65f2c2ef03b17f88f6
|
/venv/lib/python3.6/site-packages/pysnmp/proto/api/__init__.py
|
d742ecc76dec1386047d3cae28b450a5edff0f52
|
[
"MIT"
] |
permissive
|
PitCoder/NetworkMonitor
|
b47d481323f26f89be120c27f614f2a17dc9c483
|
36420ae48d2b04d2cc3f13d60d82f179ae7454f3
|
refs/heads/master
| 2020-04-25T11:48:08.718862 | 2019-03-19T06:19:40 | 2019-03-19T06:19:40 | 172,757,390 | 2 | 0 |
MIT
| 2019-03-15T06:07:27 | 2019-02-26T17:26:06 |
Python
|
UTF-8
|
Python
| false | false | 368 |
py
|
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, Ilya Etingof <[email protected]>
# License: http://snmplabs.com/pysnmp/license.html
#
from pysnmp.proto.api import v1, v2c, verdec
# Protocol versions
protoVersion1 = 0
protoVersion2c = 1
protoModules = {protoVersion1: v1, protoVersion2c: v2c}
decodeMessageVersion = verdec.decodeMessageVersion
|
[
"[email protected]"
] | |
46c5e2b2ed08ba91155f44d266097399816d6ca5
|
dcce56815dca2b18039e392053376636505ce672
|
/dumpscripts/atexit_simple.py
|
cadf7e713067bfdb1023e1a1054adc7c45bab915
|
[] |
no_license
|
robertopauletto/PyMOTW-it_3.0
|
28ff05d8aeccd61ade7d4107a971d9d2576fb579
|
c725df4a2aa2e799a969e90c64898f08b7eaad7d
|
refs/heads/master
| 2021-01-20T18:51:30.512327 | 2020-01-09T19:30:14 | 2020-01-09T19:30:14 | 63,536,756 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 150 |
py
|
# atexit_simple.py
import atexit
def all_done():
print('all_done()')
print('In registrazione')
atexit.register(all_done)
print('Registrato')
|
[
"[email protected]"
] | |
aac4db2e2f613a796ff33628461587fd26159cfb
|
db4d56e63c63cd577c3871349ffa2a7c39c80edc
|
/3.WEB/cxr_project/cxr_project/wsgi.py
|
b576c8906ada1a87940826b1a379206b6c76b16d
|
[] |
no_license
|
Lagom92/CXR_AI
|
33014b7471775e776ed51bfeb88128fd7ca4ce6f
|
bb4bbaf3fc984938f153bf6b58ed99324f779070
|
refs/heads/master
| 2023-06-09T11:20:57.613207 | 2021-06-20T11:34:21 | 2021-06-20T11:34:21 | 293,966,064 | 0 | 0 | null | 2021-06-18T00:09:48 | 2020-09-09T01:08:43 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 399 |
py
|
"""
WSGI config for cxr_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cxr_project.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
4f0a6cf506689d8331fef6df1a76b147b1ff06ad
|
82b495a208ebdeb71314961021fbfe767de57820
|
/chapter-13/sample02.py
|
5d7d05833306dc085a1573bee83e46cd05ba6b89
|
[
"MIT"
] |
permissive
|
krastin/pp-cs3.0
|
7c860794332e598aa74278972d5daa16853094f6
|
502be9aac2d84215db176864e443c219e5e26591
|
refs/heads/master
| 2020-05-28T02:23:58.131428 | 2019-11-13T13:06:08 | 2019-11-13T13:06:08 | 188,853,205 | 0 | 0 |
MIT
| 2019-11-13T13:06:09 | 2019-05-27T13:56:41 |
Python
|
UTF-8
|
Python
| false | false | 1,410 |
py
|
import time
from sample01 import linear_search_while
from sample01 import linear_search_for
from sample01 import linear_search_sentinel
from typing import Callable, Any
def time_it(search: Callable[[list, Any], Any], L: list, v: Any) -> float:
"""Time how long it takes to run function search to find
value v in list L.
"""
t1 = time.perf_counter()
search(L, v)
t2 = time.perf_counter()
return (t2 - t1) * 1000.0
def print_times(v: Any, L: list) -> None:
"""Print the number of milliseconds it takes for linear_search(v, L)
to run for list.index, the while loop linear search, the for loop
linear search, and sentinel search.
"""
# Get list.index's running time.
t1 = time.perf_counter()
L.index(v)
t2 = time.perf_counter()
index_time = (t2 - t1) * 1000.0
# Get the other three running times.
while_time = time_it(linear_search_while, L, v)
for_time = time_it(linear_search_for, L, v)
sentinel_time = time_it(linear_search_sentinel, L, v)
print("{0}\t\t{1:.2f}\t{2:.2f}\t{3:.2f}\t{4:.2f}".format(
v, while_time, for_time, sentinel_time, index_time))
L = list(range(10000001)) # A list with just over ten million values
print_times(10, L) # How fast is it to search near the beginning?
print_times(5000000, L) # How fast is it to search near the middle?
print_times(10000000, L) # How fast is it to search near the end?
|
[
"[email protected]"
] | |
f2515b3ea9d81b413d7f16c3fd76965b099723a9
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/artificial/transf_Fisher/trend_Lag1Trend/cycle_5/ar_12/test_artificial_1024_Fisher_Lag1Trend_5_12_0.py
|
cdbf8396fc2e08ebfbdd54ac8c3f8c8a7b230896
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 |
BSD-3-Clause
| 2023-03-08T21:45:40 | 2016-10-13T09:30:30 |
Python
|
UTF-8
|
Python
| false | false | 262 |
py
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 5, transform = "Fisher", sigma = 0.0, exog_count = 0, ar_order = 12);
|
[
"[email protected]"
] | |
5e50c90e36940a756c0066a4f1a0415e5c585153
|
bc2a96e8b529b0c750f6bc1d0424300af9743904
|
/acapy_client/models/v20_pres_ex_record_list.py
|
637f5f3594379b6fb12a0376417eca62ccdfbc8b
|
[
"Apache-2.0"
] |
permissive
|
TimoGlastra/acapy-client
|
d091fd67c97a57f2b3462353459780281de51281
|
d92ef607ba2ff1152ec15429f2edb20976991424
|
refs/heads/main
| 2023-06-29T22:45:07.541728 | 2021-08-03T15:54:48 | 2021-08-03T15:54:48 | 396,015,854 | 1 | 0 |
Apache-2.0
| 2021-08-14T13:22:28 | 2021-08-14T13:22:27 | null |
UTF-8
|
Python
| false | false | 1,983 |
py
|
from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from ..models.v20_pres_ex_record import V20PresExRecord
from ..types import UNSET, Unset
T = TypeVar("T", bound="V20PresExRecordList")
@attr.s(auto_attribs=True)
class V20PresExRecordList:
""" """
results: Union[Unset, List[V20PresExRecord]] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
results: Union[Unset, List[Dict[str, Any]]] = UNSET
if not isinstance(self.results, Unset):
results = []
for results_item_data in self.results:
results_item = results_item_data.to_dict()
results.append(results_item)
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
if results is not UNSET:
field_dict["results"] = results
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
results = []
_results = d.pop("results", UNSET)
for results_item_data in _results or []:
results_item = V20PresExRecord.from_dict(results_item_data)
results.append(results_item)
v20_pres_ex_record_list = cls(
results=results,
)
v20_pres_ex_record_list.additional_properties = d
return v20_pres_ex_record_list
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
|
[
"[email protected]"
] | |
2c1fc8d25010246935865616a7f2d77dbf36a205
|
ff739149fb1091fcd090b5e68ab4b98d9fec9262
|
/tests/unit/test_sitemap.py
|
7f58445883b0626a64a1c800b55009991b5a7c33
|
[
"MIT"
] |
permissive
|
zhuoranmusic/dash-docs
|
dcdab8a5543f6f3f10cb20d196148969bfe01943
|
3518869b195a7827fe661a90f9a2054c31680d44
|
refs/heads/master
| 2022-04-18T17:37:44.647847 | 2020-04-20T18:13:14 | 2020-04-20T18:13:14 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 323 |
py
|
import pytest
import sys
from generate_sitemap import create_sitemap
@pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6 or higher")
def test_sitemap_is_updated():
with open('dash_docs/assets/sitemap.xml', 'r') as f:
saved_sitemap = f.read()
assert create_sitemap() == saved_sitemap
|
[
"[email protected]"
] | |
1a41bd25d395783d808bbe7baa3ab53534669a7e
|
f5a82f7b2695ed08c9f7432013889590ed9cd1d0
|
/healthpoint/decorators.py
|
17bb337812dcdbf86156385ff894f6a57f2c31fe
|
[
"MIT"
] |
permissive
|
lordoftheflies/django-healthpoint
|
bb717f3a4f9a96b9d81f10fbb45e6982c020e93b
|
aaf8c77150b2ae5bf7d3f9050841b885e8cda17a
|
refs/heads/master
| 2020-08-03T02:55:15.244656 | 2019-09-18T16:13:10 | 2019-09-18T16:13:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 759 |
py
|
from functools import wraps
from healthpoint.registry import register_health_check
def health_check(f):
@wraps(f)
def wrapper(*args, **kwargs):
try:
result = f(*args, **kwargs)
if isinstance(result, bool):
success, detail = result, 'OK' if result else 'ERROR'
elif isinstance(result, tuple) and len(result) == 2:
success, detail = result
else:
raise ValueError(
'Your @health_check must return'
' a `bool`, or a tuple of (`bool`, `detail`)')
except Exception as e:
success, detail = False, str(e)
return success, detail
register_health_check(wrapper)
return wrapper
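# Hypothetical usage sketch (the function name and detail string are
# illustrative, not from this project): a decorated check may return a bool,
# or a (bool, detail) tuple, as enforced by the wrapper above.
#
# @health_check
# def database_reachable():
#     return True, 'primary database reachable'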
|
[
"[email protected]"
] | |
c8ce36e7f047b623defb9b3a946c5a7cb799aa02
|
be61a9f30274514857ea34297719157f1e5b8447
|
/fhir/resources/DSTU2/age.py
|
9975cdbeda716d349901880fad136791d72da6f6
|
[
"BSD-3-Clause"
] |
permissive
|
jwygoda/fhir.resources
|
ceff3a620100d2e875136b86d3e82816c0e60a33
|
5053565570d1ca992d9971d20db813c53fd350b9
|
refs/heads/master
| 2021-02-05T02:59:17.436485 | 2019-07-18T10:57:33 | 2019-07-18T10:57:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 951 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/Age) on 2019-05-14.
# 2019, SMART Health IT.
from . import quantity
class Age(quantity.Quantity):
""" A duration (length of time) with a UCUM code.
There SHALL be a code if there is a value and it SHALL be an expression of
time. If system is present, it SHALL be UCUM. If value is present, it
SHALL be positive.
"""
resource_name = "Age"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
super(Age, self).__init__(jsondict=jsondict, strict=strict)
|
[
"[email protected]"
] | |
b3ae4dd8d3b6d3f3f5f2d0f12474ab0ea469bd94
|
7ad19e854135977ee5b789d7c9bdd39d67ec9ea4
|
/members/amit/clf/audio_processing.py
|
571343202183ddc05c2774531c7e5fd1d3a26acd
|
[
"MIT"
] |
permissive
|
Leofltt/rg_sound_generation
|
1b4d522507bf06247247f3ef929c8d0b93015e61
|
8e79b4d9dce028def43284f80521a2ec61d0066c
|
refs/heads/main
| 2023-05-02T19:53:23.645982 | 2021-05-22T16:09:54 | 2021-05-22T16:09:54 | 369,842,561 | 0 | 0 |
MIT
| 2021-05-22T15:27:28 | 2021-05-22T15:27:27 | null |
UTF-8
|
Python
| false | false | 1,248 |
py
|
import librosa
import numpy as np
from typing import Dict
def get_mel_spectrogram(audio: np.ndarray, params: Dict) -> np.ndarray:
mel_spec = librosa.feature.melspectrogram(
audio,
sr=params.get("sample_rate"),
n_fft=params.get("n_fft"),
hop_length=params.get("hop_len"),
n_mels=params.get("n_mels")
)
return librosa.power_to_db(mel_spec)
def get_hpr(audio: np.ndarray, params: Dict) -> (np.ndarray, np.ndarray, np.ndarray):
D = librosa.stft(
audio,
n_fft=params.get("n_fft"),
hop_length=params.get("hop_len")
)
H, P = librosa.decompose.hpss(D)
return H, P, D - (H + P)
def get_features(file_path: str, params: Dict):
audio, _ = librosa.load(file_path, sr=params.get("sample_rate"), mono=True)
audio = np.squeeze(audio)[:params.get("sample_rate") * params.get("clip_audio_at")]
h, p, r = get_hpr(audio, params)
h, p, r = np.abs(h).mean(axis=-1), np.abs(p).mean(axis=-1), np.abs(r).mean(axis=-1)
dim = h.shape[0]
hpss = np.concatenate([h, p, r], axis=-1)
hpss = np.reshape(hpss, (dim * 3, 1))
spec = get_mel_spectrogram(audio, params)
spec = np.clip(spec, params.get("clip_at"), np.max(spec))
return spec, hpss
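# Illustrative only (the concrete values are assumptions, not taken from the
# original project): an example params dict containing the keys this module reads.
#
# example_params = {
#     "sample_rate": 16000,   # target sampling rate for librosa.load
#     "n_fft": 1024,          # FFT window size
#     "hop_len": 256,         # hop length between frames
#     "n_mels": 128,          # number of mel bands
#     "clip_audio_at": 4,     # seconds of audio kept in get_features
#     "clip_at": -80.0,       # dB floor applied to the mel spectrogram
# }
# spec, hpss = get_features("example.wav", example_params)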
|
[
"[email protected]"
] | |
5bdcae03801bc9263730f63678c10f2052be98f5
|
5c8139f1e57e06c7eaf603bd8fe74d9f22620513
|
/PartB/py全排列4.py
|
b1e46e018cad4170fe7d76313c34805ed586b0ef
|
[] |
no_license
|
madeibao/PythonAlgorithm
|
c8a11d298617d1abb12a72461665583c6a44f9d2
|
b4c8a75e724a674812b8a38c0202485776445d89
|
refs/heads/master
| 2023-04-03T07:18:49.842063 | 2021-04-11T12:02:40 | 2021-04-11T12:02:40 | 325,269,130 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,133 |
py
|
# Implementation of all permutations of a list.
# Permutation algorithm
class Solution(object):
def permutations(self, nums):
if nums is None:
return []
res = []
def helper(start):
if start == len(nums):
res.append(nums[:])
for i in range(start, len(nums)):
nums[i], nums[start] = nums[start], nums[i]
helper(start + 1)
nums[i], nums[start] = nums[start], nums[i]
helper(0)
return res
if __name__ == "__main__":
s = Solution()
list2 = [1, 2, 3]
print(s.permutations(list2))
# Implementation of the combinations (subsets) algorithm
from typing import List
class Solution:
def subsets(self, nums: List[int]) -> List[List[int]]:
if not nums:
return []
res = []
N = len(nums)
def helper(idx, temp_list):
res.append(temp_list)
for i in range(idx, N):
helper(i + 1, temp_list + [nums[i]])
helper(0, [])
return res
if __name__ == "__main__":
s = Solution()
list2 = [1, 2, 3,]
print(s.subsets(list2))
|
[
"[email protected]"
] | |
a1c16962e511343f6654c076de283096891c70f9
|
8e24e8bba2dd476f9fe612226d24891ef81429b7
|
/geeksforgeeks/python/hard/3_1.py
|
5e5a923d3f652d3bb692c335928a84af29e9c3c5
|
[] |
no_license
|
qmnguyenw/python_py4e
|
fb56c6dc91c49149031a11ca52c9037dc80d5dcf
|
84f37412bd43a3b357a17df9ff8811eba16bba6e
|
refs/heads/master
| 2023-06-01T07:58:13.996965 | 2021-06-15T08:39:26 | 2021-06-15T08:39:26 | 349,059,725 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,994 |
py
|
GUI to Shutdown, Restart and Logout from the PC using Python
In this article, we are going to write a Python script to shut down, restart,
or log out of your system and bind it to a GUI application.
The **OS module** in Python provides functions for interacting with the
operating system; it is part of the standard library.
**Syntax:**
> **To shut down your system:** os.system("shutdown /s /t 1")
>
> **To restart your system:** os.system("shutdown /r /t 1")
>
> **To log out of your system:** os.system("shutdown -l")
**Implementation of the GUI application using Tkinter:**
## Python3
# import modules
from tkinter import *
import os
# user-defined functions
def shutdown():
return os.system("shutdown /s /t 1")
def restart():
return os.system("shutdown /r /t 1")
def logout():
return os.system("shutdown -l")
# tkinter object
master = Tk()
# background set to grey
master.configure(bg='light grey')
# creating buttons using the Button widget;
# each button calls the corresponding function above
Button(master, text="Shutdown",
command=shutdown).grid(row=0)
Button(master, text="Restart", command=restart).grid(row=1)
Button(master, text="Log out", command=logout).grid(row=2)
mainloop()
**Output:**

**Note:** _Please ensure that you save and close all programs before running
this code, as it will immediately shut down or restart your computer._
|
[
"[email protected]"
] | |
13a4905ae7077bf34c1cfcef8d53ed482623a436
|
2ff4a38b86dfee4115c0a9280e95ff042d36f8bd
|
/programmers/lv2/emergency_boat_lv2.py
|
4e7c867c41c62802da3a4d88574770e17a8f9e71
|
[] |
no_license
|
ohtaehyun/algo_study
|
5e0adc6d18a186d959f0ad191af0d916f5c99793
|
87ac40b89b5ddbba09e8b3dd86ed0a3defc0590b
|
refs/heads/master
| 2023-02-24T13:52:37.323111 | 2021-01-25T09:32:55 | 2021-01-25T09:32:55 | 284,712,413 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 385 |
py
|
from collections import deque
def solution(people, limit):
answer = 0
people.sort()
p =deque(list(people))
while p :
weight = p.popleft()
remain_weight = limit - weight
while p :
w = p.pop()
if w <= remain_weight:
break
else :
answer += 1
answer += 1
return answer
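# Illustrative trace (not part of the original submission): for
# solution([70, 50, 80, 50], 100) the list sorts to [50, 50, 70, 80]; the
# lightest person (50) cannot share a boat with 80 or 70 (each of those gets
# its own boat, raising answer to 2) but can share with the other 50, so the
# function returns 3.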
|
[
"[email protected]"
] | |
dd1ed5bd20b5a60fd53bd43317230eb05bda02ff
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_267/ch80_2020_06_17_20_39_13_688401.py
|
d534bc9a3759e6fe0eb67eb6874f60c857066930
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 203 |
py
|
def interseccao_chaves(dicio1, dicio2):
lista_chaves = []
    # Keep every key of the first dict that is also present in the second.
    for i in dicio1.keys():
        if i in dicio2:
            lista_chaves.append(i)
return lista_chaves
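# Illustrative check (not part of the original exercise submission):
# interseccao_chaves({'a': 1, 'b': 2}, {'b': 3, 'c': 4}) returns ['b'].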
|
[
"[email protected]"
] | |
e1d364e8012b8e88a5aa8ea7ea24b49307bae086
|
5064d0a44fb1e1af0205ae0bfa711bdbf2a33cc6
|
/test/main_json.py
|
495de07bbef5c2a94bb17969b852bb609d084a3b
|
[] |
no_license
|
lmxwade/DSCI_551
|
4e157ae87f370a5e0195ea64c1afb2cf385c2418
|
eecdc9222ae0e3441c167525609dfd54ed4134a8
|
refs/heads/master
| 2023-02-10T15:48:38.755414 | 2020-07-04T16:24:35 | 2020-07-04T16:24:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,092 |
py
|
#
__author__ = 'Aaron Yang'
__email__ = '[email protected]'
__date__ = '9/6/2019 3:45 PM'
import json
if __name__ == "__main__":
data = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}
    print(type(data))  # <class 'dict'>
    # json.dumps encodes a Python object into a JSON string
jsonStr = json.dumps(data)
print(jsonStr) # <class 'str'>
jsonData = '{"a":1,"b":2,"c":3,"d":4,"e":{"1":23}}'
dict_obj = json.loads(jsonData)
print(dict_obj)
print(type(dict_obj))
# keys must be str, int, float, bool or None, not tuple
# print(json.dumps({(1,2): 3}))
# Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
#print(json.loads("{'1': 3}"))
# Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
# print(json.loads('{(1): 3}'))
# Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
# print(json.loads('{1: 3}'))
print(json.loads('{"1": 3}'))
import urllib.parse
# str = 'l.a.a.c.-8th'
str = "HOUSE OF CURRY"
list = ' '.join(str.lower().split()).split(' ')
# list = str.lower().split(' ')
print(list)
|
[
"[email protected]"
] | |
df2687be95865187cd182c14c35da780e63fbbda
|
abc1a497c41ddd8669c8c41da18af65d08ca54e4
|
/AnalysisF/recon_wf/1ns/make_H1ns.py
|
106466fe5adefdd90b89f9c759c167aade3faeb5
|
[] |
no_license
|
gerakolt/direxeno_privet
|
fcef5e3b654720e277c48935acc168472dfd8ecc
|
75e88fb1ed44fce32fce02677f64106121259f6d
|
refs/heads/master
| 2022-12-20T22:01:30.825891 | 2020-10-04T06:01:07 | 2020-10-04T06:01:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,645 |
py
|
import numpy as np
import matplotlib.pyplot as plt
import time
import os
import sys
from scipy.optimize import minimize
from scipy.stats import poisson, binom
from scipy.special import erf as erf
pmts=[0,1,4,7,8,14]
path='/home/gerak/Desktop/DireXeno/190803/Cs137B/EventRecon/'
rec=np.load(path+'recon1ns98999.npz')['rec']
blw_cut=15
init_cut=20
chi2_cut=5000
left=700
right=1000
rec=rec[np.all(rec['init_wf']>20, axis=1)]
rec=rec[np.sqrt(np.sum(rec['blw']**2, axis=1))<blw_cut]
rec=rec[np.sqrt(np.sum(rec['chi2']**2, axis=1))<chi2_cut]
init=np.sum(np.sum(rec['h'][:,:10,:], axis=2), axis=1)
full=np.sum(np.sum(rec['h'], axis=2), axis=1)
rec=rec[init/full<0.5]
up=np.sum(rec['h'][:,:100,0], axis=1)+np.sum(rec['h'][:,:100,0], axis=1)
dn=np.sum(rec['h'][:,:100,-1], axis=1)+np.sum(rec['h'][:,:100,-2], axis=1)+np.sum(rec['h'][:,:100,-3], axis=1)
rec=rec[dn<3*up+18]
spectrum=np.histogram(np.sum(np.sum(rec['h'], axis=1), axis=1), bins=np.arange(1000)-0.5)[0]
rec=rec[np.sum(np.sum(rec['h'], axis=1), axis=1)>left]
rec=rec[np.sum(np.sum(rec['h'], axis=1), axis=1)<right]
H=np.zeros((50, 200, len(pmts)))
G=np.zeros((300, 200))
for j in range(200):
G[:,j]=np.histogram(np.sum(rec['h'][:,j,:], axis=1), bins=np.arange(np.shape(G)[0]+1)-0.5)[0]
spectra=np.zeros((350, len(pmts)))
for i, pmt in enumerate(pmts):
h=rec['h'][:,:,i]
spectra[:,i]=np.histogram(np.sum(h[:,:100], axis=1), bins=np.arange(351)-0.5)[0]
for j in range(200):
H[:,j,i]=np.histogram(h[:,j], bins=np.arange(np.shape(H)[0]+1)-0.5)[0]
np.savez(path+'H', H=H, G=G, left=left, right=right, spectra=spectra, spectrum=spectrum, up_dn_cut='dn<3*up+18')
|
[
"[email protected]"
] | |
2d7752b5248ca30de42447503f8cb51b06fd5d1f
|
21e64f9410323a11d4550b889fd0bb0d68543fab
|
/config/munin/mongodb_conn
|
93f39733e6ea84ca3aa106275b63a88e87de9375
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
quanganhdo/NewsBlur
|
a7eaa3c5bdb2e57998651d736db861f88fcd1e75
|
cef29f01658c845564a5044b48b4cf19efcaa4d6
|
refs/heads/master
| 2021-03-05T23:56:27.976498 | 2020-02-27T15:23:23 | 2020-02-27T15:23:23 | 246,164,347 | 1 | 0 |
MIT
| 2020-03-09T23:34:18 | 2020-03-09T23:34:17 | null |
UTF-8
|
Python
| false | false | 791 |
#!/srv/newsblur/venv/newsblur/bin/python
# -*- coding: utf-8 -*-
from vendor.munin.mongodb import MuninMongoDBPlugin
class MongoDBConnectionsPlugin(MuninMongoDBPlugin):
args = "-l 0 --base 1000"
vlabel = "count"
title = "MongoDB current connections"
info = "Current connections"
fields = (
('connections', dict(
label = "# of Connections",
info = "connections",
type = "GAUGE",
min = "0",
)),
)
def execute(self):
status = self.connection.admin.command('serverStatus')
try:
value = status['connections']['current']
except KeyError:
value = "U"
return dict(connections=value)
if __name__ == "__main__":
MongoDBConnectionsPlugin().run()
|
[
"[email protected]"
] | ||
004329b3ddea39cfcdec79380491743f3b906eb9
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/leap/5b53afc49b5f418cb7d6bbf495c8fdd9.py
|
38933221b5f04fff5d26cc532c350159342a7cc9
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null |
UTF-8
|
Python
| false | false | 239 |
py
|
'''This module will take in a year (positive integer) and return True if the
year is a leap year, False if it is not.
Lesson: Refactor.
'''
def is_leap_year(year):
return (year%400 == 0) or ((year%100 != 0) and (year%4 == 0))
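# Illustrative sanity checks (not part of the original solution):
assert is_leap_year(2000) is True    # divisible by 400
assert is_leap_year(1900) is False   # divisible by 100 but not by 400
assert is_leap_year(2024) is True    # divisible by 4 but not by 100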
|
[
"[email protected]"
] | |
e5187be6c2339cacd981c880a7bcc4f600452526
|
e1112bb6d54acb76e6e991fc4c3fc0d3a1f7b0d6
|
/02 - Sets and tuples/Exercise/02-Sets_of_elements.py
|
7bbb5ad6a390172f3d0bbbf55241d1a067f2744d
|
[] |
no_license
|
MiroslavPK/Python-Advanced
|
0326209d98254d4578a63dcd4c32b49be183baf2
|
0c696a220aa587edb2505e8d986b041cc90a46f3
|
refs/heads/master
| 2023-01-12T10:46:06.590096 | 2020-11-18T19:08:55 | 2020-11-18T19:08:55 | 295,449,832 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 224 |
py
|
n, m = map(int, input().split())
n_set = set()
m_set = set()
for i in range(n+m):
if i < n:
n_set.add(input())
else:
m_set.add(input())
intersection = n_set & m_set
print('\n'.join(intersection))
|
[
"[email protected]"
] | |
6f1bb4ff7967bfd1652c3e845f0f639580fcd308
|
a45b8075f3c3b247a3cac43cb12bf4d80103f608
|
/glamazer/urls.py
|
2df7d0d6d8a24ce521febc8454892b0cfa167c9e
|
[] |
no_license
|
kahihia/glamfame
|
c890a8772aa92b8ed9e3c0bb664c5dae187d1c09
|
af91d4d16d0c8847c42eb97be839bf08015274b6
|
refs/heads/master
| 2021-01-21T09:59:52.700945 | 2016-02-15T17:16:13 | 2016-02-15T17:16:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,906 |
py
|
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'glamazer.core.views.home', name='home'),
url(r'^hairstyle/', 'glamazer.core.views.base', name='hair'),
url(r'^nails-design/', 'glamazer.core.views.base', name='nails'),
url(r'^make-up/', 'glamazer.core.views.base', name='make_up'),
url(r'^style/', 'glamazer.core.views.base', name='style'),
url(r'^contest/', 'glamazer.core.views.base', name='contest'),
url(r'^leaderboards/', 'glamazer.core.views.base', name='leaderboards'),
url(r'^result/', 'glamazer.core.views.search', name='result'),
url(r'^get_notifications/', 'glamazer.notifications.views.get_notifications', name='short_notifications'),
url(r'^get_notifications_count/', 'glamazer.notifications.views.get_notification_count', name='notification_count'),
url(r'^autocomplete_tags/', 'glamazer.core.views.autocomplete_tags', name='autocomplete_tags'),
url(r'^sign_up/', TemplateView.as_view(template_name="sign_up.html"), name='signup'),
url(r'^terms/', TemplateView.as_view(template_name="core/terms.html"), name='terms'),
url(r'^imprint/', TemplateView.as_view(template_name="core/imprint.html"), name='imprint'),
url(r'^privacy/', TemplateView.as_view(template_name="core/privacy.html"), name='privacy'),
url(r'^faq/', TemplateView.as_view(template_name="core/faq.html"), name='faq'),
url(r'^about_us/', TemplateView.as_view(template_name="core/about_us.html"), name='about_us'),
url(r'^contacts/', TemplateView.as_view(template_name="core/contact_us.html"), name='contacts'),
url(r'^admin/', include(admin.site.urls)),
url(r'^users/', include('glamazer.users.urls')),
url(r'^artists/', include('glamazer.artists.urls')),
url(r'^salons/', include('glamazer.salons.urls')),
url(r'^listings/', include('glamazer.listings.urls')),
url(r'^favorites/', include('glamazer.favorites.urls')),
url(r'^reviews/', include('glamazer.reviews.urls')),
url(r'^widget/', include('glamazer.widget.urls')),
url(r'^success/', 'glamazer.payments.views.success_payment', name='success'),
url(r'^error/', 'glamazer.payments.views.error_payment', name='error'),
url(r'^cancel/', 'glamazer.payments.views.cancel_payment', name='cancel'),
url(r'^get_hint/', 'glamazer.core.views.get_hint', name='get_hint'),
url(r'^start_payment/', 'glamazer.payments.views.start_payment', name='paypal_payment'),
url(r'^send_feedback/$', 'glamazer.users.views.send_feedback', name='send_feedback'),
)
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"[email protected]"
] | |
2fb3622d8520b0df9fdbf0783f3a2333622c2c5b
|
46ad22b772f0bb115e1192ca24c86b1593d51870
|
/eclipsed/src/cursor.py
|
f67a0516f50351fa94c3799522d029fba269422b
|
[
"CC0-1.0",
"WTFPL",
"CC-BY-4.0"
] |
permissive
|
cosmologicon/unifac
|
fb533abfbba7ebb33561a330f7be5d22dbc2a373
|
e7668c6736cd4db66f8d56e945afb69ec03f2160
|
refs/heads/master
| 2022-06-15T10:46:28.448477 | 2022-05-30T20:26:55 | 2022-05-30T20:26:55 | 37,033,765 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 209 |
py
|
tobuild = None
pointingto = None
# These don't really need to go here since they're redundant with hud.mode
# but this module was looking a little lonely so here they are.
disable = False
unbuild = False
|
[
"[email protected]"
] | |
74d8d9d2cc5d152126537573a927ca64f8afb791
|
5a7abc4537039860c49e9a80219efa759aad1b6f
|
/tests/providers/aws/services/ec2/ec2_securitygroup_from_launch_wizard/ec2_securitygroup_from_launch_wizard_test.py
|
a3c14ce6680594654bebaca336460ebb88319e50
|
[
"Apache-2.0"
] |
permissive
|
sec-js/prowler
|
d5a06c72f5d7e490bade1167966f83f7a5d7ed15
|
f72be9a1e492ad593c9ac267d3ca07f626263ccd
|
refs/heads/master
| 2023-08-31T22:48:33.983360 | 2022-12-22T16:02:28 | 2022-12-22T16:02:28 | 243,866,744 | 0 | 0 |
Apache-2.0
| 2022-12-23T12:23:20 | 2020-02-28T22:37:02 |
Python
|
UTF-8
|
Python
| false | false | 4,834 |
py
|
from re import search
from unittest import mock
from boto3 import client, resource
from moto import mock_ec2
AWS_REGION = "us-east-1"
EXAMPLE_AMI_ID = "ami-12c6146b"
class Test_ec2_securitygroup_from_launch_wizard:
@mock_ec2
def test_ec2_default_sgs(self):
# Create EC2 Mocked Resources
ec2_client = client("ec2", region_name=AWS_REGION)
ec2_client.create_vpc(CidrBlock="10.0.0.0/16")
from prowler.providers.aws.lib.audit_info.audit_info import current_audit_info
from prowler.providers.aws.services.ec2.ec2_service import EC2
current_audit_info.audited_partition = "aws"
current_audit_info.audited_regions = ["eu-west-1", "us-east-1"]
with mock.patch(
"prowler.providers.aws.services.ec2.ec2_securitygroup_from_launch_wizard.ec2_securitygroup_from_launch_wizard.ec2_client",
new=EC2(current_audit_info),
):
# Test Check
from prowler.providers.aws.services.ec2.ec2_securitygroup_from_launch_wizard.ec2_securitygroup_from_launch_wizard import (
ec2_securitygroup_from_launch_wizard,
)
check = ec2_securitygroup_from_launch_wizard()
result = check.execute()
# One default sg per region
assert len(result) == 3
# All are compliant by default
assert result[0].status == "PASS"
@mock_ec2
def test_ec2_launch_wizard_sg(self):
# Create EC2 Mocked Resources
ec2_client = client("ec2", region_name=AWS_REGION)
ec2_client.create_vpc(CidrBlock="10.0.0.0/16")
sg_id = ec2_client.create_security_group(
GroupName="launch-wizard-1", Description="launch wizard sg"
)["GroupId"]
from prowler.providers.aws.lib.audit_info.audit_info import current_audit_info
from prowler.providers.aws.services.ec2.ec2_service import EC2
current_audit_info.audited_partition = "aws"
current_audit_info.audited_regions = ["eu-west-1", "us-east-1"]
with mock.patch(
"prowler.providers.aws.services.ec2.ec2_securitygroup_from_launch_wizard.ec2_securitygroup_from_launch_wizard.ec2_client",
new=EC2(current_audit_info),
):
# Test Check
from prowler.providers.aws.services.ec2.ec2_securitygroup_from_launch_wizard.ec2_securitygroup_from_launch_wizard import (
ec2_securitygroup_from_launch_wizard,
)
check = ec2_securitygroup_from_launch_wizard()
result = check.execute()
# One default sg per region + created one
assert len(result) == 4
# Search changed sg
for sg in result:
if sg.resource_id == sg_id:
assert sg.status == "FAIL"
assert search(
"was created using the EC2 Launch Wizard",
sg.status_extended,
)
@mock_ec2
def test_ec2_compliant_default_sg(self):
# Create EC2 Mocked Resources
ec2_client = client("ec2", region_name=AWS_REGION)
ec2_client.create_vpc(CidrBlock="10.0.0.0/16")
default_sg_id = ec2_client.describe_security_groups(GroupNames=["default"])[
"SecurityGroups"
][0]["GroupId"]
ec2 = resource("ec2", region_name=AWS_REGION)
ec2.create_instances(
ImageId=EXAMPLE_AMI_ID,
MinCount=1,
MaxCount=1,
SecurityGroupIds=[
default_sg_id,
],
)
from prowler.providers.aws.lib.audit_info.audit_info import current_audit_info
from prowler.providers.aws.services.ec2.ec2_service import EC2
current_audit_info.audited_partition = "aws"
current_audit_info.audited_regions = ["eu-west-1", "us-east-1"]
with mock.patch(
"prowler.providers.aws.services.ec2.ec2_securitygroup_from_launch_wizard.ec2_securitygroup_from_launch_wizard.ec2_client",
new=EC2(current_audit_info),
):
# Test Check
from prowler.providers.aws.services.ec2.ec2_securitygroup_from_launch_wizard.ec2_securitygroup_from_launch_wizard import (
ec2_securitygroup_from_launch_wizard,
)
check = ec2_securitygroup_from_launch_wizard()
result = check.execute()
# One default sg per region
assert len(result) == 3
# Search changed sg
for sg in result:
if sg.resource_id == default_sg_id:
assert sg.status == "PASS"
assert search(
"was not created using the EC2 Launch Wizard",
sg.status_extended,
)
|
[
"[email protected]"
] | |
46c75cfc66b41a8d03c8a63219aa1d6fb596b2ba
|
c7c001c9011f559b8b1c85d1c3e0387a86a99628
|
/y2018/day18/lumber_collection.py
|
ec0c9f79bbc2fe5449c89bab26476d8ddca92c8e
|
[] |
no_license
|
ericbgarnick/AOC
|
5ddfd18850b96f198e125f5d1f0978e852195ccf
|
a935faad3fcbbe3ac601e2583ed27b38bc60ef69
|
refs/heads/main
| 2023-04-12T18:50:09.926169 | 2023-04-09T12:47:35 | 2023-04-09T12:47:35 | 224,573,310 | 2 | 1 | null | 2021-12-15T14:25:33 | 2019-11-28T05:00:52 |
Python
|
UTF-8
|
Python
| false | false | 1,890 |
py
|
from typing import List
from acre import Acre
class LumberCollection:
def __init__(self, area_map: List[str]):
row_len = len(area_map[0])
num_rows = len(area_map)
self.collection_area = [] # type: List[Acre]
self.populate_area(area_map)
self.link_acres(row_len, num_rows)
def populate_area(self, area_map: List[str]):
for row in area_map:
for acre in row:
acre_type = Acre.TYPE_FOR_SYMBOL[acre]
self.collection_area.append(Acre(acre_type))
def link_acres(self, row_len: int, num_rows: int):
for i, acre in enumerate(self.collection_area):
if i % row_len:
# W
acre.neighbors.add(self.collection_area[i - 1])
if i >= row_len:
# NW
acre.neighbors.add(self.collection_area[i - row_len - 1])
# N
acre.neighbors.add(self.collection_area[i - row_len])
if i < row_len * (num_rows - 1):
# SW
acre.neighbors.add(self.collection_area[i + row_len - 1])
# S
acre.neighbors.add(self.collection_area[i + row_len])
if i % row_len != row_len - 1:
# E
acre.neighbors.add(self.collection_area[i + 1])
if i >= row_len:
# NE
acre.neighbors.add(self.collection_area[i - row_len + 1])
# N
acre.neighbors.add(self.collection_area[i - row_len])
if i < row_len * (num_rows - 1):
# SE
acre.neighbors.add(self.collection_area[i + row_len + 1])
# S
acre.neighbors.add(self.collection_area[i + row_len])
|
[
"[email protected]"
] | |
1c06d723254657701479f4b0179290148c45af44
|
0d76ba0da5446f20e500b7e31f53821b14cb49d8
|
/Codility/python/abs_distinct.py
|
a14e108198517a0a7f73819039d873dfe2b9a69b
|
[] |
no_license
|
filwaitman/playground
|
948aa687be06d456c86b65ee3ab5fb9792149459
|
dfdfab9002bff3a04f37e0c161363a864cd30f3e
|
refs/heads/master
| 2021-01-12T12:59:49.057832 | 2020-01-26T18:51:02 | 2020-01-26T18:51:02 | 68,865,259 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 296 |
py
|
# -*- coding: utf-8 -*-
# https://codility.com/programmers/task/abs_distinct
# 100/100
def solution(A):
positives = map(abs, A)
return len(set(positives))
assert solution([-5, -3, -1, 0, 3, 6]) == 5
assert solution([42]) == 1
assert solution(range(-1000, 0) + range(1, 1001)) == 1000
|
[
"[email protected]"
] | |
70e7af3e0751be27c0879cdd30eb63c48c35d1d0
|
a38670ee08ea64af33477899a68ee22936f70ce7
|
/luffy/第三模块/第6章网络编程/第6章每小节/2 加上循环/04 模拟ssh远程执行命令/客户端.py
|
a62f5c2f2197ac5dd07315df58754ce788d23051
|
[] |
no_license
|
foremostxiao/d
|
40ed37215f411e8b081a4cb92c8ecbd335cd9d76
|
fe80672adc6b2406365b05d5cedd02c6abf66c11
|
refs/heads/master
| 2020-03-29T13:51:19.589004 | 2018-09-23T09:29:56 | 2018-09-23T09:29:56 | 149,985,622 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 438 |
py
|
import socket
phone = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
phone.connect(('127.0.0.1',8080))
while True:
    # 1. send the command
    cmd = input('>>>').strip()  # read the command to run
    if not cmd: continue
    phone.send(cmd.encode('utf-8'))  # send hands the bytes to the operating system
    # 2. receive the command's result and print it
    data = phone.recv(1024)  # 1024 is a pitfall: the reply may exceed 1024 bytes; to be handled later
print(data.decode('gbk'))
phone.close()
|
[
"[email protected]"
] | |
c0dfa6271b2327073a0a168b47640c937cbeee81
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/303/usersdata/287/71629/submittedfiles/testes.py
|
967c3e410a7b976e11bd707dcfdc03122824963f
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 220 |
py
|
# -*- coding: utf-8 -*-
# START HERE
a=int(input('Que horas são? [0-23]'))
if a > 3 and a < 12:
print('Bom diiiia!')
elif a >= 12 and a < 18:
print('Boa tarde')
else:
print('Boa noite, ate amanha!')
|
[
"[email protected]"
] | |
e80a8d81e6392f1d7934470081943b1bf032f8fd
|
d53479a3a5efab85a065b4a7c08cb38b6246f0eb
|
/python-division.py
|
66b9ee6a6492eb7b5fa6987137dcbe09a4d4af61
|
[] |
no_license
|
Snehal6697/hackerrank
|
0242f927f630e652d6dcca901af8d8bd737b671f
|
c418fb24e08e5c57a1bd0d91f95ab2af32f01c64
|
refs/heads/master
| 2022-12-26T12:35:47.586007 | 2020-07-07T22:14:39 | 2020-07-07T22:14:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 193 |
py
|
#!/usr/bin/env python2.7
from __future__ import division
def main():
a = int(raw_input())
b = int(raw_input())
print a // b
print a / b
if __name__ == '__main__':
main()
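# Illustrative note (not part of the original submission): thanks to the
# __future__ import, entering 4 and 3 prints 1 (floor division) and then
# 1.3333333333333333 (true division), matching Python 3 semantics.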
|
[
"[email protected]"
] | |
259cd5eaaa20071850043e7d7215f3ce6aebb6c9
|
02447b317690827683dc329153e74f1599e4db92
|
/wazimap_ng/general/views.py
|
cf915a8c7c7477aca9df7290d0a6c06d7c62058d
|
[
"Apache-2.0"
] |
permissive
|
neoromantique/wazimap-ng
|
fc8ca6704851db8d9941d3bcb9c06e367c2e1e94
|
c19e9450655f5d404c60e2b4d214715ec8a0b1d9
|
refs/heads/master
| 2021-02-11T00:02:54.001820 | 2020-02-13T20:36:40 | 2020-02-13T20:36:40 | 244,431,358 | 0 | 0 |
Apache-2.0
| 2020-03-02T17:23:51 | 2020-03-02T17:23:50 | null |
UTF-8
|
Python
| false | false | 1,482 |
py
|
from rest_framework.response import Response
from rest_framework.decorators import api_view
from ..datasets import models as dataset_models
from ..datasets import views as dataset_views
from ..boundaries import models as boundaries_models
from ..boundaries import views as boundaries_views
from ..utils import cache_decorator
@cache_decorator("consolidated_profile")
def consolidated_profile_helper(profile_id, code):
profile_js = dataset_views.profile_geography_data_helper(profile_id, code)
boundary_js = boundaries_views.geography_item_helper(code)
children_boundary_js = boundaries_views.geography_children_helper(code)
parent_layers = []
parents = profile_js["geography"]["parents"]
children_levels = [p["level"] for p in parents[1:]] + [profile_js["geography"]["level"]]
pairs = zip(parents, children_levels)
for parent, children_level in pairs:
layer = boundaries_views.geography_children_helper(parent["code"])
parent_layers.append(layer[children_level])
return ({
"profile": profile_js,
"boundary": boundary_js,
"children": children_boundary_js,
"parent_layers": parent_layers,
})
@api_view()
def consolidated_profile(request, profile_id, code):
js = consolidated_profile_helper(profile_id, code)
return Response(js)
@api_view()
def consolidated_profile_test(request, profile_id, code):
js = consolidated_profile_helper(profile_id, code)
return Response("test")
|
[
"[email protected]"
] | |
2b66f779ad34d216561b67a4a62e5d69750079e3
|
869d917ef14fb8e4bb899a192903dd1f64028d2b
|
/train/train_street_view_regression.py
|
faa00a2a48b49ee081de9cbc395048edd88abcef
|
[] |
no_license
|
andreiqv/rotnet_not_my
|
bbd7fadba9c2e000d324e931d4fddc95ad8e4e25
|
ce0ea3f80aba263ae5fc54549c5d3d571d02ef59
|
refs/heads/master
| 2020-04-26T11:00:21.724905 | 2019-03-02T22:50:31 | 2019-03-02T22:50:31 | 173,502,216 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,469 |
py
|
from __future__ import print_function
import os
import sys
from keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard
from keras.applications.resnet50 import ResNet50
from keras.applications.imagenet_utils import preprocess_input
from keras.models import Model
from keras.layers import Dense, Flatten
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils import angle_error_regression, RotNetDataGenerator
from data.street_view import get_filenames as get_street_view_filenames
data_path = os.path.join('data', 'street_view')
train_filenames, test_filenames = get_street_view_filenames(data_path)
print(len(train_filenames), 'train samples')
print(len(test_filenames), 'test samples')
model_name = 'rotnet_street_view_resnet50_regression'
# input image shape
input_shape = (224, 224, 3)
# load base model
base_model = ResNet50(weights='imagenet', include_top=False,
input_shape=input_shape)
# append classification layer
x = base_model.output
x = Flatten()(x)
final_output = Dense(1, activation='sigmoid', name='fc1')(x)
# create the new model
model = Model(inputs=base_model.input, outputs=final_output)
model.summary()
# model compilation
model.compile(loss=angle_error_regression,
optimizer='adam')
# training parameters
batch_size = 16 # was 64
nb_epoch = 50
output_folder = 'models'
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# callbacks
checkpointer = ModelCheckpoint(
filepath=os.path.join(output_folder, model_name + '.hdf5'),
save_best_only=True
)
early_stopping = EarlyStopping(patience=0)
tensorboard = TensorBoard()
# training loop
model.fit_generator(
RotNetDataGenerator(
train_filenames,
input_shape=input_shape,
batch_size=batch_size,
one_hot=False,
preprocess_func=preprocess_input,
crop_center=True,
crop_largest_rect=True,
shuffle=True
),
steps_per_epoch=len(train_filenames) / batch_size,
epochs=nb_epoch,
validation_data=RotNetDataGenerator(
test_filenames,
input_shape=input_shape,
batch_size=batch_size,
one_hot=False,
preprocess_func=preprocess_input,
crop_center=True,
crop_largest_rect=True
),
validation_steps=len(test_filenames) / batch_size,
callbacks=[checkpointer, early_stopping, tensorboard],
nb_worker=10,
pickle_safe=True,
verbose=1
)
|
[
"[email protected]"
] | |
fdb8fc4c86a750baa500c7ee03cbb74671b28f35
|
ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f
|
/Sourcem8/pirates/minigame/RepairGlobals.py
|
8027e62d79153e2436b77a14e3c56012b7f68cec
|
[] |
no_license
|
BrandonAlex/Pirates-Online-Retribution
|
7f881a64ec74e595aaf62e78a39375d2d51f4d2e
|
980b7448f798e255eecfb6bd2ebb67b299b27dd7
|
refs/heads/master
| 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,950 |
py
|
from pandac.PandaModules import Vec3, Vec4, Point3
class VariableContainer:
def __init__(self):
pass
AI = VariableContainer()
AI.goldRewardRange = (15, 35)
AI.goldRewardMultiplier = [
(14.0, 3.0),
(18.0, 2.5),
(24.0, 2.0),
(36.0, 1.6000000000000001),
(52.0, 1.3),
(72.0, 1.1499999999999999)]
AI.repairRewardRange = (5000, 1000)
AI.grapeshotEffectCooldown = 2.0
AI.grapeshotEffectProbability = 0.5
AI.kickedTimestampLife = 60.0 * 60.0
AI.inactiveClientKickTime = 60.0 * 2.0 + 2.0
AI.numTimesKickedBeforeBlacklisted = 3
AI.maxPlayersPerBench = 5
AI.baseRepairAmount = 0.5
AI.maxRepairCount = 30
AI.reductionAtFullRepair = 0.5
AI.maxCombatCount = 20
AI.reductionAtFullCombat = 0.5
AI.critGrapeshotCombatDebuff = 3
AI.grapeshotCombatDebuff = 3
AI.regularCombatDebuff = 1
AI.totalDifficulty = AI.maxRepairCount + AI.maxCombatCount
AI.difficultyIncreasePoint = AI.totalDifficulty / 10.0
AI.repairDebuffPerModelClass = {
1: 1.0,
2: 1.0,
3: 1.0,
11: 1.0,
12: 1.0,
13: 1.0,
21: 1.0,
22: 1.0,
23: 1.0,
24: 1.0,
25: 1.0,
26: 1.0,
27: 1.0 }
AI.sailRepairPercent = 0.14999999999999999
AI.armorRepairPercent = 0.14999999999999999
AI.hpRepairPercent = 0.40000000000000002
AI.hpTertiaryDecay = 0.0
Common = VariableContainer()
Common.guiShakeCooldownTime = 2.0
Common.youWinPos = {
'careening': (-0.12, 0.0, 0.22),
'pumping': (0.0, 0.0, 0.14999999999999999),
'sawing': (0.0, 0.0, 0.14999999999999999),
'bracing': (0.0, 0.0, 0.22),
'hammering': (0.0, 0.0, 0.38),
'pitching': (0.0, 0.0, 0.22) }
Common.scorePos = {
'careening': (-0.12, 0.0, 0.089999999999999997),
'pumping': (0.0, 0.0, 0.02),
'sawing': (0.0, 0.0, 0.02),
'bracing': (0.0, 0.0, 0.089999999999999997),
'hammering': (0.0, 0.0, 0.25),
'pitching': (0.0, 0.0, 0.089999999999999997) }
Common.speedThresholds = {
'careening': [
(5.0, 15.0),
(10.0, 30.0),
(20.0, 90.0)],
'pumping': [
(10.0, 13.0),
(20.0, 40.0),
(40.0, 90.0)],
'sawing': [
(6.0, 9.0),
(12.0, 18.0),
(30.0, 45.0)],
'bracing': [
(5.0, 15.0),
(30.0, 45.0),
(90.0, 180.0)],
'hammering': [
(5.0, 10.0),
(10.0, 20.0),
(20.0, 40.0)],
'pitching': [
(8.0, 16.0),
(16.0, 32.0),
(32.0, 64.0)] }
Careening = VariableContainer()
Careening.barnacleCountRange = (15, 30)
Careening.superScrubMultiplier = 4.0
Careening.superScrubDecreaseRate = 0.40000000000000002
Careening.superScrubIncreaseRate = 0.80000000000000004
Careening.barnacleHPRange = (30, 70)
Careening.barnacleHPScaleRange = (1.0, 3.0)
Careening.xRange = (-0.61499999999999999, 0.375)
Careening.yRange = (-0.16500000000000001, 0.51500000000000001)
Careening.barnacleRadius = 0.040000000000000001
Careening.mossPercentage = 0.75
Careening.mossPosVariance = 0.01
Careening.mossEdgeRestrictionAmount = 0.10000000000000001
Careening.showBarnacleHP = False
Pumping = VariableContainer()
Pumping.pumpPowerRange = (0.059999999999999998, 0.02)
Pumping.hitRange = (0.17999999999999999, 0.17999999999999999)
Pumping.barStartRange = (1.2, 1.0)
Pumping.barSpeedMin = 2.0
Pumping.barSpeedMax = 0.29999999999999999
Pumping.barSpeedIncrease = 1.25
Pumping.barSpeedDecrease = 0.80000000000000004
Pumping.chainMultiplier = 0.080000000000000002
Sawing = VariableContainer()
Sawing.difficultySets = ((3, 3, 1, 1), (3, 1, 1, 2), (1, 2, 1, 2), (3, 1, 2, 2), (2, 2, 1, 2), (3, 2, 1, 4), (2, 4, 3, 2), (4, 2, 1, 2), (4, 1, 1, 5), (2, 2, 4, 5))
Sawing.waypointRange = (0.080000000000000002, 0.080000000000000002, 0.080000000000000002, 0.11, 0.10000000000000001)
Sawing.sawlineColor = Vec4(0.75, 0.75, 0.75, 0.69999999999999996)
Sawing.sawlineLineThickness = 4.0
Sawing.sawlineLinespawnDist = 0.02
Sawing.testWaypointDelta = 0.040000000000000001
Sawing.playSawingSoundDelta = 0.10000000000000001
Sawing.totalPoints = 20.0
Sawing.pointsPerBoard = 7.0
Sawing.pointsLostForZone1 = 4.0
Sawing.pointsLostForZone2 = 1.0
Sawing.cutColor = (0.29999999999999999, 0.29999999999999999, 0.29999999999999999, 1.0)
Sawing.zone1Color = (0.75, 0.75, 0.75, 1.0)
Sawing.zone2Color = (0.75, 0.75, 0.75, 1.0)
Sawing.sawTurnSpeed = 1000
Sawing.newBoardAnimTime = 0.25
Sawing.splitBoardAnimTime = 0.5
Sawing.activeBoardPosition = (0.0, 0.0, 0.10000000000000001)
Sawing.boardYDist = 1.3
from RepairGridPiece import GOAL_HORIZ_1, GOAL_HORIZ_2, GOAL_VERT_1
Bracing = VariableContainer()
Bracing.difficultyLevels = ((8, (GOAL_HORIZ_1,)), (7, (GOAL_HORIZ_1,)), (6, (GOAL_HORIZ_1,)), (7, (GOAL_HORIZ_1, GOAL_VERT_1)), (6, (GOAL_HORIZ_1, GOAL_VERT_1)), (5, (GOAL_HORIZ_1, GOAL_VERT_1)), (4, (GOAL_HORIZ_1, GOAL_VERT_1)), (5, (GOAL_HORIZ_1, GOAL_HORIZ_2)), (4, (GOAL_HORIZ_1, GOAL_HORIZ_2)), (3, (GOAL_HORIZ_1, GOAL_HORIZ_2)))
Bracing.moveTime = 0.080000000000000002
Bracing.fadeTime = 0.14999999999999999
Bracing.movePieceThreshold = 0.080000000000000002
Bracing.pushPieceThreshold = 0.01
Bracing.repairTimeframe = 20
Hammering = VariableContainer()
Hammering.reticleScaleRange = (0.20000000000000001, 1.0)
Hammering.reticleScaleRate = 1.0
Hammering.recoveryTime = 4.0
Hammering.nailCountRange = (4, 8)
Hammering.rankingThresholds = (5, 4, 3, 2, 1)
Hammering.hitForgiveness = 0.10000000000000001
Hammering.useReticleColor = True
Pitching = VariableContainer()
Pitching.leakScaleRange = (0.10000000000000001, 0.27500000000000002)
Pitching.spawnDelayRange = (0.5, 0.10000000000000001, 2.0, 1.0)
Pitching.leakCountRange = (16, 32)
Pitching.maxLeaksRange = (2, 5)
Pitching.useReticle = True
Pitching.ratingGive = 0
REPAIR_AT_SEA_REWARD_RATING = [
0,
1,
1,
1.5,
2.0]
REPAIR_AT_SEA_GAME_MULTIPLIER = [
20,
60,
200,
40,
20]
def getAtSeaRepairRating(rating, gameType):
if rating > 4 or rating < 0:
rating = 0
return REPAIR_AT_SEA_REWARD_RATING[rating] * REPAIR_AT_SEA_GAME_MULTIPLIER[gameType]
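# Illustrative worked example (not in the original file): for rating 3 and
# gameType 2 this returns 1.5 * 200 = 300.0; out-of-range ratings fall back
# to index 0 and therefore yield 0.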
|
[
"[email protected]"
] | |
bce6368fc8a866dd4bff9c0a271687bdaea848c1
|
5e014f95b49f376b34d20760c41f09bdca094247
|
/flask_ide/auth/models.py
|
2fe1fcdca8701cfe3cf45972adb5b95603c108eb
|
[] |
no_license
|
jstacoder/flask-ide
|
34ae304c211c7b263f37b2fcf0660ae76053c0a2
|
3890756c094b4b7872bad7d915e764e3e32dcb2d
|
refs/heads/master
| 2023-02-12T11:22:24.412680 | 2020-07-20T17:21:55 | 2020-07-20T17:21:55 | 29,079,246 | 50 | 10 | null | 2023-02-02T07:17:40 | 2015-01-11T02:51:35 |
JavaScript
|
UTF-8
|
Python
| false | false | 3,478 |
py
|
from flask_xxl.basemodels import BaseMixin
from flask import url_for
from LoginUtils import encrypt_password, check_password
from sqlalchemy.ext.declarative import declared_attr
#import sqlalchemy to global namespace
from sqlalchemy import (
UnicodeText,func,Enum,UniqueConstraint,DateTime,Text,Column,Integer,
ForeignKey,Boolean,String,Table
)
from sqlalchemy.orm import relationship, backref
class UnknownUser(object):
is_unknown = True
class Role(BaseMixin):
__tablename__ = 'roles'
name = Column(String(255))
can_view = Column(Boolean,default=True,nullable=False)
can_add = Column(Boolean,default=False,nullable=False)
can_edit = Column(Boolean,default=False,nullable=False)
can_delete = Column(Boolean,default=False,nullable=False)
class User(BaseMixin):
__tablename__ = 'users'
first_name = Column(String(255),default="")
last_name = Column(String(255),default="")
email = Column(String(255),nullable=False,unique=True)
role_id = Column(Integer,ForeignKey('roles.id'))
role = relationship('Role',backref=backref(
'users',lazy='dynamic'))
add_date = Column(DateTime,default=func.now())
_pw_hash = Column(UnicodeText,nullable=False)
age = Column(Integer)
def __init__(self,*args,**kwargs):
if 'first_name' in kwargs:
self.first_name = kwargs.pop('first_name')
if 'last_name' in kwargs:
self.last_name = kwargs.pop('last_name')
if 'email' in kwargs:
self.email = kwargs.pop('email')
if 'role' in kwargs:
self.role = kwargs.pop('role')
if 'role_id' in kwargs:
self.role_id = kwargs.pop('role_id')
if 'password' in kwargs:
self.password = kwargs.pop('password')
def _to_json(self):
import json
return json.dumps(
{
'first_name':self.first_name,
'last_name':self.last_name,
'email':self.email,
'age':self.age,
'date_added':self.add_date,
}
)
@declared_attr
def __table_args__(cls):
return (UniqueConstraint('email','first_name','last_name'),{})
@property
def is_unknown(self):
return False
def check_password(self, pw):
return check_password(pw,self._pw_hash)
@classmethod
def get_by_email(cls, email):
return cls.query().filter_by(email=email).first()
@property
def password(self):
return 'private'
raise ValueError('Private Value!!!!')
@password.setter
def password(self,pw):
self._pw_hash = encrypt_password(pw)
@property
def full_name(self):
return '{} {}'.format(self.first_name.title(),self.last_name.title())
@property
def name(self):
return str(self.first_name)
def __str__(self):
if self.first_name != "":
rtn = self.full_name
else:
rtn = self.email
return rtn
def __repr__(self):
return 'User<{} {}'.format(self.email,self.first_name)
def _get_absolute_url(self):
return url_for('member.profile',member_id=str(int(self.id)))
@property
def absolute_url(self):
return str(self._get_absolute_url())
def _get_edit_url(self):
return '#'
@property
def edit_url(self):
return str(self._get_edit_url())
|
[
"[email protected]"
] | |
87c7524501017490341a86012b5d7364f04aacde
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_54/78.py
|
1e0afea1344679e1079ae74d8bb54a891e5ad167
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 956 |
py
|
def gcd(a,b):
while (b != 0):
c = a%b
a = b
b = c
return a
def get_gcd(line):
g = line[0]
cnt = len(line)
for i in range(1,cnt):
g = gcd(g,line[i])
return g
def solve(line):
N = int(line.pop(0))
for i in range(0,N):
line[i] = int(line[i])
line.sort()
diffs = list()
for i in range(0,N-1):
diff = line[i+1] - line[i]
diffs.append(diff)
g = pg = get_gcd(diffs)
if g < line[0]:
g = line[0] / pg * pg
if line[0] % pg != 0:
g += pg
ans = g - line[0]
return ans
AnsT = ""
myfile = open("B.in")
T = int(myfile.readline())
for i in range(0,T):
line = myfile.readline()
line = line.split("\n")
print i
ans = solve(line[0].split(" "))
AnsT = AnsT + "Case #"+ str(i+1) +": "+str(ans) + "\n"
outfile = open("B.out","w")
outfile.write(AnsT)
outfile.close()
|
[
"[email protected]"
] | |
d5b659372a216b999b788a1e5dbe6d3852e2a1f3
|
474525154a4e1d48ef5242d1f44164d05399b145
|
/tensorflow_probability/python/experimental/distributions/mvn_precision_factor_linop_test.py
|
47676d4d6f31be7ebf0b5ac98d233982286579c7
|
[
"Apache-2.0"
] |
permissive
|
svshivapuja/probability
|
9855737790f74a39169688fbfec9671deef804d9
|
af7ccb22d972329633530c3b754ed1f49472f6a7
|
refs/heads/main
| 2023-07-17T04:14:53.703622 | 2021-08-30T17:47:06 | 2021-08-30T17:47:06 | 400,983,015 | 1 | 0 |
Apache-2.0
| 2021-08-29T07:51:29 | 2021-08-29T07:51:29 | null |
UTF-8
|
Python
| false | false | 8,157 |
py
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for tensorflow_probability.python.experimental.distributions.mvn_precision_factor_linop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import test_combinations
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
tfd_e = tfp.experimental.distributions
@test_util.test_all_tf_execution_regimes
class MVNPrecisionFactorLinOpTest(test_util.TestCase):
def _random_constant_spd_linop(
self,
event_size,
batch_shape=(),
conditioning=1.2,
dtype=np.float32,
):
"""Randomly generate a constant SPD LinearOperator."""
# The larger conditioning is, the better posed the matrix is.
# With conditioning = 1, it will be on the edge of singular, and likely
# numerically singular if event_size is large enough.
# Conditioning on the small side is best, since then the matrix is not so
# diagonally dominant, and we therefore test use of transpositions better.
assert conditioning >= 1
scale_wishart = tfd.WishartLinearOperator(
df=dtype(conditioning * event_size),
scale=tf.linalg.LinearOperatorIdentity(event_size, dtype=dtype),
input_output_cholesky=False,
)
# Make sure to evaluate here. This ensures that the linear operator is a
# constant rather than a random operator.
matrix = self.evaluate(
scale_wishart.sample(batch_shape, seed=test_util.test_seed()))
return tf.linalg.LinearOperatorFullMatrix(
matrix, is_positive_definite=True, is_self_adjoint=True)
@test_combinations.generate(
test_combinations.combine(
use_loc=[True, False],
use_precision=[True, False],
event_size=[3],
batch_shape=[(), (2,)],
n_samples=[5000],
dtype=[np.float32, np.float64],
),
)
def test_log_prob_and_sample(
self,
use_loc,
use_precision,
event_size,
batch_shape,
dtype,
n_samples,
):
cov = self._random_constant_spd_linop(
event_size, batch_shape=batch_shape, dtype=dtype)
precision = cov.inverse()
precision_factor = precision.cholesky()
# Make sure to evaluate here, else you'll have a random loc vector!
if use_loc:
loc = self.evaluate(
tf.random.normal(
batch_shape + (event_size,),
dtype=dtype,
seed=test_util.test_seed()))
else:
loc = None
mvn_scale = tfd.MultivariateNormalTriL(
loc=loc, scale_tril=cov.cholesky().to_dense())
mvn_precision = tfd_e.MultivariateNormalPrecisionFactorLinearOperator(
loc=loc,
precision_factor=precision_factor,
precision=precision if use_precision else None,
)
point = tf.random.normal(
batch_shape + (event_size,), dtype=dtype, seed=test_util.test_seed())
mvn_scale_log_prob, mvn_precision_log_prob = self.evaluate(
[mvn_scale.log_prob(point),
mvn_precision.log_prob(point)])
self.assertAllClose(
mvn_scale_log_prob, mvn_precision_log_prob, atol=5e-4, rtol=5e-4)
batch_point = tf.random.normal(
(2,) + batch_shape + (event_size,),
dtype=dtype,
seed=test_util.test_seed())
mvn_scale_log_prob, mvn_precision_log_prob = self.evaluate(
[mvn_scale.log_prob(batch_point),
mvn_precision.log_prob(batch_point)])
self.assertAllClose(
mvn_scale_log_prob, mvn_precision_log_prob, atol=5e-4, rtol=5e-4)
samples = mvn_precision.sample(n_samples, seed=test_util.test_seed())
arrs = self.evaluate({
'stddev': tf.sqrt(cov.diag_part()),
'var': cov.diag_part(),
'cov': cov.to_dense(),
'sample_mean': tf.reduce_mean(samples, axis=0),
'sample_var': tfp.stats.variance(samples, sample_axis=0),
'sample_cov': tfp.stats.covariance(samples, sample_axis=0),
})
self.assertAllClose(
arrs['sample_mean'],
loc if loc is not None else np.zeros_like(arrs['cov'][..., 0]),
atol=5 * np.max(arrs['stddev']) / np.sqrt(n_samples))
self.assertAllClose(
arrs['sample_var'],
arrs['var'],
atol=5 * np.sqrt(2) * np.max(arrs['var']) / np.sqrt(n_samples))
self.assertAllClose(
arrs['sample_cov'],
arrs['cov'],
atol=5 * np.sqrt(2) * np.max(arrs['var']) / np.sqrt(n_samples))
def test_dynamic_shape(self):
x = tf.Variable(ps.ones([7, 3]), shape=[7, None])
self.evaluate(x.initializer)
# Check that the shape is actually `None`.
if not tf.executing_eagerly():
last_shape = x.shape[-1]
if last_shape is not None: # This is a `tf.Dimension` in tf1.
last_shape = last_shape.value
self.assertIsNone(last_shape)
dynamic_dist = tfd_e.MultivariateNormalPrecisionFactorLinearOperator(
precision_factor=tf.linalg.LinearOperatorDiag(tf.ones_like(x)))
static_dist = tfd_e.MultivariateNormalPrecisionFactorLinearOperator(
precision_factor=tf.linalg.LinearOperatorDiag(tf.ones([7, 3])))
in_ = tf.zeros([7, 3])
self.assertAllClose(self.evaluate(dynamic_dist.log_prob(in_)),
static_dist.log_prob(in_))
@test_combinations.generate(
test_combinations.combine(
batch_shape=[(), (2,)],
dtype=[np.float32, np.float64],
),
)
def test_mean_and_mode(self, batch_shape, dtype):
event_size = 3
cov = self._random_constant_spd_linop(
event_size, batch_shape=batch_shape, dtype=dtype)
precision_factor = cov.inverse().cholesky()
# Make sure to evaluate here, else you'll have a random loc vector!
loc = self.evaluate(
tf.random.normal(
batch_shape + (event_size,),
dtype=dtype,
seed=test_util.test_seed()))
mvn_precision = tfd_e.MultivariateNormalPrecisionFactorLinearOperator(
loc=loc,
precision_factor=precision_factor)
self.assertAllClose(mvn_precision.mean(), loc)
self.assertAllClose(mvn_precision.mode(), loc)
@test_combinations.generate(
test_combinations.combine(
batch_shape=[(), (2,)],
use_precision=[True, False],
dtype=[np.float32, np.float64],
),
)
def test_cov_var_stddev(self, batch_shape, use_precision, dtype):
event_size = 3
cov = self._random_constant_spd_linop(
event_size, batch_shape=batch_shape, dtype=dtype)
precision = cov.inverse()
precision_factor = precision.cholesky()
# Make sure to evaluate here, else you'll have a random loc vector!
loc = self.evaluate(
tf.random.normal(
batch_shape + (event_size,),
dtype=dtype,
seed=test_util.test_seed()))
mvn_precision = tfd_e.MultivariateNormalPrecisionFactorLinearOperator(
loc=loc,
precision_factor=precision_factor,
precision=precision if use_precision else None)
self.assertAllClose(mvn_precision.covariance(), cov.to_dense(), atol=1e-4)
self.assertAllClose(mvn_precision.variance(), cov.diag_part(), atol=1e-4)
self.assertAllClose(mvn_precision.stddev(), tf.sqrt(cov.diag_part()),
atol=1e-5)
if __name__ == '__main__':
test_util.main()
|
[
"[email protected]"
] | |
bd374ed841b18e22b1108b9e8b2c12dac786d446
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/artificial/transf_Anscombe/trend_MovingMedian/cycle_30/ar_12/test_artificial_128_Anscombe_MovingMedian_30_12_100.py
|
ccc8235516ad5f149b1dacaabb8d05d4860cb57f
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 |
BSD-3-Clause
| 2023-03-08T21:45:40 | 2016-10-13T09:30:30 |
Python
|
UTF-8
|
Python
| false | false | 269 |
py
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 30, transform = "Anscombe", sigma = 0.0, exog_count = 100, ar_order = 12);
|
[
"[email protected]"
] | |
f0c8e4a7e7eedd40041bc507e96e9ebd1d7c55c0
|
3e713a67f370d1cc1ba0882159a03b673bd22f9a
|
/DataStructure and Alogorithms/[HACKERRANK]-cats and a mouse .py
|
d1edad7dc3eaf287f6fbb70ca5520a5f5a091571
|
[] |
no_license
|
s-abhishek2399/competitive-progamming--PYTHON
|
739797ffea0b92cc2781559e7d4eed1d274678a6
|
29f9e63cfc05c01fa605c14fb8a3a55920296d43
|
refs/heads/master
| 2023-03-08T02:40:00.962109 | 2021-02-16T15:07:52 | 2021-02-16T15:07:52 | 328,732,345 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 239 |
py
|
n = int(input())
for i in range(n):
l=[int(x) for x in input().split()]
a = l[0]-l[2]
b = l[1]-l[2]
if abs(a)<abs(b):
print("Cat A")
elif abs(b)<abs(a):
print("Cat B")
else:
print("Mouse C")
|
[
"[email protected]"
] | |
3fa07e5008b46020f7867d26769152465c99df3f
|
07ffe8db66fbd50f87315df34074e20b3ce67f0e
|
/about/models.py
|
80a8e89e5bba77662e330b6c74d3a6e0a8d8a48a
|
[] |
no_license
|
jakiiii/jtro-ecommerce
|
9acc6d37797e409a79921358958e50d66f20a0b4
|
e6e5ae04c7756e99f862634ad21f1d3877b501ab
|
refs/heads/master
| 2023-01-22T09:44:47.891286 | 2020-12-01T23:32:19 | 2020-12-01T23:32:19 | 316,202,084 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 488 |
py
|
from django.db import models
from ckeditor_uploader.fields import RichTextUploadingField
from jtro_ecommerce.utils import upload_image_path
class About(models.Model):
title = models.CharField(max_length=150)
image = models.ImageField(upload_to=upload_image_path, null=True, blank=True)
description = RichTextUploadingField()
timestamp = models.DateField(auto_now_add=True)
update = models.DateField(auto_now=True)
def __str__(self):
return "ABOUT US"
|
[
"[email protected]"
] | |
03668cd8657241fcab646595058f80c9f4125756
|
c3aad901e32f735625f938b4c26cdfa307254a6b
|
/biothings_explorer/api_preprocess/reasoner.py
|
b89e427492a7a016d9355ed1ccfbe18fd59cd9d8
|
[
"Apache-2.0"
] |
permissive
|
andrewgcodes/biothings_explorer
|
73c598fae2171e8b61687325fa1c1ee1a625fbe1
|
b54aa195bbed19ff5be09ed24dee869b24bb3c16
|
refs/heads/master
| 2022-12-23T18:06:34.061346 | 2022-08-18T20:23:17 | 2022-08-18T20:23:17 | 279,000,723 | 0 | 0 |
Apache-2.0
| 2020-07-12T05:49:16 | 2020-07-12T05:49:15 | null |
UTF-8
|
Python
| false | false | 939 |
py
|
from itertools import groupby
def restructure_reasoner_response(json_doc):
"""Restructure the API output from reasoner API.
:param: json_doc: json output from reasoner API
"""
edges = json_doc['knowledge_graph']['edges']
if not edges:
return {}
res = {}
edges = sorted(edges, key=lambda x: x['type'])
for k, g in groupby(edges, lambda x: x['type']):
res[k] = []
for _item in g:
if _item['target_id'].startswith("PANTHER.FAMILY"):
_item['panther'] = _item['target_id'][15:]
if _item['target_id'].startswith("CHEBI"):
_item['chebi'] = _item['target_id']
if _item['target_id'].startswith("CHEMBL:"):
_item['chembl'] = _item['target_id'][7:]
if _item['target_id'].startswith("MONDO:"):
_item['mondo'] = _item['target_id'][6:]
res[k].append(_item)
return res
|
[
"[email protected]"
] | |
57839fbdaf39ce151f280eecf2ac06516ded4c83
|
0123229ac84c057b188f6b17c1131ec630ecaf25
|
/stochastic_gradient_descent/test_sire_offset/offset_fix_phiandpsi/extract_frcmod.py
|
4598f60b3d2efdd919bfb1c52e5dd461d50b8d9e
|
[] |
no_license
|
michellab/paramfit-tests
|
689851ab95406aad7160403c4a70d3ec6be91981
|
39598e93936beff48aefff1604483fd265a5f46a
|
refs/heads/master
| 2021-01-13T05:47:23.287857 | 2017-04-24T10:58:21 | 2017-04-24T10:58:21 | 76,249,680 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 461 |
py
|
from parmed.amber import *
import parmed
import os
base = AmberParm("orig.prmtop", "fit.rst7")
parmed.tools.writeFrcmod(base,"test.frcmod").execute()
frcmod_file = open("test.frcmod","r").readlines()
for fr in frcmod_file:
if "C -N -CT-C " in fr: # this is phi
print("value of Phi")
print(fr)
elif "N -CT-C -N" in fr:
print("value of Psi")
print(fr)
else:
continue
cmd = "rm test.frcmod"
os.system(cmd)
|
[
"[email protected]"
] | |
35a457296554b87038a7ebfa03198c4b1c60e697
|
ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f
|
/Sourcem8/pirates/effects/VoodooAura2.py
|
852b91918310ef820ba576e0b80105d5ea24b395
|
[] |
no_license
|
BrandonAlex/Pirates-Online-Retribution
|
7f881a64ec74e595aaf62e78a39375d2d51f4d2e
|
980b7448f798e255eecfb6bd2ebb67b299b27dd7
|
refs/heads/master
| 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,226 |
py
|
# File: V (Python 2.4)
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.particles import ParticleEffect
from direct.particles import Particles
from direct.particles import ForceGroup
from otp.otpbase import OTPRender
from PooledEffect import PooledEffect
from EffectController import EffectController
import random
class VoodooAura2(PooledEffect, EffectController):
cardScale = 128.0
def __init__(self):
PooledEffect.__init__(self)
EffectController.__init__(self)
model = loader.loadModel('models/effects/battleEffects')
self.card = model.find('**/effectVoodooShockwave')
if not self.particleDummy:
self.particleDummy = self.attachNewNode(ModelNode('VoodooAura2ParticleDummy'))
self.particleDummy.setDepthWrite(0)
self.particleDummy.setLightOff()
self.particleDummy.hide(OTPRender.ShadowCameraBitmask)
self.effectColor = Vec4(1, 1, 1, 1)
self.f = ParticleEffect.ParticleEffect('VoodooAura2')
self.f.reparentTo(self)
self.p0 = Particles.Particles('particles-1')
self.p0.setFactory('PointParticleFactory')
self.p0.setRenderer('SpriteParticleRenderer')
self.p0.setEmitter('PointEmitter')
self.f.addParticles(self.p0)
self.p0.setPoolSize(64)
self.p0.setBirthRate(0.02)
self.p0.setLitterSize(1)
self.p0.setLitterSpread(0)
self.p0.setSystemLifespan(0.0)
self.p0.setLocalVelocityFlag(0)
self.p0.setSystemGrowsOlderFlag(0)
self.p0.factory.setLifespanBase(1.0)
self.p0.factory.setLifespanSpread(0.0)
self.p0.factory.setMassBase(1.0)
self.p0.factory.setMassSpread(0.0)
self.p0.factory.setTerminalVelocityBase(400.0)
self.p0.factory.setTerminalVelocitySpread(0.0)
self.p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
self.p0.renderer.setUserAlpha(0.5)
self.p0.renderer.setFromNode(self.card)
self.p0.renderer.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
self.p0.renderer.setXScaleFlag(1)
self.p0.renderer.setYScaleFlag(1)
self.p0.renderer.setAnimAngleFlag(0)
self.p0.renderer.setInitialXScale(0.0050000000000000001 * self.cardScale)
self.p0.renderer.setFinalXScale(0.012 * self.cardScale)
self.p0.renderer.setInitialYScale(0.0050000000000000001 * self.cardScale)
self.p0.renderer.setFinalYScale(0.012 * self.cardScale)
self.p0.renderer.setNonanimatedTheta(0.0)
self.p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
self.p0.renderer.setAlphaDisable(0)
self.p0.renderer.getColorInterpolationManager().addLinear(0.0, 1.0, Vec4(0.80000000000000004, 0.80000000000000004, 0.80000000000000004, 1), self.effectColor, 1)
self.p0.renderer.setColorBlendMode(ColorBlendAttrib.MAdd, ColorBlendAttrib.OIncomingAlpha, ColorBlendAttrib.OOne)
self.p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
self.p0.emitter.setAmplitude(0.20000000000000001)
self.p0.emitter.setAmplitudeSpread(0.0)
self.p0.emitter.setOffsetForce(Vec3(0.0, 0.0, 0.0))
self.p0.emitter.setExplicitLaunchVector(Vec3(1.0, 0.0, 0.0))
self.p0.emitter.setRadiateOrigin(Point3(0.0, 0.0, 0.0))
def createTrack(self, rate = 1):
self.startEffect = Sequence(Func(self.p0.setBirthRate, 0.029999999999999999), Func(self.p0.clearToInitial), Func(self.f.start, self, self.particleDummy))
self.endEffect = Sequence(Func(self.p0.setBirthRate, 100), Wait(1.0), Func(self.cleanUpEffect))
self.track = Sequence(self.startEffect, Wait(0.75), self.endEffect)
def setEffectColor(self, color):
self.effectColor = color
self.p0.renderer.getColorInterpolationManager().clearToInitial()
self.p0.renderer.getColorInterpolationManager().addLinear(0.0, 1.0, Vec4(0.80000000000000004, 0.80000000000000004, 0.80000000000000004, 1), self.effectColor, 1)
def cleanUpEffect(self):
self.detachNode()
self.checkInEffect(self)
def destroy(self):
EffectController.destroy(self)
PooledEffect.destroy(self)
|
[
"[email protected]"
] | |
f67e1e6de3d56e55471bc879166edec1c32ba813
|
8da79aedfb20c9798de0f4db4c5d85929a32f82b
|
/boo/columns.py
|
200ff1a19478b1dd373b0d3bbfd9b11bfc79fc79
|
[
"MIT"
] |
permissive
|
nasingfaund/boo
|
a94e941ca8d3251fbb320c2e2f63e439f7ef4d59
|
96d08857abd790bc44f48256e7be7da130543a84
|
refs/heads/master
| 2023-07-01T00:33:33.085311 | 2021-08-03T21:23:03 | 2021-08-03T21:23:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,429 |
py
|
"""Преобразование сырых названий столбцов в названия переменных.
Описания полей отчетности можно посмотреть например в:
http://info.avtovaz.ru/files/avtovaz_ras_fs_2012_rus_secured.pdf
Более подробно о публикуемой форме отчетности:
http://www.consultant.ru/document/cons_doc_LAW_103394/b990bf4a13bd23fda86e0bba50c462a174c0d123/#dst100515
"""
from collections import OrderedDict
from dataclasses import dataclass
import numpy
import pandas as pd
# Column names as provided at Rosstat web site
TTL_COLUMNS = [
"Наименование",
"ОКПО",
"ОКОПФ",
"ОКФС",
"ОКВЭД",
"ИНН",
"Код единицы измерения",
"Тип отчета",
"11103",
"11104",
"11203",
"11204",
"11303",
"11304",
"11403",
"11404",
"11503",
"11504",
"11603",
"11604",
"11703",
"11704",
"11803",
"11804",
"11903",
"11904",
"11003",
"11004",
"12103",
"12104",
"12203",
"12204",
"12303",
"12304",
"12403",
"12404",
"12503",
"12504",
"12603",
"12604",
"12003",
"12004",
"16003",
"16004",
"13103",
"13104",
"13203",
"13204",
"13403",
"13404",
"13503",
"13504",
"13603",
"13604",
"13703",
"13704",
"13003",
"13004",
"14103",
"14104",
"14203",
"14204",
"14303",
"14304",
"14503",
"14504",
"14003",
"14004",
"15103",
"15104",
"15203",
"15204",
"15303",
"15304",
"15403",
"15404",
"15503",
"15504",
"15003",
"15004",
"17003",
"17004",
"21103",
"21104",
"21203",
"21204",
"21003",
"21004",
"22103",
"22104",
"22203",
"22204",
"22003",
"22004",
"23103",
"23104",
"23203",
"23204",
"23303",
"23304",
"23403",
"23404",
"23503",
"23504",
"23003",
"23004",
"24103",
"24104",
"24213",
"24214",
"24303",
"24304",
"24503",
"24504",
"24603",
"24604",
"24003",
"24004",
"25103",
"25104",
"25203",
"25204",
"25003",
"25004",
"32003",
"32004",
"32005",
"32006",
"32007",
"32008",
"33103",
"33104",
"33105",
"33106",
"33107",
"33108",
"33117",
"33118",
"33125",
"33127",
"33128",
"33135",
"33137",
"33138",
"33143",
"33144",
"33145",
"33148",
"33153",
"33154",
"33155",
"33157",
"33163",
"33164",
"33165",
"33166",
"33167",
"33168",
"33203",
"33204",
"33205",
"33206",
"33207",
"33208",
"33217",
"33218",
"33225",
"33227",
"33228",
"33235",
"33237",
"33238",
"33243",
"33244",
"33245",
"33247",
"33248",
"33253",
"33254",
"33255",
"33257",
"33258",
"33263",
"33264",
"33265",
"33266",
"33267",
"33268",
"33277",
"33278",
"33305",
"33306",
"33307",
"33406",
"33407",
"33003",
"33004",
"33005",
"33006",
"33007",
"33008",
"36003",
"36004",
"41103",
"41113",
"41123",
"41133",
"41193",
"41203",
"41213",
"41223",
"41233",
"41243",
"41293",
"41003",
"42103",
"42113",
"42123",
"42133",
"42143",
"42193",
"42203",
"42213",
"42223",
"42233",
"42243",
"42293",
"42003",
"43103",
"43113",
"43123",
"43133",
"43143",
"43193",
"43203",
"43213",
"43223",
"43233",
"43293",
"43003",
"44003",
"44903",
"61003",
"62103",
"62153",
"62203",
"62303",
"62403",
"62503",
"62003",
"63103",
"63113",
"63123",
"63133",
"63203",
"63213",
"63223",
"63233",
"63243",
"63253",
"63263",
"63303",
"63503",
"63003",
"64003",
"Дата актуализации",
]
# -- Text fields
MAPPER = OrderedDict(
[
("Наименование", "name"),
("ОКПО", "okpo"),
("ОКОПФ", "okopf"),
("ОКФС", "okfs"),
("ОКВЭД", "okved"),
("ИНН", "inn"),
("Код единицы измерения", "unit"),
("Тип отчета", "report_type"),
("Дата актуализации", "date_published"),
        # -- Balance sheet
        # -- Non-current assets
("1100", "ta_fix"),
("1150", "of"),
("1170", "ta_fix_fin"),
        # -- Current assets
("1200", "ta_nonfix"),
("1210", "inventory"),
("1230", "receivables"),
("1240", "ta_nonfix_fin"),
("1250", "cash"),
("1600", "ta"),
        # -- Liabilities
("1300", "tp_capital"),
("1360", "retained_earnings"),
("1400", "tp_long"),
("1410", "debt_long"),
("1500", "tp_short"),
("1510", "debt_short"),
("1520", "payables"),
("1700", "tp"),
        # -- Income statement (OPU)
("2110", "sales"),
("2120", "costs"),
("2200", "profit_oper"),
("2330", "exp_interest"),
("2300", "profit_before_tax"),
("2400", "profit_after_tax"),
        # -- Cash flow statement (ODDS)
("4400", "cf"),
        # -- Operating activities
("4100", "cf_oper"),
("4110", "cf_oper_in"),
("4111", "cf_oper_in_sales"),
("4120", "cf_oper_out"),
("4121", "paid_to_supplier"),
("4122", "paid_to_worker"),
("4123", "paid_interest"),
("4124", "paid_profit_tax"),
        # -- Investing activities
("4200", "cf_inv"),
("4210", "cf_inv_in"),
("4220", "cf_inv_out"),
("4221", "paid_fa_investment"),
        # -- Financing activities
("4300", "cf_fin"),
("4310", "cf_fin_in"),
("4311", "cf_loan_in"),
("4312", "cf_eq_in_1"),
("4313", "cf_eq_in_2"),
("4314", "cf_bond_in"),
("4320", "cf_fin_out"),
("4321", "cf_eq_out"),
("4322", "cf_div_out"),
("4323", "cf_debt_out"),
]
)
def ask(code):
return MAPPER.get(str(code))
def fst(text):
return text[0]
def last(text):
return text[-1]
def trim(text):
return text[0:-1]
NON_NUMERIC = "x"
# This type assures missing interger values will be converted to NaNs
# See https://pandas.pydata.org/pandas-docs/stable/user_guide/integer_na.html
# and https://github.com/ru-corporate/boo/issues/18
INT_TYPE = pd.Int64Dtype()
@dataclass
class Column:
code: str
section: str
lag: bool
def rename_with(self, mapper: dict):
new_code = mapper.get(self.code, self.code)
return Column(new_code, self.section, self.lag)
def is_numeric(self):
return self.section != NON_NUMERIC
@property
def label(self):
return self.code + ("_lag" if self.lag else "")
@property
def dtype(self):
return INT_TYPE if self.is_numeric() else str
def is_lagged(text):
if fst(text) == "3":
return False
if last(text) == "3":
return False
if last(text) == "4":
return True
return None
assert is_lagged("63243") is False
assert is_lagged("Дата актуализации") is None
assert is_lagged("23304") is True
def section(text):
num = text[0]
return {
"1": "Баланс",
"2": "ОПУ",
"3": "Изменения капитала",
"4": "ОДДС",
"6": "Extras",
}.get(num, NON_NUMERIC)
def code(text):
if fst(text) in ["1", "2", "4", "6"]:
return text[0:-1]
else:
return text
def column(text):
return Column(code(text), section(text), is_lagged(text))
columns = [column(x) for x in TTL_COLUMNS]
INDEX = [i for (i, c) in enumerate(columns) if c.rename_with(MAPPER) != c]
columns_short = [c.rename_with(MAPPER) for c in columns if c.rename_with(MAPPER) != c]
NAMES = {c.label: c.dtype for c in columns_short}
assert len(INDEX) == len(NAMES)
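# A minimal usage sketch (illustrative only): parse one raw Rosstat label with
# column() and look up one known report code with ask().
if __name__ == "__main__":
    print(column("11103"))  # Column(code='1110', section='Баланс', lag=False)
    print(ask(1600))        # 'ta'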
|
[
"[email protected]"
] | |
f771322752f5feab04cb77f3b2f35d3026f3513f
|
8aa3069cd4840fd216b917187a9c96bd7d3e2367
|
/Exercícios/binomiofatorial.py
|
424d1e4b8b3bb4389d4000032efe0357afec0102
|
[] |
no_license
|
rafaelsaidbc/USP
|
b10a28f958a1af5670fe48061f7b0c8b9db5d5d0
|
8c077f392fccd814380ea0e1b5ec228a54d4f779
|
refs/heads/master
| 2020-03-24T00:41:12.718523 | 2018-07-25T18:31:47 | 2018-07-25T18:31:47 | 142,302,564 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 901 |
py
|
def fatorial(n):
    fat = 1  # fat starts at 1 because 1 is the neutral element of multiplication
    while(n > 1):  # while n is greater than 1, the loop keeps running
        fat = fat * n  # multiply fat by n
        n = n - 1  # update n by subtracting 1
    return fat  # end the loop and return fat
def numero_binomial(n, k):
return fatorial(n) / (fatorial(k) * fatorial(n - k))
def testa_fatorial():  # tests the fatorial function
if fatorial(1) == 1:
print("Funciona para 1")
else:
print("Não funciona para 1")
if fatorial(2) == 2:
print("Funciona para 2")
else:
print("Não funciona para 2")
if fatorial(0) == 1:
print("Funciona para 0")
else:
print("Não funciona para 0")
if fatorial(5) == 120:
print("Funciona para 5")
else:
print("Não funciona para 5")
|
[
"[email protected]"
] | |
cb840373802f4a2f053aa9b6db014d5a830284dd
|
404cb0431675327a751f7a6f422f53288a92b85b
|
/chirp/library/order_test.py
|
33fccabf573816f97b45246bff10199393e598bb
|
[
"Apache-2.0"
] |
permissive
|
chirpradio/chirpradio-machine
|
ade94d7ac9ded65f91e1b3845be408723c0501da
|
6fea6a87f2eb3cfac2a47831892c9ce02163b03b
|
refs/heads/master
| 2023-09-01T02:57:07.749370 | 2023-08-28T23:57:46 | 2023-08-28T23:57:46 | 2,330,078 | 9 | 10 |
Apache-2.0
| 2018-03-16T01:26:29 | 2011-09-05T19:10:48 |
Python
|
UTF-8
|
Python
| false | false | 3,525 |
py
|
#!/usr/bin/env python
import unittest
import mutagen.id3
from chirp.library import order
class OrderTest(unittest.TestCase):
def test_decode(self):
test_cases = (("1", 1, None),
(" 6", 6, None),
("006", 6, None),
("1/2", 1, 2),
("3 of 7", 3, 7),
("03anything04", 3, 4))
for text, order_num, max_num in test_cases:
self.assertEqual((order_num, max_num), order.decode(text))
# These should not be parseable.
error_test_cases = ("", "xxx", "0", "-1", "0/3", "3/", "3/0", "6/5",
"-1/4", "2/-1", "2/-", "3-4", "3/0")
for text in error_test_cases:
self.assertRaises(order.BadOrderError, order.decode, text)
def test_encode(self):
test_cases = ((1, 3, "1/3"), (7, None, "7"))
for order_num, total_num, expected_text in test_cases:
self.assertEqual(expected_text, order.encode(order_num, total_num))
error_test_cases = ((7, 5), (0, 3), (-1, 3), (4, 0), (4, -1))
for order_num, total_num in error_test_cases:
self.assertRaises(order.BadOrderError,
order.encode, order_num, total_num)
def test_standardize_str(self):
self.assertEqual("3", order.standardize_str(" 3 "))
self.assertEqual("3/7", order.standardize_str("3 of 7"))
def test_standardize(self):
tag = mutagen.id3.TRCK(text=["3 of 7"])
order_num, max_num = order.standardize(tag)
self.assertEqual(["3/7"], tag.text)
self.assertEqual(3, order_num)
self.assertEqual(7, max_num)
def test_is_archival(self):
self.assertTrue(order.is_archival("3/7"))
self.assertFalse(order.is_archival("bad"))
self.assertFalse(order.is_archival("3"))
self.assertFalse(order.is_archival("3 of 7"))
self.assertFalse(order.is_archival("7/3"))
self.assertFalse(order.is_archival(" 3/7"))
def test_verify_and_standardize_str_list(self):
# Check the simplest valid case.
self.assertEqual(["1/1"], order.verify_and_standardize_str_list(["1"]))
# Check an already-standardized set.
self.assertEqual(
["1/4", "3/4", "2/4", "4/4"],
order.verify_and_standardize_str_list(
["1/4", "3/4", "2/4", "4/4"]))
# Check strings without a max number.
self.assertEqual(
["1/4", "3/4", "2/4", "4/4"],
order.verify_and_standardize_str_list(["1", "3", "2", "4"]))
# Check mixed formats.
self.assertEqual(
["1/4", "3/4", "2/4", "4/4"],
order.verify_and_standardize_str_list(["1", "3/4", "2", "4 of 4"]))
# Check empty list.
self.assertRaises(order.BadOrderError,
order.verify_and_standardize_str_list, [])
# Check garbage in list.
self.assertRaises(order.BadOrderError,
order.verify_and_standardize_str_list, ["xxx"])
# Check treatment of gaps.
self.assertRaises(order.BadOrderError,
order.verify_and_standardize_str_list,
["1", "2", "4"])
# Check bad max number.
self.assertRaises(order.BadOrderError,
order.verify_and_standardize_str_list,
["1/5", "3/5", "2/5", "4/5"])
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
775d4e974bbace6a037417248f6885324aebea6a
|
85764904e918310f9e4a209f64570dcdcf099818
|
/loutilities/user/roles.py
|
3df49ee5b5f5d63b1edda6261c2976dbd2e6b5e1
|
[
"Apache-2.0"
] |
permissive
|
louking/loutilities
|
05bb20994ae06d2e68989cd6a779c350a9a430ad
|
aaf7410849d0167001cd5f06ab0dae6563e58ec7
|
refs/heads/master
| 2023-07-24T18:32:36.128102 | 2023-07-15T10:02:43 | 2023-07-15T10:02:43 | 5,824,315 | 2 | 2 | null | 2023-05-10T09:59:37 | 2012-09-15T21:29:29 |
Python
|
UTF-8
|
Python
| false | false | 3,136 |
py
|
###########################################################################################
# roles - common location for xtility role declaration
#
# Date Author Reason
# ---- ------ ------
# 03/11/20 Lou King Create
#
# Copyright 2020 Lou King. All rights reserved
###########################################################################################
from loutilities.user.model import APP_CONTRACTS, APP_MEMBERS, APP_ROUTES, APP_SCORES, APP_ALL
# common roles
ROLE_SUPER_ADMIN = 'super-admin'
ROLES_COMMON = [ROLE_SUPER_ADMIN]
roles_common = [
{'name': 'super-admin', 'description': 'allowed to do everything on all applications', 'apps': APP_ALL},
]
# members roles
ROLE_LEADERSHIP_ADMIN = 'leadership-admin'
ROLE_LEADERSHIP_MEMBER = 'leadership-member'
ROLE_MEMBERSHIP_ADMIN = 'membership-admin'
ROLE_MEETINGS_ADMIN = 'meetings-admin'
ROLE_MEETINGS_MEMBER = 'meetings-member'
ROLE_RACINGTEAM_ADMIN = 'racingteam-admin'
ROLE_RACINGTEAM_MEMBER = 'racingteam-member'
roles_members = [
{'name': ROLE_LEADERSHIP_ADMIN, 'description': 'access to leadership tasks for members application', 'apps':[APP_MEMBERS]},
{'name': ROLE_LEADERSHIP_MEMBER, 'description': 'user of leadership tasks for members application', 'apps':[APP_MEMBERS]},
{'name': ROLE_MEMBERSHIP_ADMIN, 'description': 'access to membership admininstration for members application', 'apps':[APP_MEMBERS]},
{'name': ROLE_MEETINGS_ADMIN, 'description': 'access to meetings administration for members application', 'apps':[APP_MEMBERS]},
{'name': ROLE_MEETINGS_MEMBER, 'description': 'user of meetings for members application', 'apps':[APP_MEMBERS]},
{'name': ROLE_RACINGTEAM_ADMIN, 'description': 'access to racingteam administration for members application', 'apps':[APP_MEMBERS]},
{'name': ROLE_RACINGTEAM_MEMBER, 'description': 'user of racingteam module for members application', 'apps':[APP_MEMBERS]},
]
# routes roles
ROLE_ROUTES_ADMIN = 'routes-admin'
ROLE_ICON_ADMIN = 'icon-admin'
roles_routes = [{'name': ROLE_ROUTES_ADMIN, 'description': 'access to routes for routes application', 'apps':[APP_ROUTES]},
{'name': ROLE_ICON_ADMIN, 'description': 'access to icons for routes application', 'apps':[APP_ROUTES]}
]
# contracts roles
ROLE_EVENT_ADMIN = 'event-admin'
ROLE_SPONSOR_ADMIN = 'sponsor-admin'
roles_contracts = [{'name': ROLE_EVENT_ADMIN, 'description': 'access to events for contracts application', 'apps':[APP_CONTRACTS]},
{'name': ROLE_SPONSOR_ADMIN, 'description': 'access to sponsors/races for contracts application', 'apps':[APP_CONTRACTS]}
]
# scores roles
ROLE_SCORES_ADMIN = 'scores-admin'
ROLE_SCORES_VIEWER = 'scores-viewer'
roles_scores = [{'name': ROLE_SCORES_ADMIN, 'description': 'administer scores application', 'apps':[APP_SCORES]},
{'name': ROLE_SCORES_VIEWER, 'description': 'view scores application', 'apps':[APP_SCORES]},
]
all_roles = [roles_common, roles_contracts, roles_members, roles_routes, roles_scores]
|
[
"[email protected]"
] | |
c4fb0116985e3ace94fc0fe7bbfb80ab7f53d331
|
7edb6f64afb9a9d5fd2b712faae9841d45c3a3b3
|
/monkeyAndPerformance/allCode/performanceTest/traffic/traffic.py
|
9edb99221fc8f0b920e0abebe9a4f074378baddb
|
[] |
no_license
|
Hanlen520/AppSpecialTest
|
413babbbecbeaa8e25dd1fd70dd349a1de07eb5e
|
06f69f116245162220985ad2632fbff3af72450c
|
refs/heads/master
| 2023-04-22T19:59:35.523780 | 2019-08-08T09:48:28 | 2019-08-08T09:48:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,398 |
py
|
import csv,os,time
from config.config import *
from monkeyAndPerformance.allCode.util.gettimestr import GetTimeStr
gettimestr = GetTimeStr()  # instantiate GetTimeStr
# controller class
class Controller(object):
    def __init__(self):
        self.counter = RunTrafficCount  # number of times to run the test
        # array used to collect the data
        self.alldata = [("deviceid","appversion","timestamp", "traffic")]  # data to save: timestamps and traffic values
    # a single test pass
    def TestProcessOnce(self):
        # run the command that looks up the app process
        cmd = 'adb shell "ps | grep %s"' % AppPackageName  # get the process
content = os.popen(cmd)
result = content.readlines()
print("result:%s"% result)
print("result.length:%s" % len(result))
if len(result):
            # get the process ID
            # pid = result[0].split(" ")[5]
            pid = result[0].split(" ")[3]
            print("result[0].split():%s" % result[0].split(" "))
            print("pid:%s"% pid)
            self.DeleyTime(3)
            # read the traffic used by this process ID
            cmd = 'adb shell cat /proc/%s/net/dev'% pid  # get the traffic
content = os.popen(cmd)
traffic = content.readlines()
print("traffic:%s"% traffic)
            # pull the traffic counters out of the output
            for line in traffic:
                print("line:%s" % line)
                if "wlan0" in line:
                    # replace all whitespace runs with '#'
                    line = "#".join(line.split())
                    print("line##:%s"% line)
                    # split on '#' to get the received and transmitted byte counts
                    receive = line.split("#")[1]
                    print("receive#:%s"%receive)
                    transmit = line.split("#")[9]
                    print("transmit##:%s"% transmit)
                # if "eth0" in line:
                #     # replace all whitespace runs with '#'
                #     line = "#".join(line.split())
                #     # split on '#' to get the received and transmitted byte counts
                #     receive = line.split("#")[1]
                #     transmit = line.split("#")[9]
                # elif "eth1" in line:
                #     # replace all whitespace runs with '#'
                #     line = "#".join(line.split())
                #     # split on '#' to get the received and transmitted byte counts
                #     receive2 = line.split("#")[1]
                #     transmit2 = line.split("#")[9]
            # sum up all of the traffic
            # alltraffic = int(receive) + int(transmit) + int(receive2) + int(transmit2)
            alltraffic = int(receive) + int(transmit)
            # express the traffic value in KB
            alltraffic = alltraffic/1024
            currenttime = self.GetCurrentTime()  # get the current time
            # store the collected data in the array
            self.alldata.append((TestDeviceID,AppVersion,currenttime,alltraffic))  # append the row to self.alldata
        else:
            print("Target process not found; make sure the app is running")
    # delay helper
    def DeleyTime(self,delaytime):
        delaytime = int(delaytime)
        time.sleep(delaytime)  # sleep for the requested number of seconds
        print("waiting %s seconds..." % delaytime)
    # run the test pass multiple times
def RunMore(self):
        # put the phone into the not-charging state
cmd = 'adb shell dumpsys battery set status 1'
os.popen(cmd)
self.DeleyTime(3)
print("循环开始时间:%s" % self.GetCurrentTime() )
while self.counter>0: # 如果次数大于0
self.TestProcessOnce() # 则执行一次测试过程
self.counter = self.counter -1 # 测试次数减一
self.DeleyTime(5) # 间隔5秒取一次值
gettimestr.outPutMyLog("流量统计剩余运行次数为:%s" % self.counter)
print("循环结束时间:%s" % self.GetCurrentTime())
#获取当前存储数据的时间戳
def GetCurrentTime(self):
currenttime = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime()) # 获取当前时间
return currenttime # 返回当前时间
# 获取当前时间的字符串
def GetCurrentTimeString(self):
currenttime = time.strftime("%Y%m%d%H%M%S", time.localtime()) # 获取当前时间
return currenttime # 返回当前时间
#存储数据到CSV时间
def SaveDataToCSV(self,timestr):
basedir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) + "/" + "codeResult"
nyrsfmdir = gettimestr.createNYRSFMdir(basedir,timestr)
csvfile = "%s/%s_%s" % (nyrsfmdir,timestr,AppTrafficCSVFile)
        opencsvfile = open(csvfile, "w",newline="")  # newline="" avoids blank lines when writing CSV in Python 3
        writercsv = csv.writer(opencsvfile)  # create the CSV writer
        writercsv.writerows(self.alldata)  # write the collected rows to the CSV file
        opencsvfile.close()  # close the file
        print("data: %s" % self.alldata)
        print("data saved to: %s" % csvfile)
        print("traffic consumed: subtract the first traffic value from the last one to get the traffic used by this run")
    def run(self,timestr):  # run everything
self.RunMore()
self.SaveDataToCSV(timestr)
if __name__ == "__main__":
timestr = gettimestr.getTimeStr()
controller = Controller()
controller.run(timestr)
|
[
"[email protected]"
] | |
25e372cb14bdc5d7011802d05410d01a864a361a
|
7f8d2288dc8d81275269bdb8e8f196339a52d30d
|
/code/1010_solution.py
|
c14133019520efb5a27564644e2a7e131773bfda
|
[] |
no_license
|
ishaansharma/leetcode-3
|
f9cab568c31322e2bf84768264f3c644182cd470
|
9081dd3ff86409d554b0298a8152ed40a6befa96
|
refs/heads/master
| 2023-03-25T15:36:04.235650 | 2021-03-30T20:15:45 | 2021-03-30T20:15:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 229 |
py
|
class Solution:
def numPairsDivisibleBy60(self, time: List[int]) -> int:
count = 0
seen = [0] * 60
for t in time:
count += seen[-t % 60]
seen[t % 60] += 1
return count
|
[
"[email protected]"
] | |
09c0fefdd010970f39b250148bf0b0160b5f65a1
|
a00fdfc743262d3d9253bab1f2e8b10f99f013ee
|
/Bambu/bambuToNero.py
|
88f058034181c1d5bdb4ff97c5bcf43358b2fc8b
|
[] |
no_license
|
pdoming/NeroProducer
|
2a97101002c626d7f23f3c80e1abfaacc5c81968
|
8082361fa0a05c83cc6c6aacb1bdd5de24f65115
|
refs/heads/master
| 2021-01-15T17:35:58.814592 | 2015-07-25T16:50:39 | 2015-07-25T16:50:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,400 |
py
|
from MitAna.TreeMod.bambu import mithep, analysis
import os
mitdata = os.environ['MIT_DATA']
from MitPhysics.Mods.GoodPVFilterMod import goodPVFilterMod
from MitPhysics.Mods.JetCorrectionMod import jetCorrectionMod
from MitPhysics.Mods.JetIdMod import jetIdMod
from MitPhysics.Mods.MetCorrectionMod import metCorrectionMod
from MitPhysics.Mods.PFTauIdMod import pfTauIdMod
pfTauIdMod.AddCutDiscriminator(mithep.PFTau.kDiscriminationByRawCombinedIsolationDBSumPtCorr3Hits, 5., False)
from MitPhysics.Mods.ElectronIdMod import electronIdMod
from MitPhysics.Mods.MuonIdMod import muonIdMod
from MitPhysics.Mods.PhotonIdMod import photonIdMod
from MitPhysics.Mods.SeparatePileUpMod import separatePileUpMod
generatorMod = mithep.GeneratorMod(
IsData = False,
CopyArrays = False,
MCMETName = "GenMet"
)
electronTightId = electronIdMod.clone('ElectronTightId',
IsFilterMode = False,
InputName = electronIdMod.GetOutputName(),
OutputName = 'TightElectronId',
IdType = mithep.ElectronTools.kPhys14Tight,
IsoType = mithep.ElectronTools.kPhys14TightIso
)
muonTightId = muonIdMod.clone('MuonTightId',
IsFilterMode = False,
InputName = muonIdMod.GetOutputName(),
OutputName = 'TightMuonId',
IdType = mithep.MuonTools.kMuonPOG2012CutBasedIdTight,
IsoType = mithep.MuonTools.kPFIsoBetaPUCorrected
)
muonTightIdMask = mithep.MaskCollectionMod('TightMuons',
InputName = muonIdMod.GetOutputName(),
MaskName = muonTightId.GetOutputName(),
OutputName = 'TightMuons'
)
fatJetCorrectionMod = mithep.JetCorrectionMod('FatJetCorrection',
InputName = 'AKt8PFJetsCHS',
CorrectedJetsName = 'CorrectedFatJets',
RhoAlgo = mithep.PileupEnergyDensity.kFixedGridFastjetAll
)
if analysis.isRealData:
fatJetCorrectionMod.AddCorrectionFromFile(mitdata + "/74X_dataRun2_Prompt_v1_L1FastJet_AK8PFchs.txt")
fatJetCorrectionMod.AddCorrectionFromFile(mitdata + "/74X_dataRun2_Prompt_v1_L2Relative_AK8PFchs.txt")
fatJetCorrectionMod.AddCorrectionFromFile(mitdata + "/74X_dataRun2_Prompt_v1_L3Absolute_AK8PFchs.txt")
fatJetCorrectionMod.AddCorrectionFromFile(mitdata + "/74X_dataRun2_Prompt_v1_L2L3Residual_AK8PFchs.txt")
else:
fatJetCorrectionMod.AddCorrectionFromFile(mitdata + "/MCRUN2_74_V9_L1FastJet_AK8PFchs.txt")
fatJetCorrectionMod.AddCorrectionFromFile(mitdata + "/MCRUN2_74_V9_L2Relative_AK8PFchs.txt")
fatJetCorrectionMod.AddCorrectionFromFile(mitdata + "/MCRUN2_74_V9_L3Absolute_AK8PFchs.txt")
fatJetIdMod = jetIdMod.clone('FatJetId',
InputName = fatJetCorrectionMod.GetOutputName(),
OutputName = 'GoodFatJets',
MVATrainingSet = mithep.JetIDMVA.nMVATypes
)
photonMediumId = photonIdMod.clone('PhotonMediumId',
IsFilterMode = False,
InputName = photonIdMod.GetOutputName(),
OutputName = 'PhotonMediumId',
IdType = mithep.PhotonTools.kPhys14Medium,
IsoType = mithep.PhotonTools.kPhys14Medium
)
photonTightId = photonMediumId.clone('PhotonTightId',
OutputName = 'PhotonTightId',
IdType = mithep.PhotonTools.kPhys14Tight,
IsoType = mithep.PhotonTools.kPhys14Tight
)
head = 'HEAD'
tag = 'BAMBU_041'
fillers = []
fillers.append(mithep.nero.EventFiller(
RhoAlgo = mithep.PileupEnergyDensity.kFixedGridFastjetAll
))
fillers.append(mithep.nero.VertexFiller(
VerticesName = goodPVFilterMod.GetOutputName()
))
fillers.append(mithep.nero.JetsFiller(
JetsName = jetIdMod.GetOutputName(),
VerticesName = goodPVFilterMod.GetOutputName(),
JetIDMVA = jetIdMod.GetJetIDMVA()
))
fillers.append(mithep.nero.TausFiller(
TausName = pfTauIdMod.GetOutputName()
))
fillers.append(mithep.nero.LeptonsFiller(
ElectronsName = electronIdMod.GetOutputName(),
MuonsName = muonIdMod.GetOutputName(),
ElectronIdsName = electronTightId.GetOutputName(),
MuonIdsName = muonTightId.GetOutputName(),
VerticesName = goodPVFilterMod.GetOutputName(),
PFCandsName = mithep.Names.gkPFCandidatesBrn,
NoPUPFCandsName = separatePileUpMod.GetPFNoPileUpName(),
PUPFCandsName = separatePileUpMod.GetPFPileUpName()
))
fillers.append(mithep.nero.FatJetsFiller(
FatJetsName = fatJetIdMod.GetOutputName()
))
fillers.append(mithep.nero.MetFiller(
MetName = metCorrectionMod.GetOutputName(),
MuonsName = muonTightIdMask.GetOutputName(),
GenMetName = generatorMod.GetMCMETName()
))
fillers.append(mithep.nero.PhotonsFiller(
PhotonsName = photonIdMod.GetOutputName(),
MediumIdName = photonMediumId.GetOutputName(),
TightIdName = photonTightId.GetOutputName(),
VerticesName = goodPVFilterMod.GetOutputName()
))
fillers.append(mithep.nero.MonteCarloFiller())
fillers.append(mithep.nero.TriggerFiller())
fillers.append(mithep.nero.AllFiller())
neroMod = mithep.NeroMod(
Info = 'Nero',
Head = head,
Tag = tag,
FileName = 'nero.root',
PrintLevel = 0
)
for filler in fillers:
neroMod.AddFiller(filler)
sequence = goodPVFilterMod
if not analysis.isRealData:
sequence *= generatorMod
sequence *= separatePileUpMod * \
jetCorrectionMod * \
jetIdMod * \
metCorrectionMod * \
pfTauIdMod * \
electronIdMod * \
muonIdMod * \
photonIdMod * \
electronTightId * \
muonTightId * \
muonTightIdMask * \
fatJetCorrectionMod * \
fatJetIdMod * \
photonMediumId * \
photonTightId * \
neroMod
analysis.SetAllowNoHLTTree(True)
analysis.setSequence(sequence)
|
[
"[email protected]"
] | |
feab3ebba8930e7e527605d29f696b086b58d027
|
4c3094a869f59be8836993469b28f088fef9fff1
|
/Questions/Q_093_RentalCarLocations.py
|
35739a04cd88935d0ee54e3e84963fad486f00b2
|
[] |
no_license
|
Bharadwaja92/DataInterviewQuestions
|
d885d40da4d546a164eee37e7250ddb519fc8954
|
5b002f34c3b1440f4347a098f7ce1db84fc80e7f
|
refs/heads/master
| 2022-11-06T08:57:49.283013 | 2020-06-22T09:10:40 | 2020-06-22T09:10:40 | 269,247,468 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 909 |
py
|
"""""""""
Suppose you're working for a car rental company, looking to model potential location distribution of their
cars at major airports. The company operates in LA, SF, and San Jose.
Customers regularly pickup a car in one of these 3 cities and drop it off in another.
The company is looking to compute how likely it is that a given car will end up in a given city.
You can model this as a Markov chain (where each time step corresponds to a new customer taking the car).
The transition probabilities of the company's car allocation by city is as follows:
SF | LA | San Jose
0.6 0.1 0.3 | SF
0.2 0.8 0.3 | LA
0.2 0.1 0.4 | San Jose
As shown, the probability a car stays in SF is 0.6, the probability it moves from SF to LA is 0.2,
SF to San Jose is 0.2, etc.
Using the information above, determine the probability a car will start in SF but move to LA right after.
"""
|
[
"[email protected]"
] | |
a1b04624df6910adad210fe98bb6ade2e31d986b
|
b772048db1d84de6071dcb3978b6f548d2b42ae4
|
/tests/test_ner.py
|
25161ef7c203bccec745b1000a646113cac4af98
|
[
"BSD-2-Clause"
] |
permissive
|
yutanakamura-tky/MedNER-J
|
46ca13d87b6c4977b4042915ff2105ab4dc62d88
|
a0c68a32553bbbdb9f5ae5fd41584198951bc14c
|
refs/heads/master
| 2023-08-21T23:05:22.645001 | 2021-08-10T02:34:45 | 2021-08-10T02:34:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,623 |
py
|
import unittest
from medner_j import Ner
class TestNer(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.model = Ner.from_pretrained(model_name="BERT", normalizer="dict")
cls.examples = ['それぞれの関節に関節液貯留は見られなかった', 'その後、左半身麻痺、CTにて右前側頭葉の出血を認める。 ']
cls.xmls = ['それぞれの関節に<CN value="かんせつえきちょりゅう;icd=E877;lv=C/freq=高;体液貯留">関節液貯留</CN>は見られなかった', 'その後、<C value="ひだりはんしんまひ;icd=G819;lv=A/freq=高;片麻痺">左半身麻痺</C>、CTにて右前側頭葉の<C value="しゅっけつ;icd=R58;lv=S/freq=高;出血">出血</C>を認める。 ']
cls.dicts = [
[{"span": (8, 13), "type": "CN", "disease":"関節液貯留", "norm":"かんせつえきちょりゅう;icd=E877;lv=C/freq=高;体液貯留"}],
[{"span": (4, 9), "type": "C", "disease": "左半身麻痺", "norm": "ひだりはんしんまひ;icd=G819;lv=A/freq=高;片麻痺"}, {"span": (20, 22), "type": "C", "disease": "出血", "norm": "しゅっけつ;icd=R58;lv=S/freq=高;出血"}]
]
def test_xml(self):
results = self.model.predict(self.examples)
self.assertEqual(results, self.xmls)
def test_dict(self):
results = self.model.predict(self.examples, output_format="dict")
self.assertEqual(results, self.dicts)
@classmethod
def tearDownClass(cls):
del cls.model
del cls.examples
del cls.xmls
del cls.dicts
|
[
"[email protected]"
] | |
d1cd60b8ac3a89b9dd0b4a456d9c166b93f4ffe5
|
67c5269fa4720cf728d4c1dd572c09d5e4e7a321
|
/convert_mcnp71.py
|
db687aef0e14ec73a1479e0f9dc3959d89a76938
|
[] |
no_license
|
SamPUG/data
|
cff882327f5fe79ce2c2fca70d217173300c4f85
|
457755083bb8e05e58bbc3765f52bf8c756abb9c
|
refs/heads/master
| 2020-12-19T14:57:12.806099 | 2020-03-06T08:30:47 | 2020-03-06T08:30:47 | 235,767,080 | 0 | 0 | null | 2020-02-25T14:43:04 | 2020-01-23T09:58:38 |
Python
|
UTF-8
|
Python
| false | false | 4,330 |
py
|
#!/usr/bin/env python3
import argparse
from collections import defaultdict
from pathlib import Path
import sys
import openmc.data
# Make sure Python version is sufficient
assert sys.version_info >= (3, 6), "Python 3.6+ is required"
description = """
Convert ENDF/B-VII.1 ACE data from the MCNP6 distribution into an HDF5 library
that can be used by OpenMC. This assumes that you have a directory containing
subdirectories 'endf71x' and 'ENDF71SaB'. Optionally, if a recent photoatomic
library (e.g., eprdata14) is available, it can also be converted using the
--photon argument.
"""
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
argparse.RawDescriptionHelpFormatter):
pass
parser = argparse.ArgumentParser(
description=description,
formatter_class=CustomFormatter
)
parser.add_argument('-d', '--destination', type=Path, default=Path('mcnp_endfb71'),
help='Directory to create new library in')
parser.add_argument('--libver', choices=['earliest', 'latest'],
default='earliest', help="Output HDF5 versioning. Use "
"'earliest' for backwards compatibility or 'latest' for "
"performance")
parser.add_argument('-p', '--photon', type=Path,
help='Path to photoatomic data library (eprdata12 or later)')
parser.add_argument('mcnpdata', type=Path,
help='Directory containing endf71x and ENDF71SaB')
args = parser.parse_args()
# Check arguments to make sure they're valid
assert args.mcnpdata.is_dir(), 'mcnpdata argument must be a directory'
if args.photon is not None:
assert args.photon.is_file(), 'photon argument must be an existing file'
# Get a list of all ACE files
endf71x = list(args.mcnpdata.glob('endf71x/*/*.7??nc'))
endf71sab = list(args.mcnpdata.glob('ENDF71SaB/*.??t'))
# Check for fixed H1 files and remove old ones if present
hydrogen = args.mcnpdata / 'endf71x' / 'H'
if (hydrogen / '1001.720nc').is_file():
for i in range(10, 17):
endf71x.remove(hydrogen / f'1001.7{i}nc')
# There's a bug in H-Zr at 1200 K
thermal = args.mcnpdata / 'ENDF71SaB'
endf71sab.remove(thermal / 'h-zr.27t')
# Check for updated TSL files and remove old ones if present
checks = [
('sio2', 10, range(20, 37)),
('u-o2', 30, range(20, 28)),
('zr-h', 30, range(20, 28))
]
for material, good, bad in checks:
if (thermal / f'{material}.{good}t').is_file():
for suffix in bad:
f = thermal / f'{material}.{suffix}t'
if f.is_file():
endf71sab.remove(f)
# Group together tables for the same nuclide
tables = defaultdict(list)
for p in sorted(endf71x + endf71sab):
tables[p.stem].append(p)
# Create output directory if it doesn't exist
(args.destination / 'photon').mkdir(parents=True, exist_ok=True)
library = openmc.data.DataLibrary()
for name, paths in sorted(tables.items()):
# Convert first temperature for the table
p = paths[0]
print(f'Converting: {p}')
if p.name.endswith('t'):
data = openmc.data.ThermalScattering.from_ace(p)
else:
data = openmc.data.IncidentNeutron.from_ace(p, 'mcnp')
# For each higher temperature, add cross sections to the existing table
for p in paths[1:]:
print(f'Adding: {p}')
if p.name.endswith('t'):
data.add_temperature_from_ace(p)
else:
data.add_temperature_from_ace(p, 'mcnp')
# Export HDF5 file
h5_file = args.destination / f'{data.name}.h5'
print(f'Writing {h5_file}...')
data.export_to_hdf5(h5_file, 'w', libver=args.libver)
# Register with library
library.register_file(h5_file)
# Handle photoatomic data
if args.photon is not None:
lib = openmc.data.ace.Library(args.photon)
for table in lib.tables:
# Convert first temperature for the table
print(f'Converting: {table.name}')
data = openmc.data.IncidentPhoton.from_ace(table)
# Export HDF5 file
h5_file = args.destination / 'photon' / f'{data.name}.h5'
print(f'Writing {h5_file}...')
data.export_to_hdf5(h5_file, 'w', libver=args.libver)
# Register with library
library.register_file(h5_file)
# Write cross_sections.xml
library.export_to_xml(args.destination / 'cross_sections.xml')
|
[
"[email protected]"
] | |
ba45777ebf476d635254faf1c942e070055b6fc5
|
c463e77c3d76e6b4810e202541d3f3f7f91bcf60
|
/build/PCL-ROS-cluster-Segmentation/cmake/sensor_stick-genmsg-context.py
|
31a011a3d2b1087f74bbb8bde784bccea1893805
|
[] |
no_license
|
MGRNascimento/Tese
|
18087ee59dfee96ee000c9f16c646d1750174285
|
bf78d417849a74d9c5a520d40dcbebeadf084706
|
refs/heads/master
| 2020-06-23T13:57:01.699657 | 2019-10-23T21:47:19 | 2019-10-23T21:47:19 | 198,638,709 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 992 |
py
|
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/miguel/catkin_ws/src/PCL-ROS-cluster-Segmentation/msg/DetectedObjectsArray.msg;/home/miguel/catkin_ws/src/PCL-ROS-cluster-Segmentation/msg/DetectedObject.msg;/home/miguel/catkin_ws/src/PCL-ROS-cluster-Segmentation/msg/SegmentedClustersArray.msg"
services_str = "/home/miguel/catkin_ws/src/PCL-ROS-cluster-Segmentation/srv/GetNormals.srv"
pkg_name = "sensor_stick"
dependencies_str = "std_msgs;sensor_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "sensor_stick;/home/miguel/catkin_ws/src/PCL-ROS-cluster-Segmentation/msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg;sensor_msgs;/opt/ros/kinetic/share/sensor_msgs/cmake/../msg;geometry_msgs;/opt/ros/kinetic/share/geometry_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
|
[
"[email protected]"
] | |
c72ea0fdf63e7cab3cd12fac24e9a96fe75a01e2
|
50402cc4388dfee3a9dbe9e121ef217759ebdba8
|
/etc/MOPSO-GP0/ZDT4.py
|
1082e5005e8823de068729fbccebe4e6a539378f
|
[] |
no_license
|
dqyi11/SVNBackup
|
bd46a69ec55e3a4f981a9bca4c8340944d8d5886
|
9ad38e38453ef8539011cf4d9a9c0a363e668759
|
refs/heads/master
| 2020-03-26T12:15:01.155873 | 2015-12-10T01:11:36 | 2015-12-10T01:11:36 | 144,883,382 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,609 |
py
|
'''
Created on 2014-1-25
@author: Walter
'''
from SwarmND import *;
import numpy as np;
import sys;
if __name__ == '__main__':
def func1(x):
return x[0];
def func2(x):
sum = 0.0;
for i in range(2, 10):
sum += x[i]**2 - 10 * np.cos(4 * np.pi * x[i]);
g = 1 + 10 * 9 + sum;
h = 1 - np.sqrt(x[0]/g);
return g * h;
figFolder = sys.path[0];
figFolder = sys.path[0] + "\\zdt4";
paretoX = np.arange(0.0,1.0,0.005);
paretoY = np.zeros(len(paretoX));
localParetoY = np.zeros(len(paretoX));
paretoPos = [];
for i in range(len(paretoX)):
paretoY[i] = 1 - np.sqrt(paretoX[i]);
localParetoY[i] = 1 - np.sqrt(paretoX[i]/1.25);
fitPos = np.matrix(np.zeros((1,2), np.float));
fitPos[0,0] = paretoX[i];
fitPos[0,1] = paretoY[i];
paretoPos.append(fitPos);
swarm = SwarmND(100, 10);
swarm.setDisplayParam(600, 600, 20, 0.1)
swarm.setParam(2.0, 2.0, 0.8, [func1, func2]);
ws = [];
ws.append([0.0, 1.0]);
for i in range(1,10):
ws.append([-5.0, 5.0])
swarm.initParticles(ws);
swarm.paretoX = paretoX;
swarm.paretoY = paretoY;
swarm.localParetoX = paretoX;
swarm.localParetoY = localParetoY;
swarm.paretoPos = paretoPos;
runPlan = [30, 60, 80, 100];
count = 0;
for r in runPlan:
for t in range(r):
swarm.update();
count += 1;
swarm.plot(count, figFolder);
|
[
"walter@e224401c-0ce2-47f2-81f6-2da1fe30fd39"
] |
walter@e224401c-0ce2-47f2-81f6-2da1fe30fd39
|
7e15d512ec3c87a9d4dc6de189623ab45646f041
|
efb3194a583cd79cc03dc91b9a96dfc0bdd3a344
|
/stm32f/json_pkt.py
|
8fab02dbeb225a6406222a1a16911d147abec342
|
[
"Apache-2.0"
] |
permissive
|
andersy005/capstone
|
9227b0c19b4e16ea5e67a529937652408d0a35f2
|
b4301ebc7c1447f3ce2ff034add985c1f417f065
|
refs/heads/master
| 2021-09-13T07:42:52.359116 | 2018-04-26T17:58:05 | 2018-04-26T17:58:05 | 118,843,216 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,517 |
py
|
# This code should run fine on MicroPython or CPython.
#
# It allows objects which can be represented as JSON objects to be sent
# between two python programs (running on the same or different computers).
import json
from dump_mem import dump_mem
SOH = 0x01
STX = 0x02
ETX = 0x03
EOT = 0x04
# <SOH><LenLow><LenHigh><STX><PAYLOAD><ETX><LRC><EOT>
def lrc(str):
sum = 0
for b in str:
sum = (sum + b) & 0xff
return ((sum ^ 0xff) + 1) & 0xff
class JSON_Packet:
STATE_SOH = 0
STATE_LEN_0 = 1
STATE_LEN_1 = 2
STATE_STX = 3
STATE_PAYLOAD = 4
STATE_ETX = 5
STATE_LRC = 6
STATE_EOT = 7
def __init__(self, serial_port, show_packets=False):
self.serial_port = serial_port
self.show_packets = show_packets
self.pkt_len = 0
self.pkt_idx = 0
self.pkt = None
self.lrc = 0
self.state = JSON_Packet.STATE_SOH
def send(self, obj):
"""Converts a python object into its json representation and then sends
it using the 'serial_port' passed in the constructor.
"""
j_str = json.dumps(obj).encode('ascii')
j_len = len(j_str)
j_lrc = lrc(j_str)
hdr = bytearray((SOH, j_len & 0xff, j_len >> 8, STX))
ftr = bytearray((ETX, j_lrc, EOT))
if self.show_packets:
data = hdr + j_str + ftr
dump_mem(data, 'Send')
self.serial_port.write(hdr)
self.serial_port.write(j_str)
self.serial_port.write(ftr)
def process_byte(self, byte):
"""Processes a single byte. Returns a json object when one is
successfully parsed, otherwise returns None.
"""
if self.show_packets:
if byte >= ord(' ') and byte <= ord('~'):
print('Rcvd 0x%02x \'%c\'' % (byte, byte))
else:
print('Rcvd 0x%02x' % byte)
if self.state == JSON_Packet.STATE_SOH:
if byte == SOH:
self.state = JSON_Packet.STATE_LEN_0
elif self.state == JSON_Packet.STATE_LEN_0:
self.pkt_len = byte
self.state = JSON_Packet.STATE_LEN_1
elif self.state == JSON_Packet.STATE_LEN_1:
self.pkt_len += (byte << 8)
self.state = JSON_Packet.STATE_STX
elif self.state == JSON_Packet.STATE_STX:
if byte == STX:
self.state = JSON_Packet.STATE_PAYLOAD
self.pkt_idx = 0
self.pkt = bytearray(self.pkt_len)
self.lrc = 0
else:
self.state = JSON_Packet.STATE_SOH
elif self.state == JSON_Packet.STATE_PAYLOAD:
self.pkt[self.pkt_idx] = byte
self.lrc = (self.lrc + byte) & 0xff
self.pkt_idx += 1
if self.pkt_idx >= self.pkt_len:
self.state = JSON_Packet.STATE_ETX
elif self.state == JSON_Packet.STATE_ETX:
if byte == ETX:
self.state = JSON_Packet.STATE_LRC
else:
self.state = JSON_Packet.STATE_SOH
elif self.state == JSON_Packet.STATE_LRC:
self.lrc = ((self.lrc ^ 0xff) + 1) & 0xff
if self.lrc == byte:
self.state = JSON_Packet.STATE_EOT
else:
self.state = JSON_Packet.STATE_SOH
elif self.state == JSON_Packet.STATE_EOT:
self.state = JSON_Packet.STATE_SOH
if byte == EOT:
return json.loads(str(self.pkt, 'ascii'))
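# A minimal loopback sketch: _LoopbackPort is a stand-in for the serial port used
# only here (it just needs a write() method). It sends one object and feeds the
# captured bytes back through process_byte() to recover it.
class _LoopbackPort:
    def __init__(self):
        self.buffer = bytearray()
    def write(self, data):
        self.buffer.extend(data)
if __name__ == '__main__':
    port = _LoopbackPort()
    jpkt = JSON_Packet(port)
    jpkt.send({'temperature': 21.5, 'ok': True})
    for b in port.buffer:
        obj = jpkt.process_byte(b)
        if obj is not None:
            print(obj)  # {'temperature': 21.5, 'ok': True}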
|
[
"[email protected]"
] | |
fefa4008d3c6a8622e01e84a315130f060863036
|
2a54e8d6ed124c64abb9e075cc5524bb859ba0fa
|
/.history/2-Python-Basics-part2/6-Logical-operators_20200414002000.py
|
7a4ee8fd3c96f8e57b7e41dc522b12fb81613bec
|
[] |
no_license
|
CaptainStorm21/Python-Foundation
|
01b5fbaf7a913506518cf22e0339dd948e65cea1
|
a385adeda74f43dd7fb2d99d326b0be23db25024
|
refs/heads/master
| 2021-05-23T01:29:18.885239 | 2020-04-23T19:18:06 | 2020-04-23T19:18:06 | 253,171,611 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 196 |
py
|
# Short Circuiting
is_Friend = True
is_User = True
if is_Friend or is_User:
print("both are true")
if is_Friend and is_User:
print("both are true")
age = 15
year = 2019
boy = "Vlad"
|
[
"[email protected]"
] | |
9afc659a83985ca5e7a34f87ceb3a5de075cc25b
|
5a3b070f39715f604a8bfc38888b6ee4382e54ac
|
/TalkTalk-Server/app.py
|
aa21f179f70f37f987a80665e81a7a672d8cc074
|
[] |
no_license
|
aupaprunia/talktalk
|
717245ec0378559abf2dba0793822d19613faf57
|
895418aa25ad154449f4036362a77b615092b00b
|
refs/heads/main
| 2023-04-13T03:53:37.361677 | 2021-04-11T19:08:54 | 2021-04-11T19:08:54 | 356,480,824 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,022 |
py
|
from flask import Flask, request
import pyrebase
import requests
choice_dict = {1:"Sad", 2: "Happy", 3: "Angry", 4: "Excited"}
config = {"apiKey": "AIzaSyBrey3ZZ5X74WrAQuj7HISWLl70PqP8dnA",
"authDomain": "trialproject-55deb.firebaseapp.com",
"databaseURL": "https://trialproject-55deb-default-rtdb.firebaseio.com",
"projectId": "trialproject-55deb",
"storageBucket": "trialproject-55deb.appspot.com",
"messagingSenderId": "930590452475",
"appId": "1:930590452475:web:d8857d9906874468fd5e5e"
}
firebase = pyrebase.initialize_app(config)
auth = firebase.auth()
db = firebase.database()
app = Flask(__name__)
# @app.route('/signup', methods =['GET'])
# def signup():
# register = request.get_json()
# email = register['email']
# password = register['password']
# auth.create_user_with_email_and_password(email, password)
# return {"status": " success", "email": email, "password": password}
@app.route('/signin/<string:email>/<string:password>', methods = ['GET'])
def signin(email, password):
try:
result = auth.sign_in_with_email_and_password(email, password)
global userId
userId = result['localId']
get_token = db.child("Users").child(userId).get()
global token
token = get_token.val()['token']
name = get_token.val()['name']
return{"token": token, "status": 1, "name": name}
except:
return {"status": 0}
@app.route('/speaker/<int:choice>', methods = ["GET"])
def speaker(choice):
try:
users = db.child("Online").child("Listener").child(choice_dict[choice]).get()
uid = ""
flag = True
for key in users.val():
if flag == True:
uid = key
flag = False
db.child("Online").child("Listener").child(choice_dict[choice]).child(uid).child("status").set("1")
db.child("Users").child(userId).child("token").set(token-1)
url = "https://fcm.googleapis.com/fcm/send"
payload="{\r\n \"to\":\"/topics/"+userId+",\r\n \"data\": {\r\n \"title\": \"Alert\",\r\n \"body\": \"You have an incoming call...\"\r\n }\r\n}"
headers = {'Authorization': 'key=AAAA2KuDavs:APA91bGCwqzJYQntRNVZU4WfjDh71D2kLvI4ei3iXr9BIlrz-lzp3HdzZWKAWghUwZK0i1rvC0RKFl2rdk1uyAf3RozvlPO1snRvwYpxJVz5qAH5keFgzygj8h16D0g-YDHrz6SoqJfh',
'Content-Type': 'application/json'}
response = requests.request("POST", url, headers=headers, data=payload)
print(response)
return {"channel_name": uid, "status":1}
except:
return {"message": "No Listner available. Try reconnecting later.", "status":0}
@app.route('/listner/<int:choice>', methods = ["GET"])
def push_listner(choice):
db.child("Online").child("Listener").child(choice_dict[choice]).child(userId).child("status").set("0")
db.child("Online").child("Listener").child(choice_dict[choice]).child(userId).child("uid").set(userId)
db.child("Users").child(userId).child("token").set(token+1)
return {"status" : 1, "message": "You will be connected to a speaker shortly."}
if __name__ == '__main__':
app.run(debug = True)
|
[
"="
] |
=
|
08dfeef07dc2184dd58ed15584e4a9d792be3383
|
3a8c2bd3b8df9054ed0c26f48616209859faa719
|
/Challenges/Hackerrank-DynamicArray.py
|
c63264cadb5c93066503209dd51764b1eaa68ce0
|
[] |
no_license
|
AusCommsteam/Algorithm-and-Data-Structures-and-Coding-Challenges
|
684f1ca2f9ee3c49d0b17ecb1e80707efe305c82
|
98fb752c574a6ec5961a274e41a44275b56da194
|
refs/heads/master
| 2023-09-01T23:58:15.514231 | 2021-09-10T12:42:03 | 2021-09-10T12:42:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 528 |
py
|
#!/bin/python3
import os
import sys
#
# Complete the dynamicArray function below.
#
def dynamicArray(n, queries):
    # Fills in the stub assuming the standard HackerRank "Dynamic Array" semantics:
    # keep n sequences; a type-1 query appends y to sequence ((x ^ lastAnswer) % n),
    # a type-2 query reads element (y % size) of that sequence and records it.
    arr = [[] for _ in range(n)]
    last_answer = 0
    answers = []
    for query_type, x, y in queries:
        seq = arr[(x ^ last_answer) % n]
        if query_type == 1:
            seq.append(y)
        else:
            last_answer = seq[y % len(seq)]
            answers.append(last_answer)
    return answers
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nq = input().split()
n = int(nq[0])
q = int(nq[1])
queries = []
for _ in range(q):
queries.append(list(map(int, input().rstrip().split())))
result = dynamicArray(n, queries)
fptr.write('\n'.join(map(str, result)))
fptr.write('\n')
fptr.close()
|
[
"[email protected]"
] | |
bd080db414250c7460293da72e2625c463127dcf
|
55a4d7ed3ad3bdf89e995eef2705719ecd989f25
|
/main/tensorflow_test/hmm_天气_活动理解.py
|
1318a13a359255ef5e47ef393f656642d7456de5
|
[] |
no_license
|
ichoukou/Bigdata
|
31c1169ca742de5ab8c5671d88198338b79ab901
|
537d90ad24eff4742689eeaeabe48c6ffd9fae16
|
refs/heads/master
| 2020-04-17T04:58:15.532811 | 2018-12-11T08:56:42 | 2018-12-11T08:56:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,842 |
py
|
# coding:utf-8
states = ('Rainy', 'Sunny')
observations = ('walk', 'shop', 'clean')
start_probability = {'Rainy': 0.6, 'Sunny': 0.4}
transition_probability = {
'Rainy': {'Rainy': 0.7, 'Sunny': 0.3},
'Sunny': {'Rainy': 0.4, 'Sunny': 0.6},
}
emission_probability = {
'Rainy': {'walk': 0.1, 'shop': 0.4, 'clean': 0.5},
'Sunny': {'walk': 0.6, 'shop': 0.3, 'clean': 0.1},
}
# print the path probability table
def print_dptable(V):
print " ",
for i in range(len(V)): print "%7d" % i,
print
for y in V[0].keys():
print "%.10s: " % y,
for t in range(len(V)):
print "%.12s" % ("%f" % V[t][y]),
print
def viterbi(obs, states, start_p, trans_p, emit_p):
"""
    :param obs: observation sequence
    :param states: hidden states
    :param start_p: initial probabilities (of the hidden states)
    :param trans_p: transition probabilities (between hidden states)
    :param emit_p: emission probabilities (probability that a hidden state produces an observed state)
:return:
"""
    # path probability table: V[time][hidden state] = probability
V = [{}]
    # bookkeeping variable: for each hidden state, the best path that currently ends in it
path = {}
    # initialise the start state (t == 0)
for y in states:
V[0][y] = start_p[y] * emit_p[y][obs[0]]
path[y] = [y]
    # run the Viterbi recursion for t > 0
for t in range(1, len(obs)): # [1,2]
V.append({})
newpath = {}
for y in states:
            # probability of hidden state y = P(previous state y0) * P(y0 -> y) * P(y emits the current observation)
            # print [(V[t - 1][y0] * trans_p[y0][y] * emit_p[y][obs[t]], y0) for y0 in states]
            # the probability that today's weather is y can come from yesterday being either Rainy or Sunny;
            # only the predecessor with the largest probability is kept for the current chain
            (prob, state) = max([(V[t - 1][y0] * trans_p[y0][y] * emit_p[y][obs[t]], y0) for y0 in states])
            # record the maximum probability
V[t][y] = prob
print V
            # update the chain that currently ends in y (Sunny or Rainy), keeping only the best predecessor
newpath[y] = path[state] + [y]
# print newpath
        # no need to keep the old paths
path = newpath
    # print the table: for each day only the single best probability of Sunny and of Rainy is kept and fed into the next day
print_dptable(V)
(prob, state) = max([(V[len(obs) - 1][y], y) for y in states])
return (prob, path[state])
def example():
return viterbi(observations,
states,
start_probability,
transition_probability,
emission_probability)
# Note: max([(4,'hello'),(3,'hello'),(10,'hello')]) compares on the first element of each tuple!
print example()
|
[
"[email protected]"
] | |
a25a9a45abf6afeb485d96f23c00c3d70ff087dc
|
b8f9d2cafb8958cdb417f05156acb6aadf90f4dd
|
/MachineLearning/NetworkAnalysis/PageRank.py
|
5d5647d240c30f7abe41a25e7aa9ec6bbe87407e
|
[] |
no_license
|
Anova07/Data-Science
|
8d14f78236de0053e2d31cc8cd85b9c70dfa2c8a
|
86dd24fb04a199536ae8f3f5f843aae3fc69c086
|
refs/heads/master
| 2021-12-08T10:35:35.512188 | 2016-03-06T19:08:58 | 2016-03-06T19:08:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,945 |
py
|
import math, random, re
from collections import defaultdict, Counter, deque
from LinearUtils.Vectors import dotProduct, magnitude, scalarMultiply, shape, distance
from LinearUtils.Matrices import getRow, getCol, generateMatrix
from functools import partial
# Code from Data Science from Scratch - github
users = [
{ "id": 0, "name": "Hero" },
{ "id": 1, "name": "Dunn" },
{ "id": 2, "name": "Sue" },
{ "id": 3, "name": "Chi" },
{ "id": 4, "name": "Thor" },
{ "id": 5, "name": "Clive" },
{ "id": 6, "name": "Hicks" },
{ "id": 7, "name": "Devin" },
{ "id": 8, "name": "Kate" },
{ "id": 9, "name": "Klein" }
]
friendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),
(4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]
# give each user a friends list
for user in users:
user["friends"] = []
# and fill it
for i, j in friendships:
users[i]["friends"].append(users[j]) # add i as a friend of j
users[j]["friends"].append(users[i]) # add j as a friend of i
endorsements = [(0, 1), (1, 0), (0, 2), (2, 0), (1, 2), (2, 1), (1, 3),
(2, 3), (3, 4), (5, 4), (5, 6), (7, 5), (6, 8), (8, 7), (8, 9)]
def PageRank(users, damping = 0.85, num_iters = 100):
"""
A simplified version looks like this:
1. There is a total of 1.0 (or 100%) PageRank in the network.
2. Initially this PageRank is equally distributed among nodes.
3. At each step, a large fraction of each node’s PageRank is distributed evenly among its outgoing links.
4. At each step, the remainder of each node’s PageRank is distributed evenly among all nodes.
"""
# initially distribute PageRank evenly
num_users = len(users)
pr = { user["id"] : 1 / num_users for user in users }
# this is the small fraction of PageRank
# that each node gets each iteration
base_pr = (1 - damping) / num_users
for __ in range(num_iters):
next_pr = { user["id"] : base_pr for user in users }
for user in users:
# distribute PageRank to outgoing links
links_pr = pr[user["id"]] * damping
for endorsee in user["endorses"]:
next_pr[endorsee["id"]] += links_pr / len(user["endorses"])
pr = next_pr
return pr
if __name__ == "__main__":
for user in users:
user["endorses"] = [] # add one list to track outgoing endorsements
user["endorsed_by"] = [] # and another to track endorsements
for source_id, target_id in endorsements:
users[source_id]["endorses"].append(users[target_id])
users[target_id]["endorsed_by"].append(users[source_id])
endorsements_by_id = [(user["id"], len(user["endorsed_by"]))
for user in users]
sorted(endorsements_by_id, key=lambda pair: pair[1], reverse=True)
print("PageRank")
for user_id, pr in PageRank(users).items():
print(user_id, pr)
|
[
"[email protected]"
] | |
c7ef812fb6b1c0a1bcbf2e8e463e19da84748944
|
6b265b404d74b09e1b1e3710e8ea872cd50f4263
|
/Python/Exercises/TreeChecker/check_tree_2.0.py
|
857bec02ba2b491a4a9f7d5ad9e1b2461082a30e
|
[
"CC-BY-4.0"
] |
permissive
|
gjbex/training-material
|
cdc189469ae2c7d43784ecdcb4bcca10ecbc21ae
|
e748466a2af9f3388a8b0ed091aa061dbfc752d6
|
refs/heads/master
| 2023-08-17T11:02:27.322865 | 2023-04-27T14:42:55 | 2023-04-27T14:42:55 | 18,587,808 | 130 | 60 |
CC-BY-4.0
| 2023-08-03T07:07:25 | 2014-04-09T06:35:58 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,828 |
py
|
#!/usr/bin/env python
import sys
class BaseError(Exception):
def __init__(self, position):
super().__init__()
self._position = position
@property
def position(self):
return self._position
def __str__(self):
return self.message
class MissingRBError(BaseError):
def __init__(self, position):
super().__init__(position)
msg = 'missing right bracket for bracket at {0}'
self.message = msg.format(position)
class MissingLBError(BaseError):
def __init__(self, position):
super().__init__(position)
msg = 'missing left bracket for bracket at {0}'
self.message = msg.format(position)
class TrailingCharsError(BaseError):
def __init__(self, position):
super().__init__(position)
self.message = 'trailing characters at position {0}'.format(position)
def check_tree(tree):
bracket_positions = []
position = 1
for character in tree:
if character == '(':
bracket_positions.append(position)
elif character == ')':
if bracket_positions:
bracket_positions.pop()
else:
raise MissingLBError(position)
if len(bracket_positions) == 0:
break
position += 1
if len(bracket_positions) == 0 and position < len(tree) - 1:
raise TrailingCharsError(position + 1)
elif len(bracket_positions) > 0:
raise MissingRBError(bracket_positions.pop())
def main():
tree = ''.join([line.strip() for line in sys.stdin.readlines()])
try:
check_tree(tree)
except BaseError as error:
sys.stderr.write('### error: {0}\n'.format(str(error)))
return 1
else:
return 0
if __name__ == '__main__':
status = main()
sys.exit(status)
|
[
"[email protected]"
] | |
658b34c8593e518f6e856b6afb5c1d107b89f6bc
|
98f1a0bfa5b20a0b81e9e555d76e706c62d949c9
|
/examples/pytorch/stgcn_wave/model.py
|
2463721f1b38ea34e09db1c8e3b064a7db69e439
|
[
"Apache-2.0"
] |
permissive
|
dmlc/dgl
|
3a8fbca3a7f0e9adf6e69679ad62948df48dfc42
|
bbc8ff6261f2e0d2b5982e992b6fbe545e2a4aa1
|
refs/heads/master
| 2023-08-31T16:33:21.139163 | 2023-08-31T07:49:22 | 2023-08-31T07:49:22 | 130,375,797 | 12,631 | 3,482 |
Apache-2.0
| 2023-09-14T15:48:24 | 2018-04-20T14:49:09 |
Python
|
UTF-8
|
Python
| false | false | 3,480 |
py
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from dgl.nn.pytorch import GraphConv
from dgl.nn.pytorch.conv import ChebConv
class TemporalConvLayer(nn.Module):
"""Temporal convolution layer.
arguments
---------
c_in : int
The number of input channels (features)
c_out : int
The number of output channels (features)
dia : int
The dilation size
"""
def __init__(self, c_in, c_out, dia=1):
super(TemporalConvLayer, self).__init__()
self.c_out = c_out
self.c_in = c_in
self.conv = nn.Conv2d(
c_in, c_out, (2, 1), 1, dilation=dia, padding=(0, 0)
)
def forward(self, x):
return torch.relu(self.conv(x))
class SpatioConvLayer(nn.Module):
def __init__(self, c, Lk): # c : hidden dimension Lk: graph matrix
super(SpatioConvLayer, self).__init__()
self.g = Lk
self.gc = GraphConv(c, c, activation=F.relu)
# self.gc = ChebConv(c, c, 3)
def init(self):
stdv = 1.0 / math.sqrt(self.W.weight.size(1))
self.W.weight.data.uniform_(-stdv, stdv)
def forward(self, x):
x = x.transpose(0, 3)
x = x.transpose(1, 3)
output = self.gc(self.g, x)
output = output.transpose(1, 3)
output = output.transpose(0, 3)
return torch.relu(output)
class FullyConvLayer(nn.Module):
def __init__(self, c):
super(FullyConvLayer, self).__init__()
self.conv = nn.Conv2d(c, 1, 1)
def forward(self, x):
return self.conv(x)
class OutputLayer(nn.Module):
def __init__(self, c, T, n):
super(OutputLayer, self).__init__()
self.tconv1 = nn.Conv2d(c, c, (T, 1), 1, dilation=1, padding=(0, 0))
self.ln = nn.LayerNorm([n, c])
self.tconv2 = nn.Conv2d(c, c, (1, 1), 1, dilation=1, padding=(0, 0))
self.fc = FullyConvLayer(c)
def forward(self, x):
x_t1 = self.tconv1(x)
x_ln = self.ln(x_t1.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
x_t2 = self.tconv2(x_ln)
return self.fc(x_t2)
class STGCN_WAVE(nn.Module):
def __init__(
self, c, T, n, Lk, p, num_layers, device, control_str="TNTSTNTST"
):
super(STGCN_WAVE, self).__init__()
self.control_str = control_str # model structure controller
self.num_layers = len(control_str)
self.layers = nn.ModuleList([])
cnt = 0
diapower = 0
for i in range(self.num_layers):
i_layer = control_str[i]
if i_layer == "T": # Temporal Layer
self.layers.append(
TemporalConvLayer(c[cnt], c[cnt + 1], dia=2**diapower)
)
diapower += 1
cnt += 1
if i_layer == "S": # Spatio Layer
self.layers.append(SpatioConvLayer(c[cnt], Lk))
if i_layer == "N": # Norm Layer
self.layers.append(nn.LayerNorm([n, c[cnt]]))
self.output = OutputLayer(c[cnt], T + 1 - 2 ** (diapower), n)
for layer in self.layers:
layer = layer.to(device)
def forward(self, x):
for i in range(self.num_layers):
i_layer = self.control_str[i]
if i_layer == "N":
x = self.layers[i](x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
else:
x = self.layers[i](x)
return self.output(x)
|
[
"[email protected]"
] | |
f661b97983d5da36c5d8f23356b77bb41fdbff71
|
dd05972a3bf9d15f332fbff420f10afe1977c0d8
|
/competition/base_example/aliceTest.py
|
76fec14b823615e7488647e1a92bf8e51c2b7006
|
[
"BSD-2-Clause"
] |
permissive
|
StephanieWehner/QI-Competition2018
|
b70df8c5bb343c534c2c0bd8fc0e7d6bb6183f25
|
cc1139c81e39f66b77c046414bcac8de45807557
|
refs/heads/master
| 2020-03-23T05:45:09.885955 | 2018-08-08T20:03:29 | 2018-08-08T20:03:29 | 141,164,280 | 1 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,108 |
py
|
#
# Copyright (c) 2017, Stephanie Wehner and Axel Dahlberg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. All advertising materials mentioning features or use of this software
# must display the following acknowledgement:
# This product includes software developed by Stephanie Wehner, QuTech.
# 4. Neither the name of the QuTech organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from SimulaQron.general.hostConfig import *
from SimulaQron.cqc.backend.cqcHeader import *
from SimulaQron.cqc.pythonLib.cqc import *
from SimulaQron.toolbox.measurements import parity_meas
import random
#####################################################################################################
#
# main
#
def main():
# Initialize the connection
Alice = CQCConnection("Alice")
# Create EPR pairs
q1 = Alice.createEPR("Bob")
q2 = Alice.createEPR("Bob")
# Make sure we order the qubits consistently with Bob
# Get entanglement IDs
q1_ID = q1.get_entInfo().id_AB
q2_ID = q2.get_entInfo().id_AB
if q1_ID < q2_ID:
qa = q1
qc = q2
else:
qa = q2
qc = q1
# Get row
row = 0
# Perform the three measurements
if row == 0:
m0 = parity_meas([qa, qc], "XI", Alice)
m1 = parity_meas([qa, qc], "XX", Alice)
m2 = parity_meas([qa, qc], "IX", Alice)
else:
m0 = 0
m1 = 0
m2 = 0
print("\n")
print("==========================")
print("App {}: row is:".format(Alice.name))
for _ in range(row):
print("(___)")
print("({}{}{})".format(m0, m1, m2))
for _ in range(2-row):
print("(___)")
print("==========================")
print("\n")
# Clear qubits
qa.measure()
qc.measure()
# Stop the connections
Alice.close()
##################################################################################################
main()
|
[
"[email protected]"
] | |
986bf659063dbb4023eaaf094cd1d3cccd06ebdb
|
44dbb043e52f00c9a797b1bea8f1df50dd621842
|
/os-example-4.py
|
69064074cfa33ba2ae8384a237bc9351ebad664a
|
[] |
no_license
|
peterdocter/standardmodels
|
140c238d3bef31db59641087e3f3d5413d4baba1
|
7addc313c16b416d0970461998885833614570ad
|
refs/heads/master
| 2020-12-30T16:59:30.489486 | 2016-12-13T06:32:03 | 2016-12-13T06:32:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 167 |
py
|
import os
# where are we?
cwd = os.getcwd()
print "1", cwd
# go down
os.chdir("samples")
print "2", os.getcwd()
# go back up
os.chdir(os.pardir)
print "3", os.getcwd()
|
[
"[email protected]"
] | |
235af1bbc670e956e37e472b363d092d53a2e10f
|
7927424f1983eecc7c7b2f0ebaf61ad552d2a7e7
|
/zigzag.py
|
1e4ea4b1030d84d3446c45f2f19960e1f1f9aafc
|
[] |
no_license
|
6reg/automate
|
295931d3ecf0e69e01921cc45d452fadfd1e6581
|
11e5de461ece3d8d111f3dc13de088788baf19a2
|
refs/heads/main
| 2023-03-08T18:39:42.991280 | 2021-02-22T20:53:13 | 2021-02-22T20:53:13 | 334,780,031 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 746 |
py
|
import time, sys
indent = 0 # How many spaces to indent
indentIncreasing = True # Whether the indentation is increasing or not
try:
while True: # The main program loop.
print(' ' * indent, end='')
print('********')
time.sleep(0.1) # Pause for the 1/10 of a second.
if indentIncreasing:
# Increase the number of spaces:
indent = indent + 1
if indent == 20:
# Change direction:
indentIncreasing = False
else:
# Decrease the number of spaces:
indent = indent - 1
if indent == 0:
# Change direction:
indentIncreasing = True
except KeyboardInterrupt:
sys.exit()
|
[
"[email protected]"
] | |
696193e4863c900c995b49d8854b2fd947ef2ebd
|
9dc21ebb553fd116826c7cbae7d8c5eba47423d1
|
/cloneGraph.py
|
81681ac2a31cf11b69ac78e24d755d692f4aee77
|
[] |
no_license
|
KJSui/leetcode-2020
|
a475a8b8481231757222c5afaad2856a92572f89
|
37cf89e7fb1351b1deff09271d9bb5852395054e
|
refs/heads/main
| 2023-04-05T19:46:25.647605 | 2021-05-06T20:40:06 | 2021-05-06T20:40:06 | 365,031,592 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 426 |
py
|
class Solution:
    def __init__(self):
        self.copy = {}

    def cloneGraph(self, node):
        if not node:
            return None
        if node in self.copy:
            return self.copy[node]
        newNode = Node(node.val)
        # Register the clone before visiting neighbors so cycles terminate.
        self.copy[node] = newNode
        neighbors = []
        for i in node.neighbors:
            if i in self.copy:
                neighbors.append(self.copy[i])
            else:
                neighbors.append(self.cloneGraph(i))
        newNode.neighbors = neighbors
        return newNode
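# A minimal Node class for local testing, assuming the usual shape for this
# problem (a value plus a list of neighbors); the judge normally supplies its own.
class Node:
    def __init__(self, val=0, neighbors=None):
        self.val = val
        self.neighbors = neighbors if neighbors is not None else []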
|
[
"[email protected]"
] | |
97d55e2aec24c8c3c273787b6a0bfb6e207c6ee0
|
c261f0e98eedb4f0d85e92bd6ab8f4ae47096269
|
/lifeservice/schedule117/04美食下载团购糯米/getNuomiOtherCinemaMap.py
|
7e6d7d90119847ca9a6a6e964889df38e7707452
|
[] |
no_license
|
ShenDezhou/CPP
|
24379fe24f3c8588a7859ee586527d5cc6bfbe73
|
933c1e764a6ed2879b26aa548ff67153ca026bf6
|
refs/heads/master
| 2021-01-11T22:09:24.900695 | 2017-04-05T02:04:07 | 2017-04-05T02:04:07 | 78,928,291 | 0 | 1 | null | null | null | null |
GB18030
|
Python
| false | false | 1,328 |
py
|
#coding=gb2312
nuomiCinemaMap = dict()
otherCinemaMap = dict()
input = '/fuwu/Merger/Output/movie/cinema_movie_rel.table'
for line in open(input):
segs = line.strip('\n').decode('gb2312', 'ignore').split('\t')
cinemaid, source, ting = segs[1], segs[3], segs[9]
if source.find(u'糯米') != -1:
if cinemaid not in nuomiCinemaMap:
nuomiCinemaMap[cinemaid] = []
if ting not in nuomiCinemaMap[cinemaid]:
nuomiCinemaMap[cinemaid].append(ting)
else:
if cinemaid not in otherCinemaMap:
otherCinemaMap[cinemaid] = []
if ting not in otherCinemaMap[cinemaid]:
otherCinemaMap[cinemaid].append(ting)
# Check whether every hall name of the Nuomi cinemas is also covered by the other sources
for cinemaid in otherCinemaMap:
if cinemaid not in nuomiCinemaMap:
#print ('#%s\t%s\t%s' % (cinemaid, u'糯米', '\t'.join(nuomiCinemaMap[cinemaid]))).encode('gb2312', 'ignore')
continue
noMatchTingList = []
for ting in nuomiCinemaMap[cinemaid]:
if ting not in otherCinemaMap[cinemaid]:
noMatchTingList.append(ting)
if len(noMatchTingList) == 0:
continue
    # There is an inconsistency
normTing = '\t'.join(otherCinemaMap[cinemaid])
noMatchTing = '\t'.join(noMatchTingList)
print ('%s\t%s\t%s' % (cinemaid, u'非糯米', normTing)).encode('gb2312', 'ignore')
print ('%s\t%s\t%s' % (cinemaid, u'糯米', noMatchTing)).encode('gb2312', 'ignore')
|
[
"[email protected]"
] | |
b6e8f2be226188fbb1defabbcc1d134f8fc8e070
|
3570f2e7b8d5666cbd2d29a4c75965a75699a3e2
|
/pyodbc/run_test.py
|
1b0460f4bd5adc94625a5a8b380978050e9a9c4a
|
[] |
no_license
|
ilanschnell/recipes
|
7876225db2eb08b21d4d1ab426d40f94650192fd
|
c946b446a002d55ecffff6ce789cf9dcb57a65a6
|
refs/heads/master
| 2023-08-19T19:40:17.750037 | 2022-01-21T00:27:38 | 2022-01-21T00:27:38 | 119,077,116 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,181 |
py
|
import sys
from os.path import isfile
print(sys.version)
print(sys.executable)
import pyodbc
print(pyodbc)
if sys.platform == 'darwin':
driver_path = '/Users/ilan/a/envs/py38/lib/libsqlite3odbc.dylib'
elif sys.platform.startswith('linux'):
driver_path = '/home/osboxes/bin/libsqlite3odbc-0.9996.so'
if not isfile(driver_path):
raise Exception('so such file: %r' % driver_path)
connect_string = (
"DRIVER={%s};SERVER=localhost;DATABASE=./test.sqlite;Trusted_connection=yes"
% driver_path
)
cnxn = pyodbc.connect(connect_string)
cursor = cnxn.cursor()
try:
cursor.execute('drop table foo')
except:
pass
cursor.execute('create table foo (symbol varchar(5), price float)')
N = 1000
for i in range(N):
cursor.execute("insert into foo (symbol, price) values (?, ?)",
(str(i), float(i)))
cursor.execute("commit")
cursor.execute("select * from foo")
dictarray = cursor.fetchdictarray()
cursor.close()
for i in range(N):
assert dictarray['symbol'][i] == str(i)
assert (dictarray['price'][i] - float(i)) < 1E-10
# tab completion fails in ipython for pyodbc.Cursor
assert pyodbc.Cursor.fetchdictarray.__doc__
print("Done.")
|
[
"[email protected]"
] | |
ada7809ed008445486cb53ed74ffb2f3f533ab06
|
c05ed32f1ef7e1eb7d73efd674e7d1fd710ad171
|
/daily-coding-problems/problem429.py
|
f131f4e79b05103324b498c75f6d6f5240e45cd3
|
[] |
no_license
|
carlhinderer/python-exercises
|
c8367517fdf835fa1117f96dbfee3dccc596afa6
|
4e09bbb4c4e2bd5644ed50e997db9f3c289a18f7
|
refs/heads/master
| 2021-06-01T16:17:00.389134 | 2021-02-09T18:21:01 | 2021-02-09T18:21:01 | 150,902,917 | 0 | 0 | null | 2021-04-20T20:33:11 | 2018-09-29T21:03:36 |
Python
|
UTF-8
|
Python
| false | false | 533 |
py
|
# Problem 429
# Medium
# Asked by Stitch Fix
#
# Pascal's triangle is a triangular array of integers constructed with the
# following formula:
#
# The first row consists of the number 1.
#
# For each subsequent row, each element is the sum of the numbers directly
# above it, on either side.
#
# For example, here are the first few rows:
#
# 1
# 1 1
# 1 2 1
# 1 3 3 1
# 1 4 6 4 1
#
# Given an input k, return the kth row of Pascal's triangle.
#
# Bonus: Can you do this using only O(k) space?
#
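# A minimal sketch of one possible answer, assuming k is 0-indexed (row 0 is [1]):
# build the row in place, so only O(k) extra space is used.
def pascal_row(k):
    row = [1] * (k + 1)
    for i in range(1, k + 1):
        # Update from right to left so row[j - 1] still holds the previous row's value.
        for j in range(i - 1, 0, -1):
            row[j] += row[j - 1]
    return row

# Example: pascal_row(4) == [1, 4, 6, 4, 1]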
|
[
"[email protected]"
] | |
76b07fab07edb0667ffdda682c409887fdab50cc
|
2cf99a155405b48bf14f872e1980ed948079e5dd
|
/test/test_router.py
|
a30b567e256a3ea2fe3ba97d23c6ab0b5d1539e8
|
[
"MIT"
] |
permissive
|
marrow/web.dispatch.route
|
c15309a26023d068b8f84ea4bbc221b674c1e6b8
|
92494bcad2e2a9a52d2e51eecfab910d829cc2de
|
refs/heads/master
| 2021-01-25T04:01:46.245851 | 2016-02-15T07:54:36 | 2016-02-15T07:54:36 | 32,564,808 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,820 |
py
|
# encoding: utf-8
import pytest
from web.dispatch.route.router import __DYNAMIC__, Router
from sample import Root
@pytest.fixture
def router():
return Router.from_object(Root)
def test_dynamic_repr():
assert repr(__DYNAMIC__) == '<dynamic element>'
def test_router_singleton():
assert Router.from_object(Root) is Router.from_object(Root)
def test_invalid_route():
router = Router()
with pytest.raises(ValueError):
router.parse("{bad:/}")
class TestRouterSample(object):
def test_single_static(self, router):
assert len(router.routes) == 1 # There's only a single top-level element.
assert 'user' in router.routes # It's "user".
assert len(router.routes['user']) == 2 # Which has a terminus and dynamic continuation.
assert router.routes['user'][None] == Root.root # The terminus is the "root" method.
assert router.routes['user'][None](Root()) == "I'm all people." # It really is.
def test_dynamic_username(self, router):
assert __DYNAMIC__ in router.routes['user']
dynamic = router.routes['user'][__DYNAMIC__]
assert len(dynamic) == 1
assert list(dynamic.keys())[0].match("GothAlice") # The regular expression matches.
assert len(list(dynamic.values())[0]) == 2
assert list(dynamic.values())[0][None] == Root.user
assert list(dynamic.values())[0][None](Root(), "GothAlice") == "Hi, I'm GothAlice"
def test_dynamic_username_action(self, router):
assert __DYNAMIC__ in router.routes['user']
dynamic = router.routes['user'][__DYNAMIC__]
assert len(dynamic) == 1
assert list(dynamic.keys())[0].match("GothAlice") # The regular expression matches.
assert len(list(dynamic.values())[0]) == 2
assert list(dynamic.values())[0][None] == Root.user
assert list(dynamic.values())[0][None](Root(), "GothAlice") == "Hi, I'm GothAlice"
|
[
"[email protected]"
] | |
8bacb8e843f98006b0d409848f10edb92140f035
|
f160cf4eb335ea799559312ac3d43a60c2c5848b
|
/library/zip_extract.py
|
e1f1faecce940706c2ead17d0b449c0c1525aa28
|
[
"MIT"
] |
permissive
|
baseplate-admin/Machine-Learning-Source-Code
|
c3389e0acb81e1f4c8e4c0cc763fcbc3781ef94e
|
a2203033d525c17b31584b52527c30e2c8aad1c4
|
refs/heads/master
| 2022-11-21T04:33:41.307477 | 2020-07-10T15:46:32 | 2020-07-10T15:46:32 | 277,730,993 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,211 |
py
|
def zip_extract():
import os
from zipfile import ZipFile
def zip_function():
print("We are extracting ZIP!!!")
where_is_zip=input("What is your zip location?")
what_is_zip_name=input("What is your zip name?")
what_is_zip_extension=input("What is your ZIP format?")
zip_join=os.path.join(where_is_zip,what_is_zip_name+ '.'+ what_is_zip_extension)
with ZipFile(zip_join,"r") as zip:
zip.extractall()
zip.printdir()
print("Enter a Number or It will cause ValueError.")
how_many_zip=int(input('How many zip do you want to extract?'))
try:
print("""
This is a number!!
Lets Go!!!
""")
for i in range(how_many_zip):
ask_if_zip_extract=input("""
Do you want to extract zip?
Enter 0 to skip extracting zip.
Enter 1 to to extract ZIP.
""")
            if int(ask_if_zip_extract)==0:
                # 0 skips this zip
                continue
            elif int(ask_if_zip_extract)==1:
                zip_function()
else:
print("Theres a problem with zip extract.")
except Exception as e:
print(e)
|
[
"[email protected]"
] | |
52722c46ff54f9d588bdd4cd1a24506d64dacd60
|
bcc2d156334d3680561b17cec82cbc31a5ea07ad
|
/String/22. Generate Parentheses.py
|
2431fefda0dcde528d7eafd0b65a378afe0ebe31
|
[] |
no_license
|
kevinsshah/Leetcode
|
72b14e226b6881bcd18913b2fa132b0e3f8dd6ef
|
4419f46e6f6b1d96ff8b7066fce687cfa88e65a0
|
refs/heads/master
| 2020-03-25T23:00:49.851183 | 2018-09-08T04:13:27 | 2018-09-08T04:13:27 | 144,255,457 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,129 |
py
|
# Given n pairs of parentheses, write a function to generate all combinations of well-formed parentheses.
#
# For example, given n = 3, a solution set is:
#
# [
# "((()))",
# "(()())",
# "(())()",
# "()(())",
# "()()()"
# ]
class Solution(object):
def generateParenthesis(self, n):
"""
:type n: int
:rtype: List[str]
"""
# def helper(A = []):
# if len(A) == 2*n:
# if isValid(A):
# ans.append("".join(A))
# else:
# A.append("(")
# helper(A)
# A.pop()
# A.append(")")
# helper(A)
# A.pop()
# def isValid(A):
# bal = 0
# for c in A:
# if c == "(":
# bal+=1
# else:
# bal -= 1
# if bal < 0:
# return False
# return bal == 0
# ans = []
# helper()
# return ans
# def backtrack(S = '', left = 0, right = 0):
# if len(S) == 2*n:
# ans.append(S)
# return
# if left < n:
# backtrack(S+"(", left + 1, right)
# if right < left:
# backtrack(S+")", left, right + 1)
# ans = []
# backtrack()
# return ans
ans = []
def helper(left, right, string, ans):
if right < left:
return
if not left and not right:
ans.append(string)
return
if left:
helper(left - 1, right, string + "(", ans)
if right:
helper(left, right - 1, string + ")", ans)
helper(n, n, "", ans)
return ans
|
[
"[email protected]"
] | |
846876364bc01fda2b044a0b561e2709369cd56c
|
268d9c21243e12609462ebbd6bf6859d981d2356
|
/Python/python_stack/Django/BeltReview/main/apps/books/models.py
|
fddd59aa3b548da3b7fdfa2c3d3484b1350a19f0
|
[] |
no_license
|
dkang417/cdj
|
f840962c3fa8e14146588eeb49ce7dbd08b8ff4c
|
9966b04af1ac8a799421d97a9231bf0a0a0d8745
|
refs/heads/master
| 2020-03-10T03:29:05.053821 | 2018-05-23T02:02:07 | 2018-05-23T02:02:07 | 129,166,089 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,886 |
py
|
from __future__ import unicode_literals
from django.db import models
from django import forms
from django.core.exceptions import ValidationError
# Create your models here.
class UserManager(models.Manager):
def basic_validator(self,postData):
errors={}
#validate password
if len(postData['password']) < 8:
errors["password"] = "password should be more than 8 characters"
#checks that the passwords match
if postData['password'] != postData['confirm']:
errors["confirm"] = "passwords do not match"
return errors
class User(models.Model):
name = models.CharField(max_length=255)
alias = models.CharField(max_length=255)
email = models.CharField(max_length=255)
password = models.CharField(max_length=255)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
objects = UserManager()
class AuthorManager(models.Manager):
def validate_author(request, postData):
errors = {}
return errors
class Author(models.Model):
author = models.CharField(max_length=255)
objects = AuthorManager()
class BookManager(models.Manager):
def validate_book(request,postData):
errors = {}
return errors
class Book(models.Model):
title = models.CharField(max_length=255)
author = models.ForeignKey(Author, related_name="books")
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
objects = BookManager()
class ReviewManager(models.Manager):
def validate_review(request, postData):
errors = {}
return errors
class Review(models.Model):
rating = models.IntegerField()
comment = models.TextField()
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
book = models.ForeignKey(Book, related_name="reviews")
user = models.ForeignKey(User, related_name="reviews")
objects = ReviewManager()
|
[
"[email protected]"
] | |
3e4331ea4515d8ab9a244201033c44ae2211e3db
|
d4cd2476f8fa8a7d94e183a68bd0678971310c5b
|
/checkio/06_Ice_Base/06_IceBase_04_FunnyAddition.py
|
9030b3fb8d1063f001b7c9e2d024d3d76144968e
|
[] |
no_license
|
gwqw/LessonsSolution
|
b495579f6d5b483c30d290bfa8ef0a2e29515985
|
0b841b1ae8867890fe06a5f0dcee63db9a3319a3
|
refs/heads/master
| 2020-07-05T19:15:53.758725 | 2019-10-01T11:34:44 | 2019-10-01T11:34:44 | 202,744,145 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 218 |
py
|
def checkio(data):
"""The sum of two integer elements"""
return sum(data)
if __name__ == '__main__':
assert checkio([5, 5]) == 10, 'First'
assert checkio([7, 1]) == 8, 'Second'
print('All ok')
|
[
"="
] |
=
|
c78554bfaf8bee6f13777307c2c97139d339f973
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02390/s457532968.py
|
390a81631bac8de1e3a93db961d2ef9a82cb8ed1
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 146 |
py
|
import sys
line = sys.stdin.readline()
inp = int(line)
h,mod = inp//3600, inp%3600
m,mod = mod//60, mod%60
s = mod
print ("%d:%d:%d" % (h,m,s))
|
[
"[email protected]"
] | |
b8a62fa93f2532714aacb95518a96010cd6afe03
|
fffa7b13491deadfc649dfd035099ef764d8d303
|
/api/tests/mathematical_object_detail.py
|
3ecfae51fd020c715c1a8504027fcc57a26800f4
|
[
"MIT"
] |
permissive
|
Gawaboumga/OEMS
|
3b12b8bebbe4b29716e8be4e22034ec394af36da
|
1e60fa1f350f4cf1ca2e48072e0b4228eeb15024
|
refs/heads/master
| 2022-12-14T11:15:55.797241 | 2019-01-22T10:22:42 | 2019-01-22T10:22:42 | 147,358,167 | 0 | 0 |
MIT
| 2022-12-08T01:26:59 | 2018-09-04T14:20:58 |
Python
|
UTF-8
|
Python
| false | false | 4,231 |
py
|
from rest_framework import status
from rest_framework.test import APITestCase
from django.test import override_settings
from django.urls import reverse
from oems.settings import TEST_MEDIA_ROOT
from api.models import MathematicalObject
from api.tests import utils
@override_settings(MEDIA_ROOT=TEST_MEDIA_ROOT)
class MathematicalObjectDetailTests(APITestCase):
def test_retrieve_small_mathematical_object(self):
utils.log_as(self, utils.UserType.STAFF)
representation = 'test'
type = 'S'
data = {
'latex': representation,
'type': type,
}
response = self.client.post(reverse('api:mathematical_objects'), data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.get(reverse('api:mathematical_object', kwargs={'pk': response.data['id']}))
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.data
self.assertEqual(representation, response_data['latex'])
self.assertEqual(type, response_data['type'])
def test_retrieve_full_mathematical_object(self):
utils.log_as(self, utils.UserType.STAFF)
representation = 'test'
type = 'S'
function = 'function'
name = 'name'
tag = 'tag'
convergence_radius = '|z < 1|'
data = {
'latex': representation,
'type': type,
'functions': [{'function': function}],
'names': [{'name': name}],
'tags': [{'tag': tag}],
'convergence_radius': convergence_radius
}
response = self.client.post(reverse('api:mathematical_objects'), data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.get(reverse('api:mathematical_object', kwargs={'pk': response.data['id']}))
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.data
self.assertEqual(representation, response_data['latex'])
self.assertEqual(type, response_data['type'])
self.assertEqual(function, response_data['functions'][0]['function'])
self.assertEqual(name, response_data['names'][0]['name'])
self.assertEqual(tag, response_data['tags'][0]['tag'])
self.assertEqual(convergence_radius, response_data['convergence_radius'])
def test_put_small_mathematical_object(self):
utils.log_as(self, utils.UserType.STAFF)
representation = 'test'
type = 'S'
data = {
'latex': representation,
'type': type,
}
response = self.client.post(reverse('api:mathematical_objects'), data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
new_type = 'P'
data['type'] = new_type
response = self.client.put(reverse('api:mathematical_object', kwargs={'pk': response.data['id']}), data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.data
self.assertEqual(representation, response_data['latex'])
self.assertEqual(new_type, response_data['type'])
def test_delete_full_mathematical_object(self):
utils.log_as(self, utils.UserType.STAFF)
representation = 'test'
type = 'S'
function = 'function'
name = 'name'
tag = 'tag'
convergence_radius = '|z < 1|'
data = {
'latex': representation,
'type': type,
'functions': [{'function': function}],
'names': [{'name': name}],
'tags': [{'tag': tag}],
'convergence_radius': convergence_radius
}
response = self.client.post(reverse('api:mathematical_objects'), data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.delete(reverse('api:mathematical_object', kwargs={'pk': response.data['id']}), data, format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(MathematicalObject.objects.count(), 0)
|
[
"[email protected]"
] | |
066a5edb911a9b5069125b1aee9dfad1bbc78dbb
|
7d74195bd00cbe8516670c8fe718e983106c9830
|
/src/data_types/test_collections_ordereddict.py
|
ee4fe8c69fee1eec3bc707d6f7b10d39022930d8
|
[] |
no_license
|
masa4u/example_python
|
7ab3d48020855ad493336afcd8d0c02eb3104b2b
|
7bdee4cb8e90255b20353f7f95d3e879f6462638
|
refs/heads/master
| 2021-01-18T14:10:56.539659 | 2017-03-28T12:52:08 | 2017-03-28T12:52:08 | 30,511,470 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 147 |
py
|
d = {'banana': 3, 'apple':4, 'pear': 1, 'orange': 2}
from collections import OrderedDict
print OrderedDict(sorted(d.items(), key=lambda t:t[0]))
|
[
"[email protected]"
] | |
4f17a87004d2e33cbb26f6d49b7cb84a0b7ffef9
|
70532360ddfdd8006bf7044c117403ce837cef0a
|
/code/Rplot.py
|
cd1f9b2b402c74ca5ecf9502d4eba1665cd10a9b
|
[] |
no_license
|
wsgan001/campus_wifi_analysis
|
09a7944f5019f726682925c8785cdf5f7d8c469a
|
c470135691ff8faad3cb4755301e4f59389e2c5a
|
refs/heads/master
| 2020-03-10T11:09:05.579870 | 2017-03-03T07:13:57 | 2017-03-03T07:13:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,312 |
py
|
# -*- coding: utf-8 -*-
import fileinput
user = {}
for line in fileinput.input("../data/select/select_a"):
mac = line.strip().split(" ")[0]
user[mac] = True
fileinput.close()
with open("../data/plot/R_trace_all","w") as f:
f.write("mac time dura\n")
for line in fileinput.input("../data/feature/trace_all_statistic_filter"):
part = line.strip().split(" ")
mac, objs = part[0], part[3:]
if user.has_key(mac):
for one in objs:
tag, rto = one.split("@")[0], str(int(one.split("@")[1].split(",")[0])/42)
if tag in ["0","1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19","20","21","22","23"]:
f.write(mac+" "+tag+" "+rto+"\n")
fileinput.close()
with open("../data/plot/R_trace_online","w") as f:
f.write("mac time dura\n")
for line in fileinput.input("../data/feature/trace_online_statistic_filter"):
part = line.strip().split(" ")
mac, objs = part[0], part[3:]
if user.has_key(mac):
for one in objs:
tag, rto = one.split("@")[0], str(int(one.split("@")[1].split(",")[0])/42)
if tag in ["0","1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19","20","21","22","23"]:
f.write(mac+" "+tag+" "+rto+"\n")
fileinput.close()
jac = {}
for line in fileinput.input("../data/jaccount/jaccount_taged"):
part = line.strip().split(" ")
dev, mac, sex, sta, col, age = part[0], part[1], part[2], part[3], part[4], int(part[5])
if dev == "mobile":
jac[mac] = {'sex':sex, 'sta':sta, 'col':col, 'age':age}
if sex == "男性":
jac[mac]['sex'] = "Male"
elif sex == "女性":
jac[mac]['sex'] = "Female"
if age <= 20:
jac[mac]['age'] = "<=20"
elif age > 20 and age <=22 :
jac[mac]['age'] = "21~22"
elif age > 22:
jac[mac]['age'] = ">=23"
if col == "电子信息与电气工程学院":
jac[mac]['col'] = "TOP1"
elif col == "机械与动力工程学院":
jac[mac]['col'] = "TOP2"
elif col == "材料科学与工程学院":
jac[mac]['col'] = "TOP3"
elif col == "船舶海洋与建筑工程学院":
jac[mac]['col'] = "TOP4"
elif col == "安泰经济与管理学院":
jac[mac]['col'] = "TOP5"
fileinput.close()
with open("../data/plot/R_trace_all_cor","w") as f:
f.write("mac Acad Adm Ath Cant Hosp Lib Soc Supp Teach Other sex age\n")
for line in fileinput.input("../data/feature/trace_all_statistic_filter"):
part = line.strip().split(" ")
mac, objs, user = part[0], part[3:], {"Acad":"0","Adm":"0","Ath":"0","Cant":"0","Hosp":"0","Lib":"0","Soc":"0","Supp":"0","Teach":"0","Other":"0"}
for one in objs:
tag, rto = one.split("@")[0], one.split("@")[1].split(",")[0]
if tag in ["Acad","Adm","Ath","Cant","Hosp","Lib","Soc","Supp","Teach","Other"]:
user[tag] = rto
f.write(mac+' '+user['Acad']+' '+user['Adm']+' '+user['Ath']+' '+user['Cant']+' '+user['Hosp']+' '+user['Lib']+' '+user['Soc']+' '+user['Supp']+' '+user['Teach']+' '+user['Other']+' '+jac[mac]['sex']+' '+jac[mac]['age']+'\n')
fileinput.close()
with open("../data/plot/R_trace_online_cor","w") as f:
f.write("mac Acad Adm Ath Cant Hosp Lib Soc Supp Teach Other sex age\n")
for line in fileinput.input("../data/feature/trace_online_statistic_filter"):
part = line.strip().split(" ")
mac, objs, user = part[0], part[3:], {"Acad":"0","Adm":"0","Ath":"0","Cant":"0","Hosp":"0","Lib":"0","Soc":"0","Supp":"0","Teach":"0","Other":"0"}
for one in objs:
tag, rto = one.split("@")[0], one.split("@")[1].split(",")[0]
if tag in ["Acad","Adm","Ath","Cant","Hosp","Lib","Soc","Supp","Teach","Other"]:
user[tag] = rto
f.write(mac+' '+user['Acad']+' '+user['Adm']+' '+user['Ath']+' '+user['Cant']+' '+user['Hosp']+' '+user['Lib']+' '+user['Soc']+' '+user['Supp']+' '+user['Teach']+' '+user['Other']+' '+jac[mac]['sex']+' '+jac[mac]['age']+'\n')
fileinput.close()
# 1:renren, 2:baidu, 3:sina, 4:taobao, 5:qq
mapping = {'1':'1','2':'1','3':'1','27':'1','46':'1','64':'1','69':'1',\
'5':'2','6':'2','21':'2','22':'2','26':'2','60':'2','63':'2','70':'2','77':'2','80':'2','93':'2','98':'2',\
'11':'3','15':'3','16':'3','17':'3','23':'3','24':'3','28':'3','29':'3','51':'3','82':'3','84':'3',\
'19':'4','23':'4','36':'4','39':'4','42':'4','56':'4','57':'4','58':'4','59':'4',\
'20':'5','31':'5','41':'5','45':'5','48':'5','86':'5',\
}
with open("../data/plot/R_trace_http_cor","w") as f:
f.write("mac renren baidu sina taobao qq sex age\n")
for line in fileinput.input("../data/feature/trace_http_statistic_filter"):
part = line.strip().split(" ")
mac, objs, user = part[0], part[3:], {"renren":0,"baidu":0,"sina":0,"taobao":0,"qq":0}
for one in objs:
tag, rto = one.split("@")[0], int(one.split("@")[1].split(",")[1])
if len(tag.split("+")) == 2 and tag.split("+")[0] == "WD" and ":" in tag:
tag = tag.split("+")[1]
hst, typ = tag.split(":")[0], tag.split(":")[1]
if mapping.has_key(hst):
top = mapping[hst]
if top == "1":
user['renren'] += rto
elif top == "2":
user['baidu'] += rto
elif top == "3":
user['sina'] += rto
elif top == "4":
user['taobao'] += rto
elif top == "5":
user['qq'] += rto
f.write(mac+' '+str(user['renren'])+' '+str(user['baidu'])+' '+str(user['sina'])+' '+str(user['taobao'])+' '+str(user['qq'])+' '+jac[mac]['sex']+' '+jac[mac]['age']+'\n')
fileinput.close()
|
[
"[email protected]"
] | |
caff9c7cb685bc07ae6b58176aa41c8d83544348
|
9f0a4262c4402201df1cdd5674a679543f4a50b5
|
/shaderLibrary_maya2017/resources/__init__.py
|
05e522a865f16bd93dd2591fa2f1e5a4d20967ec
|
[] |
no_license
|
subing85/subins-toolkits
|
611b6b3b3012ccb023096f6e21d18d2bda5a534b
|
d02af1289ec3ee5bce6fa3d78c134a8847113aa6
|
refs/heads/master
| 2022-07-12T17:19:57.411454 | 2022-07-01T20:37:16 | 2022-07-01T20:37:16 | 168,826,548 | 11 | 2 | null | 2022-07-02T01:03:34 | 2019-02-02T11:51:25 |
Mathematica
|
UTF-8
|
Python
| false | false | 1,087 |
py
|
import os
from shaderLibrary_maya2017.utils import platforms
CURRENT_PATH = os.path.dirname(__file__)
MODULE = platforms.get_tool_kit()[0]
def getInputPath(module=None):
return os.path.join(
CURRENT_PATH, "inputs", "{}.json".format(module)
)
def getIconPath():
return os.path.join(CURRENT_PATH, "icons")
def getPreferencePath():
return os.path.join(getWorkspacePath(), "preference")
def getWorkspacePath():
return os.path.join(os.getenv("HOME"), "Documents", MODULE)
def getPublishDirectory():
return os.path.join(
os.environ["HOME"], "Walk_cycle", "characters"
)
def getResourceTypes():
data = {
"preference": getPreferencePath(),
"shader": getWorkspacePath(),
"generic": None,
}
return data
def getToolKitLink():
return "https://www.subins-toolkits.com"
def getToolKitHelpLink():
return "https://vimeo.com/314966208"
def getDownloadLink():
return "https://www.subins-toolkits.com/shader-library"
# end ####################################################################
|
[
"[email protected]"
] | |
64ebd8dc8dee1409f7462da7e97b36589440ca93
|
897d82d4953ed7b609746a0f252f3f3440b650cb
|
/evening/20200615/demo3.py
|
fb8a2467fdd7cd54f0e4530ae9c506eeaa9352c6
|
[] |
no_license
|
haiou90/aid_python_core
|
dd704e528a326028290a2c18f215b1fd399981bc
|
bd4c7a20950cf7e22e8e05bbc42cb3b3fdbe82a1
|
refs/heads/master
| 2022-11-26T19:13:36.721238 | 2020-08-07T15:05:17 | 2020-08-07T15:05:17 | 285,857,695 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 947 |
py
|
class GParent:
pass
class Parent(GParent):
def __init__(self,atk,hp):
self.atk = atk
self.hp = hp
def attack(self,target):
pass
def damage(self,value):
pass
# The player attacks an enemy; the enemy takes damage and may die
class Player(Parent,GParent):
def attack(self,target):
print('黑虎掏心')
target.damage(self.atk)
def damage(self,value):
print('小样你敢打我!')
self.hp -= value
if self.hp <= 0:
print('太菜了')
class Enemy(Parent):
def attack(self,target):
print('普通攻击第一式')
target.damage(self.atk)
def damage(self,value):
print('玩家打人啦')
self.hp -= value
if self.hp <= 0:
print('a~~~~')
print('爆装备')
p1 = Player(50,100)
e1 = Enemy(10,100)
p1.attack(e1)
e1.attack(p1)
e1.attack(p1)
e1.attack(p1)
e1.attack(p1)
p1.attack(e1)
|
[
"[email protected]"
] | |
21d9a316ce6cfdf96f3a9f5edaacf77894c81bf4
|
e9d52dcf101aea0327c6b0d7e5244c91dfd62cf6
|
/spexy/adv/samples/simple.py
|
e2df8a641ff75635616d8894582fa8f83e6bf7dd
|
[] |
no_license
|
drufat/spexy
|
6eba9f44a5539245486cd4ef8fefd24bdb7ade6a
|
53255009c1830501986afbf6688142ddefe17b9a
|
refs/heads/master
| 2021-09-18T19:51:47.313946 | 2018-07-19T05:09:02 | 2018-07-19T05:09:02 | 100,453,374 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 179 |
py
|
# Copyright (C) 2010-2016 Dzhelil S. Rufat. All Rights Reserved.
from sympy import sin, cos
def V(x, y):
return (-sin(y), sin(x))
def p(x, y):
return -cos(x) * cos(y)
|
[
"[email protected]"
] |