blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
sequencelengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ea786f962c4bb8d037ea2e65e05397c5a17f9a0c | 3474b315da3cc5cb3f7823f19a18b63a8da6a526 | /scratch/KRAMS/src/ytta/resp_func/redesign/rf_filament.py | 8567adadf509a4ecf5e11c731371ecc5217a1c3a | [] | no_license | h4ck3rm1k3/scratch | 8df97462f696bc2be00f1e58232e1cd915f0fafd | 0a114a41b0d1e9b2d68dbe7af7cf34db11512539 | refs/heads/master | 2021-01-21T15:31:38.718039 | 2013-09-19T10:48:24 | 2013-09-19T10:48:24 | 29,173,525 | 0 | 0 | null | 2015-01-13T04:58:57 | 2015-01-13T04:58:56 | null | UTF-8 | Python | false | false | 3,894 | py | #-------------------------------------------------------------------------------
#
# Copyright (c) 2009, IMB, RWTH Aachen.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in simvisage/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.simvisage.com/licenses/BSD.txt
#
# Thanks for using Simvisage open source!
#
# Created on Jun 2, 2010 by: rch
from enthought.traits.api import \
HasTraits, Float, Str, implements
from i_rf import \
IRF
from rf import \
RF
import os
from numpy import sign, linspace, array
from matplotlib import pyplot as plt
from scipy.weave import inline, converters
from types import ListType
def Heaviside( x ):
    '''Heaviside step function: 0 for negative x, 0.5 at zero, 1 for positive x.

    Works element-wise on numpy arrays as well as on scalars.
    '''
    return 0.5 * ( 1.0 + sign( x ) )
class Filament( RF ):
    '''Linear elastic, brittle filament.

    Response function of a single filament: linear elastic up to the
    breaking strain ``xi``, zero force afterwards.  The extra trait
    metadata (``distr``, ``scale``, ``shape``, ``loc``) is used by the
    surrounding stochastic framework to randomize the parameters.
    '''

    implements( IRF )

    title = Str( 'brittle filament' )

    # breaking strain of the filament [-]
    xi = Float( 0.017857, auto_set = False, enter_set = True,
                distr = ['weibull_min', 'uniform'],
                scale = 0.0178, shape = 4.0 )

    # activation strain (slack) delaying the filament response [-]
    theta = Float( 0.01, auto_set = False, enter_set = True,
                   distr = ['uniform', 'norm'],
                   loc = 0.01, scale = 0.001 )

    # length-related strain-reduction factor [-]
    # NOTE(review): exact physical meaning of lambd inferred from its use
    # in the strain transformation below -- confirm against the model docs
    lambd = Float( 0.2, auto_set = False, enter_set = True,
                   distr = ['uniform'],
                   loc = 0.0, scale = 0.1 )

    # cross-sectional area of the filament
    A = Float( 5.30929158457e-10, auto_set = False, enter_set = True,
               distr = ['weibull_min', 'uniform', 'norm'],
               scale = 5.3e-10, shape = 8 )

    # modulus of elasticity of the filament
    E_mod = Float( 70.0e9, auto_set = False, enter_set = True,
                   distr = ['weibull_min', 'uniform', 'norm'],
                   scale = 70e9, shape = 8 )

    # control variable: global strain with its control range
    eps = Float( ctrl_range = ( 0, 0.2, 20 ), auto_set = False, enter_set = True )

    # C implementation of the same response function (for scipy.weave.inline)
    C_code = '''
            double eps_ = ( eps - theta * ( 1 + lambd ) ) /
                            ( ( 1 + theta ) * ( 1 + lambd ) );
            // Computation of the q( ... ) function
            if ( eps_ < 0 || eps_ > xi ){
                q = 0.0;
            }else{
                q = E_mod * A * eps_;
            }
        '''

    def __call__( self, eps, xi, theta, lambd, A, E_mod ):
        '''
        Implements the response function with arrays as variables.
        First extract the variable discretizations from the orthogonal grid.
        '''
        # NOTE: as each variable is an array oriented in a different direction
        # the algebraic expressions (-+*/) perform broadcasting, i.e. perform
        # the operation for all combinations of values.  Thus, the resulting
        # eps_ contains the value of the local strain for any combination of
        # global strain, xi, theta and lambda.
        #
        eps_ = ( eps - theta * ( 1 + lambd ) ) / ( ( 1 + theta ) * ( 1 + lambd ) )
        # cut off all the negative strains due to delayed activation
        #
        eps_ *= Heaviside( eps_ )
        # cut off all the realizations with strain greater than the critical
        # (breaking) strain xi
        #
        eps_grid = eps_ * Heaviside( xi - eps_ )
        # equivalent in-place formulation kept for reference:
        # eps_grid[ eps_grid >= xi ] = 0
        # transform strain to force
        #
        q_grid = E_mod * A * eps_grid
        return q_grid
if __name__ == '__main__':
    # Demo (Python 2): open the trait UI, print the parameter metadata and
    # plot the force-strain response for one fixed parameter set.
    f = Filament()
    f.configure_traits()
    print 'keys', f.param_keys
    print 'values', f.param_list
    # traits whose 'distr' metadata offers the uniform distribution
    print 'uniform', f.traits( distr = lambda x: x != None and 'uniform' in x )
    X = linspace( 0, 0.05, 100 )
    Y = []
    for eps in X:
        Y.append( f( eps, .017, .01, .2, 5.30929158457e-10, 70.e9 ) )
    plt.plot( X, Y, linewidth = 2, color = 'navy' )
    plt.show()
| [
"Axel@Axel-Pc"
] | Axel@Axel-Pc |
f51f5d126e624ce6f581af3758624e5393048c69 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_75/741.py | 508e825c69685e3c508599526da9af0a4ddab4fe | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,588 | py | #!/usr/bin/env python
input = """100
0 0 2 EA
1 QRI 0 4 RRQR
1 QFT 1 QF 7 FAQFDFQ
1 EEZ 1 QE 7 QEEEERA
0 1 QW 2 QW
14 QEC WRB QQG FDI RDH EAK FQJ ADM DWL RRO FEP RSY FSX ASZ 9 DQ DF FQ WR DA SF WS DW QA 88 WRADEADWFWRWRFQEFSWFEDWADFDRRFDFERREAWFQFQRRFRRWREWRFQQEASDWSEQQQWWRRSRRQQRDFQADQQSDQERS
16 RSC DAB WWG FFI FWH REK ARJ AQM ESO EEN AFP DDT AAV QDY FRX SSZ 23 AQ EQ DS RQ EA QS WE DF RW WD QD FA AR WS WA FE DE ER SR QF FR RD DA 79 ESRSEEFRESWAFSWRDDAESQDREWWWDDDWWWDEQFFFFDAQQFRARAFARRDDDFFAAEERWDASFRSQFWAFSSF
36 FQZ FRZ FSZ FFZ FWZ DDZ DEZ DAZ WWZ FAZ FDZ FEZ WQZ DWZ DRZ DSZ DQZ RWZ RQZ RRZ REZ AAZ AEZ EEZ AQZ ARZ AWZ EWZ EQZ QQZ SSZ SRZ SQZ SWZ SAZ SEZ 0 99 ASQQREEDFWQWWSRFQAWDRWADQWFRQFFRWQSDSSEEADRARDFEFDRESAAFFSAWQDESEWSWQFREDWDWSDQRDRRQWFFASWFSSDWSREW
0 4 QW QE WR ER 14 EASDFASDFASDFQ
15 SDC WWB QRG QDI EAH RRK DDJ EEM WAO DAN SWP DWU SFT DRV RAY 25 SF AQ FQ QW RQ FW DS RF SR AS DQ AW EW RE QE DF DA AE AR WS QS FE ED FA RW 86 SFRRWWDRESWSWEADADDQRSFSFRWWEEDRWAWWFWWQQDEWASFSDASQRSDEEEQDDWAFRSFRAWAWWSFAAQDSFQRQDF
16 QSC SRB FEI FWH EEK AWJ RAM DAL QEN RDP WRU DFT AEV WQY SWX AAZ 23 SE WQ AR WF QR SW DE WD DQ SF RW WA EW RD QF FR EA AS FA DA QE EF AQ 82 WRRAEAWAWDAAADADFQEFWWQEWRARARAWWRQSQSQDFRAQSAQSDQFRSREDAFFSREEEDFAAFESRDAWDQSSWQQ
16 QDC AWI QRH EAK FFJ QSM SFL SAO FRN DRP WWU DET WEV EEY DSX ARZ 15 WE AS WQ QS ED RE FA QA QR FD SD AR EF FS SR 84 FEAAWFFWEDWWWASADRDARSFQDDSWEQSSFQEAFFDRSSWDWEQRSFDFRWWQQRRWEAWDRDEDSDSSWWAQWWAREEFF
0 0 1 F
16 QEC AQB FFG WDI AAH DSK ADJ FEM RDO SWN SQU WWT WAV SEY RQX SFZ 22 AQ ER SA ES QD SD RA FD FQ AF FE FS RW RS EQ AD WF AW DR QS SW RF 84 ADFEWWRQQFAARQEESFSWQEQWASEARWDQERSQADADFERDSFAQFEFEFRRQFFWEQEQQWASERRQQEAWAADDSWARQ
14 RRB WQI AAH FQK ASM DEL AFO DAN WDP FDU EET ARV SSY QEX 10 DS WA RD AS AR DF QF ER QA WQ 88 EESSERRWDAFWDRRFQRDADEASDEAFRREEAFWFRWDDESFQAQAARQEAFAAWQEARWQWQFDSSARFQAAASASASWRRFQFDF
18 WRC QDB WAG QWI ASH ESK DAJ FRM FAL QEO FDN QFP SSU EWT RQV EDY RAX SRZ 27 WE QW AR EF AD SR WR SE DW ER AS QE DE WF AW FR QR QS FD SD AF QD WS DR FQ FS QA 89 AEWEDQEESRSRQFRAEDFEDFDWAQRQEWQDQFWAFDRQSSSQDFDFDSFDDASQASFRFAQEEWASQWRWREFRWAFAFESWREWFA
17 SRC EWB RQG QWI QFH AEK SSJ WRM REL DRO DSN FFP QEU DQT WAV AFY DDZ 14 WE QA DS RE RF DA DR SF DQ AF FQ FE WF AE 89 DQARQQFDAEFFDDAEREEDQDDDRSRRQSEEWQFAFEWFRQWAAESSRRQQWSAFQWDSDQWAAEDREWQEWRSRQEDQWQFQESSSQ
0 1 DR 10 FREEREWARD
16 QRC QWB DAG FDI RSH AAK QSJ FQM QQL SDO RRN RWP ERU EET DWY FEX 7 EW SQ RA AS AE SW DQ 85 DQQWWWQSRRRSDWSDQSFDDAQWDWRSDDQRDRRRFQAASSRRAAERSQQFDEERSRWAFDADWEEQFESAARSFEQSEEEEQQ
16 DWB QRG FSI ARK QWJ SDM EWL AFO EAN QSP FQU EET DDV SWY RRX FWZ 21 AD DS SE QF FE QE EW RA AW AS FS WQ SR QR DR SW FA SQ ER DE FD 86 FFSAARDWAFEEDDDDFFQQRFWDFQAEESWDDRDEAWDDQWSWQRQSEERQWDDFQFWQRWAQEWDDDDARFFQSDQWRRWRRFQ
14 AFC DFB QSG RDI ARH ESK DAM FWL AEO FRP EQU RRT DSV EEY 11 RW QR SD QW RS AQ EA DF SF WE AF 83 FSAQSAERDFWDWFWEFWDSQSRQSFREDSSWQQRDDAAEDADAWFWEESESEEDFAREQDFAEAFAEFRWDADDFDFAFQRR
1 AAZ 1 AQ 10 AAAAAAAAAA
15 SEB EFG WDI SFH WAJ SQM DSL EWO FFP SSU QAT QQV FDY QRX AEZ 19 SW RD RQ DW EW FS DA SE QD RF AF QA SD DF AE QS AR EF RE 83 WWWDWASFSSEWSQSAEFFFRFFWDWRFSSEFQQQSFWQASSFDQQQEFRSSSEEFADSWDQEWDSFFSQEDSSSQRDSQARE
18 SFC RSB FDG EDI EWH ESK AWJ RAM WRL QFO REN FWP DRU AST QWV FFY AQX EEZ 23 ED WF ES WE AE SA EQ SD QA RS AW SQ WD WS AR QR QD FE WQ WR RF RD DA 92 WSFFFFDFDFDSFASEDQFRAEEAWAWWRAWAQAWAAWAQAWWRAQAASEDESEDFFAQEWAQESQWWASRAEQFSREDREFDQFRSRSSFW
14 FEC DEB EQG SWI AQH RRK DAJ DSM AFO QSN QQP SSU FFX DWZ 5 AD SW AS WF RA 83 QQQFDAEQRRSSEDWFEAFDSEDEDARDAFFDSQSASSDRRSSFESWFDAWRSWFEQEAQFFRRQQFESWEEESSQQSRRQQQ
0 1 DR 8 REWARDED
10 RSB DWI ADK FRL WSO EEN WFU QAT QSV AAY 0 41 RSESWFREQAWSDRDWEEADAAWSWFRWSQSAFRQADWEEE
14 SQC FDB ESG SSI AQK FQJ SWM RWL FFN EWP AFU WAV EEY AEX 14 DQ RE DA SW QS AE EW SA AW RW QE DE FW QF 78 EESSAFQDEQRFDSWDFDRAFSFQAQFQSWAEAQDDWWQEWRWFQFQFFFFFEDSQQESARWAASQSQSWEEFSSAFD
0 4 QW QE RW RE 14 QASDFASDFASDFW
0 4 QW QE WR ER 14 QASDFASDFASDFE
15 WQB RRG FSI DWH EAK EWJ RWM EQL ASN EDU FRT SSV WSY SQX EEZ 9 EW DF DE SD WF WA FR FE EQ 90 EDSSRRDASFRAFSEAEAASRWDEDEQFRWQASEARWWQRRASREAESWSFSSSEESQDWFEWAEEEQEQRRASSSRWFFSSQSSEAAAS
1 ASG 1 AS 10 AQWERASDFS
15 ESB QDG WFI RQH SQK FFJ WAM DSL RAO SSP ASU EWT FDV EEY AAZ 19 AF FW RW RQ SR QD ES AE SD QA EF DR EW AR DW QF EQ FR ED 85 FFQDWFRQESSEWEWQDQAADEESQEWFRWAEFEEAAFDWRRAFFQFFSEEWAESSESFDASDSASAFQDRQDESWFRQWAAAES
0 28 FQ FR FS FW DE DA FA FD FE WQ DW DR DS DQ RW RQ RE AE AQ AR AW EW EQ SR SQ SW SA SE 94 AESDEESDAEEFQDQDEFWRWSSFEWEAQQSASFSDFDARSWQWQDEEDEWEEAAAWRSERRAFRSRWSESQARAFWWREFSDAESWWQAARAF
13 DAC REG SSK QQJ WDM FEL WEO WSU RRT FFV DRY RAX EQZ 20 SR RD ES DF AE AQ RF QS EF EQ ED AD AW WF SF WQ AF WD EW QF 87 FEDRSSRRSSSFWEEEQDRARREQWSREWSWDSDRAQQFFDRWDSSSRRRRWRARRFFWSDRDRDFFWSFEEQAFQQWDEQDAWSWE
14 SRC AAB QWG DAK WDJ DEM SSL QAN ESU DFT FFV WWY QFX EEZ 5 WE SD DA AQ RF 80 EEFFRAFFSSFSRFRQFWWQWDFEEEAAESESQAASQSFQFDFRQDSDERDAWDQFQFAESDAQFQFFFEQWAFEEESDE
18 FDC DDB ADG QFI RWH WWK EDJ FRM SAL RQO SDN QSP RRU EET FSV SEY QDX QQZ 26 FD AS RW RD RQ EQ AR WQ AW SW ER WD WF AF QS AE DE EW RS QD ES AD FE SF AQ SD 88 FDRQFEDQDQFFSQADQDDDFSFFDRWEERWSAREEWDDQQSDQQRREEEESEEFDSEQQFDFRDRQEEEQSRQSRSRQWWWWEADDD
15 WWC RQB RDI DEH FSK SEM QEL WFO AQN DWP RAU SWT AFY RRX EEZ 6 EW AD QR FR SA DF 87 RARWFDRARADQAQDEFSFSSRDSEWRRSDEQERQRRDWQEDEWFWWQDFSQSWRRRQRDADWAQAFAQDWWFWFWFQEEESSEEEE
18 WRC FQB QWG RDI SWH EWK FEJ FSM SRL RQO AFN DWP EEU EAT AWV DDY SSX QDZ 2 SW DS 86 SWWREAFQRQQWQFQFQDSWSREWRQSDWFFERQESRFSAWEERDRRQESSSREEFSDDDDFERRDAFRDSSSEDFQFESWWQDRR
15 EEB RQG ASI DWH FWK ESJ QWM WRL FAO FRP AEU SST SFV DEY AAZ 3 QA AS RA 82 FFWSFRSSSSRSQASDERQAAWRESDWFADDAEQWDWFWARQQDAASFFADEQWFWWRSDFAWRRDRQFDQWQFAFREEWWR
16 AQC SQI WSH WEK DAJ WDM WWL AFO QEN ERP RRU RST DFV FFY EDX EEZ 23 RS RF DS SW ER DF RQ SQ SE RA WE AF AS DE FS AD QW QE DW WA AQ EA FQ 83 WESWWAQRRWEWWWEAQFFERFFEAAQAQEEWDDARRSWEESERDWWAFWWEASQFWWQQESQDQWSEDFAFSDFSFRSRSDA
4 SDB DDM ERT EWN 0 70 EWAEWAEWAEWAEWAEWAEWAEWAEWAEWAEWAEWAEWAEWAEWAEWAEWAEWAEWAEWASDAERDDAEW
16 ADC SFB WSG DDI EWH QQK RAM RSL FEO EEN WAP FQU FAT DFV DEY SSX 17 RA AW EQ RQ SF AD DF DS ES DW WR AQ QW RE AF FQ DE 88 ADADAQQQRAAEEFWSADWADFEESSESSSDEFERRSWSDFQQRAADQEWDFRFADSFFRSDDQQSFSSAEEFQWSWARARAWAQQEE
15 FDB AAI DEH FRJ QEM FSL QSO RRN ESP QAU DRT RQV SAY WEX FWZ 23 WF WQ WD DR FD ED SD RA ES DQ FA AW EA SA SW FR AD SQ RW EQ EW ER SF 79 WEFSFRDRAAADSFWQWQFSRRWERWAEFDWEFFRAFDRQWWARSWQAQARQAAESSFWDEEFDSAAAQASAQEDRQQS
15 QQC FEB AEG DDI WRJ SAM SFL WQO FDN FQP ERU FAT ADY RQX SSZ 16 RA AW FE DW SR QE QW DF WS DQ SD DA RF QR AF DR 80 WFARQRWRRDWQSFFEWRWSAAEFQSERQWSFSSWFEEWERSSFDWSDDESWRWWRWRSQQADFARAEQQWQSFWWRFAW
16 SEB ADI SAH EQK SDJ EEM AWL FEO FDN ERP SWU QAT RWV AFY DQX DDZ 0 86 QEEDQEQEQEREQDQQSESAQAQAESWSARWSESWFEARRFSDDDADAFDRERFDAWQRSWEEEFEEQSAADEERFDFEAFADQSW
14 SEC SSB WRG ASI DWK EAM ARL DRO FWN FRU DQT AFV FSX ERZ 5 RW QW RD AQ SA 87 DQAFEAASWRSSSSFSWDWEFWFAFADWARAFARASQEAERDRFSERDWRQFWWSSEAEAEAFDSSFSFRDQSEQWFWDRRSEDRDW
14 SDG RDI DWK SWJ EDM SFO FFN ADP ESU WAT RQV WWY SSX QEZ 7 DE DS ER EF AF SA FQ 80 DWDSFASRSFEDDWRQEADQFEDESAAWARRQSDQESSQEADWWADQADWFRAEFFDWRQRDSWFDWESDWSSAEERQAD
15 SFC RSB FRG AAH SEK DQJ DWM WFL AEO ARN WRU WQT FEY WSX QFZ 1 QD 83 RSDSEQRFRDWADSEWWFQFDQSFWQWSWQSSSQFFRFEFASEARWRRWSDFRFDQWQAEWRAEFEDFDQAAFRSFFDWWFAA
13 SQB FQG WSI RSH SEK WAJ EAM FDN ARU QDT QEY DDX EEZ 17 DF ES AQ DE WE RW WA RS FA WD FQ AE QD AS RE AR WS 78 QEEERFQWSDARQDRWDWRERSAREADDDSRSFDEWAFDARSEFDFWSSWADARWASQRFDAREFQWAWQQAEAFWWA
17 DFC ESB RSG DQI SAH EFK REJ FAM WAL SQO ADN QRP WQU EET FFV DDX RAZ 1 QW 89 RSFDQDQWQSSQESSWAEFQEFWQQRFASAEEWAAEFEREREFRSFFFFRSWDFRARSDQQRRSEFSFFRSEADRSEEFAEDQSQDDAD
15 DDC EFB FRG FWI DFH REJ SAM FFL SFN WQP ARU SDT AAY SEX WRZ 7 DA RA RD FD SE SQ SW 83 FWFFREFRSAFWDQEEFARSEFWQSEARSWQSFRQADDAWWRWEFQAFWQFFFWWQWRASDAAFRAAREDDREWQQWRDFRRE
15 RSC EAB DWI QFH SEK AWL RDO QWN DDP WWU WET FRV AQY FFX FDZ 7 FA WR QR QF DR WA WQ 79 AAWRSSEFRDEQFAWFRDWFFSFARSQEQWQWWWWFDRDERQFRSFRAQDWWEQEQFEEEARSWDDWEQFWSFFSAWAQ
15 QSB AEH AAK ARJ FDM ERL WRO QEN WAP SAU DWT RSV RFY DDX EDZ 6 DA DW RQ RW SR DS 87 ERDWSARFAEFAERDQDWERRSAEWAWAAAAARARQSQSFDRFQQSQWRDWRFRQSFDAFFAEDRARRSAAEDRFWRRFDDARQEER
8 RQC FEI WAH DAK WFL RSO AQU FST 0 77 WWAFERQWASEAFSSWARSAQWFDFEFSAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAE
15 WFC QDB EDG SQI ARK AEJ SEM QEL RFO FAN SSP RDU RWY EFX DDZ 9 QS AS AD DS AE AW RW QW ES 82 SSAERFWSSDSSDDDSESSFSESQASWFQDWQEDDFEFWQDFAWEEDRWEEFRDQDFFAAARQDARWWSQWEFFASEQEWFA
16 AAC RFB DQG WWI AFH RAK EAJ DEM SWL FFO FEN ASU EET RSV RRY EWX 11 QF DR AF DF FS EF QD RW QE DE EA 79 RSDRAWFQEWDEAAAARRSWRWWRFESDEDQRFSDWDDEEEAAWRSRSFFFFAFQSQEAEEASERFDQAAQWDEAWAWW
13 DDC SWB SFG QSI SAH ADK DQJ RRM ESL FWN FAV RQY AAX 14 WS QE RQ WA FW FE FD DS WR AE FS DA FQ AF 81 FQSDERRSWDDDAAFWADFWFEFASRREEESESQSSFFRSAQSFWDQWRQEAWSASQSDSFSWSARFWRRFWRQAAAESSW
15 QRC AEG RWI WSH DRJ FEM QFL DWO ESN AAU WFT ADV SFY RSX SQZ 22 QR FS AW FD QE QD SD QW QA RA SR FW ER WR ED SE FQ WE AF EF DR WS 79 FRRSRADSQAASQFEADQFRSWFWSAESFAFDRFESQDRDEDSDRQFARQRQRSRWDWAQREQWAQESEWWSAARWEAD
15 EEC AAB WEI ADK DFJ DQM FFL SFO QWN QQP RQU SWT WRV FAX SQZ 7 EA WR SW FA DE WQ AR 81 EWRWEDFFDSFDFRDWWESRQRQAFASFQWARQDQEEADQQFQWEEADWEQWQWFWESWWFFDDSDQSQFEEERAAAARSW
0 4 QW QE RW RE 14 WASDFASDFASDFQ
15 QAC RFB QDG RWI EQH SRK DSJ FQM ESN WFP DWU SST WWV AWY EAZ 15 WA RS ED SW WE QD DW WR FW RE FQ DR SE DF SD 84 SARDSWFQDSSFDDAAWFQEQRWSRWFFQQDWQDQSREADWERFDWAWSRRFARFARFSSEAQAAEAQWFQDQDESREAADWWW
16 WFC DQB AEG FFH WAK QFJ RWM RQL QQO EQN AQP DRU SRT WSV DDY RAZ 9 SR QR WS WD DE QS QA EQ WR 83 RDDDDWFDAERSREQFDWAAAQQQDWFWSAQDRDDSRRWWFDDRQDSFSAQEWDQSAEDQWAFFWSDDQQQFAQFRRWEQFRA
36 FQV FRK FSL FFT FWJ DDM DET DAH WWN FAK FDN FEL WQG DWM DRG DSB DQX RWM RQL RRC REY AAX AET EEO AQC ARO AWP EWZ EQM QQY SSU SRZ SQC SWI SAZ SET 28 FQ FR FS FW DE DA FA FD FE WQ DW DR DS DQ RW RQ RE AE AQ AR AW EW EQ SR SQ SW SA SE 91 WEAFARAWSSSRDQQRREFDRSESDEAASQRQARAQERAAEFQSDFQAEQAFAFQDEEQFDARFFFQEEEAWQDWDRAFDWQAADEDADWE
16 DFC RAB AEI SSH SWK AQJ QEM WAL QWO SQN FFP DET DDV RSY WFX FEZ 25 RQ AQ WR WS FR DQ WF QF DA AF RD DF SD SQ RE AE SR EF ES DE WQ AR WE WD WA 84 RFEQEWWASSSQQWRSFFSQQDEAESAQRRDFSAEDEAQAQFFWFQDFERAERARSRAARADESWDFAQFRWFDDFWFRSAQDR
17 EEC EQB REI EWH DAK AAJ ESM EAL QRO RWN QDP WAU WQT QQV SWY FQX FWZ 25 DR QE EF DW AW SA AE FW ER AQ WR QR WE RF DA AF QD DS ES SF QW SR RA DF QS 86 ESAAWAQDFWFQRDADAQEWEQEEFFQEQSFWDEADDFQEADSWQQSWEQDWAFFQEQESQRQDDAFDSSWWQESEAWQWQRWDRE
16 QSC QQB FWI SRH SEK AQJ SSM QDL ERO EEN RAP EDU EAT SWY DDX ASZ 19 DW ER FS RW AE WA WF QA AR DF FQ RD DS EQ WQ DA QR WS FE 86 EEQASEAQSERRADQDSRQDQQQSSERSEEDFEAWAQESEEDFWEDSSEAFDDAFSEQASEDEEDDSWRSRSSSSSDDQDSRSSDS
15 WSC SAB DDG QRI QWH SSK QAJ RAM ERN SEP EAU WFT FRV RRX FQZ 22 QE AR ER EA QA DW QW QS FD RS QF AW SE SD ED SW FS EW SA FW QR DQ 88 WSQASQEQWERQRSADDEARAWFSADWFFDWSWFEAFQQRRFRWFDQAFQFQSASAAWSQASEFRWFDDQWRQRDDWSDRSSQWSSEA
0 0 9 DEAFDEARS
18 RFC SDB QWG ERI QFH DWK FFJ FSM SSL WWO SWN ESP EEU EAT DFV EDY RSX RRZ 24 DS EQ SE RW DW EW FQ AF SR ER DF QR AR WF AD RF RD QD EF AE AQ DE WA SW 85 FFEADRFEDWEDDWEEERSFFSWEEWWDFESRSESDWERRQFAAFFFEQWQDFSDSSEDQQASSDFRRDWFSQFSWRSEAEWERA
16 ASC EWB EAG SFI FDH QAK QSJ SWM DWL FQO RDN WRP SRU DET EEY REZ 24 DE QD SD AF SF SW AS EF RF WQ WR WF RE AQ EA SR QR AW ES DA FD RD WE FQ 90 FQQSDEASQSFQEFDQAQADWDEEEASFQRDFDWQSSRFFDWRFQRASEWWFDDWEAQSQAWRESRRDEEDSFASQSFDQAEESWQDSFS
0 4 QW QE RW RE 14 QASDFASDFASDFE
15 FQC FEB REG WAI FWK SWJ EQM SEL WWN RRU AFT SFV ADY RQX SSZ 13 DA SR ES RQ EQ DE DQ RA WR WA ER QS AQ 88 DSEDFWEWDRRFEDEQSREEQDRRAFWASFWAEQEQSWSWRRRQAAADSWFQWWRRSWEQFWAFWWSERFEARERQSSRERQRESESF
14 QRC SQG FWI RSK WEJ DSM DAL RAO SAN QAP DRU AAV AFY FFZ 14 EQ RD AR AE FS DE DW ER DS FD AQ FE DQ DA 85 RFFDDARAQAFFQAEQRSAWQSASQAEAFDSFWDSFWDRWRAAAWRSQADRWEWQFRSFWDDDSRSRSWESQAFAFRAWFFRADA
15 AEC WWB ASG FWI SFK SSJ ARM FQL WQO SDP WEU ERV DRY QQX QAZ 8 FD SA WS RS AD AQ AE DE 83 FESWSWAEQADASWWRSFQQDAEDRSQAQARFDRWQFWFWSFQAARSFQQQWWFWWQFFSFFFWWWAESDEASWWSSWQERWE
15 RWC WWB WAG REH RDK QSJ RQM QQL AFO FFP RSU SWT DSV EQY FEZ 12 EF AQ RA FS SA ER AE SR WE SD AF FR 83 FERRWWARWWWDSQQFEAFAFRQWRSEDWRSEDSQSFFRFEEEQRWFEAEEFERSREWARQFERDARFESWRWFDQWAQQSEQ
0 0 10 AAAAAAAAAA
36 FQX FRI FSU FFG FWH DDH DEO DAM WWY FAV FDC FEJ WQP DWB DRC DSO DQB RWZ RQI RRM REG AAU AEM EEC AQJ ARK AWK EWK EQY QQZ SSG SRI SQG SWH SAT SEP 0 93 WRQREESSQQSEWEEWQEDQQWAESSSRDFWRDSRSEDQQDRDFDSEAWDFQADFRREQWFDFAQRFADQERRESDDRQADEQEEQSQDFDDS
0 4 QW QE RW RE 14 EASDFASDFASDFQ
16 QAC ASG EEI QQH DDJ SWM AEL FAO QSN QWP EFU FRT WWV FDY WAX DEZ 7 AD RW DF SR FE EW SD 84 EWAQFDQWAEWWSWEERASRQWEFSWSWRRDASEFQSFRWEERDFDSWDDQAAEQAQAADEQWSWWAAEWDEWWFAEQQQQSDD
1 ASG 1 AS 10 QQWERASDFS
16 AEC QWB SRG FWI RQH EFK SSJ WSM QFL RAO DRP QQU EQT QSV FDX REZ 17 DA DS QF EF RE FD FS EA EQ RD AR ED DQ WF QR WE QW 83 QSWRRAWDDAEFDSSQSDRWQWWSAQWEEQWSREFSRSAEWSAEDSRFWQQRASEFFDDQFEQRAAEEFFRFWSRQQQFDAFW
15 ASC SRB SEG ARI SFH QAK FWM FEL SWO FRN DSP AAU FDT AFX SSZ 22 FS EW QF RA AS DW QD FD WS WF QR QS ES SD FE WR QA WQ WA RF RS DA 83 SRSWWWAAQASSSEDAFSSFWFEAFAASRDDSQSRSRSEFRAREQAAAWEASFWSSFEESFQDASWFWRAFQFSFFSSRFDFW
1 ESP 0 7 DRESSES
16 SAC RQB FSG QEI RWH QSK AWJ DEM SWO DSN WQP RSU ART ADV RFY DWZ 13 SQ RA RD AE DQ RS WA WS WD DS WE FA QW 85 RWRQDRQSWWQFFDEARDEFAWEFQSAWSWAWRFDDSWQQEAWRQDSEWFSSADSDWRWRFDDSDWFSSAQERSADSAFDERSAF
16 DEC AFB SAG QWI ERH FEJ QAM FRL FFO RSN SSP DWU EWT FWV AAX ESZ 25 SW ES DS RE FQ EQ QS AR QR AF FW WE AW DA EA QA WD RW FS FE DF DE QD AS QW 83 QWSAQAFWERDFRQFRESEDWDEAFAFESEQQQDEAAQWWRSRSWAFQESDAFDDWASARSFFFEQSAEERAFEWQQAAASSF
15 WQC AAB ADG EFI RQH SFK RAJ AEL SSN WDU WET WAV WWY DDX EDZ 3 QD FQ WD 83 AEEFEFWEFWASSEFEDADSFWQDDDDSSWDDWDFWDDRWEWAFFQAEESFAASSSSADSSSADRQWAQSWERASFFAFWWRF
1 QEN 1 AQ 10 AWAREEQESS
6 EWG ASI FDO FFN SST EQY 0 48 FRASDAEQFRASDAEQEWFDSSSSAEWESSDFDWFFFDFFFRASDAEQ
14 SAC SWB DWI WRH QFK WAJ WQM EWL DSN RRU DQT DEV DDY FFX 3 EQ RD QF 81 EASADQSAFFFSWSSWWQDERSWRRRWADWSWFWQFSEWSDDQFERRADSARDWWDDARRDERFWRDDSWDEWREWDSSAE
17 FSC ERB DAG QEI ASH EEK DDJ RDM QQL RRO WSN WRP AFU EDT FFV WQY WEX 25 SA RF FE RS WR DF RA DE AE AF FQ SD QA ER DQ WE DR QW QE WA SW WD SE WF AD 86 WARRQERFFFWQFFWERFSDAWSFSWRDDWEDADAEERDQQRDFRDQQRRARRWQFSERQAFEQQAFSQASFSFQQRASFSEDRDF
14 RWB FFG FEI DWH SQK ADJ ASM AAO RAN AQP RFT WEV QQY SSX 11 FD QA DE WS FW ER WQ AF WA QS RQ 84 RWFEFERWRAQQQQWESRSQWWSWEQQEDEDWAQSSWEASFESDWASASSSRFEEAASFFARAADERWSSFEWEADRFFDRAFF
17 ASC SEB WAG WSI EDH RWJ FWM DQL EEO QQN SSP FFU QRT QEV FEY RRX RSZ 4 WE DR SD QS 86 WWARWWAQQQRRRDFEEESEQESEWSWSWQEAREQERFWQRAQEASEDASFQEWDQSSFFRSFWFEWADQQASFQWAFWFASDQWA
0 4 QW QE WR ER 14 WASDFASDFASDFQ
0 4 QW QE WR ER 14 QASDFASDFASDFW
16 RSC QQB DFG SFI QWK EWJ EQM DQL RRO EFN SQP DEU WRT AAV AWX WWZ 9 AR RQ DF EQ QW AE RF FA WD 86 RDFWSFWRADESQEQQRAQWDESFWWEWEQWWDEDDSQSAWSFQQQDQEWSDFAWWAASQSFQQQSQEFRSSFWREEFSQQWEFRR
6 RFC RRI AEH EDK DDL QDT 0 20 QDAEERFAEDERRSADDRRE
16 WQC DAB WDG SSI EAH QSK FQJ DEM RAL FFN RDP RRU WST AQV ERX ASZ 25 DS FE FR WD AD SQ SR RA SA RE RQ SF FQ QD SE WA DE QW EQ RD SW AE FA FD WE 87 SSDARRQSQSSRADAWDDEDEAFFSSFQWQQRFFFWQWQWSEQWDASDEERRAWSSSRFQAQFFASQRDWSFEAAQQSERRRSFFSD
16 FSC ERB RFG RRI WEH AWK DAL QSO DWN ARP QEU SWT DDV FFY AAX AEZ 9 RD AQ SA RE AD FW WR QF SR 86 FSFSFWFAARDAQEDQSFFAFSFSEFQSARARSWQRFSDWRFQAWRRDDAAFERFFADDAEERSWRFDDDADDDFSDAWERFQEFS
0 1 AD 19 AFEWDRESSESAREFADED""".splitlines()
# Google Code Jam 2011 Qualification B "Magicka" solver (Python 2).
# ``input`` (defined above) holds the embedded problem input, one case per line.
T = int(input.pop(0))  # number of test cases
casenum = 0
for line in input:
    casenum += 1
    case = line.split()
    # Parse the C "combine" rules: 3 chars each -- the first two base
    # elements combine (in either order) into the third element.
    C = int(case.pop(0))
    combine = list()
    if C > 0:
        # glue the next C tokens together so the rules can be sliced out
        case = ["".join(case[:C])]+case[C:]
    for i in range(0, 3*C, 3):
        combine.append(case[0][i:i+3])
    if C > 0:
        del case[0]
    # Parse the D "opposed" rules: 2 chars each -- invoking one while the
    # other is present clears the whole element list.
    D = int(case.pop(0))
    if D > 0:
        case = ["".join(case[:D])]+case[D:]
    opposed = list()
    for i in range(0, 2*D, 2):
        opposed.append(case[0][i:i+2])
    if D > 0:
        del case[0]
    N = int(case.pop(0))  # length of the invocation string (unused directly)
    elements = ""
    for c in case[0]:
        # append the newly invoked element, then apply the rules
        elements += c
        # repeatedly combine the trailing pair while a rule matches
        finish = False
        while not finish:
            for pair in combine:
                if elements[-2:] in [pair[:2], pair[1]+pair[0]]:
                    elements = elements[:-2] + pair[2]
                    break
            else:
                finish = True
        # if the last element opposes any element already present,
        # the whole list is cleared
        for pair in opposed:
            if elements[-1] == pair[0]:
                if elements.find(pair[1]) >= 0:
                    elements = ""
            elif elements[-1] == pair[1]:
                if elements.find(pair[0]) >= 0:
                    elements = ""
            if not elements:
                break
    # print as "[A, B, C]" -- list() repr with the quotes stripped
    print "Case #{0}: {1}".format(casenum, list(elements)).replace("'", "")
| [
"[email protected]"
] | |
89f1ae74a600f564db6992784235bc311a79bfe9 | beed259c9aaf824c5307d93ffa736255f2d98831 | /month06/Machine_learning/day06/01_grid_search_demo.py | 6e9ec278441e7aa3b3c2e35ec90effc1330900e1 | [
"Apache-2.0"
] | permissive | chaofan-zheng/python_learning_code | 21345f97ebf74c3cad0ef488a93ec8a7fd771a63 | 5d05848911d55aa49eaee4afd7ffd80536fad7aa | refs/heads/main | 2023-05-27T16:17:18.130492 | 2021-06-06T14:23:31 | 2021-06-06T14:23:31 | 338,234,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,889 | py | # 01_grid_search_demo.py
# 网格搜索示例
""" 网格搜索
1) 寻找超参数组合的一种方式
2) 利用穷举法, 将可选的参数组合, 选择最优者
3) 能够简化对参数选择过程
"""
import numpy as np
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm
import matplotlib.pyplot as mp
# 读取数据
x, y = [], []
with open("../data/multiple2.txt", "r") as f:
for line in f.readlines():
data = [float(substr)
for substr in line.split(",")]
x.append(data[:-1])
y.append(data[-1])
x = np.array(x)
y = np.array(y)
# 定义需要挑选的参数
params = [
{
"kernel": ["linear"],
"C": [1, 10, 100, 1000]
},
{
"kernel": ["poly"],
"C": [1],
"degree": [2, 3]
},
{
"kernel": ["rbf"],
"C": [1, 10, 100, 1000],
"gamma": [1, 0.1, 0.01, 0.001]
}
]
model = ms.GridSearchCV(svm.SVC(), # 原模型
params, # 待验证的参数
cv=5) # 折叠数量
model.fit(x, y) # 训练
print("最好成绩:", model.best_score_)
print("最优组合:", model.best_params_)
l, r, h = x[:, 0].min() - 1, x[:, 0].max() + 1, 0.005
b, t, v = x[:, 1].min() - 1, x[:, 1].max() + 1, 0.005
grid_x = np.meshgrid(np.arange(l, r, h), np.arange(b, t, v))
flat_x = np.c_[grid_x[0].ravel(), grid_x[1].ravel()]
flat_y = model.predict(flat_x)
grid_y = flat_y.reshape(grid_x[0].shape)
mp.figure("SVM RBF Classifier", facecolor="lightgray")
mp.title("SVM RBF Classifier", fontsize=14)
mp.xlabel("x", fontsize=14)
mp.ylabel("y", fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap="gray")
C0, C1 = (y == 0), (y == 1)
mp.scatter(x[C0][:, 0], x[C0][:, 1], c="orangered", s=80)
mp.scatter(x[C1][:, 0], x[C1][:, 1], c="limegreen", s=80)
mp.show() | [
"[email protected]"
] | |
08ee0a9717d7a5b84c5ebec7fd711e4b60ec1a77 | 65d93b3db37f488356faa1789f1001f17191e345 | /isi_mip/core/migrations/0005_headerlink_menu_items.py | bbd567159b1e8dd19206dd7ca3b9554ba3cbeffa | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ISI-MIP/isimip | b4a19310dd772356eef87259783084836107cf4a | c2a78c727337e38f3695031e00afd607da7d6dcb | refs/heads/master | 2021-09-14T15:42:14.453031 | 2021-05-25T09:33:45 | 2021-05-25T09:33:45 | 237,446,232 | 0 | 0 | MIT | 2020-01-31T14:27:04 | 2020-01-31T14:27:03 | null | UTF-8 | Python | false | false | 803 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-03-06 12:23
from __future__ import unicode_literals
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
class Migration(migrations.Migration):
    """Add the optional ``menu_items`` StreamField to the ``HeaderLink`` model.

    The StreamField accepts two block types: ``jump_link`` (name + external
    URL) and ``page_link`` (name + internal page chooser).
    """

    dependencies = [
        # must be applied after the previous core migration
        ('core', '0004_auto_20170118_1443'),
    ]

    operations = [
        migrations.AddField(
            model_name='headerlink',
            name='menu_items',
            field=wagtail.core.fields.StreamField((('jump_link', wagtail.core.blocks.StructBlock((('name', wagtail.core.blocks.CharBlock()), ('link', wagtail.core.blocks.URLBlock())))), ('page_link', wagtail.core.blocks.StructBlock((('name', wagtail.core.blocks.CharBlock()), ('page', wagtail.core.blocks.PageChooserBlock()))))), blank=True, null=True),
        ),
    ]
| [
"[email protected]"
] | |
097d789ff031bc02e11ed666f3d663f1e107cd89 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/api_version_detail.py | 3c337f810bd10e1917aa9bfeff016dbb057dce3c | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,327 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ApiVersionDetail:
    """Details of a single API version exposed by the service.

    Generated huaweicloud-sdk model class: attribute types and their JSON
    wire names are declared in ``openapi_types`` / ``attribute_map``.

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """

    # attribute names whose values are masked as "****" by to_dict()
    sensitive_list = []

    openapi_types = {
        'id': 'str',
        'links': 'list[ApiLink]',
        'version': 'str',
        'status': 'str',
        'updated': 'str',
        'min_version': 'str'
    }

    attribute_map = {
        'id': 'id',
        'links': 'links',
        'version': 'version',
        'status': 'status',
        'updated': 'updated',
        'min_version': 'min_version'
    }

    def __init__(self, id=None, links=None, version=None, status=None, updated=None, min_version=None):
        """ApiVersionDetail - a model defined in huaweicloud sdk.

        All parameters are optional; assigning through the properties keeps
        the private ``_``-prefixed backing attributes consistent.
        """

        self._id = None
        self._links = None
        self._version = None
        self._status = None
        self._updated = None
        self._min_version = None
        self.discriminator = None

        if id is not None:
            self.id = id
        if links is not None:
            self.links = links
        if version is not None:
            self.version = version
        if status is not None:
            self.status = status
        if updated is not None:
            self.updated = updated
        if min_version is not None:
            self.min_version = min_version

    @property
    def id(self):
        """Gets the id of this ApiVersionDetail.

        Version ID (version number), e.g. ``v1.0``.

        :return: The id of this ApiVersionDetail.
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this ApiVersionDetail.

        Version ID (version number), e.g. ``v1.0``.

        :param id: The id of this ApiVersionDetail.
        :type: str
        """
        self._id = id

    @property
    def links(self):
        """Gets the links of this ApiVersionDetail.

        JSON object; see the ``links`` field data structure description.

        :return: The links of this ApiVersionDetail.
        :rtype: list[ApiLink]
        """
        return self._links

    @links.setter
    def links(self, links):
        """Sets the links of this ApiVersionDetail.

        JSON object; see the ``links`` field data structure description.

        :param links: The links of this ApiVersionDetail.
        :type: list[ApiLink]
        """
        self._links = links

    @property
    def version(self):
        """Gets the version of this ApiVersionDetail.

        If this API version supports microversions, the largest supported
        microversion; otherwise an empty string.

        :return: The version of this ApiVersionDetail.
        :rtype: str
        """
        return self._version

    @version.setter
    def version(self, version):
        """Sets the version of this ApiVersionDetail.

        If this API version supports microversions, the largest supported
        microversion; otherwise an empty string.

        :param version: The version of this ApiVersionDetail.
        :type: str
        """
        self._version = version

    @property
    def status(self):
        """Gets the status of this ApiVersionDetail.

        Version status, one of the following three values:
        - CURRENT: the recommended (mainline) version.
        - SUPPORTED: an older version that is still supported.
        - DEPRECATED: a deprecated version that may be removed later.

        :return: The status of this ApiVersionDetail.
        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this ApiVersionDetail.

        Version status, one of the following three values:
        - CURRENT: the recommended (mainline) version.
        - SUPPORTED: an older version that is still supported.
        - DEPRECATED: a deprecated version that may be removed later.

        :param status: The status of this ApiVersionDetail.
        :type: str
        """
        self._status = status

    @property
    def updated(self):
        """Gets the updated of this ApiVersionDetail.

        Release time of the version, expressed in UTC, e.g. the v1 release
        time ``2014-06-28T12:20:21Z``.

        :return: The updated of this ApiVersionDetail.
        :rtype: str
        """
        return self._updated

    @updated.setter
    def updated(self, updated):
        """Sets the updated of this ApiVersionDetail.

        Release time of the version, expressed in UTC, e.g. the v1 release
        time ``2014-06-28T12:20:21Z``.

        :param updated: The updated of this ApiVersionDetail.
        :type: str
        """
        self._updated = updated

    @property
    def min_version(self):
        """Gets the min_version of this ApiVersionDetail.

        If this API version supports microversions, the smallest supported
        microversion; otherwise an empty string.

        :return: The min_version of this ApiVersionDetail.
        :rtype: str
        """
        return self._min_version

    @min_version.setter
    def min_version(self, min_version):
        """Sets the min_version of this ApiVersionDetail.

        If this API version supports microversions, the smallest supported
        microversion; otherwise an empty string.

        :param min_version: The min_version of this ApiVersionDetail.
        :type: str
        """
        self._min_version = min_version

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # recursively serialize list elements that are models
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # recursively serialize dict values that are models
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    # mask sensitive attribute values
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")

        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ApiVersionDetail):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
return not self == other
| [
"[email protected]"
] | |
91af8f7eafbaafb06ab11369ce37a6fdf95547da | 30302d215a012a079edf05a4e14e932888385def | /tests/core/v5/test_handshake_schemes.py | 0e9e7b6f08153cdd6fef5a60a99152a2d7ba3b25 | [
"MIT"
] | permissive | NhlanhlaHasane/ddht | e54975a7fcf4e9bfa29771ee6b78c5e9a5991aff | 142911d134ff839f3f79ff8fe9e45d3fe5a58cd0 | refs/heads/master | 2023-05-31T05:09:06.371320 | 2021-06-03T22:31:22 | 2021-06-03T22:31:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,076 | py | from hashlib import sha256
from eth_keys.datatypes import NonRecoverableSignature, PrivateKey
from eth_utils import ValidationError, decode_hex, keccak
from hypothesis import given
import pytest
from ddht.handshake_schemes import ecdh_agree, hkdf_expand_and_extract
from ddht.tools.v5_strategies import id_nonce_st, private_key_st
from ddht.v5.constants import ID_NONCE_SIGNATURE_PREFIX
from ddht.v5.handshake_schemes import SignatureInputs, V4HandshakeScheme
def test_handshake_key_generation():
    """A generated handshake key pair is valid and internally consistent."""
    private_key, public_key = V4HandshakeScheme.create_handshake_key_pair()
    V4HandshakeScheme.validate_uncompressed_public_key(public_key)
    V4HandshakeScheme.validate_handshake_public_key(public_key)
    # the returned public key must be the one derived from the private key
    assert PrivateKey(private_key).public_key.to_bytes() == public_key
@pytest.mark.parametrize("public_key", (b"\x01" * 64, b"\x02" * 64))
def test_handshake_public_key_validation_valid(public_key):
    """64-byte uncompressed public keys are accepted without raising."""
    V4HandshakeScheme.validate_handshake_public_key(public_key)
@pytest.mark.parametrize(
    "public_key",
    (b"", b"\x02" * 31, b"\x02" * 32, b"\x02" * 33, b"\x02" * 63, b"\x02" * 65),
)
def test_handshake_public_key_validation_invalid(public_key):
    """Keys of any length other than 64 bytes must be rejected."""
    with pytest.raises(ValidationError):
        V4HandshakeScheme.validate_handshake_public_key(public_key)
@given(private_key=private_key_st, id_nonce=id_nonce_st, ephemeral_key=private_key_st)
def test_id_nonce_signing(private_key, id_nonce, ephemeral_key):
    """A created id-nonce signature verifies against the expected digest:
    sha256(prefix || id_nonce || ephemeral_public_key)."""
    ephemeral_public_key = PrivateKey(ephemeral_key).public_key.to_bytes()
    signature = V4HandshakeScheme.create_id_nonce_signature(
        signature_inputs=SignatureInputs(id_nonce, ephemeral_public_key),
        private_key=private_key,
    )
    signature_object = NonRecoverableSignature(signature)
    # Recompute the signed digest independently of the scheme under test.
    message_hash = sha256(
        ID_NONCE_SIGNATURE_PREFIX + id_nonce + ephemeral_public_key
    ).digest()
    assert signature_object.verify_msg_hash(
        message_hash, PrivateKey(private_key).public_key
    )
@given(private_key=private_key_st, id_nonce=id_nonce_st, ephemeral_key=private_key_st)
def test_valid_id_nonce_signature_validation(private_key, id_nonce, ephemeral_key):
    """A signature produced by the scheme passes the scheme's own validation."""
    eph_pubkey = PrivateKey(ephemeral_key).public_key.to_bytes()
    inputs = SignatureInputs(id_nonce, eph_pubkey)
    signature = V4HandshakeScheme.create_id_nonce_signature(
        signature_inputs=inputs,
        private_key=private_key,
    )
    signer_pubkey = PrivateKey(private_key).public_key.to_compressed_bytes()
    V4HandshakeScheme.validate_id_nonce_signature(
        signature_inputs=inputs,
        signature=signature,
        public_key=signer_pubkey,
    )
def test_invalid_id_nonce_signature_validation():
    """Validation must fail when the signer key, nonce, or ephemeral key differ."""
    id_nonce = b"\xff" * 10
    private_key = b"\x11" * 32
    ephemeral_public_key = b"\x22" * 64
    signature = V4HandshakeScheme.create_id_nonce_signature(
        signature_inputs=SignatureInputs(id_nonce, ephemeral_public_key),
        private_key=private_key,
    )
    public_key = PrivateKey(private_key).public_key.to_compressed_bytes()
    # Counterparts that differ from the values actually signed.
    different_public_key = PrivateKey(b"\x22" * 32).public_key.to_compressed_bytes()
    different_id_nonce = b"\x00" * 10
    different_ephemeral_public_key = b"\x00" * 64
    assert different_public_key != public_key
    assert different_id_nonce != id_nonce
    assert different_ephemeral_public_key != ephemeral_public_key
    # Wrong signer public key.
    with pytest.raises(ValidationError):
        V4HandshakeScheme.validate_id_nonce_signature(
            signature_inputs=SignatureInputs(id_nonce, ephemeral_public_key),
            signature=signature,
            public_key=different_public_key,
        )
    # Wrong id nonce.
    with pytest.raises(ValidationError):
        V4HandshakeScheme.validate_id_nonce_signature(
            signature_inputs=SignatureInputs(different_id_nonce, ephemeral_public_key),
            signature=signature,
            public_key=public_key,
        )
    # Wrong ephemeral public key.
    with pytest.raises(ValidationError):
        V4HandshakeScheme.validate_id_nonce_signature(
            signature_inputs=SignatureInputs(id_nonce, different_ephemeral_public_key),
            signature=signature,
            public_key=public_key,
        )
@given(
    initiator_private_key=private_key_st,
    recipient_private_key=private_key_st,
    id_nonce=id_nonce_st,
)
def test_session_key_derivation(initiator_private_key, recipient_private_key, id_nonce):
    """Both sides of the handshake derive complementary session keys.

    The initiator's encryption key must equal the recipient's decryption key
    (and vice versa), and both sides must agree on the auth-response key.
    """
    initiator_private_key_object = PrivateKey(initiator_private_key)
    recipient_private_key_object = PrivateKey(recipient_private_key)
    initiator_public_key = initiator_private_key_object.public_key.to_bytes()
    recipient_public_key = recipient_private_key_object.public_key.to_bytes()
    # Node ids are the keccak hash of the uncompressed public key.
    initiator_node_id = keccak(initiator_private_key_object.public_key.to_bytes())
    recipient_node_id = keccak(recipient_private_key_object.public_key.to_bytes())
    initiator_session_keys = V4HandshakeScheme.compute_session_keys(
        local_private_key=initiator_private_key,
        remote_public_key=recipient_public_key,
        local_node_id=initiator_node_id,
        remote_node_id=recipient_node_id,
        salt=id_nonce,
        is_locally_initiated=True,
    )
    recipient_session_keys = V4HandshakeScheme.compute_session_keys(
        local_private_key=recipient_private_key,
        remote_public_key=initiator_public_key,
        local_node_id=recipient_node_id,
        remote_node_id=initiator_node_id,
        salt=id_nonce,
        is_locally_initiated=False,
    )
    assert (
        initiator_session_keys.auth_response_key
        == recipient_session_keys.auth_response_key
    )
    assert (
        initiator_session_keys.encryption_key == recipient_session_keys.decryption_key
    )
    assert (
        initiator_session_keys.decryption_key == recipient_session_keys.encryption_key
    )
@pytest.mark.parametrize(
    ["local_secret_key", "remote_public_key", "shared_secret_key"],
    [
        [
            decode_hex(
                "0xfb757dc581730490a1d7a00deea65e9b1936924caaea8f44d476014856b68736"
            ),
            decode_hex(
                "0x9961e4c2356d61bedb83052c115d311acb3a96f5777296dcf297351130266231503061ac4aaee666073d"  # noqa: E501
                "7e5bc2c80c3f5c5b500c1cb5fd0a76abbb6b675ad157"
            ),
            decode_hex(
                "0x033b11a2a1f214567e1537ce5e509ffd9b21373247f2a3ff6841f4976f53165e7e"
            ),
        ]
    ],
)
def test_official_key_agreement(local_secret_key, remote_public_key, shared_secret_key):
    """ECDH agreement reproduces the official discv5 test vector."""
    assert ecdh_agree(local_secret_key, remote_public_key) == shared_secret_key
@pytest.mark.parametrize(
    [
        "secret",
        "initiator_node_id",
        "recipient_node_id",
        "id_nonce",
        "initiator_key",
        "recipient_key",
        "auth_response_key",
    ],
    [
        [
            decode_hex(
                "0x02a77e3aa0c144ae7c0a3af73692b7d6e5b7a2fdc0eda16e8d5e6cb0d08e88dd04"
            ),
            decode_hex(
                "0xa448f24c6d18e575453db13171562b71999873db5b286df957af199ec94617f7"
            ),
            decode_hex(
                "0x885bba8dfeddd49855459df852ad5b63d13a3fae593f3f9fa7e317fd43651409"
            ),
            decode_hex(
                "0x0101010101010101010101010101010101010101010101010101010101010101"
            ),
            decode_hex("0x238d8b50e4363cf603a48c6cc3542967"),
            decode_hex("0xbebc0183484f7e7ca2ac32e3d72c8891"),
            decode_hex("0xe987ad9e414d5b4f9bfe4ff1e52f2fae"),
        ]
    ],
)
def test_official_key_derivation(
    secret,
    initiator_node_id,
    recipient_node_id,
    id_nonce,
    initiator_key,
    recipient_key,
    auth_response_key,
):
    """HKDF expand-and-extract reproduces the official discv5 derivation vector."""
    derived_keys = hkdf_expand_and_extract(
        secret, initiator_node_id, recipient_node_id, id_nonce
    )
    # Derivation order: initiator key, recipient key, auth-response key.
    assert derived_keys[0] == initiator_key
    assert derived_keys[1] == recipient_key
    assert derived_keys[2] == auth_response_key
@pytest.mark.parametrize(
    ["id_nonce", "ephemeral_public_key", "local_secret_key", "id_nonce_signature"],
    [
        [
            decode_hex(
                "0xa77e3aa0c144ae7c0a3af73692b7d6e5b7a2fdc0eda16e8d5e6cb0d08e88dd04"
            ),
            decode_hex(
                "0x9961e4c2356d61bedb83052c115d311acb3a96f5777296dcf297351130266231503061ac4aaee666"
                "073d7e5bc2c80c3f5c5b500c1cb5fd0a76abbb6b675ad157"
            ),
            decode_hex(
                "0xfb757dc581730490a1d7a00deea65e9b1936924caaea8f44d476014856b68736"
            ),
            decode_hex(
                "0xc5036e702a79902ad8aa147dabfe3958b523fd6fa36cc78e2889b912d682d8d35fdea142e141f690"
                "736d86f50b39746ba2d2fc510b46f82ee08f08fd55d133a4"
            ),
        ]
    ],
)
def test_official_id_nonce_signature(
    id_nonce, ephemeral_public_key, local_secret_key, id_nonce_signature
):
    """Signature creation reproduces the official discv5 id-nonce vector."""
    created_signature = V4HandshakeScheme.create_id_nonce_signature(
        signature_inputs=SignatureInputs(id_nonce, ephemeral_public_key),
        private_key=local_secret_key,
    )
    assert created_signature == id_nonce_signature
| [
"[email protected]"
] | |
734d7779a3e742141face3d32132d429ae9aaf73 | c2e16633921d1efe584d93d769eaa7892a2fd8f3 | /OOP/project.zoo/animal_init.py | 4a1d1f4ef87641924483dded75122042ed61a8ed | [] | no_license | yosifnandrov/softuni-stuff | bd53d418fe143ea4633a5488c1f80648da0b9ef7 | 2a76e5aee2029edf901634750d28cf153d73ece3 | refs/heads/main | 2023-04-17T19:53:30.254790 | 2021-05-06T11:33:39 | 2021-05-06T11:33:39 | 364,884,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | def initialization_animals(self,*args):
    # Pair each annotated attribute name (in declaration order) with the
    # positional arguments; zip() silently ignores whichever sequence is
    # longer, so extra args or extra annotations are dropped.
    attributes = self.__annotations__
    for attr,argument in zip(attributes,args):
        setattr(self,attr,argument)
| [
"[email protected]"
] | |
b82c181401e82d543fbec8baf19ab692ca195486 | 1280ef2fd358ddb094f4147a25d1c21c5d718c43 | /socialNewsApp/src/mikroblog/urls.py | bbc8ecb1247ff3a183a88abc172e69b049bd33b7 | [] | no_license | PatkaSm/SocialNewsApp | aeac569c76f554b637efa2235afad8230bf5bb44 | bea67724e86523939fad20411807513202c1a1b8 | refs/heads/master | 2023-04-06T11:52:31.755797 | 2021-04-21T07:35:54 | 2021-04-21T07:35:54 | 279,606,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | from comment.views import delete_micropost_comment, like_micropost_comment
from django.urls import path
from mikroblog.views import MicroPostListView, micro_post_delete, micro_post_like
# URL routes for the "mikroblog" (micro-blogging) app.
urlpatterns = [
    path('', MicroPostListView.as_view(), name='mikroblog'),
    # Micro-post management (delete / like) by primary key.
    path('mikro-post/delete/<int:pk>',micro_post_delete , name='micro-post-delete'),
    path('mikro-post/like/<int:pk>', micro_post_like, name='micro-post-like'),
    # Comments on a post ("wpis") - delete / like by comment id.
    path('wpis/comment/delete/<int:id>/', delete_micropost_comment, name='micropost-comment-delete'),
    path('wpis/comment/like/<int:id>/', like_micropost_comment, name='micropost-comment-like'),
] | [
"[email protected]"
] | |
fb60256c701c3ceddc6c6bad0eed101f230346b1 | db7aac75e31d35c4a18c966170b46f269d015d0b | /webgl_canvas_gadget/apps/projects/migrations/0044_auto_20160819_0115.py | 3c13e9f0687f9d9d8dba9341538656d11f099466 | [] | no_license | jjpastprojects/Django | 12fbf3cf27a9230db98a21cc1013216aeadaae1e | c55562be7226f29b4ec213f8f018b6c2dd50c420 | refs/heads/master | 2022-12-12T22:15:49.493289 | 2017-09-05T12:51:20 | 2017-09-05T12:51:20 | 101,995,798 | 0 | 0 | null | 2022-12-07T23:21:46 | 2017-08-31T11:55:00 | JavaScript | UTF-8 | Python | false | false | 622 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-08-18 19:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds camera zoom-distance limit fields to the model3d model."""
    dependencies = [
        ('projects', '0043_auto_20160816_2354'),
    ]
    operations = [
        # Farthest distance the camera may move away from the model.
        migrations.AddField(
            model_name='model3d',
            name='camera_max_distance',
            field=models.FloatField(default=6),
        ),
        # Closest distance the camera may approach the model.
        migrations.AddField(
            model_name='model3d',
            name='camera_min_distance',
            field=models.FloatField(default=1.5),
        ),
    ]
| [
"[email protected]"
] | |
40779a1244722e9b010944110d3bb639931b5b94 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_331/ch40_2019_04_03_22_25_14_448588.py | 342e2f8292e3195be274356568c6453d16509eb9 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | def fatorial(n):
    # Running product; starts at the multiplicative identity so that
    # fatorial(0) (and any n <= 0) yields 1.
    s=1
    # Multiply s by n, n-1, ..., 1, counting n down to zero.
    while n>0:
        s*=n
        n=n-1
return s | [
"[email protected]"
] | |
84214983efbd775549489eac0f074bacbb7d4429 | 068d271e241d8cdb46dbf4243166e4b8ee7025b2 | /day06/day6/3.今日内容.py | 95e2682ddf4ad17f4ff7f20bcc9fad627442c403 | [] | no_license | caiqinxiong/python | f6e226e76cb62aac970bcfbcb6c8adfc64858b60 | 9029f6c528d2cb742b600af224e803baa74cbe6a | refs/heads/master | 2023-05-26T19:41:34.911885 | 2020-05-15T09:02:08 | 2020-05-15T09:02:08 | 195,261,757 | 1 | 0 | null | 2021-06-10T23:33:33 | 2019-07-04T15:01:42 | JavaScript | UTF-8 | Python | false | false | 343 | py | # 内置模块
# re module: already covered
# os module: interacting with the operating system
# time-related modules: time and datetime
# sys module: tied to the Python interpreter
# serialization modules: json and pickle
# logging module: emitting logs in a standardized format
# hashlib module: collection of digest algorithms - e.g. hashed-password login checks
# 下周开始:开面向对象 | [
"[email protected]"
] | |
f967e820b74cfdce9295d196ecf1cddad3653ee7 | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /67/67.add-binary.282298911.Accepted.leetcode.python3.py | bacf67b5bc939359b252bd80e3cb448d5e8ac740 | [] | no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,476 | py | """
Problem Link: https://leetcode.com/problems/add-binary/
Given two binary strings, return their sum (also a binary string).
The input strings are both non-empty and contains only characters 1 or 0.
Example 1:
Input: a = "11", b = "1"
Output: "100"
Example 2:
Input: a = "1010", b = "1011"
Output: "10101"
"""
# there are two cases for every bit
# 1. they are equal
# - 1 and 1 -> 0 but result will depend on carry.
# - 0 and 0 -> 0 but result will depend on carry
# [if carry is 1, res = 1, if carry is 0, res = 0]
# 2. they are unequal
# - 1 and 0 -> result carry + 1 -> result = carry === 0 ? 1 : 0
# - 0 and 1 -> result carry + 1 -> result = carry === 0 ? 1 : 0
# don't touch the carry because if it was 0, it will remain 0 even after this sum
# why?
# - carry = 0, now bits are: 1 + 0 = 1. Add carry: 1 + 0(carry) -> carry will remain 0
# - carry = 1, now bits are: 1 + 0 = 1. Add carry: 1 + 1(carry) -> carry will remain 1
class Solution:
    """Three equivalent ways to add two binary strings (LeetCode 67)."""

    # Method 1: pure character comparison, carry kept as '0'/'1'.
    def addBinary(self, a: str, b: str) -> str:
        """Return the binary sum of a and b using the equal/unequal-bit trick."""
        digits = []
        carry = '0'
        ai, bi = len(a) - 1, len(b) - 1
        while ai >= 0 or bi >= 0:
            da = a[ai] if ai >= 0 else '0'
            db = b[bi] if bi >= 0 else '0'
            if da == db:
                # Equal bits sum to 0 (mod 2) plus carry; the new carry is the bit.
                digits.append(carry)
                carry = da
            else:
                # Unequal bits sum to 1; the result flips with the carry,
                # and the carry itself is unchanged.
                digits.append('1' if carry == '0' else '0')
            ai -= 1
            bi -= 1
        if carry == '1':
            digits.append(carry)
        digits.reverse()
        return ''.join(digits)

    # Method 2: per-column integer arithmetic.
    def addBinary_integer(self, a: str, b: str) -> str:
        """Return the binary sum of a and b via column-wise int addition."""
        digits = []
        carry = 0
        ai, bi = len(a) - 1, len(b) - 1
        while ai >= 0 or bi >= 0:
            total = carry
            if ai >= 0:
                total += int(a[ai])
                ai -= 1
            if bi >= 0:
                total += int(b[bi])
                bi -= 1
            carry, bit = divmod(total, 2)
            digits.append(str(bit))
        if carry:
            digits.append('1')
        return ''.join(reversed(digits))

    # Method 3: XOR for the sum bit, shift for the carry.
    def addBinary_xor(self, a: str, b: str) -> str:
        """Return the binary sum of a and b using xor/shift bit tricks."""
        digits = []
        carry = 0
        ai, bi = len(a) - 1, len(b) - 1
        while ai >= 0 or bi >= 0 or carry:
            da = ord(a[ai]) - ord('0') if ai >= 0 else 0
            db = ord(b[bi]) - ord('0') if bi >= 0 else 0
            digits.append(str(da ^ db ^ carry))
            carry = (da + db + carry) >> 1  # equivalent to (da+db+carry) // 2
            ai -= 1
            bi -= 1
        return ''.join(reversed(digits))
| [
"[email protected]"
] | |
8df572327d1476dac62dac98652932fcec33a758 | 124df74bce796598d224c4380c60c8e95756f761 | /com.raytheon.viz.avnconfig/localization/aviation/python/TafViewer.py | 063ff9b84526cb0322a560a288dcb2089c769676 | [] | no_license | Mapoet/AWIPS-Test | 19059bbd401573950995c8cc442ddd45588e6c9f | 43c5a7cc360b3cbec2ae94cb58594fe247253621 | refs/heads/master | 2020-04-17T03:35:57.762513 | 2017-02-06T17:17:58 | 2017-02-06T17:17:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,841 | py | ##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
#
# Name:
# TafViewer.py
# GFS1-NHD:A6644.0000-SCRIPT;1.10
#
# Status:
# DELIVERED
#
# History:
# Revision 1.10 (DELIVERED)
# Created: 14-MAY-2007 10:04:47 OBERFIEL
# Removed references to the obsolete prototype XTF product.
# Allow decoder and encoder to format TAF in two different
# ways. New format will be triggered by day and time to be
# specified at a later date.
#
# Revision 1.9 (DELIVERED)
# Created: 23-JAN-2006 08:23:18 TROJAN
# stdr 956
#
# Revision 1.8 (DELIVERED)
# Created: 19-SEP-2005 13:47:39 TROJAN
# spr 7011
#
# Revision 1.7 (DELIVERED)
# Created: 06-JUL-2005 18:16:42 TROJAN
# spr 6548
#
# Revision 1.6 (DELIVERED)
# Created: 07-MAY-2005 11:39:14 OBERFIEL
# Added Item Header Block
#
# Revision 1.5 (DELIVERED)
# Created: 24-JAN-2005 21:18:48 TROJAN
# spr 6612
#
# Revision 1.4 (APPROVED)
# Created: 09-JUL-2004 19:11:05 OBERFIEL
# Replaced busy dialogs
#
# Revision 1.3 (APPROVED)
# Created: 01-JUL-2004 14:59:55 OBERFIEL
# Update
#
# Revision 1.2 (DELIVERED)
# Created: 08-JAN-2004 21:40:30 PCMS
# Updating for code cleanup
#
# Revision 1.1 (APPROVED)
# Created: 06-NOV-2003 16:46:22 OBERFIEL
# date and time created -2147483647/-2147483648/-2147481748
# -2147483648:-2147483648:-2147483648 by oberfiel
#
# Change Document History:
# 1:
# Change Document: GFS1-NHD_SPR_7277
# Action Date: 19-MAR-2008 07:59:13
# Relationship Type: In Response to
# Status: CLOSED
# Title: AvnFPS: (OB8.2) AvnFPS decoders need to conform to new ICAO format for TAFs
#
#
import logging, time
from Tkinter import *
import Pmw
from Balloon import Balloon
import Avn, AvnLib, Busy, Globals
TAG = 'warning'
_Logger = logging.getLogger(__name__)
class Viewer(object):
    """Notebook page showing the most recent TAF(s) for one site.

    Wraps a Pmw.ScrolledText widget plus controls for how many TAFs are
    shown and whether WMO headers are displayed.
    """
    def __init__(self, master, getcmd, editcmd):
        # master: parent widget (page in a notebook)
        # getcmd: data access method, returning a record with
        #         .header/.text/.dcd attributes (raw and decoded TAF)
        # editcmd: callback that loads the current forecast into the editor
        self._master = master
        self._id = None
        self._taf = None
        self._getcmd = getcmd
        self._editcmd = editcmd
        # Tk variables backing the "Show Headers" checkbox and "Num TAFs"
        # menu; defaults come from the X resource database.
        self._tkShowHeaders = IntVar()
        self._tkShowHeaders.set(int(master.option_get('showHeaders', '')))
        self._tkNumTaf = IntVar()
        self._tkNumTaf.set(int(master.option_get('numTafs', '')))
        frame = Frame(master)
        btnbox = Pmw.ButtonBox(frame)
        btn = btnbox.add('Text Editor', command=self._editcmd)
        Balloon().bind(btn, 'Initializes editor page with current forecast')
        btnbox.alignbuttons()
        btnbox.pack(side='left', expand='no', fill='x')
        menu = Pmw.OptionMenu(frame,
            labelpos='w',
            label_text='Num TAFs',
            menubutton_width=3,
            menubutton_textvariable=self._tkNumTaf,
            items=('1', '3', '99'),
            command=self.load,
            )
        menu.pack(side='right', expand='no', fill='x', padx=2)
        Balloon().bind(menu, 'Number of TAFs to display')
        cb = Checkbutton(frame,
            text='Show Headers',
            variable=self._tkShowHeaders,
            command=self.load,
            )
        cb.pack(side='right', padx=5)
        Balloon().bind(cb, 'Display WMO header')
        frame.pack(side='top', expand='no', fill='x')
        # Read-only scrolled text area holding the TAF text; the 'warning'
        # tag (red background) is used by highlight().
        self.text = Pmw.ScrolledText(master,
            borderframe = 1,
            vscrollmode='static',
            text_state='disabled',
            text_wrap='word',
            )
        self.text.pack(side='top', expand='yes', fill='both')
        self.text.tag_configure(TAG, background='red')
        # Right-click opens a small context menu with a Copy entry.
        self.text.component('text').bind('<Button-3>', self.__popupMenu)
        self.popmenu = Menu(master,
            tearoff=0,
            type='normal',
            )
        self.popmenu.add_command(label='Copy', command=self.__copy)
    def __copy(self):
        """Copy the current text selection to the clipboard (best effort)."""
        try:
            t = self.text.component('text')
            t.selection_own()
            selection = t.selection_get()
            t.clipboard_clear()
            t.clipboard_append(selection)
        except:
            # No selection (or no clipboard): silently do nothing.
            pass
    def __popupMenu(self, e):
        """Post the context menu at the mouse-event position."""
        self.popmenu.tk_popup(e.widget.winfo_rootx() + e.x,
                              e.widget.winfo_rooty() + e.y)
##############################################################################
# public methods
##############################################################################
    def highlight(self, mtrdata):
        """Tag with TAG (red) the TAF elements whose METAR monitoring status
        has severity >= 2 for the currently valid forecast group."""
        # called by MetarViewer
        # needs to change logic if other viewers use this method
        if not self._taf or not 'group' in self._taf.dcd \
            or not self._taf.dcd['group']:
            return
        p = self._taf.dcd['group'][0]
        t = max(time.time(), p['prev']['time']['from'])
        # Find the group valid at time t, plus its TEMPO/occasional part
        # if t falls inside that window.
        for p in self._taf.dcd['group']:
            if t < p['prev']['time']['to']:
                if 'ocnl' in p and \
                    p['ocnl']['time']['from'] <= t < p['ocnl']['time']['to']:
                    tempo = p['ocnl']
                else:
                    tempo = None
                prev = p['prev']
                break
        else:
            # No group covers time t: nothing to highlight.
            return
        # Line offset caused by the (optional) WMO header above the TAF.
        if self._taf.header and self._tkShowHeaders.get():
            hlen = self._taf.header.count('\n')
        else:
            hlen = 0
        for e in [e for e in mtrdata['status'] if \
            mtrdata['status'][e].severity >= 2 and e != 'tempo']:
            for ix in AvnLib.findIndex(e, prev, hlen):
                self.text.tag_add(TAG, *ix)
            if tempo:
                for ix in AvnLib.findIndex(e, tempo, hlen):
                    self.text.tag_add(TAG, *ix)
    def load(self, arg=None):
        """(Re)fill the text widget with the last N TAFs for the current site.

        *arg* is unused; it is supplied by the Tk OptionMenu/Checkbutton
        callbacks that invoke this method.
        """
        # Temporarily enable the widget so it can be rewritten.
        self.text.configure(text_state='normal')
        self.text.clear()
        try:
            self._taf = self._getcmd(self._id)
            if self._taf is None:
                raise Avn.AvnError('')
            if self._tkShowHeaders.get():
                self.text.insert('end', self._taf.header)
            # first line of most recent TAF
            self.text.insert('end', self._taf.text+'\n')
            # Append the older TAFs (the first entry repeats the one above).
            for taf in Globals.DRC.getTafs(self._id, False, 0,
                self._tkNumTaf.get())[1:]:
                if self._tkShowHeaders.get():
                    self.text.insert('end', taf.header)
                self.text.insert('end', taf.text+'\n')
        except Exception:
            msg = 'Cannot load data for %s' % self._id
            _Logger.exception(msg)
            # Only pop a warning dialog when this page is actually visible.
            if self._master.winfo_ismapped():
                Busy.showwarning(msg, self._master)
        self.text.configure(text_state='disabled')
    def setSite(self, id):
        """Remember the site id whose TAFs subsequent load() calls display."""
        self._id = id
| [
"[email protected]"
] | |
a7dac8a6c149989da09d918c09c3700c2a8ee2d9 | 08cfc4fb5f0d2f11e4e226f12520a17c5160f0a2 | /kubernetes/client/models/v1_subject.py | 1e131c521a19b4ff2a12fcdefa5275c6f8708630 | [
"Apache-2.0"
] | permissive | ex3cv/client-python | 5c6ee93dff2424828d064b5a2cdbed3f80b74868 | 2c0bed9c4f653472289324914a8f0ad4cbb3a1cb | refs/heads/master | 2021-07-12T13:37:26.049372 | 2017-10-16T20:19:01 | 2017-10-16T20:19:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,118 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1Subject(object):
    """RBAC Subject: a reference to the user/group/service-account that a
    role binding applies to.

    NOTE: Originally auto generated by the swagger code generator program;
    updated to use the built-in dict API (``dict.items``) instead of
    ``six.iteritems`` (works on both Python 2 and 3) and comprehensions
    instead of ``map(lambda ...)``.
    """

    # Attribute name -> swagger type, used when serializing to a dict.
    swagger_types = {
        'api_group': 'str',
        'kind': 'str',
        'name': 'str',
        'namespace': 'str'
    }

    # Python attribute name -> JSON key in the API definition.
    attribute_map = {
        'api_group': 'apiGroup',
        'kind': 'kind',
        'name': 'name',
        'namespace': 'namespace'
    }

    def __init__(self, api_group=None, kind=None, name=None, namespace=None):
        """V1Subject - a model defined in Swagger.

        :param api_group: API group of the referenced subject (optional).
        :param kind: kind of the referenced object (required, non-None).
        :param name: name of the referenced object (required, non-None).
        :param namespace: namespace of the referenced object (optional).
        :raises ValueError: if kind or name is None (raised by the setters).
        """
        self._api_group = None
        self._kind = None
        self._name = None
        self._namespace = None
        self.discriminator = None

        if api_group is not None:
            self.api_group = api_group
        # The kind/name property setters validate against None.
        self.kind = kind
        self.name = name
        if namespace is not None:
            self.namespace = namespace

    @property
    def api_group(self):
        """APIGroup of the referenced subject.

        Defaults to "" for ServiceAccount subjects and to
        "rbac.authorization.k8s.io" for User and Group subjects.

        :rtype: str
        """
        return self._api_group

    @api_group.setter
    def api_group(self, api_group):
        """Sets the api_group of this V1Subject."""
        self._api_group = api_group

    @property
    def kind(self):
        """Kind of object being referenced.

        Values defined by this API group are "User", "Group", and
        "ServiceAccount". If the Authorizer does not recognize the kind
        value, it should report an error.

        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1Subject; kind is mandatory."""
        if kind is None:
            raise ValueError("Invalid value for `kind`, must not be `None`")

        self._kind = kind

    @property
    def name(self):
        """Name of the object being referenced.

        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this V1Subject; name is mandatory."""
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")

        self._name = name

    @property
    def namespace(self):
        """Namespace of the referenced object.

        If the object kind is non-namespaced, such as "User" or "Group",
        and this value is not empty, the Authorizer should report an error.

        :rtype: str
        """
        return self._namespace

    @namespace.setter
    def namespace(self, namespace):
        """Sets the namespace of this V1Subject."""
        self._namespace = namespace

    def to_dict(self):
        """Returns the model properties as a dict, recursing into values
        that themselves provide ``to_dict``."""
        result = {}

        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal."""
        if not isinstance(other, V1Subject):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
| [
"[email protected]"
] | |
36d3e7e80d615098ce61bc85d31fe8ad49acb08b | bc2d2d99ed7a814fd36eab5ca3defc4fbfc9ea29 | /hesab/__init__.py | c91d821b54591fd6ac370087fb20c187a97e7785 | [
"MIT"
] | permissive | soul4code/django-hesab | f4ba7d8167ff1e42ab775e0bdea61a7c11bdfb98 | 3ec72def22283475d958a60abc3a572d8ccb63cc | refs/heads/master | 2021-12-30T08:47:44.131542 | 2016-07-30T20:14:00 | 2016-07-30T20:14:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
__title__ = 'django-hesab'
__version__ = '1.0.0'
__author__ = 'Rolf Haavard Blindheim'
__license__ = 'MIT License'
VERSION = __version__
| [
"[email protected]"
] | |
eb26945e850a7ec79e2f98859ec3962e49ed2159 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_315/ch4_2020_09_14_20_47_35_320810.py | fcf55ef1a3a2c80781e08fabde27b771f5906d87 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | def classifica_idade (x):
if x <= 11:
return ('Criança')
elif 12 >= x <= 17:
return ('Adolescentes')
else:
return ('Adulto')
idade =
print (classifica_idade(idade)) | [
"[email protected]"
] | |
ca6dc956467dab844f128b9f78f9895994507ef7 | f0354782628e51b1a301eba1a69e9808b4adc664 | /Problem/1837.py | 3079b66b692fc1dbe6e5fc8cf1cfaff6a2a29e35 | [] | no_license | HyunIm/Baekjoon_Online_Judge | 9b289ea27440c150ef34372dc91e6f92f4102659 | f3a4670ea2b6ee81fa4b1bdcad3412cb995e64f2 | refs/heads/master | 2023-05-26T16:54:39.643360 | 2023-05-23T04:07:08 | 2023-05-23T04:07:08 | 119,958,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | def get_prime_list(n):
    # Sieve of Eratosthenes: index i stays True while i is still considered
    # prime. (Indices 0 and 1 are never read by the final list below.)
    sieve = [True] * n
    # Only factors up to sqrt(n) need their multiples crossed off.
    m = int(n ** 0.5)
    for i in range(2, m + 1):
        if sieve[i] == True:
            # Mark every multiple of the prime i, starting at 2*i, composite.
            for j in range(i+i, n, i):
                sieve[j] = False
    return [i for i in range(2, n) if sieve[i] == True]
def isGood(P, primeList):
    """Return 'GOOD' when P is divisible by none of the given primes;
    otherwise return 'BAD <p>' for the first prime p that divides P."""
    for prime in primeList:
        if P % prime == 0:
            return ('BAD ' + str(prime))
    return 'GOOD'
# Read the candidate P and the exclusive prime bound K from one stdin line.
P, K = map(int, input().split())
# All primes strictly below K.
primeList = get_prime_list(K)
# 'GOOD' if no prime below K divides P, otherwise 'BAD <prime>'.
result = isGood(P, primeList)
print(result)
| [
"[email protected]"
] | |
c16aabe7c644a2982c6ea02d45ae4030dc12cb68 | 3a891a79be468621aae43defd9a5516f9763f36e | /desktop/core/ext-py/docutils-0.14/test/test_dependencies.py | 298e8a43bbe4945559d1c54dd12072619e493bde | [
"Apache-2.0",
"GPL-1.0-or-later",
"GPL-3.0-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"LicenseRef-scancode-proprietary-license"
] | permissive | oyorooms/hue | b53eb87f805063a90f957fd2e1733f21406269aa | 4082346ef8d5e6a8365b05752be41186840dc868 | refs/heads/master | 2020-04-15T20:31:56.931218 | 2019-01-09T19:02:21 | 2019-01-09T19:05:36 | 164,998,117 | 4 | 2 | Apache-2.0 | 2019-01-10T05:47:36 | 2019-01-10T05:47:36 | null | UTF-8 | Python | false | false | 4,961 | py | #! /usr/bin/env python
# $Id: test_dependencies.py 8059 2017-04-19 16:47:35Z milde $
# Author: Lea Wiemann <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Test module for the --record-dependencies option.
"""
import os.path
import unittest
import sys
import DocutilsTestSupport # must be imported before docutils
import docutils.core
import docutils.utils
import docutils.io
from docutils.parsers.rst.directives.images import PIL
# docutils.utils.DependencyList records POSIX paths,
# i.e. "/" as a path separator even on Windows (not os.path.join).
# Expected dependency paths keyed by test scenario (POSIX-style separators,
# see the note above about DependencyList).
paths = {'include': u'data/include.txt', # included rst file
         'raw': u'data/raw.txt', # included raw "HTML file"
         'scaled-image': u'../docs/user/rst/images/biohazard.png',
         'figure-image': u'../docs/user/rst/images/title.png',
         'stylesheet': u'data/stylesheet.txt',
         }
class RecordDependenciesTests(unittest.TestCase):
    """Checks which input files get recorded via --record-dependencies."""
    def get_record(self, **settings):
        """Run one docutils conversion with dependency recording enabled and
        return the recorded file paths as a list of strings."""
        recordfile = 'record.txt'
        recorder = docutils.utils.DependencyList(recordfile)
        # (Re) create the record file by running a conversion:
        settings.setdefault('source_path',
                            os.path.join('data', 'dependencies.txt'))
        settings.setdefault('settings_overrides', {})
        settings['settings_overrides'].update(_disable_config=True,
                                              record_dependencies=recorder)
        docutils.core.publish_file(destination=DocutilsTestSupport.DevNull(),
                                   **settings)
        recorder.close()
        # Read the record file:
        record = docutils.io.FileInput(source_path=recordfile,
                                       encoding='utf8')
        return record.read().splitlines()
    def test_dependencies(self):
        """XML writer: include/raw (and the figure image with PIL) recorded."""
        # Note: currently, raw input files are read (and hence recorded) while
        # parsing even if not used in the chosen output format.
        # This should change (see parsers/rst/directives/misc.py).
        keys = ['include', 'raw']
        if PIL:
            keys += ['figure-image']
        expected = [paths[key] for key in keys]
        record = self.get_record(writer_name='xml')
        # the order of the files is arbitrary
        record.sort()
        expected.sort()
        self.assertEqual(record, expected)
    def test_dependencies_html(self):
        """HTML writer also records the scaled image when PIL is available."""
        keys = ['include', 'raw']
        if PIL:
            keys += ['figure-image', 'scaled-image']
        expected = [paths[key] for key in keys]
        # stylesheets are tested separately in test_stylesheet_dependencies():
        so = {'stylesheet_path': None, 'stylesheet': None}
        record = self.get_record(writer_name='html', settings_overrides=so)
        # the order of the files is arbitrary
        record.sort()
        expected.sort()
        self.assertEqual(record, expected)
    def test_dependencies_latex(self):
        """LaTeX writer records only files it really accessed."""
        # since 0.9, the latex writer records only really accessed files, too.
        # Note: currently, raw input files are read (and hence recorded) while
        # parsing even if not used in the chosen output format.
        # This should change (see parsers/rst/directives/misc.py).
        keys = ['include', 'raw']
        if PIL:
            keys += ['figure-image']
        expected = [paths[key] for key in keys]
        record = self.get_record(writer_name='latex')
        # the order of the files is arbitrary
        record.sort()
        expected.sort()
        self.assertEqual(record, expected)
    def test_csv_dependencies(self):
        """CSV table sources referenced via :file: are recorded."""
        try:
            import csv
            csvsource = os.path.join('data', 'csv_dep.txt')
            self.assertEqual(self.get_record(source_path=csvsource),
                             ['data/csv_data.txt'])
        except ImportError:
            # Python built without the csv module: nothing to test.
            pass
    def test_stylesheet_dependencies(self):
        """Stylesheets are recorded only when they are embedded."""
        stylesheet = paths['stylesheet']
        so = {'stylesheet_path': paths['stylesheet'],
              'stylesheet': None}
        so['embed_stylesheet'] = False
        record = self.get_record(writer_name='html', settings_overrides=so)
        self.assertTrue(stylesheet not in record,
                        '%r should not be in %r' % (stylesheet, record))
        record = self.get_record(writer_name='latex', settings_overrides=so)
        self.assertTrue(stylesheet not in record,
                        '%r should not be in %r' % (stylesheet, record))
        so['embed_stylesheet'] = True
        record = self.get_record(writer_name='html', settings_overrides=so)
        self.assertTrue(stylesheet in record,
                        '%r should be in %r' % (stylesheet, record))
        record = self.get_record(writer_name='latex', settings_overrides=so)
        self.assertTrue(stylesheet in record,
                        '%r should be in %r' % (stylesheet, record))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| [
"[email protected]"
] | |
486034fb7355dde440f6d12c08d49916145a742b | 14f4d045750f7cf45252838d625b2a761d5dee38 | /argo/argo/models/io_k8s_api_extensions_v1beta1_daemon_set_status.py | 28f99b9e0ea5351eb392974c3994672409b1adb9 | [] | no_license | nfillot/argo_client | cf8d7413d728edb4623de403e03d119fe3699ee9 | c8cf80842f9eebbf4569f3d67b9d8eff4ba405fa | refs/heads/master | 2020-07-11T13:06:35.518331 | 2019-08-26T20:54:07 | 2019-08-26T20:54:07 | 204,546,868 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,114 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.14.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from argo.models.io_k8s_api_extensions_v1beta1_daemon_set_condition import IoK8sApiExtensionsV1beta1DaemonSetCondition # noqa: F401,E501
class IoK8sApiExtensionsV1beta1DaemonSetStatus(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Swagger-declared type of every attribute, keyed by python attribute name.
    swagger_types = {
        'collision_count': 'int',
        'conditions': 'list[IoK8sApiExtensionsV1beta1DaemonSetCondition]',
        'current_number_scheduled': 'int',
        'desired_number_scheduled': 'int',
        'number_available': 'int',
        'number_misscheduled': 'int',
        'number_ready': 'int',
        'number_unavailable': 'int',
        'observed_generation': 'int',
        'updated_number_scheduled': 'int'
    }

    # Mapping from python attribute name to the camelCase JSON field name.
    attribute_map = {
        'collision_count': 'collisionCount',
        'conditions': 'conditions',
        'current_number_scheduled': 'currentNumberScheduled',
        'desired_number_scheduled': 'desiredNumberScheduled',
        'number_available': 'numberAvailable',
        'number_misscheduled': 'numberMisscheduled',
        'number_ready': 'numberReady',
        'number_unavailable': 'numberUnavailable',
        'observed_generation': 'observedGeneration',
        'updated_number_scheduled': 'updatedNumberScheduled'
    }

    def __init__(self, collision_count=None, conditions=None, current_number_scheduled=None, desired_number_scheduled=None, number_available=None, number_misscheduled=None, number_ready=None, number_unavailable=None, observed_generation=None, updated_number_scheduled=None):  # noqa: E501
        """IoK8sApiExtensionsV1beta1DaemonSetStatus - a model defined in Swagger"""  # noqa: E501
        self._collision_count = None
        self._conditions = None
        self._current_number_scheduled = None
        self._desired_number_scheduled = None
        self._number_available = None
        self._number_misscheduled = None
        self._number_ready = None
        self._number_unavailable = None
        self._observed_generation = None
        self._updated_number_scheduled = None
        self.discriminator = None
        # Optional fields are only assigned when supplied; required fields
        # (current_number_scheduled, desired_number_scheduled,
        # number_misscheduled, number_ready) are assigned unconditionally so
        # their setters can reject a missing (None) value with ValueError.
        if collision_count is not None:
            self.collision_count = collision_count
        if conditions is not None:
            self.conditions = conditions
        self.current_number_scheduled = current_number_scheduled
        self.desired_number_scheduled = desired_number_scheduled
        if number_available is not None:
            self.number_available = number_available
        self.number_misscheduled = number_misscheduled
        self.number_ready = number_ready
        if number_unavailable is not None:
            self.number_unavailable = number_unavailable
        if observed_generation is not None:
            self.observed_generation = observed_generation
        if updated_number_scheduled is not None:
            self.updated_number_scheduled = updated_number_scheduled

    @property
    def collision_count(self):
        """Gets the collision_count of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501

        Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.  # noqa: E501

        :return: The collision_count of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501
        :rtype: int
        """
        return self._collision_count

    @collision_count.setter
    def collision_count(self, collision_count):
        """Sets the collision_count of this IoK8sApiExtensionsV1beta1DaemonSetStatus.

        Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.  # noqa: E501

        :param collision_count: The collision_count of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501
        :type: int
        """
        self._collision_count = collision_count

    @property
    def conditions(self):
        """Gets the conditions of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501

        Represents the latest available observations of a DaemonSet's current state.  # noqa: E501

        :return: The conditions of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501
        :rtype: list[IoK8sApiExtensionsV1beta1DaemonSetCondition]
        """
        return self._conditions

    @conditions.setter
    def conditions(self, conditions):
        """Sets the conditions of this IoK8sApiExtensionsV1beta1DaemonSetStatus.

        Represents the latest available observations of a DaemonSet's current state.  # noqa: E501

        :param conditions: The conditions of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501
        :type: list[IoK8sApiExtensionsV1beta1DaemonSetCondition]
        """
        self._conditions = conditions

    @property
    def current_number_scheduled(self):
        """Gets the current_number_scheduled of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501

        The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/  # noqa: E501

        :return: The current_number_scheduled of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501
        :rtype: int
        """
        return self._current_number_scheduled

    @current_number_scheduled.setter
    def current_number_scheduled(self, current_number_scheduled):
        """Sets the current_number_scheduled of this IoK8sApiExtensionsV1beta1DaemonSetStatus.

        The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/  # noqa: E501

        :param current_number_scheduled: The current_number_scheduled of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501
        :type: int
        """
        # Required field: a value must always be provided.
        if current_number_scheduled is None:
            raise ValueError("Invalid value for `current_number_scheduled`, must not be `None`")  # noqa: E501
        self._current_number_scheduled = current_number_scheduled

    @property
    def desired_number_scheduled(self):
        """Gets the desired_number_scheduled of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501

        The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/  # noqa: E501

        :return: The desired_number_scheduled of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501
        :rtype: int
        """
        return self._desired_number_scheduled

    @desired_number_scheduled.setter
    def desired_number_scheduled(self, desired_number_scheduled):
        """Sets the desired_number_scheduled of this IoK8sApiExtensionsV1beta1DaemonSetStatus.

        The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/  # noqa: E501

        :param desired_number_scheduled: The desired_number_scheduled of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501
        :type: int
        """
        # Required field: a value must always be provided.
        if desired_number_scheduled is None:
            raise ValueError("Invalid value for `desired_number_scheduled`, must not be `None`")  # noqa: E501
        self._desired_number_scheduled = desired_number_scheduled

    @property
    def number_available(self):
        """Gets the number_available of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501

        The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)  # noqa: E501

        :return: The number_available of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501
        :rtype: int
        """
        return self._number_available

    @number_available.setter
    def number_available(self, number_available):
        """Sets the number_available of this IoK8sApiExtensionsV1beta1DaemonSetStatus.

        The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)  # noqa: E501

        :param number_available: The number_available of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501
        :type: int
        """
        self._number_available = number_available

    @property
    def number_misscheduled(self):
        """Gets the number_misscheduled of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501

        The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/  # noqa: E501

        :return: The number_misscheduled of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501
        :rtype: int
        """
        return self._number_misscheduled

    @number_misscheduled.setter
    def number_misscheduled(self, number_misscheduled):
        """Sets the number_misscheduled of this IoK8sApiExtensionsV1beta1DaemonSetStatus.

        The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/  # noqa: E501

        :param number_misscheduled: The number_misscheduled of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501
        :type: int
        """
        # Required field: a value must always be provided.
        if number_misscheduled is None:
            raise ValueError("Invalid value for `number_misscheduled`, must not be `None`")  # noqa: E501
        self._number_misscheduled = number_misscheduled

    @property
    def number_ready(self):
        """Gets the number_ready of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501

        The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.  # noqa: E501

        :return: The number_ready of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501
        :rtype: int
        """
        return self._number_ready

    @number_ready.setter
    def number_ready(self, number_ready):
        """Sets the number_ready of this IoK8sApiExtensionsV1beta1DaemonSetStatus.

        The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.  # noqa: E501

        :param number_ready: The number_ready of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501
        :type: int
        """
        # Required field: a value must always be provided.
        if number_ready is None:
            raise ValueError("Invalid value for `number_ready`, must not be `None`")  # noqa: E501
        self._number_ready = number_ready

    @property
    def number_unavailable(self):
        """Gets the number_unavailable of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501

        The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)  # noqa: E501

        :return: The number_unavailable of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501
        :rtype: int
        """
        return self._number_unavailable

    @number_unavailable.setter
    def number_unavailable(self, number_unavailable):
        """Sets the number_unavailable of this IoK8sApiExtensionsV1beta1DaemonSetStatus.

        The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)  # noqa: E501

        :param number_unavailable: The number_unavailable of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501
        :type: int
        """
        self._number_unavailable = number_unavailable

    @property
    def observed_generation(self):
        """Gets the observed_generation of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501

        The most recent generation observed by the daemon set controller.  # noqa: E501

        :return: The observed_generation of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501
        :rtype: int
        """
        return self._observed_generation

    @observed_generation.setter
    def observed_generation(self, observed_generation):
        """Sets the observed_generation of this IoK8sApiExtensionsV1beta1DaemonSetStatus.

        The most recent generation observed by the daemon set controller.  # noqa: E501

        :param observed_generation: The observed_generation of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501
        :type: int
        """
        self._observed_generation = observed_generation

    @property
    def updated_number_scheduled(self):
        """Gets the updated_number_scheduled of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501

        The total number of nodes that are running updated daemon pod  # noqa: E501

        :return: The updated_number_scheduled of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501
        :rtype: int
        """
        return self._updated_number_scheduled

    @updated_number_scheduled.setter
    def updated_number_scheduled(self, updated_number_scheduled):
        """Sets the updated_number_scheduled of this IoK8sApiExtensionsV1beta1DaemonSetStatus.

        The total number of nodes that are running updated daemon pod  # noqa: E501

        :param updated_number_scheduled: The updated_number_scheduled of this IoK8sApiExtensionsV1beta1DaemonSetStatus.  # noqa: E501
        :type: int
        """
        self._updated_number_scheduled = updated_number_scheduled

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # If the model were a dict subclass (not the case here), merge its
        # own key/value items into the result as well.
        if issubclass(IoK8sApiExtensionsV1beta1DaemonSetStatus, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Structural equality: every attribute value must match.
        if not isinstance(other, IoK8sApiExtensionsV1beta1DaemonSetStatus):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"[email protected]"
] | |
7cc8883ebb5f0dd25746047288ce32ce7fa4947c | 72ec201effe17c3875f3d26ab98d6e56f808b0ac | /aoomuki_comp/app/migrations/0031_auto_20210118_1549.py | aa56a163b40bbbd5237d06285a7476af7d32fbd8 | [
"MIT"
] | permissive | Kamelgasmi/aoomuki_competences | 549f9c9167f82d084ef6048cec72e87fe90f4c35 | e02f3546f7efb54b825dbcfab968296607775903 | refs/heads/master | 2023-04-06T17:48:35.921460 | 2021-04-16T08:49:15 | 2021-04-16T08:49:15 | 330,929,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | # Generated by Django 2.1 on 2021-01-18 14:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add optional many-to-many 'interest' and 'level' links to Competence."""

    # Must be applied after the migration that added collaborater interests.
    dependencies = [
        ('app', '0030_collaborater_interest'),
    ]

    operations = [
        migrations.AddField(
            model_name='competence',
            name='interest',
            field=models.ManyToManyField(blank=True, related_name='competences', to='app.ListInterest'),
        ),
        migrations.AddField(
            model_name='competence',
            name='level',
            field=models.ManyToManyField(blank=True, related_name='competences', to='app.ListLevel'),
        ),
    ]
| [
"[email protected]"
] | |
0be49ab140e271f258fe295a477f0b5a297ad08b | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/testbase/azure-mgmt-testbase/azure/mgmt/testbase/operations/_favorite_processes_operations.py | 8b818b20a71c6821823558790cdd0752dc84b1e3 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 29,342 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
# Generic payload type used by the per-operation response callback ("cls").
T = TypeVar("T")
# Signature of the optional callback a caller may pass to post-process responses.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

_SERIALIZER = Serializer()
# Request building never validates parameters client-side.
_SERIALIZER.client_side_validation = False
def build_list_request(
    resource_group_name: str, test_base_account_name: str, package_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request that lists a package's favorite processes."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # An explicit api-version (kwarg or pre-set query parameter) wins over the default.
    api_version = kwargs.pop(
        "api_version", params.pop("api-version", "2022-04-01-preview")
    )  # type: Literal["2022-04-01-preview"]
    accept = headers.pop("Accept", "application/json")

    # Fill the ARM resource path with URL-escaped path segments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/packages/{packageName}/favoriteProcesses",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        testBaseAccountName=_SERIALIZER.url("test_base_account_name", test_base_account_name, "str"),
        packageName=_SERIALIZER.url("package_name", package_name, "str"),
    )

    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_create_request(
    resource_group_name: str,
    test_base_account_name: str,
    package_name: str,
    favorite_process_resource_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PUT request that creates or replaces a favorite process."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # An explicit api-version (kwarg or pre-set query parameter) wins over the default.
    api_version = kwargs.pop(
        "api_version", params.pop("api-version", "2022-04-01-preview")
    )  # type: Literal["2022-04-01-preview"]
    content_type = kwargs.pop("content_type", headers.pop("Content-Type", None))  # type: Optional[str]
    accept = headers.pop("Accept", "application/json")

    # Fill the ARM resource path with URL-escaped path segments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/packages/{packageName}/favoriteProcesses/{favoriteProcessResourceName}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        testBaseAccountName=_SERIALIZER.url("test_base_account_name", test_base_account_name, "str"),
        packageName=_SERIALIZER.url("package_name", package_name, "str"),
        favoriteProcessResourceName=_SERIALIZER.url(
            "favorite_process_resource_name", favorite_process_resource_name, "str"
        ),
    )

    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Content-Type is only sent when a request body content type was supplied.
    if content_type is not None:
        headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PUT", url=url, params=params, headers=headers, **kwargs)
def build_delete_request(
    resource_group_name: str,
    test_base_account_name: str,
    package_name: str,
    favorite_process_resource_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP DELETE request that removes a favorite process."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # An explicit api-version (kwarg or pre-set query parameter) wins over the default.
    api_version = kwargs.pop(
        "api_version", params.pop("api-version", "2022-04-01-preview")
    )  # type: Literal["2022-04-01-preview"]
    accept = headers.pop("Accept", "application/json")

    # Fill the ARM resource path with URL-escaped path segments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/packages/{packageName}/favoriteProcesses/{favoriteProcessResourceName}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        testBaseAccountName=_SERIALIZER.url("test_base_account_name", test_base_account_name, "str"),
        packageName=_SERIALIZER.url("package_name", package_name, "str"),
        favoriteProcessResourceName=_SERIALIZER.url(
            "favorite_process_resource_name", favorite_process_resource_name, "str"
        ),
    )

    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="DELETE", url=url, params=params, headers=headers, **kwargs)
def build_get_request(
    resource_group_name: str,
    test_base_account_name: str,
    package_name: str,
    favorite_process_resource_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request that fetches a single favorite process."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # An explicit api-version (kwarg or pre-set query parameter) wins over the default.
    api_version = kwargs.pop(
        "api_version", params.pop("api-version", "2022-04-01-preview")
    )  # type: Literal["2022-04-01-preview"]
    accept = headers.pop("Accept", "application/json")

    # Fill the ARM resource path with URL-escaped path segments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/packages/{packageName}/favoriteProcesses/{favoriteProcessResourceName}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        testBaseAccountName=_SERIALIZER.url("test_base_account_name", test_base_account_name, "str"),
        packageName=_SERIALIZER.url("package_name", package_name, "str"),
        favoriteProcessResourceName=_SERIALIZER.url(
            "favorite_process_resource_name", favorite_process_resource_name, "str"
        ),
    )

    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
class FavoriteProcessesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.testbase.TestBase`'s
:attr:`favorite_processes` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def list(
        self, resource_group_name: str, test_base_account_name: str, package_name: str, **kwargs: Any
    ) -> Iterable["_models.FavoriteProcessResource"]:
        """Lists the favorite processes for a specific package.

        :param resource_group_name: The name of the resource group that contains the resource.
         Required.
        :type resource_group_name: str
        :param test_base_account_name: The resource name of the Test Base Account. Required.
        :type test_base_account_name: str
        :param package_name: The resource name of the Test Base Package. Required.
        :type package_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either FavoriteProcessResource or the result of
         cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.testbase.models.FavoriteProcessResource]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )  # type: Literal["2022-04-01-preview"]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.FavoriteProcessListResult]

        # Translate well-known HTTP error status codes into typed exceptions.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the request from scratch.  Subsequent pages:
            # re-issue a GET against the service-provided next_link.
            if not next_link:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    test_base_account_name=test_base_account_name,
                    package_name=package_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and hand (next-page link, item iterator) to ItemPaged.
            deserialized = self._deserialize("FavoriteProcessListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch a single page, raising a typed error on non-200 responses.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(get_next, extract_data)
    list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/packages/{packageName}/favoriteProcesses"}  # type: ignore
    # Typing overload: request body supplied as a FavoriteProcessResource model.
    @overload
    def create(
        self,
        resource_group_name: str,
        test_base_account_name: str,
        package_name: str,
        favorite_process_resource_name: str,
        parameters: _models.FavoriteProcessResource,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.FavoriteProcessResource:
        """Create or replace a favorite process for a Test Base Package.

        :param resource_group_name: The name of the resource group that contains the resource.
         Required.
        :type resource_group_name: str
        :param test_base_account_name: The resource name of the Test Base Account. Required.
        :type test_base_account_name: str
        :param package_name: The resource name of the Test Base Package. Required.
        :type package_name: str
        :param favorite_process_resource_name: The resource name of a favorite process in a package. If
         the process name contains characters that are not allowed in Azure Resource Name, we use
         'actualProcessName' in request body to submit the name. Required.
        :type favorite_process_resource_name: str
        :param parameters: Parameters supplied to create a favorite process in a package. Required.
        :type parameters: ~azure.mgmt.testbase.models.FavoriteProcessResource
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: FavoriteProcessResource or the result of cls(response)
        :rtype: ~azure.mgmt.testbase.models.FavoriteProcessResource
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Typing overload: request body supplied as a raw binary/IO stream.
    @overload
    def create(
        self,
        resource_group_name: str,
        test_base_account_name: str,
        package_name: str,
        favorite_process_resource_name: str,
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.FavoriteProcessResource:
        """Create or replace a favorite process for a Test Base Package.

        :param resource_group_name: The name of the resource group that contains the resource.
         Required.
        :type resource_group_name: str
        :param test_base_account_name: The resource name of the Test Base Account. Required.
        :type test_base_account_name: str
        :param package_name: The resource name of the Test Base Package. Required.
        :type package_name: str
        :param favorite_process_resource_name: The resource name of a favorite process in a package. If
         the process name contains characters that are not allowed in Azure Resource Name, we use
         'actualProcessName' in request body to submit the name. Required.
        :type favorite_process_resource_name: str
        :param parameters: Parameters supplied to create a favorite process in a package. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: FavoriteProcessResource or the result of cls(response)
        :rtype: ~azure.mgmt.testbase.models.FavoriteProcessResource
        :raises ~azure.core.exceptions.HttpResponseError:
        """
@distributed_trace
def create(
self,
resource_group_name: str,
test_base_account_name: str,
package_name: str,
favorite_process_resource_name: str,
parameters: Union[_models.FavoriteProcessResource, IO],
**kwargs: Any
) -> _models.FavoriteProcessResource:
"""Create or replace a favorite process for a Test Base Package.
:param resource_group_name: The name of the resource group that contains the resource.
Required.
:type resource_group_name: str
:param test_base_account_name: The resource name of the Test Base Account. Required.
:type test_base_account_name: str
:param package_name: The resource name of the Test Base Package. Required.
:type package_name: str
:param favorite_process_resource_name: The resource name of a favorite process in a package. If
the process name contains characters that are not allowed in Azure Resource Name, we use
'actualProcessName' in request body to submit the name. Required.
:type favorite_process_resource_name: str
:param parameters: Parameters supplied to create a favorite process in a package. Is either a
model type or a IO type. Required.
:type parameters: ~azure.mgmt.testbase.models.FavoriteProcessResource or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FavoriteProcessResource or the result of cls(response)
:rtype: ~azure.mgmt.testbase.models.FavoriteProcessResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-04-01-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.FavoriteProcessResource]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "FavoriteProcessResource")
request = build_create_request(
resource_group_name=resource_group_name,
test_base_account_name=test_base_account_name,
package_name=package_name,
favorite_process_resource_name=favorite_process_resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("FavoriteProcessResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/packages/{packageName}/favoriteProcesses/{favoriteProcessResourceName}"} # type: ignore
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
test_base_account_name: str,
package_name: str,
favorite_process_resource_name: str,
**kwargs: Any
) -> None:
"""Deletes a favorite process for a specific package.
:param resource_group_name: The name of the resource group that contains the resource.
Required.
:type resource_group_name: str
:param test_base_account_name: The resource name of the Test Base Account. Required.
:type test_base_account_name: str
:param package_name: The resource name of the Test Base Package. Required.
:type package_name: str
:param favorite_process_resource_name: The resource name of a favorite process in a package. If
the process name contains characters that are not allowed in Azure Resource Name, we use
'actualProcessName' in request body to submit the name. Required.
:type favorite_process_resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-04-01-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
test_base_account_name=test_base_account_name,
package_name=package_name,
favorite_process_resource_name=favorite_process_resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/packages/{packageName}/favoriteProcesses/{favoriteProcessResourceName}"} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
test_base_account_name: str,
package_name: str,
favorite_process_resource_name: str,
**kwargs: Any
) -> _models.FavoriteProcessResource:
"""Gets a favorite process for a Test Base Package.
:param resource_group_name: The name of the resource group that contains the resource.
Required.
:type resource_group_name: str
:param test_base_account_name: The resource name of the Test Base Account. Required.
:type test_base_account_name: str
:param package_name: The resource name of the Test Base Package. Required.
:type package_name: str
:param favorite_process_resource_name: The resource name of a favorite process in a package. If
the process name contains characters that are not allowed in Azure Resource Name, we use
'actualProcessName' in request body to submit the name. Required.
:type favorite_process_resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FavoriteProcessResource or the result of cls(response)
:rtype: ~azure.mgmt.testbase.models.FavoriteProcessResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-04-01-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.FavoriteProcessResource]
request = build_get_request(
resource_group_name=resource_group_name,
test_base_account_name=test_base_account_name,
package_name=package_name,
favorite_process_resource_name=favorite_process_resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("FavoriteProcessResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/packages/{packageName}/favoriteProcesses/{favoriteProcessResourceName}"} # type: ignore
| [
"[email protected]"
] | |
ee6bdb9c84c524d31881c748f2c22d11a57d93ab | 1fb60677cf35066e631d618ec002d48e21aeda7a | /profil3r/modules/domain/domain.py | cfe36160927867d187c6c8c800da38fbc2629d67 | [
"MIT"
] | permissive | dannymas/Profil3r | 32c57b7f17f1c0718c7486b89ff90efed616afba | 7e3ed9d832c5bdc5a55516b5a60df4f34524d41a | refs/heads/main | 2023-04-24T23:45:27.699593 | 2021-05-03T19:56:50 | 2021-05-03T19:56:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,604 | py | import requests
import time
class Domain:
def __init__(self, config, permutations_list):
# 100 ms
self.delay = config['plateform']['domain']['rate_limit'] / 1000
# {permutation}.{tld}
self.format = config['plateform']['domain']['format']
# Top level domains
self.tld = config['plateform']['domain']['TLD']
# domains are not case sensitive
self.permutations_list = [perm.lower() for perm in permutations_list]
# domain
self.type = config['plateform']['domain']['type']
# Generate all potential domains names
def possible_domains(self):
possible_domains = []
# search all TLD (.com, .net, .org...), you can add more in the config.json file
for domain in self.tld:
for permutation in self.permutations_list:
possible_domains.append(self.format.format(
permutation = permutation,
domain = domain
))
return possible_domains
def search(self):
domains_lists = {
"type": self.type,
"accounts": []
}
possible_domains_list = self.possible_domains()
for domain in possible_domains_list:
try:
r = requests.head(domain, timeout=5)
except requests.ConnectionError:
pass
# If the domain exists
if r.status_code < 400:
domains_lists ["accounts"].append({"value": domain})
time.sleep(self.delay)
return domains_lists | [
"[email protected]"
] | |
beadf6d20bb969ef983bf3ec9d9c14a4d1743966 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02407/s171824787.py | 1e477cdfe641dd6789e56cca6b240f49558d20ad | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | num = int(input())
line = input().split(" ")
line.reverse()
print(*line)
| [
"[email protected]"
] | |
5dbe73547db33cba83f0d8333afa2a3a9d171a97 | 782e9c3d4391750463c0a5b05878d2415d4a06a3 | /db/category/rest.py | 27e7f8bca3e7e2391a3b0314c980acde624232fa | [] | no_license | powered-by-wq/insideoutside.wq.io | 23f94e671eeb5c2aabd822c8cd24cf689841556f | c248529e67df3106ddd5b36b00ae10b3519e1e0f | refs/heads/master | 2021-01-22T04:27:50.242369 | 2017-06-21T20:55:57 | 2017-06-21T20:56:33 | 92,464,066 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | from wq.db import rest
from .models import Category
rest.router.register_model(
Category,
fields="__all__",
)
| [
"[email protected]"
] | |
c55f523611483c34014135fc431d81dfc7d59241 | 7c7a258f52a6a2a1710507b3543a0c082933250b | /webscaff/commands/run/dj.py | cf5b4d01d7b8c54242efc464153dd9b9a997fd2d | [
"BSD-3-Clause"
] | permissive | idlesign/webscaff | c487407da7a1a89bbfb52d803b219b49e15a8c18 | 407bbd3e1870aaab80036b3131054599b58072de | refs/heads/master | 2022-12-25T06:42:42.009768 | 2022-12-09T14:56:30 | 2022-12-09T14:56:30 | 211,610,572 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,125 | py | from pathlib import Path
from invoke import task
from .fs import create_dir
from ..sys import fs as sys_fs
from ..utils import link_config, echo
@task
def manage(ctx, cmd):
"""Runs Django manage command(s).
:param str|list cmd:
"""
if not isinstance(cmd, list):
cmd = [cmd]
project_name = ctx.project.name
for command in cmd:
ctx.sudo(f'{project_name} {command}', pty=True, user=project_name)
def rollout(ctx):
"""Rolls out migrations and statics."""
migrate(ctx)
manage(ctx, 'collectstatic --noinput')
@task
def migrate(ctx):
"""Runs Django manage command for project to launch migrations."""
manage(ctx, 'migrate')
def create_superuser(ctx):
"""Runs Django manage command for project to create a superuser.
Tries to get e-mail from settings, and username from e-mail.
"""
command = 'createsuperuser'
username = ''
email = ctx.project.email or ''
if email:
username = email.partition('@')[0]
command += f' --email {email} --username {username}'
echo('\nCreating Django superuser %s ...' % f'[{username}]' if username else '')
manage(ctx, command)
def bootstrap(ctx):
"""Puts Django production settings file to remote."""
# Create media and static directories.
dir_state = ctx.paths.remote.project.state
create_dir(ctx, dir_state.static)
create_dir(ctx, dir_state.media)
link_config(
ctx,
title='Django',
name_local='env_production.py',
name_remote='env_production.py',
dir_remote_confs=Path(ctx.paths.remote.project.base) / 'settings'
)
migrate(ctx)
create_superuser(ctx)
def dump(ctx, target_dir):
"""Dumps Django related stuff into a target directory."""
sys_fs.gzip_dir(
ctx,
ctx.paths.remote.project.state.media,
target_dir,
)
def restore(ctx, source_dir):
"""Restores Django related stuff from a source directory."""
sys_fs.gzip_extract(
ctx,
archive=source_dir / 'media.tar.gz',
target_dir=ctx.paths.remote.project.state.media
)
| [
"[email protected]"
] | |
ce9cb02dae46b73e35c4b0bbc4bb10048d9627bb | 3114430ce15c18281117459e26eea4b774e3998a | /day5/blog/views.py | 48de02fcbfba9df1b78cc55c5c432bd1cee81630 | [
"MIT"
] | permissive | Joseamica/Easily-written-Django | c02e7333e84ca2257b7b8bfae3f6732898c5000a | 0b746638751702c453db9490fe29ef6d34e4a3bc | refs/heads/master | 2021-05-27T20:25:41.341149 | 2014-05-25T08:25:53 | 2014-05-25T08:25:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | from django.shortcuts import render
from django.http import HttpResponse
def me(request):
return HttpResponse('Hi :)')
| [
"[email protected]"
] | |
78f74a8eb59a7952d622fd4781ae53f2185f223d | 0f923ef8d024915edbe4088ce0da24ee952cd63e | /venv/Scripts/pip3-script.py | 4c1d9b27d8335a7cb149c92842c69228cf3051f7 | [] | no_license | Rpaul88/POM_Naukrii | 95c53cb1d314eca8798a07f5b6be01e34aaded0e | f399248a29c8e08bf173864f8080828ed34184e4 | refs/heads/master | 2020-06-18T03:01:25.009516 | 2019-07-10T06:26:23 | 2019-07-10T06:26:23 | 196,144,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | #!"C:\Users\Guest User\PycharmProjects\POM_Practice1\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
] | |
d8ff7cb47e5c9e6b1480a7b97b390f610bd742e8 | 9e204a5b1c5ff4ea3b115ff0559b5af803ab4d15 | /186 Reverse Words in a String II.py | 5d43c1616b0c5b156304fff51182135263cb7232 | [
"MIT"
] | permissive | Aminaba123/LeetCode | 178ed1be0733cc7390f30e676eb47cc7f900c5b2 | cbbd4a67ab342ada2421e13f82d660b1d47d4d20 | refs/heads/master | 2020-04-20T10:40:00.424279 | 2019-01-31T08:13:58 | 2019-01-31T08:13:58 | 168,795,374 | 1 | 0 | MIT | 2019-02-02T04:50:31 | 2019-02-02T04:50:30 | null | UTF-8 | Python | false | false | 789 | py | """
Premium Question
"""
__author__ = 'Daniel'
class Solution(object):
def reverseWords(self, s):
"""
in-place without allocating extra space
:type s: a list of 1 length strings (List[str])
:rtype: nothing
"""
self.reverse(s, 0, len(s))
i = 0
while i < len(s):
j = i+1
while j < len(s) and s[j] != " ":
j += 1
self.reverse(s, i, j)
i = j+1
def reverse(self, s, start, end):
i = start
j = end
while i < j-1:
s[i], s[j-1] = s[j-1], s[i]
i += 1
j -= 1
if __name__ == "__main__":
lst = list("the sky is blue")
Solution().reverseWords(lst)
assert "".join(lst) == "blue is sky the" | [
"[email protected]"
] | |
472e0f1f2bcbc12ab46ecc36e9b0b889f774b546 | d8ea695288010f7496c8661bfc3a7675477dcba0 | /examples/sound/baidu_to_mp3.py | 18dc62be42aa2966c7e393740f7042699d421b77 | [] | no_license | dabolau/demo | de9c593dabca26144ef8098c437369492797edd6 | 212f4c2ec6b49baef0ef5fcdee6f178fa21c5713 | refs/heads/master | 2021-01-17T16:09:48.381642 | 2018-10-08T10:12:45 | 2018-10-08T10:12:45 | 90,009,236 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | from aip import AipSpeech
""" 你的百度 APPID AK SK
https://console.bce.baidu.com/ai/#/ai/speech/app/list 应用列表
http://ai.baidu.com/docs#/TTS-Online-Python-SDK/top API
"""
APP_ID = '9288864'
API_KEY = '7OOA9UFvHwC3pplzPZnqQ9pF'
SECRET_KEY = '4ea30a42379528355abb0fa6e31516a2'
client = AipSpeech(APP_ID, API_KEY, SECRET_KEY)
text = input('请输入要转换为语音的文本:')
result = client.synthesis(text, 'zh', 1, {
'vol': 5,
})
# 识别正确返回语音二进制 错误则返回dict 参照下面错误码
if not isinstance(result, dict):
with open('auido.wav', 'wb') as f:
f.write(result) | [
"[email protected]"
] | |
99c12bbc5aaaf4c9ef74e2e7c22addb524520f57 | 3ab7e700203054e104e6c60295c0a8455bc388b1 | /i_entity_extractor/extractors/financing_events/financing_events_extractor.py | 5aed8640d1b6ed90cab151ea36dcd81fd788ed26 | [] | no_license | youfeng243/crawler | e8114ab5ef68bb9fd7e4296452d63b53d3d4080a | 59eaabef94de67444f09cfe5b25d481034d10f29 | refs/heads/master | 2021-07-11T10:12:27.946819 | 2017-10-12T11:35:27 | 2017-10-12T11:35:27 | 106,583,181 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,315 | py | # coding=utf8
import sys
import re
import time
sys.path.append('../../')
sys.path.append('../../../')
from i_entity_extractor.extractors.default.default_extractor import DefaultExtractor
from i_entity_extractor.common_parser_lib import etl_tool
import copy
class FinancingEventsExtractor(DefaultExtractor):
def __init__(self, topic_info, log):
DefaultExtractor.__init__(self, topic_info, log)
def format_extract_data(self, extract_data, topic_id):
"""实体解析抽取数据"""
item = copy.deepcopy(extract_data)
proportion_share_transfer = item.get('proportion_share_transfer', '')
item['proportion_share_transfer'] = etl_tool.regex_remove_na(proportion_share_transfer).strip()
# 处理行业 行业按级拆分
industry = item.get('industry', '')
lst_industry = etl_tool.regex_chinese(industry)
item['industry'] = '/'.join(lst_industry)
# 处理省市县的拆分
lst_location = etl_tool.regex_chinese(item.get('region', ''))
map_region = etl_tool.map_region(lst_location)
for region_key in map_region.keys():
item[region_key] = map_region[region_key]
item['region'] = ''.join(lst_location)
# 融资金额
amount = item.get('amount', '')
res_amount = self.parser_tool.money_parser.new_trans_money(amount, u"万", False)
item['amounts'] = res_amount[0]
item['units'] = res_amount[1]
item['currency'] = res_amount[2] if res_amount[2] else u'人民币'
# 发布时间
public_date = item.get(u'public_date', u'')
public_date = etl_tool.str2datetime(public_date, '%Y-%m-%d %H:%M:%S')
item["public_date"] = '' if not public_date else public_date.strftime("%Y-%m-%d")
return item
if __name__ == '__main__':
import pytoml
import sys
import time
from common.log import log
sys.path.append('../../')
with open('../../entity.toml', 'rb') as config:
conf = pytoml.load(config)
log.init_log(conf, console_out=conf['logger']['console'])
conf['log'] = log
topic_id = 33
from i_entity_extractor.entity_extractor_route import EntityExtractorRoute
from i_entity_extractor.common_parser_lib.mongo import MongDb
import json
route = EntityExtractorRoute(conf)
topic_info = route.all_topics.get(topic_id, None)
begin_time = time.time()
obj = FinancingEventsExtractor(topic_info, log)
extract_data = {
"_site_record_id": "http://need.pedata.cn/265460.html",
"amount": "",
"amounts": "NaN",
"city": "",
"currency": "",
"describe": "投资界消息,拟融资企业无锡睿泰科技有限公司参加“无锡服务外包企业投融资合作对接洽谈会”。",
"district": "",
"enterprise_full_name": "无锡睿泰科技有限公司",
"enterprise_short_name": "",
"enterprise_short_name_en": "",
"enterprise_site": "",
"industry": "软件外包",
"information_sources": "投资界资讯",
"innotree_score": "",
"mode": "私募融资",
"phone": "",
"project_highlights": "",
"project_name": "睿泰科技",
"proportion_share_transfer": "",
"province": "",
"public_date": "2011-11-01",
"region": "北京 · 朝阳区",
"round": "A",
"source_site": "私募通",
"tag": [],
"units": "万元"
}
entity_data = obj.format_extract_data(extract_data,topic_id)
print "-----------------------------"
for key, value in entity_data.items():
if isinstance(value, list):
for v in value:
print key, ":", v
elif isinstance(value, dict):
for key2, value2 in value.items():
print key2, ":", value2
else:
print key, ":", value
keys = ['units', 'source_site', 'describe', 'currency', 'tag', 'city', 'enterprise_short_name_en', 'district', 'amounts', 'innotree_score', 'public_date', 'founders', 'province', 'project_name', 'phone', 'enterprise_full_name', 'information_sources', 'proportion_share_transfer', 'enterprise_short_name', 'industry', 'region', 'enterprise_site', 'amount', 'project_highlights', 'mode', 'round', '_in_time', '_src', '_record_id', '_id']
transfer_data(keys, 'financing_events')
print keys
| [
"[email protected]"
] | |
cb98b3436e683f576b7be3d1ab7586b93b861a14 | 631b26854aa790915fc3ee65dc68a7c9bd1fed5b | /2048 Game/main.py | 77358e640f0730a2d973005d69316862e6533d66 | [] | no_license | Aniketthani/Kivy-Projects | 6e7dc1c079255697acc812497a481f1ffdeada54 | 0aec55f10d82b8f756a132fdc833c5aaab695a6f | refs/heads/main | 2023-04-29T10:17:15.262192 | 2021-05-05T17:29:02 | 2021-05-05T17:29:02 | 341,630,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,085 | py | from kivy.app import App
from kivy.uix.widget import Widget
from kivy.graphics import BorderImage,Color
from kivy.core.window import Window,Keyboard
from kivy.utils import get_color_from_hex
from kivy.properties import NumericProperty,ListProperty
import random
from kivy.animation import Animation
from kivy.vector import Vector
from kivy.uix.label import Label
from kivy.uix.button import Button
spacing=10
#27:38
key_vectors={Keyboard.keycodes['up']:(0,1),Keyboard.keycodes['right']:(1,0),Keyboard.keycodes['down']:(0,-1),Keyboard.keycodes['left']:(-1,0)}
colors=['EEE4DA','EDE0C8','F2B179','F59563','F6765F','F65E3B','EDCF72','EDCC61','EDC850','EDC53F','EDC22E']
tile_colors={2**i:color for i,color in enumerate(colors,start=1)}
def all_cells(flip_x=False,flip_y=False):
for x in (reversed(range(4) if flip_x else range(4))):
for y in (reversed(range(4)) if flip_y else range(4)):
yield(x,y)
class Tile(Widget):
font_size=NumericProperty(24)
number=NumericProperty(2)
color=ListProperty(get_color_from_hex(tile_colors[2]))
number_color=ListProperty(get_color_from_hex('776E65'))
def __init__(self,number=2,**kwargs):
super(Tile,self).__init__(**kwargs)
self.font_size=self.width*0.5
self.number=number
self.update_colors()
def update_colors(self):
self.color=get_color_from_hex(tile_colors[self.number])
if self.number>4:
self.number_color=get_color_from_hex('F9F6F2')
def resize(self,pos,size):
self.pos=pos
self.size=size
self.font_size=0.5*self.width
class Board(Widget):
game_won=False
moving=False
b=None
def is_deadlocked(self):
for x,y in all_cells():
if self.b[x][y] is None:
return False
number=self.b[x][y].number
if (self.can_combine(x+1,y,number) or self.can_combine(x,y+1,number)):
return False
return True
def can_combine(self,board_x,board_y,number):
return (self.valid_cells(board_x,board_y) and self.b[board_x][board_y] is not None and self.b[board_x][board_y].number==number)
def on_touch_up(self,touch):
v=Vector(touch.pos) - Vector(touch.opos) #touch.opos is initial position of touch
if v.length() <20: #discarding small touches or taps
return
if abs(v.x) > abs(v.y):
v.y=0
else:
v.x=0
v=v.normalize()
v.x=int(v.x)
v.y=int(v.y)
self.move(*v)
def valid_cells(self,board_x,board_y):
return(board_x>=0 and board_y>=0 and board_x<=3 and board_y<=3)
def can_move(self,board_x,board_y):
return(self.valid_cells(board_x,board_y) and self.b[board_x][board_y] is None)
def move(self,dir_x,dir_y):
if self.game_won:
return
if self.moving:
return
for board_x,board_y in all_cells(dir_x>0,dir_y>0):
tile=self.b[board_x][board_y]
if not tile:
continue
x,y=board_x,board_y
while self.can_move(x+dir_x,y+dir_y):
self.b[x][y]=None
x+=dir_x
y+=dir_y
self.b[x][y]=tile
if self.can_combine(x+dir_x,y+dir_y,tile.number):
self.b[x][y]=None
x+=dir_x
y+=dir_y
self.remove_widget(self.b[x][y])
self.b[x][y]=tile
self.b[x][y].number *=2
self.b[x][y].update_colors()
if self.b[x][y].number==2048:
message_box=self.parent.ids.message_box
message_box.add_widget(Label(text="Congratulation !!You Won The Game",font_size=20,color=(0,0,0,1),bold=True))
message_box.add_widget(Button(text="New Game", font_size=20,on_press=app.new_game))
self.game_won=True
if x==board_x and y==board_y:
continue
anim=Animation(pos=self.cell_pos(x,y),duration=0.25,transition="linear")
if not self.moving:
anim.on_complete=self.new_tile
self.moving=True
anim.start(tile)
def new_tile(self,*args):
empty_cells=[(x,y) for x,y in all_cells() if self.b[x][y]==None]
x,y=random.choice(empty_cells)
tile=Tile(pos=self.cell_pos(x,y),size=self.cell_size)
self.b[x][y]=tile
self.add_widget(tile)
if len(empty_cells)==1 and self.is_deadlocked():
message_box=self.parent.ids.message_box
message_box.add_widget(Label(text="Game over (board is deadlocked)",font_size=20,color=(0,0,0,1),bold=True))
message_box.add_widget(Button(text="New Game", font_size=20,on_press=app.new_game))
self.moving=False
def reset(self,*args):
self.b=[[None for i in range(4)] for j in range(4)]
self.new_tile()
self.new_tile()
def __init__(self, **kwargs):
super(Board, self).__init__(**kwargs)
self.resize()
def resize(self, *args):
self.cell_size=(0.25*(self.width-5*spacing),)*2
self.canvas.before.clear()
with self.canvas.before:
BorderImage(pos=self.pos, size=self.size, source='images/board.png')
Color(*get_color_from_hex("ccc0b4"))
for board_x,board_y in all_cells():
BorderImage(pos=self.cell_pos(board_x,board_y),size=self.cell_size,source="images/cell.png")
if not self.b:
return
for board_x,board_y in all_cells():
tile=self.b[board_x][board_y]
if tile:
tile.resize(pos=self.cell_pos(board_x,board_y),size=self.cell_size)
def on_key_down(self,window,key,*args):
if key in key_vectors:
self.move(*key_vectors[key])
def cell_pos(self,board_x,board_y):
return (self.x + spacing + (self.cell_size[0] + spacing)*board_x , self.y + spacing + (self.cell_size[1] + spacing)*board_y)
on_pos = resize
on_size = resize
class GameApp(App):
def on_start(self):
board = self.root.ids.board
board.reset()
Window.bind(on_key_down=board.on_key_down)
def new_game(self,*args):
message_box=self.root.ids.message_box
m_children=message_box.children[:]
for w in m_children:
message_box.remove_widget(w)
board=self.root.ids.board
b_children=board.children[:]
for wid in b_children:
board.remove_widget(wid)
board.b=[[None for i in range(4)] for j in range(4)]
board.new_tile()
board.new_tile()
self.game_won=False
if __name__ == '__main__':
Window.clearcolor=(1,1,1,1)
app=GameApp()
app.run()
| [
"[email protected]"
] | |
cee91f204aa9e2228d918ecb5502764b58f7d261 | 9912570da4f0fc380c3eece7797b8deb5a4240c1 | /colcon_core/topological_order.py | 73d49ac0ed96cfd3cfa8a06bbc31696f2fe91b63 | [
"Apache-2.0"
] | permissive | ruffsl/colcon-core | 2c26aa3d71ed4a1178e0e94e0371d4a13fab9aff | 8dc3d7ec98e36397f349ede03e487da0cad336f4 | refs/heads/master | 2021-06-25T13:10:24.936062 | 2019-10-17T20:44:19 | 2019-10-17T20:44:19 | 215,967,652 | 0 | 0 | Apache-2.0 | 2019-10-18T07:38:14 | 2019-10-18T07:38:12 | null | UTF-8 | Python | false | false | 3,071 | py | # Copyright 2016-2018 Dirk Thomas
# Licensed under the Apache License, Version 2.0
from collections import OrderedDict
import copy
from colcon_core.package_decorator import PackageDecorator
def topological_order_packages(
descriptors, direct_categories=None, recursive_categories=None,
):
"""
Order packages topologically.
:param descriptors: the package descriptors
:type descriptors: set of
:py:class:`colcon_core.package_descriptor.PackageDescriptor`
:returns: list of package decorators
:rtype: list of :py:class:`colcon_core.package_decorator.PackageDecorator`
"""
# get recursive dependencies for all packages
queued = set()
for descriptor in descriptors:
rec_deps = descriptor.get_recursive_dependencies(
descriptors,
direct_categories=direct_categories,
recursive_categories=recursive_categories)
d = _PackageDependencies(
descriptor=descriptor,
recursive_dependencies=rec_deps,
remaining_dependencies=copy.deepcopy(rec_deps),
)
queued.add(d)
ordered = OrderedDict()
while len(ordered) < len(descriptors):
# remove dependencies on already ordered packages
ordered_names = {descriptor.name for descriptor in ordered.keys()}
for q in queued:
q.remaining_dependencies -= ordered_names
# find all queued packages without remaining dependencies
ready = list(filter(lambda q: not q.remaining_dependencies, queued))
if not ready:
lines = [
'%s: %s' % (
q.descriptor.name, sorted(q.remaining_dependencies))
for q in queued]
lines.sort()
raise RuntimeError(
'Unable to order packages topologically:\n' + '\n'.join(lines))
# order ready jobs alphabetically for a deterministic order
ready.sort(key=lambda d: d.descriptor.name)
# add all ready jobs to ordered dictionary
for r in ready:
ordered[r.descriptor] = r.recursive_dependencies
queued.remove(r)
# create ordered list of package decorators
decorators = []
ordered_keys = [descriptor.name for descriptor in ordered.keys()]
for descriptor, recursive_dependencies in ordered.items():
decorator = PackageDecorator(descriptor)
# reorder recursive dependencies according to the topological ordering
decorator.recursive_dependencies = sorted(
(d for d in recursive_dependencies if d in ordered_keys),
key=ordered_keys.index)
decorators.append(decorator)
return decorators
class _PackageDependencies:
__slots__ = (
'descriptor', 'recursive_dependencies', 'remaining_dependencies')
def __init__(
self, descriptor, recursive_dependencies, remaining_dependencies,
):
self.descriptor = descriptor
self.recursive_dependencies = recursive_dependencies
self.remaining_dependencies = remaining_dependencies
| [
"[email protected]"
] | |
12005d23ef4de33c4530ad05d94c0ad17d294d7d | 6ca3acb227e340edbee80668591e7008cc256b5b | /flask_appbuilder/security/forms.py | 4f908185ef48991a76bb2d12b026c79356b05360 | [
"BSD-3-Clause"
] | permissive | tuxskar/Flask-AppBuilder | 4c69dce5c13f85b930d5b4761945b33ffb231ef7 | 4f65bbbd7edc6e7ca7c5f62a499677565e0662e1 | refs/heads/master | 2021-01-12T21:04:22.702263 | 2014-12-17T00:20:58 | 2014-12-17T00:20:58 | 28,113,074 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,288 | py | from wtforms import StringField, BooleanField, PasswordField
from flask.ext.wtf.recaptcha import RecaptchaField
from flask.ext.babelpkg import lazy_gettext
from wtforms.validators import DataRequired, EqualTo, Email
from ..fieldwidgets import BS3PasswordFieldWidget, BS3TextFieldWidget
from ..forms import DynamicForm
from ..validators import Unique
class LoginForm_oid(DynamicForm):
openid = StringField(lazy_gettext('openid'), validators=[DataRequired()])
username = StringField(lazy_gettext('User Name'))
remember_me = BooleanField(lazy_gettext('remember_me'), default=False)
class LoginForm_db(DynamicForm):
username = StringField(lazy_gettext('User Name'), validators=[DataRequired()])
password = PasswordField(lazy_gettext('Password'), validators=[DataRequired()])
class ResetPasswordForm(DynamicForm):
password = PasswordField(lazy_gettext('Password'),
description=lazy_gettext(
'Please use a good password policy, this application does not check this for you'),
validators=[DataRequired()],
widget=BS3PasswordFieldWidget())
conf_password = PasswordField(lazy_gettext('Confirm Password'),
description=lazy_gettext('Please rewrite the password to confirm'),
validators=[EqualTo('password', message=lazy_gettext('Passwords must match'))],
widget=BS3PasswordFieldWidget())
class RegisterUserDBForm(DynamicForm):
username = StringField(lazy_gettext('User Name'), validators=[DataRequired()], widget=BS3TextFieldWidget())
first_name = StringField(lazy_gettext('First Name'), validators=[DataRequired()], widget=BS3TextFieldWidget())
last_name = StringField(lazy_gettext('Last Name'), validators=[DataRequired()], widget=BS3TextFieldWidget())
email = StringField(lazy_gettext('Email'), validators=[DataRequired(), Email()], widget=BS3TextFieldWidget())
password = PasswordField(lazy_gettext('Password'),
description=lazy_gettext(
'Please use a good password policy, this application does not check this for you'),
validators=[DataRequired()],
widget=BS3PasswordFieldWidget())
conf_password = PasswordField(lazy_gettext('Confirm Password'),
description=lazy_gettext('Please rewrite the password to confirm'),
validators=[EqualTo('password', message=lazy_gettext('Passwords must match'))],
widget=BS3PasswordFieldWidget())
recaptcha = RecaptchaField()
class RegisterUserOIDForm(DynamicForm):
username = StringField(lazy_gettext('User Name'), validators=[DataRequired()], widget=BS3TextFieldWidget())
first_name = StringField(lazy_gettext('First Name'), validators=[DataRequired()], widget=BS3TextFieldWidget())
last_name = StringField(lazy_gettext('Last Name'), validators=[DataRequired()], widget=BS3TextFieldWidget())
email = StringField(lazy_gettext('Email'), validators=[DataRequired(), Email()], widget=BS3TextFieldWidget())
recaptcha = RecaptchaField()
| [
"[email protected]"
] | |
44a597405bfe6e771c71784c3ec66d5a28942841 | 72d70d86bde200aab004ffe019b84f89f0978fd1 | /postfix_mta_sts_resolver/internal_cache.py | 30607f837ca672c5e3b9b1db6896707d403da275 | [
"MIT"
] | permissive | Kernel-Error/postfix-mta-sts-resolver | 4da8d198e449d4bb6460a19a78a64d1a5783fdf7 | 5d1fba4d45d022bdd419e4a352d8555c4c66a3a3 | refs/heads/master | 2020-05-01T16:58:06.581584 | 2019-03-28T07:19:05 | 2019-03-28T07:19:05 | 177,587,050 | 0 | 0 | MIT | 2019-03-25T12:59:44 | 2019-03-25T12:59:44 | null | UTF-8 | Python | false | false | 612 | py | import collections
class InternalLRUCache(object):
def __init__(self, capacity=10000):
self._capacity = capacity
self._cache = collections.OrderedDict()
async def get(self, key):
try:
value = self._cache.pop(key)
self._cache[key] = value
return value
except KeyError:
return None
async def set(self, key, value):
try:
self._cache.pop(key)
except KeyError:
if len(self._cache) >= self._capacity:
self._cache.popitem(last=False)
self._cache[key] = value
| [
"[email protected]"
] | |
ac825cfa60bf4b58f7ffaaeb66f3d47eb41b178a | a718de5d51c8d430e791aca6092669c04548fd64 | /Census-Analyser-master/census_analyser/stateCensusAnalyser.py | 65bb4d6c9734650262bd48799052c4630712bd47 | [] | no_license | santoshikalaskar/Basic_Advance_python_program | d0fef4134ed4b14f84ff05a3b37e1773c111a2d1 | 84df5c336d5304c3c727102194ba62417640643a | refs/heads/master | 2023-01-22T15:06:24.909145 | 2020-12-02T14:01:29 | 2020-12-02T14:01:29 | 314,511,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,858 | py | import pandas as pd
from custom_exceptions import (FileIsNotCSVTypeException,
EmptyFileException,
InvalidDelimiterException)
from abc import ABC, abstractmethod
import json
'''
StatusCensusAnalyser class will load StateCensus data
'''
class StateCensusAnalyser:
def __init__(self):
self.state = 'State'
self.population = 'Population'
self.areaInSqKm = 'AreaInSqKm'
self.densityPerSqKm = 'DensityPerSqKm'
def __repr__(self):
return self.state +','+ self.population +','+ self.areaInSqKm +','+ self.densityPerSqKm
'''
CSVState class will load data from state code csv file
'''
class CSVState:
def __init__(self):
self.srNo = 'SrNo'
self.stateName = 'StateName'
self.tin = 'TIN'
self.stateCode = 'StateCode'
def __repr__(self):
return self.srNo +','+ self.stateName +','+ self.tin +','+ self.stateCode
'''
CSVStateCensus class will inherit StateCensusAnalyser and CSVState to load data from csv file.
'''
class CSVStateCensus(StateCensusAnalyser, CSVState):
def __init__(self, file_name):
self.file_name = file_name
@property
def col_list(self):
if self.file_name == 'StateCode.csv':
col_list = repr(CSVState()).split(",")
else:
col_list = repr(StateCensusAnalyser()).split(",")
return col_list
@property
def load_CSV(self):
if self.file_name[-4:] != '.csv':
raise FileIsNotCSVTypeException
try:
df = pd.read_csv(self.file_name, usecols=self.col_list)
if df.isnull().values.any():
raise InvalidDelimiterException
return df
except pd.errors.EmptyDataError:
raise EmptyFileException
except ValueError:
return "InvalidHeader"
def iterate_df(self, dataframe): #Iterate dataframe into touples
df_list = [list(row) for row in dataframe.values]
return df_list
def number_of_records(self, dataframe): #Return Number of rows in csv or records
return len(dataframe) - 1
'''
SortData class will have all method according to all sorting method and save data into json
'''
class SortData(CSVStateCensus):
def __init__(self):
self.code_data_frame = CSVStateCensus("StateCode.csv").load_CSV
self.census_data_frame = CSVStateCensus("IndiaStateCensusData.csv").load_CSV
def __sorting_function(self,dataframe,col_name,ascending=True): #Sorting functtion
return dataframe.sort_values([col_name],ascending=ascending)
def __sort_InidaCensusData_in_alphabetical_order_in_JSON(self): #sort and returns stateCensus data according to state
sorted_df = self.__sorting_function(self.census_data_frame,"State")
sorted_df.to_json(r'IndiStateCensusData.json', orient='records')
with open('IndiStateCensusData.json','r') as json_file:
census = json.load(json_file)
return census
def __sort_StateCode_in_stateCode_order_in_JSON(self): #sort and returns stateCode data according to Code
sorted_df = self.__sorting_function(self.code_data_frame,'StateCode')
sorted_df.to_json(r'StateCode.json', orient='records')
with open('StateCode.json','r') as json_file:
census = json.load(json_file)
return census
def __sort_InidaCensusData_in_asc_population_order_in_JSON(self): #sort and returns stateCensus data according to population
sorted_df = self.__sorting_function(self.census_data_frame,'Population')
sorted_df.to_json(r'IndiStateCensusData_asc_population.json', orient='records')
with open('IndiStateCensusData_asc_population.json','r') as json_file:
census = json.load(json_file)
return census
def __sort_InidaCensusData_in_asc_population_density_order_in_JSON(self): #sort and returns stateCensus data according to populationSensity
sorted_df = self.__sorting_function(self.census_data_frame,"DensityPerSqKm")
sorted_df.to_json(r'IndiStateCensusData_asc_populationDensity.json', orient='records')
with open('IndiStateCensusData_asc_populationDensity.json','r') as json_file:
census = json.load(json_file)
return census
def __sort_InidaCensusData_in_desc_area_order_in_JSON(self): #sort and returns stateCensus data according to descending area Area
sorted_df = self.__sorting_function(self.census_data_frame,"AreaInSqKm",ascending=False)
sorted_df.to_json(r'IndiStateCensusData_desc_area.json', orient='records')
with open('IndiStateCensusData_desc_area.json','r') as json_file:
census = json.load(json_file)
return census
'''
Mapping class inherits SortData class and map state census with state code.
'''
class Mapping(SortData):
def __map_state_census_with_state_code_according_to_code(self):
merge_inner = pd.merge(left=self.code_data_frame, right=self.census_data_frame,left_on='StateName',right_on='State')
merged_data = merge_inner.drop(['SrNo'], axis=1)
sort_state_code = merged_data.sort_values('StateCode')
sort_state_code.to_json(r'Mapped_data_acc_to_stateCode.json', orient='records')
with open('Mapped_data_acc_to_stateCode.json','r') as map_file:
map_data = json.load(map_file)
return map_data
# file_name = "IndiaStateCensusData.csv"
# invalid_header_file = "csv_with_invalid_header.csv"
# invalid_delimiter_file = "csv_with_invalid_delimiter.csv"
# demo_empty_csv = "demo_empty.csv"
# demo_txt = "demo_empty.txt"
# code_csv = 'StateCode.csv'
# obj = CSVStateCensus(file_name)
# df = obj.load_CSV
# d = df.sort_values(['State'])
# print(d)
# s = sort_ref.sort_InidaCensusData_in_asc_population_density_order_in_JSON(df)
# print(s)
# print(sorted_df)
# print(df)
# df_list = obj.iterate_df(df)
# print(df_list)
# if df.isnull().values.any():
# print("yes")
# for index in df.index:
# print(df['DensityPerSqKm'][index])
# if df['DensityPerSqKm'][index] == None:
# print("Invalid")
# print(df)
# print(df._engine.data.dialect.delimiter)
# total_records = obj.number_of_records(df)
# print(total_records)
# print(len(df))
# obj.iterate_df(df)
# df_goa = df.loc[df["State"]=="Goa"]
# print(df_goa)
# print(df_goa['Population'])
# for ind in df.index:
# print(df['State'][ind])
# s = SortData()
# d = s._SortData__sort_InidaCensusData_in_asc_population_density_order_in_JSON()
# print(d)
# for data in d:
# print(data['State'])
# m = Mapping()
# c = m._Mapping__map_state_census_with_state_code_according_to_code()
# print(c)
| [
"[email protected]"
] | |
ce93ee342b53ef4659e02efb6bf0f51b77633dac | eb7ef1340440c36a51b155943a3536f6e37fc6da | /codeonline/migrations/0015_auto_20160801_1430.py | b89f90a6231a391b265c768ac5a1db499a9c6628 | [] | no_license | shineforever/codeonlinesystem | 48140d113ba36646c0d91cf7c6e7980a31cc1f41 | d3fd33238b1a0148e3bce86a60e29fedf34b93f1 | refs/heads/master | 2020-06-12T13:09:45.238748 | 2016-09-12T09:36:58 | 2016-09-12T09:36:58 | 75,816,574 | 0 | 1 | null | 2016-12-07T08:42:15 | 2016-12-07T08:42:15 | null | UTF-8 | Python | false | false | 515 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('codeonline', '0014_auto_20160801_1429'),
]
operations = [
migrations.AlterField(
model_name='onlinerequest',
name='maintenance_manager_comfirm_time',
field=models.DateTimeField(null=True, verbose_name='\u8fd0\u7ef4\u7ecf\u7406\u786e\u8ba4\u65f6\u95f4', blank=True),
),
]
| [
"[email protected]"
] | |
e5948b84b17b5070adb8d26774c3c4ee72d3152a | 5ad9f0e5602c9986c190215c0e5957a35d1a43cb | /venv/Lib/site-packages/nltk/corpus/reader/pl196x.py | 93b8b1983800682bf157e724bbfef42133a5f86c | [
"MIT"
] | permissive | wlsl4239/Tacotron-Wavenet-Vocoder | cef6606a113add0391f467e102b4a6736d94e2fd | afc60aac989f1fed827e9cf8f7df0c0c05c67885 | refs/heads/master | 2020-07-17T10:18:03.653974 | 2019-11-20T12:40:43 | 2019-11-20T12:40:43 | 205,998,408 | 2 | 0 | MIT | 2019-09-03T05:47:35 | 2019-09-03T05:47:35 | null | UTF-8 | Python | false | false | 11,292 | py | # Natural Language Toolkit:
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Piotr Kasprzyk <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from six import string_types
from nltk.corpus.reader.api import *
from nltk.corpus.reader.xmldocs import XMLCorpusReader
PARA = re.compile(r'<p(?: [^>]*){0,1}>(.*?)</p>')
SENT = re.compile(r'<s(?: [^>]*){0,1}>(.*?)</s>')
TAGGEDWORD = re.compile(r'<([wc](?: [^>]*){0,1}>)(.*?)</[wc]>')
WORD = re.compile(r'<[wc](?: [^>]*){0,1}>(.*?)</[wc]>')
TYPE = re.compile(r'type="(.*?)"')
ANA = re.compile(r'ana="(.*?)"')
TEXTID = re.compile(r'text id="(.*?)"')
class TEICorpusView(StreamBackedCorpusView):
def __init__(self, corpus_file,
tagged, group_by_sent, group_by_para,
tagset=None, head_len=0, textids=None):
self._tagged = tagged
self._textids = textids
self._group_by_sent = group_by_sent
self._group_by_para = group_by_para
# WARNING -- skip header
StreamBackedCorpusView.__init__(self, corpus_file, startpos=head_len)
_pagesize = 4096
def read_block(self, stream):
block = stream.readlines(self._pagesize)
block = concat(block)
while (block.count('<text id') > block.count('</text>')) \
or block.count('<text id') == 0:
tmp = stream.readline()
if len(tmp) <= 0:
break
block += tmp
block = block.replace('\n', '')
textids = TEXTID.findall(block)
if self._textids:
for tid in textids:
if tid not in self._textids:
beg = block.find(tid) - 1
end = block[beg:].find('</text>') + len('</text>')
block = block[:beg] + block[beg + end:]
output = []
for para_str in PARA.findall(block):
para = []
for sent_str in SENT.findall(para_str):
if not self._tagged:
sent = WORD.findall(sent_str)
else:
sent = list(
map(self._parse_tag, TAGGEDWORD.findall(sent_str)))
if self._group_by_sent:
para.append(sent)
else:
para.extend(sent)
if self._group_by_para:
output.append(para)
else:
output.extend(para)
return output
def _parse_tag(self, tag_word_tuple):
(tag, word) = tag_word_tuple
if tag.startswith('w'):
tag = ANA.search(tag).group(1)
else: # tag.startswith('c')
tag = TYPE.search(tag).group(1)
return word, tag
class Pl196xCorpusReader(CategorizedCorpusReader, XMLCorpusReader):
head_len = 2770
def __init__(self, *args, **kwargs):
if 'textid_file' in kwargs:
self._textids = kwargs['textid_file']
else:
self._textids = None
XMLCorpusReader.__init__(self, *args)
CategorizedCorpusReader.__init__(self, kwargs)
self._init_textids()
def _init_textids(self):
self._f2t = defaultdict(list)
self._t2f = defaultdict(list)
if self._textids is not None:
with open(self._textids) as fp:
for line in fp:
line = line.strip()
file_id, text_ids = line.split(' ', 1)
if file_id not in self.fileids():
raise ValueError(
'In text_id mapping file %s: %s not found'
% (self._textids, file_id)
)
for text_id in text_ids.split(self._delimiter):
self._add_textids(file_id, text_id)
def _add_textids(self, file_id, text_id):
self._f2t[file_id].append(text_id)
self._t2f[text_id].append(file_id)
def _resolve(self, fileids, categories, textids=None):
tmp = None
if len(filter(lambda accessor: accessor is None,
(fileids, categories, textids))) != 1:
raise ValueError('Specify exactly one of: fileids, '
'categories or textids')
if fileids is not None:
return fileids, None
if categories is not None:
return self.fileids(categories), None
if textids is not None:
if isinstance(textids, string_types):
textids = [textids]
files = sum((self._t2f[t] for t in textids), [])
tdict = dict()
for f in files:
tdict[f] = (set(self._f2t[f]) & set(textids))
return files, tdict
def decode_tag(self, tag):
# to be implemented
return tag
def textids(self, fileids=None, categories=None):
"""
In the pl196x corpus each category is stored in single
file and thus both methods provide identical functionality. In order
to accommodate finer granularity, a non-standard textids() method was
implemented. All the main functions can be supplied with a list
of required chunks---giving much more control to the user.
"""
fileids, _ = self._resolve(fileids, categories)
if fileids is None: return sorted(self._t2f)
if isinstance(fileids, string_types):
fileids = [fileids]
return sorted(sum((self._f2t[d] for d in fileids), []))
def words(self, fileids=None, categories=None, textids=None):
fileids, textids = self._resolve(fileids, categories, textids)
if fileids is None:
fileids = self._fileids
elif isinstance(fileids, string_types):
fileids = [fileids]
if textids:
return concat([TEICorpusView(self.abspath(fileid),
False, False, False,
head_len=self.head_len,
textids=textids[fileid])
for fileid in fileids])
else:
return concat([TEICorpusView(self.abspath(fileid),
False, False, False,
head_len=self.head_len)
for fileid in fileids])
def sents(self, fileids=None, categories=None, textids=None):
fileids, textids = self._resolve(fileids, categories, textids)
if fileids is None:
fileids = self._fileids
elif isinstance(fileids, string_types):
fileids = [fileids]
if textids:
return concat([TEICorpusView(self.abspath(fileid),
False, True, False,
head_len=self.head_len,
textids=textids[fileid])
for fileid in fileids])
else:
return concat([TEICorpusView(self.abspath(fileid),
False, True, False,
head_len=self.head_len)
for fileid in fileids])
def paras(self, fileids=None, categories=None, textids=None):
fileids, textids = self._resolve(fileids, categories, textids)
if fileids is None:
fileids = self._fileids
elif isinstance(fileids, string_types):
fileids = [fileids]
if textids:
return concat([TEICorpusView(self.abspath(fileid),
False, True, True,
head_len=self.head_len,
textids=textids[fileid])
for fileid in fileids])
else:
return concat([TEICorpusView(self.abspath(fileid),
False, True, True,
head_len=self.head_len)
for fileid in fileids])
def tagged_words(self, fileids=None, categories=None, textids=None):
fileids, textids = self._resolve(fileids, categories, textids)
if fileids is None:
fileids = self._fileids
elif isinstance(fileids, string_types):
fileids = [fileids]
if textids:
return concat([TEICorpusView(self.abspath(fileid),
True, False, False,
head_len=self.head_len,
textids=textids[fileid])
for fileid in fileids])
else:
return concat([TEICorpusView(self.abspath(fileid),
True, False, False,
head_len=self.head_len)
for fileid in fileids])
def tagged_sents(self, fileids=None, categories=None, textids=None):
fileids, textids = self._resolve(fileids, categories, textids)
if fileids is None:
fileids = self._fileids
elif isinstance(fileids, string_types):
fileids = [fileids]
if textids:
return concat([TEICorpusView(self.abspath(fileid),
True, True, False,
head_len=self.head_len,
textids=textids[fileid])
for fileid in fileids])
else:
return concat([TEICorpusView(self.abspath(fileid),
True, True, False,
head_len=self.head_len)
for fileid in fileids])
def tagged_paras(self, fileids=None, categories=None, textids=None):
fileids, textids = self._resolve(fileids, categories, textids)
if fileids is None:
fileids = self._fileids
elif isinstance(fileids, string_types):
fileids = [fileids]
if textids:
return concat([TEICorpusView(self.abspath(fileid),
True, True, True,
head_len=self.head_len,
textids=textids[fileid])
for fileid in fileids])
else:
return concat([TEICorpusView(self.abspath(fileid),
True, True, True,
head_len=self.head_len)
for fileid in fileids])
def xml(self, fileids=None, categories=None):
fileids, _ = self._resolve(fileids, categories)
if len(fileids) == 1:
return XMLCorpusReader.xml(self, fileids[0])
else:
raise TypeError('Expected a single file')
def raw(self, fileids=None, categories=None):
fileids, _ = self._resolve(fileids, categories)
if fileids is None:
fileids = self._fileids
elif isinstance(fileids, string_types):
fileids = [fileids]
return concat([self.open(f).read() for f in fileids])
| [
"[email protected]"
] | |
ec2bd921cea11a71e77592eb7d65b7d23ba058d8 | 0cdcee391e178092d7073734957075c72681f037 | /leetcode/LeetCode-150/Linked-Lists/146-LRU-Cache.py | 6b25ffca12654a902b028bc202ab82000ff7cde2 | [] | no_license | hrishikeshtak/Coding_Practises_Solutions | 6b483bbf19d5365e18f4ea1134aa633ff347a1c1 | 86875d7436a78420591a60b716acd2780287b4a8 | refs/heads/master | 2022-10-06T18:44:56.992451 | 2022-09-25T03:29:03 | 2022-09-25T03:29:03 | 125,744,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,607 | py | """
146. LRU Cache
"""
class DLLNode:
def __init__(self, key, val):
self.key = key
self.val = val
self.prev = self.next = None
class LRUCache:
def __init__(self, capacity: int):
self.cache = {}
self.capacity = capacity
self.left, self.right = DLLNode(0, 0), DLLNode(0, 0)
self.left.next = self.right
self.right.prev = self.left
def insert(self, node):
# insert at right
# update left ptr
prev, nxt = self.right.prev, self.right
prev.next = nxt.prev = node
node.next = nxt
node.prev = prev
def remove(self, node):
# remove from left
prev, nxt = node.prev, node.next
prev.next = nxt
nxt.prev = prev
def get(self, key: int) -> int:
if key in self.cache:
self.remove(self.cache[key])
self.insert(self.cache[key])
return self.cache[key].val
return -1
def put(self, key: int, value: int) -> None:
if key in self.cache:
self.remove(self.cache[key])
self.cache[key] = DLLNode(key, value)
self.insert(self.cache[key])
if len(self.cache) > self.capacity:
# remove from list and delete from cache
lru = self.left.next
self.remove(lru)
del self.cache[lru.key]
# Your LRUCache object will be instantiated and called as such:
capacity = 2
obj = LRUCache(capacity)
print(f"get 1: {obj.get(1)}")
obj.put(1, 1)
obj.put(2, 2)
print(f"get 1: {obj.get(1)}")
obj.put(3, 3)
print(f"get 1: {obj.get(1)}")
| [
"[email protected]"
] | |
09155e31de77f9be942b8e3ed3e5fe489bea2798 | 0deefdcfc6219f20fa1ff9ff8d766baa30af993a | /smappdragon/tools/tweet_cleaner.py | 5e4ae3993517fd74494508526ab58fe179285b3b | [
"MIT"
] | permissive | yinleon/smappdragon | c743af04ff9b46ef1221ca1b7824c4cc6fd00a24 | 691cb66f26543e47293f38cf5658c4172d676013 | refs/heads/master | 2021-01-19T04:14:57.897437 | 2017-03-29T20:17:58 | 2017-03-29T20:17:58 | 87,361,256 | 1 | 0 | null | 2017-04-05T22:14:05 | 2017-04-05T22:14:05 | null | UTF-8 | Python | false | false | 1,774 | py | import os
import csv
import glob
import bson
import json
def clean_tweets(input_file_path, output_file_path, error_file_path):
json_handle = open(input_file_path, 'r', encoding='utf-8')
with open(output_file_path, 'w', encoding='utf-8') as fo:
with open(error_file_path, 'w', encoding='utf-8') as f:
for count, line in enumerate(json_handle):
try:
tweet = json.loads(line)
fo.write(json.dumps(tweet))
fo.write('\n')
except:
f.write(line)
json_handle.close()
def clean_tweets_multiple(input_file_pattern, output_file_path, error_file_path):
for path in glob.glob(os.path.expanduser(input_file_pattern)):
json_handle = open(path, 'r', encoding='utf-8')
with open(output_file_path, 'a', encoding='utf-8') as fo:
with open(error_file_path, 'a', encoding='utf-8') as f:
for count, line in enumerate(json_handle):
try:
tweet = json.loads(line)
fo.write(json.dumps(tweet))
fo.write('\n')
except:
f.write(line)
json_handle.close()
class SmappError(Exception):
pass
'''
@yvan
can be used to clean tweets in a general catch all sense
kept separate from the data sources bson_collection, etc
to keep datasource implementation simple, its also not
a core function, its really more of an extra, how you clean
your data is up to you, we jsut offer this way.
methods can get big and out of hand very quickly for cleaning
data. so im putting this here in an attempt to keep other parts
of the code from getting too crazy and unmaintainable.
''' | [
"[email protected]"
] | |
c6ae54111ef04b17b81e9f854d4bd1972e7d553a | 74f5a4630c708e71224af55bb3eb11a503014f6f | /test01/wifi.py | 03924499a81ca6e6519bb39a0059234054c32e4c | [] | no_license | msgpo/lopy_tests | e6bbc3c77d701a7303567e5c53856e9e04615b2b | 513178b01422e46cab3cc5f39b4c65d1e5f5a408 | refs/heads/master | 2021-06-12T23:07:26.697394 | 2017-02-13T16:05:32 | 2017-02-13T16:05:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,704 | py | # Wifi Configuration
#
import machine
from network import WLAN
import time
_wlan = WLAN(mode=WLAN.STA)
# connect to wifi
def connect():
global wifi_config, _wlan
nets = _wlan.scan()
for net in nets:
if net.ssid == wifi_config.name:
print('Wifi: network %s found.' % net.ssid)
_wlan.connect(net.ssid, auth=(net.sec, wifi_config.password), timeout=5000)
tries=15
for i in range(tries):
print("%d/%d. Trying to connect." %(i+1, tries))
machine.idle()
time.sleep(1)
if _wlan.isconnected(): break
break
if _wlan.isconnected():
print('Wifi: connection succeeded!')
print(_wlan.ifconfig())
else:
print('Wifi: connection failed!')
accesspoint()
def accesspoint():
global _wlan
print('Wifi: activating accesspoint.')
_wlan = WLAN(mode=WLAN.AP)
def connected():
return _wlan.isconnected()
def config():
return _wlan.ifconfig()
def delete():
import os
os.remove("wifi_config.py")
# TODO: clear internal wifi assignment
accesspoint()
def remove():
delete()
def scan():
nets = _wlan.scan()
l=[]
for n in nets:
l.append( n.ssid )
return l
# write config and connect
def setup( name, password ):
global wifi_config
f=open("wifi_config.py", "w")
f.write("name=\"%s\"\npassword=\"%s\"" % (name,password))
f.close()
wifi_config.name = name
wifi_config.password = password
connect()
# Try to find wifi_config
try:
import wifi_config
connect()
except ImportError:
class wifi_config():
pass
| [
"[email protected]"
] | |
f0277cc63056e502ced1d713fdc7bbe567df39d1 | 2157782cf5875767f8d1fe0bb07243da2e87600d | /test_from_myself/djangoTest/Testsite/Testsite/settings.py | fbac8fa33d4eb59c9d67cff359e472346d15d4b0 | [] | no_license | mouday/SomeCodeForPython | 9bc79e40ed9ed851ac11ff6144ea080020e01fcd | ddf6bbd8a5bd78f90437ffa718ab7f17faf3c34b | refs/heads/master | 2021-05-09T22:24:47.394175 | 2018-05-11T15:34:22 | 2018-05-11T15:34:22 | 118,750,143 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,224 | py | """
Django settings for Testsite project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')(3%s0c#_7ns&a-7-5x@-io63fuy1^ojl+a#$1$60#yj9@n599'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Testsite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "blog/templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Testsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'myblog',
'USER': 'root',
'PASSWORD': '123456',
'HOST': 'localhost',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
261d930a3beec9c0e30f08186acacbe12e3b86f9 | 22f5818e99c3593dc1c405a155ea6d7aa0c765a1 | /backend/home/migrations/0002_load_initial_data.py | c6809ed748daf29a7b5fb9d036d76db99016723e | [] | no_license | crowdbotics-apps/new-expo-app-dev-8468 | 2ecc8f7824505ab68ae538eb07aeb1969c7a206a | d249a27a9db5b32efb5111bd2a96d31bc54b6ac3 | refs/heads/master | 2022-11-28T01:23:00.770574 | 2020-08-04T16:56:17 | 2020-08-04T16:56:17 | 285,042,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,305 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "New expo app"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">New expo app</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "new-expo-app-dev-8468.botics.co"
site_params = {
"name": "New expo app",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
| [
"[email protected]"
] | |
c58fe73f582a1e2ec908eb34aa02357ebd5bb5c3 | acff427a36d6340486ff747ae9e52f05a4b027f2 | /main/x11/misc/xdm/actions.py | 02b788d616e653a2c5eb2ed167bd24162614505b | [] | no_license | jeremie1112/pisilinux | 8f5a03212de0c1b2453132dd879d8c1556bb4ff7 | d0643b537d78208174a4eeb5effeb9cb63c2ef4f | refs/heads/master | 2020-03-31T10:12:21.253540 | 2018-10-08T18:53:50 | 2018-10-08T18:53:50 | 152,126,584 | 2 | 1 | null | 2018-10-08T18:24:17 | 2018-10-08T18:24:17 | null | UTF-8 | Python | false | false | 1,282 | py | # -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import get
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
def setup():
autotools.autoreconf("-vif")
autotools.configure("--disable-static \
--enable-unix-transport \
--enable-tcp-transport \
--enable-local-transport \
--enable-secure-rpc \
--enable-xpm-logos \
--enable-xdm-auth \
--with-pam \
--with-libaudit \
--with-xdmconfigdir=/etc/X11/xdm \
--with-default-vt=vt7 \
--with-config-type=ws \
--with-xft \
--with-pixmapdir=/usr/share/X11/xdm/pixmaps \
")
pisitools.dosed("libtool", " -shared ", " -Wl,-O1,--as-needed -shared ")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.dodir("/var/lib/xdm")
pisitools.dodoc("AUTHORS", "COPYING", "README")
| [
"[email protected]"
] | |
6b9b1ca6b331ddb6677f82ecb8c974dfb1bc9620 | 974dc8113a265ebe0d54b818333be78f000d293f | /google-api-client-generator/src/googleapis/codegen/import_definition.py | 03b506f51f213eaaa0605a157fef431809c0d777 | [
"Apache-2.0"
] | permissive | googleapis/discovery-artifact-manager | 9e0948600ec4c2f05a889d7f157c1eaec12ea6b4 | 19f268e0b7935ea3d87d0d124c7791efb5a78646 | refs/heads/master | 2023-08-30T02:49:49.476556 | 2023-08-29T18:24:14 | 2023-08-29T18:24:14 | 72,768,841 | 43 | 53 | Apache-2.0 | 2023-09-14T21:56:14 | 2016-11-03T17:17:02 | Java | UTF-8 | Python | false | false | 921 | py | #!/usr/bin/python2.7
# Copyright 2011 Google Inc. All Rights Reserved.
"""Contains information necessary to define an import."""
__author__ = '[email protected] (Ravi Mistry)'
class ImportDefinition(object):
"""Contains all required information about an import.
Intended for use in the type_format_to_datatype_and_imports dictionaries.
"""
def __init__(self, imports=None, template_values=None):
"""Construct a definition of an import.
Args:
imports: (sequence of str) Contains all imports required by a data type.
template_values: (sequence of str) Contains all required additional
template values that are required to be set to handle the imports.
"""
self._imports = imports or []
self._template_values = template_values or []
@property
def imports(self):
return self._imports
@property
def template_values(self):
return self._template_values
| [
"[email protected]"
] | |
5fee350412b5d59a8441bdc2cc29a017d518de6b | 9657d72ca77081e699c472241f4d565e03cda32e | /fable/fable_sources/libtbx/command_line/sge_available_slots.py | 5354cace06085aee47e03bc6afadc19bbbb26a54 | [
"BSD-3-Clause-LBNL",
"MIT"
] | permissive | hickerson/bbn | b37a4a64a004982dd219a3bd92e519b62f4ea2b3 | 17ef63ad1717553ab2abb50592f8de79228c8523 | refs/heads/master | 2021-01-17T09:50:23.863448 | 2018-08-12T19:04:58 | 2018-08-12T19:04:58 | 20,693,028 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | from __future__ import division
def run(args):
assert len(args) == 0
from libtbx import easy_run
qstat_buffer = easy_run.fully_buffered(command="qstat -g c")
el = qstat_buffer.stderr_lines
ol = qstat_buffer.stdout_lines
if (len(el) != 0):
print -1
elif (len(ol) < 3):
print -2
elif ( " ".join(ol[0].split())
!= "CLUSTER QUEUE CQLOAD USED AVAIL TOTAL aoACDS cdsuE"):
print -3
elif (not ol[1].startswith("----------")):
print -4
else:
sum_available = 0
for line in ol[2:]:
flds = line.split()
assert len(flds) == 7
sum_available += int(flds[3])
print sum_available
if (__name__ == "__main__"):
import sys
run(sys.argv[1:])
| [
"[email protected]"
] | |
225125f179e96196d2131b86cc89a97751d4cf96 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02848/s598772866.py | 282413bef5247516e1db318c8edad90021b349bc | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | N = int(input())
S = input()
chars = "abcdefghijklmnopqrstuvwxyz" + "abcdefghijklmnopqrstuvwxyz"
chars = chars.upper()
dic = {chars[i]: chars[i+N] for i in range(26)}
ans = ""
for i in range(len(S)):
ans += dic[S[i]]
print(ans) | [
"[email protected]"
] | |
c7530d80f8eab8e9d0c69ff3835ccebf526d4d34 | 5bab92faf12fe329b78743b877f0c154da6b0498 | /aula_1/ex_5.py | 4090c1a596e7660bab8bf3d2918b0d041410ca89 | [] | no_license | elrrocha/python-521 | 943bd4ab23c7bb4bae2e9653085c93bf43b73256 | 8837e5f911bb8b0e389bb45f5d1684e23648aebe | refs/heads/master | 2020-06-27T22:28:01.979671 | 2019-08-01T20:15:20 | 2019-08-01T20:15:20 | 200,068,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | import requests
DOMAIN_URL = 'https://gen-net.herokuapp.com/api/users/{}'
response = requests.put(DOMAIN_URL)
user_id = input ('Digite seu id: ')
name = input('Digite seu nome: ')
email = input('Digite seu email: ')
password = ('Digite sua senha: ')
payload = {
'name': name,
'email': email,
'password': password
}
response = requests.put(DOMAIN_URL.format(user_id), payload)
if response.status_code ==200:
print('Usuário atualizado com sucesso')
else:
print('Erro ao atualizar o usuário') | [
"[email protected]"
] | |
5343e91b21a1d17b86b3d2134e35ceb2a8c7d7b9 | fb63c25920d15f9b48b8e9e604ffbbde99a7aeae | /svmrfe/run_sgdrfe.py | c60a611d3bd8fcb5299bfd8e0ea8fa28aa157d0c | [] | no_license | kellyhennigan/cueexp_scripts | 763385c96772867a4875c734732bb32a3b051d1c | 7264b0033a9cc83cc362c96cd04d94706728c994 | refs/heads/master | 2023-07-21T20:35:11.607859 | 2022-01-10T20:18:12 | 2022-01-10T20:18:12 | 55,538,490 | 1 | 1 | null | 2023-07-06T21:16:20 | 2016-04-05T19:17:21 | Python | UTF-8 | Python | false | false | 2,453 | py | """
Script to run svmrfe on a given subject with a number of different parameters.
usage: python run_sgdrfe.py [test subject id]
"""
import os
import sys
from sgdrfe import SGDRFE
######################### USER PARAMS #################################
SUBJECT_BASE_PATH = '/scratch/PI/knutson/cuesvm/' #where we find subj folder
SUBJECT_FILE = '/scratch/PI/knutson/cuesvm/cue_patients_subjects.txt'
NIFTII = 'pp_cue_tlrc_afni.nii.gz'
BEHAVIORAL = 'drugs_vs_neutral_trial_onsets.1D'
NIFTII_OUT_NAME = 'cue_drug_trial_vs_neutral.nii.gz'
TRS = [1, 2, 3, 4]
LAG = 2 # so really we're looking at trs 2+trs = [3, 4] of every trial (1-indexed)
CLASSIFIERS = ['linearsvc', 'elasticnet']
CUT = .05 # throw out the bottom cut % of features every iteration
STOP_THRESHOLD = .025 # stop at this % of features out of what we start with
TEST = False
######################################################################
class Subject(object):
def __init__(self, name):
self.name = name
self.path = os.path.join(SUBJECT_BASE_PATH, name)
def file_path(self, filename):
return os.path.join(self.path, filename)
def has_file(self, filename):
return os.path.exists(self.file_path(filename))
class Project(object):
def __init__(self, subs):
self.subjects = [Subject(x) for x in subs]
if __name__=="__main__":
if not TEST:
try:
test_subject = sys.argv[1]
except IndexError:
test_subject = None
with open(SUBJECT_FILE, 'r') as f:
subjects = [x for x in f.read().split('\n') if len(x) == 8]
if not TEST and test_subject not in subjects:
print("No test subject found, using all subjects...")
test_subject = None
if TEST:
test_subject = subjects[2]
subjects = subjects[:3]
for clf in CLASSIFIERS:
for cval in [100.]:
#[.0001,.001,.01,.1,1.,10.,100.,1000.]:
project = Project(subjects)
rfe = SGDRFE(project, NIFTII, BEHAVIORAL, TRS,
test_subj=test_subject, lag=LAG, clftype=clf, cut=CUT,
C=cval, stop_threshold=STOP_THRESHOLD)
rfe.run()
test_sub_name = test_subject if test_subject is not None else 'all_subjects'
niftii_name = '_'.join([test_sub_name, str(cval), clf, NIFTII_OUT_NAME ])
rfe.save_nii(savename=niftii_name)
if TEST:
break
| [
"[email protected]"
] | |
15c33705404a528dcb3a06706e16d33f5ccdb127 | bcd8f5a7352a1e0b2e1a83fea9ae2e4182eda805 | /algobook/Py2Adt/src/ch6/ListNode3.py | 40ee447a973587684ac20f5f8322cc147d3772f7 | [] | no_license | JediChou/jedichou-study-algo | 455c7709250d424cad1b326efd99f0be7a000edc | 4f0ce79556d7543870dfc13399c7ae85ba773508 | refs/heads/master | 2020-06-24T23:21:34.310122 | 2020-04-01T05:53:33 | 2020-04-01T05:53:33 | 199,122,658 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | class ListNode:
def __init__(self, data):
self.data = data
self.next = None
def iterateSingleList(node):
print(node.data)
if node.next is not None:
iterateSingleList(node.next)
if __name__ == "__main__":
# Define node.
n1 = ListNode(78)
n2 = ListNode(83)
n3 = ListNode(6)
n4 = ListNode(73)
n5 = ListNode(68)
# Create linked list.
n1.next = n2
n2.next = n3
n3.next = n4
n4.next = n5
# Iterate single linked list.
iterateSingleList(n1)
| [
"[email protected]"
] | |
5c001144063e44c44a0ae582a3ef5dbdb61e2310 | e7451193592aaee2536924ef03846eee920bcf94 | /ucscentralsdk/mometa/adaptor/AdaptorExtEthIf.py | f181ffcfab71227b2d09021e80eeb700cd7c4952 | [
"Apache-2.0"
] | permissive | vinayravish/ucscentralsdk | eb33191f3c7675561298af8cef9b30f6e220b7b2 | 809a3782d26c69f50cf7237700e107f1a9857870 | refs/heads/master | 2021-01-18T01:51:57.275207 | 2016-07-20T05:37:26 | 2016-07-20T05:37:26 | 62,137,219 | 0 | 0 | null | 2016-06-28T12:00:34 | 2016-06-28T12:00:34 | null | UTF-8 | Python | false | false | 11,680 | py | """This module contains the general information for AdaptorExtEthIf ManagedObject."""
from ...ucscentralmo import ManagedObject
from ...ucscentralcoremeta import UcsCentralVersion, MoPropertyMeta, MoMeta
from ...ucscentralmeta import VersionMeta
class AdaptorExtEthIfConsts():
CHASSIS_ID_N_A = "N/A"
DISCOVERY_ABSENT = "absent"
DISCOVERY_MIS_CONNECT = "mis-connect"
DISCOVERY_MISSING = "missing"
DISCOVERY_NEW = "new"
DISCOVERY_PRESENT = "present"
DISCOVERY_UN_INITIALIZED = "un-initialized"
IF_ROLE_DIAG = "diag"
IF_ROLE_FCOE_NAS_STORAGE = "fcoe-nas-storage"
IF_ROLE_FCOE_STORAGE = "fcoe-storage"
IF_ROLE_FCOE_UPLINK = "fcoe-uplink"
IF_ROLE_MGMT = "mgmt"
IF_ROLE_MONITOR = "monitor"
IF_ROLE_NAS_STORAGE = "nas-storage"
IF_ROLE_NETWORK = "network"
IF_ROLE_NETWORK_FCOE_UPLINK = "network-fcoe-uplink"
IF_ROLE_SERVER = "server"
IF_ROLE_SERVICE = "service"
IF_ROLE_STORAGE = "storage"
IF_ROLE_UNKNOWN = "unknown"
IF_TYPE_AGGREGATION = "aggregation"
IF_TYPE_PHYSICAL = "physical"
IF_TYPE_UNKNOWN = "unknown"
IF_TYPE_VIRTUAL = "virtual"
LINK_STATE_ADMIN_DOWN = "admin-down"
LINK_STATE_DOWN = "down"
LINK_STATE_ERROR = "error"
LINK_STATE_OFFLINE = "offline"
LINK_STATE_UNALLOCATED = "unallocated"
LINK_STATE_UNAVAILABLE = "unavailable"
LINK_STATE_UNKNOWN = "unknown"
LINK_STATE_UP = "up"
MAC_ADDR_TYPE_FLOATING = "floating"
MAC_ADDR_TYPE_OPERATIONAL = "operational"
MAC_ADDR_TYPE_UNKNOWN = "unknown"
OPER_STATE_ADMIN_DOWN = "admin-down"
OPER_STATE_DOWN = "down"
OPER_STATE_ERROR_DISABLED = "error-disabled"
OPER_STATE_FAILED = "failed"
OPER_STATE_HARDWARE_FAILURE = "hardware-failure"
OPER_STATE_INDETERMINATE = "indeterminate"
OPER_STATE_LINK_DOWN = "link-down"
OPER_STATE_LINK_UP = "link-up"
OPER_STATE_NO_LICENSE = "no-license"
OPER_STATE_SFP_NOT_PRESENT = "sfp-not-present"
OPER_STATE_SOFTWARE_FAILURE = "software-failure"
OPER_STATE_UDLD_AGGR_DOWN = "udld-aggr-down"
OPER_STATE_UP = "up"
PEER_CHASSIS_ID_N_A = "N/A"
PURPOSE_CROSSLINK = "crosslink"
PURPOSE_GENERAL = "general"
PURPOSE_MANAGEMENT = "management"
PURPOSE_OVERLAY = "overlay"
PURPOSE_UNUSED = "unused"
PURPOSE_UTILITY = "utility"
SIDE_LEFT = "left"
SIDE_RIGHT = "right"
SWITCH_ID_A = "A"
SWITCH_ID_B = "B"
SWITCH_ID_NONE = "NONE"
SWITCH_ID_MGMT = "mgmt"
class AdaptorExtEthIf(ManagedObject):
"""This is AdaptorExtEthIf class."""
consts = AdaptorExtEthIfConsts()
naming_props = set([u'id'])
mo_meta = MoMeta("AdaptorExtEthIf", "adaptorExtEthIf", "ext-eth-[id]", VersionMeta.Version111a, "InputOutput", 0x7f, [], ["admin", "ext-lan-config", "ext-lan-policy", "pn-equipment", "pn-maintenance", "read-only"], [u'adaptorUnit'], [], ["Get"])
prop_meta = {
"adapter_id": MoPropertyMeta("adapter_id", "adapterId", "uint", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, [], []),
"aggr_port_id": MoPropertyMeta("aggr_port_id", "aggrPortId", "uint", VersionMeta.Version121a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"chassis_id": MoPropertyMeta("chassis_id", "chassisId", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["N/A"], ["0-255"]),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"discovery": MoPropertyMeta("discovery", "discovery", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["absent", "mis-connect", "missing", "new", "present", "un-initialized"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"ep_dn": MoPropertyMeta("ep_dn", "epDn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version111a, MoPropertyMeta.NAMING, 0x8, None, None, None, [], []),
"if_role": MoPropertyMeta("if_role", "ifRole", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["diag", "fcoe-nas-storage", "fcoe-storage", "fcoe-uplink", "mgmt", "monitor", "nas-storage", "network", "network-fcoe-uplink", "server", "service", "storage", "unknown"], []),
"if_type": MoPropertyMeta("if_type", "ifType", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["aggregation", "physical", "unknown", "virtual"], []),
"link_state": MoPropertyMeta("link_state", "linkState", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["admin-down", "down", "error", "offline", "unallocated", "unavailable", "unknown", "up"], []),
"lldp_mac": MoPropertyMeta("lldp_mac", "lldpMac", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, r"""(([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F]))|0""", [], []),
"locale": MoPropertyMeta("locale", "locale", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|server|chassis|internal|external),){0,5}(defaultValue|unknown|server|chassis|internal|external){0,1}""", [], []),
"mac": MoPropertyMeta("mac", "mac", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, r"""(([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F]))|0""", [], []),
"mac_addr_type": MoPropertyMeta("mac_addr_type", "macAddrType", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["floating", "operational", "unknown"], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"oper_state": MoPropertyMeta("oper_state", "operState", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["admin-down", "down", "error-disabled", "failed", "hardware-failure", "indeterminate", "link-down", "link-up", "no-license", "sfp-not-present", "software-failure", "udld-aggr-down", "up"], []),
"oper_state_desc": MoPropertyMeta("oper_state_desc", "operStateDesc", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"peer_aggr_port_id": MoPropertyMeta("peer_aggr_port_id", "peerAggrPortId", "uint", VersionMeta.Version121a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"peer_chassis_id": MoPropertyMeta("peer_chassis_id", "peerChassisId", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["N/A"], ["0-255"]),
"peer_dn": MoPropertyMeta("peer_dn", "peerDn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"peer_port_id": MoPropertyMeta("peer_port_id", "peerPortId", "uint", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"peer_slot_id": MoPropertyMeta("peer_slot_id", "peerSlotId", "uint", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"phys_ep_dn": MoPropertyMeta("phys_ep_dn", "physEpDn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"port_id": MoPropertyMeta("port_id", "portId", "uint", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"purpose": MoPropertyMeta("purpose", "purpose", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["crosslink", "general", "management", "overlay", "unused", "utility"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x20, 0, 256, None, [], []),
"side": MoPropertyMeta("side", "side", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["left", "right"], []),
"slot_id": MoPropertyMeta("slot_id", "slotId", "uint", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x40, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"switch_id": MoPropertyMeta("switch_id", "switchId", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["A", "B", "NONE", "mgmt"], []),
"transport": MoPropertyMeta("transport", "transport", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|ether|dce|fc),){0,4}(defaultValue|unknown|ether|dce|fc){0,1}""", [], []),
"type": MoPropertyMeta("type", "type", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|lan|san|ipc),){0,4}(defaultValue|unknown|lan|san|ipc){0,1}""", [], []),
}
prop_map = {
"adapterId": "adapter_id",
"adminState": "admin_state",
"aggrPortId": "aggr_port_id",
"chassisId": "chassis_id",
"childAction": "child_action",
"discovery": "discovery",
"dn": "dn",
"epDn": "ep_dn",
"id": "id",
"ifRole": "if_role",
"ifType": "if_type",
"linkState": "link_state",
"lldpMac": "lldp_mac",
"locale": "locale",
"mac": "mac",
"macAddrType": "mac_addr_type",
"name": "name",
"operState": "oper_state",
"operStateDesc": "oper_state_desc",
"peerAggrPortId": "peer_aggr_port_id",
"peerChassisId": "peer_chassis_id",
"peerDn": "peer_dn",
"peerPortId": "peer_port_id",
"peerSlotId": "peer_slot_id",
"physEpDn": "phys_ep_dn",
"portId": "port_id",
"purpose": "purpose",
"rn": "rn",
"side": "side",
"slotId": "slot_id",
"status": "status",
"switchId": "switch_id",
"transport": "transport",
"type": "type",
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.adapter_id = None
self.admin_state = None
self.aggr_port_id = None
self.chassis_id = None
self.child_action = None
self.discovery = None
self.ep_dn = None
self.if_role = None
self.if_type = None
self.link_state = None
self.lldp_mac = None
self.locale = None
self.mac = None
self.mac_addr_type = None
self.name = None
self.oper_state = None
self.oper_state_desc = None
self.peer_aggr_port_id = None
self.peer_chassis_id = None
self.peer_dn = None
self.peer_port_id = None
self.peer_slot_id = None
self.phys_ep_dn = None
self.port_id = None
self.purpose = None
self.side = None
self.slot_id = None
self.status = None
self.switch_id = None
self.transport = None
self.type = None
ManagedObject.__init__(self, "AdaptorExtEthIf", parent_mo_or_dn, **kwargs)
| [
"[email protected]"
] | |
c91fa793eaaf0fd3f563d34be77f1ff4359be204 | fd4510e0bf959de7527bd0c62d3b4fb3f78cee5e | /detection/CD1/std.py | 3be0d5c0f1b57f11db53818081eabe2ad0add0f4 | [] | no_license | RuoAndo/nii-cyber-security-admin | 8dde8ab68b0f7fa882adbe8e828546aa1739e685 | e77b9d581e124f9fd5f721e18cd77d3bccecad19 | refs/heads/master | 2022-12-13T21:40:46.330389 | 2022-12-07T14:01:00 | 2022-12-07T14:01:00 | 71,614,880 | 5 | 1 | null | 2020-10-13T08:40:46 | 2016-10-22T03:41:30 | Python | UTF-8 | Python | false | false | 897 | py | import numpy as np
import sys
argvs = sys.argv
argc = len(argvs)
f = open(argvs[3])
line = f.readline()
instid = []
instname = []
while line:
tmp = line.split("\t")
instid.append(tmp[0])
instname.append(tmp[1])
line = f.readline()
f.close()
f = open(argvs[1])
line = f.readline()
l = np.array([])
while line:
tmp = line.split(",")
l = np.append( l, float(tmp[1]) )
line = f.readline()
def main():
std = np.std(l)
lr = l / np.linalg.norm(l)
#print(lr)
tmp = str(argvs[2]).split("-")
counter = 0
for i in instid:
if int(i) == int(tmp[1]):
uname = instname[counter]
counter = counter + 1
counter = 0
for i in lr:
print(str(counter)+","+str(i))+","+str(tmp[1])+","+uname.strip()
counter = counter + 1
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
c30fa16878570e0c12a069b540ddadb291b6b66a | 3012e5a0f34dd54fbac568c70506826973192ce1 | /pylib/points.py | 23464ed8064625df58f47a9559f4b6586991896f | [] | no_license | metatab-packages/civicknowledge.com-osm-demosearch | 89999227bda7bae91259c10bd651f220ae35c52f | d4ecb7775662a50413c848c3ae5a901b147ef532 | refs/heads/master | 2023-05-14T12:39:25.328559 | 2021-06-08T13:52:39 | 2021-06-08T13:52:39 | 334,572,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,987 | py | """
"""
from itertools import chain
import geopandas as gpd
import libgeohash as gh
import numpy as np
import pandas as pd
import shapely
from shapely.geometry import Point
from shapely.wkt import loads as loads_wkt
from tqdm import tqdm
tqdm.pandas()
from pathlib import Path
from demosearch.util import run_mp
from .lines import open_cache
import logging
from metapack.appurl import SearchUrl
SearchUrl.initialize() # This makes the 'index:" urls work
points_logger = logging.getLogger(__name__)
extract_tag_names = ['amenity', 'tourism', 'shop', 'leisure', 'natural', 'parking']
def extract_tags(df, extract_tags):
from sqlalchemy.dialects.postgresql import HSTORE
h = HSTORE()
f = h.result_processor(None, None)
# Prune the dataset to just the records that have the tags we want.
# before getting to the more expensive operation of extracting the tags.
# This should reduce the dataset from 24M rows to less than 6M.
t = df.dropna(subset=['other_tags'])
t = t[t.highway.isnull()]
flags = [t.other_tags.str.contains(e) for e in extract_tags]
comb_flags = [any(e) for e in list(zip(*flags))]
t = t[comb_flags]
rows = []
errors = []
for idx, r in t.set_index('osm_id')[['other_tags']].iterrows():
try:
d = f(r.other_tags)
rows.append([idx] + [d.get(e) for e in extract_tags])
except TypeError as e:
errors.append(r, e)
return (rows, errors)
def make_tags_df(pkg):
"""Create the tags dataframe"""
cache = open_cache(pkg)
points_logger.info('Make tags dataframe')
try:
tags_df = cache.get_df('points/tags_df')
except KeyError:
points_df = pkg.reference('points').read_csv(low_memory=False)
# Split the file and extract tags in multiprocessing
N_task = 200
tasks = [(e, extract_tag_names) for e in np.array_split(points_df, N_task)]
results = run_mp(extract_tags, tasks, 'Split OSM other_tags')
tags = list(chain(*[e[0] for e in results]))
errors = list(chain(*[e[1] for e in results]))
tags_df = pd.DataFrame(tags, columns=['osm_id'] + extract_tag_names)
# 1/2 the entries, 2.7M are trees and rocks
tags_df = tags_df[~tags_df.natural.isin(['tree', 'rock'])]
tags_df = pd.merge(tags_df, points_df[['osm_id', 'geometry']], on='osm_id')
def encode(v):
return gh.encode(*list(map(float, v[7:-1].split()))[::-1])
tags_df['geohash'] = tags_df.geometry.progress_apply(encode)
tags_df['geometry'] = tags_df.geometry.progress_apply(shapely.wkt.loads)
tags_df = gpd.GeoDataFrame(tags_df, geometry='geometry', crs=4326)
cache.put_df('points/tags_df', tags_df)
return tags_df
def extract_class_columns(tags_df):
tags_df['class'] = tags_df.loc[:, ('amenity', 'tourism', 'shop', 'leisure', 'natural', 'parking')].fillna(
method='ffill', axis=1).fillna(method='bfill', axis=1).iloc[:, 0]
replace = {'parking': 'parking_space',
'pub': 'bar',
}
cls = ['restaurant', 'bar', 'cafe', 'fast_food', 'supermarket', 'grave_yard', 'playground',
'bicycle_parking', 'park', 'fuel', 'bank', 'hotel', 'fitness_centre',
'laundry', 'clothes', 'convenience', 'parking', 'parking_space']
t = tags_df[['geohash', 'class']].replace(replace)
t = t[t['class'].isin(cls)]
cls_df = t.groupby([t.geohash.str.slice(0, 8), 'class']).count().unstack().fillna(0).droplevel(0, axis=1)
return cls_df
def make_geotags_df(pkg, tags_df, cls_df):
# At 8 digits, geohashes are, on average 4m by 20M over the US
# At 6, 146m x 610m
# At 4, 4Km x 20Km
# Clip to 8 because it's really unlikely that there are actually more than 10
# amenities in a cell.
pkg_root = Path(pkg.path).parent
f = pkg_root.joinpath('data', 'point_tags.csv')
if f.exists():
points_logger.info(f'Geotags dataframe {f} already exists')
return
points_logger.info('Make geotags dataframe')
group_counts = tags_df.groupby(tags_df.geohash.str.slice(0, 8)) \
[['amenity', 'tourism', 'shop', 'leisure', 'natural', 'parking']].count().clip(0, 10)
t = group_counts.join(cls_df, how='outer').fillna(0).astype(int)
t['geometry'] = [Point(gh.decode(e)[::-1]) for e in t.index]
t = gpd.GeoDataFrame(t, geometry='geometry', crs=4326).reset_index()
cbsa = pkg.reference('cbsa').geoframe().to_crs(4326)
geohash_tags = gpd.sjoin(t, cbsa[['geometry', 'geoid']], how='left')
cols = ['geohash', 'geoid'] + list(geohash_tags.loc[:, 'amenity':'supermarket'].columns) + ['geometry']
geohash_tags = geohash_tags[cols]
geohash_tags.to_csv(f, index=False)
return geohash_tags
def build_points(pkg):
tags_df = make_tags_df(pkg)
points_logger.info('Extract class Columns')
cls_df = extract_class_columns(tags_df)
make_geotags_df(pkg, tags_df, cls_df) | [
"[email protected]"
] | |
613776148909d6748ac5dcc5401258ee19f74b31 | 4519b4b24f3907da1dde513f72d432fd9b4391f4 | /crds/jwst/specs/niriss_gain.spec | 00ecc62e7c0cb6112bf6dbeadffff39454d1d588 | [
"BSD-2-Clause"
] | permissive | spacetelescope/crds | 0bd712b7c7c6864c274987e7ba94a051e19d1e48 | 08da10721c0e979877dc9579b4092c79f4ceee27 | refs/heads/master | 2023-07-23T17:07:33.889579 | 2023-06-29T20:04:56 | 2023-06-29T20:04:56 | 52,045,957 | 9 | 29 | NOASSERTION | 2023-09-14T17:42:28 | 2016-02-18T23:15:38 | Python | UTF-8 | Python | false | false | 468 | spec | {
'derived_from' : 'cloning tool 0.05b (2013-04-12) used on 2013-10-04',
'file_ext' : '.fits',
'filekind' : 'GAIN',
'filetype' : 'GAIN',
'instrument' : 'NIRISS',
'mapping' : 'REFERENCE',
'name' : 'jwst_niriss_gain_0000.rmap',
'observatory' : 'JWST',
'parkey' : (('META.INSTRUMENT.DETECTOR', 'META.INSTRUMENT.FILTER'),),
'sha1sum' : 'fee4fbd0950196f5211a6badfb7b51067489072b',
'suffix' : 'gain',
'text_descr' : 'Gain',
}
| [
"[email protected]@stsci.edu"
] | [email protected]@stsci.edu |
a150e33292f2b4df7313dc18e92ceae92ca16a81 | 803bab6f782099d995bcdb99d163486f4fff8c50 | /habitat-lab/habitat/sims/habitat_simulator/actions.py | 6252636c3399b2da8795019b46dbe220986fa63d | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-NC-SA-3.0"
] | permissive | facebookresearch/habitat-lab | 7088506509f64da6d682f5dc69427589f71a58a9 | f5b29e62df0788d70ba3618fc738fa4e947428ba | refs/heads/main | 2023-08-24T14:00:02.707343 | 2023-08-23T04:53:48 | 2023-08-23T04:53:48 | 169,164,391 | 792 | 298 | MIT | 2023-09-14T15:20:03 | 2019-02-04T23:12:51 | Python | UTF-8 | Python | false | false | 2,517 | py | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from typing import Dict
import attr
from habitat.core.utils import Singleton
class _DefaultHabitatSimActions(Enum):
stop = 0
move_forward = 1
turn_left = 2
turn_right = 3
look_up = 4
look_down = 5
@attr.s(auto_attribs=True, slots=True)
class HabitatSimActionsSingleton(metaclass=Singleton):
r"""Implements an extendable Enum for the mapping of action names
to their integer values.
This means that new action names can be added, but old action names cannot
be removed nor can their mapping be altered. This also ensures that all
actions are always contigously mapped in :py:`[0, len(HabitatSimActions) - 1]`
This accesible as the global singleton :ref:`HabitatSimActions`
"""
_known_actions: Dict[str, int] = attr.ib(init=False, factory=dict)
def __attrs_post_init__(self):
for action in _DefaultHabitatSimActions:
self._known_actions[action.name] = action.value
def extend_action_space(self, name: str) -> int:
r"""Extends the action space to accommodate a new action with
the name :p:`name`
:param name: The name of the new action
:return: The number the action is registered on
Usage:
.. code:: py
from habitat.sims.habitat_simulator.actions import HabitatSimActions
HabitatSimActions.extend_action_space("MY_ACTION")
print(HabitatSimActions.MY_ACTION)
"""
assert (
name not in self._known_actions
), "Cannot register an action name twice"
self._known_actions[name] = len(self._known_actions)
return self._known_actions[name]
def has_action(self, name: str) -> bool:
r"""Checks to see if action :p:`name` is already register
:param name: The name to check
:return: Whether or not :p:`name` already exists
"""
return name in self._known_actions
def __getattr__(self, name):
return self._known_actions[name]
def __getitem__(self, name):
return self._known_actions[name]
def __len__(self):
return len(self._known_actions)
def __iter__(self):
return iter(self._known_actions)
HabitatSimActions: HabitatSimActionsSingleton = HabitatSimActionsSingleton()
| [
"[email protected]"
] | |
90bee35d3c81017c7b8558351920c8d208c8605e | ed843fd5c2f6693e3ee682cf579e49dbd9957375 | /savu/test/data_test.py | dd5a1001c6042ce98a794f9095a4d42a5158e006 | [
"CC-BY-2.0",
"Apache-2.0"
] | permissive | mjn19172/Savu | 98707cd0f89b1b6626341b67211f7890d969ac97 | b9b456928387eaf81d4d0f314394f6d337bbb90b | refs/heads/master | 2020-12-30T22:56:49.605584 | 2015-09-18T12:32:07 | 2015-09-18T12:32:07 | 36,367,826 | 0 | 0 | null | 2015-09-18T12:32:08 | 2015-05-27T13:16:27 | Python | UTF-8 | Python | false | false | 1,284 | py | # Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: data_test
:platform: Unix
:synopsis: unittest test class data structures
.. moduleauthor:: Mark Basham <[email protected]>
"""
import unittest
import savu.test.test_utils as tu
import savu.plugins.utils as pu
class Test(unittest.TestCase):
@unittest.skip("This test should be updated to use the new setup system")
def test_create_smaller_data_block(self):
data = tu.get_nx_tomo_test_data()
plugin = pu.load_plugin("savu.plugins.downsample_filter")
output = tu.get_appropriate_output_data(plugin, data)[0]
self.assertEqual(output.get_data_shape(), (111, 68, 80))
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
c35da0bd6fcac14b0c3221c0d46ff93bb1b5cb86 | c7262339c9436af9e71f9a9beb3408d92016d04c | /CVServer/basic_view.py | 47bb4e4248a0cf4279f1256a08171ec062db6030 | [] | no_license | Zachary4biz/dm_server | 3edbe8ee2072c53dabfb36036565fd9b700ff926 | 8172fae7ae8c5146bb7bbe61defefdabc34f256c | refs/heads/master | 2022-08-23T07:46:51.363195 | 2020-05-25T04:01:10 | 2020-05-25T04:01:10 | 195,803,576 | 2 | 1 | null | 2022-06-21T23:03:40 | 2019-07-08T12:02:43 | Python | UTF-8 | Python | false | false | 788 | py | # encoding=utf-8
from django.http import HttpResponse
# 返回渲染过的html页面
from django.shortcuts import render
outside_value="abc"
def hello(request):
context = {}
context['param1'] = outside_value
context['section'] = request.GET['section'] if 'section' in request.GET else ''
return render(request, 'basic_view.html', context)
def hello_post(request):
sec = request.POST['section'] if 'section' in request.POST else ''
return render(request, 'basic_view.html', {"param1":"First Param","section":sec})
import json
def test(request):
params = request.GET
if 'img_url' in params and 'id' in params:
json_str = json.dumps({"img_url":params["img_url"], "id":params["id"]})
return HttpResponse(json_str, status=200, content_type="application/json,charset=utf-8")
| [
"[email protected]"
] | |
9ef8b78b6ef1f97f7c36da6e1bf048580bb6b4ee | 28def0cefc61b38723a393ea76610e7a3fbc27c0 | /LipSDP/examples/mnist_example.py | 5179207e908a8e2c9bf11ee432fb85f414d3a6de | [
"MIT"
] | permissive | arobey1/LipSDP | 1d9d1645cf9b11680113a65979669e0fd90a15b3 | bf01cafe97195330f4eb516d7b3a57247b31c5b3 | refs/heads/master | 2022-03-16T06:54:46.580493 | 2022-03-07T15:44:14 | 2022-03-07T15:44:14 | 217,307,826 | 49 | 16 | MIT | 2019-12-22T04:21:15 | 2019-10-24T13:36:41 | MATLAB | UTF-8 | Python | false | false | 5,145 | py | import torch
import torch.nn as nn
from torchvision import datasets, transforms
import torch.optim as optim
from torchsummary import summary
from MNIST_Net import Network
from scipy.io import savemat
import numpy as np
import os
# Network architecture and training hyper-parameters.
INPUT_SIZE = 784  # 28 * 28 flattened MNIST image
OUTPUT_SIZE = 10  # ten digit classes
BATCH_SIZE = 100
NUM_EPOCHS = 10
LEARNING_RATE = 1e-3
def main():
    """Train a small MNIST classifier and dump its weight matrices to a .mat file."""
    train_loader, test_loader = create_data_loaders()
    fname = os.path.join(os.getcwd(), 'saved_weights/mnist_weights.mat')

    # define neural network model and print summary
    net_dims = [INPUT_SIZE, 50, OUTPUT_SIZE]
    model = Network(net_dims, activation=nn.ReLU).net
    summary(model, (1, INPUT_SIZE))

    # train model
    accuracy = train_network(model, train_loader, test_loader)

    # save data to saved_weights/ directory
    weights = extract_weights(model)
    # dtype=np.object lets per-layer arrays of different shapes live in one
    # MATLAB cell array.
    data = {'weights': np.array(weights, dtype=np.object)}
    savemat(fname, data)
def extract_weights(net):
    """Extract weights of trained neural network model

    params:
        * net: torch.nn instance - trained neural network model

    returns:
        * weights: list of arrays - weights of neural network
    """
    # Keep only the 'weight' tensors (biases are skipped), converted to
    # float64 numpy arrays in state_dict order.
    state = net.state_dict()
    return [
        state[name].detach().numpy().astype(np.float64)
        for name in state
        if 'weight' in name
    ]
def train_network(model, train_loader, test_loader):
    """Train a neural network with Adam optimizer

    params:
        * model: torch.nn instance - neural network model
        * train_loader: DataLoader instance - train dataset loader
        * test_loader: DataLoader instance - test dataset loader

    returns:
        * accuracy: float - accuracy of trained neural network
    """
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)

    for epoch_num in range(1, NUM_EPOCHS + 1):
        train_model(model, train_loader, optimizer, criterion, epoch_num)

    # Evaluate once after all epochs ('accuracy' fixes the original
    # 'accurary' spelling).
    accuracy = test_model(model, test_loader)

    return accuracy
def create_data_loaders():
    """Create DataLoader instances for training and testing neural networks

    returns:
        * train_loader: DataLoader instance - loader for training set
        * test_loader: DataLoader instance - loader for test set
    """
    # Downloads MNIST into /tmp on first use; images are only converted to
    # tensors (no normalization), and batches are served in a fixed order.
    train_set = datasets.MNIST('/tmp', train=True, download=True, transform=transforms.ToTensor())
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)

    test_set = datasets.MNIST('/tmp', train=False, download=True, transform=transforms.ToTensor())
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)

    return train_loader, test_loader
def train_model(model, train_loader, optimizer, criterion, epoch_num, log_interval=200):
    """Train neural network model with Adam optimizer for a single epoch

    params:
        * model: nn.Sequential instance - NN model to be tested
        * train_loader: DataLoader instance - Training data for NN
        * optimizer: torch.optim instance - Optimizer for NN
        * criterion: torch.nn.CrossEntropyLoss instance - Loss function
        * epoch_num: int - Number of current epoch
        * log_interval: int - interval to print output

    modifies:
        weights of neural network model instance
    """
    model.train()  # Set model to training mode

    for batch_idx, (data, target) in enumerate(train_loader):
        # Flatten 28x28 images into vectors. NOTE(review): assumes every
        # batch has exactly BATCH_SIZE rows (MNIST's 60000 is divisible by
        # 100, so the view() call never sees a short batch) — confirm if the
        # dataset or batch size changes.
        data = data.view(BATCH_SIZE, -1)
        optimizer.zero_grad()  # Zero gradient buffers
        output = model(data)  # Pass data through the network
        loss = criterion(output, target)  # Calculate loss
        loss.backward()  # Backpropagate
        optimizer.step()  # Update weights

        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tCross-Entropy Loss: {:.6f}'.format(
                epoch_num, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.data.item()))
def test_model(model, test_loader):
    """Test neural network model using argmax classification

    params:
        * model: nn.Sequential instance - torch NN model to be tested
        * test_loader: - Test data for NN

    returns:
        * test_accuracy: float - testing classification accuracy
    """
    model.eval()
    total, correct = 0, 0

    with torch.no_grad():
        for data, labels in test_loader:
            # Same full-batch assumption as train_model (10000 % 100 == 0).
            data = data.view(BATCH_SIZE, -1)
            output = model(data)
            # Predicted class is the argmax over the output logits.
            _, predicted = torch.max(output.data, 1)
            total += labels.size(0)  # Increment the total count
            correct += (predicted == labels).sum()  # Increment the correct count

    test_accuracy = 100 * correct.numpy() / float(total)
    print('Test Accuracy: %.3f %%\n' % test_accuracy)

    return test_accuracy
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
3100f7b08481605f1da12a5cf74588aab159e40a | 39a1d46fdf2acb22759774a027a09aa9d10103ba | /inference-engine/ie_bridges/python/sample/speech_sample/speech_sample.py | 8019746d177088fb0f9f28648903084a40a78d39 | [
"Apache-2.0"
] | permissive | mashoujiang/openvino | 32c9c325ffe44f93a15e87305affd6099d40f3bc | bc3642538190a622265560be6d88096a18d8a842 | refs/heads/master | 2023-07-28T19:39:36.803623 | 2021-07-16T15:55:05 | 2021-07-16T15:55:05 | 355,786,209 | 1 | 3 | Apache-2.0 | 2021-06-30T01:32:47 | 2021-04-08T06:22:16 | C++ | UTF-8 | Python | false | false | 10,128 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import logging as log
import re
import sys
from timeit import default_timer
import numpy as np
from arg_parser import parse_args
from file_options import read_utterance_file, write_utterance_file
from openvino.inference_engine import ExecutableNetwork, IECore
def get_scale_factor(matrix: np.ndarray) -> float:
    """Get scale factor for quantization using utterance matrix"""
    # Map the matrix's peak value onto the quantization target; an all-zero
    # matrix has no usable peak, so fall back to a neutral factor of 1.
    target_max = 16384
    peak = np.max(matrix)
    return 1.0 if peak == 0 else target_max / peak
def infer_data(data: dict, exec_net: ExecutableNetwork, input_blobs: list, output_blobs: list) -> np.ndarray:
    """Do a synchronous matrix inference

    Feeds the utterance matrices through the network ``batch_size`` rows
    (frames) at a time and assembles per-output result matrices with one
    row per input frame.
    """
    # Assumes all input matrices share the same number of rows — TODO confirm.
    matrix_shape = next(iter(data.values())).shape
    result = {}

    for blob_name in output_blobs:
        batch_size, num_of_dims = exec_net.outputs[blob_name].shape
        # np.ndarray(...) allocates uninitialized storage; every row is
        # overwritten by the slice loop below.
        result[blob_name] = np.ndarray((matrix_shape[0], num_of_dims))

    # NOTE(review): batch_size is whatever the *last* output blob reported;
    # this assumes every output shares the same batch dimension — confirm.
    slice_begin = 0
    slice_end = batch_size

    while slice_begin < matrix_shape[0]:
        vectors = {blob_name: data[blob_name][slice_begin:slice_end] for blob_name in input_blobs}
        num_of_vectors = next(iter(vectors.values())).shape[0]

        if num_of_vectors < batch_size:
            # Zero-pad the final partial slice up to a full batch; only the
            # first num_of_vectors rows of the output are kept.
            temp = {blob_name: np.zeros((batch_size, vectors[blob_name].shape[1])) for blob_name in input_blobs}
            for blob_name in input_blobs:
                temp[blob_name][:num_of_vectors] = vectors[blob_name]
            vectors = temp

        vector_results = exec_net.infer(vectors)

        for blob_name in output_blobs:
            result[blob_name][slice_begin:slice_end] = vector_results[blob_name][:num_of_vectors]

        slice_begin += batch_size
        slice_end += batch_size

    return result
def compare_with_reference(result: np.ndarray, reference: np.ndarray):
    """Log absolute-error statistics between an inference result and its reference."""
    abs_errors = np.absolute(result - reference)
    count = abs_errors.size

    max_error = np.max(abs_errors)
    avg_error = np.sum(abs_errors) / count
    sum_square_error = np.sum(np.square(abs_errors))
    avg_rms_error = np.sqrt(sum_square_error / count)
    # Standard deviation via E[x^2] - E[x]^2 over the absolute errors.
    stdev_error = np.sqrt(sum_square_error / count - avg_error * avg_error)

    log.info(f'max error: {max_error:.7f}')
    log.info(f'avg error: {avg_error:.7f}')
    log.info(f'avg rms error: {avg_rms_error:.7f}')
    log.info(f'stdev error: {stdev_error:.7f}')
def main():
    """Run the OpenVINO speech sample end to end.

    Reads (or imports) a network, configures GNA plugin options, infers each
    utterance synchronously with state resets in between, logs per-utterance
    timing/error statistics, and optionally writes the outputs to file.
    Returns 0 on success (early-returns after pure model-export modes).
    """
    log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
    args = parse_args()

    # ---------------------------Step 1. Initialize inference engine core--------------------------------------------------
    log.info('Creating Inference Engine')
    ie = IECore()

    # ---------------------------Step 2. Read a model in OpenVINO Intermediate Representation---------------
    if args.model:
        log.info(f'Reading the network: {args.model}')
        # .xml and .bin files
        net = ie.read_network(model=args.model)

        # ---------------------------Step 3. Configure input & output----------------------------------------------------------
        log.info('Configuring input and output blobs')
        # Get names of input and output blobs
        if args.input_layers:
            input_blobs = re.split(', |,', args.input_layers)
        else:
            input_blobs = [next(iter(net.input_info))]

        if args.output_layers:
            output_name_port = [output.split(':') for output in re.split(', |,', args.output_layers)]
            try:
                output_name_port = [(blob_name, int(port)) for blob_name, port in output_name_port]
            except ValueError:
                log.error('Output Parameter does not have a port.')
                sys.exit(-4)
            net.add_outputs(output_name_port)
            output_blobs = [blob_name for blob_name, port in output_name_port]
        else:
            output_blobs = [list(net.outputs.keys())[-1]]

        # Set input and output precision manually
        for blob_name in input_blobs:
            net.input_info[blob_name].precision = 'FP32'
        for blob_name in output_blobs:
            net.outputs[blob_name].precision = 'FP32'

        net.batch_size = args.batch_size

    # ---------------------------Step 4. Loading model to the device-------------------------------------------------------
    devices = args.device.replace('HETERO:', '').split(',')
    plugin_config = {}

    if 'GNA' in args.device:
        # e.g. "GNA_SW_EXACT" selects a mode; bare "GNA" falls back to AUTO.
        gna_device_mode = devices[0] if '_' in devices[0] else 'GNA_AUTO'
        devices[0] = 'GNA'
        plugin_config['GNA_DEVICE_MODE'] = gna_device_mode
        plugin_config['GNA_PRECISION'] = f'I{args.quantization_bits}'

        # Get a GNA scale factor
        if args.import_gna_model:
            log.info(f'Using scale factor from the imported GNA model: {args.import_gna_model}')
        else:
            utterances = read_utterance_file(args.input.split(',')[0])
            key = sorted(utterances)[0]
            scale_factor = get_scale_factor(utterances[key])
            log.info(f'Using scale factor of {scale_factor:.7f} calculated from first utterance.')
            plugin_config['GNA_SCALE_FACTOR'] = str(scale_factor)

        if args.export_embedded_gna_model:
            plugin_config['GNA_FIRMWARE_MODEL_IMAGE'] = args.export_embedded_gna_model
            plugin_config['GNA_FIRMWARE_MODEL_IMAGE_GENERATION'] = args.embedded_gna_configuration

    device_str = f'HETERO:{",".join(devices)}' if 'HETERO' in args.device else devices[0]

    log.info('Loading the model to the plugin')
    if args.model:
        exec_net = ie.load_network(net, device_str, plugin_config)
    else:
        # Importing a previously exported GNA model; blob names come from
        # the executable network instead of the IR.
        exec_net = ie.import_network(args.import_gna_model, device_str, plugin_config)
        input_blobs = [next(iter(exec_net.input_info))]
        output_blobs = [list(exec_net.outputs.keys())[-1]]

    if args.input:
        input_files = re.split(', |,', args.input)
        if len(input_blobs) != len(input_files):
            log.error(f'Number of network inputs ({len(input_blobs)}) is not equal '
                      f'to number of ark files ({len(input_files)})')
            sys.exit(-3)

    if args.reference:
        reference_files = re.split(', |,', args.reference)
        if len(output_blobs) != len(reference_files):
            log.error('The number of reference files is not equal to the number of network outputs.')
            sys.exit(-5)

    if args.output:
        output_files = re.split(', |,', args.output)
        if len(output_blobs) != len(output_files):
            log.error('The number of output files is not equal to the number of network outputs.')
            sys.exit(-6)

    if args.export_gna_model:
        log.info(f'Writing GNA Model to {args.export_gna_model}')
        exec_net.export(args.export_gna_model)
        return 0

    if args.export_embedded_gna_model:
        log.info(f'Exported GNA embedded model to file {args.export_embedded_gna_model}')
        log.info(f'GNA embedded model export done for GNA generation {args.embedded_gna_configuration}')
        return 0

    # ---------------------------Step 5. Create infer request--------------------------------------------------------------
    # load_network() method of the IECore class with a specified number of requests (default 1) returns an ExecutableNetwork
    # instance which stores infer requests. So you already created Infer requests in the previous step.

    # ---------------------------Step 6. Prepare input---------------------------------------------------------------------
    file_data = [read_utterance_file(file_name) for file_name in input_files]
    # utterance name -> {input blob name -> matrix for that utterance}
    input_data = {
        utterance_name: {
            input_blobs[i]: file_data[i][utterance_name] for i in range(len(input_blobs))
        }
        for utterance_name in file_data[0].keys()
    }

    if args.reference:
        references = {output_blobs[i]: read_utterance_file(reference_files[i]) for i in range(len(output_blobs))}

    # ---------------------------Step 7. Do inference----------------------------------------------------------------------
    log.info('Starting inference in synchronous mode')
    results = {blob_name: {} for blob_name in output_blobs}
    infer_times = []

    for key in sorted(input_data):
        start_infer_time = default_timer()

        # Reset states between utterance inferences to remove a memory impact
        for request in exec_net.requests:
            for state in request.query_state():
                state.reset()

        result = infer_data(input_data[key], exec_net, input_blobs, output_blobs)

        for blob_name in result.keys():
            results[blob_name][key] = result[blob_name]

        infer_times.append(default_timer() - start_infer_time)

    # ---------------------------Step 8. Process output--------------------------------------------------------------------
    for blob_name in output_blobs:
        for i, key in enumerate(sorted(results[blob_name])):
            log.info(f'Utterance {i} ({key})')
            log.info(f'Output blob name: {blob_name}')
            log.info(f'Frames in utterance: {results[blob_name][key].shape[0]}')
            log.info(f'Total time in Infer (HW and SW): {infer_times[i] * 1000:.2f}ms')

            if args.reference:
                compare_with_reference(results[blob_name][key], references[blob_name][key])

            log.info('')

    log.info(f'Total sample time: {sum(infer_times) * 1000:.2f}ms')

    if args.output:
        for i, blob_name in enumerate(results):
            write_utterance_file(output_files[i], results[blob_name])
            log.info(f'File {output_files[i]} was created!')

    # ----------------------------------------------------------------------------------------------------------------------
    log.info('This sample is an API example, '
             'for any performance measurements please use the dedicated benchmark_app tool\n')
    return 0
if __name__ == '__main__':
sys.exit(main())
| [
"[email protected]"
] | |
aa24bd9aa8388674cc963220cfe43d39a2a3bb60 | d78ed7a8a1eef3e386d0ac88b9a96647d004e540 | /pitchmyob/apps/candidacy/api/serializers.py | cbe8d5b1d97c309abb62bf5ff81cdc1beee77930 | [] | no_license | yannistannier/django-pitchmyjob | d4cdcb39da72c28dc2867af4e3271a5d44fcf053 | 7b78feac01bb4b8b2ad8da8e82323f22fba478c4 | refs/heads/master | 2021-05-09T01:48:29.967322 | 2018-01-27T17:36:44 | 2018-01-27T17:36:44 | 119,186,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,142 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from rest_framework import serializers
from django.utils import timezone
from django.utils.translation import ugettext as _
from apps.applicant.api.serializers import ApplicantFullSerializer
from apps.authentication.api.serializers import UserSerializer
from apps.job.api.serializers import JobFullSerializer, ValidateJobSerializer
from ..models import Candidacy, CandidacyComment
class CandidacyProReadSerializer(serializers.ModelSerializer):
    """Read-only recruiter view of a candidacy with the applicant inlined."""
    applicant = ApplicantFullSerializer()

    class Meta:
        model = Candidacy
        fields = ('id', 'applicant', 'status', 'date_matching', 'date_like', 'date_request', 'date_video',
                  'date_decision')


class CandidacyProResumeSerializer(serializers.ModelSerializer):
    """Compact candidacy representation: foreign keys and status only."""
    class Meta:
        model = Candidacy
        fields = ('id', 'job', 'applicant', 'status')
class CandidacyProRequestSerializer(ValidateJobSerializer, serializers.ModelSerializer):
    """Create/update a candidacy as a recruiter "request".

    ValidateJobSerializer is mixed in for its job validation; the status and
    timestamps are always stamped server-side, never taken from the client.
    """
    class Meta:
        model = Candidacy
        fields = ('id', 'job', 'applicant', 'status')
        read_only_fields = ('id', 'status',)

    def get_validated_data(self, validated_data):
        # Force the acting collaborator, REQUEST status, and request time
        # regardless of what the client submitted.
        validated_data.update({
            'collaborator': self.context.get('request').user,
            'status': Candidacy.REQUEST,
            'date_request': timezone.now(),
        })
        return validated_data

    def create(self, validated_data):
        return super(CandidacyProRequestSerializer, self).create(self.get_validated_data(validated_data))

    def update(self, instance, validated_data):
        return super(CandidacyProRequestSerializer, self).update(instance, self.get_validated_data(validated_data))
class CandidacyProActionSerializer(serializers.ModelSerializer):
    """Base for recruiter decisions; subclasses define ``status_value``."""
    class Meta:
        model = Candidacy
        fields = ('id', 'job', 'applicant', 'status')
        read_only_fields = ('job', 'applicant', 'status')

    def update(self, instance, validated_data):
        # Ignore the client payload entirely: only the status flips and the
        # decision timestamp is recorded.
        return super(CandidacyProActionSerializer, self).update(instance, {
            'status': self.status_value,
            'date_decision': timezone.now()
        })


class CandidacyProApproveSerializer(CandidacyProActionSerializer):
    # Recruiter approves the applicant.
    status_value = Candidacy.SELECTED


class CandidacyProDisapproveSerializer(CandidacyProActionSerializer):
    # Recruiter rejects the applicant.
    status_value = Candidacy.NOT_SELECTED
class CandidacyApplicantReadSerializer(serializers.ModelSerializer):
    """Read-only applicant view of a candidacy with the job inlined."""
    job = JobFullSerializer()

    class Meta:
        model = Candidacy
        fields = ('id', 'job', 'status', 'date_matching', 'date_like', 'date_request', 'date_video', 'date_decision',
                  'matching_score')


class CandidacyApplicantActionSerializer(CandidacyProActionSerializer):
    """Applicant-side action base: subclasses set ``status_value`` and the
    name of the timestamp field (``date_field``) stamped on update."""
    class Meta:
        model = Candidacy
        fields = ('job', 'applicant', 'status')
        read_only_fields = ('job', 'applicant', 'status')

    def update(self, instance, validated_data):
        # Only the status and its matching date field are ever written.
        return super(CandidacyApplicantActionSerializer, self).update(instance, {
            'status': self.status_value,
            self.date_field: timezone.now()
        })


class CandidacyApplicantLikeSerializer(CandidacyApplicantActionSerializer):
    # Applicant "likes" the job offer.
    status_value = Candidacy.LIKE
    date_field = 'date_like'


class CandidacyApplicantVideoSerializer(CandidacyApplicantActionSerializer):
    # Applicant submitted their pitch video.
    status_value = Candidacy.VIDEO
    date_field = 'date_video'
class CandidacyProCommentSerializer(serializers.ModelSerializer):
    """Collaborator comments on a candidacy.

    The author is always the requesting user; it is exposed both as a pk
    (``collaborator``) and expanded (``collaborator_extra``).
    """
    collaborator = serializers.PrimaryKeyRelatedField(read_only=True, default=serializers.CurrentUserDefault())
    collaborator_extra = UserSerializer(source='collaborator', read_only=True)

    class Meta:
        model = CandidacyComment
        fields = ('id', 'candidacy', 'collaborator', 'collaborator_extra', 'message', 'created')
        read_only_fields = ('id',)

    def validate_candidacy(self, value):
        # A collaborator may only comment on candidacies belonging to an
        # offer of their own organization.
        request = self.context.get('request')
        if value.job.pro != request.user.pro:
            raise serializers.ValidationError(_('La candidature ne correspond pas à une offre de votre structure'))
        return value
| [
"[email protected]"
] | |
783268fec98dfa0e163bb100237680e80bc7922d | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/exp-big-1129.py | a58276df6085978dee05b5aa8fd7af4fdbd39de1 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,184 | py | # Compute x**y
def exp(x: int, y: int) -> int:
    """Return x**y (any y <= 0 yields 1)."""
    def multiply(acc: int, remaining: int) -> int:
        # Accumulator-passing recursion: one multiply per remaining step.
        if remaining <= 0:
            return acc
        return multiply(acc * x, remaining - 1)
    return multiply(1, y)
def exp2(x: int, y: int, x2: int, y2: int) -> int:
    """Return x**y (y <= 0 yields 1); x2 and y2 are accepted but unused."""
    def multiply(acc: int, remaining: int) -> int:
        # Accumulator-passing recursion mirroring exp().
        if remaining <= 0:
            return acc
        return multiply(acc * x, remaining - 1)
    return multiply(1, y)
def exp3(x: int, y: int, x2: int, y2: int, x3: int, y3: int) -> int:
    """Return x**y (y <= 0 yields 1); the extra parameters are unused."""
    def multiply(acc: int, remaining: int) -> int:
        # Accumulator-passing recursion mirroring exp().
        if remaining <= 0:
            return acc
        return multiply(acc * x, remaining - 1)
    return multiply(1, y)
def exp4(x: int, y: int, x2: int, y2: int, x3: int, y3: int, x4: int, y4: int) -> int:
    """Return x**y (y <= 0 yields 1); the extra parameters are unused."""
    def multiply(acc: int, remaining: int) -> int:
        # Accumulator-passing recursion mirroring exp().
        if remaining <= 0:
            return acc
        return multiply(acc * x, remaining - 1)
    return multiply(1, y)
def exp5(x: int, y: int, x2: int, y2: int, x3: int, y3: int, x4: int, y4: int, x5: int, y5: int) -> int:
    """Return x**y (y <= 0 yields 1); the extra parameters are unused.

    Fix: the original body contained an unexpanded ``$Literal`` template
    marker in place of one initializer, which made the entire file a syntax
    error. The redundant accumulators it guarded were initialization-only
    dead code, so the function is restored to the computation its exp/exp2..4
    siblings perform.
    """
    def multiply(acc: int, remaining: int) -> int:
        # Accumulator-passing recursion: one multiply per remaining step.
        if remaining <= 0:
            return acc
        return multiply(acc * x, remaining - 1)
    return multiply(1, y)
# Input parameter
n:int = 42
n2:int = 42  # n2..n5 and i2..i5 below are unused padding, typical of this
n3:int = 42  # synthetic benchmark family
n4:int = 42
n5:int = 42

# Run [0, n]
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0

# Crunch
# Prints 2**(i % 31) for i in [0, n]; the modulus keeps the exponent small.
while i <= n:
    print(exp(2, i % 31))
    i = i + 1
"[email protected]"
] | |
34cab7fee83819bf804417cf82f7a6de1598ece0 | 0141361f7c4d276f471ac278580479fa15bc4296 | /Stack/nextGreaterElement.py | 984c8067d73140262d8e6272db8305856be9046a | [] | no_license | tr1503/LeetCode | a7f2f1801c9424aa96d3cde497290ac1f7992f58 | 6d361cad2821248350f1d8432fdfef86895ca281 | refs/heads/master | 2021-06-24T19:03:08.681432 | 2020-10-09T23:53:22 | 2020-10-09T23:53:22 | 146,689,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | class Solution:
def nextGreaterElement(self, nums1, nums2):
    """
    :type nums1: List[int]
    :type nums2: List[int]
    :rtype: List[int]
    """
    # Monotonic decreasing stack over nums2: when a larger value arrives,
    # it is the "next greater element" for everything smaller on the stack.
    next_greater = {}
    pending = []
    for value in nums2:
        while pending and pending[-1] < value:
            next_greater[pending.pop()] = value
        pending.append(value)
    # Values left on the stack have no greater element to their right.
    return [next_greater.get(value, -1) for value in nums1]
| [
"[email protected]"
] | |
32c6be63566f504b8b09cc046016f6ba660d6254 | 1790d681f7d40e37e0173ff7dff241eaadc28d8c | /quokka/core/__init__.py | 84b2307ee25d41a554c8f210337c38d2fa1420cd | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mysky528/quokka | 97e88b24799e1e4696210ee1ac841535341dca7a | d2c3c169f6b78cace154274747297f8e1dc56825 | refs/heads/development | 2023-05-26T17:14:02.348809 | 2017-02-07T15:43:00 | 2017-02-07T15:43:00 | 82,263,205 | 0 | 0 | NOASSERTION | 2023-05-24T02:47:10 | 2017-02-17T06:05:42 | Python | UTF-8 | Python | false | false | 127 | py | # -*- coding: utf-8 -*-
# Markup formats a piece of content may declare; each pair is
# (stored value, human-readable label) in Django "choices" form.
TEXT_FORMATS = (
    ("html", "html"),
    ("markdown", "markdown"),
    ("plaintext", "plaintext")
)
| [
"[email protected]"
] | |
b9ecce42b1202cc56ef15d028af8b46a2235c8d4 | 28badfbfa1e1325ffb9da62e92e0b524e747f8e1 | /1678. Goal Parser Interpretation/1678.py | e581adf94fa949417a99e5c3b317f360b14b2f86 | [] | no_license | saransappa/My-leetcode-solutions | b53fab3fc9bcd96ac0bc4bb03eb916820d17584c | 3c5c7a813489877021109b152b190456cdc34de6 | refs/heads/master | 2021-08-16T13:52:33.230832 | 2021-07-30T11:54:06 | 2021-07-30T11:54:06 | 217,449,617 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | class Solution:
def interpret(self, command: str) -> str:
    # Goal Parser: "G" stays "G", "()" becomes "o", "(al)" becomes "al".
    # Replacing "()" first cannot corrupt "(al)" since the tokens never overlap.
    return command.replace("()", "o").replace("(al)", "al")
"[email protected]"
] | |
7d30204d076af405d7b2c34ac0d756fd412717e6 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04019/s057711170.py | ebbfa57436c2dfdd2cd7a6c08d43fac7bb08cc94 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | s = input();
st = set(s)
# The walk returns home iff horizontal and vertical moves can cancel:
# the set of used characters must be exactly {W,E}, {N,S}, or {S,E,N,W}.
print("Yes" if st == set("WE") or st == set("NS") or st == set("SENW") else "No")
"[email protected]"
] | |
e2a0f865427c8f0fdbd23b1ba4230258a10d1af1 | 376e1818d427b5e4d32fa6dd6c7b71e9fd88afdb | /graphics/py-ggplot/patches/patch-ggplot_utils.py | b76148150a1804a15c7f31595981c388dcbc5a11 | [] | no_license | NetBSD/pkgsrc | a0732c023519650ef821ab89c23ab6ab59e25bdb | d042034ec4896cc5b47ed6f2e5b8802d9bc5c556 | refs/heads/trunk | 2023-09-01T07:40:12.138283 | 2023-09-01T05:25:19 | 2023-09-01T05:25:19 | 88,439,572 | 321 | 138 | null | 2023-07-12T22:34:14 | 2017-04-16T20:04:15 | null | UTF-8 | Python | false | false | 357 | py | $NetBSD: patch-ggplot_utils.py,v 1.1 2019/06/17 20:27:16 adam Exp $
Fix for newer Pandas.
--- ggplot/utils.py.orig 2019-06-17 20:04:20.000000000 +0000
+++ ggplot/utils.py
@@ -78,7 +78,7 @@ def is_iterable(obj):
return False
date_types = (
- pd.tslib.Timestamp,
+ pd.Timestamp,
pd.DatetimeIndex,
pd.Period,
pd.PeriodIndex,
| [
"[email protected]"
] | |
d3011297ed94f18b0fae79b598ac857fbfc0a18b | 0c2a1211cd4a0e104d9345d6524bd58f6d778d1d | /creer/validate.py | 8d8917c3e5c5791b241e6a4d9567dd64705b2962 | [
"MIT"
] | permissive | siggame/Creer | 91bc3d70d894fe1e46e1337b83a45d397f3336fe | ca52dd4082b3447a0e128ff3c0434b58cf9ef144 | refs/heads/master | 2022-03-17T06:26:15.515254 | 2022-02-23T01:52:16 | 2022-02-23T01:52:16 | 32,945,795 | 0 | 3 | MIT | 2020-10-17T16:46:46 | 2015-03-26T18:48:28 | Python | UTF-8 | Python | false | false | 8,700 | py | # this validates a prototype to ensure none of the data/types/setup will screw with an output template
# basically, this validates Creer input data after it has been parsed
import re
# Built-in Creer type names; anything else must be a declared game class.
_primitives = [
    'string',
    'boolean',
    'int',
    'float',
    'list',
    'dictionary'
]

# Identifiers too close to keywords/literals in the client languages the
# templates target; rejected to avoid generating uncompilable code.
_dangerous_names = [
    'true',
    'false',
    'if',
    'else',
    'continue',
    'for',
    'end',
    'function',
    'pass',
    'assert',
    'eval',
    'break',
    'import',
    'from',
    'catch',
    'finally',
    'null',
    'while',
    'double',
    'float',
    'goto',
    'return'
]

# Populated by validate(): all acceptable type names / custom game classes.
_valid_types = []
_game_classes = []
def _check(obj, location, key, expected_type):
if type(obj) != dict:
raise Exception(location + " is not a dict to check if it contains " + key)
if not key in obj:
raise Exception("No '{}' in {}".format(key, location))
if type(obj[key]) != expected_type:
raise Exception("{}[{}] is not the expected type '{}'".format(location, key, expected_type))
def _validate_type(obj, location, type_key="type"):
    """Validate obj[type_key] as a type declaration, recursing into containers.

    Lists require a valueType; dictionaries require both keyType and
    valueType. The type name itself must be a primitive or a registered
    game class (module-level _valid_types).
    """
    _check(obj, location, type_key, dict)
    type_obj = obj[type_key]
    _check(type_obj, location + "'s type", "name", str)
    name = type_obj['name']

    if name == "list" or name == "dictionary":
        _validate_type(type_obj, "{}.{}[valueType]".format(location, name), "valueType")

    if name == "dictionary":
        if not 'keyType' in type_obj:
            raise Exception("No 'keyType' for type '{}' at '{}'".format(name, location))
        _validate_type(type_obj, "{}.{}[keyType]".format(location, name), "keyType")

    if not name in _valid_types:
        raise Exception("Type named '{}' is not a primitive or custom class in {}.".format(name, location))
def _validate_description(obj, location):
    """Validate obj['description']: no forbidden characters, sentence-cased, ends with a period."""
    _check(obj, location, "description", str)
    desc = obj["description"]

    for c in ['"', "\n", "\t", "\r"]:
        if c in desc:
            # NOTE(review): this translate table maps regex metacharacters,
            # none of which are in the forbidden list above, so `escaped` is
            # always just `c` — presumably copied from regex-escaping code.
            escaped = c.translate(str.maketrans({"-": r"\-", "]": r"\]", "\\": r"\\", "^": r"\^", "$": r"\$", "*": r"\*", ".": r"\."}))
            raise Exception("{} description contains illegal character {}".format(location, escaped))

    # desc[0].upper() != desc[0] flags a lowercase (or case-mapped) first char.
    if desc[0].upper() != desc[0]:
        raise Exception("Capitalize your doc string in " + location + "'s description")

    if desc[-1] != ".":
        raise Exception("End your doc strings as sentences with periods in " + location + "'s description")
# Validators run on every attribute/argument/return object. The dict key is
# documentation only; each validator re-checks its own key on the object.
_required = {
    'type': _validate_type,
    'description': _validate_description
}


def _check_required(obj, location, additional_reqs=None):
    """Run all _required validators on obj, plus optional extra key/type checks."""
    for key, call in _required.items():
        call(obj, location)
    if additional_reqs:
        for key, expected_type in additional_reqs.items():
            _check(obj, location, key, expected_type)
def _validate_name(key, obj, pascal=False):
    """Reject names that are badly cased, primitive-like, or keyword-like.

    pascal=True enforces PascalCase (class names); otherwise camelCase.
    """
    base_err = '"{}" is not a valid name for {}. '.format(key, obj)

    # NOTE(review): the camelCase pattern '([a-z]+([A-Za-z])?)+' is quite
    # permissive — confirm it matches the project's intended convention.
    search_re = '([A-Z][a-z]+)+' if pascal else '([a-z]+([A-Za-z])?)+'
    casing = 'PascalCase' if pascal else 'camelCase'

    # re.search plus match[0] != key effectively requires the whole key to
    # be one contiguous match of the pattern.
    match = re.search(search_re, key)
    if not match or match[0] != key:
        raise Exception(base_err + 'Name must be in {}.'.format(casing))

    if key.lower() in _primitives:
        raise Exception(base_err + 'Too similar to primitive type.')

    if key.lower() in _dangerous_names:
        raise Exception(base_err + 'Name too similar to popular programming keywords for some clients.')
###############################################################################
## Public Function To Call ##
###############################################################################
def validate(prototype):
    """Validate a parsed Creer prototype before template generation.

    Registers every non-special top-level key as a custom game class, then
    checks each class's attributes and functions: naming conventions, type
    declarations, argument defaults/optionality, and return invalidValues.
    Raises Exception on the first violation.
    """
    for primitive in _primitives:
        _valid_types.append(primitive)

    # First pass: register custom classes so type references resolve.
    for key, value in prototype.items():
        if key[0] != "_" and key != "Game" and key != "AI":
            _validate_name(key, "custom Game Object", pascal=True)
            _game_classes.append(key)
            _valid_types.append(key)

    for key, value in prototype.items():
        if key.startswith("_"):
            continue

        # Fix: the original compared strings with `is not`, which only works
        # by accident of CPython string interning; `!=` is the correct test.
        if key != "AI":
            _validate_description(value, key)
            _check(value, key, 'attributes', dict)
            for attr_key, attr in value['attributes'].items():
                _check_required(attr, key + "." + attr_key)

            if key != "Game" and key != "GameObject":
                if "parentClasses" not in value:
                    raise Exception(key + " expected to be game object sub class, but has no parent class(es)")
                for parent_class in value['parentClasses']:
                    if parent_class not in _game_classes:
                        raise Exception("{} has invalid parentClass '{}'".format(key, parent_class))

            for attr_name, attr in value['attributes'].items():
                _validate_name(attr_name, 'an attribute in ' + key)

        # Functions are validated for every class, including AI.
        _check(value, key, 'functions', dict)
        for funct_key, funct in value['functions'].items():
            loc = key + "." + funct_key
            _check(funct, loc, "description", str)

            if "arguments" in funct:
                _check(funct, loc, "arguments", list)
                optional = None  # index of the most recent argument with a default
                for i, arg in enumerate(funct['arguments']):
                    arg_loc = "{}.arguments[{}]".format(loc, i)
                    _check_required(arg, arg_loc, {'name': str})
                    _validate_name(arg['name'], arg_loc)
                    arg_loc += " (" + arg['name'] + ")"

                    if 'default' in arg and arg['default'] is not None:
                        default = arg['default']
                        optional = i
                        def_type = arg['type']['name']
                        type_of_default = type(default)
                        if def_type == "string":
                            if type_of_default != str:
                                raise Exception("{} default value should be a string, not a {}".format(arg_loc, type_of_default))
                        elif def_type == "int":
                            if type_of_default != int:
                                raise Exception("{} default value should be an integer, not a {}".format(arg_loc, type_of_default))
                        elif def_type == "float":
                            if type_of_default != int and type_of_default != float:
                                raise Exception("{} default value should be a float, not a {}".format(arg_loc, type_of_default))
                        elif def_type == "boolean":
                            if type_of_default != bool:
                                raise Exception("{} default value should be a bool, not a {}".format(arg_loc, type_of_default))
                        else:  # dict, list, or GameObject
                            if type_of_default != type(None):
                                raise Exception("{} default value must be null for dictionaries/lists/GameObjects, not a {}".format(arg_loc, type_of_default))

                    # Fix: the message used to interpolate the current index
                    # `i` as the "prior optional" index; report `optional`.
                    if optional is not None and 'default' not in arg:
                        raise Exception("{} has no default to make it optional, by prior index {} was optional. Optional args must all be at the end.".format(arg_loc, optional))

            if 'returns' in funct and funct['returns'] is not None:
                _check_required(funct['returns'], loc + ".returns")
                if 'invalidValue' not in funct['returns']:
                    raise Exception("{} requires an invalidValue for the return".format(loc))
                type_of_invalidValue = type(funct['returns']['invalidValue'])
                expected_type_name_of_invalidValue = funct['returns']['type']['name']
                if expected_type_name_of_invalidValue == 'string' and type_of_invalidValue != str:
                    raise Exception("{}.invalidValue is not of expected string type (was {})".format(loc, type_of_invalidValue))
                if expected_type_name_of_invalidValue == 'boolean' and type_of_invalidValue != bool:
                    raise Exception("{}.invalidValue is not of expected boolean type (was {})".format(loc, type_of_invalidValue))
                if expected_type_name_of_invalidValue == 'int' and type_of_invalidValue != int:
                    raise Exception("{}.invalidValue is not of expected int type (was {})".format(loc, type_of_invalidValue))
                # Fix: the float branch previously reported "int type".
                if expected_type_name_of_invalidValue == 'float' and type_of_invalidValue != int and type_of_invalidValue != float:
                    raise Exception("{}.invalidValue is not of expected float type (was {})".format(loc, type_of_invalidValue))
"[email protected]"
] | |
4cf9668b29f56dbff467585ac5b1cf3e5e6c1448 | 3a055205aa06db55ed1bf508f74b606ec5760e6e | /books/introducing-python-master/dev/style1.py | 1361f86cdaa58b6b367fbe797a924672477dcff9 | [] | no_license | OpRay/NCHC_PythonReading | 5561701afdd391e868b1d84ca4486ad387908c60 | 3e6ff5d654e13febdee8a9450ba45546903f2f53 | refs/heads/master | 2021-01-18T21:38:55.936413 | 2016-06-14T13:16:26 | 2016-06-14T13:16:26 | 54,468,540 | 1 | 2 | null | 2019-08-04T13:33:23 | 2016-03-22T11:07:38 | Python | UTF-8 | Python | false | false | 39 | py | a = 1
b = 2
print(a)
print(b)
# NOTE(review): `c` is never defined, so this line raises NameError at
# runtime — the file appears to be a deliberate lint/static-analysis demo.
print(c)
| [
"[email protected]"
] | |
a1d79cca2c5104fb9206731a66b2537a7d285f19 | b92c39c8498e0c6579a65430e63b7db927d01aea | /python/zookeeper/t.py | 8e6891b9cc06ac9523ab041255547bfa5f4cfd5d | [] | no_license | szqh97/test | 6ac15ad54f6d36e1d0efd50cbef3b622d374bb29 | ba76c6ad082e2763554bdce3f1b33fea150865dc | refs/heads/master | 2020-04-06T05:40:55.776424 | 2019-01-14T06:37:38 | 2019-01-14T06:37:38 | 14,772,703 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | #!/usr/bin/env python
import os
import logging
import sys
log = logging.getLogger('test-log')
formatter = logging.Formatter('%(threadName)s %(asctime)s %(name)-15s %(levelname)-8s: %(message)s\n')
file_handler = logging.FileHandler('test.log')
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler(sys.stderr)
log.addHandler(file_handler)
log.addHandler(stream_handler)
log.info('sss')
| [
"[email protected]"
] | |
1cd5da762984aa9eec883c2279dafda8f823ba8d | f3b233e5053e28fa95c549017bd75a30456eb50c | /p38a_input/L2N/2N-3FN_MD_NVT_rerun/set.py | f0fb68d8e34600e77687fd0bb497d33d7acb1086 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | import os
dir = '/mnt/scratch/songlin3/run/p38a/L2N/MD/ti_one-step/2N_3FN/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi.in'
temp_prodin = filesdir + 'temp_prod.in'
temp_pbs = filesdir + 'temp.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.system("rm -r %6.5f" %(j))
os.system("mkdir %6.5f" %(j))
os.chdir("%6.5f" %(j))
os.system("rm *")
workdir = dir + "%6.5f" %(j) + '/'
#equiin
eqin = workdir + "%6.5f_equi.in" %(j)
os.system("cp %s %s" %(temp_equiin, eqin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
#prodin
prodin = workdir + "%6.5f_prod.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#top
os.system("cp ../2N-3FN_merged.prmtop .")
os.system("cp ../0.5_equi_0.rst .")
#submit pbs
os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"[email protected]"
] | |
07b00fdfdd9a5990d21bb541c473a757afb40f7e | 1816378da612c7db376934b033e4fd64951338b6 | /gui/services/migrations/0034_auto__add_unique_iscsitargetextent_iscsi_target_extent_name.py | 1a5c794031463cb1ff768dfb5eeb60e9e5afdba3 | [] | no_license | quater/freenas-9.2-xen | 46517a7a23546764347d3c91108c70a8bd648ec6 | 96e580055fa97575f0a0cb23a72495860467bcfb | refs/heads/master | 2021-01-16T22:21:38.781962 | 2014-02-07T05:59:13 | 2014-02-07T05:59:13 | 16,609,785 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,215 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    """South data migration: de-duplicate iSCSI extent names, then make the
    ``iscsi_target_extent_name`` column unique."""

    def forwards(self, orm):
        """Rename colliding extents (appending "0" until unique), then add the
        unique constraint."""
        for d in orm.iSCSITargetExtent.objects.all():
            name = d.iscsi_target_extent_name
            # Append "0" until no OTHER extent carries this name.
            while True:
                exs = orm.iSCSITargetExtent.objects.filter(iscsi_target_extent_name=name).exclude(pk=d.id)
                if exs.count() == 0:
                    break
                name = name + "0"
            # Only write back if the name actually changed.
            if name != d.iscsi_target_extent_name:
                d.iscsi_target_extent_name = name
                d.save()

        # Adding unique constraint on 'iSCSITargetExtent', fields ['iscsi_target_extent_name']
        db.create_unique('services_iscsitargetextent', ['iscsi_target_extent_name'])

    def backwards(self, orm):
        """Drop the unique constraint (the "0"-suffix renames are not undone)."""
        # Removing unique constraint on 'iSCSITargetExtent', fields ['iscsi_target_extent_name']
        db.delete_unique('services_iscsitargetextent', ['iscsi_target_extent_name'])

    # South's frozen snapshot of the app models, used to build the fake `orm`
    # passed to forwards()/backwards().  Generated data — do not edit by hand.
    models = {
        'services.activedirectory': {
            'Meta': {'object_name': 'ActiveDirectory'},
            'ad_adminname': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'ad_adminpw': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'ad_dcname': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'ad_domainname': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'ad_netbiosname': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'ad_workgroup': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'services.afp': {
            'Meta': {'object_name': 'AFP'},
            'afp_srv_ddp': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'afp_srv_guest': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'afp_srv_guest_user': ('freenasUI.freeadmin.models.UserField', [], {'default': "'nobody'", 'max_length': '120'}),
            'afp_srv_local': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'afp_srv_name': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'services.cifs': {
            'Meta': {'object_name': 'CIFS'},
            'cifs_srv_aio_enable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cifs_srv_aio_rs': ('django.db.models.fields.IntegerField', [], {'default': "'1'", 'max_length': '120'}),
            'cifs_srv_aio_ws': ('django.db.models.fields.IntegerField', [], {'default': "'1'", 'max_length': '120'}),
            'cifs_srv_authmodel': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'cifs_srv_description': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
            'cifs_srv_dirmask': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
            'cifs_srv_dosattr': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cifs_srv_doscharset': ('django.db.models.fields.CharField', [], {'default': "'CP437'", 'max_length': '120'}),
            'cifs_srv_easupport': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cifs_srv_filemask': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
            'cifs_srv_guest': ('freenasUI.freeadmin.models.UserField', [], {'default': "'nobody'", 'max_length': '120'}),
            'cifs_srv_guestok': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cifs_srv_guestonly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cifs_srv_homedir': ('freenasUI.freeadmin.models.PathField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'cifs_srv_homedir_browseable_enable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cifs_srv_homedir_enable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cifs_srv_largerw': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cifs_srv_localmaster': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cifs_srv_loglevel': ('django.db.models.fields.CharField', [], {'default': "'Minimum'", 'max_length': '120'}),
            'cifs_srv_netbiosname': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'cifs_srv_nullpw': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cifs_srv_sendfile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cifs_srv_smb_options': ('django.db.models.fields.TextField', [], {'max_length': '120', 'blank': 'True'}),
            'cifs_srv_timeserver': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cifs_srv_unixcharset': ('django.db.models.fields.CharField', [], {'default': "'UTF-8'", 'max_length': '120'}),
            'cifs_srv_workgroup': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'services.dynamicdns': {
            'Meta': {'object_name': 'DynamicDNS'},
            'ddns_domain': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
            'ddns_fupdateperiod': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
            'ddns_options': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'ddns_password': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'ddns_provider': ('django.db.models.fields.CharField', [], {'default': "'dyndns'", 'max_length': '120'}),
            'ddns_updateperiod': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
            'ddns_username': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'services.ftp': {
            'Meta': {'object_name': 'FTP'},
            'ftp_anonpath': ('freenasUI.freeadmin.models.PathField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'ftp_anonuserbw': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'ftp_anonuserdlbw': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'ftp_banner': ('django.db.models.fields.TextField', [], {'max_length': '120', 'blank': 'True'}),
            'ftp_clients': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'ftp_defaultroot': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ftp_dirmask': ('django.db.models.fields.CharField', [], {'default': "'077'", 'max_length': '3'}),
            'ftp_filemask': ('django.db.models.fields.CharField', [], {'default': "'077'", 'max_length': '3'}),
            'ftp_fxp': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ftp_ident': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ftp_ipconnections': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'ftp_localuserbw': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'ftp_localuserdlbw': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'ftp_loginattempt': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'ftp_masqaddress': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'blank': 'True'}),
            'ftp_onlyanonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ftp_onlylocal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ftp_options': ('django.db.models.fields.TextField', [], {'max_length': '120', 'blank': 'True'}),
            'ftp_passiveportsmax': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'ftp_passiveportsmin': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'ftp_port': ('django.db.models.fields.PositiveIntegerField', [], {'default': '21'}),
            'ftp_resume': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ftp_reversedns': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ftp_rootlogin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ftp_ssltls': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ftp_timeout': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'services.iscsitarget': {
            'Meta': {'object_name': 'iSCSITarget'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'iscsi_target_alias': ('django.db.models.fields.CharField', [], {'max_length': '120', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'iscsi_target_authgroup': ('django.db.models.fields.IntegerField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
            'iscsi_target_authtype': ('django.db.models.fields.CharField', [], {'default': "'Auto'", 'max_length': '120'}),
            'iscsi_target_flags': ('django.db.models.fields.CharField', [], {'default': "'rw'", 'max_length': '120'}),
            'iscsi_target_initialdigest': ('django.db.models.fields.CharField', [], {'default': "'Auto'", 'max_length': '120'}),
            'iscsi_target_initiatorgroup': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['services.iSCSITargetAuthorizedInitiator']"}),
            'iscsi_target_logical_blocksize': ('django.db.models.fields.IntegerField', [], {'default': '512', 'max_length': '3'}),
            'iscsi_target_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '120'}),
            'iscsi_target_portalgroup': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['services.iSCSITargetPortal']"}),
            'iscsi_target_queue_depth': ('django.db.models.fields.IntegerField', [], {'default': '32', 'max_length': '3'}),
            'iscsi_target_serial': ('django.db.models.fields.CharField', [], {'default': "'10000001'", 'max_length': '16'}),
            'iscsi_target_type': ('django.db.models.fields.CharField', [], {'max_length': '120'})
        },
        'services.iscsitargetauthcredential': {
            'Meta': {'object_name': 'iSCSITargetAuthCredential'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'iscsi_target_auth_peersecret': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'iscsi_target_auth_peeruser': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
            'iscsi_target_auth_secret': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'iscsi_target_auth_tag': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'iscsi_target_auth_user': ('django.db.models.fields.CharField', [], {'max_length': '120'})
        },
        'services.iscsitargetauthorizedinitiator': {
            'Meta': {'object_name': 'iSCSITargetAuthorizedInitiator'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'iscsi_target_initiator_auth_network': ('django.db.models.fields.TextField', [], {'default': "'ALL'", 'max_length': '2048'}),
            'iscsi_target_initiator_comment': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
            'iscsi_target_initiator_initiators': ('django.db.models.fields.TextField', [], {'default': "'ALL'", 'max_length': '2048'}),
            'iscsi_target_initiator_tag': ('django.db.models.fields.IntegerField', [], {'default': '1', 'unique': 'True'})
        },
        'services.iscsitargetextent': {
            'Meta': {'object_name': 'iSCSITargetExtent'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'iscsi_target_extent_comment': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
            'iscsi_target_extent_filesize': ('django.db.models.fields.CharField', [], {'default': '0', 'max_length': '120'}),
            'iscsi_target_extent_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '120'}),
            'iscsi_target_extent_path': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'iscsi_target_extent_type': ('django.db.models.fields.CharField', [], {'max_length': '120'})
        },
        'services.iscsitargetglobalconfiguration': {
            'Meta': {'object_name': 'iSCSITargetGlobalConfiguration'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'iscsi_basename': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'iscsi_defaultt2r': ('django.db.models.fields.IntegerField', [], {'default': '60', 'max_length': '120'}),
            'iscsi_defaultt2w': ('django.db.models.fields.IntegerField', [], {'default': '2', 'max_length': '120'}),
            'iscsi_discoveryauthgroup': ('django.db.models.fields.IntegerField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
            'iscsi_discoveryauthmethod': ('django.db.models.fields.CharField', [], {'default': "'Auto'", 'max_length': '120'}),
            'iscsi_firstburst': ('django.db.models.fields.IntegerField', [], {'default': '65536', 'max_length': '120'}),
            'iscsi_iotimeout': ('django.db.models.fields.IntegerField', [], {'default': '30', 'max_length': '120'}),
            'iscsi_luc_authgroup': ('django.db.models.fields.IntegerField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
            'iscsi_luc_authmethod': ('django.db.models.fields.CharField', [], {'default': "'chap'", 'max_length': '120', 'blank': 'True'}),
            'iscsi_luc_authnetwork': ('django.db.models.fields.IPAddressField', [], {'default': "'255.255.255.0'", 'max_length': '15', 'blank': 'True'}),
            'iscsi_lucip': ('django.db.models.fields.IPAddressField', [], {'default': "'127.0.0.1'", 'max_length': '15', 'blank': 'True'}),
            'iscsi_lucport': ('django.db.models.fields.IntegerField', [], {'default': '3261', 'max_length': '120', 'null': 'True', 'blank': 'True'}),
            'iscsi_maxburst': ('django.db.models.fields.IntegerField', [], {'default': '262144', 'max_length': '120'}),
            'iscsi_maxconnect': ('django.db.models.fields.IntegerField', [], {'default': '8', 'max_length': '120'}),
            'iscsi_maxoutstandingr2t': ('django.db.models.fields.IntegerField', [], {'default': '16', 'max_length': '120'}),
            'iscsi_maxrecdata': ('django.db.models.fields.IntegerField', [], {'default': '262144', 'max_length': '120'}),
            'iscsi_maxsesh': ('django.db.models.fields.IntegerField', [], {'default': '16', 'max_length': '120'}),
            'iscsi_nopinint': ('django.db.models.fields.IntegerField', [], {'default': '20', 'max_length': '120'}),
            'iscsi_r2t': ('django.db.models.fields.IntegerField', [], {'default': '32', 'max_length': '120'}),
            'iscsi_toggleluc': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'services.iscsitargetportal': {
            'Meta': {'object_name': 'iSCSITargetPortal'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'iscsi_target_portal_comment': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
            'iscsi_target_portal_listen': ('django.db.models.fields.TextField', [], {'default': "'0.0.0.0:3260'", 'max_length': '120'}),
            'iscsi_target_portal_tag': ('django.db.models.fields.IntegerField', [], {'default': '1', 'max_length': '120'})
        },
        'services.iscsitargettoextent': {
            'Meta': {'object_name': 'iSCSITargetToExtent'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'iscsi_extent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['services.iSCSITargetExtent']", 'unique': 'True'}),
            'iscsi_target': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['services.iSCSITarget']"})
        },
        'services.ldap': {
            'Meta': {'object_name': 'LDAP'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ldap_anonbind': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ldap_basedn': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
            'ldap_groupsuffix': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
            'ldap_hostname': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
            'ldap_machinesuffix': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
            'ldap_options': ('django.db.models.fields.TextField', [], {'max_length': '120', 'blank': 'True'}),
            'ldap_passwordsuffix': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
            'ldap_pwencryption': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'ldap_rootbasedn': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
            'ldap_rootbindpw': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
            'ldap_ssl': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
            'ldap_tls_cacertfile': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'ldap_usersuffix': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'})
        },
        'services.nfs': {
            'Meta': {'object_name': 'NFS'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nfs_srv_async': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'nfs_srv_servers': ('django.db.models.fields.CharField', [], {'max_length': '120'})
        },
        'services.rsyncd': {
            'Meta': {'object_name': 'Rsyncd'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'rsyncd_auxiliary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'rsyncd_port': ('django.db.models.fields.IntegerField', [], {'default': '873'})
        },
        'services.rsyncmod': {
            'Meta': {'object_name': 'RsyncMod'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'rsyncmod_auxiliary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'rsyncmod_comment': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'rsyncmod_group': ('freenasUI.freeadmin.models.GroupField', [], {'default': "'nobody'", 'max_length': '120', 'blank': 'True'}),
            'rsyncmod_hostsallow': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'rsyncmod_hostsdeny': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'rsyncmod_maxconn': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'rsyncmod_mode': ('django.db.models.fields.CharField', [], {'default': "'rw'", 'max_length': '120'}),
            'rsyncmod_name': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'rsyncmod_path': ('freenasUI.freeadmin.models.PathField', [], {'max_length': '255'}),
            'rsyncmod_user': ('freenasUI.freeadmin.models.UserField', [], {'default': "'nobody'", 'max_length': '120', 'blank': 'True'})
        },
        'services.services': {
            'Meta': {'object_name': 'services'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'srv_enable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'srv_service': ('django.db.models.fields.CharField', [], {'max_length': '120'})
        },
        'services.smart': {
            'Meta': {'object_name': 'SMART'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'smart_critical': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'smart_difference': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'smart_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'smart_informal': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'smart_interval': ('django.db.models.fields.IntegerField', [], {'default': '30'}),
            'smart_powermode': ('django.db.models.fields.CharField', [], {'default': "'never'", 'max_length': '60'})
        },
        'services.snmp': {
            'Meta': {'object_name': 'SNMP'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'snmp_community': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'snmp_contact': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
            'snmp_location': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
            'snmp_options': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'snmp_traps': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'services.ssh': {
            'Meta': {'object_name': 'SSH'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ssh_compression': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ssh_host_dsa_key': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'ssh_host_dsa_key_pub': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'ssh_host_key': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'ssh_host_key_pub': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'ssh_host_rsa_key': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'ssh_host_rsa_key_pub': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'ssh_options': ('django.db.models.fields.TextField', [], {'max_length': '120', 'blank': 'True'}),
            'ssh_passwordauth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ssh_privatekey': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'blank': 'True'}),
            'ssh_rootlogin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ssh_tcpfwd': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ssh_tcpport': ('django.db.models.fields.CharField', [], {'max_length': '120'})
        },
        'services.tftp': {
            'Meta': {'object_name': 'TFTP'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'tftp_directory': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'tftp_newfiles': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'tftp_options': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
            'tftp_port': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'tftp_umask': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'tftp_username': ('freenasUI.freeadmin.models.UserField', [], {'default': "'nobody'", 'max_length': '120'})
        },
        'services.ups': {
            'Meta': {'object_name': 'UPS'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ups_description': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
            'ups_driver': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
            'ups_emailnotify': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ups_identifier': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'ups_options': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'ups_port': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
            'ups_rmonitor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ups_shutdown': ('django.db.models.fields.CharField', [], {'default': "'batt'", 'max_length': '120'}),
            'ups_shutdowntimer': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'ups_subject': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
            'ups_toemail': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'})
        }
    }

    # Restrict this migration's frozen ORM to the 'services' app.
    complete_apps = ['services']
| [
"[email protected]"
] | |
d34fbe1e3e13aba5af7f4311621eadacb0303484 | 92b3ade5b69889b806f37c440ff7bbe9ad1e9ca9 | /mysite/project/apps/delete_app/apps.py | 98a79918bf2a624ba19a7809a99856fa1bfea326 | [] | no_license | BorisovDima/WebProject | 4b468ed07555140890165954710185612d629ec9 | e84e5e5d83028412bdfb8cb93c8ec0fde5c54980 | refs/heads/master | 2022-12-10T17:17:56.159721 | 2019-02-22T02:42:53 | 2019-02-22T02:42:53 | 160,443,451 | 0 | 0 | null | 2022-11-22T03:08:53 | 2018-12-05T01:43:46 | Python | UTF-8 | Python | false | false | 94 | py | from django.apps import AppConfig
class DeleteAppConfig(AppConfig):
name = 'delete_app'
| [
"[email protected]"
] | |
adb5db04bf882b17a899d77604e756bf0d9d820e | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544839/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_206/run_cfg.py | b9f6e4a67763432bc61a0361d65f5f9d9897e0c4 | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544839/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_983.root',
'/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_984.root',
'/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_985.root',
'/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_986.root',
'/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_987.root')
)
| [
"[email protected]"
] | |
e5ef76fee0173c91abd8ae221d0dbccf96b57853 | 5608a9cd3bec8cab1c3f9d7f42896107b78593cc | /tests/unit/cfngin/hooks/test_iam.py | 1a5cfdf027e579f17585b5d71d8e6463801586c8 | [
"Apache-2.0"
] | permissive | troyready/runway | cdee6d94f42173c8aa0bd414620b68be36a510aa | 4fd299961a4b73df39e14f4f19a7236f7be17dd8 | refs/heads/master | 2021-06-18T16:05:30.712211 | 2021-01-14T01:44:32 | 2021-01-14T01:44:32 | 151,314,626 | 0 | 0 | Apache-2.0 | 2018-10-02T19:55:09 | 2018-10-02T19:55:08 | null | UTF-8 | Python | false | false | 2,784 | py | """Tests for runway.cfngin.hooks.iam."""
import unittest
import boto3
from awacs.helpers.trust import get_ecs_assumerole_policy
from botocore.exceptions import ClientError
from moto import mock_iam
from runway.cfngin.hooks.iam import _get_cert_arn_from_response, create_ecs_service_role
from ..factories import mock_context, mock_provider
REGION = "us-east-1"
# No test for stacker.hooks.iam.ensure_server_cert_exists until
# updated version of moto is imported
# (https://github.com/spulec/moto/pull/679) merged
class TestIAMHooks(unittest.TestCase):
    """Tests for runway.cfngin.hooks.iam."""

    # Inline policy the hook is expected to attach to the ECS service role.
    POLICY_NAME = "AmazonEC2ContainerServiceRolePolicy"

    def setUp(self):
        """Create a fresh mock context and provider for each test."""
        self.context = mock_context(namespace="fake")
        self.provider = mock_provider(region=REGION)

    def _assert_ecs_service_role(self, client, role_name):
        """Assert *role_name* exists and carries the expected inline policy.

        Shared by both create_ecs_service_role tests (previously duplicated
        verbatim in each).
        """
        role = client.get_role(RoleName=role_name)
        self.assertIn("Role", role)
        self.assertEqual(role_name, role["Role"]["RoleName"])
        # get_role_policy raises ClientError (failing the test) if missing.
        client.get_role_policy(RoleName=role_name, PolicyName=self.POLICY_NAME)

    def test_get_cert_arn_from_response(self):
        """Test get cert arn from response."""
        arn = "fake-arn"
        # Creation response
        response = {"ServerCertificateMetadata": {"Arn": arn}}
        self.assertEqual(_get_cert_arn_from_response(response), arn)

        # Existing cert response wraps the same metadata one level deeper.
        response = {"ServerCertificate": response}
        self.assertEqual(_get_cert_arn_from_response(response), arn)

    def test_create_service_role(self):
        """Hook creates the role when it does not exist yet."""
        with mock_iam():
            client = boto3.client("iam", region_name=REGION)
            role_name = "ecsServiceRole"
            # Sanity check: the role must not pre-exist in the mock account.
            with self.assertRaises(ClientError):
                client.get_role(RoleName=role_name)

            self.assertTrue(
                create_ecs_service_role(context=self.context, provider=self.provider)
            )
            self._assert_ecs_service_role(client, role_name)

    def test_create_service_role_already_exists(self):
        """Hook succeeds (and attaches the policy) when the role pre-exists."""
        with mock_iam():
            client = boto3.client("iam", region_name=REGION)
            role_name = "ecsServiceRole"
            client.create_role(
                RoleName=role_name,
                AssumeRolePolicyDocument=get_ecs_assumerole_policy().to_json(),
            )

            self.assertTrue(
                create_ecs_service_role(context=self.context, provider=self.provider)
            )
            self._assert_ecs_service_role(client, role_name)
| [
"[email protected]"
] | |
197d3c7c153009b1ecc3a674cd4dd32cc4606a41 | c4702d1a06640555829b367852138cc93ba4a161 | /dym_proses_stnk/models/dym_birojasa_withholding_tax.py | 01e3f2c50be8ac2b5ed67a4ef92c69f5da9b1cfa | [] | no_license | Rizalimami/dym | 0ecadf9c049b22ebfebf92e4eab6eaad17dd3e26 | af1bcf7b77a3212bc8a8a0e41e6042a134587ed4 | refs/heads/master | 2020-04-08T10:56:43.605698 | 2018-11-27T06:44:08 | 2018-11-27T06:44:08 | 159,287,876 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 16,652 | py | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields, api, _
import openerp.addons.decimal_precision as dp
from openerp.exceptions import Warning
class dym_proses_birojasa(models.Model):
    """Extend dym.proses.birojasa with withholding-tax (PPh) support.

    Adds the withholding lines, the computed PPh / net totals, and hooks the
    journal-entry creation so that one move line is created per withholding.
    """
    _inherit = "dym.proses.birojasa"
    withholding_ids = fields.One2many(
        'dym.proses.birojasa.withholding',
        'birojasa_id',
        string='Withholdings',
        required=False,
        readonly=True,
        states={'draft': [('readonly', False)]}
    )
    withholdings_amount = fields.Float(
        'PPh',
        # waiting for a PR 9081 to fix computed fields translations
        # _('Withholdings Amount'),
        help='Importe a ser Pagado con Retenciones',
        # help=_('Amount Paid With Withholdings'),
        compute='_get_withholdings_amount',
        digits=dp.get_precision('Account'),
    )
    total_net = fields.Float(
        'Total Tagihan (Net)',
        # waiting for a PR 9081 to fix computed fields translations
        # _('Withholdings Amount'),
        help='Importe a ser Pagado con Retenciones',
        # help=_('Amount Paid With Withholdings'),
        compute='_get_total_net',
        digits=dp.get_precision('Account'),
    )
    amountview = fields.Float('Total',
        help='Total amount view',
        digits=dp.get_precision('Account'),
    )
    # tax_base = fields.Float('Tax Base',compute=_compute_amount)
    @api.onchange('net_amount')
    def onchange_amount_total_view(self):
        # Mirror net_amount into the read-only "Total" widget.
        self.amountview = self.net_amount
    @api.one
    @api.depends(
        'withholding_ids',
    )
    def _get_withholdings_amount(self):
        # Sum of all withholding lines for this single record.
        self.withholdings_amount = self.get_withholdings_amount()[self.id]
        # We force the update of paylines and amount
        # self._get_paylines_amount()
        # self._get_amount(inverse=True)
    @api.multi
    def get_withholdings_amount(self):
        """Return {record id: sum of its withholding line amounts}."""
        res = {}
        for birojasa in self:
            withholdings_amount = sum(
                x.amount for x in birojasa.withholding_ids)
            res[birojasa.id] = withholdings_amount
        return res
    @api.one
    @api.depends(
        'withholdings_amount',
    )
    def _get_total_net(self):
        self.total_net = self.get_total_net()[self.id]
        # We force the update of paylines and amount
        # self._get_paylines_amount()
        # self._get_amount(inverse=True)
    @api.multi
    def get_total_net(self):
        """Return {record id: amount_total minus the withheld amount}."""
        res = {}
        for birojasa in self:
            total_net = birojasa.amount_total - birojasa.withholdings_amount
            res[birojasa.id] = total_net
        return res
    @api.multi
    def get_paylines_amount(self):
        # Withholdings count as part of the paid amount.
        res = super(dym_proses_birojasa, self).get_paylines_amount()
        for birojasa in self:
            withholdings_amount = birojasa.get_withholdings_amount()[birojasa.id]
            res[birojasa.id] = res[birojasa.id] + withholdings_amount
        return res
    @api.model
    def paylines_moves_create(
            self, birojasa, move_id, company_currency, current_currency):
        # Create the regular pay lines first, then append the withholding
        # journal items to the same move.
        paylines_total = super(dym_proses_birojasa, self).paylines_moves_create(
            birojasa, move_id, company_currency, current_currency)
        withholding_total = self.create_withholding_lines(
            birojasa, move_id, company_currency, current_currency)
        return paylines_total + withholding_total
    # TODO: consider dropping api.model and using self instead of passing the record
    # TODO: make sure all of this only runs for payments and receipts, not sale/purchase
    @api.model
    def create_withholding_lines(
            self, birojasa, move_id, company_currency, current_currency):
        """Create one journal item per withholding line of ``birojasa``.

        Bug fix: the original body referenced an undefined name ``voucher``
        (a leftover from the account_voucher code it was copied from), which
        raised NameError at runtime; it also contained a duplicated, dead
        ``elif`` branch for the receipt case.  Both are fixed here; every
        other statement is unchanged.

        :return: signed total (debit - credit) of the created move lines.
        """
        move_lines = self.env['account.move.line']
        withholding_total = 0.0
        for line in birojasa.withholding_ids:
            name = '%s: %s' % (
                line.tax_withholding_id.description, line.internal_number)
            if line.name:
                name += ' (%s)' % line.name
            payment_date = False
            amount = line.amount
            # Positive withholdings post to the regular account, negative
            # ones (refunds) to the refund account.
            if amount >= 0:
                account = line.tax_withholding_id.account_id
            else:
                account = line.tax_withholding_id.ref_account_id
            partner = self.env['res.partner'].search([('kas_negara','=',True)])
            move_line = move_lines.create(
                self.prepare_move_line(
                    birojasa, amount, move_id, name, company_currency,
                    current_currency, payment_date, account, partner)
            )
            line.move_line_id = move_line.id
            # Propagate the analytic account / branch / division of the
            # first counterpart line onto the withholding line.
            if birojasa.line_dr_ids and birojasa.type == 'payment':
                move_line.update({
                    'analytic_account_id':birojasa.line_dr_ids[0].move_line_id.analytic_account_id.id,
                    'branch_id':birojasa.line_dr_ids[0].move_line_id.branch_id.id,
                    'division':birojasa.line_dr_ids[0].move_line_id.division,
                })
            elif birojasa.line_cr_ids and birojasa.type == 'receipt':
                move_line.update({
                    'analytic_account_id':birojasa.line_cr_ids[0].move_line_id.analytic_account_id.id,
                    'branch_id':birojasa.line_cr_ids[0].move_line_id.branch_id.id,
                    'division':birojasa.line_cr_ids[0].move_line_id.division,
                })
            move_line.update({
                'tax_code_id': line.tax_withholding_id.tax_code_id.id,
                'tax_amount': amount,
            })
            withholding_total += move_line.debit - move_line.credit
        return withholding_total
class dym_proses_birojasa_withholding(models.Model):
    """One withholding-tax (PPh) line attached to a birojasa document.

    NOTE(review): this model mixes the new (v8) API with legacy
    cr/uid-style tax-computation helpers (_unit_compute* / compute_inv /
    _compute) that look copied from the old account.tax implementation.
    """
    _name = "dym.proses.birojasa.withholding"
    _rec_name = "display_name"
    _description = "Account Withholding Birojasa"
    birojasa_id = fields.Many2one(
        'dym.proses.birojasa',
        'Birojasa',
        required=True,
        ondelete='cascade',
    )
    # Computed "internal_number (name)" label used as the record name.
    display_name = fields.Char(
        compute='get_display_name'
    )
    name = fields.Char(
        'Nomor Bukti Potong',
    )
    # Filled from the withholding type's sequence in create() when left as '/'.
    internal_number = fields.Char(
        'Internal Number',
        required=True,
        default='/',
        readonly=True,
        states={'draft': [('readonly', False)]},
    )
    date = fields.Date(
        'Date',
        required=True,
        default=fields.Date.context_today,
    )
    state = fields.Selection(
        related='birojasa_id.state',
        default='draft',
    )
    tax_withholding_id = fields.Many2one(
        'account.tax.withholding',
        string='Jenis PPh',
        required=True,
        readonly=True,
        states={'draft': [('readonly', False)]},
    )
    comment = fields.Text(
        'Additional Information',
    )
    # Withheld amount, recomputed by the get_tax_amount onchange.
    amount = fields.Float(
        'Jumlah PPh',
        required=True,
        digits=dp.get_precision('Account'),
        readonly=True,
        states={'draft': [('readonly', False)]},
    )
    tax_base = fields.Float(
        'Tax Base',
        required=True,
        digits=dp.get_precision('Account'),
        states={'draft': [('readonly', False)]},
        # default=_get_tax_base
    )
    # Journal item generated for this withholding when the birojasa is posted.
    move_line_id = fields.Many2one(
        'account.move.line',
        'Journal Item',
        readonly=True,
    )
    # Related fields
    partner_id = fields.Many2one(
        related='birojasa_id.partner_id',
        store=True, readonly=True,
    )
    company_id = fields.Many2one(
        'res.company',
        related='birojasa_id.branch_id.company_id',
        string='Company', store=True, readonly=True
    )
    type = fields.Selection(
        related='birojasa_id.type',
        string='Tipe',
        # string='Type',
        # waiting for a PR 9081 to fix computed fields translations
        readonly=True,
    )
    # _sql_constraints = [
    #     ('internal_number_uniq', 'unique(internal_number, tax_withholding_id)',
    #     'Internal Number must be unique per Tax Withholding!'),
    # ]
    # @api.one
    # @api.depends('birojasa_id')
    # def get_tax_base(self):
    #     self.tax_base = self.birojasa_id.net_amount
    # Legacy helper (old API): tax-included computation — derives each
    # tax amount backwards from a price that already contains the taxes.
    def _unit_compute_inv(self, cr, uid, taxes, price_unit):
        res = []
        cur_price_unit = price_unit
        tax_parent_tot = 0.0
        for tax in taxes:
            if tax.type=='percent':
                tax_parent_tot += tax.amount
        for tax in taxes:
            if tax.type=='fixed':
                cur_price_unit -= tax.amount
        for tax in taxes:
            if tax.type=='percent':
                amount = (cur_price_unit / (1 + tax_parent_tot)) * tax.amount
            elif tax.type=='fixed':
                amount = tax.amount
            todo = 1
            values = {
                'id': tax.id,
                'todo': todo,
                'name': tax.name,
                'amount': amount,
                'account_collected_id': tax.account_id.id,
                'account_paid_id': tax.ref_account_id.id,
                'account_analytic_collected_id': tax.account_analytic_id.id,
                'account_analytic_paid_id': tax.ref_account_analytic_id.id,
                'base_code_id': tax.base_code_id.id,
                'ref_base_code_id': tax.ref_base_code_id.id,
                'base_sign': tax.base_sign,
                'tax_sign': tax.tax_sign,
                'ref_base_sign': tax.ref_base_sign,
                'ref_tax_sign': tax.ref_tax_sign,
                'price_unit': cur_price_unit,
                'tax_code_id': tax.tax_code_id.id,
                'ref_tax_code_id': tax.ref_tax_code_id.id,
            }
            res.append(values)
        total = 0.0
        for r in res:
            if r['todo']:
                total += r['amount']
        for r in res:
            # Strip the total tax from the unit price to get the excluded base.
            r['price_unit'] -= total
            r['todo'] = 0
        return res
    # Legacy wrapper: rounds the tax-included amounts to account precision.
    def compute_inv(self, cr, uid, taxes, price_unit, precision=None):
        if not precision:
            precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
        res = self._unit_compute_inv(cr, uid, taxes, price_unit)
        total = 0.0
        for r in res:
            if r.get('balance',False):
                r['amount'] = round(r['balance'], precision) - total
            else:
                r['amount'] = round(r['amount'], precision)
            total += r['amount']
        return res
    # Legacy helper (old API): tax-excluded computation — tax on top of the base.
    def _unit_compute(self, cr, uid, taxes, price_unit):
        res = []
        cur_price_unit=price_unit
        for tax in taxes:
            # we compute the amount for the current tax object and append it to the result
            if tax.type=='percent':
                amount = cur_price_unit * tax.amount
            elif tax.type=='fixed':
                amount = tax.amount
            data = {
                'id':tax.id,
                'name': tax.name,
                'amount': amount,
                'account_collected_id':tax.account_id.id,
                'account_paid_id':tax.ref_account_id.id,
                'account_analytic_collected_id': tax.account_analytic_id.id,
                'account_analytic_paid_id': tax.ref_account_analytic_id.id,
                'base_code_id': tax.base_code_id.id,
                'ref_base_code_id': tax.ref_base_code_id.id,
                'base_sign': tax.base_sign,
                'tax_sign': tax.tax_sign,
                'ref_base_sign': tax.ref_base_sign,
                'ref_tax_sign': tax.ref_tax_sign,
                'price_unit': cur_price_unit,
                'tax_code_id': tax.tax_code_id.id,
                'ref_tax_code_id': tax.ref_tax_code_id.id,
            }
            res.append(data)
        return res
    # Legacy wrapper: rounds the tax-excluded amounts to account precision.
    def _compute(self, cr, uid, taxes, price_unit, precision=None):
        if not precision:
            precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
        res = self._unit_compute(cr, uid, taxes, price_unit)
        total = 0.0
        for r in res:
            if r.get('balance',False):
                r['amount'] = round(r.get('balance', 0.0), precision) - total
            else:
                r['amount'] = round(r.get('amount', 0.0), precision)
            total += r['amount']
        return res
    # Recompute the withheld amount whenever the base or the tax type changes.
    @api.onchange('tax_base','tax_withholding_id')
    def get_tax_amount(self):
        amount = 0
        # if self.tax_base == 0:
        #     self.tax_base = self.birojasa_id.net_amount
        # for x in self.birojasa_id.line_dr_ids:
        #     if x.move_line_id.move_id:
        #         model = x.move_line_id.move_id.model
        #         res_id = x.move_line_id.move_id.transaction_id
        #         # if model == 'dym.proses.birojasa':
        #         #     for vou in self.env[model].browse([res_id]):
        #         #         for y in vou.line_dr_ids:
        #         #             self.tax_base = y.amount
        #         # elif model == 'account.invoice':
        #         #     for inv in self.env[model].browse([res_id]):
        #         #         self.tax_base = inv.amount_untaxed
        #     else:
        #         self.tax_base = x.amount
        if self.tax_base and self.tax_withholding_id and self.tax_withholding_id.type != 'none':
            precision = self.env['decimal.precision'].precision_get('Account')
            tax_compute_precision = precision
            # Extra digits when the company rounds globally instead of per line.
            if self.tax_withholding_id.company_id.tax_calculation_rounding_method == 'round_globally':
                tax_compute_precision += 5
            totalin = totalex = round(self.tax_base, precision)
            tin = []
            tex = []
            # Split by whether the withholding is included in the price.
            if not self.tax_withholding_id.price_include:
                tex.append(self.tax_withholding_id)
            else:
                tin.append(self.tax_withholding_id)
            tin = self.compute_inv(tin, self.tax_base, precision=tax_compute_precision)
            for r in tin:
                totalex -= r.get('amount', 0.0)
            totlex_qty = 0.0
            # NOTE(review): a plain assignment cannot raise — this try/except
            # looks like a leftover from a quantity-based computation.
            try:
                totlex_qty = totalex
            except:
                pass
            tex = self._compute(tex, totlex_qty, precision=tax_compute_precision)
            for r in tex:
                totalin += r.get('amount', 0.0)
            res = {
                'total': totalex,
                'total_included': totalin,
                'taxes': tin + tex
            }
            amount = sum(t['amount'] for t in res['taxes'])
        self.amount = amount
    @api.one
    @api.depends('name', 'internal_number')
    def get_display_name(self):
        # "INTERNAL (name)" when a bukti-potong number is set, else just INTERNAL.
        display_name = self.internal_number
        if self.name:
            display_name += ' (%s)' % self.name
        self.display_name = display_name
    @api.one
    @api.constrains('tax_withholding_id', 'birojasa_id')
    def check_tax_withholding(self):
        # Multi-company guard: the document's branch company must own the tax.
        if self.birojasa_id.branch_id.company_id != self.tax_withholding_id.company_id:
            raise Warning(_(
                'Voucher and Tax Withholding must belong to the same company'))
    @api.model
    def create(self, vals):
        """Assign the internal number from the tax's sequence and force the
        partner to the configured "Kas Negara" (state treasury) partner."""
        if vals.get('internal_number', '/') == '/':
            tax_withholding = self.tax_withholding_id.browse(
                vals.get('tax_withholding_id'))
            if not tax_withholding:
                raise Warning(_('Tax Withholding is Required!'))
            sequence = tax_withholding.sequence_id
            vals['internal_number'] = sequence.next_by_id(sequence.id) or '/'
        # NOTE(review): search may return several partners flagged kas_negara;
        # presumably exactly one is expected — verify data constraints.
        kas_negara_id = self.env['res.partner'].search([('kas_negara','=',True)])
        if not kas_negara_id:
            raise Warning(_(
                'Kas Negara belum ditentukan, silahkan centang Kas Negara di form Partner'))
        vals['partner_id'] = kas_negara_id.id
        return super(dym_proses_birojasa_withholding, self).create(vals)
# class dym_harga_bbn(models.Model):
# _inherit="dym.harga.bbn"
# partner_id = fields.Many2one('res.partner','Biro Jasa',domain="[('partner_type','=','Pihak_ke_3')]")
| [
"[email protected]"
] | |
29e29394f428b36553cae90bdd87ff1634203acf | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /pkgs/numexpr-2.6.1-np111py27_nomkl_0/lib/python2.7/site-packages/numexpr/version.py | 489f86851cdbbbb05f7542c3461194c22d116ca2 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 366 | py | ###################################################################
# Numexpr - Fast numerical array expression evaluator for NumPy.
#
# License: MIT
# Author: See AUTHORS.txt
#
# See LICENSE.txt and LICENSES/*.txt for details about copyright and
# rights to use.
####################################################################
version = '2.6.1'
| [
"[email protected]"
] | |
2d2d2b7570f4b96dd7bebf921993bfae435fa0e2 | b1d1797241a12fb318c0f841fceebece4e34078b | /{{cookiecutter.project_slug}}/src/{{cookiecutter.package_name}}/application_config.py | b7f5033eacd9881a72f8b9712e4aaac0cb2767e7 | [
"MIT"
] | permissive | ITISFoundation/cookiecutter-simcore-pyservice | 407a3012a20787bf48b0360498c8c45e5b8425d0 | 8e7cb6729f3ec663b6715cca54396075f1e4593e | refs/heads/master | 2022-08-24T04:50:58.561916 | 2022-06-10T14:28:53 | 2022-06-10T14:28:53 | 151,701,223 | 0 | 3 | MIT | 2022-06-10T14:29:52 | 2018-10-05T09:52:29 | Python | UTF-8 | Python | false | false | 2,089 | py | """ app's configuration
This module loads the schema defined by every subsystem and injects it in the
application's configuration scheams
It was designed in a similar fashion to the setup protocol of the application
where every subsystem is imported and queried in a specific order. The application
depends on the subsystem and not the other way around.
The app configuration is created before the application instance exists.
{# TODO: can this be done using declarative programming?? #}
{# TODO: add more strict checks with re #}
{# TODO: add support for versioning.
- check shema fits version
- parse/format version in schema
#}
"""
import logging
import trafaret as T
from servicelib import application_keys # pylint:disable=unused-import
from servicelib.application_keys import APP_CONFIG_KEY
from .resources import resources
from . import rest_config
logger = logging.getLogger(__name__)
def create_schema():
    """Aggregate the configuration schemas contributed by every subsystem
    into the trafaret schema used to validate the application's startup
    configuration file.
    """
    main_section = T.Dict({
        "host": T.IP,
        "port": T.Int(),
        "log_level": T.Enum(*logging._nameToLevel.keys()), # pylint: disable=protected-access
        "enabled_development_mode": T.Bool(),
    })

    schema = T.Dict({
        "version": T.String(),
        "main": main_section,
        rest_config.CONFIG_SECTION_NAME: rest_config.schema,
        ## Add here more configurations
    })

    # Guard against two subsystems claiming the same top-level section name
    section_names = [key.name for key in schema.keys]
    assert len(section_names) == len(set(section_names)), "Found repeated section names in %s" % section_names

    return schema
# app[APP_CONFIG_KEY] = key for config object
# (self-assignment re-exports the servicelib constant from this module)
APP_CONFIG_KEY = APP_CONFIG_KEY # pylint: disable=self-assigning-variable,bad-option-value
# config/${CLI_DEFAULT_CONFIGFILE}
# Default configuration file name, resolved under the package's config/ dir.
CLI_DEFAULT_CONFIGFILE = 'config-container-prod.yml'
# schema for app config's startup file
app_schema = create_schema()
# Fail fast at import time if the packaged default config file is missing.
assert resources.exists( 'config/' + CLI_DEFAULT_CONFIGFILE ), \
    "'config/%s' does not exist" % CLI_DEFAULT_CONFIGFILE
"[email protected]"
] | |
2dad42052ebb90a57b78dfa90f03fb1439d28cc0 | 7427e92cc5205276a1f4b7d6244b67fc8b31a976 | /reverseInParentheses.py | 74eb9eb7934a9289f6b2c8a924171b95d9b34192 | [] | no_license | newfull5/CodeSignal | 1d10837d33fa4126d6a59b76ee2b99b003043887 | ad24711a30b3ccd252247d0eee6cf4a0b4d96e1e | refs/heads/master | 2021-07-09T09:37:19.648588 | 2020-12-03T07:25:55 | 2020-12-03T07:25:55 | 216,835,128 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | def reverseInParentheses(inputString):
if inputString[0] == '(' or inputString[-1]:
return inputString[1:len(inputString)-1][::-1]
left = ''
right = ''
center = ''
answer = ''
centence = ''
for i in range(0, len(inputString)):
if inputString[i] == '(':
left += inputString[:i]
center += inputString[i+1:]
for i in range(1, len(inputString)):
if inputString[-i] == ')':
right += inputString[-i+1:]
answer += center[:(-i)]
centence = left + answer[::-1] + right
return centence
| [
"[email protected]"
] | |
61812c0f57e61aa84dc418b44729db8b264f680a | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /uWW8cZymSkrREdDpQ_16.py | 95cfdd8b10b76c690ed082e36332618b4dcb9bf5 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py |
def sums_up(lst):
    """Return {'pairs': [...]}: every pair of values from lst summing to 8.

    Pairs appear in the order their second member occurs in lst, and each
    pair is listed in ascending order.
    """
    pairs = []
    seen = []
    for current in lst:
        pairs.extend(sorted([previous, current])
                     for previous in seen
                     if previous + current == 8)
        seen.append(current)
    return {'pairs': pairs}
"[email protected]"
] | |
c7bc4f565ecd5059b7f2fbd110ed1c6137e32f9c | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_21520.py | 0c9970195ee6b358af5254485788d837a1dbd2a5 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | # jquery script on html file
<script type="text/javascript" src="/<script_path_here>"></script>
| [
"[email protected]"
] | |
a862eb31048836e74bd11de782ff4427ad37637a | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_092/ch70_2020_03_06_12_02_03_238346.py | 21ba0a3f5a97399809400d3fb07358fc813278a8 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py | def esconde_senha(n):
b = int(n)
return n*'*' | [
"[email protected]"
] | |
fc0e88abd089496570a23940186a2fd152baac22 | 82be39549f5d90b1ca1bb65407ae7695e1686ed8 | /code_challenges/147/hundred_days.py | fa433e7ef84669a91dd1d805173af3281a0475b4 | [] | no_license | dcribb19/bitesofpy | 827adc9a8984d01c0580f1c03855c939f286507f | a1eb0a5553e50e88d3568a36b275138d84d9fb46 | refs/heads/master | 2023-03-02T02:04:46.865409 | 2021-02-12T01:20:30 | 2021-02-12T01:20:30 | 259,764,008 | 1 | 0 | null | 2020-10-06T13:48:16 | 2020-04-28T22:16:38 | HTML | UTF-8 | Python | false | false | 484 | py | from datetime import date
from dateutil.rrule import rrule, DAILY, MO, TU, WE, TH, FR
TODAY = date(year=2018, month=11, day=29)


def get_hundred_weekdays(start_date=TODAY):
    """Return a list of hundred date objects starting from
    start_date up till 100 weekdays later, so +100 days
    skipping Saturdays and Sundays.

    Implemented with a plain datetime loop instead of dateutil.rrule: it
    drops the third-party dependency and yields ``date`` objects directly
    (rrule produced datetimes that then had to be converted back).
    """
    from datetime import timedelta

    weekdays = []
    current = start_date
    while len(weekdays) < 100:
        if current.weekday() < 5:  # Monday == 0 ... Friday == 4
            weekdays.append(current)
        current += timedelta(days=1)
    return weekdays
| [
"[email protected]"
] | |
36c01f762c49f4403f32c7ef8c027910a90c7932 | 2e927b6e4fbb4347f1753f80e9d43c7d01b9cba5 | /Section 30 - File IO/file_stats.py | 45669c705609fe90cbf3002134b9720195db5e94 | [] | no_license | tielushko/The-Modern-Python-3-Bootcamp | ec3d60d1f2e887d693efec1385a6dbcec4aa8b9a | 17b3156f256275fdba204d514d914731f7038ea5 | refs/heads/master | 2023-01-22T01:04:31.918693 | 2020-12-04T02:41:29 | 2020-12-04T02:41:29 | 262,424,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | #write a function to count number of lines, words and characters in the text file
def statistics(file_name):
with open(file_name) as file:
statistics = {
'lines': 0,
'words': 0,
'characters': 0
}
for line in file:
#line = line.strip('\n')
words = line.split()
statistics['lines'] += 1
statistics['words'] += len(words)
statistics['characters'] += len(line)
return statistics
def statistics_v2(file_name):
    """Count lines, words and characters in the text file (readlines variant).

    Words are split on runs of whitespace via str.split(), matching
    statistics().  The original used split(" "), which counted the empty
    strings produced by consecutive spaces and reported one word for a
    blank line.
    """
    with open(file_name) as file:
        lines = file.readlines()
        return { "lines": len(lines),
                 "words": sum(len(line.split()) for line in lines),
                 "characters": sum(len(line) for line in lines) }
"[email protected]"
] | |
584195b082aac85b8e8c4eafa628ba1fe5063afd | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/idea.py | 983ade7b2bb92a463b0bbc4bd76bcba2d94510de | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 2,847 | py | ii = [('BentJDO2.py', 45), ('EmerRN.py', 12), ('CookGHP3.py', 35), ('LyelCPG2.py', 14), ('MarrFDI.py', 16), ('RogePAV2.py', 35), ('CoolWHM2.py', 6), ('GodwWSL2.py', 55), ('ChanWS.py', 31), ('RogePAV.py', 15), ('SadlMLP.py', 77), ('FerrSDO3.py', 19), ('WilbRLW.py', 25), ('WilbRLW4.py', 39), ('RennJIT.py', 3), ('ProuWCM.py', 23), ('AubePRP2.py', 25), ('CookGHP.py', 34), ('ShawHDE.py', 3), ('MartHSI2.py', 24), ('LeakWTI2.py', 10), ('KembFJ1.py', 15), ('WilkJMC3.py', 21), ('WilbRLW5.py', 46), ('LeakWTI3.py', 8), ('PettTHE.py', 12), ('MarrFDI3.py', 12), ('PeckJNG.py', 8), ('KnowJMM.py', 1), ('BailJD2.py', 5), ('AubePRP.py', 30), ('ChalTPW2.py', 34), ('GellWPT.py', 22), ('AdamWEP.py', 11), ('FitzRNS3.py', 10), ('WilbRLW2.py', 49), ('ClarGE2.py', 35), ('GellWPT2.py', 25), ('WilkJMC2.py', 24), ('CarlTFR.py', 18), ('SeniNSP.py', 3), ('LyttELD.py', 4), ('CoopJBT2.py', 2), ('GrimSLE.py', 15), ('RoscTTI3.py', 13), ('AinsWRR3.py', 6), ('CookGHP2.py', 17), ('KiddJAE.py', 10), ('AdamHMM.py', 18), ('BailJD1.py', 8), ('RoscTTI2.py', 11), ('CoolWHM.py', 18), ('MarrFDI2.py', 22), ('CrokTPS.py', 9), ('ClarGE.py', 60), ('LandWPA.py', 3), ('BuckWGM.py', 11), ('IrviWVD.py', 22), ('LyelCPG.py', 20), ('GilmCRS.py', 9), ('WestJIT2.py', 17), ('DibdTRL2.py', 12), ('AinsWRR.py', 3), ('CrocDNL.py', 14), ('MedwTAI.py', 14), ('LandWPA2.py', 7), ('WadeJEB.py', 39), ('FerrSDO2.py', 11), ('TalfTIT.py', 4), ('NewmJLP.py', 13), ('GodwWLN.py', 39), ('CoopJBT.py', 3), ('KirbWPW2.py', 17), ('SoutRD2.py', 3), 
('BackGNE.py', 10), ('LeakWTI4.py', 6), ('LeakWTI.py', 5), ('MedwTAI2.py', 22), ('BachARE.py', 88), ('SoutRD.py', 2), ('DickCSG.py', 2), ('BuckWGM2.py', 2), ('WheeJPT.py', 15), ('MereHHB3.py', 4), ('HowiWRL2.py', 18), ('MereHHB.py', 2), ('WilkJMC.py', 16), ('HogaGMM.py', 21), ('MartHRW.py', 42), ('MackCNH.py', 3), ('WestJIT.py', 11), ('BabbCEM.py', 13), ('FitzRNS4.py', 71), ('CoolWHM3.py', 13), ('DequTKM.py', 1), ('FitzRNS.py', 29), ('BentJRP.py', 86), ('EdgeMHT.py', 13), ('BowrJMM.py', 1), ('LyttELD3.py', 1), ('FerrSDO.py', 10), ('RoscTTI.py', 17), ('ThomGLG.py', 3), ('StorJCC.py', 14), ('KembFJ2.py', 15), ('LewiMJW.py', 21), ('BabbCRD.py', 6), ('MackCNH2.py', 2), ('BellCHM.py', 31), ('JacoWHI2.py', 9), ('SomeMMH.py', 22), ('HaliTBC.py', 13), ('WilbRLW3.py', 63), ('AinsWRR2.py', 7), ('MereHHB2.py', 2), ('BrewDTO.py', 15), ('JacoWHI.py', 7), ('ClarGE3.py', 117), ('RogeSIP.py', 2), ('MartHRW2.py', 44), ('DibdTRL.py', 23), ('FitzRNS2.py', 130), ('HogaGMM2.py', 24), ('MartHSI.py', 41), ('EvarJSP.py', 29), ('DwigTHH.py', 20), ('SadlMLP2.py', 58), ('BowrJMM2.py', 6), ('LyelCPG3.py', 14), ('BowrJMM3.py', 3), ('BeckWRE.py', 5), ('TaylIF.py', 34), ('WordWYR.py', 1), ('DibdTBR.py', 3), ('ChalTPW.py', 20), ('ThomWEC.py', 7), ('KeigTSS.py', 27), ('KirbWPW.py', 25), ('WaylFEP.py', 2), ('BentJDO.py', 83), ('ClarGE4.py', 110), ('AdamJOA.py', 4)] | [
"[email protected]"
] | |
c3b020f232c2731e008a9a413e7639d1e601bce5 | 406d2e9e3850a8ac7dcae302aefc5fb9ba40e2dd | /02.NetworkPrograming/multiconn-server.py | 7c683543c62faec51e41af3333bc5ecc13df2e0d | [] | no_license | jeonghaejun/02.Thread-NetworkPrograming-MQTT | 565a0b72d98add71f1ff9a17998a03877ce8397a | 4757d9813dbb555cb3bd3602a645f1315619d371 | refs/heads/master | 2023-04-01T18:08:57.927328 | 2021-04-05T18:02:34 | 2021-04-05T18:02:34 | 329,620,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,062 | py | import socket
from _thread import *
def threaded(client_socket, addr):
print('Connected by :', addr[0], ':', addr[1])
while True:
try:
# 데이터가 수신되면 클라이언트에 다시 전송합니다.(에코)
data = client_socket.recv(1024)
if not data:
print('Disconnected by '+addr[0], ':', addr[1])
break
print('Received from '+addr[0], ':', addr[1], data.decode())
client_socket.send(data)
except ConnectionResetError as e:
print('Disconnected by '+addr[0], ':', addr[1])
break
client_socket.close()
HOST = '127.0.0.1'
PORT = 9999
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((HOST, PORT))
server_socket.listen()
print('server start')
while True:
print('wait')
client_socket, addr = server_socket.accept()
start_new_thread(threaded, (client_socket, addr))
server_socket.close()
| [
"[email protected]"
] | |
c3e9230912baf950221c94030dbce6778bb9e557 | 7137161629a1003583744cc3bd0e5d3498e0a924 | /airflow/providers/amazon/aws/operators/glue.py | 81d3468d592fdf9223937cf282e2a9faf3a40b54 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | jbampton/airflow | 3fca85975854eb916f16143b659a9119af143963 | dcfa14d60dade3fdefa001d10013466fe4d77f0d | refs/heads/master | 2023-05-25T22:31:49.104069 | 2021-09-18T19:18:32 | 2021-09-18T19:18:32 | 247,645,744 | 3 | 0 | Apache-2.0 | 2020-03-16T08:12:58 | 2020-03-16T08:12:57 | null | UTF-8 | Python | false | false | 5,479 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os.path
from typing import Optional
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.glue import AwsGlueJobHook
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
class AwsGlueJobOperator(BaseOperator):
    """
    Creates an AWS Glue Job. AWS Glue is a serverless Spark
    ETL service for running Spark Jobs on the AWS cloud.
    Language support: Python and Scala

    :param job_name: unique job name per AWS Account
    :type job_name: Optional[str]
    :param script_location: location of ETL script. Must be a local or S3 path
    :type script_location: Optional[str]
    :param job_desc: job description details
    :type job_desc: Optional[str]
    :param concurrent_run_limit: The maximum number of concurrent runs allowed for a job
    :type concurrent_run_limit: Optional[int]
    :param script_args: etl script arguments and AWS Glue arguments (templated)
    :type script_args: dict
    :param retry_limit: The maximum number of times to retry this job if it fails
    :type retry_limit: Optional[int]
    :param num_of_dpus: Number of AWS Glue DPUs to allocate to this Job.
    :type num_of_dpus: int
    :param region_name: aws region name (example: us-east-1)
    :type region_name: str
    :param s3_bucket: S3 bucket where logs and local etl script will be uploaded
    :type s3_bucket: Optional[str]
    :param iam_role_name: AWS IAM Role for Glue Job Execution
    :type iam_role_name: Optional[str]
    :param create_job_kwargs: Extra arguments for Glue Job Creation
    :type create_job_kwargs: Optional[dict]
    """

    # Only script_args is Jinja-templated; both dict fields render as JSON
    # in the task-instance "Rendered Template" view.
    template_fields = ('script_args',)
    template_ext = ()
    template_fields_renderers = {
        "script_args": "json",
        "create_job_kwargs": "json",
    }
    ui_color = '#ededed'

    def __init__(
        self,
        *,
        job_name: str = 'aws_glue_default_job',
        job_desc: str = 'AWS Glue Job with Airflow',
        script_location: Optional[str] = None,
        concurrent_run_limit: Optional[int] = None,
        script_args: Optional[dict] = None,
        retry_limit: Optional[int] = None,
        num_of_dpus: int = 6,
        aws_conn_id: str = 'aws_default',
        region_name: Optional[str] = None,
        s3_bucket: Optional[str] = None,
        iam_role_name: Optional[str] = None,
        create_job_kwargs: Optional[dict] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.job_name = job_name
        self.job_desc = job_desc
        self.script_location = script_location
        # Normalize falsy inputs to safe defaults.
        self.concurrent_run_limit = concurrent_run_limit or 1
        self.script_args = script_args or {}
        self.retry_limit = retry_limit
        self.num_of_dpus = num_of_dpus
        self.aws_conn_id = aws_conn_id
        self.region_name = region_name
        self.s3_bucket = s3_bucket
        self.iam_role_name = iam_role_name
        # Local scripts are uploaded under this prefix in s3_bucket.
        self.s3_protocol = "s3://"
        self.s3_artifacts_prefix = 'artifacts/glue-scripts/'
        self.create_job_kwargs = create_job_kwargs

    def execute(self, context):
        """
        Executes AWS Glue Job from Airflow

        :return: the id of the current glue job.
        """
        # If the script lives on the local filesystem, upload it to S3 first
        # (Glue can only read the script from S3).
        if self.script_location and not self.script_location.startswith(self.s3_protocol):
            s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
            script_name = os.path.basename(self.script_location)
            s3_hook.load_file(
                self.script_location, self.s3_artifacts_prefix + script_name, bucket_name=self.s3_bucket
            )
            s3_script_location = f"s3://{self.s3_bucket}/{self.s3_artifacts_prefix}{script_name}"
        else:
            s3_script_location = self.script_location
        glue_job = AwsGlueJobHook(
            job_name=self.job_name,
            desc=self.job_desc,
            concurrent_run_limit=self.concurrent_run_limit,
            script_location=s3_script_location,
            retry_limit=self.retry_limit,
            num_of_dpus=self.num_of_dpus,
            aws_conn_id=self.aws_conn_id,
            region_name=self.region_name,
            s3_bucket=self.s3_bucket,
            iam_role_name=self.iam_role_name,
            create_job_kwargs=self.create_job_kwargs,
        )
        self.log.info("Initializing AWS Glue Job: %s", self.job_name)
        # Start the run, then block until Glue reports a terminal state.
        glue_job_run = glue_job.initialize_job(self.script_args)
        glue_job_run = glue_job.job_completion(self.job_name, glue_job_run['JobRunId'])
        self.log.info(
            "AWS Glue Job: %s status: %s. Run Id: %s",
            self.job_name,
            glue_job_run['JobRunState'],
            glue_job_run['JobRunId'],
        )
        return glue_job_run['JobRunId']
| [
"[email protected]"
] | |
9babe7775b66eb37f494f78a5710eba7f275c9ef | fe5152ab8e656309f13bf29ededc4fe99ddfed45 | /pyguymer3/geo/_area.py | 5bab9ca971000a106ce9691521a219b45bb6845f | [
"Apache-2.0"
] | permissive | Guymer/PyGuymer3 | 763ba7ee61e20ac5afb262d428de2d7499c07828 | bc65f08f81ea0e76f5c9ae3d703056382f456f01 | refs/heads/main | 2023-08-23T14:42:34.997227 | 2023-08-19T08:39:54 | 2023-08-19T08:39:54 | 144,459,343 | 14 | 4 | Apache-2.0 | 2023-08-13T07:47:29 | 2018-08-12T10:59:59 | Python | UTF-8 | Python | false | false | 2,030 | py | #!/usr/bin/env python3
# Define function ...
def _area(triangle, /, *, eps = 1.0e-12, nmax = 100):
    """Calculate the area of a triangle on the surface of the Earth.

    Treats the triangle's second vertex as the apex: measures the two sides
    that meet there (Vincenty distances) and the interior angle between
    them, then applies the planar "half a*b*sin(C)" formula.

    Parameters
    ----------
    triangle : shapely.geometry.polygon.Polygon
        the triangle
    eps : float, optional
        the tolerance of the Vincenty formula iterations
    nmax : int, optional
        the maximum number of the Vincenty formula iterations

    Returns
    -------
    area : float
        the area (in metres-squared)

    Notes
    -----
    Copyright 2017 Thomas Guymer [1]_

    References
    ----------
    .. [1] PyGuymer3, https://github.com/Guymer/PyGuymer3
    """

    # Import standard modules ...
    import math

    # Import sub-functions ...
    from .calc_dist_between_two_locs import calc_dist_between_two_locs

    # Grab the apex of the triangle (its second vertex) ...
    apexLon = triangle.exterior.coords[1][0]                                    # [°]
    apexLat = triangle.exterior.coords[1][1]                                    # [°]

    # Measure the side running from the apex to the first vertex, along with
    # the bearing of the first vertex as seen from the apex ...
    sideA, bearingToFirst, _ = calc_dist_between_two_locs(
        apexLon,
        apexLat,
        triangle.exterior.coords[0][0],
        triangle.exterior.coords[0][1],
         eps = eps,
        nmax = nmax,
    )                                                                           # [m], [°]

    # Measure the side running from the apex to the third vertex, along with
    # the bearing of the third vertex as seen from the apex ...
    sideB, bearingToThird, _ = calc_dist_between_two_locs(
        apexLon,
        apexLat,
        triangle.exterior.coords[2][0],
        triangle.exterior.coords[2][1],
         eps = eps,
        nmax = nmax,
    )                                                                           # [m], [°]

    # Turn the pair of bearings in to the interior angle at the apex ...
    apexAngle = (bearingToThird - bearingToFirst) % 180.0                       # [°]

    # Return answer ...
    return 0.5 * sideA * sideB * math.sin(math.radians(apexAngle))
| [
"[email protected]"
] | |
def calc_factorial(num):
    """Return num! for a positive integer num.

    num may be anything int() accepts (e.g. 6 or "6"). Returns the string
    "Invalid Number" when num cannot be converted or is less than 1,
    preserving the original error contract.

    Fix: the original iterated range(num, 0, -1) with the *unconverted*
    argument, so a valid string like "5" raised an uncaught TypeError even
    though int(num) had succeeded.
    """
    try:
        n = int(num)
    except (TypeError, ValueError):
        return "Invalid Number"
    if n < 1:
        return "Invalid Number"
    factorial = 1
    # Multiply n, n-1, ..., 1 together.
    for i in range(n, 0, -1):
        factorial *= i
    return factorial


print(calc_factorial(6))
| [
"="
] | = |
97961378b173b4085f3f840f06171a75831c05aa | 98bd2625dbcc955deb007a07129cce8b9edb3c79 | /plot_counts.py | fdbf25f52b8502d1743c8f5413fa547a21879c5b | [] | no_license | melanieabrams/bremdata | 70d0a374ab5dff32f6d9bbe0a3959a617a90ffa8 | df7a12c72a29cca4760333445fafe55bb6e40247 | refs/heads/master | 2021-12-26T01:57:25.684288 | 2021-09-30T22:48:05 | 2021-09-30T22:48:05 | 166,273,567 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,593 | py | import sys
import matplotlib.pyplot as plt
import pandas as pd
'''USAGE: python plot_counts.py outfilename.pdf {'Tn-seq' or 'Bar-Seq'} filename1 filename2...'''
#import
outfilename=sys.argv[1]
input_type=sys.argv[2]
countfiles=sys.argv[3:]
countfile_ids=[]
for countfile in countfiles:
countfile_ids.append(countfile.split('.')[0])
def get_BarSeq_counts(countfile):
    """Parse a Bar-Seq count file and return counts normalized by line count.

    Each line holds a dict-like repr (e.g. "{'bc1': 12.0, 'bc2': 3.0}");
    every numeric value found after a ': ' separator is collected, then each
    count is divided by the number of lines in the file.

    Fixes two bugs in the original: it appended the undefined name
    `normalized_n` (the NameError was swallowed by a bare except, so the
    function always returned []), and the normalization loop divided the
    loop-invariant `n` instead of each element.
    """
    counts = []
    num_lines = 0.0
    with open(countfile) as f:
        for line in f:
            num_lines += 1
            # Strip the surrounding "{'" ... "}\n" and split into entries.
            for entry in line[2:-2].split(','):
                try:
                    # entry looks like "bc1': 12.0" -> keep the value part.
                    counts.append(float(entry.split(': ')[1]))
                except (IndexError, ValueError):
                    # Malformed fragment: skip it (the original bare
                    # except's intent), nothing else.
                    continue
    return [c / num_lines for c in counts]
def get_TnSeq_counts(countfile):
    """Return the insert counts from a Tn-Seq file.

    Each usable line is tab-delimited with the count as a float at column
    index 5; lines that are too short or non-numeric there are skipped.

    Cleanups vs. the original: the bare `except: None` (which also swallowed
    KeyboardInterrupt) is narrowed to the two expected failures, the
    redundant `f.close()` after the `with` block is dropped, and the dead
    commented-out normalization code plus its unused `lenF` counter are
    removed.
    """
    counts = []
    with open(countfile) as f:
        for line in f:
            fields = line.split('\t')
            try:
                counts.append(float(fields[5]))
            except (IndexError, ValueError):
                continue
    return counts
def plot_counts(all_counts):
    """Overlay one abundance histogram per input file and save the figure.

    all_counts: list of per-file count lists, drawn side by side. Legend
    labels come from the module-level `countfiles` and the output PDF path
    from `outfilename` (both parsed from sys.argv at module import).
    """
    fig=plt.figure()
    ax = fig.add_subplot(1,1,1)
    ax.hist(all_counts, label=countfiles)
    plt.title('skew')
    # plt.xlabel('normalized abundance')
    plt.xlabel('abundance')
    plt.ylabel('number of inserts')
    plt.legend()
    plt.savefig(outfilename, bbox_inches='tight', format='pdf',dpi=1000)
def plot_low_counts(all_counts):
    """Same histogram as the full plot, zoomed to abundances in [0, 1000].

    Saved as 'lowcounts' + outfilename so it never overwrites the full plot.
    """
    fig=plt.figure()
    ax = fig.add_subplot(1,1,1)
    ax.hist(all_counts, label=countfiles)
    plt.title('skew')
    # plt.xlabel('normalized abundance')
    plt.xlabel('abundance')
    plt.ylabel('number of inserts')
    # Restrict the x-axis to the low-abundance tail.
    plt.xlim(0,1000)
    plt.legend()
    plt.savefig('lowcounts'+outfilename, bbox_inches='tight', format='pdf',dpi=1000)
def plot_all_counts():
    """Load counts from every input file (in the Tn-Seq or Bar-Seq mode given
    on the command line) and draw both the full and the low-count histograms.
    """
    all_counts = []
    if input_type == 'Tn-Seq':
        for countfile in countfiles:
            all_counts.append(get_TnSeq_counts(countfile))
    elif input_type == 'Bar-Seq':
        for countfile in countfiles:
            all_counts.append(get_BarSeq_counts(countfile))
    else:
        # Fix: input_type is read from sys.argv[2] (sys.argv[1] is the output
        # filename), but the old message pointed users at sys.argv[1].
        print('specify Tn-Seq or Bar-Seq as sys.argv[2]')
        # Nothing was loaded -- do not fall through and write empty plots.
        return
    plot_counts(all_counts)
    plot_low_counts(all_counts)
plot_all_counts()
| [
"[email protected]"
] | |
673109ab4c5461a308417ef12eb2f6cc623e7e98 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/o11_j195024+500900/sdB_o11_j195024+500900_coadd.py | aef310d98cb00bed20d9f11a4f45b62002dc5092 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | from gPhoton.gMap import gMap
def main():
    """Build an NUV count movie (stepsz=30 steps -- presumably seconds;
    confirm against gPhoton's gMap docs) and a coadded count image for
    sdB o11_j195024+500900 at skypos RA 297.6, Dec 50.15 over a
    0.0333x0.0333 degree (2x2 arcmin) field.

    NOTE(review): cntfile is written under .../LIGHTCURVES/sdBs/... while
    cntcoaddfile uses .../LIGHTCURVES/sdB/... -- confirm the differing
    directory names are intentional.
    """
    gMap(band="NUV", skypos=[297.6,50.15], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_o11_j195024+500900/sdB_o11_j195024+500900_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_o11_j195024+500900/sdB_o11_j195024+500900_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
    main()
| [
"[email protected]"
] | |
9a63309ab3eaecfd3e247064c688d8925f9239b2 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/artificial/transf_Difference/trend_MovingAverage/cycle_0/ar_12/test_artificial_1024_Difference_MovingAverage_0_12_0.py | b87dd9e926e0fdf59bad191e4236d882a4458a27 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 270 | py | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 0, transform = "Difference", sigma = 0.0, exog_count = 0, ar_order = 12); | [
"[email protected]"
] | |
d4a8f2b08378b8ccd840b2cd6d84af62cf6968fb | 319d66c48f51e3d98e9df953d406a6f545b72363 | /Python/AppleAndOrange.py | 947c089dc2e7012ae7350fe7be420b21641c8820 | [
"Apache-2.0"
] | permissive | WinrichSy/HackerRank-Solutions | 291bc7a32dc4d9569d7028d6d665e86869fbf952 | ed928de50cbbbdf0aee471630f6c04f9a0f69a1f | refs/heads/master | 2022-07-18T15:43:48.865714 | 2020-05-16T00:21:56 | 2020-05-16T00:21:56 | 255,453,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 847 | py | #Apple and Orange
#https://www.hackerrank.com/challenges/apple-and-orange/problem
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the countApplesAndOranges function below.
def countApplesAndOranges(s, t, a, b, apples, oranges):
    """Print how many apples, then how many oranges, land on the house.

    The house spans [s, t]; the apple tree is at x=a and the orange tree at
    x=b, and each fruit's offset from its tree is given in the input lists.
    """
    on_house_apples = sum(1 for x in apples if s <= x + a <= t)
    on_house_oranges = sum(1 for x in oranges if s <= x + b <= t)
    print(on_house_apples)
    print(on_house_oranges)
if __name__ == '__main__':
    # Line 1: s t -> left/right x-coordinates of the house segment.
    st = input().split()
    s = int(st[0])
    t = int(st[1])
    # Line 2: a b -> x-coordinates of the apple and orange trees.
    ab = input().split()
    a = int(ab[0])
    b = int(ab[1])
    # Line 3: m n -> declared counts of apples and oranges (read but unused;
    # the lists below carry their own lengths).
    mn = input().split()
    m = int(mn[0])
    n = int(mn[1])
    # Lines 4-5: fruit fall distances relative to each tree.
    apples = list(map(int, input().rstrip().split()))
    oranges = list(map(int, input().rstrip().split()))
    countApplesAndOranges(s, t, a, b, apples, oranges)
| [
"[email protected]"
] | |
94c4b1013a6ccaae658ddc243a83d00404d36642 | aeb0a860196c7264bde3c02d51a350661520b6da | /HackerRank/Rank/medium/compress-string.py | 6455fc819d304a53ed16030bbc114489436ddb16 | [] | no_license | Md-Hiccup/Problem-Solving | fc9078288c59ef74fc1170e773ac20d17c28fbbb | edbee71b69aeec0b1dbeed78f033852af698b844 | refs/heads/master | 2022-12-10T02:35:24.315807 | 2022-03-16T12:50:44 | 2022-03-16T12:50:44 | 199,393,332 | 0 | 2 | null | 2022-12-08T01:22:44 | 2019-07-29T06:37:12 | Java | UTF-8 | Python | false | false | 648 | py | """
from itertools import groupby
# [k for k, g in groupby('AAAABBBCCDAABBB')] --> A B C D A B
# [list(g) for k, g in groupby('AAAABBBCCD')] --> AAAA BBB CC D
First, the character 1 occurs only once. It is replaced by (1, 1). Then the character 2 occurs three times, and it is replaced by (3, 2) and so on.
Sample Input
1222311
Sample Output
(1, 1) (3, 2) (1, 3) (2, 1)
"""
# Enter your code here. Read input from STDIN. Print output to STDOUT
from itertools import groupby
# Read the digit string to compress, e.g. '1222311'; int(i) below assumes
# every character is a digit.
inp = input()
# Way 1: explicit loop over the runs.
# for i,j in groupby(inp):
#     print((len(list(j)), int(i)), end=' ')
# Way 2: groupby yields (char, run-iterator) pairs; emit (run length, digit)
# tuples separated by spaces, e.g. (1, 1) (3, 2) (1, 3) (2, 1).
print(*[(len(list(j)), int(i)) for i, j in groupby(inp)])
"[email protected]"
] | |
f9e193313f7efcadc7d30522e7441224144c8203 | f4c5acc4d8923fd04dfcccff986cfd41a55510d5 | /MyGUI/views/helpers.py | 4ee283b8778fdf9893b213543de740ca3d360017 | [] | no_license | InesIvanova/python-gui-layer-for-bash-commands | 307d60d789d65fba62ff76c003d74c8c7efd3a9a | 3a4047af46bd1599ab2b9db82449bf79b1a3b238 | refs/heads/master | 2023-03-18T03:07:35.687810 | 2021-03-07T22:51:23 | 2021-03-07T22:51:23 | 345,472,377 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | from tkinter import Tk
def destroy_view(func):
    """Decorator for view-rendering functions taking the Tk root.

    Before calling the wrapped view it removes every grid- and pack-managed
    child from the root, so successive views never stack on top of each
    other.

    Cleanups vs. the original: list comprehensions used purely for side
    effects are now plain loops, and the wrapped function's return value is
    propagated instead of being discarded (the original always returned
    None, so this is backward compatible).
    """
    def wrapper(tk: Tk):
        for slave in tk.grid_slaves():
            slave.grid_forget()
        for slave in tk.pack_slaves():
            slave.pack_forget()
        return func(tk)
    return wrapper
| [
"[email protected]"
] | |
f88fe23779ff393ccdfd5300828f4964e5b4c3b0 | 84b5ac79cb471cad1d54ed1d2c842dc5581a03f0 | /branches/unstable/src/paella/kde/trait.py | b4c2a442e2b68c86579c1af9db573e2894c765e6 | [] | no_license | BackupTheBerlios/paella-svn | c8fb5ea3ae2a5e4ca6325a0b3623d80368b767f3 | d737a5ea4b40f279a1b2742c62bc34bd7df68348 | refs/heads/master | 2021-01-18T14:07:40.881696 | 2012-11-13T20:33:08 | 2012-11-13T20:33:08 | 40,747,253 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,292 | py | import os
import re
from qt import SLOT, SIGNAL, Qt
from qt import QSyntaxHighlighter
from qt import QColor, QWidget
from qt import QVBoxLayout, QHBoxLayout
from qt import QSplitter
from qtext import QextScintilla, QextScintillaLexer
from qtext import QextScintillaLexerPython
from kdeui import KMainWindow
from kdeui import KPopupMenu
from kdeui import KMessageBox, KTextEdit
from kdeui import KListView, KListViewItem
from kfile import KFileDialog
from paella.base.template import Template
from paella.profile.base import PaellaConfig
from paella.profile.base import PaellaConnection
from paella.profile.trait import Trait
from useless.db.midlevel import StatementCursor
from useless.kbase.gui import MainWindow, SimpleSplitWindow
from useless.kbase.gui import ViewWindow
from useless.kdb.gui import ViewBrowser
from useless.kdb.gui import RecordSelector
from paella.kde.base import RecordSelectorWindow
from paella.kde.xmlgen import TraitDoc, PackageDoc
from paella.kde.db.gui import dbwidget
#from paella.kde.differ import TraitList
from paella.kde.template import TemplateEditorWindow
from paella.kde.template import SimpleEdit
class AnotherView(QextScintilla):
    """Scintilla text view that picks a Python lexer when the first line of
    the displayed text mentions 'python' (e.g. a shebang line), and a plain
    lexer otherwise."""
    def __init__(self, app, parent):
        QextScintilla.__init__(self, parent)
        self.app = app
        # Keep both lexers alive for the widget's lifetime; setText switches
        # between them per document.
        self.pylex = QextScintillaLexerPython(self)
        self.lex = QextScintillaLexer(self)
    def setText(self, text):
        # Sniff the first line for 'python' to choose syntax highlighting.
        line = text.split('\n')[0]
        if 'python' in line:
            self.setLexer(self.pylex)
        else:
            self.setLexer(self.lex)
        QextScintilla.setText(self, text)
class PackageView(ViewBrowser):
    """Browser widget that renders a single package record via PackageDoc."""
    def __init__(self, app, parent):
        ViewBrowser.__init__(self, app, parent, PackageDoc)
class PackageSelector(RecordSelector):
    """Record selector over the '<suite>_packages' table, keyed by package
    name, grouped by priority/section/maintainer, and rendered with
    PackageView."""
    def __init__(self, app, parent, suite):
        # Each suite has its own packages table.
        table = '%s_packages' % suite
        fields = ['package', 'priority', 'section', 'installedsize',
                  'maintainer', 'version', 'description']
        idcol = 'package'
        groupfields = ['priority', 'section', 'maintainer']
        view = PackageView
        RecordSelector.__init__(self, app, parent, table, fields,
                                idcol, groupfields, view, 'PackageSelector')
class PackageSelectorWindow(KMainWindow):
    """Top-level window hosting a PackageSelector for one suite; shows
    itself immediately on construction."""
    def __init__(self, app, parent, suite):
        KMainWindow.__init__(self, parent, 'PackageSelector')
        self.app = app
        self.conn = app.conn
        self.mainView = PackageSelector(self.app, self, suite)
        # Point the record view's document at the chosen suite.
        self.mainView.recView.doc.set_suite(suite)
        self.setCentralWidget(self.mainView)
        self.show()
class TraitView(ViewBrowser):
    """HTML browser for one trait; hyperlinks inside the generated TraitDoc
    use an '<action>.<context>.<id>' URL scheme dispatched by setSource."""
    def __init__(self, app, parent):
        ViewBrowser.__init__(self, app, parent, TraitDoc)
    def set_trait(self, trait):
        # Re-render the document for the newly selected trait.
        self.doc.set_trait(trait)
        self.setText(self.doc.toxml())
    def set_suite(self, suite):
        # Point the document (and its Trait accessor) at another suite.
        self.doc.suite = suite
        self.doc.trait = Trait(self.app.conn, suite=suite)
    def setSource(self, url):
        # URLs are three dot-separated fields: <action>.<context>.<id>.
        action, context, id = str(url).split('.')
        if action == 'show':
            if context == 'parent':
                # Open the named parent trait in a fresh trait window.
                win = TraitMainWindow(self.app, self.parent(), self.doc.suite)
                win.view.set_trait(id)
            elif context == 'template':
                # Template ids arrive with '.' escaped as ',' and with the
                # package separated from the template name by '...'.
                fid = id.replace(',', '.')
                package, template = fid.split('...')
                win = ViewWindow(self.app, self.parent(), SimpleEdit, 'TemplateView')
                templatefile = self.doc.trait._templates.templatedata(package, template)
                win.view.setText(templatefile)
                win.resize(600, 800)
            elif context == 'script':
                # Show the named script read-only.
                scriptfile = self.doc.trait._scripts.scriptdata(id)
                win = ViewWindow(self.app, self.parent(), SimpleEdit, 'ScriptView')
                win.view.setText(scriptfile)
                win.resize(600, 800)
            else:
                self._url_error(url)
        elif action == 'edit':
            if context == 'templates':
                #win = KFileDialog('.', '*', self, 'hello file dialog', False)
                #win.show()
                win = TemplateEditorWindow(self.app, self.parent(), self.doc.suite)
            elif context == 'packages':
                win = PackageSelectorWindow(self.app, self.parent(), self.doc.suite)
            else:
                self._url_error(url)
        else:
            self._url_error(url)
class TraitMainWindow(SimpleSplitWindow):
    """Main window for browsing the traits of one suite: a list view of
    trait names on one side, a TraitView renderer on the other."""
    def __init__(self, app, parent, suite):
        SimpleSplitWindow.__init__(self, app, parent, TraitView, 'TraitMainWindow')
        self.app = app
        self.initActions()
        self.initMenus()
        self.initToolbar()
        self.conn = app.conn
        self.suite = suite
        self.cfg = app.cfg
        self.cursor = StatementCursor(self.conn)
        self.trait = Trait(self.conn, suite=suite)
        self.refreshListView()
        self.view.set_suite(suite)
        self.resize(600, 800)
        self.setCaption('%s traits' % suite)
    def initActions(self):
        # No custom actions yet; grab the collection so subclasses can add some.
        collection = self.actionCollection()
    def initMenus(self):
        mainMenu = KPopupMenu(self)
        menus = [mainMenu]
        self.menuBar().insertItem('&Main', mainMenu)
        self.menuBar().insertItem('&Help', self.helpMenu(''))
    def initToolbar(self):
        # Placeholder: the toolbar exists but carries no buttons yet.
        toolbar = self.toolBar()
    def initlistView(self):
        # NOTE(review): lowercase 'l' -- the framework probably expects
        # 'initListView', so this may never be called; confirm before relying
        # on the root-decoration/column setup below.
        self.listView.setRootIsDecorated(True)
        self.listView.addColumn('group')
    def refreshListView(self):
        # Rebuild the 'traits' folder with one child item per trait name.
        trait_folder = KListViewItem(self.listView, 'traits')
        for trait in self.trait.get_trait_list():
            item = KListViewItem(trait_folder, trait)
            item.trait = trait
    def selectionChanged(self):
        # Debug prints (Python 2 syntax) plus rendering of the selected trait.
        current = self.listView.currentItem()
        if hasattr(current, 'trait'):
            print 'trait is', current.trait
            self.view.set_trait(current.trait)
        if hasattr(current, 'suite'):
            print 'suite is', current.suite
        if hasattr(current, 'widget'):
            print 'widget is', current.widget
if __name__ == '__main__':
cfg = PaellaConfig()
conn = PaellaConnection(cfg)
t = Trait(conn, suite='kudzu')
| [
"umeboshi@cfc4e7be-4be4-0310-bcfe-fc894edce94f"
] | umeboshi@cfc4e7be-4be4-0310-bcfe-fc894edce94f |
efdf79e368752ad93463e91fbb35da14ce8d73cc | 61aa319732d3fa7912e28f5ff7768498f8dda005 | /util/update_copyright/__init__.py | 8046b58c3d0645928af9978505e51e9e8f5dc1e5 | [
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] | permissive | TeCSAR-UNCC/gem5-SALAM | 37f2f7198c93b4c18452550df48c1a2ab14b14fb | c14c39235f4e376e64dc68b81bd2447e8a47ff65 | refs/heads/main | 2023-06-08T22:16:25.260792 | 2023-05-31T16:43:46 | 2023-05-31T16:43:46 | 154,335,724 | 62 | 22 | BSD-3-Clause | 2023-05-31T16:43:48 | 2018-10-23T13:45:44 | C++ | UTF-8 | Python | false | false | 3,766 | py | # Copyright (c) 2020 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Utilities to parse and modify copyright headers in gem5 source.
"""
import re
org_alias_map = {
'arm': b'ARM Limited',
'uc': b'The Regents of the University of California',
}
_update_copyright_year_regexp = re.compile(b'(.*?)([0-9]+)$')

def _update_copyright_years(m, cur_year, org_bytes):
    '''
    Rebuild a copyright line with its year list extended to cur_year,
    e.g. b'2016, 2018-2019' -> b'2016, 2018-2020'.
    :param m: match containing only the years part of the string
    :type m: re.Match
    :param cur_year: the current year to update the copyright to
    :type cur_year: int
    :param org_bytes: the organization name placed after the years
    :type org_bytes: bytes
    :return: the full rebuilt copyright line
    :rtype: bytes
    '''
    cur_year_bytes = str(cur_year).encode()
    # Split the years into everything up to the last number and the last
    # number itself, e.g. b'2016, 2018-2019' -> (b'2016, 2018-', b'2019').
    years_match = _update_copyright_year_regexp.match(m.group(1))
    prefix = years_match.group(1)
    last_year_bytes = years_match.group(2)
    last_year = int(last_year_bytes.decode())
    if last_year == cur_year:
        # Already current: keep as-is.
        updated = last_year_bytes
    elif last_year == cur_year - 1:
        if prefix.endswith(b'-'):
            # Extend an existing range: b'2018-2019' -> b'2018-2020'.
            updated = cur_year_bytes
        else:
            # Start a new range: b'2019' -> b'2019-2020'.
            updated = last_year_bytes + b'-' + cur_year_bytes
    else:
        # Gap of more than one year: append a new entry.
        updated = last_year_bytes + b', ' + cur_year_bytes
    return b' Copyright (c) %b %b\n' % (prefix + updated, org_bytes)

def update_copyright(data, cur_year, org_bytes):
    '''
    Bump the first b' Copyright (c) <years> <org>\\n' line found in *data*
    so that its year list includes cur_year; everything else is untouched.
    '''
    pattern = re.compile(
        b' Copyright \\(c\\) ([0-9,\\- ]+) ' + org_bytes + b'\n',
        re.IGNORECASE
    )
    return pattern.sub(
        lambda match: _update_copyright_years(match, cur_year, org_bytes),
        data,
        count=1,
    )
| [
"[email protected]"
] | |
ca62c53ad5200dfd9ee5d1b839d9d12b82a2b858 | 430fd1fc3336b0e621dcfec40c7303ee8ed126c2 | /manage.py | 024b1c35bb0e183e3bf51a7749bc1b5f0eebe7e9 | [] | no_license | rikicop/kresky | 8ce106ddb8eb0496e1042add2a00734d777a3742 | 9bf8c181ff3976aef943eb841a2e53de2f005295 | refs/heads/main | 2023-02-21T05:55:31.399032 | 2021-01-31T23:03:42 | 2021-01-31T23:03:42 | 334,770,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks named on the command line."""
    # Point Django at this project's settings before management code imports them.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kresky_project.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the requested subcommand (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
2f3f019f5fc52ecbcefb36671238ff9cf02a68be | 883c475191edd39c1a36a0873fd2f460a07dbb59 | /autocti/instruments/euclid/header.py | 1279ec75bcd6013807c6d8e8fc504e40fc57d5e4 | [
"MIT"
] | permissive | Jammy2211/PyAutoCTI | c18f4bb373572be202d691cc6b1438211a2cd07b | 32e9ec7194776e5f60329e674942bc19f8626b04 | refs/heads/main | 2023-08-16T10:59:09.723401 | 2023-08-14T09:42:57 | 2023-08-14T09:42:57 | 156,396,410 | 6 | 2 | MIT | 2023-09-03T20:03:43 | 2018-11-06T14:30:28 | Python | UTF-8 | Python | false | false | 851 | py | from typing import Dict, Tuple, Optional
from autoarray.structures.header import Header
class HeaderEuclid(Header):
    """Header for Euclid CCD data: the generic array Header plus the CCD and
    quadrant identifiers of the Euclid readout electronics."""

    def __init__(
        self,
        header_sci_obj: Optional[Dict] = None,
        header_hdu_obj: Optional[Dict] = None,
        original_roe_corner: Optional[Tuple[int, int]] = None,
        readout_offsets: Optional[Tuple] = None,
        ccd_id: Optional[str] = None,
        quadrant_id: Optional[str] = None,
    ):
        """
        Parameters are forwarded unchanged to the base Header except for
        ``ccd_id`` and ``quadrant_id``, which are stored on this instance.
        The last character of ``ccd_id`` is used as the CCD row index (see
        ``row_index``); the exact id format is defined by the caller --
        TODO confirm against the Euclid loading code.

        Fix vs. the original: the ``Dict = None`` / ``Tuple[...] = None``
        style annotations are tightened to ``Optional[...]`` to match the
        None defaults.
        """
        super().__init__(
            header_sci_obj=header_sci_obj,
            header_hdu_obj=header_hdu_obj,
            original_roe_corner=original_roe_corner,
            readout_offsets=readout_offsets,
        )
        self.ccd_id = ccd_id
        self.quadrant_id = quadrant_id

    @property
    def row_index(self) -> Optional[str]:
        """The final character of ``ccd_id``, or None when no CCD id is set.

        Fix vs. the original: the property was annotated ``-> str`` yet fell
        through and implicitly returned None; both the annotation and the
        fall-through return are now explicit.
        """
        if self.ccd_id is not None:
            return self.ccd_id[-1]
        return None
| [
"[email protected]"
] | |
ac9b3edb41ce1ff5721302398091500bd8f0c622 | 568d7d17d09adeeffe54a1864cd896b13988960c | /project_blog/day02/ddblog/topic/models.py | 4274cbdcd6bab4f10850dc3972fe2120c065ee05 | [
"Apache-2.0"
] | permissive | Amiao-miao/all-codes | e2d1971dfd4cecaaa291ddf710999f2fc4d8995f | ec50036d42d40086cac5fddf6baf4de18ac91e55 | refs/heads/main | 2023-02-24T10:36:27.414153 | 2021-02-01T10:51:55 | 2021-02-01T10:51:55 | 334,908,634 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | from django.db import models
from user.models import UserProfile
# Create your models here.
class Topic(models.Model):
    """A blog article owned by one UserProfile (one user -> many topics)."""
    # Article title ('文章标题').
    title=models.CharField('文章标题',max_length=50)
    # Category flag; observed values per the original note: 'tec' / 'no-tec'.
    category=models.CharField('文章分类',max_length=20)
    # Visibility; observed values per the original note: 'public' / 'private'.
    limit=models.CharField('文章权限',max_length=20)
    # Short summary shown in listings ('文章简介').
    introduce=models.CharField('文章简介',max_length=50)
    # Full article body ('文章内容').
    content=models.TextField('文章内容')
    created_time=models.DateTimeField(auto_now_add=True)
    updated_time=models.DateTimeField(auto_now=True)
    # One-to-many foreign key (original note: "1对多的外键"); deleting the
    # user cascades to their topics.
    user_profile=models.ForeignKey(UserProfile,on_delete=models.CASCADE)
"[email protected]"
] | |
6a46ff7662d9a86c93c34daca53e6021b8af5ce5 | c43855664f500991da9c0bfb9872fb71dfcf77cf | /MxOnline_Django/apps/course/migrations/0007_course_tag.py | 7a2ddcaaf39a99a7401d88d6ad23c5dcc410ec5d | [] | no_license | zfh960126/first | a2bc128d16d99a0fc348773418f81654228cc6ff | cd51bcfbb8cdf83a21e5fd838f95c66abbec77bd | refs/heads/master | 2021-04-03T01:21:32.822960 | 2018-05-24T23:56:40 | 2018-05-24T23:56:40 | 124,852,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-05-04 10:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the `tag` CharField to course.Course.
    Generated operations should not be hand-edited."""
    dependencies = [
        ('course', '0006_course_category'),
    ]
    operations = [
        migrations.AddField(
            model_name='course',
            name='tag',
            field=models.CharField(default='', max_length=10, verbose_name='课程标签'),
        ),
    ]
| [
"[email protected]"
] | |
d144360eebe72f84e535278b005e2ab15ab08397 | c4bcb851c00d2830267b1997fa91d41e243b64c2 | /utils/actions/smooth/binary.py | 58550b7e2e8c58d232296cab4c3ca9aad97130b8 | [] | no_license | tjacek/cluster_images | 5d6a41114a4039b3bdedc34d872be4e6db3ba066 | 8c660c69658c64c6b9de66d6faa41c92486c24c5 | refs/heads/master | 2021-01-23T21:01:17.078036 | 2018-06-07T14:33:50 | 2018-06-07T14:33:50 | 44,881,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | import sys,os
sys.path.append(os.path.abspath('../cluster_images'))
import numpy as np
import utils.actions.smooth
class BinarizeActions(utils.actions.smooth.TimeSeriesTransform):
    """TimeSeriesTransform whose per-series transform quantizes each feature
    dimension by that series' standard deviation."""
    def get_series_transform(self,frames):
        # The per-dimension std deviation across frames becomes the
        # quantization scale; zero-variance dimensions map to 0 in
        # Binarization.__call__.
        frames=np.array(frames)
        fr_std= np.std(frames,axis=0)
        return Binarization(fr_std)
class Binarization(object):
    """Callable that integer-quantizes a feature vector dimension by
    dimension using a fixed per-dimension scale."""

    def __init__(self, var):
        # var: per-dimension scale (e.g. frame std deviations); a zero entry
        # marks a dead dimension that always quantizes to 0.
        self.var = var

    def __call__(self, x):
        quantized = []
        for dim, value in enumerate(x):
            scale = self.var[dim]
            if scale == 0:
                quantized.append(0)
            else:
                quantized.append(int(value / scale))
        return quantized
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.