Dataset schema (one row per source file):

| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |
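For orientation, here is a minimal sketch of how rows with this schema could be streamed and inspected with the Hugging Face `datasets` library. The dataset identifier `org/code-dataset` is a placeholder, not the actual name of this dataset.

```python
# Minimal sketch, assuming these rows are published as a Hugging Face dataset.
# "org/code-dataset" is a placeholder identifier; substitute the real one.
from datasets import load_dataset

ds = load_dataset("org/code-dataset", split="train", streaming=True)

for row in ds.take(3):
    # Each row carries repository metadata plus the full file text in `content`.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:200])  # first 200 characters of the source file
```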
**Row 1**

- blob_id: `1d9fbcc9d3db5195493acabf8c693f8b1d4d7abb`
- directory_id: `56a0762c741bcac3ab1172eb6114a9e59a48a5df`
- path: `/tutorados/urls.py`
- content_id: `2a9edff042fcb259b99285e88e6692f3844700c5`
- detected_licenses: `["MIT"]`
- license_type: permissive
- repo_name: `jjmartinr01/gauss3`
- snapshot_id: `54af1735a035a566f237d8e0fd9a6fe4447845a2`
- revision_id: `41a23d35c763890d8f729c9d63ac073673689400`
- branch_name: `refs/heads/master`
- visit_date: 2023-08-23T06:40:51.033857
- revision_date: 2023-08-08T11:50:50
- committer_date: 2023-08-08T11:50:50
- github_id: 171,710,013
- star_events_count: 1
- fork_events_count: 0
- gha_license_id: MIT
- gha_event_created_at: 2023-02-15T18:43:56
- gha_created_at: 2019-02-20T16:35:03
- gha_language: HTML
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 341
- extension: py
- content:

```python
# -*- coding: utf-8 -*-
from django.urls import path
from . import views

urlpatterns = [
    path('informes_seguimiento/', views.informes_seguimiento),
    path('ajax_informe_seguimiento/', views.ajax_informe_seguimiento),
    path('informes_tareas/', views.informes_tareas),
    path('ajax_informe_tareas/', views.ajax_informe_tareas),
]
```

- authors: `["[email protected]"]`
- author_id: (empty)
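For readers unfamiliar with Django URL routing, a brief illustration of how an app-level module like the one above is typically wired into a project. The project-level `urls.py` below is hypothetical and not part of the `gauss3` repository shown.

```python
# Hypothetical project-level urls.py, assuming the app above is installed as "tutorados".
from django.urls import include, path

urlpatterns = [
    # Requests under /tutorados/ are dispatched to the app's urlpatterns shown above.
    path('tutorados/', include('tutorados.urls')),
]
```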
**Row 2**

- blob_id: `ad5324159ca0060c785e7051432aad7ab77c8867`
- directory_id: `85a9ffeccb64f6159adbd164ff98edf4ac315e33`
- path: `/pysnmp/FRDTE-OPT-MIB.py`
- content_id: `77d19de2e78d7c6a5a5bab9832e5753674ad3af1`
- detected_licenses: `["Apache-2.0"]`
- license_type: permissive
- repo_name: `agustinhenze/mibs.snmplabs.com`
- snapshot_id: `5d7d5d4da84424c5f5a1ed2752f5043ae00019fb`
- revision_id: `1fc5c07860542b89212f4c8ab807057d9a9206c7`
- branch_name: `refs/heads/master`
- visit_date: 2020-12-26T12:41:41.132395
- revision_date: 2019-08-16T15:51:41
- committer_date: 2019-08-16T15:53:57
- github_id: 237,512,469
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: Apache-2.0
- gha_event_created_at: 2020-01-31T20:41:36
- gha_created_at: 2020-01-31T20:41:35
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 40,516
- extension: py
- content:

```python
#
# PySNMP MIB module FRDTE-OPT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/FRDTE-OPT-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:02:28 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
enterprises, MibIdentifier, ModuleIdentity, Gauge32, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, mgmt, ObjectIdentity, Unsigned32, Bits, TimeTicks, iso, Integer32, Counter32, IpAddress, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "enterprises", "MibIdentifier", "ModuleIdentity", "Gauge32", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "mgmt", "ObjectIdentity", "Unsigned32", "Bits", "TimeTicks", "iso", "Integer32", "Counter32", "IpAddress", "Counter64")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
codex = MibIdentifier((1, 3, 6, 1, 4, 1, 449))
cdxProductSpecific = MibIdentifier((1, 3, 6, 1, 4, 1, 449, 2))
cdx6500 = MibIdentifier((1, 3, 6, 1, 4, 1, 449, 2, 1))
cdx6500Configuration = MibIdentifier((1, 3, 6, 1, 4, 1, 449, 2, 1, 2))
cdx6500CfgProtocolGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1))
cdx6500PCTPortProtocolGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1))
cdx6500PCTStationProtocolGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3))
cdx6500Statistics = MibIdentifier((1, 3, 6, 1, 4, 1, 449, 2, 1, 3))
cdx6500StatProtocolGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1))
cdx6500PSTPortProtocolGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 1))
cdx6500PSTStationProtocolGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3))
cdx6500Controls = MibIdentifier((1, 3, 6, 1, 4, 1, 449, 2, 1, 4))
class Counter16(Integer32):
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 65535)
class DisplayString(OctetString):
pass
cdx6500PCTFRDTEPortTable = MibTable((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5), )
if mibBuilder.loadTexts: cdx6500PCTFRDTEPortTable.setStatus('mandatory')
cdx6500PCTFRDTEPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1), ).setIndexNames((0, "FRDTE-OPT-MIB", "cdx6500frdtepCfgPortNum"))
if mibBuilder.loadTexts: cdx6500PCTFRDTEPortEntry.setStatus('mandatory')
cdx6500frdtepCfgPortNum = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtepCfgPortNum.setStatus('mandatory')
cdx6500frdtepConnectionType = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 21, 100))).clone(namedValues=NamedValues(("simp", 1), ("dtr", 2), ("simpb", 21), ("nc", 100)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtepConnectionType.setStatus('mandatory')
cdx6500frdtepClockSource = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 100))).clone(namedValues=NamedValues(("int", 1), ("ext", 2), ("extint", 3), ("extlp", 4), ("nc", 100)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtepClockSource.setStatus('mandatory')
cdx6500frdtepClockSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1200, 2048000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtepClockSpeed.setStatus('mandatory')
cdx6500frdtepMaxStations = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtepMaxStations.setStatus('deprecated')
cdx6500frdtepFrameSeqCounting = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 100))).clone(namedValues=NamedValues(("normal", 1), ("extended", 2), ("nc", 100)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtepFrameSeqCounting.setStatus('mandatory')
cdx6500frdtepPktSeqCounting = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 100))).clone(namedValues=NamedValues(("normal", 1), ("extended", 2), ("nc", 100)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtepPktSeqCounting.setStatus('mandatory')
cdx6500frdtepCtrlProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 100))).clone(namedValues=NamedValues(("annexD", 1), ("none", 2), ("lmi", 3), ("annexA", 4), ("auto", 5), ("nc", 100)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtepCtrlProtocol.setStatus('mandatory')
cdx6500frdtepT391 = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 30))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtepT391.setStatus('mandatory')
cdx6500frdtepT392 = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtepT392.setStatus('mandatory')
cdx6500frdtepN391 = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 12), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtepN391.setStatus('mandatory')
cdx6500frdtepN392 = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtepN392.setStatus('mandatory')
cdx6500frdtepN393 = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 14), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtepN393.setStatus('mandatory')
cdx6500frdtepNT1 = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 30))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtepNT1.setStatus('mandatory')
cdx6500frdtepNT2 = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtepNT2.setStatus('mandatory')
cdx6500frdtepNN1 = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtepNN1.setStatus('mandatory')
cdx6500frdtepNN2 = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 18), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtepNN2.setStatus('mandatory')
cdx6500frdtepNN3 = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 19), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtepNN3.setStatus('mandatory')
cdx6500frdtepHighPriorityStn = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 20), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 254))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtepHighPriorityStn.setStatus('mandatory')
cdx6500frdtepMaxVoiceBWBitsPerSec = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 21), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2048000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtepMaxVoiceBWBitsPerSec.setStatus('mandatory')
cdx6500frdtepSegSizeVoicePresent = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(33, 65, 129, 257, 513, 1025, 100))).clone(namedValues=NamedValues(("segSize32", 33), ("segSize64", 65), ("segSize128", 129), ("segSize256", 257), ("segSize512", 513), ("segSize1024", 1025), ("nc", 100)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtepSegSizeVoicePresent.setStatus('mandatory')
cdx6500frdtepSegSizeVoiceNotPresent = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(33, 65, 129, 257, 513, 1025, 2049, 4097, 32000, 100))).clone(namedValues=NamedValues(("segSize32", 33), ("segSize64", 65), ("segSize128", 129), ("segSize256", 257), ("segSize512", 513), ("segSize1024", 1025), ("segSize2048", 2049), ("segSize4096", 4097), ("disable", 32000), ("nc", 100)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtepSegSizeVoiceNotPresent.setStatus('mandatory')
cdx6500frdtepInvertTXClock = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 100))).clone(namedValues=NamedValues(("no", 1), ("yes", 2), ("nc", 100)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtepInvertTXClock.setStatus('mandatory')
cdx6500frdtepControlProtocolOptions = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 25), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtepControlProtocolOptions.setStatus('mandatory')
cdx6500frdtepDiscardControlOptions = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 26), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("none", 1), ("debit", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtepDiscardControlOptions.setStatus('mandatory')
cdx6500frdtepElectricalInterfaceType = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 27), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("v24", 1), ("v35", 2), ("v36", 3), ("x21", 4), ("none", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtepElectricalInterfaceType.setStatus('mandatory')
cdx6500frdtepV24ElectricalInterfaceOption = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 28), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ri", 1), ("tm", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtepV24ElectricalInterfaceOption.setStatus('mandatory')
cdx6500frdtepHighSpeedElectricalInterfaceOption = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 29), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("none", 1), ("xover", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtepHighSpeedElectricalInterfaceOption.setStatus('mandatory')
cdx6500PPSTFRDTEPortTable = MibTable((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 1, 5), )
if mibBuilder.loadTexts: cdx6500PPSTFRDTEPortTable.setStatus('mandatory')
cdx6500PPSTFRDTEPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 1, 5, 1), ).setIndexNames((0, "FRDTE-OPT-MIB", "cdx6500frdtepStatsPortNum"))
if mibBuilder.loadTexts: cdx6500PPSTFRDTEPortEntry.setStatus('mandatory')
cdx6500frdtepStatsPortNum = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 1, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtepStatsPortNum.setStatus('mandatory')
cdx6500frdtepPortStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 1, 5, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 100))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2), ("busyOut", 3), ("up", 4), ("down", 5), ("na", 100)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtepPortStatus.setStatus('mandatory')
cdx6500frdtepPortSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 1, 5, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtepPortSpeed.setStatus('mandatory')
cdx6500frdtepUtilizationIn = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 1, 5, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtepUtilizationIn.setStatus('mandatory')
cdx6500frdtepUtilizationOut = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 1, 5, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtepUtilizationOut.setStatus('mandatory')
cdx6500frdtepCharInTotal = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 1, 5, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtepCharInTotal.setStatus('mandatory')
cdx6500frdtepCharOutTotal = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 1, 5, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtepCharOutTotal.setStatus('mandatory')
cdx6500frdtepCharsInPerSec = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 1, 5, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtepCharsInPerSec.setStatus('mandatory')
cdx6500frdtepCharsOutPerSec = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 1, 5, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtepCharsOutPerSec.setStatus('mandatory')
cdx6500frdtepFrameInTotal = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 1, 5, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtepFrameInTotal.setStatus('mandatory')
cdx6500frdtepFrameOutTotal = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 1, 5, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtepFrameOutTotal.setStatus('mandatory')
cdx6500frdtepFramesInPerSec = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 1, 5, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtepFramesInPerSec.setStatus('mandatory')
cdx6500frdtepFramesOutPerSec = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 1, 5, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtepFramesOutPerSec.setStatus('mandatory')
cdx6500frdtepOverrunErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 1, 5, 1, 14), Counter16()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtepOverrunErrors.setStatus('mandatory')
cdx6500frdtepUnderrunErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 1, 5, 1, 15), Counter16()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtepUnderrunErrors.setStatus('mandatory')
cdx6500frdtepCRCErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 1, 5, 1, 16), Counter16()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtepCRCErrors.setStatus('mandatory')
cdx6500SPCTFRDTEStationTable = MibTable((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2), )
if mibBuilder.loadTexts: cdx6500SPCTFRDTEStationTable.setStatus('mandatory')
cdx6500SPCTFRDTEStationEntry = MibTableRow((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1), ).setIndexNames((0, "FRDTE-OPT-MIB", "cdx6500frdtesCfgPortNum"), (0, "FRDTE-OPT-MIB", "cdx6500frdtesCfgStationNum"))
if mibBuilder.loadTexts: cdx6500SPCTFRDTEStationEntry.setStatus('mandatory')
cdx6500frdtesCfgPortNum = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesCfgPortNum.setStatus('mandatory')
cdx6500frdtesCfgDLCI = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(16, 1007))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesCfgDLCI.setStatus('mandatory')
cdx6500frdtesStationType = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("annexG", 1), ("bypass", 2), ("voiceRelay", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesStationType.setStatus('mandatory')
cdx6500frdtesCommInfoRate = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesCommInfoRate.setStatus('mandatory')
cdx6500frdtesCommBurstSize = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesCommBurstSize.setStatus('mandatory')
cdx6500frdtesTransDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesTransDelay.setStatus('mandatory')
cdx6500frdtesControlledMode = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 100))).clone(namedValues=NamedValues(("normal", 1), ("disable", 2), ("congested", 3), ("limit", 4), ("nc", 100)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesControlledMode.setStatus('mandatory')
cdx6500frdtesLinkAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 100))).clone(namedValues=NamedValues(("dte", 1), ("dce", 2), ("nc", 100)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesLinkAddress.setStatus('mandatory')
cdx6500frdtesPVCChannels = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 9), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesPVCChannels.setStatus('mandatory')
cdx6500frdtesStartingPVC = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 10), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesStartingPVC.setStatus('mandatory')
cdx6500frdtesSVCChannels = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 11), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesSVCChannels.setStatus('mandatory')
cdx6500frdtesStartingSVC = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 12), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesStartingSVC.setStatus('mandatory')
cdx6500frdtesInitialFrame = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 100))).clone(namedValues=NamedValues(("sabm", 1), ("disc", 2), ("none", 3), ("nc", 100)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesInitialFrame.setStatus('mandatory')
cdx6500frdtesRetryTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 14), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesRetryTimer.setStatus('mandatory')
cdx6500frdtesPollTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 15), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesPollTimer.setStatus('mandatory')
cdx6500frdtesTries = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 16), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesTries.setStatus('mandatory')
cdx6500frdtesFrameWinSize = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 17), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesFrameWinSize.setStatus('mandatory')
cdx6500frdtesPacketWinSize = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 18), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesPacketWinSize.setStatus('mandatory')
cdx6500frdtesMaxPacketSize = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(6, 7, 8, 9, 10, 11, 100))).clone(namedValues=NamedValues(("psize32", 6), ("psize64", 7), ("psize128", 8), ("psize256", 9), ("psize512", 10), ("psize1024", 11), ("nc", 100)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesMaxPacketSize.setStatus('mandatory')
cdx6500frdtesUpperQueue = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 20), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesUpperQueue.setStatus('mandatory')
cdx6500frdtesLowerQueue = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 21), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesLowerQueue.setStatus('mandatory')
cdx6500frdtesRestartTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 22), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesRestartTimer.setStatus('mandatory')
cdx6500frdtesResetTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 23), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesResetTimer.setStatus('mandatory')
cdx6500frdtesCallTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 24), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesCallTimer.setStatus('mandatory')
cdx6500frdtesClearTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 25), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesClearTimer.setStatus('mandatory')
cdx6500frdtesX25Options = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 26), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesX25Options.setStatus('deprecated')
cdx6500frdtesRCDestination = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 27), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesRCDestination.setStatus('mandatory')
cdx6500frdtesCUG = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 28), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 23))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesCUG.setStatus('mandatory')
cdx6500frdtesBillingRecords = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 29), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 100))).clone(namedValues=NamedValues(("off", 1), ("on", 2), ("nc", 100)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesBillingRecords.setStatus('mandatory')
cdx6500frdtesCfgStationNum = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 30), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesCfgStationNum.setStatus('mandatory')
cdx6500frdtesStnX25Options = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 31), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 24))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesStnX25Options.setStatus('mandatory')
cdx6500frdtesStnFrameSegmenter = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 32), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 100))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2), ("nc", 100)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesStnFrameSegmenter.setStatus('mandatory')
cdx6500frdtesStnVoiceSVCChannels = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 33), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesStnVoiceSVCChannels.setStatus('mandatory')
cdx6500frdtesStnVoiceCongCtrlMode = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 34), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 100))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2), ("nc", 100)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesStnVoiceCongCtrlMode.setStatus('mandatory')
cdx6500frdtesStnPeakUtilization = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 35), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 240))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesStnPeakUtilization.setStatus('mandatory')
cdx6500frdtesStnMaxInboundQueue = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 36), Integer32().subtype(subtypeSpec=ValueRangeConstraint(100, 2500))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesStnMaxInboundQueue.setStatus('mandatory')
cdx6500frdtesStnAnnexGRateReduction = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 3, 2, 1, 37), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 100))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2), ("nc", 100)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdx6500frdtesStnAnnexGRateReduction.setStatus('mandatory')
cdx6500SPSTFRDTEStationTable = MibTable((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2), )
if mibBuilder.loadTexts: cdx6500SPSTFRDTEStationTable.setStatus('mandatory')
cdx6500SPSTFRDTEStationEntry = MibTableRow((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1), ).setIndexNames((0, "FRDTE-OPT-MIB", "cdx6500frdtesStatsPortNum"), (0, "FRDTE-OPT-MIB", "cdx6500frdtesStatsStationNumber"))
if mibBuilder.loadTexts: cdx6500SPSTFRDTEStationEntry.setStatus('mandatory')
cdx6500frdtesStatsPortNum = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesStatsPortNum.setStatus('mandatory')
cdx6500frdtesStatsDLCI = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(16, 1007))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesStatsDLCI.setStatus('mandatory')
cdx6500frdtesUtilizationIn = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesUtilizationIn.setStatus('mandatory')
cdx6500frdtesUtilizationOut = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesUtilizationOut.setStatus('mandatory')
cdx6500frdtesMaxSVCCount = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesMaxSVCCount.setStatus('mandatory')
cdx6500frdtesCurrentSVCCount = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesCurrentSVCCount.setStatus('mandatory')
cdx6500frdtesCharInTotal = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesCharInTotal.setStatus('mandatory')
cdx6500frdtesCharOutTotal = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesCharOutTotal.setStatus('mandatory')
cdx6500frdtesCharsInPerSec = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesCharsInPerSec.setStatus('mandatory')
cdx6500frdtesCharsOutPerSec = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesCharsOutPerSec.setStatus('mandatory')
cdx6500frdtesPktInTotal = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesPktInTotal.setStatus('mandatory')
cdx6500frdtesPktOutTotal = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesPktOutTotal.setStatus('mandatory')
cdx6500frdtesPktsInPerSec = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesPktsInPerSec.setStatus('mandatory')
cdx6500frdtesPktsOutPerSec = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesPktsOutPerSec.setStatus('mandatory')
cdx6500frdtesPacketsQueued = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesPacketsQueued.setStatus('mandatory')
cdx6500frdtesFrameInTotal = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesFrameInTotal.setStatus('mandatory')
cdx6500frdtesFrameOutTotal = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesFrameOutTotal.setStatus('mandatory')
cdx6500frdtesFramesInPerSec = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 18), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesFramesInPerSec.setStatus('mandatory')
cdx6500frdtesFramesOutPerSec = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 19), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesFramesOutPerSec.setStatus('mandatory')
cdx6500frdtesInfoFramesIn = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesInfoFramesIn.setStatus('mandatory')
cdx6500frdtesInfoFramesOut = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesInfoFramesOut.setStatus('mandatory')
cdx6500frdtesRNRFramesIn = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesRNRFramesIn.setStatus('mandatory')
cdx6500frdtesRNRFramesOut = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesRNRFramesOut.setStatus('mandatory')
cdx6500frdtesRRFramesIn = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesRRFramesIn.setStatus('mandatory')
cdx6500frdtesRRFramesOut = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 25), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesRRFramesOut.setStatus('mandatory')
cdx6500frdtesREJFramesIn = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 26), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesREJFramesIn.setStatus('mandatory')
cdx6500frdtesREJFramesOut = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 27), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesREJFramesOut.setStatus('mandatory')
cdx6500frdtesDataPktsIn = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 28), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesDataPktsIn.setStatus('mandatory')
cdx6500frdtesDataPktsOut = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 29), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesDataPktsOut.setStatus('mandatory')
cdx6500frdtesResetStats = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 30), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("reset", 1), ("noReset", 2)))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: cdx6500frdtesResetStats.setStatus('mandatory')
cdx6500frdtesBoot = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 31), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("boot", 1), ("noBoot", 2)))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: cdx6500frdtesBoot.setStatus('mandatory')
cdx6500frdtesDisable = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 32), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("noDisable", 2)))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: cdx6500frdtesDisable.setStatus('mandatory')
cdx6500frdtesEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 33), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("noEnable", 2)))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: cdx6500frdtesEnable.setStatus('mandatory')
cdx6500frdtesStatsStationNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 449, 2, 1, 3, 1, 3, 2, 1, 34), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdx6500frdtesStatsStationNumber.setStatus('mandatory')
mibBuilder.exportSymbols("FRDTE-OPT-MIB", cdx6500frdtesUtilizationOut=cdx6500frdtesUtilizationOut, cdx6500frdtepFrameInTotal=cdx6500frdtepFrameInTotal, cdx6500StatProtocolGroup=cdx6500StatProtocolGroup, cdx6500PPSTFRDTEPortEntry=cdx6500PPSTFRDTEPortEntry, cdx6500frdtesClearTimer=cdx6500frdtesClearTimer, codex=codex, cdx6500frdtesPVCChannels=cdx6500frdtesPVCChannels, cdx6500frdtepStatsPortNum=cdx6500frdtepStatsPortNum, cdx6500frdtesRRFramesIn=cdx6500frdtesRRFramesIn, cdx6500frdtesStnX25Options=cdx6500frdtesStnX25Options, cdx6500PSTStationProtocolGroup=cdx6500PSTStationProtocolGroup, cdx6500frdtesStnVoiceCongCtrlMode=cdx6500frdtesStnVoiceCongCtrlMode, cdx6500frdtesLowerQueue=cdx6500frdtesLowerQueue, cdx6500PCTFRDTEPortEntry=cdx6500PCTFRDTEPortEntry, Counter16=Counter16, cdx6500frdtepT392=cdx6500frdtepT392, cdx6500frdtesDataPktsOut=cdx6500frdtesDataPktsOut, cdx6500frdtepNN1=cdx6500frdtepNN1, cdx6500frdtesControlledMode=cdx6500frdtesControlledMode, cdx6500frdtepCharsOutPerSec=cdx6500frdtepCharsOutPerSec, cdx6500frdtesRNRFramesIn=cdx6500frdtesRNRFramesIn, cdx6500frdtepNN2=cdx6500frdtepNN2, cdx6500frdtepClockSource=cdx6500frdtepClockSource, cdx6500frdtepClockSpeed=cdx6500frdtepClockSpeed, cdx6500frdtesStnFrameSegmenter=cdx6500frdtesStnFrameSegmenter, cdx6500PCTPortProtocolGroup=cdx6500PCTPortProtocolGroup, cdx6500frdtepInvertTXClock=cdx6500frdtepInvertTXClock, cdx6500frdtesCharInTotal=cdx6500frdtesCharInTotal, cdx6500frdtepPortSpeed=cdx6500frdtepPortSpeed, cdx6500frdtesCallTimer=cdx6500frdtesCallTimer, cdx6500frdtesRetryTimer=cdx6500frdtesRetryTimer, cdx6500frdtepCfgPortNum=cdx6500frdtepCfgPortNum, cdx6500frdtesStnAnnexGRateReduction=cdx6500frdtesStnAnnexGRateReduction, cdx6500frdtesDataPktsIn=cdx6500frdtesDataPktsIn, cdx6500frdtesTransDelay=cdx6500frdtesTransDelay, cdx6500frdtesFramesInPerSec=cdx6500frdtesFramesInPerSec, cdx6500frdtesCharsOutPerSec=cdx6500frdtesCharsOutPerSec, cdx6500frdtepFrameOutTotal=cdx6500frdtepFrameOutTotal, cdx6500frdtesRRFramesOut=cdx6500frdtesRRFramesOut, cdx6500SPSTFRDTEStationEntry=cdx6500SPSTFRDTEStationEntry, cdx6500frdtepV24ElectricalInterfaceOption=cdx6500frdtepV24ElectricalInterfaceOption, cdx6500frdtesStartingSVC=cdx6500frdtesStartingSVC, cdx6500frdtepN391=cdx6500frdtepN391, cdx6500SPCTFRDTEStationEntry=cdx6500SPCTFRDTEStationEntry, cdx6500frdtesStartingPVC=cdx6500frdtesStartingPVC, cdx6500frdtepMaxVoiceBWBitsPerSec=cdx6500frdtepMaxVoiceBWBitsPerSec, cdx6500Configuration=cdx6500Configuration, cdx6500frdtesMaxPacketSize=cdx6500frdtesMaxPacketSize, cdx6500frdtesCharOutTotal=cdx6500frdtesCharOutTotal, cdx6500frdtepNN3=cdx6500frdtepNN3, DisplayString=DisplayString, cdx6500frdtepCtrlProtocol=cdx6500frdtepCtrlProtocol, cdx6500frdtepCharOutTotal=cdx6500frdtepCharOutTotal, cdx6500frdtepPortStatus=cdx6500frdtepPortStatus, cdx6500frdtesPktInTotal=cdx6500frdtesPktInTotal, cdx6500frdtepNT2=cdx6500frdtepNT2, cdx6500frdtepHighSpeedElectricalInterfaceOption=cdx6500frdtepHighSpeedElectricalInterfaceOption, cdx6500frdtesInfoFramesIn=cdx6500frdtesInfoFramesIn, cdx6500frdtesResetStats=cdx6500frdtesResetStats, cdx6500frdtepConnectionType=cdx6500frdtepConnectionType, cdx6500frdtepFrameSeqCounting=cdx6500frdtepFrameSeqCounting, cdx6500frdtesStatsPortNum=cdx6500frdtesStatsPortNum, cdx6500frdtesResetTimer=cdx6500frdtesResetTimer, cdx6500frdtepN392=cdx6500frdtepN392, cdx6500frdtesTries=cdx6500frdtesTries, cdx6500frdtesRNRFramesOut=cdx6500frdtesRNRFramesOut, cdx6500frdtesStnPeakUtilization=cdx6500frdtesStnPeakUtilization, cdx6500frdtesFrameInTotal=cdx6500frdtesFrameInTotal, 
cdx6500frdtesPacketsQueued=cdx6500frdtesPacketsQueued, cdx6500frdtesCfgPortNum=cdx6500frdtesCfgPortNum, cdx6500frdtesCfgDLCI=cdx6500frdtesCfgDLCI, cdx6500PSTPortProtocolGroup=cdx6500PSTPortProtocolGroup, cdx6500frdtepUnderrunErrors=cdx6500frdtepUnderrunErrors, cdx6500frdtesSVCChannels=cdx6500frdtesSVCChannels, cdx6500PCTFRDTEPortTable=cdx6500PCTFRDTEPortTable, cdx6500frdtesFramesOutPerSec=cdx6500frdtesFramesOutPerSec, cdx6500frdtesDisable=cdx6500frdtesDisable, cdx6500frdtepControlProtocolOptions=cdx6500frdtepControlProtocolOptions, cdx6500frdtesPktsOutPerSec=cdx6500frdtesPktsOutPerSec, cdx6500SPSTFRDTEStationTable=cdx6500SPSTFRDTEStationTable, cdx6500frdtepOverrunErrors=cdx6500frdtepOverrunErrors, cdx6500frdtesCharsInPerSec=cdx6500frdtesCharsInPerSec, cdx6500frdtepElectricalInterfaceType=cdx6500frdtepElectricalInterfaceType, cdx6500frdtesREJFramesOut=cdx6500frdtesREJFramesOut, cdx6500frdtepUtilizationIn=cdx6500frdtepUtilizationIn, cdx6500frdtesREJFramesIn=cdx6500frdtesREJFramesIn, cdx6500frdtepHighPriorityStn=cdx6500frdtepHighPriorityStn, cdx6500frdtesCommInfoRate=cdx6500frdtesCommInfoRate, cdx6500frdtesPacketWinSize=cdx6500frdtesPacketWinSize, cdx6500frdtesUtilizationIn=cdx6500frdtesUtilizationIn, cdx6500frdtesFrameWinSize=cdx6500frdtesFrameWinSize, cdx6500frdtepCharsInPerSec=cdx6500frdtepCharsInPerSec, cdx6500frdtesFrameOutTotal=cdx6500frdtesFrameOutTotal, cdx6500frdtesInitialFrame=cdx6500frdtesInitialFrame, cdx6500frdtesX25Options=cdx6500frdtesX25Options, cdx6500CfgProtocolGroup=cdx6500CfgProtocolGroup, cdx6500frdtesInfoFramesOut=cdx6500frdtesInfoFramesOut, cdx6500frdtepCRCErrors=cdx6500frdtepCRCErrors, cdxProductSpecific=cdxProductSpecific, cdx6500frdtepUtilizationOut=cdx6500frdtepUtilizationOut, cdx6500frdtesPktOutTotal=cdx6500frdtesPktOutTotal, cdx6500SPCTFRDTEStationTable=cdx6500SPCTFRDTEStationTable, cdx6500Controls=cdx6500Controls, cdx6500Statistics=cdx6500Statistics, cdx6500frdtepN393=cdx6500frdtepN393, cdx6500frdtepSegSizeVoicePresent=cdx6500frdtepSegSizeVoicePresent, cdx6500frdtepMaxStations=cdx6500frdtepMaxStations, cdx6500frdtesLinkAddress=cdx6500frdtesLinkAddress, cdx6500frdtepDiscardControlOptions=cdx6500frdtepDiscardControlOptions, cdx6500frdtesRCDestination=cdx6500frdtesRCDestination, cdx6500frdtesCurrentSVCCount=cdx6500frdtesCurrentSVCCount, cdx6500PCTStationProtocolGroup=cdx6500PCTStationProtocolGroup, cdx6500frdtepFramesOutPerSec=cdx6500frdtepFramesOutPerSec, cdx6500frdtesStationType=cdx6500frdtesStationType, cdx6500frdtesBoot=cdx6500frdtesBoot, cdx6500frdtesStnVoiceSVCChannels=cdx6500frdtesStnVoiceSVCChannels, cdx6500frdtepNT1=cdx6500frdtepNT1, cdx6500frdtepFramesInPerSec=cdx6500frdtepFramesInPerSec, cdx6500frdtesStatsStationNumber=cdx6500frdtesStatsStationNumber, cdx6500frdtepT391=cdx6500frdtepT391, cdx6500frdtesBillingRecords=cdx6500frdtesBillingRecords, cdx6500frdtesStnMaxInboundQueue=cdx6500frdtesStnMaxInboundQueue, cdx6500frdtesStatsDLCI=cdx6500frdtesStatsDLCI, cdx6500frdtesUpperQueue=cdx6500frdtesUpperQueue, cdx6500frdtesMaxSVCCount=cdx6500frdtesMaxSVCCount, cdx6500frdtepCharInTotal=cdx6500frdtepCharInTotal, cdx6500frdtesCommBurstSize=cdx6500frdtesCommBurstSize, cdx6500frdtesCfgStationNum=cdx6500frdtesCfgStationNum, cdx6500frdtesPktsInPerSec=cdx6500frdtesPktsInPerSec, cdx6500frdtepSegSizeVoiceNotPresent=cdx6500frdtepSegSizeVoiceNotPresent, cdx6500frdtepPktSeqCounting=cdx6500frdtepPktSeqCounting, cdx6500=cdx6500, cdx6500frdtesRestartTimer=cdx6500frdtesRestartTimer, cdx6500frdtesCUG=cdx6500frdtesCUG, cdx6500PPSTFRDTEPortTable=cdx6500PPSTFRDTEPortTable, 
cdx6500frdtesEnable=cdx6500frdtesEnable, cdx6500frdtesPollTimer=cdx6500frdtesPollTimer)
```

- authors: `["[email protected]"]`
- author_id: (empty)
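As context for the generated module above, here is a minimal sketch of how pysnmp loads such a compiled MIB. The directory path is a placeholder, and the sketch assumes pysnmp is installed with its bundled base MIBs.

```python
# Minimal sketch: loading the generated FRDTE-OPT-MIB module with pysnmp.
# "/path/to/compiled-mibs" is a placeholder for wherever FRDTE-OPT-MIB.py lives.
from pysnmp.smi import builder

mibBuilder = builder.MibBuilder()
mibBuilder.addMibSources(builder.DirMibSource('/path/to/compiled-mibs'))
mibBuilder.loadModules('FRDTE-OPT-MIB')

# Pull one of the exported table columns back out and print its OID.
(cfgPortNum,) = mibBuilder.importSymbols('FRDTE-OPT-MIB', 'cdx6500frdtepCfgPortNum')
print(cfgPortNum.getName())  # (1, 3, 6, 1, 4, 1, 449, 2, 1, 2, 1, 1, 5, 1, 1)
```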
**Row 3**

- blob_id: `196cb48fcdbe649a2e58181b1da415c8ed75de4d`
- directory_id: `acb8e84e3b9c987fcab341f799f41d5a5ec4d587`
- path: `/langs/0/cp.py`
- content_id: `384994489e1d03b7b35680797cc0c3b747a290c6`
- detected_licenses: `[]`
- license_type: no_license
- repo_name: `G4te-Keep3r/HowdyHackers`
- snapshot_id: `46bfad63eafe5ac515da363e1c75fa6f4b9bca32`
- revision_id: `fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2`
- branch_name: `refs/heads/master`
- visit_date: 2020-08-01T12:08:10.782018
- revision_date: 2016-11-13T20:45:50
- committer_date: 2016-11-13T20:45:50
- github_id: 73,624,224
- star_events_count: 0
- fork_events_count: 1
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 485
- extension: py
- content:

```python
import sys

def printFunction(lineRemaining):
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # data to print
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            print

def main(fileName):
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'CP':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return

if __name__ == '__main__':
    main(sys.argv[1])
```

- authors: `["[email protected]"]`
- author_id: (empty)
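To make the tiny interpreter above concrete, here is a hypothetical two-line input file (call it `program.cp`; the name and contents are illustrative only), where each line starts with the `CP` keyword followed by a double-quoted string:

```
CP " hello world "
CP " "
```

Running `python2 cp.py program.cp` prints `hello world` and then a blank line; a line whose first token is not `CP` makes the script print `ERROR` and stop.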
**Row 4**

- blob_id: `f77666e52275503f32ffd175f4645ea453839b20`
- directory_id: `3388cf3dfde334e6eddc845879b48e9804d8d374`
- path: `/src/rocks-pylib/rocks/build.py`
- content_id: `5b81b235aeb6f48159b35972173f47e2c0547b5c`
- detected_licenses: `[]`
- license_type: no_license
- repo_name: `scottsakai/core`
- snapshot_id: `16c6d83a4ee33a534ab0e0a1462680a1183c7881`
- revision_id: `21bced45edd9b70258fa59929f09b102f7874060`
- branch_name: `refs/heads/master`
- visit_date: 2021-08-14T07:12:42.726105
- revision_date: 2017-11-14T23:57:36
- committer_date: 2017-11-14T23:57:36
- github_id: 109,899,541
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 50,617
- extension: py
- content:

```python
#! /opt/rocks/bin/python
#
# @Copyright@
#
# Rocks(r)
# www.rocksclusters.org
# version 6.2 (SideWinder)
#
# Copyright (c) 2000 - 2014 The Regents of the University of California.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice unmodified and in its entirety, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. All advertising and press materials, printed or electronic, mentioning
# features or use of this software must display the following acknowledgement:
#
# "This product includes software developed by the Rocks(r)
# Cluster Group at the San Diego Supercomputer Center at the
# University of California, San Diego and its contributors."
#
# 4. Except as permitted for the purposes of acknowledgment in paragraph 3,
# neither the name or logo of this software nor the names of its
# authors may be used to endorse or promote products derived from this
# software without specific prior written permission. The name of the
# software includes the following terms, and any derivatives thereof:
# "Rocks", "Rocks Clusters", and "Avalanche Installer". For licensing of
# the associated name, interested parties should contact Technology
# Transfer & Intellectual Property Services, University of California,
# San Diego, 9500 Gilman Drive, Mail Code 0910, La Jolla, CA 92093-0910,
# Ph: (858) 534-5815, FAX: (858) 534-7345, E-MAIL:[email protected]
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# @Copyright@
#
# $Log: build.py,v $
# Revision 1.52 2012/11/27 00:48:40 phil
# Copyright Storm for Emerald Boa
#
# Revision 1.51 2012/06/06 01:22:33 clem
# no more md5sum for all the rpm
#
# Revision 1.50 2012/05/06 05:48:46 phil
# Copyright Storm for Mamba
#
# Revision 1.49 2012/04/30 17:07:20 phil
# See if product.img only needs to be readable.
#
# Revision 1.48 2012/04/05 22:00:37 phil
# Now have flag to not create packages.md5. Temporary distributions don't need
# them.
#
# Revision 1.47 2012/03/26 19:46:00 phil
# Do not create cachedir. Test if not needed on 5 and 6.
#
# Revision 1.46 2012/02/09 21:20:38 phil
# convert to use subprocess module
#
# Revision 1.45 2012/01/06 21:58:14 phil
# Build a proper repo when no comps.xml file. Useful when bootstrapping
# and you don't have the base roll built yet.
#
# Revision 1.44 2011/07/23 02:30:49 phil
# Viper Copyright
#
# Revision 1.43 2011/06/24 19:25:07 phil
# Firewall documentation. Fix some typos in rulenames.
#
# Revision 1.42 2011/06/08 03:04:12 phil
# Sort listing for easier human reading
#
# Revision 1.41 2010/11/05 18:19:45 bruno
# added more files to packages.md5 (e.g., comps.xml, stage2.img, etc.)
#
# Revision 1.40 2010/09/07 23:53:08 bruno
# star power for gb
#
# Revision 1.39 2010/08/09 22:24:54 bruno
# create MD5 checksums for all the RPMs
#
# Revision 1.38 2009/06/24 04:46:12 bruno
# restore roll tweaks
#
# Revision 1.37 2009/05/01 19:07:08 mjk
# chimi con queso
#
# Revision 1.36 2008/12/18 21:41:17 bruno
# add the 'enabled' field to the rolls selection code while building a distro.
#
# Revision 1.35 2008/10/18 00:56:02 mjk
# copyright 5.1
#
# Revision 1.34 2008/05/29 18:06:45 bruno
# add full path to mksquashfs
#
# Revision 1.33 2008/03/06 23:41:44 mjk
# copyright storm on
#
# Revision 1.32 2007/12/10 21:28:35 bruno
# the base roll now contains several elements from the HPC roll, thus
# making the HPC roll optional.
#
# this also includes changes to help build and configure VMs for V.
#
# Revision 1.31 2007/06/23 04:03:24 mjk
# mars hill copyright
#
# Revision 1.30 2007/06/06 17:04:03 bruno
# nuke the "Couldn't find comps package" error message -- in the common case,
# it is a misleading message
#
# Revision 1.29 2006/09/12 21:56:58 bruno
# only apply RPMs from the current distro that is being built.
#
# this is a no-op when there is only a distro from one architecture, but when
# there are multiple architectures (e.g., for 'cross kickstarting'), then
# you want to apply the RPMS from the cross kickstarted distro when the
# 'arch' flag is present.
#
# Revision 1.28 2006/09/11 22:47:22 mjk
# monkey face copyright
#
# Revision 1.27 2006/08/10 00:09:41 mjk
# 4.2 copyright
#
# Revision 1.26 2006/07/19 01:33:27 bruno
# if the file is not an RPM, then just catch the exception
#
# Revision 1.25 2006/07/13 03:56:59 bruno
# make sure the critical RPMs that we build (anaconda, anaconda-runtime,
# kudzu and kudzu-devel) are included from the base roll.
#
# so, if those packages are present from an updated OS CD set and the
# timestamps on those packages are newer than the timestamps on the packages
# in the base roll, then we still will include the base roll packages.
#
# Revision 1.24 2006/06/13 21:48:48 bruno
# now using comps.xml file from native distro
#
# Revision 1.23 2006/06/05 17:57:37 bruno
# first steps towards 4.2 beta
#
# Revision 1.22 2006/01/16 06:48:59 mjk
# fix python path for source built foundation python
#
# Revision 1.21 2005/10/12 18:08:42 mjk
# final copyright for 4.1
#
# Revision 1.20 2005/09/23 04:51:21 bruno
# a workaround in order to build the OS roll
#
# Revision 1.19 2005/09/16 01:02:21 mjk
# updated copyright
#
# Revision 1.18 2005/08/18 22:09:01 bruno
# make torrent files in the resulting 'lan' distro
#
# Revision 1.17 2005/07/27 01:54:38 bruno
# checkpoint
#
# Revision 1.16 2005/07/11 23:51:35 mjk
# use rocks version of python
#
# Revision 1.15 2005/06/30 19:16:17 bruno
# patch netstg2.img in kernel roll, not with rocks-dist.
#
# this means the --public and --notouch flags are gone.
#
# Revision 1.14 2005/05/24 21:21:57 mjk
# update copyright, release is not any closer
#
# Revision 1.13 2005/04/29 01:14:25 mjk
# Get everything in before travel. Rocks-roll is looking pretty good and
# can now build the os roll (centos with updates). It looks like only the
# first CDROM of our os/centos roll is needed with 3 extra disks.
#
# - rocks-dist cleanup (tossed a ton of code)
# - rocks-roll growth (added 1/2 a ton of code)
# - bootable rolls do not work
# - meta rolls are untested
# - rocks-dist vs. rocks-roll needs some redesign but fine for 4.0.0
#
# Revision 1.12 2005/04/18 18:43:47 fds
# WAN kickstart authentication requires a different DN from the client than
# on the central's CA.
#
# Revision 1.11 2005/04/14 00:23:53 fds
# Keep it simple. Less throwing around keys.
#
# Revision 1.10 2005/04/01 21:04:49 fds
# Fixed wan distro building on new 4.0 beta frontends.
#
# Revision 1.9 2005/03/25 22:59:23 fds
# Added back boot ISO building. Cleaner and faster than before.
# Also keeping central's crpyto keys in USB key. Used if central is in
# lockdown.
#
# Revision 1.8 2005/03/21 23:46:30 bruno
# everything's a roll support added
#
# Revision 1.7 2005/03/16 20:49:10 fds
# Security and 411 keys on USB drive.
#
# Revision 1.6 2005/03/16 04:44:02 fds
# USB boot key image generator for rocks-dist
#
# Revision 1.5 2005/03/12 00:01:52 bruno
# minor checkin
#
# Revision 1.4 2005/03/10 01:18:21 fds
# Redoing brunos 1.2 diff that got lost. No kickstart-profiles.
#
# Revision 1.3 2005/03/10 00:08:14 fds
# Fix exception when we want to include all rolls, but dont have them
# all listed in the database.
#
# Revision 1.2 2005/03/02 21:19:02 bruno
# don't install rocks-kickstart-profiles -- it doesn't exist anymore
#
# Revision 1.1 2005/03/01 00:22:08 mjk
# moved to base roll
#
# Revision 1.170 2005/02/21 21:22:09 bruno
# now using 'rocks-build' to make all SRPMS
#
# Revision 1.169 2005/02/21 06:42:24 bruno
# the beginning of making a build-rocks.py script
#
# Revision 1.168 2005/01/26 23:09:39 mjk
# Rolls are indexed by name,version,arch. Last release was just name so
# multiple versions of a roll could not be installed. Now you can install
# whatever you want. Rocks-dist keeps track of this in the DB but this
# code does not know about the DB. For the install environment the
# rocks-dist --with-roll flag can be used inplace of the database.
#
# Revision 1.167 2005/01/18 16:36:08 fds
# rocks-dist mirror tries ftp first, then falls back to http. Now works
# with both ftp.rocksclusters.org, and centrals.
#
# Revision 1.166 2005/01/10 19:30:10 bruno
# netstg2.img is the default.
#
# this assumes we won't be going back to redhat 7.0 anytime soon.
#
# Revision 1.165 2004/11/29 21:14:47 fds
# Commit comment for version 163 got lost
#
# Revision 1.164 2004/11/04 23:52:04 fds
# Tweak
#
# Revision 1.163 2004/11/04 23:37:09 fds
# Support for notouch. Version support for cdrom isos. Build bootdisks.
#
# Revision 1.162 2004/11/03 19:37:09 fds
# Tweak: stay within your mirror tree.
#
# Revision 1.161 2004/11/02 02:11:48 fds
# Working towards bug 62: use http for rocks-dist mirror.
#
# Revision 1.160 2004/10/20 16:29:23 bruno
# set all references to 'ramdisk_size' to 150000
#
# Revision 1.159 2004/10/04 19:20:49 fds
# Uses getArchList to fix bug 25 (opteron installs i386 rolls). Also
# handles rolls with hyphens in name.
#
# Revision 1.158 2004/09/16 19:52:56 fds
# Dont die as easily.
#
# Revision 1.157 2004/09/16 17:35:34 bruno
# so close
#
# Revision 1.156 2004/09/14 19:47:38 bruno
# pretty close to making a working CD
#
# Revision 1.155 2004/08/10 14:37:26 bruno
# first pass at installing a frontend from a distribution that is housed
# on the frontend's local disk.
#
# Revision 1.154 2004/08/10 00:33:11 fds
# Handlers empty mirrors
#
# Revision 1.153 2004/04/28 21:05:44 fds
# Rocks-dist optimization for cross-kickstarting. Do not need the awkward
# --genhdlist flag anymore.
# o Will automatically find the native genhdlist executable, but
# o requires the native dist be made first.
#
# Revision 1.152 2004/04/27 23:50:35 fds
# Fixing rocks-dist cdrom
#
# Revision 1.151 2004/04/14 19:19:42 mjk
# select individual rolls
#
# Revision 1.150 2004/03/25 03:15:47 bruno
# touch 'em all!
#
# update version numbers to 3.2.0 and update copyrights
#
# Revision 1.149 2004/03/23 19:46:02 fds
# Tweaks.
#
# Revision 1.148 2004/03/23 19:24:24 fds
# Support for building central roll links.
#
# Revision 1.147 2004/03/18 15:54:13 mjk
# fix patch profiles paths
#
# Revision 1.146 2004/03/16 22:10:33 mjk
# fix profile paths for netstg2
#
# Revision 1.145 2004/03/08 23:26:12 mjk
# - Rolls are off to the side
# - Pristine distribution building
# - Files support chmod
# - Profiles are distribution local
#
# Revision 1.144 2004/03/03 19:36:37 fds
# Changes for cross-kickstarting
#
# Revision 1.143 2004/02/25 17:55:53 bruno
# send error messages from applyRPM to /dev/null.
#
# this is because the intel roll adds a path to the intel libraries and
# everytime ldconfig was called, you see errors like:
#
# /sbin/ldconfig: File /opt/intel_fc_80/lib/libcprts.so is too small, not checked
#
# and the 'expat' package calls ldconfig (and expat is patched into the distro)
#
# Revision 1.142 2004/01/07 22:14:41 bruno
# nuke the code that removed the 'modules' directory on the netstg2.
#
# this caused the ext3 driver to not be loaded and, consequently, a
# user could select ext3 as a file system type.
#
# Revision 1.141 2003/12/10 19:47:53 fds
# Using a real XML parser to manipulate the comps file.
#
# Revision 1.140 2003/11/05 01:17:15 bruno
# moved the netstg2.img inserting into a different part of the cd building
# flow
#
# Revision 1.139 2003/11/05 01:07:34 bruno
# make sure rocks-boot-netstage is on the rocks base CD
#
# Revision 1.138 2003/11/05 00:35:59 bruno
# put in the netstg2.img built by us
#
# Revision 1.137 2003/10/29 00:36:49 mjk
# - Added rebuild lock file (log file locking breaks iteration)
# - All rebuild state goes in spool directory
#
# Revision 1.136 2003/10/29 00:13:43 mjk
# more RHEL changes
#
# Revision 1.135 2003/10/28 23:20:56 mjk
# more RHEL rocks-rebuild changes
#
# Revision 1.134 2003/10/28 20:30:38 mjk
# use product-release name
#
# Revision 1.133 2003/10/27 20:05:00 bruno
# rhel-3
#
# Revision 1.132 2003/10/21 15:44:40 bruno
# removed debug statement
#
# Revision 1.131 2003/10/17 00:01:00 mjk
# get ISOs for beta
#
# Revision 1.130 2003/10/15 22:18:21 bruno
# now can build a bootable taroon-based CD that installs on a frontend
#
# Revision 1.129 2003/10/10 17:44:45 fds
# Redirect comps warnings so they dont annoy us.
#
# Revision 1.128 2003/10/09 00:00:25 fds
# Added expat to patchRPMs list
#
# Revision 1.127 2003/10/08 23:17:29 bruno
# to build CDs under taroon
#
# Revision 1.126 2003/10/07 19:24:44 mjk
# debug prints use --debug flag
#
# Revision 1.125 2003/10/07 18:33:12 fds
# Added support for multiple rpm archs in applyRPM, using the DistRPMList
# exception. Forgive me mjk, but I added another line of output which will
# help in debugging new redhat products.
#
# Revision 1.124 2003/10/06 22:47:14 fds
# Added buildstamp file to allow
# loader to 'verify' the netstg2 image. This string will also be
# used in the boot process in several places.
#
# Revision 1.123 2003/10/01 02:11:15 bruno
# fixes for anaconda 9
#
# Revision 1.122 2003/09/28 23:43:34 fds
# Slightly cleaner.
#
# Revision 1.121 2003/09/28 19:41:27 fds
# Changes for Taroon
#
# Revision 1.120 2003/09/24 17:08:45 fds
# Bruno's changes for RH 9
#
# Revision 1.119 2003/09/12 23:08:18 fds
# Added comps.xml parsing. More Exception handling.
#
# Revision 1.118 2003/09/11 18:56:38 fds
# Introduced BuildError exception, put spinner-cmd into its own function.
#
# Revision 1.117 2003/09/03 00:29:37 bruno
# little tweak
#
# Revision 1.116 2003/09/03 00:27:52 bruno
# building multiple CDs via xml config file
#
# Revision 1.115 2003/09/02 23:37:28 bruno
# flag to make all media set
#
# Revision 1.114 2003/08/28 02:37:07 bruno
# needed comma
#
# Revision 1.113 2003/08/27 23:10:55 mjk
# - copyright update
# - rocks-dist uses getArch() fix the i686 distro bug
# - ganglia-python spec file fixes (bad service start code)
# - found some 80col issues while reading code
# - WAN ks support starting
#
# Revision 1.112 2003/08/26 22:44:20 mjk
# - File tag now takes "expr" attribute (command evaluation)
# - Conversion of old code to file tags
# - Added media-server (used to be server)
# - Killed replace-server on the hpc roll
# - Updated Server database membership (now a media-server)
# - Added Public field to the membership table
# - Insert-ethers only allows a subset of memberships (Public ones) to be
# inserted.
# - Added getArch() to Application class
# - Kickstart trinity (kcgi,kpp,kgen) all updated self.arch initial value
#
# Revision 1.111 2003/08/15 22:34:46 mjk
# 3.0.0 copyright
#
# Revision 1.110 2003/08/13 22:12:54 mjk
# gingin changes
#
# Revision 1.109 2003/08/13 19:11:22 bruno
# changed media name to 'Rocks Base'
#
# Revision 1.108 2003/07/25 21:18:48 mjk
# - Fixed some files to tab spacing
# - Support rolls on the first CD
# - DVD building fixes
#
# Revision 1.107 2003/07/23 15:59:26 mjk
# - moved all disabled packages to node-thin
# - cdrecord is now less verbose
#
# Revision 1.106 2003/07/21 22:55:25 bruno
# added mini_httpd for rocks-boot building
#
# Revision 1.105 2003/07/19 00:34:09 bruno
# removed patching of CD and hard disk second stage loader
#
# Revision 1.104 2003/07/17 23:08:03 bruno
# pushing towards 2.3.3
#
# Revision 1.103 2003/07/10 15:28:04 bruno
# increased ramdisk size to 100000
#
# Revision 1.102 2003/07/07 20:28:52 bruno
# roll enablers
#
# Revision 1.101 2003/07/07 16:25:07 mjk
# IA64 redux
#
# Revision 1.100 2003/06/30 23:47:16 mjk
# ia64 source distro building changes
#
# Revision 1.99 2003/05/28 17:27:45 mjk
# overflow goes on 2nd CD
#
# Revision 1.98 2003/05/22 16:39:28 mjk
# copyright
#
# Revision 1.97 2003/04/24 16:56:13 mjk
# - Better DFS Graph traversing
# - Adding includes directory for the graph
#
# Revision 1.96 2003/04/03 20:57:03 bruno
# initialize some variables in the 'patch' section -- thanks najib!
#
# Revision 1.95 2003/04/01 00:07:00 mjk
# more mirror changes
#
# Revision 1.94 2003/03/28 20:40:56 bruno
# renamed CD disks to 1,2,3
#
# Revision 1.93 2003/03/28 19:09:27 bruno
# don't remove the 'modules' directory on the second stage loader
# if this is an ia64
#
# Revision 1.92 2003/03/26 20:40:52 bruno
# don't patch the modules into the second stage boot loaders
#
# Revision 1.91 2003/03/22 01:00:55 mjk
# RC 74.3245.32.fds.12
#
# Revision 1.90 2003/03/21 21:27:32 bruno
# mason likes this one
#
# Revision 1.89 2003/03/21 20:46:17 bruno
# mason says this is a good idea
#
# Revision 1.88 2003/02/28 18:43:10 bruno
# another fix to ia64 efi
#
# Revision 1.87 2003/02/28 17:40:32 bruno
# added more functionality to ia64 efi patching
#
# Revision 1.86 2003/02/22 17:39:27 bruno
# fixes to allow patching an ia64 frontend
#
# Revision 1.85 2003/02/17 18:43:04 bruno
# updated copyright to 2003
#
# Revision 1.84 2003/02/10 22:21:16 bruno
# if the CD size is 0.00, don't print 'CDROM-n : size 0.00'
#
# Revision 1.83 2003/01/25 05:38:49 bruno
# fix to the CD 'backfilling' code
#
# Revision 1.82 2003/01/22 19:16:46 bruno
# code to backfill a CD or DVD
#
# Revision 1.81 2002/12/21 17:10:17 bruno
# fine tune 'patch'
#
# Revision 1.80 2002/12/21 16:56:56 bruno
# more fixes to 'patch'
#
# Revision 1.79 2002/12/21 15:52:14 bruno
# tuned the 'patch' command
#
# Revision 1.78 2002/12/21 02:15:36 bruno
# added grub manipulation to the end of the 'patch' script
#
# Revision 1.77 2002/12/21 02:03:22 bruno
# support for frontend patching -- the 'patch' command
#
# Revision 1.76 2002/12/18 17:40:05 bruno
# now patch hdstg1.img -- this enables patching the frontend from its own
# distribution
#
# Revision 1.75 2002/11/15 21:18:17 mjk
# added --dvd flag
#
# Revision 1.74 2002/11/14 18:50:08 mjk
# added expat parser to pathing image
#
# Revision 1.73 2002/11/07 18:44:01 mjk
# only generate kickstart files once
#
# Revision 1.72 2002/11/06 22:37:40 mjk
# force patch RPMS onto cd1
#
# Revision 1.71 2002/10/29 16:18:23 bruno
# had to take out patching of rocks-boot into the image
#
# Revision 1.70 2002/10/28 20:16:20 mjk
# Create the site-nodes directory from rocks-dist
# Kill off mpi-launch
# Added rocks-backup
#
# Revision 1.69 2002/10/21 22:07:59 mjk
# removed forms from CD
#
# Revision 1.68 2002/10/18 21:33:26 mjk
# Rocks 2.3 Copyright
#
# Revision 1.67 2002/10/18 20:31:31 mjk
# multiple mirror fixes
#
# Revision 1.66 2002/10/18 19:58:40 mjk
# multiple mirror fixes
#
# Revision 1.65 2002/10/18 19:54:35 mjk
# create site-nodes symlink
#
# Revision 1.64 2002/10/18 19:20:11 mjk
# Support for multiple mirrors
# Fixed insert-copyright for new CVS layout
#
# Revision 1.63 2002/10/09 21:05:14 bruno
# we can now build a cdrom again (after source tree reorganization)
#
# Revision 1.62 2002/10/03 20:01:43 mjk
# move everything to /opt/rocks
#
# Revision 1.61 2002/08/31 00:05:04 bruno
# found a bug during 'upgrade' -- the link to /home/install/profiles/nodes
# is there, but since autofs isn't running, it a call to os.path.exist() will
# return false, then the call to os.symlink will throw an exception -- because
# the file is there!
#
# Revision 1.60 2002/07/10 18:54:03 bruno
# changes to make 7.3 installation from CD work
#
# Revision 1.59 2002/07/03 23:33:59 bruno
# added many more packages to the 'patch ekv' section -- now that we build
# the kickstart file on the installing system
#
# Revision 1.58 2002/03/19 23:03:36 bruno
# added multi cdrom building when select 'cdrom'
#
# Revision 1.57 2002/02/26 01:12:52 mjk
# - Remove more of the --cdrom stuff from bruno, thanks to my screwup
# - Added audiofile rpm back the x11 config (gnome needs sound, piece of crap)
# - Burned down a frontend and compute nodes looks pretty good.
#
# Revision 1.56 2002/02/23 00:10:46 bruno
# updates to handle 'negative' packages. the cdrom builder needs them and
# kickstarting nodes don't.
#
# Revision 1.55 2002/02/21 21:33:28 bruno
# added new copyright
#
# Revision 1.54 2002/02/15 21:44:39 mjk
# remove debug lines
#
# Revision 1.53 2002/02/14 02:12:29 mjk
# - Removed CD copy gui code from insert-ethers
# - Added CD copy code back to install.xml (using rocks-dist)
# - Added copycd command to rocks-dist
# - Added '-' packages logic to kgen
# - Other file changed to support above
#
# Revision 1.52 2002/02/12 23:50:34 mjk
# Already forgot
#
# Revision 1.51 2002/02/12 18:40:30 bruno
# nukin' unused code
#
# Revision 1.50 2002/02/12 18:31:47 bruno
# added 'w' to file open for .info file
#
# Revision 1.49 2002/02/12 05:46:10 mjk
# added fixCompFile method
#
# Revision 1.48 2002/02/08 21:58:36 bruno
# made subroutine 'patchImage' because we patch so many damn redhat images.
#
# Revision 1.47 2002/02/07 02:16:59 bruno
# needed to patch stage2.img instead of hdstg1.img for cd install
#
# Revision 1.46 2002/02/06 21:22:44 bruno
# all the little things that releases find ...
#
# Revision 1.45 2002/02/05 22:40:53 mjk
# Red Hat's comps.py file changed to support dependencies. The hdlist
# packages now supports the select()/unselect()/isSelected() methods --
# they weren't there before. Changing to method access versus member
# access is good, and it fixed some problems we had with metapackages
# unselecting individual components.
#
# Revision 1.44 2002/02/05 16:43:47 bruno
# added 'deselecting' of packages -- for cdrom support
#
# Revision 1.43 2002/01/18 23:43:27 bruno
# added 'mkcramfs' tool for 7.2
#
# Revision 1.42 2001/11/09 23:50:54 mjk
# - Post release ia64 changes
#
# Revision 1.40 2001/11/08 18:27:21 mjk
# - ia64 vs. i386 cdrom building
#
# Revision 1.39 2001/11/07 19:21:37 mjk
# - moved phpMyAdmin the /var/www/html
# - nuke cluster-config-* as special case rpms in rocks-dist (build.py)
# - moved around code in rocks-boot
# - 2.1.1 copyright
#
# Revision 1.37 2001/11/06 23:30:32 bruno
# cleaned up the information line about where the rocks.iso file is located
#
# Revision 1.36 2001/11/06 22:59:19 bruno
# added fuckin' piece-pipe
#
# Revision 1.35 2001/11/06 22:06:56 bruno
# added mkisofs and isolinux goodies to cdrom building
#
# Revision 1.34 2001/11/05 23:10:18 bruno
# fixed syntax error
#
# Revision 1.33 2001/11/05 22:12:16 bruno
# fixes for 2.1.1
#
# Revision 1.32 2001/11/05 18:36:56 bruno
# more changes for redhat 7.2
#
# Revision 1.31 2001/11/03 00:05:50 bruno
# first steps into 7.2 land
#
# Revision 1.30 2001/10/30 02:59:27 mjk
# left in debug statements
#
# Revision 1.29 2001/10/30 02:17:54 mjk
# - Were cooking with CGI kickstart now
# - added popen stuff to ks.py
# - verify command is dead
#
# Revision 1.28 2001/10/24 20:23:32 mjk
# Big ass commit
#
# Revision 1.26 2001/09/10 18:31:12 mjk
# wish I remembered what changed...
#
# Revision 1.25 2001/07/24 21:11:14 mjk
# Put --ignorearch back in for ekv patching
#
# Revision 1.24 2001/06/27 22:32:17 mjk
# - Added pssh.py module
# - Application now work when the HOME env var is not set
#
# Revision 1.23 2001/06/14 17:19:05 mjk
# - removed --ignorearch flag from ekv-anaconda patching. Need to by
# done on the correct arch anyway.
#
# - fixed stage2 filesystem size calculation to allow 20% inode
# overhead.
#
# Revision 1.22 2001/06/12 18:13:50 mjk
# - Added Force RPMS directory to docs
# - Always create a Force RPMS directory
#
# Revision 1.21 2001/05/29 17:12:21 mjk
# Added verify command support
#
# Revision 1.20 2001/05/23 22:42:25 mjk
# Preserve the force/RPMS dir
#
# Revision 1.19 2001/05/21 22:56:06 mjk
# Remove chroot code. Back to relocate for RPMs.
#
# Revision 1.18 2001/05/21 19:29:50 mjk
# - Cleanup
# - Don't create symlink for the ekv and piece-pipe packages anymore
#
# Revision 1.17 2001/05/17 16:11:18 bruno
# applyRPM fixes -- i hate redhat
#
# Revision 1.16 2001/05/16 21:44:40 mjk
# - Major changes in CD building
# - Added ip.py, sql.py for SQL oriented scripts
#
# Revision 1.15 2001/05/11 18:12:08 bruno
# cd building
#
# Revision 1.14 2001/05/10 00:04:44 mjk
# Unset LANG for build cdrom
#
# Revision 1.13 2001/05/09 22:33:10 mjk
# - better paths commads
# - more cdrom cleanup
#
# Revision 1.12 2001/05/09 20:50:04 mjk
# Added ekv-anaconda to list of CD rpms
#
# Revision 1.11 2001/05/09 20:17:21 bruno
# bumped copyright 2.1
#
# Revision 1.10 2001/05/07 22:29:14 mjk
# - Release candidate 1
#
# Revision 1.9 2001/05/04 22:58:53 mjk
# - Added 'cdrom' command, and CDBuilder class.
# - CDBuilder uses RedHat's source to parse the hdlist/comps file so we can
# trim the set of RPMs on our CD.
# - Weekend!
#
# Revision 1.8 2001/05/01 01:02:13 bruno
# added first pass at 'cd_distro' to build the cd-friendly directories.
# it's ugly -- katz, don't kill me.
#
# Revision 1.7 2001/04/27 01:08:50 mjk
# - Created working 7.0 and 7.1 distibutions (in same tree even)
# - Added symlink() method to File object. Trying to get the File object
# to make the decision on absolute vs. relative symlinks. So far we are
# absolute everywhere.
# - Still missing CD making code. Need to figure out how to read to
# comps files using RedHat's anaconda python code. Then we can decide
# which RPMs can go on the second CD based on what is required in the
# kickstart files.
#
# Revision 1.6 2001/04/24 20:59:22 mjk
# - Moved Bruno's eKV 2nd stage patching code over. And I even understand it.
# - The DistributionBuilder now changes the File object in the distribution as
# the links, or copies are done. This means the Tree always reflects what
# is on the disk, like it should have been in the first place.
# - Added CVS Log from cluster-dist to show the history of the monster
# - Last missing piece is CD building.
#
# Revision 1.5 2001/04/21 01:50:49 mjk
# - Added imortality to files so we can force old RPMS to always be in
# the distribution.
#
# - Added site/RPMS, site/SRPMS directories for local packages, as in Rocks
# RPMS.
#
# - Also resolve versions for SRPMS. The old cluster-dist didn't do this!
#
# - Added DistributionBuilder.applyRPM() method so make patching the
# dist easier.
#
# - Everything still works fine. But still missing Bruno's CD and eKV
# changes.
#
# Revision 1.4 2001/04/20 22:27:02 mjk
# - always apply the genhdlist rpm and run it
# - removed the newdist object from the DistributionBuilder
# - added template for RocksDistributionBuilder
# - Mirror code works
# - Added 'paths' command for learing how to find pathnames
#
# Revision 1.3 2001/04/20 01:53:18 mjk
# - Basic distribution building works. We now do either all symlink or
# all copies. The hybrid case wasn't needed and is a big mess-o-code.
#
# - CVS checkout for build directory works
#
# - Need to decide how to add Bruno's changes to cluster-dist back in.
#
# Revision 1.2 2001/04/18 23:17:10 mjk
# - Fixed some low level design bugs in Tree, and Distribution
#
# - The DistributionBuilder can now gather RPMS from all the correct
# sources. Still need version resolving code the the File and RPMFile
# objects. Also need to figure how to effeciently traverse this long
# List the RPMFiles.
#
# Revision 1.1 2001/04/18 01:20:38 mjk
# - Added build.py, util.py modules
#
# - Getting closer. I'm happy with the object model for building
# mirrors, and this will extend well to build the distributions.
#
# - Seriously needs a design document.
#
# Revision 1.1 2001/04/17 02:27:59 mjk
# Time for an initial checkin. Datastructure and general layout of the
# code is correct. Still need comparison code for File and RPM objects.
#
import sys
import os
import shutil
import re
import tempfile
import string
import time
import subprocess
import xml
import socket
import rocks.dist
import rocks.file
import rocks.util
import rocks
class BuildError(Exception):
pass
class Builder:
def __init__(self):
self.verbose = 0
self.debug = 0
self.versionMajor = int(rocks.version_major)
def build(self):
pass
def setVerbose(self, level=1):
self.verbose = level
def setDebug(self, level=1):
self.debug = level
class MirrorBuilder(Builder):
def __init__(self, m):
Builder.__init__(self)
self.mirrors = m
def build(self):
for m in self.mirrors:
dirs = []
if m.getRemoteReleasePath():
dirs.append(m.getRemoteReleasePath())
for dir in dirs:
self.buildMirror(m.getHost(), dir)
def buildMirror(self, host, path):
# Try FTP first, failover to HTTP
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((host, 21))
sock.close()
cmd = 'wget -m -nv ftp://%s//%s/' % (host, path)
except socket.error:
cmd = 'wget -m -nv -np http://%s//%s/' % (host, path)
sock = None
if self.verbose or self.debug:
print cmd
if not self.debug:
subprocess.call(cmd, shell=True)
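# Example of the command buildMirror() constructs (host and path here are
# only illustrative, not from the original source): for host
# "central.example.org" and path "pub/rocks" it runs
#	wget -m -nv ftp://central.example.org//pub/rocks/
# or, when port 21 is unreachable,
#	wget -m -nv -np http://central.example.org//pub/rocks/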
#
# this will be copied to the rolls directory to reliably return directory
# listings
#
directory_listing_cgi = """#!/usr/bin/env python
import os
try:
dir = os.environ['DOCUMENT_ROOT'] + os.environ['REQUEST_URI']
except:
dir = '.'
pass
out = ''
out += '<html>'
out += '<body>'
out += '<table>'
listing = os.listdir(dir)
listing.sort(key=str.lower)
for file in listing:
if file not in [ 'index.cgi' ]:
out += '<tr><td>\\n'
if os.path.isdir(os.path.join(dir, file)):
out += '<a href="%s/">%s/</a>\\n' % (file, file)
else:
out += '<a href="%s">%s</a>\\n' % (file, file)
out += '</td></tr>'
out += '\\n'
out += '</table>'
out += '</body>'
out += '</html>'
print 'Content-type: text/html'
print 'Content-length: %d' % (len(out))
print ''
print out
"""
class DistributionBuilder(Builder):
def __init__(self, dist, links=1):
Builder.__init__(self)
self.dist = dist
self.useLinks = links
self.compsPath = None
self.useRolls = {}
self.allRolls = 1
self.onlyRolls = 0
self.withSiteProfiles = 0
self.version = '1.0'
self.calcmd5 = 1
# Build the Tree objects for the Mirror and Distribution
		# trees.  The actual files for the distribution may or may not
		# exist.  We no longer nuke pre-existing distributions before
		# building a new one.  This will make mirroring simpler.
for mirror in self.dist.getMirrors():
if not mirror.isBuilt():
mirror.build()
if not self.dist.isBuilt():
self.dist.build()
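	# setRolls() selects which rolls to include: "list" is a sequence of
	# (name, version, enabled) tuples; passing an empty/None list means
	# "use every roll found on the mirrors".  "only" restricts the build
	# to roll RPMS (contrib/local RPMS are skipped in buildRPMSList).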
def setRolls(self, list, only=0):
if list:
for e in list:
self.useRolls[e[0]] = (e[1], e[2])
self.allRolls = 0
else:
self.useRolls = {}
self.allRolls = 1
self.onlyRolls = only
def setVersion(self, ver):
self.version = ver
def setSiteProfiles(self, bool):
self.withSiteProfiles = bool
def setCalcMD5(self, bool):
self.calcmd5 = bool
def clean(self):
# Nuke the previous distribution. The cleaner() method will
# preserve any build/ directory.
print 'Cleaning distribution'
self.dist.getTree('release').apply(self.cleaner)
def useRoll(self, key, ver, arch):
"Returns true if we should include this roll"
if arch == self.dist.arch:
if self.allRolls:
return 1
if self.useRolls.has_key(key):
version, enabled = self.useRolls[key]
if enabled and version == ver:
return 1
return 0
def getRollBaseFiles(self):
files = []
for m in self.dist.getMirrors():
for key, value in m.getRolls().items():
for arch, ver in value:
if self.useRoll(key, ver, arch):
print ' including "%s" (%s,%s) roll...' % \
(key, ver, arch)
files.extend(m.getRollBaseFiles(key,
ver,
arch))
return files
def getRollRPMS(self):
rpms = []
for m in self.dist.getMirrors():
for key, value in m.getRolls().items():
for arch, ver in value:
if self.useRoll(key, ver, arch):
print ' including "%s" (%s,%s) roll...' % \
(key, ver, arch)
rpms.extend(m.getRollRPMS(key,
ver,
arch))
return rpms
def getRollSRPMS(self):
rpms = []
for m in self.dist.getMirrors():
for key, value in m.getRolls().items():
for arch, ver in value:
if self.useRoll(key,ver,arch):
print ' including "%s" (%s,%s) roll...' % \
(key, ver, arch)
rpms.extend(m.getRollSRPMS(key,
ver,
arch))
return rpms
def buildRPMSList(self):
# Build and resolve the list of RPMS. Then drop in all
# the other non-rpm directories from the Mirror's release.
rpms = self.getRollRPMS()
for mirror in self.dist.getMirrors():
rpms.extend(mirror.getRPMS())
if not self.onlyRolls:
rpms.extend(self.dist.getContribRPMS())
rpms.extend(self.dist.getLocalRPMS())
if not os.path.isdir(self.dist.getForceRPMSPath()):
os.makedirs(self.dist.getForceRPMSPath())
else:
rpms.extend(self.dist.getForceRPMS())
return rpms
def buildSRPMSList(self):
# Build and resolve the list of SRPMS.
rpms = self.getRollSRPMS()
for mirror in self.dist.getMirrors():
rpms.extend(mirror.getSRPMS())
rpms.extend(self.dist.getContribSRPMS())
rpms.extend(self.dist.getLocalSRPMS())
return rpms
def buildRollLinks(self):
"""Links all rolls from our mirrors into rocks-dist/rolls/"""
print "Building Roll Links"
rollLocation = self.dist.getRollsPath()
subprocess.call('mkdir -p %s' % rollLocation, shell=True)
rolls = []
for mirror in self.dist.getMirrors():
rolldir = mirror.getRollsPath()
if not os.path.exists(rolldir):
continue
for d in os.listdir(rolldir):
rollpath = os.path.join(rolldir,d)
if os.path.isdir(rollpath):
rolls.append(rollpath)
here = os.getcwd()
os.chdir(rollLocation)
for r in rolls:
subprocess.call('ln -sf %s .' % (r), shell=True)
os.chdir(here)
def buildWANLinks(self, lanbase):
"""Links in the stage2.img from lan/"""
print "Linking boot stages from lan"
wanbase = self.dist.getBasePath()
subprocess.call('rm -rf %s' % wanbase, shell=True)
subprocess.call('mkdir -p %s' % wanbase, shell=True)
subprocess.call('ln -s %s/* %s' % (lanbase, wanbase), shell=True)
def buildBase(self):
print 'Resolving versions (base files)'
self.dist.setBaseFiles(self.resolveVersions(self.getRollBaseFiles()))
def touchCriticalFiles(self, m, key, ver, arch):
criticalfiles = [ 'anaconda', 'anaconda-runtime',
'kudzu', 'kudzu-devel' ]
for rpm in m.getRollRPMS(key,ver,arch):
try:
if rpm.getPackageName() in criticalfiles:
rpm.timestamp = int(time.time())
except:
pass
def includeCriticalRPMS(self):
print 'Including critical RPMS'
#
# there are some standard RPMs that we build in order for our
		# modifications to the installer to work correctly.  This function
# ensures that the rocks-built standard RPMs are always included
# and the ones from OS CDs are not.
#
for m in self.dist.getMirrors():
for key, value in m.getRolls().items():
if key != 'base':
continue
for arch, ver in value:
if self.useRoll(key, ver, arch):
self.touchCriticalFiles(m,key,ver,arch)
def buildRPMS(self):
print 'Resolving versions (RPMs)'
self.dist.setRPMS(self.resolveVersions(self.buildRPMSList()))
def buildSRPMS(self):
print 'Resolving versions (SRPMs)'
self.dist.setSRPMS(self.resolveVersions(self.buildSRPMSList()))
def insertNetstage(self):
print 'Applying netstage (aka stage2)'
cmd = 'rm -f %s/RedHat/base/stage2.img' % (self.dist.getReleasePath())
subprocess.call(cmd, shell=True)
## Note for CentOS7 rocks-boot has all the net/cdrom/EFI components
try:
if self.versionMajor >= 7:
rpm = 'rocks-boot'
else:
rpm = 'rocks-boot-netstage'
self.applyRPM(rpm, self.dist.getReleasePath())
except:
print "Couldn't find the package %s" % rpm
print "\tIf you are building the OS roll, this is not a problem"
pass
print 'Applying rocks-anaconda-updates'
if self.versionMajor < 7:
cmd = 'rm -f %s/RedHat/base/updates.img' % (self.dist.getReleasePath())
subprocess.call(cmd, shell=True)
## Note for CentOS7 rocks-anaconda-updates only contains comps.xml
try:
self.applyRPM('rocks-anaconda-updates',
self.dist.getReleasePath())
except:
print "Couldn't find the package rocks-anaconda-updates"
print "\tIf you are building the OS roll, this is not a problem"
pass
return
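	# build() drives the whole pipeline: clean the old release, sync the
	# mirror, resolve base files and (S)RPMS versions, materialize the
	# tree (symlinks or copies), drop in the netstage/updates images,
	# install the XML kickstart profiles, run createrepo, rebuild
	# product.img, and write the directory-listing CGI.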
def build(self):
self.clean()
self.dist.syncMirror()
self.buildBase()
self.includeCriticalRPMS()
self.buildRPMS()
self.buildSRPMS()
print 'Creating files',
if self.useLinks:
print '(symbolic links - fast)'
else:
print '(deep copy - slow)'
self.dist.getReleaseTree().apply(self.builder)
self.dist.getReleaseTree().apply(self.normalizer)
self.insertNetstage()
self.buildKickstart()
print ' Calling Yum genpkgmetadata.py'
self.createrepo()
print ' Rebuilding Product Image including md5 sums'
self.buildProductImg()
print ' Creating Directory Listing'
self.makeDirListing()
return
def buildKickstart(self):
print 'Installing XML Kickstart profiles'
build = self.dist.getBuildPath()
for rpm in self.dist.getRPMS():
tok = rpm.getBaseName().split('-')
if tok[0] != 'roll':
continue
try:
k = tok.index('kickstart')
rollname = '-'.join(tok[1:k])
except ValueError:
continue
print ' installing "%s" profiles...' % rollname
self.applyRPM(rpm.getBaseName(), build)
# Copy local profiles into the distribution.
if self.withSiteProfiles:
print ' installing "site" profiles...'
tree = self.dist.getSiteProfilesTree()
for dir in tree.getDirs():
for file in tree.getFiles(dir):
path = os.path.join(build, dir)
if not os.path.isdir(path):
os.makedirs(path)
shutil.copy(file.getFullName(),
os.path.join(path, file.getName()))
# make sure apache can read site XML
file.chmod(0664)
def applyRPM(self, name, root, flags=''):
"""Used to 'patch' the new distribution with RPMs from the
distribution. We use this to always get the correct
genhdlist, and to apply eKV to Rocks distributions.
Throws a ValueError if it cannot find the specified RPM, and
BuildError if the RPM was found but could not be installed."""
rpm = None
try:
rpm = self.dist.getRPM(name)
except rocks.dist.DistRPMList, e:
for r in e.list:
if r.getPackageArch() == self.dist.getArch():
rpm = r
break
if not rpm:
raise ValueError, "could not find %s" % name
dbdir = os.path.join(root, 'var', 'lib', 'rpm')
if not os.path.isdir(dbdir):
os.makedirs(dbdir)
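		# "reloc" is the exit status of the query below: it is non-zero
		# when the package declares relocation prefixes (the grep for
		# "none" fails), in which case we install with --prefix;
		# otherwise the package is force-relocated with
		# --badreloc --relocate /=<root>.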
reloc = subprocess.call("rpm -q --queryformat '%{prefixes}\n' -p " +
rpm.getFullName() + "| grep none > /dev/null", shell=True)
cmd = 'rpm -i --ignoresize --nomd5 --force --nodeps --ignorearch '
cmd += '--dbpath %s ' % dbdir
if reloc:
cmd = cmd + '--prefix %s %s %s' % (root, flags,
rpm.getFullName())
else:
cmd = cmd + '--badreloc --relocate /=%s %s %s' % (root, flags,
rpm.getFullName())
if self.debug > 0:
sys.stderr.write('build.applyRPM: executing "%s"' % cmd)
retval = subprocess.call(cmd + ' > /dev/null 2>&1', shell=True)
shutil.rmtree(os.path.join(root, 'var'))
if retval == 256:
raise BuildError, "could not apply RPM %s" % (name)
return retval
def buildProductImg(self):
#
# the directory where the python files exist that are used to
# extend anaconda
#
## For CentOS 7, rocks-boot has the product img
if self.versionMajor >= 7:
return
product = '../../images/product.img'
productfilesdir = os.path.join(self.dist.getBuildPath(), 'include')
if not os.path.exists(productfilesdir):
#
# there are no 'product' files, so there's nothing to do.
# let's just return
#
return
cwd = os.getcwd()
#
# make an MD5 checksum for all files in the distribution
#
# the 'sed' command strips off the leading "./" from the pathnames
#
# don't include the build, SRPMS and force directories
#
os.chdir(self.dist.getReleasePath())
if self.calcmd5:
cmd = '/usr/bin/md5sum `find -L . -type f | sed "s/^\.\///" | '
cmd += 'egrep -v "^build|^SRPMS|^force" | egrep -v "rpm$"` '
cmd += '> %s/packages.md5' % (productfilesdir)
else:
cmd = 'touch %s/packages.md5' % (productfilesdir)
subprocess.call(cmd, shell=True)
#
# create the product.img file
#
os.chdir(productfilesdir)
if not os.path.exists('../../images'):
os.makedirs('../../images')
subprocess.call('rm -f %s' % (product), shell=True)
cmd = '/sbin/mksquashfs packages.md5 installclass/*py installclasses '
cmd += '%s ' % (product)
cmd += '-keep-as-directory > /dev/null 2>&1'
subprocess.call(cmd,shell=True)
if os.path.exists(product):
#
# on a server installation (e.g., frontend), mksquashfs
# fails, but it is not important that product.img is built
# during the installation. product.img was already downloaded
# off the CD, so it will not be needed for the remainder of
# the server installation.
#
os.chmod(product, 0664)
os.chdir(cwd)
return
def createrepo(self):
print 'Creating repository'
cwd = os.getcwd()
releasedir = self.dist.getReleasePath()
os.chdir(releasedir)
#
# first check in the install environment (/tmp/updates), then
# look in the 'normal' place (on a running frontend).
#
createrepo = '/tmp/updates/usr/share/createrepo/genpkgmetadata.py'
if not os.path.exists(createrepo):
createrepo = '/usr/share/createrepo/genpkgmetadata.py'
groupfile = "%s/RedHat/base/comps.xml" % releasedir
if os.path.exists(groupfile):
gf = "--groupfile %s/RedHat/base/comps.xml " % (releasedir)
else:
print "Couldn't find the groupfile %s" % groupfile
print "\tIf you are bootstrapping, this is not a problem"
gf = " "
tmpdir = os.getenv("TMPDIR")
# worker.py (Called by genpkgmetadata) needs tmp space
os.putenv("TMPDIR",".")
subprocess.call('%s ' % (createrepo) +
gf + ' --workers 8 ' +
'--quiet .', shell=True)
if tmpdir is not None:
os.putenv("TMPDIR",tmpdir)
else:
os.unsetenv("TMPDIR")
os.chdir(cwd)
return
def makeDirListing(self):
#
# make sure a known CGI exists in the roll directory so we can
# reliably list all the rolls present on a system. this is useful
# when the directory listing output is different between different
# web servers
#
path = os.path.join(self.dist.getRootPath(), 'rolls')
if os.path.exists(path):
filename = os.path.join(path, 'index.cgi')
file = open(filename, 'w')
file.write('%s' % (directory_listing_cgi))
file.close()
			os.chmod(path, 0755)
			os.chmod(filename, 0755)
return
def cleaner(self, path, file, root):
if not root:
root = self.dist.getReleasePath()
dir = os.path.join(root, path)
if dir not in [ self.dist.getForceRPMSPath() ]:
os.unlink(os.path.join(dir, file.getName()))
def builder(self, path, file, root):
if not root:
root = self.dist.getReleasePath()
dir = os.path.join(root, path)
fullname = os.path.join(dir, file.getName())
if file.getFullName() == fullname:
return
if not os.path.isdir(dir):
os.makedirs(dir)
		# Create the new distribution either with symbolic links into the
		# mirror, contrib, and local RPMS, or by copying everything.  The
		# idea is that local distributions should be all symlinks, but a
		# published base distribution (like the NPACI Rocks master)
		# should be copies.  This keeps the FTP chroot
# environment happy, extends the lifetime of the release past
# that of scattered RPMS. It may also make sense to have your
# master distribution for your cluster done by copy.
if self.useLinks:
file.symlink(fullname, self.dist.getRootPath())
else:
# For copied distributions, the timestamps of the new
# files are forced to that of the source files. This
# keeps wget happy.
if os.path.islink(file.getFullName()):
os.symlink(os.readlink(file.getFullName()), fullname)
else:
shutil.copy(file.getFullName(), fullname)
os.utime(fullname, (file.getTimestamp(), file.getTimestamp()))
def normalizer(self, path, file, root):
if not root:
root = self.dist.getReleasePath()
dir = os.path.join(root, path)
fullname = os.path.join(dir, file.getName())
# Reset the File to represent the one we just created in the new
# distribution.
if file.getFullName() != fullname:
file.setFile(fullname)
def resolveVersions(self, files):
# Use a dictionary (hash table) to find and resolve all the
		# version conflicts in the list of files.  We use a dictionary
# to avoid an O(n) list based approach. Burn memory, save
# time.
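		# Example: if the list holds both foo-1.0 and foo-1.2 for the
		# same arch, the ">=" comparison below keeps only the newer
		# foo-1.2 entry.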
dict = {}
for e in files:
name = e.getUniqueName() # name w/ arch string appended
if not dict.has_key(name) or e >= dict[name]:
dict[name] = e
# Extract the File objects from the dictionary and return
# them as a list.
list = []
for e in dict.keys():
list.append(dict[e])
return list
def setComps(self, path):
self.compsPath = path
class USBBuilder(DistributionBuilder):
"Builds a filesytem image for a Bootable USB Key."
dn = '/CN=anonymous'
def build(self, dn=None, size=20000):
"""Assumes a valid rocks-dist, will throw an
		exception if missing.  Size is the number of blocks (1 block = 1KB)
in the filesystem."""
print 'Creating Bootable USB filesystem ...'
if dn:
self.dn = dn
cd = os.path.normpath(
os.path.join(self.dist.getReleasePath(), '..'))
thisdir = os.path.join(cd,'usb-key')
subprocess.call('mkdir -p %s' % thisdir, shell=True)
os.chdir(thisdir)
self.applyRPM('rocks-boot-cdrom', thisdir)
subprocess.call('/sbin/mkfs.vfat -C usb.img '
+ '-n "Rocks USB Boot" %s > /dev/null' % size, shell=True)
subprocess.call('rm -rf key-img', shell=True)
subprocess.call('mkdir -p key-img', shell=True)
subprocess.call('mount -o loop usb.img key-img', shell=True)
subprocess.call('cp -a isolinux/* key-img/', shell=True)
os.rename('key-img/isolinux.cfg','key-img/syslinux.cfg')
subprocess.call('touch key-img/rocks-usbkey', shell=True)
try:
self.writeKeys('key-img')
except Exception, msg:
print 'warning - could not find key: %s' % msg
subprocess.call('umount key-img', shell=True)
subprocess.call('/usr/bin/syslinux usb.img', shell=True)
imgname = 'rocks-usb-%s.%s.img' % \
(self.version, self.dist.getArch())
imgpath = os.path.join(cd,imgname)
os.rename('usb.img', imgpath)
os.chmod(imgpath,0444)
subprocess.call('rm -rf %s' % thisdir, shell=True)
print "Wrote:", imgpath
print "Copy this image directly onto a usb key: "
print " # dd < %s > /dev/sda" % imgname
def writeKeys(self, root):
"Copy essential cluster keys to usb drive"
subprocess.call('mkdir -p %s/security/server' % root, shell=True)
subprocess.call('mkdir -p %s/security/client' % root, shell=True)
self.newCert('%s/security' % root)
# For Server: our CA and 411 master.
ca = '/etc/security/ca'
for k in ('ca.crt','ca.key','ca.serial'):
shutil.copy(os.path.join(ca,k),
'%s/security/server/' % root)
# sacerdoti: The 411 shared key is saved for the frontend,
		# so 411 and the CA can be recovered after a catastrophe (disk or
		# node destroyed).  Computes never need the shared 411 key, since
# it is in the kickstart file. The 411 master public key is
# always generated from the private key.
shutil.copy('/etc/411-security/master.key',
'%s/security/server/411-master.key' % root)
shutil.copy('/etc/411-security/shared.key',
'%s/security/server/411-shared.key' % root)
# Keep central's keys if we installed over WAN.
for k in ('ca.crt','cert.crt','cert.key'):
try:
shutil.copy('/etc/security/cluster-%s' % k,
'%s/security/server' % root)
except IOError:
pass
# Everyone
shutil.copy('%s/ca.crt' % ca,
'%s/security/cluster-ca.crt' % root)
def newCert(self, root):
"""Generates a Certificate signed by our CA, for use
by compute nodes to prove their membership in the cluster."""
ca = '/etc/security/ca'
print ' Making new certificate keypair'
cwd = os.getcwd()
os.chdir(root)
cmd = ('/usr/bin/openssl req -new -nodes '
+ '-config %s/ca.cfg -batch -subj "%s" ' % (ca, self.dn)
+ '-keyout cluster-cert.key > cert.csr 2> /dev/null')
subprocess.call(cmd, shell=True)
os.chmod('cluster-cert.key',0400)
print ' Signing the certificate with our CA'
cmd = ('/usr/bin/openssl x509 -req -days 1000 '
+ '-CA %s/ca.crt -CAkey %s/ca.key -CAserial %s/ca.serial '
% (ca, ca, ca)
+ ' < cert.csr > cluster-cert.crt 2> /dev/null')
subprocess.call(cmd, shell=True)
os.chmod('cluster-cert.crt', 0444)
os.unlink('cert.csr')
os.chdir(cwd)
return
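# Minimal usage sketch (assumes a configured rocks.dist.Distribution
# instance named "dist"; this example is not part of the original module):
#	builder = DistributionBuilder(dist)
#	builder.setRolls(None)	# include every roll found on the mirrors
#	builder.build()		# build the distribution tree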
|
[
"[email protected]"
] | |
8a57b2c557a5ef2f443057587619d3e3c06297ab
|
6f02ef3af5c9360fdc41f766493e5f8f2eeaca58
|
/todos/migrations/0001_initial.py
|
a4a758ab58d2e6245ce1c817234954a901ac27f6
|
[] |
no_license
|
mahmudgithub/demo_pactics_project_eighteen
|
b79b58a9e6283f6c155013e775f28ba9746ce8e1
|
0709453fddde390ad1b456b2dc3bc5cfab0a30de
|
refs/heads/master
| 2022-04-01T03:17:32.275191 | 2020-01-29T11:03:50 | 2020-01-29T11:03:50 | 236,962,921 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 538 |
py
|
# Generated by Django 2.2.6 on 2019-10-31 11:19
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Todo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('description', models.TextField()),
],
),
]
|
[
"[email protected]"
] | |
f8f6e8e72ebdcabc0c5a3c0453af21f15603e6d2
|
978248bf0f275ae688f194593aa32c267832b2b6
|
/xlsxwriter/test/comparison/test_chart_radar01.py
|
3ec2e6659d3e8c3762dccca98737d355ecc87765
|
[
"BSD-2-Clause-Views"
] |
permissive
|
satish1337/XlsxWriter
|
b0c216b91be1b74d6cac017a152023aa1d581de2
|
0ab9bdded4f750246c41a439f6a6cecaf9179030
|
refs/heads/master
| 2021-01-22T02:35:13.158752 | 2015-03-31T20:32:28 | 2015-03-31T20:32:28 | 33,300,989 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,554 |
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_radar01.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'radar'})
chart.axis_ids = [56801152, 56802688]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
[
"[email protected]"
] | |
e6bfc1a526fa9242f4a6480456004c104c5cb614
|
d3d14ffc9d0211c49187f7502e0e9edbf68cc01f
|
/auth_network_provider/admin.py
|
2aaf239a44680b3362b64fbffbc8be3a999739e3
|
[] |
no_license
|
Brachamul/centrifuge
|
567948fe0fd67a605448c1f3248a0fc5c6d838e6
|
b3ba6635fd4097cc76b4ef6e2522ab2741ccd372
|
refs/heads/master
| 2021-05-01T03:41:29.432670 | 2017-06-17T14:20:02 | 2017-06-17T14:20:02 | 61,970,963 | 0 | 1 | null | 2017-01-22T15:45:55 | 2016-06-26T02:48:56 |
HTML
|
UTF-8
|
Python
| false | false | 752 |
py
|
from django.contrib import admin
from .models import *
class AppAdmin(admin.ModelAdmin):
model = App
list_display = ("name", "trusted", "callback_url", "key", "secret")
admin.site.register(App, AppAdmin)
class CredentialsInline(admin.TabularInline):
model = Credentials
readonly_fields = ( "app", "user_has_authorized", "token", )
extra = 0
class NetworkUserAdmin(admin.ModelAdmin):
model = NetworkUser
readonly_fields = ("user", "uuid", )
list_display = ("user", "number_of_apps",)
inlines = [CredentialsInline, ]
admin.site.register(NetworkUser, NetworkUserAdmin)
class CredentialsAdmin(admin.ModelAdmin):
model = Credentials
readonly_fields = ( "token", "date_joined", )
admin.site.register(Credentials, CredentialsAdmin)
|
[
"[email protected]"
] | |
21bb27820c75c147f94f451e5b09b11aa42f6dbc
|
a73fd25dd9a8e6df0b1bf3eee0bccf5297722bc7
|
/]tasks/2018.01.26.make_purge_road_ds/purged_road_test.py
|
b51732b72009e0187128f0a3175859c76936bfc9
|
[] |
no_license
|
bohaohuang/sis
|
23d0260d85903b62518fb8fb588661597248ad0d
|
28a59f3182f0ba58ba582449377c6588af1d4cde
|
refs/heads/master
| 2021-05-05T17:00:33.808099 | 2019-09-06T17:46:02 | 2019-09-06T17:46:02 | 117,362,230 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 818 |
py
|
import os
import imageio
import numpy as np
import matplotlib.pyplot as plt
patchDir2 = r'/hdd/uab_datasets/Results/PatchExtr/inria/chipExtrRand0_cSz224x224_pad0'
files = os.path.join(patchDir2, 'fileList.txt')
with open(files, 'r') as f:
file_list = f.readlines()
files = os.path.join(r'/media/lab/Michael(01)/chipExtrRegPurge_cSz572x572_pad184', 'state.txt')
with open(files, 'r') as f:
text = f.readlines()
print(text)
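# The triple-quoted block below is a bare string literal, so Python
# evaluates and discards it -- it is effectively commented-out
# visualization code kept for reference.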
'''for i in file_list[:5]:
file_array = i.strip().split(' ')
rgb = []
for file in file_array[:3]:
img = imageio.imread(os.path.join(patchDir2, file))
rgb.append(img)
rgb = np.dstack(rgb)
gt = imageio.imread(os.path.join(patchDir2, file_array[-1]))
plt.subplot(121)
plt.imshow(rgb)
plt.subplot(122)
plt.imshow(gt)
plt.show()'''
|
[
"[email protected]"
] | |
b2f0c12b65c751c60b90d88d52ace5920b128e1d
|
35f2fafdc401b6a055d7d236fd1a5c619b6567df
|
/users/models.py
|
781e6d3ddd30db5b26262f98d80bdcb1a5e68729
|
[] |
no_license
|
lannyMa/zmr_form2
|
d8999d3605cf6ef0aee53b91599db3d5e91ddfc2
|
e02743231d1df98e25c7c321bae82b01ebcaae83
|
refs/heads/master
| 2021-07-18T11:20:19.914460 | 2017-10-26T10:25:17 | 2017-10-26T10:25:17 | 108,396,661 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,484 |
py
|
from datetime import datetime
from django.contrib.auth.models import AbstractUser
from django.db import models
# Create your models here.
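# Note on the Chinese verbose_name labels below (left untranslated so the
# admin UI is unchanged): 头像=avatar, 昵称=nickname, 性别=gender,
# 生日=birthday, 地址=address, 手机=mobile, 用户信息=user info, 邮箱=email,
# 发送类型=send type, 添加时间=time added, 邮箱验证码=email verification code.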
class UserProfile(AbstractUser):
image = models.ImageField(upload_to="image/%Y/%m",default="image/default.png",verbose_name="头像")
nick_name = models.CharField(max_length=50,default="",verbose_name="昵称")
gender = models.CharField(max_length=50,choices=(("femail","女"),("male","男")), default="femail",verbose_name="性别")
birth = models.DateField(null=True,blank=True,verbose_name="生日")
address = models.CharField(max_length=100,default="",verbose_name="地址")
mobile = models.CharField(max_length=13,verbose_name="手机")
class Meta:
verbose_name = "用户信息"
verbose_name_plural = verbose_name
def __str__(self):
return self.username
class EmailVerifyRecord(models.Model):
code = models.CharField(max_length=20,verbose_name="验证码类型")
email = models.EmailField(max_length=30,verbose_name="邮箱")
send_type = models.CharField(max_length=30,choices=(("register","注册"),("forget","找回密码"),("update","修改邮箱")),default="register",verbose_name="发送类型")
send_time = models.DateField(default=datetime.now,verbose_name="添加时间")
class Meta:
verbose_name = "邮箱验证码"
verbose_name_plural = verbose_name
def __str__(self):
return "{0}({1})".format(self.code,self.email)
|
[
"[email protected]"
] | |
919360772cff2ff788f446d26bf11cbde56b7805
|
dc437674926f7402da4de3ea4022da37932aaffd
|
/studentproject/post/admin.py
|
2a3ec02467b1509ef6750978aeee6360170098cc
|
[] |
no_license
|
kstar0102/Datasource
|
ec188029ed6fdefcda13b49fffc0496f9cbd6277
|
92bb1f4f9f1cfe9dd4c25c220cf503cb1de2ba68
|
refs/heads/master
| 2023-04-24T17:37:27.670492 | 2021-04-22T07:39:36 | 2021-04-22T07:39:36 | 360,366,639 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 136 |
py
|
from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Post)
admin.site.register(Like)
|
[
"[email protected]"
] | |
60a5cc01c4078a8fac44bbfd6e9fc314eddbd9cd
|
e55480007fde8acea46fe8eeb3ee7193c25ba113
|
/tests/test_ds/test_graph_subject/chapter_04/test_is_tree.py
|
8ea2c200572eb20635412bb520bb3f7bd67a6172
|
[] |
no_license
|
Annihilation7/Ds-and-Al
|
80301bf543ec2eb4b3a9810f5fc25b0386847fd3
|
a0bc5f5ef4a92c0e7a736dcff77df61d46b57409
|
refs/heads/master
| 2020-09-24T05:04:41.250051 | 2020-02-15T10:31:10 | 2020-02-15T10:31:10 | 225,669,366 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,273 |
py
|
# -*- coding: utf-8 -*-
# Email: [email protected]
# Created: 2020-02-11 09:36pm
from src.ds.graph_subject.chapter_02 import my_adj_set, adj_matrix
from src.ds.graph_subject.chapter_04 import is_tree
import unittest
class Test_IsTree(unittest.TestCase):
def setUp(self) -> None:
self.test_adj_matrix1 = is_tree.IsTree(
adj_matrix.AdjMatrix('src/ds/graph_subject/data/g2.txt')
)
self.test_adj_matrix2 = is_tree.IsTree(
adj_matrix.AdjMatrix('src/ds/graph_subject/data/g3.txt')
)
self.test_adj_set1 = is_tree.IsTree(
my_adj_set.MyAdjSet('src/ds/graph_subject/data/g2.txt')
)
self.test_adj_set2 = is_tree.IsTree(
my_adj_set.MyAdjSet('src/ds/graph_subject/data/g3.txt')
)
def test_all(self):
        # Vertex 5 is an isolated point that forms its own connected
        # component, so all of the is_tree() calls below should return False
print('基于邻接矩阵的图:')
print(self.test_adj_matrix1.is_tree())
print(self.test_adj_matrix2.is_tree())
print('=' * 20, '华丽分割线', '=' * 20)
print('基于邻接表的图:')
print(self.test_adj_set1.is_tree())
print(self.test_adj_set2.is_tree())
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
a0319a975466da4555acd1c7f72db4566235dbd5
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/hpJsoWBBHWKZ9NcAi_20.py
|
a8ff8faf0dbc2ff4752f68ec485a8e5bd8531d3f
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,710 |
py
|
"""
In the world of birding there are four-letter codes for the common names of
birds. These codes are created by some simple rules:
* If the bird's name has only one word, the code takes the first four letters of that word.
* If the name is made up of two words, the code takes the first two letters of each word.
* If the name is made up of three words, the code is created by taking the first letter from the first two words and the first two letters from the third word.
* If the name is four words long, the code uses the first letter from all the words.
There are other ways codes are created, but this challenge will only use the
four rules listed above.
In this challenge you will write a function that takes a list of strings of
common bird names and create the codes for those names based on the rules
above. The function will return a list of codes in the same order in which the
input names were presented.
### Examples
bird_code(["Black-Capped Chickadee", "Common Tern"]) ➞ ["BCCH", "COTE"]
bird_code(["American Redstart", "Northern Cardinal"]) ➞ ["AMRE","NOCA"]
bird_code(["Bobolink", "American White Pelican"]) ➞ ["BOBO","AWPE"]
### Notes
* The four-letter codes in the returned list should be in UPPER CASE.
* If a common name has a hyphen/dash, it should be considered a space.
"""
import re
def bird_code(lst):
A=[re.split('[\-\s]',x) for x in lst]
B=[]
for x in A:
if len(x)==1:
B.append(x[0][:4].upper())
elif len(x)==2:
B.append((x[0][:2]+x[-1][:2]).upper())
elif len(x)==3:
B.append((x[0][0]+x[1][0]+x[2][:2]).upper())
else:
B.append((x[0][0]+x[1][0]+x[2][0]+x[3][0]).upper())
return B
|
[
"[email protected]"
] | |
90e79259099914f41a2ae73355cadc9e89d537bc
|
28a462a28f443c285ca5efec181ebe36b147c167
|
/tests/compile/basic/es2019/PutValue.spec
|
82f9ea18648132fc2116887adffdb4b33ca2706b
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
kaist-plrg/jstar
|
63e71f9156860dc21cccc33a9f6c638dfee448ea
|
1282919127ea18a7e40c7a55e63a1ddaaf7d9db4
|
refs/heads/main
| 2022-07-22T08:12:34.947712 | 2022-02-27T04:19:33 | 2022-02-27T11:06:14 | 384,045,526 | 6 | 4 |
NOASSERTION
| 2022-02-27T11:05:26 | 2021-07-08T07:53:21 |
Python
|
UTF-8
|
Python
| false | false | 1,196 |
spec
|
1. ReturnIfAbrupt(_V_).
1. ReturnIfAbrupt(_W_).
1. If Type(_V_) is not Reference, throw a *ReferenceError* exception.
1. Let _base_ be GetBase(_V_).
1. If IsUnresolvableReference(_V_) is *true*, then
1. If IsStrictReference(_V_) is *true*, then
1. Throw a *ReferenceError* exception.
1. Let _globalObj_ be GetGlobalObject().
1. Return ? Set(_globalObj_, GetReferencedName(_V_), _W_, *false*).
1. Else if IsPropertyReference(_V_) is *true*, then
1. If HasPrimitiveBase(_V_) is *true*, then
1. Assert: In this case, _base_ will never be *undefined* or *null*.
1. Set _base_ to ! ToObject(_base_).
1. Let _succeeded_ be ? _base_.[[Set]](GetReferencedName(_V_), _W_, GetThisValue(_V_)).
1. If _succeeded_ is *false* and IsStrictReference(_V_) is *true*, throw a *TypeError* exception.
1. Return.
1. Else _base_ must be an Environment Record,
1. Return ? _base_.SetMutableBinding(GetReferencedName(_V_), _W_, IsStrictReference(_V_)) (see <emu-xref href="#sec-environment-records"></emu-xref>).
|
[
"[email protected]"
] | |
b034390970ca7665154b9ff7554141897cd63861
|
fd64e364368bcb2cdcf77ab1e0fc234a6b698f69
|
/Python/Beginner/CATSDOGS.py
|
4c85396cd6c96559c3c085683f329d3f416ad4ff
|
[] |
no_license
|
Parizval/CodeChefCodes
|
57712069f3d56cc42282f9e35c6ddd9398e4a5bf
|
cfd2876816be806882650b6ea51431b1f8d6bec5
|
refs/heads/master
| 2021-07-16T13:10:15.668713 | 2020-07-06T21:40:09 | 2020-07-06T21:40:09 | 188,693,667 | 5 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 450 |
py
|
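# Reading of the check below (inferred from the code itself, not from the
# original problem statement): C cats, D dogs and L observed legs; every
# animal has 4 legs, and each dog can carry up to two cats off the ground,
# so the number of grounded animals (L // 4) must lie between
# D + max(0, C - 2*D) and C + D.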
for a in range(int(input())):
C,D,L = map(int,input().split())
check = True
if L % 4 != 0 :
check = False
else:
animals = L //4
upperlimit = D + C
remainder = C - 2*D
if remainder < 0 :
remainder = 0
lowerlimit = D + remainder
if animals < lowerlimit or animals > upperlimit:
check = False
if check:
print("yes")
else:
print("no")
|
[
"[email protected]"
] | |
8897cd5753b7e6de917d400cf7fff05f75fe2ae7
|
9431bba2d148f8aef9c0a8f3ca16fcf875890757
|
/tools/snippets/callexecute.py
|
d020c8d54598d30b17ad2f2e36d6f1c5d51cb87b
|
[
"MIT"
] |
permissive
|
terasakisatoshi/pythonCodes
|
fba0b78414b2c85f4a738200354ea583f0516768
|
953210c06e9885a7c885bc01047715a77de08a1a
|
refs/heads/master
| 2023-05-14T12:30:22.201711 | 2023-05-07T13:41:22 | 2023-05-07T13:41:22 | 197,893,702 | 2 | 1 |
MIT
| 2022-11-25T10:59:52 | 2019-07-20T07:09:12 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 416 |
py
|
import subprocess
import sys
from os.path import join
def main():
argv=sys.argv
argc=len(argv)
print ("argv=%s"%argv)
print ("argc=%d"%argc)
if(argc==2):
exename=argv[1]
path ="hoge"
command=exename+" "+join(".",path)
echo="echo "+command
subprocess.call(echo,shell=True)
subprocess.call(command,shell=True)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
42dd946ad5766301d51e0817e59acc4c05619a40
|
8cf0844cfc26f32726ea787100528aea9a63427c
|
/flask_app/app_start.py
|
6fe44cf4502b62ed22ca7eb5655a1d62a1e129d6
|
[] |
no_license
|
lcgkiller/firstFlask
|
8d8aa2987381dd599522391a82f5c1e53cda48fc
|
4cfb3b8093e1e118aecebdcd9945edae02226ccc
|
refs/heads/master
| 2020-03-13T21:47:43.241979 | 2018-04-27T16:02:51 | 2018-04-27T16:02:51 | 131,304,087 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 414 |
py
|
from flask import Flask, request, redirect, url_for, send_from_directory, render_template
app = Flask(__name__)
app.debug = True
# Routes
@app.route('/', methods=['GET'])
def root():
return render_template('index.html')
@app.route('/<path:path>')
def static_prox(path):
return app.send_static_file(path)
if __name__ == "__main__":
app.run()
# app.run(host="0.0.0.0", port=80, threaded=True)
|
[
"[email protected]"
] | |
7ef87ef5d13fc04a4c361d681198ecd5755d565c
|
ab6c141c7575dc7bcf91739bd747bb06b7eb5b65
|
/ideation/idea/migrations/0014_auto__add_field_ideasupport_date_created.py
|
8868f4dddf44854fb813fb352ce9335438306297
|
[] |
no_license
|
mattiek/thoughtbubble.us
|
fc2c2ea93fe8485f84e93f0337799ba2adefaffc
|
d0ec2300b07af51deeeece42590110c37c90493f
|
refs/heads/master
| 2021-03-30T18:04:04.217266 | 2014-09-17T01:11:51 | 2014-09-17T01:11:51 | 10,833,788 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,359 |
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'IdeaSupport.date_created'
db.add_column(u'idea_ideasupport', 'date_created',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2013, 10, 1, 0, 0), blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'IdeaSupport.date_created'
db.delete_column(u'idea_ideasupport', 'date_created')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'cities.city': {
'Meta': {'object_name': 'City'},
'county': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'elevation': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {}),
'longitude': ('django.db.models.fields.FloatField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'state_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '5'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'idea.idea': {
'Meta': {'object_name': 'Idea'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 10, 1, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 10, 1, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'member_idea_creator'", 'null': 'True', 'to': u"orm['thoughtbubble.ThoughtbubbleUser']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['thoughtbubble.ThoughtbubbleUser']", 'null': 'True'}),
'what_for': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'what_kind': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['idea.IdeaType']"}),
'where': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['location.Location']"})
},
u'idea.ideaimage': {
'Meta': {'object_name': 'IdeaImage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idea': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['idea.Idea']"}),
'img': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'idea.idealink': {
'Meta': {'object_name': 'IdeaLink'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idea': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['idea.Idea']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'idea.ideasupport': {
'Meta': {'object_name': 'IdeaSupport'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idea': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['idea.Idea']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['thoughtbubble.ThoughtbubbleUser']"})
},
u'idea.ideatype': {
'Meta': {'object_name': 'IdeaType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'location.location': {
'Meta': {'object_name': 'Location'},
'about': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities.City']", 'null': 'True', 'blank': 'True'}),
'city_and_state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'community': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neighborhood.Neighborhood']", 'null': 'True', 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'what_kind': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['location.LocationType']", 'null': 'True', 'blank': 'True'}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'})
},
u'location.locationtype': {
'Meta': {'object_name': 'LocationType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'maki_class': ('django.db.models.fields.CharField', [], {'default': "'rocket'", 'max_length': '40'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'neighborhood.neighborhood': {
'Meta': {'object_name': 'Neighborhood'},
'center': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '43'}),
'geom': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'regionid': ('django.db.models.fields.FloatField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'thoughtbubble.thoughtbubbleuser': {
'Meta': {'object_name': 'ThoughtbubbleUser'},
'email': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '254'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '25', 'db_index': 'True'})
}
}
complete_apps = ['idea']
|
[
"[email protected]"
] | |
11a9567b55ce1e148141783bb21cb23f10aeca3c
|
b24ce5acced59ef367a20706949953f3ea81d57a
|
/tensorflow/contrib/learn/python/learn/learn_runner.py
|
183ab438b6f2f658da98d6f655c97b4a59ac9a06
|
[
"Apache-2.0"
] |
permissive
|
BoldizsarZopcsak/Image-Classifier
|
b57dd3b72cf368cc1d66a5e318003a2a2d8338a4
|
c0d471a55a70b3118178488db3c005a9277baade
|
refs/heads/master
| 2022-11-19T12:28:49.625532 | 2018-01-20T15:48:48 | 2018-01-20T15:48:48 | 118,253,026 | 1 | 1 |
Apache-2.0
| 2022-11-01T09:24:24 | 2018-01-20T15:04:57 |
Python
|
UTF-8
|
Python
| false | false | 6,712 |
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Runs an Experiment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.experiment import Experiment
from tensorflow.python.platform import tf_logging as logging
# TODO(xiejw): Refactor the learn_runner to make code reusable.
def _execute_schedule(experiment, schedule):
"""Execute the method named `schedule` of `experiment`."""
if not hasattr(experiment, schedule):
logging.error('Schedule references non-existent task %s', schedule)
valid_tasks = [x for x in dir(experiment)
if not x.startswith('_')
and callable(getattr(experiment, x))]
logging.error('Allowed values for this experiment are: %s', valid_tasks)
raise ValueError('Schedule references non-existent task %s' % schedule)
task = getattr(experiment, schedule)
if not callable(task):
logging.error('Schedule references non-callable member %s', schedule)
valid_tasks = [x for x in dir(experiment)
if not x.startswith('_')
and callable(getattr(experiment, x))]
logging.error('Allowed values for this experiment are: %s', valid_tasks)
raise TypeError('Schedule references non-callable member %s' % schedule)
return task()
def run(experiment_fn, output_dir, schedule=None):
"""Make and run an experiment.
It creates an Experiment by calling `experiment_fn`. Then it calls the
function named as `schedule` of the Experiment.
If schedule is not provided, then the default schedule for the current task
type is used. The defaults are as follows:
  * 'ps' maps to 'run_std_server'
  * 'worker' maps to 'train'
  * 'master' maps to 'train_and_evaluate'
If the experiment's config does not include a task type, then an exception
is raised.
Example:
```
def _create_my_experiment(output_dir):
return tf.contrib.learn.Experiment(
estimator=my_estimator(model_dir=output_dir),
train_input_fn=my_train_input,
eval_input_fn=my_eval_input)
learn_runner.run(
experiment_fn=_create_my_experiment,
output_dir="some/output/dir",
schedule="train")
```
Args:
experiment_fn: A function that creates an `Experiment`. It should accept an
argument `output_dir` which should be used to create the `Estimator`
(passed as `model_dir` to its constructor). It must return an
`Experiment`.
output_dir: Base output directory.
schedule: The name of the method in the `Experiment` to run.
Returns:
The return value of function `schedule`.
Raises:
ValueError: If `output_dir` is empty, `schedule` is None but no task
type is set in the built experiment's config, the task type has no
default, or `schedule` doesn't reference a member of `Experiment`.
TypeError: `schedule` references non-callable member.
"""
if not output_dir:
raise ValueError('Must specify an output directory')
if not callable(experiment_fn):
raise TypeError('Experiment builder "%s" is not callable.' %
experiment_fn)
# Call the builder
experiment = experiment_fn(output_dir=output_dir)
if not isinstance(experiment, Experiment):
raise TypeError('Experiment builder did not return an Experiment '
'instance, got %s instead.' % type(experiment))
# Get the schedule
config = experiment.estimator.config
schedule = schedule or _get_default_schedule(config)
return _execute_schedule(experiment, schedule)
@experimental
def tune(experiment_fn, tuner):
"""Tune an experiment with hyper-parameters.
It iterates trials by running the Experiment for each trial with the
corresponding hyper-parameters. For each trial, it retrieves the
hyper-parameters from `tuner`, creates an Experiment by calling experiment_fn,
and then reports the measure back to `tuner`.
Example:
```
def _create_my_experiment(config, hparams):
hidden_units = [hparams.unit_per_layer] * hparams.num_hidden_layers
return tf.contrib.learn.Experiment(
estimator=DNNClassifier(config=config, hidden_units=hidden_units),
train_input_fn=my_train_input,
eval_input_fn=my_eval_input)
tuner = create_tuner(study_configuration, objective_key)
learn_runner.tune(experiment_fn=_create_my_experiment, tuner)
```
Args:
experiment_fn: A function that creates an `Experiment`. It should accept an
argument `config` which should be used to create the `Estimator` (passed
as `config` to its constructor), and an argument `hparams`, which should
be used for hyper-parameters tuning. It must return an `Experiment`.
tuner: A `Tuner` instance.
"""
while tuner.next_trial():
tuner.run_experiment(experiment_fn)
def _is_distributed(config):
"""Returns true if this is a distributed job."""
if not config.cluster_spec:
return False
# This is considered a distributed job if there is more than one task
# in the cluster spec.
task_count = 0
for job in config.cluster_spec.jobs:
for _ in config.cluster_spec.job_tasks(job):
task_count += 1
return task_count > 1
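# (Illustrative note added by the editor, not from the original source.) For example, a
# cluster spec such as {'master': ['host0:2222'], 'worker': ['host1:2222', 'host2:2222'],
# 'ps': ['host3:2222']} contains four tasks in total, so the job counts as distributed
# and the per-task-type defaults chosen below apply.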
def _get_default_schedule(config):
"""Returns the default schedule for the provided RunConfig."""
if not config or not _is_distributed(config):
return 'train_and_evaluate'
if not config.task_type:
raise ValueError('Must specify a schedule')
if config.task_type == run_config.TaskType.MASTER:
# TODO(rhaertel): handle the case where there is more than one master
# or explicitly disallow such a case.
return 'train_and_evaluate'
elif config.task_type == run_config.TaskType.PS:
return 'run_std_server'
elif config.task_type == run_config.TaskType.WORKER:
return 'train'
raise ValueError('No default schedule for task type: %s' % (config.task_type))
|
[
"[email protected]"
] | |
276580429593813de66a2a00be17ca56381aed29
|
beb4c1dd9077f11ebd7aca407b272acbc780aa3c
|
/natureShare/natureShare/asgi.py
|
353dbf25eec8bcec3d755238860bcb12f1dbfeb5
|
[] |
no_license
|
jpchato/nature-share
|
f89cfc1295648e9a0b03b7281d72d9179357b0be
|
2cb952e72ea43c45884cd4a6c4b39c32936fe612
|
refs/heads/master
| 2023-02-19T06:46:01.213253 | 2021-01-15T19:10:24 | 2021-01-15T19:10:24 | 317,411,534 | 1 | 0 | null | 2021-01-13T20:23:29 | 2020-12-01T03:15:01 |
Python
|
UTF-8
|
Python
| false | false | 399 |
py
|
"""
ASGI config for natureShare project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'natureShare.settings')
application = get_asgi_application()
|
[
"[email protected]"
] | |
a06ec03b657f606e1b3c9cb0faa9abb9c070ad9c
|
410e4fa021618be7d0a75aa46fa60243b0f084a0
|
/bang/seasons.py
|
841c75fb3a61a1271acfd89d53feb6c039fbd8d0
|
[] |
no_license
|
MagiCircles/BanGDream
|
0c44f0dceffdb8c398c62b75061aca94726d2397
|
ae639b35a14a78d9dfab3fbd863e4beb10a54699
|
refs/heads/master
| 2023-08-11T03:27:31.601492 | 2023-08-02T08:13:47 | 2023-08-02T08:13:47 | 86,222,591 | 59 | 13 | null | 2022-08-03T01:30:39 | 2017-03-26T09:52:18 |
Python
|
UTF-8
|
Python
| false | false | 26,305 |
py
|
# -*- coding: utf-8 -*-
import random, json
from django.conf import settings as django_settings
from magi.settings import (
HOMEPAGE_BACKGROUNDS,
)
from bang.utils import (
getHomepageArts,
)
def getRandomChristmasArt():
homepage_arts = []
try:
homepage_arts += getHomepageArts(
only_card_ids=django_settings.STAFF_CONFIGURATIONS.get('christmas_theme_cards', '').split(','),
only_recent_cards=False,
fallback_to_all=False,
randomize=True,
limit_to=1,
)
except:
pass
try:
arts = json.loads(django_settings.STAFF_CONFIGURATIONS.get('christmas_theme_arts', [])) or []
random.shuffle(arts)
homepage_arts += [arts[0]]
except:
pass
if homepage_arts:
return random.choice(homepage_arts)
return None
def getRandomChristmasBackground():
try:
return random.choice([
background
for background in HOMEPAGE_BACKGROUNDS
if background['id'] in [
int(id)
for id in django_settings.STAFF_CONFIGURATIONS.get(
'christmas_theme_backgrounds', '').split(',')
]
])
except IndexError:
return None
def getRandomPrideBackground():
return {
'image': u'pride{}.png'.format(random.randint(1, 5)),
}
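# (Editor's note, not part of the original file.) Each entry below is a dict whose
# 'about_url' links to the bandori.party activity the fan art came from, while either
# 'url' or 'foreground_url' carries the image itself; getRandomPrideArt() at the bottom
# of this file simply returns random.choice(PRIDE_ARTS).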
PRIDE_ARTS = [
{
'url': 'https://i.imgur.com/fdmnbra.png',
'about_url': 'https://bandori.party/activity/49615/here-s-another-edit-i-made-all-the-guitarists-excluding-touko-and-lock-rip-wishing-you-a-happy/',
},
{
'url': 'https://i.imgur.com/nMwz2I7.png',
'about_url': 'https://bandori.party/activity/49603/Lesbian-PAREO-3-I-think-the-edit-is-a-little-ugly-but-I-m-happy-with-the-result/',
},
{
'url': 'https://i.imgur.com/Bg3BJTj.png',
'about_url': 'https://bandori.party/activity/49598/Shhhhh-mods-are-asleep-it-s-time-to-post-last-minute-entries-Sorry-if-it-s-bad-i-don-t/',
},
{
'url': 'https://i.imgur.com/8r3QZtV.jpg',
'about_url': 'https://bandori.party/activity/49597/Hello-I-am-new-here-but-am-I-late-with-the-Pride-Month-card-event-In-this-one-I-made-Omni/',
},
{
'foreground_url': 'https://i.imgur.com/6snXIo8.jpg',
'about_url': 'https://bandori.party/activity/49585/i-did-another-one-color-red-g-color-color-orange-a-color-color-yellow-y-color/',
},
{
'url': 'https://i.imgur.com/uZtTmQM.jpg',
'about_url': 'https://bandori.party/activity/49584/I-made-this-pan-Pareo-edit-and-it-took-me-longer-than-what-I-expected-But-I-m-not-disappointed/',
},
{
'url': 'https://i.bandori.party/u/activities/azKpO3idCtYPbwap4Co8LbDI7tlYAY.jpg',
'about_url': 'https://bandori.party/activity/49580/this-took-me-surprisingly-long-and-uhhh-it-s-not-even-that-good-but-anyway/',
},
{
'foreground_url': 'https://i.bandori.party/u/activities/MEecPE9BmENCgLInGZr8eGqStMtjP0.png',
'about_url': 'https://bandori.party/activity/49568/happy-pride-month-sorry-if-my-handwriting-doesn-t-look-good-everyone-had-such-good-edits-that/',
},
{
'url': 'https://i.bandori.party/u/activities/NsMmFmRWmZRx59AxlQkEVyXRBzQeXa.png',
'about_url': 'https://bandori.party/activity/49565/New-day-More-pride-edits-Pride-buttons-Straight-ally-Moca-Asexual-Bi-romantic-Ran-Other/',
},
{
'url': 'https://i.bandori.party/u/activities/kXVgKQ9kDUvuzRolaAWpARh5OpipNl.png',
'about_url': 'https://bandori.party/activity/49556/My-last-edit-for-today-Here-s-Masking-rocking-some-nonbinary-pride-Happy-pride-month/',
},
{
'foreground_url': 'https://i.bandori.party/u/activities/btn9PiBq74msHFdbUfCRnIfJyzEQtR.png',
'about_url': 'https://bandori.party/activity/49554/Pan-pride-Kaoru-I-did-another-costume-color-edit-This-time-it-s-Kaoru-representing-some/',
},
{
'foreground_url': 'https://i.bandori.party/u/activities/NluGRScGQD7A3izI7TTy70gGP6ymfj.jpeg',
'about_url': 'https://bandori.party/activity/49552/Happy-pride-month-Headcanon-that-hina-is-asekual-since-she-is-friendlikely-to-all-girls-and-her/',
},
{
'url': 'https://i.bandori.party/u/activities/wQm3KdwrLHV2w0miPBYunJu2jUcj5Q.png',
'about_url': 'https://bandori.party/activity/49551/I-don-t-know-how-to-use-this-website-but-happy-pride-month-3/',
},
{
'foreground_url': 'https://i.bandori.party/u/activities/3G2kTrW2yfOEqnh1l1ftNSpO84wDxC.png',
'about_url': 'https://bandori.party/activity/49547/This-is-the-last-one-I-swear-I-just-got-too-excited-Anyway-are-Bandorisona-edits-still-a-thing/',
},
{
'foreground_url': 'https://i.bandori.party/u/activities/4W7G9LnyRSuZ3TNSYK3pM2B6w1zsLb.png',
'about_url': 'https://bandori.party/activity/49537/Happy-pride-month/',
},
{
'url': 'https://i.bandori.party/u/activities/jZS4Jb9VmmyDMSCqzQETGfHzJIyC2S.png',
'about_url': 'https://bandori.party/activity/49531/An-asexual-spectrum-edit-of-Rui-I-really-wanted-to-edit-the-Morfonica-outfit-with-ace-colors-and/',
},
{
'url': 'https://i.bandori.party/u/activities/F6NoBO9A6VBGwtR8y7hG94fmPu3zTM.png',
'about_url': 'https://bandori.party/activity/49520/Anyone-for-rainbow-bread/',
},
{
'url': 'https://i.bandori.party/u/activities/a0NCm7JQQoPLIEMcGfsXgBhVolxGGn.png',
'about_url': 'https://bandori.party/activity/49517/okay-this-took-a-little-longer-than-i-thought-but-here-s-my-card-edit-so-i-headcanon-hina-to-be-a/',
},
{
'foreground_url': 'https://i.bandori.party/u/activities/tTt6pEL8EXwmK5310aNc5l09qcQuir.png',
'about_url': 'https://bandori.party/activity/49516/Happy-Pride-Month-I-ve-always-wanted-to-edit-Mizuki-into-a-bandori-card-so-heres-Mizuki/',
},
{
'foreground_url': 'https://i.bandori.party/u/activities/AB6NMPF8XhizdznvamQPJuhPbBCdqa.jpg',
'about_url': 'https://bandori.party/activity/49515/Hello-judges-and-may-I-say-you-re-looking-great-today-My-Pride-edit-is-ass-but-hey-it-s/',
},
{
'url': 'https://i.bandori.party/u/activities/cagm01cVMiNCYf4SPL1n6jYxo8wo9p.png',
'about_url': 'https://bandori.party/activity/49511/Heya-Happy-Pride-I-wouldn-t-consider-myself-as-part-of-the-lgbtq-spectrum-tbh-but-it-s-kinda-a/',
},
{
'url': 'https://i.bandori.party/u/activities/jdjWuDyyOjNENGvM1I1GcgNxS7NvOh.png',
'about_url': 'https://bandori.party/activity/49509/HEYYY-happy-PrideMonth-everyone-flags-transfem-bisexual-genderfluid/',
},
{
'foreground_url': 'https://i.bandori.party/u/activities/YZkYTftJSAz2XJeFnZhrcqYkW522ig.png',
'about_url': 'https://bandori.party/activity/49508/am-not-very-the-best-of-editing-but-i-tried-my-best-with-this-challenge-happy-pride-month/',
},
{
'url': 'https://i.bandori.party/u/activities/mYn8MHmSVeY5Giw0ysUNilGqnsLfnY.jpeg',
'about_url': 'https://bandori.party/activity/49507/I-m-not-that-good-at-edits-but-here-s-my-submission-to-the-PRIDE-Ready-event-Non-binary/',
},
{
'foreground_url': 'https://i.bandori.party/u/activities/8SmjTsNaEmpNyHvGxv4OrbUGkEey3G.jpg',
'about_url': 'https://bandori.party/activity/49504/PrideMonth-To-celebrate-Pride-Month-along-with-RAS-debut-on-EN-server-coming-soon-I/',
},
{
'foreground_url': 'https://i.bandori.party/u/activities/wV3IlIzXXmKfofylCs96LRVxN1c8oE.png',
'about_url': 'https://bandori.party/activity/49496/Hihi-Happy-Pride-Month-She-just-conveniently-happened-to-be-holding-a-flag/',
},
{
'url': 'https://i.bandori.party/u/activities/htAUstbawnXTHsBBPXEuM4StvhhaD9.png',
'about_url': 'https://bandori.party/activity/49494/Rinko-wishes-you-a-happy-pride-month/',
},
{
'url': 'https://i.bandori.party/u/activities/QyruNHtonbPWU9nb9MiijZAJ4SMpBY.png',
'about_url': 'https://bandori.party/activity/49493/lesbian-pride-tomoe-for-the-pride-ready-event/',
},
{
'foreground_url': 'https://i.bandori.party/u/activities/7VxccwbrXslO6zaDvrWwvpeecfwiw0.png',
'about_url': 'https://bandori.party/activity/49488/oh-we-postin-pride-edits-Pareo-s-perfect-for-this-made-this-one-a-while-ago-it-potentially/',
},
{
'url': 'https://i.bandori.party/u/activities/E95o0p21uhxtdtWPk1PfuWOxP0zgrl.jpeg',
'about_url': 'https://bandori.party/activity/49717/The-Pride-Ready-event-may-be-over-but-will-I-still-make-edits-You-bet-I-will-So-here-s-Pan/',
},
{
'about_url': u'https://bandori.party/activity/54276/Happy-Pride-Month-Here-s-Tsugumi-wearing-some-Bisexual-pride-colors-Hope-everyone-can-enjoy/',
'url': u'https://i.bandori.party/u/activities/0cUmlZ7b4aDpxCyE2nZsgPtTSRJPzW.png',
},
{
'about_url': u'https://bandori.party/activity/54267/Happy-pride-month-Here-is-edit-of-my-favorite-card-of-PAREO-I-have-never-done-an-edit-before-but/',
'url': u'https://i.bandori.party/u/activities/h1GrUghPXboQVhj65ksCiodd3aZpoC.jpg',
},
{
'about_url': u'https://bandori.party/activity/54266/These-outfits-are-just-so-perfect-bandori-knows-what-s-up-lol-Also-I-saw-the-art-of-Tae-and/',
'url': u'https://i.bandori.party/u/activities/9HIuyDbuBM3hzwPfcYXKVYRB4yinvj.jpeg',
},
{
'about_url': u'https://bandori.party/activity/54265/2-pride-edits-in-one-day-It-s-more-likely-than-you-think-Tae-wearing-some-demigirl-pride-colors/',
'url': u'https://i.bandori.party/u/activities/Ww8aU0WrQurs3RZd4cTSJRXqx8r1HG.png',
},
{
'about_url': u'https://bandori.party/activity/54264/Sayo-wearing-some-Aromantic-Pride-Happy-Pride-Month/',
'url': u'https://i.bandori.party/u/activities/YzBDwoZBgSQGFTXI5gtar3i7rmcFnu.png',
},
{
'about_url': u'https://bandori.party/activity/54263/idk-how-this-website-works-but-i-made-a-few-pride-edits-like-earlier-and-found-out-there-s-an-event/',
'foreground_url': u'https://i.bandori.party/u/activities/QwukoClpMoWRA5Rv0CVnkbXAUmyYF6.png',
},
{
'about_url': u'https://bandori.party/activity/54262/EXPLAIN-BANDORI-PARTY-You-promised-that-all-participants-of-the-pride-ready-event-would-get-a/',
'foreground_url': u'https://i.bandori.party/u/activities/GIBj5bQ0Ssn1YIDsEUoCUINTewdEeV.PNG',
},
{
'about_url': u'https://bandori.party/activity/54261/Во-чё-наделала-всех-с-гордым-месяцом/',
'url': u'https://i.bandori.party/u/activities/CEHMForphaZdsn6jLnXgAUHTYxW6gE.png',
},
{
'about_url': u'https://bandori.party/activity/54260/panmoca-i-imgur-com-126An7w-png-what-do-you-mean-this-isn-t-the-original-card/',
'url': u'https://camo.githubusercontent.com/7e403422fabe942be20fe00ce226b30e48e27bb6b54bbd092c1fa0b9b7be2031/68747470733a2f2f692e696d6775722e636f6d2f313236416e37772e706e67',
},
{
'about_url': u'https://bandori.party/activity/54257/Aw-yeah-it-s-Tomoe-wooo-I-had-to-get-a-little-creative-with-this-one-because-I-m-starting-to/',
'url': u'https://i.bandori.party/u/activities/E7WUHObpBmpWHbFlo9WL03r8TiSb7y.jpeg',
},
{
'about_url': u'https://bandori.party/activity/54256/I-didn-t-even-have-to-do-anything-It-already-shows-transgender-PAREO-she-already-has-all-the/',
'foreground_url': u'https://i.bandori.party/u/activities/2bf8HredGjg3HKFziE7uDUFO252T0K.png',
},
{
'about_url': u'https://bandori.party/activity/54255/HAPPY-PRIDE-MONTH-It-might-not-rlly-be-easy-to-see-bc-my-mom-can-see-my-photo-gallery-and-idk-if/',
'url': u'https://i.bandori.party/u/activities/9HSjEfR1bqmmny69I7UgrZ18d2IoDq.png',
},
{
'about_url': u'https://bandori.party/activity/54254/Lesbian-chisato-My-headcanon-is-she-is-married-with-kanon/',
'url': u'https://i.bandori.party/u/activities/EoRo0pkVEA0ksPAxcvXE7u4nRKse9E.png',
},
{
'about_url': u'https://bandori.party/activity/54253/I-retried-my-pride-month-thing/',
'url': u'https://i.bandori.party/u/activities/p7rS4d0hu8y2C70vTIvbVrZhY4myek.jpg',
},
{
'about_url': u'https://bandori.party/activity/54251/Nanami-for-the-win-lol-I-ve-started-to-mix-up-the-way-that-I-edit-the-cards-because-if-I-add/',
'url': u'https://i.bandori.party/u/activities/lG0RgQreJS0sZyz0pRK6DKicz5yzxU.jpeg',
},
{
'about_url': u'https://bandori.party/activity/54250/Choose-your-fighter-flag-sorry-Hagumi-for-hiding-your-face/',
'url': u'https://i.bandori.party/u/activities/iKCGHl5eeRG0aZRqjTEayf33vyzMRJ.png',
},
{
'about_url': u'https://bandori.party/activity/54244/This-will-be-the-last-edit-I-ll-make-for-the-event-And-now-I-introduce-you-Bi-Himari/',
'url': u'https://i.bandori.party/u/activities/okC4UyauYRP8r2Af54xL15cTFuzOJU.jpg',
},
{
'about_url': u'https://bandori.party/activity/54241/extremely-lazy-himari-edit-bc-i-hc-her-as-a-raging-bisexual/',
'url': u'https://i.bandori.party/u/activities/lNy9nSJYNKWLVoq42BHD4qCWJHhb7p.png',
},
{
'about_url': u'https://bandori.party/activity/54237/Happy-pride-month-3/',
'url': u'https://i.bandori.party/u/activities/x4FHdP4Y2v5UYSaYXdpqTgcHVVrihA.jpg',
},
{
'about_url': u'https://bandori.party/activity/54235/heyo-here-s-a-uh-proper-pride-post/',
'url': u'https://camo.githubusercontent.com/e9c573b49c411ba3c8ec00c7be2d7f980c9a3681ffd53a06cb5a805cd9a28c3d/68747470733a2f2f692e62616e646f72692e70617274792f752f616374697669746965732f325466646454374d375650683964623263726a70664757534f41324b71592e706e67',
},
{
'about_url': u'https://bandori.party/activity/54232/Edits-go-brrrrrrrrrrrrrr-That-s-all-I-have-to-say-at-this-point-lol-I-ve-run-out-of-comments/',
'url': u'https://i.bandori.party/u/activities/o3urAbKF1pnpuxcjMtWtPTwiWLPE0K.jpeg',
},
{
'about_url': u'https://bandori.party/activity/54230/Pride-supporting-idol-Maruyama-Aya-desu-彡-Happy-Pride-Month/',
'url': u'https://i.bandori.party/u/activities/hXxq7iXo3vMB9rsx3KP5kdaMaVsreF.png',
},
{
'about_url': u'https://bandori.party/activity/54229/Happy-Pride-Month-Here-s-my-submission-for-the-PRIDE-Ready-event-It-doesn-t-look-the-best/',
'url': u'https://i.bandori.party/u/activities/7Mx2t8UvipgppIrtLmj9pb1L5PmA1f.jpg',
},
{
'about_url': u'https://bandori.party/activity/54220/Moreeee-head-canons-Wahahaha-I-know-that-it-s-kinda-stupid-that-I-put-a-watermark-on-the/',
'url': u'https://i.bandori.party/u/activities/dyOLWaitqlc6my9S8Ysd6hVaYoh79g.jpeg',
},
{
'about_url': u'https://bandori.party/activity/54214/hi-everyone-this-is-my-first-post-here-i-m-new-to-bandori-i-absolutely-love-pareo/',
'url': u'https://i.bandori.party/u/activities/m8uofRvvMQxkpn7O0Ker2jkWQ7KRlt.png',
},
{
'about_url': u'https://bandori.party/activity/54213/Happy-Pride-Month-Here-we-have-Maya-sporting-some-genderfluid-pride-colors-More-coming-soon/',
'url': u'https://i.bandori.party/u/activities/bLVITTQpq36WBpzrPWRVGBxZZVGiLa.png',
},
{
'about_url': u'https://bandori.party/activity/54211/my-friend-saw-this-event-and-wanted-to-do-an-edit-so-here-it-is-a-bi-Rinko-edit/',
'url': u'https://i.bandori.party/u/activities/PPGnm2tUj4k5TnNUyGOMQqKJoPODSK.png',
},
{
'about_url': u'https://bandori.party/activity/54209/Another-edit-for-Pride-month-and-this-time-it-s-Pareo-Yes-I-used-one-of-her-2-star-cards-and/',
'url': u'https://i.bandori.party/u/activities/tizAqDAZnucoaNRAJTFthsykfsQiwo.png',
},
{
'about_url': u'https://bandori.party/activity/54203/This-edit-s-a-little-more-lazy-than-the-last-one-but-enjoy-I-added-her-signature-to-make/',
'url': u'https://i.bandori.party/u/activities/u7HlP7hsY2gzyJ3DiCTFSwARwvnett.jpeg',
},
{
'about_url': u'https://bandori.party/activity/54192/unlabeled-bride-maya-that-i-kinda-made-for-my-partner-lmao/',
'url': u'https://i.bandori.party/u/activities/vVTOJLxaQrySebTedRwQcZeHGrXBIV.png',
},
{
'about_url': u'https://bandori.party/activity/54191/HAPPY-PRIDE-MONTHHHH-I-grant-you-all-this-years-edit-PANSEXUAL-MOCA-AOBA/',
'url': u'https://i.bandori.party/u/activities/JWOWX2oBRWTWxr4VTpr6fBefsGUDxc.png',
},
{
'about_url': u'https://bandori.party/activity/54190/I-never-edited-something-before-so-it-might-look-ugly-Welp-practice-makes-perfect/',
'url': u'https://i.bandori.party/u/activities/KckyzeYH44yBcy5Ut95PEgP8gIasGY.png',
},
{
'about_url': u'https://bandori.party/activity/54189/I-heard-pride-ready-2022-I-had-to-do-the-sequel-to-bi-Ako-coming-out-and-what-I-originally/',
'url': u'https://i.bandori.party/u/activities/SfF9OsFsNQo3jWHOZT0vBaL1PUI4VU.png',
},
{
'about_url': u'https://bandori.party/activity/54186/she-is-so-adorable/',
'url': u'https://i.bandori.party/u/activities/KUauSQd70jrDdTbaot6GDlgMzngB4Y.png',
},
{
'about_url': u'https://bandori.party/activity/54185/I-once-again-do-the-pride-card-edits-but-this-time/',
'url': u'https://i.bandori.party/u/activities/NUCFZtP7IXTuhZvON17YTQrDebCkJB.jpeg',
},
{
'about_url': u'https://bandori.party/activity/54184/Happy-pride-month-everyone/',
'url': u'https://i.bandori.party/u/activities/NQAYnMmlS1BmuHIMSUXAyeh9SqLIs8.png',
},
{
'about_url': u'https://bandori.party/activity/54178/Happy-Pride-Month/',
'url': u'https://i.bandori.party/u/activities/7KIyL6SwvrQZfgUOig13AJscVFUo6Z.png',
},
{
'about_url': u'https://bandori.party/activity/54175/Second-edit-of-the-month-Mwahaha-I-think-that-even-though-the-contest-ends-on-the-tenth/',
'url': u'https://i.bandori.party/u/activities/P5xl3r5D7D4VbcAMsGJq3OIsOfLjtS.jpeg',
},
{
'about_url': u'https://bandori.party/activity/54174/idc-if-im-one-day-late-BUT-HAPPY-PRIDE-MONTH-GAYSS-Anyways-haruhapi-best-lesbians/',
'url': u'https://i.bandori.party/u/activities/P4PHS3yx8fdRhIrxCRYulYmgto576C.jpg',
},
{
'about_url': u'https://bandori.party/activity/54173/BanG-Dream-but-gayer/',
'url': u'https://i.bandori.party/u/activities/UhwUTQbpZGQdUATDuP4bTLALbWzQsn.png',
},
{
'about_url': u'https://bandori.party/activity/54171/i-transed-pareo/',
'url': u'https://i.bandori.party/u/activities/oEE9QBJ1FBQKIE0Fh6B7IPR9QvDjAX.png',
},
{
'about_url': u'https://bandori.party/activity/54165/shooba-hooba-here-s-Ummm-more-flag-colour-picks-hoo-hoo-kanon-trans-lesbian/',
'url': u'https://camo.githubusercontent.com/369b7be7203a95a4027222521c2400efc03907ac0cd0749c21f08ca88ff2c0a3/68747470733a2f2f692e62616e646f72692e70617274792f752f616374697669746965732f7a6a53616534443571327936615774476f4e3130656743384564693146482e706e67',
},
{
'about_url': u'https://bandori.party/activity/54165/shooba-hooba-here-s-Ummm-more-flag-colour-picks-hoo-hoo-kanon-trans-lesbian/',
'url': u'https://camo.githubusercontent.com/38637cebd206835deb74af542b0abe8456a2e0d473db827c99012460e5eb7dd7/68747470733a2f2f692e62616e646f72692e70617274792f752f616374697669746965732f69714861324c7968356542377a4f354d7746786d653271703739496364352e706e67',
},
{
'about_url': u'https://bandori.party/activity/54165/shooba-hooba-here-s-Ummm-more-flag-colour-picks-hoo-hoo-kanon-trans-lesbian/',
'url': u'https://camo.githubusercontent.com/dc4e48f64aca81feb969bdef0a630650d154c2eba7c1582e9cf701db0859e89b/68747470733a2f2f692e62616e646f72692e70617274792f752f616374697669746965732f6c743951493436704a67656f686b454c38394969586f6f4c5136756847562e706e67',
},
{
'about_url': u'https://bandori.party/activity/54161/agender-lesbian-moca-edit/',
'url': u'https://i.bandori.party/u/activities/eipY7cSYNl2D0mwUGT2SPSM6rnyN1M.png',
},
{
'about_url': u'https://bandori.party/activity/54160/nb-chu2-edit/',
'url': u'https://i.bandori.party/u/activities/htK2EhDWDnNLhEKuEJvKDZjHX4zVe5.png',
},
{
'about_url': u'https://bandori.party/activity/54154/IIIIIIIIT-S-PRIDE-MONTH-WOOOO-I-ve-had-this-edit-in-my-photos-for-over-a-little-while/',
'url': u'https://i.bandori.party/u/activities/hKCvKRGmqUKIxXClDNs1i4dhuBbRaB.jpeg',
},
{
'about_url': u'https://bandori.party/activity/54153/hi-banpa-hope-you-all-behave-this-month-here-is-the-nonbinary-lesbian-flags-colourpicked/',
'url': u'https://i.bandori.party/u/activities/KxvIGgIHKscmfqchcdGkumexEEuz0e.png',
},
{
'about_url': u'https://bandori.party/activity/54152/Here-s-a-WIP-of-my-Lesbian-Kaoru-edit-Kaoru-is-one-of-my-favorite-characters-and-Kaoru-is-heavily/',
'url': u'https://i.bandori.party/u/activities/0L88Fixa6f5HWgy3m7pX1xSoEwHXzk.png',
},
{
'about_url': u'https://bandori.party/activity/54144/Most-colorful-month-of-the-year-let-s-gooooo/',
'url': u'https://i.bandori.party/u/activities/G8NQcgoyyyxqQdu1d8bmewvQLXfhBK.png',
},
{
'about_url': u'https://bandori.party/activity/54143/It-s-that-time-again-Take-this-quick-edit-of-Rinko-And-yes-I-headcanon-her-as-asexual/',
'url': u'https://i.bandori.party/u/activities/wEskHoZuXVGhMs8MWT0ERnC2EevvkI.jpg',
},
{
'about_url': u'https://bandori.party/activity/54141/lil-maya-edit-for-pride-month-not-only-do-i-hc-her-as-a-lesbian-i-also-see-her-as-a-trans-girl-3/',
'url': u'https://i.bandori.party/u/activities/DilX2VNJ27VJMtk4BNwN3UWPC8SsRL.png',
},
{
'about_url': u'https://bandori.party/activity/56544/Last-one-Here-s-Mashiro-wearing-some-genderqueer-pride-colors-Happy-Pride-Month/',
'url': u'https://i.bandori.party/u/activities/3R0dZrC9NS8v4TTIom1mBahpIdMzp2.png'
},
{
'about_url': u'https://bandori.party/activity/56482/PRIDE-IS-HERE-here-s-some-food-enjoy/',
'url': u'https://i.bandori.party/u/activities/dhs1wZBeDyNwfPhGV6wkztfyEksIQw.jpeg'
},
{
'about_url': u'https://bandori.party/activity/56485//',
'url': u'https://64.media.tumblr.com/0c1e85f42de7a16d60641dd32c1e55d5/b89b246dbdb5a801-61/s2048x3072/d4dd0beb09ecf4b3c1200dac5a9910a0335e6eae.png'
},
{
'about_url': u'https://bandori.party/activity/56508/for-my-damn-near-yearly-pansexual-edit-for-myself-my-boyfriend-chose-saayas-new-dremfes-card-for-me/',
'url': u'https://i.bandori.party/u/activities/kihixQHEn2AxuHx74L1nII3mfrfGRE.png'
},
{
'about_url': u'https://bandori.party/activity/56490/Hello-Happy-Pride/',
'url': u'https://i.bandori.party/u/activities/5XaIXjkx63ULRsVIruQjSmHLPBwEtd.png'
},
{
'about_url': u'https://bandori.party/activity/56503/ACE-CHU2-FTW-no-grey-heart-emoji-smh-I-m-gonna-try-to-post-edits-of-my-head/',
'url': u'https://i.bandori.party/u/activities/GrOYWw16mnLhVBG1OVgLOHNVk2YxLP.jpeg'
},
{
'about_url': u'https://bandori.party/activity/56492/Happy-Pride-Month-Here-we-have-Ako-wearing-some-trans-pride-colors-More-coming-soon/',
'url': u'https://i.bandori.party/u/activities/gxBLjmGWI7TUW5LO9VxB7AadeUwZvm.png'
},
{
'about_url': u'https://bandori.party/activity/56527/I-M-BACK-So-I-heard-there-s-another-Pride-Ready-event-so-I-might-as-well-participate/',
'url': u'https://i.bandori.party/u/activities/abVQw1PowqIzt21DK2DSD9nHoc2x1D.jpg'
},
{
'about_url': u'https://bandori.party/activity/56529/PAREO/',
'url': u'https://i.bandori.party/u/activities/6CsicLHwegt7y4zUiJsPzrshK4e9AN.png'
},
{
'about_url': u'https://bandori.party/activity/56531/Kokoro-wearing-mlm-flag-colours/',
'url': u'https://i.bandori.party/u/activities/oYzKNA5ApCWjuzKRHitOVx8ma5V1og.png'
},
{
'about_url': u'https://bandori.party/activity/56533/color-red-Pride-color-color-orange-Kokoro-color-color-yellow-Happy-color/',
'url': u'https://i.bandori.party/u/activities/mvKgUbi1ulXFHWzdjsgtMJaawZeFhA.png'
},
{
'about_url': u'https://bandori.party/activity/56505/a-i-imgur-com-ppuwhr4-png-I-am-once-again-spreading-the-non-binary-Moca-agenda/',
'url': u'https://i.imgur.com/ppuwhr4.png'
},
{
'about_url': u'https://bandori.party/activity/56504/HAPPY-PRIDE-MONTH-Kanon-pride-edit-bc-I-love-this-card-sm/',
'url': u'https://i.bandori.party/u/activities/IMqvgp755rF8VihkbpMqvAzhPYaLPC.png'
},
{
'about_url': u'https://bandori.party/activity/56537/Happy-pride-month-I-like-Kasumi-and-I-made-this/',
'url': u'https://i.bandori.party/u/activities/s1cSDKB1d9S7v5MyBxbLbNhDqWlyDs.png'
},
{
'about_url': u'https://bandori.party/activity/56506/HAPPY-PRIDE-THIS-YEAR-I-GRANT-THE-GIFT-OF-DEMIGIRL-HINA-more-to-come/',
'url': u'https://i.bandori.party/u/activities/LgPPRbjYZoDMLOP1dYblj99ObKVYlF.png'
},
{
'about_url': u'https://bandori.party/activity/56495/WE-FINALLY-HAVE-A-NEW-PRIDE-EVENTTTTTTT-I-decided-to-continue-the-Ako-lore-once-again/',
'url': u'https://i.bandori.party/u/activities/fhCldRqd0qZNILRvxhQ5yi5k06ZuPP.png'
},
{
'about_url': u'https://bandori.party/activity/56540/Happy-Rainbow-Month-Lemme-jump-on-the-bandwagon-It-s-Kan-on-and-T-sato/',
'url': u'https://i.bandori.party/u/activities/iM9GhXDO2QpUFDjS54jFGwDhEfWm2f.png'
},
{
'about_url': u'https://bandori.party/activity/56542/Sorry-to-spam-but-I-made-a-whole-lot-of-edits-during-school-this-week-Here-s-Eve-wearing-lesbian/',
'url': u'https://i.bandori.party/u/activities/KVVWC9MWYdtg7yrTsWnOzU4uiHw8zP.png'
},
{
'about_url': u'https://bandori.party/activity/56543/Pareo-is-wearing-some-agender-pride-colors-in-this-look-Happy-Pride-Month-Again-sorry-to-spam/',
'url': u'https://i.bandori.party/u/activities/TY4LjZF88m0DNg294uaNR7D2ZKX6e9.png'
},
]
def getPrideArts():
print 'called?'
return PRIDE_ARTS
def getRandomPrideArt():
return random.choice(PRIDE_ARTS)
|
[
"[email protected]"
] | |
fe568ab2ae061bd22fdba789143115d98757ce79
|
9fb1c85a6d39c08e2a3cc235335bc482ad909b71
|
/prowler/providers/aws/services/apigateway/apigateway_endpoint_public/apigateway_endpoint_public.py
|
dd48255b14d584610fe0c0b2327022a71b29afe2
|
[
"Apache-2.0"
] |
permissive
|
muharihar/prowler
|
06dbdeaa0696dd65d72c33ff3c9f957b97f83d7a
|
25c9bc07b219cc02004cc0b84adcfdcf18d5ad2b
|
refs/heads/master
| 2023-02-18T01:26:54.161003 | 2023-02-10T11:38:13 | 2023-02-10T11:38:13 | 238,623,868 | 0 | 0 | null | 2020-02-06T06:36:36 | 2020-02-06T06:36:35 | null |
UTF-8
|
Python
| false | false | 945 |
py
|
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.apigateway.apigateway_client import (
apigateway_client,
)
class apigateway_endpoint_public(Check):
def execute(self):
findings = []
for rest_api in apigateway_client.rest_apis:
report = Check_Report_AWS(self.metadata())
report.region = rest_api.region
report.resource_id = rest_api.name
report.resource_arn = rest_api.arn
if rest_api.public_endpoint:
report.status = "FAIL"
                report.status_extended = f"API Gateway {rest_api.name} ID {rest_api.id} is internet accessible."
else:
report.status = "PASS"
report.status_extended = (
f"API Gateway {rest_api.name} ID {rest_api.id} is private."
)
findings.append(report)
return findings
|
[
"[email protected]"
] | |
af8af055ae6e1ed2806b5c3f803bf49f74269d9c
|
a86293a2033c06410aa8ed19bcbce8ca55ea3c55
|
/src/client_libraries/python/dynamics/customerinsights/api/models/hierarchy_dependency.py
|
fb99c2e6f2cd0cec933998fef17969965b30fc13
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
ramotheonly/Dynamics365-CustomerInsights-Client-Libraries
|
a3ca28aa78d2b5509e65d9895ff4a0d42d05f611
|
e00632f7972717b03e0fb1a9e2667e8f9444a0fe
|
refs/heads/main
| 2023-08-02T08:09:04.063030 | 2021-09-28T22:42:15 | 2021-09-28T22:42:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,346 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class HierarchyDependency(Model):
"""Represents metadata for a Hierarchy Dependency.
:param source_entity: Gets the source entities fully qualified name.
:type source_entity: str
:param account_id_attribute: Gets entity account Id.
:type account_id_attribute: str
:param parent_account_id_attribute: Gets parent account id.
:type parent_account_id_attribute: str
"""
_attribute_map = {
'source_entity': {'key': 'sourceEntity', 'type': 'str'},
'account_id_attribute': {'key': 'accountIdAttribute', 'type': 'str'},
'parent_account_id_attribute': {'key': 'parentAccountIdAttribute', 'type': 'str'},
}
def __init__(self, **kwargs):
super(HierarchyDependency, self).__init__(**kwargs)
self.source_entity = kwargs.get('source_entity', None)
self.account_id_attribute = kwargs.get('account_id_attribute', None)
self.parent_account_id_attribute = kwargs.get('parent_account_id_attribute', None)
|
[
"[email protected]"
] | |
a6ce5cb881f64be94cf0f5da6e0511a2849f7d7e
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03986/s472536811.py
|
0e513eaef16d85aa0b34b6fd2545952c6d79a389
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 283 |
py
|
x = input()
cs = 0
ct = 0
prev = ""
n = len(x)
cnt = 0
for i in range(n):
if x[i] =="S":
cs+=1
elif x[i] =="T":
if cs>0:
ct+=1
else:
cs = 0
ct = 0
if cs>0 and ct>0:
cnt+=1
cs-=1
ct-=1
print(n-cnt*2)
|
[
"[email protected]"
] | |
6e3be21352074c547520d6711374fad2530e1908
|
63481ad34cca1a90c09819dd5f65fae490e59240
|
/AddColumn.py
|
7be5e9fd1172002b875c15d78d4ed93c2b519ecc
|
[] |
no_license
|
icequeen5931/SmartSheets
|
8d6ca26e269c83986ba302648e9c532ebdd6970d
|
9c26269670f5b5679d08f9cb2bc765bb7753461c
|
refs/heads/master
| 2021-05-11T21:36:01.269525 | 2017-01-12T15:37:50 | 2017-01-12T15:37:50 | 117,471,837 | 0 | 1 | null | 2018-01-14T22:25:12 | 2018-01-14T22:25:12 | null |
UTF-8
|
Python
| false | false | 560 |
py
|
__author__ = 'jpisano'
import requests
import json
sheetid = '4816554870237060' # "test" Sheet ID
rowid = '4542989902079876' # row number 4
customer_col = '4113607471458180' # Customer name
url = 'https://api.smartsheet.com/2.0/sheets/' + sheetid + '/columns'
myheader = {'Authorization': 'Bearer 519zl07z3k1uef6rfjxqqm5630', 'Content-Type': 'application/json'}
response = requests.post (url,headers=myheader,json={"index": "5", "title": "my1stcol", "type": "TEXT_NUMBER"})
print (response.url)
print (response.content)
data = json.loads(response.text)
|
[
"[email protected]"
] | |
636ceb065354c55694cf707a2445c3403708f5a4
|
cccf8da8d41ae2c14f5f4313c1edcf03a27956bb
|
/python/python2latex/writeLTXparbox.py
|
10108a1f33c2e2ce95c5acb886bfbdd1ba13e27d
|
[] |
no_license
|
LucaDiStasio/transpilers
|
e8f8ac4d99be3b42a050148ca8fbc5d025b83290
|
c55d4f5240083ffd512f76cd1d39cff1016909b8
|
refs/heads/master
| 2021-01-12T01:57:00.540331 | 2017-11-01T13:59:55 | 2017-11-01T13:59:55 | 78,448,378 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,309 |
py
|
# Autogenerated with SMOP
from smop.core import *
#
@function
def writeLTXparbox(filepath=None,args=None,options=None,*args,**kwargs):
varargin = writeLTXparbox.varargin
nargin = writeLTXparbox.nargin
##
#==============================================================================
# Copyright (c) 2016-2017 Universite de Lorraine & Lulea tekniska universitet
# Author: Luca Di Stasio <[email protected]>
# <[email protected]>
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the distribution
# Neither the name of the Universite de Lorraine or Lulea tekniska universitet
# nor the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#==============================================================================
# DESCRIPTION
#
    # A function to create a LaTeX file.
    # Defines a box whose contents are created in paragraph mode. See Boxes.
##
fileId=fopen(filepath,'a')
fprintf(fileId,'\\n')
line='\\parbox'
if logical_not(strcmp(options,'none')) and logical_not(strcmp(options,'NONE')) and logical_not(strcmp(options,'None')):
line=strcat(line,'[',options,']')
if logical_not(isempty(args)):
line=strcat(line,'{')
for i in arange(1,length(args)).reshape(-1):
dims=size(args)
if dims[1] == 1 and dims[2] == 1:
line=strcat(line,args[i])
else:
if dims[1] > 1 and dims[2] == 1:
try:
line=strcat(line,args[i][1])
finally:
pass
else:
if dims[1] == 1 and dims[2] > 1:
try:
line=strcat(line,args[1][i])
finally:
pass
else:
line=strcat(line,args[i])
line=strcat(line,'}')
fprintf(fileId,strcat(line,'\\n'))
fclose(fileId)
return
|
[
"[email protected]"
] | |
82a1071af62dab8396e2f20057846ed7b6b6ca47
|
c6af5dcdb1a3cd9d20abdf50c5571836a1b76298
|
/servlets/login.py
|
54bbbd9d129286c5db8dc6fab7dddadd00a6e83b
|
[] |
no_license
|
mikelambert/dancedeets
|
82b1cb0c32b14485cd9cbbc051421d1cb7499830
|
8dd51007bb2faa56d835a149b60740141d472c25
|
refs/heads/master
| 2021-01-21T00:30:09.963623 | 2016-11-29T12:04:00 | 2016-11-29T12:04:00 | 42,857,923 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,058 |
py
|
#!/usr/bin/env python
import logging
import app
import base_servlet
from logic import mobile
from users import users
@app.route('/login')
class LoginHandler(base_servlet.BaseRequestHandler):
def requires_login(self):
return False
def is_login_page(self):
return True
# TODO(lambert): move this into the same base / handler, so we don't do stupid redirects to /login
def get(self):
next_url = self.request.get('next') or '/'
# If they're logged in, and have an account created, update and redirect
if self.fb_uid:
user = users.User.get_by_id(self.fb_uid)
if user and not user.expired_oauth_token:
self.redirect(next_url)
return
want_specific_page = (next_url != '/?')
if want_specific_page:
self.display['next'] = next_url
self.display['suppress_promos'] = True
logging.info(self.display['next'])
self.render_template('login_only')
return
# Treat them like a totally logged-out user since they have no user object yet
self.fb_uid = None
# Explicitly do not preload anything from facebook for this servlet
# self.finish_preload()
self.display['user_message'] = self.get_cookie('User-Message')
from util import country_dialing_codes
self.display['suppress_promos'] = True
self.display['country_codes'] = sorted(country_dialing_codes.mapping.items())
self.display['android_url'] = mobile.ANDROID_URL
self.display['ios_url'] = mobile.IOS_URL
self.display['prefix'] = ''
self.display['phone'] = '' # Set the default, and then let any errors-and-refilling occur on /mobile_apps
self.display['mobile_show_smartbanner'] = False
self.display['next'] = next_url
logging.info(self.display['next'])
if bool(self.request.get('nd', 1)):
self.render_template('new_homepage')
else:
self.render_template('login')
|
[
"[email protected]"
] | |
0031184772a691b823929ac81fe865d20d594792
|
d5b6b19ab192180ae1e04eff99a37f629e1feb10
|
/goods/sellgoods/salesquantity/local_util/sales_util.py
|
124a73cd88e88090953229c4709c3d6c998ff4ab
|
[] |
no_license
|
maxenergy/goodsdl2
|
9e88dd499fa4c6d536e4444839e7fbe549c7070a
|
42d0eb797e9710ca85d885e6b4d0ed97cbf88607
|
refs/heads/master
| 2022-12-05T12:34:18.061604 | 2020-09-03T04:28:29 | 2020-09-03T04:28:29 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,200 |
py
|
from set_config import config
from goods.sellgoods.salesquantity.utils import mysql_util
from goods.sellgoods.sql import sales_quantity
import time
ai = config.ai
def get_predict_sales(shop_ids):
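    # (Added explanatory note.) Builds today's date into the 'sales_ai' SQL template,
    # queries the AI database for the predicted sales of every shop in shop_ids, and
    # returns a nested dict keyed first by shop_id and then by upc, e.g. (shape only,
    # illustrative values): {1001: {'6901234567890': 3.0}, 1002: {...}}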
mysql_ins = mysql_util.MysqlUtil(ai)
sql = sales_quantity.sql_params["sales_ai"]
exe_time = str(time.strftime('%Y-%m-%d', time.localtime()))
exe_time = str("'"+exe_time+"'")
if len(shop_ids) == 1:
shop_ids = str("( "+str(shop_ids[0])+" )")
elif(len(shop_ids) > 1):
shop_ids = str(tuple(shop_ids))
sql = sql.format(shop_ids,exe_time)
print (sql)
results = mysql_ins.selectAll(sql)
shop_ids = []
upcs = []
predict_sales = []
for row in results:
shop_id = row[0]
upc = row[1]
predict_sale = row[2]
shop_ids.append(shop_id)
upcs.append(upc)
predict_sales.append(predict_sale)
shop_upc_sales = {}
for shop_id in list(set(shop_ids)):
upc_sales = {}
for shop_id1,upc,predict_sale in zip(shop_ids,upcs,predict_sales):
if shop_id == shop_id1:
upc_sales[upc] = predict_sale
shop_upc_sales[shop_id] = upc_sales
return shop_upc_sales
|
[
"[email protected]"
] | |
2e1d77bc5d60ab64e8e3ec36b17200fe8b53f725
|
e9e3169d354c840104595dcd660cd16d7d56f72e
|
/dz5_asp/task1.py
|
dbf086d9af3d3fb70c51908923f24d927c786782
|
[] |
no_license
|
davendiy/combinatorics_2course
|
f8d08c9164fa544662c86e254f3a7181928db3a1
|
b18618335812c3a185a94be8fbbc8f28fd2dea78
|
refs/heads/master
| 2020-04-05T09:58:37.904216 | 2019-01-11T21:23:02 | 2019-01-11T21:23:02 | 156,782,776 | 0 | 0 | null | 2018-12-06T22:18:22 | 2018-11-08T23:32:12 |
Python
|
UTF-8
|
Python
| false | false | 3,977 |
py
|
#!/usr/bin/env python3
# -*-encoding: utf-8-*-
# created: 07.12.18
# by David Zashkolny
# 2 course, comp math
# Taras Shevchenko National University of Kyiv
# email: [email protected]
"""The following implementation assumes that the activities
are already sorted according to their finish time
"""
import random
import time
import functools
def cache(func):
""" Decorator for save answers of any function
"""
results = {}
@functools.wraps(func)
    def __cache(*args): # wrapped function
        nonlocal results # if the function is called with arguments that were already used,
        if args in results.keys(): # then the answer is taken from the cache dictionary
# print("{} - got from cache".format(args))
rez = results[args]
else:
rez = func(*args)
results[args] = rez
return rez
return __cache
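# (Added illustration, not part of the original solution.) The decorator memoises on the
# tuple of positional arguments, e.g.:
#
#   @cache
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#
# so repeated calls with the same n are answered from `results` instead of recomputing.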
def recursive(s, f):
s = tuple([0] + s + [1000050000])
f = tuple([0] + f + [1000050000])
n = len(f)
return _recursive(s, f, 0, n-1)
@cache
def _recursive(func_s, func_f, i, j):
_max = 0
for k in range(i, j+1):
if func_f[i] <= func_s[k] < func_f[k] <= func_s[j]:
tmp_max = _recursive(func_s, func_f, i, k) + _recursive(func_s, func_f, k, j) + 1
if tmp_max > _max:
_max = tmp_max
return _max
def dynamic(s, f):
""" Dynamic solution of ASP problem. Using recurrent formula
from Kormen.
:param s: An array that contains start time of all activities
:param f: An array that contains finish time of all activities
:return: optimal sequence of indexes
"""
n = len(s)
    func_s = [0] + s + [10005000] # add fictive sentinel activities to both arrays
func_f = [0] + f + [10005000]
dp = [[0 for i in range(n+2)] for i in range(n+2)] # dp[i][j] is max activities from i to j
for i in range(n+2):
for j in range(n+2): # fills all positions in dynamic table
_max = 0
for k in range(i, j+1): # go through all activities that might be done between i-th and j-th
if func_f[i] <= func_s[k] < func_f[k] <= func_s[j]:
tmp_max = dp[i][k] + dp[k][j] + 1 # find maximum
if tmp_max > _max:
_max = tmp_max
dp[i][j] = _max
return dp[0][n+1]
def printMaxActivities(s, f):
"""Prints a maximum set of activities that can be done by a
single person, one at a time
:param s: An array that contains start time of all activities
:param f: An array that contains finish time of all activities
    :return: None (the selected activity indexes are printed to stdout)
"""
n = len(f)
print("The following activities are selected")
# The first activity is always selected
i = 0
print(i, end=' ')
# Consider rest of the activities
for j in range(1, n):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if s[j] >= f[i]:
print(j, end=' ')
i = j
# Driver program to test above functions
if __name__ == '__main__':
test_s = []
test_f = []
test = []
N = 1000
for count in range(N):
tmp_s = random.randrange(1, N)
tmp_f = random.randrange(tmp_s+1, N+1)
test.append((tmp_f, tmp_s))
test.sort()
for el in test:
test_s.append(el[1])
test_f.append(el[0])
print(test_s)
print(test_f)
print(f"n == {N}")
print('\n=====by greedy=====')
t = time.time()
print('result:')
printMaxActivities(test_s, test_f)
print('\ntime elapsed: {}'.format(time.time() - t))
print('\n=====by dynamic=====')
t = time.time()
print('result:\n{}'.format(dynamic(test_s, test_f)))
print('time elapsed: {}'.format(time.time() - t))
# print('\n===by recursive===')
# print(recursive(test_s, test_f))
|
[
"[email protected]"
] | |
9fea3d155012b6af9e4d7879b888a09a41598709
|
1b496449867e60fcec110d1f0d6b7bc0bc8cddf6
|
/mydeploy/myapp/setup.py
|
35cc2459c6856afa68137edf4f9b170e088b2e91
|
[] |
no_license
|
sebbekarlsson/tarship
|
f4cca6dc27174fc0d31ee3ceb8ba2a8864070e42
|
c7b57472a3e0146d38260c3607473914750e1ffd
|
refs/heads/master
| 2020-03-28T21:37:40.004937 | 2018-12-06T12:02:33 | 2018-12-06T12:02:33 | 149,170,620 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 167 |
py
|
from setuptools import setup, find_packages
setup(
name='myapp',
version='1.0',
install_requires=[
'flask'
],
packages=find_packages()
)
|
[
"[email protected]"
] | |
064352bb445eb7efa4572729fcfb9c94567bea19
|
c57b42b6e8b38ac6cf575787502b03290887f470
|
/grids/boundary_condition_helper.py
|
0ade90eb06b982b7f1cb2e767ac429a385409940
|
[] |
no_license
|
leifdenby/pysolver
|
790643d242e1726846303b4561d326842535ab72
|
fceb9d82c08f4911e2beceb1189d3766d0c80675
|
refs/heads/master
| 2022-12-11T12:21:37.704152 | 2012-12-04T15:38:30 | 2012-12-04T15:38:30 | 294,418,150 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,810 |
py
|
import numpy as np
import matplotlib.pyplot as plot
from grids import boundary_conditions as BCs
"""
This module contains helpers to easily construct the strings needed for setting boundary conditions.
The main work is done by the 'make_bc_string' method, which may then be used for higher-level
operations, see e.g. 'make_2d_all_periodic_bc_string' as an example.
"""
def make_2d_all_transmissive_bc_string(var_name, boundary_thickness):
s = []
for t in range(1,boundary_thickness+1):
for (i, j) in [(1,0), (-1,0), (0,-1), (0,1)]:
n = i != 0 and -i or -j
s.append(make_bc_string(n, (i,j), t, var_name=var_name))
return "\n".join(s)
def make_2d_all_periodic_bc_string(var_name, boundary_thickness):
s = []
for t in range(1,boundary_thickness+1):
for (i, j) in [(1,0), (-1,0), (0,-1), (0,1)]:
n = i != 0 and i or j
s.append(make_bc_string(n, (i,j), t, var_name=var_name))
return "\n".join(s)
def make_1d_fixed_gradient_bc(normal, edge_pos, boundary_thickness, var_name, gradient_times_spacing):
if gradient_times_spacing != 0.0 and boundary_thickness > 1:
raise Exception("Error: None-zero gradient has not been implemented for boundaries thicker than one cell")
else:
return make_bc_string(normal=normal, edge_pos=edge_pos, boundary_thickness=boundary_thickness, var_name=var_name, coefficients = (-gradient_times_spacing, None))
def make_bc_string(normal, edge_pos, boundary_thickness, apply_to_edges = False, var_name="Q", coefficients = (None, None)):
"""
    Constructs the strings for correctly indexing an array of arbitrary
    dimension (though you likely won't need more than 3 dimensions).
    The following arguments are passed to 'make_bc_slices' internally; the
    documentation is reproduced here:
    The edge_pos is a tuple describing the position of the edge we are
    currently working on. E.g. for a 2D domain (-1,0) would be the
    leftmost edge, if x is in the horizontal direction, and (0,1) would be
    the topmost edge.
    The normal direction is expected to be either 1 or -1, pointing in
    the direction of the axis that the position vector defines.
    Depending on the relative sign of the position and the normal, the
    indexing will wrap around (useful for creating cyclic boundary
    conditions).
    boundary_thickness is fairly self-explanatory.
It may sometimes be the case that the boundary conditions should be
applied all the way to the domain edge, e.g. for a moving wall
boundary condition. This can be set by 'apply_to_edges'.
Optionally the variable name can be set through 'var_name' and
coefficients used in constructing the string may be passed in as a
two-tuple as 'coefficients'. They will be combined in the strings to
give e.g. 'Q[1:-2,-1,1:-2] = a +(b)*Q[1:-2,-2,1:-2]'
"""
if normal != 0:
(a, b) = (coefficients[0] and "%s +" % str(coefficients[0]) or "", coefficients[1] and "(%s)*" % str(coefficients[1]) or "")
elif normal == 0:
a = str(coefficients[0])
slices = make_bc_slices(normal, edge_pos, boundary_thickness, apply_to_edges)
def slice_to_str(slice):
if slice.start is not None and slice.stop is not None:
return "%d:%d" % (slice.start, slice.stop)
elif slice.stop is not None:
return str(slice.stop)
else:
return ":"
s = []
for bc_slice in slices:
ghost_slice_str = ",".join([slice_to_str(ss) for ss in bc_slice['i_ghost']])
if normal != 0:
internal_slice_str = ",".join([slice_to_str(ss) for ss in bc_slice['i_internal']])
s.append("%s[%s] = %s%s%s[%s]" % (var_name, ghost_slice_str, a, b, var_name, internal_slice_str))
elif normal == 0:
s.append("%s[%s] = %s" % (var_name, ghost_slice_str, a))
return "\n".join(s)
def make_bc_slices(normal, edge_pos, boundary_thickness, apply_to_edges = False):
"""
Constructs boundary condition slices for correctly indexing an array of
    arbitrary dimension (though you likely won't need more than 3
    dimensions).
    The edge_pos is a tuple describing the position of the edge we are
    currently working on. E.g. for a 2D domain (-1,0) would be the leftmost
    edge, if x is in the horizontal direction, and (0,1) would be the topmost
    edge.
    The normal direction is expected to be either 1 or -1, pointing in the
    direction of the axis that the position vector defines. Depending on the
    relative sign of the position and the normal, the indexing will wrap
    around (useful for creating cyclic boundary conditions).
    boundary_thickness is fairly self-explanatory.
It may sometimes be the case that the boundary conditions should be
applied all the way to the domain edge, e.g. for a moving wall boundary
condition. This can be set by 'apply_to_edges'.
"""
def get_ranges(pos, n = 0):
def l(p):
            # p is either 0, in which case it represents that the range of indices for this direction is requested,
            # or p is non-zero, in which case it represents the distance from the boundary for which we are requesting an index
if p != 0:
# if the normal direction and the position have the same sign then we need to do some wrapping
if n != 0:
# requesting the index of the cell from which data is taken
wrap = n * p > 0
if wrap:
# indexing must wrap-around the end of the domain, used for cyclic BCs
if p < 0:
return p-boundary_thickness
elif p > 0:
return p+boundary_thickness-1
else:
if p < 0:
return boundary_thickness-p-1
elif p > 0:
return -p-boundary_thickness
else:
# just requesting the index of the boundary cell
if p > 0:
return -boundary_thickness-1+p
elif p < 0:
return boundary_thickness+p
else:
# not requesting the index for a single cell row, return ranges of cells in the plane of the boundary
if apply_to_edges:
return slice(None)
else:
return slice(boundary_thickness, -boundary_thickness)
return map(lambda p: l(p), pos)
if list(edge_pos).count(0) != len(edge_pos)-1:
raise Exception("Only one of the position indexes should be non-zero")
if abs(normal) != 1 and normal != 0:
raise Exception("The normal should be either 1, -1 or 0")
if not (-1 in edge_pos or 1 in edge_pos):
raise Exception("The edge position should be a tuple of either -1, 0 or 1, e.g. (-1,0) for the leftmost boundary in 2D")
s = []
for r in range(1,boundary_thickness+1):
# create a local position vector which represents the relative distance between the ghost cells we are currently
# interested in and the boundary
pos = map(lambda t: t == 0 and t or t*r, edge_pos)
if normal != 0:
s.append({'i_ghost':get_ranges(pos), 'i_internal':get_ranges(pos, normal), 'row':r-1})
elif normal == 0:
s.append({'i_ghost':get_ranges(pos), 'i_internal':None, 'row':r-1})
return s
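# Illustrative example (not part of the original module): for the topmost edge
# of a 2D domain with one ghost cell and the normal pointing into the domain,
#   make_bc_slices(normal=-1, edge_pos=(0, 1), boundary_thickness=1)
# returns a single entry equivalent to
#   {'i_ghost': [slice(1, -1), -1], 'i_internal': [slice(1, -1), -2], 'row': 0}
# i.e. the last row of cells is filled from the last interior row.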
def applyCellCenteredBCs(Q, all_boundary_conditions, grid, num_ghost_cells = None):
if num_ghost_cells is None:
num_ghost_cells = grid.num_ghost_cells
for component, boundary_conditions in all_boundary_conditions.items():
c = component
for i, bc in enumerate(boundary_conditions):
            axis = i // 2  # two boundary conditions (low/high side) per axis
side = -1 if i % 2 == 0 else 1
if isinstance(bc, BCs.Neumann) or isinstance(bc, BCs.Dirichlet):
# normal is facing in
normal = side * -1
elif isinstance(bc, BCs.Periodic):
normal = side
else:
raise Exception("Primitive boundary condition type not understood")
edge_pos = grid.edges[i]
if isinstance(bc, BCs.MovingWall):
apply_to_edges = True
else:
apply_to_edges = False
slices = make_bc_slices(normal=normal, edge_pos=edge_pos,
boundary_thickness=num_ghost_cells, apply_to_edges=apply_to_edges)
if isinstance(bc, BCs.Periodic):
for bc_slice in slices:
Q[...,c][bc_slice['i_ghost']] = Q[...,c][bc_slice['i_internal']]
elif isinstance(bc, BCs.Neumann):
for bc_slice in slices:
row = bc_slice['row'] # distance from interface
dx = grid.getGridSpacing()[axis]
Q[...,c][bc_slice['i_ghost']] = Q[...,c][bc_slice['i_internal']] - (row*2 + 1)*dx*bc.slope*normal
elif isinstance(bc, BCs.Dirichlet):
for bc_slice in slices:
Q[...,c][bc_slice['i_ghost']] = 2*bc.fixed_value - Q[...,c][bc_slice['i_internal']]
else:
raise Exception("Primitive boundary condition type not understood")
def test():
from grids import grid2d
edges = grid2d.edges
assert sliceFromEdge(edges[0]) == (0, slice(None, None, None))
if __name__ == "__main__":
test()
|
[
"[email protected]"
] | |
fe79ba37dfe75f6ff0503aad5ac0e6a96ee458f1
|
68c29e7a17d87e34b1d6613c3e2e70a36fd2adcc
|
/easy/485_max_consecutive_ones.py
|
67224cb9e68d8e2bbf47be570831d8cff6dfae9b
|
[
"MIT"
] |
permissive
|
Sukhrobjon/leetcode
|
284242fbfded3e47a57ce9230f9bc1175685cd7a
|
547c200b627c774535bc22880b16d5390183aeba
|
refs/heads/master
| 2022-02-26T20:56:57.347119 | 2022-02-05T01:58:49 | 2022-02-05T01:58:49 | 192,158,651 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 908 |
py
|
"""
Given a binary array, find the maximum number of consecutive 1s in this array.
Example 1:
Input: [1,1,0,1,1,1]
Output: 3
Explanation: The first two digits or the last three digits are consecutive 1s.
The maximum number of consecutive 1s is 3.
NOTE:
The input array will only contain 0 and 1.
The length of input array is a positive integer and will not exceed 10,000
"""
class Solution(object):
def findMaxConsecutiveOnes(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
        count = 0
        max_len = 0
        for digit in nums:
            if digit == 1:
                count += 1  # extend the current run of consecutive 1s
            else:
                count = 0  # a 0 resets the run
            max_len = max(max_len, count)
        return max_len
nums = [1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1]
obj = Solution()
result = obj.findMaxConsecutiveOnes(nums)
print(result)
|
[
"[email protected]"
] | |
1423e38aeb02629c653981912db7913b499d81fa
|
6cbaade56c5db347d1be9a3422a69af52df39b97
|
/python_workspace/01_jump_to_python/4_input_output/3_file_io/174.py
|
0496363484d3cc8d30da51d8af3917c2569201ed
|
[] |
no_license
|
baewonje/iot_bigdata_-
|
b54e3772f64b9695efee8632183590628b679e11
|
2ce1af67d2f05abeb2ecd442b7299f349bdb9753
|
refs/heads/master
| 2020-09-06T09:53:53.018320 | 2019-12-06T08:19:33 | 2019-12-06T08:19:33 | 220,390,928 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 91 |
py
|
f = open("새파일.txt", 'r',encoding='UTF-8')
line = f.readline()
print(line)
f.close()
|
[
"[email protected]"
] | |
af4c2009dc27568a6632da49167c992f4cbb6714
|
acf6d4d8fa9d13e4d5e9c6e3576eefeb384d7b11
|
/confluent_server/confluent/discovery/core.py
|
c368a1cf6c89f3a511930d3ac5c75d3a0b0376f4
|
[
"Apache-2.0"
] |
permissive
|
dstam/confluent
|
6fa6b1916f907003cda89ece047d6e2c462f42ed
|
929392c74620299a9be8d73c4baeb27d455aba5b
|
refs/heads/master
| 2023-08-11T15:50:51.603433 | 2021-09-23T14:42:47 | 2021-09-23T14:42:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 57,469 |
py
|
# Copyright 2016-2021 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This manages the detection and auto-configuration of nodes.
# Discovery sources may implement scans and may be passive or may provide
# both.
# The phases and actions:
# - Detect - Notice the existence of a potentially supported target
# - Potentially apply a secure replacement for default credential
# (perhaps using some key identifier combined with some string
# denoting temporary use, and use confluent master integrity key
# to generate a password in a formulaic way?)
# - Do some universal reconfiguration if applicable (e.g. if something is
# part of an enclosure with an optionally enabled enclosure manager,
# check and request enclosure manager enablement
# - Throughout all of this, at this phase no sensitive data is divulged,
# only using credentials that are factory default or equivalent to
# factory default
# - Request transition to Locate
# - Locate - Use available cues to ascertain the physical location. This may
# be mac address lookup through switch or correlated by a server
# enclosure manager. If the location data suggests a node identity,
# then proceed to the 'verify' state
# - Verify - Given the current information and candidate upstream verifier,
# verify the authenticity of the servers claim in an automated way
# if possible. A few things may happen at this juncture
# - Verification outright fails (confirmed negative response)
# - Audit log entry created, element is not *allowed* to
# proceed
# - Verification not possible (neither good or bad)
# - If security policy is set to low, proceed to 'Manage'
# - Otherwise, log the detection event and stop (user
# would then manually bless the endpoint if applicable
# - Verification succeeds
#        - If security policy is set to strict (or manual, whichever
#          word works best), note the successful verification, but
#          do not manage
# - Otherwise, proceed to 'Manage'
# -Pre-configure - Given data up to this point, try to do some pre-config.
# For example, if located and X, then check for S, enable S
# This happens regardless of verify, as verify may depend on
# S
# - Manage
# - Create the node if autonode (Deferred)
# - If there is not a defined ip address, collect the current LLA and use
# that value.
# - If no username/password defined, generate a unique password, 20 bytes
# long, written to pass most complexity rules (15 random bytes, base64,
# retry until uppercase, lowercase, digit, and symbol all present)
# - Apply defined configuration to endpoint
import base64
import confluent.config.configmanager as cfm
import confluent.collective.manager as collective
import confluent.discovery.protocols.pxe as pxe
import confluent.discovery.protocols.ssdp as ssdp
import confluent.discovery.protocols.slp as slp
import confluent.discovery.handlers.imm as imm
import confluent.discovery.handlers.cpstorage as cpstorage
import confluent.discovery.handlers.tsm as tsm
import confluent.discovery.handlers.pxe as pxeh
import confluent.discovery.handlers.smm as smm
import confluent.discovery.handlers.xcc as xcc
import confluent.exceptions as exc
import confluent.log as log
import confluent.messages as msg
import confluent.networking.macmap as macmap
import confluent.noderange as noderange
import confluent.util as util
import eventlet
import traceback
import socket as nsocket
webclient = eventlet.import_patched('pyghmi.util.webclient')
import eventlet
import eventlet.greenpool
import eventlet.semaphore
autosensors = set()
scanner = None
try:
unicode
except NameError:
unicode = str
class nesteddict(dict):
def __missing__(self, key):
v = self[key] = nesteddict()
return v
nodehandlers = {
'service:lenovo-smm': smm,
'service:lenovo-smm2': smm,
'service:management-hardware.Lenovo:lenovo-xclarity-controller': xcc,
'service:management-hardware.IBM:integrated-management-module2': imm,
'pxe-client': pxeh,
'onie-switch': None,
'cumulus-switch': None,
'service:io-device.Lenovo:management-module': None,
'service:thinkagile-storage': cpstorage,
'service:lenovo-tsm': tsm,
}
servicenames = {
'pxe-client': 'pxe-client',
'onie-switch': 'onie-switch',
'cumulus-switch': 'cumulus-switch',
'service:lenovo-smm': 'lenovo-smm',
'service:lenovo-smm2': 'lenovo-smm2',
'service:management-hardware.Lenovo:lenovo-xclarity-controller': 'lenovo-xcc',
'service:management-hardware.IBM:integrated-management-module2': 'lenovo-imm2',
'service:io-device.Lenovo:management-module': 'lenovo-switch',
'service:thinkagile-storage': 'thinkagile-storagebmc',
'service:lenovo-tsm': 'lenovo-tsm',
}
servicebyname = {
'pxe-client': 'pxe-client',
'onie-switch': 'onie-switch',
'cumulus-switch': 'cumulus-switch',
'lenovo-smm': 'service:lenovo-smm',
'lenovo-smm2': 'service:lenovo-smm2',
'lenovo-xcc': 'service:management-hardware.Lenovo:lenovo-xclarity-controller',
'lenovo-imm2': 'service:management-hardware.IBM:integrated-management-module2',
'lenovo-switch': 'service:io-device.Lenovo:management-module',
'thinkagile-storage': 'service:thinkagile-storagebmc',
'lenovo-tsm': 'service:lenovo-tsm',
}
discopool = eventlet.greenpool.GreenPool(500)
runningevals = {}
# Passive-only auto-detection protocols:
# PXE
# Both passive and active
# SLP (passive mode listens for SLP DA and unicast interrogation of the system)
# mDNS
# SSD
# Also there are location providers
# Switch
# chassis
# chassis may in turn describe more chassis
# We normalize discovered node data to the following pieces of information:
# * Detected node name (if available, from switch discovery or similar), or an
#    auto-generated node name.
# * Model number
# * Model name
# * Serial number
# * System UUID (in x86 space, specifically whichever UUID would be in DMI)
# * Network interfaces and addresses
# * Switch connectivity information
# * enclosure information
# * Management TLS fingerprint if validated (switch publication or enclosure)
# * System TLS fingerprint if validated (switch publication or system manager)
#TODO: by serial, by uuid, by node
known_info = {}
known_services = {}
known_serials = {}
known_uuids = nesteddict()
known_nodes = nesteddict()
unknown_info = {}
pending_nodes = {}
pending_by_uuid = {}
def enrich_pxe_info(info):
sn = None
mn = None
nodename = info.get('nodename', None)
uuid = info.get('uuid', '')
if not uuid_is_valid(uuid):
return info
for mac in known_uuids.get(uuid, {}):
if not sn and 'serialnumber' in known_uuids[uuid][mac]:
info['serialnumber'] = known_uuids[uuid][mac]['serialnumber']
if not mn and 'modelnumber' in known_uuids[uuid][mac]:
info['modelnumber'] = known_uuids[uuid][mac]['modelnumber']
if nodename is None and 'nodename' in known_uuids[uuid][mac]:
info['nodename'] = known_uuids[uuid][mac]['nodename']
def uuid_is_valid(uuid):
if not uuid:
return False
return uuid.lower() not in ('00000000-0000-0000-0000-000000000000',
'ffffffff-ffff-ffff-ffff-ffffffffffff',
'00112233-4455-6677-8899-aabbccddeeff',
'20202020-2020-2020-2020-202020202020')
def _printable_ip(sa):
return nsocket.getnameinfo(
sa, nsocket.NI_NUMERICHOST|nsocket.NI_NUMERICSERV)[0]
def send_discovery_datum(info):
addresses = info.get('addresses', [])
if info['handler'] == pxeh:
enrich_pxe_info(info)
yield msg.KeyValueData({'nodename': info.get('nodename', '')})
yield msg.KeyValueData({'ipaddrs': [_printable_ip(x) for x in addresses]})
sn = info.get('serialnumber', '')
mn = info.get('modelnumber', '')
uuid = info.get('uuid', '')
if uuid:
relatedmacs = []
for mac in known_uuids.get(uuid, {}):
if mac and mac != info.get('hwaddr', ''):
relatedmacs.append(mac)
if relatedmacs:
yield msg.KeyValueData({'relatedmacs': relatedmacs})
yield msg.KeyValueData({'serialnumber': sn})
yield msg.KeyValueData({'modelnumber': mn})
yield msg.KeyValueData({'uuid': uuid})
if 'enclosure.uuid' in info:
yield msg.KeyValueData({'enclosure_uuid': info['enclosure.uuid']})
if 'enclosure.bay' in info:
yield msg.KeyValueData({'bay': int(info['enclosure.bay'])})
yield msg.KeyValueData({'macs': [info.get('hwaddr', '')]})
types = []
for infotype in info.get('services', []):
if infotype in servicenames:
types.append(servicenames[infotype])
yield msg.KeyValueData({'types': types})
if 'otheraddresses' in info:
yield msg.KeyValueData({'otheripaddrs': list(info['otheraddresses'])})
if 'location' in info:
yield msg.KeyValueData({'location': info['location']})
if 'room' in info:
yield msg.KeyValueData({'room': info['room']})
if 'rack' in info:
yield msg.KeyValueData({'rack': info['rack']})
if 'u' in info:
yield msg.KeyValueData({'lowest_u': info['u']})
if 'hostname' in info:
yield msg.KeyValueData({'hostname': info['hostname']})
if 'modelname' in info:
yield msg.KeyValueData({'modelname': info['modelname']})
def _info_matches(info, criteria):
model = criteria.get('by-model', None)
devtype = criteria.get('by-type', None)
node = criteria.get('by-node', None)
serial = criteria.get('by-serial', None)
status = criteria.get('by-state', None)
uuid = criteria.get('by-uuid', None)
if model and info.get('modelnumber', None) != model:
return False
if devtype and devtype not in info.get('services', []):
return False
if node and info.get('nodename', None) != node:
return False
if serial and info.get('serialnumber', None) != serial:
return False
if status and info.get('discostatus', None) != status:
return False
if uuid and info.get('uuid', None) != uuid:
return False
return True
def list_matching_nodes(criteria):
retnodes = []
for node in known_nodes:
for mac in known_nodes[node]:
info = known_info[mac]
if _info_matches(info, criteria):
retnodes.append(node)
break
retnodes.sort(key=noderange.humanify_nodename)
return [msg.ChildCollection(node + '/') for node in retnodes]
def list_matching_serials(criteria):
for serial in sorted(list(known_serials)):
info = known_serials[serial]
if _info_matches(info, criteria):
yield msg.ChildCollection(serial + '/')
def list_matching_uuids(criteria):
for uuid in sorted(list(known_uuids)):
for mac in known_uuids[uuid]:
info = known_uuids[uuid][mac]
if _info_matches(info, criteria):
yield msg.ChildCollection(uuid + '/')
break
def list_matching_states(criteria):
return [msg.ChildCollection(x) for x in ('discovered/', 'identified/',
'unidentified/')]
def list_matching_macs(criteria):
for mac in sorted(list(known_info)):
info = known_info[mac]
if _info_matches(info, criteria):
yield msg.ChildCollection(mac.replace(':', '-'))
def list_matching_types(criteria):
rettypes = []
for infotype in known_services:
typename = servicenames[infotype]
if ('by-model' not in criteria or
criteria['by-model'] in known_services[infotype]):
rettypes.append(typename)
return [msg.ChildCollection(typename + '/')
for typename in sorted(rettypes)]
def list_matching_models(criteria):
for model in sorted(list(detected_models())):
if ('by-type' not in criteria or
model in known_services[criteria['by-type']]):
yield msg.ChildCollection(model + '/')
def show_info(mac):
mac = mac.replace('-', ':')
if mac not in known_info:
raise exc.NotFoundException(mac + ' not a known mac address')
for i in send_discovery_datum(known_info[mac]):
yield i
list_info = {
'by-node': list_matching_nodes,
'by-serial': list_matching_serials,
'by-type': list_matching_types,
'by-model': list_matching_models,
'by-mac': list_matching_macs,
'by-state': list_matching_states,
'by-uuid': list_matching_uuids,
}
multi_selectors = set([
'by-type',
'by-model',
'by-state',
'by-uuid',
])
node_selectors = set([
'by-node',
'by-serial',
])
single_selectors = set([
'by-mac',
])
def _parameterize_path(pathcomponents):
listrequested = False
childcoll = True
if len(pathcomponents) % 2 == 1:
listrequested = pathcomponents[-1]
pathcomponents = pathcomponents[:-1]
pathit = iter(pathcomponents)
keyparams = {}
validselectors = multi_selectors | node_selectors | single_selectors
for key, val in zip(pathit, pathit):
if key not in validselectors:
raise exc.NotFoundException('{0} is not valid here'.format(key))
if key == 'by-type':
keyparams[key] = servicebyname.get(val, '!!!!invalid-type')
else:
keyparams[key] = val
validselectors.discard(key)
if key in single_selectors:
childcoll = False
validselectors = set([])
elif key in node_selectors:
validselectors = single_selectors | set([])
return validselectors, keyparams, listrequested, childcoll
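# Illustrative example (added for clarity, not in the original source):
#   _parameterize_path(['by-model', '7X06', 'by-mac', '00-11-22-33-44-55'])
# returns an empty set of remaining selectors, the keyparams
#   {'by-model': '7X06', 'by-mac': '00-11-22-33-44-55'},
# listrequested=False and childcoll=False, since 'by-mac' narrows the query
# down to a single endpoint.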
def handle_autosense_config(operation, inputdata):
autosense = cfm.get_global('discovery.autosense')
autosense = autosense or autosense is None
if operation == 'retrieve':
yield msg.KeyValueData({'enabled': autosense})
elif operation == 'update':
enabled = inputdata['enabled']
if type(enabled) in (unicode, bytes):
enabled = enabled.lower() in ('true', '1', 'y', 'yes', 'enable',
'enabled')
if autosense == enabled:
return
cfm.set_global('discovery.autosense', enabled)
if enabled:
start_autosense()
else:
stop_autosense()
def handle_api_request(configmanager, inputdata, operation, pathcomponents):
if pathcomponents == ['discovery', 'autosense']:
return handle_autosense_config(operation, inputdata)
if operation == 'retrieve':
return handle_read_api_request(pathcomponents)
elif (operation in ('update', 'create') and
pathcomponents == ['discovery', 'rescan']):
if inputdata != {'rescan': 'start'}:
raise exc.InvalidArgumentException()
rescan()
return (msg.KeyValueData({'rescan': 'started'}),)
elif operation in ('update', 'create'):
if 'node' not in inputdata:
raise exc.InvalidArgumentException('Missing node name in input')
mac = _get_mac_from_query(pathcomponents)
info = known_info[mac]
if info['handler'] is None:
raise exc.NotImplementedException(
'Unable to {0} to {1}'.format(operation,
'/'.join(pathcomponents)))
handler = info['handler'].NodeHandler(info, configmanager)
try:
eval_node(configmanager, handler, info, inputdata['node'],
manual=True)
except Exception as e:
            # or... incorrect password provided..
if 'Incorrect password' in str(e) or 'Unauthorized name' in str(e):
return [msg.ConfluentTargetInvalidCredentials(
inputdata['node'])]
raise
return [msg.AssignedResource(inputdata['node'])]
elif operation == 'delete':
mac = _get_mac_from_query(pathcomponents)
del known_info[mac]
return [msg.DeletedResource(mac)]
raise exc.NotImplementedException(
'Unable to {0} to {1}'.format(operation, '/'.join(pathcomponents)))
def _get_mac_from_query(pathcomponents):
_, queryparms, _, _ = _parameterize_path(pathcomponents[1:])
if 'by-mac' not in queryparms:
raise exc.InvalidArgumentException('Must target using "by-mac"')
mac = queryparms['by-mac'].replace('-', ':')
if mac not in known_info:
raise exc.NotFoundException('{0} not found'.format(mac))
return mac
def handle_read_api_request(pathcomponents):
# TODO(jjohnson2): This should be more generalized...
# odd indexes into components are 'by-'*, even indexes
# starting at 2 are parameters to previous index
if pathcomponents == ['discovery', 'rescan']:
return (msg.KeyValueData({'scanning': bool(scanner)}),)
subcats, queryparms, indexof, coll = _parameterize_path(pathcomponents[1:])
if len(pathcomponents) == 1:
dirlist = [msg.ChildCollection(x + '/') for x in sorted(list(subcats))]
dirlist.append(msg.ChildCollection('rescan'))
dirlist.append(msg.ChildCollection('autosense'))
return dirlist
if not coll:
return show_info(queryparms['by-mac'])
if not indexof:
return [msg.ChildCollection(x + '/') for x in sorted(list(subcats))]
if indexof not in list_info:
raise exc.NotFoundException('{0} is not found'.format(indexof))
return list_info[indexof](queryparms)
def detected_services():
for srv in known_services:
yield servicenames[srv]
def detected_models():
knownmodels = set([])
for info in known_info:
info = known_info[info]
if 'modelnumber' in info and info['modelnumber'] not in knownmodels:
knownmodels.add(info['modelnumber'])
yield info['modelnumber']
def _recheck_nodes(nodeattribs, configmanager):
if rechecklock.locked():
# if already in progress, don't run again
# it may make sense to schedule a repeat, but will try the easier and less redundant way first
return
with rechecklock:
return _recheck_nodes_backend(nodeattribs, configmanager)
def _recheck_nodes_backend(nodeattribs, configmanager):
global rechecker
_map_unique_ids(nodeattribs)
# for the nodes whose attributes have changed, consider them as potential
# strangers
if nodeattribs:
macmap.vintage = 0 # expire current mac map data, in case
# the attributes changed impacted the result
for node in nodeattribs:
if node in known_nodes:
for somemac in known_nodes[node]:
unknown_info[somemac] = known_nodes[node][somemac]
unknown_info[somemac]['discostatus'] = 'unidentified'
# Now we go through ones we did not find earlier
for mac in list(unknown_info):
try:
_recheck_single_unknown(configmanager, mac)
except Exception:
traceback.print_exc()
continue
# now we go through ones that were identified, but could not pass
# policy or hadn't been able to verify key
for nodename in pending_nodes:
info = pending_nodes[nodename]
try:
if info['handler'] is None:
                continue  # no handler known for this entry, skip it
handler = info['handler'].NodeHandler(info, configmanager)
discopool.spawn_n(eval_node, configmanager, handler, info, nodename)
except Exception:
traceback.print_exc()
log.log({'error': 'Unexpected error during discovery of {0}, check debug '
'logs'.format(nodename)})
def _recheck_single_unknown(configmanager, mac):
info = unknown_info.get(mac, None)
_recheck_single_unknown_info(configmanager, info)
def _recheck_single_unknown_info(configmanager, info):
global rechecker
global rechecktime
if not info or info['handler'] is None:
return
if info['handler'] != pxeh and not info.get('addresses', None):
#log.log({'info': 'Missing address information in ' + repr(info)})
return
handler = info['handler'].NodeHandler(info, configmanager)
if handler.https_supported and not handler.https_cert:
if handler.cert_fail_reason == 'unreachable':
log.log(
{
'info': '{0} with hwaddr {1} is not reachable at {2}'
''.format(
handler.devname, info['hwaddr'], handler.ipaddr
)})
# addresses data is bad, delete the offending ip
info['addresses'] = [x for x in info.get('addresses', []) if x != handler.ipaddr]
# TODO(jjohnson2): rescan due to bad peer addr data?
# not just wait around for the next announce
return
log.log(
{
'info': '{0} with hwaddr {1} at address {2} is not yet running '
'https, will examine later'.format(
handler.devname, info['hwaddr'], handler.ipaddr
)})
if rechecker is not None and rechecktime > util.monotonic_time() + 300:
rechecker.cancel()
# if cancel did not result in dead, then we are in progress
if rechecker is None or rechecker.dead:
rechecktime = util.monotonic_time() + 300
rechecker = eventlet.spawn_after(300, _periodic_recheck,
configmanager)
return
nodename, info['maccount'] = get_nodename(configmanager, handler, info)
if nodename:
if handler.https_supported:
dp = configmanager.get_node_attributes([nodename],
('pubkeys.tls_hardwaremanager',))
lastfp = dp.get(nodename, {}).get('pubkeys.tls_hardwaremanager',
{}).get('value', None)
if util.cert_matches(lastfp, handler.https_cert):
info['nodename'] = nodename
known_nodes[nodename][info['hwaddr']] = info
info['discostatus'] = 'discovered'
return # already known, no need for more
discopool.spawn_n(eval_node, configmanager, handler, info, nodename)
def safe_detected(info):
if 'hwaddr' not in info or not info['hwaddr']:
return
if info['hwaddr'] in runningevals:
# Do not evaluate the same mac multiple times at once
return
runningevals[info['hwaddr']] = discopool.spawn(eval_detected, info)
def eval_detected(info):
try:
detected(info)
except Exception as e:
traceback.print_exc()
del runningevals[info['hwaddr']]
def detected(info):
global rechecker
global rechecktime
# later, manual and CMM discovery may act on SN and/or UUID
for service in info['services']:
if service in nodehandlers:
if service not in known_services:
known_services[service] = set([])
handler = nodehandlers[service]
info['handler'] = handler
break
else: # no nodehandler, ignore for now
return
if (handler and not handler.NodeHandler.adequate(info) and
info.get('protocol', None)):
eventlet.spawn_after(10, info['protocol'].fix_info, info,
safe_detected)
return
try:
snum = info['attributes']['enclosure-serial-number'][0].strip()
if snum:
info['serialnumber'] = snum
known_serials[info['serialnumber']] = info
except (KeyError, IndexError):
pass
try:
info['modelnumber'] = info['attributes']['enclosure-machinetype-model'][0]
known_services[service].add(info['modelnumber'])
except (KeyError, IndexError):
pass
if info['hwaddr'] in known_info and 'addresses' in info:
# we should tee these up for parsing when an enclosure comes up
# also when switch config parameters change, should discard
# and there's also if wiring is fixed...
# of course could periodically revisit known_nodes
# replace potentially stale address info
#TODO(jjohnson2): remove this
# temporary workaround for XCC not doing SLP DA over dedicated port
# bz 93219, fix submitted, but not in builds yet
# strictly speaking, going ipv4 only legitimately is mistreated here,
# but that should be an edge case
oldaddr = known_info[info['hwaddr']].get('addresses', [])
for addr in info['addresses']:
if addr[0].startswith('fe80::'):
break
else:
for addr in oldaddr:
if addr[0].startswith('fe80::'):
info['addresses'].append(addr)
if known_info[info['hwaddr']].get(
'addresses', []) == info['addresses']:
# if the ip addresses match, then assume no changes
# now something resetting to defaults could, in theory
# have the same address, but need to be reset
# in that case, however, a user can clear pubkeys to force a check
return
known_info[info['hwaddr']] = info
cfg = cfm.ConfigManager(None)
if handler:
handler = handler.NodeHandler(info, cfg)
handler.scan()
uuid = info.get('uuid', None)
if uuid_is_valid(uuid):
known_uuids[uuid][info['hwaddr']] = info
info['otheraddresses'] = set([])
for i4addr in info.get('attributes', {}).get('ipv4-address', []):
info['otheraddresses'].add(i4addr)
if handler and handler.https_supported and not handler.https_cert:
if handler.cert_fail_reason == 'unreachable':
log.log(
{
'info': '{0} with hwaddr {1} is not reachable by https '
'at address {2}'.format(
handler.devname, info['hwaddr'], handler.ipaddr
)})
info['addresses'] = [x for x in info.get('addresses', []) if x != handler.ipaddr]
return
log.log(
{'info': '{0} with hwaddr {1} at address {2} is not yet running '
'https, will examine later'.format(
handler.devname, info['hwaddr'], handler.ipaddr
)})
if rechecker is not None and rechecktime > util.monotonic_time() + 300:
rechecker.cancel()
if rechecker is None or rechecker.dead:
rechecktime = util.monotonic_time() + 300
rechecker = eventlet.spawn_after(300, _periodic_recheck, cfg)
unknown_info[info['hwaddr']] = info
        info['discostatus'] = 'unidentified'
#TODO, eventlet spawn after to recheck sooner, or somehow else
# influence periodic recheck to shorten delay?
return
nodename, info['maccount'] = get_nodename(cfg, handler, info)
if nodename and handler and handler.https_supported:
dp = cfg.get_node_attributes([nodename],
('pubkeys.tls_hardwaremanager', 'id.uuid', 'discovery.policy'))
dp = dp.get(nodename, {})
lastfp = dp.get('pubkeys.tls_hardwaremanager',
{}).get('value', None)
if util.cert_matches(lastfp, handler.https_cert):
info['nodename'] = nodename
known_nodes[nodename][info['hwaddr']] = info
info['discostatus'] = 'discovered'
uuid = info.get('uuid', None)
if uuid:
storeuuid = dp.get('id.uuid', {}).get('value', None)
if not storeuuid:
discop = dp.get('discovery.policy', {}).get('value', '')
if discop:
policies = set(discop.split(','))
else:
policies = set([])
if policies & {'open', 'permissive'}:
cfg.set_node_attributes({nodename: {'id.uuid': info['uuid']}})
return # already known, no need for more
#TODO(jjohnson2): We might have to get UUID for certain searches...
#for now defer probe until inside eval_node. We might not have
#a nodename without probe in the future.
if nodename and handler:
eval_node(cfg, handler, info, nodename)
elif handler:
#log.log(
# {'info': 'Detected unknown {0} with hwaddr {1} at '
# 'address {2}'.format(
# handler.devname, info['hwaddr'], handler.ipaddr
# )})
info['discostatus'] = 'unidentified'
unknown_info[info['hwaddr']] = info
def b64tohex(b64str):
bd = base64.b64decode(b64str)
bd = bytearray(bd)
return ''.join(['{0:02x}'.format(x) for x in bd])
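# Illustrative example (not in the original source): b64tohex('AAEC')
# decodes to the bytes 00 01 02 and returns the hex string '000102'.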
def get_enclosure_chain_head(nodename, cfg):
ne = True
members = [nodename]
while ne:
ne = cfg.get_node_attributes(
nodename, 'enclosure.extends').get(nodename, {}).get(
'enclosure.extends', {}).get('value', None)
if not ne:
return nodename
if ne in members:
raise exc.InvalidArgumentException(
'Circular chain that includes ' + nodename)
if not cfg.is_node(ne):
raise exc.InvalidArgumentException(
'{0} is chained to nonexistent node {1} '.format(
nodename, ne))
nodename = ne
members.append(nodename)
return nodename
def get_chained_smm_name(nodename, cfg, handler, nl=None, checkswitch=True):
# nodename is the head of the chain, cfg is a configmanager, handler
# is the handler of the current candidate, nl is optional indication
# of the next link in the chain, checkswitch can disable the switch
# search if not indicated by current situation
# returns the new name and whether it has been securely validated or not
# first we check to see if directly connected
mycert = handler.https_cert
if checkswitch:
fprints = macmap.get_node_fingerprints(nodename, cfg)
for fprint in fprints:
if util.cert_matches(fprint[0], mycert):
# ok we have a direct match, it is this node
return nodename, fprint[1]
# ok, unable to get it, need to traverse the chain from the beginning
if not nl:
nl = list(cfg.filter_node_attributes(
'enclosure.extends=' + nodename))
while nl:
if len(nl) != 1:
raise exc.InvalidArgumentException('Multiple enclosures trying to '
'extend a single enclosure')
cd = cfg.get_node_attributes(nodename, ['hardwaremanagement.manager',
'pubkeys.tls_hardwaremanager'])
pkey = cd[nodename].get('pubkeys.tls_hardwaremanager', {}).get(
'value', None)
if not pkey:
# We cannot continue through a break in the chain
return None, False
smmaddr = cd.get(nodename, {}).get('hardwaremanagement.manager', {}).get('value', None)
if not smmaddr:
return None, False
if pkey:
cv = util.TLSCertVerifier(
cfg, nodename, 'pubkeys.tls_hardwaremanager').verify_cert
for fprint in get_smm_neighbor_fingerprints(smmaddr, cv):
if util.cert_matches(fprint, mycert):
# a trusted chain member vouched for the cert
# so it's validated
return nl[0], True
# advance down the chain by one and try again
nodename = nl[0]
nl = list(cfg.filter_node_attributes(
'enclosure.extends=' + nodename))
return None, False
def get_smm_neighbor_fingerprints(smmaddr, cv):
if ':' in smmaddr:
smmaddr = '[{0}]'.format(smmaddr)
wc = webclient.SecureHTTPConnection(smmaddr, verifycallback=cv)
try:
neighs = wc.grab_json_response('/scripts/neighdata.json')
except Exception:
log.log({'error': 'Failure getting LLDP information from {}'.format(smmaddr)})
return
if not neighs:
return
for neigh in neighs:
if 'sha256' not in neigh:
continue
yield 'sha256$' + b64tohex(neigh['sha256'])
def get_nodename(cfg, handler, info):
nodename = None
maccount = None
info['verified'] = False
if not handler:
return None, None
if handler.https_supported:
currcert = handler.https_cert
if not currcert:
info['discofailure'] = 'nohttps'
return None, None
currprint = util.get_fingerprint(currcert, 'sha256')
nodename = nodes_by_fprint.get(currprint, None)
if not nodename:
# Try SHA512 as well
currprint = util.get_fingerprint(currcert)
nodename = nodes_by_fprint.get(currprint, None)
if not nodename:
curruuid = info.get('uuid', None)
if uuid_is_valid(curruuid):
nodename = nodes_by_uuid.get(curruuid, None)
if nodename is None:
_map_unique_ids()
nodename = nodes_by_uuid.get(curruuid, None)
if not nodename and info['handler'] == pxeh:
enrich_pxe_info(info)
nodename = info.get('nodename', None)
if not nodename:
# Ok, see if it is something with a chassis-uuid and discover by
# chassis
nodename = get_nodename_from_enclosures(cfg, info)
if not nodename and handler.devname == 'SMM':
nodename = get_nodename_from_chained_smms(cfg, handler, info)
if not nodename: # as a last resort, search switches for info
# This is the slowest potential operation, so we hope for the
# best to occur prior to this
nodename, macinfo = macmap.find_nodeinfo_by_mac(info['hwaddr'], cfg)
maccount = macinfo['maccount']
if nodename:
if handler.devname == 'SMM':
nl = list(cfg.filter_node_attributes(
'enclosure.extends=' + nodename))
if nl:
# We found an SMM, and it's in a chain per configuration
# we need to ask the switch for the fingerprint to see
# if we have a match or not
newnodename, v = get_chained_smm_name(nodename, cfg,
handler, nl)
if newnodename:
# while this started by switch, it was disambiguated
info['verified'] = v
return newnodename, None
else:
errorstr = ('Attempt to discover SMM in chain but '
'unable to follow chain to the specific '
'SMM, it may be waiting on an upstream '
'SMM, chain starts with {0}'.format(
nodename))
log.log({'error': errorstr})
return None, None
if (nodename and
not handler.discoverable_by_switch(macinfo['maccount'])):
if handler.devname == 'SMM':
errorstr = 'Attempt to discover SMM by switch, but chained ' \
'topology or incorrect net attributes detected, ' \
'which is not compatible with switch discovery ' \
'of SMM, nodename would have been ' \
'{0}'.format(nodename)
log.log({'error': errorstr})
return None, None
return nodename, maccount
def get_nodename_from_chained_smms(cfg, handler, info):
nodename = None
for fprint in get_smm_neighbor_fingerprints(
handler.ipaddr, lambda x: True):
if fprint in nodes_by_fprint:
# need to chase the whole chain
# to support either direction
chead = get_enclosure_chain_head(nodes_by_fprint[fprint],
cfg)
newnodename, v = get_chained_smm_name(
chead, cfg, handler, checkswitch=False)
if newnodename:
info['verified'] = v
nodename = newnodename
return nodename
def get_node_guess_by_uuid(uuid):
for mac in known_uuids.get(uuid, {}):
nodename = known_uuids[uuid][mac].get('nodename', None)
if nodename:
return nodename
return None
def get_node_by_uuid_or_mac(uuidormac):
node = pxe.macmap.get(uuidormac, None)
if node is not None:
return node
return nodes_by_uuid.get(uuidormac, None)
def get_nodename_from_enclosures(cfg, info):
nodename = None
cuuid = info.get('attributes', {}).get('chassis-uuid', [None])[0]
if cuuid and cuuid in nodes_by_uuid:
encl = nodes_by_uuid[cuuid]
bay = info.get('enclosure.bay', None)
if bay:
tnl = cfg.filter_node_attributes('enclosure.manager=' + encl)
tnl = list(
cfg.filter_node_attributes('enclosure.bay={0}'.format(bay),
tnl))
if len(tnl) == 1:
# This is not a secure assurance, because it's by
# uuid instead of a key
nodename = tnl[0]
return nodename
def eval_node(cfg, handler, info, nodename, manual=False):
try:
handler.probe() # unicast interrogation as possible to get more data
# switch concurrently
# do some preconfig, for example, to bring a SMM online if applicable
handler.preconfig(nodename)
except Exception as e:
unknown_info[info['hwaddr']] = info
info['discostatus'] = 'unidentified'
        errorstr = 'An error occurred during discovery, check the ' \
'trace and stderr logs, mac was {0} and ip was {1}' \
', the node or the containing enclosure was {2}' \
''.format(info['hwaddr'], handler.ipaddr, nodename)
traceback.print_exc()
if manual:
raise exc.InvalidArgumentException(errorstr)
log.log({'error': errorstr})
return
# first, if had a bay, it was in an enclosure. If it was discovered by
# switch, it is probably the enclosure manager and not
# the node directly. switch is ambiguous and we should leave it alone
if 'enclosure.bay' in info and handler.is_enclosure:
unknown_info[info['hwaddr']] = info
info['discostatus'] = 'unidentified'
log.log({'error': 'Something that is an enclosure reported a bay, '
'not possible'})
if manual:
raise exc.InvalidArgumentException()
return
nl = list(cfg.filter_node_attributes('enclosure.manager=' + nodename))
if not handler.is_enclosure and nl:
# The specified node is an enclosure (has nodes mapped to it), but
# what we are talking to is *not* an enclosure
# might be ambiguous, need to match chassis-uuid as well..
if 'enclosure.bay' not in info:
unknown_info[info['hwaddr']] = info
info['discostatus'] = 'unidentified'
errorstr = '{2} with mac {0} is in {1}, but unable to ' \
'determine bay number'.format(info['hwaddr'],
nodename,
handler.ipaddr)
if manual:
raise exc.InvalidArgumentException(errorstr)
log.log({'error': errorstr})
return
enl = list(cfg.filter_node_attributes('enclosure.extends=' + nodename))
if enl:
# ambiguous SMM situation according to the configuration, we need
# to match uuid
encuuid = info['attributes'].get('chassis-uuid', None)
if encuuid:
encuuid = encuuid[0]
enl = list(cfg.filter_node_attributes('id.uuid=' + encuuid))
if len(enl) != 1:
# errorstr = 'No SMM by given UUID known, *yet*'
# if manual:
# raise exc.InvalidArgumentException(errorstr)
# log.log({'error': errorstr})
if encuuid in pending_by_uuid:
pending_by_uuid[encuuid].append(info)
else:
pending_by_uuid[encuuid] = [info]
return
# We found the real smm, replace the list with the actual smm
# to continue
nl = list(cfg.filter_node_attributes(
'enclosure.manager=' + enl[0]))
else:
errorstr = 'Chained SMM configuration with older XCC, ' \
'unable to perform zero power discovery'
if manual:
raise exc.InvalidArgumentException(errorstr)
log.log({'error': errorstr})
return
# search for nodes fitting our description using filters
# lead with the most specific to have a small second pass
nl = list(cfg.filter_node_attributes(
'enclosure.bay={0}'.format(info['enclosure.bay']), nl))
if len(nl) != 1:
info['discofailure'] = 'ambigconfig'
if len(nl):
errorstr = 'The following nodes have duplicate ' \
'enclosure attributes: ' + ','.join(nl)
else:
errorstr = 'The {0} in enclosure {1} bay {2} does not ' \
'seem to be a defined node ({3})'.format(
handler.devname, nodename,
info['enclosure.bay'],
handler.ipaddr,
)
if manual:
raise exc.InvalidArgumentException(errorstr)
log.log({'error': errorstr})
unknown_info[info['hwaddr']] = info
info['discostatus'] = 'unidentified'
return
nodename = nl[0]
if not discover_node(cfg, handler, info, nodename, manual):
# store it as pending, assuming blocked on enclosure
# assurance...
pending_nodes[nodename] = info
else:
# we can and did accurately discover by switch or in enclosure
# but... is this really ok? could be on an upstream port or
# erroneously put in the enclosure with no nodes yet
# so first, see if the candidate node is a chain host
if not manual:
if info.get('maccount', False):
# discovery happened through switch
nl = list(cfg.filter_node_attributes(
'enclosure.extends=' + nodename))
if nl:
# The candidate nodename is the head of a chain, we must
# validate the smm certificate by the switch
fprints = macmap.get_node_fingerprints(nodename, cfg)
for fprint in fprints:
if util.cert_matches(fprint[0], handler.https_cert):
if not discover_node(cfg, handler, info,
nodename, manual):
pending_nodes[nodename] = info
return
if (info.get('maccount', False) and
not handler.discoverable_by_switch(info['maccount'])):
errorstr = 'The detected node {0} was detected using switch, ' \
'however the relevant port has too many macs learned ' \
'for this type of device ({1}) to be discovered by ' \
'switch.'.format(nodename, handler.devname)
log.log({'error': errorstr})
return
if not discover_node(cfg, handler, info, nodename, manual):
pending_nodes[nodename] = info
def discover_node(cfg, handler, info, nodename, manual):
if manual:
if not cfg.is_node(nodename):
raise exc.InvalidArgumentException(
'{0} is not a defined node, must be defined before an '
'endpoint may be assigned to it'.format(nodename))
if handler.https_supported:
currcert = handler.https_cert
if currcert:
currprint = util.get_fingerprint(currcert, 'sha256')
prevnode = nodes_by_fprint.get(currprint, None)
if prevnode and prevnode != nodename:
raise exc.InvalidArgumentException(
'Attempt to assign {0} conflicts with existing node {1} '
'based on TLS certificate.'.format(nodename, prevnode))
known_nodes[nodename][info['hwaddr']] = info
if info['hwaddr'] in unknown_info:
del unknown_info[info['hwaddr']]
info['discostatus'] = 'identified'
dp = cfg.get_node_attributes(
[nodename], ('discovery.policy', 'id.uuid',
'pubkeys.tls_hardwaremanager'))
policy = dp.get(nodename, {}).get('discovery.policy', {}).get(
'value', None)
if policy is None:
policy = ''
policies = set(policy.split(','))
lastfp = dp.get(nodename, {}).get('pubkeys.tls_hardwaremanager',
{}).get('value', None)
# TODO(jjohnson2): permissive requires we guarantee storage of
# the pubkeys, which is deferred for a little bit
# Also, 'secure', when we have the needed infrastructure done
# in some product or another.
curruuid = info.get('uuid', False)
if 'pxe' in policies and info['handler'] == pxeh:
return do_pxe_discovery(cfg, handler, info, manual, nodename, policies)
elif ('permissive' in policies and handler.https_supported and lastfp and
not util.cert_matches(lastfp, handler.https_cert) and not manual):
info['discofailure'] = 'fingerprint'
log.log({'info': 'Detected replacement of {0} with existing '
'fingerprint and permissive discovery policy, not '
'doing discovery unless discovery.policy=open or '
'pubkeys.tls_hardwaremanager attribute is cleared '
'first'.format(nodename)})
return False # With a permissive policy, do not discover new
elif policies & set(('open', 'permissive')) or manual:
info['nodename'] = nodename
if info['handler'] == pxeh:
return do_pxe_discovery(cfg, handler, info, manual, nodename, policies)
elif manual or not util.cert_matches(lastfp, handler.https_cert):
# only 'discover' if it is not the same as last time
try:
handler.config(nodename)
except Exception as e:
info['discofailure'] = 'bug'
if manual:
raise
log.log(
{'error':
'Error encountered trying to set up {0}, {1}'.format(
nodename, str(e))})
traceback.print_exc()
return False
newnodeattribs = {}
if list(cfm.list_collective()):
# We are in a collective, check collective.manager
cmc = cfg.get_node_attributes(nodename, 'collective.manager')
cm = cmc.get(nodename, {}).get('collective.manager', {}).get('value', None)
if not cm:
# Node is being discovered in collective, but no collective.manager, default
# to the collective member actually able to execute the discovery
newnodeattribs['collective.manager'] = collective.get_myname()
if 'uuid' in info:
newnodeattribs['id.uuid'] = info['uuid']
if 'serialnumber' in info:
newnodeattribs['id.serial'] = info['serialnumber']
if 'modelnumber' in info:
newnodeattribs['id.model'] = info['modelnumber']
if handler.https_cert:
newnodeattribs['pubkeys.tls_hardwaremanager'] = \
util.get_fingerprint(handler.https_cert, 'sha256')
if newnodeattribs:
cfg.set_node_attributes({nodename: newnodeattribs})
log.log({'info': 'Discovered {0} ({1})'.format(nodename,
handler.devname)})
info['discostatus'] = 'discovered'
for i in pending_by_uuid.get(curruuid, []):
eventlet.spawn_n(_recheck_single_unknown_info, cfg, i)
try:
del pending_by_uuid[curruuid]
except KeyError:
pass
return True
if info['handler'] == pxeh:
olduuid = dp.get(nodename, {}).get('id.uuid', {}).get(
'value', None)
        # guard against a missing stored uuid before comparing
        if not olduuid or olduuid.lower() != info['uuid']:
log.log({'info': 'Detected {0}, but discovery.policy is not set to a '
'value allowing discovery (open, permissive, or pxe)'.format(
nodename)})
info['discofailure'] = 'policy'
else:
log.log({'info': 'Detected {0}, but discovery.policy is not set to a '
'value allowing discovery (open or permissive)'.format(
nodename)})
info['discofailure'] = 'policy'
return False
def do_pxe_discovery(cfg, handler, info, manual, nodename, policies):
# use uuid based scheme in lieu of tls cert, ideally only
# for stateless 'discovery' targets like pxe, where data does not
# change
uuidinfo = cfg.get_node_attributes(nodename, ['id.uuid', 'id.serial', 'id.model', 'net*.hwaddr', 'net*.bootable'])
if manual or policies & set(('open', 'pxe')):
enrich_pxe_info(info)
attribs = {}
olduuid = uuidinfo.get(nodename, {}).get('id.uuid', None)
if isinstance(olduuid, dict):
olduuid = olduuid.get('value', None)
uuid = info.get('uuid', None)
if uuid and uuid != olduuid:
attribs['id.uuid'] = info['uuid']
sn = info.get('serialnumber', None)
mn = info.get('modelnumber', None)
if sn and sn != uuidinfo.get(nodename, {}).get('id.serial', None):
attribs['id.serial'] = sn
if mn and mn != uuidinfo.get(nodename, {}).get('id.model', None):
attribs['id.model'] = mn
for attrname in uuidinfo.get(nodename, {}):
if attrname.endswith('.bootable') and uuidinfo[nodename][attrname].get('value', None):
newattrname = attrname[:-8] + 'hwaddr'
oldhwaddr = uuidinfo.get(nodename, {}).get(newattrname, {}).get('value', None)
if info['hwaddr'] != oldhwaddr:
attribs[newattrname] = info['hwaddr']
if attribs:
cfg.set_node_attributes({nodename: attribs})
if info['uuid'] in known_pxe_uuids:
return True
if uuid_is_valid(info['uuid']):
known_pxe_uuids[info['uuid']] = nodename
#log.log({'info': 'Detected {0} ({1} with mac {2})'.format(
# nodename, handler.devname, info['hwaddr'])})
return True
attribwatcher = None
nodeaddhandler = None
needaddhandled = False
def _handle_nodelist_change(configmanager):
global needaddhandled
global nodeaddhandler
macmap.vintage = 0 # the current mac map is probably inaccurate
_recheck_nodes((), configmanager)
if needaddhandled:
needaddhandled = False
nodeaddhandler = eventlet.spawn(_handle_nodelist_change, configmanager)
else:
nodeaddhandler = None
def newnodes(added, deleting, renamed, configmanager):
global attribwatcher
global needaddhandled
global nodeaddhandler
alldeleting = set(deleting) | set(renamed)
for node in alldeleting:
if node not in known_nodes:
continue
for mac in known_nodes[node]:
if mac in known_info:
del known_info[mac]
del known_nodes[node]
_map_unique_ids()
configmanager.remove_watcher(attribwatcher)
allnodes = configmanager.list_nodes()
attribwatcher = configmanager.watch_attributes(
allnodes, ('discovery.policy', 'net*.switch',
'hardwaremanagement.manager', 'net*.switchport',
'id.uuid', 'pubkeys.tls_hardwaremanager',
'net*.bootable'), _recheck_nodes)
if nodeaddhandler:
needaddhandled = True
else:
nodeaddhandler = eventlet.spawn(_handle_nodelist_change, configmanager)
rechecker = None
rechecktime = None
rechecklock = eventlet.semaphore.Semaphore()
def _periodic_recheck(configmanager):
global rechecker
global rechecktime
rechecker = None
try:
_recheck_nodes((), configmanager)
except Exception:
traceback.print_exc()
log.log({'error': 'Unexpected error during discovery, check debug '
'logs'})
# if rechecker is set, it means that an accelerated schedule
# for rechecker was requested in the course of recheck_nodes
if rechecker is None:
rechecktime = util.monotonic_time() + 900
rechecker = eventlet.spawn_after(900, _periodic_recheck,
configmanager)
def rescan():
_map_unique_ids()
global scanner
if scanner:
return
else:
scanner = eventlet.spawn(blocking_scan)
def blocking_scan():
global scanner
slpscan = eventlet.spawn(slp.active_scan, safe_detected, slp)
ssdpscan = eventlet.spawn(ssdp.active_scan, safe_detected, ssdp)
slpscan.wait()
ssdpscan.wait()
scanner = None
def start_detection():
global attribwatcher
global rechecker
global rechecktime
_map_unique_ids()
cfg = cfm.ConfigManager(None)
allnodes = cfg.list_nodes()
attribwatcher = cfg.watch_attributes(
allnodes, ('discovery.policy', 'net*.switch',
'hardwaremanagement.manager', 'net*.switchport', 'id.uuid',
'pubkeys.tls_hardwaremanager'), _recheck_nodes)
cfg.watch_nodecollection(newnodes)
autosense = cfm.get_global('discovery.autosense')
if autosense or autosense is None:
start_autosense()
if rechecker is None:
rechecktime = util.monotonic_time() + 900
rechecker = eventlet.spawn_after(900, _periodic_recheck, cfg)
eventlet.spawn_n(ssdp.snoop, None, None, ssdp, get_node_by_uuid_or_mac)
def stop_autosense():
for watcher in list(autosensors):
watcher.kill()
autosensors.discard(watcher)
def start_autosense():
autosensors.add(eventlet.spawn(slp.snoop, safe_detected, slp))
autosensors.add(eventlet.spawn(pxe.snoop, safe_detected, pxe, get_node_guess_by_uuid))
nodes_by_fprint = {}
nodes_by_uuid = {}
known_pxe_uuids = {}
def _map_unique_ids(nodes=None):
global nodes_by_uuid
global nodes_by_fprint
global known_pxe_uuids
    # Map current known ids based on uuid and fingerprints for fast lookup
cfg = cfm.ConfigManager(None)
if nodes is None:
nodes_by_uuid = {}
nodes_by_fprint = {}
known_pxe_uuids = {}
nodes = cfg.list_nodes()
bigmap = cfg.get_node_attributes(nodes,
('id.uuid',
'pubkeys.tls_hardwaremanager'))
for uuid in list(nodes_by_uuid):
node = nodes_by_uuid[uuid]
if node in bigmap:
del nodes_by_uuid[uuid]
for uuid in list(known_pxe_uuids):
node = known_pxe_uuids[uuid]
if node in bigmap:
del known_pxe_uuids[uuid]
for fprint in list(nodes_by_fprint):
node = nodes_by_fprint[fprint]
if node in bigmap:
del nodes_by_fprint[fprint]
for node in bigmap:
uuid = bigmap[node].get('id.uuid', {}).get('value', '').lower()
if uuid_is_valid(uuid):
nodes_by_uuid[uuid] = node
known_pxe_uuids[uuid] = node
fprint = bigmap[node].get(
'pubkeys.tls_hardwaremanager', {}).get('value', None)
if fprint:
nodes_by_fprint[fprint] = node
if __name__ == '__main__':
start_detection()
while True:
eventlet.sleep(30)
|
[
"[email protected]"
] | |
ae3069ea5b56ee41d89ee8c8e24a57ba2f5ca18e
|
675c5e97a84cfda399ca74c1804e0218c43b7c70
|
/xTool/contextmanagers/temp.py
|
78739d83c8720635e0978184092eabe4db5c3ca6
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause",
"Python-2.0"
] |
permissive
|
fengzhongzhu1621/xTool
|
37a232e5ea220c8e5cb48aaf6868bc9cf24181e8
|
57d745ce6be531c000a3b477c38bfdd4c2ac74e3
|
refs/heads/master
| 2023-07-20T22:31:50.278926 | 2023-07-18T02:29:21 | 2023-07-18T02:29:21 | 88,688,127 | 3 | 4 |
Apache-2.0
| 2023-05-09T21:53:19 | 2017-04-19T01:49:20 |
Python
|
UTF-8
|
Python
| false | false | 304 |
py
|
# -*- coding: utf-8 -*-
from contextlib import contextmanager
from pathlib import Path
from tempfile import TemporaryDirectory
@contextmanager
def temp_path(name):
""" a simple cross platform replacement for NamedTemporaryFile """
with TemporaryDirectory() as td:
yield Path(td, name)
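# Example usage (illustrative):
#   with temp_path("settings.json") as p:
#       p.write_text(u"{}")
#       data = p.read_text()
# The file and its temporary parent directory are removed when the block exits.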
|
[
"[email protected]"
] | |
f73b54bc15fb86b8360de52a82dabc4c873ff957
|
c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce
|
/flask/flaskenv/Lib/site-packages/tensorflow/contrib/tensor_forest/proto/fertile_stats_pb2.py
|
584ce5ba078ba2067643a2757cab80357484b93a
|
[] |
no_license
|
AhsonAslam/webapi
|
54cf7466aac4685da1105f9fb84c686e38f92121
|
1b2bfa4614e7afdc57c9210b0674506ea70b20b5
|
refs/heads/master
| 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 130 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:4ad07e1cfb048ba607c58cfdff9fbfc290deaa4051df70b3ef39683a1fe3896b
size 20881
|
[
"github@cuba12345"
] |
github@cuba12345
|
5fa61eb6f1f27f9cd58eebdfa987ab4e30cc3809
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-network/azure/mgmt/network/v2018_10_01/models/application_gateway_available_ssl_options.py
|
9e1d2f87d101b4f926d8a7a1660b409e667bd8eb
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 |
MIT
| 2020-10-02T01:17:02 | 2019-05-22T07:33:46 |
Python
|
UTF-8
|
Python
| false | false | 3,052 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class ApplicationGatewayAvailableSslOptions(Resource):
"""Response for ApplicationGatewayAvailableSslOptions API service call.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param predefined_policies: List of available Ssl predefined policy.
:type predefined_policies:
list[~azure.mgmt.network.v2018_10_01.models.SubResource]
:param default_policy: Name of the Ssl predefined policy applied by
default to application gateway. Possible values include:
'AppGwSslPolicy20150501', 'AppGwSslPolicy20170401',
'AppGwSslPolicy20170401S'
:type default_policy: str or
~azure.mgmt.network.v2018_10_01.models.ApplicationGatewaySslPolicyName
:param available_cipher_suites: List of available Ssl cipher suites.
:type available_cipher_suites: list[str or
~azure.mgmt.network.v2018_10_01.models.ApplicationGatewaySslCipherSuite]
:param available_protocols: List of available Ssl protocols.
:type available_protocols: list[str or
~azure.mgmt.network.v2018_10_01.models.ApplicationGatewaySslProtocol]
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'predefined_policies': {'key': 'properties.predefinedPolicies', 'type': '[SubResource]'},
'default_policy': {'key': 'properties.defaultPolicy', 'type': 'str'},
'available_cipher_suites': {'key': 'properties.availableCipherSuites', 'type': '[str]'},
'available_protocols': {'key': 'properties.availableProtocols', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(ApplicationGatewayAvailableSslOptions, self).__init__(**kwargs)
self.predefined_policies = kwargs.get('predefined_policies', None)
self.default_policy = kwargs.get('default_policy', None)
self.available_cipher_suites = kwargs.get('available_cipher_suites', None)
self.available_protocols = kwargs.get('available_protocols', None)
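# Illustrative example (attribute values are hypothetical; in practice this
# model is usually returned by the NetworkManagementClient rather than built
# by hand):
#   opts = ApplicationGatewayAvailableSslOptions(
#       default_policy='AppGwSslPolicy20170401S')
#   print(opts.default_policy)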
|
[
"[email protected]"
] | |
74f2b12334b2c53165e62947970d47f48a275f4c
|
a7f459bcc3da31e4cce7c838e716e089a31cb662
|
/tables.py
|
fc44e3abdad6c6227d08dbff3ea4f18197e6433a
|
[] |
no_license
|
ravenusmc/myDB
|
3d5246a2ad2ffc367d5a540eaa3e71322ed55ace
|
09aa1d88e4cdc7fb19a01807d3d678bf9e3d777a
|
refs/heads/master
| 2020-03-23T13:49:29.450115 | 2018-10-08T23:49:18 | 2018-10-08T23:49:18 | 141,639,913 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 514 |
py
|
#This file only create the database name for each user.
#importing needed files
import mysql.connector
class Tables():
def __init__(self):
self.conn = mysql.connector.connect(user='ted',
password='pass',
host='localhost',
port=3306)
self.cursor = self.conn.cursor()
def create_database(self, database_name):
sql = 'CREATE DATABASE ' + database_name
self.cursor.execute(sql)
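# Example usage (illustrative; the connection credentials above are placeholders):
#   tables = Tables()
#   tables.create_database('user_db')
# Note that database_name is interpolated directly into the SQL statement,
# so it should only ever come from trusted input.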
|
[
"[email protected]"
] | |
9505f1bdb5def62407913d377071ecfcf1f5306b
|
226e8d309e978240fbd6d4b31238daa357f51042
|
/core.py
|
77f3c9d7a2c130bdeda4fcc7d9b7f9f9a0167e06
|
[] |
no_license
|
zokis/Zy
|
e183f9ffb2fdfbe2b253666d5a17093b4929658d
|
9a4959661d8a221cb359e119945febd6573b5165
|
refs/heads/master
| 2021-01-01T19:30:18.393976 | 2015-04-30T21:46:21 | 2015-04-30T21:46:21 | 33,011,032 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,501 |
py
|
# coding: utf-8
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import re
ESC_STR = r'#[!]#'
class Symbol(str):
pass
class Lambda(object):
def __init__(self, parms, body, env):
self.parms, self.body, self.env = parms, body, env
def __call__(self, *args):
return zy_eval(self.body, Env(self.parms, args, self.env))
class Env(dict):
def __init__(self, parms=(), args=(), outer=None):
self.outer = outer
self.update(zip(parms, args))
def find(self, var):
return self if (var in self) else self.outer.find(var)
class ZyString(str):
def __div__(self, other):
return map(ZyString, self.split(other))
__truediv__ = __div__
def __sub__(self, other):
return ZyString(self.replace(other, ''))
def __add__(self, other):
return ZyString(super(ZyString, self).__add__(other))
def __mul__(self, other):
return ZyString(super(ZyString, self).__mul__(other))
class ZyBool(object):
true = True
false = False
def __new__(cls, val):
if val:
if cls.true is True:
cls.true = super(ZyBool, cls).__new__(cls, cls.true)
return cls.true
else:
if cls.false is False:
cls.false = super(ZyBool, cls).__new__(cls, cls.false)
return cls.false
def __init__(self, val):
self.val = val
def __nonzero__(self):
return self.val
def __repr__(self):
return '#t' if self.val else '#f'
__str__ = __repr__
ZyTrue = ZyBool(True)
ZyFalse = ZyBool(False)
def atom(token):
if token[0] == '"':
return ZyString(token[1:-1].decode('utf-8'))
try:
return float(token)
except ValueError:
return Symbol(token)
def tokenize(program):
program_iter = iter(program)
strings = []
while True:
try:
c = program_iter.next()
except StopIteration:
break
if c == '"':
r = []
while True:
try:
c = program_iter.next()
except StopIteration:
break
if c == '"':
strings.append(''.join(r).replace('"', ''))
break
else:
r.append(c)
tokens = re.sub('\"(.+?)\"', ESC_STR, program).replace(')', ' ) ').replace('(', ' ( ').split()
str_index = 0
for k, t in enumerate(tokens):
if t == ESC_STR:
tokens[k] = '"%s"' % strings[str_index]
str_index += 1
return tokens
def atomize(tokens):
if len(tokens) == 0:
raise SyntaxError('unexpected EOF')
token = tokens.pop(0)
if token == '(':
r = []
while tokens[0] != ')':
r.append(atomize(tokens))
tokens.pop(0)
return r
elif token == ')':
raise SyntaxError('unexpected )')
else:
return atom(token)
def parse(program):
return atomize(tokenize(program))
def standard_env():
env = Env()
env.update({
'.': lambda *args, **kwargs: None,
'!': lambda x: ZyBool(x),
'!!': lambda x: ZyBool(not x),
'#pi': 3.141592653589793,
'#nil': None,
'#f': ZyFalse,
'#t': ZyTrue,
'*': lambda x, y: x * y,
'+': lambda x, y: x + y,
'-': lambda x, y: x - y,
'/': lambda x, y: x / y,
'<': lambda x, y: x < y,
'>': lambda x, y: x > y,
'=': lambda x, y: x == y,
'**': lambda x, y: x ** y,
'++': lambda x: x + 1.,
'--': lambda x: x - 1.,
'..': lambda x, y, s=1: range(int(x), int(y), int(s)),
'/]': lambda x: float(int(x)),
'/[': round,
'[]': lambda *x: list(x),
'[:]': lambda x, y: y[int(x)],
',': float,
"'": ZyString,
'<=': lambda x, y: x <= y,
'>=': lambda x, y: x >= y,
'<->': lambda x, y: [y, x],
'>>': print,
'<<': raw_input,
})
return env
GLOBAL_ENV = standard_env()
def zy_eval(x, env=GLOBAL_ENV):
if isinstance(x, Symbol):
return env.find(x)[x]
elif not isinstance(x, list):
return x
elif x[0] == '?':
_, test, _if, _else = x
exp = (_if if zy_eval(test, env) else _else)
return zy_eval(exp, env)
elif x[0] == '->':
_, var, exp = x
env[var] = zy_eval(exp, env)
elif x[0] == ',->':
x = x[1:]
ln = int(len(x) / 2)
params, args = x[:ln], x[ln:]
if len(params) != len(args):
raise ValueError('It has not been possible to do the unpack')
for i in range(ln):
env[params[i]] = zy_eval(args[i], env)
elif x[0] == '@':
_, parms, body = x
return Lambda(parms, body, env)
elif x[0] == '*>':
_, var, _list, body, r = x
_env = env
for w in zy_eval(_list, _env):
_env = Env([var], [w], _env)
zy_eval(body, _env)
return zy_eval(r, _env)
else:
return zy_eval(x[0], env)(*[zy_eval(exp, env) for exp in x[1:]])
def to_zy_str(exp):
if isinstance(exp, Symbol):
return exp
elif isinstance(exp, ZyString):
return '"%s"' % exp.encode('utf-8').replace('"', r'\"')
elif isinstance(exp, list):
return "(%s)" % ' '.join(map(to_zy_str, exp))
else:
return str(exp)
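# Minimal usage sketch (assumes Python 2, which the interpreter above targets).
if __name__ == '__main__':
    print(zy_eval(parse('(+ 1 2)')))            # -> 3.0
    print(zy_eval(parse('(? (> 2 1) 10 20)')))  # -> 10.0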
|
[
"[email protected]"
] | |
50c7c5264c127c5133745ffb614f121b6f470cc6
|
78316ffc5c14d6c0c6a144c25be0ba695ae6f4db
|
/svgout/manipulator.py
|
82dab696782ef31e1663d4413bd0b01725d1065e
|
[] |
no_license
|
meyt/svgout
|
235fad9ee59b05f6caddde8ad5b67823c3f8916b
|
21770144dade3c22314143291030a3bc24a5b248
|
refs/heads/master
| 2023-02-02T08:55:14.760035 | 2020-12-07T09:28:15 | 2020-12-07T09:28:15 | 319,265,728 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,928 |
py
|
import re
import yaml
import logging
import cssutils
from os.path import join
from bs4 import BeautifulSoup
cssutils.log.setLevel(logging.CRITICAL)
class ElementStyle:
def __init__(self, bs_element):
self.el = bs_element
self.style = cssutils.parseStyle(self.el["style"])
def __getitem__(self, key):
return self.style[key]
def __setitem__(self, key, val):
self.style[key] = val
self.el["style"] = self.style.cssText
def __delitem__(self, key):
self.style.removeProperty(key)
self.el["style"] = self.style.cssText
class Element:
def __init__(self, bs_element):
self.el = bs_element
@property
def style(self):
return ElementStyle(self.el)
def hide(self):
self.style["display"] = "none"
def show(self):
del self.style["display"]
class Manipulator:
def __init__(self, config_filename: str, svg_filename: str):
with open(config_filename, "r") as f:
self.config = yaml.load(f, Loader=yaml.Loader)
with open(svg_filename, "r") as f:
self.bs = BeautifulSoup(f.read(), "xml")
def save(self, filename):
with open(filename, "w", encoding="utf-8") as f:
f.write(str(self.bs))
def process(self, output_dir: str, stdout: bool = True):
config = self.config
bs = self.bs
for outkey, outval in config.items():
output_filename = join(output_dir, outkey + ".svg")
if stdout:
print(output_filename)
for command, elementpatterns in outval.items():
for elementpattern in elementpatterns:
elements = bs.findAll(id=re.compile(elementpattern))
for bs_element in elements:
el = Element(bs_element)
getattr(el, command)()
self.save(output_filename)
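# Example (a sketch; the output name, id patterns and file names below are
# assumptions). A config YAML maps each output file to show/hide commands and
# the element-id regexes they apply to:
#
#   output-a:
#     show:
#       - "^layer_a"
#     hide:
#       - "^layer_b"
#
# Manipulator('config.yml', 'drawing.svg').process('out/')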
|
[
"[email protected]"
] | |
6fa3c5bb33d1e17219f01a2c7a0ac9688776ac2a
|
35e892b01d2dfeea6f66a29fa336b2478e06bcea
|
/src/mcedit2/widgets/mcedockwidget.py
|
8bb08d93090fa2ac5f3d094f99e0e8efe57bc3a2
|
[
"BSD-3-Clause"
] |
permissive
|
theomission/mcedit2
|
bf1d0b7e00eaf3523b386b5909b3e3796e73dc2f
|
39a717b3cab5dd8366ed8542a070e4120386eb92
|
refs/heads/master
| 2020-12-31T02:00:52.356814 | 2015-11-09T21:43:44 | 2015-11-09T21:44:52 | 46,336,522 | 1 | 0 | null | 2015-11-17T09:26:11 | 2015-11-17T09:26:11 | null |
UTF-8
|
Python
| false | false | 1,294 |
py
|
"""
mcedockwidget
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from PySide import QtGui, QtCore
import logging
log = logging.getLogger(__name__)
class MCEDockWidget(QtGui.QDockWidget):
def __init__(self, *a, **kw):
super(MCEDockWidget, self).__init__(*a, **kw)
self._unfocusedOpacity = 1.0
def setUnfocusedOpacity(self, value):
self._unfocusedOpacity = value
def animate(self, value):
self.setWindowOpacity(value)
def enterEvent(self, event):
if self._unfocusedOpacity == 1.0:
return
self.animation = animation = QtCore.QPropertyAnimation(self, 'windowOpacity')
animation.setDuration(100)
animation.setStartValue(self.windowOpacity())
animation.setEndValue(1.0)
animation.valueChanged.connect(self.animate)
animation.start()
def leaveEvent(self, event):
if self._unfocusedOpacity == 1.0:
return
self.animation = animation = QtCore.QPropertyAnimation(self, 'windowOpacity')
animation.setDuration(250)
animation.setStartValue(self.windowOpacity())
animation.setEndValue(self._unfocusedOpacity)
animation.valueChanged.connect(self.animate)
animation.start()
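# Example usage (a sketch; the title and opacity value are arbitrary):
#
# dock = MCEDockWidget("Inspector")
# dock.setUnfocusedOpacity(0.6)  # fade to 60% opacity when the pointer leaves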
|
[
"[email protected]"
] | |
c0e5d3a39d855c37ca27c9e9df1c27e7e57350ab
|
7b667511748ded171b66bf313d1dffe6f875289e
|
/tests/matrix_add_global_addr_offset/matrix_add_global_addr_offset.py
|
82fa1a80ab86e72ea6ffc595134fafa0757fd3f2
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
herosugi/nngen
|
3b94301ba43ba0684be31c42c4977e1f72a081de
|
ce09cd1dba55d815163adfe901c7cca65dc0709f
|
refs/heads/master
| 2020-09-09T02:14:04.746559 | 2019-11-12T09:11:33 | 2019-11-12T09:11:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,536 |
py
|
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import functools
import math
import numpy as np
if sys.version_info.major < 3:
from itertools import izip_longest as zip_longest
else:
from itertools import zip_longest
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))))
import nngen as ng
from veriloggen import *
import veriloggen.thread as vthread
import veriloggen.types.axi as axi
def run(a_shape=(15, 15), b_shape=(15, 15),
a_dtype=ng.int32, b_dtype=ng.int32, c_dtype=ng.int32,
par=1, axi_datawidth=32, silent=False, global_addr_offset=0,
filename=None, simtype='iverilog', outputfile=None):
# create target hardware
a = ng.placeholder(a_dtype, shape=a_shape, name='a')
b = ng.placeholder(b_dtype, shape=b_shape, name='b')
c = ng.add(a, b, dtype=c_dtype, par=par)
targ = ng.to_veriloggen([c], 'matrix_add_global_addr_offset', silent=silent,
config={'maxi_datawidth': axi_datawidth,
'default_global_addr_offset': global_addr_offset})
# verification data
va = np.arange(a.length, dtype=np.int64).reshape(a.shape) % [5]
vb = (np.arange(b.length, dtype=np.int64).reshape(b.shape) + [100]) % [6]
vc = ng.verify.add(va, vb, par=par,
dtype=c_dtype,
x_dtype=a_dtype, y_dtype=b_dtype)
# to memory image
size_max = int(math.ceil(max(a.memory_size, b.memory_size, c.memory_size) / 4096)) * 4096
check_addr = max(a.addr, b.addr, c.addr) + size_max
size_check = size_max
tmp_addr = check_addr + size_check
memimg_datawidth = 32
mem = np.zeros([1024 * 1024 * 8 // memimg_datawidth], dtype=np.int64)
mem = mem + [100]
axi.set_memory(mem, va, memimg_datawidth,
a_dtype.width, a.addr + global_addr_offset,
max(int(math.ceil(axi_datawidth / a_dtype.width)), par))
axi.set_memory(mem, vb, memimg_datawidth,
b_dtype.width, b.addr + global_addr_offset,
max(int(math.ceil(axi_datawidth / b_dtype.width)), par))
axi.set_memory(mem, vc, memimg_datawidth,
c_dtype.width, check_addr + global_addr_offset,
max(int(math.ceil(axi_datawidth / c_dtype.width)), par))
# test controller
m = Module('test')
params = m.copy_params(targ)
ports = m.copy_sim_ports(targ)
clk = ports['CLK']
resetn = ports['RESETN']
rst = m.Wire('RST')
rst.assign(Not(resetn))
# AXI memory model
if outputfile is None:
outputfile = os.path.splitext(os.path.basename(__file__))[0] + '.out'
memimg_name = 'memimg_' + outputfile
memory = axi.AxiMemoryModel(m, 'memory', clk, rst,
datawidth=axi_datawidth,
memimg=mem, memimg_name=memimg_name,
memimg_datawidth=memimg_datawidth)
memory.connect(ports, 'maxi')
# AXI-Slave controller
_saxi = vthread.AXIMLite(m, '_saxi', clk, rst, noio=True)
_saxi.connect(ports, 'saxi')
# timer
time_counter = m.Reg('time_counter', 32, initval=0)
seq = Seq(m, 'seq', clk, rst)
seq(
time_counter.inc()
)
num_rep = functools.reduce(lambda x, y: x * y, c.shape[:-1], 1)
def ctrl():
for i in range(100):
pass
ng.sim.set_global_offset(_saxi, global_addr_offset)
ng.sim.set_global_addrs(_saxi, tmp_addr)
start_time = time_counter.value
ng.sim.start(_saxi)
print('# start')
ng.sim.wait(_saxi)
end_time = time_counter.value
print('# end')
print('# execution cycles: %d' % (end_time - start_time))
# verify
ok = True
for i in range(num_rep):
for j in range(c.shape[-1]):
orig = memory.read_word(i * c.aligned_shape[-1] + j,
c.addr + global_addr_offset, c_dtype.width)
check = memory.read_word(i * c.aligned_shape[-1] + j,
check_addr + global_addr_offset, c_dtype.width)
if vthread.verilog.NotEql(orig, check):
print('NG', i, j, orig, check)
ok = False
# else:
# print('OK', i, j, orig, check)
if ok:
print('# verify: PASSED')
else:
print('# verify: FAILED')
vthread.finish()
th = vthread.Thread(m, 'th_ctrl', clk, rst, ctrl)
fsm = th.start()
uut = m.Instance(targ, 'uut',
params=m.connect_params(targ),
ports=m.connect_ports(targ))
# simulation.setup_waveform(m, uut)
simulation.setup_clock(m, clk, hperiod=5)
init = simulation.setup_reset(m, resetn, m.make_reset(), period=100, polarity='low')
init.add(
Delay(1000000),
Systask('finish'),
)
# output source code
if filename is not None:
m.to_verilog(filename)
# run simulation
sim = simulation.Simulator(m, sim=simtype)
rslt = sim.run(outputfile=outputfile)
lines = rslt.splitlines()
if simtype == 'verilator' and lines[-1].startswith('-'):
rslt = '\n'.join(lines[:-1])
return rslt
if __name__ == '__main__':
rslt = run(silent=False, filename='tmp.v')
print(rslt)
|
[
"[email protected]"
] | |
25aaf0c09cf04ff7e328393bfd3aac3c91ea28c4
|
bc2327d2bce695bb4881be63b1912f550857fd14
|
/comps_and_gens/avoid_injecting_data.py
|
c40913be675650a5917f6c9caa4c0590c0adef57
|
[] |
no_license
|
mentalclear/fluent-in-python
|
1a1d9ad30e949e72d8633156091b84b6d52b85bc
|
243cff274861abc853b4ba5d03090191df5cd7db
|
refs/heads/master
| 2023-08-05T19:26:48.787996 | 2021-10-06T13:04:14 | 2021-10-06T13:04:14 | 402,944,060 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,528 |
py
|
import math
def wave(amplitude, steps):
step_size = 2 * math.pi / steps
for step in range(steps):
radians = step * step_size
fraction = math.sin(radians)
output = amplitude * fraction
yield output
def transmit(output):
if output is None:
print(f'Output is None')
else:
print(f'Output: {output:>5.1f}')
def run(it):
for output in it:
transmit(output)
run(wave(3.0, 8))
def my_generator():
received = yield 1
print(f'received = {received}')
# it = iter(my_generator())
# output = next(it) # Get first generator output
# print(f'output = {output}')
# try:
# next(it) # Run generator until it exits
# except StopIteration:
# pass
it = iter(my_generator())
output = it.send(None) # Get first generator output
print(f'output = {output}')
try:
it.send('hello!') # Send value into the generator
except StopIteration:
pass
def wave_modulating(steps):
step_size = 2 * math.pi / steps
amplitude = yield # Receive initial amplitude
for step in range(steps):
radians = step * step_size
fraction = math.sin(radians)
output = amplitude * fraction
amplitude = yield output # Receive next amplitude
def run_modulating(it):
amplitudes = [
None, 7, 7, 7, 2, 2, 2, 2, 10, 10, 10, 10, 10]
for amplitude in amplitudes:
output = it.send(amplitude)
transmit(output)
run_modulating(wave_modulating(12))
def complex_wave():
yield from wave(7.0, 3)
yield from wave(2.0, 4)
yield from wave(10.0, 5)
run(complex_wave())
print("\n")
def complex_wave_modulating():
yield from wave_modulating(3)
yield from wave_modulating(4)
yield from wave_modulating(5)
run_modulating(complex_wave_modulating())
def wave_cascading(amplitude_it, steps):
step_size = 2 * math.pi / steps
for step in range(steps):
radians = step * step_size
fraction = math.sin(radians)
amplitude = next(amplitude_it) # Get next input
output = amplitude * fraction
yield output
print("\n")
def complex_wave_cascading(amplitude_it):
yield from wave_cascading(amplitude_it, 3)
yield from wave_cascading(amplitude_it, 4)
yield from wave_cascading(amplitude_it, 5)
def run_cascading():
amplitudes = [7, 7, 7, 2, 2, 2, 2, 10, 10, 10, 10, 10]
it = complex_wave_cascading(iter(amplitudes))
for amplitude in amplitudes:
output = next(it)
transmit(output)
run_cascading()
|
[
"[email protected]"
] | |
0211ba058f2a125de890a88deb09db150dcd28fd
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03377/s880122693.py
|
549d34c27687fe087fad3b2a8b81d930c93bff0d
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 74 |
py
|
a,b,x=map(int,input().split())
print("YES" if a+b-x>=0 and x>=a else "NO")
|
[
"[email protected]"
] | |
4e9afc0ee7bcec1dc84aa50b3eca0655bcedea07
|
66c6df450753acc7c41db5afe66abd35d5018c8c
|
/cliente Rujel/bin33.py
|
b8af2ff866744ea341d41bab5428a230e2eef354
|
[] |
no_license
|
hanmiton/CodigoCompletoEncriptacion
|
a33807d9470b538842751071031c9ce60951260f
|
efb7898af5d39025e98c82f1f71c8e9633cce186
|
refs/heads/master
| 2020-03-24T02:03:08.242655 | 2018-07-25T22:41:05 | 2018-07-25T22:41:05 | 142,360,817 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,288 |
py
|
import sys
import math
import random
import openpyxl
LAMBDA = 16 #security parameter
N = LAMBDA
P = LAMBDA ** 2
Q = LAMBDA ** 5
def principal(m1,m2):
doc = openpyxl.load_workbook('cifrado.xlsx')
doc.get_sheet_names()
hoja = doc.get_sheet_by_name('Hoja1')
m1 = int(hoja['A7'].value)
boln1 = bin(m1)
boln2 = bin(m2)
boln1Encrypt = []
boln2Encrypt = []
sumEncrypt = []
mulEnctypt = []
res = []
aux = []
if(len(boln1) > len(boln2)):
print len(boln1) - len(boln2)
for i in range(0, len(boln1) - len(boln2)):
aux.append(0)
boln2 = aux + boln2
else:
print len(boln2) - len(boln1)
for i in range(0, len(boln2) - len(boln1)):
aux.append(0)
boln1 = aux + boln1
key = map(keygen,boln1)
boln1Encrypt = map(encrypt,key,boln1)
boln2Encrypt = map(encrypt,key,boln2)
sumEncrypt = map(add,boln1Encrypt,boln2Encrypt)
mulEnctypt = map(mult,boln1Encrypt, boln2Encrypt)
resSuma = map (decrypt, key, sumEncrypt)
strSuma = ''.join(str(e) for e in resSuma)
decSuma = int(strSuma, 2)
resMult = map (decrypt, key, mulEnctypt)
strMult = ''.join(str(e) for e in resMult)
decMult = int(strMult, 2)
return sumEncrypt
def quot(z, p):
# http://stackoverflow.com/questions/3950372/round-with-integer-division
return (z + p // 2) // p
def mod(z, p):
return z - quot(z,p) * p
def keygen(n):
key = random.getrandbits(P)
while(key % 2 == 0):
key = random.getrandbits(P)
return key
def encrypt(key, aBit):
q = random.getrandbits(Q)
m_a = 2 * random.getrandbits(N - 1)
c = key * q + m_a + aBit
return c
def decrypt(key, cipherText):
return mod(cipherText, key) % 2
def add(cipherText1, cipherText2):
return cipherText1 + cipherText2
def mult(cipherText1, cipherText2):
return cipherText1 * cipherText2
def bin(numero):
binario = ""
listaN = []
listaRn = []
if (numero >0):
while (numero >0):
if(numero%2 ==0):
listaN.append(0)
binario="0"+binario
else:
listaN.append(1)
binario = "1"+ binario
numero = int (math.floor(numero/2))
else:
if (numero ==0):
listaN.append(0)
return listaN
else:
return " no se pudo convertir el numero. ingrese solo numeros positivos"
for i in reversed(listaN):
listaRn.append(i)
return listaRn
if __name__ == '__main__':
    # m1 and m2 are not defined at module level; arbitrary sample values are
    # used here so the script can run standalone (m1 is overwritten from
    # cifrado.xlsx inside principal in any case).
    principal(25, 17)
|
[
"[email protected]"
] | |
1ec4d6b7f1ee5824542f78212d28e4851ad938e3
|
eac22714038e840028cc5abb72bc750004626ebb
|
/mct_camera_calibrator/src/mct_camera_calibrator/calibrator_service.py
|
825e733d2ae04f8e770576b619f5196ebe8297e9
|
[
"Apache-2.0"
] |
permissive
|
iorodeo/mct
|
79b19f6dab9f6567452df7274d67245bf64b1801
|
fa8b85f36533c9b1486ca4f6b0c40c3daa6f4e11
|
refs/heads/master
| 2022-11-11T18:03:18.178182 | 2014-08-20T19:21:27 | 2014-08-20T19:21:27 | 273,790,182 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,345 |
py
|
from __future__ import print_function
import roslib
roslib.load_manifest('mct_camera_calibrator')
import rospy
from mct_msg_and_srv.srv import GetBool
from mct_msg_and_srv.srv import GetString
def good_enough(calibrator):
"""
    Wrapper for the good_enough service provided by the cameracalibrator nodes.
    Given the camera calibrator, e.g. '/mct_master/camera_1/camera/camera_calibrator',
    returns a boolean value (True or False) indicating whether or not the camera data
    collected so far is good enough to calculate the camera calibration.
"""
srv_name = '{0}/good_enough'.format(str(calibrator))
rospy.wait_for_service(srv_name)
proxy = rospy.ServiceProxy(srv_name,GetBool)
try:
response = proxy()
value = response.value
except rospy.ServiceException, e:
rospy.logerr('service request failed: {0}'.format(str(e)))
value = None
return value
def calibrate(calibrator):
"""
Wrapper for the calibrate service provided by the cameracalibrator nodes.
    Given the camera calibrator, e.g. '/mct_master/camera_1/camera/camera_calibrator',
this function requests that the node calculate the camera calibration given the data
collected so far. Returns True if a calibration can be calculated and False otherwise.
"""
srv_name = '{0}/calibrate'.format(str(calibrator))
rospy.wait_for_service(srv_name)
proxy = rospy.ServiceProxy(srv_name,GetBool)
try:
response = proxy()
value = response.value
except rospy.ServiceException, e:
rospy.logerr('service request failed: {0}'.format(str(e)))
value = None
return value
def get_calibration(calibrator):
"""
    Wrapper for the get_calibration service provided by the
cameracalibrator nodes. Given a camera calibrator, e.g.,
'/mct_master/camera_1/camera/camera_calibrator', returns the camera
calibration or an empty string if a calibration has not yet been calculated.
"""
srv_name = '{0}/get_calibration'.format(str(calibrator))
rospy.wait_for_service(srv_name)
proxy = rospy.ServiceProxy(srv_name,GetString)
try:
response = proxy()
data = response.data
except rospy.ServiceException, e:
rospy.logerr('service request failed: {0}'.format(str(e)))
data = None
return data
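# Example usage (a sketch; requires a running ROS master and the
# cameracalibrator node named in the docstrings above):
#
# calibrator = '/mct_master/camera_1/camera/camera_calibrator'
# if good_enough(calibrator) and calibrate(calibrator):
#     cal_data = get_calibration(calibrator)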
|
[
"[email protected]"
] | |
6ed74c82e53f1e6b15c7c46353e105569c667bb2
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/110_concurrency_parallelism/_exercises/templates/Learning Concurrency in Python/Chapter 08/mapPool.py
|
ba7352deaa660b0e307545fa085edce324923c64
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 |
Python
|
UTF-8
|
Python
| false | false | 202 |
py
|
# ____ m.. ______ P..
# ______ ti..
#
# ___ myTask n
# t__.s.. ?+2
# r_ ?+2
#
# ___ main
# w__ P.. 4 __ p
# ___ iter __ ?.i_u.. ? |1,3,2,1
# print ?
#
# __ _________ __ ________
# ?
|
[
"[email protected]"
] | |
f90ab8530e28474c7c8a23196d0f20aabcc1e379
|
bdc45798b67c0d12b78845c3c31e690564b40ed5
|
/projects/bbs/bb/views.py
|
4ac49069fe6e4eca8557f9ce4126a9d9156b3771
|
[
"MIT"
] |
permissive
|
mozillazg/django-simple-projects
|
c16d8105985707ef572fcb1fb53f76d7e3ed0362
|
6ccd1232cb76595f6dbafa282cef2c20edbb1148
|
refs/heads/master
| 2023-08-23T20:23:59.139601 | 2015-09-25T23:28:56 | 2015-09-25T23:28:56 | 7,768,010 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,055 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from django.contrib.auth import login
from django.contrib.auth import logout
from django import forms
from django.template import RequestContext
from django.core.paginator import Paginator
from django.core.paginator import EmptyPage
from django.core.paginator import PageNotAnInteger
from bb.models import Node
from bb.models import Topic
from bb.models import Reply
from bb.models import UserProfile
class SignupForm(forms.Form):
name = forms.CharField()
password = forms.CharField(widget=forms.PasswordInput())
email = forms.EmailField()
class SigninForm(SignupForm):
def __init__(self, *args, **kwargs):
SignupForm.__init__(self, *args, **kwargs)
if 'email' in self.fields:
del self.fields['email']
class ChangePasswordForm(forms.Form):
password = forms.CharField(widget=forms.PasswordInput)
class ReplyForm(forms.Form):
reply = forms.CharField(widget=forms.Textarea())
class CreateForm(forms.ModelForm):
class Meta:
model = Topic
exclude = ('user', 'hits', 'reply_count')
def signup(request):
if request.method == 'POST':
form = SignupForm(request.POST)
if form.is_valid():
name = form.cleaned_data['name']
password = form.cleaned_data['password']
email = form.cleaned_data['email']
user = User.objects.create_user(name, email, password)
user.save()
return HttpResponseRedirect('/')
else:
form = SignupForm()
return render_to_response('signup.html', {'form': form},
context_instance=RequestContext(request))
def change_password(request):
if not request.user.is_authenticated():
return HttpResponseRedirect('/')
if request.method == 'POST':
form = ChangePasswordForm(request.POST)
if form.is_valid():
password = form.cleaned_data['password']
username = request.user.username
user = User.objects.get(username=username)
user.set_password(password)
user.save()
logout(request)
return HttpResponseRedirect('/account/signin')
else:
form = ChangePasswordForm()
return render_to_response('account.html', {'form': form},
context_instance=RequestContext(request))
def signin(request):
if request.method == 'POST':
form = SigninForm(request.POST)
if form.is_valid():
name = form.cleaned_data['name']
password = form.cleaned_data['password']
user = authenticate(username=name, password=password)
if user:
if user.is_active:
userprofile = UserProfile.objects.get(user=user)
userprofile.login_count += 1
userprofile.save()
login(request, user)
return HttpResponseRedirect('/')
else:
form = SigninForm()
return render_to_response('signin.html', {'form': form},
context_instance=RequestContext(request))
def log_out(request):
logout(request)
return HttpResponseRedirect('/')
# def index(request):
# topics = Topic.objects.all()
# page_count = [i for i in range(len(topics)/5)] else [0]
# context =
# return render_to_response('index.html', {'topics': topics})
def page(request, page_id=1, node_id=0, popular=False):
nav_name = ''
topics = Topic.objects.order_by('-last_reply_time')
if popular:
topics = topics.order_by('-reply_count', '-last_reply_time')
nav_name = 'Popular'
elif node_id:
node = Node.objects.get(id=node_id)
topics = topics.filter(node=node)
count = topics.count()
nav_name = node.title
# Pagination
limit = 10
paginator = Paginator(topics, limit)
try:
topics = paginator.page(page_id)
except EmptyPage:
topics = paginator.page(paginator.num_pages)
user = request.user
context = {
'topics': topics,
'user': user,
'node_id': node_id,
'nav_name': nav_name,
}
return render_to_response('index.html', context,
context_instance=RequestContext(request))
def nodes(request):
nodes = Node.objects.all()
nav_name = 'Nodes'
return render_to_response('nodes.html', {'nodes': nodes,
'nav_name': nav_name},
context_instance=RequestContext(request))
def topic(request, topic_id, page_id=1):
topic_ = Topic.objects.get(id=topic_id)
replies = Reply.objects.filter(topic=topic_).order_by('-created')
topic_.hits += 1
topic_.save()
# Pagination
limit = 5
paginator = Paginator(replies, limit)
try:
replies = paginator.page(page_id)
except EmptyPage:
replies = paginator.page(paginator.num_pages)
context = {
'user': request.user,
'topic': topic_,
# 'replies': replies,
# 'form': ReplyForm(),
}
return render_to_response('topic.html', context,
context_instance=RequestContext(request))
def reply(request, topic_id):
if request.method == 'POST':
form = ReplyForm(request.POST)
if form.is_valid() and request.user.is_authenticated():
name = request.user.username
user = User.objects.get(username=name)
content = form.cleaned_data['reply']
topic_ = Topic.objects.get(id=topic_id)
reply_ = Reply(topic=topic_, user=user, content=content)
reply_.save()
topic_.reply_count += 1
topic_.save()
return HttpResponseRedirect('/topic/' + str(topic_id))
return HttpResponseRedirect('/')
def create(request):
if not request.user.is_authenticated():
return HttpResponseRedirect('/')
if request.method == 'POST':
form = CreateForm(request.POST)
if form.is_valid():
name = request.user.username
user = User.objects.get(username=name)
title = form.cleaned_data['title']
content = form.cleaned_data['content']
node_title = form.cleaned_data['node']
node = Node.objects.get(title=node_title)
topic_ = Topic(title=title, content=content, node=node,
user=user)
topic_.save()
node.topic_count += 1
node.save()
return HttpResponseRedirect('/topic/' + str(topic_.id))
else:
form = CreateForm()
context = {
'form': form,
}
return render_to_response('create.html', context,
context_instance=RequestContext(request))
|
[
"[email protected]"
] | |
90d9af82c6f7b23981f29ff3435d608517689b8f
|
5b93930ce8280b3cbc7d6b955df0bfc5504ee99c
|
/nodes/Geron17Hands/B_PartI/G_Chapter7/D_RandomForests/index.py
|
8ff8d66c8fd6f9f178763d49d57ebff430e40833
|
[] |
no_license
|
nimra/module_gen
|
8749c8d29beb700cac57132232861eba4eb82331
|
2e0a4452548af4fefd4cb30ab9d08d7662122cf4
|
refs/heads/master
| 2022-03-04T09:35:12.443651 | 2019-10-26T04:40:49 | 2019-10-26T04:40:49 | 213,980,247 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,865 |
py
|
# Lawrence McAfee
# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.HierBlock import HierBlock as hbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.ListBlock import ListBlock as lbk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk
from .A_ExtraTrees.index import ExtraTrees as A_ExtraTrees
from .B_FeatureImportance.index import FeatureImportance as B_FeatureImportance
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
blocks = [
# Download from finelybook www.finelybook.com
# Sampling features results in even more predictor diversity, trading a bit more bias for
# a lower variance.
#
# Random Forests
# As we have discussed, a Random Forest9 is an ensemble of Decision Trees, generally
# trained via the bagging method (or sometimes pasting), typically with max_samples
# set to the size of the training set. Instead of building a BaggingClassifier and pass‐
# ing it a DecisionTreeClassifier, you can instead use the RandomForestClassifier
# class, which is more convenient and optimized for Decision Trees10 (similarly, there is
# a RandomForestRegressor class for regression tasks). The following code trains a
# Random Forest classifier with 500 trees (each limited to maximum 16 nodes), using
# all available CPU cores:
# from sklearn.ensemble import RandomForestClassifier
#
# rnd_clf = RandomForestClassifier(n_estimators=500, max_leaf_nodes=16, n_jobs=-1)
# rnd_clf.fit(X_train, y_train)
#
# y_pred_rf = rnd_clf.predict(X_test)
#
# With a few exceptions, a RandomForestClassifier has all the hyperparameters of a
# DecisionTreeClassifier (to control how trees are grown), plus all the hyperpara‐
# meters of a BaggingClassifier to control the ensemble itself.11
# The Random Forest algorithm introduces extra randomness when growing trees;
# instead of searching for the very best feature when splitting a node (see Chapter 6), it
# searches for the best feature among a random subset of features. This results in a
# greater tree diversity, which (once again) trades a higher bias for a lower variance,
# generally yielding an overall better model. The following BaggingClassifier is
# roughly equivalent to the previous RandomForestClassifier:
# bag_clf = BaggingClassifier(
# DecisionTreeClassifier(splitter="random", max_leaf_nodes=16),
# n_estimators=500, max_samples=1.0, bootstrap=True, n_jobs=-1
# )
#
#
#
#
# 9 “Random Decision Forests,” T. Ho (1995).
# 10 The BaggingClassifier class remains useful if you want a bag of something other than Decision Trees.
# 11 There are a few notable exceptions: splitter is absent (forced to "random"), presort is absent (forced to
# False), max_samples is absent (forced to 1.0), and base_estimator is absent (forced to DecisionTreeClassi
# fier with the provided hyperparameters).
#
#
#
# Random Forests | 189
#
# Download from finelybook www.finelybook.com
# Extra-Trees
# When you are growing a tree in a Random Forest, at each node only a random subset
# of the features is considered for splitting (as discussed earlier). It is possible to make
# trees even more random by also using random thresholds for each feature rather than
# searching for the best possible thresholds (like regular Decision Trees do).
# A forest of such extremely random trees is simply called an Extremely Randomized
# Trees ensemble12 (or Extra-Trees for short). Once again, this trades more bias for a
# lower variance. It also makes Extra-Trees much faster to train than regular Random
# Forests since finding the best possible threshold for each feature at every node is one
# of the most time-consuming tasks of growing a tree.
# You can create an Extra-Trees classifier using Scikit-Learn’s ExtraTreesClassifier
# class. Its API is identical to the RandomForestClassifier class. Similarly, the Extra
# TreesRegressor class has the same API as the RandomForestRegressor class.
#
# It is hard to tell in advance whether a RandomForestClassifier
# will perform better or worse than an ExtraTreesClassifier. Gen‐
# erally, the only way to know is to try both and compare them using
# cross-validation (and tuning the hyperparameters using grid
# search).
#
#
# Feature Importance
# Lastly, if you look at a single Decision Tree, important features are likely to appear
# closer to the root of the tree, while unimportant features will often appear closer to
# the leaves (or not at all). It is therefore possible to get an estimate of a feature’s impor‐
# tance by computing the average depth at which it appears across all trees in the forest.
# Scikit-Learn computes this automatically for every feature after training. You can
# access the result using the feature_importances_ variable. For example, the follow‐
# ing code trains a RandomForestClassifier on the iris dataset (introduced in Chap‐
# ter 4) and outputs each feature’s importance. It seems that the most important
# features are the petal length (44%) and width (42%), while sepal length and width are
# rather unimportant in comparison (11% and 2%, respectively):
# >>> from sklearn.datasets import load_iris
# >>> iris = load_iris()
# >>> rnd_clf = RandomForestClassifier(n_estimators=500, n_jobs=-1)
# >>> rnd_clf.fit(iris["data"], iris["target"])
# >>> for name, score in zip(iris["feature_names"], rnd_clf.feature_importances_):
# >>> print(name, score)
# sepal length (cm) 0.112492250999
#
#
#
# 12 “Extremely randomized trees,” P. Geurts, D. Ernst, L. Wehenkel (2005).
#
#
#
# 190 | Chapter 7: Ensemble Learning and Random Forests
#
]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
def __init__(self):
super().__init__(
"Random Forests",
# Stage.REMOVE_EXTRANEOUS,
# Stage.ORIG_BLOCKS,
# Stage.CUSTOM_BLOCKS,
# Stage.ORIG_FIGURES,
# Stage.CUSTOM_FIGURES,
# Stage.CUSTOM_EXERCISES,
)
[self.add(a) for a in blocks]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class RandomForests(HierNode):
def __init__(self):
super().__init__("Random Forests")
self.add(Content(), "content")
self.add(A_ExtraTrees())
self.add(B_FeatureImportance())
# eof
|
[
"[email protected]"
] | |
d87ca8f97309bfa2251a3beb33d65a0fc9ba27bc
|
21bbc3fbeb7a1616dbd6993b66dc44d9b30df3e7
|
/PycharmProjects/samp_proj1/assignment1.py
|
9f586fef9c106afcc8bd0ec1f73adb8da96936b9
|
[] |
no_license
|
PoornimaDevii/python_training
|
6124640608d8bf14289ae61b2b28e0db3b473b6f
|
42b535590a6a244a91bd48b4451b74a29c1aaa80
|
refs/heads/master
| 2020-04-05T19:55:49.723114 | 2018-12-04T11:49:59 | 2018-12-04T11:49:59 | 157,157,063 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,514 |
py
|
# Adapter Pattern: the client (a Projector) expects a vga() interface, while the
# Dell laptop exposes hdmi() and the Mac exposes usb(); Adapter maps vga onto those.
# str.format() is used in __str__, playing a role similar to __repr__.
class Projector:
def __init__(self,n):
self.name = n
def __str__(self):
return 'the {} projector'.format(self.name)
def vga(self):
return 'has VGA'
# c1 = Computer('mycomp')
# print(c1)
# print(c1.execute())
# Synthesizer class
class Dell:
def __init__(self,n):
self.name = n
def __str__(self):
return 'the {} Laptop'.format(self.name)
def hdmi(self):
return 'has HDMI'
# s1 = Synthesizer('googlemusic')
# print(s1)
# print(s1.play())
class Mac:
def __init__(self,n):
self.name = n
def __str__(self):
return 'the {} Laptop'.format(self.name)
def usb(self):
return 'has USB'
# sp1 = Human('poornima')
# print(sp1)
# print(sp1.speak())
class Adapter:
def __init__(self,o, adapter_methods):
self.obj = o
self.__dict__.update(adapter_methods)
def __str__(self):
return str(self.obj)
# objects = Computer('Asus') # Client interface
# synth = Synthesizer('moog')
# human = Human('Bob')
# asy = Adapter(synth, dict(execute=synth.play))
# ahu = Adapter(human,dict(execute=human.speak))
# print(asy.execute())
# print(ahu.execute())
pro1 = Projector('myprojector')
dell1 = Dell('mydell')
mac1 = Mac('mymac')
adell = Adapter(dell1, dict(vga=dell1.hdmi))
amac = Adapter(mac1, dict(vga=mac1.usb))
print("The Dell laptop", adell.vga())
print("The Mac laptop",amac.vga())
|
[
"[email protected]"
] | |
e54bffad4d3b08796d2abad7fabdf8706e5308f7
|
b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4
|
/toontown/src/minigame/PatternGameGlobals.py
|
23901749a7104eb5c1e9373c735e47592d0219e8
|
[] |
no_license
|
satire6/Anesidora
|
da3a44e2a49b85252b87b612b435fb4970469583
|
0e7bfc1fe29fd595df0b982e40f94c30befb1ec7
|
refs/heads/master
| 2022-12-16T20:05:13.167119 | 2020-09-11T16:58:04 | 2020-09-11T17:02:06 | 294,751,966 | 89 | 32 | null | null | null | null |
UTF-8
|
Python
| false | false | 507 |
py
|
# PatternGameGlobals.py: contains pattern game stuff
# used by AI and client
import MinigameGlobals
# pattern constants
INITIAL_ROUND_LENGTH = 2
ROUND_LENGTH_INCREMENT = 2
NUM_ROUNDS = 4
TOONTOWN_WORK = 1
# how long the players have to input the pattern
InputTime = 10
# this is how long the AI server will wait for msgs from the clients
# before assuming that the msg is not coming
ClientsReadyTimeout = 5 + MinigameGlobals.latencyTolerance
InputTimeout = InputTime + MinigameGlobals.latencyTolerance
|
[
"[email protected]"
] | |
65acadcf72878f7a7d12f8ee3b703911f2fda295
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/dev/audio/tacotron2_ID0406_for_PyTorch/test_infer.py
|
1e7d04b508274fe794465445a3c225c019ca8888
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 |
Apache-2.0
| 2022-10-15T09:29:12 | 2022-04-20T04:11:18 |
Python
|
UTF-8
|
Python
| false | false | 11,619 |
py
|
#
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
## *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from tacotron2.text import text_to_sequence
import models
import torch
import argparse
import numpy as np
from scipy.io.wavfile import write
import sys
from inference import checkpoint_from_distributed, unwrap_distributed, MeasureTime, prepare_input_sequence, load_and_setup_model
import time
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
from apex import amp
from waveglow.denoiser import Denoiser
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('--tacotron2', type=str,
help='full path to the Tacotron2 model checkpoint file')
parser.add_argument('--waveglow', type=str,
help='full path to the WaveGlow model checkpoint file')
parser.add_argument('-s', '--sigma-infer', default=0.6, type=float)
parser.add_argument('-d', '--denoising-strength', default=0.01, type=float)
parser.add_argument('-sr', '--sampling-rate', default=22050, type=int,
help='Sampling rate')
run_mode = parser.add_mutually_exclusive_group()
run_mode.add_argument('--fp16', action='store_true',
help='Run inference with FP16')
run_mode.add_argument('--cpu', action='store_true',
help='Run inference on CPU')
parser.add_argument('--log-file', type=str, default='nvlog.json',
help='Filename for logging')
parser.add_argument('--stft-hop-length', type=int, default=256,
help='STFT hop length for estimating audio length from mel size')
parser.add_argument('--num-iters', type=int, default=10,
help='Number of iterations')
parser.add_argument('-il', '--input-length', type=int, default=64,
help='Input length')
parser.add_argument('-bs', '--batch-size', type=int, default=1,
help='Batch size')
return parser
def print_stats(measurements_all):
throughput = measurements_all['throughput']
preprocessing = measurements_all['pre_processing']
type_conversion = measurements_all['type_conversion']
storage = measurements_all['storage']
data_transfer = measurements_all['data_transfer']
postprocessing = [sum(p) for p in zip(type_conversion,storage,data_transfer)]
latency = measurements_all['latency']
waveglow_latency = measurements_all['waveglow_latency']
tacotron2_latency = measurements_all['tacotron2_latency']
denoiser_latency = measurements_all['denoiser_latency']
num_mels_per_audio = measurements_all['num_mels_per_audio']
latency.sort()
cf_50 = max(latency[:int(len(latency)*0.50)])
cf_90 = max(latency[:int(len(latency)*0.90)])
cf_95 = max(latency[:int(len(latency)*0.95)])
cf_99 = max(latency[:int(len(latency)*0.99)])
cf_100 = max(latency[:int(len(latency)*1.0)])
print("Throughput average (samples/sec) = {:.0f}".format(np.mean(throughput)))
print("Preprocessing average (seconds) = {:.4f}".format(np.mean(preprocessing)))
print("Postprocessing average (seconds) = {:.4f}".format(np.mean(postprocessing)))
print("Number of mels per audio average = {:.0f}".format(np.mean(num_mels_per_audio)))
print("Tacotron2 latency average (seconds) = {:.2f}".format(np.mean(tacotron2_latency)))
print("WaveGlow latency average (seconds) = {:.2f}".format(np.mean(waveglow_latency)))
print("Denoiser latency average (seconds) = {:.4f}".format(np.mean(denoiser_latency)))
print("Latency average (seconds) = {:.2f}".format(np.mean(latency)))
print("Latency std (seconds) = {:.2f}".format(np.std(latency)))
print("Latency cl 50 (seconds) = {:.2f}".format(cf_50))
print("Latency cl 90 (seconds) = {:.2f}".format(cf_90))
print("Latency cl 95 (seconds) = {:.2f}".format(cf_95))
print("Latency cl 99 (seconds) = {:.2f}".format(cf_99))
print("Latency cl 100 (seconds) = {:.2f}".format(cf_100))
def main():
"""
Launches text to speech (inference).
Inference is executed on a single GPU or CPU.
"""
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 Inference')
parser = parse_args(parser)
args, unknown_args = parser.parse_known_args()
DLLogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT, args.log_file),
StdOutBackend(Verbosity.VERBOSE)])
for k,v in vars(args).items():
DLLogger.log(step="PARAMETER", data={k:v})
DLLogger.log(step="PARAMETER", data={'model_name':'Tacotron2_PyT'})
measurements_all = {"pre_processing": [],
"tacotron2_latency": [],
"waveglow_latency": [],
"denoiser_latency": [],
"latency": [],
"type_conversion": [],
"data_transfer": [],
"storage": [],
"tacotron2_items_per_sec": [],
"waveglow_items_per_sec": [],
"num_mels_per_audio": [],
"throughput": []}
print("args:", args, unknown_args)
tacotron2 = load_and_setup_model('Tacotron2', parser, args.tacotron2,
args.fp16, args.cpu, forward_is_infer=True)
waveglow = load_and_setup_model('WaveGlow', parser, args.waveglow,
args.fp16, args.cpu, forward_is_infer=True)
denoiser = Denoiser(waveglow)
if not args.cpu:
denoiser.npu()
texts = ["The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves. The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves."]
texts = [texts[0][:args.input_length]]
texts = texts*args.batch_size
warmup_iters = 3
for iter in range(args.num_iters):
measurements = {}
with MeasureTime(measurements, "pre_processing", args.cpu):
sequences_padded, input_lengths = prepare_input_sequence(texts, args.cpu)
with torch.no_grad():
with MeasureTime(measurements, "latency", args.cpu):
with MeasureTime(measurements, "tacotron2_latency", args.cpu):
mel, mel_lengths, _ = tacotron2.infer(sequences_padded, input_lengths)
with MeasureTime(measurements, "waveglow_latency", args.cpu):
audios = waveglow.infer(mel, sigma=args.sigma_infer)
num_mels = mel.size(0)*mel.size(2)
num_samples = audios.size(0)*audios.size(1)
with MeasureTime(measurements, "type_conversion", args.cpu):
audios = audios.float()
with torch.no_grad(), MeasureTime(measurements, "denoiser_latency", args.cpu):
audios = denoiser(audios, strength=args.denoising_strength).squeeze(1)
with MeasureTime(measurements, "data_transfer", args.cpu):
audios = audios.cpu()
with MeasureTime(measurements, "storage", args.cpu):
audios = audios.numpy()
for i, audio in enumerate(audios):
audio_path = "audio_"+str(i)+".wav"
write(audio_path, args.sampling_rate,
audio[:mel_lengths[i]*args.stft_hop_length])
measurements['tacotron2_items_per_sec'] = num_mels/measurements['tacotron2_latency']
measurements['waveglow_items_per_sec'] = num_samples/measurements['waveglow_latency']
measurements['num_mels_per_audio'] = mel.size(2)
measurements['throughput'] = num_samples/measurements['latency']
if iter >= warmup_iters:
for k,v in measurements.items():
measurements_all[k].append(v)
DLLogger.log(step=(iter-warmup_iters), data={k: v})
DLLogger.flush()
print_stats(measurements_all)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
11413c5a57372ae3eadb0c9d39eba7ac4f30600f
|
b79e567b9709c50588b77174a20bb3bf2a5ae210
|
/fan_tools/django/models.py
|
118c5c1ebe689fd845df0889f88de5497a1f22b4
|
[
"MIT"
] |
permissive
|
micro-fan/fan_tools
|
325d05f46fece9fe6e49a12b7a7c8d2259d42e1f
|
2a6455b206158f471295b1e4d17e35ab5f98f754
|
refs/heads/master
| 2023-07-25T03:40:14.963178 | 2023-07-24T18:56:21 | 2023-07-24T18:56:45 | 224,145,427 | 2 | 0 |
MIT
| 2022-01-16T18:33:24 | 2019-11-26T08:53:55 |
Python
|
UTF-8
|
Python
| false | false | 750 |
py
|
import os
import uuid
class UploadNameGenerator(object):
def __init__(self, model_name, field_name):
self.model_name = model_name
self.field_name = field_name
def deconstruct(self):
return (
'fan_tools.django.UploadNameGenerator',
(),
{
'model_name': self.model_name,
'field_name': self.field_name,
},
)
def __call__(self, instance, filename):
return os.path.join(
'static',
self.model_name,
'%s-%s-%s%s' % (
self.model_name,
self.field_name,
uuid.uuid1(),
os.path.splitext(filename)[1],
),
)
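# Example wiring in a Django model (a sketch; "Report" and "attachment" are
# assumed names, and "from django.db import models" is assumed):
#
# class Report(models.Model):
#     attachment = models.FileField(
#         upload_to=UploadNameGenerator('report', 'attachment'))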
|
[
"[email protected]"
] | |
10c23c7085652bbb944fd917a4f62fe73419cc4f
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_bulkheads.py
|
b62773592aff3e1d3625faee7a8c8744150fbac0
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 252 |
py
|
from xai.brain.wordbase.nouns._bulkhead import _BULKHEAD
# class header
class _BULKHEADS(_BULKHEAD, ):
def __init__(self,):
_BULKHEAD.__init__(self)
self.name = "BULKHEADS"
self.specie = 'nouns'
self.basic = "bulkhead"
self.jsondata = {}
|
[
"[email protected]"
] | |
7f0bca0bd4ab5c6fd530aa522500b9670194e8ad
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/agc024/B/4328471.py
|
422d6eb2f88b6b69cd1a961089ef14ecba94644a
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null |
UTF-8
|
Python
| false | false | 272 |
py
|
n=int(input())
P=[int(input()) for _ in range(n)]
# Q[v-1] = position of value v in the permutation P
Q=[0]*n
for i,j in enumerate(P):
    Q[j-1]=i
# cresc = length of the longest run of consecutive values whose positions are
# already increasing; every value outside that run needs one move.
cresc=1
cnt=1
for i in range(1,n):
    if Q[i-1]<Q[i]:
        cnt+=1
    else:
        cresc=max(cresc,cnt)
        cnt=1
cresc=max(cresc,cnt)
print(n-cresc)
|
[
"[email protected]"
] | |
a07ca542fe9a301a620158b1438fc385225f567c
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/all-gists/21ffbcb3d32e0ee52eb1/snippet.py
|
5cff930ed25c3e97206a7417a8478b8a596eec8c
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 |
Python
|
UTF-8
|
Python
| false | false | 796 |
py
|
import hashlib
import itertools
import string
import time
import console
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
def encrypt(data):
return hashlib.md5(data).hexdigest()
password = encrypt('pass')
def crack(hash, charset, maxlength):
attempts = 0
for attempt in (''.join(candidate) for candidate in itertools.chain.from_iterable(itertools.product(charset, repeat=i) for i in range(1, maxlength + 1))):
attempts += 1
print 'attempts:', attempts
console.clear()
if encrypt(attempt) == hash:
print 'Found:', attempt
break
s = time.time()
print len(string.ascii_letters+string.digits)
crack(encrypt('pass'), string.ascii_letters+string.digits, 3)
print 'finished in', round(s-time.time(), 3)/-1, 'seconds'
|
[
"[email protected]"
] | |
35fc48a9512d4e2a4b7468acb42ffaee49821ba9
|
f0b5917fe0cb6c263e892d2dda6a541094123a16
|
/grammar-generator/Elements/STG/Visible/VisibleSpecifiedColumnElementForStg.py
|
d89df6da48734e55f3d76ab47b53d77f40784dad
|
[
"MIT"
] |
permissive
|
afronski/grammar-generator
|
61a7de686ecc65dfa73f29a000bfed8b699de9ae
|
231bf88e28dd02b2cd2a79e0d42cb0613a90501a
|
refs/heads/master
| 2016-09-05T10:16:33.228488 | 2014-04-27T20:09:29 | 2014-04-27T20:09:29 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 433 |
py
|
from Elements.STG.Base.IElementForStg import IElementForStg
class VisibleSpecifiedColumnElementForStg(IElementForStg):
def __init__(self, templates, settingsObject):
self.typeName = "Specified"
self.templateName = "SpecifiedVisibledColumn"
super(VisibleSpecifiedColumnElementForStg, self).__init__(templates, settingsObject)
def getType(self):
return self.typeName
def getTemplateName(self):
return self.templateName
|
[
"[email protected]"
] | |
d92db4bf193ecaffa5187a3dbaf23ac6086d60f2
|
1070490055b5c981d936038959731134b01ce272
|
/apps/utils/mixin_utils.py
|
96866fa06ec2322ff13ee3b096fd4b88d85d9128
|
[] |
no_license
|
ljingen/MxOnline
|
401d5be37e11cb866dc8eb78acc9b6de053c5708
|
1b471dd6b4968f79dd6866bb5e3e6413b760c8a1
|
refs/heads/master
| 2021-10-11T08:57:05.304124 | 2018-02-11T06:59:32 | 2018-02-11T06:59:32 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 360 |
py
|
# -*- coding: utf-8 -*-
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
class LoginRequiredMixin(object):
@method_decorator(login_required(login_url='/login/'))
    def dispatch(self, request, *args, **kwargs):
        return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)
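# Example usage (a sketch; "CourseListView" is an assumed name and View comes
# from django.views.generic):
#
# class CourseListView(LoginRequiredMixin, View):
#     def get(self, request):
#         ...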
|
[
"[email protected]"
] | |
3318351747613ba0bee934e6538ceda331bda98c
|
b53c6ec03b24ad21f3ee395d085c07cd302c3402
|
/tests/chainer_tests/datasets_tests/test_image_dataset.py
|
1f36e27c0a68509843c447a77daf65914c659b48
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
pyotr777/chainer
|
c0799791f85f499e32ea68636df5ecbe2c0f5675
|
8532edbd921ab0ea98c9447957565777e4601662
|
refs/heads/master
| 2021-04-28T05:00:16.181948 | 2018-02-20T06:41:21 | 2018-02-20T06:41:21 | 122,169,206 | 0 | 0 |
MIT
| 2018-02-20T07:50:22 | 2018-02-20T07:50:21 | null |
UTF-8
|
Python
| false | false | 2,565 |
py
|
import os
import unittest
import numpy
from chainer import datasets
from chainer.datasets import image_dataset
from chainer import testing
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.int32],
}))
@unittest.skipUnless(image_dataset.available, 'image_dataset is not available')
class TestImageDataset(unittest.TestCase):
def setUp(self):
root = os.path.join(os.path.dirname(__file__), 'image_dataset')
path = os.path.join(root, 'img.lst')
self.dataset = datasets.ImageDataset(path, root=root, dtype=self.dtype)
def test_len(self):
self.assertEqual(len(self.dataset), 2)
def test_get(self):
img = self.dataset.get_example(0)
self.assertEqual(img.dtype, self.dtype)
self.assertEqual(img.shape, (4, 300, 300))
def test_get_grey(self):
img = self.dataset.get_example(1)
self.assertEqual(img.dtype, self.dtype)
self.assertEqual(img.shape, (1, 300, 300))
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.int32],
'label_dtype': [numpy.float32, numpy.int32],
}))
@unittest.skipUnless(image_dataset.available, 'image_dataset is not available')
class TestLabeledImageDataset(unittest.TestCase):
def setUp(self):
root = os.path.join(os.path.dirname(__file__), 'image_dataset')
path = os.path.join(root, 'labeled_img.lst')
self.dataset = datasets.LabeledImageDataset(
path, root=root, dtype=self.dtype, label_dtype=self.label_dtype)
def test_len(self):
self.assertEqual(len(self.dataset), 2)
def test_get(self):
img, label = self.dataset.get_example(0)
self.assertEqual(img.dtype, self.dtype)
self.assertEqual(img.shape, (4, 300, 300))
self.assertEqual(label.dtype, self.label_dtype)
self.assertEqual(label, 0)
def test_get_grey(self):
img, label = self.dataset.get_example(1)
self.assertEqual(img.dtype, self.dtype)
self.assertEqual(img.shape, (1, 300, 300))
self.assertEqual(label.dtype, self.label_dtype)
self.assertEqual(label, 1)
@unittest.skipUnless(image_dataset.available, 'image_dataset is not available')
class TestLabeledImageDatasetInvalidFormat(unittest.TestCase):
def test_invalid_column(self):
root = os.path.join(os.path.dirname(__file__), 'image_dataset')
path = os.path.join(root, 'img.lst')
with self.assertRaises(ValueError):
datasets.LabeledImageDataset(path)
testing.run_module(__name__, __file__)
|
[
"[email protected]"
] | |
63fb29a944d3dcc789050d4e71e9af3eb41d5f1c
|
fa9bae32c203323dfb345d9a415d4eaecb27a931
|
/859. Buddy Strings.py
|
2195a1fcd904a6ecbd11d94049d250458faa4dc6
|
[] |
no_license
|
IUIUN/The-Best-Time-Is-Now
|
48a0c2e9d449aa2f4b6e565868a227b6d555bf29
|
fab660f98bd36715d1ee613c4de5c7fd2b69369e
|
refs/heads/master
| 2020-09-14T12:06:24.074973 | 2020-02-15T06:55:08 | 2020-02-15T06:55:08 | 223,123,743 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 276 |
py
|
class Solution:
def buddyStrings(self, A: str, B: str) -> bool:
if len(A) != len(B): return False
if A == B and len(set(A)) < len(A): return True
dif = [(a, b) for a, b in zip(A, B) if a != b]
return len(dif) == 2 and dif[0] == dif[1][::-1]
|
[
"[email protected]"
] | |
e16beced7372f00c2c8a5abf408ccb718aa1a43d
|
b158713324a92e88a925bc231d567435a38f31a1
|
/src/reportlab/graphics/charts/spider.py
|
27d1aa10634c6d3dedaf81fe446c5d78c36ce04c
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
plttlp/reportlab-ecomobile
|
fd043c6d1aacbf5b3018fd3c09f97b68fcf05f5a
|
aca28b5fbc0d60ecac807957ba5b6832543dac7a
|
refs/heads/master
| 2021-05-28T00:56:05.713731 | 2014-06-21T22:46:21 | 2014-06-21T22:46:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 15,739 |
py
|
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/charts/spider.py
# spider chart, also known as radar chart
__version__=''' $Id$ '''
__doc__="""Spider Chart
Normal use shows variation of 5-10 parameters against some 'norm' or target.
When there is more than one series, place the series with the largest
numbers first, as it will be overdrawn by each successive one.
"""
import copy
from math import sin, cos, pi
from reportlab.lib import colors
from reportlab.lib.validators import isColor, isNumber, isListOfNumbersOrNone,\
isListOfNumbers, isColorOrNone, isString,\
isListOfStringsOrNone, OneOf, SequenceOf,\
isBoolean, isListOfColors, isNumberOrNone,\
isNoneOrListOfNoneOrStrings, isTextAnchor,\
isNoneOrListOfNoneOrNumbers, isBoxAnchor,\
isStringOrNone, isStringOrNone, EitherOr,\
isCallable
from reportlab.lib.attrmap import *
from reportlab.pdfgen.canvas import Canvas
from reportlab.graphics.shapes import Group, Drawing, Line, Rect, Polygon, PolyLine, Ellipse, \
Wedge, String, STATE_DEFAULTS
from reportlab.graphics.widgetbase import Widget, TypedPropertyCollection, PropHolder
from reportlab.graphics.charts.areas import PlotArea
from reportlab.graphics.charts.legends import _objStr
from piecharts import WedgeLabel
from reportlab.graphics.widgets.markers import makeMarker, uSymbol2Symbol, isSymbol
class StrandProperty(PropHolder):
_attrMap = AttrMap(
strokeWidth = AttrMapValue(isNumber),
fillColor = AttrMapValue(isColorOrNone),
strokeColor = AttrMapValue(isColorOrNone),
strokeDashArray = AttrMapValue(isListOfNumbersOrNone),
symbol = AttrMapValue(EitherOr((isStringOrNone,isSymbol)), desc='Widget placed at data points.'),
symbolSize= AttrMapValue(isNumber, desc='Symbol size.'),
name = AttrMapValue(isStringOrNone, desc='Name of the strand.'),
)
def __init__(self):
self.strokeWidth = 1
self.fillColor = None
self.strokeColor = STATE_DEFAULTS["strokeColor"]
self.strokeDashArray = STATE_DEFAULTS["strokeDashArray"]
self.symbol = None
self.symbolSize = 5
self.name = None
class SpokeProperty(PropHolder):
_attrMap = AttrMap(
strokeWidth = AttrMapValue(isNumber),
fillColor = AttrMapValue(isColorOrNone),
strokeColor = AttrMapValue(isColorOrNone),
strokeDashArray = AttrMapValue(isListOfNumbersOrNone),
labelRadius = AttrMapValue(isNumber),
visible = AttrMapValue(isBoolean,desc="True if the spoke line is to be drawn"),
)
def __init__(self,**kw):
self.strokeWidth = 0.5
self.fillColor = None
self.strokeColor = STATE_DEFAULTS["strokeColor"]
self.strokeDashArray = STATE_DEFAULTS["strokeDashArray"]
self.visible = 1
self.labelRadius = 1.05
class SpokeLabel(WedgeLabel):
def __init__(self,**kw):
WedgeLabel.__init__(self,**kw)
if '_text' not in kw.keys(): self._text = ''
class StrandLabel(SpokeLabel):
_attrMap = AttrMap(BASE=SpokeLabel,
format = AttrMapValue(EitherOr((isStringOrNone,isCallable)),"Format for the label"),
dR = AttrMapValue(isNumberOrNone,"radial shift for label"),
)
def __init__(self,**kw):
self.format = ''
self.dR = 0
SpokeLabel.__init__(self,**kw)
def _setupLabel(labelClass, text, radius, cx, cy, angle, car, sar, sty):
L = labelClass()
L._text = text
L.x = cx + radius*car
L.y = cy + radius*sar
L._pmv = angle*180/pi
L.boxAnchor = sty.boxAnchor
L.dx = sty.dx
L.dy = sty.dy
L.angle = sty.angle
L.boxAnchor = sty.boxAnchor
L.boxStrokeColor = sty.boxStrokeColor
L.boxStrokeWidth = sty.boxStrokeWidth
L.boxFillColor = sty.boxFillColor
L.strokeColor = sty.strokeColor
L.strokeWidth = sty.strokeWidth
L.leading = sty.leading
L.width = sty.width
L.maxWidth = sty.maxWidth
L.height = sty.height
L.textAnchor = sty.textAnchor
L.visible = sty.visible
L.topPadding = sty.topPadding
L.leftPadding = sty.leftPadding
L.rightPadding = sty.rightPadding
L.bottomPadding = sty.bottomPadding
L.fontName = sty.fontName
L.fontSize = sty.fontSize
L.fillColor = sty.fillColor
return L
class SpiderChart(PlotArea):
_attrMap = AttrMap(BASE=PlotArea,
data = AttrMapValue(None, desc='Data to be plotted, list of (lists of) numbers.'),
labels = AttrMapValue(isListOfStringsOrNone, desc="optional list of labels to use for each data point"),
startAngle = AttrMapValue(isNumber, desc="angle of first slice; like the compass, 0 is due North"),
direction = AttrMapValue( OneOf('clockwise', 'anticlockwise'), desc="'clockwise' or 'anticlockwise'"),
strands = AttrMapValue(None, desc="collection of strand descriptor objects"),
spokes = AttrMapValue(None, desc="collection of spoke descriptor objects"),
strandLabels = AttrMapValue(None, desc="collection of strand label descriptor objects"),
spokeLabels = AttrMapValue(None, desc="collection of spoke label descriptor objects"),
)
def makeSwatchSample(self, rowNo, x, y, width, height):
baseStyle = self.strands
styleIdx = rowNo % len(baseStyle)
style = baseStyle[styleIdx]
strokeColor = getattr(style, 'strokeColor', getattr(baseStyle,'strokeColor',None))
fillColor = getattr(style, 'fillColor', getattr(baseStyle,'fillColor',None))
strokeDashArray = getattr(style, 'strokeDashArray', getattr(baseStyle,'strokeDashArray',None))
strokeWidth = getattr(style, 'strokeWidth', getattr(baseStyle, 'strokeWidth',0))
symbol = getattr(style, 'symbol', getattr(baseStyle, 'symbol',None))
ym = y+height/2.0
if fillColor is None and strokeColor is not None and strokeWidth>0:
bg = Line(x,ym,x+width,ym,strokeWidth=strokeWidth,strokeColor=strokeColor,
strokeDashArray=strokeDashArray)
elif fillColor is not None:
bg = Rect(x,y,width,height,strokeWidth=strokeWidth,strokeColor=strokeColor,
strokeDashArray=strokeDashArray,fillColor=fillColor)
else:
bg = None
if symbol:
            # use the strand's fill colour (or stroke colour) for the marker
            symbol = uSymbol2Symbol(symbol,x+width/2.,ym,fillColor or strokeColor)
if bg:
g = Group()
g.add(bg)
g.add(symbol)
return g
return symbol or bg
def getSeriesName(self,i,default=None):
'''return series name i or default'''
return _objStr(getattr(self.strands[i],'name',default))
def __init__(self):
PlotArea.__init__(self)
self.data = [[10,12,14,16,14,12], [6,8,10,12,9,11]]
self.labels = None # or list of strings
self.labels = ['a','b','c','d','e','f']
self.startAngle = 90
self.direction = "clockwise"
self.strands = TypedPropertyCollection(StrandProperty)
self.spokes = TypedPropertyCollection(SpokeProperty)
self.spokeLabels = TypedPropertyCollection(SpokeLabel)
self.spokeLabels._text = None
self.strandLabels = TypedPropertyCollection(StrandLabel)
self.x = 10
self.y = 10
self.width = 180
self.height = 180
def demo(self):
d = Drawing(200, 200)
d.add(SpiderChart())
return d
def normalizeData(self, outer = 0.0):
"""Turns data into normalized ones where each datum is < 1.0,
and 1.0 = maximum radius. Adds 10% at outside edge by default"""
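        # Worked example (illustrative): with data = [[10, 20], [5, 15]] and the
        # default outer = 0.0, norm is 20.0, the rows become
        # [[0.5, 1.0], [0.25, 0.75]], and self._norm is set to 20.0.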
data = self.data
assert min(map(min,data)) >=0, "Cannot do spider plots of negative numbers!"
norm = max(map(max,data))
norm *= (1.0+outer)
if norm<1e-9: norm = 1.0
self._norm = norm
return [[e/norm for e in row] for row in data]
def _innerDrawLabel(self, sty, radius, cx, cy, angle, car, sar, labelClass=StrandLabel):
"Draw a label for a given item in the list."
fmt = sty.format
value = radius*self._norm
if not fmt:
text = None
elif isinstance(fmt,str):
if fmt == 'values':
text = sty._text
else:
text = fmt % value
elif callable(fmt):
text = fmt(value)
else:
raise ValueError("Unknown formatter type %s, expected string or function" % fmt)
if text:
dR = sty.dR
if dR:
radius += dR/self._radius
L = _setupLabel(labelClass, text, radius, cx, cy, angle, car, sar, sty)
if dR<0: L._anti = 1
else:
L = None
return L
def draw(self):
# normalize slice data
g = self.makeBackground() or Group()
xradius = self.width/2.0
yradius = self.height/2.0
self._radius = radius = min(xradius, yradius)
cx = self.x + xradius
cy = self.y + yradius
data = self.normalizeData()
self._seriesCount = len(data)
n = len(data[0])
#labels
if self.labels is None:
labels = [''] * n
else:
labels = self.labels
            #there's no point in raising errors for too few labels if we silently
            #create all of them in the extreme case of no labels.
i = n-len(labels)
if i>0:
labels = labels + ['']*i
S = []
STRANDS = []
STRANDAREAS = []
syms = []
labs = []
csa = []
angle = self.startAngle*pi/180
direction = self.direction == "clockwise" and -1 or 1
angleBetween = direction*(2 * pi)/float(n)
spokes = self.spokes
spokeLabels = self.spokeLabels
for i in xrange(n):
car = cos(angle)*radius
sar = sin(angle)*radius
csa.append((car,sar,angle))
si = self.spokes[i]
if si.visible:
spoke = Line(cx, cy, cx + car, cy + sar, strokeWidth = si.strokeWidth, strokeColor=si.strokeColor, strokeDashArray=si.strokeDashArray)
S.append(spoke)
sli = spokeLabels[i]
text = sli._text
if not text: text = labels[i]
if text:
S.append(_setupLabel(WedgeLabel, text, si.labelRadius, cx, cy, angle, car, sar, sli))
angle += angleBetween
# now plot the polygons
rowIdx = 0
strands = self.strands
strandLabels = self.strandLabels
for row in data:
# series plot
rsty = strands[rowIdx]
points = []
car, sar = csa[-1][:2]
r = row[-1]
points.append(cx+car*r)
points.append(cy+sar*r)
for i in xrange(n):
car, sar, angle = csa[i]
r = row[i]
points.append(cx+car*r)
points.append(cy+sar*r)
L = self._innerDrawLabel(strandLabels[(rowIdx,i)], r, cx, cy, angle, car, sar, labelClass=StrandLabel)
if L: labs.append(L)
sty = strands[(rowIdx,i)]
uSymbol = sty.symbol
# put in a marker, if it needs one
if uSymbol:
s_x = cx+car*r
s_y = cy+sar*r
s_fillColor = sty.fillColor
s_strokeColor = sty.strokeColor
s_strokeWidth = sty.strokeWidth
s_angle = 0
s_size = sty.symbolSize
if type(uSymbol) is type(''):
symbol = makeMarker(uSymbol,
size = s_size,
x = s_x,
y = s_y,
fillColor = s_fillColor,
strokeColor = s_strokeColor,
strokeWidth = s_strokeWidth,
angle = s_angle,
)
else:
symbol = uSymbol2Symbol(uSymbol,s_x,s_y,s_fillColor)
for k,v in (('size', s_size), ('fillColor', s_fillColor),
('x', s_x), ('y', s_y),
('strokeColor',s_strokeColor), ('strokeWidth',s_strokeWidth),
('angle',s_angle),):
if getattr(symbol,k,None) is None:
try:
setattr(symbol,k,v)
except:
pass
syms.append(symbol)
# make up the 'strand'
if rsty.fillColor:
strand = Polygon(points)
strand.fillColor = rsty.fillColor
strand.strokeColor = None
strand.strokeWidth = 0
STRANDAREAS.append(strand)
if rsty.strokeColor and rsty.strokeWidth:
strand = PolyLine(points)
strand.strokeColor = rsty.strokeColor
strand.strokeWidth = rsty.strokeWidth
strand.strokeDashArray = rsty.strokeDashArray
STRANDS.append(strand)
rowIdx += 1
map(g.add,STRANDAREAS+STRANDS+syms+S+labs)
return g
def sample1():
"Make a simple spider chart"
d = Drawing(400, 400)
sp = SpiderChart()
sp.x = 50
sp.y = 50
sp.width = 300
sp.height = 300
sp.data = [[10,12,14,16,14,12], [6,8,10,12,9,15],[7,8,17,4,12,8]]
sp.labels = ['a','b','c','d','e','f']
sp.strands[0].strokeColor = colors.cornsilk
sp.strands[1].strokeColor = colors.cyan
sp.strands[2].strokeColor = colors.palegreen
sp.strands[0].fillColor = colors.cornsilk
sp.strands[1].fillColor = colors.cyan
sp.strands[2].fillColor = colors.palegreen
sp.spokes.strokeDashArray = (2,2)
d.add(sp)
return d
def sample2():
"Make a spider chart with markers, but no fill"
d = Drawing(400, 400)
sp = SpiderChart()
sp.x = 50
sp.y = 50
sp.width = 300
sp.height = 300
sp.data = [[10,12,14,16,14,12], [6,8,10,12,9,15],[7,8,17,4,12,8]]
sp.labels = ['U','V','W','X','Y','Z']
sp.strands.strokeWidth = 1
sp.strands[0].fillColor = colors.pink
sp.strands[1].fillColor = colors.lightblue
sp.strands[2].fillColor = colors.palegreen
sp.strands[0].strokeColor = colors.red
sp.strands[1].strokeColor = colors.blue
sp.strands[2].strokeColor = colors.green
sp.strands.symbol = "FilledDiamond"
sp.strands[1].symbol = makeMarker("Circle")
sp.strands[1].symbol.strokeWidth = 0.5
sp.strands[1].symbol.fillColor = colors.yellow
sp.strands.symbolSize = 6
sp.strandLabels[0,3]._text = 'special'
sp.strandLabels[0,1]._text = 'one'
sp.strandLabels[0,0]._text = 'zero'
sp.strandLabels[1,0]._text = 'Earth'
sp.strandLabels[2,2]._text = 'Mars'
sp.strandLabels.format = 'values'
sp.strandLabels.dR = -5
d.add(sp)
return d
if __name__=='__main__':
d = sample1()
from reportlab.graphics.renderPDF import drawToFile
drawToFile(d, 'spider.pdf')
d = sample2()
drawToFile(d, 'spider2.pdf')
|
[
"devnull@localhost"
] |
devnull@localhost
|
ff1207d7894df22fdaa96e181578ed8ce57c263f
|
3edb81366059a3dcb767b7b15476f264fad788e8
|
/submit.py
|
c394ced46f734942c3ba9b60ee354f0c8650cf22
|
[] |
no_license
|
enucatl-phd/sinogram_batch_jobs
|
91179c1a1ec3cc7869b0e0010977ce8b95d14517
|
a5c06b8992b1ad2ed651277c1f54229847b7cc44
|
refs/heads/master
| 2023-08-07T20:32:58.236996 | 2017-11-23T13:17:48 | 2017-11-23T13:17:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,443 |
py
|
import subprocess
import os.path
folders = [
"KO202_LL_control_1",
"KO202_LL_control_2",
"KO202_LL_control_3",
"KO203_LL_control_1",
"KO203_LL_control_2",
"KO203_LL_control_3",
"ko373_LL_smoke_1",
"ko373_LL_smoke_2",
"ko373_LL_smoke_3",
"WT223_LL_control_1",
"WT223_LL_control_2",
"WT223_LL_control_3",
"WT224_LL_control_1",
"WT224_LL_control_2",
"WT224_LL_control_3",
"WT256_LL_smoke_1",
"WT256_LL_smoke_2",
"WT256_LL_smoke_3",
"WT256_LL_smoke_4",
"WT353_LL_smoke_1",
"WT353_LL_smoke_2",
"WT353_LL_smoke_3",
"WT355_LL_smoke_1",
"WT355_LL_smoke_2",
"WT355_LL_smoke_3"
]
for folder in folders:
if os.path.isdir(
os.path.join(
"..",
folder,
"sin")):
continue
command = "prj2sinSGE -d -C -f 1801,30,100,0,0 -I 1 -p {0}####.tif --jobname={0}_fltp --queue=tomcat_offline.q -Y 11.999,0.65E-6,3.7e-8,2.3e-10,0.008 -g 3 -o /sls/X02DA/data/e13657/Data10/matteo_high_resolution/{0}/fltp/ /sls/X02DA/data/e13657/Data10/matteo_high_resolution/{0}/tif/;prj2sinSGE -d -g 0 -I 0 -f 1801,0,0,0,0 -k 1 --hold={0}_fltp --jobname={0}_sin --queue=tomcat_offline.q -j 50 -p {0}####.fltp.DMP -o /sls/X02DA/data/e13657/Data10/matteo_high_resolution/{0}/sin/ /sls/X02DA/data/e13657/Data10/matteo_high_resolution/{0}/fltp/;"
print(command)
subprocess.call(command.format(folder), shell=True)
|
[
"[email protected]"
] | |
652d7a8d6ec191c18ef763835a9eb827497e9673
|
add74ecbd87c711f1e10898f87ffd31bb39cc5d6
|
/xcp2k/classes/_r_ldos1.py
|
5f1ad0f6631db49e3f0ee25cbced343e39d89476
|
[] |
no_license
|
superstar54/xcp2k
|
82071e29613ccf58fc14e684154bb9392d00458b
|
e8afae2ccb4b777ddd3731fe99f451b56d416a83
|
refs/heads/master
| 2021-11-11T21:17:30.292500 | 2021-11-06T06:31:20 | 2021-11-06T06:31:20 | 62,589,715 | 8 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 455 |
py
|
from xcp2k.inputsection import InputSection
class _r_ldos1(InputSection):
def __init__(self):
InputSection.__init__(self)
self.List = []
self.Xrange = None
self.Yrange = None
self.Zrange = None
self.Erange = None
self._name = "R_LDOS"
self._keywords = {'Xrange': 'XRANGE', 'Yrange': 'YRANGE', 'Zrange': 'ZRANGE', 'Erange': 'ERANGE'}
self._repeated_keywords = {'List': 'LIST'}
|
[
"[email protected]"
] | |
16faddad943bf0bc84ce7a1e8f6aabb41a63554c
|
45fdc51cf264bbd50e59655440eefc91451c50ea
|
/urlib.parse/urllib_parse_unquote.py
|
51e2760854007cbf12352e7d1838aee4ad0eea3b
|
[] |
no_license
|
blindij/python3_stl
|
2163043f3a9113eac21a48a35685a4a01987e926
|
ea138e25f8b5bbf7d8f78e4b1b7e2ae413de4735
|
refs/heads/master
| 2021-12-24T20:37:54.055116 | 2021-09-29T13:37:38 | 2021-09-29T13:37:38 | 191,508,648 | 0 | 0 | null | 2019-08-27T15:45:53 | 2019-06-12T06:10:30 |
Python
|
UTF-8
|
Python
| false | false | 176 |
py
|
from urllib.parse import unquote, unquote_plus
print(unquote('http%3A//localhost%3A8080/%7Ehellmann/'))
print(unquote_plus('http%3A%2F%2Flocalhost%3A8080%2F%7Ehellmann%2F'))
|
[
"[email protected]"
] | |
a219f316ee477f4840ce21fdc1d506deb1f5a87a
|
a12c090eb57da4c8e1f543a1a9d497abad763ccd
|
/django-stubs/contrib/staticfiles/management/commands/runserver.pyi
|
9542443c2c7afc7adf315bec00bc6da4dd914096
|
[
"BSD-3-Clause"
] |
permissive
|
debuggerpk/django-stubs
|
be12eb6b43354a18675de3f70c491e534d065b78
|
bbdaebb244bd82544553f4547157e4f694f7ae99
|
refs/heads/master
| 2020-04-04T08:33:52.358704 | 2018-09-26T19:32:19 | 2018-09-26T19:32:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 597 |
pyi
|
from typing import Any, Optional
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.core.management.base import CommandParser
from django.core.management.commands.runserver import \
Command as RunserverCommand
class Command(RunserverCommand):
stderr: django.core.management.base.OutputWrapper
stdout: django.core.management.base.OutputWrapper
style: django.core.management.color.Style
help: str = ...
def add_arguments(self, parser: CommandParser) -> None: ...
def get_handler(self, *args: Any, **options: Any) -> StaticFilesHandler: ...
|
[
"[email protected]"
] | |
0f21ceecb121a2f278245d0803241f774157d555
|
048667e8ded719bb1221d1c9d161df248194cc6e
|
/kgtk/io/kgtkwriter.py
|
8f3941fa152e940c1b7ddb8648820966ea3af22f
|
[
"MIT"
] |
permissive
|
dgarijo/kgtk
|
bda357ddae77082e697111368ddbaaca3b85e900
|
f624754e91afbad8d28006e716189b43d367ef04
|
refs/heads/master
| 2022-11-12T08:53:20.497440 | 2020-06-26T19:17:53 | 2020-06-26T19:17:53 | 275,201,425 | 0 | 0 |
MIT
| 2020-06-26T16:33:41 | 2020-06-26T16:33:40 | null |
UTF-8
|
Python
| false | false | 30,943 |
py
|
"""
Write a KGTK edge or node file in TSV format.
"""
from argparse import ArgumentParser
import attr
import bz2
from enum import Enum
import gzip
import json
import lz4.frame # type: ignore
import lzma
from pathlib import Path
from multiprocessing import Queue
import sys
import typing
from kgtk.kgtkformat import KgtkFormat
from kgtk.io.kgtkbase import KgtkBase
from kgtk.io.kgtkreader import KgtkReader
from kgtk.utils.enumnameaction import EnumNameAction
from kgtk.utils.gzipprocess import GzipProcess
from kgtk.utils.validationaction import ValidationAction
@attr.s(slots=True, frozen=False)
class KgtkWriter(KgtkBase):
GZIP_QUEUE_SIZE_DEFAULT: int = GzipProcess.GZIP_QUEUE_SIZE_DEFAULT
# TODO: use an enum
OUTPUT_FORMAT_CSV: str = "csv"
OUTPUT_FORMAT_JSON: str = "json"
OUTPUT_FORMAT_JSON_MAP: str = "json-map"
OUTPUT_FORMAT_JSON_MAP_COMPACT: str = "json-map-compact"
OUTPUT_FORMAT_JSONL: str = "jsonl"
OUTPUT_FORMAT_JSONL_MAP: str = "jsonl-map"
OUTPUT_FORMAT_JSONL_MAP_COMPACT: str = "jsonl-map-compact"
OUTPUT_FORMAT_KGTK: str = "kgtk"
OUTPUT_FORMAT_MD: str = "md"
OUTPUT_FORMAT_CHOICES: typing.List[str] = [
OUTPUT_FORMAT_CSV,
OUTPUT_FORMAT_JSON,
OUTPUT_FORMAT_JSON_MAP,
OUTPUT_FORMAT_JSON_MAP_COMPACT,
OUTPUT_FORMAT_JSONL,
OUTPUT_FORMAT_JSONL_MAP,
OUTPUT_FORMAT_JSONL_MAP_COMPACT,
OUTPUT_FORMAT_KGTK,
OUTPUT_FORMAT_MD,
]
OUTPUT_FORMAT_DEFAULT: str = OUTPUT_FORMAT_KGTK
file_path: typing.Optional[Path] = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(Path)))
file_out: typing.TextIO = attr.ib() # Todo: validate TextIO
column_separator: str = attr.ib(validator=attr.validators.instance_of(str))
column_names: typing.List[str] = attr.ib(validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(str),
iterable_validator=attr.validators.instance_of(list)))
column_name_map: typing.Mapping[str, int] = attr.ib(validator=attr.validators.deep_mapping(key_validator=attr.validators.instance_of(str),
value_validator=attr.validators.instance_of(int)))
# Use these names in the output file, but continue to use
# column_names for shuffle lists.
output_column_names: typing.List[str] = \
attr.ib(validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(str),
iterable_validator=attr.validators.instance_of(list)))
# For convenience, the count of columns. This is the same as len(column_names).
column_count: int = attr.ib(validator=attr.validators.instance_of(int))
# Require or fill trailing fields?
require_all_columns: bool = attr.ib(validator=attr.validators.instance_of(bool))
prohibit_extra_columns: bool = attr.ib(validator=attr.validators.instance_of(bool))
fill_missing_columns: bool = attr.ib(validator=attr.validators.instance_of(bool))
# How should header errors be processed?
error_file: typing.TextIO = attr.ib(default=sys.stderr)
header_error_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.EXIT)
# Other implementation options?
gzip_in_parallel: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
gzip_thread: typing.Optional[GzipProcess] = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(GzipProcess)), default=None)
gzip_queue_size: int = attr.ib(validator=attr.validators.instance_of(int), default=GZIP_QUEUE_SIZE_DEFAULT)
output_format: str = attr.ib(validator=attr.validators.instance_of(str), default=OUTPUT_FORMAT_DEFAULT) # TODO: use an enum
line_count: int = attr.ib(validator=attr.validators.instance_of(int), default=0)
verbose: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
very_verbose: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
class Mode(Enum):
"""
There are four file writing modes:
"""
NONE = 0 # Enforce neither edge nor node file required columns
EDGE = 1 # Enforce edge file required columns
NODE = 2 # Enforce node file required columns
AUTO = 3 # Automatically decide whether to enforce edge or node file required columns
@classmethod
def open(cls,
column_names: typing.List[str],
file_path: typing.Optional[Path],
who: str = "output",
require_all_columns: bool = True,
prohibit_extra_columns: bool = True,
fill_missing_columns: bool = False,
error_file: typing.TextIO = sys.stderr,
header_error_action: ValidationAction = ValidationAction.EXIT,
gzip_in_parallel: bool = False,
gzip_queue_size: int = GZIP_QUEUE_SIZE_DEFAULT,
column_separator: str = KgtkFormat.COLUMN_SEPARATOR,
mode: Mode = Mode.AUTO,
output_format: typing.Optional[str] = None,
output_column_names: typing.Optional[typing.List[str]] = None,
old_column_names: typing.Optional[typing.List[str]] = None,
new_column_names: typing.Optional[typing.List[str]] = None,
verbose: bool = False,
very_verbose: bool = False)->"KgtkWriter":
if file_path is None or str(file_path) == "-":
if verbose:
print("KgtkWriter: writing stdout", file=error_file, flush=True)
return cls._setup(column_names=column_names,
file_path=None,
who=who,
file_out=sys.stdout,
require_all_columns=require_all_columns,
prohibit_extra_columns=prohibit_extra_columns,
fill_missing_columns=fill_missing_columns,
error_file=error_file,
header_error_action=header_error_action,
gzip_in_parallel=gzip_in_parallel,
gzip_queue_size=gzip_queue_size,
column_separator=column_separator,
mode=mode,
output_format=output_format,
output_column_names=output_column_names,
old_column_names=old_column_names,
new_column_names=new_column_names,
verbose=verbose,
very_verbose=very_verbose,
)
if verbose:
print("File_path.suffix: %s" % file_path.suffix, file=error_file, flush=True)
if file_path.suffix in [".gz", ".bz2", ".xz", ".lz4"]:
# TODO: find a better way to coerce typing.IO[Any] to typing.TextIO
gzip_file: typing.TextIO
if file_path.suffix == ".gz":
if verbose:
print("KgtkWriter: writing gzip %s" % str(file_path), file=error_file, flush=True)
gzip_file = gzip.open(file_path, mode="wt") # type: ignore
elif file_path.suffix == ".bz2":
if verbose:
print("KgtkWriter: writing bz2 %s" % str(file_path), file=error_file, flush=True)
gzip_file = bz2.open(file_path, mode="wt") # type: ignore
elif file_path.suffix == ".xz":
if verbose:
print("KgtkWriter: writing lzma %s" % str(file_path), file=error_file, flush=True)
gzip_file = lzma.open(file_path, mode="wt") # type: ignore
elif file_path.suffix ==".lz4":
if verbose:
print("KgtkWriter: writing lz4 %s" % str(file_path), file=error_file, flush=True)
                gzip_file = lz4.frame.open(file_path, mode="wt") # type: ignore
else:
# TODO: throw a better exception.
raise ValueError("Unexpected file_path.suffiz = '%s'" % file_path.suffix)
return cls._setup(column_names=column_names,
file_path=file_path,
who=who,
file_out=gzip_file,
require_all_columns=require_all_columns,
prohibit_extra_columns=prohibit_extra_columns,
fill_missing_columns=fill_missing_columns,
error_file=error_file,
header_error_action=header_error_action,
gzip_in_parallel=gzip_in_parallel,
gzip_queue_size=gzip_queue_size,
column_separator=column_separator,
mode=mode,
output_format=output_format,
output_column_names=output_column_names,
old_column_names=old_column_names,
new_column_names=new_column_names,
verbose=verbose,
very_verbose=very_verbose,
)
else:
if output_format is None:
# TODO: optionally stack these on top of compression
if file_path.suffix == ".md":
output_format = "md"
elif file_path.suffix == ".csv":
output_format = "csv"
elif file_path.suffix == ".json":
output_format = "json"
elif file_path.suffix == ".jsonl":
output_format = "jsonl"
else:
output_format = "kgtk"
if verbose:
print("KgtkWriter: writing file %s" % str(file_path), file=error_file, flush=True)
return cls._setup(column_names=column_names,
file_path=file_path,
who=who,
file_out=open(file_path, "w"),
require_all_columns=require_all_columns,
prohibit_extra_columns=prohibit_extra_columns,
fill_missing_columns=fill_missing_columns,
error_file=error_file,
header_error_action=header_error_action,
gzip_in_parallel=gzip_in_parallel,
gzip_queue_size=gzip_queue_size,
column_separator=column_separator,
mode=mode,
output_format=output_format,
output_column_names=output_column_names,
old_column_names=old_column_names,
new_column_names=new_column_names,
verbose=verbose,
very_verbose=very_verbose,
)
@classmethod
def _setup(cls,
column_names: typing.List[str],
file_path: typing.Optional[Path],
who: str,
file_out: typing.TextIO,
require_all_columns: bool,
prohibit_extra_columns: bool,
fill_missing_columns: bool,
error_file: typing.TextIO,
header_error_action: ValidationAction,
gzip_in_parallel: bool,
gzip_queue_size: int,
column_separator: str,
mode: Mode = Mode.AUTO,
output_format: typing.Optional[str] = None,
output_column_names: typing.Optional[typing.List[str]] = None,
old_column_names: typing.Optional[typing.List[str]] = None,
new_column_names: typing.Optional[typing.List[str]] = None,
verbose: bool = False,
very_verbose: bool = False,
)->"KgtkWriter":
if output_format is None:
output_format = cls.OUTPUT_FORMAT_DEFAULT
if verbose:
print("Defaulting the output format to %s" % output_format, file=error_file, flush=True)
if output_format == cls.OUTPUT_FORMAT_CSV:
column_separator = "," # What a cheat!
if output_column_names is None:
output_column_names = column_names
else:
# Rename all output columns.
if len(output_column_names) != len(column_names):
raise ValueError("%s: %d column names but %d output column names" % (who, len(column_names), len(output_column_names)))
if old_column_names is not None or new_column_names is not None:
# Rename selected output columns:
if old_column_names is None or new_column_names is None:
raise ValueError("%s: old/new column name mismatch" % who)
if len(old_column_names) != len(new_column_names):
raise ValueError("%s: old/new column name length mismatch: %d != %d" % (who, len(old_column_names), len(new_column_names)))
            # Rename columns in place. Start by copying the output column name
            # list so the changes don't inadvertently propagate.
output_column_names = output_column_names.copy()
column_name: str
idx: int
for idx, column_name in enumerate(old_column_names):
if column_name not in output_column_names:
raise ValueError("%s: old column names %s not in the output column names." % (who, column_name))
output_column_names[output_column_names.index(column_name)] = new_column_names[idx]
# Build a map from column name to column index. This is used for
# self.writemap(...) and self.build_shuffle_list(...)
column_name_map: typing.Mapping[str, int] = cls.build_column_name_map(column_names,
header_line=column_separator.join(column_names),
who=who,
error_action=header_error_action,
error_file=error_file)
# Build a header line for error feedback:
header: str = column_separator.join(output_column_names)
# Build a map from output column name to column index.
output_column_name_map: typing.Mapping[str, int] = cls.build_column_name_map(output_column_names,
header_line=header,
who=who,
error_action=header_error_action,
error_file=error_file)
# Should we automatically determine if this is an edge file or a node file?
is_edge_file: bool = False
is_node_file: bool = False
if mode is KgtkWriter.Mode.AUTO:
# If we have a node1 (or alias) column, then this must be an edge file. Otherwise, assume it is a node file.
node1_idx: int = cls.get_column_idx(cls.NODE1_COLUMN_NAMES, output_column_name_map,
header_line=header,
who=who,
error_action=header_error_action,
error_file=error_file,
is_optional=True)
is_edge_file = node1_idx >= 0
is_node_file = not is_edge_file
elif mode is KgtkWriter.Mode.EDGE:
is_edge_file = True
elif mode is KgtkWriter.Mode.NODE:
is_node_file = True
elif mode is KgtkWriter.Mode.NONE:
pass
# Validate that we have the proper columns for an edge or node file,
# ignoring the result.
cls.get_special_columns(output_column_name_map,
header_line=header,
who=who,
error_action=header_error_action,
error_file=error_file,
is_edge_file=is_edge_file,
is_node_file=is_node_file)
gzip_thread: typing.Optional[GzipProcess] = None
if gzip_in_parallel:
if verbose:
print("Starting the gzip process.", file=error_file, flush=True)
gzip_thread = GzipProcess(file_out, Queue(gzip_queue_size))
gzip_thread.start()
kw: KgtkWriter = cls(file_path=file_path,
file_out=file_out,
column_separator=column_separator,
column_names=column_names,
column_name_map=column_name_map,
column_count=len(column_names),
require_all_columns=require_all_columns,
prohibit_extra_columns=prohibit_extra_columns,
fill_missing_columns=fill_missing_columns,
error_file=error_file,
header_error_action=header_error_action,
gzip_in_parallel=gzip_in_parallel,
gzip_thread=gzip_thread,
gzip_queue_size=gzip_queue_size,
output_format=output_format,
output_column_names=output_column_names,
line_count=1,
verbose=verbose,
very_verbose=very_verbose,
)
kw.write_header()
return kw
def join_csv(self, values: typing.List[str])->str:
line: str = ""
value: str
for value in values:
if '"' in value or ',' in value:
value = '"' + '""'.join(value.split('"')) + '"'
if len(line) > 0:
line += ","
line += value
return line
def join_md(self, values: typing.List[str])->str:
line: str = "|"
value: str
for value in values:
value = "\\|".join(value.split("|"))
line += " " + value + " |"
return line
def json_map(self, values: typing.List[str], compact: bool = False)->typing.Mapping[str, str]:
result: typing.MutableMapping[str, str] = { }
idx: int
value: str
for idx, value in enumerate(values):
if len(value) > 0 or not compact:
result[self.output_column_names[idx]] = value
return result
def write_header(self):
header: str
header2: typing.Optional[str] = None
# Contemplate a last-second rename of the columns
column_names: typing.List[str]
if self.output_column_names is not None:
column_names = self.output_column_names
else:
column_names = self.column_names
if self.output_format == self.OUTPUT_FORMAT_JSON:
self.writeline("[")
header = json.dumps(column_names, indent=None, separators=(',', ':')) + ","
elif self.output_format == self.OUTPUT_FORMAT_JSON_MAP:
self.writeline("[")
return
elif self.output_format == self.OUTPUT_FORMAT_JSON_MAP_COMPACT:
self.writeline("[")
return
elif self.output_format == self.OUTPUT_FORMAT_JSONL:
header = json.dumps(column_names, indent=None, separators=(',', ':'))
elif self.output_format == self.OUTPUT_FORMAT_JSONL_MAP:
return
elif self.output_format == self.OUTPUT_FORMAT_JSONL_MAP_COMPACT:
return
elif self.output_format == self.OUTPUT_FORMAT_MD:
header = "|"
header2 = "|"
col: str
for col in column_names:
col = "\\|".join(col.split("|"))
header += " " + col + " |"
header2 += " -- |"
elif self.output_format in [self.OUTPUT_FORMAT_KGTK, self.OUTPUT_FORMAT_CSV]:
header = self.column_separator.join(column_names)
else:
raise ValueError("KgtkWriter: header: Unrecognized output format '%s'." % self.output_format)
# Write the column names to the first line.
if self.verbose:
print("header: %s" % header, file=self.error_file, flush=True)
self.writeline(header)
if header2 is not None:
self.writeline(header2)
def writeline(self, line: str):
if self.gzip_thread is not None:
self.gzip_thread.write(line + "\n") # Todo: use system end-of-line sequence?
else:
self.file_out.write(line + "\n") # Todo: use system end-of-line sequence?
# Write the next list of edge values as a list of strings.
# TODO: Convert integers, coordinates, etc. from Python types
def write(self, values: typing.List[str],
shuffle_list: typing.Optional[typing.List[int]]= None):
if shuffle_list is not None:
if len(shuffle_list) != len(values):
# TODO: throw a better exception
raise ValueError("The shuffle list is %d long but the values are %d long" % (len(shuffle_list), len(values)))
shuffled_values: typing.List[str] = [""] * self.column_count
idx: int
for idx in range(len(shuffle_list)):
shuffle_idx: int = shuffle_list[idx]
if shuffle_idx >= 0:
shuffled_values[shuffle_idx] = values[idx]
values = shuffled_values
# Optionally fill missing trailing columns with empty values:
if self.fill_missing_columns and len(values) < self.column_count:
while len(values) < self.column_count:
values.append("")
# Optionally validate that the line contained the right number of columns:
#
# When we report line numbers in error messages, line 1 is the first line after the header line.
line: str
if self.require_all_columns and len(values) < self.column_count:
line = self.column_separator.join(values)
raise ValueError("Required %d columns in input line %d, saw %d: '%s'" % (self.column_count, self.line_count, len(values), line))
if self.prohibit_extra_columns and len(values) > self.column_count:
line = self.column_separator.join(values)
raise ValueError("Required %d columns in input line %d, saw %d (%d extra): '%s'" % (self.column_count, self.line_count, len(values),
len(values) - self.column_count, line))
if self.output_format == self.OUTPUT_FORMAT_KGTK:
self.writeline(self.column_separator.join(values))
elif self.output_format == self.OUTPUT_FORMAT_CSV:
self.writeline(self.join_csv(values))
elif self.output_format == self.OUTPUT_FORMAT_MD:
self.writeline(self.join_md(values))
elif self.output_format == self.OUTPUT_FORMAT_JSON:
self.writeline(json.dumps(values, indent=None, separators=(',', ':')) + ",")
elif self.output_format == self.OUTPUT_FORMAT_JSON_MAP:
self.writeline(json.dumps(self.json_map(values), indent=None, separators=(',', ':')) + ",")
elif self.output_format == self.OUTPUT_FORMAT_JSON_MAP_COMPACT:
self.writeline(json.dumps(self.json_map(values, compact=True), indent=None, separators=(',', ':')) + ",")
elif self.output_format == self.OUTPUT_FORMAT_JSONL:
self.writeline(json.dumps(values, indent=None, separators=(',', ':')))
elif self.output_format == self.OUTPUT_FORMAT_JSONL_MAP:
self.writeline(json.dumps(self.json_map(values), indent=None, separators=(',', ':')))
elif self.output_format == self.OUTPUT_FORMAT_JSONL_MAP_COMPACT:
self.writeline(json.dumps(self.json_map(values, compact=True), indent=None, separators=(',', ':')))
else:
raise ValueError("Unrecognized output format '%s'." % self.output_format)
self.line_count += 1
if self.very_verbose:
sys.stdout.write(".")
sys.stdout.flush()
def flush(self):
if self.gzip_thread is None:
self.file_out.flush()
def close(self):
if self.output_format == "json":
if self.verbose:
print("Closing the JSON list.", file=self.error_file, flush=True)
self.writeline("]")
if self.gzip_thread is not None:
self.gzip_thread.close()
else:
self.file_out.close()
def writemap(self, value_map: typing.Mapping[str, str]):
"""
Write a map of values to the output file.
"""
column_name: str
# Optionally check for unexpected column names:
if self.prohibit_extra_columns:
for column_name in value_map.keys():
if column_name not in self.column_name_map:
raise ValueError("Unexpected column name %s at data record %d" % (column_name, self.line_count))
values: typing.List[str] = [ ]
for column_name in self.column_names:
if column_name in value_map:
values.append(value_map[column_name])
elif self.require_all_columns:
# TODO: throw a better exception.
raise ValueError("Missing column %s at data record %d" % (column_name, self.line_count))
else:
values.append("")
self.write(values)
def build_shuffle_list(self,
other_column_names: typing.List[str],
fail_on_unknown_column: bool = False)->typing.List[int]:
results: typing.List[int] = [ ]
column_name: str
for column_name in other_column_names:
if column_name in self.column_name_map:
results.append(self.column_name_map[column_name])
elif fail_on_unknown_column:
# TODO: throw a better exception
raise ValueError("Unknown column name %s when building shuffle list" % column_name)
else:
results.append(-1) # Means skip this column.
return results
def main():
"""
Test the KGTK edge file writer.
TODO: full reader options.
TODO: --show-options
"""
parser = ArgumentParser()
parser.add_argument(dest="input_kgtk_file", help="The KGTK file to read", type=Path, nargs="?")
parser.add_argument(dest="output_kgtk_file", help="The KGTK file to write", type=Path, nargs="?")
parser.add_argument( "--header-error-action", dest="header_error_action",
help="The action to take when a header error is detected Only ERROR or EXIT are supported.",
type=ValidationAction, action=EnumNameAction, default=ValidationAction.EXIT)
parser.add_argument( "--gzip-in-parallel", dest="gzip_in_parallel", help="Execute gzip in a subthread.", action='store_true')
parser.add_argument( "--input-mode", dest="input_mode",
help="Determine the input KGTK file mode.", type=KgtkReader.Mode, action=EnumNameAction, default=KgtkReader.Mode.AUTO)
parser.add_argument( "--output-mode", dest="output_mode",
help="Determine the output KGTK file mode.", type=KgtkWriter.Mode, action=EnumNameAction, default=KgtkWriter.Mode.AUTO)
parser.add_argument( "--output-format", dest="output_format", help="The file format (default=kgtk)", type=str)
parser.add_argument( "--output-columns", dest="output_column_names", help="Rename all output columns. (default=%(default)s)", type=str, nargs='+')
parser.add_argument( "--old-columns", dest="old_column_names", help="Rename seleted output columns: old names. (default=%(default)s)", type=str, nargs='+')
parser.add_argument( "--new-columns", dest="new_column_names", help="Rename seleted output columns: new names. (default=%(default)s)", type=str, nargs='+')
parser.add_argument("-v", "--verbose", dest="verbose", help="Print additional progress messages.", action='store_true')
parser.add_argument( "--very-verbose", dest="very_verbose", help="Print additional progress messages.", action='store_true')
args = parser.parse_args()
error_file: typing.TextIO = sys.stdout if args.errors_to_stdout else sys.stderr
kr: KgtkReader = KgtkReader.open(args.input_kgtk_file,
error_file=error_file,
header_error_action=args.header_error_action,
gzip_in_parallel=args.gzip_in_parallel,
mode=args.input_mode,
verbose=args.verbose, very_verbose=args.very_verbose)
kw: KgtkWriter = KgtkWriter.open(kr.column_names,
args.output_kgtk_file,
error_file=error_file,
gzip_in_parallel=args.gzip_in_parallel,
header_error_action=args.header_error_action,
mode=args.output_mode,
output_format=args.output_format,
output_column_names=args.output_column_names,
old_column_names=args.old_column_names,
new_column_names=args.new_column_names,
verbose=args.verbose, very_verbose=args.very_verbose)
line_count: int = 0
row: typing.List[str]
for row in kr:
kw.write(row)
line_count += 1
kw.close()
if args.verbose:
print("Copied %d lines" % line_count, file=error_file, flush=True)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
697a3885eff485ce7088da0fb99a37de47a132fb
|
3e5b0278bb8f7c221c5d3478c0c54cae81123799
|
/database/ingestFiesResults.py
|
987ae5355dae68d3b9457d1671382a5f75d3ee99
|
[] |
no_license
|
jmccormac01/NOT
|
717e8ecc7c157eedf320d87b796010f2cad97dd9
|
3463accce62848142dede0026fa27aba4366f45b
|
refs/heads/master
| 2021-01-18T23:52:22.899766 | 2017-05-03T09:08:42 | 2017-05-03T09:08:42 | 54,653,166 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,211 |
py
|
"""
Script to ingest the results files from fiespipe.py
This is a copy of the ingestCafeResults.py script
Results file has the following structure:
0- Object name
1- MBJD
2- RV
3- error in RV
4- Bisector span
5- error in bisector span
6- instrument
7- pipeline
8- resolving power
 9- Effective Temperature
10- log(g)
11- [Fe/H]
12- v*sin(i)
 13- value of the continuum normalized CCF at its lowest point
14- standard deviation of the gaussian fitted to the CCF
15- Exposure time
16- Signal to noise ratio at ~5150A
17- path to the CCF plot file
"""
import os
import sys
import argparse as ap
import pymysql
# pylint: disable = invalid-name
def argParse():
"""
Parse the command line arguments
"""
parser = ap.ArgumentParser()
parser.add_argument('--ingest',
help='Ingest the results to the database',
action='store_true')
return parser.parse_args()
RESULTS_FILE = 'results.txt'
if __name__ == '__main__':
args = argParse()
db = pymysql.connect(host='localhost',
db='eblm',
password='mysqlpassword')
if os.path.exists(RESULTS_FILE):
night = os.getcwd().split('/')[-2].split('_')[1]
night = "{}-{}-{}".format(night[:4], night[4:6], night[6:])
print(night)
f = open(RESULTS_FILE).readlines()
for line in f:
ls = line.rstrip().split()
if len(ls) != 18:
print('ERROR: Wrong number of columns in results.txt')
sys.exit(1)
obj = ls[0]
if obj.startswith('1SWASP'):
swasp_id = obj
else:
swasp_id = None
bjd_mid = ls[1]
mask_velocity = ls[2]
mask_velocity_err = ls[3]
bisector = ls[4]
bisector_err = ls[5]
mask_ccf_height = ls[13]
mask_ccf_fwhm = ls[14]
snr_5150 = ls[16]
pdf_name = ls[17].split('/')[-1]
image_id = '{}.fits'.format(pdf_name.split('.')[0])
mask = pdf_name.split('.')[-2].split('_')[-1]
qry = """
REPLACE INTO eblm_fies (
image_id, swasp_id, object_name,
bjd_mid, mask, mask_velocity,
mask_velocity_err, mask_ccf_height,
mask_ccf_fwhm, bisector,
bisector_err, snr_5150, night, analyse
)
VALUES (
'{}', '{}', '{}', {}, '{}', {},
{}, {}, {}, {}, {}, {}, '{}', 1
)
""".format(image_id, swasp_id, obj,
bjd_mid, mask, mask_velocity,
mask_velocity_err, mask_ccf_height,
mask_ccf_fwhm, bisector,
bisector_err, snr_5150, night)
print(qry)
if args.ingest:
with db.cursor() as cur:
cur.execute(qry)
db.commit()
else:
print('{} not found...'.format(RESULTS_FILE))
|
[
"[email protected]"
] | |
9c1c35fa401ea152589015a7a13ebf1c10fc1825
|
628ab6e412e7c4c755bc42d8137acd3da2d4be0e
|
/tests/type/test_type_util.py
|
75c136b72074efd78daecd55cfe6045a1eecb8c4
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
TrendingTechnology/apysc
|
ffd7d9b558707b934c5df127eca817d4f12d619b
|
5c6a4674e2e9684cb2cb1325dc9b070879d4d355
|
refs/heads/main
| 2023-06-01T20:19:20.835539 | 2021-06-20T03:53:33 | 2021-06-20T03:53:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,307 |
py
|
from apysc import Boolean
from apysc import Int
from apysc import Number
from apysc.type import type_util
def test_is_same_class_instance() -> None:
result: bool = type_util.is_same_class_instance(class_=bool, instance=1)
assert not result
result = type_util.is_same_class_instance(class_=int, instance=1)
assert result
def test_is_float_or_number() -> None:
result: bool = type_util.is_float_or_number(value=100.5)
assert result
result = type_util.is_float_or_number(value=Number(value=10.5))
assert result
result = type_util.is_float_or_number(value=100)
assert not result
result = type_util.is_float_or_number(value=Int(value=10))
assert not result
def test_is_number() -> None:
result: bool = type_util.is_number(value=Number(value=10.5))
assert result
result = type_util.is_number(value=10.5)
assert not result
result = type_util.is_number(value=Int(value=10))
assert not result
def test_is_bool() -> None:
result: bool = type_util.is_bool(value=True)
assert result
result = type_util.is_bool(value=False)
assert result
result = type_util.is_bool(value=Boolean(True))
assert result
result = type_util.is_bool(value=1)
assert not result
|
[
"[email protected]"
] | |
da350f298931965ee5690a173c730b6e1f634548
|
5407d32363d4806176c768ef7db65c8f7c9e7f72
|
/main.py
|
307959cadf45e33557f44f8dc1bf3447330b65d3
|
[] |
no_license
|
krishpranav/pyide
|
173efa96d8c7b50b2505c65a0562a4af64ab303f
|
587628367b0ab6535ad3ebd00850c56c33b5fcbf
|
refs/heads/master
| 2023-04-16T09:11:13.381777 | 2021-04-20T12:29:33 | 2021-04-20T12:29:33 | 359,804,202 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,792 |
py
|
#!/usr/bin/env python3
#imports
from tkinter import *
from tkinter.filedialog import asksaveasfilename, askopenfilename
import subprocess
compiler = Tk()
compiler.title("IDE")
file_path = ''
def set_file_path(path):
global file_path
file_path = path
def open_file():
path = askopenfilename(filetypes=[('Python Files', '*.py')])
with open(path, 'r') as file:
code = file.read()
editor.delete('1.0', END)
editor.insert('1.0', code)
set_file_path(path)
def save_as():
if file_path == '':
path = asksaveasfilename(filetypes=[('Python Files', '*.py')])
else:
path = file_path
with open(path, 'w') as file:
code = editor.get('1.0', END)
file.write(code)
set_file_path(path)
def run():
if file_path == '':
save_prompt = Toplevel()
text = Label(save_prompt, text='Please save your code')
text.pack()
return
command = f'python {file_path}'
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, error = process.communicate()
code_output.insert('1.0', output)
code_output.insert('1.0', error)
menu_bar = Menu(compiler)
file_menu = Menu(menu_bar, tearoff=0)
file_menu.add_command(label='Open', command=open_file)
file_menu.add_command(label='Save', command=save_as)
file_menu.add_command(label='Save As', command=save_as)
file_menu.add_command(label='Exit', command=exit)
menu_bar.add_cascade(label='File', menu=file_menu)
run_bar = Menu(menu_bar, tearoff=0)
run_bar.add_command(label='Run', command=run)
menu_bar.add_cascade(label='Run', menu=run_bar)
compiler.config(menu=menu_bar)
editor = Text()
editor.pack()
code_output = Text(height=10)
code_output.pack()
compiler.mainloop()
|
[
"[email protected]"
] | |
5de717f72acb11220ee83be9cd540c6e174a93de
|
bdbc9cd8c64cfa92efffb9e138cb282d36f69b0a
|
/addons/purchase/report/__init__.py
|
1277be8fbe1d1fdbcb3b28903e4f3fdecb7a1945
|
[] |
no_license
|
clebaresu/impra-adns
|
d330cece1b710643625627bfd7ed66bac7d233ef
|
8b9889d86c6ea194cfb7b0db8bdc3284635cc081
|
refs/heads/master
| 2020-05-02T16:51:41.798969 | 2019-03-27T22:03:32 | 2019-03-27T22:03:32 | 178,080,681 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,095 |
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import purchase_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"[email protected]"
] | |
2586557d647c829ce740f0b06ad560ee16dd0611
|
98383e62e61321f65450bb0fd901215ccbe6293b
|
/hanlp/components/mtl/tasks/tok/tag_tok.py
|
ef851e2cbd4ca8ccca15fed5edb30c0f11616c07
|
[
"Apache-2.0",
"CC-BY-NC-4.0",
"Python-2.0",
"CC-BY-NC-SA-4.0"
] |
permissive
|
leobert-lan/HanLP
|
155a66b5c93720abeb816616cb3b9ef4f7942e83
|
39c3ede99c3f99d7ea39bbbd470601dc7ef0ad62
|
refs/heads/master
| 2021-06-16T17:51:54.485767 | 2021-03-26T02:36:12 | 2021-03-26T02:36:12 | 178,836,942 | 1 | 0 |
Apache-2.0
| 2021-03-26T02:36:13 | 2019-04-01T10:09:21 |
Java
|
UTF-8
|
Python
| false | false | 10,190 |
py
|
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-08-11 16:35
import logging
from typing import Dict, Any, Union, Iterable, List, Set
import torch
from torch.utils.data import DataLoader
from hanlp.common.dataset import SamplerBuilder, PadSequenceDataLoader
from hanlp.common.transform import VocabDict, TransformList
from hanlp.components.mtl.tasks import Task
from hanlp.components.tokenizers.transformer import TransformerTaggingTokenizer
from hanlp.layers.crf.crf import CRF
from hanlp.layers.scalar_mix import ScalarMixWithDropoutBuilder
from hanlp.metrics.metric import Metric
from hanlp.metrics.mtl import MetricDict
from hanlp.transform.transformer_tokenizer import TransformerSequenceTokenizer
from hanlp_common.util import merge_locals_kwargs
from hanlp_trie import DictInterface, TrieDict
class LinearCRFDecoder(torch.nn.Module):
def __init__(self,
hidden_size,
num_labels,
crf=False) -> None:
super().__init__()
self.classifier = torch.nn.Linear(hidden_size, num_labels)
self.crf = CRF(num_labels) if crf else None
def forward(self, contextualized_embeddings: torch.FloatTensor, batch: Dict[str, torch.Tensor], mask=None):
return self.classifier(contextualized_embeddings[:, 1:-1, :])
class TaggingTokenization(Task, TransformerTaggingTokenizer):
def __init__(self,
trn: str = None,
dev: str = None,
tst: str = None,
sampler_builder: SamplerBuilder = None,
dependencies: str = None,
scalar_mix: ScalarMixWithDropoutBuilder = None,
use_raw_hidden_states=False,
lr=1e-3, separate_optimizer=False,
cls_is_bos=True,
sep_is_eos=True,
delimiter=None,
max_seq_len=None, sent_delimiter=None, char_level=False, hard_constraint=False,
transform=None,
tagging_scheme='BMES',
crf=False,
token_key='token',
dict_force: Union[DictInterface, Union[Dict[str, Any], Set[str]]] = None,
dict_combine: Union[DictInterface, Union[Dict[str, Any], Set[str]]] = None,
**kwargs) -> None:
"""Tokenization which casts a chunking problem into a tagging problem.
This task has to create batch of tokens containing both [CLS] and [SEP] since it's usually the first task
and later tasks might need them.
Args:
trn: Path to training set.
dev: Path to dev set.
tst: Path to test set.
sampler_builder: A builder which builds a sampler.
dependencies: Its dependencies on other tasks.
scalar_mix: A builder which builds a `ScalarMixWithDropout` object.
use_raw_hidden_states: Whether to use raw hidden states from transformer without any pooling.
lr: Learning rate for this task.
separate_optimizer: Use customized separate optimizer for this task.
cls_is_bos: ``True`` to treat the first token as ``BOS``.
sep_is_eos: ``True`` to treat the last token as ``EOS``.
delimiter: Delimiter used to split a line in the corpus.
max_seq_len: Sentences longer than ``max_seq_len`` will be split into shorter ones if possible.
sent_delimiter: Delimiter between sentences, like period or comma, which indicates a long sentence can
be split here.
char_level: Whether the sequence length is measured at char level.
hard_constraint: Whether to enforce hard length constraint on sentences. If there is no ``sent_delimiter``
in a sentence, it will be split at a token anyway.
transform: An optional transform to be applied to samples. Usually a character normalization transform is
passed in.
tagging_scheme: Either ``BMES`` or ``BI``.
crf: ``True`` to enable CRF (:cite:`lafferty2001conditional`).
token_key: The key to tokens in dataset. This should always be set to ``token`` in MTL.
**kwargs: Not used.
"""
super().__init__(**merge_locals_kwargs(locals(), kwargs, excludes=(
'self', 'kwargs', '__class__', 'dict_force', 'dict_combine'))) # avoid to config
self.transform = transform
self.vocabs = VocabDict()
self.dict_force = dict_force
self.dict_combine = dict_combine
def build_dataloader(self, data, transform: TransformList = None, training=False, device=None,
logger: logging.Logger = None, cache=False, gradient_accumulation=1, **kwargs) -> DataLoader:
args = dict((k, self.config[k]) for k in
['delimiter', 'max_seq_len', 'sent_delimiter', 'char_level', 'hard_constraint'] if k in self.config)
# We only need those transforms before TransformerTokenizer
transformer_index = transform.index_by_type(TransformerSequenceTokenizer)
assert transformer_index is not None
transform = transform[:transformer_index + 1]
if self.transform:
transform.insert(0, self.transform)
transform.append(self.last_transform())
dataset = self.build_dataset(data, cache=cache, transform=transform, **args)
if self.vocabs.mutable:
self.build_vocabs(dataset, logger)
return PadSequenceDataLoader(
batch_sampler=self.sampler_builder.build(self.compute_lens(data, dataset, 'token_input_ids'),
shuffle=training, gradient_accumulation=gradient_accumulation),
device=device,
dataset=dataset)
def compute_loss(self,
batch: Dict[str, Any],
output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any],
criterion) -> Union[torch.FloatTensor, Dict[str, torch.FloatTensor]]:
return TransformerTaggingTokenizer.compute_loss(self, criterion, output, batch['tag_id'], batch['mask'])
def decode_output(self, output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any],
mask: torch.BoolTensor, batch: Dict[str, Any], decoder, **kwargs) -> Union[Dict[str, Any], Any]:
return TransformerTaggingTokenizer.decode_output(self, output, mask, batch, decoder)
def update_metrics(self, batch: Dict[str, Any],
output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any],
prediction: Dict[str, Any], metric: Union[MetricDict, Metric]):
TransformerTaggingTokenizer.update_metrics(self, metric, output, batch['tag_id'], None, batch, prediction)
def build_model(self, encoder_size, training=True, **kwargs) -> torch.nn.Module:
return LinearCRFDecoder(encoder_size, len(self.vocabs['tag']), self.config.crf)
def build_metric(self, **kwargs):
return TransformerTaggingTokenizer.build_metric(self)
def build_criterion(self, model=None, **kwargs):
return TransformerTaggingTokenizer.build_criterion(self, model=model, reduction='mean')
def input_is_flat(self, data) -> bool:
return TransformerTaggingTokenizer.input_is_flat(self, data)
def prediction_to_result(self, prediction: Dict[str, Any], batch: Dict[str, Any]) -> Union[List, Dict]:
return TransformerTaggingTokenizer.prediction_to_human(self, prediction, None, batch, rebuild_span=True)
def build_tokenizer(self, tokenizer: TransformerSequenceTokenizer):
# The transform for tokenizer needs very special settings, ensure these settings are set properly.
return TransformerSequenceTokenizer(
tokenizer.tokenizer,
tokenizer.input_key,
tokenizer.output_key,
tokenizer.max_seq_length,
tokenizer.truncate_long_sequences,
ret_subtokens=True,
ret_subtokens_group=True,
ret_token_span=True,
cls_is_bos=True,
sep_is_eos=True,
use_fast=tokenizer.tokenizer.is_fast,
dict_force=self.dict_force,
strip_cls_sep=False,
)
def build_samples(self, inputs, cls_is_bos=False, sep_is_eos=False):
return [{self.config.token_key: sent} for sent in inputs]
@property
def dict_force(self) -> DictInterface:
return TransformerTaggingTokenizer.dict_force.fget(self)
@dict_force.setter
def dict_force(self, dictionary: Union[DictInterface, Union[Dict[str, Any], Set[str]]]):
if dictionary is not None and not isinstance(dictionary, DictInterface):
dictionary = TrieDict(dictionary)
self.config.dict_force = dictionary
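    # A hypothetical illustration (not from the original source): because the setter above wraps
    # any plain dict/set in a TrieDict, callers can assign raw Python collections directly, e.g.
    #
    #   task.dict_force = {'natural language processing'}  # `task` is an instance of this class
    #   # task.dict_force should then be a TrieDict holding the forced entries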
@property
def dict_combine(self) -> DictInterface:
return TransformerTaggingTokenizer.dict_combine.fget(self)
@dict_combine.setter
def dict_combine(self, dictionary: Union[DictInterface, Union[Dict[str, Any], Set[str]]]):
# noinspection PyArgumentList
TransformerTaggingTokenizer.dict_combine.fset(self, dictionary)
def transform_batch(self, batch: Dict[str, Any], results: Dict[str, Any] = None, cls_is_bos=False,
sep_is_eos=False) -> Dict[str, Any]:
"""
        This method is overridden to honor the zero-indexed tokens used in the custom dict. Although for a tokenizer
        cls_is_bos = sep_is_eos = True, its tokens don't contain [CLS] or [SEP]. This behaviour is adopted from
        earlier versions and is kept to avoid migration efforts.
Args:
batch: A batch of samples.
results: Predicted results from other tasks which might be useful for this task to utilize. Say a dep task
uses both token and pos as features, then it will need both tok and pos results to make a batch.
cls_is_bos: First token in this batch is BOS.
sep_is_eos: Last token in this batch is EOS.
Returns:
A batch.
"""
return batch
|
[
"[email protected]"
] | |
7b8818be9be235aca64718b52aeb09dd41aa1a45
|
8b060d38c63993a3259a80b072768206b558772b
|
/BlogApp/migrations/0016_user.py
|
cd8e9e162cda9e83045bd02588187a077f03409b
|
[] |
no_license
|
mortadagzar/Simple-Python-feedingTable
|
d8b0a2a06c1b3d78167241a6f60a2bb00fa9c4ce
|
716c68e6b9c55bd2dc8299ca14ccf39431cf0efb
|
refs/heads/master
| 2020-03-30T19:07:16.027807 | 2018-10-14T15:05:28 | 2018-10-14T15:05:28 | 151,529,016 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 642 |
py
|
# Generated by Django 2.1.1 on 2018-09-24 22:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('BlogApp', '0015_auto_20180922_1833'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40)),
('password', models.CharField(max_length=40)),
('email', models.TextField(blank=True, null=True)),
],
),
]
|
[
"[email protected]"
] | |
94406b4c0a5f5a0b725f7359720d14ae01e6dc47
|
7bb34b9837b6304ceac6ab45ce482b570526ed3c
|
/external/webkit/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py
|
79e4cf4a97b601f70dcf60ea9438ce8a379e69d8
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.1-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft"
] |
permissive
|
ghsecuritylab/android_platform_sony_nicki
|
7533bca5c13d32a8d2a42696344cc10249bd2fd8
|
526381be7808e5202d7865aa10303cb5d249388a
|
refs/heads/master
| 2021-02-28T20:27:31.390188 | 2013-10-15T07:57:51 | 2013-10-15T07:57:51 | 245,730,217 | 0 | 0 |
Apache-2.0
| 2020-03-08T00:59:27 | 2020-03-08T00:59:26 | null |
UTF-8
|
Python
| false | false | 2,325 |
py
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.commands.rebaseline import BuilderToPort, Rebaseline
from webkitpy.tool.mocktool import MockTool
class RebaselineTest(unittest.TestCase):
# This just makes sure the code runs without exceptions.
def test_tests_to_update(self):
command = Rebaseline()
command.bind_to_tool(MockTool())
build = Mock()
OutputCapture().assert_outputs(self, command._tests_to_update, [build])
class BuilderToPortTest(unittest.TestCase):
def test_port_for_builder(self):
converter = BuilderToPort()
port = converter.port_for_builder("Leopard Intel Debug (Tests)")
self.assertEqual(port.name(), "mac-leopard")
|
[
"[email protected]"
] | |
53d010f5a09590ee0504499dddb723f69908eed7
|
bca9c2fa3c4c3d06dd612280ce39090a9dfab9bd
|
/neekanee/job_scrapers/plugins/com/link/successfactors.py
|
d9a5a27fd52a841ade57881fa9b838d7cd797500
|
[] |
no_license
|
thayton/neekanee
|
0890dd5e5cf5bf855d4867ae02de6554291dc349
|
f2b2a13e584469d982f7cc20b49a9b19fed8942d
|
refs/heads/master
| 2021-03-27T11:10:07.633264 | 2018-07-13T14:19:30 | 2018-07-13T14:19:30 | 11,584,212 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,150 |
py
|
import re, urlparse, mechanize
from neekanee.jobscrapers.jobscraper import JobScraper
from neekanee.htmlparse.soupify import soupify, get_all_text
from neekanee_solr.models import *
COMPANY = {
'name': 'SuccessFactors',
'hq': 'San Francisco, CA',
'home_page_url': 'http://www.successfactors.com',
'jobs_page_url': 'http://jobs.successfactors.com/search',
'empcnt': [1001,5000]
}
class SuccessFactorsJobScraper(JobScraper):
def __init__(self):
super(SuccessFactorsJobScraper, self).__init__(COMPANY)
def scrape_job_links(self, url):
jobs = []
self.br.open(url)
pageno = 2
while True:
s = soupify(self.br.response().read())
            r = re.compile(r'^/job/[^/]+/\d+/$')
t = s.find('table', id='searchresults')
x = {'class': 'jobLocation'}
for a in t.findAll('a', href=r):
tr = a.findParent('tr')
sp = tr.find('span', attrs=x)
l = self.parse_location(sp.text)
if not l:
continue
job = Job(company=self.company)
job.title = a.text
job.url = urlparse.urljoin(self.br.geturl(), a['href'])
job.location = l
jobs.append(job)
try:
self.br.follow_link(self.br.find_link(text='Page %d' % pageno))
                pageno += 1
except mechanize.LinkNotFoundError:
break
return jobs
def scrape_jobs(self):
job_list = self.scrape_job_links(self.company.jobs_page_url)
self.prune_unlisted_jobs(job_list)
new_jobs = self.new_job_listings(job_list)
for job in new_jobs:
self.br.open(job.url)
s = soupify(self.br.response().read())
x = {'class': 'jobDisplay'}
d = s.find('div', attrs=x)
job.desc = get_all_text(d)
job.save()
def get_scraper():
return SuccessFactorsJobScraper()
if __name__ == '__main__':
job_scraper = get_scraper()
job_scraper.scrape_jobs()
|
[
"[email protected]"
] | |
480d955400267885fb5c52823d2d84eaa53fffa3
|
25427cf7ac5ae9f8e5d421e953750a46fb2d1ebc
|
/OldBoy/Day68/django_model_form/django_model_form/settings.py
|
6fb83e2edb64e357b4d4491ed8d44ad40dd0a810
|
[] |
no_license
|
povillechan/Python
|
d48e2e25c9961acef45162ca882b547e5b9d0b77
|
67e88d6d7bdbe49b0c5165d9b35f37dccf638877
|
refs/heads/master
| 2020-03-22T08:43:44.606336 | 2019-09-01T15:25:57 | 2019-09-01T15:25:57 | 139,786,618 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,482 |
py
|
"""
Django settings for django_model_form project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3k++_2zr*%tgs7#n*yrd(#s_44k$ak$!@m70(g)0vj2jb4h_h3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',
'app01',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
INTERNAL_IPS = ['127.0.0.1',]
ROOT_URLCONF = 'django_model_form.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_model_form.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
},
'default1': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db1.sqlite3'),
},
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR,'static'),
)
|
[
"[email protected]"
] | |
3b34c7ebcbedf568257311ee2f077aeaf90dd3a0
|
cccfb7be281ca89f8682c144eac0d5d5559b2deb
|
/tools/perf/page_sets/desktop_ui/desktop_ui_shared_state.py
|
0e9637b9690d4687b2cadd3870ee616d2e07c556
|
[
"LGPL-2.0-or-later",
"MPL-1.1",
"BSD-3-Clause",
"APSL-2.0",
"MIT",
"Zlib",
"GPL-2.0-only",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-unknown",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-only"
] |
permissive
|
SREERAGI18/chromium
|
172b23d07568a4e3873983bf49b37adc92453dd0
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
refs/heads/master
| 2023-08-27T17:45:48.928019 | 2021-11-11T22:24:28 | 2021-11-11T22:24:28 | 428,659,250 | 1 | 0 |
BSD-3-Clause
| 2021-11-16T13:08:14 | 2021-11-16T13:08:14 | null |
UTF-8
|
Python
| false | false | 415 |
py
|
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import shared_page_state
class DesktopUISharedState(shared_page_state.SharedPageState):
""" Ensures the browser is restarted for each test, for all platforms. """
def ShouldReuseBrowserForAllStoryRuns(self):
return False
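# Hypothetical usage sketch (not part of the original file): a desktop UI story would opt in to
# this restart-per-story behaviour by passing the class when the page is defined, e.g.
#
#   page_module.Page(url, page_set, shared_page_state_class=DesktopUISharedState)
#
# (page_module/page_set are placeholders here; the real story definitions live elsewhere.)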
|
[
"[email protected]"
] | |
767eead4cdb530f13ecf649af74c3c4ac48b756f
|
b2dc8aa865136a80bba964624c641a32f25d0aa8
|
/test/test_meta.py
|
fcb2c3168a0dbb9ffecc33cff92582de680252dd
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
DoubleVII/pytorch
|
91425f16239dfdd48f5faa9ea1b8d2642f9aab6b
|
67eca7cd321f73aa8d6c1a76a7621d3db9a7c97e
|
refs/heads/master
| 2023-02-20T23:49:06.344426 | 2023-02-08T22:40:45 | 2023-02-08T22:40:45 | 274,839,708 | 1 | 0 |
NOASSERTION
| 2020-06-25T05:52:16 | 2020-06-25T05:52:15 | null |
UTF-8
|
Python
| false | false | 55,044 |
py
|
# Owner(s): ["module: primTorch"]
import itertools
import torch
import os
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
run_tests,
dtype_abbrs
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import op_db
from torchgen.utils import YamlLoader
from torchgen.model import OperatorName
import sys
import yaml
import atexit
import re
from collections import defaultdict
import unittest
import warnings
import weakref
from functools import wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
class TestMetaConverter(TestCase):
def assertSameVersionCounter(self, m1, m2):
# Cannot easily test m1 and m2 have same storage due to
# lack of Storage bindings. Use version counter.
vc = m1._version
self.assertEqual(m2._version, vc)
# Doing it this way ensures that we get VC bump even with leaves
with torch.no_grad():
m1._base.add_(3)
self.assertNotEqual(m1._version, vc)
self.assertEqual(m2._version, m1._version)
def assertMetadataMatches(self, m1, m2):
assert_metadata_eq(self.assertEqual, m1, m2)
def test_view_of_non_leaf(self):
x = torch.randn(4, requires_grad=True)
y = x.neg()
z1 = y[:]
z2 = y[:]
to_meta = MetaConverter()
m1 = to_meta(z1)
m2 = to_meta(z2)
# check the test is actually testing what it claims
self.assertTrue(m1._is_view())
self.assertFalse(m1._base.is_leaf)
self.assertIsNot(m1, m2)
self.assertMetadataMatches(m1, z1)
self.assertMetadataMatches(m2, z2)
self.assertSameVersionCounter(m1, m2)
def test_view_of_leaf(self):
x = torch.randn(4, requires_grad=True)
z1 = x[:]
z2 = x[:]
to_meta = MetaConverter()
m1 = to_meta(z1)
m2 = to_meta(z2)
# check the test is actually testing what it claims
self.assertTrue(m1._is_view())
self.assertTrue(m1._base.is_leaf)
self.assertIsNot(m1, m2)
self.assertMetadataMatches(m1, z1)
self.assertMetadataMatches(m2, z2)
self.assertSameVersionCounter(m1, m2)
def test_view_of_view_of_leaf(self):
x = torch.randn(8)
y = x.view(2, 4)
y.requires_grad = True
z = y.view(2, 2, 2)
to_meta = MetaConverter()
mx = to_meta(x)
mz = to_meta(z)
self.assertFalse(z.is_leaf)
self.assertMetadataMatches(mx, x)
self.assertMetadataMatches(mz, z)
def test_leaf(self):
x = torch.randn(4, requires_grad=True)
to_meta = MetaConverter()
m = to_meta(x)
# check the test is actually testing what it claims
self.assertTrue(m.is_leaf)
self.assertTrue(m.requires_grad)
self.assertMetadataMatches(m, x)
def test_non_leaf(self):
x = torch.randn(4, requires_grad=True)
y = x.neg()
to_meta = MetaConverter()
m = to_meta(y)
# check the test is actually testing what it claims
self.assertFalse(m.is_leaf)
self.assertTrue(m.requires_grad)
self.assertMetadataMatches(m, y)
def test_requires_grad_false(self):
x = torch.randn(4, requires_grad=False)
to_meta = MetaConverter()
m = to_meta(x)
# check the test is actually testing what it claims
self.assertFalse(m.requires_grad)
self.assertMetadataMatches(m, x)
def test_channels_last(self):
x = torch.empty(2, 3, 4, 5, memory_format=torch.channels_last)
to_meta = MetaConverter()
m = to_meta(x)
# check the test is actually testing what it claims
self.assertTrue(m.is_leaf)
self.assertMetadataMatches(m, x)
def test_channels_last_leaf(self):
x = torch.empty(2, 3, 4, 5, memory_format=torch.channels_last, requires_grad=True)
to_meta = MetaConverter()
m = to_meta(x)
# check the test is actually testing what it claims
self.assertTrue(m.requires_grad)
self.assertTrue(m.is_leaf)
self.assertMetadataMatches(m, x)
def test_channels_last_non_leaf(self):
x = torch.empty(2, 3, 4, 5, memory_format=torch.channels_last, requires_grad=True)
y = x + 2
# sanity
self.assertEqual(x.stride(), y.stride())
self.assertFalse(y.is_leaf)
to_meta = MetaConverter()
m = to_meta(y)
# check the test is actually testing what it claims
self.assertTrue(m.requires_grad)
self.assertFalse(m.is_leaf)
self.assertMetadataMatches(m, y)
# Check that we can autograd with m as input without erroring;
# see https://github.com/pytorch/pytorch/issues/87956
loss = m.sum()
torch.autograd.grad(loss, m)
def test_empty_strided_non_dense_leaf(self):
x = torch.empty_strided((2, 2), (4, 2), requires_grad=True)
to_meta = MetaConverter()
m = to_meta(x)
# check the test is actually testing what it claims
self.assertTrue(m.requires_grad)
self.assertTrue(m.is_leaf)
self.assertMetadataMatches(m, x)
def test_non_leaf_torture(self):
x = torch.empty(20, requires_grad=True)
with torch.no_grad():
x.set_(x.storage(), 10, (2,), (2,))
to_meta = MetaConverter()
m = to_meta(x)
# check the test is actually testing what it claims
self.assertTrue(m.requires_grad)
self.assertTrue(m.is_leaf)
self.assertMetadataMatches(m, x)
# NB: complex stuff is not actually exercised right now because
# we have a blanket exclusion for complex conversion
def test_view_as_real(self):
x = torch.randn(4, dtype=torch.complex64)
y = torch.view_as_real(x)
m = MetaConverter()(y)
self.assertMetadataMatches(m, y)
def test_complex_noncontiguous_bug(self):
x = torch.randn((2, 2, 4, 9), dtype=torch.complex32)[:, 0, :, :]
m = MetaConverter()(x)
self.assertMetadataMatches(m, x)
def test_view_as_complex(self):
x = torch.randn((4, 2), dtype=torch.float32)
y = torch.view_as_complex(x)
m = MetaConverter()(y)
self.assertMetadataMatches(m, y)
def test_view_dtype(self):
x = torch.randn(4, dtype=torch.float32)
y = x.view(dtype=torch.int32)
m = MetaConverter()(y)
self.assertMetadataMatches(m, y)
def test_imag(self):
x = torch.randn(4, dtype=torch.complex64)
y = x.imag
m = MetaConverter()(y)
self.assertMetadataMatches(m, y)
@skipIfTorchDynamo("https://github.com/pytorch/torchdynamo/issues/1991")
def test_weakref(self):
x = torch.randn(4, 4, 4)
m = MetaConverter()
y = m(x)
z = m(x)
self.assertIs(y, z)
self.assertEqual(len(m.tensor_memo), 1)
self.assertEqual(len(m.storage_memo), 1)
del x
self.assertEqual(len(m.tensor_memo), 0)
m.check_for_expired_weak_storages()
self.assertEqual(len(m.storage_memo), 0)
li = []
r = []
for i in range(4):
li.append(torch.rand([i]))
r.append(m(li[-1]))
self.assertEqual(len(m.tensor_memo), 4)
del li
self.assertEqual(len(m.tensor_memo), 0)
m.check_for_expired_weak_storages()
self.assertEqual(len(m.storage_memo), 0)
@skipIfTorchDynamo("https://github.com/pytorch/torchdynamo/issues/1991")
def test_tensor_outlives_converter(self):
m = MetaConverter()
ref = weakref.ref(m)
x = torch.randn([4, 4])
y = m(x)
del m
self.assertIs(ref(), None)
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
# channel_last and channel_last_3d related failures
aten.convolution.default,
    # The following ops fail if include_storage_offset = True, but these are a bit edge-casey;
    # we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
class CheckStrides(Enum):
NONE = 0
SIGNIFICANT = 1
ALL = 2
def should_check_strides(func):
if func in CHECK_ALL_STRIDES:
return CheckStrides.ALL
if func in CHECK_STRIDES:
return CheckStrides.SIGNIFICANT
if func in CHECK_STRIDES_SKIPS:
return CheckStrides.NONE
if not isinstance(func, torch._ops.OpOverload):
return CheckStrides.NONE
# Prims are expected to model strides correctly
if func.namespace == "prims":
return CheckStrides.SIGNIFICANT
# Check if it's a view, by testing if any of the returns have
# a non-empty alias set
if any(r.alias_info.before_set for r in func._schema.returns if r.alias_info):
return CheckStrides.SIGNIFICANT
# TODO: check for TensorIterator
return CheckStrides.SIGNIFICANT
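# For illustration, given the sets above: should_check_strides(aten.unsqueeze.default) returns
# CheckStrides.ALL, should_check_strides(aten.complex.default) returns CheckStrides.NONE (it is
# listed in CHECK_STRIDES_SKIPS), and most other ATen overloads fall through to
# CheckStrides.SIGNIFICANT.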
def assert_ref_meta_equal(test_case, func, meta_rs, rs, msg_callable):
flat_meta_rs, _ = tree_flatten(meta_rs)
flat_rs, _ = tree_flatten(rs)
test_case.assertEqual(len(flat_meta_rs), len(flat_rs))
for i, meta_r, r in zip(range(len(flat_rs)), flat_meta_rs, flat_rs):
def test_assert(cond, msg):
if not cond:
raise RuntimeError(f"output {i}: {msg_callable(msg)}")
if not isinstance(r, torch.Tensor):
continue
test_assert(isinstance(meta_r, torch.Tensor), f"but real {i}th result is Tensor")
test_assert(meta_r.dtype == r.dtype, f"but real dtype was {r.dtype}")
test_assert(meta_r.shape == r.shape, f"but real shape was {r.shape}")
# See https://github.com/pytorch/pytorch/issues/78050
if should_check_strides(func) == CheckStrides.ALL:
same_strides, _ = torch._prims_common.check_all_strides(meta_r, r)
test_assert(same_strides, f"but real stride was {r.stride()}")
elif should_check_strides(func) == CheckStrides.SIGNIFICANT:
same_strides, _ = torch._prims_common.check_significant_strides(meta_r, r)
test_assert(same_strides, f"but real stride was {r.stride()}")
test_assert(
meta_r.storage_offset() == r.storage_offset(),
f"but real storage_offset was {r.storage_offset()}")
test_assert(meta_r.requires_grad == r.requires_grad, f"but real requires_grad was {r.requires_grad}")
test_assert(meta_r.is_conj() == r.is_conj(), f"but real is_conj was {r.is_conj()}")
test_assert(meta_r.is_neg() == r.is_neg(), f"but real is_neg was {r.is_neg()}")
# This environment variable controls whether or not we print expected failure
# lists at the end of a test suite run. The intended usage looks like this:
#
# 1. Run `PYTORCH_COLLECT_EXPECT=1 python test/test_meta.py` on a CUDA build
# of PyTorch that has LAPACK/MAGMA installed. You can filter `-k test_meta`
# or `-k test_dispatch_meta` to only focus on one or another list
# 2. Given the printed skip/xfail list, add them to the corresponding lists;
# torch.* entries go in meta_function and aten.* entries go in meta_dispatch.
# If there are preexisting entries, you need to merge in the entries.
#
# This is somewhat manual but typically you shouldn't need to do this, unless
# you've made a major change (e.g., added a new dtype to PyTorch) and need to
# refresh the lists. If you want to do it from scratch, just clear out the
# preexisting lists before running.
#
# WARNING: Python dict literals will silently ignore duplicate keys
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
def print_seen():
expected_failures = []
skips = []
def fmt_dtypes(dtypes):
r = ', '.join(sorted(dtype_abbrs[d] for d in dtypes))
return '{' + r + '}'
for op, failed_dtypes in seen_failed.items():
ops = resolve_name(op)
succeeded_dtypes = seen_succeeded.get(op, set())
expected_failures_dtypes = failed_dtypes - succeeded_dtypes
skips_dtypes = failed_dtypes & succeeded_dtypes
reasons = ""
if failed_reasons[op]:
reasons = " # " + ", ".join(sorted(failed_reasons[op]))
if expected_failures_dtypes:
expected_failures.append(f" {ops}: {fmt_dtypes(expected_failures_dtypes)},{reasons}")
if skips_dtypes:
skips.append(f" {ops}: {fmt_dtypes(skips_dtypes)},")
expected_failures.sort()
skips.sort()
nl = '\n'
print(f"""\
expected_failures = {{
{nl.join(expected_failures)}
}}
skips = {{
{nl.join(skips)}
}}
""")
if COLLECT_EXPECT:
atexit.register(print_seen)
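# For reference, the block printed by print_seen looks roughly like this (entries below are
# illustrative; a real run lists whichever ops actually failed):
#
#   expected_failures = {
#       torch.histc: {bf16, f32, f64},  # aten::histc, aten::histc.out
#   }
#   skips = {
#       torch.median: {f16},
#   }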
# Success forces pass; failure forces fail; skip unconditionally skips testing
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
# unlike plain print/repr, this also shows strides
def verbose_print(e):
class Lit:
def __init__(self, s):
self.s = s
def __repr__(self):
return self.s
def go(t):
if isinstance(t, torch.Tensor):
return Lit(f"{t} stride={t.stride()}")
else:
return t
return repr(tree_map(go, e))
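# e.g. verbose_print(torch.ones(2, 3)) yields the tensor's usual repr followed by
# " stride=(3, 1)", which makes stride mismatches visible in the failure messages below.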
def run_meta_crossref(
test_case,
test_expect,
func,
args,
kwargs,
*,
dtype,
device_type,
run_symbolic_meta: bool
):
to_meta = MetaConverter()
do_meta = test_expect is not TestExpect.SKIP
if do_meta:
try:
meta_args = tree_map(to_meta, args)
meta_kwargs = tree_map(to_meta, kwargs)
except Exception as e:
raise RuntimeError(
f"failed to convert args to meta; "
f"originally (*{args}, **{kwargs})") from e
try:
rs = func(*args, **kwargs)
except Exception as e:
        # A lot of OpInfos for inplace ops are actually broken because
        # they're not tested outside of gradcheck, which only checks
        # torch.float64 and torch.complex128 (and the latter is often
        # skipped as well).
raise unittest.SkipTest("Original OpInfo is broken") from e
    # TODO: also handle cases where func raises an exception
# For now, only attempt if we managed to convert all tensor types
# (if any of them failed, we're in a mixed device situation and
# this isn't well supported)
if do_meta and to_meta.successful():
# Special cases
if func is torch.tensor_split:
# Use original indices_or_sections, this argument is data dependent
meta_args = (meta_args[0], args[1]) + meta_args[2:]
elif func is torch.Tensor.__getitem__:
# Ensure boolean tensors use original
assert len(args) == 2
flat_args, _ = tree_flatten(args[1])
flat_meta_args, spec = tree_flatten(meta_args[1])
flat_new_args = []
for a, ma in zip(flat_args, flat_meta_args):
flat_new_args.append(a if isinstance(a, torch.Tensor) and a.dtype in [torch.int8, torch.bool] else ma)
meta_args = (meta_args[0], tree_unflatten(flat_new_args, spec))
elif func is torch.ops.aten.repeat_interleave.Tensor:
if kwargs.get("output_size", None) is None:
meta_args = args
elif func is torch.ops.aten.index.Tensor:
# Don't convert boolean tensors to meta as they will have nonzero
# called on them
indices = []
for meta_index, real_index in zip(meta_args[1], args[1]):
if meta_index is not None and meta_index.dtype in [torch.int8, torch.bool]:
indices.append(real_index)
else:
indices.append(meta_index)
meta_args = (meta_args[0], indices)
if kwargs.get("device", None) is not None:
meta_kwargs["device"] = "meta"
try:
# Suppress warnings, this doesn't matter for test_meta.py
# but it does matter if you want to use this decorator
# for cross-ref testing, as some tests may be looking at
# errors
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if run_symbolic_meta:
# Run the decomps and meta kernels registered
# to the python dispatcher instead of the regular dispatcher.
# This should be the same set of kernels
# that fake tensor runs in dynamic shapes mode.
with enable_python_dispatcher():
meta_rs = func(*meta_args, **meta_kwargs)
else:
meta_rs = func(*meta_args, **meta_kwargs)
except Exception as e:
if test_expect is TestExpect.XFAILURE:
return rs
seen_failed.setdefault(func, set()).add(dtype)
if isinstance(e, NotImplementedError):
m = RE_NOT_IMPLEMENTED_MSG.search(e.args[0])
if m:
failed_reasons[func].add(m.group(1))
if COLLECT_EXPECT:
return rs
raise RuntimeError(f"""\
failed to run: {resolve_name(func)}(
*{verbose_print(meta_args)},
**{verbose_print(meta_kwargs)}
)""") from e
else:
try:
delim = ',\n '
assert_ref_meta_equal(test_case, func, meta_rs, rs, lambda msg: f"""\
meta disagrees with real impl:
{resolve_name(func)}(
{delim.join(map(verbose_print, meta_args))},
{delim.join(k + ": " + verbose_print(v) for k, v in meta_kwargs.items())}
) = (
{verbose_print(meta_rs)}
)
{msg}
""")
except Exception:
if test_expect is TestExpect.XFAILURE:
return rs
seen_failed.setdefault(func, set()).add(dtype)
if COLLECT_EXPECT:
return rs
raise
else:
seen_succeeded.setdefault(func, set()).add(dtype)
if test_expect is TestExpect.XFAILURE and not COLLECT_EXPECT:
raise RuntimeError(f"unexpected success {resolve_name(func)}")
return rs
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32},
torch.count_nonzero : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.linalg.householder_product : {f64, c64, c128, f32},
torch.linalg.solve_triangular : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.matrix_exp : {f64, c128, c64, bf16, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.ormqr : {f64, c64, c128, f32},
torch.repeat_interleave : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.take : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.frexp : {f64, f16, bf16, f32},
torch.functional.unique : {f64, i32, i64, u8, i16, bf16, b8, i8, f32},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, bf16, b8, i8, f32},
torch.histc : {f64, bf16, f32},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.kthvalue : {f64, i32, i64, u8, i16, bf16, i8, f32},
torch.logcumsumexp : {f64, bf16, f32, c64, c128},
torch.median : {f64, i32, i64, u8, i16, bf16, i8, f32},
torch.mode : {f64, i32, i64, f16, u8, i16, bf16, b8, i8, f32},
torch.multinomial : {f64, bf16, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f64, bf16, f32},
torch.nn.functional.max_pool3d : {f64, f32},
torch.nn.functional.max_pool3d_with_indices : {f64, f32},
torch.nn.functional.max_unpool1d : {f64, f32},
torch.nn.functional.max_unpool2d : {f64, f32},
torch.nn.functional.max_unpool3d : {f64, f32},
torch.nn.functional.multi_margin_loss : {f64, f32},
torch.nn.functional.multilabel_margin_loss : {f64, f32},
torch.nn.functional.one_hot : {i64},
torch.nn.functional.pdist : {f64, f32},
torch.polar : {f64, f32},
torch._segment_reduce : {f64, f16, bf16, f32},
torch.searchsorted : {f64, i32, i64, f16, u8, i16, bf16, i8, f32},
torch.cholesky : {f64, f32, c128, c64},
torch.cholesky_inverse : {f64, f32, c128, c64},
torch.cholesky_solve : {f64, f32, c128, c64},
torch.linalg.eig : {f64, f32, c128, c64},
torch.linalg.eigvals : {f64, f32, c128, c64},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_only_outplace = {
torch.nn.functional.rrelu : {f64, bf16, f32},
}
"""
# This is some sample code for how we could dump these dicts into YAML
# file for easier reading/writing
import yaml
print(yaml.dump(
{resolve_name(k): [dtype_abbrs[d] for d in v]
for k, v in meta_function_expected_failures.items()}, default_flow_style=None))
import sys
sys.exit()
"""
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.functional.tensordot : {bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.inner : {bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.lu_solve : {c128, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_power : {c128, c64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.aminmax : {i8, i64, u8, f64, b8, f32, i32, i16},
torch.cummax : {bf16, i8, i64, u8, f64, b8, f32, i32, i16},
torch.cummin : {bf16, i8, i64, u8, f64, b8, f32, i32, i16},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cdist : {f64, f32},
torch.nanmean : {bf16, f64, f32, f16},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.interpolate : {bf16, f64, f32, u8},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.pinv : {f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vander: {c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
# This fails for arguments dispatched to grid_sampler_3d, but succeeds
# for grid_sampler_2d, so we can't just xfail it
torch.nn.functional.grid_sample : {f64, f32},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
torch.native_batch_norm: {bf16},
torch._native_batch_norm_legit: {bf16},
torch.native_layer_norm: {bf16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
torch.histc: {i16, i32, i64, i8}, # aten::histc, aten::histc.out
torch.kthvalue: {f16}, # aten::kthvalue.values
torch.linalg.householder_product: {f32, f64}, # aten::linalg_householder_product, aten::linalg_householder_product.out
torch.linalg.solve_triangular: {f32, f64}, # aten::linalg_solve_triangular, aten::linalg_solve_triangular.out
torch.logcumsumexp: {bf16, f16}, # aten::_logcumsumexp, aten::_logcumsumexp.out
torch.matrix_exp: {f16}, # aten::linalg_matrix_exp
torch.median: {f16}, # aten::median, aten::median.dim_values
torch.multinomial: {f16}, # aten::multinomial, aten::multinomial.out
torch.nn.functional.gaussian_nll_loss: {f16}, # aten::_local_scalar_dense
torch.nn.functional.max_pool3d: {bf16, f16}, # aten::max_pool3d_with_indices
torch.nn.functional.max_pool3d_with_indices: {bf16, f16}, # aten::max_pool3d_with_indices
torch.nn.functional.max_unpool1d: {f16}, # aten::max_unpool2d
torch.nn.functional.max_unpool2d: {f16}, # aten::max_unpool2d
torch.nn.functional.max_unpool3d: {f16}, # aten::max_unpool3d
torch.nn.functional.multi_margin_loss: {bf16, f16}, # aten::multi_margin_loss
torch.nn.functional.multilabel_margin_loss: {bf16, f16}, # aten::multilabel_margin_loss_forward
torch.ormqr: {f32, f64}, # aten::ormqr, aten::ormqr.out
}
meta_function_device_expected_failures_only_outplace['cuda'] = {
torch.nn.functional.rrelu: {f16}, # aten::rrelu_with_noise
}
meta_function_device_skips['cpu'] = {
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.cummax: {f16},
torch.cummin: {f16},
torch.functional.tensordot: {f16},
torch.inner: {f16},
torch.linalg.matrix_power: {f32, f64},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
# This fails for arguments dispatched to grid_sampler_3d, but succeeds
# for grid_sampler_2d, so we can't just xfail it
torch.nn.functional.grid_sample : {f16},
}
# This is a __torch_function__ mode that, when enabled, interposes every
# Torch API call and runs the operator as normal, and then reruns it
# with meta inputs, and then checks that everything about the output agrees.
# Most of the logic deals with faithfully replicating the original tensor
# as a meta tensor, which is nontrivial because there are a lot of subsystems
# that may potentially be exercised.
#
# That being said, this class is a little overkill for what it is doing in
# this test file (since I could have just inlined __torch_function__ on the
# OpInfo call, and OpInfos generally have very regular inputs), but it will be
# useful for more comprehensive testing e.g., as seen in
# https://github.com/pytorch/pytorch/pull/75994 The big benefit is it is
# A LOT more efficient than torch dispatch mode (at the cost of less coverage)
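# A sketch of how the mode is driven (mirroring the tests further below): the mode is entered
# around an ordinary call, the call runs for real, and the same call is replayed on meta tensors
# and cross-checked, e.g.
#
#   with MetaCrossRefFunctionMode(self, dtype=torch.float32, device='cpu', inplace=False):
#       out = torch.add(x, y)  # real result is returned; the meta re-run is checked on the side
#
# (x and y stand in for OpInfo-generated sample inputs.)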
class MetaCrossRefFunctionMode(torch.overrides.TorchFunctionMode):
test_case: TestCase
device_type: str
dtype: torch.dtype
def __init__(self, test_case, *, device, dtype, inplace):
self.test_case = test_case
self.device_type = torch.device(device).type
self.dtype = dtype
self.inplace = inplace
def __torch_function__(self, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
if (
torch.jit.is_tracing() or isinstance(func, torch.ScriptMethod) or
# meta converter doesn't work correctly when no_dispatch() is on, so
# skip running the crossref test in this case
torch._C._dispatch_tls_local_exclude_set().has(torch._C.DispatchKey.Python)
):
return func(*args, **kwargs)
if self.dtype in meta_function_skips.get(func, set()):
test_expect = TestExpect.SKIP
elif self.dtype in meta_function_device_skips[self.device_type].get(func, set()):
test_expect = TestExpect.SKIP
elif self.dtype in meta_function_expected_failures.get(func, set()):
test_expect = TestExpect.XFAILURE
elif not self.inplace and self.dtype in meta_function_expected_failures_only_outplace.get(func, set()):
test_expect = TestExpect.XFAILURE
elif self.dtype in meta_function_device_expected_failures[self.device_type].get(func, set()):
test_expect = TestExpect.XFAILURE
elif not self.inplace and \
self.dtype in meta_function_device_expected_failures_only_outplace[self.device_type].get(func, set()):
test_expect = TestExpect.XFAILURE
else:
test_expect = TestExpect.SUCCESS
return run_meta_crossref(
self.test_case, test_expect, func, args,
kwargs, dtype=self.dtype, device_type=self.device_type, run_symbolic_meta=False
)
# these always fail
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.cholesky.default : {c64, c128, f64, f32},
aten.cholesky.out : {c64, c128, f64, f32},
aten.cholesky_inverse.default : {c64, c128, f64, f32},
aten.cholesky_inverse.out : {c64, c128, f64, f32},
aten.cholesky_solve.default : {c64, c128, f64, f32},
aten.cholesky_solve.out : {c64, c128, f64, f32},
aten.count_nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.count_nonzero.dim_IntList : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_eig.default : {c64, c128, f64, f32},
aten.linalg_householder_product.default : {c64, c128, f64, f32},
aten.linalg_householder_product.out : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.linalg_matrix_exp.default : {c64, bf16, f32, f64, c128},
aten.linalg_solve_triangular.default : {c64, c128, f64, f32},
aten.linalg_solve_triangular.out : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.ormqr.default : {c64, c128, f64, f32},
aten.ormqr.out : {c64, c128, f64, f32},
aten.polar.out : {f32, f64},
aten.take.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.take.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.tensordot.out : {c64, i8, f64, c128, i64, bf16, f32, i32, i16, u8},
aten.to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.default : {f32, f64}, # Shape of second output depends on data.
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._pdist_forward.default : {f32, f64},
aten._unique2.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.frexp.Tensor : {bf16, f32, f16, f64},
aten.grid_sampler_3d.default : {f32, f64},
aten.histc.default : {bf16, f32, f64},
aten.histc.out : {bf16, f32, f64},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.kthvalue.default : {i8, f64, i64, bf16, f32, i32, i16, u8},
aten.logcumsumexp.default : {bf16, f32, f64, c64, c128},
aten.logcumsumexp.out : {bf16, f32, f64, c64, c128},
aten.max_pool3d_with_indices.default : {f32, f64},
aten.max_unpool2d.default : {f32, f64},
aten.max_unpool3d.default : {f32, f64},
aten.median.default : {i8, f64, i64, bf16, f32, i32, i16, u8},
aten.median.dim : {i8, f64, i64, bf16, f32, i32, i16, u8},
aten.mode.default : {f16, i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.multi_margin_loss.default : {f32, f64},
aten.multilabel_margin_loss_forward.default : {f32, f64},
aten.multinomial.default : {bf16, f32, f64},
aten.multinomial.out : {bf16, f32, f64},
aten.nll_loss2d_forward.default : {bf16, f32, f64},
aten.polar.default : {f32, f64},
aten.rrelu_with_noise.default : {bf16, f32, f64},
aten.searchsorted.Tensor : {f16, i8, f64, i64, bf16, f32, i32, i16, u8},
aten.searchsorted.Tensor_out : {f16, i8, f64, i64, bf16, f32, i32, i16, u8},
aten.segment_reduce.default : {bf16, f32, f16, f64},
aten.unique_consecutive.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.unique_dim.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
# these sometimes pass and sometimes fail
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.aminmax.default: {i64, u8, b8, f32, i8, f64, i16, i32},
aten.cummax.default: {i64, bf16, u8, b8, f32, i8, f64, i16, i32},
aten.cummin.default: {i64, bf16, u8, b8, f32, i8, f64, i16, i32},
aten.linalg_lu_solve.default: {c32, c64, c128},
aten.linalg_lu_solve.out: {c32, c64, c128},
aten.linalg_pinv.atol_rtol_tensor: {f32, f64},
aten.linalg_pinv.atol_rtol_tensor_out: {f32, f64},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
# For CompositeImplicitAutograd functions that fail before hitting the Mode
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
aten.native_batch_norm.default: {bf16},
aten._native_batch_norm_legit.default: {bf16},
aten._native_batch_norm_legit.no_stats: {bf16},
aten.native_layer_norm.default: {bf16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.grid_sampler_3d.default: {f16}, # aten::grid_sampler_3d
aten.histc.default: {i16, i32, i64, i8}, # aten::histc
aten.histc.out: {i16, i32, i64, i8}, # aten::histc.out
aten.kthvalue.default: {f16}, # aten::kthvalue.values
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.linalg_householder_product.default: {f32, f64}, # aten::linalg_householder_product
aten.linalg_householder_product.out: {f32, f64}, # aten::linalg_householder_product.out
aten.linalg_matrix_exp.default: {f16}, # aten::linalg_matrix_exp
aten.linalg_solve_triangular.default: {f32, f64}, # aten::linalg_solve_triangular
aten.linalg_solve_triangular.out: {f32, f64}, # aten::linalg_solve_triangular.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.logcumsumexp.default: {bf16, f16}, # aten::_logcumsumexp
aten.logcumsumexp.out: {bf16, f16}, # aten::_logcumsumexp.out
aten.max_pool3d_with_indices.default: {bf16, f16}, # aten::max_pool3d_with_indices
aten.max_unpool2d.default: {f16}, # aten::max_unpool2d
aten.max_unpool3d.default: {f16}, # aten::max_unpool3d
aten.median.default: {f16}, # aten::median
aten.median.dim: {f16}, # aten::median.dim_values
aten.multi_margin_loss.default: {bf16, f16}, # aten::multi_margin_loss
aten.multilabel_margin_loss_forward.default: {bf16, f16}, # aten::multilabel_margin_loss_forward
aten.multinomial.default: {f16}, # aten::multinomial
aten.multinomial.out: {f16}, # aten::multinomial.out
aten.nll_loss2d_forward.default: {f16}, # aten::nll_loss2d_forward
aten.ormqr.default: {f32, f64}, # aten::ormqr
aten.ormqr.out: {f32, f64}, # aten::ormqr.out
aten.rrelu_with_noise.default: {f16}, # aten::rrelu_with_noise
aten.tensordot.out: {f16}, # aten::tensordot.out
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {f16, f32, f64},
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
aten.cummax.default: {f16},
aten.cummin.default: {f16},
# ROCm stuff; technically this should be expected failure but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
def get_strided_args(args):
def get_strided_variants(t, include_storage_offset=False):
variants = []
# contiguous
variants.append(t)
# transposed
if t.ndim > 1:
perm = list(reversed(range(t.ndim)))
transposed = torch.empty(
t.shape[::-1], device=t.device, dtype=t.dtype, requires_grad=t.requires_grad
).permute(perm).copy_(t)
variants.append(transposed)
# nondense
if t.ndim > 0:
nondense = torch.repeat_interleave(t, 2, dim=-1)[..., ::2]
variants.append(nondense)
# channel_last
if t.ndim == 4:
variants.append(t.contiguous(memory_format=torch.channels_last))
# channel_last_3d
if t.ndim == 5:
variants.append(t.contiguous(memory_format=torch.channels_last_3d))
# storage_offset
if include_storage_offset:
buffer = torch.empty(t.numel() + 1, device=t.device, dtype=t.dtype, requires_grad=t.requires_grad)
buffer = buffer.as_strided(t.shape, t.stride(), storage_offset=1)
buffer.copy_(t)
variants.append(buffer)
return variants
strided_args = []
for arg in args:
if isinstance(arg, torch.Tensor) and not arg.is_sparse_csr and arg.is_contiguous():
strided_arg_variants = get_strided_variants(arg)
else:
strided_arg_variants = [arg]
strided_args.append(strided_arg_variants)
yield from itertools.product(*strided_args)
class MetaCrossRefDispatchMode(torch.utils._python_dispatch.TorchDispatchMode):
test_case: TestCase
device: torch.device
dtype: torch.dtype
def __init__(self, test_case, *, device, dtype, symbolic_meta: bool):
self.test_case = test_case
# save TLS
self.precision = test_case.precision
self.rel_tol = test_case.rel_tol
self.device_type = torch.device(device).type
self.dtype = dtype
self.symbolic_meta = symbolic_meta
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
self.test_case.precision = self.precision
self.test_case.rel_tol = self.rel_tol
if self.dtype in meta_dispatch_skips.get(func, set()):
test_expect = TestExpect.SKIP
elif self.dtype in meta_dispatch_device_skips[self.device_type].get(func, set()):
test_expect = TestExpect.SKIP
elif self.dtype in meta_dispatch_expected_failures.get(func, set()):
test_expect = TestExpect.XFAILURE
elif self.dtype in meta_dispatch_device_expected_failures[self.device_type].get(func, set()):
test_expect = TestExpect.XFAILURE
else:
test_expect = TestExpect.SUCCESS
return run_meta_crossref(
self.test_case,
test_expect,
func,
args,
kwargs,
dtype=self.dtype,
device_type=self.device_type,
run_symbolic_meta=self.symbolic_meta,
)
# NB: we're running these tests only on CUDA because there are some
# inconsistencies between CUDA and CPU, and running on CUDA makes it easier
# to ignore the CPU case when inconsistencies arise. Ideally we deal
# with the inconsistencies but this takes time.
class TestMeta(TestCase):
# Copies inputs to inplace operations to avoid inplace modifications
# to leaves requiring gradient
def _get_safe_inplace(self, inplace_variant):
@wraps(inplace_variant)
def _fn(t, *args, **kwargs):
return inplace_variant(t.clone(), *args, **kwargs)
return _fn
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
@ops(op_db)
def test_meta_outplace(self, device, dtype, op):
# run the OpInfo sample inputs, cross-referencing them with the
# meta implementation and check the results are the same. All
# the heavy lifting happens in MetaCrossRefFunctionMode
func = op.get_op()
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in samples:
args = [sample_input.input] + list(sample_input.args)
kwargs = sample_input.kwargs
with MetaCrossRefFunctionMode(self, dtype=dtype, device=device, inplace=False):
expected = func(*args, **kwargs)
if isinstance(expected, torch.Tensor) and op.supports_out:
func(*args, **kwargs, out=expected)
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
@ops(op_db)
def test_meta_inplace(self, device, dtype, op):
func = op.get_inplace()
if not func:
self.skipTest("No inplace variable for this op")
if func in meta_inplace_skips:
self.skipTest("Skipped")
func = self._get_safe_inplace(func)
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in samples:
if sample_input.broadcasts_input:
continue
args = [sample_input.input] + list(sample_input.args)
kwargs = sample_input.kwargs
with MetaCrossRefFunctionMode(self, dtype=dtype, device=device, inplace=True):
expected = func(*args, **kwargs)
def _run_dispatch_meta_test(self, device, dtype, op, symbolic_meta, inplace, all_stride_variants=False):
if inplace:
func = op.get_inplace()
if not func:
self.skipTest("No inplace variable for this op")
else:
func = op.get_op()
if func in meta_dispatch_early_skips:
self.skipTest("Function is in dispatch early skips")
if inplace:
func = self._get_safe_inplace(func)
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in samples:
if inplace and sample_input.broadcasts_input:
continue
sample_args = [sample_input.input] + list(sample_input.args)
kwargs = sample_input.kwargs
if all_stride_variants and sum(isinstance(arg, torch.Tensor) for arg in sample_args) <= 5:
# test inputs <= 5 tensors to avoid combinatorial explosion
strided_args = get_strided_args(sample_args)
else:
strided_args = [sample_args]
for args in strided_args:
with MetaCrossRefDispatchMode.push(self, dtype=dtype, device=device, symbolic_meta=symbolic_meta):
expected = func(*args, **kwargs)
if not inplace and isinstance(expected, torch.Tensor) and op.supports_out:
func(*args, **kwargs, out=expected)
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
@ops(op_db)
def test_dispatch_meta_outplace(self, device, dtype, op):
self._run_dispatch_meta_test(device, dtype, op, symbolic_meta=False, inplace=False)
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
@ops(op_db)
def test_dispatch_meta_inplace(self, device, dtype, op):
self._run_dispatch_meta_test(device, dtype, op, symbolic_meta=False, inplace=True)
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
@ops(op_db)
def test_dispatch_symbolic_meta_outplace(self, device, dtype, op):
self._run_dispatch_meta_test(device, dtype, op, symbolic_meta=True, inplace=False)
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
@ops(op_db)
def test_dispatch_symbolic_meta_inplace(self, device, dtype, op):
self._run_dispatch_meta_test(device, dtype, op, symbolic_meta=True, inplace=True)
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
# only test one dtype, as output stride behavior is the same for all dtypes
@ops(op_db, dtypes=OpDTypes.any_common_cpu_cuda_one)
# Only test on CUDA, as CUDA kernel's stride is the reference
@onlyCUDA
def test_dispatch_symbolic_meta_outplace_all_strides(self, device, dtype, op):
self._run_dispatch_meta_test(device, dtype, op, symbolic_meta=True, inplace=False, all_stride_variants=True)
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@suppress_warnings
# only test one dtype, as output stride behavior is the same for all dtypes
@ops(op_db, dtypes=OpDTypes.any_common_cpu_cuda_one)
# Only test on CUDA, as CUDA kernel's stride is the reference
@onlyCUDA
def test_dispatch_symbolic_meta_inplace_all_strides(self, device, dtype, op):
self._run_dispatch_meta_test(device, dtype, op, symbolic_meta=True, inplace=True, all_stride_variants=True)
def test_empty_quantized(self):
r = torch.empty(2 ** 52, device='meta', dtype=torch.qint8)
self.assertEqual(r.device.type, 'meta')
def test_huber_loss_backward(self):
inps = [torch.rand(2**52, device='meta') for _ in range(3)]
r = torch.ops.aten.huber_loss_backward(*inps, 0, 1.0)
self.assertEqual(r.device.type, 'meta')
self.assertEqual(r.shape, inps[0].shape)
def test_fill__alias_relationship(self):
inps = torch.rand(2**52, device='meta')
r = torch.ops.aten.fill_(inps, 1.0)
# aten.fill_ returns an alias
self.assertEqual(id(inps), id(r))
# aten.fill returns a new tensor
r2 = torch.ops.aten.fill(inps, 1.0)
self.assertNotEqual(id(inps), id(r2))
def test_meta__fused_moving_avg_obs_fq_helper(self, device):
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
to_meta = MetaConverter()
x = torch.randn(5, 5, device=device)
running_min_op = torch.tensor(float("inf"), device=device)
running_max_op = torch.tensor(float("-inf"), device=device)
avg_const = 0.01
scale = torch.tensor([1.0], device=device)
zero_point = torch.tensor([0], dtype=torch.int, device=device)
mod = FusedMovingAvgObsFakeQuantize()
torch.ao.quantization.enable_fake_quant(mod)
torch.ao.quantization.enable_observer(mod)
mod.to(device)
meta_x = to_meta(x)
args = [
x,
mod.observer_enabled,
mod.fake_quant_enabled,
running_min_op,
running_max_op,
scale,
zero_point,
avg_const,
0,
255,
0,
]
meta_args = args.copy()
meta_args[0] = meta_x
kwargss = [
{},
{"per_row_fake_quant": False, "symmetric_quant": False},
{"per_row_fake_quant": False, "symmetric_quant": True},
]
for kwargs in kwargss:
ref_out = aten._fused_moving_avg_obs_fq_helper.default(*args, **kwargs)
meta_out = aten._fused_moving_avg_obs_fq_helper.default(*meta_args, **kwargs)
self.assertEqual(ref_out[0].size(), meta_out[0].size())
self.assertEqual(ref_out[0].stride(), meta_out[0].stride())
self.assertEqual(ref_out[1].size(), meta_out[1].size())
self.assertEqual(ref_out[1].stride(), meta_out[1].stride())
def test_cdist_forward(self, device):
to_meta = MetaConverter()
x1 = torch.rand([3, 2], device=device)
x2 = torch.rand([2, 2], device=device)
p = 2.0
for compute_mode in (None, 1, 2):
ref = aten._cdist_forward.default(x1, x2, p, compute_mode)
res = aten._cdist_forward.default(to_meta(x1), to_meta(x2), p, compute_mode)
self.assertEqual(res.device.type, 'meta')
self.assertEqual(ref.shape, res.shape)
# the OpInfo test uses aten.fill_, so it does not cover aten.fill
@onlyCUDA
def test_fill_stride(self):
to_meta = MetaConverter()
sample_args = [torch.rand(2, 2, 2, 2), 1.0]
for args in get_strided_args(sample_args):
meta_args = to_meta(args)
ref_out = torch.ops.aten.fill(*args)
meta_out = torch.ops.aten.fill(*meta_args)
self.assertEqual(ref_out.size(), meta_out.size())
self.assertEqual(ref_out.stride(), meta_out.stride())
def test_map_location_deserialize(self):
import io
t = torch.rand(10)
b = io.BytesIO()
torch.save(t, b)
b.seek(0)
r = torch.load(b, map_location=torch.device("meta"))
self.assertEqual(r.device.type, 'meta')
self.assertEqual(r.shape, t.shape)
self.assertEqual(r.dtype, t.dtype)
self.assertEqual(r.storage().data_ptr(), 0)
instantiate_device_type_tests(TestMeta, globals())
def print_op_str_if_not_supported(op_str):
op = OperatorName.parse(op_str)
packet = getattr(torch.ops.aten, str(op.name))
overload = getattr(packet, op.overload_name if op.overload_name else "default")
if any(overload in d for d in [meta_dispatch_skips, meta_dispatch_device_skips['cuda']]):
print(f"{overload} # SKIP")
if any(overload in d for d in [meta_dispatch_expected_failures, meta_dispatch_device_expected_failures['cuda']]):
print(overload)
if __name__ == "__main__":
COMPARE_XLA = os.getenv('PYTORCH_COMPARE_XLA', None)
if COMPARE_XLA is not None:
with open(COMPARE_XLA, "r") as f:
d = yaml.load(f, Loader=YamlLoader)
ops = d.get("full_codegen", []) + d.get("supported", []) + d.get("autograd", [])
for op_str in ops:
print_op_str_if_not_supported(op_str)
sys.exit(0)
COMPARE_TEXT = os.getenv('PYTORCH_COMPARE_TEXT', None)
if COMPARE_TEXT is not None:
with open(COMPARE_TEXT, "r") as f:
for op_str in f:
print_op_str_if_not_supported(op_str.strip())
sys.exit(0)
run_tests()
|
[
"[email protected]"
] | |
0c49b5725258db042a42850d57bee23969d2e342
|
7bdb0e12359162c5dd2bddc58d2ca1d234fb29d2
|
/trunk/playground/intern/2009/Pakito/pakito/gui/pspecWidget/dialogs/comarDialog.py
|
caece273e400a41ea31a7a0ae7e6d7bb1d1ad9c1
|
[] |
no_license
|
hitaf/Pardus-2011-Svn-
|
f40776b0bba87d473aac45001c4b946211cbc7bc
|
16df30ab9c6ce6c4896826814e34cfeadad1be09
|
refs/heads/master
| 2021-01-10T19:48:33.836038 | 2012-08-13T22:57:37 | 2012-08-13T22:57:37 | 5,401,998 | 0 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,347 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from qt import *
from kdecore import KGlobal, KIcon, i18n
from kfile import KFileDialog
import kdedesigner
import os
from pakito.gui.pspecWidget.dialogs.comarDialogUI import COMARDialogUI
class COMARDialog(COMARDialogUI):
def __init__(self, parent = None, comar = None, name= None):
COMARDialogUI.__init__(self, parent, name)
self.realLoc = ""
il = KGlobal.iconLoader()
self.pbFile.setIconSet(il.loadIconSet("fileopen", KIcon.Toolbar))
self.connect(self.btnOk, SIGNAL("clicked()"), self, SLOT("accept()"))
self.connect(self.btnCancel, SIGNAL("clicked()"), self, SLOT("reject()"))
self.connect(self.pbFile, SIGNAL("clicked()"), self.slotFile)
if comar:
self.cbProvides.setCurrentText(comar[0])
self.leFile.setText(comar[1])
def slotFile(self):
self.realLoc = KFileDialog.getOpenFileName(QString.null, QString.null, self, i18n("Select COMAR Script"))
if not self.realLoc or str(self.realLoc).strip() == "":
return
self.leFile.setText(os.path.split(str(self.realLoc))[1])
def getResult(self):
res = []
res.append(str(self.cbProvides.currentText()))
res.append(str(self.leFile.text()))
res.append(str(self.realLoc))
return res
|
[
"fatih@dhcppc1.(none)"
] |
fatih@dhcppc1.(none)
|
717d84fd828878b75823d79be5f00d7fa9321862
|
1bdaf97709a1d885e473c15d5b1ef26f8d086c44
|
/pipeline_02_geocode_addresses.py
|
4992b58fa3233241ce374dffddb79489e0a6c677
|
[] |
no_license
|
austinlwheat/lab-04-pipelines-and-web-services
|
773924564552203d3efa59a9c808cf1646f1ccec
|
faa6d6a611e2da8c6a33ef0ab6d98cdcbebf12f9
|
refs/heads/main
| 2023-09-01T07:35:40.870284 | 2021-10-13T14:01:00 | 2021-10-13T14:01:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 998 |
py
|
"""
Extract Process #2
Use the Census Geocoding API to geocode the addresses in the file that was
extracted in step one. The documentation for the API is available at:
https://geocoding.geo.census.gov/geocoder/Geocoding_Services_API.pdf
I encourage you to read it for details, but the gist is:
- You can geocode a batch of addresses by sending a POST request to
https://geocoding.geo.census.gov/geocoder/geographies/addressbatch
- The request should contain the following:
1. A parameter named "benchmark" (set the value to "Public_AR_Current")
2. A parameter named "vintage" (set the value to "Current_Current")
3. A file labeled "addressFile" with the format described at
https://www.census.gov/programs-surveys/locations/technical-documentation/complete-technical-documentation/census-geocoder.html#ti103804043
(the file you downloaded in the previous step should conform to that
format).
Save the geocoded data to a new file.
"""
import requests
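# A minimal sketch of the batch request described in the docstring above.
# The input/output file names ("addresses.csv", "geocoded_addresses.csv") and
# the helper name geocode_addresses are placeholders for illustration; only
# the endpoint and the "benchmark", "vintage", and "addressFile" fields come
# from the assignment text.
GEOCODE_URL = 'https://geocoding.geo.census.gov/geocoder/geographies/addressbatch'

def geocode_addresses(in_path='addresses.csv', out_path='geocoded_addresses.csv'):
    with open(in_path, 'rb') as address_file:
        # benchmark/vintage go in the form body; the CSV goes in the file part.
        response = requests.post(
            GEOCODE_URL,
            data={'benchmark': 'Public_AR_Current', 'vintage': 'Current_Current'},
            files={'addressFile': address_file},
        )
    response.raise_for_status()
    # The service responds with CSV text; save it for the next pipeline step.
    with open(out_path, 'w') as out_file:
        out_file.write(response.text)

if __name__ == '__main__':
    geocode_addresses()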
|
[
"[email protected]"
] | |
90b2b1995928dbf28964d7d769164ca293be561d
|
70f41a06d733e680af3bb1f00d8ff33574f4f4bb
|
/src/fh_tools/language_test/DeepLearningNotes/Note-6 A3CNet/Note-6.1 simple A3CNet/sonnet/python/modules/nets/convnet_test.py
|
bd86d1ef3a653e0c617c3bc416ba158e04d1e949
|
[
"MIT"
] |
permissive
|
mmmaaaggg/RefUtils
|
209f7136acc63c880e60974c347e19adc4c7ac2e
|
f127658e75b5c52b4db105a22176ee0931ceacae
|
refs/heads/master
| 2021-06-11T16:06:06.245275 | 2021-03-10T05:32:14 | 2021-03-10T05:32:14 | 139,413,962 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 30,348 |
py
|
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Test sonnet.python.modules.nets.convnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from functools import partial
import itertools
# Dependency imports
import numpy as np
import sonnet as snt
from sonnet.python.modules.conv import _fill_shape as fill_shape
from sonnet.testing import parameterized
import tensorflow as tf
from tensorflow.python.ops import variables
class SharedConvNets2DTest(parameterized.ParameterizedTestCase,
tf.test.TestCase):
def setUp(self):
super(SharedConvNets2DTest, self).setUp()
self.output_channels = [2, 3, 4]
self.kernel_shapes = [[3, 3]]
self.strides = [1]
self.paddings = [snt.SAME]
@parameterized.NamedParameters(
("ConvNet2D", snt.nets.ConvNet2D),
("ConvNet2DTranspose", partial(snt.nets.ConvNet2DTranspose,
output_shapes=[[100, 100]])))
def testName(self, module):
unique_name = "unique_name"
with tf.variable_scope("scope"):
net = module(name=unique_name,
output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings)
self.assertEqual(net.scope_name, "scope/" + unique_name)
self.assertEqual(net.module_name, unique_name)
@parameterized.NamedParameters(
("ConvNet2D", snt.nets.ConvNet2D),
("ConvNet2DTranspose", partial(snt.nets.ConvNet2DTranspose,
output_shapes=[[100, 100]])))
def testConstructor(self, module):
with self.assertRaisesRegexp(ValueError,
"output_channels must not be empty"):
module(output_channels=[],
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings)
with self.assertRaisesRegexp(ValueError,
"kernel_shapes must be of length 1 or *"):
module(output_channels=self.output_channels,
kernel_shapes=[],
strides=self.strides,
paddings=self.paddings)
with self.assertRaisesRegexp(ValueError,
"kernel_shapes must be of length 1 or *"):
module(output_channels=self.output_channels,
kernel_shapes=[1, 2],
strides=self.strides,
paddings=self.paddings)
with self.assertRaisesRegexp(ValueError,
"strides must be of length 1 or *"):
module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=[],
paddings=self.paddings)
with self.assertRaisesRegexp(ValueError,
"strides must be of length 1 or *"):
module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=[1, 1],
paddings=self.paddings)
with self.assertRaisesRegexp(ValueError,
"paddings must be of length 1 or *"):
module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.paddings,
paddings=[])
with self.assertRaisesRegexp(ValueError,
"paddings must be of length 1 or *"):
module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=[snt.SAME, snt.SAME])
with self.assertRaisesRegexp(KeyError,
"Invalid initializer keys.*"):
module(
output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
initializers={"not_w": tf.truncated_normal_initializer(stddev=1.0)})
with self.assertRaisesRegexp(TypeError,
"Initializer for 'w' is not a callable "
"function or dictionary"):
module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
initializers={"w": tf.zeros([1, 2, 3])})
with self.assertRaisesRegexp(KeyError,
"Invalid regularizer keys.*"):
module(
output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
regularizers={"not_w": tf.contrib.layers.l1_regularizer(scale=0.5)})
with self.assertRaisesRegexp(TypeError,
"Regularizer for 'w' is not a callable "
"function or dictionary"):
module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
regularizers={"w": tf.zeros([1, 2, 3])})
with self.assertRaisesRegexp(TypeError,
"Input 'activation' must be callable"):
module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
activation="not_a_function")
err = "output_channels must be iterable"
with self.assertRaisesRegexp(TypeError, err):
module(output_channels=42,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings)
err = "kernel_shapes must be iterable"
with self.assertRaisesRegexp(TypeError, err):
module(output_channels=self.output_channels,
kernel_shapes=None,
strides=self.strides,
paddings=self.paddings)
err = "strides must be iterable"
with self.assertRaisesRegexp(TypeError, err):
module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=True,
paddings=self.paddings)
err = "paddings must be iterable"
with self.assertRaisesRegexp(TypeError, err):
module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=lambda x: x + 42)
err = "use_bias must be either a bool or an iterable"
with self.assertRaisesRegexp(TypeError, err):
module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
use_bias=2)
@parameterized.NamedParameters(
("ConvNet2D", snt.nets.ConvNet2D),
("ConvNet2DTranspose",
partial(snt.nets.ConvNet2DTranspose,
output_shapes=[[100, 100]])))
def testBatchNormBuildFlag(self, module):
model = module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
use_batch_norm=True)
self.assertTrue(model.use_batch_norm)
input_to_net = tf.placeholder(tf.float32, shape=(1, 100, 100, 3))
# Check that an error is raised if we don't specify the is_training flag
err = "is_training flag must be explicitly specified"
with self.assertRaisesRegexp(ValueError, err):
model(input_to_net)
@parameterized.NamedParameters(
("ConvNet2D", snt.nets.ConvNet2D),
("ConvNet2DTranspose",
partial(snt.nets.ConvNet2DTranspose,
output_shapes=[[100, 100]])))
def testBatchNorm(self, module):
model = module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
use_batch_norm=True)
self.assertTrue(model.use_batch_norm)
input_to_net = tf.placeholder(tf.float32, shape=(1, 100, 100, 3))
# Check Tensorflow flags work
is_training = tf.placeholder(tf.bool)
test_local_stats = tf.placeholder(tf.bool)
model(input_to_net,
is_training=is_training,
test_local_stats=test_local_stats)
# Check Python is_training flag works
model(input_to_net, is_training=False, test_local_stats=False)
model_variables = model.get_variables()
self.assertEqual(
len(model_variables),
len(self.output_channels) * 3 - 1)
# Check that the appropriate moving statistics variables have been created.
self.assertTrue(
any("moving_variance" in var.name
for var in tf.global_variables()))
self.assertTrue(
any("moving_mean" in var.name
for var in tf.global_variables()))
@parameterized.NamedParameters(
("ConvNet2D", snt.nets.ConvNet2D),
("ConvNet2DTranspose", partial(snt.nets.ConvNet2DTranspose,
output_shapes=[[100, 100]])))
def testBatchNormConfig(self, module):
batch_norm_config = {
"scale": True,
}
model = module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
use_batch_norm=True,
batch_norm_config=batch_norm_config)
input_to_net = tf.placeholder(tf.float32, shape=(1, 100, 100, 3))
model(input_to_net, is_training=True)
model_variables = model.get_variables()
self.assertEqual(
len(model_variables),
len(self.output_channels) * 4 - 2)
@parameterized.NamedParameters(
("ConvNet2D", snt.nets.ConvNet2D),
("ConvNet2DTranspose", partial(snt.nets.ConvNet2DTranspose,
output_shapes=[[100, 100]])))
def testNoBias(self, module):
model = module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
use_bias=False)
self.assertEqual(model.use_bias, (False,) * len(self.output_channels))
input_to_net = tf.placeholder(tf.float32, shape=(1, 100, 100, 3))
model(input_to_net)
model_variables = model.get_variables()
self.assertEqual(
len(model_variables),
len(self.output_channels))
@parameterized.NamedParameters(
("ConvNet2D", snt.nets.ConvNet2D),
("ConvNet2DTranspose", partial(snt.nets.ConvNet2DTranspose,
output_shapes=[[100, 100]])))
def testNoBiasIterable(self, module):
use_bias = (True,) * (len(self.output_channels) - 1) + (False,)
model = module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
use_bias=use_bias)
actual_use_biases = tuple(layer.has_bias for layer in model.layers)
self.assertEqual(model.use_bias, actual_use_biases)
self.assertEqual(use_bias, actual_use_biases)
model_transpose = model.transpose()
actual_use_biases = tuple(layer.has_bias
for layer in model_transpose.layers)
self.assertEqual(model_transpose.use_bias, actual_use_biases)
self.assertEqual(tuple(reversed(use_bias)), actual_use_biases)
@parameterized.NamedParameters(
("ConvNet2DNoBias", snt.nets.ConvNet2D, False),
("ConvNet2DBias", snt.nets.ConvNet2D, True),
("ConvNet2DTransposeNoBias", partial(snt.nets.ConvNet2DTranspose,
output_shapes=[[100, 100]]), False),
("ConvNet2DTransposeBias", partial(snt.nets.ConvNet2DTranspose,
output_shapes=[[100, 100]]), True))
def testRegularizersInRegularizationLosses(self, module, use_bias):
if use_bias:
regularizers = {"w": tf.contrib.layers.l1_regularizer(scale=0.5),
"b": tf.contrib.layers.l2_regularizer(scale=0.5)}
else:
regularizers = {"w": tf.contrib.layers.l1_regularizer(scale=0.5)}
model = module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
use_bias=use_bias,
regularizers=regularizers)
input_to_net = tf.placeholder(tf.float32, shape=(1, 100, 100, 3))
model(input_to_net)
graph_regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
self.assertRegexpMatches(graph_regularizers[0].name, ".*l1_regularizer.*")
if use_bias:
self.assertRegexpMatches(graph_regularizers[1].name, ".*l2_regularizer.*")
@parameterized.NamedParameters(
("ConvNet2D", snt.nets.ConvNet2D, False),
("ConvNet2DFinal", snt.nets.ConvNet2D, True),
("ConvNet2DTranspose",
partial(snt.nets.ConvNet2DTranspose, output_shapes=[[100, 100]]),
False),
("ConvNet2DTransposeFinal",
partial(snt.nets.ConvNet2DTranspose, output_shapes=[[100, 100]]),
True))
def testActivateFinal(self, module, activate_final):
model = module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
activate_final=activate_final,
use_batch_norm=True,
use_bias=False)
self.assertEqual(activate_final, model.activate_final)
input_to_net = tf.placeholder(tf.float32, shape=(1, 100, 100, 3))
model(input_to_net, is_training=True)
model_variables = model.get_variables()
# Batch norm variable missing for final activation
if activate_final:
self.assertEqual(len(model_variables), len(self.output_channels) * 2)
else:
self.assertEqual(len(model_variables), len(self.output_channels) * 2 - 1)
# Test transpose method's activate_final arg.
transposed_model_activate_final = model.transpose(activate_final=True)
transposed_model_no_activate_final = model.transpose(activate_final=False)
transposed_model_inherit_activate_final = model.transpose()
self.assertEqual(True, transposed_model_activate_final.activate_final)
self.assertEqual(False, transposed_model_no_activate_final.activate_final)
self.assertEqual(model.activate_final,
transposed_model_inherit_activate_final.activate_final)
@parameterized.Parameters(
*itertools.product(
[snt.nets.ConvNet2D,
partial(snt.nets.ConvNet2DTranspose, output_shapes=[[100, 100]])],
["kernel_shapes", "strides", "paddings", "activation", "initializers",
"partitioners", "regularizers", "use_bias", "batch_norm_config"]))
def testTransposeDefaultParameter(self, module, param_name):
"""Tests if .transpose correctly chooses the default parameters.
Args:
module: The conv net class.
param_name: The name of the parameter to test.
"""
# For these parameters, the expected values are their reversed values
expected_reversed = ["kernel_shapes", "strides", "paddings", "use_bias"]
# We have to choose asymmetric parameter values here in order for the test
# to be effective. This is why we don't take the default ones.
model = module(output_channels=[2, 3, 4],
kernel_shapes=[[3, 3], [5, 5], [7, 7]],
strides=[[1, 1], [2, 2], [3, 3]],
paddings=[snt.SAME, snt.SAME, snt.VALID],
use_batch_norm=[True, True, False],
use_bias=[True, True, False])
# We don't pass the parameter on to .transpose, None should be the default
transpose_model = model.transpose()
if param_name in expected_reversed:
self.assertItemsEqual(reversed(getattr(model, param_name)),
getattr(transpose_model, param_name))
else:
self.assertEqual(getattr(model, param_name),
getattr(transpose_model, param_name))
@parameterized.Parameters(
*itertools.product(
[snt.nets.ConvNet2D,
partial(snt.nets.ConvNet2DTranspose, output_shapes=[[100, 100]])],
[("kernel_shapes", [[3, 3], [3, 3], [3, 3]]),
("strides", [[1, 1], [1, 1], [1, 1]]),
("paddings", [snt.SAME, snt.SAME, snt.SAME]),
("activation", tf.nn.tanh),
("initializers", {}),
("partitioners", {}),
("regularizers", {}),
("use_bias", [True, True, True]),
("batch_norm_config", {"scale": True})]))
def testTransposePassThroughParameter(self, module, param_name_and_value):
"""Tests if .transpose correctly passes through the given parameters.
Args:
module: The conv net class.
param_name_and_value: Tuple consisting of the parameter name and value.
"""
param_name, param_value = param_name_and_value
# The given parameter values are all for three-layer networks. Changing
# the default parameters would therefore break this test. Thus, we choose
# fixed/independent parameters.
model = module(output_channels=[2, 3, 4],
kernel_shapes=[[3, 3], [5, 5], [7, 7]],
strides=[[1, 1], [2, 2], [3, 3]],
paddings=[snt.SAME, snt.SAME, snt.VALID],
use_batch_norm=[True, True, False],
use_bias=[True, True, False])
transpose_model = model.transpose(**{param_name: param_value})
if isinstance(param_value, collections.Iterable):
self.assertItemsEqual(param_value, getattr(transpose_model, param_name))
else:
self.assertEqual(param_value, getattr(transpose_model, param_name))
class ConvNet2DTest(parameterized.ParameterizedTestCase,
tf.test.TestCase):
def setUp(self):
super(ConvNet2DTest, self).setUp()
self.output_channels = [2, 3, 4]
self.kernel_shapes = [[3, 3]]
self.strides = [1]
self.paddings = [snt.SAME]
def testConstructor(self):
net = snt.nets.ConvNet2D(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings)
self.assertEqual(len(net.layers), len(self.output_channels))
for i, layer in enumerate(net.layers):
self.assertEqual(layer.output_channels, self.output_channels[i])
self.assertEqual(layer.stride,
(1,) + fill_shape(self.strides[0], 2) + (1,))
self.assertEqual(layer.kernel_shape, fill_shape(self.kernel_shapes[0], 2))
self.assertEqual(layer.padding, self.paddings[0])
self.assertEqual(layer.output_channels, net.output_channels[i])
self.assertEqual(layer.stride,
(1,) + fill_shape(net.strides[i], 2) + (1,))
self.assertEqual(layer.kernel_shape, fill_shape(net.kernel_shapes[i], 2))
self.assertEqual(layer.padding, net.paddings[i])
def testTranspose(self):
with tf.variable_scope("scope1"):
net = snt.nets.ConvNet2D(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
name="conv_net_2d")
err = "Iterable output_channels length must match the number of layers"
with self.assertRaisesRegexp(ValueError, err):
net.transpose(output_channels=[42] * 18)
with tf.variable_scope("scope2"):
net_transpose = net.transpose()
self.assertEqual("scope1/conv_net_2d", net.scope_name)
self.assertEqual("conv_net_2d", net.module_name)
self.assertEqual("scope2/conv_net_2d_transpose", net_transpose.scope_name)
self.assertEqual("conv_net_2d_transpose", net_transpose.module_name)
input_shape = [10, 100, 100, 3]
input_to_net = tf.placeholder(tf.float32, shape=input_shape)
# Tests that trying to connect the transposed network before connecting the
# original nets raises an error. The reason is that the output_shapes and
# output_channels are lazily evaluated and not yet known.
with self.assertRaisesRegexp(snt.Error,
"Variables in {} not instantiated yet, "
"__call__ the module first.".format(
net.layers[-1].scope_name)):
net_transpose(input_to_net)
net_transpose = net.transpose(name="another_net_transpose")
net_out = net(input_to_net, is_training=True)
self.assertEqual(net.input_shape, tuple(input_shape))
net_transposed_output = net_transpose(net_out)
self.assertEqual(net_transposed_output.get_shape(),
input_to_net.get_shape())
for i in range(len(net.layers)):
self.assertEqual(net_transpose.layers[i].output_shape,
net.layers[-1 - i].input_shape[1:-1])
self.assertEqual(net_transpose.layers[i].output_channels,
net.layers[-1 - i].input_shape[-1])
data = np.random.rand(*input_shape)
init = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
sess.run(net_transposed_output, feed_dict={input_to_net: data})
def testVariableMap(self):
"""Tests for regressions in variable names."""
use_bias = True
use_batch_norm = True
var_names_w = [
u"conv_net_2d/conv_2d_0/w:0",
u"conv_net_2d/conv_2d_1/w:0",
u"conv_net_2d/conv_2d_2/w:0",
]
var_names_b = [
u"conv_net_2d/conv_2d_0/b:0",
u"conv_net_2d/conv_2d_1/b:0",
u"conv_net_2d/conv_2d_2/b:0",
]
var_names_bn = [
u"conv_net_2d/batch_norm_0/beta:0",
u"conv_net_2d/batch_norm_1/beta:0",
]
correct_variable_names = set(var_names_w + var_names_b + var_names_bn)
module = snt.nets.ConvNet2D(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
use_bias=use_bias,
use_batch_norm=use_batch_norm)
input_shape = [10, 100, 100, 3]
input_to_net = tf.placeholder(tf.float32, shape=input_shape)
_ = module(input_to_net, is_training=True)
variable_names = [var.name for var in module.get_variables()]
self.assertEqual(set(variable_names), correct_variable_names)
def testPartitioners(self):
partitioners = {
"w": tf.variable_axis_size_partitioner(10),
"b": tf.variable_axis_size_partitioner(8),
}
module = snt.nets.ConvNet2D(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
partitioners=partitioners)
input_shape = [10, 100, 100, 3]
input_to_net = tf.placeholder(tf.float32, shape=input_shape)
_ = module(input_to_net)
for layer in module._layers:
self.assertEqual(type(layer.w), variables.PartitionedVariable)
self.assertEqual(type(layer.b), variables.PartitionedVariable)
class ConvNet2DTransposeTest(tf.test.TestCase):
def setUp(self):
super(ConvNet2DTransposeTest, self).setUp()
self.output_channels = [2, 3, 4]
self.output_shapes = [[100, 100]]
self.kernel_shapes = [[3, 3]]
self.strides = [1]
self.paddings = [snt.SAME]
def testConstructor(self):
with self.assertRaisesRegexp(ValueError,
"output_shapes must be of length 1 or *"):
snt.nets.ConvNet2DTranspose(output_channels=self.output_channels,
output_shapes=[],
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings)
with self.assertRaisesRegexp(ValueError,
"output_shapes must be of length 1 or *"):
snt.nets.ConvNet2DTranspose(output_channels=self.output_channels,
output_shapes=[[1, 2], [1, 2]],
kernel_shapes=self.kernel_shapes,
strides=[],
paddings=self.paddings)
with self.assertRaisesRegexp(KeyError,
"Invalid initializer keys.*"):
snt.nets.ConvNet2DTranspose(
output_channels=self.output_channels,
output_shapes=self.output_shapes,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
initializers={"not_w": tf.truncated_normal_initializer(stddev=1.0)})
net = snt.nets.ConvNet2DTranspose(output_channels=self.output_channels,
output_shapes=self.output_shapes,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings)
self.assertEqual(net.output_shapes,
tuple(self.output_shapes) * len(self.output_channels))
self.assertEqual(len(net.layers), len(self.output_channels))
for i, layer in enumerate(net.layers):
self.assertEqual(layer.output_channels, self.output_channels[i])
self.assertEqual(layer.stride,
(1,) + fill_shape(self.strides[0], 2) + (1,))
self.assertEqual(layer.kernel_shape, fill_shape(self.kernel_shapes[0], 2))
self.assertEqual(layer.padding, self.paddings[0])
self.assertEqual(layer.output_channels, net.output_channels[i])
self.assertEqual(layer.stride,
(1,) + fill_shape(net.strides[i], 2) + (1,))
self.assertEqual(layer.kernel_shape, fill_shape(net.kernel_shapes[i], 2))
self.assertEqual(layer.padding, net.paddings[i])
with self.assertRaisesRegexp(TypeError, "output_shapes must be iterable"):
snt.nets.ConvNet2DTranspose(output_channels=self.output_channels,
output_shapes=False,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings)
def testTranspose(self):
net = snt.nets.ConvNet2DTranspose(output_channels=self.output_channels,
output_shapes=self.output_shapes,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings)
err = "Iterable output_channels length must match the number of layers"
with self.assertRaisesRegexp(ValueError, err):
net.transpose(output_channels=[42] * 18)
net_transpose = net.transpose()
input_shape = [10, 100, 100, 3]
input_to_net = tf.placeholder(tf.float32, shape=input_shape)
# Tests that trying to connect the transposed network before connecting the
# original nets raises an error. The reason is that the output_shapes and
# output_channels are lazily evaluated and not yet known.
with self.assertRaisesRegexp(snt.Error,
"Variables in {} not instantiated yet, "
"__call__ the module first.".format(
net.layers[-1].scope_name)):
net_transpose(input_to_net)
net_transpose = net.transpose(name="another_net_transpose")
net_out = net(input_to_net, is_training=True)
net_transposed_output = net_transpose(net_out)
self.assertEqual(net_transposed_output.get_shape(),
input_to_net.get_shape())
for i in range(len(net.layers)):
self.assertEqual(net_transpose.layers[i].input_shape[1:-1],
net.layers[-1 - i].output_shape)
self.assertEqual(net_transpose.layers[i].output_channels,
net.layers[-1 - i].input_shape[-1])
data = np.random.rand(*input_shape)
init = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
sess.run(net_transposed_output, feed_dict={input_to_net: data})
def testPartitioners(self):
partitioners = {
"w": tf.variable_axis_size_partitioner(10),
"b": tf.variable_axis_size_partitioner(8),
}
module = snt.nets.ConvNet2DTranspose(output_channels=self.output_channels,
output_shapes=self.output_shapes,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
partitioners=partitioners)
input_shape = [10, 100, 100, 3]
input_to_net = tf.placeholder(tf.float32, shape=input_shape)
_ = module(input_to_net)
for layer in module._layers:
self.assertEqual(type(layer.w), variables.PartitionedVariable)
self.assertEqual(type(layer.b), variables.PartitionedVariable)
if __name__ == "__main__":
tf.test.main()
|
[
"[email protected]"
] | |
fba8b0cb1fe9b1aef56fa39569578536094bec5b
|
d85043257d93d35ac5d20fdb784656a83e141350
|
/old/stm312_test/dummy_import.py
|
5a533c4fc4f6b8b716042fce16cce40ea9be03c4
|
[] |
no_license
|
CINF/cinfdata_test
|
ca7ae74c93afb2860c2fa24d6589e25ed5c7d38a
|
159c5e7f4727318a6b7b78dcce8f0ea57353abdb
|
refs/heads/master
| 2021-01-01T04:26:36.569960 | 2016-05-13T11:10:43 | 2016-05-13T11:10:43 | 58,448,151 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 153 |
py
|
#!/usr/bin/python
import sys
sys.path.insert(0, '/var/www/cinfdata/')
print "I'm the import testing script"
print 'Start'
import numpy
print 'Success'
|
[
"[email protected]"
] | |
d91c223d0e292d92c905617d1590fb3557a187e7
|
0c7fb9f937fa81395a6b0518811e91361e838b03
|
/openpyxl/worksheet/tests/test_cell_range.py
|
5171baffcf1db340113e114986a28921b654545b
|
[
"BSD-3-Clause"
] |
permissive
|
Solly64/py4e-data
|
f04a76a643f345cf3b23bd54fccc7a153df784d5
|
e40378ae7d68b39be472cae01aa3bfeb87adbe45
|
refs/heads/master
| 2020-07-03T08:31:28.291269 | 2019-08-12T03:41:20 | 2019-08-12T03:41:20 | 201,852,353 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,433 |
py
|
from __future__ import absolute_import
# Copyright (c) 2010-2018 openpyxl
import pytest
from copy import copy
@pytest.fixture
def CellRange():
from ..cell_range import CellRange
return CellRange
class TestCellRange:
def test_ctor(self, CellRange):
cr = CellRange(min_col=1, min_row=1, max_col=5, max_row=7)
assert (cr.min_col, cr.min_row, cr.max_col, cr.max_row) == (1, 1, 5, 7)
assert cr.coord == "A1:E7"
def test_max_row_too_small(self, CellRange):
with pytest.raises(ValueError):
cr = CellRange("A4:B1")
def test_max_col_too_small(self, CellRange):
with pytest.raises(ValueError):
cr = CellRange("F1:B5")
@pytest.mark.parametrize("range_string, title, coord",
[
("Sheet1!$A$1:B4", "Sheet1", "A1:B4"),
("A1:B4", None, "A1:B4"),
]
)
def test_from_string(self, CellRange, range_string, title, coord):
cr = CellRange(range_string)
assert cr.coord == coord
assert cr.title == title
def test_repr(self, CellRange):
cr = CellRange("Sheet1!$A$1:B4")
assert repr(cr) == "<CellRange 'Sheet1'!A1:B4>"
def test_str(self, CellRange):
cr = CellRange("'Sheet 1'!$A$1:B4")
assert str(cr) == "'Sheet 1'!A1:B4"
cr = CellRange("A1")
assert str(cr) == "A1"
def test_eq(self, CellRange):
cr1 = CellRange("'Sheet 1'!$A$1:B4")
cr2 = CellRange("'Sheet 1'!$A$1:B4")
assert cr1 == cr2
def test_ne(self, CellRange):
cr1 = CellRange("'Sheet 1'!$A$1:B4")
cr2 = CellRange("Sheet1!$A$1:B4")
assert cr1 != cr2
def test_copy(self, CellRange):
cr1 = CellRange("Sheet1!$A$1:B4")
cr2 = copy(cr1)
assert cr2 is not cr1
def test_shift(self, CellRange):
cr = CellRange("A1:B4")
cr.shift(1, 2)
assert cr.coord == "B3:C6"
def test_shift_negative(self, CellRange):
cr = CellRange("A1:B4")
with pytest.raises(ValueError):
cr.shift(-1, 2)
def test_union(self, CellRange):
cr1 = CellRange("A1:D4")
cr2 = CellRange("E5:K10")
cr3 = cr1.union(cr2)
assert cr3.bounds == (1, 1, 11, 10)
def test_no_union(self, CellRange):
cr1 = CellRange("Sheet1!A1:D4")
cr2 = CellRange("Sheet2!E5:K10")
with pytest.raises(ValueError):
cr3 = cr1.union(cr2)
def test_expand(self, CellRange):
cr = CellRange("E5:K10")
cr.expand(right=2, down=2, left=1, up=2)
assert cr.coord == "D3:M12"
def test_shrink(self, CellRange):
cr = CellRange("E5:K10")
cr.shrink(right=2, bottom=2, left=1, top=2)
assert cr.coord == "F7:I8"
def test_size(self, CellRange):
cr = CellRange("E5:K10")
assert cr.size == {'columns':7, 'rows':6}
def test_intersection(self, CellRange):
cr1 = CellRange("E5:K10")
cr2 = CellRange("D2:F7")
cr3 = cr1.intersection(cr2)
assert cr3.coord == "E5:F7"
def test_no_intersection(self, CellRange):
cr1 = CellRange("A1:F5")
cr2 = CellRange("M5:P17")
with pytest.raises(ValueError):
assert cr1 & cr2 == CellRange("A1")
def test_isdisjoint(self, CellRange):
cr1 = CellRange("E5:K10")
cr2 = CellRange("A1:C12")
assert cr1.isdisjoint(cr2) is True
def test_is_not_disjoint(self, CellRange):
cr1 = CellRange("E5:K10")
cr2 = CellRange("D2:F7")
assert cr1.isdisjoint(cr2) is False
def test_issubset(self, CellRange):
cr1 = CellRange("E5:K10")
cr2 = CellRange("F6:J8")
assert cr2.issubset(cr1) is True
def test_is_not_subset(self, CellRange):
cr1 = CellRange("E5:K10")
cr2 = CellRange("D4:M8")
assert cr2.issubset(cr1) is False
def test_issuperset(self, CellRange):
cr1 = CellRange("E5:K10")
cr2 = CellRange("F6:J8")
assert cr1.issuperset(cr2) is True
def test_is_not_superset(self, CellRange):
cr1 = CellRange("E5:K10")
cr2 = CellRange("A1:D4")
assert cr1.issuperset(cr2) is False
def test_contains(self, CellRange):
cr = CellRange("A1:F10")
assert "B3" in cr
def test_doesnt_contain(self, CellRange):
cr = CellRange("A1:F10")
assert not "M1" in cr
@pytest.mark.parametrize("r1, r2, expected",
[
("Sheet1!A1:B4", "Sheet1!D5:E5", None),
("Sheet1!A1:B4", "D5:E5", None),
]
)
def test_check_title(self, CellRange,r1, r2, expected):
cr1 = CellRange(r1)
cr2 = CellRange(r2)
assert cr1._check_title(cr2) is expected
@pytest.mark.parametrize("r1, r2",
[
("A1:B4", "Sheet1!D5:E5"),
("Sheet1!A1:B4", "Sheet2!D5:E5"),
]
)
def test_different_worksheets(self, CellRange, r1, r2):
cr1 = CellRange(r1)
cr2 = CellRange(r2)
with pytest.raises(ValueError):
cr1._check_title(cr2)
def test_lt(self, CellRange):
cr1 = CellRange("A1:F5")
cr2 = CellRange("A2:F4")
assert cr2 < cr1
def test_gt(self, CellRange):
cr1 = CellRange("A1:F5")
cr2 = CellRange("A2:F4")
assert cr1 > cr2
@pytest.fixture
def MultiCellRange():
from ..cell_range import MultiCellRange
return MultiCellRange
class TestMultiCellRange:
def test_ctor(self, MultiCellRange, CellRange):
cr = CellRange("A1")
cells = MultiCellRange(ranges=[cr])
assert cells.ranges == [cr]
def test_from_string(self, MultiCellRange, CellRange):
cells = MultiCellRange("A1 B2:B5")
assert cells.ranges == [CellRange("A1"), CellRange("B2:B5")]
def test_add_coord(self, MultiCellRange, CellRange):
cr = CellRange("A1")
cells = MultiCellRange(ranges=[cr])
cells.add("B2")
assert cells.ranges == [cr, CellRange("B2")]
def test_add_cell_range(self, MultiCellRange, CellRange):
cr1 = CellRange("A1")
cr2 = CellRange("B2")
cells = MultiCellRange(ranges=[cr1])
cells.add(cr2)
assert cells.ranges == [cr1, cr2]
def test_iadd(self, MultiCellRange):
cells = MultiCellRange()
cells.add('A1')
assert cells == "A1"
def test_avoid_duplicates(self, MultiCellRange):
cells = MultiCellRange("A1:D4")
cells.add("A3")
assert cells == "A1:D4"
def test_repr(self, MultiCellRange, CellRange):
cr1 = CellRange("a1")
cr2 = CellRange("B2")
cells = MultiCellRange(ranges=[cr1, cr2])
assert repr(cells) == "<MultiCellRange [A1 B2]>"
def test_contains(self, MultiCellRange, CellRange):
cr = CellRange("A1:E4")
cells = MultiCellRange([cr])
assert "C3" in cells
def test_doesnt_contain(self, MultiCellRange):
cells = MultiCellRange("A1:D5")
assert "F6" not in cells
def test_eq(self, MultiCellRange):
cells = MultiCellRange("A1:D4 E5")
assert cells == "A1:D4 E5"
def test_ne(self, MultiCellRange):
cells = MultiCellRange("A1")
assert cells != "B4"
def test_empty(self, MultiCellRange):
cells = MultiCellRange()
assert bool(cells) is False
def test_not_empty(self, MultiCellRange):
cells = MultiCellRange("A1")
assert bool(cells) is True
def test_remove(self, MultiCellRange):
cells = MultiCellRange("A1:D4")
cells.remove("A1:D4")
def test_remove_invalid(self, MultiCellRange):
cells = MultiCellRange("A1:D4")
with pytest.raises(ValueError):
cells.remove("A1")
def test_iter(self, MultiCellRange, CellRange):
cells = MultiCellRange("A1")
assert list(cells) == [CellRange("A1")]
def test_copy(self, MultiCellRange, CellRange):
r1 = MultiCellRange("A1")
from copy import copy
r2 = copy(r1)
assert list(r1)[0] is not list(r2)[0]
|
[
"[email protected]:Solly64/datasciencecoursera.git"
] |
[email protected]:Solly64/datasciencecoursera.git
|
c728f18221747be09cb4d7bc0f4d5c4588ee119b
|
bc899480ea50049929e6ba7a2836e39a51d0faa3
|
/leetcode/misc/linked_list/remove_nth_node_from_end_ll.py
|
4e70dab6e0f371566bbe615b5bd76d412292f0f1
|
[] |
no_license
|
grewy/practice_py
|
605a88f40eb54f7ac0fd54a1ab2d6bfdfae57b49
|
b00f649598a6e57af30b517baa304f3094345f6d
|
refs/heads/master
| 2021-07-09T01:33:07.158778 | 2020-10-15T12:33:06 | 2020-10-15T12:33:06 | 199,878,779 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,113 |
py
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
if n==0 or not head:
return head
stack = []
t = head
while t:
stack.append(t)
t= t.next
while n != 0:
curr = stack.pop(-1)
n -= 1
if stack:
prev = stack.pop(-1)
prev.next = curr.next
else:
head = curr.next
return head
"""
https://leetcode.com/problems/remove-nth-node-from-end-of-list/discuss/9032/Python-concise-one-pass-solution-with-dummy-head.
"""
def removeNthFromEnd(self, head, n):
dummy = ListNode(0)
dummy.next = head
fast = slow = dummy
for _ in xrange(n):
fast = fast.next
while fast and fast.next:
fast = fast.next
slow = slow.next
slow.next = slow.next.next
return dummy.next
|
[
"[email protected]"
] | |
5b3cf730f6e87e9912fba8136bfe75c1322b09af
|
205407e7259fe8ffc42ca653cebdece2f63fe1dc
|
/config.py.tmp
|
b37cc1ea816751caf2cc643a7dbd5c2afa47f800
|
[] |
no_license
|
namuyan/nem-tip-bot-peg-system
|
720f805ff93e45d0e2ee3bb5ca48c6cdabff4288
|
aad038f6ee68523c5e8e5cdfbfb63ff0854b2ba3
|
refs/heads/master
| 2021-09-03T01:03:13.350492 | 2018-01-04T11:57:23 | 2018-01-04T11:57:23 | 109,551,023 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,112 |
tmp
|
#!/user/env python3
# -*- coding: utf-8 -*-
class EmptyObject:
pass
class Config:
def __init__(self, test=True):
self.test = test
self.stop_signal = False
self.stop_ok = []
self.stop_need_obj = ("incoming",)
if test:
self.node = [("127.0.0.1", 8293), ("nukowallet.com", 8293)]
self.screen = ""
self.account_pubkey = ""
self.account_seckey = ""
self.genesis = "0x1a505395bfe4b2a8eef2f80033d68228db70e82bb695dd4ffb20e6d0cf71cb73"
self.db = {
"host": "127.0.0.1", "user": "peg",
"pass": "Q3h5GP", "db": "pegger_test",
"charset": 'utf8mb4'
}
self.twitter = {
"consumer_key": "",
"consumer_secret": "",
"access_token": "",
"access_token_secret": "",
"callback": None
}
self.login_pubkey = None
self.login_seckey = None
self.ws_host = "ws://153.122.86.46:8080"
self.rest_host = "127.0.0.1"
else:
self.node = [("127.0.0.1", 8293), ("nukowallet.com", 8293)]
self.screen = ""
self.account_pubkey = ""
self.account_seckey = ""
self.genesis = "0x1a505395bfe4b2a8eef2f80033d68228db70e82bb695dd4ffb20e6d0cf71cb73"
self.db = {
"host": "127.0.0.1", "user": "peg",
"pass": "Q3h5GP", "db": "pegger",
"charset": 'utf8mb4'
}
self.twitter = {
"consumer_key": "",
"consumer_secret": "",
"access_token": "",
"access_token_secret": "",
"callback": None
}
self.login_pubkey = None
self.login_seckey = None
self.ws_host = "ws://153.122.86.46:8088"
self.rest_host = "0.0.0.0"
MICRO_TO_WEI = 1000000000000 # 6 digits after the decimal point
NUKO_TO_WEI = 1000000000000000000
LOCAL_IP_ADDRESS = ("127.0.0.1", "localhost")
|
[
"[email protected]"
] | |
9557efb13c32f1488beeea0ed45879a178691809
|
b014c00eeaef54c3514bb37c5b68d2e464e2fffe
|
/scikit-learn/sklearn/utils/tests/test_extmath.py
|
59030a6f64693a899a2091cac6737f0dd0beca37
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
iglesias/tapkee_benchmarks
|
0eebd19debfdc82df1bcc4d30b3007b99376492f
|
e8d6def94331eb4229ae70ca4a7b8a10012851dc
|
refs/heads/master
| 2020-12-30T10:36:30.278402 | 2013-05-02T20:27:12 | 2013-05-02T20:27:12 | 9,820,903 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,819 |
py
|
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_true(np.all(mode == mode_result))
assert_true(np.all(score.ravel() == w[:, :5].sum(1)))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
"""Check that extmath.randomized_svd is consistent with linalg.svd"""
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the real
# rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_almost_equal(s[:rank], sa[:rank])
def test_randomized_svd_low_rank_with_noise():
"""Check that extmath.randomized_svd can handle noisy matrices"""
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X with structure of approximate rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.05)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method helps get rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
"""Check that extmath.randomized_svd can handle noisy matrices"""
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is still managing to get most of the structure
# at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
"""Check that transposing the design matrix has limit impact"""
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in xrange(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
"""Check if cartesian product delivers the right results"""
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
|
[
"[email protected]"
] | |
dbdf5de27d352c1c45e3e5eee6df6fcab3a46460
|
98efe1aee73bd9fbec640132e6fb2e54ff444904
|
/loldib/getratings/models/NA/na_udyr/na_udyr_bot.py
|
09e8372d3859fd0e92e3fdb2bcd92f97ec80262b
|
[
"Apache-2.0"
] |
permissive
|
koliupy/loldib
|
be4a1702c26546d6ae1b4a14943a416f73171718
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
refs/heads/master
| 2021-07-04T03:34:43.615423 | 2017-09-21T15:44:10 | 2017-09-21T15:44:10 | 104,359,388 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,269 |
py
|
from getratings.models.ratings import Ratings
class NA_Udyr_Bot_Aatrox(Ratings):
pass
class NA_Udyr_Bot_Ahri(Ratings):
pass
class NA_Udyr_Bot_Akali(Ratings):
pass
class NA_Udyr_Bot_Alistar(Ratings):
pass
class NA_Udyr_Bot_Amumu(Ratings):
pass
class NA_Udyr_Bot_Anivia(Ratings):
pass
class NA_Udyr_Bot_Annie(Ratings):
pass
class NA_Udyr_Bot_Ashe(Ratings):
pass
class NA_Udyr_Bot_AurelionSol(Ratings):
pass
class NA_Udyr_Bot_Azir(Ratings):
pass
class NA_Udyr_Bot_Bard(Ratings):
pass
class NA_Udyr_Bot_Blitzcrank(Ratings):
pass
class NA_Udyr_Bot_Brand(Ratings):
pass
class NA_Udyr_Bot_Braum(Ratings):
pass
class NA_Udyr_Bot_Caitlyn(Ratings):
pass
class NA_Udyr_Bot_Camille(Ratings):
pass
class NA_Udyr_Bot_Cassiopeia(Ratings):
pass
class NA_Udyr_Bot_Chogath(Ratings):
pass
class NA_Udyr_Bot_Corki(Ratings):
pass
class NA_Udyr_Bot_Darius(Ratings):
pass
class NA_Udyr_Bot_Diana(Ratings):
pass
class NA_Udyr_Bot_Draven(Ratings):
pass
class NA_Udyr_Bot_DrMundo(Ratings):
pass
class NA_Udyr_Bot_Ekko(Ratings):
pass
class NA_Udyr_Bot_Elise(Ratings):
pass
class NA_Udyr_Bot_Evelynn(Ratings):
pass
class NA_Udyr_Bot_Ezreal(Ratings):
pass
class NA_Udyr_Bot_Fiddlesticks(Ratings):
pass
class NA_Udyr_Bot_Fiora(Ratings):
pass
class NA_Udyr_Bot_Fizz(Ratings):
pass
class NA_Udyr_Bot_Galio(Ratings):
pass
class NA_Udyr_Bot_Gangplank(Ratings):
pass
class NA_Udyr_Bot_Garen(Ratings):
pass
class NA_Udyr_Bot_Gnar(Ratings):
pass
class NA_Udyr_Bot_Gragas(Ratings):
pass
class NA_Udyr_Bot_Graves(Ratings):
pass
class NA_Udyr_Bot_Hecarim(Ratings):
pass
class NA_Udyr_Bot_Heimerdinger(Ratings):
pass
class NA_Udyr_Bot_Illaoi(Ratings):
pass
class NA_Udyr_Bot_Irelia(Ratings):
pass
class NA_Udyr_Bot_Ivern(Ratings):
pass
class NA_Udyr_Bot_Janna(Ratings):
pass
class NA_Udyr_Bot_JarvanIV(Ratings):
pass
class NA_Udyr_Bot_Jax(Ratings):
pass
class NA_Udyr_Bot_Jayce(Ratings):
pass
class NA_Udyr_Bot_Jhin(Ratings):
pass
class NA_Udyr_Bot_Jinx(Ratings):
pass
class NA_Udyr_Bot_Kalista(Ratings):
pass
class NA_Udyr_Bot_Karma(Ratings):
pass
class NA_Udyr_Bot_Karthus(Ratings):
pass
class NA_Udyr_Bot_Kassadin(Ratings):
pass
class NA_Udyr_Bot_Katarina(Ratings):
pass
class NA_Udyr_Bot_Kayle(Ratings):
pass
class NA_Udyr_Bot_Kayn(Ratings):
pass
class NA_Udyr_Bot_Kennen(Ratings):
pass
class NA_Udyr_Bot_Khazix(Ratings):
pass
class NA_Udyr_Bot_Kindred(Ratings):
pass
class NA_Udyr_Bot_Kled(Ratings):
pass
class NA_Udyr_Bot_KogMaw(Ratings):
pass
class NA_Udyr_Bot_Leblanc(Ratings):
pass
class NA_Udyr_Bot_LeeSin(Ratings):
pass
class NA_Udyr_Bot_Leona(Ratings):
pass
class NA_Udyr_Bot_Lissandra(Ratings):
pass
class NA_Udyr_Bot_Lucian(Ratings):
pass
class NA_Udyr_Bot_Lulu(Ratings):
pass
class NA_Udyr_Bot_Lux(Ratings):
pass
class NA_Udyr_Bot_Malphite(Ratings):
pass
class NA_Udyr_Bot_Malzahar(Ratings):
pass
class NA_Udyr_Bot_Maokai(Ratings):
pass
class NA_Udyr_Bot_MasterYi(Ratings):
pass
class NA_Udyr_Bot_MissFortune(Ratings):
pass
class NA_Udyr_Bot_MonkeyKing(Ratings):
pass
class NA_Udyr_Bot_Mordekaiser(Ratings):
pass
class NA_Udyr_Bot_Morgana(Ratings):
pass
class NA_Udyr_Bot_Nami(Ratings):
pass
class NA_Udyr_Bot_Nasus(Ratings):
pass
class NA_Udyr_Bot_Nautilus(Ratings):
pass
class NA_Udyr_Bot_Nidalee(Ratings):
pass
class NA_Udyr_Bot_Nocturne(Ratings):
pass
class NA_Udyr_Bot_Nunu(Ratings):
pass
class NA_Udyr_Bot_Olaf(Ratings):
pass
class NA_Udyr_Bot_Orianna(Ratings):
pass
class NA_Udyr_Bot_Ornn(Ratings):
pass
class NA_Udyr_Bot_Pantheon(Ratings):
pass
class NA_Udyr_Bot_Poppy(Ratings):
pass
class NA_Udyr_Bot_Quinn(Ratings):
pass
class NA_Udyr_Bot_Rakan(Ratings):
pass
class NA_Udyr_Bot_Rammus(Ratings):
pass
class NA_Udyr_Bot_RekSai(Ratings):
pass
class NA_Udyr_Bot_Renekton(Ratings):
pass
class NA_Udyr_Bot_Rengar(Ratings):
pass
class NA_Udyr_Bot_Riven(Ratings):
pass
class NA_Udyr_Bot_Rumble(Ratings):
pass
class NA_Udyr_Bot_Ryze(Ratings):
pass
class NA_Udyr_Bot_Sejuani(Ratings):
pass
class NA_Udyr_Bot_Shaco(Ratings):
pass
class NA_Udyr_Bot_Shen(Ratings):
pass
class NA_Udyr_Bot_Shyvana(Ratings):
pass
class NA_Udyr_Bot_Singed(Ratings):
pass
class NA_Udyr_Bot_Sion(Ratings):
pass
class NA_Udyr_Bot_Sivir(Ratings):
pass
class NA_Udyr_Bot_Skarner(Ratings):
pass
class NA_Udyr_Bot_Sona(Ratings):
pass
class NA_Udyr_Bot_Soraka(Ratings):
pass
class NA_Udyr_Bot_Swain(Ratings):
pass
class NA_Udyr_Bot_Syndra(Ratings):
pass
class NA_Udyr_Bot_TahmKench(Ratings):
pass
class NA_Udyr_Bot_Taliyah(Ratings):
pass
class NA_Udyr_Bot_Talon(Ratings):
pass
class NA_Udyr_Bot_Taric(Ratings):
pass
class NA_Udyr_Bot_Teemo(Ratings):
pass
class NA_Udyr_Bot_Thresh(Ratings):
pass
class NA_Udyr_Bot_Tristana(Ratings):
pass
class NA_Udyr_Bot_Trundle(Ratings):
pass
class NA_Udyr_Bot_Tryndamere(Ratings):
pass
class NA_Udyr_Bot_TwistedFate(Ratings):
pass
class NA_Udyr_Bot_Twitch(Ratings):
pass
class NA_Udyr_Bot_Udyr(Ratings):
pass
class NA_Udyr_Bot_Urgot(Ratings):
pass
class NA_Udyr_Bot_Varus(Ratings):
pass
class NA_Udyr_Bot_Vayne(Ratings):
pass
class NA_Udyr_Bot_Veigar(Ratings):
pass
class NA_Udyr_Bot_Velkoz(Ratings):
pass
class NA_Udyr_Bot_Vi(Ratings):
pass
class NA_Udyr_Bot_Viktor(Ratings):
pass
class NA_Udyr_Bot_Vladimir(Ratings):
pass
class NA_Udyr_Bot_Volibear(Ratings):
pass
class NA_Udyr_Bot_Warwick(Ratings):
pass
class NA_Udyr_Bot_Xayah(Ratings):
pass
class NA_Udyr_Bot_Xerath(Ratings):
pass
class NA_Udyr_Bot_XinZhao(Ratings):
pass
class NA_Udyr_Bot_Yasuo(Ratings):
pass
class NA_Udyr_Bot_Yorick(Ratings):
pass
class NA_Udyr_Bot_Zac(Ratings):
pass
class NA_Udyr_Bot_Zed(Ratings):
pass
class NA_Udyr_Bot_Ziggs(Ratings):
pass
class NA_Udyr_Bot_Zilean(Ratings):
pass
class NA_Udyr_Bot_Zyra(Ratings):
pass
|
[
"[email protected]"
] | |
0bc290dafa4233a31706966fbaf2ce4ea89bf3fd
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03148/s093100517.py
|
ebad2fec8979471f0c0c14cb03557cc80fe50113
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 759 |
py
|
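# Added descriptive comments (the solution code itself is unchanged):
# the task is to pick m items, each with a kind t and a value d, so as to maximize
# (sum of selected values) + (number of distinct kinds selected)**2.
# Strategy below: greedily take the m highest-value items, then repeatedly trade the
# lowest-value duplicate-kind item in the selection for the best remaining item of a
# not-yet-selected kind, keeping the best score seen along the way.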
n, m = [int(i) for i in input().split()]
td = [[int(i) for i in input().split()] for n in range(n)]
td.sort(key=lambda x:-x[1])
memo = set()
a = []
for t, d in td:
if t in memo:
a.append((d, 0))
else:
a.append((d, 1))
memo.add(t)
a = [(-x, x, d) for x, d in a]
import heapq
heapq.heapify(a)
val = 0
kind = 0
b = []
for _ in range(m):
ele = heapq.heappop(a)
val += ele[1]
kind += ele[2]
if ele[2] == 0:
b.append(ele[1])
ans = val + kind ** 2
while (len(a) > 0 and len(b)>0):
val -= b.pop()
flag = False
while(len(a) > 0):
elem = heapq.heappop(a)
if elem[2] == 1:
flag = True
break
if not flag:
break
val += elem[1]
kind += 1
tmpans = val + kind ** 2
if tmpans > ans:
ans = tmpans
print(ans)
|
[
"[email protected]"
] | |
bdd58b6d01c70e104c0b4911a2c42ed7714b4725
|
f71175700ba405e606eeab58d2b3ad97474bf9f5
|
/link/models.py
|
b5e6c8526fdbdb9674a67616f4a1bf6871d27a73
|
[] |
no_license
|
Shirhussain/link_scraper
|
76018965718887b71247e917babfb009d534d126
|
f5e9a70160edc408fc005bc4d5c16a56834d93c7
|
refs/heads/main
| 2023-01-10T05:53:17.296704 | 2020-11-05T20:22:29 | 2020-11-05T20:22:29 | 310,549,867 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 232 |
py
|
from django.db import models
class Link(models.Model):
name = models.CharField(max_length=2000, blank=True, null=True)
address = models.CharField(max_length=2000)
    def __str__(self):
        # name is optional (blank/null), so fall back to the address to avoid returning None
        return self.name or self.address
|
[
"[email protected]"
] | |
0f22f542e80fab1255b87f8e1fc553feb6bd3b7d
|
caf71b6a4374a30a51e6dff4deefd9122ae7980d
|
/contest_python/tallestInClass_DWITE.py
|
2e5540d8b419a15e72b7001183dcff93db427fe9
|
[] |
no_license
|
PMiskew/DP_CS_Code_PMiskew
|
fba73779e78bc4eb0bfafac05168e28ec11561b1
|
93d366a1dae3cc8f55acc7dd9cfdb2b224dbf539
|
refs/heads/master
| 2023-03-28T08:46:15.118189 | 2021-03-24T15:49:57 | 2021-03-24T15:49:57 | 294,153,175 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,459 |
py
|
'''
Recommendation:
This problem can be daunting; however, if you break it apart into a few components and tackle
those one at a time, it falls apart nicely.
1. Start by managing the input. Assuming you store the data as a list of strings, create three
   new lists and copy in the data. The first two elements in this parallel list structure look
   as follows:
       name = ["Jim", "Sally", ...]
       height = [1.45, 187, ...]
       units = ["m", "cm", ...]
   HL - You could also copy it into a 2D list:
       data2D = [["Jim", "Sally", ...],
                 [1.45, 187, ...],
                 ["m", "cm", ...]]
2. Start off by simplifying the problem by assuming
   a) all measurements are in the same unit, and
   b) you only want to find the single tallest person.
3. Create a function that converts any unit to meters. Which unit it converts to doesn't matter;
   the point is that you can send any measurement through it and get back a standard value that
   can be compared.
'''
data = ["Jim 1.45 m",
"Sally 187 cm",
"Joey 1064 mm",
"Roel 15.23 dm",
"Karl 134 cm",
"Melanie 18.9 dm",
"Jill 1.54 m",
"Sam 133 cm",
"Joel 1877 mm",
"Roger 17.83 dm",
"Karen 178 cm",
"Marnie 17.9 dm"]
name = []
height = []
units = []
for i in range(0,len(data),1):
loc = data[i].index(' ')
n = data[i][0:loc]
name.append(n)
loc1 = data[i].index(' ',loc+1)
h = data[i][loc + 1:loc1]
height.append(float(h))
u = data[i][loc1+1:]
units.append(u)
print(name)
print(height)
print(units)
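# --- Hedged sketch (added; not part of the original file). The docstring above goes on to
# --- suggest converting every measurement to meters and then finding the tallest student,
# --- but the original code stops after parsing. The helper and loop below are one possible
# --- way to finish; the to_meters name and the conversion table are assumptions.
def to_meters(value, unit):
    # Convert a height given in m, dm, cm or mm to meters.
    factors = {"m": 1.0, "dm": 0.1, "cm": 0.01, "mm": 0.001}
    return value * factors[unit]

tallest_name = ""
tallest_height = -1.0
for i in range(0, len(name), 1):
    h = to_meters(height[i], units[i])
    if h > tallest_height:
        tallest_height = h
        tallest_name = name[i]
print(tallest_name, tallest_height)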
|
[
"[email protected]"
] | |
7eca7055328d8ca5c90cf66a600c1cb07862346e
|
1498148e5d0af365cd7fd16197174174a7fa9800
|
/leetcode/t000840.py
|
f81a88fb38895df4b807234bcc9a0768a92e933b
|
[] |
no_license
|
feiyanshiren/myAcm
|
59a2b80fe7e02787defcb152eee3eae26135322a
|
00c7082d5143ddf87aeeafbdb6ce29da46dc8a12
|
refs/heads/master
| 2023-09-01T12:12:19.866447 | 2023-09-01T09:09:56 | 2023-09-01T09:09:56 | 148,560,672 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,284 |
py
|
# Solution:
# Brute force: check every 3 x 3 subgrid and validate it strictly against the problem's conditions.
#
from typing import List
class Solution:
def numMagicSquaresInside(self, grid: List[List[int]]) -> int:
count = 0
l1 = len(grid)
if l1 >= 3:
l2 = len(grid[0])
if l2 >= 3:
for i in range(l1 - 2):
for j in range(l2 - 2):
if self.ifh([[grid[i][j], grid[i][j + 1], grid[i][j + 2]],
[grid[i + 1][j], grid[i + 1][j + 1], grid[i + 1][j + 2]],
[grid[i + 2][j], grid[i + 2][j + 1], grid[i + 2][j + 2]]]):
count += 1
return count
def ifh(self, grid):
a = []
for i in range(3):
for j in range(3):
if grid[i][j] <= 0:
return False
if grid[i][j] > 9:
return False
if grid[i][j] in a:
return False
a.append(grid[i][j])
m1 = grid[0][0] + grid[0][1] + grid[0][2]
m2 = grid[1][0] + grid[1][1] + grid[1][2]
m3 = grid[2][0] + grid[2][1] + grid[2][2]
m4 = grid[0][0] + grid[1][0] + grid[2][0]
m5 = grid[0][1] + grid[1][1] + grid[2][1]
m6 = grid[0][2] + grid[1][2] + grid[2][2]
m7 = grid[0][0] + grid[1][1] + grid[2][2]
m8 = grid[0][2] + grid[1][1] + grid[2][0]
if m1 == m2 and m1 == m3 and m1 == m4 and m1 == m5 and m1 == m6 \
and m1 == m7 and m1 == m8:
return True
else:
return False
# 840. Magic Squares In Grid
#
# A 3 x 3 magic square is a 3 x 3 grid filled with distinct numbers from 1 to 9 such that
# every row, every column and both diagonals have the same sum.
#
# Given a grid of integers, how many 3 x 3 "magic square" subgrids are there?
# (Each subgrid must be contiguous.)
#
# Example:
#
# Input:  [[4, 3, 8, 4],
#          [9, 5, 1, 9],
#          [2, 7, 6, 2]]
# Output: 1
# Explanation:
# The following subgrid is a 3 x 3 magic square:
#   438
#   951
#   276
# while this one is not:
#   384
#   519
#   762
# In total, only one 3 x 3 magic square subgrid exists in the given grid.
#
# Constraints:
# 1 <= grid.length <= 10
# 1 <= grid[0].length <= 10
# 0 <= grid[i][j] <= 15
#
# Solution:
# Lookup-table approach: the center of a 3 x 3 magic square must be 5, so all eight valid
# magic squares can be enumerated up front and each subgrid matched against that table.
class Solution(object):
def numMagicSquaresInside(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
l = [[8, 1, 6, 3, 5, 7, 4, 9, 2], [6, 1, 8, 7, 5, 3, 2, 9, 4], [4, 9, 2, 3, 5, 7, 8, 1, 6],
[2, 9, 4, 7, 5, 3, 6, 1, 8], [6, 7, 2, 1, 5, 9, 8, 3, 4], [8, 3, 4, 1, 5, 9, 6, 7, 2],
[2, 7, 6, 9, 5, 1, 4, 3, 8], [4, 3, 8, 9, 5, 1, 2, 7, 6]]
count = 0
for i in range(len(grid) - 2):
for j in range(len(grid[0]) - 2):
temp = grid[i][j:j + 3] + grid[i + 1][j:j + 3] + grid[i + 2][j:j + 3]
if temp in l:
count += 1
return count
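# --- Hedged sanity check (added; not part of the original submission): runs the second,
# --- lookup-table Solution on the example from the problem statement above.
if __name__ == "__main__":
    example = [[4, 3, 8, 4],
               [9, 5, 1, 9],
               [2, 7, 6, 2]]
    print(Solution().numMagicSquaresInside(example))  # expected output: 1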
|
[
"[email protected]"
] | |
526293e66ff54ad466476cfb1a777b846734c8af
|
082474f6f6301c561ee9598843adaf1a37bcdf96
|
/page_object/common/sendmail.py
|
5f2e87c0ef3ac33e463697786390ed2ff90df098
|
[] |
no_license
|
guocheng45/Projects
|
69e2b79e93f6a4697d1adb3a025f2c04943f37cf
|
df16943fcbc34341b9a0934b830da0860e2bb5ff
|
refs/heads/master
| 2023-01-23T20:27:06.327650 | 2020-07-12T07:19:14 | 2020-07-12T07:19:14 | 197,792,797 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,158 |
py
|
# coding=utf-8
import smtplib
from email.mime.text import MIMEText
from email.header import Header
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
class SendMail():
def sendMail(self,msg1,pic,receiver):
        # Mailbox and credential used to log in for sending
        password = 'sdjxhqksmlfsbghd'  # SMTP authorization code used for sending
        smtp_server = 'smtp.qq.com'  # outgoing mail server
        sender = '[email protected]'
        receivers = ['[email protected]','[email protected]']  # recipient mailboxes
        msg = MIMEMultipart('related')
        # Email header information
        msg['From'] = sender  # sender
        msg['To'] = ";".join(receivers)  # recipients
        msg['Subject'] = Header('Test Feedback Email', 'utf-8')  # email subject
        # Email body: three arguments - the text content, 'plain' for the text format, 'utf-8' for the encoding
# message = MIMEText('Python sendmail test', 'plain', 'utf-8')
        mail_msg = MIMEText("""
        <p>Python email with inline text and images</p>
        <p>Test screenshot:</p>
        <p><img height="600" width="300" src="cid:image1"></p>
        <p><a href="http://www.baidu.com">This is a link</a></p>
        """, 'html', 'utf-8')  # cid is the Content-ID; the HTML body uses this unique identifier to reference embedded resources when sending mail from Java or Python.
msg.attach(mail_msg)
        # Point at the image file and read it in
file = open('test.png', 'rb')
img_data = file.read()
file.close()
        # Embed the image
img=MIMEImage(img_data)
img.add_header('Content-ID','image1')
msg.attach(img)
try:
            # Start the sending session; an encrypted (SSL) connection is used here
smtpObj = smtplib.SMTP_SSL()
smtpObj.connect(smtp_server, 465)
smtpObj.login(sender, password)
smtpObj.sendmail(sender, receivers, msg.as_string())
print("send mail success")
except smtplib.SMTPException:
print("Error: can not send the mail")
finally:
            # Close the connection to the server
smtpObj.quit()
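# --- Hedged usage sketch (added; not in the original file). Note that the msg1/pic/receiver
# --- parameters are currently ignored by sendMail(), which relies on the hard-coded
# --- sender/receivers above and expects a 'test.png' file in the working directory.
if __name__ == '__main__':
    SendMail().sendMail(None, None, None)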
|
[
"[email protected]"
] | |
644a5a3de788bd0b147de0a44fea79aee008cf69
|
da5ef82554c6c0413193b7c99192edd70fed58dd
|
/core/lib/tests/free_ip.py
|
7e2f686c2bc963208f481a2adf46fbdf94a1207b
|
[] |
no_license
|
rtucker-mozilla/mozilla_inventory
|
d643c7713c65aa870e732e18aaf19ce677e277b7
|
bf9154b0d77705d8c0fe1a9a35ce9c1bd60fcbea
|
refs/heads/master
| 2020-12-24T17:17:37.621418 | 2013-04-11T10:39:41 | 2013-04-11T10:39:41 | 2,709,399 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,292 |
py
|
from django.test import TestCase
from core.vlan.models import Vlan
from core.site.models import Site
from core.range.models import Range
from core.network.models import Network
from core.interface.static_intr.models import StaticInterface
from core.lib.utils import calc_free_ips_str, create_ipv4_intr_from_range
from mozdns.domain.models import Domain
from mozdns.tests.utils import create_fake_zone
from systems.models import System
class LibTestsFreeIP(TestCase):
def setUp(self):
self.system = System()
d1 = create_fake_zone("mozilla.com.com", suffix="")
soa = d1.soa
v, _ = Vlan.objects.get_or_create(name="private", number=3)
s, _ = Site.objects.get_or_create(name="phx1")
s1, _ = Site.objects.get_or_create(name="corp", parent=s)
d, _ = Domain.objects.get_or_create(name="phx1.mozilla.com.com")
d.soa = soa
d.save()
d1, _ = Domain.objects.get_or_create(name="corp.phx1.mozilla.com.com")
d1.soa = soa
d1.save()
d2, _ = Domain.objects.get_or_create(
name="private.corp.phx1.mozilla.com.com")
d2.soa = soa
d2.save()
d, _ = Domain.objects.get_or_create(name="arpa")
d, _ = Domain.objects.get_or_create(name="in-addr.arpa")
d, _ = Domain.objects.get_or_create(name="ip6.arpa")
d, _ = Domain.objects.get_or_create(name="15.in-addr.arpa")
d, _ = Domain.objects.get_or_create(name="2.in-addr.arpa")
n = Network(network_str="15.0.0.0/8", ip_type="4")
n.clean()
n.site = s1
n.vlan = v
n.save()
r = Range(start_str="15.0.0.0", end_str="15.0.0.10",
network=n)
r.clean()
r.save()
def test1_free_ip_count(self):
# Add a bunch of interfaces and make sure the calc_free_ips function is
# working
count = calc_free_ips_str("15.0.0.200", "15.0.0.204")
self.assertEqual(count, 4)
x = create_ipv4_intr_from_range("foo",
"private.corp.phx1.mozilla.com.com",
self.system, "11:22:33:44:55:66",
"15.0.0.200", "15.0.0.204")
intr, errors = x
intr.save()
self.assertEqual(errors, None)
self.assertTrue(isinstance(intr, StaticInterface))
count = calc_free_ips_str("15.0.0.200", "15.0.0.204")
self.assertEqual(count, 3)
x = create_ipv4_intr_from_range("foo",
"private.corp.phx1.mozilla.com.com",
self.system, "11:22:33:44:55:66",
"15.0.0.200", "15.0.0.204")
intr, errors = x
intr.save()
self.assertEqual(errors, None)
self.assertTrue(isinstance(intr, StaticInterface))
count = calc_free_ips_str("15.0.0.200", "15.0.0.204")
self.assertEqual(count, 2)
x = create_ipv4_intr_from_range("foo",
"private.corp.phx1.mozilla.com.com",
self.system, "11:22:33:44:55:66",
"15.0.0.200", "15.0.0.204")
intr, errors = x
intr.save()
self.assertEqual(errors, None)
self.assertTrue(isinstance(intr, StaticInterface))
count = calc_free_ips_str("15.0.0.200", "15.0.0.204")
self.assertEqual(count, 1)
x = create_ipv4_intr_from_range("foo",
"private.corp.phx1.mozilla.com.com",
self.system, "11:22:33:44:55:66",
"15.0.0.200", "15.0.0.204")
(intr, errors) = x
intr.save()
self.assertEqual(errors, None)
self.assertTrue(isinstance(intr, StaticInterface))
count = calc_free_ips_str("15.0.0.200", "15.0.0.204")
self.assertEqual(count, 0)
def test2_free_ip_count(self):
return
# Time is tight, not going to do this test yet.
# Add an Ipv6 address and make sure the rangecount function sees it.
calc_free_ips_str("2620:101:8001::", "2620:101:8001::",
ip_type='6')
|
[
"[email protected]"
] | |
ed1af0217a26c41db25af4fb2cbaf6824e514d91
|
ebfcae1c5ba2997b2ac4471d5bedc3f5daffcb31
|
/repos/flask-restful-swagger-master/setup.py
|
b93a88d336feff6f68a96d4730a9222aae80d2f5
|
[
"MIT"
] |
permissive
|
babiato/flaskapp1
|
84de2d0b26a54f5820d3bbe97926782ad41e005c
|
530beb9e3b8516e0e93960b99521c23a523ef546
|
refs/heads/master
| 2023-02-26T16:36:49.760632 | 2021-02-04T09:08:40 | 2021-02-04T09:08:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 822 |
py
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open("README") as file:
long_description = file.read()
setup(
name="flask-restful-swagger",
version="0.20.2",
url="https://github.com/rantav/flask-restful-swagger",
zip_safe=False,
packages=["flask_restful_swagger"],
package_data={
"flask_restful_swagger": [
"static/*.*",
"static/css/*.*",
"static/images/*.*",
"static/lib/*.*",
"static/lib/shred/*.*",
]
},
description="Extract swagger specs from your flask-restful project",
author="Ran Tavory",
license="MIT",
long_description=long_description,
install_requires=[
"Jinja2>=2.10.1,<3.0.0",
"Flask-RESTful>=0.3.6",
],
)
|
[
"[email protected]"
] | |
dce9c69be6f76f03da43a245de0a9184f4969bd0
|
70d39e4ee19154a62e8c82467ef75b601e584738
|
/pyth3/birb_scraper.py
|
fb9b94b174fd01de555cd135edd1d03093b3209a
|
[] |
no_license
|
babywyrm/sysadmin
|
6f2724be13ae7e5b9372278856a8c072073beffb
|
2a5f3d29c7529bc917d4ff9be03af30ec23948a5
|
refs/heads/master
| 2023-08-16T03:50:38.717442 | 2023-08-16T03:05:55 | 2023-08-16T03:05:55 | 210,228,940 | 10 | 5 | null | 2023-05-01T23:15:31 | 2019-09-22T23:42:50 |
PowerShell
|
UTF-8
|
Python
| false | false | 1,590 |
py
|
#!/usr/bin/python3
##################
##
##
import click
import requests
import re,os,sys
from bs4 import BeautifulSoup
#############################
#############################
def get_html_of(url):
resp = requests.get(url)
if resp.status_code != 200:
print(f'HTTP status code of {resp.status_code} returned, but 200 was expected. Exiting...')
exit(1)
return resp.content.decode()
def count_occurrences_in(word_list, min_length):
word_count = {}
for word in word_list:
if len(word) < min_length:
continue
if word not in word_count:
word_count[word] = 1
else:
current_count = word_count.get(word)
word_count[word] = current_count + 1
return word_count
def get_all_words_from(url):
html = get_html_of(url)
soup = BeautifulSoup(html, 'html.parser')
raw_text = soup.get_text()
return re.findall(r'\w+', raw_text)
def get_top_words_from(all_words, min_length):
occurrences = count_occurrences_in(all_words, min_length)
return sorted(occurrences.items(), key=lambda item: item[1], reverse=True)
@click.command()
@click.option('--url', '-u', prompt='Web URL', help='URL of webpage to extract from.')
@click.option('--length', '-l', default=0, help='Minimum word length (default: 0, no limit).')
def main(url, length):
the_words = get_all_words_from(url)
top_words = get_top_words_from(the_words, length)
    # Print up to the ten most frequent words (guards against pages with fewer than ten words)
    for word, count in top_words[:10]:
        print(word)
if __name__ == '__main__':
main()
###############################
##
##
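## Example invocation (added note; assumes the file is saved as birb_scraper.py and that the
## click, requests and beautifulsoup4 dependencies are installed):
##
##   python3 birb_scraper.py --url https://example.com --length 5
##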
|
[
"[email protected]"
] | |
83413238c8fee2649daebd589b1fed30ede470b5
|
46ac0965941d06fde419a6f216db2a653a245dbd
|
/sdks/python/test/test_UserLiteProfileResponse.py
|
ca0997dce4fae8c872697809ac1d3e612d3b77dc
|
[
"MIT",
"Unlicense"
] |
permissive
|
b3nab/appcenter-sdks
|
11f0bab00d020abb30ee951f7656a3d7ed783eac
|
bcc19c998b5f648a147f0d6a593dd0324e2ab1ea
|
refs/heads/master
| 2022-01-27T15:06:07.202852 | 2019-05-19T00:12:43 | 2019-05-19T00:12:43 | 187,386,747 | 0 | 3 |
MIT
| 2022-01-22T07:57:59 | 2019-05-18T17:29:21 |
Python
|
UTF-8
|
Python
| false | false | 985 |
py
|
# coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: [email protected]
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
from __future__ import absolute_import
import unittest
import appcenter_sdk
from UserLiteProfileResponse.clsUserLiteProfileResponse import UserLiteProfileResponse # noqa: E501
from appcenter_sdk.rest import ApiException
class TestUserLiteProfileResponse(unittest.TestCase):
"""UserLiteProfileResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testUserLiteProfileResponse(self):
"""Test UserLiteProfileResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = appcenter_sdk.models.clsUserLiteProfileResponse.UserLiteProfileResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
8c73809c2df2643526a2928d14dabbde864775da
|
eb5b9791349f1cc75b8e47fd80896e4fe9bf6061
|
/nb/loop.py
|
7d0aded055e890398caf2938540ae1778b28cc65
|
[
"MIT"
] |
permissive
|
Hellorelei/oc-2018
|
4cbaacbe443886e6c93cf74486f0027e21fc462a
|
7961de5ba9923512bd50c579c37f1dadf070b692
|
refs/heads/master
| 2022-07-09T07:36:04.121651 | 2019-06-21T08:05:41 | 2019-06-21T08:05:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 43 |
py
|
for i in range(5):
print(i, i**2, i**3)
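# Expected output (added note, not in the original file):
# 0 0 0
# 1 1 1
# 2 4 8
# 3 9 27
# 4 16 64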
|
[
"[email protected]"
] | |
da9be17b5af888677c2e663ed919a2370d0183b0
|
764ce53fd708bb3f81d67cc9a2366265c9a685b9
|
/accounts_django/views.py
|
a9377b5f321014743f8f2900bb49fe562dc80934
|
[] |
no_license
|
Vaishnavi-Gajinkar/Bridgelabz
|
3d17b8399432ac5643059e822ccad9a90f919e9f
|
e51551ab675dbb5444ba222cc88ac05fbeab49d2
|
refs/heads/master
| 2020-12-28T02:45:18.517627 | 2020-03-09T13:42:37 | 2020-03-09T13:42:37 | 238,153,294 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 649 |
py
|
from django.shortcuts import render, redirect
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.decorators import login_required
# Create your views here.
def indexView(request):
return render(request, 'accountsIndex.html')
@login_required
def dashboardView(request):
return render(request,'dashboard.html')
def registerView(request):
if request.method == "Post":
form = UserCreationForm(request.POST)
if form.is_valid():
form.save()
return redirect('login_url')
else:
form = UserCreationForm
return render(request,'registration/register.html',{'form':form})
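# --- Hedged sketch (added; not in the original file): registerView redirects to a URL pattern
# --- named 'login_url', which must be defined in the project's URLconf (not shown here).
# --- The paths, views and names below are assumptions for illustration only.
# from django.urls import path
# from django.contrib.auth import views as auth_views
# from . import views
#
# urlpatterns = [
#     path('', views.indexView, name='index'),
#     path('dashboard/', views.dashboardView, name='dashboard'),
#     path('register/', views.registerView, name='register_url'),
#     path('login/', auth_views.LoginView.as_view(), name='login_url'),
# ]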
|
[
"[email protected]"
] | |
794bb82367e77038068f0618a2f04881891f6d67
|
752116ef4b69a3049fef0cfe9b3d212548cc81b1
|
/sources/scripting/wrappers/session.py
|
82d27208c07181752417a4872c0c94524d3e9985
|
[] |
no_license
|
VDOMBoxGroup/runtime2.0
|
e54af4af7a642f34b0e07b5d4096320494fb9ae8
|
cb9932f5f75d5c6d7889f26d58aee079b4127299
|
refs/heads/develop
| 2023-07-07T11:06:10.817093 | 2023-07-03T06:11:55 | 2023-07-03T06:11:55 | 62,622,255 | 0 | 12 | null | 2023-05-23T02:55:00 | 2016-07-05T09:09:48 |
Python
|
UTF-8
|
Python
| false | false | 1,056 |
py
|
import managers
class VDOM_session(object):
def _get_id(self):
return managers.request_manager.current.session().id()
def __getitem__(self, name):
if name == "response": # temporary solution for backward compability of Whole
return managers.request_manager.current.wholeAnswer
return managers.request_manager.current.session()[name]
def __setitem__(self, name, value):
if name == "response": # temporary solution for backward compability of Whole
managers.request_manager.current.wholeAnswer = value
managers.request_manager.current.session()[name] = value
def __delitem__(self, name):
del managers.request_manager.current.session()[name]
def get(self, name, default=None):
return managers.request_manager.current.session().get(name, default)
def keys(self):
return managers.request_manager.current.session().keys()
def __iter__(self):
return iter(managers.request_manager.current.session())
id = property(_get_id)
|
[
"[email protected]"
] | |
a299aa68059723e3132438cc91d76f150d1c8463
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/B/bkj123/yrs_of_educ_by_country.py
|
f3b9f31c458c9e8d9cd5f725423f5700f75502e0
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,052 |
py
|
import scraperwiki
html = scraperwiki.scrape("http://web.archive.org/web/20110514112442/http://unstats.un.org/unsd/demographic/products/socind/education.htm")
# print html
import lxml.html
root = lxml.html.fromstring(html)
for tr in root.cssselect("div[align='left'] tr"):
tds = tr.cssselect("td")
if len(tds)==12:
data = {
'country' : tds[0].text_content(),
'years_in_school' : int(tds[4].text_content())
}
scraperwiki.sqlite.save(unique_keys=['country'], data=data)
|
[
"[email protected]"
] | |
853ee679d49b7188596217525c19de94c1b111a0
|
0e8b572727bbe59882ca3faaf71515c24e81aaf7
|
/hyconhacks/papers/apps.py
|
14a3af03111f64f3469497eb8212b9b602251d60
|
[
"MIT"
] |
permissive
|
jisuhan3201/hyconhakathon
|
ff7cfbf300abb7b7132975c909bdfa15ebe2b54a
|
a29f7369432cd4d9744ec75debe03c16cd1e349a
|
refs/heads/master
| 2021-11-20T00:45:16.698871 | 2018-09-15T04:57:45 | 2018-09-15T04:57:45 | 148,870,606 | 0 | 0 |
MIT
| 2021-09-08T00:18:25 | 2018-09-15T04:50:40 |
Python
|
UTF-8
|
Python
| false | false | 98 |
py
|
from django.apps import AppConfig
class PapersConfig(AppConfig):
name = 'hyconhacks.papers'
|
[
"[email protected]"
] | |
97f2bc4b372ec60b32bf11ae749b8cbabc6a3842
|
5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5
|
/blimgui/dist/OpenGL/raw/GL/KHR/context_flush_control.py
|
3afcc413f9667fef3a1d01df0028809210531e33
|
[
"MIT"
] |
permissive
|
juso40/bl2sdk_Mods
|
8422a37ca9c2c2bbf231a2399cbcb84379b7e848
|
29f79c41cfb49ea5b1dd1bec559795727e868558
|
refs/heads/master
| 2023-08-15T02:28:38.142874 | 2023-07-22T21:48:01 | 2023-07-22T21:48:01 | 188,486,371 | 42 | 110 |
MIT
| 2022-11-20T09:47:56 | 2019-05-24T20:55:10 |
Python
|
UTF-8
|
Python
| false | false | 876 |
py
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_KHR_context_flush_control'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_KHR_context_flush_control',error_checker=_errors._error_checker)
GL_CONTEXT_RELEASE_BEHAVIOR=_C('GL_CONTEXT_RELEASE_BEHAVIOR',0x82FB)
GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH=_C('GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH',0x82FC)
GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH_KHR=_C('GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH_KHR',0x82FC)
GL_CONTEXT_RELEASE_BEHAVIOR_KHR=_C('GL_CONTEXT_RELEASE_BEHAVIOR_KHR',0x82FB)
GL_NONE=_C('GL_NONE',0)
GL_NONE=_C('GL_NONE',0)
|
[
"[email protected]"
] |