blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
71962d7e86cb76c775309a6190d8d73cdbbb4cf6 | 184bcb482ea5f0f13aa35275847b0e7dd56d8219 | /tests/test_cli.py | 75f2834a1bf6022a049f31136d917db671e85112 | [
"LPPL-1.3c",
"MIT"
]
| permissive | svenkreiss/unicodeit | 643a3ead7fc69160eff82099b33c25ba5d01de28 | d7f3f0cb9b7f8c3abf8e47ea6158b2ee1f6cbf05 | refs/heads/main | 2023-08-23T07:44:45.029170 | 2023-03-12T09:21:04 | 2023-03-12T09:21:04 | 10,319,674 | 234 | 34 | NOASSERTION | 2023-07-18T22:48:57 | 2013-05-27T17:52:57 | Python | UTF-8 | Python | false | false | 1,743 | py | import subprocess
import sys
import pytest
# Interpreter name used to spawn the CLI: 'python3' everywhere except
# Windows, where the installer registers plain 'python'.
PYTHON = 'python3' if sys.platform != 'win32' else 'python'
def test_cli_symbols1():
    """A lone LaTeX command is converted to its Unicode symbol."""
    args = [PYTHON, '-m', 'unicodeit.cli', '\\Sigma']
    output = subprocess.check_output(args).decode()
    print(output)
    assert output.strip() == 'Σ'
def test_cli_symbols2():
    """Mixed plain text, symbols, sub- and superscripts convert in place."""
    args = [
        PYTHON, '-m', 'unicodeit.cli',
        'def\\Sigma_{01234}abc\\alpha_{567}ggg\\beta_{1234}lll "\\Sigma e_0 e^3"',
    ]
    output = subprocess.check_output(args).decode()
    print(output)
    assert output.strip() == 'defΣ₀₁₂₃₄abcα₅₆₇gggβ₁₂₃₄lll "Σ e₀ e³"'
def test_cli_symbols3():
    """Superscript groups and emoticon commands are converted."""
    args = [
        PYTHON, '-m', 'unicodeit.cli',
        'def^{01234}abc\\alpha^{567abc} "\\:) \\:G"',
    ]
    output = subprocess.check_output(args).decode()
    print(output)
    assert output.strip() == 'def⁰¹²³⁴abcα⁵⁶⁷ᵃᵇᶜ "☺ ㋡"'
@pytest.mark.skip('this was already broken')
def test_cli_symbols4():
    # Skipped regression case: escaped parentheses inside a superscript
    # group were never handled correctly by the CLI.
    r = subprocess.check_output([
        PYTHON, '-m', 'unicodeit.cli',
        'ggg\\beta^{1234=\\(5\\)}lll'
    ])
    print(r.decode())
    # NOTE(review): 'Σ' looks like a copy-paste placeholder rather than the
    # real expected output for this input — confirm before un-skipping.
    assert r.decode().strip() == 'Σ'
def test_subscripts():
    """Greek letters in a subscript group map to subscript code points."""
    args = [PYTHON, '-m', 'unicodeit.cli', 'a_{\\beta\\gamma\\varphi\\rho\\chi}']
    output = subprocess.check_output(args).decode()
    print(output)
    assert output.strip() == 'aᵦᵧᵩᵨᵪ'
def test_superscripts():
    """Latin and Greek characters in a superscript group map to superscript code points."""
    args = [
        PYTHON, '-m', 'unicodeit.cli',
        'm^{ABDEGHIJKLMNOPRTUWabcdefghiklmnoprstuvwxyz\\beta\\gamma\\delta\\varphi\\chi<>}',
    ]
    output = subprocess.check_output(args).decode()
    print(output)
    assert output.strip() == 'mᴬᴮᴰᴱᴳᴴᴵᴶᴷᴸᴹᴺᴼᴾᴿᵀᵁᵂᵃᵇᶜᵈᵉᶠᵍʰⁱᵏˡᵐⁿᵒᵖʳˢᵗᵘᵛʷˣʸᶻᵝᵞᵟᵠᵡ˂˃'
| [
"[email protected]"
]
| |
d368f4a86a6510a5b4544a08552856d776373977 | 202bf6cd78eb19025aa63dea609b40903ce1a735 | /code/library_clusters.py | b159f8a7f9ca23ec704b02c9382692e2679fb567 | [
"MIT"
]
| permissive | jbkinney/17_inducibility | 1e4a5943bd680252f19008fb77a0b9bb8cfbdaf6 | cb2ddf984608fba6f85c98711bce0d7e450fba0e | refs/heads/master | 2020-04-06T07:28:02.386966 | 2018-12-05T16:25:35 | 2018-12-05T16:25:35 | 157,273,701 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 18,247 | py | spacing_constructs={
'51.5':{'ind':'b17B7', 'null':''},
'52.5':{'ind':'b17B9', 'null':''},
'53.5':{'ind':'b17C2', 'null':''},
'54.5':{'ind':'b17C4', 'null':''},
'55.5':{'ind':'b17C6', 'null':''},
'56.5':{'ind':'b17C8', 'null':''},
'57.5':{'ind':'b17D1', 'null':''},
'58.5':{'ind':'b17D3', 'null':''},
'59.5':{'ind':'b17D5', 'null':''},
'60.5':{'ind':'b3C7', 'null':'b3C9'},
'61.5':{'ind':'b1A1', 'null':'b1A7'},
'62.5':{'ind':'b3D3', 'null':'b3D4'},
'63.5':{'ind':'b17D7', 'null':'b17D9'},
'64.5':{'ind':'b17E2', 'null':'b17E4'},
'65.5':{'ind':'b9F9', 'null':'b3D8'},
'66.5':{'ind':'b3D6', 'null':'b3D8'},
'70.5':{'ind':'b3E1', 'null':'b3E3'},
'71.5':{'ind':'b1I1', 'null':'b1H6'},
'72.5':{'ind':'b3E5', 'null':'b3E7'},
'73.5':{'ind':'b3E9', 'null':'b3F2'},
'76.5':{'ind':'b3F4', 'null':'b3F6'},
'80.5':{'ind':'b3F8', 'null':'b3G1'},
'81.5':{'ind':'b3G3', 'null':'b3G5'},
'82.5':{'ind':'b3G7', 'null':'b3G9'},
'83.5':{'ind':'b3H2', 'null':'b3H4'},
'87.5':{'ind':'b17F1', 'null':''},
'92.5':{'ind':'b17F2', 'null':''},
'97.5':{'ind':'b17F4', 'null':''},
'102.5':{'ind':'b17F6', 'null':''},
'103.5':{'ind':'b17F8', 'null':''},
'104.5':{'ind':'b17G1', 'null':''}
}
# Barcode IDs of replicate constructs, keyed by promoter-variant class
# (e.g. '35wt10L' appears to mean wild-type -35 box with a library -10 box).
# NOTE(review): key naming interpretation inferred from the identifiers —
# confirm against the library design documentation.
rgap_cons={
'35wt10L':['b13C5','b13C6','b13C7','b13C8','b13C9','b13D1','b13D2','b13D3','b13D4','b13D5','b13D6'],
'35con10L':['b13D7','b13D8','b13D9','b13E1','b13E2','b13E3','b13E4','b13E5','b13E6','b13E7','b13E8'],
'35L10wt':['b13E9','b13F1','b13F2','b13F3','b13F4','b13F5','b13F6','b13F7','b13F8','b13F9','b13G1'],
'35L10con':['b13G2','b13G3','b13G4','b13G5','b13G6','b13G7','b13G8','b13G9','b13H1','b13H2','b13H3'],
'35L10ext':['b13B2','b13B3','b13B4','b13B5','b13B6','b13B7','b13B8','b13B9','b13C1','b13C2','b13C3','b13C4']
}
library_groups = {
'c61r18':{
'all':['b1A1', 'b1B6', 'b1B7', 'b1B8', 'b1B9', 'b1C1', 'b1C2', 'b1C3', 'b1C4', 'b1C5', 'b1C6', 'b1C7', 'b1C8', 'b1C9', 'b1D1', 'b1D2', 'b1D3', 'b1D4', 'b1D5', 'b1D6', 'b1D7', 'b1D8', 'b1D9', 'b1E1', 'b1E2', 'b2C6', 'b2C7', 'b2C8', 'b2C9', 'b2D1', 'b2D2', 'b2D3', 'b2D4', 'b2D5', 'b2D6', 'b2D7', 'b2D8', 'b2D9', 'b2E1', 'b2E2', 'b2E3', 'b2E4', 'b2E5', 'b2E6', 'b2E7', 'b2E8', 'b2E9', 'b2F1', 'b2F2'],
'inliers':['b2C6', 'b2C7', 'b2C8', 'b2C9', 'b2D1', 'b2D2', 'b2D3', 'b2D4', 'b2D5', 'b2D6', 'b2D7', 'b2D8', 'b2D9', 'b2E1', 'b2E3', 'b2E6', 'b2E7', 'b2E8', 'b2E9', 'b2F1', 'b2F2', 'b1B8', 'b1B9', 'b1C1', 'b1C2', 'b1C3', 'b1C4', 'b1C5', 'b1C6', 'b1C7', 'b1C8', 'b1C9', 'b1D1', 'b1D2', 'b1D3', 'b1D4', 'b1D5', 'b1D6', 'b1D7', 'b1D8', 'b1D9', 'b1E1', 'b1E2'],
'outliers':['b1B7','b2E2','b2E4'],
'wt':['b1A1']},
'c61r18.10':{
'all':['b1A1', 'b2C6', 'b2C7', 'b2C8', 'b2C9', 'b2D1', 'b2D2', 'b2D3', 'b2D4', 'b2D5', 'b2D6', 'b2D7', 'b2D8', 'b2D9', 'b2E1', 'b2E2', 'b2E3', 'b2E4', 'b2E5', 'b2E6', 'b2E7', 'b2E8', 'b2E9', 'b2F1', 'b2F2'],
'inliers':['b2C6', 'b2C7', 'b2C8', 'b2C9', 'b2D1', 'b2D2', 'b2D3', 'b2D4', 'b2D5', 'b2D6', 'b2D7', 'b2D8', 'b2D9', 'b2E1', 'b2E3', 'b2E6', 'b2E7', 'b2E8', 'b2E9', 'b2F1', 'b2F2'],
'outliers':['b2E2','b2E4'],
'wt':['b1A1']},
'c61r18.35':{
'all':['b1A1', 'b1B6', 'b1B7', 'b1B8', 'b1B9', 'b1C1', 'b1C2', 'b1C3', 'b1C4', 'b1C5', 'b1C6', 'b1C7', 'b1C8', 'b1C9', 'b1D1', 'b1D2', 'b1D3', 'b1D4', 'b1D5', 'b1D6', 'b1D7', 'b1D8', 'b1D9', 'b1E1', 'b1E2'],
'inliers':['b1B8', 'b1B9', 'b1C1', 'b1C2', 'b1C3', 'b1C4', 'b1C5', 'b1C6', 'b1C7', 'b1C8', 'b1C9', 'b1D1', 'b1D2', 'b1D3', 'b1D4', 'b1D5', 'b1D6', 'b1D7', 'b1D8', 'b1D9', 'b1E1', 'b1E2'],
'outliers':['b1B7'],
'wt':['b1A1']},
'c71r18':{
'all':['b1I1', 'b2F3', 'b2F4', 'b2F5', 'b2F6', 'b2F7', 'b2F8', 'b2F9', 'b2G1', 'b2G2', 'b2G3', 'b2G4', 'b2G5', 'b2G6', 'b2G7', 'b2G8', 'b2G9', 'b2H1', 'b2H2', 'b2H3', 'b2H4', 'b2H5', 'b2H6', 'b2H7', 'b2H8', 'b1I5', 'b1I6', 'b1I7', 'b1I8', 'b1I9', 'b2A1', 'b2A2', 'b2A3', 'b2A4', 'b2A5', 'b2A6', 'b2A7', 'b2A8', 'b2A9', 'b2B1', 'b2B2', 'b2B3', 'b2B4', 'b2B5', 'b2B6', 'b2B7', 'b2B8', 'b2B9', 'b2C1'],
'inliers':['b2F3', 'b2F5', 'b2F6', 'b2F7', 'b2F8', 'b2F9', 'b2G2', 'b2G3', 'b2G4', 'b2G5', 'b2G6', 'b2G7', 'b2G8', 'b2H1', 'b2H3', 'b2H4', 'b2H5', 'b2H7', 'b2H8', 'b1I8', 'b1I9', 'b2A1', 'b2A2', 'b2A4', 'b2A5', 'b2A6', 'b2A7', 'b2A8', 'b2A9', 'b2B1', 'b2B2', 'b2B3', 'b2B4', 'b2B5', 'b2B6', 'b2B7', 'b2B8', 'b2B9', 'b2C1'],
'outliers':['b2F4'],
'wt':['b1I1']},
'c71r18.10':{
'all':['b1I1', 'b2F3', 'b2F4', 'b2F5', 'b2F6', 'b2F7', 'b2F8', 'b2F9', 'b2G1', 'b2G2', 'b2G3', 'b2G4', 'b2G5', 'b2G6', 'b2G7', 'b2G8', 'b2G9', 'b2H1', 'b2H2', 'b2H3', 'b2H4', 'b2H5', 'b2H6', 'b2H7', 'b2H8'],
'inliers':['b2F3', 'b2F5', 'b2F6', 'b2F7', 'b2F8', 'b2F9', 'b2G2', 'b2G3', 'b2G4', 'b2G5', 'b2G6', 'b2G7', 'b2G8', 'b2H1', 'b2H3', 'b2H4', 'b2H5', 'b2H7', 'b2H8'],
'outliers':['b2F4'],
'wt':['b1I1']},
'c71r18.35':{
'all':['b1I1', 'b1I5', 'b1I6', 'b1I7', 'b1I8', 'b1I9', 'b2A1', 'b2A2', 'b2A3', 'b2A4', 'b2A5', 'b2A6', 'b2A7', 'b2A8', 'b2A9', 'b2B1', 'b2B2', 'b2B3', 'b2B4', 'b2B5', 'b2B6', 'b2B7', 'b2B8', 'b2B9', 'b2C1'],
'inliers':['b1I8', 'b1I9', 'b2A1', 'b2A2', 'b2A4', 'b2A5', 'b2A6', 'b2A7', 'b2A8', 'b2A9', 'b2B1', 'b2B2', 'b2B3', 'b2B4', 'b2B5', 'b2B6', 'b2B7', 'b2B8', 'b2B9', 'b2C1'],
'outliers':[],
'wt':['b1I1']},
'occlusion':{
'all':['b5E4', 'b5A8', 'b5B2', 'b5B3', 'b5B4', 'b5B5', 'b5B6', 'b5B7', 'b5B8', 'b5B9', 'b5C1', 'b5C2', 'b5C3', 'b5C4', 'b5C5', 'b5C6', 'b5C7', 'b5C8', 'b5C9', 'b5D1', 'b5D2', 'b5D3', 'b5D4', 'b5D5', 'b5D6', 'b5D7', 'b5F2', 'b5F3', 'b5F4', 'b5F5', 'b5F6', 'b5F7', 'b5F8', 'b5F9', 'b5G1', 'b5G2', 'b5G3', 'b5G4', 'b5G5', 'b5G6', 'b5G7', 'b5G8', 'b5G9', 'b5H1', 'b5H2', 'b5H3', 'b5H4', 'b5H5', 'b5H6', 'b5H7'],
'inliers':['b5B2', 'b5B3', 'b5B5', 'b5B7', 'b5B8', 'b5B9', 'b5C1', 'b5C2', 'b5C3', 'b5C4', 'b5C5', 'b5C7', 'b5C8', 'b5C9', 'b5D1', 'b5D2', 'b5D3', 'b5D4', 'b5D5', 'b5D6', 'b5D7', 'b5F2', 'b5F3', 'b5F4', 'b5F5', 'b5F6', 'b5F7', 'b5F8', 'b5F9', 'b5G1', 'b5G3', 'b5G4', 'b5G6', 'b5G8', 'b5G9', 'b5H1', 'b5H2', 'b5H3', 'b5H4', 'b5H5', 'b5H6', 'b5H7'],
'outliers':['b5G5', 'b5G7'],
'wt':[]},
'oc4':{
'all':['b5A8', 'b5B2', 'b5B3', 'b5B4', 'b5B5', 'b5B6', 'b5B7', 'b5B8', 'b5B9', 'b5C1', 'b5C2', 'b5C3', 'b5C4', 'b5C5', 'b5C6', 'b5C7', 'b5C8', 'b5C9', 'b5D1', 'b5D2', 'b5D3', 'b5D4', 'b5D5', 'b5D6', 'b5D7'],
'inliers':['b5B2', 'b5B3', 'b5B5', 'b5B7', 'b5B8', 'b5B9', 'b5C1', 'b5C2', 'b5C3', 'b5C4', 'b5C5', 'b5C7', 'b5C8', 'b5C9', 'b5D1', 'b5D2', 'b5D3', 'b5D4', 'b5D5', 'b5D6', 'b5D7'],
'outliers':[],
'wt':['b5A8']},
'oc0':{
'all':['b5E4', 'b5F2', 'b5F3', 'b5F4', 'b5F5', 'b5F6', 'b5F7', 'b5F8', 'b5F9', 'b5G1', 'b5G2', 'b5G3', 'b5G4', 'b5G5', 'b5G6', 'b5G7', 'b5G8', 'b5G9', 'b5H1', 'b5H2', 'b5H3', 'b5H4', 'b5H5', 'b5H6', 'b5H7'],
'inliers':['b5F2', 'b5F3', 'b5F4', 'b5F5', 'b5F6', 'b5F7', 'b5F8', 'b5F9', 'b5G1', 'b5G3', 'b5G4', 'b5G6', 'b5G8', 'b5G9', 'b5H1', 'b5H2', 'b5H3', 'b5H4', 'b5H5', 'b5H6', 'b5H7'],
'outliers':['b5G5', 'b5G7'],
'wt':['b5E4']},
'61c-r18.35':{
'all':['b1A7', 'b1E3', 'b1E4', 'b1E5', 'b1E6', 'b1E7', 'b1E8', 'b1E9', 'b1F1', 'b1F2', 'b1F3', 'b1F4', 'b1F5', 'b1F6', 'b1F7', 'b1F8', 'b1F9', 'b1G1', 'b1G2', 'b1G3', 'b1G4', 'b1G5', 'b1G6', 'b1G7', 'b1G8'],
'inliers':['b1E3', 'b1E4', 'b1E5', 'b1E7', 'b1E8', 'b1F1', 'b1F2', 'b1F3', 'b1F4', 'b1F5', 'b1F6', 'b1F7', 'b1F8', 'b1F9', 'b1G1', 'b1G2', 'b1G3', 'b1G4', 'b1G5', 'b1G6', 'b1G8'],
'outliers':[],
'wt':['b1A7']},
'wtc60r18':{
'all':['b3H6', 'b3H7', 'b3H8', 'b3H9', 'b3I1', 'b3I2', 'b3I3', 'b3I4', 'b3I5', 'b3I6', 'b3I7', 'b3I8', 'b3I9', 'b4A1', 'b4A2', 'b4A3', 'b4A4', 'b4A5', 'b4A6', 'b4A7', 'b4A8', 'b4A9', 'b4B1', 'b4B2', 'b4B3', 'b4B4', 'b4B5', 'b4B6', 'b4B7', 'b4B8', 'b4B9', 'b4C1', 'b4C2', 'b4C3', 'b4C4', 'b4C5', 'b4C6', 'b4C7', 'b4C8', 'b4C9', 'b4D1', 'b4D2', 'b4D3', 'b4D4', 'b4D5', 'b4D6', 'b4D7', 'b4D8'],
'inliers':['b3H6', 'b3H7', 'b3H8', 'b3H9', 'b3I1', 'b3I2', 'b3I3', 'b3I4', 'b3I5', 'b3I6', 'b3I7', 'b3I8', 'b3I9', 'b4A1', 'b4A2', 'b4A3', 'b4A4', 'b4A5', 'b4A6', 'b4A8', 'b4A9', 'b4B1', 'b4B2', 'b4B3', 'b4B4', 'b4B5', 'b4B6', 'b4B7', 'b4B8', 'b4B9', 'b4C1', 'b4C2', 'b4C4', 'b4C5', 'b4C6', 'b4C7', 'b4C8', 'b4C9', 'b4D1', 'b4D2', 'b4D3', 'b4D4', 'b4D5', 'b4D6', 'b4D7', 'b4D8'],
'outliers':['b4A7', 'b4C3'],
'wt':[]},
'wtc60r18.35':{
'all':['b3H6', 'b3H7', 'b3H8', 'b3H9', 'b3I1', 'b3I2', 'b3I3', 'b3I4', 'b3I5', 'b3I6', 'b3I7', 'b3I8', 'b3I9', 'b4A1', 'b4A2', 'b4A3', 'b4A4', 'b4A5', 'b4A6', 'b4A7', 'b4A8', 'b4A9', 'b4B1', 'b4B2'],
'inliers':['b3H6', 'b3H7', 'b3H8', 'b3H9', 'b3I1', 'b3I2', 'b3I3', 'b3I4', 'b3I5', 'b3I6', 'b3I7', 'b3I8', 'b3I9', 'b4A1', 'b4A2', 'b4A3', 'b4A4', 'b4A5', 'b4A6', 'b4A8', 'b4A9', 'b4B1', 'b4B2'],
'outliers':['b4A7'],
'wt':[]},
'wtc60r18.10':{
'all':['b4B3', 'b4B4', 'b4B5', 'b4B6', 'b4B7', 'b4B8', 'b4B9', 'b4C1', 'b4C2', 'b4C3', 'b4C4', 'b4C5', 'b4C6', 'b4C7', 'b4C8', 'b4C9', 'b4D1', 'b4D2', 'b4D3', 'b4D4', 'b4D5', 'b4D6', 'b4D7', 'b4D8'],
'inliers':['b4B3', 'b4B4', 'b4B5', 'b4B6', 'b4B7', 'b4B8', 'b4B9', 'b4C1', 'b4C2', 'b4C4', 'b4C5', 'b4C6', 'b4C7', 'b4C8', 'b4C9', 'b4D1', 'b4D2', 'b4D3', 'b4D4', 'b4D5', 'b4D6', 'b4D7', 'b4D8'],
'outliers':['b4C3'],
'wt':[]},
'wtc61r18.10':{
'all':['b4D9', 'b4E1', 'b4E2', 'b4E3', 'b4E4', 'b4E5', 'b4E6', 'b4E7', 'b4E8', 'b4E9', 'b4F1', 'b4F2', 'b4F3', 'b4F4', 'b4F5', 'b4F6', 'b4F7', 'b4F8', 'b4F9', 'b4G1', 'b4G2', 'b4G3', 'b4G4', 'b4G5'],
'inliers':['b4E1', 'b4E2', 'b4E3', 'b4E4', 'b4E6', 'b4E7', 'b4E8', 'b4E9', 'b4F1', 'b4F2', 'b4F3', 'b4F4', 'b4F5', 'b4F6', 'b4F8', 'b4F9', 'b4G1', 'b4G2', 'b4G3', 'b4G4', 'b4G5'],
'outliers':['b4D9', 'b4F7', 'b4E5'],
'wt':[]},
'wtc71r18.10':{
'all':['b4G6', 'b4G7', 'b4G8', 'b4G9', 'b4H1', 'b4H2', 'b4H3', 'b4H4', 'b4H5', 'b4H6', 'b4H7', 'b4H8', 'b4H9', 'b4I1', 'b4I2', 'b4I3', 'b4I4', 'b4I5', 'b4I6', 'b4I7', 'b4I8', 'b4I9', 'b5A1', 'b5A2'],
'inliers':['b4G6', 'b4G7', 'b4G8', 'b4G9', 'b4H1', 'b4H2', 'b4H3', 'b4H4', 'b4H5', 'b4H6', 'b4H7', 'b4H8', 'b4H9', 'b4I1', 'b4I2', 'b4I3', 'b4I4', 'b4I5', 'b4I6', 'b4I7', 'b4I8', 'b4I9', 'b5A1', 'b5A2'],
'outliers':[],
'wt':[]},
'DJc61r18':{
'all':['DJb01', 'DJb02', 'DJb03', 'DJb04', 'DJb05', 'DJb06', 'DJb07', 'DJb08', 'DJb09', 'DJb10', 'DJb11', 'DJb12', 'DJb13', 'DJb14', 'DJb15', 'DJb16', 'DJb17', 'DJb18', 'DJb19', 'DJb20', 'DJb21', 'DJb22', 'DJb23', 'DJb24', 'DJa01', 'DJa02', 'DJa03', 'DJa04', 'DJa05', 'DJa06', 'DJa07', 'DJa08', 'DJa09', 'DJa10', 'DJa11', 'DJa12', 'DJa13', 'DJa14', 'DJa15', 'DJa16', 'DJa17', 'DJa18', 'DJa19', 'DJa20', 'DJa21', 'DJa22', 'DJa23', 'DJa24'],
'inliers':['DJb01', 'DJb02', 'DJb03', 'DJb04', 'DJb05', 'DJb06', 'DJb07', 'DJb08', 'DJb09', 'DJb10', 'DJb11', 'DJb12', 'DJb13', 'DJb14', 'DJb15', 'DJb16', 'DJb17', 'DJb18', 'DJb19', 'DJb20', 'DJb21', 'DJb22', 'DJb23', 'DJb24', 'DJa01', 'DJa02', 'DJa03', 'DJa04', 'DJa05', 'DJa06', 'DJa07', 'DJa08', 'DJa09', 'DJa10', 'DJa11', 'DJa12', 'DJa13', 'DJa14', 'DJa15', 'DJa16', 'DJa17', 'DJa18', 'DJa19', 'DJa20', 'DJa21', 'DJa23', 'DJa24'],
'outliers':['DJa22'],
'wt':['DJb03']},
'DJc61r18.35':{
'all':['DJb01', 'DJb02', 'DJb03', 'DJb04', 'DJb05', 'DJb06', 'DJb07', 'DJb08', 'DJb09', 'DJb10', 'DJb11', 'DJb12', 'DJb13', 'DJb14', 'DJb15', 'DJb16', 'DJb17', 'DJb18', 'DJb19', 'DJb20', 'DJb21', 'DJb22', 'DJb23', 'DJb24'],
'inliers':['DJb01', 'DJb02', 'DJb03', 'DJb04', 'DJb05', 'DJb06', 'DJb07', 'DJb08', 'DJb09', 'DJb10', 'DJb11', 'DJb12', 'DJb13', 'DJb14', 'DJb15', 'DJb16', 'DJb17', 'DJb18', 'DJb19', 'DJb20', 'DJb21', 'DJb22', 'DJb23', 'DJb24'],
'outliers':[],
'wt':['DJb03']},
'DJc61r18.10':{
'all':['DJb03', 'DJa01', 'DJa02', 'DJa03', 'DJa04', 'DJa05', 'DJa06', 'DJa07', 'DJa08', 'DJa09', 'DJa10', 'DJa11', 'DJa12', 'DJa13', 'DJa14', 'DJa15', 'DJa16', 'DJa17', 'DJa18', 'DJa19', 'DJa20', 'DJa21', 'DJa22', 'DJa23', 'DJa24'],
'inliers':['DJa01', 'DJa02', 'DJa03', 'DJa04', 'DJa05', 'DJa06', 'DJa07', 'DJa08', 'DJa09', 'DJa10', 'DJa11', 'DJa12', 'DJa13', 'DJa14', 'DJa15', 'DJa16', 'DJa17', 'DJa18', 'DJa19', 'DJa20', 'DJa21', 'DJa23', 'DJa24'],
'outliers':['DJa22'],
'wt':['DJb03']},
'c60r18.10':{
'all':['b3C7', 'b7D8', 'b7D9', 'b7E1', 'b7E2', 'b7E3', 'b7E4', 'b7E5', 'b7E6', 'b7E7', 'b7E8', 'b7E9', 'b7F1', 'b7F2', 'b7F3', 'b7F4', 'b7F5', 'b7F6', 'b7F7', 'b7F8', 'b7F9', 'b7G1', 'b7G2', 'b7G3', 'b7G4'],
'inliers':['b7D8', 'b7D9', 'b7E1', 'b7E2', 'b7E3', 'b7E4', 'b7E5', 'b7E6', 'b7E7', 'b7E8', 'b7E9', 'b7F1', 'b7F2', 'b7F3', 'b7F4', 'b7F5', 'b7F6', 'b7F7', 'b7F8', 'b7G1', 'b7G2', 'b7G3', 'b7G4'],
'outliers':['b7F9'],
'wt':['b3C7']},
'c62r18.10':{
'all':['b3D3', 'b7G5', 'b7G6', 'b7G7', 'b7G8', 'b7G9', 'b7H1', 'b7H2', 'b7H3', 'b7H4', 'b7H5', 'b7H6', 'b7H7', 'b7H8', 'b7H9', 'b7I1', 'b7I2', 'b7I3', 'b7I4', 'b7I5', 'b7I6', 'b7I7', 'b7I8', 'b7I9', 'b8A1'],
'inliers':['b7G5', 'b7G6', 'b7G7', 'b7G8', 'b7G9', 'b7H1', 'b7H2', 'b7H3', 'b7H4', 'b7H5', 'b7H6', 'b7H7', 'b7H8', 'b7H9', 'b7I1', 'b7I2', 'b7I3', 'b7I4', 'b7I5', 'b7I6', 'b7I7', 'b7I8', 'b7I9', 'b8A1'],
'outliers':[],
'wt':['b3D3']},
'c81r18.10':{
'all':['b3G3', 'b8A2', 'b8A3', 'b8A4', 'b8A5', 'b8A6', 'b8A7', 'b8A8', 'b8A9', 'b8B1', 'b8B2', 'b8B3', 'b8B4', 'b8B5', 'b8B6', 'b8B7', 'b8B8', 'b8B9', 'b8C1', 'b8C2', 'b8C3', 'b8C4', 'b8C5', 'b8C6', 'b8C7'],
'inliers':['b8A2', 'b8A3', 'b8A4', 'b8A5', 'b8A6', 'b8A7', 'b8A8', 'b8B1', 'b8B2', 'b8B3', 'b8B4', 'b8B5', 'b8B6', 'b8B7', 'b8B8', 'b8B9', 'b8C1', 'b8C2', 'b8C3', 'b8C4', 'b8C5', 'b8C6', 'b8C7','b8A9'],
'outliers':[],
'wt':['b3G3']},
'c63r18.10':{
'all':['b17D7','b17D8', 'b9D1', 'b9D2', 'b9D3', 'b9D4', 'b9D5', 'b9D6', 'b9D7', 'b9D8', 'b9D9', 'b9E1', 'b9E2', 'b9E3'],
'inliers':['b9D1', 'b9D2', 'b9D3', 'b9D4', 'b9D6', 'b9D7', 'b9D8', 'b9D9', 'b9E1', 'b9E2', 'b9E3'],
'outliers':['b9D5'],
'wt':['b17D7','b17D8']},
'c64r18.10':{
'all':['b17E2','b17E3', 'b9E4', 'b9E5', 'b9E6', 'b9E7', 'b9E8', 'b9E9', 'b9F1', 'b9F2', 'b9F3', 'b9F4', 'b9F5', 'b9F6'],
'inliers':['b9E4', 'b9E5', 'b9E6', 'b9E7', 'b9E8', 'b9E9', 'b9F1', 'b9F2', 'b9F3', 'b9F4', 'b9F5', 'b9F6'],
'outliers':[],
'wt':['b17E2','b17E3']},
'c65r18.10':{
'all':['b9F7', 'b9F8', 'b9F9', 'b9G1', 'b9G2', 'b9G3', 'b9G4', 'b9G5', 'b9G6', 'b9G7', 'b9G8', 'b9G9', 'b9H1', 'b9H2', 'b9H3', 'b9H4', 'b9H5', 'b9H6', 'b9H7', 'b9H8', 'b9H9', 'b9I1', 'b9I2', 'b9I3'],
'inliers':['b9F7', 'b9F8', 'b9F9', 'b9G1', 'b9G2', 'b9G3', 'b9G4', 'b9G5', 'b9G6', 'b9G7', 'b9G8', 'b9G9', 'b9H1', 'b9H2', 'b9H3', 'b9H4', 'b9H5', 'b9H6', 'b9H7', 'b9H8', 'b9H9', 'b9I1', 'b9I2', 'b9I3'],
'outliers':[],
'wt':[]},
'c66r18.10':{
'all':['b3D6', 'b10F8', 'b10F9', 'b10G1', 'b10G2', 'b10G3', 'b10G4', 'b10G5', 'b10G6', 'b10G7', 'b10G8', 'b10G9', 'b10H1', 'b10H2', 'b10H3', 'b10H4', 'b10H5', 'b10H6', 'b10H7', 'b10H8', 'b10H9', 'b10I1', 'b10I2', 'b10I3', 'b10I4'],
'inliers':['b10F8', 'b10F9', 'b10G1', 'b10G2', 'b10G4', 'b10G5', 'b10G6', 'b10G7', 'b10G8', 'b10G9', 'b10H1', 'b10H2', 'b10H3', 'b10H4', 'b10H5', 'b10H6', 'b10H7', 'b10H8', 'b10H9', 'b10I1', 'b10I2', 'b10I3', 'b10I4'],
'outliers':['b10G3'],
'wt':['b3D6']},
'c76r18.10':{
'all':['b3F4', 'b10I5', 'b10I6', 'b10I7', 'b10I8', 'b10I9', 'b11A1', 'b11A2', 'b11A3', 'b11A4', 'b11A5', 'b11A6', 'b11A7', 'b11A8', 'b11A9', 'b11B1', 'b11B2', 'b11B3', 'b11B4', 'b11B5', 'b11B6', 'b11B7', 'b11B8', 'b11B9', 'b11C1'],
'inliers':['b10I5', 'b10I6', 'b10I7', 'b10I8', 'b10I9', 'b11A1', 'b11A2', 'b11A3', 'b11A4', 'b11A5', 'b11A6', 'b11A7', 'b11A8', 'b11A9', 'b11B1', 'b11B2', 'b11B3', 'b11B4', 'b11B5', 'b11B6', 'b11B7', 'b11B8', 'b11B9', 'b11C1'],
'outliers':[],
'wt':['b3F4']},
'c82r18.10':{
'all':['b13H4', 'b13H5', 'b13H6', 'b13H7', 'b13H8', 'b13H9', 'b13I1', 'b13I2', 'b13I3', 'b13I4', 'b13I5', 'b13I6', 'b13I7', 'b13I8', 'b13I9', 'b14A1', 'b14A2', 'b14A3', 'b14A4', 'b14A5', 'b14A6', 'b14A7'],
'inliers':['b13H4', 'b13H5', 'b13H6', 'b13H7', 'b13H8', 'b13H9', 'b13I1', 'b13I2', 'b13I3', 'b13I4', 'b13I5', 'b13I6', 'b13I7', 'b13I8', 'b13I9', 'b14A1', 'b14A2', 'b14A3', 'b14A4', 'b14A5', 'b14A6', 'b14A7'],
'outliers':[],
'wt':[]},
'c72r18.10':{
'all':['b14A8', 'b14A9', 'b14B1', 'b14B2', 'b14B3', 'b14B4', 'b14B5', 'b14B6', 'b14B7', 'b14B8', 'b14B9', 'b14C1', 'b14C2', 'b14C3', 'b14C4', 'b14C5', 'b14C6', 'b14C7', 'b14C8', 'b14C9', 'b14D1', 'b14D2'],
'inliers':['b14A8', 'b14A9', 'b14B1', 'b14B2', 'b14B3', 'b14B4', 'b14B5', 'b14B6', 'b14B7', 'b14B8', 'b14B9', 'b14C1', 'b14C2', 'b14C3', 'b14C4', 'b14C5', 'b14C6', 'b14C7', 'b14C8', 'b14C9', 'b14D1', 'b14D2'],
'outliers':[],
'wt':[]},
'c40':{
'all':['b12I1', 'b12I2', 'b14E3', 'b14E4', 'b14E5', 'b14E6', 'b14E7', 'b14E8', 'b14E9', 'b14F1', 'b14F2', 'b14F3', 'b14F4', 'b14G7', 'b14G8', 'b14G9', 'b14H1', 'b14H2', 'b14H3', 'b14H4', 'b14H5', 'b14H6', 'b14H7', 'b14H8', 'b15C6', 'b15C7', 'b15C8', 'b15C9', 'b15D1', 'b15D2', 'b15D3', 'b15D4', 'b15D5', 'b15D6', 'b15D7'],
'inliers':['b14E3', 'b14E4', 'b14E5', 'b14E6', 'b14E7', 'b14E8', 'b14E9', 'b14F1', 'b14F2', 'b14F3', 'b14F4', 'b14G7', 'b14G8', 'b14G9', 'b14H1', 'b14H2', 'b14H3', 'b14H4', 'b14H5', 'b14H6', 'b14H7', 'b14H8', 'b15C6', 'b15C7', 'b15C8', 'b15C9', 'b15D1', 'b15D2', 'b15D3', 'b15D4', 'b15D5', 'b15D6', 'b15D7'],
'outliers':[],
'wt':['b12I1', 'b12I2', 'b12I3']},
'c41':{
'all':['b14D3', 'b14D4', 'b14D5', 'b14F5', 'b14F6', 'b14F7', 'b14F8', 'b14F9', 'b14G1', 'b14G2', 'b14G3', 'b14G4', 'b14G5', 'b14G6', 'b14H9', 'b14I1', 'b14I2', 'b14I3', 'b14I4', 'b14I5', 'b14I6', 'b14I7', 'b14I8', 'b14I9', 'b15A1', 'b15D8', 'b15D9', 'b15E1', 'b15E2', 'b15E3', 'b15E4', 'b15E5', 'b15E6', 'b15E7', 'b15E8', 'b15E9'],
'inliers':['b14F5', 'b14F6', 'b14F7', 'b14F8', 'b14F9', 'b14G1', 'b14G2', 'b14G3', 'b14G4', 'b14G5', 'b14G6', 'b14H9', 'b14I1', 'b14I2', 'b14I5', 'b14I6', 'b14I7', 'b14I8', 'b15A1', 'b15D8', 'b15D9', 'b15E1', 'b15E2', 'b15E3', 'b15E4', 'b15E5', 'b15E6', 'b15E8', 'b15E9'],
'outliers':['b14I3','b14I4','b14I9','b15E7'],
'wt':['b14D3','b14D4','b14D5']},
'gal':{
'all':['b14D6','b14D7','b14D8','b15A2','b15A3','b15A4','b15A5','b15A6','b15A7','b15A8','b15A9','b15B1','b15B2','b15B3','b15B4','b15B5','b15B6','b15B7','b15B8','b15B9','b15C1','b15C2','b15C3','b15C4','b15C5'],
'inliers':['b15A2','b15A3','b15A5','b15A6','b15A7','b15A9','b15B1','b15B2','b15B3','b15B4','b15B5','b15B6','b15B7','b15B8','b15B9','b15C1','b15C2','b15C5'],
'outliers':['b15A4','b15A8','b15C3','b15C4'],
'wt':['b14D6','b14D7','b14D8']}
}
| [
"[email protected]"
]
| |
192e0a22a39afd8de4675f9032f1eaadfbe026fb | 0cb064f4e2f5b27a189b3e7631bb19f7842e150b | /zvt/recorders/eastmoney/dividend_financing/spo_detail_recorder.py | 5afee75a91dc41f48230b292ccc4813ddf9fab99 | [
"MIT"
]
| permissive | stellar2016/zvt | 35e514927302cffb3577f3535344e2ca55ec9abd | f6c2c05c136b14c0c0f239960f08f85bcdee7c28 | refs/heads/master | 2021-04-19T13:18:01.020365 | 2020-03-22T14:44:26 | 2020-03-22T14:44:26 | 249,607,341 | 0 | 0 | MIT | 2020-03-24T03:51:06 | 2020-03-24T03:51:05 | null | UTF-8 | Python | false | false | 2,077 | py | # -*- coding: utf-8 -*-
from zvdata.utils.pd_utils import pd_is_not_null
from zvdata.utils.time_utils import now_pd_timestamp
from zvdata.utils.utils import to_float
from zvt.api.api import get_dividend_financing, get_spo_detail
from zvt.domain import SpoDetail, DividendFinancing
from zvt.recorders.eastmoney.common import EastmoneyPageabeDataRecorder
class SPODetailRecorder(EastmoneyPageabeDataRecorder):
    """Records secondary public offering (SPO) details from the Eastmoney
    paged API, then back-fills yearly SPO totals into DividendFinancing."""
    data_schema = SpoDetail
    url = 'https://emh5.eastmoney.com/api/FenHongRongZi/GetZengFaMingXiList'
    page_url = url
    # JSON path to the list of records inside each API response page.
    path_fields = ['ZengFaMingXiList']
    def get_original_time_field(self):
        # "ZengFaShiJian" is the SPO (additional issuance) date field.
        return 'ZengFaShiJian'
    def get_data_map(self):
        # schema column -> (API field name, converter)
        return {
            "spo_issues": ("ShiJiZengFa", to_float),
            "spo_price": ("ZengFaJiaGe", to_float),
            "spo_raising_fund": ("ShiJiMuJi", to_float)
        }
    def on_finish(self):
        """Fill DividendFinancing.spo_raising_fund where still NULL by
        summing this entity's recorded SPO details for that calendar year."""
        last_year = str(now_pd_timestamp().year)
        codes = [item.code for item in self.entities]
        # Only rows up to the current year with a missing spo_raising_fund.
        need_filleds = get_dividend_financing(provider=self.provider, codes=codes,
                                              return_type='domain',
                                              session=self.session,
                                              filters=[DividendFinancing.spo_raising_fund.is_(None)],
                                              end_timestamp=last_year)
        for item in need_filleds:
            # Sum SPO raising funds recorded within the same calendar year.
            df = get_spo_detail(provider=self.provider, entity_id=item.entity_id,
                                columns=[SpoDetail.timestamp, SpoDetail.spo_raising_fund],
                                start_timestamp=item.timestamp,
                                end_timestamp="{}-12-31".format(item.timestamp.year))
            if pd_is_not_null(df):
                item.spo_raising_fund = df['spo_raising_fund'].sum()
        # Persist the back-filled rows before delegating to the base class.
        self.session.commit()
        super().on_finish()
__all__ = ['SPODetailRecorder']
if __name__ == '__main__':
    # Manual smoke run against a single stock code.
    # init_log('spo_detail.log')
    recorder = SPODetailRecorder(codes=['000999'])
    recorder.run()
| [
"[email protected]"
]
| |
3ed2beb303c28748c85454eea580163c3338d096 | 3ccd3465c241071827ad98bac61f85d8405bffc9 | /test/talkytest/clients/voice/tts/test_osxsay.py | eb28c42d580273e2a2717801aba2380cd9cfccaf | [
"MIT"
]
| permissive | keiffster/talk-y | a12e2590f3170af1debb4add9c27fd12adb279fa | dd2bb2a816c868770d9bec8f02ee9f2bbfcbae2a | refs/heads/master | 2021-06-25T08:32:39.020921 | 2020-02-16T17:18:45 | 2020-02-16T17:18:45 | 102,565,196 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | import unittest
from talky.clients.voice.tts.osxsay import OSXSayTextToSpeach
from talky.config.sections.client.voice.voice import VoiceConfiguration
class OSXSayTextToSpeachTests(unittest.TestCase):
    """Unit tests for the OSX `say`-based text-to-speech client."""

    def test_init(self):
        # Building the TTS client from a default voice configuration
        # should produce a usable (non-None) instance.
        voice_config = VoiceConfiguration()
        speech_client = OSXSayTextToSpeach(voice_config)
        self.assertIsNotNone(speech_client)
| [
"[email protected]"
]
| |
af133b71314acc64bdbd6be37d282d55ba8fde6d | 5b3caf64b77161748d0929d244798a8fb914d9c5 | /Python Excel Examples/WorksheetsApiDemo/background/deleteBackground.py | e5297d90f3031e8342735c46a68eb63f86226e60 | []
| no_license | EiceblueCloud/Spire.Cloud.Excel | 0d56864991eaf8d44c38f21af70db614b1d804b7 | d9845d5cefd15a3ab408b2c9f80828a4767e2b82 | refs/heads/master | 2021-07-20T23:44:39.068568 | 2021-07-15T03:04:49 | 2021-07-15T03:04:49 | 230,225,396 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | import spirecloudexcel
from spirecloudexcel.configuration import Configuration as ExcelConfiguration
# Spire.Cloud API credentials — placeholders to be replaced with real values.
appId = "your id"
appKey = "your key"
baseUrl="https://api.e-iceblue.cn"
configuration = ExcelConfiguration(appId, appKey,baseUrl)
api = spirecloudexcel.api.worksheets_api.WorksheetsApi(configuration)
# Target workbook location on cloud storage ("" selects the default storage).
name = "DeleteBackground.xlsx"
storage = ""
folder = "ExcelDocument"
sheet_name = "Sheet1"
api.delete_background(name, sheet_name, folder=folder, storage=storage) | [
"[email protected]"
]
| |
bfa4bc2ce6ce4fc12f63223c6419c52e683ba101 | 5682e1b9ff4d7387e69c8fcb75fda7c641e68e50 | /LeeJehwan/scraper/1.THEORY/1.6.returns.py | d080dc94f96697dc93d383af081feaeba169bac3 | []
| no_license | daehyun1023/Python | 18b68cd626f8e0f15102eec7280decd773fb84c5 | 99d3c1badd31c3aef2bc9f4fe52296768c5c117e | refs/heads/main | 2023-02-28T08:12:33.314536 | 2021-02-07T23:54:31 | 2021-02-07T23:54:31 | 332,975,801 | 1 | 0 | null | 2021-01-26T04:54:13 | 2021-01-26T04:54:13 | null | UTF-8 | Python | false | false | 69 | py | def plus(a, b):
return a + b
result = plus(2, 4)
print(result)
| [
"[email protected]"
]
| |
a8510ed4c832d93162788220a8e618a6ac40439c | 4a09376ef4ddd8cd5752e79bb0f3c18de6906455 | /iHubCrowdSourcing/tmpScripts/GetUniques.py | 9d7f8666fb8302f725b05fcd7aee852179ddeb03 | []
| no_license | GBelzoni/iHub | 33f36500f090fbfd962977ae266117be499b7cb5 | e816954bfca2127fdaaf750aef8b0442b287003c | refs/heads/master | 2021-01-18T14:45:09.589406 | 2013-09-06T22:52:15 | 2013-09-06T22:52:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | fin =open('modeltree.txt')
lines = fin.readlines()
fin.close()
# NOTE(review): fout is never closed in the visible code — under Python 2
# the buffered output may not be flushed until interpreter exit.
fout =open('unique.txt','w')
# Case-fold every line so duplicates that differ only by case collapse.
lines2 = [ line.lower() for line in lines]
uniques = set(lines2)
# Python 2 print statement — this script predates Python 3.
print uniques
fout.writelines(uniques) | [
"[email protected]"
]
| |
2a99c1e567547ad471064a62fa5571a65b29f715 | 71324aca11e16d6da17b0440e72d0107f5af6e04 | /todo_vue_restful/todo_with_vue_and_restful/todo/migrations/0001_initial.py | 18a12c83dc377d8c7d800fdd1d51722e1aa0f4de | [
"MIT"
]
| permissive | n3k0fi5t/Django_Tutorial | 6bad82a919d1de0162b34f4c7f753cd126b05cc3 | e3953335ca88fe22c68268fd76afb7c4f9bbb55f | refs/heads/master | 2023-02-16T07:56:56.416031 | 2021-01-11T23:17:33 | 2021-01-11T23:17:33 | 291,436,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,136 | py | # Generated by Django 3.1 on 2020-12-14 00:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the todo app: creates the todo_item table."""
    initial = True
    dependencies = [
        # Depends on whichever model the project configured as AUTH_USER_MODEL.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='TodoItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
                ('content', models.TextField()),
                ('create_time', models.DateTimeField(auto_now_add=True)),
                # auto_now updates on every save, so this tracks the last
                # save time, not necessarily when the item was finished.
                ('finish_time', models.DateTimeField(auto_now=True)),
                ('is_finished', models.BooleanField(default=False)),
                ('user', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'todo_item',
                # Newest items first by default.
                'ordering': ['-create_time'],
            },
        ),
    ]
| [
"[email protected]"
]
| |
55011e7d72f18177422f55514b292382081f4dcd | 2caf6885511af24443e22aaa43cd679d694f6f80 | /note/my_note/first_month/day06/demo01.py | 18fab09a906c452f8cebed4af58ddeba84253c43 | []
| no_license | nandadao/Python_note | 7f9ba54a73af05c935b4f7e24cacb728859a6c69 | abddfc2e9a1704c88867cff1898c9251f59d4fb5 | refs/heads/master | 2020-11-25T18:29:50.607670 | 2019-12-19T01:28:02 | 2019-12-19T01:28:02 | 228,793,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,153 | py | """
猜拳
石头 剪刀
剪刀 布
布 石头
随机产生
"""
# import random
# win = ("石头剪刀", "剪刀布", "布石头")
# same = ("石头石头", "剪刀剪刀", "布布")
# choice = ("石头", "剪刀", "布")
# pc1 = choice[random.randint(0, 2)]
# # pc2 = choice[random.randint(0, 2)]
# pc2 = input("请出拳:")
# print(str(pc1)+str(pc2))
# # if str(pc1)+str(pc2) in win or str(pc2)+str(pc1) in win:
# if str(pc2)+str(pc1) in win:
# print("获胜")
# elif str(pc2)+str(pc1) in same:
# print("相同重新开始")
# else:
# print("你输了")
# 统一管理多个数据 :思想很重要
# import random
# tuple_win = (
# ("石头", "剪刀"),
# ("剪刀", "布"),
# ("布", "石头"),
# )
# tuple_item = ("石头", "剪刀", "布")
#
# item_input = input("请出拳:")
# # random.randint(0, 2) # 生成0 1 2
# index_system = random.randint(0, 2)
# item_system = tuple_item[index_system]
#
# if item_input == item_system:
# print("平局")
# elif (item_input, item_system) in tuple_win:
# print("你获胜")
# else:
# print("你失败")
| [
"[email protected]"
]
| |
fb873db4bf3aa718c82d101dda25aca24cd84ce9 | 4edadc6b0c733b208df760e8491b1f1808ed4395 | /image process/plot_loss.py | 9bdbe07e304b8e5f2c385c95b6d5b208c45e8f10 | []
| no_license | RichardcLee/Expression_Transformation | d3f8a1bd0b11071f3f085efe821fabc716e617e6 | ae849789a6c77e4cec0909c0c490305ad13ba06d | refs/heads/master | 2021-04-11T04:51:02.496921 | 2020-09-16T08:21:41 | 2020-09-16T08:21:41 | 248,993,500 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,005 | py | from matplotlib import pyplot as plt
import re
plt.rcParams['font.sans-serif']=['SimHei'] # render CJK glyphs correctly
plt.rcParams['axes.unicode_minus']=False # render the minus sign correctly with a CJK font
lines = []
with open(r"C:\Users\81955\Desktop\ckpts\face\ganimation\200513_232207\logs.txt", "r+") as f:
    lines = f.readlines()
# dis_fake: 2nd term of the WGAN-GP adversarial loss; larger (positive) is better
# dis_real: 1st term of the WGAN-GP adversarial loss; smaller (negative) is better
# dis_real_aus: 2nd term of the conditional expression (AU) loss
# gen_rec: cycle-consistency (reconstruction) loss
# dis: discriminator loss
# gen: generator loss
loss = {
    "dis_fake": [],
    "dis_real": [],
    "dis_real_aus": [],
    "gen_rec": [],
    'dis': [],
    'gen': [],
    "total": []
}
for line in lines:
    # Pull each scalar loss term out of the log line.  Raw-string patterns
    # keep the '\|' escape valid on newer Python versions.
    a, b, c, d = (float(re.findall(r"dis_fake:(.*?)\|", line)[0].strip()),
                  float(re.findall(r"dis_real:(.*?)\|", line)[0].strip()),
                  float(re.findall(r"dis_real_aus:(.*?)\|", line)[0].strip()),
                  float(re.findall(r"gen_rec:(.*?)\|", line)[0].strip()))
    e, f = (float(re.findall(r"dis:(.*?)\|", line)[0].strip()),
            float(re.findall(r"gen:(.*?)\|", line)[0].strip()))
    loss["dis_fake"].append(a)
    loss["dis_real"].append(b)
    loss["dis_real_aus"].append(c)
    # Bug fix: this previously appended d (the gen_rec value), so the
    # "dis loss" subplot silently duplicated the reconstruction curve and
    # the parsed discriminator loss e was never used.
    loss["dis"].append(e)
    loss["gen"].append(f)
    loss["gen_rec"].append(d)
    # Weighted objective: 10*reconstruction + adversarial + 160*conditional.
    loss["total"].append(10*d + 1*(a+b) + 160*c)
# print(loss)
plt.figure(dpi=120)
plt.tight_layout()
plt.subplots_adjust(wspace=0.45, hspace=0.5) # widen gaps between subplots
xy = ["321","322", "323", "324", "325", "326"]
widths = [0.07, 0.07, 0.07, 0.09, 0.09, 0.07]
labels = ['adversarial loss 2', 'adversarial loss 1', 'condition loss', 'cycle consistency loss', 'dis loss', 'gen loss', 'total loss']
ticks_y = [[0, 1, 2, 3, 4, 5], [-5, -4, -3, -2, -1, 0], [0, 0.004, 0.008, 0.012, 0.016], [0, 0.1, 0.2, 0.3, 0.4], [0, 0.1, 0.2, 0.3, 0.4], [-3, -2, -1, 0, 1, 2, 3, 4 ,5, 6]]
ticks_x = ['0', '1w', '2w', '3w', '4w']
scale_x = [0, 10000, 20000, 30000, 40000]
idx = 0
space = 10 # plot every 10th point so the curves are not overcrowded
step = [i for i in range(len(loss["dis_fake"]))] # step indices
fontsize = 10
for name, val in loss.items():
    # The 7th entry ('total') is skipped here; it gets its own figure below.
    if idx == 6:
        continue
    plt.subplot(xy[idx])
    plt.title(labels[idx], fontsize=fontsize+2)
    plt.plot(step[::space], val[::space], linewidth=widths[idx], color='k') # label=labels[idx]
    # plt.legend(loc='best')
    if idx == 4 or idx == 5:
        plt.xlabel("step", fontsize=fontsize-1)
    plt.ylabel("loss value", fontsize=fontsize-1)
    # tick label font sizes
    plt.xticks(scale_x, ticks_x, fontsize=fontsize-1)
    plt.yticks(ticks_y[idx], fontsize=fontsize-1)
    idx += 1
plt.savefig("1.jpg")
plt.show()
fontsize = 20
plt.figure(dpi=80)
plt.plot(step[::space], loss['total'][::space], linewidth=0.2, color='k')
plt.xlabel("step", fontsize=fontsize-6)
plt.ylabel("loss value", fontsize=fontsize-6)
# tick label font sizes
plt.xticks(scale_x, ticks_x, fontsize=fontsize-6)
plt.yticks(fontsize=fontsize-1)
plt.savefig("2.jpg")
plt.show()
| [
"[email protected]"
]
| |
ff2d90b1f5ac7311985425547060f8d42ed0a4e2 | 86100df5db058ea25b1859b6d2d4eafef720bede | /dirWatch.py | f7e991a8815c3efb30907af01c6dd56ae91c92e1 | []
| no_license | SniPE7/MonitorPy | 1afb156d1d8185158012e54bb3c387cfde29c7cd | e3f7aa672a2909abfa080bf3db9b4ff56bd6b97e | refs/heads/master | 2020-12-24T16:42:52.899784 | 2014-12-15T11:32:58 | 2014-12-15T11:32:58 | 28,030,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | #!/usr/bin/env python
import time
from time import gmtime, strftime
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
# NOTE: the bare print statements below are Python 2 syntax; this script
# targets Python 2 and will not parse under Python 3.
class Handler(FileSystemEventHandler):
    # Log each create/delete/move filesystem event with a UTC timestamp.
    def on_created(self, event):
        print strftime("%Y-%m-%d %H:%M:%S", gmtime()), event
    def on_deleted(self, event):
        print strftime("%Y-%m-%d %H:%M:%S", gmtime()), event
    def on_moved(self, event):
        print strftime("%Y-%m-%d %H:%M:%S", gmtime()), event
# Watch /var recursively and report events until interrupted with Ctrl-C.
observer = Observer()
observer.schedule(Handler(), path='/var', recursive=True)
observer.start()
try:
    while True:
        time.sleep(0.1)  # idle loop; the observer's worker thread does the watching
except KeyboardInterrupt:
    observer.stop()
observer.join()
| [
"[email protected]"
]
| |
5b0c427c59c3f900fc2f681738a7253d68c9bc70 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /110_concurrency_parallelism/_exercises/templates/Mastering Concurrency in Python/Chapter03/example2.py | 86fd9c8132767986c2d2392ff9d84973ea193f0b | []
| no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 642 | py | # # ch3/example2.py
#
# ______ _th.. __ thread
# ____ ma__ ______ sqrt
#
# ___ is_prime x
# __ ? < 2
# print('@ is not a prime number.' ?
#
# ____ ? __ 2
# print('@ is a prime number.' ?
#
# ____ ? % 2 __ 0
# print('@ is not a prime number.' ?
#
# ____
# limit _ __. sq ? + 1
# ___ i __ ra.. 3 ? 2
# __ x % i __ 0
# print('@ is not a prime number.' ?
# r_
#
# print('@ is a prime number.' ?
#
# my_input _ 2, 193, 323, 1327, 433785907
#
# ___ x __ ?
# ?.s_n_t.. ? ?
#
# a _ __..('Type something to quit: \n')
# print('Finished.')
| [
"[email protected]"
]
| |
395326db8f49bcc62b832e19268b8b29aecfb822 | 1b8ae90527e93aab7f80ac7b908a5eefe1b1384e | /Data_Structures/queue_with_max.py | 5423249c1576b9d2c695a000e3a2f44bc7861135 | []
| no_license | Frikster/CodingNotebook | 4cbdff626e2b86fde45223775d27078291d41621 | c371cd43bcbac02cb915a9620b291d013d8fb485 | refs/heads/master | 2020-04-14T22:53:58.725429 | 2019-05-11T21:44:54 | 2019-05-11T21:44:54 | 164,182,563 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,857 | py | # Implement a queue with #enqueue and #dequeue, as well as a #max API,
# a method which returns the maximum element still in the queue. This
# is trivial to do by spending O(n) time upon dequeuing.
# Can you do it in O(1) amortized? Maybe use an auxiliary storage structure?
import pdb
import unittest
from Data_Structures.ring_buffer import RingBuffer
from Data_Structures.min_max_stack_queue import MinMaxStackQueue
class QueueWithMax:
    """FIFO queue that also answers max() in O(1) amortized time.

    All operations are delegated to a MinMaxStackQueue, which tracks the
    running maximum internally.
    """

    def __init__(self):
        # The auxiliary structure does the heavy lifting.
        self.store = MinMaxStackQueue()

    def enqueue(self, val):
        """Append val at the back of the queue."""
        self.store.enqueue(val)

    def dequeue(self):
        """Remove and return the front element."""
        return self.store.dequeue()

    def max(self):
        """Return the largest element still queued; -inf for an empty queue."""
        if not len(self):
            return float("-inf")
        return self.store.max()

    def __len__(self):
        return len(self.store)
class Queue:
    """Plain FIFO queue backed by a RingBuffer."""

    def __init__(self):
        # RingBuffer supplies append/shift plus len and str.
        self.store = RingBuffer()

    def enqueue(self, val):
        """Append val at the back of the queue."""
        self.store.append(val)

    def dequeue(self):
        """Remove and return the front element."""
        return self.store.shift()

    def __len__(self):
        return len(self.store)

    def __str__(self):
        return self.store.__str__()
class Test(unittest.TestCase):
    """Smoke tests for QueueWithMax and Queue.

    NOTE(review): both tests only print intermediate state instead of
    asserting expected values, so they can never fail on wrong behaviour —
    consider turning the prints into assertions.
    """
    def test_queue_with_max(self):
        # Interleave enqueue/dequeue and observe how max() tracks the contents.
        q = QueueWithMax()
        print(q.max())
        q.enqueue(5)
        print(q.max())
        q.enqueue(1)
        print(q.max())
        q.enqueue(50)
        print(q.max())
        q.enqueue(5)
        print(q.max())
        q.dequeue()
        q.dequeue()
        print(q.max())
        q.dequeue()
        print(q.max())
    def test_queue(self):
        # Exercise the plain FIFO queue and print its state after each step.
        q = Queue()
        print(q)
        q.enqueue(5)
        print(q)
        q.enqueue(1)
        print(q)
        q.enqueue(50)
        print(q)
        q.enqueue(5)
        print(q)
        q.dequeue()
        q.dequeue()
        print(q)
        q.dequeue()
        print(q)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
]
| |
91a0fd3c2c049c06e58486ec8a171240d7f057f8 | e748e6d96aace1c9149327f384e0de07d743715a | /challange/largestPlateau.py | 14569e16c073b32674b62202fc1a064fd2c5fbe3 | []
| no_license | jorzel/codefights | cdfc4cb32261b064ffc605bfd927bf237885b5d2 | 28b62a2ae3809f0eb487198044c0fe74be09d4e8 | refs/heads/master | 2022-04-28T06:54:26.170503 | 2022-03-23T22:22:20 | 2022-03-23T22:22:20 | 110,818,719 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,968 | py | """
Your terraforming crew is busy at work on a mountaintop, but it's time to break for lunch. In order to allow everyone to have lunch together, we'd like to find a plateau on the mountain where everyone can sit.
Given a topographic map in the form of a matrix of integers map, find the area of the largest plateau.
Example
For
map = [[1,0,0,2,2,0],
[0,0,2,1,0,2],
[0,1,1,2,2,2],
[1,2,1,0,2,1]]
the output should be largestPlateau(map) = 5. The crew could either choose the plateau with elevation 0 or the one with elevation 2; both of which have an area of 5:
"""
from collections import defaultdict
def dfs(graph, start):
    """Return the set of vertices reachable from start.

    graph maps each vertex to a set of neighbouring vertices; the result is
    the connected component containing start (iterative depth-first search).
    """
    seen = set()
    pending = [start]
    while pending:
        node = pending.pop()
        if node in seen:
            continue
        seen.add(node)
        # Only push neighbours we have not visited yet.
        pending.extend(graph[node] - seen)
    return seen
def build_graph(maps):
    """Build an adjacency map linking orthogonally adjacent cells of equal value.

    Each key is a (row, col) cell of the matrix ``maps``; its value is the set
    of neighbouring cells holding the same value.  Connected components of
    this graph are exactly the plateaus of the map.  Cells with no matching
    neighbour do not appear as keys (same as the original behaviour).
    """
    graph = defaultdict(set)
    rows = len(maps)
    cols = len(maps[0])
    for y in range(rows):
        for x in range(cols):
            # Orthogonal neighbours only; diagonals do not join a plateau.
            # (Replaces the original while-True/pop(0) loop with a plain
            # bounded iteration — same edges, clearer control flow.)
            for nx, ny in ((x - 1, y), (x, y - 1), (x + 1, y), (x, y + 1)):
                if 0 <= ny < rows and 0 <= nx < cols and maps[ny][nx] == maps[y][x]:
                    graph[(y, x)].add((ny, nx))
    return graph
def largestPlateau(maps):
    """Return the area (cell count) of the largest plateau in maps.

    A plateau is a maximal set of orthogonally connected cells sharing the
    same value; an empty map yields 0.
    """
    if not maps:
        return 0
    graph = build_graph(maps)
    seen = set()
    best = 0
    for y in range(len(maps)):
        for x in range(len(maps[0])):
            if (y, x) in seen:
                continue
            # Flood-fill the component containing this cell exactly once.
            component = dfs(graph, (y, x))
            seen |= component
            best = max(best, len(component))
    return best
"[email protected]"
]
| |
ee60cd02829d54fe8b02d44339527c5b45fa47b4 | 2e47f91e6401eb7f36746e3078b0baac7fd4b9d1 | /silot/sspair_model.py | 3fb03a074b22eeed01b72335f81bba9b4b96c79b | [
"MIT"
]
| permissive | lqiang2003cn/silot | fdf7fb9e6ed0d814fb6a9f0630cd7913478a870a | d49a41dde74db62d62bdd9ba5d35ff54c07fd9bc | refs/heads/master | 2023-01-15T19:37:37.276344 | 2020-06-18T15:28:19 | 2020-06-18T15:40:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,805 | py | import numpy as np
import tensorflow as tf
from collections import defaultdict
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.colors import to_rgb
from dps import cfg
from dps.utils import Param
from dps.utils.tf import RenderHook, tf_mean_sum, tf_shape
from auto_yolo.models.core import AP, xent_loss
from auto_yolo.models.object_layer import ObjectLayer
from silot.core import VideoNetwork
class SequentialSpair(VideoNetwork):
    """SPAIR applied frame-by-frame to a video.

    Per-object features from the previous frame can be fed back into the
    next frame's backbone features via ``obj_feature_extractor``, giving the
    network a simple form of temporal context.
    """
    # Builder callables and scalar hyperparameters injected by the dps config.
    build_backbone = Param()
    build_feature_fuser = Param()
    build_obj_feature_extractor = Param()
    n_backbone_features = Param()
    anchor_boxes = Param()
    train_reconstruction = Param()
    reconstruction_weight = Param()
    train_kl = Param()
    kl_weight = Param()
    # Sub-modules are built lazily on the first call to build_representation.
    backbone = None
    object_layer = None
    feature_fuser = None
    obj_feature_extractor = None
    @property
    def eval_funcs(self):
        """Average-precision evaluators, built once and cached.

        Empty when the dataset carries no ground-truth annotations.
        """
        if getattr(self, '_eval_funcs', None) is None:
            if "annotations" in self._tensors:
                ap_iou_values = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
                eval_funcs = {"AP_at_point_{}".format(int(10 * v)): AP(v) for v in ap_iou_values}
                eval_funcs["AP"] = AP(ap_iou_values)
                self._eval_funcs = eval_funcs
            else:
                self._eval_funcs = {}
        return self._eval_funcs
    def build_representation(self):
        """Build the per-frame SPAIR graph, record diagnostics and losses."""
        # --- init modules ---
        self.B = len(self.anchor_boxes)
        if self.backbone is None:
            self.backbone = self.build_backbone(scope="backbone")
            if "backbone" in self.fixed_weights:
                self.backbone.fix_variables()
        if self.feature_fuser is None:
            self.feature_fuser = self.build_feature_fuser(scope="feature_fuser")
            if "feature_fuser" in self.fixed_weights:
                self.feature_fuser.fix_variables()
        if self.obj_feature_extractor is None and self.build_obj_feature_extractor is not None:
            self.obj_feature_extractor = self.build_obj_feature_extractor(scope="obj_feature_extractor")
            if "obj_feature_extractor" in self.fixed_weights:
                self.obj_feature_extractor.fix_variables()
        backbone_output, n_grid_cells, grid_cell_size = self.backbone(
            self.inp, self.B*self.n_backbone_features, self.is_training)
        self.H, self.W = [int(i) for i in n_grid_cells]
        self.HWB = self.H * self.W * self.B
        self.pixels_per_cell = tuple(int(i) for i in grid_cell_size)
        H, W, B = self.H, self.W, self.B
        if self.object_layer is None:
            self.object_layer = ObjectLayer(self.pixels_per_cell, scope="objects")
        self.object_rep_tensors = []
        object_rep_tensors = None
        _tensors = defaultdict(list)
        # Process frames sequentially so frame f can condition on frame f-1.
        for f in range(self.n_frames):
            print("Bulding network for frame {}".format(f))
            early_frame_features = backbone_output[:, f]
            # From the second frame on, mix in features extracted from the
            # previous frame's object representations.
            if f > 0 and self.obj_feature_extractor is not None:
                object_features = object_rep_tensors["all"]
                object_features = tf.reshape(
                    object_features, (self.batch_size, H, W, B*tf_shape(object_features)[-1]))
                early_frame_features += self.obj_feature_extractor(
                    object_features, B*self.n_backbone_features, self.is_training)
            frame_features = self.feature_fuser(
                early_frame_features, B*self.n_backbone_features, self.is_training)
            frame_features = tf.reshape(
                frame_features, (self.batch_size, H, W, B, self.n_backbone_features))
            object_rep_tensors = self.object_layer(
                self.inp[:, f], frame_features, self._tensors["background"][:, f], self.is_training)
            self.object_rep_tensors.append(object_rep_tensors)
            for k, v in object_rep_tensors.items():
                _tensors[k].append(v)
        # Stack each per-frame tensor along a new time axis (axis=1).
        self._tensors.update(**{k: tf.stack(v, axis=1) for k, v in _tensors.items()})
        # --- specify values to record ---
        obj = self._tensors["obj"]
        pred_n_objects = self._tensors["pred_n_objects"]
        self.record_tensors(
            batch_size=self.batch_size,
            float_is_training=self.float_is_training,
            cell_y=self._tensors["cell_y"],
            cell_x=self._tensors["cell_x"],
            h=self._tensors["h"],
            w=self._tensors["w"],
            z=self._tensors["z"],
            area=self._tensors["area"],
            cell_y_std=self._tensors["cell_y_std"],
            cell_x_std=self._tensors["cell_x_std"],
            h_std=self._tensors["h_std"],
            w_std=self._tensors["w_std"],
            z_std=self._tensors["z_std"],
            n_objects=pred_n_objects,
            obj=obj,
            latent_area=self._tensors["latent_area"],
            latent_hw=self._tensors["latent_hw"],
            attr=self._tensors["attr"],
        )
        # --- losses ---
        if self.train_reconstruction:
            output = self._tensors['output']
            inp = self._tensors['inp']
            self._tensors['per_pixel_reconstruction_loss'] = xent_loss(pred=output, label=inp)
            self.losses['reconstruction'] = (
                self.reconstruction_weight * tf_mean_sum(self._tensors['per_pixel_reconstruction_loss'])
            )
        if self.train_kl:
            # Per-attribute KL terms are gated by obj so that only cells that
            # actually render an object are penalised.
            self.losses.update(
                obj_kl=self.kl_weight * tf_mean_sum(self._tensors["obj_kl"]),
                cell_y_kl=self.kl_weight * tf_mean_sum(obj * self._tensors["cell_y_kl"]),
                cell_x_kl=self.kl_weight * tf_mean_sum(obj * self._tensors["cell_x_kl"]),
                h_kl=self.kl_weight * tf_mean_sum(obj * self._tensors["h_kl"]),
                w_kl=self.kl_weight * tf_mean_sum(obj * self._tensors["w_kl"]),
                z_kl=self.kl_weight * tf_mean_sum(obj * self._tensors["z_kl"]),
                attr_kl=self.kl_weight * tf_mean_sum(obj * self._tensors["attr_kl"]),
            )
            if cfg.background_cfg.mode == "learn_and_transform":
                self.losses.update(
                    bg_attr_kl=self.kl_weight * tf_mean_sum(self._tensors["bg_attr_kl"]),
                    bg_transform_kl=self.kl_weight * tf_mean_sum(self._tensors["bg_transform_kl"]),
                )
        # --- other evaluation metrics ---
        if "n_annotations" in self._tensors:
            # L1 distance between predicted and true object counts.
            count_1norm = tf.to_float(
                tf.abs(tf.to_int32(self._tensors["pred_n_objects_hard"]) - self._tensors["n_valid_annotations"]))
            self.record_tensors(
                count_1norm=count_1norm,
                count_error=count_1norm > 0.5,
            )
class SequentialSpair_RenderHook(RenderHook):
    """Render hook that visualises SequentialSpair's per-frame predictions:
    reconstructions, per-cell bounding boxes and per-object glimpses/patches,
    saved as images and mp4 animations."""
    N = 4  # number of data points to render
    linewidth = 2
    on_color = np.array(to_rgb("xkcd:azure"))   # colour for obj ~ 1
    off_color = np.array(to_rgb("xkcd:red"))    # colour for obj ~ 0
    gt_color = "xkcd:yellow"                    # ground-truth box colour
    cutoff = 0.5                                # obj threshold for "proposed" boxes
    fetches = "obj z inp output appearance normalized_box background glimpse"
    def __call__(self, updater):
        """Fetch tensors from the network and produce the patch plots."""
        network = updater.network
        # Extend the fetch list with whatever optional tensors the network has.
        if "n_annotations" in network._tensors:
            self.fetches += " annotations n_annotations"
        if 'prediction' in network._tensors:
            self.fetches += " prediction targets"
        if "actions" in network._tensors:
            self.fetches += " actions"
        if "bg_y" in network._tensors:
            self.fetches += " bg_y bg_x bg_h bg_w bg_raw"
        fetched = self._fetch(updater)
        self._prepare_fetched(fetched)
        # self._plot_reconstruction(updater, fetched)
        self._plot_patches(updater, fetched)
        # try:
        #     self._plot_reconstruction(updater, fetched)
        # except Exception:
        #     pass
    @staticmethod
    def normalize_images(images):
        """Scale each image so its maximum pixel value becomes 1."""
        mx = images.reshape(*images.shape[:-3], -1).max(axis=-1)
        return images / mx[..., None, None, None]
    def _prepare_fetched(self, fetched):
        """Derive plotting-ready arrays (boxes in pixels, error images, etc.)
        and store them back into the fetched dict."""
        inp = fetched['inp']
        output = fetched['output']
        prediction = fetched.get("prediction", None)
        targets = fetched.get("targets", None)
        N, T, image_height, image_width, _ = inp.shape
        flat_obj = fetched['obj'].reshape(N, T, -1)
        background = fetched['background']
        # Convert normalized (y, x, h, w) boxes to pixel coordinates.
        box = (
            fetched['normalized_box']
            * [image_height, image_width, image_height, image_width]
        )
        flat_box = box.reshape(N, T, -1, 4)
        n_annotations = fetched.get("n_annotations", np.zeros(N, dtype='i'))
        annotations = fetched.get("annotations", None)
        # actions = fetched.get("actions", None)
        # Per-pixel absolute error and cross-entropy images, normalized for display.
        diff = self.normalize_images(np.abs(inp - output).mean(axis=-1, keepdims=True))
        xent = self.normalize_images(
            xent_loss(pred=output, label=inp, tf=False).mean(axis=-1, keepdims=True))
        learned_bg = "bg_y" in fetched
        bg_y = fetched.get("bg_y", None)
        bg_x = fetched.get("bg_x", None)
        bg_h = fetched.get("bg_h", None)
        bg_w = fetched.get("bg_w", None)
        bg_raw = fetched.get("bg_raw", None)
        fetched.update(
            prediction=prediction,
            targets=targets,
            flat_obj=flat_obj,
            background=background,
            box=box,
            flat_box=flat_box,
            n_annotations=n_annotations,
            annotations=annotations,
            diff=diff,
            xent=xent,
            learned_bg=learned_bg,
            bg_y=bg_y,
            bg_x=bg_x,
            bg_h=bg_h,
            bg_w=bg_w,
            bg_raw=bg_raw,
        )
    def _plot_reconstruction(self, updater, fetched):
        """Save one figure per data point: a row of diagnostic panels per frame."""
        N, T, image_height, image_width, _ = fetched['inp'].shape
        print("Plotting for {} data points...".format(N))
        n_images = 8 if fetched['learned_bg'] else 7
        fig_unit_size = 4
        fig_height = T * fig_unit_size
        fig_width = n_images * fig_unit_size
        for n in range(N):
            fig, axes = plt.subplots(T, n_images, figsize=(fig_width, fig_height))
            if fetched['prediction'] is not None:
                fig_title = "target={}, prediction={}".format(
                    np.argmax(fetched['targets'][n]),
                    np.argmax(fetched['prediction'][n]))
                fig.suptitle(fig_title, fontsize=16)
            for ax in axes.flatten():
                ax.set_axis_off()
            for t in range(T):
                self._plot_helper(n, t, axes[t], **fetched)
            plt.subplots_adjust(left=0.02, right=.98, top=.98, bottom=0.02, wspace=0.1, hspace=0.1)
            self.savefig("reconstruction/" + str(n), fig, updater)
    def _plot_helper(
            self, n, t, axes, *, inp, output, diff, xent, background, flat_obj, flat_box,
            n_annotations, annotations, learned_bg, bg_y, bg_x, bg_h, bg_w, bg_raw, **kwargs):
        """Fill one row of axes with the panels for data point n at frame t."""
        N, T, image_height, image_width, _ = inp.shape
        lw = self.linewidth
        def safe_remove(obj):
            # Some matplotlib artists cannot be removed; ignore those.
            try:
                obj.remove()
            except NotImplementedError:
                pass
        ax_inp = axes[0]
        self.imshow(ax_inp, inp[n, t])
        # Clear any rectangles left over from a previous animation frame.
        for obj in ax_inp.findobj(match=plt.Rectangle):
            safe_remove(obj)
        if t == 0:
            ax_inp.set_title('input')
        ax = axes[1]
        self.imshow(ax, output[n, t])
        if t == 0:
            ax.set_title('reconstruction')
        ax = axes[2]
        self.imshow(ax, diff[n, t])
        if t == 0:
            ax.set_title('abs error')
        ax = axes[3]
        self.imshow(ax, xent[n, t])
        if t == 0:
            ax.set_title('xent')
        ax_all_bb = axes[4]
        self.imshow(ax_all_bb, output[n, t])
        for obj in ax_all_bb.findobj(match=plt.Rectangle):
            safe_remove(obj)
        if t == 0:
            ax_all_bb.set_title('all bb')
        ax_proposed_bb = axes[5]
        self.imshow(ax_proposed_bb, output[n, t])
        for obj in ax_proposed_bb.findobj(match=plt.Rectangle):
            safe_remove(obj)
        if t == 0:
            ax_proposed_bb.set_title('proposed bb')
        ax = axes[6]
        self.imshow(ax, background[n, t])
        if t == 0:
            ax.set_title('background')
        # Plot proposed bounding boxes
        for o, (top, left, height, width) in zip(flat_obj[n, t], flat_box[n, t]):
            # Interpolate between on/off colours according to the obj value.
            color = o * self.on_color + (1-o) * self.off_color
            rect = patches.Rectangle(
                (left, top), width, height, linewidth=lw, edgecolor=color, facecolor='none')
            ax_all_bb.add_patch(rect)
            if o > self.cutoff:
                rect = patches.Rectangle(
                    (left, top), width, height, linewidth=lw, edgecolor=color, facecolor='none')
                ax_proposed_bb.add_patch(rect)
        # Plot true bounding boxes
        for k in range(n_annotations[n]):
            valid, _, _, top, bottom, left, right = annotations[n, t, k]
            if not valid:
                continue
            height = bottom - top
            width = right - left
            rect = patches.Rectangle(
                (left, top), width, height, linewidth=lw, edgecolor=self.gt_color, facecolor='none')
            ax_inp.add_patch(rect)
            rect = patches.Rectangle(
                (left, top), width, height, linewidth=lw, edgecolor=self.gt_color, facecolor='none')
            ax_all_bb.add_patch(rect)
            rect = patches.Rectangle(
                (left, top), width, height, linewidth=lw, edgecolor=self.gt_color, facecolor='none')
            ax_proposed_bb.add_patch(rect)
        if learned_bg:
            # Extra panel: the raw learned background plus its transform box.
            ax = axes[7]
            self.imshow(ax, bg_raw[n])
            for obj in ax.findobj(match=plt.Rectangle):
                safe_remove(obj)
            if t == 0:
                ax.set_title('raw_bg, y={:.2f}, x={:.2f}, h={:.2f}, w={:.2f}'.format(
                    bg_y[n, t, 0], bg_x[n, t, 0], bg_h[n, t, 0], bg_w[n, t, 0]))
            height = bg_h[n, t, 0] * image_height
            top = (bg_y[n, t, 0] + 1) / 2 * image_height - height / 2
            width = bg_w[n, t, 0] * image_width
            left = (bg_x[n, t, 0] + 1) / 2 * image_width - width / 2
            rect = patches.Rectangle(
                (left, top), width, height, linewidth=lw, edgecolor="xkcd:green", facecolor='none')
            ax.add_patch(rect)
    def _plot_patches(self, updater, fetched):
        # Create a plot showing what each object is generating
        import matplotlib.pyplot as plt
        plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'
        from matplotlib import animation
        import matplotlib.gridspec as gridspec
        from itertools import product
        N, T, image_height, image_width, _ = fetched['inp'].shape
        H, W, B = updater.network.H, updater.network.W, updater.network.B
        glimpse = fetched['glimpse']
        appearance = fetched['appearance']
        obj = fetched['obj']
        z = fetched['z']
        fig_unit_size = 3
        fig_height = 2 * B * H * fig_unit_size
        fig_width = 3 * W * fig_unit_size
        for idx in range(N):
            fig = plt.figure(figsize=(fig_width, fig_height))
            time_text = fig.suptitle('', fontsize=20, fontweight='bold')
            # Top half of the grid: 3 columns (glimpse, appearance, alpha)
            # per grid-cell column W, one row per (cell row, anchor box).
            gs = gridspec.GridSpec(2*B*H, 3*W, figure=fig)
            axes = np.array([[fig.add_subplot(gs[i, j]) for j in range(3*W)] for i in range(B*H)])
            for ax in axes.flatten():
                ax.set_axis_off()
            # Bottom half: eight 2x2 panels reused by _plot_helper.
            other_axes = []
            for i in range(2):
                for j in range(4):
                    start_y = B*H + 2*i
                    end_y = start_y + 2
                    start_x = 2*j
                    end_x = start_x + 2
                    ax = fig.add_subplot(gs[start_y:end_y, start_x:end_x])
                    other_axes.append(ax)
            other_axes = np.array(other_axes)
            for ax in other_axes.flatten():
                ax.set_axis_off()
            print("Plotting patches for {}...".format(idx))
            def func(t, axes=axes, other_axes=other_axes):
                # Animation callback: redraw every panel for timestep t.
                print("timestep {}".format(t))
                time_text.set_text('t = {}'.format(t))
                for h, w, b in product(range(H), range(W), range(B)):
                    _obj = obj[idx, t, h, w, b, 0]
                    _z = z[idx, t, h, w, b, 0]
                    ax = axes[h * B + b, 3 * w]
                    color = _obj * self.on_color + (1-_obj) * self.off_color
                    obj_rect = patches.Rectangle(
                        (-0.2, 0), 0.2, 1, clip_on=False, transform=ax.transAxes, facecolor=color)
                    ax.add_patch(obj_rect)
                    if t == 0 and h == 0 and b == 0:
                        ax.set_title("w={}".format(w))
                    if t == 0 and w == 0 and b == 0:
                        ax.set_ylabel("h={}".format(h))
                    self.imshow(ax, glimpse[idx, t, h, w, b, :, :, :])
                    ax = axes[h * B + b, 3 * w + 1]
                    self.imshow(ax, appearance[idx, t, h, w, b, :, :, :3])
                    ax.set_title("obj={:.2f}, z={:.2f}".format(_obj, _z, b))
                    ax = axes[h * B + b, 3 * w + 2]
                    self.imshow(ax, appearance[idx, t, h, w, b, :, :, 3], cmap="gray")
                self._plot_helper(idx, t, other_axes, **fetched)
            plt.subplots_adjust(left=0.02, right=.98, top=.9, bottom=0.02, wspace=0.1, hspace=0.1)
            anim = animation.FuncAnimation(fig, func, frames=T, interval=500)
            path = self.path_for('patches/{}'.format(idx), updater, ext="mp4")
            anim.save(path, writer='ffmpeg', codec='hevc')
            plt.close(fig)
| [
"[email protected]"
]
| |
9d5940b50bb1c85781629bf130b65cdb741c45e3 | 6dc72f5c7a1f802a27cbefdd62f1ac05836c5219 | /PyDemo/DataAnalysisCode/matplotlibTest.py | 8e4a9d085e51d88380970dabe8927a0aa02479f9 | []
| no_license | RockJohnson503/MyDemo | 9e4e5c7b02ee76d5437fd54c36050655fca145fb | dc1062df01cc53eb9a2a1849709d2f88e8b4488c | refs/heads/master | 2022-05-13T22:45:27.051170 | 2020-04-24T07:32:13 | 2020-04-24T07:32:13 | 123,227,439 | 5 | 1 | null | 2022-04-22T21:10:48 | 2018-02-28T04:07:08 | Jupyter Notebook | UTF-8 | Python | false | false | 2,958 | py | # encoding: utf-8
"""
File: matplotlibTest.py
Author: Rock Johnson
"""
import numpy as np
import matplotlib.pyplot as plt
def main():
    # Line
    """
    x = np.linspace(-np.pi, np.pi, 256, endpoint=True) # generate the x samples with numpy
    c, s = np.cos(x), np.sin(x) # define cosine and sine with numpy
    plt.figure(1) # create figure 1
    plt.plot(x, c, color="blue", linewidth=1.0, linestyle="--", label="COS", alpha=0.5) # first arg is the independent variable, second the dependent one; this is the cosine
    plt.plot(x, s, color="red", label="SIN") # this is the sine
    plt.title("COS & SIN") # add a title to the figure
    ax = plt.gca() # handle to the current axes
    ax.spines["right"].set_color("none") # hide the spine
    ax.spines["top"].set_color("none")
    ax.spines["left"].set_position(("data", 0)) # move the spine to a point in data coordinates
    ax.spines["bottom"].set_position(("data", 0))
    ax.xaxis.set_ticks_position("bottom") # put x tick labels below the x axis (this is the default)
    ax.yaxis.set_ticks_position("left") # put y tick labels left of the y axis
    plt.xticks([-np.pi, -np.pi / 2, 0, np.pi / 2, np.pi], [r"$-\pi$", r"$-\pi/2$", r"$0$", r"$\pi/2$", r"$\pi$"])
    plt.yticks(np.linspace(-1, 1, 5, endpoint=True)) # set the tick values shown on the axis
    for label in ax.get_xticklabels() + ax.get_yticklabels(): # style the tick labels
        label.set_fontsize(12)
        label.set_bbox(dict(facecolor="white", edgecolor="None", alpha=0.2))
    plt.legend(loc="upper left") # add the legend
    plt.grid() # draw grid lines
    plt.axis() # set the display range
    plt.fill_between(x, np.abs(x) < 0.5, c, c > 0.5, color="green") # fill regions of the plot
    t = 1
    plt.plot([t, t], [0, np.cos(t)], "y", linewidth="3") # add an annotation line
    plt.annotate("cos(1)", xy=(t, np.cos(1)), xycoords="data", xytext=(+10, +13), textcoords="offset points",
                 arrowprops=dict(arrowstyle="->", connectionstyle="arc3, rad=.4")) # describe the annotation line
    plt.show() # show the figure
    """
    # Scatter
    fig = plt.figure()
    fig.add_subplot(3, 3, 1)
    n = 128
    X = np.random.normal(0, 1, n)
    Y = np.random.normal(0, 1, n)
    T = np.arctan2(Y, X) # per-point colour values
    plt.axes([0.025, 0.025, 0.95, 0.95]) # set the display range
    plt.scatter(X, Y, s=75, c=T, alpha=.5) # draw the scatter points
    plt.xlim(-1.5, 1.5), plt.xticks([]) # x range
    plt.ylim(-1.5, 1.5), plt.yticks([]) # y range
    plt.axis()
    plt.title("scatter")
    plt.xlabel("x")
    plt.ylabel("y")
    plt.show()
def test():
    """Draw the line y = x/2 on axes whose spines pass through the origin."""
    xs = np.linspace(-np.pi, np.pi, 256, endpoint=True)
    plt.figure(1)
    plt.plot(xs, xs / 2)
    axis = plt.gca()
    # Hide the top/right frame and move the remaining spines to the origin.
    for side in ("top", "right"):
        axis.spines[side].set_color("none")
    for side in ("left", "bottom"):
        axis.spines[side].set_position(("data", 0))
    plt.show()
plt.show()
if __name__ == '__main__':
main()
pass | [
"[email protected]"
]
| |
014a8aab3305226f095b71f76e73bfc13dc1caa5 | eb61d62ca1f6f0123e3771105f5dfbbd6115138d | /.history/19-07-21_20210905224104.py | 016085b1d34f5ea2a80953238ca45e9068b0410c | []
| no_license | Alopezm5/CORRECTO-2 | e0f14bcc3a88c0e222d10e3261e68532008bc42e | 223613f1fb04dce3fac9f82f243cb2f22fe100f3 | refs/heads/main | 2023-07-29T06:52:48.147424 | 2021-09-12T20:33:27 | 2021-09-12T20:33:27 | 388,995,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,222 | py | class MENU ():
def __init__(self,titulo,opciones=[]):
self.titulo = titulo
self.opciones = opciones
def menu(self):
print(self.titulo)
for opcion in self.opciones:
print(opcion)
opc=input("Elije opcion [1 ..... {}]:".formato(len(self.opciones)))
return opc
# --- top-level driver: show the main menu once and dispatch on the choice ---
menu1 = MENU("Menú Principal", ["1)Calculadora", "2)Numeros", "3)Listas", "4)Cadenas", "5)Salir"])
opc = menu1.menu()
if opc == "1":
    menu1 = MENU("Menú Calculadora", ["1)Suma", "2)Resta", "3)Multiplicacion", "4) División", "5) Salir"])
    opc1 = menu1.menu()
    if opc1 == "1":
        print("Opcion Suma")
        n1 = int(input("Ingresar n1: "))
        n2 = int(input("Ingresar n2: "))
        suma = n1 + n2
        print("{} + {} = {}".format(n1, n2, suma))
    elif opc1 == "2":
        print("Opcion Resta")
        n1 = int(input("Ingresar n1:"))
        n2 = int(input("Ingresar n2:"))
        resta = n1 - n2
        print("{} - {} = {}".format(n1, n2, resta))
    elif opc1 == "3":
        print("Opcion Multiplicacion")
        n1 = int(input("Ingresar n1:"))
        n2 = int(input("Ingresar n2:"))
        multiplicacion = n1 * n2
        # BUG FIX: ".formato" is not a str method; it raised AttributeError.
        print("{} * {} = {}".format(n1, n2, multiplicacion))
    elif opc1 == "4":
        print("Opcion Division")
        n1 = int(input("Ingresar n1:"))
        n2 = int(input("Ingresar n2:"))
        division = n1 / n2
        # BUG FIX: ".formato" -> ".format" (same defect as above).
        print("{} / {} = {}".format(n1, n2, division))
    elif opc1 == "5":
        print("Opcion Salir")
elif opc == "2":
    menu2 = MENU("Menú Numero", ["1) Perfecto", "2) Primo", "3) Salir"])
    # NOTE(review): unlike the calculator branch, this reads input directly
    # instead of calling menu2.menu(), so the submenu is never printed —
    # confirm whether menu2.menu() was intended here.
    opc2 = input("Elije opcion [1 ..... 3]:")
elif opc == "3":
    print("Listas de menú")
elif opc == "4":
    print("Menú Cadenas")
elif opc == "5":
    print("Menú Salir")
else:
    print("Opcion no valida")
| [
"[email protected]"
]
| |
0b701c732d9ec6fcdb9862d7c4c3919ff3e5e7c8 | 761e3ede4c8eb6ecb08f635cb56303e26a6681c7 | /resnet.py | 2da7db8dc57d107601adcf3c9f3073187691738a | [
"MIT"
]
| permissive | zhaobinNF/PSPNet | 09be6a329ffad6f352f486c2a7f5d337d7a78e46 | 8e37762679a9e19fa9ac7adab4b8f5e3bcf1d09d | refs/heads/master | 2022-12-06T20:44:51.154156 | 2020-09-01T11:28:10 | 2020-09-01T11:28:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,715 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import urllib.request as urllib
from tqdm import tqdm
model_urls = {
'resnet50': {'url': "https://download.pytorch.org/models/resnet50-19c8e357.pth", 'id': 'resnet50-19c8e357.pth'},
'resnet101': {'url': "https://download.pytorch.org/models/resnet101-5d3b4d8f.pth", 'id': 'resnet101-5d3b4d8f.pth'}
}
# Reference
# https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
# model_urls = {
# 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
# 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
# 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
# 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
# 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
# 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
# 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
# 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
# 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
# }
class DownloadProgressBar(tqdm):
    """tqdm progress bar whose update_to method has the signature urllib's
    ``urlretrieve`` expects for its ``reporthook`` callback."""

    def update_to(self, b=1, bsize=1, tsize=None):
        """Advance the bar to block ``b`` of size ``bsize`` (``tsize`` = total bytes)."""
        if tsize is not None:
            self.total = tsize
        downloaded = b * bsize
        # tqdm.update takes an increment, so subtract what is already shown.
        self.update(downloaded - self.n)
def download_model(url, id):
    """Download a pretrained checkpoint into ./weight (if missing) and load it.

    ``url`` is the remote location of the ``.pth`` file and ``id`` its local
    file name.  Returns the state dict produced by ``torch.load``.
    """
    download_dir = './weight'
    # makedirs with exist_ok avoids the race between an isdir test and mkdir.
    os.makedirs(download_dir, exist_ok=True)
    target = os.path.join(download_dir, id)
    if not os.path.isfile(target):
        # Stream the file with a progress bar; update_to adapts urlretrieve's
        # (blocks, block_size, total_size) reporthook into tqdm updates.
        with DownloadProgressBar(unit='B', unit_scale=True, miniters=1, desc=url.split('/')[-1]) as t:
            urllib.urlretrieve(url, filename=target, reporthook=t.update_to)
    else:
        print('Already download')
    state_dict = torch.load(target)
    return state_dict
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Bias-free 3x3 convolution whose padding equals its dilation, so the
    spatial size is preserved when stride is 1."""
    conv = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
    return conv
def conv1x1(in_planes, out_planes, stride=1):
    """Bias-free 1x1 (pointwise) convolution, used for channel projection and
    for the residual downsample path."""
    proj = nn.Conv2d(
        in_planes, out_planes,
        kernel_size=1, stride=stride, bias=False,
    )
    return proj
class BasicBlock(nn.Module):
    """Two-layer residual block (3x3 -> 3x3) used by ResNet-18/34."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and the optional downsample path) carry the spatial stride.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Residual forward pass: relu(bn2(conv2(relu(bn1(conv1(x))))) + shortcut)."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """Three-layer residual block (1x1 reduce -> 3x3 -> 1x1 expand).

    Follows the torchvision "ResNet V1.5" variant: the spatial stride lives
    on the 3x3 convolution (conv2) rather than the first 1x1, which slightly
    improves accuracy over the original arrangement in the ResNet paper.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Width of the middle 3x3 conv; scales for wide/grouped variants.
        width = int(planes * (base_width / 64.)) * groups
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        # conv2 (and the optional downsample path) carry the spatial stride.
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Residual forward pass through the 1x1 -> 3x3 -> 1x1 stack."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class ResNet(nn.Module):
    """Dilated ResNet variant (for dense prediction, e.g. PSPNet).

    Unlike torchvision's ResNet, layer3 and layer4 use stride 1 with
    dilation 2 and 4 respectively, so the output stride is 8 instead of 32.
    """

    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        # NOTE(review): replace_stride_with_dilation is validated but never
        # used below — the dilations are hard-coded in layer3/layer4 instead.
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        # Dilated stages: keep spatial resolution, grow the receptive field.
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        """Stack ``blocks`` residual blocks; the first may downsample.

        NOTE(review): the first block is created without the dilation/groups
        arguments (only later blocks receive them) — confirm this asymmetry
        is intentional for the dilated variant.
        """
        norm_layer = self._norm_layer
        downsample = None
        # previous_dilation = self.dilation
        # if dilate:
        #     self.dilation *= stride
        #     stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Project the shortcut when shape changes (stride or channels).
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def _forward_impl(self, x):
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

    def forward(self, x):
        return self._forward_impl(x)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Construct a ResNet variant and optionally load its pretrained weights.

    ``progress`` is accepted for torchvision API compatibility but is not
    used by the download path below.
    """
    net = ResNet(block, layers, **kwargs)
    if pretrained:
        entry = model_urls[arch]
        net.load_state_dict(download_model(entry['url'], entry['id']))
    return net
def resnet18(pretrained=False, progress=True, **kwargs):
    r"""ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
            (NOTE: currently ignored — the ``_resnet`` helper never uses it)
    """
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
                   **kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
    r"""ResNet-34 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
            (NOTE: currently ignored — the ``_resnet`` helper never uses it)
    """
    return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
                   **kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
    r"""ResNet-50 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
            (NOTE: currently ignored — the ``_resnet`` helper never uses it)
    """
    return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
                   **kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
    r"""ResNet-101 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
            (NOTE: currently ignored — the ``_resnet`` helper never uses it)
    """
    return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
                   **kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
    r"""ResNet-152 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
            (NOTE: currently ignored — the ``_resnet`` helper never uses it)
    """
    return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
                   **kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
    r"""ResNeXt-50 32x4d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
            (NOTE: currently ignored — the ``_resnet`` helper never uses it)
    """
    # Force the ResNeXt cardinality/width, overriding any caller-supplied values.
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 4
    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
    r"""ResNeXt-101 32x8d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
            (NOTE: currently ignored — the ``_resnet`` helper never uses it)
    """
    # Force the ResNeXt cardinality/width, overriding any caller-supplied values.
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 8
    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
    r"""Wide ResNet-50-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
            (NOTE: currently ignored — the ``_resnet`` helper never uses it)
    """
    # Doubling the per-group width is what makes this the "wide" variant.
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
    r"""Wide ResNet-101-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
            (NOTE: currently ignored — the ``_resnet`` helper never uses it)
    """
    # Doubling the per-group width is what makes this the "wide" variant.
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)
| [
"[email protected]"
]
| |
e2c46e9c3c12ed70166afee419a09e346f651ad9 | 09df89395816834ddf77de620f959c22e74d8c00 | /Bit Manipulation/Single Number.py | 382f244e2e89ffbe38dd4070043ad097ea451177 | []
| no_license | gdh756462786/Leetcode_by_python | c853c4e3de255a8b4016c59944a0d40213a539a7 | 6387543a2a23c30aef1d5d37db54ca72cfb19270 | refs/heads/master | 2020-06-22T11:53:24.758506 | 2018-12-28T03:03:31 | 2018-12-28T03:03:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | # coding: utf-8
'''
Given an array of integers, every element appears twice except for one.
Find that single one.
Note:
Your algorithm should have a linear runtime complexity.
Could you implement it without using extra memory?
'''
class Solution(object):
    """XOR-fold the array to isolate the unique element.

    Two equal numbers cancel out (a ^ a == 0) and 0 is the XOR identity
    (a ^ 0 == a); XOR is also commutative, so folding the whole array
    leaves exactly the value that appears only once.
    """
    def singleNumber(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        acc = 0
        for value in nums:
            acc ^= value
        return acc
solution = Solution()
# Use the print() call form: it behaves identically on Python 2 (prints the
# parenthesized expression) and Python 3; the old statement form was Py2-only.
print(solution.singleNumber([2, 2, 5, 3, 3]))
| [
"[email protected]"
]
| |
eecca24a0adcd29352d7af9f0f13143148db787d | bcc2eadf72d0c2a38e595f973ad4840ac038bd53 | /Valid Palindrome.py | f674e38ffc21634991493803e1287fdb53981cfe | []
| no_license | jke-zq/myleetcode.py | 5841cec144884bcef9f0adadbb10dbe4ed34963f | e0f032f34f7fa8fa4f6e5af65c60b3fe581fdc23 | refs/heads/master | 2020-04-03T23:24:39.657299 | 2016-09-18T14:35:25 | 2016-09-18T14:35:25 | 49,498,500 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | class Solution(object):
def isPalindrome(self, s):
"""
:type s: str
:rtype: bool
"""
left, right = 0, len(s) - 1
while left < right:
while left < right and not s[left].isalnum():
left += 1
while left < right and not s[right].isalnum():
right -= 1
if s[left].lower() == s[right].lower():
left, right = left + 1, right - 1
else:
return False
return True | [
"[email protected]"
]
| |
b777223f076ace65e55bd0c622a6919bce5bd167 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /m9bcZKy4niMmsg3JX_24.py | 53034914b39b9e5514126b1c339d6d3218688fbc | []
| no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | """
A group of friends have decided to start a secret society. The name will be
the first letter of each of their names, sorted in alphabetical order.
Create a function that takes in a list of names and returns the name of the
secret society.
### Examples
society_name(["Adam", "Sarah", "Malcolm"]) ➞ "AMS"
society_name(["Harry", "Newt", "Luna", "Cho"]) ➞ "CHLN"
society_name(["Phoebe", "Chandler", "Rachel", "Ross", "Monica", "Joey"]) ➞ "CJMPRR"
### Notes
The secret society's name should be entirely uppercased.
"""
def society_name(friends):
    """Return the secret society's name: the first letter of each friend's
    name, sorted alphabetically and concatenated."""
    initials = sorted(name[0] for name in friends)
    return "".join(initials)
| [
"[email protected]"
]
| |
3d3d447d47cf4de951fd26d005eab160fee57292 | 2baad3d7ac8df92ac7669bc5226b295f6e572109 | /openstack/horizon-mitaka/openstack_dashboard/dashboards/admin/networks/subnets/tests.py | 7fc01c9ac5f3a691c3aa99508ff482236d4349f6 | [
"Apache-2.0"
]
| permissive | naanal/reference | 649d61110c0200718de186012466183c35071399 | e5401ab6d185f9879da4b7fd20dead4823d662cc | refs/heads/master | 2020-04-12T07:33:59.989760 | 2016-12-21T12:26:30 | 2016-12-21T12:26:30 | 57,004,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,953 | py | # Copyright 2012 NEC Corporation
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IsA # noqa
from horizon.workflows import views
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks import tests
from openstack_dashboard.test import helpers as test
DETAIL_URL = 'horizon:admin:networks:subnets:detail'
NETWORKS_INDEX_URL = reverse('horizon:admin:networks:index')
NETWORKS_DETAIL_URL = 'horizon:admin:networks:detail'
class NetworkSubnetTests(test.BaseAdminViewTests):
    """Admin-panel tests for subnet detail/create/update/delete views.

    Each test stubs the neutron API client with mox (``@test.create_stubs``),
    records the exact sequence of expected API calls, calls
    ``self.mox.ReplayAll()``, then drives the view through the Django test
    client and asserts on the rendered template, form errors, or redirect.
    """
    @test.create_stubs({api.neutron: ('network_get', 'subnet_get',)})
    def test_subnet_detail(self):
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest), network.id)\
            .MultipleTimes().AndReturn(network)
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        url = reverse(DETAIL_URL, args=[subnet.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, 'horizon/common/_detail.html')
        self.assertEqual(res.context['subnet'].id, subnet.id)
    @test.create_stubs({api.neutron: ('subnet_get',)})
    def test_subnet_detail_exception(self):
        subnet = self.subnets.first()
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        url = reverse(DETAIL_URL, args=[subnet.id])
        res = self.client.get(url)
        redir_url = NETWORKS_INDEX_URL
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_get(self):
        network = self.networks.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        self.mox.ReplayAll()
        url = reverse('horizon:admin:networks:addsubnet',
                      args=[network.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
    # network_get is recorded twice below: the create workflow looks the
    # network up more than once while handling the POST.
    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_create',)})
    def test_subnet_create_post(self):
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.subnet_create(IsA(http.HttpRequest),
                                  network_id=network.id,
                                  name=subnet.name,
                                  cidr=subnet.cidr,
                                  ip_version=subnet.ip_version,
                                  gateway_ip=subnet.gateway_ip,
                                  enable_dhcp=subnet.enable_dhcp,
                                  allocation_pools=subnet.allocation_pools,
                                  tenant_id=subnet.tenant_id)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = tests.form_data_subnet(subnet)
        url = reverse('horizon:admin:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        redir_url = reverse(NETWORKS_DETAIL_URL, args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_create',)})
    def test_subnet_create_post_network_exception(self):
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        form_data = tests.form_data_subnet(subnet, allocation_pools=[])
        url = reverse('horizon:admin:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        # admin DetailView is shared with userpanel one, so
        # redirection URL on error is userpanel index.
        redir_url = reverse('horizon:project:networks:index')
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_create',)})
    def test_subnet_create_post_subnet_exception(self):
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.subnet_create(IsA(http.HttpRequest),
                                  network_id=network.id,
                                  name=subnet.name,
                                  cidr=subnet.cidr,
                                  ip_version=subnet.ip_version,
                                  gateway_ip=subnet.gateway_ip,
                                  enable_dhcp=subnet.enable_dhcp,
                                  tenant_id=subnet.tenant_id)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        form_data = tests.form_data_subnet(subnet, allocation_pools=[])
        url = reverse('horizon:admin:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        redir_url = reverse(NETWORKS_DETAIL_URL, args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_cidr_inconsistent(self):
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        self.mox.ReplayAll()
        # dummy IPv6 address
        cidr = '2001:0DB8:0:CD30:123:4567:89AB:CDEF/60'
        form_data = tests.form_data_subnet(
            subnet, cidr=cidr, allocation_pools=[])
        url = reverse('horizon:admin:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        expected_msg = 'Network Address and IP version are inconsistent.'
        self.assertContains(res, expected_msg)
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_gw_inconsistent(self):
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        self.mox.ReplayAll()
        # dummy IPv6 address
        gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
        form_data = tests.form_data_subnet(subnet, gateway_ip=gateway_ip,
                                           allocation_pools=[])
        url = reverse('horizon:admin:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post(self):
        subnet = self.subnets.first()
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
                                  name=subnet.name,
                                  enable_dhcp=subnet.enable_dhcp,
                                  dns_nameservers=[],
                                  host_routes=[])\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = tests.form_data_subnet(subnet, allocation_pools=[])
        url = reverse('horizon:admin:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        redir_url = reverse(NETWORKS_DETAIL_URL, args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post_gw_inconsistent(self):
        subnet = self.subnets.first()
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        # dummy IPv6 address
        gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
        form_data = tests.form_data_subnet(subnet, gateway_ip=gateway_ip,
                                           allocation_pools=[])
        url = reverse('horizon:admin:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
    @test.create_stubs({api.neutron: ('subnet_delete',
                                      'subnet_list',
                                      'port_list',
                                      'is_extension_supported',
                                      'list_dhcp_agent_hosting_networks',)})
    def test_subnet_delete(self):
        self._test_subnet_delete()
    @test.create_stubs({api.neutron: ('subnet_delete',
                                      'subnet_list',
                                      'port_list',
                                      'is_extension_supported',
                                      'list_dhcp_agent_hosting_networks',)})
    def test_subnet_delete_with_mac_learning(self):
        self._test_subnet_delete(mac_learning=True)
    def _test_subnet_delete(self, mac_learning=False):
        """Shared body: record expectations for a successful subnet delete,
        POST the table's delete action, expect a redirect back to detail."""
        subnet = self.subnets.first()
        network_id = subnet.network_id
        api.neutron.list_dhcp_agent_hosting_networks(IsA(http.HttpRequest),
                                                     network_id).\
            AndReturn(self.agents.list())
        api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)
        api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.subnets.first()])
        api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.ports.first()])
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        self.mox.ReplayAll()
        form_data = {'action': 'subnets__delete__%s' % subnet.id}
        url = reverse(NETWORKS_DETAIL_URL, args=[network_id])
        res = self.client.post(url, form_data)
        self.assertRedirectsNoFollow(res, url)
    @test.create_stubs({api.neutron: ('subnet_delete',
                                      'subnet_list',
                                      'port_list',
                                      'is_extension_supported',
                                      'list_dhcp_agent_hosting_networks',)})
    def test_subnet_delete_exception(self):
        self._test_subnet_delete_exception()
    @test.create_stubs({api.neutron: ('subnet_delete',
                                      'subnet_list',
                                      'port_list',
                                      'is_extension_supported',
                                      'list_dhcp_agent_hosting_networks',)})
    def test_subnet_delete_exception_with_mac_learning(self):
        self._test_subnet_delete_exception(mac_learning=True)
    def _test_subnet_delete_exception(self, mac_learning=False):
        """Shared body: subnet_delete raises; the view should still redirect
        back to the network detail page rather than error out."""
        subnet = self.subnets.first()
        network_id = subnet.network_id
        api.neutron.list_dhcp_agent_hosting_networks(IsA(http.HttpRequest),
                                                     network_id).\
            AndReturn(self.agents.list())
        api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)\
            .AndRaise(self.exceptions.neutron)
        api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.subnets.first()])
        api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.ports.first()])
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        self.mox.ReplayAll()
        form_data = {'action': 'subnets__delete__%s' % subnet.id}
        url = reverse(NETWORKS_DETAIL_URL, args=[network_id])
        res = self.client.post(url, form_data)
        self.assertRedirectsNoFollow(res, url)
| [
"[email protected]"
]
| |
930bdd6c664af3339a2e4cd163054d717ef73e87 | 8600ea155f279e5a8dfe5a1926038511f6b6a7ea | /hr_timesheet_invoice/report/account_analytic_profit.py | 600535ee02795acbe0674bc506d0626bbe7cc93d | []
| no_license | MarkNorgate/addons-EAD | c2fff89ab16fce3ba19fbe433ee5863705a6f4e5 | 840f28642b5d328e4b86839c413e5164622295a5 | refs/heads/master | 2020-04-23T22:11:00.164438 | 2015-07-22T12:24:53 | 2015-07-22T12:24:53 | 39,501,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,780 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from report import report_sxw
import pooler
class account_analytic_profit(report_sxw.rml_parse):
    """RML report parser for the timesheet "analytic profit" report.

    Exposes helper callables to the RML template through ``localcontext``.
    (Python 2 / OpenERP-era code: ORM access goes through ``pooler``.)
    """
    def __init__(self, cr, uid, name, context):
        super(account_analytic_profit, self).__init__(cr, uid, name, context=context)
        self.localcontext.update({
            'lines': self._lines,
            'user_ids': self._user_ids,
            'journal_ids': self._journal_ids,
            'line': self._line,
        })
    def _user_ids(self, lines):
        # Browse records for the distinct users appearing in *lines*.
        user_obj=pooler.get_pool(self.cr.dbname).get('res.users')
        ids=list(set([b.user_id.id for b in lines]))
        res=user_obj.browse(self.cr, self.uid, ids)
        return res
    def _journal_ids(self, form, user_id):
        # Distinct journals of the analytic lines matching the form's date
        # range, selected journals and the given user.
        # NOTE: form['journal_ids'][0][2] looks like the ids list of an
        # OpenERP many2many (op, id, ids) triple — confirm against the wizard.
        line_obj=pooler.get_pool(self.cr.dbname).get('account.analytic.line')
        journal_obj=pooler.get_pool(self.cr.dbname).get('account.analytic.journal')
        line_ids=line_obj.search(self.cr, self.uid, [
            ('date', '>=', form['date_from']),
            ('date', '<=', form['date_to']),
            ('journal_id', 'in', form['journal_ids'][0][2]),
            ('user_id', '=', user_id),
        ])
        ids=list(set([b.journal_id.id for b in line_obj.browse(self.cr, self.uid, line_ids)]))
        res=journal_obj.browse(self.cr, self.uid, ids)
        return res
    def _line(self, form, journal_ids, user_ids):
        """Aggregate analytic lines into per-invoicing-factor profit rows.

        Rows are keyed by the invoicing factor id (-1 = not invoiceable,
        0 = fixed price); each row accumulates theoretical amount, invoiced
        amount, cost and quantity, then gets 'profit' and 'eff' computed.
        """
        pool=pooler.get_pool(self.cr.dbname)
        line_obj=pool.get('account.analytic.line')
        product_obj=pool.get('product.product')
        price_obj=pool.get('product.pricelist')
        ids=line_obj.search(self.cr, self.uid, [
            ('date', '>=', form['date_from']),
            ('date', '<=', form['date_to']),
            ('journal_id', 'in', journal_ids),
            ('user_id', 'in', user_ids),
        ])
        res={}
        for line in line_obj.browse(self.cr, self.uid, ids):
            if line.account_id.pricelist_id:
                if line.account_id.to_invoice:
                    if line.to_invoice:
                        id=line.to_invoice.id
                        name=line.to_invoice.name
                        discount=line.to_invoice.factor
                    else:
                        name="/"
                        discount=1.0
                        id = -1
                else:
                    name="Fixed"
                    discount=0.0
                    id=0
                pl=line.account_id.pricelist_id.id
                price=price_obj.price_get(self.cr, self.uid, [pl], line.product_id.id, line.unit_amount or 1.0, line.account_id.partner_id.id)[pl]
            else:
                name="/"
                discount=1.0
                id = -1
                price=0.0
            if id not in res:
                res[id]={'name': name, 'amount': 0, 'cost':0, 'unit_amount':0,'amount_th':0}
            # Theoretical (pricelist) amount for this line, discount applied.
            xxx = round(price * line.unit_amount * (1-(discount or 0.0)), 2)
            res[id]['amount_th']+=xxx
            if line.invoice_id:
                # Prorate the line's share of the invoice: compare the invoice
                # total against the pricelist value of all its analytic lines.
                # NOTE(review): the current line's `discount` is reused for
                # every sibling line in this loop — confirm that is intended.
                self.cr.execute('select id from account_analytic_line where invoice_id=%s', (line.invoice_id.id,))
                tot = 0
                for lid in self.cr.fetchall():
                    lid2 = line_obj.browse(self.cr, self.uid, lid[0])
                    pl=lid2.account_id.pricelist_id.id
                    price=price_obj.price_get(self.cr, self.uid, [pl], lid2.product_id.id, lid2.unit_amount or 1.0, lid2.account_id.partner_id.id)[pl]
                    tot += price * lid2.unit_amount * (1-(discount or 0.0))
                if tot:
                    procent = line.invoice_id.amount_untaxed / tot
                    res[id]['amount'] += xxx * procent
                else:
                    res[id]['amount'] += xxx
            else:
                res[id]['amount'] += xxx
            res[id]['cost']+=line.amount
            res[id]['unit_amount']+=line.unit_amount
        for id in res:
            # cost is negative for expenses, so profit = amount + cost.
            # NOTE(review): 'eff' divides by cost — ZeroDivisionError if a
            # group's accumulated cost is exactly 0; confirm upstream data
            # guarantees non-zero cost.
            res[id]['profit']=res[id]['amount']+res[id]['cost']
            res[id]['eff']='%d' % (-res[id]['amount'] / res[id]['cost'] * 100,)
        return res.values()
    def _lines(self, form):
        # All analytic lines matching the wizard's date range, journals and
        # employees (many2many triples unpacked as in _journal_ids above).
        line_obj=pooler.get_pool(self.cr.dbname).get('account.analytic.line')
        ids=line_obj.search(self.cr, self.uid, [
            ('date', '>=', form['date_from']),
            ('date', '<=', form['date_to']),
            ('journal_id', 'in', form['journal_ids'][0][2]),
            ('user_id', 'in', form['employee_ids'][0][2]),
        ])
        res=line_obj.browse(self.cr, self.uid, ids)
        return res
# Register the parser above so OpenERP can render the
# 'report.account.analytic.profit' report from account.analytic.line records
# using the RML template referenced below.
report_sxw.report_sxw('report.account.analytic.profit', 'account.analytic.line', 'addons/hr_timesheet_invoice/report/account_analytic_profit.rml', parser=account_analytic_profit)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
]
| |
c682dbe0c08b2b8188a1f15f8be584ff2944f575 | 0810b308b09e6680b5df2b5f412494d07d02f181 | /1905/month01/code/day11/demo01.py | 6df18b52956cef2cc3a75f7c6d67874f3607f4cf | []
| no_license | 952033053/python3 | d323ecff1bcd208fc81b74e2ab7e0eb9ce31d514 | 29c8fb7f3ca90e18cce1f9a62a27415aac946c46 | refs/heads/master | 2020-06-21T18:19:55.610435 | 2019-07-18T02:57:31 | 2019-07-18T02:57:31 | 197,524,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,210 | py | class Wife:
    def __init__(self, name, age, weight):
        self.name = name
        # Name mangling is only obfuscation: `self.__age` is actually stored
        # on the instance as `self._Wife__age` (i.e. `_ClassName__attr`).
        # self.__age = age
        self.set_age(age)
        # self.__weight = weight
        self.set_weight(weight)
    # Public read/write accessors for the private __age attribute.
    # NOTE: if the value fails validation, __age is never created and
    # get_age() will raise AttributeError until a valid age is set.
    def get_age(self):
        return self.__age
    def set_age(self, value):
        if 21 <= value <= 31:
            self.__age = value
        else:
            pass
    # Public read/write accessors for the private __weight attribute.
    # NOTE: if the value fails validation, __weight is never created and
    # get_weight() will raise AttributeError until a valid weight is set.
    def get_weight(self):
        return self.__weight
    def set_weight(self, value):
        if 40 <= value <= 60:
            self.__weight = value
        else:
            pass
w01 = Wife("铁锤公主", 20, 20)
# Assigning `w01.__age = ...` from outside would just create a NEW instance
# variable; it would not touch the class-private __age defined inside Wife.
# w01.__age = 107
w01._Wife__age = 20 # modifies the mangled "private" variable directly
print(w01.__dict__)# built-in attribute dict storing the instance variables.
w01 = Wife("铁锤公主", 30, 50)
w01.set_age(25)
w01.set_weight(55)
print(w01.get_age())
print(w01.get_weight())
# Exercise: define an Enemy class (name, attack power 10 -- 50, HP 100 -- 200);
# create an enemy object whose data can be modified and read.
| [
"[email protected]"
]
| |
c5e0b845eec88fe50d7ed7cfda31c0af3417e7a8 | e645ebf3b5177eb0ebedb7f239bd6e1b40bf1b07 | /ups/minuit2.cfg | 53e4d870fb2d8ee6bb0e4e0ceb063982e4b81138 | []
| no_license | lsst-dm/bp | e095cdb7412124fef39bdd8428fce70bbf0f462a | 31c0b65866d06a09575a53d0dd558320e6994a06 | refs/heads/main | 2023-07-22T11:32:48.479329 | 2023-07-10T00:30:32 | 2023-07-10T00:30:32 | 37,212,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,936 | cfg | # -*- python -*-
"""
Dependencies and configuration for minuit2
"""
import os.path
import eups
def _get_root():
    """Return the root directory of the package."""
    # Presumably resolved from the EUPS product database for "minuit2" —
    # confirm eups.productDir semantics (may return None if not set up).
    return eups.productDir("minuit2")
# Dependency declarations consumed by the EUPS/SCons build tooling.
dependencies = {
    # Names of packages required to build against this package.
    "required": [],
    # Names of packages optionally setup when building against this package.
    "optional": [],
    # Names of packages required to build this package, but not required to build against it.
    "buildRequired": [],
    # Names of packages optionally setup when building this package, but not used in building against it.
    "buildOptional": [],
}
def setup(conf, products, build=False):
    """
    Update an SCons environment to make use of the package.
    Arguments:
    conf ------ An SCons Configure context. The SCons Environment conf.env should be updated
                by the setup function.
    products -- A dictionary consisting of all dependencies and the return values of calls to their
                setup() functions, or None if the dependency was optional and was not found.
    build ----- If True, this is the product currently being built, and products in "buildRequired" and
                "buildOptional" dependencies will also be present in the products dict.
    """
    conf.env.PrependUnique(**paths)
    if not build:
        conf.env.AppendUnique(**doxygen)
    for target in libs:
        if target not in conf.env.libs:
            # Copy so later in-place edits of conf.env.libs[target] cannot
            # mutate this module's `libs` lists.
            # BUGFIX: was `lib[target].copy()` — `lib` is undefined at this
            # point (NameError); `list(...)` also works on Python 2 where
            # list.copy() does not exist.
            conf.env.libs[target] = list(libs[target])
        else:
            # Merge, keeping the order and skipping duplicates.
            for lib in libs[target]:
                if lib not in conf.env.libs[target]:
                    conf.env.libs[target].append(lib)
    return {"paths": paths, "doxygen": doxygen, "libs": libs, "extra": {}}
###################################################################################################
# Variables for default implementation of setup() below; if the user provides
# a custom implementation of setup(), everything below is unnecessary.
# Packages to be added to the environment.
# NOTE: _get_root() is evaluated once, at module import time.
paths = {
    # Sequence of paths to add to the include path.
    "CPPPATH": [os.path.join(_get_root(), "include")],
    # Sequence of paths to add to the linker path.
    "LIBPATH": [os.path.join(_get_root(), "lib")],
}
doxygen = {
    # Sequence of Doxygen tag files produced by this product.
    "DOXYGEN_TAGFILES": [],
    # Sequence of Doxygen configuration files to include in dependent products.
    "DOXYGEN_INCLUDES": [],
}
# Libraries provided by the package, not including standard library prefixes or suffixes.
# Additional custom targets besides the standard "main", "python", and "test" targets may
# be provided as well.
libs = {
    # Normal libraries.
    "main": ["Minuit2"],
    # Libraries only linked with C++-coded Python modules.
    "python": [],
    # Libraries only linked with C++-coded unit tests.
    "test": [],
}
| [
"[email protected]"
]
| |
34796e03ad42278148564162c60c8e9b9f5fc4b8 | 56c3cefe1da4731175ee73d90ca2629d79bfe696 | /egs/ptb_chime4test/local/run_trf_nce_cnn.py | 3e8eeb368c4f54c64b8acffe0aafefdd38e84a84 | []
| no_license | wbengine/TRF-NN-Tensorflow | 022a187c80c80293553958c17a267c7eaf81213f | e225829c36043293d092cf8ed620d6dce0abc8f0 | refs/heads/master | 2022-04-16T10:20:46.999159 | 2020-03-05T04:56:20 | 2020-03-05T04:56:20 | 114,067,559 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,475 | py | import tensorflow as tf
import os
import sys
import time
import numpy as np
import task
from model import wblib as wb
from model import reader
from model import trfbase
from model import trfnce
from model import lstmlm
import run_lstmlm
# [data]
# Load PTB train/valid/test corpora; '</s>' is inserted as both the
# sentence-begin and sentence-end token.
data = reader.Data().load_raw_data([task.train, task.valid, task.test],
                                   add_beg_token='</s>', add_end_token='</s>')
# data.cut_train_to_length(50)
def create_name(config, q_config):
    """Build the log-directory name for a TRF-NCE run, optionally tagged
    with the auxiliary q-model configuration name."""
    name = str(config)
    if q_config is not None:
        name = name + '_with_' + run_lstmlm.create_name(q_config)
    # s += '_op%d' % config.noise_operation_num
    # s += '_lstm'
    # s += '_logz{}'.format(int(config.init_zeta[0]))
    return name
def main(_):
    """Configure and train a TRF-NCE language model with a CNN feature
    network on the PTB data loaded at module level.

    Builds the config, creates the log directory (redirecting stdout to a
    log file there), constructs the model inside a fresh TF graph and runs
    training under a tf.train.Supervisor (TensorFlow 1.x API).
    """
    config = trfnce.Config(data)
    config.structure_type = 'cnn'
    config.embedding_dim = 200
    # Parallel 1..10-gram convolution filters, 100 channels each.
    config.cnn_filters = [(i, 100) for i in range(1, 11)]
    config.cnn_width = 3
    config.cnn_layers = 3
    config.cnn_hidden = 200
    config.rnn_hidden_layers = 2
    config.rnn_hidden_size = 200
    config.rnn_predict = True
    config.batch_size = 10
    config.noise_factor = 10
    # Noise samples are drawn from a pretrained LSTM LM checkpoint.
    config.noise_sampler = 'lstm:lstm/lstm_e200_h200x2/model.ckpt'
    config.init_weight = 0.1
    config.optimize_method = ['adam', 'adam']
    config.lr_param = trfbase.LearningRateEpochDelay(0.001)
    config.lr_zeta = trfbase.LearningRateEpochDelay(0.01)
    config.max_epoch = 100
    # config.dropout = 0.75
    # config.init_zeta = config.get_initial_logz(20)
    config.update_zeta = True
    config.write_dbg = False
    config.print()
    # q_config = run_lstmlm.small_config(data)
    q_config = None
    name = create_name(config, q_config)
    logdir = 'trf_nce/' + name
    wb.mkdir(logdir, is_recreate=True)
    # All subsequent prints go to trf.log inside the log directory.
    sys.stdout = wb.std_log(os.path.join(logdir, 'trf.log'))
    print(logdir)
    data.write_vocab(logdir + '/vocab.txt')
    data.write_data(data.datas[1], logdir + '/valid.id')
    data.write_data(data.datas[2], logdir + '/test.id')
    # wb.rmdir(logdirs)
    with tf.Graph().as_default():
        if q_config is None:
            m = trfnce.TRF(config, data, logdir=logdir, device='/gpu:0')
        else:
            m = trfnce.TRF(config, data, logdir=logdir, device='/gpu:0',
                           q_model=lstmlm.LM(q_config, device='/gpu:0')
                           )
        # s1 = trfnce.NoiseSamplerNgram(config, data, 2)
        # s2 = trfnce.NoiseSamplerLSTMEval(config, data, config.noise_sampler.split(':')[-1])
        sv = tf.train.Supervisor(logdir=os.path.join(logdir, 'logs'),
                                 global_step=m.train_net.global_step)
        sv.summary_writer.add_graph(tf.get_default_graph())  # write the graph to logs
        session_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
        session_config.gpu_options.allow_growth = True
        with sv.managed_session(config=session_config) as session:
            with session.as_default():
                if m.q_model is not None:
                    print('load lstmlm for q model')
                    m.q_model.restore(session, './lstm/' + run_lstmlm.create_name(q_config) + '/model.ckpt')
                m.train(sv, session,
                        print_per_epoch=0.1,
                        operation=task.Ops(m),
                        )
# Delegate flag parsing and program entry to TensorFlow's app runner (TF 1.x).
if __name__ == '__main__':
    tf.app.run(main=main)
| [
"[email protected]"
]
| |
cad49a464e253ae9342c164c950fd6c0ec78bdcf | d5a32e532fe231c16e52149604f0db34c5f4d2f9 | /binarysearch.io/sum_of_the_deepest_node.py | a7848ee9b46081b4f3607498a2a3079159af306e | [
"MIT"
]
| permissive | mishrakeshav/Competitive-Programming | 93705f63337639e8464c1d50f3394434b7422f15 | 00c1bd272646754ca4c260d57989304c8e323838 | refs/heads/master | 2023-07-06T07:32:23.042324 | 2023-06-29T15:27:24 | 2023-06-29T15:27:24 | 216,195,590 | 3 | 3 | MIT | 2020-10-03T07:55:18 | 2019-10-19T11:27:53 | Python | UTF-8 | Python | false | false | 1,288 | py | # class Tree:
# def __init__(self, val, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def solve(self, root):
        """Return the sum of the values of the deepest nodes in a binary tree.

        Walks the tree once with a DFS, accumulating a running sum per depth
        instead of collecting every node value per level (the original kept a
        list of all values for each level, using O(n) extra memory where only
        the per-level sums are needed).

        Raises ValueError (from max() on an empty dict) if root is None,
        matching the original behavior.
        """
        level_sums = {}  # depth -> sum of node values at that depth

        def dfs(node, depth=0):
            if node is None:
                return
            level_sums[depth] = level_sums.get(depth, 0) + node.val
            dfs(node.left, depth + 1)
            dfs(node.right, depth + 1)

        dfs(root, 0)
        # The deepest level is the largest recorded depth.
        return level_sums[max(level_sums)]
class Solution:
    def solve(self, root):
        """Return the sum of node values on the deepest level of a binary tree.

        Iterative level-order traversal: walk one level at a time, remember
        the current level's sum, and return it once no children remain.
        """
        level = [root]
        while True:
            total = sum(node.val for node in level)
            next_level = [
                child
                for node in level
                for child in (node.left, node.right)
                if child is not None
            ]
            if not next_level:
                # No deeper level exists; `total` is the deepest level's sum.
                return total
            level = next_level
| [
"[email protected]"
]
| |
cefa36b71fd4da6c8b37f32c155e0eb34813882b | 0296bc69a0d9608ed826ad7a719395f019df098f | /Tools/semantic_check.py | 7bbe1e264b5436b25ade1ce79adfe0c38466b046 | []
| no_license | jcn16/Blender_HDRmap_render | c0486a77e04c5b41a6f75f123dbdb3d10c682367 | 50e6cdb79fef83081de9830e7105dd425a235a9e | refs/heads/main | 2023-07-19T22:22:53.622052 | 2021-08-20T06:29:10 | 2021-08-20T06:29:10 | 377,757,283 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | import os
from tqdm import tqdm
'''
check if semantic_mask is complete
'''
root='/media/jcn/新加卷/JCN/JCN_test_datset/Train_512'
child_dirs=os.listdir(root)
child_dirs.sort()
pbar=tqdm(total=len(child_dirs))
for model in child_dirs:
pbar.update(1)
sub_dirs=os.listdir(os.path.join(root,model))
sub_dirs.remove('prt')
sub_dirs.remove('GEO')
sub_dirs.sort()
for dir in sub_dirs:
src=os.path.join(root,model,dir,'semantic_mask.png')
if os.path.exists(src):
continue
else:
print(src)
| [
"[email protected]"
]
| |
5d2e5134e1095e1fd5b25e03a0582d9165899207 | f0e048b2398b42a3c3ec42925ab75f754cd8d214 | /configs/RAChallenge/s2anet_r101_fpn_1x_ms_ra.py | a06a8e7cd56271360518b99aafbdbfc70973c468 | []
| no_license | myknowntime/RIDet | c56535f52ccf76e41bd181faf2bceb2f0e8fbd57 | 96bee9a7089a267855d494fbf9d2f2f78064c54e | refs/heads/master | 2023-08-14T23:46:32.849835 | 2021-10-06T14:29:31 | 2021-10-06T14:29:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,958 | py | # fp16 settings
# fp16 = dict(loss_scale=512.)
# model settings
# Detector definition: ResNet-101 + FPN backbone feeding an S2ANet rotated-box head.
model = dict(
    type='S2ANetDetector',
    pretrained='torchvision://resnet101',
    backbone=dict(
        type='ResNet',
        depth=101,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs=True,
        num_outs=5),
    rbox_head=dict(
        type='S2ANetHead',
        num_classes=6,
        in_channels=256,
        feat_channels=256,
        stacked_convs=2,
        align_conv_type='AlignConv',#[AlignConv,DCN,GA_DCN]
        align_conv_size=3,
        with_orconv=True,
        anchor_ratios=[1.0],
        anchor_strides=[8, 16, 32, 64, 128],
        anchor_scales=[4],
        target_means=[.0, .0, .0, .0, .0],
        target_stds=[1.0, 1.0, 1.0, 1.0, 1.0],
        loss_fam_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=5.0),  # loss weight changed from 1 to 5
        loss_fam_bbox=dict(
            type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
        loss_odm_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=5.0),  # loss weight changed from 1 to 5
        loss_odm_bbox=dict(
            type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)))
# training and testing settings
# fam_cfg / odm_cfg configure the two S2ANet stages (anchor assignment differs:
# horizontal->rotated overlap for the first stage, rotated->rotated for the second).
train_cfg = dict(
    fam_cfg=dict(
        anchor_target_type='hbb_obb_rbox_overlap',
        assigner=dict(
            type='MaxIoUAssignerRbox',
            pos_iou_thr=0.5,
            neg_iou_thr=0.4,
            min_pos_iou=0,
            ignore_iof_thr=-1),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    odm_cfg=dict(
        anchor_target_type='obb_obb_rbox_overlap',
        anchor_inside_type='center',
        assigner=dict(
            type='MaxIoUAssignerRbox',
            pos_iou_thr=0.5,
            neg_iou_thr=0.4,
            min_pos_iou=0,
            ignore_iof_thr=-1),
        allowed_border=-1,
        pos_weight=-1,
        debug=False))
# Inference-time filtering: keep up to 2000 boxes after rotated NMS.
test_cfg = dict(
    nms_pre=2000,
    min_bbox_size=0,
    score_thr=0.15,
    nms=dict(type='nms_rotated', iou_thr=0.1),
    max_per_img=2000)
# ImageNet mean/std normalization (BGR input converted to RGB).
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# Training pipeline: resize to 1024x1024, no flip (flip_ratio=0), normalize, pad.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='RotatedResize', img_scale=(1024, 1024), keep_ratio=True),
    # dict(type='RotatedResize', img_scale=(1024, 1024), keep_ratio=True),
    dict(type='RotatedRandomFlip', flip_ratio=0),
    # dict(type='RandomRotate', rate=0.5, angles=[30, 60, 90, 120, 150], auto_bound=False),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
# Test pipeline: single scale, no flip augmentation.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1024, 1024),
        flip=False,
        transforms=[
            dict(type='RotatedResize', img_scale=(1024, 1024), keep_ratio=True),
            # dict(type='RotatedRandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
# dataset settings
dataset_type = 'RAChallengeDataset'
data_root = 'data/RAChallenge/stage1/train/'  # train set without augmentation
warmup_data_root = 'data/RAChallenge/warmup/'  # warmup data without augmentation
test_root = 'data/RAChallenge/stage1/'
stage2_test_root = 'data/RAChallenge/stage2/'
all_data_root = 'data/RAChallenge/stage1/all_data_augment/'  # train_aug + warmup_aug
data = dict(
    imgs_per_gpu=2,
    workers_per_gpu=2,
    # train: augmented train + warmup data
    train=dict(
        type=dataset_type,
        ann_file=all_data_root + 'train.json',
        img_prefix=all_data_root + 'images/',
        pipeline=train_pipeline),
    # # train with aug
    # train=dict(
    #     type=dataset_type,
    #     ann_file=aug_data_root + 'train.json',
    #     img_prefix=aug_data_root + 'images/',
    #     pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'trainval_split/trainval.json',
        img_prefix=data_root + 'trainval_split/images/',
        pipeline=test_pipeline),
    # test: stage-2 submission set
    test=dict(
        type=dataset_type,
        ann_file=stage2_test_root + 'test.json',
        img_prefix=stage2_test_root + 'test2/',
        pipeline=test_pipeline)
    # # evalloss_fam_cls: 1.0105,
    # test=dict(
    #     type=dataset_type,
    #     ann_file=warmup_data_root + 'train.json',
    #     img_prefix=warmup_data_root + 'images/',
    #     pipeline=test_pipeline)
)
# optimizer
# optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001)
optimizer = dict(type='Adam', lr=1e-4, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.00005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy: linear warmup for 500 iters, then step decay at epochs 6 and 10
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[6, 10])
# step=[8, 16, 20])
# step=[12, 24, 36, 48])
checkpoint_config = dict(interval=2)  # save a checkpoint every 2 epochs
# yapf:disable
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'work_dirs/s2anet_r101_fpn_1024_ms_ra/'
# Fine-tune from an earlier checkpoint rather than from scratch.
load_from = 'work_dirs/s2anet_r101_fpn_1024_ms_ra/70_10-15.pth'
# load_from = None
resume_from = None
workflow = [('train', 1)]
"[email protected]"
]
| |
6d1a989bb6d0863e4392b6b5982cb9a5e2b1b642 | 9d0195aa83cc594a8c61f334b90375961e62d4fe | /JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano993.py | 6a4a6c9844658758121f01dd960432ed50dbceed | []
| no_license | rsk146/CMS | 4e49592fc64f6438051544c5de18598db36ed985 | 5f8dab8c59ae556598b9747b52b88205fffc4dbe | refs/heads/master | 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,292 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
# NOTE(review): this file is cmsDriver-generated (see header); hand edits are
# normally lost on regeneration.
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)

# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')

process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)  # -1: process every event in the input
)

# Input source
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/120000/61C328F6-6BDC-FB4F-A3E2-4BF07EBED5FA.root'),
    secondaryFileNames = cms.untracked.vstring()
)

process.options = cms.untracked.PSet(

)

# Production Info
process.configurationMetadata = cms.untracked.PSet(
    annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
    name = cms.untracked.string('Applications'),
    version = cms.untracked.string('$Revision: 1.19 $')
)

# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
    compressionAlgorithm = cms.untracked.string('LZMA'),
    compressionLevel = cms.untracked.int32(9),
    dataset = cms.untracked.PSet(
        dataTier = cms.untracked.string('NANOAOD'),
        filterName = cms.untracked.string('')
    ),
    fileName = cms.untracked.string('file:jetToolbox_nano_datatest993.root'),
    outputCommands = process.NANOAODEventContent.outputCommands
)

# Additional output definition

# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')

# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)

# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)

# customisation of the process.

# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)

# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)

# End of customisation functions

# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
"[email protected]"
]
| |
87c6b732826010a09b36dc58caec09f610519427 | 8d946e49d0e9c5e038b6dd5fdfc11c72f64470f9 | /instagram/urls.py | f97803739aafc2feba7d773b1e1fc52f1f78a5e7 | [
"MIT"
]
| permissive | gabyxbinnaeah/TwinterApp | bfc955fdf529b5ecce89f62ab6bd4f8ecf9e461e | a0f68527a3e01cd47e49f9a17988ec5095422695 | refs/heads/master | 2023-06-16T01:07:43.531740 | 2021-07-14T08:37:50 | 2021-07-14T08:37:50 | 384,447,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,314 | py | """instagram URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.urls import path,include
from django.contrib.auth import views
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django_registration.backends.one_step.views import RegistrationView
# URL routing; order matters -- Django uses the first pattern that matches.
urlpatterns = [
    # Django admin site.
    path('admin/', admin.site.urls),
    # Main app (chat) serves the site root.
    path('', include('chat.urls')),
    # One-step registration flow from django-registration.
    path('accounts/', include('django_registration.backends.one_step.urls')),
    # Built-in auth views (login, logout, password reset, ...).
    path('accounts/', include('django.contrib.auth.urls')),
    # Explicit register route; redirects to '/' after successful registration.
    path('accounts/register/',RegistrationView.as_view(success_url='/'),name='django_registration_register'),
]
| [
"[email protected]"
]
| |
e6f6045cc2fb7e9d2b61ded3d712cc41bf1bd78b | c6e5d5ff2ee796fd42d7895edd86a49144998067 | /platform/core-scheduler/polyaxon/polyconf/wsgi.py | 752f03d7945e907b86cc6786cbdc1116ab7a7e94 | [
"Apache-2.0"
]
| permissive | zeyaddeeb/polyaxon | f4481059f93d8b70fb3d41840a244cd9aaa871e0 | 1f2b236f3ef36cf2aec4ad9ec78520dcc9ef4ee5 | refs/heads/master | 2023-01-19T05:15:34.334784 | 2020-11-27T17:08:35 | 2020-11-27T17:08:35 | 297,410,504 | 0 | 0 | Apache-2.0 | 2020-09-21T17:20:27 | 2020-09-21T17:20:26 | null | UTF-8 | Python | false | false | 995 | py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
WSGI config for search project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Default to the project's settings module unless the environment already set one.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "polyconf.settings")
# The WSGI callable that servers (gunicorn, uWSGI, mod_wsgi, ...) import and call.
application = get_wsgi_application()
| [
"[email protected]"
]
| |
9e9a78685cf9219df7db2543b3cb31fc4e86d42d | 4524c4940d7fa830c23e4dc8e1144d5eec74365b | /ex15.py | 75842d887ad14a1f8b08026f86360c2596f8855c | []
| no_license | AmyShackles/LearnPython3TheHardWay | ef493209a181f62bfa45ff3ec456ae0fd2c3e8a9 | 4e175d58dfe8c7295ebfbee3947e944b35e52f8c | refs/heads/master | 2020-03-23T03:49:53.052976 | 2018-07-27T21:14:30 | 2018-07-27T21:14:30 | 141,051,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | from sys import argv
# assigns arg[0] to the variable script and argv[1] to the variable filename
# Unpack the command line: argv[0] is this script, argv[1] the file to display.
script, filename = argv

# Open inside `with` so the handle is closed even on error; the original
# leaked both file objects by never calling close().
with open(filename) as txt:
    print(f"Here's your file (unknown):")
    print(txt.read())

# Ask the user to type the filename again and display it a second time.
print("Type the filename again:")
file_again = input("> ")

with open(file_again) as txt_again:
    print(txt_again.read())
| [
"[email protected]"
]
| |
0cdac74013d1815fdcf40dc9165e35d850ef2673 | 7b252f0c1b8ba7c9a35ead166482efbb4d804413 | /mysite/books/views.py | aad9117e9fbeb9c02c506f754902dff380645397 | []
| no_license | gzpgg3x/PythonExample | 191024f04796a13106b46f4f00a59185c33af91b | c64563f91cd5188b6d3d01688d8184a37ded46eb | refs/heads/master | 2021-01-10T19:38:53.325169 | 2013-04-11T22:36:29 | 2013-04-11T22:36:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,682 | py | # Create your views here.
# from django.shortcuts import render_to_response
# from django.http import Http404, HttpResponse
# def search_form(request):
# return render_to_response('search_form.html')
# def search(request):
# if 'q' in request.GET:
# message = 'You searched for: %r' % request.GET['q']
# else:
# message = 'You submitted an empty form.'
# return HttpResponse(message)
# from django.http import HttpResponse
# from django.shortcuts import render_to_response
# from books.models import Book
# def search_form(request):
# return render_to_response('search_form.html')
# def search(request):
# if 'q' in request.GET and request.GET['q']:
# q = request.GET['q']
# books = Book.objects.filter(title__icontains=q)
# return render_to_response('search_results.html',
# {'books': books, 'query': q})
# else:
# # return HttpResponse('Please submit a search term.')
# return render_to_response('search_form.html', {'error': True})
# def search(request):
# error = False
# if 'q' in request.GET:
# q = request.GET['q']
# if not q:
# error = True
# elif len(q) > 20:
# error = True
# else:
# books = Book.objects.filter(title__icontains=q)
# return render_to_response('search_results.html',
# {'books': books, 'query': q})
# return render_to_response('search_form.html',
# {'error': error})
# def search(request):
# errors = []
# if 'q' in request.GET:
# q = request.GET['q']
# if not q:
# errors.append('Enter a search term.')
# elif len(q) > 20:
# errors.append('Please enter at most 20 characters.')
# else:
# books = Book.objects.filter(title__icontains=q)
# return render_to_response('search_results.html',
# {'books': books, 'query': q})
# return render_to_response('search_form.html',
# {'errors': errors})
from django.core.mail import send_mail
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response

from books.forms import ContactForm
from books.models import Book
def search(request):
    """Search books by title.

    Validates the ?q= parameter (non-empty, at most 20 chars) and renders
    the results page on success, or the search form with error messages.

    Fix: the query used `book.objects` (lowercase), which raised NameError
    at runtime -- the model class is `Book` (see books.models).
    """
    errors = []
    if 'q' in request.GET:
        q = request.GET['q']
        if not q:
            errors.append('Enter a search term.')
        elif len(q) > 20:
            errors.append('Please enter at most 20 characters.')
        else:
            # Case-insensitive substring match on the book title.
            books = Book.objects.filter(title__icontains=q)
            return render_to_response('search_results.html',
                {'books': books, 'query': q})
    return render_to_response('search_form.html',
        {'errors': errors})
# def contact(request):
# errors = []
# if request.method == 'POST':
# if not request.POST.get('subject', ''):
# errors.append('Enter a subject.')
# if not request.POST.get('message', ''):
# errors.append('Enter a message.')
# if request.POST.get('email') and '@' not in request.POST['email']:
# errors.append('Enter a valid e-mail address.')
# if not errors:
# send_mail(
# request.POST['subject'],
# request.POST['message'],
# request.POST.get('email', '[email protected]'),
# ['[email protected]'],
# )
# return HttpResponseRedirect('/contact/thanks/')
# return render_to_response('contact_form.html',
# {'errors': errors})
# def contact(request):
# errors = []
# if request.method == 'POST':
# if not request.POST.get('subject', ''):
# errors.append('Enter a subject.')
# if not request.POST.get('message', ''):
# errors.append('Enter a message.')
# if request.POST.get('email') and '@' not in request.POST['email']:
# errors.append('Enter a valid e-mail address.')
# if not errors:
# send_mail(
# request.POST['subject'],
# request.POST['message'],
# request.POST.get('email', '[email protected]'),
# ['[email protected]'],
# )
# return HttpResponseRedirect('/contact/thanks/')
# return render_to_response('contact_form.html', {
# 'errors': errors,
# 'subject': request.POST.get('subject', ''),
# 'message': request.POST.get('message', ''),
# 'email': request.POST.get('email', ''),
# })
# def contact(request):
# if request.method == 'POST':
# form = ContactForm(request.POST)
# if form.is_valid():
# cd = form.cleaned_data
# send_mail(
# cd['subject'],
# cd['message'],
# cd.get('email', '[email protected]'),
# ['[email protected]'],
# )
# return HttpResponseRedirect('/contact/thanks/')
# else:
# form = ContactForm()
# return render_to_response('contact_form.html', {'form': form})
def contact(request):
    """Show the contact form; on a valid POST, send the mail and redirect."""
    if request.method != 'POST':
        # Initial GET: present an unbound form with a friendly default subject.
        form = ContactForm(
            initial={'subject': 'I love your site!'}
        )
        return render_to_response('contact_form.html', {'form': form})

    form = ContactForm(request.POST)
    if not form.is_valid():
        # Re-render with the bound form so field errors are displayed.
        return render_to_response('contact_form.html', {'form': form})

    cd = form.cleaned_data
    send_mail(
        cd['subject'],
        cd['message'],
        cd.get('email', '[email protected]'),
        ['[email protected]'],
    )
    return HttpResponseRedirect('/contact/thanks/')
"[email protected]"
]
| |
04d9ad1e786f05e26021f6185659f3aae41db9f9 | ac2f43c8e0d9649a7f063c59b3dffdfed9fd7ed7 | /tests2/tests/fuji/test_sensor.py | 1d54acae68dd7d60074e321cc3d4aa4b3266471c | []
| no_license | facebook/openbmc | bef10604ced226288600f55248b7f1be9945aea4 | 32777c66a8410d767eae15baabf71c61a0bef13c | refs/heads/helium | 2023-08-17T03:13:54.729494 | 2023-08-16T23:24:18 | 2023-08-16T23:24:18 | 31,917,712 | 684 | 331 | null | 2023-07-25T21:19:08 | 2015-03-09T19:18:35 | C | UTF-8 | Python | false | false | 13,978 | py | #!/usr/bin/env python3
#
# Copyright 2020-present Facebook. All Rights Reserved.
#
# This program file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program in a file named COPYING; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA
#
import os
import unittest
from abc import abstractmethod
from common.base_sensor_test import SensorUtilTest
from tests.fuji.helper.libpal import pal_is_fru_prsnt, pal_get_fru_id
from tests.fuji.test_data.sensors.sensor import (
PIM1_SENSORS_16Q,
PIM1_SENSORS_16O,
PIM2_SENSORS_16Q,
PIM2_SENSORS_16O,
PIM3_SENSORS_16Q,
PIM3_SENSORS_16O,
PIM4_SENSORS_16Q,
PIM4_SENSORS_16O,
PIM5_SENSORS_16Q,
PIM5_SENSORS_16O,
PIM6_SENSORS_16Q,
PIM6_SENSORS_16O,
PIM7_SENSORS_16Q,
PIM7_SENSORS_16O,
PIM8_SENSORS_16Q,
PIM8_SENSORS_16O,
PSU1_SENSORS,
PSU2_SENSORS,
PSU3_SENSORS,
PSU4_SENSORS,
SCM_SENSORS,
SMB_SENSORS,
)
from utils.test_utils import qemu_check
@unittest.skipIf(qemu_check(), "test env is QEMU, skipped")
class ScmSensorTest(SensorUtilTest, unittest.TestCase):
    """Validate sensor-util output for the SCM board."""

    def set_sensors_cmd(self):
        self.sensors_cmd = ["/usr/local/bin/sensor-util scm"]

    def test_scm_sensor_keys(self):
        # Every expected SCM sensor key must appear in the parsed output.
        parsed = self.get_parsed_result()
        for key in SCM_SENSORS:
            with self.subTest(key=key):
                self.assertIn(
                    key, parsed.keys(), "Missing key {} in scm sensor data".format(key)
                )

    def test_scm_temp_sensor_range(self):
        # Each temperature reading must fall within 30 +/- 20.
        parsed = self.get_parsed_result()
        temp_keys = (
            "SCM_OUTLET_U7_TEMP",
            "SCM_INLET_U8_TEMP",
            "BMC_LM75_U9_TEMP",
            "MB_OUTLET_TEMP",
            "MB_INLET_TEMP",
            "PCH_TEMP",
            "VCCIN_VR_TEMP",
            "1V05MIX_VR_TEMP",
            "SOC_TEMP",
            "VDDR_VR_TEMP",
            "SOC_DIMMA0_TEMP",
            "SOC_DIMMB0_TEMP",
        )
        for key in temp_keys:
            with self.subTest(key=key):
                value = parsed[key]
                self.assertAlmostEqual(
                    float(value),
                    30,
                    delta=20,
                    msg="Sensor={} reported value={} not within range".format(
                        key, value
                    ),
                )
@unittest.skipIf(qemu_check(), "test env is QEMU, skipped")
class PimSensorTest(SensorUtilTest, unittest.TestCase):
    """Shared helpers for the per-slot PIM sensor tests.

    Subclasses set ``self._pim_id`` (1-8) in set_sensors_cmd and return the
    expected sensor-key table from get_pim_sensors.
    """

    @abstractmethod
    def get_pim_sensors(self):
        # Overridden by every PimNSensorTest subclass; the default _pim_id
        # here is a placeholder and never used by the concrete classes.
        self._pim_id = 0
        pass

    def get_pim_temp_keys(self):
        # Build this PIM's temperature sensor key names from its slot id.
        PIM_TEMP_KEYS = []
        PIM_TEMP_KEYS.append("PIM{}_LM75_U37_TEMP_BASE".format(self._pim_id))
        PIM_TEMP_KEYS.append("PIM{}_LM75_U26_TEMP".format(self._pim_id))
        PIM_TEMP_KEYS.append("PIM{}_LM75_U37_TEMP_MEZZ".format(self._pim_id))
        # PIM_TEMP_KEYS.append("PIM{}_QSFP_TEMP".format(self._pim_id))
        return PIM_TEMP_KEYS

    def base_test_pim_sensor_keys(self):
        # Shared body for test_pimN_sensor_keys: every expected key must be
        # present in the parsed sensor-util output; skip if the FRU is absent.
        self.set_sensors_cmd()
        if not pal_is_fru_prsnt(pal_get_fru_id("pim{}".format(self._pim_id))):
            self.skipTest("pim{} is not present".format(self._pim_id))
        result = self.get_parsed_result()
        for key in self.get_pim_sensors():
            with self.subTest(key=key):
                self.assertIn(
                    key,
                    result.keys(),
                    "Missing key {} in pim{} sensor data".format(key, self._pim_id),
                )

    def base_test_pim_temp_sensor_range(self):
        # Shared body for test_pimN_temp_sensor_range: every temperature
        # reading must be 30 +/- 20 (units per sensor-util; presumably deg C).
        self.set_sensors_cmd()
        if not pal_is_fru_prsnt(pal_get_fru_id("pim{}".format(self._pim_id))):
            self.skipTest("pim{} is not present".format(self._pim_id))
        result = self.get_parsed_result()
        PIM_TEMP_KEYS = self.get_pim_temp_keys()
        for key in PIM_TEMP_KEYS:
            with self.subTest(key=key):
                value = result[key]
                self.assertAlmostEqual(
                    float(value),
                    30,
                    delta=20,
                    msg="Sensor={} reported value={} not within range".format(
                        key, value
                    ),
                )

    def get_pim_name(self, ver):
        """Map a board-version hex string to a PIM type name.

        Bit 7 clear -> "PIM_TYPE_16Q"; bit 7 set -> "PIM_TYPE_16O".
        (The final else is unreachable: ``ver & 0x80`` is always 0 or 0x80.)
        """
        pim_name = None
        ver = int(ver, 16)
        if ver & 0x80 == 0x0:
            pim_name = "PIM_TYPE_16Q"
        elif ver & 0x80 == 0x80:
            pim_name = "PIM_TYPE_16O"
        else:
            pim_name = None
        return pim_name

    def get_pim_sensor_type(self, pim_num):
        """
        Get PIM sensors type by read i2c device board version
        """
        # pim_num is 0-based; each PIM occupies 8 i2c buses starting at bus 80.
        pim_sensor_type = None
        bus = (pim_num * 8) + 80
        PATH = "/sys/bus/i2c/devices/%d-0060/board_ver" % (bus)
        if not os.path.exists(PATH):
            raise Exception("Path for PIM board_ver doesn't exist")
        with open(PATH, "r") as fp:
            line = fp.readline()
            if line:
                # board_ver reads like "0xNN"; keep the hex digits after "0x".
                ver = line.split("0x")[1]
                pim_sensor_type = self.get_pim_name(ver)
            else:
                raise Exception("PIM board_ver is empty")
        return pim_sensor_type
class Pim1SensorTest(PimSensorTest, unittest.TestCase):
    """Sensor checks for the PIM in slot 1."""

    def set_sensors_cmd(self):
        if not pal_is_fru_prsnt(pal_get_fru_id("pim1")):
            self.skipTest("pim1 is not present")
        self._pim_id = 1
        self.sensors_cmd = ["/usr/local/bin/sensor-util pim1"]

    def get_pim_sensors(self):
        # 16O boards use the 16O table; 16Q and unknown types fall back to 16Q.
        if self.get_pim_sensor_type(0) == "PIM_TYPE_16O":
            return PIM1_SENSORS_16O
        return PIM1_SENSORS_16Q

    def test_pim1_sensor_keys(self):
        super().base_test_pim_sensor_keys()

    def test_pim1_temp_sensor_range(self):
        super().base_test_pim_temp_sensor_range()
class Pim2SensorTest(PimSensorTest, unittest.TestCase):
    """Sensor checks for the PIM in slot 2."""

    def set_sensors_cmd(self):
        if not pal_is_fru_prsnt(pal_get_fru_id("pim2")):
            self.skipTest("pim2 is not present")
        self._pim_id = 2
        self.sensors_cmd = ["/usr/local/bin/sensor-util pim2"]

    def get_pim_sensors(self):
        # 16O boards use the 16O table; 16Q and unknown types fall back to 16Q.
        if self.get_pim_sensor_type(1) == "PIM_TYPE_16O":
            return PIM2_SENSORS_16O
        return PIM2_SENSORS_16Q

    def test_pim2_sensor_keys(self):
        super().base_test_pim_sensor_keys()

    def test_pim2_temp_sensor_range(self):
        super().base_test_pim_temp_sensor_range()
class Pim3SensorTest(PimSensorTest, unittest.TestCase):
    """Sensor checks for the PIM in slot 3."""

    def set_sensors_cmd(self):
        if not pal_is_fru_prsnt(pal_get_fru_id("pim3")):
            self.skipTest("pim3 is not present")
        self._pim_id = 3
        self.sensors_cmd = ["/usr/local/bin/sensor-util pim3"]

    def get_pim_sensors(self):
        # 16O boards use the 16O table; 16Q and unknown types fall back to 16Q.
        if self.get_pim_sensor_type(2) == "PIM_TYPE_16O":
            return PIM3_SENSORS_16O
        return PIM3_SENSORS_16Q

    def test_pim3_sensor_keys(self):
        super().base_test_pim_sensor_keys()

    def test_pim3_temp_sensor_range(self):
        super().base_test_pim_temp_sensor_range()
class Pim4SensorTest(PimSensorTest, unittest.TestCase):
    """Sensor checks for the PIM in slot 4."""

    def set_sensors_cmd(self):
        if not pal_is_fru_prsnt(pal_get_fru_id("pim4")):
            self.skipTest("pim4 is not present")
        self._pim_id = 4
        self.sensors_cmd = ["/usr/local/bin/sensor-util pim4"]

    def get_pim_sensors(self):
        # 16O boards use the 16O table; 16Q and unknown types fall back to 16Q.
        if self.get_pim_sensor_type(3) == "PIM_TYPE_16O":
            return PIM4_SENSORS_16O
        return PIM4_SENSORS_16Q

    def test_pim4_sensor_keys(self):
        super().base_test_pim_sensor_keys()

    def test_pim4_temp_sensor_range(self):
        super().base_test_pim_temp_sensor_range()
class Pim5SensorTest(PimSensorTest, unittest.TestCase):
    """Sensor checks for the PIM in slot 5."""

    def set_sensors_cmd(self):
        if not pal_is_fru_prsnt(pal_get_fru_id("pim5")):
            self.skipTest("pim5 is not present")
        self._pim_id = 5
        self.sensors_cmd = ["/usr/local/bin/sensor-util pim5"]

    def get_pim_sensors(self):
        # 16O boards use the 16O table; 16Q and unknown types fall back to 16Q.
        if self.get_pim_sensor_type(4) == "PIM_TYPE_16O":
            return PIM5_SENSORS_16O
        return PIM5_SENSORS_16Q

    def test_pim5_sensor_keys(self):
        super().base_test_pim_sensor_keys()

    def test_pim5_temp_sensor_range(self):
        super().base_test_pim_temp_sensor_range()
class Pim6SensorTest(PimSensorTest, unittest.TestCase):
    """Sensor checks for the PIM in slot 6."""

    def set_sensors_cmd(self):
        if not pal_is_fru_prsnt(pal_get_fru_id("pim6")):
            self.skipTest("pim6 is not present")
        self._pim_id = 6
        self.sensors_cmd = ["/usr/local/bin/sensor-util pim6"]

    def get_pim_sensors(self):
        # 16O boards use the 16O table; 16Q and unknown types fall back to 16Q.
        if self.get_pim_sensor_type(5) == "PIM_TYPE_16O":
            return PIM6_SENSORS_16O
        return PIM6_SENSORS_16Q

    def test_pim6_sensor_keys(self):
        super().base_test_pim_sensor_keys()

    def test_pim6_temp_sensor_range(self):
        super().base_test_pim_temp_sensor_range()
class Pim7SensorTest(PimSensorTest, unittest.TestCase):
    """Sensor checks for the PIM in slot 7."""

    def set_sensors_cmd(self):
        if not pal_is_fru_prsnt(pal_get_fru_id("pim7")):
            self.skipTest("pim7 is not present")
        self._pim_id = 7
        self.sensors_cmd = ["/usr/local/bin/sensor-util pim7"]

    def get_pim_sensors(self):
        # 16O boards use the 16O table; 16Q and unknown types fall back to 16Q.
        if self.get_pim_sensor_type(6) == "PIM_TYPE_16O":
            return PIM7_SENSORS_16O
        return PIM7_SENSORS_16Q

    def test_pim7_sensor_keys(self):
        super().base_test_pim_sensor_keys()

    def test_pim7_temp_sensor_range(self):
        super().base_test_pim_temp_sensor_range()
class Pim8SensorTest(PimSensorTest, unittest.TestCase):
    """Sensor checks for the PIM in slot 8."""

    def set_sensors_cmd(self):
        if not pal_is_fru_prsnt(pal_get_fru_id("pim8")):
            self.skipTest("pim8 is not present")
        self._pim_id = 8
        self.sensors_cmd = ["/usr/local/bin/sensor-util pim8"]

    def get_pim_sensors(self):
        # 16O boards use the 16O table; 16Q and unknown types fall back to 16Q.
        if self.get_pim_sensor_type(7) == "PIM_TYPE_16O":
            return PIM8_SENSORS_16O
        return PIM8_SENSORS_16Q

    def test_pim8_sensor_keys(self):
        super().base_test_pim_sensor_keys()

    def test_pim8_temp_sensor_range(self):
        super().base_test_pim_temp_sensor_range()
@unittest.skipIf(qemu_check(), "test env is QEMU, skipped")
class PsuSensorTest(SensorUtilTest, unittest.TestCase):
    """Shared key-presence check for the per-PSU test classes."""

    @abstractmethod
    def get_psu_sensors(self):
        self._psu_id = 0
        pass

    def base_test_psu_sensor_keys(self):
        # Shared body for test_psuN_sensor_keys; skip if the PSU is absent.
        self.set_sensors_cmd()
        if not pal_is_fru_prsnt(pal_get_fru_id("psu{}".format(self._psu_id))):
            self.skipTest("psu{} is not present".format(self._psu_id))
        parsed = self.get_parsed_result()
        for key in self.get_psu_sensors():
            with self.subTest(key=key):
                self.assertIn(
                    key,
                    parsed.keys(),
                    "Missing key {} in psu{} sensor data".format(key, self._psu_id),
                )
class Psu1SensorTest(PsuSensorTest, unittest.TestCase):
    """Sensor key checks for PSU 1."""

    def set_sensors_cmd(self):
        self._psu_id = 1
        self.sensors_cmd = ["/usr/local/bin/sensor-util psu1"]

    def get_psu_sensors(self):
        return PSU1_SENSORS

    def test_psu1_sensor_keys(self):
        super().base_test_psu_sensor_keys()
class Psu2SensorTest(PsuSensorTest, unittest.TestCase):
    """Sensor key checks for PSU 2."""

    def set_sensors_cmd(self):
        self._psu_id = 2
        self.sensors_cmd = ["/usr/local/bin/sensor-util psu2"]

    def get_psu_sensors(self):
        return PSU2_SENSORS

    def test_psu2_sensor_keys(self):
        super().base_test_psu_sensor_keys()
class Psu3SensorTest(PsuSensorTest, unittest.TestCase):
    """Sensor key checks for PSU 3."""

    def set_sensors_cmd(self):
        self._psu_id = 3
        self.sensors_cmd = ["/usr/local/bin/sensor-util psu3"]

    def get_psu_sensors(self):
        return PSU3_SENSORS

    def test_psu3_sensor_keys(self):
        super().base_test_psu_sensor_keys()
class Psu4SensorTest(PsuSensorTest, unittest.TestCase):
    """PSU slot 4 variant of the shared PSU sensor checks."""
    def set_sensors_cmd(self):
        self.sensors_cmd = ["/usr/local/bin/sensor-util psu4"]
        self._psu_id = 4
    def get_psu_sensors(self):
        # Expected sensor-key list for PSU 4 (defined earlier in this file).
        return PSU4_SENSORS
    def test_psu4_sensor_keys(self):
        super().base_test_psu_sensor_keys()
class SmbSensorTest(SensorUtilTest, unittest.TestCase):
    """sensor-util checks for the SMB board."""
    def set_sensors_cmd(self):
        self.sensors_cmd = ["/usr/local/bin/sensor-util smb"]
    def test_smb_sensor_keys(self):
        parsed = self.get_parsed_result()
        for sensor_key in SMB_SENSORS:
            with self.subTest(key=sensor_key):
                self.assertIn(
                    sensor_key,
                    parsed.keys(),
                    "Missing key {} in SMB sensor data".format(sensor_key),
                )
| [
"[email protected]"
]
| |
3805406d7d67e5a498dfff6b970543445e2a268e | 1fb55ab2c082348eb51263357563d20e1fd50b7d | /commons/c2cgeoportal_commons/alembic/main/29f2a32859ec_merge_1_6_and_master_branches.py | e92c014cbf7e91b379db31f2314f86152fee5f02 | [
"BSD-2-Clause-Views"
]
| permissive | nstoykov/c2cgeoportal | 40876bf577cc2ed1877affa9f307acef94d86daa | 42c3aab09e0c44a20d0162a85c51c6a9ca0ff95e | refs/heads/master | 2020-12-06T03:27:00.330795 | 2020-01-07T09:25:07 | 2020-01-07T09:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,944 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2019, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
"""Merge 1.6 and master branches
Revision ID: 29f2a32859ec
Revises: ('22e6dfb556de', '116b9b79fc4d')
Create Date: 2015-12-16 14:10:56.704614
"""
# revision identifiers, used by Alembic.
revision = "29f2a32859ec"
down_revision = ("22e6dfb556de", "116b9b79fc4d")
branch_labels = None
depends_on = None
def upgrade():
    """No-op: this is a merge revision that only joins two migration heads."""
    pass
def downgrade():
    """No-op: nothing to revert for a merge revision."""
    pass
| [
"[email protected]"
]
| |
d2f817ce547020deb24980787d61a4775fe21557 | 0f6f95af209ff9192702c2176c4513cb28929ba5 | /syd/commands/base.py | ff759bd373fea19e05e2ad6b670aa903bdbfd1e8 | []
| no_license | SD2E/aliases-cli | 87a03f83cbbed5f5860e77457718f7eb6121a311 | c634012a2623b975b8eeb6e210fabe51fe53a6ab | refs/heads/master | 2020-03-10T19:33:47.852609 | 2018-04-18T22:27:40 | 2018-04-18T22:27:40 | 129,550,330 | 1 | 0 | null | 2018-04-17T19:55:08 | 2018-04-14T20:01:52 | Python | UTF-8 | Python | false | false | 425 | py | """The base command."""
from agavepy.agave import Agave
from .reactors import alias
class Base(object):
    """A base command.

    Stores the parsed CLI options and an AliasStore for subclasses;
    concrete commands must override :meth:`run`.
    """
    def __init__(self, options, *args, **kwargs):
        # options: parsed CLI option mapping; extra args/kwargs kept verbatim
        # for subclasses to inspect.
        self.options = options
        self.args = args
        self.kwargs = kwargs
        # NOTE(review): Agave.restore() presumably reloads a previously
        # cached Agave API session — confirm against agavepy docs.
        self.store = alias.AliasStore(Agave.restore())
    def run(self):
        raise NotImplementedError('You must implement the run() method yourself!')
| [
"[email protected]"
]
| |
63df08aefaa3c1c7cab07d65e38a0de2816880ca | a0801d0e7325b31f0383fc68517e208680bb36d6 | /Kattis/commercials.py | fe78e87a705a79461a41b0c9e1c0aa6f1c6b0f44 | []
| no_license | conormccauley1999/CompetitiveProgramming | bd649bf04438817c7fa4755df2c2c7727273b073 | a7e188767364be40f625612af3d16182f2d8d4de | refs/heads/master | 2023-05-14T13:19:32.678134 | 2023-05-11T16:07:33 | 2023-05-11T16:07:33 | 179,089,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | n, p = map(int, input().split())
vs = list(map(int, input().split()))
for i in range(n):
vs[i] -= p
mx = -10e8
mxh = 0
for i in range(n):
mxh = mxh + vs[i]
mx = max(mx, mxh)
mxh = max(mxh, 0)
print(mx)
| [
"[email protected]"
]
| |
52bac37037d550c2a2aae038c7e551a45f41832d | 91da8a59561d6f2c7852c0548298434e0ede2ac7 | /Linked list/sort_a_linkedList.py | a0ff4d4ccaf23fb3e9297409fbd6d52413ca3256 | []
| no_license | prashant97sikarwar/leetcode | 6d3828772cc426ccf53dad07edb1efbc2f1e1ded | e76054e27a5d4493bd1bcef2ebdeb21d257afb63 | refs/heads/master | 2023-08-23T05:06:23.181869 | 2021-10-28T18:19:10 | 2021-10-28T18:19:10 | 286,057,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,346 | py | """Sort a linked list in O(n log n) time using constant space complexity."""
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def sortList(self, head: ListNode) -> ListNode:
if head is None or head.next is None:
return head
middle = self.findMiddle(head)
nextToMiddle = middle.next
middle.next = None
left = self.sortList(head)
right = self.sortList(nextToMiddle)
sortedList = self.finalMergeSort(left, right)
return sortedList
def findMiddle(self, node):
if node is None or node.next is None:
return node
slow = node
fast = node
while (fast.next != None and fast.next.next != None):
slow = slow.next
fast = fast.next.next
return slow
def finalMergeSort(self, a, b):
result = None
if a == None:
return b
if b == None:
return a
if a.val <= b.val:
result = a
result.next = self.finalMergeSort(a.next, b)
else:
result = b
result.next = self.finalMergeSort(a, b.next)
return result | [
"[email protected]"
]
| |
e5755a3d897e49d3a0ef501d254813c4afb0c40e | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib64/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/bgp/ctxafdef.py | d649b8e14aa620c77367e3c7b8b845e9914a019b | []
| no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 6,835 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class CtxAfDef(Mo):
    """
    The BGP address family context definition.

    Auto-generated cobra model class (see the "do not modify" file header):
    it declares the managed-object metadata (class identity, hierarchy,
    and property definitions) for the APIC object ``bgpCtxAfDef``.
    """
    meta = ClassMeta("cobra.model.bgp.CtxAfDef")
    # Class identity and access flags: read-only, non-configurable object.
    meta.moClassName = "bgpCtxAfDef"
    meta.rnFormat = "bgpCtxAfP-%(af)s"
    meta.category = MoCategory.REGULAR
    meta.label = "Address Family Context Definition"
    meta.writeAccessMask = 0x20000001
    meta.readAccessMask = 0x20000001
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False
    # Allowed child / parent classes in the managed-object tree.
    meta.childClasses.add("cobra.model.fault.Delegate")
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
    meta.parentClasses.add("cobra.model.fv.RtdEpP")
    meta.parentClasses.add("cobra.model.fv.BrEpP")
    meta.superClasses.add("cobra.model.bgp.ACtxAfPol")
    meta.superClasses.add("cobra.model.fabric.L3CtxPol")
    meta.superClasses.add("cobra.model.fabric.ProtoPol")
    meta.superClasses.add("cobra.model.fabric.ProtoDomPol")
    meta.superClasses.add("cobra.model.naming.NamedObject")
    meta.superClasses.add("cobra.model.pol.Obj")
    meta.superClasses.add("cobra.model.pol.Def")
    meta.superClasses.add("cobra.model.fabric.L3DomPol")
    meta.rnPrefixes = [
        ('bgpCtxAfP-', True),
        ]
    # "af" is the naming property: it selects the address family and is
    # embedded in the relative name (rnFormat above).
    prop = PropMeta("str", "af", "af", 17566, PropCategory.REGULAR)
    prop.label = "Address Family"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.isNaming = True
    prop.defaultValue = 1
    prop.defaultValueStr = "ipv4-ucast"
    prop._addConstant("ipv4-ucast", "ipv4-unicast-address-family", 1)
    prop._addConstant("ipv6-ucast", "ipv6-unicast-address-family", 3)
    prop._addConstant("vpnv4-ucast", "vpnv4-unicast-address-family", 2)
    prop._addConstant("vpnv6-ucast", "vpnv6-unicast-address-family", 4)
    meta.props.add("af", prop)
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)
    prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
    prop.label = "Description"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("descr", prop)
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)
    # eBGP / iBGP / local administrative distances (route preference).
    prop = PropMeta("str", "eDist", "eDist", 17563, PropCategory.REGULAR)
    prop.label = "eBGP Distance"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(1, 255)]
    prop.defaultValue = 20
    prop.defaultValueStr = "20"
    meta.props.add("eDist", prop)
    prop = PropMeta("str", "iDist", "iDist", 17564, PropCategory.REGULAR)
    prop.label = "iBGP Distance"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(1, 255)]
    prop.defaultValue = 200
    prop.defaultValueStr = "200"
    meta.props.add("iDist", prop)
    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)
    prop = PropMeta("str", "localDist", "localDist", 17565, PropCategory.REGULAR)
    prop.label = "Local Distance"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(1, 255)]
    prop.defaultValue = 220
    prop.defaultValueStr = "220"
    meta.props.add("localDist", prop)
    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)
    prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("name", prop)
    prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerKey", prop)
    prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerTag", prop)
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)
    meta.namingProps.append(getattr(meta.props, "af"))
    def __init__(self, parentMoOrDn, af, markDirty=True, **creationProps):
        # af: the naming value; forwarded positionally to Mo so the RN can
        # be built from rnFormat.
        namingVals = [af]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
]
| |
ba390262491d2b85c1a086e78fde731a173b1ef6 | 4cf3f8845d64ed31737bd7795581753c6e682922 | /.history/main_20200118152650.py | 71f2c0cb3e667e65dce164fea2e7b4e149d01c27 | []
| no_license | rtshkmr/hack-roll | 9bc75175eb9746b79ff0dfa9307b32cfd1417029 | 3ea480a8bf6d0067155b279740b4edc1673f406d | refs/heads/master | 2021-12-23T12:26:56.642705 | 2020-01-19T04:26:39 | 2020-01-19T04:26:39 | 234,702,684 | 1 | 0 | null | 2021-12-13T20:30:54 | 2020-01-18T08:12:52 | Python | UTF-8 | Python | false | false | 82,147 | py | from telegram.ext import Updater, CommandHandler
import requests
import re
# Module-level warm-up call to the random.dog API at import time.
contents = requests.get('https://random.dog/woof.json').json()
# NOTE(review): image_url is never read again below — bop() fetches its own
# URL via get_url(). This looks like dead code; confirm before removing.
image_url = contents['url']
def get_url():
    """Return the URL of a random dog image from the random.dog API."""
    payload = requests.get('https://random.dog/woof.json').json()
    return payload['url']
def extract_titles(tasks_json):
    """Return the title of each task object in a decoded JSON task list."""
    # assumes each task decodes to a dict with a 'title' key — TODO confirm
    # against the nowwat API response shape
    return [item['title'] for item in tasks_json]


def get_tasks():
    """Fetch the task list from the nowwat API and return the task titles.

    Bug fix: the original called ``obj.map(lambda item: item.title)`` — a
    JavaScript idiom. Python lists have no ``.map`` method and JSON objects
    decode to dicts (no ``.title`` attribute), so every call raised
    AttributeError before returning anything.
    """
    response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
    return extract_titles(response)
# /bop handler: needs only the image URL and the id of the chat that
# issued the command.
def bop(bot, update):
    """Reply to the current chat with a random dog photo."""
    bot.send_photo(chat_id=update.message.chat_id, photo=get_url())
def getTaskList(bot, update):
    """Send each fetched task title to the requesting chat as its own message.

    Bug fix: the original passed a bare ``Markdown`` name as the third
    argument — an undefined identifier, so every call raised NameError.
    The telegram API expects the parse mode as a keyword string instead.
    """
    chat_id = update.message.chat_id
    for task in get_tasks():
        bot.sendMessage(chat_id, task, parse_mode='Markdown')
def main():
    """Wire up the /bop command handler and poll until interrupted."""
    # NOTE(review): the bot token is hard-coded — move it to an environment
    # variable before publishing this repository.
    updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
    updater.dispatcher.add_handler(CommandHandler('bop', bop))
    updater.start_polling()
    updater.idle()


if __name__ == '__main__':
    main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
# NOTE(review): this region of the file was the same five definitions
# (get_url, get_tasks, bop, getTaskList, main) plus the __main__ guard
# copy-pasted dozens of times.  Re-defining identical functions is a
# no-op on import, and under __main__ execution never proceeds past the
# first main() call (updater.idle() blocks until a signal), so collapsing
# the duplicates into a single, cleaned-up copy preserves behaviour.

def get_url():
    """Return the URL of a random dog picture from the random.dog API."""
    # Timeout prevents an indefinite hang; raise_for_status surfaces HTTP
    # errors instead of a later, confusing JSON decode failure.
    response = requests.get('https://random.dog/woof.json', timeout=10)
    response.raise_for_status()
    return response.json()['url']

def get_tasks():
    """Fetch the task list from the nowwat API and return the task titles."""
    response = requests.get('https://nowwat.herokuapp.com/api/tasks.json',
                            timeout=10).json()
    # Python lists have no .map() and JSON objects are dicts, so the
    # original `response.map(lambda item: item.title)` raised at runtime.
    return [item['title'] for item in response]

# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
    """Handler for /bop: send a random dog photo to the requesting chat."""
    url = get_url()
    chat_id = update.message.chat_id
    bot.send_photo(chat_id=chat_id, photo=url)

def getTaskList(bot, update):
    """Send each remote task title to the chat as a Markdown message."""
    chat_id = update.message.chat_id
    for task in get_tasks():
        # parse_mode must be the string 'Markdown', not a bare name.
        bot.sendMessage(chat_id, task, parse_mode='Markdown')

def main():
    """Build the Updater, register handlers and poll until interrupted."""
    updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
    dp = updater.dispatcher
    dp.add_handler(CommandHandler('bop', bop))
    updater.start_polling()
    updater.idle()

if __name__ == '__main__':
    main()
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main() | [
"[email protected]"
]
| |
197d249f49a3bf0f4bbe8e5c1e093ff2fd5d13c1 | 6f23adb3da803dda89e21cfa21a024a015ec1710 | /2020/16-2.py | 8883ed2bc121687d8e600f19d5385f5a9769ba9f | []
| no_license | Remboooo/adventofcode | 1478252bcb19c0dd19e4fa2effd355ee71a5d349 | 5647b8eddd0a3c7781a9c21019f6f06f6edc09bd | refs/heads/master | 2022-12-15T10:21:29.219459 | 2022-12-13T23:02:03 | 2022-12-13T23:02:03 | 226,883,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,264 | py | from argparse import ArgumentParser
from collections import defaultdict
from functools import reduce
from itertools import islice, count
from pprint import pprint
from util import timed
def parse_rules(f):
    """Read ``name: a-b or c-d`` rule lines from *f* until a blank line.

    Returns:
        dict[str, list[tuple[int, int]]]: each field name mapped to its
        list of inclusive (low, high) range tuples.
    """
    rules = {}
    for raw in f:
        stripped = raw.strip()
        if not stripped:
            # Blank line terminates the rules section.
            break
        field_name, ranges_part = stripped.split(": ", 1)
        ranges = []
        for range_text in ranges_part.split(" or "):
            low_text, high_text = range_text.split("-", 1)
            ranges.append((int(low_text), int(high_text)))
        rules[field_name] = ranges
    return rules
def parse_my_ticket(f):
    """Parse the "your ticket:" section from *f*.

    Expects the literal header line ``your ticket:``, then one
    comma-separated line of integers, then a trailing separator line
    (consumed and discarded).

    Returns:
        tuple[int, ...]: the ticket's field values.

    Raises:
        ValueError: if the header line is missing.
    """
    header = f.readline().strip()
    if header != "your ticket:":
        raise ValueError("First line was not 'your ticket:'")
    ticket = tuple(map(int, f.readline().split(',')))
    f.readline()  # consume the blank separator line
    return ticket
def parse_tickets(f):
    """Yield each nearby ticket from *f* as a tuple of ints.

    Expects the literal header line ``nearby tickets:`` followed by one
    comma-separated ticket per line until EOF.

    Raises:
        ValueError: on first iteration, if the header line is missing
        (this is a generator, so nothing runs until it is consumed).
    """
    header = f.readline().strip()
    if header != "nearby tickets:":
        raise ValueError("First line was not 'nearby tickets:'")
    for raw in f:
        yield tuple(map(int, raw.strip().split(',')))
@timed
def get_valid_tickets(nearby_tickets, rules):
    """Return only the tickets whose every field satisfies some rule.

    A ticket is valid when *all* of its fields fall inside *any*
    (low, high) range of *any* rule.
    """
    # Flatten all (low, high) ranges once instead of re-walking the rule
    # dict for every field of every ticket.
    all_ranges = [bounds for rule in rules.values() for bounds in rule]

    def field_ok(field):
        return any(low <= field <= high for low, high in all_ranges)

    return [ticket for ticket in nearby_tickets
            if all(field_ok(field) for field in ticket)]
@timed
def find_field_ids(nearby_tickets, rules):
    """Deduce which column index on a ticket corresponds to each rule name.

    Args:
        nearby_tickets: valid tickets only (tuples of ints, all same length).
        rules: mapping of field name -> list of (low, high) inclusive ranges.

    Returns:
        dict mapping each field name to the single column index it occupies.
    """
    field_ids = {}
    # For every field name in the rulebook, check which field IDs match its rules on all of the valid tickets
    for field_name, rule in rules.items():
        # Start by considering every possible field ID for this name
        possible_ids = set(range(len(nearby_tickets[0])))
        for ticket in nearby_tickets:
            # Prune the possible IDs for this field name by checking which field IDs match its rules on this ticket
            possible_ids &= {n for n, field in enumerate(ticket) if any(rlow <= field <= rhigh for rlow, rhigh in rule)}
        field_ids[field_name] = possible_ids
    # Some fields still have multiple possibilities after checking all of the tickets, but then others only have one,
    # so there's some overlap and we can eliminate the ambiguities.
    # I'm 99% sure this will not work in all possible cases, but it works for the test input and my puzzle input 🤷🏻
    # NOTE(review): this single elimination pass assumes the candidate sets
    # form a strict containment chain (the usual AoC day-16 structure); a
    # general input could require iterating until a fixed point — confirm
    # before reusing elsewhere.
    field_ids = {
        name: next(
            fid for fid in pid
            if not any(
                # if there's another field with a shorter list of ID options that also contains this ID, skip it
                name != oname and len(opid) < len(pid) and fid in opid
                for oname, opid in field_ids.items()
            )
        )
        for name, pid in field_ids.items()
    }
    return field_ids
def main():
    """Solve AoC 2020 day 16 part 2: parse the input file, keep only valid
    tickets, deduce the field-to-column mapping, and print the product of
    my ticket's 'departure*' field values."""
    arg_parser = ArgumentParser()
    arg_parser.add_argument("file", nargs='?', type=str, default="16-input.txt")
    args = arg_parser.parse_args()

    with open(args.file, 'r') as f:
        # The three sections appear in this order in the puzzle input.
        rules = parse_rules(f)
        my_ticket = parse_my_ticket(f)
        nearby_tickets = list(parse_tickets(f))

    nearby_tickets = get_valid_tickets(nearby_tickets, rules)
    field_ids = find_field_ids(nearby_tickets, rules)

    departure_values = (my_ticket[fid] for name, fid in field_ids.items()
                        if name.startswith('departure'))
    print(reduce(lambda acc, value: acc * value, departure_values))
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
d9488e55773f53b084a0d709450f00dfefd69089 | 299fe2ca879e509798e95c00b7ba33914031f4a7 | /eruditio/shared_apps/django_wizard/wizard.py | dbfab0dcfbd21b9bc03b532bad0f2afd2d52e2e6 | [
"MIT"
]
| permissive | genghisu/eruditio | dcf2390c98d5d1a7c1044a9221bf319cb7d1f0f6 | 5f8f3b682ac28fd3f464e7a993c3988c1a49eb02 | refs/heads/master | 2021-01-10T11:15:28.230527 | 2010-04-23T21:13:01 | 2010-04-23T21:13:01 | 50,865,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,097 | py | from django_wizard.models import ConfigOption, ConfigFixture, DefinedConfigOption
from django.core.exceptions import ObjectDoesNotExist
class ConfigIndex(object):
    """In-memory index of ConfigOption instances, kept in sync with the DB.

    ``register`` ensures both the ConfigOption row and its
    DefinedConfigOption row (holding the effective value, seeded from the
    option's default) exist, creating them when missing.
    """

    def __init__(self):
        # Maps (app, name) -> the canonical, DB-backed ConfigOption.
        self._registry = {}

    def register(self, configuration):
        """Register *configuration* (a ConfigOption), creating DB rows as needed.

        No-op when the (app, name) pair is already registered, or when
        *configuration* is not a ConfigOption.
        """
        key = (configuration.app, configuration.name)
        # isinstance instead of `__class__ ==` so ConfigOption subclasses
        # (e.g. Django proxy models) also register; `key in` instead of
        # `not (...) in` for idiomatic membership testing.
        if not isinstance(configuration, ConfigOption) or key in self._registry:
            return
        try:
            existing_config = ConfigOption.objects.get(app=configuration.app, name=configuration.name)
        except ObjectDoesNotExist:
            configuration.save()
            existing_config = configuration
        try:
            DefinedConfigOption.objects.get(option__name=configuration.name, option__app=configuration.app)
        except ObjectDoesNotExist:
            # Seed the effective value from the option's declared default.
            defined_config = DefinedConfigOption(option=existing_config, value=configuration.default)
            defined_config.save()
        self._registry[key] = existing_config

    def unregister(self, configuration):
        """Delete the matching ConfigOption row (if any) and drop it from the index."""
        key = (configuration.app, configuration.name)
        try:
            ConfigOption.objects.get(app=configuration.app, name=configuration.name).delete()
        except ObjectDoesNotExist:
            pass
        # BUG FIX: the original used `del`, which raised KeyError when the
        # row existed in the DB but had never been register()ed in this
        # process; pop() also cleans up when the DB row is already gone.
        self._registry.pop(key, None)

    def clear_registry(self):
        """Forget all registered options (does not touch the database)."""
        self._registry = {}
config_index = ConfigIndex()
class FixtureIndex(object):
    """In-memory index of ConfigFixture instances, kept in sync with the DB."""

    def __init__(self):
        # Maps (app_label, module_name) -> the canonical ConfigFixture.
        self._registry = {}

    def register(self, fixture):
        """Register *fixture*, saving it to the DB if no matching row exists.

        No-op when the (app_label, module_name) pair is already registered.
        """
        key = (fixture.app_label, fixture.module_name)
        if key in self._registry:
            return
        try:
            existing_fixture = ConfigFixture.objects.get(app_label=fixture.app_label, module_name=fixture.module_name)
        except ObjectDoesNotExist:
            fixture.save()
            existing_fixture = fixture
        # BUG FIX: the original indexed the possibly-unsaved `fixture` even
        # when a matching row already existed in the DB; index the canonical
        # DB-backed instance instead, matching ConfigIndex.register.
        self._registry[key] = existing_fixture
fixtures_index = FixtureIndex() | [
"genghisu@6a795458-236b-11df-a5e4-cb4ff25536bb"
]
| genghisu@6a795458-236b-11df-a5e4-cb4ff25536bb |
81bd2b2a328a6e4dd44b19fdd29ef301958ada2c | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/timeseriesinsights/v20210630preview/reference_data_set.py | 5b350da438874cbf3ea8e7b7b52d7935a1a141be | [
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,422 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ReferenceDataSetArgs', 'ReferenceDataSet']
# NOTE: auto-generated by the Pulumi SDK Generator (see file header) — do not
# hand-edit the structure: @pulumi.input_type introspects these exact
# property getters/setters, and pulumi.get/pulumi.set read and write the
# decorator-managed backing store keyed by the snake_case property name.
@pulumi.input_type
class ReferenceDataSetArgs:
    def __init__(__self__, *,
                 environment_name: pulumi.Input[str],
                 key_properties: pulumi.Input[Sequence[pulumi.Input['ReferenceDataSetKeyPropertyArgs']]],
                 resource_group_name: pulumi.Input[str],
                 data_string_comparison_behavior: Optional[pulumi.Input[Union[str, 'DataStringComparisonBehavior']]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 reference_data_set_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a ReferenceDataSet resource.
        :param pulumi.Input[str] environment_name: The name of the Time Series Insights environment associated with the specified resource group.
        :param pulumi.Input[Sequence[pulumi.Input['ReferenceDataSetKeyPropertyArgs']]] key_properties: The list of key properties for the reference data set.
        :param pulumi.Input[str] resource_group_name: Name of an Azure Resource group.
        :param pulumi.Input[Union[str, 'DataStringComparisonBehavior']] data_string_comparison_behavior: The reference data set key comparison behavior can be set using this property. By default, the value is 'Ordinal' - which means case sensitive key comparison will be performed while joining reference data with events or while adding new reference data. When 'OrdinalIgnoreCase' is set, case insensitive comparison will be used.
        :param pulumi.Input[str] location: The location of the resource.
        :param pulumi.Input[str] reference_data_set_name: Name of the reference data set.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value pairs of additional properties for the resource.
        """
        # Required arguments are stored unconditionally; optional ones only
        # when supplied, so unset properties stay absent from the backing store.
        pulumi.set(__self__, "environment_name", environment_name)
        pulumi.set(__self__, "key_properties", key_properties)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        if data_string_comparison_behavior is not None:
            pulumi.set(__self__, "data_string_comparison_behavior", data_string_comparison_behavior)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if reference_data_set_name is not None:
            pulumi.set(__self__, "reference_data_set_name", reference_data_set_name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter(name="environmentName")
    def environment_name(self) -> pulumi.Input[str]:
        """
        The name of the Time Series Insights environment associated with the specified resource group.
        """
        return pulumi.get(self, "environment_name")

    @environment_name.setter
    def environment_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "environment_name", value)

    @property
    @pulumi.getter(name="keyProperties")
    def key_properties(self) -> pulumi.Input[Sequence[pulumi.Input['ReferenceDataSetKeyPropertyArgs']]]:
        """
        The list of key properties for the reference data set.
        """
        return pulumi.get(self, "key_properties")

    @key_properties.setter
    def key_properties(self, value: pulumi.Input[Sequence[pulumi.Input['ReferenceDataSetKeyPropertyArgs']]]):
        pulumi.set(self, "key_properties", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        Name of an Azure Resource group.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="dataStringComparisonBehavior")
    def data_string_comparison_behavior(self) -> Optional[pulumi.Input[Union[str, 'DataStringComparisonBehavior']]]:
        """
        The reference data set key comparison behavior can be set using this property. By default, the value is 'Ordinal' - which means case sensitive key comparison will be performed while joining reference data with events or while adding new reference data. When 'OrdinalIgnoreCase' is set, case insensitive comparison will be used.
        """
        return pulumi.get(self, "data_string_comparison_behavior")

    @data_string_comparison_behavior.setter
    def data_string_comparison_behavior(self, value: Optional[pulumi.Input[Union[str, 'DataStringComparisonBehavior']]]):
        pulumi.set(self, "data_string_comparison_behavior", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        The location of the resource.
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter(name="referenceDataSetName")
    def reference_data_set_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the reference data set.
        """
        return pulumi.get(self, "reference_data_set_name")

    @reference_data_set_name.setter
    def reference_data_set_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "reference_data_set_name", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Key-value pairs of additional properties for the resource.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
class ReferenceDataSet(pulumi.CustomResource):
    # Pulumi resource for an Azure Time Series Insights reference data set
    # (API version 2021-06-30-preview).  The two @overload stubs below only
    # declare the accepted call signatures; the third __init__ is the real
    # entry point and dispatches to _internal_init.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 data_string_comparison_behavior: Optional[pulumi.Input[Union[str, 'DataStringComparisonBehavior']]] = None,
                 environment_name: Optional[pulumi.Input[str]] = None,
                 key_properties: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ReferenceDataSetKeyPropertyArgs']]]]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 reference_data_set_name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        A reference data set provides metadata about the events in an environment. Metadata in the reference data set will be joined with events as they are read from event sources. The metadata that makes up the reference data set is uploaded or modified through the Time Series Insights data plane APIs.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Union[str, 'DataStringComparisonBehavior']] data_string_comparison_behavior: The reference data set key comparison behavior can be set using this property. By default, the value is 'Ordinal' - which means case sensitive key comparison will be performed while joining reference data with events or while adding new reference data. When 'OrdinalIgnoreCase' is set, case insensitive comparison will be used.
        :param pulumi.Input[str] environment_name: The name of the Time Series Insights environment associated with the specified resource group.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ReferenceDataSetKeyPropertyArgs']]]] key_properties: The list of key properties for the reference data set.
        :param pulumi.Input[str] location: The location of the resource.
        :param pulumi.Input[str] reference_data_set_name: Name of the reference data set.
        :param pulumi.Input[str] resource_group_name: Name of an Azure Resource group.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value pairs of additional properties for the resource.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ReferenceDataSetArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        A reference data set provides metadata about the events in an environment. Metadata in the reference data set will be joined with events as they are read from event sources. The metadata that makes up the reference data set is uploaded or modified through the Time Series Insights data plane APIs.
        :param str resource_name: The name of the resource.
        :param ReferenceDataSetArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Decide which overload the caller used: an args-object form or the
        # keyword-argument form, then forward to _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(ReferenceDataSetArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 data_string_comparison_behavior: Optional[pulumi.Input[Union[str, 'DataStringComparisonBehavior']]] = None,
                 environment_name: Optional[pulumi.Input[str]] = None,
                 key_properties: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ReferenceDataSetKeyPropertyArgs']]]]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 reference_data_set_name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        # Normalize and validate the resource options.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id is only set when adopting an existing resource (see get());
        # when creating, build the input property bag and validate required
        # properties.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ReferenceDataSetArgs.__new__(ReferenceDataSetArgs)
            __props__.__dict__["data_string_comparison_behavior"] = data_string_comparison_behavior
            # environment_name, key_properties and resource_group_name are
            # required unless the resource is identified via URN.
            if environment_name is None and not opts.urn:
                raise TypeError("Missing required property 'environment_name'")
            __props__.__dict__["environment_name"] = environment_name
            if key_properties is None and not opts.urn:
                raise TypeError("Missing required property 'key_properties'")
            __props__.__dict__["key_properties"] = key_properties
            __props__.__dict__["location"] = location
            __props__.__dict__["reference_data_set_name"] = reference_data_set_name
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["tags"] = tags
            # Output-only properties start as None and are resolved by the
            # engine after the create/read completes.
            __props__.__dict__["creation_time"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["provisioning_state"] = None
            __props__.__dict__["type"] = None
        # Register aliases for the other API versions of this resource so
        # version migrations do not replace existing resources.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:timeseriesinsights:ReferenceDataSet"), pulumi.Alias(type_="azure-native:timeseriesinsights/v20170228preview:ReferenceDataSet"), pulumi.Alias(type_="azure-native:timeseriesinsights/v20171115:ReferenceDataSet"), pulumi.Alias(type_="azure-native:timeseriesinsights/v20180815preview:ReferenceDataSet"), pulumi.Alias(type_="azure-native:timeseriesinsights/v20200515:ReferenceDataSet"), pulumi.Alias(type_="azure-native:timeseriesinsights/v20210331preview:ReferenceDataSet")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(ReferenceDataSet, __self__).__init__(
            'azure-native:timeseriesinsights/v20210630preview:ReferenceDataSet',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'ReferenceDataSet':
        """
        Get an existing ReferenceDataSet resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties are read back from the provider, so they are seeded
        # with None here.
        __props__ = ReferenceDataSetArgs.__new__(ReferenceDataSetArgs)
        __props__.__dict__["creation_time"] = None
        __props__.__dict__["data_string_comparison_behavior"] = None
        __props__.__dict__["key_properties"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        return ReferenceDataSet(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="creationTime")
    def creation_time(self) -> pulumi.Output[str]:
        """
        The time the resource was created.
        """
        return pulumi.get(self, "creation_time")
    @property
    @pulumi.getter(name="dataStringComparisonBehavior")
    def data_string_comparison_behavior(self) -> pulumi.Output[Optional[str]]:
        """
        The reference data set key comparison behavior can be set using this property. By default, the value is 'Ordinal' - which means case sensitive key comparison will be performed while joining reference data with events or while adding new reference data. When 'OrdinalIgnoreCase' is set, case insensitive comparison will be used.
        """
        return pulumi.get(self, "data_string_comparison_behavior")
    @property
    @pulumi.getter(name="keyProperties")
    def key_properties(self) -> pulumi.Output[Sequence['outputs.ReferenceDataSetKeyPropertyResponse']]:
        """
        The list of key properties for the reference data set.
        """
        return pulumi.get(self, "key_properties")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        Resource location
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        Provisioning state of the resource.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type
        """
        return pulumi.get(self, "type")
| [
"[email protected]"
]
| |
17c32a613daa0e013bfcaad2caa72d86e7343183 | 53b47cbfea75afd22f37a2a9c8af4573165a0515 | /Week5/Assessment 1/algorithm/algo.py | aa58e2a50fecb31c7a089d08f9c8950556523934 | []
def sorting(MyList):
    """Move every 0 in MyList to the end, keeping the relative order of
    the non-zero elements.

    The list is modified in place and also returned, so callers holding a
    reference to the original list see the result either way.

    Fixes over the original implementation:
    * no longer loops forever when the list contains no zeros
      (``MyList[-0:]`` is the whole list, which was compared against ``[]``),
    * no longer mutates the list while iterating over it.
    """
    zero_count = MyList.count(0)
    # Stable single pass over the non-zeros, then append the zeros.
    MyList[:] = [el for el in MyList if el != 0] + [0] * zero_count
    return MyList


if __name__ == "__main__":
    print(sorting([1, 0, 7, 2, 0, 3, 9, 0, 4]))
    # [1, 7, 2, 3, 9, 4, 0, 0, 0]
"[email protected]"
]
| |
01893f13d23f63efc4f427a9eb781cbc09388785 | dee345b10c7dc29dd6b0cac04677beef14f2d64f | /tests/test_manual_quality_merging.py | 35dc41c621fefe22e33d69b77f397f562e697051 | [
"MIT"
]
| permissive | richard-shepherd/calculation_graph | fcd0df6b0d4fc598586ee67c129ccc90b9cac383 | 647b1f13544e3525068c8b3b83a7eed3f7e473bd | refs/heads/master | 2016-09-05T19:46:14.567122 | 2015-05-21T10:58:14 | 2015-05-21T10:58:14 | 31,436,445 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,491 | py | from graph import *
class SourceNode(GraphNode):
"""
A data source. Just a value with data-quality.
"""
def __init__(self, source_name, *args, **kwargs):
super().__init__(*args, **kwargs)
self.source_name = source_name
# The value provided by this source...
self.value = 0.0
# True if the source is working correctly...
self.source_is_good = False
def set_value(self, value, source_is_good):
"""
Sets the value of this source and whether the source is good.
This causes the node to need recalculation.
"""
self.value = value
self.source_is_good = source_is_good
self.needs_calculation()
def calculate(self):
"""
We set the data-quality from the source_is_good information.
"""
self.quality.clear_to_good()
if self.source_is_good is False:
self.quality.merge(Quality.BAD, "Source " + self.source_name + " is bad")
return GraphNode.CalculateChildrenType.CALCULATE_CHILDREN
class SourceChooserNode(GraphNode):
    """Selects between two redundant SourceNode parents: the value and
    quality are copied from the first parent (A before B) whose
    data-quality is Good."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.source_A_node = None  # parent source "A"
        self.source_B_node = None  # parent source "B"
        self.value = 0.0           # value taken from the chosen parent

    def set_dependencies(self):
        """Register both redundant sources as parents."""
        self.source_A_node = self.add_parent_node(SourceNode, "A")
        self.source_B_node = self.add_parent_node(SourceNode, "B")

    def calculate_quality(self):
        """Disable automatic parent-quality merging: quality is assigned
        explicitly in calculate(), so one Bad parent does not poison
        this node while the other parent is still Good."""
        pass

    def calculate(self):
        """Adopt value and quality from the first Good parent; go Bad
        with an explanatory message when neither parent is usable."""
        for source in (self.source_A_node, self.source_B_node):
            if source.quality.is_good():
                self.value = source.value
                self.quality.set_from(source.quality)
                break
        else:
            # Neither source has good data...
            self.value = 0.0
            self.quality.set_to_bad("No source has Good data")
        return GraphNode.CalculateChildrenType.CALCULATE_CHILDREN
def test_manual_quality_merging():
    """
    Tests manual merging of quality from parent nodes.
    The graph for this test has a "redundant" data source. The test node
    has two parents A and B. It chooses which ever one of them has good
    quality.
    So in this case, we do not want to automatically merge quality, as
    otherwise if one of the parents goes Bad, the "choosing" node would
    go bad as well. In this case, as long as one of the parents is Good,
    then the choosing node will be Good as well.
    """
    graph_manager = GraphManager()
    # We create the sources before the chooser, so we can set their values...
    source_A_node = NodeFactory.get_node(graph_manager, GraphNode.GCType.NON_COLLECTABLE, SourceNode, "A")
    source_B_node = NodeFactory.get_node(graph_manager, GraphNode.GCType.NON_COLLECTABLE, SourceNode, "B")
    # We create a node to choose between the two sources above...
    chooser_node = NodeFactory.get_node(graph_manager, GraphNode.GCType.NON_COLLECTABLE, SourceChooserNode)
    # We set both sources to have Good data-quality. The value from source A
    # is chosen when both are good...
    source_A_node.set_value(123.0, source_is_good=True)
    source_B_node.set_value(456.0, source_is_good=True)
    graph_manager.calculate()
    assert chooser_node.value == 123.0
    assert chooser_node.quality.is_good()
    assert chooser_node.quality.get_description() == ""
    # We set source B bad. The value from A should still be used...
    source_B_node.set_value(457.0, source_is_good=False)
    graph_manager.calculate()
    assert chooser_node.value == 123.0
    assert chooser_node.quality.is_good()
    assert chooser_node.quality.get_description() == ""
    # We set source A bad as well...
    # (now neither parent is usable, so the chooser itself must go Bad)
    source_A_node.set_value(124.0, source_is_good=False)
    graph_manager.calculate()
    assert chooser_node.value == 0.0
    assert chooser_node.quality.is_good() is False
    assert "No source has Good data" in chooser_node.quality.get_description()
    # We set source B Good...
    # (the chooser recovers using B while A is still Bad)
    source_B_node.set_value(567.0, source_is_good=True)
    graph_manager.calculate()
    assert chooser_node.value == 567.0
    assert chooser_node.quality.is_good() is True
    assert chooser_node.quality.get_description() == ""
    # We set source A Good...
    # (A takes precedence over B whenever both are Good)
    source_A_node.set_value(321.0, source_is_good=True)
    graph_manager.calculate()
    assert chooser_node.value == 321.0
    assert chooser_node.quality.is_good() is True
    assert chooser_node.quality.get_description() == ""
    # We update A...
    # (a plain value update must propagate through the chooser)
    source_A_node.set_value(432.0, source_is_good=True)
    graph_manager.calculate()
    assert chooser_node.value == 432.0
    assert chooser_node.quality.is_good() is True
    assert chooser_node.quality.get_description() == ""
| [
"[email protected]"
]
| |
2e2aa42191565859e305c671773a20d93e459e04 | 615ce4f7790057c92d93f43a5f0cab9ba018fbd6 | /pathological/atomics/strings/evaluatable.py | 29efa6a5e4aceb5c95b7ef7a0bd4d68ee5b533fd | [
"MIT"
]
| permissive | OaklandPeters/pathological | 20e88fac7216eebfe735a018449813f956c69484 | c561eb30df8cdcc0f277a17cd08a03cf173e312f | refs/heads/master | 2021-01-10T16:00:54.798667 | 2016-03-07T14:47:54 | 2016-03-07T14:55:20 | 53,332,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | """
SQL
Python code
Python eval
Javscript
Javascript eval
C
"""
| [
"[email protected]"
]
| |
b8124490b623a6c5b281a10cce0cc972f2334d95 | 9017f217abe077aff77f64938a988fcc4a292e40 | /plate/common/syntax_highlighting.py | aa0484c12f2fad1de5134b9f34f2332e331f0d6d | [
"Apache-2.0"
]
| permissive | gogit/plate | c8c47d47de2b11d5c7b4840106181bb177b50c88 | 2e5fdb1ddfad560986b429cf2ff92aed4d35e56c | refs/heads/master | 2021-01-18T07:18:52.770779 | 2016-04-08T11:45:00 | 2016-04-08T11:45:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | # -*- coding:utf-8 -*-
def syntax_highlight(lang, code):
    """
    Render *code* as Pygments-highlighted HTML.

    The Pygments output (``<div class="highlight"><pre>...``) is rewritten
    so that the outer wrapper becomes ``<pre class="highlight LANG">`` with
    the language name appended to the CSS class.

    :param lang: programming language name understood by Pygments
    :param code: source code to highlight
    :return: highlighted HTML fragment
    :raises Exception: any Pygments lexer-lookup/formatting error propagates
    """
    # Imported locally so importing this module does not require pygments
    # until the function is actually called.
    from pygments import lexers
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    lexer = lexers.get_lexer_by_name(lang.lower())
    highlighted = highlight(code, lexer, HtmlFormatter())
    # Append the language to the "highlight" CSS class.  Only the first
    # occurrence is touched; the original split/join silently dropped any
    # content after a second occurrence of the marker.
    highlighted = highlighted.replace('"highlight', '"highlight ' + lang, 1)
    # Collapse the wrapping <div><pre> into a single <pre>.  The
    # replacements are anchored to the actual tags so that "div"/"pre"
    # appearing inside the highlighted code text are left untouched
    # (the original global replace("div", "pre") corrupted such content).
    highlighted = highlighted.replace("<pre>", "", 1)
    highlighted = highlighted.replace("</pre>", "", 1)
    highlighted = highlighted.replace("<div", "<pre", 1)
    highlighted = highlighted.replace("</div>", "</pre>", 1)
    return highlighted
| [
"[email protected]"
]
| |
cf778af0af1dddef7a128d1e74c241f6d5102ed0 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-3/e5f4987a8fe1b2a1907436c11e0c5ae9ae6b12b3-<mandatory>-fix.py | a5a74f66992fb6dfef2b09ed691874a499fea364 | []
| no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | def mandatory(a):
from jinja2.runtime import Undefined
' Make a variable mandatory '
if isinstance(a, Undefined):
if (a._undefined_name is not None):
name = ("'%s' " % to_text(a._undefined_name))
else:
name = ''
raise AnsibleFilterError(('Mandatory variable %snot defined.' % name))
return a | [
"[email protected]"
]
| |
b32201fe4bcba5b5044dd43bd63144b156758276 | 8f5c7f28703b274163c2832f6511025e37b4295f | /helloworld.com/reviews/migrations/0001_initial.py | 98cc1576bfc65b810dcb3c4f295628f97c64f0c6 | []
| no_license | reinaaa05/portfolio | 159dc4d48b3e215bfb6c7115cd39b7f63ee2418a | e93189e3aa027e57bac490d8874519dd7f717620 | refs/heads/main | 2023-05-15T09:56:30.741402 | 2021-06-12T13:46:44 | 2021-06-12T13:46:44 | 307,375,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,024 | py | # Generated by Django 3.1.7 on 2021-03-19 05:50
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial migration for the reviews app: creates the
    # ReviewsConfig table (store name, title, review text, 1-5 star rating
    # and creation timestamp).  Do not edit by hand.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='ReviewsConfig',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('store_name', models.CharField(max_length=255, verbose_name='店名')),
                ('title', models.CharField(max_length=255, verbose_name='タイトル')),
                ('text', models.TextField(blank=True, verbose_name='口コミテキスト')),
                ('stars', models.IntegerField(choices=[(1, '☆'), (2, '☆☆'), (3, '☆☆☆'), (4, '☆☆☆☆'), (5, '☆☆☆☆☆')], verbose_name='星の数')),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now, verbose_name='作成日')),
            ],
        ),
    ]
| [
"[email protected]"
]
| |
aec91a9ce7f35dfa47aaf32a2ca1c5960d6a0f98 | fbf82e9a3d6e7b4dbaa2771eed0d96efabc87b3b | /platform/storage/storage/ceph/ceph/manager/cephmon_manager.py | 1a1d68c8581d61a22a5d18d91845d4a99f3ac9e9 | []
| no_license | woshidashayuchi/boxlinker-all | 71603066fee41988108d8e6c803264bd5f1552bc | 318f85e6ff5542cd70b7a127c0b1d77a01fdf5e3 | refs/heads/master | 2021-05-09T03:29:30.652065 | 2018-01-28T09:17:18 | 2018-01-28T09:17:18 | 119,243,814 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,801 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: YanHua <[email protected]>
import uuid
from common.logs import logging as log
from common.code import request_result
from ceph.driver import ceph_driver
class CephMonManager(object):
    """
    Manager for ceph monitor (mon) nodes.

    ``cephmon_init`` bootstraps a brand-new cluster with two monitors and
    ``cephmon_add`` joins an additional monitor to an existing cluster.
    All per-host operations are delegated to ``ceph_driver.CephDriver``.

    Fixes over the original implementation:
    * the ssh-failure branches for the second monitor in ``cephmon_init``
      logged the first monitor's address,
    * the ssh-failure branches in ``cephmon_add`` referenced the undefined
      name ``mon01_hostip`` (a NameError at runtime).
    Both are resolved by the shared ``_ssh_access_check`` helper.
    """

    def __init__(self):
        self.ceph_driver = ceph_driver.CephDriver()

    def _ssh_access_check(self, host_ip, rootpwd):
        """
        Configure password-less ssh access to ``host_ip``.

        Returns ``None`` on success, otherwise the error response the
        caller should return directly (523 = wrong password, 522 = connect
        timeout, 601 = any other connection failure).
        """
        host_ssh_conf = self.ceph_driver.host_ssh_conf(host_ip, rootpwd)
        if host_ssh_conf == 0:
            return None
        if host_ssh_conf == 1:
            log.warning('mon节点(%s)密码错误' % (host_ip))
            return request_result(523)
        elif host_ssh_conf == 2:
            log.warning('mon节点(%s)连接超时' % (host_ip))
            return request_result(522)
        else:
            log.warning('mon节点(%s)连接错误' % (host_ip))
            return request_result(601)

    def cephmon_init(self, cluster_info,
                     mon01_hostip, mon01_rootpwd, mon01_snic,
                     mon02_hostip, mon02_rootpwd, mon02_snic):
        """
        Bootstrap a two-monitor ceph cluster.

        :param cluster_info: dict of cluster settings (cluster_uuid,
            auth flags, pg numbers, networks, ratios, journal_size,
            ntp_server); when falsy, built-in defaults are used and a new
            cluster uuid is generated
        :param mon01_hostip/mon02_hostip: management IPs of the monitors
        :param mon01_rootpwd/mon02_rootpwd: root passwords for ssh setup
        :param mon01_snic/mon02_snic: storage-network interface names
        :return: ``request_result`` dict; code 0 carries hostnames,
            storage IPs and the cluster uuid
        """
        # Establish ssh access to both monitor hosts first.
        ssh_error = self._ssh_access_check(mon01_hostip, mon01_rootpwd)
        if ssh_error is not None:
            return ssh_error
        ssh_error = self._ssh_access_check(mon02_hostip, mon02_rootpwd)
        if ssh_error is not None:
            return ssh_error
        # Resolve hostnames and storage-network IPs for both hosts.
        mon01_hostname = self.ceph_driver.remote_host_name(mon01_hostip)
        if not mon01_hostname:
            log.error('无法获取节点(%s)主机名' % (mon01_hostip))
            return request_result(524)
        mon02_hostname = self.ceph_driver.remote_host_name(mon02_hostip)
        if not mon02_hostname:
            log.error('无法获取节点(%s)主机名' % (mon02_hostip))
            return request_result(524)
        mon01_storage_ip = self.ceph_driver.storage_ip(
            mon01_hostip, mon01_snic)
        if not mon01_storage_ip:
            log.error('无法获取节点(%s)存储IP' % (mon01_hostip))
            return request_result(524)
        mon02_storage_ip = self.ceph_driver.storage_ip(
            mon02_hostip, mon02_snic)
        if not mon02_storage_ip:
            log.error('无法获取节点(%s)存储IP' % (mon02_hostip))
            return request_result(524)
        # Take the cluster settings from cluster_info, or fall back to
        # built-in defaults with a freshly generated cluster uuid.
        if cluster_info:
            cluster_uuid = cluster_info.get('cluster_uuid')
            cluster_auth = cluster_info.get('cluster_auth')
            service_auth = cluster_info.get('service_auth')
            client_auth = cluster_info.get('client_auth')
            ceph_pgnum = cluster_info.get('ceph_pgnum')
            ceph_pgpnum = cluster_info.get('ceph_pgpnum')
            public_network = cluster_info.get('public_network')
            cluster_network = cluster_info.get('cluster_network')
            osd_full_ratio = cluster_info.get('osd_full_ratio')
            osd_nearfull_ratio = cluster_info.get('osd_nearfull_ratio')
            journal_size = cluster_info.get('journal_size')
            ntp_server = cluster_info.get('ntp_server')
        else:
            cluster_uuid = str(uuid.uuid4())
            cluster_auth = 'none'
            service_auth = 'none'
            client_auth = 'none'
            ceph_pgnum = 300
            ceph_pgpnum = 300
            public_network = '192.168.1.0/24'
            cluster_network = '10.10.1.0/24'
            osd_full_ratio = '.85'
            osd_nearfull_ratio = '.70'
            journal_size = 5000
            ntp_server = 'time.nist.gov'
        # Generate ceph.conf and register both monitors in it.
        self.ceph_driver.ceph_conf_init(
            cluster_uuid, cluster_auth, service_auth, client_auth,
            ceph_pgnum, ceph_pgpnum, public_network, cluster_network,
            osd_full_ratio, osd_nearfull_ratio, journal_size)
        self.ceph_driver.mon_conf_update(
            cluster_uuid, '1', mon01_hostname, mon01_storage_ip)
        self.ceph_driver.mon_conf_update(
            cluster_uuid, '2', mon02_hostname, mon02_storage_ip)
        # Distribute the configuration to both hosts.
        mon01_conf_dist = self.ceph_driver.conf_dist(
            cluster_uuid, mon01_hostip)
        if int(mon01_conf_dist) != 0:
            log.error('mon节点(主机名:%s, IP:%s)ceph配置文件分发失败'
                      % (mon01_hostname, mon01_hostip))
            return request_result(525)
        mon02_conf_dist = self.ceph_driver.conf_dist(
            cluster_uuid, mon02_hostip)
        if int(mon02_conf_dist) != 0:
            log.error('mon节点(主机名:%s, IP:%s)ceph配置文件分发失败'
                      % (mon02_hostname, mon02_hostip))
            return request_result(525)
        # Configure time synchronisation on both hosts.
        ntp01_conf = self.ceph_driver.host_ntp_conf(
            mon01_hostip, ntp_server)
        if int(ntp01_conf) != 0:
            log.error('Host ntp server conf failure, '
                      'host_ip=%s, ntp_server=%s'
                      % (mon01_hostip, ntp_server))
            return request_result(526)
        ntp02_conf = self.ceph_driver.host_ntp_conf(
            mon02_hostip, ntp_server)
        if int(ntp02_conf) != 0:
            log.error('Host ntp server conf failure, '
                      'host_ip=%s, ntp_server=%s'
                      % (mon02_hostip, ntp_server))
            return request_result(526)
        # Install the ceph packages/services on both hosts.
        mon01_ceph_install = self.ceph_driver.ceph_service_install(
            mon01_hostip, cluster_uuid)
        if int(mon01_ceph_install) != 0:
            log.error('Ceph service install failure, mon_host_ip=%s'
                      % (mon01_hostip))
            return request_result(526)
        mon02_ceph_install = self.ceph_driver.ceph_service_install(
            mon02_hostip, cluster_uuid)
        if int(mon02_ceph_install) != 0:
            log.error('Ceph service install failure, mon_host_ip=%s'
                      % (mon02_hostip))
            return request_result(526)
        # Build the initial monmap on mon01 and sync it to mon02.
        monmap_init = self.ceph_driver.monmap_init(
            mon01_hostname, mon01_storage_ip,
            mon02_hostname, mon02_storage_ip,
            mon01_hostip, cluster_uuid)
        if int(monmap_init) != 0:
            log.error('Ceph monmap init failure')
            return request_result(526)
        monmap_sync = self.ceph_driver.monmap_sync(
            mon01_hostip, mon02_hostip)
        if int(monmap_sync) != 0:
            log.error('Ceph monmap sync failure')
            return request_result(526)
        # Start the monitor daemons on both hosts.
        mon01_init = self.ceph_driver.mon_host_init(
            mon01_hostname, mon01_hostip)
        if int(mon01_init) != 0:
            log.error('mon节点(主机名:%s,IP:%s)初始化失败'
                      % (mon01_hostname, mon01_hostip))
            return request_result(526)
        mon02_init = self.ceph_driver.mon_host_init(
            mon02_hostname, mon02_hostip)
        if int(mon02_init) != 0:
            log.error('mon节点(主机名:%s,IP:%s)初始化失败'
                      % (mon02_hostname, mon02_hostip))
            return request_result(526)
        # Create the ssd CRUSH bucket.
        crush_ssd_add = self.ceph_driver.crush_ssd_add(mon01_hostip)
        if int(crush_ssd_add) != 0:
            log.error('创建ssd bucket失败')
            return request_result(526)
        # Revoke the control node's temporary ssh access.
        control_host_name = self.ceph_driver.local_host_name()
        self.ceph_driver.host_ssh_del(mon01_hostip, control_host_name)
        self.ceph_driver.host_ssh_del(mon02_hostip, control_host_name)
        result = {
            "mon01_hostip": mon01_hostip,
            "mon02_hostip": mon02_hostip,
            "mon01_hostname": mon01_hostname,
            "mon02_hostname": mon02_hostname,
            "cluster_uuid": cluster_uuid,
            "mon01_storage_ip": mon01_storage_ip,
            "mon02_storage_ip": mon02_storage_ip
        }
        return request_result(0, result)

    def cephmon_add(self, cluster_info, mon_id,
                    host_ip, rootpwd, storage_nic,
                    mon_list):
        """
        Add one monitor to an existing cluster.

        :param cluster_info: dict of cluster settings (must be present;
            528 is returned when missing)
        :param mon_id: identifier of the new monitor; only its last
            character is used as the ceph mon id
        :param host_ip: management IP of the new monitor host
        :param rootpwd: root password for ssh setup
        :param storage_nic: storage-network interface name
        :param mon_list: existing monitors as (mon_id, hostname,
            storage_ip) triples, used to rebuild ceph.conf if missing
        :return: ``request_result`` dict; code 0 carries the new host's
            name, IPs and the cluster uuid
        """
        # Establish ssh access to the new monitor host (this previously
        # raised NameError in every failure branch).
        ssh_error = self._ssh_access_check(host_ip, rootpwd)
        if ssh_error is not None:
            return ssh_error
        host_name = self.ceph_driver.remote_host_name(host_ip)
        if not host_name:
            log.error('无法获取节点(%s)主机名' % (host_ip))
            return request_result(524)
        storage_ip = self.ceph_driver.storage_ip(
            host_ip, storage_nic)
        if not storage_ip:
            log.error('无法获取节点(%s)存储IP' % (host_ip))
            return request_result(524)
        if cluster_info:
            cluster_uuid = cluster_info.get('cluster_uuid')
            cluster_auth = cluster_info.get('cluster_auth')
            service_auth = cluster_info.get('service_auth')
            client_auth = cluster_info.get('client_auth')
            ceph_pgnum = cluster_info.get('ceph_pgnum')
            ceph_pgpnum = cluster_info.get('ceph_pgpnum')
            public_network = cluster_info.get('public_network')
            cluster_network = cluster_info.get('cluster_network')
            osd_full_ratio = cluster_info.get('osd_full_ratio')
            osd_nearfull_ratio = cluster_info.get('osd_nearfull_ratio')
            journal_size = cluster_info.get('journal_size')
            ntp_server = cluster_info.get('ntp_server')
        else:
            log.warning('Ceph cluster info not exists')
            return request_result(528)
        # Rebuild the local ceph.conf (re-registering the already-known
        # monitors) when it is missing on the control node.
        if not self.ceph_driver.ceph_conf_check(cluster_uuid):
            self.ceph_driver.ceph_conf_init(
                cluster_uuid, cluster_auth, service_auth, client_auth,
                ceph_pgnum, ceph_pgpnum, public_network, cluster_network,
                osd_full_ratio, osd_nearfull_ratio, journal_size)
            for mon_info in mon_list:
                ceph_mon_id = mon_info[0]
                mon_host_name = mon_info[1]
                mon_storage_ip = mon_info[2]
                self.ceph_driver.mon_conf_update(
                    cluster_uuid, ceph_mon_id[-1],
                    mon_host_name, mon_storage_ip)
        # Register the new monitor and distribute the configuration.
        self.ceph_driver.mon_conf_update(
            cluster_uuid, mon_id[-1], host_name, storage_ip)
        mon_conf_dist = self.ceph_driver.conf_dist(cluster_uuid, host_ip)
        if int(mon_conf_dist) != 0:
            log.error('mon节点(主机名:%s, IP:%s)ceph配置文件分发失败'
                      % (host_name, host_ip))
            return request_result(525)
        ntp_conf = self.ceph_driver.host_ntp_conf(
            host_ip, ntp_server)
        if int(ntp_conf) != 0:
            log.error('Host ntp server conf failure, '
                      'host_ip=%s, ntp_server=%s'
                      % (host_ip, ntp_server))
            return request_result(526)
        mon_ceph_install = self.ceph_driver.ceph_service_install(
            host_ip, cluster_uuid)
        if int(mon_ceph_install) != 0:
            log.error('Ceph service install failure, mon_host_ip=%s'
                      % (host_ip))
            return request_result(526)
        mon_add = self.ceph_driver.mon_host_add(
            host_name, host_ip, storage_ip)
        if int(mon_add) != 0:
            log.error('mon节点(主机名:%s,IP:%s)添加失败'
                      % (host_name, host_ip))
            return request_result(526)
        # Revoke the control node's temporary ssh access.
        control_host_name = self.ceph_driver.local_host_name()
        self.ceph_driver.host_ssh_del(host_ip, control_host_name)
        result = {
            "host_ip": host_ip,
            "host_name": host_name,
            "cluster_uuid": cluster_uuid,
            "storage_ip": storage_ip
        }
        return request_result(0, result)
| [
"[email protected]"
]
| |
4ba863c2665be91eec065ed4c30766c4093d1eee | ce61d882ab402283c60580cd6b99ac74eb6e3644 | /sixdegrees/random_geometric_api.py | 21de8f64714697adfd352a786159f4126ce9aec3 | [
"MIT"
]
| permissive | benmaier/sixdegrees | f4a6c54cce8021cc4f55a832fbe629e8e22b0cb2 | 6fee5a4f81d91ec2ce92c8767add67b701651a85 | refs/heads/master | 2022-12-11T00:10:42.210563 | 2020-09-15T21:58:18 | 2020-09-15T21:58:18 | 216,598,531 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 22,459 | py | # -*- coding: utf-8 -*-
"""
This module provides convenient wrapper functions for
the C++-functions sampling from all random geometric
small-world network models.
"""
import numpy as np
from _sixdegrees import (
_random_geometric_small_world_network_coord_lists,
_random_geometric_small_world_network,
_random_geometric_kleinberg_network_coord_lists,
_random_geometric_kleinberg_network,
_twoD_random_geometric_kleinberg_network_coord_lists,
_twoD_random_geometric_kleinberg_network,
)
from sixdegrees.kleinberg_helper_functions import (
get_distance_connection_probability_parameters,
get_continuous_distance_connection_probability_parameters,
)
def random_geometric_kleinberg_network(N,
                                       k,
                                       mu,
                                       use_largest_component=False,
                                       delete_non_largest_component_nodes=True,
                                       seed=0,
                                       return_positions=False,
                                       ):
    r"""
    Sample a Kleinberg small-world network whose nodes are placed
    uniformly i.i.d. on a ring of circumference :math:`N`.

    Parameters
    ==========
    N : int
        Number of nodes.
    k : float
        Mean degree.
    mu : float
        Structural control parameter.
    use_largest_component : bool, default = False
        Keep only the largest connected component of size
        :math:`N_g\leq N`.
    delete_non_largest_component_nodes : bool, default = True
        When the largest component is extracted, relabel the surviving
        nodes to :math:`0,\dots,N_g-1`.
    seed : int, default = 0
        Seed for the network construction; ``0`` means random
        initialization.
    return_positions : bool, default = False
        Also return the node positions on the ring.

    Returns
    =======
    N : int
        Number of nodes in the generated network (may be smaller than
        requested when only the largest component is kept).
    edges : list of tuple of int
        Edge list of the generated network.
    X : numpy.ndarray
        Ring positions; only returned when ``return_positions = True``.

    Example
    =======
    >>> N, edges = random_geometric_kleinberg_network(N=512,k=7,mu=-0.5)
    >>> print(N)
    512
    """
    # Uniform i.i.d. positions on [0, N).  Sorting is not strictly
    # required by the sampler, but it makes node i the i-th point along
    # the ring, which is convenient downstream.
    positions = np.sort(np.random.rand(N) * N)
    node_count, edge_list = _random_geometric_kleinberg_network(
        N,
        k,
        mu,
        positions.tolist(),
        use_largest_component=use_largest_component,
        delete_non_largest_component_nodes=delete_non_largest_component_nodes,
        seed=seed
    )
    if return_positions:
        return node_count, edge_list, positions
    return node_count, edge_list
def twoD_random_geometric_kleinberg_network(
        N,
        k,
        mu,
        periodic_boundary_conditions=True,
        use_largest_component=False,
        delete_non_largest_component_nodes=True,
        seed=0,
        epsilon=1e-9,
        return_positions=False,
        use_continuous_connection_probability=False,
        X=None,
        Y=None,
        ):
    r"""
    Generate a Kleinberg small-world network on nodes placed
    uniformly i.i.d. on the unit square.

    Parameters
    ==========
    N : int
        Number of nodes.
    k : float
        Mean degree.
    mu : float
        Structural control parameter.
    periodic_boundary_conditions : bool, default = True
        Apply periodic boundary conditions (2-torus).
    use_largest_component : bool, default = False
        Only return the largest connected component of size
        :math:`N_g \leq N`.
    delete_non_largest_component_nodes : bool, default = True
        If only the largest component is returned, relabel its nodes
        to run from zero to :math:`N_g`.
    seed : int, default = 0
        Seed for network generation; ``0`` means random initialization.
    epsilon : float, default = 1e-9
        Minimal distance below which node pairs are always connected.
    return_positions : bool, default = False
        Also return the node positions.
    use_continuous_connection_probability : bool, default = False
        If True, excess connection probability is redistributed to the
        whole ensemble instead of to nearest neighbors.
    X : numpy.ndarray, default = None
        Node positions in x-direction (:math:`x \in [0,1)`); drawn
        uniformly at random when omitted.
    Y : numpy.ndarray, default = None
        Node positions in y-direction (:math:`y \in [0,1)`); drawn
        uniformly at random when omitted.

    Returns
    =======
    N : int
        Number of nodes in the generated network.
    edges : list of tuple of int
        Node-index pairs constituting the edges of the network.
    X : numpy.ndarray
        x-positions (only when ``return_positions=True``).
    Y : numpy.ndarray
        y-positions (only when ``return_positions=True``).
    """
    if X is None:
        X = np.random.rand(N)
    if Y is None:
        Y = np.random.rand(N)

    # exponent of the distance-dependent connection probability
    kappa = mu - 2

    # choose how excess connection probability is redistributed
    get_params = (
        get_continuous_distance_connection_probability_parameters
        if use_continuous_connection_probability
        else get_distance_connection_probability_parameters
    )
    C, rmin = get_params(
        N,
        k,
        kappa,
        epsilon=epsilon,
        use_periodic_boundary_conditions=periodic_boundary_conditions,
    )

    n_nodes, edge_list = _twoD_random_geometric_kleinberg_network(
        N,
        C,
        rmin,
        kappa,
        X.tolist(),
        Y.tolist(),
        periodic_boundary_conditions=periodic_boundary_conditions,
        use_largest_component=use_largest_component,
        delete_non_largest_component_nodes=delete_non_largest_component_nodes,
        seed=seed,
    )

    if return_positions:
        return n_nodes, edge_list, X, Y
    return n_nodes, edge_list
def random_geometric_small_world_network(N,
                                         k,
                                         mu,
                                         use_largest_component=False,
                                         delete_non_largest_component_nodes=True,
                                         seed=0,
                                         return_positions=False,
                                         ):
    r"""
    Generate a small-world network on nodes placed
    uniformly i.i.d. on a ring of circumference ``N``.

    Parameters
    ==========
    N : int
        Number of nodes.
    k : float
        Mean degree.
    mu : float
        Structural control parameter.
    use_largest_component : bool, default = False
        Only return the largest connected component of size
        :math:`N_g \leq N`.
    delete_non_largest_component_nodes : bool, default = True
        If only the largest component is returned, relabel its nodes
        to run from zero to :math:`N_g`.
    seed : int, default = 0
        Seed for network generation; ``0`` means random initialization.
    return_positions : bool, default = False
        Also return the node positions on the ring.

    Returns
    =======
    N : int
        Number of nodes in the generated network (may be smaller than
        requested if only the largest component is kept).
    edges : list of tuple of int
        Node-index pairs constituting the edges of the network.
    X : numpy.ndarray
        Node positions on the ring (only when ``return_positions=True``).
    """
    # Map the structural control parameter mu to the rewiring parameter
    # beta. (The original code computed this twice in a row; the
    # duplicate statement has been removed.)
    beta = k / (N - 1 - k) * ((N - 1 - k) / k)**mu

    X = np.random.rand(N) * N
    # Sorting is not strictly required, but it makes nodes 0, 1, 2, ...
    # correspond to the first, second, third, ... position in [0, N].
    X.sort()

    _N, _edges = _random_geometric_small_world_network(
        N,
        k,
        beta,
        X.tolist(),
        use_largest_component=use_largest_component,
        delete_non_largest_component_nodes=delete_non_largest_component_nodes,
        seed=seed,
    )

    if return_positions:
        return _N, _edges, X
    return _N, _edges
def random_geometric_kleinberg_network_coord_lists(
        N,
        k,
        mu,
        use_largest_component=False,
        delete_non_largest_component_nodes=True,
        seed=0,
        return_positions=False,
        ):
    r"""
    Generate a Kleinberg small-world network on nodes placed uniformly
    i.i.d. on a ring of circumference ``N`` and return the adjacency
    matrix as coordinate lists.

    Parameters
    ==========
    N : int
        Number of nodes.
    k : float
        Mean degree.
    mu : float
        Structural control parameter.
    use_largest_component : bool, default = False
        Only return the largest connected component of size
        :math:`N_g \leq N`.
    delete_non_largest_component_nodes : bool, default = True
        If only the largest component is returned, relabel its nodes
        to run from zero to :math:`N_g`.
    seed : int, default = 0
        Seed for network generation; ``0`` means random initialization.
    return_positions : bool, default = False
        Also return the node positions on the ring.

    Returns
    =======
    N : int
        Number of nodes in the generated network.
    row : list of int
        Row coordinates of non-zero entries of the adjacency matrix.
    col : list of int
        Column coordinates of non-zero entries of the adjacency matrix.
    X : numpy.ndarray
        Node positions on the ring (only when ``return_positions=True``).
    """
    positions = np.random.rand(N) * N
    # Sorted positions make nodes 0, 1, 2, ... follow ring order,
    # which is convenient although not strictly necessary.
    positions.sort()

    n_nodes, row, col = _random_geometric_kleinberg_network_coord_lists(
        N,
        k,
        mu,
        positions.tolist(),
        use_largest_component=use_largest_component,
        delete_non_largest_component_nodes=delete_non_largest_component_nodes,
        seed=seed,
    )

    if return_positions:
        return n_nodes, row, col, positions
    return n_nodes, row, col
def random_geometric_small_world_network_coord_lists(
        N,
        k,
        mu,
        use_largest_component=False,
        delete_non_largest_component_nodes=True,
        seed=0,
        return_positions=False,
        ):
    r"""
    Generate a small-world network on nodes placed uniformly i.i.d. on
    a ring of circumference ``N`` and return the adjacency matrix as
    coordinate lists.

    Parameters
    ==========
    N : int
        Number of nodes.
    k : float
        Mean degree.
    mu : float
        Structural control parameter.
    use_largest_component : bool, default = False
        Only return the largest connected component of size
        :math:`N_g \leq N`.
    delete_non_largest_component_nodes : bool, default = True
        If only the largest component is returned, relabel its nodes
        to run from zero to :math:`N_g`.
    seed : int, default = 0
        Seed for network generation; ``0`` means random initialization.
    return_positions : bool, default = False
        Also return the node positions on the ring.

    Returns
    =======
    N : int
        Number of nodes in the generated network.
    row : list of int
        Row coordinates of non-zero entries of the adjacency matrix.
    col : list of int
        Column coordinates of non-zero entries of the adjacency matrix.
    X : numpy.ndarray
        Node positions on the ring (only when ``return_positions=True``).
    """
    # map the structural control parameter mu to the rewiring parameter beta
    beta = k / (N - 1 - k) * ((N - 1 - k) / k)**mu

    positions = np.random.rand(N) * N
    # sorted positions make nodes 0, 1, 2, ... follow ring order
    positions.sort()

    n_nodes, row, col = _random_geometric_small_world_network_coord_lists(
        N,
        k,
        beta,
        positions.tolist(),
        use_largest_component=use_largest_component,
        delete_non_largest_component_nodes=delete_non_largest_component_nodes,
        seed=seed,
    )

    if return_positions:
        return n_nodes, row, col, positions
    return n_nodes, row, col
def twoD_random_geometric_kleinberg_network_coord_lists(
        N,
        k,
        mu,
        periodic_boundary_conditions=True,
        use_largest_component=False,
        delete_non_largest_component_nodes=True,
        seed=0,
        epsilon=1e-9,
        return_positions=False,
        use_continuous_connection_probability=False,
        X=None,
        Y=None,
        ):
    r"""
    Generate a Kleinberg (power-law) small-world network on nodes
    placed uniformly i.i.d. on the unit square and return the
    adjacency matrix as coordinate lists.

    Parameters
    ==========
    N : int
        Number of nodes.
    k : float
        Mean degree.
    mu : float
        Structural control parameter.
    periodic_boundary_conditions : bool, default = True
        Apply periodic boundary conditions (2-torus).
    use_largest_component : bool, default = False
        Only return the largest connected component of size
        :math:`N_g \leq N`.
    delete_non_largest_component_nodes : bool, default = True
        If only the largest component is returned, relabel its nodes
        to run from zero to :math:`N_g`.
    seed : int, default = 0
        Seed for network generation; ``0`` means random initialization.
    epsilon : float, default = 1e-9
        Minimal distance below which node pairs are always connected.
    return_positions : bool, default = False
        Also return the node positions.
    use_continuous_connection_probability : bool, default = False
        If True, excess connection probability is redistributed to the
        whole ensemble instead of to nearest neighbors.
    X : numpy.ndarray, default = None
        Node positions in x-direction (:math:`x \in [0,1)`); drawn
        uniformly at random when omitted.
    Y : numpy.ndarray, default = None
        Node positions in y-direction (:math:`y \in [0,1)`); drawn
        uniformly at random when omitted.

    Returns
    =======
    N : int
        Number of nodes in the generated network.
    row : list of int
        Row coordinates of non-zero entries of the adjacency matrix.
    col : list of int
        Column coordinates of non-zero entries of the adjacency matrix.
    X : numpy.ndarray
        x-positions (only when ``return_positions=True``).
    Y : numpy.ndarray
        y-positions (only when ``return_positions=True``).
    """
    if X is None:
        X = np.random.rand(N)
    if Y is None:
        Y = np.random.rand(N)

    # exponent of the distance-dependent connection probability
    kappa = mu - 2

    # choose how excess connection probability is redistributed
    get_params = (
        get_continuous_distance_connection_probability_parameters
        if use_continuous_connection_probability
        else get_distance_connection_probability_parameters
    )
    C, rmin = get_params(
        N,
        k,
        kappa,
        epsilon=epsilon,
        use_periodic_boundary_conditions=periodic_boundary_conditions,
    )

    n_nodes, row, col = _twoD_random_geometric_kleinberg_network_coord_lists(
        N,
        C,
        rmin,
        kappa,
        X.tolist(),
        Y.tolist(),
        periodic_boundary_conditions=periodic_boundary_conditions,
        use_largest_component=use_largest_component,
        delete_non_largest_component_nodes=delete_non_largest_component_nodes,
        seed=seed,
    )

    if return_positions:
        return n_nodes, row, col, X, Y
    return n_nodes, row, col
if __name__=="__main__":
import matplotlib.pyplot as pl
N = 401
k = 30
mu = -1
kappa = 2*(mu-1)
_, edges = twoD_random_geometric_kleinberg_network(N,k,mu,periodic_boundary_conditions=False,use_continuous_connection_probability=True)
print(2*len(edges)/N, k)
C, rmin = get_continuous_distance_connection_probability_parameters(
N,
k,
2*(mu-1),
epsilon=1e-9,
use_periodic_boundary_conditions=False
)
def P(r, C, rmin, kappa):
if r < rmin:
return 1
else:
return C * r**kappa
r = np.linspace(0,1,10000)
Ps = [P(_r,C,rmin, kappa) for _r in r]
pl.plot(r, Ps)
#pl.xscale('log')
#pl.yscale('log')
pl.show()
#print(edges)
| [
"[email protected]"
]
| |
a255456d0a019eb574c9d471d675d371ed409336 | c971cefa15391eb5bfc661b61791aa641089ff71 | /Term 2/9/problem.py | a23dfbc6292d27e54b1f0f46a5e555232a0c0e39 | []
| no_license | theseana/ajil | d8816be35b7ee56d9bc6899587b71c664a455f5f | 3fcda909dae50394120c7c05d530ad905b05da0c | refs/heads/master | 2023-02-23T23:28:23.529531 | 2021-01-26T10:07:11 | 2021-01-26T10:07:11 | 305,970,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | # ssssssss
# iiiiiiii
# nnnnnnnn
# aaaaaaaa | [
"[email protected]"
]
| |
314be4571818499b9133b733908fa91faf720aad | 896b79a235796da67cdabd52e88d8b218882c3fe | /src/aioquic/quic/recovery.py | dae3712bc63d32f253ebceca8c49e3dc11109ba0 | [
"BSD-3-Clause"
]
| permissive | LautaroJayat/aioquic | b9b84b59c849a3dc4cfc3a986b679dd0723ddbf7 | b1ecd8a1dd9089fa536b7d62ab06d7127d768e8d | refs/heads/main | 2022-10-27T01:30:04.846852 | 2020-06-16T01:04:25 | 2020-06-16T01:04:25 | 272,577,005 | 0 | 0 | BSD-3-Clause | 2020-06-16T01:04:27 | 2020-06-16T01:01:58 | null | UTF-8 | Python | false | false | 17,096 | py | import math
from typing import Callable, Dict, Iterable, List, Optional
from .logger import QuicLoggerTrace
from .packet_builder import QuicDeliveryState, QuicSentPacket
from .rangeset import RangeSet
# loss detection
K_PACKET_THRESHOLD = 3
K_GRANULARITY = 0.001 # seconds
K_TIME_THRESHOLD = 9 / 8
K_MICRO_SECOND = 0.000001
K_SECOND = 1.0
# congestion control
K_MAX_DATAGRAM_SIZE = 1280
K_INITIAL_WINDOW = 10 * K_MAX_DATAGRAM_SIZE
K_MINIMUM_WINDOW = 2 * K_MAX_DATAGRAM_SIZE
K_LOSS_REDUCTION_FACTOR = 0.5
class QuicPacketSpace:
    """Book-keeping for a single QUIC packet-number space."""

    def __init__(self) -> None:
        # acknowledgement state for received packets
        self.ack_at: Optional[float] = None
        self.ack_queue = RangeSet()
        self.discarded = False
        self.expected_packet_number = 0
        self.largest_received_packet = -1
        self.largest_received_time: Optional[float] = None

        # sent-packet accounting and loss-detection state
        self.ack_eliciting_in_flight = 0
        self.largest_acked_packet = 0
        self.loss_time: Optional[float] = None
        self.sent_packets: Dict[int, QuicSentPacket] = {}
class QuicPacketPacer:
    """Token-bucket pacer that spreads packet transmissions over time."""

    def __init__(self) -> None:
        self.bucket_max: float = 0.0       # bucket capacity, in seconds of credit
        self.bucket_time: float = 0.0      # currently available credit
        self.evaluation_time: float = 0.0  # last time the bucket was refilled
        self.packet_time: Optional[float] = None  # credit consumed per packet

    def next_send_time(self, now: float) -> float:
        # No pacing until update_rate() has been called.
        if self.packet_time is None:
            return None
        self.update_bucket(now=now)
        # Out of credit: delay the next send by one packet interval.
        if self.bucket_time <= 0:
            return now + self.packet_time
        return None

    def update_after_send(self, now: float) -> None:
        if self.packet_time is None:
            return
        self.update_bucket(now=now)
        # Spend one packet's worth of credit, clamping at zero.
        if self.bucket_time < self.packet_time:
            self.bucket_time = 0.0
        else:
            self.bucket_time -= self.packet_time

    def update_bucket(self, now: float) -> None:
        # Refill credit proportionally to elapsed time, capped at bucket_max.
        if now > self.evaluation_time:
            elapsed = now - self.evaluation_time
            self.bucket_time = min(self.bucket_time + elapsed, self.bucket_max)
            self.evaluation_time = now

    def update_rate(self, congestion_window: int, smoothed_rtt: float) -> None:
        # Derive the per-packet interval from the congestion window and RTT.
        pacing_rate = congestion_window / max(smoothed_rtt, K_MICRO_SECOND)
        self.packet_time = max(
            K_MICRO_SECOND, min(K_MAX_DATAGRAM_SIZE / pacing_rate, K_SECOND)
        )

        # Allow a limited burst: between 2 and 16 datagrams, or a quarter
        # of the congestion window, whichever is tighter.
        burst = max(
            2 * K_MAX_DATAGRAM_SIZE,
            min(congestion_window // 4, 16 * K_MAX_DATAGRAM_SIZE),
        )
        self.bucket_max = burst / pacing_rate
        if self.bucket_time > self.bucket_max:
            self.bucket_time = self.bucket_max
class QuicCongestionControl:
    """
    New Reno congestion control.
    """

    def __init__(self) -> None:
        self.bytes_in_flight = 0
        self.congestion_window = K_INITIAL_WINDOW
        self.ssthresh: Optional[int] = None
        self._congestion_recovery_start_time = 0.0
        self._congestion_stash = 0
        self._rtt_monitor = QuicRttMonitor()

    def on_packet_acked(self, packet: QuicSentPacket) -> None:
        self.bytes_in_flight -= packet.sent_bytes

        # Packets sent before the current recovery period started do not
        # grow the window.
        if packet.sent_time <= self._congestion_recovery_start_time:
            return

        if self.ssthresh is None or self.congestion_window < self.ssthresh:
            # slow start: grow the window by the number of bytes acked
            self.congestion_window += packet.sent_bytes
        else:
            # congestion avoidance: roughly one datagram per acked window
            self._congestion_stash += packet.sent_bytes
            full_windows = self._congestion_stash // self.congestion_window
            if full_windows:
                self._congestion_stash -= full_windows * self.congestion_window
                self.congestion_window += full_windows * K_MAX_DATAGRAM_SIZE

    def on_packet_sent(self, packet: QuicSentPacket) -> None:
        self.bytes_in_flight += packet.sent_bytes

    def on_packets_expired(self, packets: Iterable[QuicSentPacket]) -> None:
        # expired packets simply stop counting towards bytes in flight
        self.bytes_in_flight -= sum(packet.sent_bytes for packet in packets)

    def on_packets_lost(self, packets: Iterable[QuicSentPacket], now: float) -> None:
        lost_largest_time = 0.0
        for packet in packets:
            self.bytes_in_flight -= packet.sent_bytes
            lost_largest_time = packet.sent_time

        # Start a new congestion event only if a lost packet was sent
        # after the start of the previous congestion recovery period.
        if lost_largest_time > self._congestion_recovery_start_time:
            self._congestion_recovery_start_time = now
            self.congestion_window = max(
                int(self.congestion_window * K_LOSS_REDUCTION_FACTOR),
                K_MINIMUM_WINDOW,
            )
            self.ssthresh = self.congestion_window

        # TODO : collapse congestion window if persistent congestion

    def on_rtt_measurement(self, latest_rtt: float, now: float) -> None:
        # Leave slow start as soon as the RTT starts trending upwards.
        if self.ssthresh is None and self._rtt_monitor.is_rtt_increasing(
            latest_rtt, now
        ):
            self.ssthresh = self.congestion_window
class QuicPacketRecovery:
    """
    Packet loss detection and congestion control for a QUIC connection.
    """

    def __init__(
        self,
        initial_rtt: float,
        peer_completed_address_validation: bool,
        send_probe: Callable[[], None],
        quic_logger: Optional[QuicLoggerTrace] = None,
    ) -> None:
        self.max_ack_delay = 0.025
        self.peer_completed_address_validation = peer_completed_address_validation
        self.spaces: List[QuicPacketSpace] = []

        # callbacks
        self._quic_logger = quic_logger
        self._send_probe = send_probe

        # loss detection state
        self._pto_count = 0
        self._rtt_initial = initial_rtt
        self._rtt_initialized = False
        self._rtt_latest = 0.0
        self._rtt_min = math.inf
        self._rtt_smoothed = 0.0
        self._rtt_variance = 0.0
        self._time_of_last_sent_ack_eliciting_packet = 0.0

        # congestion control
        self._cc = QuicCongestionControl()
        self._pacer = QuicPacketPacer()

    @property
    def bytes_in_flight(self) -> int:
        return self._cc.bytes_in_flight

    @property
    def congestion_window(self) -> int:
        return self._cc.congestion_window

    def discard_space(self, space: QuicPacketSpace) -> None:
        assert space in self.spaces

        # in-flight packets of a discarded space no longer count
        self._cc.on_packets_expired(
            filter(lambda x: x.in_flight, space.sent_packets.values())
        )
        space.sent_packets.clear()

        space.ack_at = None
        space.ack_eliciting_in_flight = 0
        space.loss_time = None

        if self._quic_logger is not None:
            self._log_metrics_updated()

    def get_loss_detection_time(self) -> float:
        # a pending time-threshold loss takes precedence
        loss_space = self._get_loss_space()
        if loss_space is not None:
            return loss_space.loss_time

        # otherwise arm the probe (PTO) timer with exponential back-off
        if (
            not self.peer_completed_address_validation
            or sum(space.ack_eliciting_in_flight for space in self.spaces) > 0
        ):
            timeout = self.get_probe_timeout() * (2 ** self._pto_count)
            return self._time_of_last_sent_ack_eliciting_packet + timeout

        return None

    def get_probe_timeout(self) -> float:
        if not self._rtt_initialized:
            return 2 * self._rtt_initial
        return (
            self._rtt_smoothed
            + max(4 * self._rtt_variance, K_GRANULARITY)
            + self.max_ack_delay
        )

    def on_ack_received(
        self,
        space: QuicPacketSpace,
        ack_rangeset: RangeSet,
        ack_delay: float,
        now: float,
    ) -> None:
        """
        Update metrics as the result of an ACK being received.
        """
        is_ack_eliciting = False
        largest_acked = ack_rangeset.bounds().stop - 1
        largest_newly_acked = None
        largest_sent_time = None

        if largest_acked > space.largest_acked_packet:
            space.largest_acked_packet = largest_acked

        for packet_number in sorted(space.sent_packets.keys()):
            if packet_number > largest_acked:
                break
            if packet_number in ack_rangeset:
                # remove the packet and update counters
                packet = space.sent_packets.pop(packet_number)
                if packet.is_ack_eliciting:
                    is_ack_eliciting = True
                    space.ack_eliciting_in_flight -= 1
                if packet.in_flight:
                    self._cc.on_packet_acked(packet)
                largest_newly_acked = packet_number
                largest_sent_time = packet.sent_time

                # trigger delivery callbacks
                for handler, args in packet.delivery_handlers:
                    handler(QuicDeliveryState.ACKED, *args)

        # nothing to do if there are no newly acked packets
        if largest_newly_acked is None:
            return

        if largest_acked == largest_newly_acked and is_ack_eliciting:
            latest_rtt = now - largest_sent_time
            log_rtt = True

            # limit ACK delay to max_ack_delay
            ack_delay = min(ack_delay, self.max_ack_delay)

            # update RTT estimate, which cannot be < 1 ms
            self._rtt_latest = max(latest_rtt, 0.001)
            if self._rtt_latest < self._rtt_min:
                self._rtt_min = self._rtt_latest
            if self._rtt_latest > self._rtt_min + ack_delay:
                self._rtt_latest -= ack_delay

            if not self._rtt_initialized:
                self._rtt_initialized = True
                self._rtt_variance = latest_rtt / 2
                self._rtt_smoothed = latest_rtt
            else:
                # standard exponentially-weighted RTT estimators
                self._rtt_variance = 3 / 4 * self._rtt_variance + 1 / 4 * abs(
                    self._rtt_min - self._rtt_latest
                )
                self._rtt_smoothed = (
                    7 / 8 * self._rtt_smoothed + 1 / 8 * self._rtt_latest
                )

            # inform the congestion controller and the pacer
            self._cc.on_rtt_measurement(latest_rtt, now=now)
            self._pacer.update_rate(
                congestion_window=self._cc.congestion_window,
                smoothed_rtt=self._rtt_smoothed,
            )
        else:
            log_rtt = False

        self._detect_loss(space, now=now)

        if self._quic_logger is not None:
            self._log_metrics_updated(log_rtt=log_rtt)

        self._pto_count = 0

    def on_loss_detection_timeout(self, now: float) -> None:
        loss_space = self._get_loss_space()
        if loss_space is not None:
            # loss timer fired: declare lost packets in that space
            self._detect_loss(loss_space, now=now)
        else:
            # probe (PTO) timer fired
            self._pto_count += 1

            # reschedule crypto data for retransmission
            for space in self.spaces:
                self._on_packets_lost(
                    tuple(
                        filter(
                            lambda i: i.is_crypto_packet, space.sent_packets.values()
                        )
                    ),
                    space=space,
                    now=now,
                )

            self._send_probe()

    def on_packet_sent(self, packet: QuicSentPacket, space: QuicPacketSpace) -> None:
        space.sent_packets[packet.packet_number] = packet

        if packet.is_ack_eliciting:
            space.ack_eliciting_in_flight += 1
        if packet.in_flight:
            if packet.is_ack_eliciting:
                self._time_of_last_sent_ack_eliciting_packet = packet.sent_time

            # add packet to bytes in flight
            self._cc.on_packet_sent(packet)

            if self._quic_logger is not None:
                self._log_metrics_updated()

    def _detect_loss(self, space: QuicPacketSpace, now: float) -> None:
        """
        Check whether any packets should be declared lost.
        """
        loss_delay = K_TIME_THRESHOLD * (
            max(self._rtt_latest, self._rtt_smoothed)
            if self._rtt_initialized
            else self._rtt_initial
        )
        packet_threshold = space.largest_acked_packet - K_PACKET_THRESHOLD
        time_threshold = now - loss_delay

        lost_packets = []
        space.loss_time = None
        for packet_number, packet in space.sent_packets.items():
            if packet_number > space.largest_acked_packet:
                break

            if packet_number <= packet_threshold or packet.sent_time <= time_threshold:
                lost_packets.append(packet)
            else:
                # not lost yet: remember the earliest time it would be
                packet_loss_time = packet.sent_time + loss_delay
                if space.loss_time is None or space.loss_time > packet_loss_time:
                    space.loss_time = packet_loss_time

        self._on_packets_lost(lost_packets, space=space, now=now)

    def _get_loss_space(self) -> Optional[QuicPacketSpace]:
        # the space with the earliest pending loss time, if any
        loss_space = None
        for space in self.spaces:
            if space.loss_time is not None and (
                loss_space is None or space.loss_time < loss_space.loss_time
            ):
                loss_space = space
        return loss_space

    def _log_metrics_updated(self, log_rtt=False) -> None:
        data = {
            "bytes_in_flight": self._cc.bytes_in_flight,
            "cwnd": self._cc.congestion_window,
        }
        if self._cc.ssthresh is not None:
            data["ssthresh"] = self._cc.ssthresh
        if log_rtt:
            data.update(
                {
                    "latest_rtt": self._quic_logger.encode_time(self._rtt_latest),
                    "min_rtt": self._quic_logger.encode_time(self._rtt_min),
                    "smoothed_rtt": self._quic_logger.encode_time(self._rtt_smoothed),
                    "rtt_variance": self._quic_logger.encode_time(self._rtt_variance),
                }
            )

        self._quic_logger.log_event(
            category="recovery", event="metrics_updated", data=data
        )

    def _on_packets_lost(
        self, packets: Iterable[QuicSentPacket], space: QuicPacketSpace, now: float
    ) -> None:
        lost_packets_cc = []
        for packet in packets:
            del space.sent_packets[packet.packet_number]

            if packet.in_flight:
                lost_packets_cc.append(packet)

            if packet.is_ack_eliciting:
                space.ack_eliciting_in_flight -= 1

            if self._quic_logger is not None:
                self._quic_logger.log_event(
                    category="recovery",
                    event="packet_lost",
                    data={
                        "type": self._quic_logger.packet_type(packet.packet_type),
                        "packet_number": str(packet.packet_number),
                    },
                )
                self._log_metrics_updated()

            # trigger delivery callbacks
            for handler, args in packet.delivery_handlers:
                handler(QuicDeliveryState.LOST, *args)

        # inform the congestion controller and the pacer
        if lost_packets_cc:
            self._cc.on_packets_lost(lost_packets_cc, now=now)
            self._pacer.update_rate(
                congestion_window=self._cc.congestion_window,
                smoothed_rtt=self._rtt_smoothed,
            )
            if self._quic_logger is not None:
                self._log_metrics_updated()
class QuicRttMonitor:
    """
    Roundtrip time monitor for HyStart: detects a sustained RTT increase.
    """

    def __init__(self) -> None:
        self._increases = 0
        self._last_time = None
        self._ready = False
        self._size = 5

        # lowest window-maximum observed so far
        self._filtered_min: Optional[float] = None

        # circular sample buffer and its derived statistics
        self._sample_idx = 0
        self._sample_max: Optional[float] = None
        self._sample_min: Optional[float] = None
        self._sample_time = 0.0
        self._samples = [0.0] * self._size

    def add_rtt(self, rtt: float) -> None:
        self._samples[self._sample_idx] = rtt
        self._sample_idx += 1
        if self._sample_idx >= self._size:
            self._sample_idx = 0
            self._ready = True

        if self._ready:
            # recompute min / max over the full sample window
            self._sample_min = min(self._samples)
            self._sample_max = max(self._samples)

    def is_rtt_increasing(self, rtt: float, now: float) -> bool:
        # only sample at most once per granularity interval
        if now > self._sample_time + K_GRANULARITY:
            self.add_rtt(rtt)
            self._sample_time = now

            if self._ready:
                # track the lowest window maximum seen so far
                if self._filtered_min is None or self._filtered_min > self._sample_max:
                    self._filtered_min = self._sample_max

                delta = self._sample_min - self._filtered_min
                if delta * 4 >= self._filtered_min:
                    self._increases += 1
                    if self._increases >= self._size:
                        return True
                elif delta > 0:
                    self._increases = 0
        return False
| [
"[email protected]"
]
| |
afc502fd894e0319fb56f6217f21a3b934829d0c | e4045e99ae5395ce5369a1374a20eae38fd5179b | /db/add_emp.py | 07ba831ed9d733fb43393f8841ade51ce422921f | []
| no_license | srikanthpragada/09_MAR_2018_PYTHON_DEMO | 74fdb54004ab82b62f68c9190fe868f3c2961ec0 | 8684137c77d04701f226e1e2741a7faf9eeef086 | refs/heads/master | 2021-09-11T15:52:17.715078 | 2018-04-09T15:29:16 | 2018-04-09T15:29:16 | 124,910,054 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | import sqlite3
# Interactively insert a new employee row into the `emp` table.
# Fix: `con` is now initialized before the try block — the original
# called con.close() in `finally`, which raised NameError whenever
# sqlite3.connect() itself failed.
con = None
try:
    con = sqlite3.connect(r"e:\classroom\python\hr.db")
    cur = con.cursor()
    # take input from user
    ename = input("Enter name :")
    salary = input("Enter salary : ")
    dept = input("Enter dept id :")
    # get next emp id
    cur.execute("select max(empid) + 1 from emp")
    empid = cur.fetchone()[0]
    # parameterized query: values are bound, never interpolated
    cur.execute("insert into emp values(?,?,?,?)", (empid, ename, salary, dept))
    con.commit()
    print("Added Employee")
except Exception as ex:
    print("Error : ", ex)
finally:
    # only close if the connection was actually established
    if con is not None:
        con.close()
| [
"[email protected]"
]
| |
19bf93fb7f263d11e77e96002fe5f58d107ffb35 | 382e308f433dd3b2c2601568f480be30a704e7d7 | /Django 실습/sample_community/board/views.py | 94525375f9d3f0d7adf021b9bb0a2d913286ea95 | []
| no_license | 5d5ng/LACUC-capstone1 | 29240f4109d397ceab3ad7bb771cbcdf69cb944c | 01b0a1136dab592b778ac99c346c318d3c6ed30f | refs/heads/master | 2022-12-03T15:57:55.804687 | 2019-11-18T09:44:04 | 2019-11-18T09:44:04 | 211,851,523 | 0 | 1 | null | 2022-11-17T07:05:21 | 2019-09-30T12:11:32 | Python | UTF-8 | Python | false | false | 453 | py | from django.shortcuts import render
from .models import Board
from .forms import BoardForm
# Create your views here.
def board_write(request):
    """Render the post-creation page with an empty BoardForm."""
    return render(request, 'board_write.html', {'form': BoardForm()})
def board_list(request):
    """Render the board index, newest posts first."""
    # order by primary key descending: newest posts come first
    context = {'boards': Board.objects.all().order_by('-id')}
    return render(request, 'board_list.html', context)
| [
"[email protected]"
]
| |
ae8e11dbf700e8b547f3301a18102059e7cdabf8 | 54bb9ba6d507cd25b2c2ac553665bc5fc95280d1 | /src/onegov/file/__init__.py | 7cc5a6d0232f51b69f22604b6201246450e833ec | [
"MIT"
]
| permissive | href/onegov-cloud | 9ff736d968979380edba266b6eba0e9096438397 | bb292e8e0fb60fd1cd4e11b0196fbeff1a66e079 | refs/heads/master | 2020-12-22T07:59:13.691431 | 2020-01-28T08:51:54 | 2020-01-28T08:51:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | from onegov.file.collection import FileCollection, FileSetCollection
from onegov.file.integration import DepotApp
from onegov.file.models import (
File,
FileSet,
AssociatedFiles,
SearchableFile
)
__all__ = (
'AssociatedFiles',
'DepotApp',
'File',
'FileCollection',
'FileSet',
'FileSetCollection',
'SearchableFile',
)
| [
"[email protected]"
]
| |
5494da1fde51e2b036cfae84db3b9f33a86c2556 | 931926968461bbe8fc6295d4f5b702c5de99c231 | /paper/plot_cifar10_confusion_diff.py | f9a3028e134b9ca9deca51fdf7202d96223084c2 | []
| no_license | annaproxy/modules | 93315ce684bdda4fb7a34a518ac2154e506a6579 | 771e1fa49edd2f237883842f741ea1d8ce1fccdc | refs/heads/master | 2022-12-27T22:27:39.408250 | 2020-10-06T10:30:22 | 2020-10-06T10:30:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,606 | py | #!/usr/bin/env python3
import os
import lib
from lib import StatTracker
import torch
import shutil
import matplotlib.pyplot as plt
import numpy as np
import itertools
from mpl_toolkits.axes_grid1 import make_axes_locatable
from lib.common import group
BASE_DIR = "out/cifar10_confusion/"
shutil.rmtree(BASE_DIR, ignore_errors=True)
def draw(runs, name):
    """Download per-run confusion matrices and plot their mean ± std.

    Parameters:
        runs: iterable of wandb run objects whose exported
              ``export*/confusion*`` files will be aggregated.
        name: subdirectory under BASE_DIR for downloads and output PDFs.
    """
    VER_DIR = f"{BASE_DIR}/{name}/download/"
    os.makedirs(VER_DIR, exist_ok=True)

    def draw_confusion(means: np.ndarray, std: np.ndarray):
        """Render one annotated confusion matrix (percentages, mean ± std)."""
        print("MEAN", means)
        figure = plt.figure(figsize=[7, 3])
        ax = plt.gca()
        im = plt.imshow(means, interpolation='nearest', cmap=plt.cm.viridis,
                        aspect='auto')

        x_marks = ["airplane", "automobile", "bird", "cat", "deer", "dog",
                   "frog", "horse", "ship", "truck"]
        assert len(x_marks) == means.shape[1]
        y_marks = x_marks
        assert len(y_marks) == means.shape[0]

        plt.xticks(np.arange(means.shape[1]), x_marks, rotation=45)
        plt.yticks(np.arange(means.shape[0]), y_marks)

        # Use white text if squares are dark; otherwise black.
        threshold = (means.max() + means.min()) / 2.
        print("THRESHOLD", threshold)

        # np.int was deprecated in NumPy 1.20 and removed in 1.24:
        # use the builtin int instead.
        rmap = np.round(means).astype(int)
        std = np.round(std).astype(int)
        for i, j in itertools.product(range(means.shape[0]),
                                      range(means.shape[1])):
            color = "white" if means[i, j] < threshold else "black"
            plt.text(j, i, f"${rmap[i, j]}\\pm{std[i,j]}$",
                     ha="center", va="center", color=color)

        plt.ylabel("True label", labelpad=-10)
        plt.xlabel("Predicted label", labelpad=-10)

        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="2%", pad=0.1)
        plt.colorbar(im, cax)
        return figure

    def create_trackers(runs):
        """Accumulate a StatTracker per exported confusion file across runs."""
        trackers = {}
        for run in runs:
            for f in run.files(per_page=10000):
                if not f.name.startswith("export") or "/confusion" not in f.name:
                    continue

                if f.name not in trackers:
                    trackers[f.name] = StatTracker()

                full_name = os.path.join(VER_DIR, f.name)
                print(f"Downloading {full_name}")
                f.download(root=VER_DIR, replace=True)

                data = torch.load(full_name)
                data = data.astype(np.float32)
                # Raw confusion counts are normalized to row percentages;
                # difference matrices are already relative, just scale to %.
                if "confusion_difference" not in f.name:
                    data = data / np.sum(data, axis=1, keepdims=True)
                data = data * 100
                trackers[f.name].add(data)

        return trackers

    trackers = create_trackers(runs)

    for k, v in trackers.items():
        s = v.get()
        figure = draw_confusion(s.mean, s.std)

        prefix = f"out/cifar10_confusion/{name}/"
        dir = os.path.join(prefix, os.path.dirname(k))
        os.makedirs(dir, exist_ok=True)

        figure.savefig(f"{prefix}/{k}.pdf", bbox_inches='tight',
                       pad_inches=0.01)
        plt.close()
# Generate confusion-matrix figures for each experiment group.
draw(lib.get_runs(["cifar10_no_dropout"]), "no_dropout")
draw(lib.get_runs(["cifar10"]), "with_dropout")
draw(lib.get_runs(["cifar10_resnet"]), "resnet")
| [
"[email protected]"
]
| |
74bc86c8f16604ca3cd74876f70d09cfaef95070 | a568e4dc461f71f0ae053fe51e3ddd0fe23bf858 | /development/index_site.py | 1789373a554d1f41d08b10458f9e08a08425dac8 | [
"MIT"
]
| permissive | vatlab/sos-docs | 413e344a7581e4e2cef5da3d24345a73f3669c43 | 2b42c280dae0feaeea51161041827c362abe6db0 | refs/heads/master | 2023-06-26T04:30:59.078944 | 2023-06-16T20:26:39 | 2023-06-16T20:26:39 | 105,951,462 | 3 | 15 | MIT | 2023-06-16T20:18:39 | 2017-10-05T23:46:39 | Jupyter Notebook | UTF-8 | Python | false | false | 4,206 | py | #!/usr/bin/env python3
#
# Copyright (c) Bo Peng and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
#
import os
import glob
import re
import argparse
from bs4 import BeautifulSoup
'''
A simple script to create tipue content by searching for documentation
files under the top docs directory of the SoS website.
'''
def parse_html(url, html):
print('Parsing {}'.format(html))
with open(html, 'rb') as content:
soup = BeautifulSoup(content, "html.parser", from_encoding='utf-8')
#
# try to get the title of the page from h1, h2, or title, and
# uses filename if none of them exists.
#
title = soup.find('h1')
if title is None:
title = soup.find('h2')
if title is None:
title = soup.find('title')
if title is None:
title = os.path.basename(html).rsplit('.')[0]
else:
title = title.get_text()
maintitle = soup.find('h1')
if maintitle is None:
maintitle = soup.find('h2')
if maintitle is None:
maintitle = soup.find('title')
if maintitle is None:
maintitle = os.path.basename(html).rsplit('.')[0]
else:
maintitle = maintitle.get_text()
# remove special characters which might mess up js file
title = re.sub(r'[¶^a-zA-Z0-9_\.\-]', ' ', title)
#
# sear
all_text = []
for header in soup.find_all(re.compile('^h[1-6]$')):
# remove special character
part = re.sub(r'[^a-zA-Z0-9_\-=\'".,\\]', ' ',
header.get_text()).replace('"', "'").strip() + "\n"
part = re.sub(r'\s+', ' ', part)
ids = [x for x in header.findAll('a') if x.get('id')]
if ids:
tag = '#' + ids[0].get('id')
else:
hrefs = header.findAll('a', {'class': 'anchor-link'})
if hrefs:
tag = hrefs[0].get('href')
else:
tag = ''
part = '{{"mainTitle": "{}", "title": "{}", "text": "{}", "tags": "", "mainUrl": "{}", "url": "{}"}}'.format(
maintitle.replace('¶', '').strip(),
header.get_text().replace('¶', '').replace('"', r'\"').strip(),
part, url, url + tag.replace('"', r'\"'))
all_text.append(part)
return all_text
def generate_tipue_content(docs_dir):
# get a list of html files and their url
documentations = glob.glob(
os.path.join(docs_dir, 'doc', 'user_guide', '*.html'))
text = [
parse_html(url, html)
for (url, html) in [('https://vatlab.github.io/sos-docs/',
os.path.join(docs_dir, 'index.html')),
('https://vatlab.github.io/sos-docs/running.html',
os.path.join(docs_dir, 'running.html')),
('https://vatlab.github.io/sos-docs/notebook.html',
os.path.join(docs_dir, 'notebook.html')),
('https://vatlab.github.io/sos-docs/workflow.html',
os.path.join(docs_dir, 'workflow.html'))] +
[('https://vatlab.github.io/sos-docs/doc/user_guide/{}'.format(
os.path.basename(x)), x) for x in documentations]
]
# write the output to file.
with open(
os.path.join(docs_dir, 'tipuesearch', 'tipuesearch_content.js'),
'w') as out:
out.write('''\
var tipuesearch = {{"pages": [
{}
]}};
'''.format(',\n'.join(sum(text, []))))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Index SoS website')
parser.add_argument(
'docs_dir',
metavar='DIR',
help='''Path of the top SoS docs directory. This script will parse content of
HTML files under $DOC_DIR (e.g. Overview.html, /doc/documentation/*.html), get
the headers of the files, and write the results in $DOC_DIR/tipuesearch_content.hs
''')
args = parser.parse_args()
generate_tipue_content(args.docs_dir)
| [
"[email protected]"
]
| |
ea2793abe07a25467fb61292b764ddc1f7d4ac4c | 68263c011d12b19d6ff17f0f2420fe497ef28fc2 | /api/tacticalrmm/core/views.py | 93d53c22c421640a769e0772dfab93f3222aa002 | [
"MIT"
]
| permissive | bradhawkins85/tacticalrmm | 79ec6f003b559c96d15a5bd0621a2e968d2ea53d | 4371f270569a6eb094dda834f2d1b14ed62af5e4 | refs/heads/develop | 2023-05-21T13:19:47.187899 | 2020-09-02T18:52:40 | 2020-09-02T18:52:40 | 292,421,792 | 0 | 0 | MIT | 2021-05-05T05:55:52 | 2020-09-03T00:06:11 | null | UTF-8 | Python | false | false | 1,723 | py | import os
from django.conf import settings
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from rest_framework.exceptions import ParseError
from rest_framework.parsers import FileUploadParser
from rest_framework.views import APIView
from .models import CoreSettings
from .serializers import CoreSettingsSerializer
from tacticalrmm.utils import notify_error
class UploadMeshAgent(APIView):
parser_class = (FileUploadParser,)
def put(self, request, format=None):
if "meshagent" not in request.data:
raise ParseError("Empty content")
f = request.data["meshagent"]
mesh_exe = os.path.join(settings.EXE_DIR, "meshagent.exe")
with open(mesh_exe, "wb+") as j:
for chunk in f.chunks():
j.write(chunk)
return Response(status=status.HTTP_201_CREATED)
@api_view()
def get_core_settings(request):
settings = CoreSettings.objects.first()
return Response(CoreSettingsSerializer(settings).data)
@api_view(["PATCH"])
def edit_settings(request):
settings = CoreSettings.objects.first()
serializer = CoreSettingsSerializer(instance=settings, data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response("ok")
@api_view()
def version(request):
return Response(settings.APP_VER)
@api_view()
def email_test(request):
core = CoreSettings.objects.first()
r = core.send_mail(
subject="Test from Tactical RMM", body="This is a test message", test=True
)
if not isinstance(r, bool) and isinstance(r, str):
return notify_error(r)
return Response("Email Test OK!")
| [
"[email protected]"
]
| |
de8074fe4170e2bd14801b70bceb614046f97b3e | 4b68243d9db908945ee500174a8a12be27d150f9 | /pogoprotos/settings/trading_global_settings_pb2.py | d2d9db44611b0f5eebc2b3e22c3a68f670146ab3 | []
| no_license | ykram/pogoprotos-py | 7285c86498f57dcbbec8e6c947597e82b2518d80 | a045b0140740625d9a19ded53ece385a16c4ad4a | refs/heads/master | 2020-04-20T10:19:51.628964 | 2019-02-02T02:58:03 | 2019-02-02T02:58:03 | 168,787,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 2,630 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/settings/trading_global_settings.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/settings/trading_global_settings.proto',
package='pogoprotos.settings',
syntax='proto3',
serialized_pb=_b('\n1pogoprotos/settings/trading_global_settings.proto\x12\x13pogoprotos.settings\"I\n\x15TradingGlobalSettings\x12\x16\n\x0e\x65nable_trading\x18\x01 \x01(\x08\x12\x18\n\x10min_player_level\x18\x02 \x01(\rb\x06proto3')
)
_TRADINGGLOBALSETTINGS = _descriptor.Descriptor(
name='TradingGlobalSettings',
full_name='pogoprotos.settings.TradingGlobalSettings',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='enable_trading', full_name='pogoprotos.settings.TradingGlobalSettings.enable_trading', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='min_player_level', full_name='pogoprotos.settings.TradingGlobalSettings.min_player_level', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=74,
serialized_end=147,
)
DESCRIPTOR.message_types_by_name['TradingGlobalSettings'] = _TRADINGGLOBALSETTINGS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TradingGlobalSettings = _reflection.GeneratedProtocolMessageType('TradingGlobalSettings', (_message.Message,), dict(
DESCRIPTOR = _TRADINGGLOBALSETTINGS,
__module__ = 'pogoprotos.settings.trading_global_settings_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.settings.TradingGlobalSettings)
))
_sym_db.RegisterMessage(TradingGlobalSettings)
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
]
| |
3dcc56e34644f42ea06d92fb7188107801b668d2 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/KCB_YCHF/KCB_YCHF_MM/YZYQ/yzyq_144.py | 83432d061fadacfc51640039e13e534672cfe407 | []
| no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,038 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class yzyq_144(xtp_test_case):
# yzyq_144
def test_yzyq_144(self):
title = '默认3:订单报价未超过涨跌幅限制-沪A对手方最优转限价买入=涨停价 重启oms'
# 定义当前测试用例的期待值
# 期望状态:初始、未成交、部成、全成、部撤已报、部撤、已报待撤、已撤、废单、撤废、内部撤单
# xtp_ID和cancel_xtpID默认为0,不需要变动
case_goal = {
'期望状态': '全成',
'errorID': 0,
'errorMSG': '',
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
# 定义委托参数信息------------------------------------------
# 参数:证券代码、市场、证券类型、证券状态、交易状态、买卖方向(B买S卖)、期望状态、Api
stkparm = QueryStkPriceQty('688000', '1', '4', '2', '0', 'B', case_goal['期望状态'], Api)
# 如果下单参数获取失败,则用例失败
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
'测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
}
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':2,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_REVERSE_BEST_LIMIT'],
'price': stkparm['涨停价'],
'quantity': 300,
'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = serviceTest(Api, case_goal, wt_reqs)
logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
+ str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
self.assertEqual(rs['用例测试结果'], True) # 0
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
61bc779a1546701f5153a4a6635e4020d619e8cb | 22c5fc7dd52149ebd4338a487ae9ab0db0e43f01 | /tests/test_dynunet.py | 39371c0e1dc31ecb08ea630eefe8443f705fb731 | [
"Apache-2.0"
]
| permissive | precision-medicine-um/MONAI-Deep_Learning | 3d3f547dd9815152561a6853f8d4727b0e5ca4c4 | d94c4d3a2c465717ba3fae01b7acea7fada9885b | refs/heads/master | 2022-12-28T07:04:07.768415 | 2020-10-17T13:11:56 | 2020-10-17T13:11:56 | 305,346,962 | 3 | 0 | Apache-2.0 | 2022-12-27T15:44:13 | 2020-10-19T10:30:07 | Python | UTF-8 | Python | false | false | 5,036 | py | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import Any, Sequence, Union
import torch
from parameterized import parameterized
from monai.networks.nets import DynUNet
strides: Sequence[Union[Sequence[int], int]]
kernel_size: Sequence[Any]
expected_shape: Sequence[Any]
TEST_CASE_DYNUNET_2D = []
for kernel_size in [(3, 3, 3, 1), ((3, 1), 1, (3, 3), (1, 1))]:
for strides in [(1, 1, 1, 1), (2, 2, 2, 1)]:
for in_channels in [2, 3]:
for res_block in [True, False]:
out_channels = 2
in_size = 64
spatial_dims = 2
expected_shape = (1, out_channels, *[in_size // strides[0]] * spatial_dims)
test_case = [
{
"spatial_dims": spatial_dims,
"in_channels": in_channels,
"out_channels": out_channels,
"kernel_size": kernel_size,
"strides": strides,
"upsample_kernel_size": strides[1:],
"norm_name": "batch",
"deep_supervision": False,
"res_block": res_block,
},
torch.randn(1, in_channels, in_size, in_size),
expected_shape,
]
TEST_CASE_DYNUNET_2D.append(test_case)
TEST_CASE_DYNUNET_3D = [] # in 3d cases, also test anisotropic kernel/strides
for out_channels in [2, 3]:
for res_block in [True, False]:
in_channels = 1
in_size = 64
expected_shape = (1, out_channels, 64, 32, 64)
test_case = [
{
"spatial_dims": 3,
"in_channels": in_channels,
"out_channels": out_channels,
"kernel_size": (3, (1, 1, 3), 3, 3),
"strides": ((1, 2, 1), 2, 2, 1),
"upsample_kernel_size": (2, 2, 1),
"norm_name": "instance",
"deep_supervision": False,
"res_block": res_block,
},
torch.randn(1, in_channels, in_size, in_size, in_size),
expected_shape,
]
TEST_CASE_DYNUNET_3D.append(test_case)
TEST_CASE_DEEP_SUPERVISION = []
for spatial_dims in [2, 3]:
for res_block in [True, False]:
for deep_supr_num in [1, 2]:
for strides in [(1, 2, 1, 2, 1), (2, 2, 2, 1), (2, 1, 1, 2, 2)]:
test_case = [
{
"spatial_dims": spatial_dims,
"in_channels": 1,
"out_channels": 2,
"kernel_size": [3] * len(strides),
"strides": strides,
"upsample_kernel_size": strides[1:],
"norm_name": "group",
"deep_supervision": True,
"deep_supr_num": deep_supr_num,
"res_block": res_block,
},
torch.randn(1, 1, *[in_size] * spatial_dims),
]
scale = 1
all_expected_shapes = []
for stride in strides[: 1 + deep_supr_num]:
scale *= stride
deep_out_shape = (1, 2, *[in_size // scale] * spatial_dims)
all_expected_shapes.append(deep_out_shape)
test_case.append(all_expected_shapes)
TEST_CASE_DEEP_SUPERVISION.append(test_case)
class TestDynUNet(unittest.TestCase):
@parameterized.expand(TEST_CASE_DYNUNET_2D + TEST_CASE_DYNUNET_3D)
def test_shape(self, input_param, input_data, expected_shape):
net = DynUNet(**input_param)
net.eval()
with torch.no_grad():
result = net(input_data)
self.assertEqual(result.shape, expected_shape)
class TestDynUNetDeepSupervision(unittest.TestCase):
@parameterized.expand(TEST_CASE_DEEP_SUPERVISION)
def test_shape(self, input_param, input_data, expected_shape):
net = DynUNet(**input_param)
with torch.no_grad():
results = net(input_data)
self.assertEqual(len(results), len(expected_shape))
for idx in range(len(results)):
result, sub_expected_shape = results[idx], expected_shape[idx]
self.assertEqual(result.shape, sub_expected_shape)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
]
| |
762eb5522286793c28ee067dc804473cca9f7b95 | 801f367bd19b8f2ab08669fd0a85aad7ace961ac | /project/experiments/exp_025_pns_start_identity/src/tmp_which_nodes_are_slow_results.py | 2b351dbe039f6593b70e34ad3375078ad22ad97b | [
"MIT"
]
| permissive | Wendong-Huo/thesis-bodies | d91b694a6b1b6a911476573ed1ed27eb27fb000d | dceb8a36efd2cefc611f6749a52b56b9d3572f7a | refs/heads/main | 2023-04-17T18:32:38.541537 | 2021-03-12T19:53:23 | 2021-03-12T19:53:23 | 623,471,326 | 1 | 0 | null | 2023-04-04T12:45:48 | 2023-04-04T12:45:47 | null | UTF-8 | Python | false | false | 2,040 | py | import pandas as pd
# Post-mortem analysis: join per-run FPS stats with the SLURM node each job
# ran on (parsed from grep output), to locate consistently slow cluster nodes.

with open("output_data/tmp/which_nodes_are_slow.txt", "r") as f:
    grep_results = f.readlines()

# Spot-check one known job id: print the matching line and the line before
# it (which carries the node name).
for idx, line in enumerate(grep_results):
    if "1785959" in line:
        print(grep_results[idx-1])
        print(line)
        break
# exit(0)

l = len("output_data/tensorboard/")  # prefix length stripped from run paths
df_results = pd.read_pickle("output_data/tmp/which_nodes_are_slow")
df_results["node"] = ""
df_results["num_bodies"] = 0
for idx_df, row in df_results.iterrows():
    path = row["path"][l:]
    df_results.at[idx_df, "path"] = path
    # assumes run names encode bodies as dash-separated tokens with three
    # non-body tokens -- TODO confirm against the naming scheme
    df_results.at[idx_df, "num_bodies"] = len(path.split("-"))-3
    node = ""
    # Find the grep line mentioning this run; the preceding line is expected
    # to look like "<node>.<domain>:...".
    for idx, line in enumerate(grep_results):
        if path in line:
            job_id = line[:7]
            # Restrict to the job-id range of this experiment batch.
            if int(job_id)<1785585 or int(job_id)>1786224:
                continue # I started exp_012 several times
            _tmp = grep_results[idx-1].split(":")[-1]
            node = _tmp.split(".")[0]
            break
    if node=="":
        print("not found.")
    else:
        df_results.at[idx_df, "node"] = node

df_results = df_results.sort_values(by="node")
df_results.to_csv("output_data/tmp/who_slow.csv")
# df_results = df_results[df_results["path"].str.len()>90]
# print(sorted(df_results["path"].str.len().unique()))
# print(df_results.shape)
# df_results["node_prefix"] = df_results["node"].str.slice(start=0, stop=5)

import seaborn as sns
import matplotlib.pyplot as plt

# Bar chart of minimum FPS per node.
fig, ax = plt.subplots()
# sns.barplot(data=df_results, x="node_prefix", y="min_fps", ax=ax)
sns.barplot(data=df_results, x="node", y="min_fps", ax=ax)
plt.xticks(rotation=45)
# ax1 = ax.twinx()
# ax.set_ylim(0,350)
# ax1.set_ylim(0,350)
# sns.lineplot(x=[-0.5,df_results.shape[0]], y=[34.7,34.7], color="black", ax=ax1)
plt.show()

# Show the single slowest run.
df_results = df_results.sort_values(by="min_fps")
print(df_results.iloc[0])
# df_slow = df_results[df_results["min_fps"]<80]
# print(df_slow["node"].unique())
# for node in df_slow["node"].unique():
#     print(df_results[df_results["node"]==node])
"[email protected]"
]
| |
98fe52e38140e1691a95e0a3e3e42abfcfd8ead4 | d96289f157e2bbbf6f3560f3cc327e490df84b54 | /exp_configs/__init__.py | c98ed0eabbf9a5cd320bccd9a1242a1ddc6f5ad4 | []
| no_license | IssamLaradji/semantic_segmentation_template | 74e8766ce3265ba7fc9416f9c85811d05dca39f9 | f7286eaafb5d5bc81e2f7d6bb87f6e24db026a08 | refs/heads/main | 2023-08-22T09:53:37.381702 | 2021-10-14T21:45:42 | 2021-10-14T21:45:42 | 417,287,252 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | from . import issam_exps, sandra_exps
EXP_GROUPS = {}
EXP_GROUPS.update(issam_exps.EXP_GROUPS)
EXP_GROUPS.update(sandra_exps.EXP_GROUPS)
| [
"[email protected]"
]
| |
f4bff5cd10b131b2a0d7ac0bf7e2d36014f08278 | 6160586aa239eada16e735d40d57970dedbe1dfc | /modules/app_additional/app_customaction/app_customaction_delete.py | c182fad8429e4802d3dfe6058d3c4d97757f8530 | []
| no_license | showgea/AIOT | 7f9ffcd49da54836714b3342232cdba330d11e6c | fe8275aba1c4b5402c7c2c2987509c0ecf49f330 | refs/heads/master | 2020-07-23T10:19:37.478456 | 2019-09-23T12:25:59 | 2019-09-23T12:25:59 | 207,525,184 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | import requests
from config import readcfg
header_Gary = readcfg.header_Gary
header_Jenny = readcfg.header_Jenny
url = readcfg.url
def app_customaction_delete(customActionId):
    """Call the customaction/delete endpoint for the given id.

    Sends a GET request as user Gary through the local capture proxy at
    127.0.0.1:8888 with TLS verification disabled, and returns the raw
    ``requests`` response object.
    """
    endpoint = url + "/app/v1.0/lumi/app/customaction/delete"
    payload = {"customActionId": customActionId}
    # Route traffic through the local debugging proxy (Fiddler/Charles style).
    debug_proxies = {
        'http': 'http://127.0.0.1:8888',
        'https': 'http://127.0.0.1:8888',
    }
    print("请求数据:%s" % payload)
    response = requests.get(
        url=endpoint,
        params=payload,
        headers=header_Gary,
        proxies=debug_proxies,
        verify=False,  # the proxy re-signs TLS, so cert checks must be off
    )
    return response


if __name__ == '__main__':
    # Manual smoke test against a throwaway id.
    result_main = app_customaction_delete("123")
    print(result_main.text)
| [
"[email protected]"
]
| |
d1361e5603dbcad0458945a81f77ece19988ca14 | 4e59c2444334c67e419dbc97a2fd326115f15555 | /db_orm_models/blocking/presence/browsing_intent_snapshot/methods.py | 64c2c313d23e2bb2069b8e73e40c8bdb2a79cfe0 | [
"MIT"
]
| permissive | bbcawodu/nav-online-backend | cebf41fd3373606ac880b1fc4935885d13948c86 | 3085ad686b253ea82478eb2fc365f51dda6d9d96 | refs/heads/master | 2021-01-22T04:44:13.105412 | 2018-08-14T16:40:55 | 2018-08-14T16:40:55 | 102,269,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | def filter_query_obj_by_session_id(query_obj, obj_model, rqst_session_id, list_of_ids):
if isinstance(rqst_session_id, unicode) and rqst_session_id.lower() == "all":
query_obj = query_obj.order_by(obj_model.presence_browsing_session_data_id)
else:
query_obj = query_obj.filter(obj_model.presence_browsing_session_data_id.in_(list_of_ids)).\
order_by(obj_model.presence_browsing_session_data_id)
return query_obj
def filter_query_obj_by_intent(query_obj, obj_model, rqst_intent):
    """Keep only rows whose calculated intent equals *rqst_intent*,
    ordered by ``presence_browsing_session_data_id``."""
    intent_matches = obj_model.calculated_intent == rqst_intent
    return query_obj.filter(intent_matches).order_by(
        obj_model.presence_browsing_session_data_id
    )
"[email protected]"
]
| |
0687859952c1036a7b340e2b2af5c0511016df40 | 9f09ecfed34f5014116a1c7afadec2b9c07e9971 | /example_project/some_modules/third_modules/a53.py | 4271b8389a1366db2940c7006acd0fb466cfcb5a | [
"MIT"
]
| permissive | Yuriy-Leonov/cython_imports_limit_issue | a04ce73e8c750f3a61d7aaacaf58665273bf4a49 | 2f9e7c02798fb52185dabfe6ce3811c439ca2839 | refs/heads/master | 2020-09-11T23:57:56.677138 | 2019-11-18T17:48:50 | 2019-11-18T17:51:07 | 222,232,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20 | py | class A53:
pass
| [
"[email protected]"
]
| |
fe67cbd2fbdca0fb9203371b298604412056b63b | c75ec82316ed5322c5844912ce9c528c24360b9f | /nsd1904/py01/day02/game.py | da175d732f5814bc887260e614a5f974e7b8ad95 | []
| no_license | MrZhangzhg/nsd2019 | a94cde22f2e4bd648bb9e56ca63827f558f3c083 | 54f6d2c7b348a69f13ad5f38f2fbdc8207528749 | refs/heads/master | 2021-08-22T17:38:27.697675 | 2020-02-22T08:36:21 | 2020-02-22T08:36:21 | 183,539,489 | 21 | 24 | null | 2020-05-17T12:07:55 | 2019-04-26T02:06:16 | HTML | UTF-8 | Python | false | false | 778 | py | import random # 导入random模块
# random.choice()从一个序列对象中随机选择一项
computer = random.choice(['石头', '剪刀', '布'])
player = input('请出拳(石头/剪刀/布): ')
print("Your choice: %s, Computer's choice: %s" % (player, computer))
if player == '石头':
if computer == '石头':
print('平局')
elif computer == '剪刀':
print('You WIN!!!')
else:
print('You LOSE!!!')
elif player == '剪刀':
if computer == '石头':
print('You LOSE!!!')
elif computer == '剪刀':
print('平局')
else:
print('You WIN!!!')
else:
if computer == '石头':
print('You WIN!!!')
elif computer == '剪刀':
print('You LOSE!!!')
else:
print('平局')
| [
"[email protected]"
]
| |
67711448c51b3aa2c18dbd24c029ab0a57c28569 | 9df89a1652d183d8fc654acd728f9a578d6d1912 | /cli/tests/psym_tests/test_user.py | 400718eaa8635641ddbdad811c5aae5771aba6a4 | [
"BSD-3-Clause"
]
| permissive | duranrojasm/symphony | b37d54a134e29093edacb80442e204fc71a37fbe | 55b3d0c20b669374303bafb10e9c96c734647c9c | refs/heads/main | 2023-08-24T02:00:33.433220 | 2021-10-28T20:35:23 | 2021-10-28T20:35:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,707 | py | #!/usr/bin/env python3
# Copyright (c) 2004-present Facebook All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import random
import string
from unittest import skip
from psym import UserDeactivatedException
from psym.api.user import (
activate_user,
add_user,
deactivate_user,
edit_user,
get_active_users,
)
from psym.graphql.enum.user_role import UserRole
from psym.graphql.enum.user_status import UserStatus
from ..utils import init_client
from ..utils.base_test import BaseTest
class TestUser(BaseTest):
    """Integration tests for the psym user API (add/edit/deactivate/activate).

    Tests run against a live backend through ``self.client`` (provided by
    ``BaseTest``). The expected counts (2 active users after adding one,
    1 after deactivating it) imply the environment starts with a single
    active user -- confirm against the test deployment setup.
    """

    @staticmethod
    def random_string(length: int = 10) -> str:
        # Random lowercase string used to build unique emails/passwords.
        letters = string.ascii_lowercase
        return "".join(random.choices(letters, k=length))

    def test_user_created(self) -> None:
        user_name = f"{self.random_string()}@fb.com"
        u = add_user(client=self.client, email=user_name, password=user_name)
        self.assertEqual(user_name, u.email)
        self.assertEqual(UserStatus.ACTIVE, u.status)
        active_users = get_active_users(client=self.client)
        self.assertEqual(2, len(active_users))
        # The new user can log in and sees the same active-user list.
        client2 = init_client(email=user_name, password=user_name)
        active_users = get_active_users(client=client2)
        self.assertEqual(2, len(active_users))

    def test_user_edited(self) -> None:
        user_name = f"{self.random_string()}@fb.com"
        new_password = self.random_string()
        u = add_user(client=self.client, email=user_name, password=user_name)
        edit_user(
            client=self.client,
            user=u,
            new_password=new_password,
            new_role=UserRole.OWNER,
        )
        # Logging in with the *new* password proves the edit took effect.
        client2 = init_client(email=user_name, password=new_password)
        active_users = get_active_users(client=client2)
        self.assertEqual(2, len(active_users))

    def test_user_deactivated(self) -> None:
        user_name = f"{self.random_string()}@fb.com"
        u = add_user(client=self.client, email=user_name, password=user_name)
        deactivate_user(client=self.client, user=u)
        active_users = get_active_users(client=self.client)
        self.assertEqual(1, len(active_users))
        # A deactivated user must be rejected at login time.
        with self.assertRaises(UserDeactivatedException):
            init_client(email=user_name, password=user_name)

    def test_user_reactivated(self) -> None:
        user_name = f"{self.random_string()}@fb.com"
        u = add_user(client=self.client, email=user_name, password=user_name)
        deactivate_user(client=self.client, user=u)
        activate_user(client=self.client, user=u)
        active_users = get_active_users(client=self.client)
        self.assertEqual(2, len(active_users))
| [
"[email protected]"
]
| |
eb2b9cbc7dcb2e45e3686d9f629a4a03d6867c1d | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/cirq_new/cirq_program/startCirq_Class841.py | 4f84993699cbd099216e52673eb203502687df81 | [
"BSD-3-Clause"
]
| permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,341 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=24
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
    """Assemble the fixed 4-qubit benchmark circuit.

    The ``n`` argument is unused; it is kept for interface compatibility
    with the generator scripts. The trailing ``number=k`` tags mirror the
    generator's original operation ids.
    """
    q = input_qubit
    ops = [
        cirq.H.on(q[0]),           # number=1
        cirq.H.on(q[1]),           # number=2
        cirq.H.on(q[1]),           # number=7
        cirq.H.on(q[2]),           # number=3
        cirq.H.on(q[3]),           # number=4
        cirq.H.on(q[0]),           # number=18
        cirq.CZ.on(q[3], q[0]),    # number=19
        cirq.H.on(q[0]),           # number=20
        cirq.X.on(q[2]),           # number=21
        cirq.H.on(q[0]),           # number=10
        cirq.CZ.on(q[3], q[0]),    # number=11
        cirq.H.on(q[0]),           # number=12
        cirq.CNOT.on(q[2], q[0]),  # number=8
        cirq.H.on(q[0]),           # number=13
        cirq.CZ.on(q[2], q[0]),    # number=14
        cirq.H.on(q[0]),           # number=15
        cirq.X.on(q[2]),           # number=16
        cirq.X.on(q[2]),           # number=17
        cirq.CNOT.on(q[2], q[0]),  # number=22
        cirq.CNOT.on(q[2], q[0]),  # number=23
    ]
    c = cirq.Circuit()
    for op in ops:
        # Append one at a time to reproduce the original moment packing.
        c.append(op)
    return c
def bitstring(bits):
    """Render an iterable of truthy/falsy measurement samples as a '0'/'1' string."""
    digits = [str(int(sample)) for sample in bits]
    return ''.join(digits)
if __name__ == '__main__':
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile for the Sycamore gate set using sqrt-iswap decomposition.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    circuit_sample_count =0  # unused; left over from the generator template

    # Exact simulation: derive per-bitstring probabilities (|amplitude|^2,
    # rounded to 3 decimals) from the final state vector.
    info = cirq.final_state_vector(circuit)
    qubits = round(log2(len(info)))
    frequencies = {
        np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
        for i in range(2 ** qubits)
    }

    # Dump probabilities, gate count, and the circuit diagram to CSV.
    writefile = open("../data/startCirq_Class841.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
    writefile.close()
"[email protected]"
]
| |
9d271bad43590d0385529bc485e0fd4d18fa1faf | e38f7b5d46fd8a65c15e49488fc075e5c62943c9 | /pychron/core/ui/qt/custom_label_editor.py | af48d20e9e521f5da679a7aaf6a049248224d552 | [
"Apache-2.0"
]
| permissive | INGPAN/pychron | 3e13f9d15667e62c347f5b40af366096ee41c051 | 8592f9fc722f037a61b0b783d587633e22f11f2f | refs/heads/master | 2021-08-15T00:50:21.392117 | 2015-01-19T20:07:41 | 2015-01-19T20:07:41 | 111,054,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,405 | py | #===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
from traits.etsconfig.etsconfig import ETSConfig
ETSConfig.toolkit = 'qt4'
#============= enthought library imports =======================
from traits.api import HasTraits, Str, Int, Color, \
Button, Any, Instance, on_trait_change
from traitsui.api import View, UItem
from traitsui.qt4.editor import Editor
from traitsui.basic_editor_factory import BasicEditorFactory
#============= standard library imports ========================
import random
from PySide.QtGui import QLabel
#============= local library imports ==========================
class _CustomLabelEditor(Editor):
    """Traits UI editor that renders its value in a CSS-styled Qt ``QLabel``.

    The label's stylesheet (color, background, size, weight) is rebuilt
    whenever one of the style traits changes; the traits themselves are
    synced one-way from the factory in ``init``.
    """

    # txtctrl = Any
    # Style traits, populated from the factory via sync_value() in init().
    color = Any
    bgcolor = Any
    weight = Any
    text_size = Any

    def init(self, parent):
        """Create the QLabel and wire the factory's style traits onto this editor."""
        self.control = self._create_control(parent)
        # self.item.on_trait_change(self._set_color, 'color')
        self.sync_value(self.factory.color, 'color', mode='from')
        self.sync_value(self.factory.bgcolor, 'bgcolor', mode='from')
        self.sync_value(self.factory.weight, 'weight', mode='from')
        self.sync_value(self.factory.text_size, 'text_size', mode='from')

    @on_trait_change('color, bgcolor, weight, text_size')
    def _update_style(self):
        # Any style trait change triggers a full stylesheet rebuild.
        self._set_style()

    def _set_style(self, control=None,
                   color=None, bgcolor=None,
                   size=None, weight=None):
        """Apply a stylesheet to *control*; unspecified values fall back to
        the editor traits, then to the Item definition."""
        if control is None:
            control = self.control
        if color is None:
            color = self.color.name()
        if bgcolor is None:
            if self.bgcolor is None:
                bgcolor = 'transparent'
            else:
                bgcolor = self.bgcolor.name()
        if size is None:
            size = self.text_size
        if not size:
            size = self.item.size
        if weight is None:
            weight = self.weight
        if not weight:
            weight = self.item.weight
        # Double braces escape the literal CSS block for str.format.
        css = '''QLabel {{color:{};
            background-color:{};
            font-size:{}px;
            font-weight:{};}}
            '''.format(color,
                       bgcolor,
                       size,
                       weight)
        control.setStyleSheet(css)

    def update_editor(self):
        """Push the current value into the label (traits -> Qt direction)."""
        if self.control:
            # print self.object, self.value
            # NOTE(review): Python 2 era code -- ``long``/``unicode`` do not
            # exist on Python 3.
            if isinstance(self.value, (str, int, float, long, unicode)):
                self.control.setText(str(self.value))
            # self.control.SetLabel(self.value)

    def _create_control(self, parent):
        """Build the QLabel and seed its style from the Item's static settings."""
        control = QLabel()
        color = self.item.color.name()
        self._set_style(color=color,
                        control=control)
        control.setMargin(5)
        parent.setSpacing(0)
        return control
class CustomLabelEditor(BasicEditorFactory):
    """Editor factory for ``_CustomLabelEditor``.

    Each Str trait holds the *name* of a trait on the edited object; the
    editor syncs that object trait into its own style trait (see
    ``_CustomLabelEditor.init``).
    """
    klass = _CustomLabelEditor
    color = Str
    bgcolor = Str
    weight = Str
    text_size = Str
class CustomLabel(UItem):
    """Traits UI item that shows a value as a styled, read-only label.

    Static styling comes from ``size``/``color``/``bgcolor``/``weight``;
    dynamic styling is enabled through the ``*_name`` traits, which name
    traits on the edited object and are forwarded to the editor factory.
    """

    editor = Instance(CustomLabelEditor, ())

    # Static style values used when no dynamic trait name is given.
    size = Int(12)
    size_name = Str
    color = Color('black')
    color_name = Str
    bgcolor = Color('transparent')
    bgcolor_name = Str
    weight = Str('normal')

    # Padding around the label in pixels. NOTE(review): not visibly consumed
    # in this file (the editor only calls setMargin(5)) -- confirm usage.
    top_padding = Int(5)
    bottom_padding = Int(5)
    left_padding = Int(5)
    right_padding = Int(5)

    def _size_name_changed(self):
        # Forward the dynamic trait names into the editor factory.
        self.editor.text_size = self.size_name

    def _color_name_changed(self):
        self.editor.color = self.color_name

    def _bgcolor_name_changed(self):
        self.editor.bgcolor = self.bgcolor_name
#===============================================================================
# demo
#===============================================================================
class Demo(HasTraits):
    """Interactive demo: pressing ``foo`` updates the label text and
    alternates fore/background colors between red and blue."""

    a = Str('asdfsdf')
    foo = Button
    color = Color('blue')
    bgcolor = Color('green')
    cnt = 0   # press counter driving the color alternation
    size = Int(12)

    def _foo_fired(self):
        # New random text on each press; colors swap by press parity.
        self.a = 'fffff {}'.format(random.random())
        if self.cnt % 2 == 0:
            self.color = 'red'
            self.bgcolor = 'blue'
        else:
            self.bgcolor = 'red'
            self.color = 'blue'
        self.cnt += 1

    def traits_view(self):
        # Label styling is bound dynamically to the color/bgcolor/size traits
        # via the *_name parameters.
        v = View(
            UItem('size'),
            'foo',
            CustomLabel('a',
                        # color='blue',
                        size=24,
                        size_name='size',
                        top_padding=10,
                        left_padding=10,
                        color_name='color',
                        bgcolor_name='bgcolor'
                        ),
            resizable=True,
            width=400,
            height=100)
        return v
if __name__ == '__main__':
    # Launch the demo UI when run as a script.
    d = Demo()
    d.configure_traits()
#============= EOF =============================================
# css = '''QLabel {{ color:{}; font-size:{}px; font-weight:{};}}
# # '''.format(self.item.color.name(), self.item.size, self.item.weight)
# control.setStyleSheet(css)
# control.setAlignment(Qt.AlignCenter)
# control.setGeometry(0, 0, self.item.width, self.item.height)
# vbox = QVBoxLayout()
# vbox.setSpacing(0)
# hbox = QHBoxLayout()
# hbox.addLayout(vbox)
# parent.addLayout(vbox)
# print vbox.getContentsMargins()
# vbox.setContentsMargins(5, 5, 5, 5)
# vbox.setSpacing(-1)
# vbox.addSpacing(5)
# vbox.addSpacing(10)
# vbox.addWidget(control)
# vbox.addSpacing(5)
# vbox.addStretch()
# vbox.setSpacing(-1)
# vbox.setMargin(10)
# control.setLayout(vbox)
# parent.addWidget(control) | [
"[email protected]"
]
| |
df5d74665f7e253a5707711a3a7f978bebb10b96 | 50e375bdc8affc1a8c09aa567a740fa19df7d5a6 | /DSBQ/deployment/fixtures_old/test_Oracle_pytest_new.py | cbacf2973495522b4d34ecada22a816bff063a78 | []
| no_license | michTalebzadeh/SparkStructuredStreaming | ca7a257626e251c7b03a9844cfd229fa8ea95af5 | 87ef34ffe52061fcbb4f22fcd97764037717696a | refs/heads/master | 2023-07-13T00:49:10.753863 | 2021-07-12T16:39:50 | 2021-07-12T16:39:50 | 364,826,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,709 | py | from pyspark.sql import SparkSession
import pytest
from sparkutils import sparkstuff as s
from src.config import ctest, test_url
from src.CreateSampleDataInMysql import extractHiveData, loadIntoMysqlTable, readSourceData, transformData, saveData, readSavedData
"""
@pytest.fixtures_old(scope = "session")
def initParameters():
# Prepare test data here in this fixtures_old
appName = ctest['common']['appName']
spark_session = s.spark_session(appName)
# create sample data
# read Hive source table and select read_df number of rows (see config_test.yml)
house_df = extractHiveData() ## read Hive table as sample source
# write to Mysql DB
loadIntoMysqlTable(house_df)
# data is ready to be tested in mysql
read_df = readSourceData()
# do Transform part of ETL (Extract, Transform, Load)
transformation_df = transformData()
# save data to target test table in mysql
saveData()
# read that data saved to ensure that the rows will tally
readSavedData_df = readSavedData()
return [read_df, transformation_df, readSavedData_df]
"""
def test_validity():
    """End-to-end ETL check: load sample Hive data into MySQL, then verify
    extract, transform and load row counts against config_test.yml.

    The statement order matters: each step populates the tables the next
    step reads, so the asserts must not be reordered.
    """
    house_df = extractHiveData()
    loadIntoMysqlTable(house_df)
    # Assert that data read from source table is what is expected
    read_df = readSourceData()
    assert read_df.count() == ctest['statics']['read_df_rows']
    # Assert data written to target table is what it should be
    transformation_df = transformData()
    assert transformation_df.count() == ctest['statics']['transformation_df_rows']
    # Assert what is written tallies with the number of rows transformed
    readSavedData_df = readSavedData()
    assert readSavedData_df.subtract(transformation_df).count() == 0
"[email protected]"
]
| |
a4a5217a92054490d85cba7f63ef1acb282a4847 | 989bb5d2d3e89db21fcbeac91a1e64967ea6377b | /sagemaker_neo_compilation_jobs/deploy_pytorch_model_on_Inf1_instance/resnet18.py | 9421aea5af6f0bf1ea89a34f99bc2cb5dcbceb35 | [
"Apache-2.0"
]
| permissive | araitats/amazon-sagemaker-examples | 7cec9ea5822f0469d5dfabbcf3cab62ce9c0f0d1 | 512cb3b6310ae812c6124a451751237d98a109b1 | refs/heads/master | 2023-04-19T05:54:47.334359 | 2021-04-27T21:04:33 | 2021-04-27T21:04:33 | 338,094,683 | 2 | 1 | Apache-2.0 | 2021-04-27T15:35:14 | 2021-02-11T17:07:39 | Jupyter Notebook | UTF-8 | Python | false | false | 565 | py | def input_fn(request_body, request_content_type):
import torch
import torchvision.transforms as transforms
from PIL import Image
import io
f = io.BytesIO(request_body)
input_image = Image.open(f).convert('RGB')
preprocess = transforms.Compose([
transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
input_tensor = preprocess(input_image)
input_batch = input_tensor.unsqueeze(0)
return input_batch | [
"[email protected]"
]
| |
cb4bec1c5c9f2e7faae17939c77ca7c5189da426 | 7ede001485ce68aebcd185f9f7b01b1196e8900d | /forex/env-python2/lib/python2.7/site-packages/v20/user.py | 32388c147cd659635b7adaa22c68fbd120b48212 | [
"MIT"
]
| permissive | phroiland/forex_algos | 971f04ebceb579a761dca6c1184fc14d1e78f9d1 | 055f51e55c52d6dd5cfd38550a48892a0fb09b0d | refs/heads/master | 2023-05-29T00:28:19.350350 | 2022-05-12T21:16:35 | 2022-05-12T21:16:35 | 92,301,496 | 1 | 0 | null | 2023-05-22T20:44:39 | 2017-05-24T14:28:29 | Python | UTF-8 | Python | false | false | 8,342 | py | import ujson as json
from v20.base_entity import BaseEntity
from v20.base_entity import EntityDict
from v20.request import Request
from v20 import spec_properties
class UserInfo(BaseEntity):
    """
    A representation of user information, as provided to the user themself.
    """

    #
    # Format string used when generating a summary for this object
    #
    _summary_format = ""

    #
    # Format string used when generating a name for this object
    #
    _name_format = ""

    #
    # Property metadata for this object
    #
    _properties = spec_properties.user_UserInfo

    def __init__(self, **kwargs):
        """
        Create a new UserInfo instance.  All fields are optional keyword
        arguments; any field not supplied is left as None.
        """
        super(UserInfo, self).__init__()

        #
        # The user-provided username.
        #
        self.username = kwargs.get("username")

        #
        # The user's OANDA-assigned user ID.
        #
        self.userID = kwargs.get("userID")

        #
        # The country that the user is based in.
        #
        self.country = kwargs.get("country")

        #
        # The user's email address.
        #
        self.emailAddress = kwargs.get("emailAddress")

    @staticmethod
    def from_dict(data, ctx):
        """
        Instantiate a new UserInfo from a dict (generally from loading a JSON
        response). The data used to instantiate the UserInfo is a shallow copy
        of the dict passed in, with any complex child types instantiated
        appropriately.
        """
        data = data.copy()
        return UserInfo(**data)
class UserInfoExternal(BaseEntity):
    """
    A representation of user information, as available to external (3rd party)
    clients.
    """

    #
    # Format string used when generating a summary for this object
    #
    _summary_format = ""

    #
    # Format string used when generating a name for this object
    #
    _name_format = ""

    #
    # Property metadata for this object
    #
    _properties = spec_properties.user_UserInfoExternal

    def __init__(self, **kwargs):
        """
        Create a new UserInfoExternal instance.  All fields are optional
        keyword arguments; any field not supplied is left as None.
        """
        super(UserInfoExternal, self).__init__()

        #
        # The user's OANDA-assigned user ID.
        #
        self.userID = kwargs.get("userID")

        #
        # The country that the user is based in.
        #
        self.country = kwargs.get("country")

        #
        # Flag indicating if the the user's Accounts adhere to FIFO execution
        # rules.
        #
        self.FIFO = kwargs.get("FIFO")

    @staticmethod
    def from_dict(data, ctx):
        """
        Instantiate a new UserInfoExternal from a dict (generally from loading
        a JSON response). The data used to instantiate the UserInfoExternal is
        a shallow copy of the dict passed in, with any complex child types
        instantiated appropriately.
        """
        data = data.copy()
        return UserInfoExternal(**data)
class EntitySpec(object):
    """
    The user.EntitySpec wraps the user module's type definitions
    and API methods so they can be easily accessed through an instance of a v20
    Context.
    """

    UserInfo = UserInfo
    UserInfoExternal = UserInfoExternal

    def __init__(self, ctx):
        # The v20 Context used to submit requests and resolve entity types.
        self.ctx = ctx

    @staticmethod
    def _copy_error_fields(jbody, parsed_body):
        """
        Copy the shared error fields (errorCode, errorMessage) from a JSON
        response body into parsed_body when present.

        The 401/403/405 responses of both endpoints carry exactly this shape,
        so the previously-duplicated per-status parsing branches are collapsed
        into this helper.
        """
        if jbody.get('errorCode') is not None:
            parsed_body['errorCode'] = \
                jbody.get('errorCode')
        if jbody.get('errorMessage') is not None:
            parsed_body['errorMessage'] = \
                jbody.get('errorMessage')

    def get_info(
        self,
        userSpecifier,
        **kwargs
    ):
        """
        Fetch the user information for the specified user. This endpoint is
        intended to be used by the user themself to obtain their own
        information.

        Args:
            userSpecifier:
                The User Specifier

        Returns:
            v20.response.Response containing the results from submitting the
            request
        """

        request = Request(
            'GET',
            '/v3/users/{userSpecifier}'
        )

        request.set_path_param(
            'userSpecifier',
            userSpecifier
        )

        response = self.ctx.request(request)

        # Non-JSON (or typeless) responses are returned unparsed.
        if response.content_type is None:
            return response

        if not response.content_type.startswith("application/json"):
            return response

        jbody = json.loads(response.raw_body)

        parsed_body = {}

        #
        # Parse responses as defined by the API specification
        #
        if str(response.status) == "200":
            if jbody.get('userInfo') is not None:
                parsed_body['userInfo'] = \
                    self.ctx.user.UserInfo.from_dict(
                        jbody['userInfo'],
                        self.ctx
                    )

        elif str(response.status) in ("401", "403", "405"):
            self._copy_error_fields(jbody, parsed_body)

        #
        # Unexpected response status
        #
        else:
            parsed_body = jbody

        response.body = parsed_body

        return response

    def get_external_info(
        self,
        userSpecifier,
        **kwargs
    ):
        """
        Fetch the externally-available user information for the specified user.
        This endpoint is intended to be used by 3rd parties that have been
        authorized by a user to view their personal information.

        Args:
            userSpecifier:
                The User Specifier

        Returns:
            v20.response.Response containing the results from submitting the
            request
        """

        request = Request(
            'GET',
            '/v3/users/{userSpecifier}/externalInfo'
        )

        request.set_path_param(
            'userSpecifier',
            userSpecifier
        )

        response = self.ctx.request(request)

        # Non-JSON (or typeless) responses are returned unparsed.
        if response.content_type is None:
            return response

        if not response.content_type.startswith("application/json"):
            return response

        jbody = json.loads(response.raw_body)

        parsed_body = {}

        #
        # Parse responses as defined by the API specification
        #
        if str(response.status) == "200":
            if jbody.get('userInfo') is not None:
                parsed_body['userInfo'] = \
                    self.ctx.user.UserInfoExternal.from_dict(
                        jbody['userInfo'],
                        self.ctx
                    )

        elif str(response.status) in ("401", "403", "405"):
            self._copy_error_fields(jbody, parsed_body)

        #
        # Unexpected response status
        #
        else:
            parsed_body = jbody

        response.body = parsed_body

        return response
"[email protected]"
]
| |
e4b50200cdcfab029ada56611d23bd13fb829714 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-1/42e441b34ef3b68f657a5e36027aaa21ff0b4d84-<run_bottleneck_on_image>-bug.py | 8ad0925041a855fb4f37bc20e4557601749d4a45 | []
| no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py |
def run_bottleneck_on_image(sess, image_data, image_data_tensor, bottleneck_tensor):
    "Runs inference on an image to extract the 'bottleneck' summary layer.\n\n  Args:\n    sess: Current active TensorFlow Session.\n    image_data: Numpy array of image data.\n    image_data_tensor: Input data layer in the graph.\n    bottleneck_tensor: Layer before the final softmax.\n\n  Returns:\n    Numpy array of bottleneck values.\n  "
    # Feed the image into the graph and evaluate the bottleneck layer.
    bottleneck_values = sess.run(bottleneck_tensor, {
        image_data_tensor: image_data,
    })
    # Drop singleton dimensions (e.g. the batch axis) from the result.
    bottleneck_values = np.squeeze(bottleneck_values)
    return bottleneck_values
| [
"[email protected]"
]
| |
37ba1aa162d25931444ba005344100537f2992fa | 4dfc7fc9b84f76d690e33414610bc59a9b07001a | /bds/views/municipality.py | f7a746ad648d09ab470d311aed22e6dbf6fdd580 | []
| no_license | pythondev0101/-j-natividad-web-billing | e62da9ac943a74d2e1e9416d553fd3baafd3937f | 39f7b0d60d86a08d1c5d40cacf9904b28dc2355c | refs/heads/main | 2023-08-28T00:34:43.435674 | 2021-05-24T12:37:54 | 2021-05-24T12:37:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,357 | py | from datetime import datetime
from flask import redirect, url_for, request, current_app, flash
from flask_login import current_user, login_required
from app import db
from app.admin.templating import admin_table, admin_edit
from bds import bp_bds
from bds.models import Municipality
from bds.forms import MunicipalityForm, MunicipalityEditForm
@bp_bds.route('/municipalities')
@login_required
def municipalities():
    """Render the municipality list view via the shared admin table template."""
    # Columns shown in the listing, in display order.
    fields = [Municipality.id, Municipality.name, Municipality.description, Municipality.created_at, Municipality.updated_at]
    form = MunicipalityForm()
    return admin_table(Municipality, fields=fields,form=form,\
        create_url='bp_bds.create_municipality', edit_url='bp_bds.edit_municipality')
@bp_bds.route('/municipalities/create', methods=['POST'])
@login_required
def create_municipality():
    """Handle the create-municipality form POST.

    On validation failure, each field's errors are flashed and the user is
    redirected back to the list.  On success a new Municipality row is
    committed; any database error is flashed after rolling back.
    """
    form = MunicipalityForm()

    if not form.validate_on_submit():
        for key, value in form.errors.items():
            flash(str(key) + str(value), 'error')
        return redirect(url_for('bp_bds.municipalities'))

    try:
        new = Municipality()
        new.name = form.name.data
        new.description = form.description.data
        db.session.add(new)
        db.session.commit()
        flash('New municipality added successfully!')
    except Exception as exc:
        # Roll back the failed transaction so the session is not left in a
        # broken state for subsequent requests.
        db.session.rollback()
        flash(str(exc), 'error')
    return redirect(url_for('bp_bds.municipalities'))
@bp_bds.route('/municipalities/<int:oid>/edit', methods=['GET', 'POST'])
@login_required
def edit_municipality(oid):
    """Display (GET) or process (POST) the edit form for one municipality.

    Args:
        oid: Primary key of the municipality to edit; 404s when absent.
    """
    ins = Municipality.query.get_or_404(oid)
    form = MunicipalityEditForm(obj=ins)

    if request.method == "GET":
        return admin_edit(Municipality, form,'bp_bds.edit_municipality', oid, 'bp_bds.municipalities')

    if not form.validate_on_submit():
        for key, value in form.errors.items():
            flash(str(key) + str(value), 'error')
        return redirect(url_for('bp_bds.municipalities'))

    try:
        ins.name = form.name.data
        ins.description = form.description.data
        ins.updated_at = datetime.now()
        ins.updated_by = "{} {}".format(current_user.fname,current_user.lname)
        db.session.commit()
        flash('Municipality update Successfully!','success')
    except Exception as exc:
        # Roll back the failed transaction so the session stays usable.
        db.session.rollback()
        flash(str(exc),'error')
    return redirect(url_for('bp_bds.municipalities'))
| [
"[email protected]"
]
| |
327914a84e501df8aa4e30d0ab286a73a37f1b35 | dc75370390e821b857b327100f0d2e9a60f34f89 | /chat/migrations/0001_initial.py | 477def66b7a7e08f361a5435958dcb17e478690a | []
| no_license | DontTouchMyMind/OnlineChat_Udemy | 018e24f6dfe7c1c2d1f37540f219f7b652987666 | 77ee36d89adbf71d07b6f73f9b6757aacabde939 | refs/heads/master | 2023-01-28T16:56:40.070478 | 2020-12-07T11:40:11 | 2020-12-07T11:40:11 | 315,873,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | # Generated by Django 3.1.3 on 2020-11-25 07:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``Online`` model.

    NOTE: generated by Django's makemigrations; do not hand-edit the
    operations.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Online',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # db_index=True: name is looked up frequently, so it is indexed.
                ('name', models.CharField(db_index=True, max_length=100)),
            ],
        ),
    ]
| [
"[email protected]"
]
| |
2ba938b829b5293d85393bdbabcce4e6f8a94016 | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/client/gui/game_control/AOGAS.py | d7b1cef8d85ccd796edb079ec9bcef0a8e803485 | []
| no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 8,091 | py | # 2017.05.04 15:21:37 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/game_control/AOGAS.py
import time
import weakref
import BigWorld
import Event
from constants import AOGAS_TIME, ACCOUNT_ATTR
from debug_utils import LOG_ERROR, LOG_DEBUG
from enumerations import AttributeEnumItem, Enumeration
from helpers import time_utils
from skeletons.gui.game_control import IAOGASController
# Seconds per hour; used to express AOGAS thresholds/periods in hours.
TIME_MODIFER = 3600
# Debug switch: force notifications even when the account lacks the AOGAS attr.
AOGAS_FORCE_START_NOTIFY = False
# Default on-screen lifetime (ms) for every notification message.
_DEFAULT_AOGAS_NOTIFY_TIMEOUT = 5000.0
# Enumeration of notification messages keyed by accumulated online duration.
AOGAS_NOTIFY_MSG = Enumeration('Notification message for Anti-online game addiction system', [('AOND_1', {'timeout': _DEFAULT_AOGAS_NOTIFY_TIMEOUT}),
 ('AOND_2', {'timeout': _DEFAULT_AOGAS_NOTIFY_TIMEOUT}),
 ('AOND_3', {'timeout': _DEFAULT_AOGAS_NOTIFY_TIMEOUT}),
 ('AOND_MORE_3', {'timeout': _DEFAULT_AOGAS_NOTIFY_TIMEOUT}),
 ('AOND_MORE_5', {'timeout': _DEFAULT_AOGAS_NOTIFY_TIMEOUT}),
 ('RESET', {'timeout': _DEFAULT_AOGAS_NOTIFY_TIMEOUT})], instance=AttributeEnumItem)
class AOGAS_NOTIFY_TIME(object):
    """Accumulated-online-duration thresholds (seconds) at which notices fire."""
    # 10 minutes before the 1 and 2 hour marks respectively.
    AOND_1 = 1 * TIME_MODIFER - 600
    AOND_2 = 2 * TIME_MODIFER - 600
    AOND_3 = AOGAS_TIME.REDUCED_GAIN
    AOND_5 = AOGAS_TIME.NO_GAIN
class AOGAS_NOTIFY_PERIOD(object):
    """Intervals (seconds) between successive notifications per online phase."""
    AOND_START = 5 * TIME_MODIFER / 6
    AOND_2_3 = 1 * TIME_MODIFER
    AOND_3_5 = 0.5 * TIME_MODIFER
    AOND_END = 0.25 * TIME_MODIFER
class AOGASController(IAOGASController):
    """Drives Anti-Online-Game-Addiction-System notifications for the account.

    Listens to lobby/avatar lifecycle events, queries the server for the
    account's AOGAS attribute and accumulated online time, and feeds the
    resulting online duration into an _AOGASNotificator that schedules the
    periodic warning messages.
    """

    def __init__(self):
        super(AOGASController, self).__init__()
        # Event fired with an AOGAS_NOTIFY_MSG when the UI should show a notice.
        self.onNotifyAccount = Event.Event()
        # True only while the lobby UI can display messages directly.
        self.__isNotifyAccount = False
        # Messages collected while the UI could not display them.
        self.__lastNotifyMessages = []
        # Local-time equivalent of the server's AOGAS session start.
        self.__aogasStartedAt = 0
        self.__isAogasEnabled = True
        # The string is the name-mangled private method on this class; the
        # notificator calls it back via getattr on a weakref'd scope.
        self.__notificator = _AOGASNotificator(self, '_AOGASController__notifyAccount')

    def fini(self):
        """Stop notifications and release listeners on controller teardown."""
        self.__notificator.stop()
        self.onNotifyAccount.clear()
        super(AOGASController, self).fini()

    def onLobbyStarted(self, ctx):
        """Record the AOGAS session start time and kick off the info requests."""
        serverTime = ctx.get('aogasStartedAt')
        if serverTime is not None:
            self.__aogasStartedAt = time_utils.makeLocalServerTime(serverTime)
        else:
            # Fall back to local time when the server did not supply one.
            self.__aogasStartedAt = time.time()
        self.__isAogasEnabled = ctx.get('isAogasEnabled', True)
        if not self.__notificator.isStarted():
            self.__requestRequiredInfo()
        return

    def onDisconnected(self):
        """Reset notification state when the connection drops."""
        self.__notificator.stop()
        self.__isNotifyAccount = False
        self.__lastNotifyMessages = []

    def onLobbyInited(self, event):
        """Enable direct notification and flush any messages queued meanwhile."""
        LOG_DEBUG('enableNotifyAccount ', self.__lastNotifyMessages)
        self.__isNotifyAccount = True
        for message in self.__lastNotifyMessages:
            self.onNotifyAccount(message)

        self.__lastNotifyMessages = []

    def onAvatarBecomePlayer(self):
        # In battle the lobby UI is gone; queue messages instead of showing them.
        LOG_DEBUG('disableNotifyAccount')
        self.__isNotifyAccount = False

    def __notifyAccount(self, message, collect = False):
        """Show the message now, or queue it (append vs replace) for the lobby."""
        if self.__isNotifyAccount:
            self.onNotifyAccount(message)
        elif collect:
            self.__lastNotifyMessages.append(message)
        else:
            self.__lastNotifyMessages = [message]

    def __requestRequiredInfo(self):
        # Async: result arrives in __receiveAccountAttrs.
        BigWorld.player().stats.get('attrs', self.__receiveAccountAttrs)

    def __receiveAccountAttrs(self, resultID, attrs):
        """Start or stop notifications based on the account's AOGAS attribute."""
        if resultID < 0:
            LOG_ERROR('Server return error: ', resultID, attrs)
            return
        if self.__isAogasEnabled and ACCOUNT_ATTR.AOGAS & attrs != 0 or AOGAS_FORCE_START_NOTIFY:
            BigWorld.player().stats.get('accOnline', self.__receiveAccOnline)
        elif self.__notificator.isStarted():
            self.__notificator.stop()

    def __receiveAccOnline(self, resultID, accOnline):
        """Compute total account online duration (AOND) and start the notificator."""
        if resultID < 0:
            LOG_ERROR('Server return error: ', resultID, accOnline)
            return
        if not accOnline:
            # Zero accumulated time means the AOGAS counter was reset.
            self.__notifyAccount(AOGAS_NOTIFY_MSG.RESET)
        # AOND = time since this session started + previously accumulated time.
        delta = round(time.time() - self.__aogasStartedAt)
        AOND = delta + accOnline
        LOG_DEBUG('Calculate AOND (seconds,seconds,seconds) : ', AOND, delta, accOnline)
        self.__notificator.start(AOND)
class _AOGASNotificator(object):
    """Schedules AOGAS notifications at duration-dependent intervals.

    Holds a weak reference to a scope object and the (mangled) name of the
    notification callback on it, then uses BigWorld.callback to fire the
    next message after each period elapses.
    """

    def __init__(self, scope, function):
        # Weakref so the notificator does not keep the controller alive.
        self.__scope = weakref.ref(scope)
        self.__function = function
        self.__started = False
        # Accumulated online duration (seconds) as of the last notification.
        self.__AOND = 0
        self.__callbackID = None
        return

    def start(self, AOND):
        """Begin the notification schedule from the given online duration.

        If AOND is already past the first threshold, the message for the most
        recent threshold is shown immediately, and the next callback is
        scheduled either a full period ahead (when AOND sits exactly on a
        threshold) or at the remaining time to the next threshold.
        """
        if self.__started:
            return
        self.__started = True
        self.__AOND = AOND
        notificated = False
        if AOND > AOGAS_NOTIFY_TIME.AOND_1:
            prevAOND = self.__getPrevNotifyTime(AOND)
            self.__doNotify(self.__getNotifyMessages(prevAOND))
            notificated = prevAOND == AOND
        if notificated:
            notifyPeriod = self.__getNotifyPeriod(self.__AOND)
            LOG_DEBUG('AOGAS started (seconds,seconds)', self.__AOND, notifyPeriod)
            self.__callbackID = BigWorld.callback(notifyPeriod, lambda : self.__notify(notifyPeriod))
        else:
            notifyTime = self.__getNextNotifyTime(AOND)
            nextNotifyDelay = abs(notifyTime - AOND)
            LOG_DEBUG('AOGAS started (seconds,seconds,seconds)', self.__AOND, notifyTime, nextNotifyDelay)
            self.__callbackID = BigWorld.callback(nextNotifyDelay, lambda : self.__notify(nextNotifyDelay))

    def stop(self):
        """Cancel any pending callback and mark the notificator idle."""
        self.__started = False
        if self.__callbackID is not None:
            BigWorld.cancelCallback(self.__callbackID)
            self.__callbackID = None
        return

    def isStarted(self):
        return self.__started

    def __getNotifyPeriod(self, AOND):
        """Return the notification interval appropriate for this duration."""
        if AOND < AOGAS_NOTIFY_TIME.AOND_1:
            notifyPeriod = AOGAS_NOTIFY_PERIOD.AOND_START
        elif AOND < AOGAS_NOTIFY_TIME.AOND_3:
            notifyPeriod = AOGAS_NOTIFY_PERIOD.AOND_2_3
        elif AOND < AOGAS_NOTIFY_TIME.AOND_5:
            notifyPeriod = AOGAS_NOTIFY_PERIOD.AOND_3_5
        else:
            notifyPeriod = AOGAS_NOTIFY_PERIOD.AOND_END
        return notifyPeriod

    def __getNextNotifyTime(self, AOND):
        # Walk the schedule forward to the first notify time >= AOND.
        notifyTime = 0
        while notifyTime < AOND:
            notifyPeriod = self.__getNotifyPeriod(notifyTime)
            notifyTime += notifyPeriod

        return notifyTime

    def __getPrevNotifyTime(self, AOND):
        # Walk the schedule forward, then step back one period: the last
        # notify time that is < AOND (or == AOND when it lands exactly).
        notifyTime = 0
        notifyPeriod = 0
        while notifyTime < AOND:
            notifyPeriod = self.__getNotifyPeriod(notifyTime)
            notifyTime += notifyPeriod

        return notifyTime - notifyPeriod

    def __getNotifyMessages(self, AOND):
        """Map a duration to the message(s) to display at that point."""
        if AOND == AOGAS_NOTIFY_TIME.AOND_1:
            messages = (AOGAS_NOTIFY_MSG.AOND_1,)
        elif AOND == AOGAS_NOTIFY_TIME.AOND_2:
            messages = (AOGAS_NOTIFY_MSG.AOND_2,)
        elif AOND == AOGAS_NOTIFY_TIME.AOND_3:
            messages = (AOGAS_NOTIFY_MSG.AOND_3, AOGAS_NOTIFY_MSG.AOND_MORE_3)
        elif AOND < AOGAS_NOTIFY_TIME.AOND_5:
            messages = (AOGAS_NOTIFY_MSG.AOND_MORE_3,)
        else:
            messages = (AOGAS_NOTIFY_MSG.AOND_MORE_5,)
        return messages

    def __doNotify(self, messages):
        """Invoke the scope's notify callback for each message, if still alive."""
        notifyHandler = getattr(self.__scope(), self.__function, None)
        if notifyHandler is not None and callable(notifyHandler):
            # With several messages, ask the handler to collect (queue) them.
            collect = len(messages) > 1
            for message in messages:
                notifyHandler(message, collect)
                LOG_DEBUG('notify (seconds, message)', self.__AOND, message)

        else:
            LOG_ERROR('Not found notify handler ', self.__scope(), self.__function)
        return

    def __notify(self, notifyPeriod):
        # Advance the duration, fire the messages, and schedule the next tick.
        self.__AOND += notifyPeriod
        self.__doNotify(self.__getNotifyMessages(self.__AOND))
        notifyPeriod = self.__getNotifyPeriod(self.__AOND)
        self.__callbackID = BigWorld.callback(notifyPeriod, lambda : self.__notify(notifyPeriod))
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\game_control\AOGAS.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:21:38 Střední Evropa (letní čas)
| [
"[email protected]"
]
| |
73268d8e78be08959b7a0ae204f64a99e367dc91 | ac47074bcf749273941ab01213bb6d1f59c40c99 | /project/multi_factor/alpha_model/exposure/alpha_factor_dividend_12m.py | 578ecd49441115d3a844ec792f25ce7045c363c4 | []
| no_license | xuzhihua95/quant | c5561e2b08370610f58662f2871f1f1490681be2 | c7e312c70d5f400b7e777d2ff4c9f6f223eabfee | refs/heads/master | 2020-05-19T17:04:08.796981 | 2019-04-24T02:50:29 | 2019-04-24T02:50:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,149 | py | from quant.stock.date import Date
from quant.stock.stock import Stock
from quant.project.multi_factor.alpha_model.exposure.alpha_factor import AlphaFactor
class AlphaDividend12m(AlphaFactor):
    """
    Factor description: trailing-12-month dividend yield, updated from the
    latest financial report.
    Disclosure date: the most recent financial report.
    Intended to capture the factor's valuation power.
    """

    def __init__(self):
        AlphaFactor.__init__(self)
        self.exposure_path = self.data_path
        self.raw_factor_name = 'alpha_raw_dividend_12m'

    def cal_factor_exposure(self, beg_date, end_date):
        """ Compute and persist the factor exposure over [beg_date, end_date]. """
        # Raw series is in percent; convert to a fraction.
        dividend_12m = Stock().read_factor_h5("dividendyield2") / 100

        beg_date = Date().change_to_str(beg_date)
        end_date = Date().change_to_str(end_date)
        # Restrict to the requested date window (columns are dates).
        dividend_12m = dividend_12m.loc[:, beg_date:end_date]
        # Drop stocks with no data at all in the window.
        res = dividend_12m.T.dropna(how='all').T
        self.save_alpha_factor_exposure(res, self.raw_factor_name)
if __name__ == "__main__":
from datetime import datetime
beg_date = '20040101'
end_date = datetime.today()
self = AlphaDividend12m()
self.cal_factor_exposure(beg_date, end_date)
| [
"[email protected]"
]
| |
49be6f19af78663962e559d4140c0414b52e4836 | b5644b71eade9abd62e3cd8747808b8edeea8ee1 | /movies/admin.py | 25365dac1ee29104521aa3d036714f35d6767529 | []
| no_license | HSx3/project_UBD | 68aa8dd1a3a2bf9c3523967a3c489a51c4bdac04 | fcc2b035dac07376ddb0e6c1eceb4544e3415455 | refs/heads/master | 2020-05-24T06:09:14.730903 | 2019-05-17T00:34:29 | 2019-05-17T00:34:29 | 187,133,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | from django.contrib import admin
from .models import Movie, Genre, Actor, Director, Cast, Score
# Register your models here.
admin.site.register(Movie)
admin.site.register(Genre)
admin.site.register(Director)
admin.site.register(Actor)
admin.site.register(Cast)
admin.site.register(Score) | [
"[email protected]"
]
| |
7f23664b7bbc4be12bd5c23a8f685cf41f098106 | f6aac61a48a87743be9c40fecdc24344bae4d263 | /scripts/gfs/gfs2iemre.py | 062adac7a62c91781a649ef342cf23c96977f333 | [
"MIT"
]
| permissive | akrherz/iem | 8714d99b371c8818f7cdde73dd24639e9fc7d42b | 178015584b7fb5b585f65be6013eaf16fb6db0c7 | refs/heads/main | 2023-08-19T02:58:24.507782 | 2023-08-18T12:08:31 | 2023-08-18T12:08:31 | 4,253,774 | 118 | 74 | MIT | 2023-09-14T18:28:41 | 2012-05-07T20:32:59 | Python | UTF-8 | Python | false | false | 6,702 | py | """Copy GFS grib data to IEMRE grid...
Run from RUN_50_AFTER.sh
"""
import shutil
import subprocess
import sys
from datetime import date, timedelta
import numpy as np
import pygrib
from pyiem import iemre
from pyiem.util import logger, ncopen, utc
from scipy.interpolate import NearestNDInterpolator
LOG = logger()
def create(ts):
    """
    Create a new NetCDF file holding GFS forecast fields on the IEMRE grid.

    Args:
        ts (datetime): GFS model run time; stored as the ``gfs_forecast``
            global attribute and used as the time axis origin.
    """
    fn = "/mesonet/data/iemre/gfs_current_new.nc"
    with ncopen(fn, "w") as nc:
        nc.title = "GFS on IEMRE Grid."
        nc.contact = "Daryl Herzmann, [email protected], 515-294-5978"
        nc.gfs_forecast = f"{ts:%Y-%m-%dT%H:%M:%SZ}"
        nc.history = f"{date.today():%d %B %Y} Generated"

        # Setup Dimensions
        nc.createDimension("lat", iemre.NY)
        nc.createDimension("lon", iemre.NX)
        # store 20 days worth, to be safe of future changes
        nc.createDimension("time", 20)

        # Setup Coordinate Variables
        lat = nc.createVariable("lat", float, ("lat"))
        lat.units = "degrees_north"
        lat.long_name = "Latitude"
        lat.standard_name = "latitude"
        lat.bounds = "lat_bnds"
        lat.axis = "Y"
        lat[:] = iemre.YAXIS

        lon = nc.createVariable("lon", float, ("lon"))
        lon.units = "degrees_east"
        lon.long_name = "Longitude"
        lon.standard_name = "longitude"
        lon.bounds = "lon_bnds"
        lon.axis = "X"
        lon[:] = iemre.XAXIS

        tm = nc.createVariable("time", float, ("time",))
        tm.units = f"Days since {ts:%Y-%m-%d} 00:00:0.0"
        tm.long_name = "Time"
        tm.standard_name = "time"
        tm.axis = "T"
        tm.calendar = "gregorian"
        # Placeholder
        tm[:] = np.arange(0, 20)

        # Data variables are stored as scaled uint16 (scale_factor 0.01)
        # with 65535 as the missing-value sentinel.
        high = nc.createVariable(
            "high_tmpk", np.uint16, ("time", "lat", "lon"), fill_value=65535
        )
        high.units = "K"
        high.scale_factor = 0.01
        high.long_name = "2m Air Temperature 12 Hour High"
        high.standard_name = "2m Air Temperature"
        high.coordinates = "lon lat"

        low = nc.createVariable(
            "low_tmpk", np.uint16, ("time", "lat", "lon"), fill_value=65535
        )
        low.units = "K"
        low.scale_factor = 0.01
        low.long_name = "2m Air Temperature 12 Hour Low"
        low.standard_name = "2m Air Temperature"
        low.coordinates = "lon lat"

        ncvar = nc.createVariable(
            "tsoil", np.uint16, ("time", "lat", "lon"), fill_value=65535
        )
        ncvar.units = "K"
        ncvar.scale_factor = 0.01
        ncvar.long_name = "0-10 cm Average Soil Temperature"
        ncvar.standard_name = "0-10 cm Average Soil Temperature"
        ncvar.coordinates = "lon lat"

        ncvar = nc.createVariable(
            "p01d", np.uint16, ("time", "lat", "lon"), fill_value=65535
        )
        ncvar.units = "mm"
        ncvar.scale_factor = 0.01
        ncvar.long_name = "Precipitation Accumulation"
        ncvar.standard_name = "precipitation_amount"
        ncvar.coordinates = "lon lat"
def merge_grib(nc, now):
    """Merge what grib data we can find into the netcdf file.

    Walks the GFS sflux grib files at 6-hour steps out to 384 hours,
    accumulating tmax/tmin extremes, precipitation, and 0-10cm soil
    temperature; each time the forecast valid hour hits 6z, a day's worth
    of fields is regridded (nearest-neighbor) onto IEMRE and written.
    """
    xi, yi = np.meshgrid(iemre.XAXIS, iemre.YAXIS)
    lons = None
    lats = None
    tmaxgrid = None
    tmingrid = None
    tsoilgrid = None
    pgrid = None
    # Counts 6-hourly prate records seen since the last 6z write; a day is
    # only written when all 4 periods were present.
    hits = 0
    for fhour in range(6, 385, 6):
        fxtime = now + timedelta(hours=fhour)
        grbfn = now.strftime(
            f"/mesonet/tmp/gfs/%Y%m%d%H/gfs.t%Hz.sfluxgrbf{fhour:03.0f}.grib2"
        )
        grbs = pygrib.open(grbfn)
        for grb in grbs:
            name = grb.shortName.lower()
            if lons is None:
                # Flatten the grid once; normalize longitudes to [-180, 180].
                lats, lons = [np.ravel(x) for x in grb.latlons()]
                lons = np.where(lons > 180, lons - 360, lons)
            if name == "tmax":
                # Running maximum across the accumulation window.
                if tmaxgrid is None:
                    tmaxgrid = grb.values
                else:
                    tmaxgrid = np.where(
                        grb.values > tmaxgrid, grb.values, tmaxgrid
                    )
            elif name == "tmin":
                # Running minimum across the accumulation window.
                if tmingrid is None:
                    tmingrid = grb.values
                else:
                    tmingrid = np.where(
                        grb.values < tmingrid, grb.values, tmingrid
                    )
            elif name == "prate":
                # kg/m^2/s over six hours
                hits += 1
                if pgrid is None:
                    pgrid = grb.values * 6.0 * 3600
                else:
                    pgrid += grb.values * 6.0 * 3600
            # Hacky
            elif name == "st" and str(grb).find("0.0-0.1 m") > -1:
                # Summed here; divided by 4 at write time for the daily mean.
                if tsoilgrid is None:
                    tsoilgrid = grb.values
                else:
                    tsoilgrid += grb.values
        grbs.close()
        # Write tmax, tmin out at 6z
        if fxtime.hour == 6:
            # The actual date is minus one
            days = (fxtime.date() - now.date()).days - 1
            if hits == 4:
                LOG.info("Writing %s, days=%s", fxtime, days)
                nn = NearestNDInterpolator((lons, lats), np.ravel(tmaxgrid))
                nc.variables["high_tmpk"][days, :, :] = nn(xi, yi)
                nn = NearestNDInterpolator((lons, lats), np.ravel(tmingrid))
                nc.variables["low_tmpk"][days, :, :] = nn(xi, yi)
                nn = NearestNDInterpolator((lons, lats), np.ravel(pgrid))
                nc.variables["p01d"][days, :, :] = nn(xi, yi)
                nn = NearestNDInterpolator(
                    (lons, lats), np.ravel(tsoilgrid / 4.0)
                )
                nc.variables["tsoil"][days, :, :] = nn(xi, yi)
            # Reset accumulators for the next forecast day.
            tmingrid = None
            tmaxgrid = None
            tsoilgrid = None
            hits = 0
def main(argv):
    """Do the work.

    Args:
        argv: sys.argv-style list; argv[1:5] are year, month, day, hour of
            the GFS model run to process.
    """
    now = utc(*[int(s) for s in argv[1:5]])
    # Run every hour, filter those we don't run
    if now.hour % 6 != 0:
        return
    create(now)
    with ncopen("/mesonet/data/iemre/gfs_current_new.nc", "a") as nc:
        merge_grib(nc, now)
    # Atomically replace the live file once writing is complete.
    shutil.move(
        "/mesonet/data/iemre/gfs_current_new.nc",
        "/mesonet/data/iemre/gfs_current.nc",
    )
    # Archive this as we need it for various projects
    cmd = [
        "pqinsert",
        "-i",
        "-p",
        (
            f"data a {now:%Y%m%d%H%M} bogus "
            f"model/gfs/gfs_{now:%Y%m%d%H}_iemre.nc nc"
        ),
        "/mesonet/data/iemre/gfs_current.nc",
    ]
    subprocess.call(cmd)
    # Generate 4inch plots based on 6z GFS
    if now.hour == 6:
        subprocess.call(["python", "gfs_4inch.py"])
main(sys.argv)
| [
"[email protected]"
]
| |
875c0f821af2f07ad32cab2bdcedef57dd82e2a5 | a26ecf8a24ed20ed9ee4728fa189cc9168f4416b | /library/__init__.py | 09902a0aa53c4e5e72e8ac4cf82bc0737e8102b8 | []
| no_license | mfa/addition_seq2seq_allennlp | c3cf543c65a939aa33ed7aa74f9bf0457f913530 | e8176b33cd6ce375f13d9e720aa4d92a4f210912 | refs/heads/master | 2020-04-25T23:41:27.294094 | 2019-03-10T13:25:15 | 2019-03-10T13:25:15 | 173,153,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | from library.data import AdditionSeq2SeqDatasetReader
| [
"[email protected]"
]
| |
da325578a57f0f5949a3625ee61b64b1612a13c1 | 04f948d94cf288eafccf2b513078aeed77e3faef | /prof.py | a35159b88b3feed2074e0fcec867c1df8d0ddf85 | [
"Apache-2.0"
]
| permissive | jdily/qpth | a9d0e5a662c407e6b6a92a25962040f0a2834ce8 | 296c01775ac82e7890aa688839f39fff6a6cb681 | refs/heads/master | 2021-01-21T12:58:33.373545 | 2017-05-16T15:02:12 | 2017-05-16T15:02:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,433 | py | #!/usr/bin/env python3
import argparse
import sys
import numpy as np
import numpy.random as npr
import qpth.solvers.pdipm.single as pdipm_s
import qpth.solvers.pdipm.batch as pdipm_b
import itertools
import time
import torch
import gurobipy as gpy
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
color_scheme='Linux', call_pdb=1)
import setproctitle
def main():
    """Entry point: parse CLI options, seed the RNG, and run the sweep."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--nTrials', type=int, default=10)
    parsed_args = arg_parser.parse_args()
    setproctitle.setproctitle('bamos.optnet.prof')
    npr.seed(0)
    prof(parsed_args)
def prof(args):
    """Benchmark every (nz, nBatch) configuration and print a result table."""
    print('| \# Vars | \# Batch | Gurobi | single | batched |')
    print('|----------+----------+--------+--------+---------|')
    # for nz, nBatch in itertools.product([100,500], [1, 64, 128]):
    for nz, nBatch in itertools.product([100], [1, 64, 128]):
        samples = np.array([prof_instance(nz, nBatch) for _ in range(args.nTrials)])
        # Interleave per-solver means and standard deviations for the row template.
        stats = [v for pair in zip(samples.mean(axis=0), samples.std(axis=0))
                 for v in pair]
        row_template = "| {:5d} " * 2 + "| ${:.5e} \pm {:.5e}$ s " * 3 + '|'
        print(row_template.format(nz, nBatch, *stats))
def prof_instance(nz, nBatch, cuda=True):
    """Time one random QP instance on three solvers.

    Builds a batch of random, strictly feasible QPs with `nz` variables and
    100 inequality constraints, then solves them with (1) Gurobi one problem
    at a time, (2) the single-problem PDIPM solver, and (3) the batched PDIPM
    solver.

    Returns:
        (gurobi_time, single_time, batched_time) wall-clock seconds.
    """
    nineq, neq = 100, 0
    assert(neq == 0)
    # Q = L L^T + eps*I is symmetric positive definite by construction.
    L = npr.rand(nBatch, nz, nz)
    Q = np.matmul(L, L.transpose((0, 2, 1))) + 1e-3 * np.eye(nz, nz)
    G = npr.randn(nBatch, nineq, nz)
    # z0/s0 define a strictly feasible point; h is derived from them so the
    # constraint set G z <= h is guaranteed non-empty.
    z0 = npr.randn(nBatch, nz)
    s0 = npr.rand(nBatch, nineq)
    p = npr.randn(nBatch, nz)
    h = np.matmul(G, np.expand_dims(z0, axis=(2))).squeeze(2) + s0
    A = npr.randn(nBatch, neq, nz)
    b = np.matmul(A, np.expand_dims(z0, axis=(2))).squeeze(2)
    # --- Solver 1: Gurobi, one problem per model build; only optimize() is timed.
    zhat_g = []
    gurobi_time = 0.0
    for i in range(nBatch):
        m = gpy.Model()
        zhat = m.addVars(nz, lb=-gpy.GRB.INFINITY, ub=gpy.GRB.INFINITY)
        obj = 0.0
        for j in range(nz):
            for k in range(nz):
                obj += 0.5 * Q[i, j, k] * zhat[j] * zhat[k]
            obj += p[i, j] * zhat[j]
        m.setObjective(obj)
        for j in range(nineq):
            con = 0
            for k in range(nz):
                con += G[i, j, k] * zhat[k]
            m.addConstr(con <= h[i, j])
        m.setParam('OutputFlag', False)
        start = time.time()
        m.optimize()
        gurobi_time += time.time() - start
        t = np.zeros(nz)
        for j in range(nz):
            t[j] = zhat[j].x
        zhat_g.append(t)
    # Convert problem data to torch tensors (optionally on GPU) for PDIPM.
    p, L, Q, G, z0, s0, h = [torch.Tensor(x) for x in [p, L, Q, G, z0, s0, h]]
    if cuda:
        p, L, Q, G, z0, s0, h = [x.cuda() for x in [p, L, Q, G, z0, s0, h]]
    if neq > 0:
        A = torch.Tensor(A)
        b = torch.Tensor(b)
    else:
        A, b = [torch.Tensor()] * 2
    if cuda:
        A = A.cuda()
        b = b.cuda()
    # af = adact.AdactFunction()
    # --- Solver 2: single-problem PDIPM, looped over the batch.
    # Timing covers factorization + solve for the whole batch.
    single_results = []
    start = time.time()
    for i in range(nBatch):
        A_i = A[i] if neq > 0 else A
        b_i = b[i] if neq > 0 else b
        U_Q, U_S, R = pdipm_s.pre_factor_kkt(Q[i], G[i], A_i)
        single_results.append(pdipm_s.forward(p[i], Q[i], G[i], A_i, b_i, h[i],
                                              U_Q, U_S, R))
    single_time = time.time() - start
    # --- Solver 3: batched PDIPM, one call for the entire batch.
    start = time.time()
    Q_LU, S_LU, R = pdipm_b.pre_factor_kkt(Q, G, A)
    zhat_b, nu_b, lam_b, s_b = pdipm_b.forward(p, Q, G, h, A, b, Q_LU, S_LU, R)
    batched_time = time.time() - start
    # Usually between 1e-4 and 1e-5:
    # print('Diff between gurobi and pdipm: ',
    #       np.linalg.norm(zhat_g[0]-zhat_b[0].cpu().numpy()))
    # import IPython, sys; IPython.embed(); sys.exit(-1)
    # import IPython, sys; IPython.embed(); sys.exit(-1)
    # zhat_diff = (single_results[0][0] - zhat_b[0]).norm()
    # lam_diff = (single_results[0][2] - lam_b[0]).norm()
    # eps = 0.1  # Pretty relaxed.
    # if zhat_diff > eps or lam_diff > eps:
    #     print('===========')
    #     print("Warning: Single and batched solutions might not match.")
    #     print("  + zhat_diff: {}".format(zhat_diff))
    #     print("  + lam_diff: {}".format(lam_diff))
    #     print("  + (nz, neq, nineq, nBatch) = ({}, {}, {}, {})".format(
    #         nz, neq, nineq, nBatch))
    #     print('===========')
    return gurobi_time, single_time, batched_time
# Run the benchmark sweep when executed as a script.
if __name__ == '__main__':
    main()
| [
"[email protected]"
]
| |
99c86317623eebc3408c6fbd9cbef298f9049dc0 | 7d1bd4868e4a9ef612003ba15e34bf247cf1a42c | /swp/manage.py | be1f3aa4f4e273d633e8263d8119311e124f02cd | []
| no_license | Student-Welfare-Portal/Web-App-Django | 45f7569ce1b5c67deb54231864a49017d2d86831 | f51b791aed2746fe525e4633c9538837a1a35585 | refs/heads/master | 2020-03-31T20:24:37.089227 | 2019-01-20T10:30:58 | 2019-01-20T10:30:58 | 152,539,070 | 0 | 6 | null | 2018-12-11T03:15:51 | 2018-10-11T06:04:02 | HTML | UTF-8 | Python | false | false | 801 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "swp.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"="
]
| = |
dc57e4b3c1c4954f61c5cb315bb48277c0c10ea5 | 42e85e88b8936942eb9e5ed068034c9579384586 | /pipeline_logic/omop/python/schemas.py | 7d87b889016dcae33fda3424b828096f1cafdd78 | []
| no_license | dr-you-group/Data-Ingestion-and-Harmonization | 55b634d8a7abe22cc7f06b3b0bce27467c6720ca | 145aec62daa5df450c94180d5252dd3bc23f0eae | refs/heads/master | 2023-08-25T15:25:59.934816 | 2021-10-07T15:27:07 | 2021-10-07T15:27:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,408 | py | from collections import OrderedDict
from pyspark.sql import types as T
from pyspark.sql.types import StructType, StructField
def schema_dict_to_struct(schema_dict, all_string_type):
    """Build a Spark StructType from an ordered {column: type} mapping.

    When *all_string_type* is truthy every column is declared as StringType;
    otherwise the mapped Spark type is used. All fields are nullable.
    """
    fields = [
        StructField(name, T.StringType() if all_string_type else dtype, True)
        for name, dtype in schema_dict.items()
    ]
    return StructType(fields)
def schema_dict_all_string_type(schema_dict, all_lowercase=False):
    """Return an OrderedDict with the same columns, all typed StringType.

    Column names are lower-cased when *all_lowercase* is truthy; insertion
    order of *schema_dict* is preserved.
    """
    return OrderedDict(
        (name.lower() if all_lowercase else name, T.StringType())
        for name in schema_dict
    )
# Full OMOP CDM layout per domain table: every column this pipeline handles,
# in order, mapped to its expected Spark type. Used to build/validate the
# complete schema for each domain.
complete_domain_schema_dict = {
    'care_site': OrderedDict([
        ('CARE_SITE_ID', T.LongType()),
        ('CARE_SITE_NAME', T.StringType()),
        ('PLACE_OF_SERVICE_CONCEPT_ID', T.IntegerType()),
        ('LOCATION_ID', T.LongType()),
        ('CARE_SITE_SOURCE_VALUE', T.StringType()),
        ('PLACE_OF_SERVICE_SOURCE_VALUE', T.StringType()),
    ]),
    'condition_era': OrderedDict([
        ('CONDITION_ERA_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('CONDITION_CONCEPT_ID', T.IntegerType()),
        ('CONDITION_ERA_START_DATE', T.DateType()),
        ('CONDITION_ERA_END_DATE', T.DateType()),
        ('CONDITION_OCCURRENCE_COUNT', T.IntegerType()),
    ]),
    'condition_occurrence': OrderedDict([
        ('CONDITION_OCCURRENCE_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('CONDITION_CONCEPT_ID', T.IntegerType()),
        ('CONDITION_START_DATE', T.DateType()),
        ('CONDITION_START_DATETIME', T.TimestampType()),
        ('CONDITION_END_DATE', T.DateType()),
        ('CONDITION_END_DATETIME', T.TimestampType()),
        ('CONDITION_TYPE_CONCEPT_ID', T.IntegerType()),
        ('STOP_REASON', T.StringType()),
        ('PROVIDER_ID', T.LongType()),
        ('VISIT_OCCURRENCE_ID', T.LongType()),
        ('VISIT_DETAIL_ID', T.IntegerType()),
        ('CONDITION_SOURCE_VALUE', T.StringType()),
        ('CONDITION_SOURCE_CONCEPT_ID', T.IntegerType()),
        ('CONDITION_STATUS_SOURCE_VALUE', T.StringType()),
        ('CONDITION_STATUS_CONCEPT_ID', T.IntegerType()),
    ]),
    'death': OrderedDict([
        ('PERSON_ID', T.LongType()),
        ('DEATH_DATE', T.DateType()),
        ('DEATH_DATETIME', T.TimestampType()),
        ('DEATH_TYPE_CONCEPT_ID', T.IntegerType()),
        ('CAUSE_CONCEPT_ID', T.IntegerType()),
        ('CAUSE_SOURCE_VALUE', T.StringType()),
        ('CAUSE_SOURCE_CONCEPT_ID', T.IntegerType()),
    ]),
    'dose_era': OrderedDict([
        ('DOSE_ERA_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('DRUG_CONCEPT_ID', T.IntegerType()),
        ('UNIT_CONCEPT_ID', T.IntegerType()),
        ('DOSE_VALUE', T.FloatType()),
        ('DOSE_ERA_START_DATE', T.DateType()),
        ('DOSE_ERA_END_DATE', T.DateType()),
    ]),
    'drug_era': OrderedDict([
        ('DRUG_ERA_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('DRUG_CONCEPT_ID', T.IntegerType()),
        ('DRUG_ERA_START_DATE', T.DateType()),
        ('DRUG_ERA_END_DATE', T.DateType()),
        ('DRUG_EXPOSURE_COUNT', T.IntegerType()),
        ('GAP_DAYS', T.IntegerType()),
    ]),
    'drug_exposure': OrderedDict([
        ('DRUG_EXPOSURE_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('DRUG_CONCEPT_ID', T.IntegerType()),
        ('DRUG_EXPOSURE_START_DATE', T.DateType()),
        ('DRUG_EXPOSURE_START_DATETIME', T.TimestampType()),
        ('DRUG_EXPOSURE_END_DATE', T.DateType()),
        ('DRUG_EXPOSURE_END_DATETIME', T.TimestampType()),
        ('VERBATIM_END_DATE', T.DateType()),
        ('DRUG_TYPE_CONCEPT_ID', T.IntegerType()),
        ('STOP_REASON', T.StringType()),
        ('REFILLS', T.IntegerType()),
        ('QUANTITY', T.FloatType()),
        ('DAYS_SUPPLY', T.IntegerType()),
        ('SIG', T.StringType()),
        ('ROUTE_CONCEPT_ID', T.IntegerType()),
        ('LOT_NUMBER', T.StringType()),
        ('PROVIDER_ID', T.LongType()),
        ('VISIT_OCCURRENCE_ID', T.LongType()),
        ('VISIT_DETAIL_ID', T.IntegerType()),
        ('DRUG_SOURCE_VALUE', T.StringType()),
        ('DRUG_SOURCE_CONCEPT_ID', T.IntegerType()),
        ('ROUTE_SOURCE_VALUE', T.StringType()),
        ('DOSE_UNIT_SOURCE_VALUE', T.StringType()),
    ]),
    'location': OrderedDict([
        ('LOCATION_ID', T.LongType()),
        ('ADDRESS_1', T.StringType()),
        ('ADDRESS_2', T.StringType()),
        ('CITY', T.StringType()),
        ('STATE', T.StringType()),
        ('ZIP', T.StringType()),
        ('COUNTY', T.StringType()),
        ('LOCATION_SOURCE_VALUE', T.StringType()),
    ]),
    'measurement': OrderedDict([
        ('MEASUREMENT_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('MEASUREMENT_CONCEPT_ID', T.IntegerType()),
        ('MEASUREMENT_DATE', T.DateType()),
        ('MEASUREMENT_DATETIME', T.TimestampType()),
        ('MEASUREMENT_TIME', T.StringType()),
        ('MEASUREMENT_TYPE_CONCEPT_ID', T.IntegerType()),
        ('OPERATOR_CONCEPT_ID', T.IntegerType()),
        ('VALUE_AS_NUMBER', T.FloatType()),
        ('VALUE_AS_CONCEPT_ID', T.IntegerType()),
        ('UNIT_CONCEPT_ID', T.IntegerType()),
        ('RANGE_LOW', T.FloatType()),
        ('RANGE_HIGH', T.FloatType()),
        ('PROVIDER_ID', T.LongType()),
        ('VISIT_OCCURRENCE_ID', T.LongType()),
        ('VISIT_DETAIL_ID', T.IntegerType()),
        ('MEASUREMENT_SOURCE_VALUE', T.StringType()),
        ('MEASUREMENT_SOURCE_CONCEPT_ID', T.IntegerType()),
        ('UNIT_SOURCE_VALUE', T.StringType()),
        ('VALUE_SOURCE_VALUE', T.StringType()),
    ]),
    'observation': OrderedDict([
        ('OBSERVATION_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('OBSERVATION_CONCEPT_ID', T.IntegerType()),
        ('OBSERVATION_DATE', T.DateType()),
        ('OBSERVATION_DATETIME', T.TimestampType()),
        ('OBSERVATION_TYPE_CONCEPT_ID', T.IntegerType()),
        ('VALUE_AS_NUMBER', T.FloatType()),
        ('VALUE_AS_STRING', T.StringType()),
        ('VALUE_AS_CONCEPT_ID', T.IntegerType()),
        ('QUALIFIER_CONCEPT_ID', T.IntegerType()),
        ('UNIT_CONCEPT_ID', T.IntegerType()),
        ('PROVIDER_ID', T.LongType()),
        ('VISIT_OCCURRENCE_ID', T.LongType()),
        ('VISIT_DETAIL_ID', T.IntegerType()),
        ('OBSERVATION_SOURCE_VALUE', T.StringType()),
        ('OBSERVATION_SOURCE_CONCEPT_ID', T.IntegerType()),
        ('UNIT_SOURCE_VALUE', T.StringType()),
        ('QUALIFIER_SOURCE_VALUE', T.StringType()),
    ]),
    'observation_period': OrderedDict([
        ('OBSERVATION_PERIOD_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('OBSERVATION_PERIOD_START_DATE', T.DateType()),
        ('OBSERVATION_PERIOD_END_DATE', T.DateType()),
        ('PERIOD_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'person': OrderedDict([
        ('PERSON_ID', T.LongType()),
        ('GENDER_CONCEPT_ID', T.IntegerType()),
        ('YEAR_OF_BIRTH', T.IntegerType()),
        ('MONTH_OF_BIRTH', T.IntegerType()),
        ('DAY_OF_BIRTH', T.IntegerType()),
        ('BIRTH_DATETIME', T.TimestampType()),
        ('RACE_CONCEPT_ID', T.IntegerType()),
        ('ETHNICITY_CONCEPT_ID', T.IntegerType()),
        ('LOCATION_ID', T.LongType()),
        ('PROVIDER_ID', T.LongType()),
        ('CARE_SITE_ID', T.LongType()),
        ('PERSON_SOURCE_VALUE', T.StringType()),
        ('GENDER_SOURCE_VALUE', T.StringType()),
        ('GENDER_SOURCE_CONCEPT_ID', T.IntegerType()),
        ('RACE_SOURCE_VALUE', T.StringType()),
        ('RACE_SOURCE_CONCEPT_ID', T.IntegerType()),
        ('ETHNICITY_SOURCE_VALUE', T.StringType()),
        ('ETHNICITY_SOURCE_CONCEPT_ID', T.IntegerType()),
    ]),
    'procedure_occurrence': OrderedDict([
        ('PROCEDURE_OCCURRENCE_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('PROCEDURE_CONCEPT_ID', T.IntegerType()),
        ('PROCEDURE_DATE', T.DateType()),
        ('PROCEDURE_DATETIME', T.TimestampType()),
        ('PROCEDURE_TYPE_CONCEPT_ID', T.IntegerType()),
        ('MODIFIER_CONCEPT_ID', T.IntegerType()),
        ('QUANTITY', T.IntegerType()),
        ('PROVIDER_ID', T.LongType()),
        ('VISIT_OCCURRENCE_ID', T.LongType()),
        ('VISIT_DETAIL_ID', T.IntegerType()),
        ('PROCEDURE_SOURCE_VALUE', T.StringType()),
        ('PROCEDURE_SOURCE_CONCEPT_ID', T.IntegerType()),
        ('MODIFIER_SOURCE_VALUE', T.StringType()),
    ]),
    'provider': OrderedDict([
        ('PROVIDER_ID', T.LongType()),
        ('PROVIDER_NAME', T.StringType()),
        ('NPI', T.StringType()),
        ('DEA', T.StringType()),
        ('SPECIALTY_CONCEPT_ID', T.IntegerType()),
        ('CARE_SITE_ID', T.LongType()),
        ('YEAR_OF_BIRTH', T.IntegerType()),
        ('GENDER_CONCEPT_ID', T.IntegerType()),
        ('PROVIDER_SOURCE_VALUE', T.StringType()),
        ('SPECIALTY_SOURCE_VALUE', T.StringType()),
        ('SPECIALTY_SOURCE_CONCEPT_ID', T.IntegerType()),
        ('GENDER_SOURCE_VALUE', T.StringType()),
        ('GENDER_SOURCE_CONCEPT_ID', T.IntegerType()),
    ]),
    'visit_occurrence': OrderedDict([
        ('VISIT_OCCURRENCE_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('VISIT_CONCEPT_ID', T.IntegerType()),
        ('VISIT_START_DATE', T.DateType()),
        ('VISIT_START_DATETIME', T.TimestampType()),
        ('VISIT_END_DATE', T.DateType()),
        ('VISIT_END_DATETIME', T.TimestampType()),
        ('VISIT_TYPE_CONCEPT_ID', T.IntegerType()),
        ('PROVIDER_ID', T.LongType()),
        ('CARE_SITE_ID', T.LongType()),
        ('VISIT_SOURCE_VALUE', T.StringType()),
        ('VISIT_SOURCE_CONCEPT_ID', T.IntegerType()),
        ('ADMITTING_SOURCE_CONCEPT_ID', T.IntegerType()),
        ('ADMITTING_SOURCE_VALUE', T.StringType()),
        ('DISCHARGE_TO_CONCEPT_ID', T.IntegerType()),
        ('DISCHARGE_TO_SOURCE_VALUE', T.StringType()),
        ('PRECEDING_VISIT_OCCURRENCE_ID', T.IntegerType()),
    ]),
}
# Subset of complete_domain_schema_dict: the columns each domain table is
# required to supply (per OMOP CDM NOT NULL conventions as used here).
required_domain_schema_dict = {
    'care_site': OrderedDict([
        ('CARE_SITE_ID', T.LongType()),
    ]),
    'condition_era': OrderedDict([
        ('CONDITION_ERA_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('CONDITION_CONCEPT_ID', T.IntegerType()),
        ('CONDITION_ERA_START_DATE', T.DateType()),
        ('CONDITION_ERA_END_DATE', T.DateType()),
    ]),
    'condition_occurrence': OrderedDict([
        ('CONDITION_OCCURRENCE_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('CONDITION_CONCEPT_ID', T.IntegerType()),
        ('CONDITION_START_DATE', T.DateType()),
        ('CONDITION_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'death': OrderedDict([
        ('PERSON_ID', T.LongType()),
        ('DEATH_DATE', T.DateType()),
        ('DEATH_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'dose_era': OrderedDict([
        ('DOSE_ERA_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('DRUG_CONCEPT_ID', T.IntegerType()),
        ('UNIT_CONCEPT_ID', T.IntegerType()),
        ('DOSE_VALUE', T.FloatType()),
        ('DOSE_ERA_START_DATE', T.DateType()),
        ('DOSE_ERA_END_DATE', T.DateType()),
    ]),
    'drug_era': OrderedDict([
        ('DRUG_ERA_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('DRUG_CONCEPT_ID', T.IntegerType()),
        ('DRUG_ERA_START_DATE', T.DateType()),
        ('DRUG_ERA_END_DATE', T.DateType()),
    ]),
    'drug_exposure': OrderedDict([
        ('DRUG_EXPOSURE_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('DRUG_CONCEPT_ID', T.IntegerType()),
        ('DRUG_EXPOSURE_START_DATE', T.DateType()),
        ('DRUG_EXPOSURE_END_DATE', T.DateType()),
        ('DRUG_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'location': OrderedDict([
        ('LOCATION_ID', T.LongType()),
    ]),
    'measurement': OrderedDict([
        ('MEASUREMENT_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('MEASUREMENT_CONCEPT_ID', T.IntegerType()),
        ('MEASUREMENT_DATE', T.DateType()),
        ('MEASUREMENT_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'observation': OrderedDict([
        ('OBSERVATION_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('OBSERVATION_CONCEPT_ID', T.IntegerType()),
        ('OBSERVATION_DATE', T.DateType()),
        ('OBSERVATION_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'observation_period': OrderedDict([
        ('OBSERVATION_PERIOD_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('OBSERVATION_PERIOD_START_DATE', T.DateType()),
        ('OBSERVATION_PERIOD_END_DATE', T.DateType()),
        ('PERIOD_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'person': OrderedDict([
        ('PERSON_ID', T.LongType()),
        ('GENDER_CONCEPT_ID', T.IntegerType()),
        ('YEAR_OF_BIRTH', T.IntegerType()),
        ('RACE_CONCEPT_ID', T.IntegerType()),
        ('ETHNICITY_CONCEPT_ID', T.IntegerType()),
    ]),
    'procedure_occurrence': OrderedDict([
        ('PROCEDURE_OCCURRENCE_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('PROCEDURE_CONCEPT_ID', T.IntegerType()),
        ('PROCEDURE_DATE', T.DateType()),
        ('PROCEDURE_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'provider': OrderedDict([
        ('PROVIDER_ID', T.LongType()),
    ]),
    'visit_occurrence': OrderedDict([
        ('VISIT_OCCURRENCE_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('VISIT_CONCEPT_ID', T.IntegerType()),
        ('VISIT_START_DATE', T.DateType()),
        ('VISIT_END_DATE', T.DateType()),
        ('VISIT_TYPE_CONCEPT_ID', T.IntegerType()),
    ])
}
# Required columns that are essential
# Records should be dropped if they contain null, not just warned
# (commented-out entries are required columns where a null is only warned,
# not dropped).
null_cols_to_drop_dict = {
    'care_site': OrderedDict([
        ('CARE_SITE_ID', T.LongType()),
    ]),
    'condition_era': OrderedDict([
        ('CONDITION_ERA_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('CONDITION_CONCEPT_ID', T.IntegerType()),
        ('CONDITION_ERA_START_DATE', T.DateType()),
        # ('CONDITION_ERA_END_DATE', T.DateType()),
    ]),
    'condition_occurrence': OrderedDict([
        ('CONDITION_OCCURRENCE_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('CONDITION_CONCEPT_ID', T.IntegerType()),
        ('CONDITION_START_DATE', T.DateType()),
        # ('CONDITION_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'death': OrderedDict([
        ('PERSON_ID', T.LongType()),
        ('DEATH_DATE', T.DateType()),
        # ('DEATH_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'dose_era': OrderedDict([
        ('DOSE_ERA_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('DRUG_CONCEPT_ID', T.IntegerType()),
        # ('UNIT_CONCEPT_ID', T.IntegerType()),
        # ('DOSE_VALUE', T.FloatType()),
        ('DOSE_ERA_START_DATE', T.DateType()),
        # ('DOSE_ERA_END_DATE', T.DateType()),
    ]),
    'drug_era': OrderedDict([
        ('DRUG_ERA_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('DRUG_CONCEPT_ID', T.IntegerType()),
        ('DRUG_ERA_START_DATE', T.DateType()),
        # ('DRUG_ERA_END_DATE', T.DateType()),
    ]),
    'drug_exposure': OrderedDict([
        ('DRUG_EXPOSURE_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('DRUG_CONCEPT_ID', T.IntegerType()),
        ('DRUG_EXPOSURE_START_DATE', T.DateType()),
        # ('DRUG_EXPOSURE_END_DATE', T.DateType()),
        # ('DRUG_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'location': OrderedDict([
        ('LOCATION_ID', T.LongType()),
    ]),
    'measurement': OrderedDict([
        ('MEASUREMENT_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('MEASUREMENT_CONCEPT_ID', T.IntegerType()),
        ('MEASUREMENT_DATE', T.DateType()),
        # ('MEASUREMENT_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'observation': OrderedDict([
        ('OBSERVATION_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('OBSERVATION_CONCEPT_ID', T.IntegerType()),
        ('OBSERVATION_DATE', T.DateType()),
        # ('OBSERVATION_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'observation_period': OrderedDict([
        ('OBSERVATION_PERIOD_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('OBSERVATION_PERIOD_START_DATE', T.DateType()),
        # ('OBSERVATION_PERIOD_END_DATE', T.DateType()),
        # ('PERIOD_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'person': OrderedDict([
        ('PERSON_ID', T.LongType()),
        # ('GENDER_CONCEPT_ID', T.IntegerType()),
        ('YEAR_OF_BIRTH', T.IntegerType()),
        # ('RACE_CONCEPT_ID', T.IntegerType()),
        # ('ETHNICITY_CONCEPT_ID', T.IntegerType()),
    ]),
    'procedure_occurrence': OrderedDict([
        ('PROCEDURE_OCCURRENCE_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('PROCEDURE_CONCEPT_ID', T.IntegerType()),
        ('PROCEDURE_DATE', T.DateType()),
        # ('PROCEDURE_TYPE_CONCEPT_ID', T.IntegerType()),
    ]),
    'provider': OrderedDict([
        ('PROVIDER_ID', T.LongType()),
    ]),
    'visit_occurrence': OrderedDict([
        ('VISIT_OCCURRENCE_ID', T.LongType()),
        ('PERSON_ID', T.LongType()),
        ('VISIT_CONCEPT_ID', T.IntegerType()),
        ('VISIT_START_DATE', T.DateType()),
        # ('VISIT_END_DATE', T.DateType()),
        # ('VISIT_TYPE_CONCEPT_ID', T.IntegerType()),
    ])
}
# Spark schema for the site-submission MANIFEST file; every field is read as
# a nullable string and interpreted downstream.
manifest_schema = T.StructType([
    T.StructField("SITE_ABBREV", T.StringType(), True),
    T.StructField("SITE_NAME", T.StringType(), True),
    T.StructField("CONTACT_NAME", T.StringType(), True),
    T.StructField("CONTACT_EMAIL", T.StringType(), True),
    T.StructField("CDM_NAME", T.StringType(), True),
    T.StructField("CDM_VERSION", T.StringType(), True),
    T.StructField("VOCABULARY_VERSION", T.StringType(), True),
    T.StructField("N3C_PHENOTYPE_YN", T.StringType(), True),
    T.StructField("N3C_PHENOTYPE_VERSION", T.StringType(), True),
    T.StructField("SHIFT_DATE_YN", T.StringType(), True),
    T.StructField("MAX_NUM_SHIFT_DAYS", T.StringType(), True),
    T.StructField("RUN_DATE", T.StringType(), True),
    T.StructField("UPDATE_DATE", T.StringType(), True),
    T.StructField("NEXT_SUBMISSION_DATE", T.StringType(), True),
    T.StructField("CONTRIBUTION_DATE", T.StringType(), True),
])
| [
"[email protected]"
]
| |
3f6e2abacfeac461a57ba7a45a1cf5a7fed12415 | a275c7e4161c89ed3ee6289b75ad1d017634baab | /kontrollbank/pipelines.py | fb4ba7933de52e17d5cffd84c31fac2ff44fb0a5 | []
| no_license | SimeonYS/Oesterreichische-Kontrollbank-AG | c277d179aa41990458fbed76143fb48c0d8346d2 | f2aa83979c1faa52fdc18fb2802222af0de2d0e3 | refs/heads/main | 2023-04-18T01:17:55.803542 | 2021-04-29T06:34:11 | 2021-04-29T06:34:11 | 339,081,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import sqlite3
class KontrollbankPipeline:
    """Scrapy pipeline that persists scraped articles to a SQLite database,
    skipping items whose (title, date) pair is already stored.

    Fix: the connection used to be created as a *class attribute*, i.e. at
    import time and shared across all instances. It is now opened per
    instance in ``open_spider`` (Scrapy calls it before any item is
    processed), and the database path is parameterizable for testing.
    """

    def __init__(self, db_path='KontrollBank.db'):
        # Path of the SQLite file; defaults to the original hard-coded name.
        self.db_path = db_path
        self.conn = None
        self.c = None

    def open_spider(self, spider):
        """Open the database connection and ensure the articles table exists."""
        self.conn = sqlite3.connect(self.db_path)
        self.c = self.conn.cursor()
        self.c.execute("""CREATE TABLE IF NOT EXISTS articles
                    (date text, title text, link text, content text)""")

    def process_item(self, item, spider):
        """Insert the article unless an entry with the same title/date exists."""
        self.c.execute("""SELECT * FROM articles WHERE title = ? AND date = ?""",
                       (item.get('title'), item.get('date')))
        # fetchone() is enough for an existence check (was fetchall()).
        if self.c.fetchone() is not None:
            return item
        print(f"New entry added at {item['link']}")
        # Insert values
        self.c.execute("INSERT INTO articles (date, title, link, content)"
                       "VALUES (?,?,?,?)", (item.get('date'), item.get('title'), item.get('link'), item.get('content')))
        self.conn.commit()  # commit after every entry
        return item

    def close_spider(self, spider):
        """Flush any pending transaction and close the connection."""
        self.conn.commit()
        self.conn.close()
| [
"[email protected]"
]
| |
5a6eb0cb2eb972dee48c7e91616bf75ba288e65f | 101d866f8e2f84dc8f76181341180c13b38e0ecf | /utils/tes.py | 1937dc4f93ef482fe7fa346571d89d6792137995 | []
| no_license | cming091/autotest | 1d9a6f5f750c04b043a6bc45efa423f2e730b3aa | 0f6fe31a27de9bcf0697c28574b97555fe36d1e1 | refs/heads/master | 2023-06-02T18:22:24.971786 | 2021-06-21T08:52:47 | 2021-06-21T08:52:47 | 378,858,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,257 | py | import json
import requests
import logging
from utils import cfg
def register(warehouse_name):
    """Register a warehouse with the TES service.

    Returns:
        ('succ'|'fail', warehouseID) -- warehouseID is '' unless registration
        succeeded.
    """
    url = cfg.G_CONFIG_DICT['base.url_base'] + '/tes/api/warehouse/register'
    result, warehouseID = 'fail', ''
    payload = {
        'userID': "111",
        'warehouseName': warehouse_name,
        'length': 1000,
        'width': 1000,
    }
    try:
        r = requests.post(url=url, data=payload)
        if r.status_code != 200:
            logging.error(f'register error, http response code is {r.status_code}')
        else:
            res_data = r.json()
            if res_data['returnCode'] != 0:
                logging.error(f'register error, {res_data}')
            else:
                result = 'succ'
                logging.info(f'register success, {res_data}')
                warehouseID = res_data['data']['warehouseID']
    except Exception as e:
        logging.error(f'register error, {e}')
    return result, warehouseID
def register_warebasic(warehouse_name, warehouseID, warehouseCode):
    """Register a warehouse with the warebasic service; returns 'succ'/'fail'."""
    url = cfg.G_CONFIG_DICT['base.url_base'] + ':8000/wes/warebasic/warehouse/registerWarehouse'
    result = 'fail'
    payload = {
        'warehouseID': warehouseID,
        'warehouseName': warehouse_name,
        'warehouseCode': warehouseCode,
    }
    try:
        r = requests.post(url=url,
                          headers={'Content-Type': 'application/json'},
                          data=json.dumps(payload))
        if r.status_code != 200:
            logging.error(f'register_warebasic error, http response code is {r.status_code}')
        else:
            res_data = r.json()
            if res_data['returnCode'] != 0:
                logging.error(f'register_warebasic error, {res_data}')
            else:
                result = 'succ'
                logging.info(f'register success, {res_data}')
    except Exception as e:
        logging.error(f'register_warebasic error, {e}')
    return result
def upload(file_path):
    """Upload a file to the file service.

    Args:
        file_path: local path of the file to upload.

    Returns:
        (result, file_url, md5) -- result is 'succ'/'fail'; file_url and md5
        are '' unless the upload succeeded.
    """
    url = cfg.G_CONFIG_DICT['base.url_base'] + ':81/upload'
    result = 'fail'
    file_url = ''
    md5 = ''
    try:
        # Fix: open the file in a `with` block so the handle is closed after
        # the request instead of being leaked.
        with open(file_path, 'rb') as fh:
            data = {'file': fh}
            r = requests.post(url=url, files=data)
            if r.status_code == 200:
                res_data = r.json()
                if res_data['returnCode'] == 0:
                    result = 'succ'
                    logging.info(f'upload success, {res_data}')
                    file_url = res_data['data']['url']
                    md5 = res_data['data']['md5']
                else:
                    logging.error(f'upload error,data: {data} res: {res_data}')
            else:
                logging.error(f'upload error, http response code is {r.status_code}')
    except Exception as e:
        logging.error(f'upload error, {e}')
    return result, file_url, md5
def import_wareservice(md5, fileName, fileURL, warehouseID):
    """Import a map file into the wareservice via URL; returns 'succ'/'fail'."""
    url = cfg.G_CONFIG_DICT['base.url_base'] + '/tes/api/warehouse/importByURL'
    result = 'fail'
    payload = {
        'clearNodeTypeIndex': 1,
        'clearAllFrame': 1,
        'clearNodeTypeInsulate': 1,
        'md5': md5,
        'fileName': fileName,
        'fileURL': fileURL,
        'importType': 'COVER',
        'userName': 'admin',
        'warehouseID': warehouseID,
    }
    try:
        r = requests.post(url=url, data=payload)
        if r.status_code != 200:
            logging.error(f'import wareservice error, http response code is {r.status_code}')
        else:
            res_data = r.json()
            if res_data['returnCode'] != 0:
                logging.error(f'import wareservice error, {res_data}')
            else:
                result = 'succ'
                logging.info(f'import wareservice success, {res_data}')
    except Exception as e:
        logging.error(f'import wareservice error, {e}')
    return result
def import_wareservice_915(md5, fileName, fileURL, warehouseID, regionType, regionName):
    """Import a map into the wareservice for a specific region (9.15 variant).

    Unlike import_wareservice(), the clear* flags are 0 and a region is named.
    Returns 'succ'/'fail'.
    """
    url = cfg.G_CONFIG_DICT['base.url_base'] + '/tes/api/warehouse/importByURL'
    result = 'fail'
    payload = {
        'regionType': regionType,
        'regionName': regionName,
        'clearNodeTypeIndex': 0,
        'clearAllFrame': 0,
        'clearNodeTypeInsulate': 0,
        'md5': md5,
        'fileName': fileName,
        'fileURL': fileURL,
        'importType': 'COVER',
        'userName': 'admin',
        'warehouseID': warehouseID,
    }
    try:
        r = requests.post(url=url, data=payload)
        if r.status_code != 200:
            logging.error(f'import wareservice error, http response code is {r.status_code}')
        else:
            res_data = r.json()
            if res_data['returnCode'] != 0:
                logging.error(f'import wareservice error, {res_data}')
            else:
                result = 'succ'
                logging.info(f'import wareservice success, {res_data}')
    except Exception as e:
        logging.error(f'import wareservice error, {e}')
    return result
def import_warebase(fileName, fileURL, warehouseID):
    """Initialize a warehouse in warebase from a map file URL; returns 'succ'/'fail'."""
    url = cfg.G_CONFIG_DICT['base.url_base'] + '/warebase/api/warehouse/initWarehouseByUrl'
    result = 'fail'
    payload = {
        'warehouseName': fileName,
        'fileURL': fileURL,
        'warehouseID': warehouseID,
    }
    try:
        r = requests.post(url=url, data=payload)
        if r.status_code != 200:
            logging.error(f'import warebase error, http response code is {r.status_code}')
        else:
            res_data = r.json()
            if res_data['returnCode'] != 0:
                logging.error(f'import warebase error, {res_data}')
            else:
                result = 'succ'
                logging.info(f'import warebase success, {res_data}')
    except Exception as e:
        logging.error(f'import warebase error, {e}')
    return result
def import_warebasic(warehouseCode, regionCode, regionName, regionType, fileURL):
    """Import a region map into warebasic from a file URL; returns 'succ'/'fail'."""
    url = cfg.G_CONFIG_DICT['base.url_base'] + ':8000/wes/warebasic/warehouse/importMapByFileUrl'
    result = 'fail'
    payload = {
        'warehouseCode': warehouseCode,
        'regionCode': regionCode,
        'regionName': regionName,
        'regionType': regionType,
        'fileUrl': fileURL,
    }
    try:
        r = requests.post(url=url,
                          headers={'Content-Type': 'application/json'},
                          data=json.dumps(payload))
        if r.status_code != 200:
            logging.error(f'import warebasic error, http response code is {r.status_code}')
        else:
            res_data = r.json()
            if res_data['returnCode'] != 0:
                logging.error(f'import warebasic error, {res_data}')
            else:
                result = 'succ'
                logging.info(f'import warebasic success, {res_data}')
    except Exception as e:
        logging.error(f'import warebasic error, {e}')
    return result
def set_warehouse_sn(warehouse_id, sn_type, robot_id, sn):
    """Bind a robot SN to a warehouse via the TES API; returns 'succ'/'fail'.

    Fix: the form body used to be hand-assembled into a raw string without
    URL-encoding, which silently corrupted the request whenever a value
    contained '&', '=' or non-ASCII characters. Passing a dict lets requests
    encode the body (and set the form-urlencoded Content-Type) correctly.
    The leftover debug print of the URL was removed.
    """
    url = cfg.G_CONFIG_DICT['base.url_base'] + '/tes/api/warehouse/setWarehouseSNInfo'
    data = {
        'warehouseID': warehouse_id,
        'snType': sn_type,
        'robotID': robot_id,
        'sn': sn,
    }
    result = 'fail'
    try:
        r = requests.post(url=url, data=data)
        if r.status_code == 200:
            res_data = r.json()
            if res_data['returnCode'] == 0:
                result = 'succ'
                logging.info(f'set warehouse sn success, {res_data}')
            else:
                logging.error(f'set warehouse sn error, {res_data}')
        else:
            logging.error(f'set warehouse sn error, http response code is {r.status_code}')
    except Exception as e:
        logging.error(f'set warehouse sn error, {e}')
    return result
def multi_add_pod(warehouse_id, pod_info):
    """Add multiple pods to a warehouse; returns 'succ'/'fail'.

    Args:
        warehouse_id: target warehouse ID.
        pod_info: JSON string describing the pods (see the commented example
            at the bottom of this module).

    Fix: the form body used to be a hand-built f-string without URL-encoding.
    Since pod_info is JSON it contains reserved characters ('{', '"', ',')
    that must be percent-encoded in a form body; passing a dict lets requests
    encode it correctly.
    """
    url = cfg.G_CONFIG_DICT['base.url_base'] + '/tes/apiv2/multiAddPod'
    data = {
        'warehouseID': warehouse_id,
        'podInfo': pod_info,
    }
    result = 'fail'
    try:
        r = requests.post(url=url, data=data)
        if r.status_code == 200:
            res_data = r.json()
            if res_data['returnCode'] == 0:
                result = 'succ'
            else:
                logging.error(f'multi add pod error, {res_data}')
        else:
            logging.error(f'multi add pod error, http response code is {r.status_code}')
    except Exception as e:
        logging.error(f'multi add pod error, {e}')
    return result
def multi_add_pod_815(warehouse_id, pod_info, request_id, client_code):
    """Add multiple pods with request/client tracking (8.15 variant).

    Fix: same as multi_add_pod -- the hand-built, unencoded form string was
    replaced with a dict so requests URL-encodes the values (pod_info is JSON
    and contains reserved characters).
    """
    url = cfg.G_CONFIG_DICT['base.url_base'] + '/tes/apiv2/multiAddPod'
    data = {
        'warehouseID': warehouse_id,
        'podInfo': pod_info,
        'requestID': request_id,
        'clientCode': client_code,
    }
    result = 'fail'
    try:
        r = requests.post(url=url, data=data)
        if r.status_code == 200:
            res_data = r.json()
            if res_data['returnCode'] == 0:
                result = 'succ'
            else:
                logging.error(f'multi add pod error, {res_data}')
        else:
            logging.error(f'multi add pod error, http response code is {r.status_code}')
    except Exception as e:
        logging.error(f'multi add pod error, {e}')
    return result
def all_resume_robots(warehouse_id):
    """Resume all robots in the given warehouse; returns 'succ'/'fail'."""
    url = cfg.G_CONFIG_DICT['base.url_base'] + '/tes/apiv2/resumeRobots'
    result = 'fail'
    payload = {
        'warehouseID': warehouse_id,
        'all': 1,
    }
    try:
        r = requests.post(url=url, data=payload)
        if r.status_code != 200:
            logging.error(f'all_resume_robots, http response code is {r.status_code}')
        else:
            res_data = r.json()
            if res_data['returnCode'] != 0:
                logging.error(f'all_resume_robots error, {res_data}')
            else:
                result = 'succ'
                logging.info(f'all_resume_robots success, {res_data}')
    except Exception as e:
        logging.error(f'all_resume_robots, {e}')
    return result
# if __name__ == "__main__":
# import os
# root_path = os.path.dirname(os.path.dirname(__file__))
# cfg_path = os.path.join(root_path, './conf/config.ini')
# cfg.load_cfg(cfg_path)
#
# file_path = '/Users/zhangjinqiang/Downloads/V1.4_big-118-hetu1.4.hetu'
# res = import_map(file_path)
# print('import map res = ', res)
#
# warehouse_id = '268370858668458740'
# sn_type = '0'
# robot_id = '37463339938'
# sn = '850809707888977'
# res = set_warehouse_sn(warehouse_id, sn_type, robot_id, sn)
# print('set_warehouse_sn, res =', res)
#
# pod_info = [
# {"podID": "201", "posID": "1568272772503", "posType": 2, "podFace": 3.14, "podType": 2},
# {"podID": "202", "posID": "1568272772518", "posType": 2, "podFace": 3.14, "podType": 2}
# ]
# res = multi_add_pod(warehouse_id, json.dumps(pod_info))
# print('multi_add_pod, res = ', res)
| [
"[email protected]"
]
| |
50b9260ebbf8a1f583eaf4f101ca5bb2e43e63f0 | 99f9ecdb35c9927698f3a3e8b5864dd7f5b8aef7 | /thingsboard_gateway/connectors/request/request_uplink_converter.py | fd6b5b3a26888c39d5c2e9274c43f6f01eef19bd | [
"Apache-2.0"
]
| permissive | luxiaosu/thingsboard-gateway | 43bd4af5f7944c68a403c8bdb125e7536e202c2b | 646bc6bb64a05aac8710c9a3e736db6ec8d5864b | refs/heads/master | 2023-07-30T13:23:18.783011 | 2021-10-07T15:41:20 | 2021-10-07T15:41:20 | 408,509,478 | 0 | 0 | Apache-2.0 | 2021-09-20T16:01:15 | 2021-09-20T16:01:14 | null | UTF-8 | Python | false | false | 810 | py | # Copyright 2021. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from thingsboard_gateway.connectors.converter import Converter, abstractmethod
class RequestUplinkConverter(Converter):
    """Abstract base for converters that translate a raw request payload
    into the gateway's internal uplink format."""
    @abstractmethod
    def convert(self, config, data):
        # Subclasses must implement the mapping from `data` (raw payload)
        # using the converter `config` section; this base does nothing.
        pass
| [
"[email protected]"
]
| |
19f06cd1078d337384ddc3da7c6e980f4f9cebf3 | 2328a25664cd427f2043164ad815698bbb021c34 | /ProfilerApp/ProfilerApp/__init__.py | 304131b26aa01fa05bbc7b96a95f61758190e504 | []
| no_license | Dishan765/Automated-Cybercrime-Profiling | 7f7f017c8d4614ddffd5f662dc7e279a8d40608e | 31a7f89be7a2ed06444bda7cb0ece52854d4e7e7 | refs/heads/master | 2023-07-04T19:35:07.333739 | 2021-08-21T19:44:41 | 2021-08-21T19:44:41 | 347,069,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from ProfilerApp.config import Config
from flask_mail import Mail
# Shared Flask extension instances, created unbound so they can be attached
# to the application inside the factory (init_app pattern).
db = SQLAlchemy()
bcrypt = Bcrypt()
login_manager = LoginManager()
# Endpoint users are redirected to when a view requires authentication.
login_manager.login_view = 'users.login'
# Flash-message category used for the "please log in" prompt.
login_manager.login_message_category = 'info'
mail = Mail()
def create_app(config_class=Config):
    """Application factory: build and configure a Flask app.

    :param config_class: configuration object whose attributes are loaded
        into ``app.config``; defaults to :class:`Config`.  The original
        code ignored this argument and always loaded ``Config`` — it is
        now honoured, so tests can pass an alternative configuration.
    :return: a fully configured :class:`flask.Flask` instance
    """
    app = Flask(__name__)
    # Bug fix: use the supplied config_class instead of the hard-coded Config.
    app.config.from_object(config_class)

    # Bind the shared, module-level extension instances to this app.
    db.init_app(app)
    bcrypt.init_app(app)
    login_manager.init_app(app)
    mail.init_app(app)

    # Blueprints are imported here (not at module top) to avoid circular
    # imports between the package and its route modules.
    from ProfilerApp.users.routes import users
    from ProfilerApp.posts.routes import posts
    from ProfilerApp.profiles.routes import profile
    from ProfilerApp.admin.routes import admin
    #from ProfilerApp.main.routes import main
    from ProfilerApp.api.routes import api

    app.register_blueprint(users)
    app.register_blueprint(posts)
    app.register_blueprint(profile)
    app.register_blueprint(admin)
    #app.register_blueprint(main)
    app.register_blueprint(api)

    return app
"[email protected]"
]
| |
65cdabf8faee54817569aebc2ce8097e24679139 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03089/s621337044.py | f7964c4a3f01cff6041508b36017d68bb3b4e4ed | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | N=int(input())
*A,=map(int,input().split())
ans=[]
for a in A:
if len(ans)<a-1:
ans=[-1]
break
else:
ans.insert(a-1,a)
for x in ans:
print(x) | [
"[email protected]"
]
| |
40caab98def245cb3c4d05ebd2fc31b31a1ee555 | 8ca52d458dda5b1a557828003240942ed02e19d9 | /4_6_4.py | e5089bcc2e205cbdc7aabdf73f0bfe4462b4cd77 | [
"MIT"
]
| permissive | rursvd/pynumerical2 | 48c8a7707c4327bfb88d0b747344cc1d71b80b69 | 4b2d33125b64a39099ac8eddef885e0ea11b237d | refs/heads/master | 2020-04-19T04:15:34.457065 | 2019-12-06T04:12:16 | 2019-12-06T04:12:16 | 167,957,944 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | n = 2
m = 3
for i in range(n):
for j in range(m):
print(i,j)
| [
"[email protected]"
]
| |
d6ee7fda37973ff33a434afd1575004b50819c0a | 751d837b8a4445877bb2f0d1e97ce41cd39ce1bd | /codegolf/hello-world-rainbow.py | 0e86441c738f717c2150798dc6f368cbd9961c53 | [
"MIT"
]
| permissive | qeedquan/challenges | d55146f784a3619caa4541ac6f2b670b0a3dd8ba | 56823e77cf502bdea68cce0e1221f5add3d64d6a | refs/heads/master | 2023-08-11T20:35:09.726571 | 2023-08-11T13:02:43 | 2023-08-11T13:02:43 | 115,886,967 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py | #!/usr/bin/env python
"""
Dealing with colors in non-markup languages often complicates things. I would like to see some variations of how color is used in different languages.
The object of this competition is to output 'Hello World' in the seven colors of the rainbow.
According to Wikipedia, these are the 7 colors.
Red #FF0000 (RGB: 255, 0, 0)
Orange #FF7F00 (RGB: 255, 127, 0)
Yellow #FFFF00 (RGB: 255, 255, 0)
Green #00FF00 (RGB: 0, 255, 0)
Blue #0000FF (RGB: 0, 0, 255)
Indigo #6600FF (RGB: 111, 0, 255)
Violet #8B00FF (RGB: 143, 0, 255)
The rules
The program must output 'Hello World'. (Doesn't necessarily need to be text, but it must be distiguishable as 'Hello World')
Each letter must be a different color.
The colors can be in any order.
You must use each of the seven colors at least once. (You may use more than the given colors)
No use of markup languages in any case.
The winner is whoever has the lowest amount of characters AND follows the rules
Bonus -1 character if it is written in DART
I will pick the winner on Jan 11 (if I remember ;D).
Good luck
"""
def rainbow(s):
    """Print *s* with each character in a bold ANSI colour.

    The foreground colour cycles through the six ANSI codes 31-36
    (red, green, yellow, blue, magenta, cyan); a reset escape sequence
    is emitted at the end, followed by a newline.
    """
    for index, char in enumerate(s):
        colour = 31 + index % 6
        print("\033[%d;1m%c" % (colour, char), end='')
    print("\033[0m")
def main():
    """Entry point: print 'Hello World!' with per-character colouring."""
    rainbow("Hello World!")
main()
| [
"[email protected]"
]
| |
a678ce0647f4fcc50b8dfa7d82c5c516efdabcc1 | 53262ee5b8437d208a80de997a8de5074a92426a | /root_numpy/tmva/__init__.py | 8286f5266882d4967b02669008fcb582b4da83cb | [
"BSD-3-Clause"
]
| permissive | scikit-hep/root_numpy | bb2c7280a5e9e15df91c86ff3c6d9bfe3464c754 | 049e487879d70dd93c97e323ba6b71c56d4759e8 | refs/heads/master | 2023-04-07T11:25:50.080999 | 2023-01-06T17:57:30 | 2023-01-06T17:57:30 | 3,823,872 | 87 | 25 | BSD-3-Clause | 2021-02-27T10:02:21 | 2012-03-25T11:40:22 | Python | UTF-8 | Python | false | false | 544 | py | try:
from . import _libtmvanumpy
except ImportError: # pragma: no cover
import warnings
warnings.warn(
"root_numpy.tmva requires that you install root_numpy with "
"the tmva interface enabled", ImportWarning)
__all__ = []
else:
from ._data import add_classification_events, add_regression_events
from ._evaluate import evaluate_reader, evaluate_method
__all__ = [
'add_classification_events',
'add_regression_events',
'evaluate_reader',
'evaluate_method',
]
| [
"[email protected]"
]
| |
458dc8884ad6649d49359f7b856a3c5baf07039e | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /wicd/rev519-537/right-branch-537/wicd/wicd-client.py | 96cef4b2cc9a14ce6f3fefe19abd026d2c623630 | []
| no_license | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 25,485 | py | """ wicd - wireless connection daemon frontend implementation
This module implements a usermode frontend for wicd. It updates connection
information, provides an (optional) tray icon, and allows for launching of
the wicd GUI and Wired Profile Chooser.
class TrayIcon() -- Parent class of TrayIconGUI and IconConnectionInfo.
class TrayConnectionInfo() -- Child class of TrayIcon which provides
and updates connection status.
class TrayIconGUI() -- Child class of TrayIcon which implements the tray.
icon itself. Parent class of StatusTrayIconGUI and EggTrayIconGUI.
class StatusTrayIconGUI() -- Implements the tray icon using a
gtk.StatusIcon.
class EggTrayIconGUI() -- Implements the tray icon using egg.trayicon.
def usage() -- Prints usage information.
def main() -- Runs the wicd frontend main loop.
"""
import sys
import gtk
import gobject
import getopt
import os
import pango
import time
from dbus import DBusException
from dbus import version as dbus_version
from wicd import wpath
from wicd import misc
from wicd import gui
from wicd.dbusmanager import DBusManager
if not (gtk.gtk_version[0] >= 2 and gtk.gtk_version[1] >= 10):
try:
import egg.trayicon
USE_EGG = True
except ImportError:
print 'Unable to load wicd.py: Missing egg.trayicon module.'
sys.exit(1)
else:
USE_EGG = False
if not dbus_version or (dbus_version < (0, 80, 0)):
import dbus.glib
else:
from dbus.mainloop.glib import DBusGMainLoop
DBusGMainLoop(set_as_default=True)
misc.RenameProcess("wicd-client")
if __name__ == '__main__':
wpath.chdir(__file__)
dbus_manager = None
daemon = None
wireless = None
wired = None
wired = None
language = misc.get_language_list_tray()
class NetworkMenuItem(gtk.ImageMenuItem):
    """ Menu item displaying a network label, rendered bold when the
    network is the active connection. """
    def __init__(self, lbl, is_active=False):
        gtk.ImageMenuItem.__init__(self)
        self.label = gtk.Label(lbl)
        if is_active:
            # Bold the label (attribute range 0-50 bytes) to mark the
            # currently connected network.
            atrlist = pango.AttrList()
            atrlist.insert(pango.AttrWeight(pango.WEIGHT_BOLD, 0, 50))
            self.label.set_attributes(atrlist)
        self.label.set_justify(gtk.JUSTIFY_LEFT)
        self.label.set_alignment(0, 0)
        self.add(self.label)
        self.label.show()
class TrayIcon:
""" Base Tray Icon class.
Base Class for implementing a tray icon to display network status.
"""
def __init__(self, use_tray, animate):
if USE_EGG:
self.tr = self.EggTrayIconGUI(use_tray)
else:
self.tr = self.StatusTrayIconGUI(use_tray)
self.icon_info = self.TrayConnectionInfo(self.tr, use_tray, animate)
class TrayConnectionInfo:
""" Class for updating the tray icon status. """
def __init__(self, tr, use_tray=True, animate=True):
""" Initialize variables needed for the icon status methods. """
self.last_strength = -2
self.still_wired = False
self.network = ''
self.tried_reconnect = False
self.connection_lost_counter = 0
self.tr = tr
self.use_tray = use_tray
self.last_sndbytes = -1
self.last_rcvbytes = -1
self.max_snd_gain = 10000
self.max_rcv_gain = 10000
self.animate = animate
self.update_tray_icon()
def wired_profile_chooser(self):
""" Launch the wired profile chooser. """
gui.WiredProfileChooser()
daemon.SetNeedWiredProfileChooser(False)
def set_wired_state(self, info):
""" Sets the icon info for a wired state. """
wired_ip = info[0]
self.tr.set_from_file(wpath.images + "wired.png")
self.tr.set_tooltip(language['connected_to_wired'].replace('$A',
wired_ip))
def set_wireless_state(self, info):
""" Sets the icon info for a wireless state. """
lock = ''
wireless_ip = info[0]
self.network = info[1]
strength = info[2]
cur_net_id = int(info[3])
sig_string = daemon.FormatSignalForPrinting(str(strength))
if wireless.GetWirelessProperty(cur_net_id, "encryption"):
lock = "-lock"
self.tr.set_tooltip(language['connected_to_wireless']
.replace('$A', self.network)
.replace('$B', sig_string)
.replace('$C', str(wireless_ip)))
self.set_signal_image(int(strength), lock)
def set_connecting_state(self, info):
""" Sets the icon info for a connecting state. """
if info[0] == 'wired' and len(info) == 1:
cur_network = language['wired']
else:
cur_network = info[1]
self.tr.set_tooltip(language['connecting'] + " to " +
cur_network + "...")
self.tr.set_from_file(wpath.images + "no-signal.png")
def set_not_connected_state(self, info):
""" Set the icon info for the not connected state. """
self.tr.set_from_file(wpath.images + "no-signal.png")
if wireless.GetKillSwitchEnabled():
status = (language['not_connected'] + " (" +
language['killswitch_enabled'] + ")")
else:
status = language['not_connected']
self.tr.set_tooltip(status)
        def update_tray_icon(self, state=None, info=None):
            """ Updates the tray icon and current connection status.

            When called without arguments, the current state and info are
            fetched from the daemon over D-Bus.  Returns True if a known
            state was rendered, False otherwise (or when no tray is used).
            """
            if not self.use_tray: return False
            if not state or not info:
                [state, info] = daemon.GetConnectionStatus()
            if state == misc.WIRED:
                self.set_wired_state(info)
            elif state == misc.WIRELESS:
                self.set_wireless_state(info)
            elif state == misc.CONNECTING:
                self.set_connecting_state(info)
            elif state in (misc.SUSPENDED, misc.NOT_CONNECTED):
                self.set_not_connected_state(info)
            else:
                print 'Invalid state returned!!!'
                return False
            return True
def set_signal_image(self, wireless_signal, lock):
""" Sets the tray icon image for an active wireless connection. """
if self.animate:
prefix = self.get_bandwidth_state()
else:
prefix = 'idle-'
if daemon.GetSignalDisplayType() == 0:
if wireless_signal > 75:
signal_img = "high-signal"
elif wireless_signal > 50:
signal_img = "good-signal"
elif wireless_signal > 25:
signal_img = "low-signal"
else:
signal_img = "bad-signal"
else:
if wireless_signal >= -60:
signal_img = "high-signal"
elif wireless_signal >= -70:
signal_img = "good-signal"
elif wireless_signal >= -80:
signal_img = "low-signal"
else:
signal_img = "bad-signal"
img_file = ''.join([wpath.images, prefix, signal_img, lock, ".png"])
self.tr.set_from_file(img_file)
        def get_bandwidth_state(self):
            """ Determines what network activity state we are in.

            Reads the kernel rx/tx byte counters for the wireless
            interface from sysfs and classifies current activity.
            Returns one of 'both-', 'transmitting-', 'receiving-' or
            'idle-', used as a prefix for the tray icon filename.
            """
            transmitting = False
            receiving = False
            dev_dir = '/sys/class/net/'
            wiface = daemon.GetWirelessInterface()
            for fldr in os.listdir(dev_dir):
                if fldr == wiface:
                    dev_dir = dev_dir + fldr + "/statistics/"
                    break
            try:
                rcvbytes = int(open(dev_dir + "rx_bytes", "r").read().strip())
                sndbytes = int(open(dev_dir + "tx_bytes", "r").read().strip())
            except IOError:
                # Counters unreadable (e.g. interface vanished): treat as idle.
                sndbytes = None
                rcvbytes = None
            if not rcvbytes or not sndbytes:
                return 'idle-'
            # Receive direction.
            activity = self.is_network_active(rcvbytes, self.max_rcv_gain,
                                              self.last_rcvbytes)
            receiving = activity[0]
            self.max_rcv_gain = activity[1]
            self.last_rcvbytes = activity[2]
            # Transmit direction.
            activity = self.is_network_active(sndbytes, self.max_snd_gain,
                                              self.last_sndbytes)
            transmitting = activity[0]
            self.max_snd_gain = activity[1]
            self.last_sndbytes = activity[2]
            if transmitting and receiving:
                return 'both-'
            elif transmitting:
                return 'transmitting-'
            elif receiving:
                return 'receiving-'
            else:
                return 'idle-'
def is_network_active(self, bytes, max_gain, last_bytes):
""" Determines if a network is active.
Determines if a network is active by looking at the
number of bytes sent since the previous check. This method
is generic, and can be used to determine activity in both
the sending and receiving directions.
Returns:
A tuple containing three elements:
1) a boolean specifying if the network is active.
2) an int specifying the maximum gain the network has had.
3) an int specifying the last recorded number of bytes sent.
"""
active = False
if last_bytes == -1:
last_bytes = bytes
elif bytes > (last_bytes + float(max_gain / 20.0)):
last_bytes = bytes
active = True
gain = bytes - last_bytes
if gain > max_gain:
max_gain = gain
return (active, max_gain, last_bytes)
class TrayIconGUI(object):
""" Base Tray Icon UI class.
Implements methods and variables used by both egg/StatusIcon
tray icons.
"""
def __init__(self, use_tray):
menu = """
<ui>
<menubar name="Menubar">
<menu action="Menu">
<menu action="Connect">
</menu>
<separator/>
<menuitem action="About"/>
<menuitem action="Quit"/>
</menu>
</menubar>
</ui>
"""
actions = [
('Menu', None, 'Menu'),
('Connect', gtk.STOCK_CONNECT, "Connect"),
('About', gtk.STOCK_ABOUT, '_About...', None,
'About wicd-tray-icon', self.on_about),
('Quit',gtk.STOCK_QUIT,'_Quit',None,'Quit wicd-tray-icon',
self.on_quit),
]
actg = gtk.ActionGroup('Actions')
actg.add_actions(actions)
self.manager = gtk.UIManager()
self.manager.insert_action_group(actg, 0)
self.manager.add_ui_from_string(menu)
self.menu = (self.manager.get_widget('/Menubar/Menu/About').
props.parent)
self.gui_win = None
self.current_icon_path = None
self.use_tray = use_tray
self._is_scanning = False
net_menuitem = self.manager.get_widget("/Menubar/Menu/Connect/")
net_menuitem.connect("activate", self.on_net_menu_activate)
def tray_scan_started(self):
""" Callback for when a wireless scan is started. """
self._is_scanning = True
self.init_network_menu()
def tray_scan_ended(self):
""" Callback for when a wireless scan finishes. """
self._is_scanning = False
self.populate_network_menu()
def on_activate(self, data=None):
""" Opens the wicd GUI. """
self.toggle_wicd_gui()
def on_quit(self, widget=None):
""" Closes the tray icon. """
sys.exit(0)
def on_about(self, data=None):
""" Opens the About Dialog. """
dialog = gtk.AboutDialog()
dialog.set_name('Wicd Tray Icon')
dialog.set_version('2.0')
dialog.set_comments('An icon that shows your network connectivity')
dialog.set_website('http://wicd.net')
dialog.run()
dialog.destroy()
def _add_item_to_menu(self, net_menu, lbl, type_, n_id, is_connecting,
is_active):
""" Add an item to the network list submenu. """
def network_selected(widget, net_type, net_id):
""" Callback method for a menu item selection. """
if net_type == "__wired__":
wired.ConnectWired()
else:
wireless.ConnectWireless(net_id)
item = NetworkMenuItem(lbl, is_active)
image = gtk.Image()
if type_ == "__wired__":
image.set_from_icon_name("network-wired", 2)
else:
pb = gtk.gdk.pixbuf_new_from_file_at_size(self._get_img(n_id),
20, 20)
image.set_from_pixbuf(pb)
del pb
item.set_image(image)
del image
item.connect("activate", network_selected, type_, n_id)
net_menu.append(item)
item.show()
if is_connecting:
item.set_sensitive(False)
del item
def _get_img(self, net_id):
""" Determines which image to use for the wireless entries. """
def fix_strength(val, default):
""" Assigns given strength to a default value if needed. """
return val is not None and int(val) or default
def get_prop(prop):
return wireless.GetWirelessProperty(net_id, prop)
strength = fix_strength(get_prop("quality"), -1)
dbm_strength = fix_strength(get_prop('strength'), -100)
if daemon.GetWPADriver() == 'ralink legacy' or \
daemon.GetSignalDisplayType() == 1:
if dbm_strength >= -60:
signal_img = 'signal-100.png'
elif dbm_strength >= -70:
signal_img = 'signal-75.png'
elif dbm_strength >= -80:
signal_img = 'signal-50.png'
else:
signal_img = 'signal-25.png'
else:
if strength > 75:
signal_img = 'signal-100.png'
elif strength > 50:
signal_img = 'signal-75.png'
elif strength > 25:
signal_img = 'signal-50.png'
else:
signal_img = 'signal-25.png'
return wpath.images + signal_img
def on_net_menu_activate(self, item):
""" Trigger a background scan to populate the network menu.
Called when the network submenu is moused over. We
sleep briefly, clear pending gtk events, and if
we're still being moused over we trigger a scan.
This is to prevent scans when the user is just
mousing past the menu to select another menu item.
"""
def dummy(x=None): pass
if self._is_scanning:
return True
self.init_network_menu()
time.sleep(.4)
while gtk.events_pending():
gtk.main_iteration()
if item.state != gtk.STATE_PRELIGHT:
return True
wireless.Scan(reply_handler=dummy, error_handler=dummy)
def populate_network_menu(self, data=None):
""" Populates the network list submenu. """
def get_prop(net_id, prop):
return wireless.GetWirelessProperty(net_id, prop)
net_menuitem = self.manager.get_widget("/Menubar/Menu/Connect/")
submenu = net_menuitem.get_submenu()
self._clear_menu(submenu)
is_connecting = daemon.CheckIfConnecting()
num_networks = wireless.GetNumberOfNetworks()
[status, info] = daemon.GetConnectionStatus()
if daemon.GetAlwaysShowWiredInterface() or \
wired.CheckPluggedIn():
if status == misc.WIRED:
is_active = True
else:
is_active = False
self._add_item_to_menu(submenu, "Wired Network", "__wired__", 0,
is_connecting, is_active)
sep = gtk.SeparatorMenuItem()
submenu.append(sep)
sep.show()
if num_networks > 0:
for x in range(0, num_networks):
essid = get_prop(x, "essid")
if status == misc.WIRELESS and info[1] == essid:
is_active = True
else:
is_active = False
self._add_item_to_menu(submenu, essid, "wifi", x,
is_connecting, is_active)
else:
no_nets_item = gtk.MenuItem(language['no_wireless_networks_found'])
no_nets_item.set_sensitive(False)
no_nets_item.show()
submenu.append(no_nets_item)
net_menuitem.show()
def init_network_menu(self):
""" Set the right-click menu for to the scanning state. """
net_menuitem = self.manager.get_widget("/Menubar/Menu/Connect/")
submenu = net_menuitem.get_submenu()
self._clear_menu(submenu)
loading_item = gtk.MenuItem(language['scanning'] + "...")
loading_item.set_sensitive(False)
loading_item.show()
submenu.append(loading_item)
net_menuitem.show()
def _clear_menu(self, menu):
""" Clear the right-click menu. """
for item in menu.get_children():
menu.remove(item)
item.destroy()
def toggle_wicd_gui(self):
""" Toggles the wicd GUI. """
if not self.gui_win:
self.gui_win = gui.appGui(dbus_manager)
bus = dbus_manager.get_bus()
bus.add_signal_receiver(self.gui_win.dbus_scan_finished,
'SendEndScanSignal',
'org.wicd.daemon.wireless')
bus.add_signal_receiver(self.gui_win.dbus_scan_started,
'SendStartScanSignal',
'org.wicd.daemon.wireless')
bus.add_signal_receiver(self.gui_win.update_connect_buttons,
'StatusChanged', 'org.wicd.daemon')
elif not self.gui_win.is_visible:
self.gui_win.show_win()
else:
self.gui_win.exit()
return True
class EggTrayIconGUI(TrayIconGUI):
""" Tray Icon for gtk < 2.10.
Uses the deprecated egg.trayicon module to implement the tray icon.
Since it relies on a deprecated module, this class is only used
for machines running versions of GTK < 2.10.
"""
def __init__(self, use_tray=True):
"""Initializes the tray icon"""
TrayIcon.TrayIconGUI.__init__(self, use_tray)
self.use_tray = use_tray
if not use_tray:
self.toggle_wicd_gui()
return
self.tooltip = gtk.Tooltips()
self.eb = gtk.EventBox()
self.tray = egg.trayicon.TrayIcon("WicdTrayIcon")
self.pic = gtk.Image()
self.tooltip.set_tip(self.eb, "Initializing wicd...")
self.pic.set_from_file("images/no-signal.png")
self.eb.connect('button_press_event', self.tray_clicked)
self.eb.add(self.pic)
self.tray.add(self.eb)
self.tray.show_all()
def tray_clicked(self, widget, event):
""" Handles tray mouse click events. """
if event.button == 1:
self.toggle_wicd_gui()
elif event.button == 3:
self.init_network_menu()
self.menu.popup(None, None, None, event.button, event.time)
def set_from_file(self, val=None):
""" Calls set_from_file on the gtk.Image for the tray icon. """
if not self.use_tray: return
self.pic.set_from_file(val)
def set_tooltip(self, val):
""" Set the tooltip for this tray icon.
Sets the tooltip for the gtk.ToolTips associated with this
tray icon.
"""
if not self.use_tray: return
self.tooltip.set_tip(self.eb, val)
class StatusTrayIconGUI(gtk.StatusIcon, TrayIconGUI):
""" Class for creating the wicd tray icon on gtk > 2.10.
Uses gtk.StatusIcon to implement a tray icon.
"""
def __init__(self, use_tray=True):
TrayIcon.TrayIconGUI.__init__(self, use_tray)
self.use_tray = use_tray
if not use_tray:
self.toggle_wicd_gui()
return
gtk.StatusIcon.__init__(self)
self.current_icon_path = ''
self.set_visible(True)
self.connect('activate', self.on_activate)
self.connect('popup-menu', self.on_popup_menu)
self.set_from_file(wpath.images + "no-signal.png")
self.set_tooltip("Initializing wicd...")
def on_popup_menu(self, status, button, timestamp):
""" Opens the right click menu for the tray icon. """
self.init_network_menu()
self.menu.popup(None, None, None, button, timestamp)
def set_from_file(self, path = None):
""" Sets a new tray icon picture. """
if not self.use_tray: return
if path != self.current_icon_path:
self.current_icon_path = path
gtk.StatusIcon.set_from_file(self, path)
def usage():
    """ Print usage information. """
    # NOTE: Python 2 print-statement syntax; the whole client targets
    # Python 2 / PyGTK.
    print """
wicd 1.50
wireless (and wired) connection daemon front-end.
Arguments:
\t-n\t--no-tray\tRun wicd without the tray icon.
\t-h\t--help\t\tPrint this help information.
\t-a\t--no-animate\tRun the tray without network traffic tray animations.
"""
def setup_dbus():
    """ Connect to the wicd daemon over D-Bus.

    Populates the module-level daemon/wireless/wired proxy objects.
    If the first connection fails, prompts the user to start the daemon
    and retries once; shows an error dialog and exits on failure.
    """
    global bus, daemon, wireless, wired, dbus_manager
    dbus_manager = DBusManager()
    try:
        dbus_manager.connect_to_dbus()
    except DBusException:
        print "Can't connect to the daemon, trying to start it automatically..."
        misc.PromptToStartDaemon()
        try:
            dbus_manager.connect_to_dbus()
        except DBusException:
            gui.error(None, "Could not connect to wicd's D-Bus interface. " +
                      "Make sure the daemon is started.")
            sys.exit(1)
    dbus_ifaces = dbus_manager.get_dbus_ifaces()
    daemon = dbus_ifaces['daemon']
    wireless = dbus_ifaces['wireless']
    wired = dbus_ifaces['wired']
    return True
def main(argv):
    """ The main frontend program.

    Parses command-line options, connects to the daemon over D-Bus,
    builds the tray icon (or the standalone GUI with --no-tray), wires
    up daemon status signals and enters the GLib main loop.

    Keyword arguments:
    argv -- The arguments passed to the script.
    """
    use_tray = True
    animate = True
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'nha', ['help', 'no-tray',
                                                         'no-animate'])
    except getopt.GetoptError:
        # Unknown option: show help and exit with a usage error code.
        usage()
        sys.exit(2)
    for opt, a in opts:
        if opt in ('-h', '--help'):
            usage()
            sys.exit(0)
        elif opt in ('-n', '--no-tray'):
            use_tray = False
        elif opt in ('-a', '--no-animate'):
            animate = False
        else:
            usage()
            sys.exit(2)
    print 'Loading...'
    setup_dbus()
    if not use_tray:
        # Standalone mode: show the full GUI and block in the main loop.
        the_gui = gui.appGui()
        the_gui.standalone = True
        mainloop = gobject.MainLoop()
        mainloop.run()
        sys.exit(0)
    tray_icon = TrayIcon(use_tray, animate)
    # If the daemon queued a wired-profile-chooser request while no client
    # was running, honour it now.
    if daemon.GetNeedWiredProfileChooser():
        daemon.SetNeedWiredProfileChooser(False)
        tray_icon.icon_info.wired_profile_chooser()
    bus = dbus_manager.get_bus()
    # Subscribe to daemon-side events: profile chooser requests, connection
    # status changes, and wireless scan start/end notifications.
    bus.add_signal_receiver(tray_icon.icon_info.wired_profile_chooser,
                            'LaunchChooser', 'org.wicd.daemon')
    bus.add_signal_receiver(tray_icon.icon_info.update_tray_icon,
                            'StatusChanged', 'org.wicd.daemon')
    bus.add_signal_receiver(tray_icon.tr.tray_scan_ended, 'SendEndScanSignal',
                            'org.wicd.daemon.wireless')
    bus.add_signal_receiver(tray_icon.tr.tray_scan_started,
                            'SendStartScanSignal', 'org.wicd.daemon.wireless')
    print 'Done.'
    mainloop = gobject.MainLoop()
    mainloop.run()
if __name__ == '__main__':
main(sys.argv)
| [
"[email protected]"
]
| |
b34289eaf185e4d32c68ce971ed745443c0712dd | 9c6837404b15c71ef13b0615701dbde49806ffa3 | /app/app.py | 48f35b56eba471c5966b68c407bbd4fabbf14d2f | [
"MIT"
]
| permissive | gladunvv/send-messages-service | d43bd68af892aeb268e2f75b91756eaa5eed1976 | a467f2daab77feb5ad9c72e02d5aa12741fc20b7 | refs/heads/master | 2020-09-17T07:10:48.814024 | 2019-12-09T20:25:37 | 2019-12-09T20:25:37 | 224,031,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | import flask
import os
# Flask application object; DEBUG enables the interactive debugger and
# auto-reload, so this configuration is for development only.
app = flask.Flask(__name__)
app.config["DEBUG"] = True
# Imported for its side effect of registering URL routes on `app`.
import routes
if __name__ == "__main__":
    app.run(debug=True)
| [
"[email protected]"
]
| |
50d49eda3d0f6a9bf8a2664a0489184a0a528b18 | efcd21234f3291e8fc561f49a7c88fc57a63e952 | /tartiflette/execution/nodes/variable_definition.py | d39c40f25262e260c7aa9a9a91e664a5891a9398 | [
"MIT"
]
| permissive | tartiflette/tartiflette | 146214a43847d2f423bf74594643c1fdefc746f1 | 421c1e937f553d6a5bf2f30154022c0d77053cfb | refs/heads/master | 2023-09-01T02:40:05.974025 | 2022-01-20T14:55:31 | 2022-01-20T14:55:31 | 119,035,565 | 586 | 39 | MIT | 2023-09-11T07:49:27 | 2018-01-26T09:56:10 | Python | UTF-8 | Python | false | false | 2,799 | py | from functools import partial
from typing import Any, Callable
from tartiflette.coercers.inputs.compute import get_input_coercer
from tartiflette.coercers.literals.compute import get_literal_coercer
from tartiflette.coercers.variables import variable_coercer
from tartiflette.constants import UNDEFINED_VALUE
from tartiflette.utils.type_from_ast import schema_type_from_ast
__all__ = ("variable_definition_node_to_executable",)
class ExecutableVariableDefinition:
    """
    Node representing a GraphQL executable variable definition.
    """

    # __slots__ keeps instances lightweight; one is created per variable
    # definition of every executed operation.
    __slots__ = (
        "name",
        "graphql_type",
        "default_value",
        "coercer",
        "definition",
    )

    def __init__(
        self,
        name: str,
        graphql_type: "GraphQLType",
        default_value: Any,
        coercer: Callable,
        definition: "VariableDefinitionNode",
    ) -> None:
        """
        :param name: the name of the variable
        :param graphql_type: the GraphQLType expected for the variable value
        :param default_value: the default value of the variable
        :param coercer: callable to use when coercing the user input value
        :param definition: the variable definition AST node
        :type name: str
        :type graphql_type: GraphQLType
        :type default_value: Any
        :type coercer: Callable
        :type definition: VariableDefinitionNode
        """
        self.name = name
        self.graphql_type = graphql_type
        self.default_value = default_value
        # Bind this definition as the coercer's first argument so callers
        # can invoke self.coercer(...) with only the remaining arguments.
        self.coercer = partial(coercer, self)
        self.definition = definition
def variable_definition_node_to_executable(
    schema: "GraphQLSchema", variable_definition_node: "VariableDefinitionNode"
) -> "ExecutableVariableDefinition":
    """
    Converts a variable definition AST node into an executable variable
    definition.
    :param schema: the GraphQLSchema instance linked to the engine
    :param variable_definition_node: the variable definition AST node to treat
    :type schema: GraphQLSchema
    :type variable_definition_node: VariableDefinitionNode
    :return: an executable variable definition
    :rtype: ExecutableVariableDefinition
    """
    # Resolve the AST type node to the concrete GraphQLType in the schema.
    graphql_type = schema_type_from_ast(schema, variable_definition_node.type)
    return ExecutableVariableDefinition(
        name=variable_definition_node.variable.name.value,
        graphql_type=graphql_type,
        # UNDEFINED_VALUE is a sentinel distinguishing "no default given"
        # from an explicit default that is falsy.
        default_value=variable_definition_node.default_value
        or UNDEFINED_VALUE,
        coercer=partial(
            variable_coercer,
            input_coercer=partial(
                get_input_coercer(graphql_type), variable_definition_node
            ),
            literal_coercer=get_literal_coercer(graphql_type),
        ),
        definition=variable_definition_node,
    )
| [
"[email protected]"
]
| |
0fac912558de9a1141bb62d3223f1aa8fd825e70 | 1b9075ffea7d4b846d42981b41be44238c371202 | /2008/devel/desktop/xfce4/goodies/xfce4-notifyd/actions.py | 0be89f389aad103384a5f9e18a9beb460910be54 | []
| no_license | pars-linux/contrib | bf630d4be77f4e484b8c6c8b0698a5b34b3371f4 | 908210110796ef9461a1f9b080b6171fa022e56a | refs/heads/master | 2020-05-26T20:35:58.697670 | 2011-07-11T11:16:38 | 2011-07-11T11:16:38 | 82,484,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
    # Configure the build with helper binaries under /usr/lib/xfce4 and
    # without static archives.
    autotools.configure('--libexecdir=/usr/lib/xfce4 \
                         --disable-static')
def build():
    # Run the default make target.
    autotools.make()
def install():
    # Install into the staging directory and ship the standard docs.
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
    pisitools.dodoc("AUTHORS", "ChangeLog", "COPYING", "README")
| [
"MeW@a748b760-f2fe-475f-8849-a8a11d7a3cd2"
]
| MeW@a748b760-f2fe-475f-8849-a8a11d7a3cd2 |
4e078c68276aaed1c1699174d8b734d478bb44ce | ff85002de8fc3e8d38b96753f7358ea1dc8055af | /Infinite_sequence.py | 105c8cc00705bdc188dbf46bca2fbd0d97a61125 | []
| no_license | xlax007/Collection-of-Algorithms | d0ef8277e4f6dd5a27ed2a67bb720c3d867cbec9 | 4fe4d69f60b3b6f49624be135750f074216aacb9 | refs/heads/master | 2022-12-12T23:15:39.991983 | 2020-09-09T23:36:26 | 2020-09-09T23:36:26 | 294,251,463 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 9 20:27:27 2020
@author: alexi
"""
#https://codeforces.com/problemset/problem/675/A --- Alexis Galvan
def infinite_sequence():
    """Decide whether b occurs in the arithmetic sequence a, a+c, a+2c, ...

    Reads three space-separated integers a, b, c from stdin and returns
    'YES' if b == a + k*c for some integer k >= 0, otherwise 'NO'
    (Codeforces 675A).

    Fixes the original branching, which wrongly answered 'NO' for inputs
    such as "1 3 1" (a < b with step c == 1).
    """
    a, b, c = map(int, input().split())
    if a == b:
        # b is the very first term (k == 0), regardless of c.
        return 'YES'
    if c == 0:
        # Constant sequence of a's that never equals b.
        return 'NO'
    diff = b - a
    # b is reachable iff diff is a positive multiple of the step c
    # (Python's % gives exact divisibility for negative steps too).
    return 'YES' if diff % c == 0 and diff // c > 0 else 'NO'
A = infinite_sequence()
print(A) | [
"[email protected]"
]
| |
ac8d7504a26caa742184cb8d3821d953144997fa | 6847e4c855a76b9d5ed04879394adcebdb0467e1 | /fssp/migrations/0005_remove_request_response.py | 19c9061ec91e08f5c78513c05939bd8d3b7ef84a | []
| no_license | i7ionov/knd | 954227c78043841ac402b76121e0194f608f35b6 | 3f8ecd488454cedea4da3b4f72869c1dbcb24112 | refs/heads/master | 2023-06-04T08:30:22.596221 | 2021-07-02T10:52:35 | 2021-07-02T10:52:35 | 372,376,407 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | # Generated by Django 2.2 on 2021-04-26 12:30
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: removes the obsolete `response` field from
    # the fssp Request model.  Applied migrations should not be edited.

    dependencies = [
        ('fssp', '0004_request_token'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='request',
            name='response',
        ),
    ]
| [
"[email protected]"
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.