Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | type | range / classes |
|---|---|---|
| blob_id | string | length 40 to 40 |
| directory_id | string | length 40 to 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 to 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 to 40 |
| revision_id | string | length 40 to 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 ⌀ | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] ⌀ | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] ⌀ | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | list | length 1 to 1 |
| author_id | string | length 1 to 132 |
0bf79e845fd33b9d429835be29ba10711359f05a | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_43809.py | 46c9cb3640039a6bb4cfea1068a6ada8d5ad993f | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,839 | py |

import _surface
import chimera
try:
  import chimera.runCommand
except ImportError:
  pass
from VolumePath import markerset as ms
try:
  # Some Chimera/VolumePath versions expose Marker_Set directly.
  from VolumePath import Marker_Set, Link
  new_marker_set=Marker_Set
except ImportError:
  # Fall back to creating marker sets through the volume path dialog.
  from VolumePath import volume_path_dialog
  d= volume_path_dialog(True)
  new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
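# Each block below follows the same pattern: create a named marker set once,
# cache it in marker_sets, then place a single sphere with
# s.place_marker((x, y, z), (r, g, b), radius), i.e. a model-space position,
# an RGB color, and a marker radius.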
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((433.535, 406.99, 429.526), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((447.418, 453.948, 479.653), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((467.378, 502.054, 542.146), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((425.115, 371.651, 562.235), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((530.778, 644.47, 656.946), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((438.198, 440.745, 460.993), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((437.203, 440.043, 459.487), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((417.537, 459.765, 463.054), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((416.476, 487.584, 468.18), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((388.748, 490.694, 464.949), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((364.119, 477.992, 469.551), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((347.157, 459.19, 457.378), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((447.57, 428.631, 436.946), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((248.272, 495.185, 473.038), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((365.766, 624.676, 574.431), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((365.766, 624.676, 574.431), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((388.012, 606.07, 572.577), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((408.6, 586.153, 567.134), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((424.005, 564.133, 555.504), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((432.827, 541.754, 538.488), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((437.011, 521.606, 517.32), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((437.9, 504.083, 493.54), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((217.476, 583.59, 601.835), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((645.545, 413.533, 365.206), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((467.141, 522.436, 498.105), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((467.141, 522.436, 498.105), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((473.843, 496.588, 509.467), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((487.861, 480.294, 529.023), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((473.617, 468.803, 551.414), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((447.62, 373.389, 475.163), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((495.698, 561.223, 632.523), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((452.855, 425.705, 485.242), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((452.922, 425.62, 485.253), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((476.857, 438.691, 475.378), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((467.91, 453.56, 452.365), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((446.426, 466.109, 437.907), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((419.309, 472.823, 431.381), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((390.943, 476.933, 432.429), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((362.458, 478.594, 435.299), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((442.544, 505.41, 424.321), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((280.447, 450.318, 449.997), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((484.07, 496.194, 442.294), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((480.872, 483.423, 464.935), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((471.95, 455.292, 514.011), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((465.295, 429.279, 564.23), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((490.236, 364.367, 522.473), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((436.596, 440.105, 664.814), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((494.831, 466.071, 491.063), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((506.405, 489.845, 501.717), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((502.773, 511.611, 519.465), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((500.601, 518.173, 546.899), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((524.956, 512.358, 559.623), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((525.638, 494.501, 537.966), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((481.929, 460.686, 478.022), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((568.241, 529.603, 597.438), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
  chimera.openModels.add([surf_sets[k]])
| ["[email protected]"] | |

d2d04ec2e736b106410aec6e3f1cf8a40e3a840b | 748526cd3d906140c2975ed1628277152374560f | /B2BSWE/Primitives/isPowerOf2.py | bfd326ab508745b71729749b5d48dd20ece0bf68 | [] | no_license | librar127/PythonDS | d27bb3af834bfae5bd5f9fc76f152e13ce5485e1 | ec48cbde4356208afac226d41752daffe674be2c | refs/heads/master | 2021-06-14T10:37:01.303166 | 2021-06-11T03:39:08 | 2021-06-11T03:39:08 | 199,333,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py |
import math
class Solution:
    def powerOfTwo(self, input):
        '''
        :type input: int
        :rtype: bool
        '''
        if input <= 0:
            return False
        # log2 of a true power of two is a whole number, so the digit after
        # the decimal point of the rounded value should be '0'. Note that this
        # float-based test can misfire for values very close to a power of
        # two; the bitwise version below is the robust one.
        x = math.log2(input)
        num = "{:.1f}".format(x)
        decimal_num = num.split('.')[1]
        return decimal_num == '0'
    def powerOfTwo_1(self, input):
        if input <= 0:
            return False
        # A positive power of two has exactly one set bit; clearing the
        # lowest set bit with input & (input - 1) therefore leaves zero.
        return (input & (input - 1)) == 0
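# Quick cross-check of the two methods (a sketch, assuming Python 3 for
# math.log2): both must agree on small inputs.
for n in range(1, 17):
    assert Solution().powerOfTwo(n) == Solution().powerOfTwo_1(n)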
s = Solution()
print(s.powerOfTwo(6))
| ["[email protected]"] | |

82cfb880fa30a2676547051c4d517960728b2093 | b3fa4bb31add76bbff0b6f864f433ff9af7897b6 | /15.threeSum.py | 5aed0c99cb8b95952a413a89448621b7c0a709dd | [] | no_license | Aissen-Li/LeetCode | 7298225ba95d58194a5fc87c7ee3ef4d04ec4d4b | f08628e3ce639d1e3f35a2bd3af14cc2b67d7249 | refs/heads/master | 2020-12-30T08:03:17.277924 | 2020-09-25T08:20:53 | 2020-09-25T08:20:53 | 238,919,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 897 | py |

class Solution:
def threeSum(self, nums):
n = len(nums)
if not nums or n < 3:
return []
nums.sort()
res = []
        for i in range(n):
            # Once the anchor value is positive, no triplet summing to zero remains.
            if nums[i] > 0:
                return res
            # Skip duplicate anchor values so each triplet is reported once.
            if i > 0 and nums[i] == nums[i - 1]:
                continue
L = i + 1
R = n - 1
while L < R:
if nums[i] + nums[L] + nums[R] == 0:
res.append([nums[i], nums[L], nums[R]])
while L < R and nums[L] == nums[L + 1]:
L = L + 1
while L < R and nums[R] == nums[R - 1]:
R = R - 1
L = L + 1
R = R - 1
elif nums[i] + nums[L] + nums[R] > 0:
R = R - 1
else:
L = L + 1
        return res
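# Example run (a sketch, not part of the original solution): duplicates in
# the input collapse because the anchor and both pointers skip repeats.
if __name__ == "__main__":
    print(Solution().threeSum([-1, 0, 1, 2, -1, -4]))  # [[-1, -1, 2], [-1, 0, 1]]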
"[email protected]"
]
| |
43c2a3b7eca6a04a9ff324a7a0d1c18b93ad80a9 | fbd66463324db417bf7b13f88b53336142517e54 | /tools/waf/wscript | ef37acaf316dec5fee314de20f51b3a89e655427 | [
"MIT"
]
| permissive | Python3pkg/AoikHotkey | 55f9a330741087b0eef2c2f76287b6ef5111743f | 226c9371cbd1190313b6a3ff9d2c3963f29937e8 | refs/heads/master | 2021-01-21T09:20:41.989597 | 2017-05-18T05:33:06 | 2017-05-18T05:33:06 | 91,652,541 | 0 | 0 | null | 2017-05-18T05:32:41 | 2017-05-18T05:32:40 | null | UTF-8 | Python | false | false | 44,904 | # coding: utf-8
"""
Waf commands module.
See :paramref:`aoikwafutil.create_cmd_task` for how relative paths are handled.
"""
from __future__ import absolute_import
# Standard imports
import os.path
from shutil import rmtree
import struct
import sys
# External imports
from aoikwafutil import add_options as _add_options
from aoikwafutil import build_ctx as _build_ctx
from aoikwafutil import chain_tasks as _chain_tasks
from aoikwafutil import config_ctx as _config_ctx
from aoikwafutil import create_cmd_task as _create_cmd_task
from aoikwafutil import create_node as _create_node
from aoikwafutil import get_full_python_version as _get_full_python_version
from aoikwafutil import get_python_path as _get_python_path
from aoikwafutil import git_clean as _git_clean
from aoikwafutil import mark_input as _IN
from aoikwafutil import pip_ins_req as _pip_ins_req
from aoikwafutil import pip_setup as _pip_setup
from aoikwafutil import print_ctx as _print_ctx
from aoikwafutil import print_text as _print_text
from aoikwafutil import print_title as _print_title
from aoikwafutil import virtualenv_setup as _virtualenv_setup
# Notice imports above are aliased to avoid being treated as Waf commands.
def get_pyhook_dir_name():
"""
Get pyHook directory name according to current Python version.
E.g.
- pyHook-py2.7-32bit
- pyHook-py2.7-64bit
- pyHook-py3.5-32bit
- pyHook-py3.5-64bit
:return: pyHook directory name.
"""
# Get prefix part, e.g. `pyHook-py2.7`
prefix = 'pyHook-py{0}.{1}'.format(
sys.version_info[0],
sys.version_info[1],
)
    # Get the native pointer width in bits (struct.calcsize('P') is the size
    # of a pointer in bytes), e.g. 32 or 64
    int_width = struct.calcsize('P') * 8
# If the integer width is 32
if int_width == 32:
# Get postfix part
postfix = '-32bit'
# If the integer width is 64
elif int_width == 64:
# Get postfix part
postfix = '-64bit'
# If the integer width is not 32 or 64
else:
# Get error message
msg = 'Error: Unsupported integer width: {0}.'.format(int_width)
# Raise error
raise ValueError(msg)
# Combine prefix and postfix parts to form the directory name
dir_name = prefix + postfix
# Return the directory name
return dir_name
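# For example (illustrative), a 64-bit CPython 3.5 interpreter maps to
# 'pyHook-py3.5-64bit', and a 32-bit CPython 2.7 maps to 'pyHook-py2.7-32bit'.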
def syspath_init():
"""
Prepare "sys.path" for import resolution.
:return: None.
"""
    # Get this file's directory path. Waf runs `wscript` with that directory
    # as the working directory; the assert below guards this assumption.
    my_dir = os.getcwd()
    assert os.path.isfile(os.path.join(my_dir, 'wscript'))
# Remove some paths from "sys.path" to avoid unexpected import resolution.
# For each path in the list
for path in ['', '.', my_dir]:
# If the path is in "sys.path"
if path in sys.path:
# Remove the path from "sys.path"
sys.path.remove(path)
# Get parent parent directory path
parent_parent_dir = os.path.dirname(os.path.dirname(my_dir))
# Get `src` directory path
src_dir = os.path.join(parent_parent_dir, 'src')
# Get pythonpath variable
pythonpath = os.environ.get('PYTHONPATH', '')
# Get `dep` directory path
dep_dir = os.path.join(src_dir, 'aoikhotkeydep')
# Assert the directory path exists
assert os.path.isdir(dep_dir), dep_dir
# If `aoikhotkeydep` directory path is not in `sys.path`
if dep_dir not in sys.path:
# Prepend the directory path to `sys.path`
sys.path.insert(0, dep_dir)
# Prepend the directory path to pythonpath variable
pythonpath = dep_dir + os.pathsep + pythonpath
# If the platform is Windows
if sys.platform.startswith('win'):
# Get `pyHook` directory name according to current Python version
pyhook_dir_name = get_pyhook_dir_name()
# Get `pyHook` directory path
pyhook_dir = os.path.join(dep_dir, 'pyHook_versions', pyhook_dir_name)
# Assert the directory path exists
assert os.path.isdir(pyhook_dir), pyhook_dir
# If `pyHook` directory path is not in `sys.path`
if pyhook_dir not in sys.path:
# Prepend the directory path to `sys.path`
sys.path.insert(0, pyhook_dir)
# Prepend the directory path to pythonpath variable
pythonpath += os.pathsep + pyhook_dir
# If `src` directory path is not in `sys.path`
if src_dir not in sys.path:
# Prepend the directory path to `sys.path`
sys.path.insert(0, src_dir)
# Prepend the directory path to pythonpath variable
pythonpath = src_dir + os.pathsep + pythonpath
# Update PYTHONPATH value
os.environ['PYTHONPATH'] = pythonpath
# Print title
print('----- sys.path -----')
# Print paths in `sys.path`
print('\n'.join(sys.path))
# Print title
print('===== sys.path =====')
syspath_init()
# -----
# Whether current Python is Python 2
_PY2 = (sys.version_info[0] == 2)
# =====
# -----
# `Waf` pre-defined variables.
# Application name
APPNAME = 'AoikHotkey'
# Version
VERSION = '0.5.0'
# Top directory relative path relative to this `wscript` file
top = './../..' # pylint: disable=C0103
# Output directory relative path relative to this `wscript` file
out = './../../build_waf' # pylint: disable=C0103
# =====
# -----
# Output directory relative path relative to top directory
_OUT_DIR = os.path.relpath(out, top).replace('\\', '/')
# Touch files directory relative path relative to top directory
_TOUCH_DIR = os.path.join(_OUT_DIR, '.touch').replace('\\', '/')
# =====
# -----
# Top package name.
# Some tools only work with package name instead of directory path.
_TOP_PACKAGE = 'aoikhotkey'
_TOP_PACKAGE_PATH = './src/' + _TOP_PACKAGE
# Extra code paths and directories that formatters and linters should take
_EXTRA_CODE_PATHS_AND_DIRS = [
# `setup.py` file
'./setup.py',
# `pytest`'s custom plugin module
'./tools/pytest/pytest_custom_plugin.py',
# `Waf` utility module
'./tools/waf/aoikwafutil.py',
# `Waf` commands module, i.e. this file
'./tools/waf/wscript',
]
_MAIN_PY_PATH = _TOP_PACKAGE_PATH + '/__main__.py'
# =====
# -----
# Task `pip_setup`'s `get-pip.py` file path
_PIP_SETUP_FILE = './tools/pip/get-pip.py'
# Task `pip_setup`'s touch file path for dirty checking.
# E.g. `{_TOUCH_DIR}/tools.pip.get-pip.py.py3.5.1.final.0.64bit`.
_PIP_SETUP_TOUCH = _TOUCH_DIR + '/tools.pip.get-pip.py.py' + \
_get_full_python_version()
# =====
# -----
# Task `virtualenv_setup`'s touch file path for dirty checking.
# E.g. `{_TOUCH_DIR}/tools.virtualenv.requirements.txt.py3.5.1.final.0.64bit`.
_VIRTUALENV_SETUP_TOUCH = _TOUCH_DIR + \
'/tools.virtualenv.requirements.txt.py' + _get_full_python_version()
# =====
# -----
# Development's virtual environment's directory path, relative to top
# directory. E.g. `./dev_venv.py3.5.1.final.0.64bit`.
_DEV_VENV_PATH = './build_venv/dev_venv.py' + _get_full_python_version()
# Development's virtual environment's `python` program path, relative to top
# directory. E.g. `./dev_venv.py3.5.1.final.0.64bit/bin/python`.
_DEV_VENV_PYTHON = _get_python_path(_DEV_VENV_PATH)
# =====
# -----
# Production's virtual environment's directory path, relative to top directory.
# E.g. `./prod_venv.py3.5.1.final.0.64bit`.
_PROD_VENV_PATH = './build_venv/prod_venv.py' + _get_full_python_version()
# Production's virtual environment's `python` program path, relative to top
# directory. E.g. `./prod_venv.py3.5.1.final.0.64bit/bin/python`.
_PROD_VENV_PYTHON = _get_python_path(_PROD_VENV_PATH)
# =====
# -----
# Tools' virtual environment's directory path, relative to top directory.
# E.g. `./tools_venv.py3.5.1.final.0.64bit`.
_TOOLS_VENV_PATH = './build_venv/tools_venv.py' + _get_full_python_version()
# Tools' virtual environment's `python` program path, relative to top
# directory. E.g. `./tools_venv.py3.5.1.final.0.64bit/bin/python`.
_TOOLS_VENV_PYTHON = _get_python_path(_TOOLS_VENV_PATH)
# =====
def options(ctx):
"""
Add command line options.
This is `Waf`'s pre-defined hook that will be called automatically.
:param ctx: OptionsContext object.
:return: None.
"""
# Add `aoikwafutil`'s command line options
_add_options(ctx=ctx)
def configure(ctx):
"""
Configure build environment.
This is `Waf`'s pre-defined command.
:param ctx: ConfigurationContext object.
:return: None.
"""
# Print context info
_print_ctx(ctx)
def build(ctx):
"""
Create task that builds wheel file.
This is `Waf`'s pre-defined command.
:param ctx: BuildContext object.
:return: Created task.
"""
# Create task that builds wheel file
task = build_wheel(ctx=ctx)
# Return the created task
return task
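# Illustrative usage (assuming the bundled `waf` launcher at the project
# root): run `python waf configure` once, then `python waf build`, which
# delegates to the `build_wheel` task below.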
# Non-pre-defined command functions are given plain Context object.
# Use decorator `_build_ctx` to get BuildContext object instead, because
# BuildContext object provides more methods.
@_build_ctx(pythonpath='src')
def build_wheel(ctx):
"""
Create task that builds wheel file.
:param ctx: BuildContext object.
:return: Created task.
"""
# Create task that builds wheel file
task = _create_cmd_task(
# Context
ctx=ctx,
# Command parts
parts=[
# Current Python program path
sys.executable,
# Run `setup.py`
_IN('./setup.py'),
# Build wheel file
'bdist_wheel',
# Build wheel that is usable for both Python 2 and 3
'--universal',
],
# Whether always run
always=True,
)
# Return the created task
return task
@_build_ctx(pythonpath='src')
def build_sdist(ctx):
"""
Create task that builds source distribution file.
:param ctx: BuildContext object.
:return: Created task.
"""
# Create task that builds source distribution file
task = _create_cmd_task(
# Context
ctx=ctx,
# Command parts
parts=[
# Current Python program path
sys.executable,
# Run `setup.py`
_IN('./setup.py'),
# Build source distribution file
'sdist',
# Pack in zip format
'--formats=zip',
],
# Whether always run
always=True,
)
# Return the created task
return task
@_build_ctx(pythonpath='src')
def pip_setup(ctx, always=False):
"""
Create task that sets up `pip`.
:param ctx: BuildContext object.
:param always: Whether always run.
:return: Created task.
"""
# Create task that sets up `pip`
task = _pip_setup(
# Context
ctx=ctx,
# Current Python program path
python=sys.executable,
# `get-pip.py` file path
setup_file=_PIP_SETUP_FILE,
# Touch file path for dirty checking
touch=_PIP_SETUP_TOUCH,
# Whether import module for dirty checking
check_import=ctx.options.check_import,
# Whether always run
always=always or ctx.options.always,
)
# Return the created task
return task
@_build_ctx(pythonpath='src')
def virtualenv_setup(ctx, always=False):
"""
Create task that sets up `virtualenv` package.
:param ctx: BuildContext object.
:param always: Whether always run.
:return: Created task.
"""
# Create task that sets up `virtualenv` package
task = _virtualenv_setup(
# Context
ctx=ctx,
# Current Python program path
python=sys.executable,
# Touch file path for dirty checking
touch=_VIRTUALENV_SETUP_TOUCH,
# Whether import module for dirty checking
check_import=ctx.options.check_import,
# Task `pip_setup`'s `get-pip.py` file path
pip_setup_file=_PIP_SETUP_FILE,
# Task `pip_setup`'s touch file path for dirty checking
pip_setup_touch=_PIP_SETUP_TOUCH,
# Whether always run
always=always or ctx.options.always,
)
# Return the created task
return task
def _venv_pip_ins_req(
ctx,
venv_path,
venv_add_version=False,
req_path=None,
check_import=False,
check_import_module=None,
always=False,
):
"""
Create task that sets up virtual environment and packages.
The virtual environment created is based on current Python version.
:param ctx: BuildContext object.
:param venv_path: Virtual environment directory relative path relative to
top directory.
:param venv_add_version: Whether add full Python version to virtual
environment path.
E.g.
- `.py2.7.11.final.0.32bit`
- `.py3.5.1.final.0.64bit`
:param req_path: Requirements file relative path relative to top directory.
If the value is not given, will compose the path.
If the file not exists, will create the file.
:param check_import: Whether import module for dirty checking.
:param check_import_module: Module name to import for dirty checking.
:param always: Whether always run.
:return: Created task.
"""
# Print title
_print_title('Check venv path')
# Normalize the virtual environment path
venv_path = os.path.normpath(venv_path).replace('\\', '/')
# If need add full Python version to virtual environment path
if venv_add_version:
# Add full Python version to virtual environment path
full_venv_path = venv_path + '.py' + _get_full_python_version()
# If not need add full Python version to virtual environment path
else:
# Use given virtual environment path
full_venv_path = venv_path
# Print info
_print_text('Use venv: {0}'.format(full_venv_path))
# Print end title
_print_title('Check venv path', is_end=True)
# Print title
_print_title('Check requirements file')
# If requirements file path is not given
if not req_path:
# Replace slashes with dots, for composing requirements file path.
# Notice the value not contains full Python version.
venv_path_dotted = venv_path.replace('\\', '.').replace('/', '.')
# Compose requirements file path
req_path = './tools/pip/' + venv_path_dotted + '.requirements.txt'
# Normalize the requirements file path
req_path = os.path.normpath(req_path).replace('\\', '/')
# Get requirements file node
req_node = _create_node(ctx, req_path)
# If the requirements file exists
if req_node.exists():
# Print info
_print_text('Use requirements file: {0}'.format(req_path))
# If the requirements file not exists
else:
# Print info
_print_text('Create requirements file: {0}'.format(req_path))
# If the requirements file's parent directory not exists
if not req_node.parent.exists():
# Create the requirements file's parent directory
req_node.parent.mkdir()
# Create requirements file
req_node.write('')
# Print end title
_print_title('Check requirements file', is_end=True)
# Replace slashes with dots, for composing touch file path.
req_path_dotted = req_path.replace('\\', '.').replace('/', '.')
# Replace slashes with dots, for composing touch file path.
full_venv_path_dotted = full_venv_path.replace('\\', '.').replace('/', '.')
# Create task
task = _pip_ins_req(
# Context
ctx=ctx,
# Python program path.
# This Python program is only for setting up `pip` and `virtualenv`.
# Packages listed in given requirements file are not set up in this
# Python program's environment.
python=sys.executable,
# Requirements file path
req_path=req_path,
# Virtual environment directory path.
# Will run this virtual environment's `pip`.
venv_path=full_venv_path,
# Touch file path for dirty checking
touch=(
_TOUCH_DIR + '/' + req_path_dotted + '.' +
full_venv_path_dotted
),
# Whether import module for dirty checking
check_import=check_import,
# Module name to import for dirty checking
check_import_module=check_import_module,
# Task `pip_setup`'s `get-pip.py` file path
pip_setup_file=_PIP_SETUP_FILE,
# Task `pip_setup`'s touch file path for dirty checking
pip_setup_touch=_PIP_SETUP_TOUCH,
# Task `virtualenv_setup`'s touch file path for dirty checking
virtualenv_setup_touch=_VIRTUALENV_SETUP_TOUCH,
# Whether always run
always=always or ctx.options.always,
)
# Return the created task
return task
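# Illustrative call (not used verbatim in this file): set up a venv named
# `foo_venv`, auto-creating `./tools/pip/foo_venv.requirements.txt` if absent:
#   _venv_pip_ins_req(ctx, venv_path='foo_venv', venv_add_version=True)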
@_build_ctx(pythonpath='src')
def venv(ctx, always=False):
"""
Create task that sets up virtual environment and packages according to \
command line options.
The virtual environment created is based on current Python version.
:param ctx: BuildContext object.
:param always: Whether always run.
:return: Created task.
"""
# Create task
task = _venv_pip_ins_req(
# Context
ctx=ctx,
# Virtual environment path.
# Use option `--venv`'s value. Default is `dev_venv`.
venv_path=ctx.options.venv or 'dev_venv',
# Whether add full Python version to virtual environment path.
# Use option `--venv-add-version`'s value.
venv_add_version=ctx.options.venv_add_version,
# Requirements file path.
# Use option `--req`'s value.
req_path=ctx.options.req_path,
# Whether always run
always=always or ctx.options.always,
)
# Return the created task
return task
@_build_ctx(pythonpath='src')
def dev_venv(ctx, always=False):
"""
    Create task that sets up development's virtual environment and packages.
The virtual environment created is based on current Python version.
:param ctx: BuildContext object.
:param always: Whether always run.
:return: Created task.
"""
# Create task
task = _venv_pip_ins_req(
# Context
ctx=ctx,
# Virtual environment path
venv_path=_DEV_VENV_PATH,
# Do not add full Python version to virtual environment path because
# already added.
venv_add_version=False,
# Requirements file path
req_path='./tools/pip/dev_venv.requirements.txt',
)
# Return the created task
return task
@_build_ctx(pythonpath='src')
def dev_venv_run(ctx):
"""
Create task that runs project program in development's virtual environment.
:param ctx: BuildContext object.
:return: Created task.
"""
# Create task that sets up development's virtual environment
dev_venv_task = dev_venv(ctx)
# Create task that runs project program in development's virtual
# environment
task = _create_cmd_task(
# Context
ctx=ctx,
# Command parts
parts=[
# Python program path
_IN(_DEV_VENV_PYTHON),
# Project program path
_IN(_MAIN_PY_PATH),
# Project program's command line argument
'0',
],
# Input items list
inputs=[
# Run after the task that sets up development's virtual environment
dev_venv_task,
],
# Whether always run
always=True,
)
# Return the created task
return task
@_build_ctx(pythonpath='src')
def prod_venv(ctx, always=False):
"""
    Create task that sets up production's virtual environment and packages.
The virtual environment created is based on current Python version.
:param ctx: BuildContext object.
:param always: Whether always run.
:return: Created task.
"""
# Create task
task = _venv_pip_ins_req(
# Context
ctx=ctx,
# Virtual environment path
venv_path=_PROD_VENV_PATH,
# Do not add full Python version to virtual environment path because
# already added.
venv_add_version=False,
# Requirements file path
req_path='./tools/pip/prod_venv.requirements.txt',
)
# Return the created task
return task
@_build_ctx(pythonpath='src')
def prod_venv_run(ctx):
"""
Create task that runs project program in production's virtual environment.
:param ctx: BuildContext object.
:return: Created task.
"""
# Create task that sets up production's virtual environment.
prod_venv_task = prod_venv(ctx)
# Create task that runs project program in production's virtual
# environment.
task = _create_cmd_task(
# Context
ctx=ctx,
# Command parts
parts=[
# Python program path
_IN(_PROD_VENV_PYTHON),
# Project program path
_IN(_MAIN_PY_PATH),
# Project program's command line argument
'0',
],
# Input items list
inputs=[
# Run after the task that sets up production's virtual environment
prod_venv_task,
],
# Whether always run
always=True,
)
# Return the created task
return task
@_build_ctx(pythonpath='src')
def tools_venv(ctx, always=False):
"""
Create task that sets up tools' virtual environment and packages.
The virtual environment created is based on current Python version.
:param ctx: BuildContext object.
:param always: Whether always run.
:return: Created task.
"""
# Create task
task = _venv_pip_ins_req(
# Context
ctx=ctx,
# Virtual environment path
venv_path=_TOOLS_VENV_PATH,
# Do not add full Python version to virtual environment path because
# already added.
venv_add_version=False,
# Requirements file path
req_path='./tools/pip/tools_venv.requirements.txt',
)
# Return the created task
return task
def _tools_venv_pip_ins_req(ctx, req_path, check_import_module):
"""
Create task that sets up tools' virtual environment and packages listed \
in given requirements file.
The virtual environment created is based on current Python version.
:param ctx: BuildContext object.
:param req_path: Requirements file relative path relative to top directory.
:param check_import_module: Module name to import for dirty checking.
Importing module for dirty checking is slower than merely
dirty-checking the touch file representing the package, but is more
accurate.
:return: Created task.
"""
# Create task
task = _venv_pip_ins_req(
# Context
ctx=ctx,
# Virtual environment path
venv_path=_TOOLS_VENV_PATH,
# Do not add full Python version to virtual environment path because
# already added.
venv_add_version=False,
# Requirements file path
req_path=req_path,
# Whether import module for dirty checking
check_import=ctx.options.check_import,
# Module name to import for dirty checking
check_import_module=check_import_module,
)
# Return the created task
return task
@_build_ctx(pythonpath='src')
def autoflake(ctx):
"""
Create task that runs `autoflake` to tidy code.
:param ctx: BuildContext object.
:return: Created task.
"""
# Create task that sets up packages
pip_task = _tools_venv_pip_ins_req(
# Context
ctx=ctx,
# Requirements file path
req_path='./tools/autoflake/requirements.txt',
# Module name to import for dirty checking
check_import_module='autoflake',
)
# Create task that runs `autoflake` to tidy code
task = _create_cmd_task(
# Context
ctx=ctx,
# Command parts
parts=[
# Python program path
_IN(_TOOLS_VENV_PYTHON),
# Run module
'-m',
# Module name
'autoflake',
# Remove unused imports
'--remove-all-unused-imports',
# Apply changes in-place
'--in-place',
# Traverse directories recursively
'--recursive',
# Source code directory path
_IN(_TOP_PACKAGE_PATH),
# Other code files and directories
_IN(_EXTRA_CODE_PATHS_AND_DIRS),
],
# Input items list
inputs=[
# Run after the task that sets up packages
pip_task,
],
# Whether always run
always=True,
)
# Return the created task
return task
@_build_ctx(pythonpath='src')
def autopep8(ctx):
"""
Create task that runs `autopep8` to tidy code.
:param ctx: BuildContext object.
:return: Created task.
"""
# Create task that sets up packages
pip_task = _tools_venv_pip_ins_req(
# Context
ctx=ctx,
# Requirements file path
req_path='./tools/autopep8/requirements.txt',
# Module name to import for dirty checking
check_import_module='autopep8',
)
# Create task that runs `autopep8` to tidy code
task = _create_cmd_task(
# Context
ctx=ctx,
# Command parts
parts=[
# Python program path
_IN(_TOOLS_VENV_PYTHON),
# Run module
'-m',
# Module name
'autopep8',
# Apply changes in-place
'--in-place',
# Traverse directories recursively
'--recursive',
# Source code directory path
_IN(_TOP_PACKAGE_PATH),
# Other code files and directories
_IN(_EXTRA_CODE_PATHS_AND_DIRS),
],
# Input items list
inputs=[
# Run after the task that sets up packages
pip_task,
],
# Whether always run
always=True,
)
# Return the created task
return task
@_build_ctx(pythonpath='src')
def isort(ctx, check_only=False):
"""
Create task that runs `isort` to tidy code.
:param ctx: BuildContext object.
:return: Created task.
"""
# Create task that sets up packages
pip_task = _tools_venv_pip_ins_req(
# Context
ctx=ctx,
# Requirements file path
req_path='./tools/isort/requirements.txt',
# Module name to import for dirty checking
check_import_module='isort',
)
# Create task that runs `isort` to tidy code
task = _create_cmd_task(
# Context
ctx=ctx,
# Command parts
parts=[
# Python program path
_IN(_TOOLS_VENV_PYTHON),
# Run module
'-m',
# Module name
'isort.main',
# Specify config file
'--settings-path',
# Config file path
_IN('./setup.cfg'),
# Whether check only instead of applying changes
'--check-only' if check_only else None,
# Whether show diff info
'--diff' if check_only else None,
# Add import statement to all files
'--add_import',
# Import statement to add to all files
'from __future__ import absolute_import',
# Apply changes in-place
'--apply',
# Traverse directories recursively
'--recursive',
# Source code directory path
_IN(_TOP_PACKAGE_PATH),
# Other code files and directories
_IN(_EXTRA_CODE_PATHS_AND_DIRS),
],
# Input items list
inputs=[
# Run after the task that sets up packages
pip_task,
],
# Whether always run
always=True,
)
# Return the created task
return task
@_build_ctx(pythonpath='src')
def isort_check(ctx):
"""
Create task that runs `isort` to lint code.
:param ctx: BuildContext object.
:return: Created task.
"""
# Delegate call to `isort`, with argument `check_only` set to True
return isort(ctx=ctx, check_only=True)
@_build_ctx(pythonpath='src')
def pyflakes(ctx):
"""
Create task that runs `pyflakes` to lint code.
:param ctx: BuildContext object.
:return: Created task.
"""
# Create task that sets up packages
pip_task = _tools_venv_pip_ins_req(
# Context
ctx=ctx,
# Requirements file path
req_path='./tools/pyflakes/requirements.txt',
# Module name to import for dirty checking
check_import_module='pyflakes',
)
# Create task that runs `pyflakes` to lint code
task = _create_cmd_task(
# Context
ctx=ctx,
# Command parts
parts=[
# Python program path
_IN(_TOOLS_VENV_PYTHON),
# Run module
'-m',
# Module name
'pyflakes',
# Source code directory path
_IN(_TOP_PACKAGE_PATH),
# Other code files and directories
_IN(_EXTRA_CODE_PATHS_AND_DIRS),
],
# Input items list
inputs=[
# Run after the task that sets up packages
pip_task,
],
# Whether always run
always=True,
)
# Return the created task
return task
@_build_ctx(pythonpath='src')
def flake8(ctx):
"""
Create task that runs `flake8` to lint code.
:param ctx: BuildContext object.
:return: Created task.
"""
# Create task that sets up packages
pip_task = _tools_venv_pip_ins_req(
# Context
ctx=ctx,
# Requirements file path
req_path='./tools/flake8/requirements.txt',
# Module name to import for dirty checking
check_import_module='flake8',
)
# Create task that runs `flake8` to lint code
task = _create_cmd_task(
# Context
ctx=ctx,
# Command parts
parts=[
# Python program path
_IN(_TOOLS_VENV_PYTHON),
# Run module
'-m',
# Module name
'flake8',
# Specify config file
'--config',
# Config file path
_IN('./setup.cfg'),
# Source code directory path
_IN(_TOP_PACKAGE_PATH),
# Other code files and directories
_IN(_EXTRA_CODE_PATHS_AND_DIRS),
],
# Input items list
inputs=[
# Run after the task that sets up packages
pip_task,
],
# Whether always run
always=True,
)
# Return the created task
return task
@_build_ctx(pythonpath='src')
def pycodestyle(ctx):
"""
Create task that runs `pycodestyle` to lint code.
:param ctx: BuildContext object.
:return: Created task.
"""
# Create task that sets up packages
pip_task = _tools_venv_pip_ins_req(
# Context
ctx=ctx,
# Requirements file path
req_path='./tools/pycodestyle/requirements.txt',
# Module name to import for dirty checking
check_import_module='pycodestyle',
)
# Create task that runs `pycodestyle` to lint code
task = _create_cmd_task(
# Context
ctx=ctx,
# Command parts
parts=[
# Python program path
_IN(_TOOLS_VENV_PYTHON),
# Run module
'-m',
# Module name
'pycodestyle',
# Specify config file
'--config',
# Config file path
_IN('./setup.cfg'),
# Source code directory path
_IN(_TOP_PACKAGE_PATH),
# Other code files and directories
_IN(_EXTRA_CODE_PATHS_AND_DIRS),
],
# Input items list
inputs=[
# Run after the task that sets up packages
pip_task,
],
# Whether always run
always=True,
)
# Return the created task
return task
@_build_ctx(pythonpath='src')
def pydocstyle(ctx):
"""
Create task that runs `pydocstyle` to lint code.
:param ctx: BuildContext object.
:return: Created task.
"""
# Create task that sets up packages
pip_task = _tools_venv_pip_ins_req(
# Context
ctx=ctx,
# Requirements file path
req_path='./tools/pydocstyle/requirements.txt',
# Module name to import for dirty checking
check_import_module='pydocstyle',
)
# Create task that runs `pydocstyle` to lint code
task = _create_cmd_task(
# Context
ctx=ctx,
# Command parts
parts=[
# Python program path
_IN(_TOOLS_VENV_PYTHON),
# Run module
'-m',
# Module name
'pydocstyle',
# Be verbose
'--verbose',
# Source code directory path
_IN(_TOP_PACKAGE_PATH),
# Other code files and directories
_IN(_EXTRA_CODE_PATHS_AND_DIRS),
],
# Input items list
inputs=[
# Run after the task that sets up packages
pip_task,
],
# Whether always run
always=True,
)
# Return the created task
return task
@_build_ctx(pythonpath='src')
def pylint(ctx):
"""
Create task that runs `pylint` to lint code.
:param ctx: BuildContext object.
:return: Created task.
"""
# Create task that sets up packages
pip_task = _tools_venv_pip_ins_req(
# Context
ctx=ctx,
# Requirements file path
req_path='./tools/pylint/requirements.txt',
# Module name to import for dirty checking
check_import_module='pylint',
)
# Create task that runs `pylint` to lint code
task = _create_cmd_task(
# Context
ctx=ctx,
# Command parts
parts=[
# Python program path
_IN(_TOOLS_VENV_PYTHON),
# Run module
'-m',
# Module name
'pylint',
# Specify config file
'--rcfile',
# Config file path
_IN('./tools/pylint/.pylintrc'),
# Project's top package.
# Notice `pylint` requires package directory to contain
# `__init__.py` file.
_TOP_PACKAGE,
# Other code files and directories.
# Notice `pylint` requires directories to contain `__init__.py`
# file.
_IN(_EXTRA_CODE_PATHS_AND_DIRS),
],
# Input items list
inputs=[
# Run after the task that sets up packages
pip_task,
],
# Whether always run
always=True,
)
# Return the created task
return task
@_build_ctx(pythonpath=['src', './tools/pytest'])
def pytest(ctx):
"""
Create task that runs `pytest` to test code.
:param ctx: BuildContext object.
:return: Created task.
"""
# Create task that sets up packages
pip_task = _tools_venv_pip_ins_req(
# Context
ctx=ctx,
# Requirements file path
req_path='./tools/pytest/requirements.txt',
# Module name to import for dirty checking
check_import_module='pytest',
)
# Create task that runs `pytest` to test code
task = _create_cmd_task(
# Context
ctx=ctx,
# Command parts
parts=[
# Python program path
_IN(_TOOLS_VENV_PYTHON),
# Run module
'-m',
# Module name
'pytest',
# Specify config file
'-c',
# Config file path
_IN('./setup.cfg'),
# Load plugin
'-p',
# Plugin name
'pytest_custom_plugin',
# Specify coverage plugin's start package
'--cov',
# Coverage plugin's start package
_TOP_PACKAGE,
# Specify coverage plugin's config file
'--cov-config',
# Coverage plugin's config file path
_IN('./setup.cfg'),
# Start directory path for discovering tests
_IN(_TOP_PACKAGE_PATH),
],
# Input items list
inputs=[
# Run after the task that sets up packages
pip_task,
],
# Whether always run
always=True,
)
# Return the created task
return task
@_build_ctx(pythonpath='src')
def tox(ctx):
"""
Create task that runs `tox` to test code.
:param ctx: BuildContext object.
:return: Created task.
"""
# Create task that sets up packages
pip_task = _tools_venv_pip_ins_req(
# Context
ctx=ctx,
# Requirements file path
req_path='./tools/tox/requirements.txt',
# Module name to import for dirty checking
check_import_module='tox',
)
# Create task that runs `tox` to test code
task = _create_cmd_task(
# Context
ctx=ctx,
# Command parts
parts=[
# Python program path
_IN(_TOOLS_VENV_PYTHON),
# Run module
'-m',
# Module name
'tox',
# Specify config file
'-c',
# Config file path
_IN('./tools/tox/tox.ini'),
],
# Input items list
inputs=[
# Run after the task that sets up packages
pip_task,
],
# Whether always run
always=True,
)
# Return the created task
return task
@_build_ctx(pythonpath='src')
def sphinx_setup(ctx):
"""
Create task that sets up `sphinx`.
:param ctx: BuildContext object.
:return: Created task.
"""
# Create task that sets up `sphinx`
task = _tools_venv_pip_ins_req(
# Context
ctx=ctx,
# Requirements file path
req_path='./tools/sphinx/requirements.txt',
# Module name to import for dirty checking
check_import_module='sphinx',
)
# Return the created task
return task
@_build_ctx(pythonpath='src')
def sphinx_apidoc(ctx):
"""
Create task that runs `sphinx.apidoc` to build documentation's `.rst` \
files.
:param ctx: BuildContext object.
:return: Created task.
"""
# Create task that sets up packages
pip_task = sphinx_setup(ctx=ctx)
# Create output directory node
output_dir_node = _create_node(ctx, './build_sphinx/src')
# If the output directory exists
if output_dir_node.exists():
# Delete the output directory
rmtree(output_dir_node.abspath())
# Create task that builds documentation's `.rst` files
task = _create_cmd_task(
# Context
ctx=ctx,
# Command parts
parts=[
# Python program path
_IN(_TOOLS_VENV_PYTHON),
# Run module
'-m',
# Module name
'sphinx.apidoc',
# Overwrite existing files
'--force',
# Generate a full project with `sphinx-quickstart`
'--full',
# Put documentation for each module on its own page
'--separate',
# Include private modules
'--private',
# Specify project name
'--doc-project',
# Project name
APPNAME,
# Specify output directory
'--output-dir',
# Output directory path
output_dir_node,
# Source code directory path
_IN(_TOP_PACKAGE_PATH),
],
# Input items list
inputs=[
# Run after the task that sets up packages
pip_task,
],
# Whether always run
always=True,
)
# Return the created task
return task
@_build_ctx(pythonpath='src')
def sphinx(ctx):
"""
Create task that runs `sphinx` to build documentation's HTML files.
:param ctx: BuildContext object.
:return: Created task.
"""
# Create task that builds documentation's `.rst` files
sphinx_apidoc_task = sphinx_apidoc(ctx)
# Create input directory node.
# Input is `.rst` files.
input_dir_node = _create_node(ctx, './build_sphinx/src')
# Create output directory node.
# Output is HTML files.
output_dir_node = _create_node(ctx, './build_sphinx/html')
# If the output directory exists
if output_dir_node.exists():
# Delete the output directory
rmtree(output_dir_node.abspath())
# Create task that runs `sphinx`
task = _create_cmd_task(
# Context
ctx=ctx,
# Command parts
parts=[
# Python program path
_IN(_TOOLS_VENV_PYTHON),
# Run module
'-m',
# Module name
'sphinx',
# Specify config file directory
'-c',
# Config file directory path
_IN('./tools/sphinx'),
# Overwrite existing files
'-a',
# Be verbose
'-v',
# Directory path containing input `.rst` files
input_dir_node,
# Directory path containing output HTML files
output_dir_node,
],
# Input items list
inputs=[
# Run after the task that builds documentation's `.rst` files
sphinx_apidoc_task,
],
# Whether always run
always=True,
)
# Return the created task
return task
@_build_ctx(pythonpath='src')
def tidy(ctx):
"""
Create task that runs all tools to tidy code.
:param ctx: BuildContext object.
:return: Created task.
"""
# Run tasks below in a chain
tasks = _chain_tasks([
# Remove unused imports
autoflake(ctx),
# Format code according to PEP8
autopep8(ctx),
# Sort imports
isort(ctx),
])
# Return the last task
return tasks[-1]
@_build_ctx(pythonpath='src')
def lint(ctx):
"""
Create task that runs all tools to lint code.
:param ctx: BuildContext object.
:return: Created task.
"""
# Run tasks below in a chain
tasks = _chain_tasks([
# Check imports are sorted
isort_check(ctx),
# Check unused imports
pyflakes(ctx),
# Check code style
flake8(ctx),
# Check code style
pycodestyle(ctx),
# Check doc-string style
pydocstyle(ctx),
# Check code style
# pylint(ctx),
])
# Return the last task
return tasks[-1]
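# Waf accepts several commands in one invocation, so (illustratively)
# `python waf tidy lint test` runs the formatters, the linters, and the
# test suite in sequence.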
@_build_ctx(pythonpath='src')
def test(ctx):
"""
Create task that runs all tools to test code.
:param ctx: BuildContext object.
:return: Created task.
"""
#
return pytest(ctx)
@_build_ctx(pythonpath='src')
def doc(ctx):
"""
Create task that runs all tools to build documentation.
:param ctx: BuildContext object.
:return: Created task.
"""
# Delegate call to `sphinx`
return sphinx(ctx=ctx)
@_build_ctx(pythonpath='src')
def list_py(ctx):
"""
List Python files in the project.
:param ctx: BuildContext object.
:return: None.
"""
# Find python files in `src` directory
src_node_s = _create_node(ctx, '.').ant_glob('src/**/*.py')
# Find python files in `tools` directory
tools_node_s = _create_node(ctx, '.').ant_glob('tools/**/*.py')
# Combine the file nodes
all_node_s = src_node_s + tools_node_s
# Convert nodes to paths.
# Ignore `waflib`'s files.
all_path_s = [str(x) for x in all_node_s if 'waflib' not in str(x)]
# Print the paths
_print_text('\n'.join(all_path_s))
def print_plain_ctx(ctx):
"""
Print plain context's info.
:param ctx: Context object.
:return: None.
"""
# Print given context's info
_print_ctx(ctx)
@_config_ctx
def print_config_ctx(ctx):
"""
Print configuration context's info.
:param ctx: ConfigurationContext object.
:return: None.
"""
# Print given context's info
_print_ctx(ctx)
@_build_ctx
def print_build_ctx(ctx):
"""
Print build context's info.
:param ctx: BuildContext object.
:return: None.
"""
# Print given context's info
_print_ctx(ctx)
@_build_ctx
def clean(ctx):
"""
Delete all files untracked in git.
:param ctx: BuildContext object.
:return: None.
"""
# Delete all files untracked in git
return _git_clean(ctx=ctx)
| ["[email protected]"] | |

487bdb307568cb23e3126643028eaa4294e03dab | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /cache_replacement/environment/main.py | 2983e9f90eb7725c440e25f18d22c718efc03659 | ["Apache-2.0", "CC-BY-4.0"] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 2,138 | py |

# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example script for running replacement policies."""
import argparse
import belady
import config as cfg
import environment
import numpy as np
import policy
import s4lru
import tqdm
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("policy_type", help="type of replacement policy to use")
args = parser.parse_args()
trace_path = "traces/sample_trace.csv"
config = cfg.Config.from_files_and_bindings(["spec_llc.json"], [])
env = environment.CacheReplacementEnv(config, trace_path, 0)
if args.policy_type == "belady":
replacement_policy = belady.BeladyPolicy(env)
elif args.policy_type == "lru":
replacement_policy = policy.LRU()
elif args.policy_type == "s4lru":
replacement_policy = s4lru.S4LRU(config.get("associativity"))
elif args.policy_type == "belady_nearest_neighbors":
train_env = environment.CacheReplacementEnv(config, trace_path, 0)
replacement_policy = belady.BeladyNearestNeighborsPolicy(train_env)
elif args.policy_type == "random":
replacement_policy = policy.RandomPolicy(np.random.RandomState(0))
else:
raise ValueError(f"Unsupported policy type: {args.policy_type}")
state = env.reset()
total_reward = 0
steps = 0
with tqdm.tqdm() as pbar:
while True:
action = replacement_policy.action(state)
state, reward, done, info = env.step(action)
total_reward += reward
steps += 1
pbar.update(1)
if done:
break
print("Cache hit rate: {:.4f}".format(total_reward / steps))
| ["[email protected]"] | |

092ea09c37d5f8ded3c80eafb8f1f4c4988bfae3 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part002097.py | 452adaeab91b29a10e3af9dbbda3125353042348 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,998 | py |

from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
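# Note: this module is machine-generated; the numeric suffixes in the class
# names and the "State NNNNN" comments below are labels assigned by the
# generator, not hand-written structure.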
class CommutativeMatcher70559(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({0: 1}), [
(VariableWithCount('i2.2.1.3.0', 1, 1, S(0)), Add)
]),
1: (1, Multiset({1: 1}), [
(VariableWithCount('i2.4.0', 1, 1, S(0)), Add)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Add
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher70559._instance is None:
CommutativeMatcher70559._instance = CommutativeMatcher70559()
return CommutativeMatcher70559._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 70558
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.3.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 70560
if len(subjects) >= 1:
tmp2 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.3.1.0', tmp2)
except ValueError:
pass
else:
pass
# State 70561
if len(subjects) == 0:
pass
# 0: x*f
yield 0, subst2
subjects.appendleft(tmp2)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.4.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 71673
if len(subjects) >= 1:
tmp5 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.4.1.0', tmp5)
except ValueError:
pass
else:
pass
# State 71674
if len(subjects) == 0:
pass
# 1: x*f
yield 1, subst2
subjects.appendleft(tmp5)
if len(subjects) >= 1 and isinstance(subjects[0], Mul):
tmp7 = subjects.popleft()
associative1 = tmp7
associative_type1 = type(tmp7)
subjects8 = deque(tmp7._args)
matcher = CommutativeMatcher70563.get()
tmp9 = subjects8
subjects8 = []
for s in tmp9:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp9, subst0):
pass
if pattern_index == 0:
pass
# State 70564
if len(subjects) == 0:
pass
# 0: x*f
yield 0, subst1
if pattern_index == 1:
pass
# State 71675
if len(subjects) == 0:
pass
# 1: x*f
yield 1, subst1
subjects.appendleft(tmp7)
return
yield
from matchpy.matching.many_to_one import CommutativeMatcher
from .generated_part002098 import *
from collections import deque
from matchpy.utils import VariableWithCount
from multiset import Multiset
| ["[email protected]"] | |

af465b9c4bbf6fb6244987a2abd33838a930b1f7 | ebc7607785e8bcd6825df9e8daccd38adc26ba7b | /python/baekjoon/2.algorithm/sort/11650.좌표 정렬하기.py | 3e325291e776b19d22a4a7b48e7d9eef1090dc9f | [] | no_license | galid1/Algorithm | 18d1b72b0d5225f99b193e8892d8b513a853d53a | 5bd69e73332f4dd61656ccdecd59c40a2fedb4b2 | refs/heads/master | 2022-02-12T07:38:14.032073 | 2022-02-05T08:34:46 | 2022-02-05T08:34:46 | 179,923,655 | 3 | 0 | null | 2019-06-14T07:18:14 | 2019-04-07T05:49:06 | Python | UTF-8 | Python | false | false | 1,724 | py |

# JAVA
# package com.example.java_study;
#
# import java.io.BufferedReader;
# import java.io.IOException;
# import java.io.InputStreamReader;
# import java.util.*;
#
# public class Main {
#
# public static int n;
# public static int[][] cs;
#
# public static void solve() {
# Arrays.stream(cs)
# .sorted((c1, c2) -> {
# if (c1[0] <= c2[0]) {
# if (c1[0] == c2[0]) {
# return c1[1] - c2[1];
# }
# return -1;
# } else {
# return 1;
# }
# })
# .forEach(coords -> {
# System.out.println(coords[0] + " " + coords[1]);
# });
#
#
# }
#
# public static void main(String[] args) throws IOException {
# BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
# StringTokenizer st = null;
#
# n = Integer.parseInt(br.readLine());
# cs = new int[n][2];
#
# for(int i = 0; i < n; i++) {
# st = new StringTokenizer(br.readLine());
# cs[i][0] = Integer.parseInt(st.nextToken());
# cs[i][1] = Integer.parseInt(st.nextToken());
# }
#
# solve();
#
# // 5
# // 3 4
# // 1 1
# // 1 -1
# // 2 2
# // 3 3
# }
# }
import sys
def solve():
global n, cs
cs.sort(key=lambda item: (item[0], item[1]))
for x, y in cs:
print(x, y)
n = int(sys.stdin.readline().strip())
cs = []
for _ in range(n):
cs.append(list(map(int, sys.stdin.readline().strip().split(" "))))
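# A hedged sample run (same data as the Java comment above; illustrative only):
#   stdin : 5, "3 4", "1 1", "1 -1", "2 2", "3 3"
#   stdout: "1 -1", "1 1", "2 2", "3 3", "3 4"   (sorted by x first, then y)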
solve() | [
"[email protected]"
]
| |
bb4f0d486ca028b69cc5a9b45653e9d13b67f82d | 153ec5496256058d89587c001aea5ce3a6a8d523 | /tranquil-beach/design/hit_counter.py | 27cb85ac5de99ea260d2ae9d2991dfb57d71cc02 | []
| no_license | yokolet/tranquil-beach-python | 50911147b560385e2f93efb148e5adb0fb6dbe8b | e7f486114df17918e49d6452c7047c9d90e8aef2 | refs/heads/master | 2021-08-22T15:28:02.096061 | 2020-04-29T03:34:19 | 2020-04-29T05:01:11 | 175,543,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | class HitCounter:
def __init__(self):
"""
Initialize your data structure here.
"""
self.timestamps = [] # queue
def hit(self, timestamp: int) -> None:
"""
Record a hit.
@param timestamp - The current timestamp (in seconds granularity).
"""
self.timestamps.append(timestamp)
def getHits(self, timestamp: int) -> int:
"""
Return the number of hits in the past 5 minutes.
@param timestamp - The current timestamp (in seconds granularity).
"""
while self.timestamps and self.timestamps[0] <= timestamp - 300:
self.timestamps.pop(0)
return len(self.timestamps)
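
# A minimal, hedged usage sketch (illustrative, not part of the original
# solution); the call sequence follows the classic LeetCode 362 example:
if __name__ == '__main__':
    counter = HitCounter()
    counter.hit(1)
    counter.hit(2)
    counter.hit(3)
    print(counter.getHits(4))    # 3: every hit is within the last 300 seconds
    counter.hit(300)
    print(counter.getHits(300))  # 4: nothing has expired yet
    print(counter.getHits(301))  # 3: the hit at t=1 has left the window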
| [
"[email protected]"
]
| |
1e5c56752fcd2c8d921c64f800968fe300259eec | 15e6385746ccf4b8eb6c6e302aca236021bb8781 | /pythonPart/le8_stringToInteger.py | bef8c255655d63b43f844a2ed8fde2329bf62b92 | []
| no_license | akb46mayu/Data-Structures-and-Algorithms | 11c4bbddc9b4d286e1aeaa9481eb6a620cd54746 | de98494e14fff3e2a468da681c48d60b4d1445a1 | refs/heads/master | 2021-01-12T09:51:32.618362 | 2018-05-16T16:37:18 | 2018-05-16T16:37:18 | 76,279,268 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | class Solution(object):
def myAtoi(self, str):
"""
:type str: str
:rtype: int
"""
s = str
if len(s) == 0: return 0
        ls = list(s.strip())
        if not ls: return 0  # guard: all-whitespace input would crash ls[0] below
sign = -1 if ls[0] == '-' else 1
if ls[0] in ['+', '-']: del(ls[0])
res, i = 0, 0
maxval = 2**31 - 1
minval = -2**31
n = len(ls)
while i < n and ls[i].isdigit():
res = res * 10 + ord(ls[i]) - ord('0')
i += 1
return max(minval, min(sign * res, maxval))
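
# Hedged sanity checks (illustrative only, not part of the submission):
if __name__ == '__main__':
    s = Solution()
    assert s.myAtoi("42") == 42
    assert s.myAtoi("   -42") == -42
    assert s.myAtoi("4193 with words") == 4193
    assert s.myAtoi("-91283472332") == -2**31   # clamped to 32-bit INT_MIN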
| [
"[email protected]"
]
| |
41a5d17f5ff6f87d15a77667d46606d940268829 | 01926621374435f7daf622f1ef04a51f94e3e883 | /litex/soc/cores/pwm.py | 842f24d554f89887340f66ce6b97a104904f54b9 | [
"BSD-3-Clause",
"BSD-2-Clause"
]
| permissive | betrusted-io/litex | d717513e41ff6aba54ac172e886c21479aa41752 | 8109a8e91ca8321483ccc2f58bd4fed5379bbd18 | refs/heads/master | 2022-11-23T07:11:35.297128 | 2022-02-22T11:55:00 | 2022-02-22T11:55:00 | 231,203,917 | 3 | 0 | NOASSERTION | 2020-01-01T10:48:06 | 2020-01-01T10:48:05 | null | UTF-8 | Python | false | false | 2,638 | py | #
# This file is part of LiteX.
#
# Copyright (c) 2015-2019 Florent Kermarrec <[email protected]>
# SPDX-License-Identifier: BSD-2-Clause
from migen import *
from migen.genlib.cdc import MultiReg
from litex.soc.interconnect.csr import *
# Pulse Width Modulation ---------------------------------------------------------------------------
class PWM(Module, AutoCSR):
"""Pulse Width Modulation
Provides the minimal hardware to do Pulse Width Modulation.
Pulse Width Modulation can be useful for various purposes: dim leds, regulate a fan, control
an oscillator. Software can configure the PWM width and period and enable/disable it.
"""
def __init__(self, pwm=None, clock_domain="sys", with_csr=True,
default_enable = 0,
default_width = 0,
default_period = 0):
if pwm is None:
self.pwm = pwm = Signal()
self.enable = Signal(reset=default_enable)
self.width = Signal(32, reset=default_width)
self.period = Signal(32, reset=default_period)
# # #
counter = Signal(32, reset_less=True)
sync = getattr(self.sync, clock_domain)
sync += [
If(self.enable,
counter.eq(counter + 1),
If(counter < self.width,
pwm.eq(1)
).Else(
pwm.eq(0)
),
If(counter >= (self.period - 1),
counter.eq(0)
)
).Else(
counter.eq(0),
pwm.eq(0)
)
]
if with_csr:
self.add_csr(clock_domain)
def add_csr(self, clock_domain):
self._enable = CSRStorage(description="""PWM Enable.\n
Write ``1`` to enable PWM.""",
reset = self.enable.reset)
self._width = CSRStorage(32, reset_less=True, description="""PWM Width.\n
Defines the *Duty cycle* of the PWM. PWM is active high for *Width* ``{cd}_clk`` cycles and
active low for *Period - Width* ``{cd}_clk`` cycles.""".format(cd=clock_domain),
reset = self.width.reset)
self._period = CSRStorage(32, reset_less=True, description="""PWM Period.\n
Defines the *Period* of the PWM in ``{cd}_clk`` cycles.""".format(cd=clock_domain),
reset = self.period.reset)
n = 0 if clock_domain == "sys" else 2
self.specials += [
MultiReg(self._enable.storage, self.enable, n=n),
MultiReg(self._width.storage, self.width, n=n),
MultiReg(self._period.storage, self.period, n=n),
]
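
# A hedged usage sketch of the docstring's "dim leds" case (comments only;
# assumes a LiteX SoC/Module context in which `platform.request("user_led")`
# and `self.submodules` are available):
#
#   led = platform.request("user_led")
#   self.submodules.led_pwm = PWM(
#       pwm            = led,
#       with_csr       = True,
#       default_enable = 1,
#       default_width  = 512,    # active-high for 512 of every 1024 sys_clk cycles
#       default_period = 1024,   # => 50% duty cycle
#   )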
| [
"[email protected]"
]
| |
d55c5647d9849a1fd1d7068e75cf3ca1f4954fe6 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/coverage-big-838.py | e806406e24534484ffe09e2e69ee5a5a73bc366a | []
| no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,348 | py | count:int = 0
count2:int = 0
count3:int = 0
count4:int = 0
count5:int = 0
def foo(s: str) -> int:
return len(s)
def foo2(s: str, s2: str) -> int:
return len(s)
def foo3(s: str, s2: str, s3: str) -> int:
return len(s)
def foo4(s: str, s2: str, s3: str, s4: str) -> int:
return len(s)
def foo5(s: str, s2: str, s3: str, s4: str, s5: str) -> int:
return len(s)
class bar(object):
p: bool = True
def baz(self:"bar", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar2(object):
p: bool = True
p2: bool = True
def baz(self:"bar2", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar2", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
            self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar3(object):
p: bool = True
p2: bool = True
p3: bool = True
def baz(self:"bar3", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar3", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar3", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar4(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
def baz(self:"bar4", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar4", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar4", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar4", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar5(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
p5: bool = True
def baz(self:"bar5", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar5", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar5", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz5(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int], xx5: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
x5:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
y5:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
def qux5(y: int, y2: int, y3: int, y4: int, y5: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
nonlocal x5
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
print(bar().baz([1,2]))
| [
"[email protected]"
]
| |
b747f994ed9204d8da9078e60f0ced364fd18a86 | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /eXe/rev2669-2722/right-branch-2722/prototype/editor/coursemanager.py | 8e30ec545293639879b92768dc70e4d6e532dc29 | []
| no_license | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 94,459 | py | from os.path import exists
from os import mkdir, sep, environ
from urllib import quote
import sys
from cgi import escape
from string import strip, join
from Cheetah.Template import Template
from xmlreader import readConfig
from shutil import rmtree, copyfile
import marshal
import cgitb
cgitb.enable( display=1, logdir="/var/tmp/httpd/exe" )
debug = 0
class CourseManager:
"""Manages courses for the web page.
Responsible for creating, loading, saving, and displaying courses."""
def __init__( self, preview_dir="", coursetype="" ):
if environ.has_key('QUERY_STRING'):
self.referer = environ.get('QUERY_STRING')
else:
self.referer = None
if debug: print "script: %s <br>\n" % self.referer
self.reset()
self.www_eXe_root = "/python/eXe/"
self.doc_root = "/var/www/html" + self.www_eXe_root
self.cmd = ""
self.startcgi = "start.pyg"
self.previewcgi = self.www_eXe_root + "preview.pyg"
self.preview_start_cgi = self.www_eXe_root + "start.pyg"
self.speakitcgi = self.www_eXe_root + "speak_it.pyg"
self.publish_template = self.doc_root + "idevices" + sep + "publish_content.template"
self.coursexmlfile = self.doc_root + "courses.xml"
self.idevice_templatexmlfile= self.doc_root + "idevices_template.xml"
self.topicxmlfile = "topics.xml"
self.sectionxmlfile = "sections.xml"
self.unitxmlfile = "units.xml"
self.idevicexmlfile = "idevices.xml"
self.preview_cgi_string = ""
self.settemplate( preview_dir, coursetype )
self.page_counter = 0
self.current_page = 0
self.url_dump_path = ""
self.page_url_list = []
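    # Hedged usage sketch (hypothetical identifiers; a CGI entry point such as
    # start.pyg would normally drive this):
    #   cm = CourseManager()
    #   cm.get_course_detail("1")      # loads self.dict and the page_url_list
    #   cm.show_course_content()       # renders the course page to stdout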
def reset( self ):
reload(sys)
sys.setdefaultencoding( 'iso-8859-1' )
del sys.setdefaultencoding
self.content_dict={ "heading":"", "content":"", "crumb":"" }
self.dict = {"title":"", "shorttitle":"", "coursetype":"", "graphic":"","courseidentifier":"","description":"" , "parse_h3":""}
self.topic_dict = {}
self.section_dict = {}
self.unit_dict = {}
self.idevice_dict = {}
self.orderby = {}
    def settemplate( self, preview_dir="", coursetype="" ):
        """Resolve the template paths for the given coursetype and optional preview theme."""
self.preview_dir = preview_dir
base_directory = ""
if coursetype<>"":
base_directory += coursetype + sep
if preview_dir<>"":
self.content_template_directory = base_directory + "preview" + sep + preview_dir + sep
self.preview_cgi_string = "&preview_dir=%s" % preview_dir
self.content_dict["preview_dir"] = preview_dir
else:
self.content_template_directory = base_directory + "idevices" + sep
self.template_directory = base_directory + "idevices" + sep
self.theme_template = self.content_template_directory + "index_content.template"
if debug:
print "Theme template:%s<br> \n" % self.theme_template
print "preview_cgi_string=%s\n" %( self.preview_cgi_string )
self.courseForm = self.doc_root + self.template_directory + "course_form.template"
self.topicForm = self.doc_root + self.template_directory + "topic_form.template"
self.sectionForm = self.doc_root + self.template_directory + "section_form.template"
self.unitForm = self.doc_root + self.template_directory + "unit_form.template"
self.eXe_content_template = self.doc_root + self.content_template_directory + "index_content.template"
self.course_content_template = self.doc_root + self.content_template_directory + "course_content.template"
self.topic_content_template = self.doc_root + self.content_template_directory + "topic_content.template"
self.section_content_template= self.doc_root + self.content_template_directory + "section_content.template"
self.unit_content_template = self.doc_root + self.content_template_directory + "unit_content.template"
self.course_xml_template = self.doc_root + self.template_directory + "course_xml.template"
self.topic_xml_template = self.doc_root + self.template_directory + "topic_xml.template"
self.section_xml_template = self.doc_root + self.template_directory + "section_xml.template"
self.unit_xml_template = self.doc_root + self.template_directory + "unit_xml.template"
    def showexe( self, templatefile, heading="", content="", crumb="", preview="", outline="", speakit="", return_str=0, parse_h3=0, publish_mode=0, current_url="" ):
        """Fill the theme template with heading, content and breadcrumb, then print or return the page."""
if exists( templatefile ):
if debug: print "in showexe<br>\n"
self.content_dict["header_string"] = self.dict["title"]
self.content_dict["course_banner"] = self.dict["graphic"]
if debug: print "course graphic:%s<br>\n" %self.content_dict["course_banner"]
self.content_dict["rightnav_string"] = ""
self.content_dict["footer_string"] = ""
if debug: print "in showexe: current_url=%s<br>\n" % current_url
self.set_related_urls( publish_mode=publish_mode, current_url="%s"%(current_url) )
if self.preview_dir<>"":
self.content_dict["leftnav_string"]= """<ul id="menu">\n""" + self.show_course_topics( ret_str=1, for_topic_tree=1, publish_mode=publish_mode )+"</ul>\n"
self.content_dict["preview_dir"] = self.preview_dir
else:
self.content_dict["leftnav_string"] = ""
self.content_dict["heading"]= heading
if self.dict["parse_h3"]=="yes" or parse_h3<>0:
content = self.extract_header( content )
if debug: print "content after parse_h3=%s <br>\n" % content
self.content_dict["content"] = content
if debug: print content
if self.dict["coursetype"]=="hebrew":
crumb = self.process_breadcrumb( crumb )
self.content_dict["crumb"] = crumb
self.content_dict["preview"] = preview
if speakit<>"":
self.content_dict["speakit"] = speakit
self.content_dict["outline"] = outline
FormHandle = open(templatefile, "r")
FormInput = FormHandle.read()
FormHandle.close()
x = Template( FormInput, searchList=[self.content_dict, self.dict, self.topic_dict, self.section_dict, self.unit_dict, self.orderby])
if return_str:
return x
else:
print x
else:
print "Error, template file:%s not exist!! \n" %templatefile
return
def process_breadcrumb( self, crumb ):
"""in case some title's wording is from right to left, use this function to solve ordering problem"""
crumb_list = crumb.split( '->' )
tmp_crum = "<table><tr>"
for item in crumb_list[:-1]:
tmp_crum += """<td>%s</td><td>-></td>""" % item
tmp_crum += """<td>%s</td> """ %crumb_list[-1]
tmp_crum += "</tr></table>"
return tmp_crum
def xml_string( self, template_file, dict, action="" ):
dict["www_eXe_root"] = self.www_eXe_root
if exists( template_file ):
if action=="encode+escape":
for item in dict.keys():
if item[:7]<>"graphic" and item[:5]<>"_file":
try:
dict[item]= escape( dict[item].encode() )
except UnicodeError:
dict[item]= escape( dict[item] )
elif action=="escape":
for item in dict.keys():
if item[:7]<>"graphic" and item[:5]<>"_file":
try:
dict[item]= escape( dict[item].strip() )
except:
if debug: print "dict[%s]:%s<p>\n" %( item, dict[item])
dict[item] = ', '.join( [ escape(tmp_item) for tmp_item in dict[item] ] )
elif action=="encode":
for item in dict.keys():
if item[:7]<>"graphic" and item[:5]<>"_file":
dict[item]= dict[item].encode()
FormHandle = open(template_file, "r")
FormInput = FormHandle.read()
FormHandle.close()
t = Template( FormInput, searchList=[dict, self.dict, self.topic_dict, self.section_dict, self.unit_dict, self.content_dict] )
return str(t)
else:
print "template file:%s not exist" %template_file
def savexmlfile( self, action, roottag, dict, index_identifier, targetxmlfile, xml_template, parse_h3=0 ):
if debug: print "in savexmlfile parse_h3=%d <br>\n" %parse_h3
x = '<?xml version="1.0" encoding="iso-8859-1"?>\n<%s multiple="true">\n' % roottag
if exists( targetxmlfile ):
doc = readConfig( targetxmlfile )
if doc.has_key(roottag):
if action == "up" or action == "down":
found = 0
index = 0
for node in doc[roottag]:
index = index + 1
if node[index_identifier] == dict[index_identifier]:
found = index
break
if found == 0:
print "Sorry, this %s identifier:%s is not found <br>\n" % ( roottag, index_identifier )
return
elif action == "up" and found == 1:
print "First item can not be moved upward <br>\n"
return
elif action =="down":
if found == doc[roottag].__len__():
print "Last one can not be moved downward <br>\n"
return
i = 1
for node in doc[roottag]:
if action == "update" and node[index_identifier]==dict[index_identifier]:
t = self.xml_string( xml_template, dict, "escape" )
x = x + t
elif action=="delete" and node[index_identifier]==dict[index_identifier]:
i = i + 1
continue
elif ( action=="up" and i==(found-1) ) or (action=="down" and i==found ) :
up_t = self.xml_string( xml_template, node, "escape" )
elif ( action=="up" and i == found ) or ( action=="down" and i ==(found+1) ):
down_t = self.xml_string( xml_template, node, "escape" )
x = x + down_t + up_t
else:
t = self.xml_string( xml_template, node, "escape" )
x = x + t
i = i + 1
if action == "add":
t = self.xml_string( xml_template, dict, "escape" )
x = x + t
try:
bkup_targetxmlfile = targetxmlfile + action + dict[index_identifier]
import shutil
shutil.copyfile( targetxmlfile, bkup_targetxmlfile )
except:
pass
x = x + "\n</%s>" %roottag
f = open( targetxmlfile ,"w" )
f.write( "%s" %x )
f.flush()
            f.close()
self.create_manifest_xml()
self.update_url_list()
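    # Hedged example of savexmlfile (mirrors how a topic "add" is persisted;
    # course_topicxmlfile is the per-course topics.xml path built elsewhere):
    #   self.savexmlfile("add", "topics", self.topic_dict, "topicidentifier",
    #                    course_topicxmlfile, self.topic_xml_template)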
def update_url_list( self ):
"""use this function to update url_list dictionary"""
self.page_url_list = []
self.page_url_list.append( ( "courseidentifier=%s"%self.dict["courseidentifier"], ".", sep + "index.html" ) )
if self.url_dump_path=="":
self.url_dump_path = self.doc_root + self.dict["courseidentifier"] + sep + 'url_dump'
if self.dict["courseidentifier"]:
topicxmlfilepath = self.doc_root + self.dict["courseidentifier"] + sep + 'topics.xml'
interactivexmlfilepath = self.doc_root + self.dict["courseidentifier"] + sep + 'interactive_devices.xml'
else:
print "Error while processing %s course topic <br>\n" % ( self.dict["title"] )
return
tmp_list = []
if exists( interactivexmlfilepath ):
interactive_doc = readConfig( interactivexmlfilepath )
if interactive_doc.has_key( "interactive_devices" ) and interactive_doc.__len__()>0:
for item in interactive_doc["interactive_devices"]:
if debug: print "item[0:8]:%s<br>\n" % item[0:8]
if item[0:8] =="orderby_": #save the field name, url_dump_path and a #
order_name = item[8:]
if debug: print "Order_name:%s<br>\n" % order_name
tmp_tuple = ( order_name, "%s_%s"% (self.url_dump_path, order_name), [] )
tmp_list.append( tmp_tuple )
if debug: print "Orderby list:%s<br>\n" %tmp_list
if exists( topicxmlfilepath ):
doc = readConfig( topicxmlfilepath )
if doc.has_key( "topics" ) and doc.__len__()>0:
for node in doc["topics"]:
tmp_url = "courseidentifier=%s&topicidentifier=%s"%( self.dict["courseidentifier"], node["topicidentifier"] )
local_tmp_url = "%s" % node["topicidentifier"]
local_url_name = sep + "topic%s.html" % node["topicidentifier"]
self.page_url_list.append( ( tmp_url, local_tmp_url, local_url_name ) )
if tmp_list.__len__()>0:
for item in tmp_list:
tmp_tuple = ( node[ item[0] ], tmp_url, local_tmp_url, local_url_name )
item[2].append( tmp_tuple )
if debug: print "1:%s 2:%s 3:%s<br>\n" %( item[0], item[1], item[2] )
sectionxmlfilepath = self.doc_root + self.dict["courseidentifier"] + sep + node["topicidentifier"] + sep + 'sections.xml'
if exists( sectionxmlfilepath ):
doc1 = readConfig( sectionxmlfilepath )
if doc1.has_key( "sections" ) and doc1.__len__()>0:
for node1 in doc1["sections"]:
tmp_url1 = "courseidentifier=%s&topicidentifier=%s§ionidentifier=%s"%( self.dict["courseidentifier"], node["topicidentifier"], node1["sectionidentifier"] )
local_tmp_url1 = node["topicidentifier"] + sep + node1["sectionidentifier"]
local_url1_name = sep + "section%s.html" % node1["sectionidentifier"]
self.page_url_list.append( ( tmp_url1, local_tmp_url1, local_url1_name ) )
if tmp_list.__len__() > 0:
for item in tmp_list:
tmp_tuple = ( node1[ item[0] ], tmp_url1, local_tmp_url1, local_url1_name )
item[2].append( tmp_tuple )
if debug: print "1:%s 2:%s 3:%s<br>\n" %( item[0] , item[1], item[2])
unitxmlfilepath = self.doc_root + self.dict["courseidentifier"] + sep + node["topicidentifier"] + sep + node1["sectionidentifier"] + sep +'units.xml'
if exists( unitxmlfilepath ):
doc2 = readConfig( unitxmlfilepath )
if doc2.has_key( "units" ) and doc2.__len__()>0:
for node2 in doc2["units"]:
tmp_url2 = "courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&unitidentifier=%s"%( self.dict["courseidentifier"], node["topicidentifier"], node1["sectionidentifier"], node2["unitidentifier"] )
local_tmp_url2 = node["topicidentifier"] + sep + node1["sectionidentifier"] + sep + node2["unitidentifier"]
local_url2_name = sep + "unit%s.html"% node2["unitidentifier"]
self.page_url_list.append( ( tmp_url2, local_tmp_url2, local_url2_name ) )
if tmp_list.__len__()>0:
for item in tmp_list:
tmp_tuple = ( node2[ item[0] ], tmp_url2, local_tmp_url2, local_url2_name )
item[2].append( tmp_tuple )
if debug: print "1:%s 2:%s 3:%s<br>\n" %( item[0] , item[1], item[2] )
if debug:
print "page_url_list: %s<br>\n" % self.page_url_list
print "Url_dump_path: %s<br>\n" %self.url_dump_path
FileHandle = open( self.url_dump_path, "w")
marshal.dump( self.page_url_list, FileHandle )
        FileHandle.close()
if tmp_list.__len__()>0:
for item in tmp_list:
FileHandle = open( item[1], "w")
item[2].sort() #item[2] is a list containing sort_field, url, local_url#
if debug: print "keyword index file after sort:%s list:%s<br>\n" %( item[1], item[2] )
marshal.dump( item[2], FileHandle )
                FileHandle.close()
pass
def create_menu_file(self, preview_dir):
tmp_menu_string = """<ul id="menu">\n""" + self.show_course_topics( ret_str=1, for_topic_tree=1, for_menu=1 )+"</ul>\n"
menu_file = self.doc_root + self.dict["courseidentifier"] + sep + "menu_string." + preview_dir
f = open( menu_file ,"w" )
f.write( "%s" %tmp_menu_string )
f.flush()
        f.close()
return tmp_menu_string
def read_topic_dict(self):
doc = readConfig( self.topic_xml_template )
for item in doc["topic"]:
self.topic_dict[item] = ""
def read_section_dict(self):
doc = readConfig( self.section_xml_template )
for item in doc["section"]:
self.section_dict[item] = ""
def read_unit_dict(self):
doc = readConfig( self.unit_xml_template )
for item in doc["unit"]:
self.unit_dict[item] = ""
def get_course_detail( self, course_identifier ):
"""get course_detail and get the page_url_list"""
if exists( self.coursexmlfile):
doc = readConfig( self.coursexmlfile )
if doc.__len__()>0:
for node in doc["courses"]:
if node["courseidentifier"] == course_identifier:
if node["coursetype"]<>"":
self.settemplate( preview_dir=self.preview_dir, coursetype=node["coursetype"])
for item in node.keys():
try:
self.dict[item] = node[item]
except:
pass
interactivexmlfilepath = self.doc_root + course_identifier + sep + 'interactive_devices.xml'
helpxmlfilepath = self.doc_root + course_identifier + sep + 'help_devices.xml'
if exists( helpxmlfilepath ):
help_doc = readConfig( helpxmlfilepath )
if help_doc.has_key("help_devices"):
for item in help_doc["help_devices"]:
pass
"""get the page_url_list"""
self.url_dump_path = self.doc_root + self.dict["courseidentifier"] + sep + 'url_dump'
if not exists( self.url_dump_path ):
self.update_url_list()
FileHandle = open( self.url_dump_path, "r")
self.page_url_list = marshal.load( FileHandle )
FileHandle.close()
if exists( interactivexmlfilepath ):
interactive_doc = readConfig( interactivexmlfilepath )
if interactive_doc.has_key( "interactive_devices" ) and interactive_doc.__len__()>0:
for item in interactive_doc["interactive_devices"]:
if debug: print "item[0:8]:%s<br>\n" % item[0:8]
                        if item[0:8] =="orderby_": # each orderby_ field has a url dump written by update_url_list #
"""get the page_url_list"""
orderby_url_dump_path = self.url_dump_path + item[7:]
if debug: print "orderby_url_dump_path: %s<br/>\n"%orderby_url_dump_path
if exists( orderby_url_dump_path ):
FileHandle = open( orderby_url_dump_path, "r")
self.orderby[item[8:]] = marshal.load( FileHandle )
FileHandle.close()
    def set_related_urls( self, publish_mode=0, current_url="" ):
        """Work out the previous/next page links (and any orderby variants) for the current page."""
if debug: print "page_url_list:%s <br>\n" % self.page_url_list
if debug: print "publish_mode:%d <br>\n" % publish_mode
if debug: print "current_url:%s <br>\n" % current_url
        if self.referer is None:
            return
        query_string = self.referer.split( "?" )[-1]
if debug: print "query_string: %s <br>\n" % query_string
if publish_mode==0:
if query_string.strip()=="":
return
query_items = query_string.split( "&" )
for item in query_items:
key, value = item.split( "=" )
if key.__contains__("identifier"):
current_url += "%s&" %item
self.content_dict[key]=value
current_url = current_url[0:-1] #get rid of last "&" char in url#
self.current_url = current_url
if self.current_url =="":
self.current_url = "."
if debug: print "current_url: %s <br>\n" % self.current_url
try:
i = 0
self.current_page = 0
for item in self.page_url_list:
if item[ publish_mode ] == self.current_url:
self.current_page = i
break
i = i + 1
if self.current_page <> i: #no match url#
if debug: print "current page not found"
return
if debug: print "current page: %s <br>\n" % self.current_page
if self.current_page == 0: #the first page#
if publish_mode==1:
self.content_dict["nextpage"] = self.page_url_list[1][1] + self.page_url_list[1][2]
else:
cmd = "cmd=view_" + self.page_url_list[ 1 ][0].split( "&" )[-1].split( "=" )[0].replace( "identifier","" )
self.content_dict["nextpage"] = self.startcgi + "?" + cmd + "&" + self.page_url_list[ 1 ][0] + self.preview_cgi_string
self.content_dict["previouspage"]=""
if debug :
print "next page: %s <br>\n" % self.content_dict["nextpage"]
elif self.current_page<(self.page_url_list.__len__() - 1) :
if publish_mode==1:
self.content_dict["nextpage"] = "../"*(self.page_url_list[self.current_page][1].count("/")+1 ) +self.page_url_list[ self.current_page + 1 ][1] + self.page_url_list[ self.current_page + 1][2]
self.content_dict["previouspage"] = "../"*(self.page_url_list[self.current_page][1].count("/") +1 ) +self.page_url_list[ self.current_page - 1 ][1] +self.page_url_list[ self.current_page - 1 ][2]
else:
cmd = "cmd=view_" + self.page_url_list[ self.current_page + 1 ][0].split("&" )[-1].split( "=" )[0].replace( "identifier","" )
self.content_dict["nextpage"] = self.startcgi + "?" + cmd + "&" + self.page_url_list[ self.current_page + 1 ][0] + self.preview_cgi_string
cmd = "cmd=view_" + self.page_url_list[ self.current_page -1 ][0].split( "&" )[-1].split( "=" )[0].replace( "identifier","" )
self.content_dict["previouspage"] = self.startcgi + "?" + cmd + "&" + self.page_url_list[ self.current_page - 1 ][0] + self.preview_cgi_string
if debug :
print "next page: %s <br>\n" % self.content_dict["nextpage"]
print "previous page: %s <br>\n" % self.content_dict["previouspage"]
elif self.current_page==(self.page_url_list.__len__() - 1 ): #the last page#
if publish_mode==1:
self.content_dict["previouspage"] = "../"*( self.page_url_list[self.current_page][1].count("/") +1 ) +self.page_url_list[ self.current_page - 1 ][1] + self.page_url_list[ self.current_page - 1 ][2]
else:
cmd = "cmd=view_" + self.page_url_list[ self.current_page - 1 ][0].split( "&" )[-1].split( "=" )[0].replace( "identifier","" )
self.content_dict["previouspage"] = self.startcgi + "?" + cmd + "&" + self.page_url_list[ self.current_page - 1 ][0] + self.preview_cgi_string
self.content_dict["nextpage"]=""
if debug :
print "previous page: %s <br>\n" % self.content_dict["previouspage"]
else:
if debug: print "No previous, next page<br>\n"
except:
pass
if self.orderby.__len__() >0:
for item in self.orderby:
tmp_current_page = 0
for item1 in self.orderby[item]:
if debug: print item1[0] + ', ' + item1[1] + ', '+item1[2]+ "<br>\n"
if item1[publish_mode+1]==self.current_url:
if debug:
print "order_by_%s current_page:%s<br>\n" % ( item, tmp_current_page )
if tmp_current_page == 0 :
if publish_mode==1:
self.content_dict["nextpage_orderby_%s" % item ] = "../"*(self.orderby[item][1][2].count("/")+1 ) +self.orderby[item][1][2] + self.orderby[item][1][3]
else:
cmd = "cmd=view_" + self.orderby[item][ tmp_current_page + 1 ][1].split("&" )[-1].split( "=" )[0].replace( "identifier","" )
self.content_dict["nextpage_orderby_%s" % item ] = self.startcgi + "?" + cmd + "&" + self.orderby[item][1][1] + self.preview_cgi_string
self.content_dict["previouspage_orderby_%s" %item ] = ""
elif tmp_current_page < ( self.orderby[item].__len__() - 1 ):
if publish_mode==1:
self.content_dict[ "nextpage_orderby_%s" % item ] = "../"*(self.orderby[item][tmp_current_page][2].count("/")+1 ) +self.orderby[item][ tmp_current_page + 1 ][2] + self.orderby[item][ tmp_current_page + 1][3]
self.content_dict[ "previouspage_orderby_%s" %item ] = "../"*(self.orderby[item][tmp_current_page][2].count("/") +1 ) +self.orderby[item][ tmp_current_page - 1 ][2] +self.orderby[item][ tmp_current_page - 1 ][3]
else:
cmd = "cmd=view_" + self.orderby[item][ tmp_current_page + 1 ][1].split("&" )[-1].split( "=" )[0].replace( "identifier","" )
self.content_dict[ "nextpage_orderby_%s" % item ] = self.startcgi + "?" + cmd + "&" + self.orderby[item][tmp_current_page+1][1] + self.preview_cgi_string
cmd = "cmd=view_" + self.orderby[item][ tmp_current_page - 1 ][1].split("&" )[-1].split( "=" )[0].replace( "identifier","" )
self.content_dict["previouspage_orderby_%s" % item ] = self.startcgi + "?" + cmd + "&" + self.orderby[item][tmp_current_page - 1 ][1] + self.preview_cgi_string
else:
if publish_mode==1:
self.content_dict["previouspage_orderby_%s" %item ] = "../"*(self.orderby[item][tmp_current_page][2].count("/") +1 ) +self.orderby[item][ tmp_current_page - 1 ][2] +self.orderby[item][ tmp_current_page - 1 ][3]
else:
cmd = "cmd=view_" + self.orderby[item][ tmp_current_page - 1 ][1].split( "&" )[-1].split( "=" )[0].replace( "identifier","" )
self.content_dict["previouspage_orderby_%s" % item ] = self.startcgi + "?" + cmd + "&" + self.orderby[item][tmp_current_page - 1 ][1] + self.preview_cgi_string
self.content_dict[ "nextpage_orderby_%s" % item ] = ""
if debug :
print "next page orderby_%s: %s <br>\n" % ( item, self.content_dict["nextpage_orderby_%s"%item] )
print "previous page orderby_%s: %s <br>\n" % ( item, self.content_dict["previouspage_orderby_%s" % item] )
break
else:
tmp_current_page += 1
def get_topic_detail( self, topicidentifier ):
"""given course identifier and topic_identifier to decide where and read the topics.xml
"""
course_topicxmlfile = self.doc_root + self.dict["courseidentifier"] + '/' + self.topicxmlfile
try:
if exists( course_topicxmlfile ):
self.read_topic_dict()
doc = readConfig( course_topicxmlfile )
if doc.__len__()>0:
for node in doc["topics"]:
if node["topicidentifier"] == topicidentifier:
for item in self.topic_dict.keys():
try:
self.topic_dict[item] = node[item]
except:
pass
break
else:
print "Error, can't get the detail of this topic"
except:
print "Error, can't get the detail of this topic"
return
def get_section_detail( self, sectionidentifier ):
"""given courseidentifier, topicidentifier and sectionidentifier to decide where and read the sections.xml
"""
course_sectionxmlfile = self.doc_root + self.topic_dict["courseidentifier"] + '/'+ self.topic_dict["topicidentifier"]+'/'+ self.sectionxmlfile
try:
if exists( course_sectionxmlfile ):
self.read_section_dict()
doc = readConfig( course_sectionxmlfile )
if doc.__len__()>0:
for node in doc["sections"]:
if node["sectionidentifier"] == sectionidentifier:
for item in self.section_dict.keys():
try:
self.section_dict[item] = node[item]
except:
pass
break
else:
print "Error, can't get the detail of this section"
except:
print "Error, can't get the detail of this section"
return
def get_unit_detail( self, unitidentifier ):
"""given courseidentifier, topicidentifier, topicidentifier and unitidentifier to decide where and read the units.xml
"""
course_unitxmlfile = self.doc_root + self.section_dict["courseidentifier"] + '/'+ self.section_dict["topicidentifier"]+'/'+ self.section_dict["sectionidentifier"] + '/' + self.unitxmlfile
try:
if exists( course_unitxmlfile ):
self.read_unit_dict()
doc = readConfig( course_unitxmlfile )
if doc.__len__()>0:
for node in doc["units"]:
if node["unitidentifier"] == unitidentifier:
for item in self.unit_dict.keys():
try:
self.unit_dict[item] = node[item]
except:
pass
break
else:
print "Error, can't get the detail of this unit"
except:
print "Error, can't get the detail of this unit"
return
def create_manifest_xml( self ):
"""Generate imsmanifest.xml for content packaging"""
imsmanifest_xml = self.doc_root + "manifest/imsmanifest_xml.template"
metadata_xml = self.doc_root + "manifest/metadata_xml.template"
organizations_xml = self.doc_root + "manifest/organizations_xml.template"
organization_xml = self.doc_root + "manifest/organization_xml.template"
resources_xml = self.doc_root + "manifest/resources_xml.template"
resource_xml = self.doc_root + "manifest/resource_xml.template"
item_xml = self.doc_root + "manifest/item_xml.template"
unit_item_xml = self.doc_root + "manifest/unit_item_xml.template"
organizations_dict = { "default":"", "organization":"" }
resources_dict = {"resource":""}
manifest_dict = {}
manifest_dict["metadata"] = self.xml_string( metadata_xml, self.dict )
manifest_dict["organizations"] = ""
manifest_dict["resources"] = ""
organization = 1
item = 0
resource = 0
course_topicxmlfile = self.doc_root + self.dict["courseidentifier"] + '/' + self.topicxmlfile
if exists( course_topicxmlfile ):
organizations_dict["default"] = "TOC1"
organizations_dict["organization"] = ""
doc = readConfig( course_topicxmlfile )
if doc.__len__()>0:
for node in doc["topics"]:
organization_dict = {}
resource_dict = {}
organization_dict["identifier"] = "TOC%d" % organization
organization += 1
resource += 1
organization_dict["idref"] = "RESOURCE%d" % resource
organization_dict["title"] = node["title"]
organization_dict["item"] = ""
resource_dict["identifier"] = organization_dict["idref"]
resource_dict["href"] = resource_dict["filehref"] = "%s.htm" % node["topicidentifier"]
resources_dict["resource"] += self.xml_string( resource_xml, resource_dict )
course_sectionxmlfile = self.doc_root + self.dict["courseidentifier"] + '/' + node["topicidentifier"] + '/' + self.sectionxmlfile
if exists( course_sectionxmlfile ):
doc_section = readConfig( course_sectionxmlfile )
if doc_section.__len__()>0:
for node_section in doc_section["sections"]:
section_item_dict = {}
section_item_dict["title"] = node_section["title"]
item += 1
section_item_dict["identifier"] = "ITEM%d" % ( item )
resource += 1
section_item_dict["idref"] = "RESOURCE%d" %( resource )
section_item_dict["title"]= node_section["title"]
section_item_dict["item"] = ""
resource_dict["identifier"] = section_item_dict["idref"]
resource_dict["href"] = resource_dict["filehref"] = "%s/%s.htm" % ( node["topicidentifier"], node_section["sectionidentifier"] )
resources_dict["resource"] += self.xml_string( resource_xml, resource_dict )
course_unitxmlfile = self.doc_root + self.dict["courseidentifier"] + '/' + node["topicidentifier"] + '/' + node_section["sectionidentifier"] +'/'+ self.unitxmlfile
if exists( course_unitxmlfile ):
doc_unit = readConfig( course_unitxmlfile )
try:
for node_unit in doc_unit["units"]:
unit_item_dict = {}
unit_item_dict["title"] = node_unit["title"]
item += 1
unit_item_dict["identifier"] = "ITEM%d" % ( item )
resource += 1
unit_item_dict["idref"] = "RESOURCE%d" %( resource )
section_item_dict["item"] += self.xml_string( unit_item_xml, unit_item_dict )
resource_dict["identifier"] = unit_item_dict["idref"]
resource_dict["href"] = resource_dict["filehref"] = "%s/%s/%s.htm" % ( node["topicidentifier"], node_section["sectionidentifier"], node_unit["unitidentifier"] )
resources_dict["resource"] += self.xml_string( resource_xml, resource_dict )
except:
pass
organization_dict["item"] += self.xml_string( item_xml, section_item_dict )
organizations_dict["organization"] += self.xml_string( organization_xml, organization_dict )
manifest_dict["resources"] = self.xml_string( resources_xml, resources_dict )
manifest_dict["organizations"] = self.xml_string( organizations_xml, organizations_dict )
manifest_string = self.xml_string( imsmanifest_xml, manifest_dict )
targetxmlfile = self.doc_root + self.dict["courseidentifier"] + '/imsmanifest.xml'
f = open( targetxmlfile ,"w" )
f.write( "%s" %manifest_string )
f.flush()
        f.close()
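    # For orientation, the written imsmanifest.xml roughly follows the IMS
    # Content Packaging shape (a sketch, not the exact template output):
    #   <manifest>
    #     <metadata>...</metadata>
    #     <organizations default="TOC1">
    #       <organization identifier="TOC1"> <item idref="RESOURCE1"/> ... </organization>
    #     </organizations>
    #     <resources> <resource identifier="RESOURCE1" href="1.htm"/> ... </resources>
    #   </manifest>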
def max_identifier( self, xmlfile, root_name, identifier_name ):
if exists( xmlfile ):
try:
doc = readConfig( xmlfile )
except:
doc = None
import locale
maxidentifier = 0
if exists( xmlfile ) and doc:
if doc.__len__()>0:
for node in doc[root_name]:
t = locale.atoi( '%s' %( node[identifier_name]) )
if t > maxidentifier:
maxidentifier = t
return maxidentifier
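    # Hedged example of max_identifier (the path variable is hypothetical):
    # returns the highest numeric id so callers can allocate the next one:
    #   next_id = self.max_identifier(topicxmlfilepath, "topics", "topicidentifier") + 1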
def create_dir( self, tmpidentifier ):
if tmpidentifier<>"":
if exists( tmpidentifier ) :
print "Error, %s directory already exists" % tmpidentifier
return 0
else:
try:
mkdir( tmpidentifier )
try:
tmp_img_dir = tmpidentifier + '/images/'
tmp_file_dir = tmpidentifier + '/files/'
mkdir( tmp_img_dir )
mkdir( tmp_file_dir )
return 1
except:
print "Error while creating directory %s" %tmp_img_dir
return 1
except:
print "Error while creating course directory %s" %( tmpidentifier)
return 0
else:
return 0
def process_graphic( self, form, target_dir , graphic, new_graphic ):
graphic_file = form[graphic].value
try:
fs = form[new_graphic]
if fs.file and fs.filename<>"":
import tempfile
tmpfile = tempfile.mktemp()
f = open(tmpfile, 'wb')
while 1:
line = fs.file.readline()
if line:
f.write(line)
else:
break
f.close()
import re
reslash = re.compile( r'\\')
graph_name_list = reslash.split( fs.filename )
fs.filename = graph_name_list[-1]
targetfile = target_dir + fs.filename
import shutil
try:
shutil.copyfile( tmpfile, targetfile )
graphic_file = fs.filename
except:
print "Error while creating upload file"
else:
pass
except:
pass
return graphic_file
def process_file( self, form, target_dir , processfile, new_processfile ):
resultfile = form[processfile].value
try:
fs = form[new_processfile]
if fs.file and fs.filename<>"":
import tempfile
tmpfile = tempfile.mktemp()
f = open(tmpfile, 'wb')
while 1:
line = fs.file.readline()
if line:
f.write(line)
else:
break
f.close()
import re
reslash = re.compile( r'\\')
graph_name_list = reslash.split( fs.filename )
fs.filename = graph_name_list[-1]
targetfile = target_dir + fs.filename
import shutil
try:
shutil.copyfile( tmpfile, targetfile )
resultfile = fs.filename
if debug: print "UPload file::%s" % resultfile
except:
print "Error while creating upload file"
else:
pass
except:
pass
return resultfile
    def show_course_topics( self, ret_str=0, for_topic_tree=0, publish_course=0, for_course="", for_menu=0, for_url_list=0, publish_mode=0 ):
        """Render the course's topic list for edit, preview, or publish mode."""
if self.dict["courseidentifier"]:
topicxmlfilepath = self.doc_root + self.dict["courseidentifier"] + '/topics.xml'
else:
print "Error while processing %s course topic <br>\n" % ( self.dict["title"] )
return
if not self.content_dict.has_key("sectionidentifier") and self.content_dict.has_key("topicidentifier"):
check_active = 1
else:
check_active = 0
x = ""
if exists( topicxmlfilepath ):
if debug:
print "preview_cgi_string=%s<br>\n" %( self.preview_cgi_string )
doc = readConfig( topicxmlfilepath )
if doc.has_key( "topics" ):
x+= "<ul>\n"
if publish_course<>0: ##"""show the topic links in the content"""
for node in doc["topics"]:
self.get_topic_detail( node["topicidentifier"] )
if self.dict["coursetype"]=="hebrew":
tmp_topic_sections = ""
else:
tmp_topic_sections = self.show_topic_sections( ret_str=1, for_topic_tree=1, publish_course="%s" %publish_course, for_course="%s"%for_course )
if tmp_topic_sections<>"":
tmp_span=">>"
else:
tmp_span=""
x = x +'<li><a href="%s/topic%s.html">%s %s</a>\n'\
% ( node["topicidentifier"], node["topicidentifier"], node["title"], tmp_span )
if for_topic_tree>0:
x += tmp_topic_sections
x += "</li>\n"
elif self.preview_cgi_string =="" and for_menu==0: ##"""edit mode"""
for node in doc["topics"]:
x = x +'<p><a href="%s?cmd=view_topic&courseidentifier=%s&topicidentifier=%s">%s</a>\
<a href="%s?cmd=up_topic&courseidentifier=%s&topicidentifier=%s"><img src=/moodle/pix/t/up.gif height="11" width="11" border="0"></a> \
<a href="%s?cmd=down_topic&courseidentifier=%s&topicidentifier=%s"><img src=/moodle/pix/t/down.gif height="11" width="11" border="0"></a> \
<a href="%s?cmd=edit_topic&courseidentifier=%s&topicidentifier=%s">edit</a> \
<a href="%s?cmd=delete_topic&courseidentifier=%s&topicidentifier=%s">delete</a> \
</p>\n' % ( self.startcgi, self.dict["courseidentifier"], node["topicidentifier"], node["title"], \
self.startcgi, self.dict["courseidentifier"], node["topicidentifier"],\
self.startcgi, self.dict["courseidentifier"], node["topicidentifier"],\
self.startcgi, self.dict["courseidentifier"], node["topicidentifier"],\
self.startcgi, self.dict["courseidentifier"], node["topicidentifier"] )
if for_topic_tree>0:
self.get_topic_detail( node["topicidentifier"] )
x += self.show_topic_sections( ret_str=1, for_topic_tree=1 )
else: ##"""preview mode or publish mode for leftnav_string """
if publish_mode<>0:
if self.current_page ==0 :
tmp_padding =""
else:
tmp_padding = "../"*(self.page_url_list[self.current_page][1].count("/")+1 )
for node in doc["topics"]:
self.get_topic_detail( node["topicidentifier"] )
if self.dict["coursetype"]=="hebrew":
tmp_topic_sections = ""
else:
tmp_topic_sections = self.show_topic_sections( ret_str=1, for_topic_tree=1, for_menu=1, publish_mode=publish_mode )
if tmp_topic_sections<>"":
tmp_span=">>"
else:
tmp_span=""
if publish_mode==0:
if check_active and self.content_dict["topicidentifier"]==node["topicidentifier"] :
x = x + """<li id="active">"""
else:
x = x + "<li>"
x = x +'<a href="%s?cmd=view_topic&courseidentifier=%s&topicidentifier=%s%s">%s %s</a>\n'\
% ( self.startcgi, self.dict["courseidentifier"], node["topicidentifier"], self.preview_cgi_string, node["title"], tmp_span )
else: #generating leftnav_string#
if ( tmp_span=="" and self.content_dict.has_key("topicidentifier") and self.content_dict["topicidentifier"]==node["topicidentifier"] ) or self.page_url_list[self.current_page][1]==node["topicidentifier"] :
x = x + """<li id="active">"""
else:
x = x + "<li>"
x = x +'<a href="%s%s/topic%s.html">%s %s</a>\n' % ( tmp_padding ,node["topicidentifier"], node["topicidentifier"], node["title"], tmp_span)
if for_topic_tree>0:
x += tmp_topic_sections
x += "</li>\n"
x+= "</ul>\n"
if ret_str:
return x
else:
print x
    def show_orderby( self, ret_str=0, for_topic_tree=0, publish_course=0, for_course="", for_menu=0, for_url_list=0, orderby="" ):
        """Render this course's pages sorted by the given orderby field, using the dumped index."""
tmp_url_dump_path = self.doc_root + self.dict["courseidentifier"] + sep + 'url_dump_' + orderby
if debug: print "tmp_url_dump_path:%s<br>\n" %tmp_url_dump_path
if not exists( tmp_url_dump_path ):
if debug: print "tmp_url_dump_path: Not exit<br>\n"
return ""
FileHandle = open( tmp_url_dump_path, "r")
tmp_page_url_list = marshal.load( FileHandle )
FileHandle.close()
if debug: print "tmp_page_url_list:%s<br>\n" % tmp_page_url_list
x = ""
whitespace = " "*8*for_topic_tree
for item in tmp_page_url_list:
node = {}
node["title"] = item[0].encode()
tmp_url = item[1].encode()
args = tmp_url.split("&")
for arg in args:
key, value = arg.split("=")
node[key] = value
x+= "<ul>\n"
if publish_course<>0: ##"""publish mode"""
if node.has_key("unitidentifier"):
x = x +'<li><a href="%s/%s/%s/unit%s.html">%s</a>\n'\
% ( node["topicidentifier"], node["sectionidentifier"], node["unitidentifier"], node["unitidentifier"], node["title"] )
elif node.has_key("sectionidentifier"):
x = x +'<li><a href="%s/%s/section%s.html">%s</a>\n'\
% ( node["topicidentifier"], node["sectionidentifier"], node["sectionidentifier"], node["title"] )
elif node.has_key("topicidentifier"):
x = x +'<li><a href="%s/topic%s.html">%s</a>\n'\
% ( node["topicidentifier"], node["topicidentifier"], node["title"] )
x += "</li>\n"
elif self.preview_cgi_string =="" and for_menu==0: ##"""edit mode"""
if node.has_key("unitidentifier"):
x = x + ' <p>%s<a href="%s?cmd=view_unit&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&unitidentifier=%s">%s</a>\
<a href="%s?cmd=up_unit&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&unitidentifier=%s"><img src=/moodle/pix/t/up.gif height="11" width="11" border="0"></a> \
<a href="%s?cmd=down_unit&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&unitidentifier=%s"><img src=/moodle/pix/t/down.gif height="11" width="11" border="0"></a> \
<a href="%s?cmd=edit_unit&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&unitidentifier=%s">edit</a> \
<a href="%s?cmd=delete_unit&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&unitidentifier=%s">delete</a> \
</p>\n' % ( whitespace, self.startcgi, self.dict["courseidentifier"], node["topicidentifier"], node["sectionidentifier"], node["unitidentifier"],node["title"], \
self.startcgi, self.dict["courseidentifier"], node["topicidentifier"], node["sectionidentifier"], node["unitidentifier"],\
self.startcgi, self.dict["courseidentifier"], node["topicidentifier"], node["sectionidentifier"], node["unitidentifier"],\
self.startcgi, self.dict["courseidentifier"], node["topicidentifier"], node["sectionidentifier"], node["unitidentifier"],\
self.startcgi, self.dict["courseidentifier"], node["topicidentifier"], node["sectionidentifier"], node["unitidentifier"] )
elif node.has_key("sectionidentifier"):
x = x + ' <p>%s<a href="%s?cmd=view_section&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s">%s</a>\
<a href="%s?cmd=up_section&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s"><img src=/moodle/pix/t/up.gif height="11" width="11" border="0"></a> \
<a href="%s?cmd=down_section&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s"><img src=/moodle/pix/t/down.gif height="11" width="11" border="0"></a> \
<a href="%s?cmd=edit_section&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s">edit</a> \
<a href="%s?cmd=delete_section&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s">delete</a> \
</p>\n' % ( whitespace, self.startcgi, self.dict["courseidentifier"] ,node["topicidentifier"],node["sectionidentifier"],node["title"], \
self.startcgi, self.dict["courseidentifier"], node["topicidentifier"],node["sectionidentifier"],\
self.startcgi, self.dict["courseidentifier"], node["topicidentifier"],node["sectionidentifier"],\
self.startcgi, self.dict["courseidentifier"], node["topicidentifier"],node["sectionidentifier"],\
self.startcgi, self.dict["courseidentifier"], node["topicidentifier"],node["sectionidentifier"] )
elif node.has_key("topicidentifier"):
x = x +'<p><a href="%s?cmd=view_topic&courseidentifier=%s&topicidentifier=%s">%s</a>\
<a href="%s?cmd=up_topic&courseidentifier=%s&topicidentifier=%s"><img src=/moodle/pix/t/up.gif height="11" width="11" border="0"></a> \
<a href="%s?cmd=down_topic&courseidentifier=%s&topicidentifier=%s"><img src=/moodle/pix/t/down.gif height="11" width="11" border="0"></a> \
<a href="%s?cmd=edit_topic&courseidentifier=%s&topicidentifier=%s">edit</a> \
<a href="%s?cmd=delete_topic&courseidentifier=%s&topicidentifier=%s">delete</a> \
</p>\n' % ( self.startcgi, self.dict["courseidentifier"], node["topicidentifier"], node["title"], \
self.startcgi, self.dict["courseidentifier"], node["topicidentifier"],\
self.startcgi, self.dict["courseidentifier"], node["topicidentifier"],\
self.startcgi, self.dict["courseidentifier"], node["topicidentifier"],\
self.startcgi, self.dict["courseidentifier"], node["topicidentifier"] )
else: ##"""preview mode """
if node.has_key("unitidentifier"):
x = x + ' <li><a href="%s?cmd=view_unit&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&unitidentifier=%s%s">%s</a></li> \n'\
% ( self.startcgi, self.dict["courseidentifier"], node["topicidentifier"], node["sectionidentifier"], node["unitidentifier"], self.preview_cgi_string, node["title"] )
elif node.has_key("sectionidentifier"):
x = x + ' <li><a href="%s?cmd=view_section&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s%s">%s</a>\n'\
% ( self.startcgi, self.dict["courseidentifier"] , node["topicidentifier"],node["sectionidentifier"], self.preview_cgi_string, node["title"] )
elif node.has_key("topicidentifier"):
x = x +'<li><a href="%s?cmd=view_topic&courseidentifier=%s&topicidentifier=%s%s">%s</a>\n'\
% ( self.startcgi, self.dict["courseidentifier"], node["topicidentifier"], self.preview_cgi_string, node["title"] )
x += "</li>\n"
x+= "</ul>\n"
if ret_str:
return x
else:
print x
def show_course_content(self, return_str=0, return_content=0, orderby="" ):
if exists( self.course_content_template ):
content = self.xml_string( self.course_content_template, self.dict )
if self.preview_cgi_string=="":
adminlink = '<center><a href="start.pyg?cmd=edit_course&courseidentifier=%s">edit course info</a></center><br>\n' % ( self.dict["courseidentifier"] )
content = adminlink + content
if self.dict["coursetype"]=="hebrew":
content = content + '<center><br><a href="start.pyg?cmd=edit_topic&courseidentifier=%s">Add a new word</a></center><br>\n' %(self.dict["courseidentifier"])
else:
content = content + '<center><br><a href="start.pyg?cmd=edit_topic&courseidentifier=%s">Add a new topic</a></center><br>\n' %(self.dict["courseidentifier"])
if return_content:
return content
else:
if orderby<>"":
x = "<center><H1>Show words by %s</H1><p></center>" % orderby + str( self.show_orderby( ret_str=1, orderby="%s"%orderby ) )
else:
x = str( self.show_course_topics( 1 ) )
content = content + x
if self.preview_dir=="":
heading = self.dict["title"]
else:
heading = self.dict["title"] + " <span style='color: #ccc;'>Preview</span>" #bas: lightens preview tag
crumb = ""
preview = self.previewcgi + "?courseidentifier=%s" % self.dict["courseidentifier"]
speakit = self.speakitcgi + "?courseidentifier=%s" % self.dict["courseidentifier"]
outline = self.startcgi + "?cmd=outline_course&courseidentifier=%s%s" % ( self.dict["courseidentifier"], self.preview_cgi_string )
to_parse_h3 = 0
try:
if self.dict["parse_h3"]=="yes":
to_parse_h3=1
except:
pass
if return_str:
return self.showexe( self.theme_template, heading, content, crumb, preview, outline, speakit, return_str=1, parse_h3=to_parse_h3 )
else:
self.showexe( self.theme_template, heading, content, crumb, preview, outline, speakit, parse_h3=to_parse_h3 )
else:
print "Error:template file:%s not exist" %self.course_content_template
def show_topic_content(self, return_str=0, return_content=0 ):
if exists( self.topic_content_template ):
if debug:
print "topic_content_template:%s <br>\n" % self.topic_content_template
content = self.xml_string( self.topic_content_template, self.topic_dict )
if self.preview_cgi_string=="":
if self.dict["coursetype"]=="hebrew": #ugly, will change in new version#
adminlink = '<center><a href="start.pyg?cmd=edit_topic&courseidentifier=%s&topicidentifier=%s">edit this word</a></center><br>\n'\
% ( self.topic_dict["courseidentifier"], self.topic_dict["topicidentifier"])
else:
adminlink = '<center><a href="start.pyg?cmd=edit_topic&courseidentifier=%s&topicidentifier=%s">edit this topic</a></center><br>\n'\
% ( self.topic_dict["courseidentifier"], self.topic_dict["topicidentifier"])
content = adminlink + content
id_string = "courseidentifier=%s&topicidentifier=%s" %( self.topic_dict["courseidentifier"],self.topic_dict["topicidentifier"] )
if self.dict["coursetype"]<>"hebrew":
content += self.show_idevices_option( id_string )
if self.dict["coursetype"]<>"hebrew":
content += "\n<br>" + self.show_idevices("topic")
if self.preview_cgi_string=="":
if self.dict["coursetype"]=="hebrew":
content += '\n<center><br><a href="%s?cmd=edit_section&courseidentifier=%s&topicidentifier=%s">Add a new word</a></center><br>\n' %(self.startcgi, self.dict["courseidentifier"], self.topic_dict["topicidentifier"])
else:
content += '\n<center><br><a href="%s?cmd=edit_section&courseidentifier=%s&topicidentifier=%s">Add a new section</a></center><br>\n' %(self.startcgi, self.dict["courseidentifier"], self.topic_dict["topicidentifier"])
if not return_content:
if self.dict["coursetype"]<>"hebrew":
x = str( self.show_topic_sections( 1 ) )
content = content + x
else:
return content
heading = ""
crumb = "<a href=%s?cmd=view_course&courseidentifier=%s%s>%s</a> -> %s \n" % ( self.startcgi, self.dict["courseidentifier"], self.preview_cgi_string, self.dict["title"], self.topic_dict["title"])
preview = self.previewcgi + "?courseidentifier=%s" % self.dict["courseidentifier"]
speakit = self.speakitcgi + "?courseidentifier=%s&topicidentifier=%s" % ( self.topic_dict["courseidentifier"], self.topic_dict["topicidentifier"] )
outline = self.startcgi + "?cmd=outline_course&courseidentifier=%s%s" % ( self.dict["courseidentifier"], self.preview_cgi_string )
to_parse_h3 = 0
try:
if self.dict["parse_h3"]=="yes":
to_parse_h3=1
except:
pass
if return_str:
return self.showexe( self.theme_template, heading, content, crumb, preview, outline, speakit, return_str=1, parse_h3=to_parse_h3 )
else:
self.showexe( self.theme_template, heading, content, crumb, preview, outline, speakit, parse_h3=to_parse_h3 )
else:
print "Error:topic template file:%s not exist" %self.topic_content_template
def show_section_content(self, return_str=0, return_content=0 ):
if exists( self.section_content_template ):
content = self.xml_string( self.section_content_template, self.section_dict )
if debug:
print "content:%s <br>\n" %content
if self.preview_cgi_string=="":
if self.dict["coursetype"]=="hebrew":
adminlink = '<center><a href="%s?cmd=edit_section&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s">edit this word</a></center><br>\n'\
% ( self.startcgi, self.section_dict["courseidentifier"], self.section_dict["topicidentifier"], self.section_dict["sectionidentifier"])
else:
adminlink = '<center><a href="%s?cmd=edit_section&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s">edit this section</a></center><br>\n'\
% ( self.startcgi, self.section_dict["courseidentifier"], self.section_dict["topicidentifier"], self.section_dict["sectionidentifier"])
content = adminlink + content
id_string = "courseidentifier=%s&topicidentifier=%s§ionidentifier=%s" %( self.section_dict["courseidentifier"],self.section_dict["topicidentifier"],self.section_dict["sectionidentifier"] )
if self.dict["coursetype"]<>"hebrew":
content = content + self.show_idevices_option( id_string )
content += "\n<br>" + self.show_idevices("section")
if self.preview_cgi_string=="":
if self.dict["coursetype"]=="hebrew":
content += '<center><br><a href="%s?cmd=edit_unit&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s">Add a new word</a></center><br>\n' %(self.startcgi, self.section_dict["courseidentifier"], self.section_dict["topicidentifier"], self.section_dict["sectionidentifier"])
else:
content += '<center><br><a href="%s?cmd=edit_unit&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s">Add a new unit</a></center><br>\n' %(self.startcgi, self.section_dict["courseidentifier"], self.section_dict["topicidentifier"], self.section_dict["sectionidentifier"])
if not return_content:
if self.dict["coursetype"]<>"hebrew":
content += str( self.show_section_units( 1 ) )
else:
return content
heading = ""
crumb = "<a href=%s?cmd=view_course&courseidentifier=%s%s>%s</a> -> <a href=%s?cmd=view_topic&courseidentifier=%s&topicidentifier=%s%s>%s</a> -> %s \n" \
% ( self.startcgi, self.dict["courseidentifier"], self.preview_cgi_string, self.dict["title"],\
self.startcgi, self.topic_dict["courseidentifier"], self.topic_dict["topicidentifier"], self.preview_cgi_string, self.topic_dict["title"],\
self.section_dict["title"])
preview = self.previewcgi + "?courseidentifier=%s" % self.dict["courseidentifier"]
            speakit = self.speakitcgi + "?courseidentifier=%s&topicidentifier=%s&sectionidentifier=%s" % ( self.section_dict["courseidentifier"], self.section_dict["topicidentifier"], self.section_dict["sectionidentifier"] )
outline = self.startcgi + "?cmd=outline_course&courseidentifier=%s%s" % ( self.dict["courseidentifier"], self.preview_cgi_string )
to_parse_h3 = 0
try:
if self.dict["parse_h3"]=="yes":
to_parse_h3=1
except:
pass
if return_str:
return self.showexe( self.theme_template, heading, content, crumb, preview, outline, speakit, return_str=1, parse_h3=to_parse_h3 )
else:
self.showexe( self.theme_template, heading, content, crumb, preview, outline, speakit, parse_h3=to_parse_h3 )
else:
print "Error:section template file:%s not exist" %self.section_content_template
def show_unit_content(self, return_str=0, return_content=0 ):
if exists( self.unit_content_template ):
content = self.xml_string( self.unit_content_template, self.unit_dict )
if self.preview_cgi_string =="":
if self.dict["coursetype"]=="hebrew":
adminlink = '<center><a href="start.pyg?cmd=edit_unit&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&unitidentifier=%s">edit this word</a></center><br>\n'\
% ( self.unit_dict["courseidentifier"], self.unit_dict["topicidentifier"], self.unit_dict["sectionidentifier"], self.unit_dict["unitidentifier"])
else:
adminlink = '<center><a href="start.pyg?cmd=edit_unit&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&unitidentifier=%s">edit this unit</a></center><br>\n'\
% ( self.unit_dict["courseidentifier"], self.unit_dict["topicidentifier"], self.unit_dict["sectionidentifier"], self.unit_dict["unitidentifier"])
content = adminlink + content
id_string = "courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&unitidentifier=%s" %( self.unit_dict["courseidentifier"],self.unit_dict["topicidentifier"],self.unit_dict["sectionidentifier"],self.unit_dict["unitidentifier"] )
if self.dict["coursetype"]<>"hebrew":
content = content + self.show_idevices_option( id_string )
content += "\n<br>" + self.show_idevices("unit")
heading = ""
crumb = "<a href=%s?cmd=view_course&courseidentifier=%s%s>%s</a> ->\
<a href=%s?cmd=view_topic&courseidentifier=%s&topicidentifier=%s%s>%s</a>->\
                <a href=%s?cmd=view_section&courseidentifier=%s&topicidentifier=%s&sectionidentifier=%s%s>%s</a>\
-> %s\n" \
% ( self.startcgi, self.dict["courseidentifier"], self.preview_cgi_string, self.dict["title"],\
self.startcgi, self.topic_dict["courseidentifier"], self.topic_dict["topicidentifier"],self.preview_cgi_string, self.topic_dict["title"],\
self.startcgi, self.section_dict["courseidentifier"], self.section_dict["topicidentifier"], self.section_dict["sectionidentifier"], self.preview_cgi_string, self.section_dict["title"],\
self.unit_dict["title"])
preview = self.previewcgi + "?courseidentifier=%s" % self.dict["courseidentifier"]
        speakit = self.speakitcgi + "?courseidentifier=%s&topicidentifier=%s&sectionidentifier=%s&unitidentifier=%s" % ( self.unit_dict["courseidentifier"], self.unit_dict["topicidentifier"], self.unit_dict["sectionidentifier"], self.unit_dict["unitidentifier"] )
outline = self.startcgi + "?cmd=outline_course&courseidentifier=%s%s" % ( self.dict["courseidentifier"], self.preview_cgi_string )
to_parse_h3 = 0
try:
if self.dict["parse_h3"]=="yes":
to_parse_h3=1
except:
pass
if return_content:
return content
elif return_str:
return self.showexe( self.theme_template, heading, content, crumb, preview, outline, speakit, return_str=1, parse_h3=to_parse_h3 )
else:
self.showexe( self.theme_template, heading, content, crumb, preview, outline, speakit, parse_h3=to_parse_h3 )
else:
print "Error:unit template file:%s not exist" %self.unit_content_template
def view_course( self, form, return_str=0, return_content=0 ):
"""Take an course file name as a parameter and creates and displays an course object for it"""
try:
course_identifier = form["courseidentifier"].value
self.get_course_detail( course_identifier )
if form.has_key("orderby"):
order_key = form["orderby"].value
else:
order_key = ""
if debug: print "order_key:%s <br />\n" % order_key
except:
print "Can't get the course detail \n"
return
if return_str:
return self.show_course_content( return_str=1 )
elif return_content: #return the content for text to speech#
return self.show_course_content( return_content=1 )
else:
self.show_course_content( return_str, orderby="%s" % order_key )
def view_topic( self, form, return_str=0, return_content=0 ):
"""Take an course file name as a parameter and creates and displays an course object for it"""
courseidentifier = form["courseidentifier"].value
topicidentifier = form["topicidentifier"].value
self.get_course_detail( courseidentifier )
self.get_topic_detail( topicidentifier )
if return_str:
return self.show_topic_content( return_str=1 )
elif return_content:
return self.show_course_content( return_content=1 )
else:
self.show_topic_content( return_str )
def view_section( self, form, return_str=0, return_content=0 ):
courseidentifier = form["courseidentifier"].value
topicidentifier = form["topicidentifier"].value
sectionidentifier = form["sectionidentifier"].value
self.get_course_detail( courseidentifier )
self.get_topic_detail( topicidentifier )
self.get_section_detail( sectionidentifier )
if return_str:
return self.show_section_content( return_str=1 )
elif return_content:
return self.show_course_content( return_content=1 )
else:
self.show_section_content( return_str )
def view_unit( self, form, return_str=0, return_content=0 ):
self.get_course_detail( form["courseidentifier"].value )
self.get_topic_detail( form["topicidentifier"].value )
self.get_section_detail( form["sectionidentifier"].value )
self.get_unit_detail( form["unitidentifier"].value )
if return_str:
return self.show_unit_content( return_str=1 )
elif return_content:
return self.show_course_content( return_content=1 )
else:
self.show_unit_content( return_str )
def show_topic_sections( self, ret_str = 0, for_topic_tree=0, publish_course=0, for_course="", for_topic="", for_menu=0, publish_mode=0):
self.sectionxmlfilepath = self.doc_root + self.topic_dict["courseidentifier"] + '/'+ self.topic_dict["topicidentifier"]+'/'+ self.sectionxmlfile
x = ""
whitespace = " "*8*for_topic_tree
if not self.content_dict.has_key("unitidentifier") and self.content_dict.has_key("sectionidentifier") :
check_active = 1
else:
check_active = 0
if exists( self.sectionxmlfilepath ):
doc = readConfig( self.sectionxmlfilepath )
if doc.has_key("sections"):
if self.content_dict.has_key("topicidentifier"):
if self.topic_dict["topicidentifier"]==self.content_dict["topicidentifier"]:
x += """ <ul id="submenu_topic">\n"""
else:
x += """ <ul id="hidden">\n"""
else:
x += """ <ul id="hidden">\n"""
if publish_course<>0: ##"""publish mode"""
for node in doc["sections"]:
self.get_section_detail( node["sectionidentifier"] )
tmp_section_units = self.show_section_units(ret_str=1, for_topic_tree=1, publish_course="%s" % publish_course, for_course="%s"%for_course, for_topic="%s" %for_topic, publish_mode=publish_mode )
if tmp_section_units<>"":
tmp_span= ">>"
else:
tmp_span=""
if check_active and self.content_dict["sectionidentifier"]==node["sectionidentifier"]:
x = x + """ <li id="active">"""
else:
x = x + " <li>"
if for_course<>"":
x = x + '<a href="%s/%s/section%s.html">%s</a> \n'\
% ( self.topic_dict["topicidentifier"], node["sectionidentifier"] , node["sectionidentifier"],node["title"] )
if for_topic<>"":
x = x + '<a href="%s/section%s.html">%s\n'\
% ( node["sectionidentifier"] , node["sectionidentifier"],node["title"] )
else:
x = x + '<a href="%s/%s/section%s.html">%s %s</a> \n'\
% ( self.topic_dict["topicidentifier"], node["sectionidentifier"], node["sectionidentifier"],node["title"], tmp_span )
if for_topic_tree>0:
x += tmp_section_units
x += " </li>\n"
elif self.preview_cgi_string=="" and for_menu==0: ##"""edit_mode"""
for node in doc["sections"]:
x = x + ' <p>%s<a href="%s?cmd=view_section&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s">%s</a>\
<a href="%s?cmd=up_section&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s"><img src=/moodle/pix/t/up.gif height="11" width="11" border="0"></a> \
<a href="%s?cmd=down_section&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s"><img src=/moodle/pix/t/down.gif height="11" width="11" border="0"></a> \
<a href="%s?cmd=edit_section&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s">edit</a> \
<a href="%s?cmd=delete_section&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s">delete</a> \
</p>\n' % ( whitespace, self.startcgi, self.topic_dict["courseidentifier"] ,self.topic_dict["topicidentifier"],node["sectionidentifier"],node["title"], \
self.startcgi, self.topic_dict["courseidentifier"], self.topic_dict["topicidentifier"],node["sectionidentifier"],\
self.startcgi, self.topic_dict["courseidentifier"], self.topic_dict["topicidentifier"],node["sectionidentifier"],\
self.startcgi, self.topic_dict["courseidentifier"], self.topic_dict["topicidentifier"],node["sectionidentifier"],\
self.startcgi, self.topic_dict["courseidentifier"], self.topic_dict["topicidentifier"],node["sectionidentifier"] )
if for_topic_tree>0:
self.get_section_detail( node["sectionidentifier"] )
x += self.show_section_units(ret_str=1, for_topic_tree=1 )
else: ##"""preview_mode"""
if debug:
print "publish_mode:%s<br>\n" % publish_mode
if publish_mode<>0:
if self.current_page ==0 :
tmp_padding =""
else:
tmp_padding = "../"*(self.page_url_list[self.current_page][1].count("/")+1 )
if debug:
print "tmp_padding in publish unit:%s<br>\n" % tmp_padding
for node in doc["sections"]:
self.get_section_detail( node["sectionidentifier"] )
tmp_section_units = self.show_section_units(ret_str=1, for_topic_tree=1, for_menu=1, publish_mode=publish_mode )
if tmp_section_units<>"":
tmp_span= ">>"
else:
tmp_span=""
if publish_mode==0:
if check_active and self.content_dict["sectionidentifier"]==node["sectionidentifier"]:
x = x + """ <li id="active">"""
else:
x = x + " <li>"
x = x + '<a href="%s?cmd=view_section&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s%s">%s %s</a>\n'\
% ( self.startcgi, self.topic_dict["courseidentifier"] ,self.topic_dict["topicidentifier"],node["sectionidentifier"], self.preview_cgi_string, node["title"], tmp_span )
else:
if self.page_url_list[self.current_page][1].count("/")==1 and self.page_url_list[self.current_page][1]=="%s/%s"%(self.topic_dict["topicidentifier"],node["sectionidentifier"] ) :
x = x + """<li id="active">"""
else:
x = x + "<li>"
x = x +'<a href="%s%s/%s/section%s.html">%s %s</a>\n' % ( tmp_padding, self.topic_dict["topicidentifier"] ,node["sectionidentifier"], node["sectionidentifier"], node["title"], tmp_span)
if for_topic_tree>0:
x += tmp_section_units
x += " </li>\n"
x += " </ul>\n"
if ret_str:
return x
else:
print x
def show_section_units( self, ret_str = 0, for_topic_tree=0, publish_course=0, for_course="", for_topic="" , for_section="", for_menu=0, publish_mode=0 ):
self.unitxmlfilepath = self.doc_root + self.section_dict["courseidentifier"] + '/'+ self.section_dict["topicidentifier"]+'/'+ self.section_dict["sectionidentifier"]+'/'+ self.unitxmlfile
if debug: print "in show_section_units: unixxmlfile:%s<br>\n" %self.unitxmlfilepath
whitespace = " "*16*for_topic_tree
x = ""
if exists( self.unitxmlfilepath ):
doc = readConfig( self.unitxmlfilepath )
if doc.has_key("units"):
if self.content_dict.has_key("sectionidentifier"):
if self.section_dict["sectionidentifier"]==self.content_dict["sectionidentifier"]:
x += """ <ul id="submenu_section">\n"""
else:
x += """ <ul id="hidden">\n"""
else:
x += """ <ul id="hidden">\n"""
if publish_course<>0 and publish_mode==0: ##"""publish mode"""
for node in doc["units"]:
if self.content_dict.has_key("unitidentifier") and self.content_dict["unitidentifier"]==node["unitidentifier"]:
x = x + """ <li id="active">"""
else:
x = x + " <li>"
if for_section<>"":
x = x + '<a href="%s/unit%s.html">%s</a></a> \n'\
% ( node["unitidentifier"], node["unitidentifier"], node["title"] )
elif for_topic<>"":
x = x + '<a href="%s/%s/unit%s.html">%s</a> \n'\
% ( self.section_dict["sectionidentifier"], node["unitidentifier"], node["unitidentifier"], node["title"] )
elif for_course<>"":
x = x + '<a href="%s/%s/%s/unit%s.html">%s</a>\n'\
% ( self.section_dict["topicidentifier"], self.section_dict["sectionidentifier"], node["unitidentifier"], node["unitidentifier"], node["title"] )
else:
x = x + '<a href="%s/%s/%s/unit%s.html">%s</a>\n'\
% ( self.section_dict["topicidentifier"], self.section_dict["sectionidentifier"], node["unitidentifier"],node["unitidentifier"], node["title"] )
x+= " </li>\n"
elif self.preview_cgi_string=="" and for_menu==0 and publish_mode==0: ##"""edit mode"""
for node in doc["units"]:
x = x + ' <p>%s<a href="%s?cmd=view_unit&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&unitidentifier=%s">%s</a>\
<a href="%s?cmd=up_unit&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&unitidentifier=%s"><img src=/moodle/pix/t/up.gif height="11" width="11" border="0"></a> \
<a href="%s?cmd=down_unit&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&unitidentifier=%s"><img src=/moodle/pix/t/down.gif height="11" width="11" border="0"></a> \
<a href="%s?cmd=edit_unit&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&unitidentifier=%s">edit</a> \
<a href="%s?cmd=delete_unit&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&unitidentifier=%s">delete</a> \
</p>\n' % ( whitespace, self.startcgi, self.section_dict["courseidentifier"], self.section_dict["topicidentifier"], self.section_dict["sectionidentifier"], node["unitidentifier"],node["title"], \
self.startcgi, self.section_dict["courseidentifier"], self.section_dict["topicidentifier"], self.section_dict["sectionidentifier"], node["unitidentifier"],\
self.startcgi, self.section_dict["courseidentifier"], self.section_dict["topicidentifier"], self.section_dict["sectionidentifier"], node["unitidentifier"],\
self.startcgi, self.section_dict["courseidentifier"], self.section_dict["topicidentifier"], self.section_dict["sectionidentifier"], node["unitidentifier"],\
self.startcgi, self.section_dict["courseidentifier"], self.section_dict["topicidentifier"], self.section_dict["sectionidentifier"], node["unitidentifier"] )
else: ##"""previewmode"""
if debug: print "in show_section_units, publish_mode=%d, current_page=%d <br><br>\n" % (publish_mode, self.current_page)
tmp_padding = ""
if publish_mode<>0:
if self.current_page ==0 :
tmp_padding =""
else:
tmp_padding = "../"*(self.page_url_list[self.current_page][1].count("/")+1 )
if debug: print "tmp_padding=%s <br><br>\n" % tmp_padding
for node in doc["units"]:
if debug: print "unixidentifier=%s <br><br>\n" % node["unitidentifier"]
if publish_mode==0:
if self.content_dict.has_key("unitidentifier") and self.content_dict["unitidentifier"]==node["unitidentifier"]:
x = x + """ <li id="active">"""
else:
x = x + " <li>"
x = x + '<a href="%s?cmd=view_unit&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&unitidentifier=%s%s">%s</a></li> \n'\
% ( self.startcgi, self.section_dict["courseidentifier"], self.section_dict["topicidentifier"], self.section_dict["sectionidentifier"], node["unitidentifier"], self.preview_cgi_string, node["title"] )
else:
if debug: print " ooooooo...<br>\n"
if self.page_url_list[self.current_page][1].count("/")==2 and self.page_url_list[self.current_page][1]=="%s/%s/%s"%(self.topic_dict["topicidentifier"],self.section_dict["sectionidentifier"], node["unitidentifier"]) :
x = x + """<li id="active">"""
x = x +'<a href="%s%s/%s/%s/unit%s.html">%s</a>\n' % ( tmp_padding, self.topic_dict["topicidentifier"] , self.section_dict["sectionidentifier"], node["unitidentifier"],node["unitidentifier"], node["title"] )
if debug: print "<p> x=%s<br>\n" % x
else:
x = x + "<li>"
x = x +'<a href="%s%s/%s/%s/unit%s.html">%s</a>\n' % ( tmp_padding, self.topic_dict["topicidentifier"] , self.section_dict["sectionidentifier"], node["unitidentifier"],node["unitidentifier"], node["title"] )
x += " </li>\n"
x += " </ul>\n"
if ret_str:
return x
else:
print x
def show_idevices_option( self, id_string ):
""" read files from idevices directory and generate the select list"""
x = """<center>Add <select style="width:120px;font:10px verdana, arial, sans-serif;text-decoration:none;background-color:#cccccc;" name=url onchange="javascript:if( options[selectedIndex].value != 'Idevices') document.location = options[selectedIndex].value">\n"""
x += '<option value="Idevices" selected>Idevices</option>\n'
doc = readConfig( self.idevice_templatexmlfile )
for node in doc["idevices"]:
x += '<option value="%s?cmd=show_idev_form&idevice_shortname=%s&%s">%s</option>\n' %( self.startcgi, node["idevice_shortname"], id_string, node["idevice_name"] )
x += '</select></center>\n'
return x
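
    # For illustration (added; the entry is hypothetical): an idevices config
    # containing {idevice_shortname: "free_text", idevice_name: "Free Text"}
    # makes this method return roughly:
    #   <center>Add <select ... onchange="...document.location = options[selectedIndex].value">
    #   <option value="Idevices" selected>Idevices</option>
    #   <option value="start.pyg?cmd=show_idev_form&idevice_shortname=free_text&<id_string>">Free Text</option>
    #   </select></center>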
def show_idev_form( self, form ):
if form.has_key("unitidentifier"):
place="unit"
self.read_unit_dict()
self.unit_read_form( form )
elif form.has_key("sectionidentifier"):
place="section"
self.read_section_dict()
self.section_read_form( form )
else:
place="topic"
self.read_topic_dict()
self.topic_read_form( form )
crumb = "<H3>"
if form.has_key("courseidentifier"):
self.get_course_detail( form["courseidentifier"].value )
crumb += "<a href=%s?cmd=view_course&courseidentifier=%s>%s</a> ->"% ( self.startcgi, self.dict["courseidentifier"], self.dict["title"])
else:
print "Error, cant get course detail of this unit"
return
if form.has_key("topicidentifier") and form["topicidentifier"].value<>"" :
self.get_topic_detail( form["topicidentifier"].value )
crumb += "<a href=%s?cmd=view_topic&courseidentifier=%s&topicidentifier=%s>%s</a> ->" % (self.startcgi, self.topic_dict["courseidentifier"], self.topic_dict["topicidentifier"], self.topic_dict["title"] )
else:
print "Error, can't get topic detail of this unit"
return
if form.has_key("sectionidentifier") and form["sectionidentifier"].value<>"" :
self.get_section_detail( form["sectionidentifier"].value )
crumb += "<a href=%s?cmd=view_section&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s>%s</a> ->"\
%(self.startcgi, self.section_dict["courseidentifier"], self.section_dict["topicidentifier"], self.section_dict["sectionidentifier"], self.section_dict["title"] )
if form.has_key("unitidentifier") and form["unitidentifier"].value<>"":
self.get_unit_detail( form["unitidentifier"].value )
crumb += "<a href=%s?cmd=view_unit&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&unitidentifier=%s>%s</a> ->"\
%(self.startcgi, self.unit_dict["courseidentifier"], self.unit_dict["topicidentifier"], self.unit_dict["sectionidentifier"], self.unit_dict["unitidentifier"], self.unit_dict["title"])
heading = ""
content = ""
if form.has_key("idevice_shortname"):
idevice_xml_file = self.doc_root + "/idevices/" + form["idevice_shortname"].value + "_xml.template"
idevice_form = self.doc_root + "/idevices/" + form["idevice_shortname"].value + "_form.template"
if exists( idevice_xml_file ):
doc = readConfig( idevice_xml_file )
for item in doc["idevice"]:
self.idevice_dict[item] = ""
if debug: print item + "<br>\n"
if form.has_key( "subunitidentifier" ):
crumb += "Update %s</h3>\n" % form["idevice_shortname"].value
self.get_idevice_detail( form["subidentifier"].value, form["idevice_shortname"].value, place )
else:
try:
crumb += "Add new %s</h3>\n" % form["idevice_shortname"].value
except:
print doc
return
content = self.xml_string( idevice_form, self.idevice_dict )
preview = self.previewcgi + "?courseidentifier=%s" % self.dict["courseidentifier"]
outline = self.startcgi + "?cmd=outline_course&courseidentifier=%s" % self.dict["courseidentifier"]
self.showexe( self.theme_template, heading, content, crumb, preview, outline )
def get_idevice_detail( self, subunitidentifier, tagname, place ):
if place=="topic":
idevicesxmlfilepath = self.doc_root + self.topic_dict["courseidentifier"] + '/'+ self.topic_dict["topicidentifier"] + '/'+ self.idevicexmlfile
if place=="section":
idevicesxmlfilepath = self.doc_root + self.section_dict["courseidentifier"] + '/'+ self.section_dict["topicidentifier"] + '/'+ self.section_dict["sectionidentifier"] + '/' + self.idevicexmlfile
elif place=="unit":
idevicesxmlfilepath = self.doc_root + self.unit_dict["courseidentifier"] + '/'+ self.unit_dict["topicidentifier"] + '/'+ self.unit_dict["sectionidentifier"] + '/' + self.unit_dict["unitidentifier"]+ "/" + self.idevicexmlfile
if exists( idevicesxmlfilepath ):
doc = readConfig( idevicesxmlfilepath )
for node in doc["idevices"]:
if node.has_key("title"):
node["idevice_name"]= node["title"]
node["idevice_shortname"] = node["idevice_name"].replace(" ","_")
if node["idevice_shortname"] == tagname and node["subunitidentifier"]==subunitidentifier:
self.idevice_dict.update( node )
return self.idevice_dict
else:
print "Error, can't get the detail of this resource:%s, tag:%s" %( idevicesxmlfilepath, tagname )
def process_idevice( self, form, action ):
resource_dict = {}
self.read_unit_dict()
for item in form.keys():
try:
resource_dict[item]=form[item].value
except: #item is a list#
resource_dict[item]= form.getlist( item )
if resource_dict.has_key("courseidentifier"):
self.get_course_detail( resource_dict["courseidentifier"] )
else:
print "Error, cant get course detail of this unit"
return
if resource_dict.has_key("topicidentifier") and resource_dict["topicidentifier"]<>"":
self.get_topic_detail( resource_dict["topicidentifier"] )
place="topic"
else:
print "Error, can't get topic detail of this unit"
return
if resource_dict.has_key("sectionidentifier") and resource_dict["sectionidentifier"]<>"":
self.get_section_detail( resource_dict["sectionidentifier"] )
place = "section"
if resource_dict.has_key("unitidentifier") and resource_dict["unitidentifier"]<>"":
self.get_unit_detail( resource_dict["unitidentifier"] )
place= "unit"
resource = form["idevice_shortname"].value
if debug: print "idevice_name:%s<br>\n" % resource
if form.has_key("subunitidentifier") and action<>"update":
resource_dict.update( self.get_idevice_detail( form["subunitidentifier"].value, resource, place ) )
if place =="section":
base_dir = self.doc_root + resource_dict["courseidentifier"] + '/'+ resource_dict["topicidentifier"] + '/'+ resource_dict["sectionidentifier"] + "/"
elif place=="unit":
base_dir = self.doc_root + resource_dict["courseidentifier"] + '/'+ resource_dict["topicidentifier"] + '/'+ resource_dict["sectionidentifier"] + '/' + resource_dict["unitidentifier"]+ "/"
elif place=="topic":
base_dir = self.doc_root + resource_dict["courseidentifier"] + '/'+ resource_dict["topicidentifier"] + '/'
if resource=="multimedia" and ( action == "add" or action == "update" ):
target_dir = base_dir + "/images/"
if form.has_key("new_thumbnail"):
resource_dict["thumbnail"] = self.process_graphic( form, target_dir, "thumbnail", "new_thumbnail" )
else :
resource_dict["thumbnail"] = self.process_graphic( form, target_dir, "thumbnail", "thumbnail" )
if form.has_key("new_mediafile"):
resource_dict["mediafile"] = self.process_graphic( form, target_dir, "mediafile", "new_mediafile" )
else :
resource_dict["mediafile"] = self.process_graphic( form, target_dir, "mediafile", "mediafile" )
if action == "add":
self.idevicexmlfilepath = base_dir + self.idevicexmlfile
maxidentifier = self.max_identifier( self.idevicexmlfilepath, "idevices", "subunitidentifier" )
resource_dict["subunitidentifier"] = str( maxidentifier + 1 )
elif action == "edit":
if form.has_key( "subunitidentifier" ):
heading = ""
form = self.doc_root + self.template_directory + resource + "_form.template"
content = self.xml_string( form, resource_dict )
if place=="topic":
crumb = "<H3><a href=%s?cmd=view_course&courseidentifier=%s>%s</a> ->\
<a href=%s?cmd=view_topic&courseidentifier=%s&topicidentifier=%s>%s</a> ->Edit %s"\
% ( self.startcgi, self.dict["courseidentifier"], self.dict["title"],\
self.startcgi, self.topic_dict["courseidentifier"], self.topic_dict["topicidentifier"], self.topic_dict["title"], resource)
elif place=="section":
crumb = "<H3><a href=%s?cmd=view_course&courseidentifier=%s>%s</a> ->\
<a href=%s?cmd=view_topic&courseidentifier=%s&topicidentifier=%s>%s</a> ->\
                <a href=%s?cmd=view_section&courseidentifier=%s&topicidentifier=%s&sectionidentifier=%s>%s</a>->Edit %s"\
% ( self.startcgi, self.dict["courseidentifier"], self.dict["title"],\
self.startcgi, self.topic_dict["courseidentifier"], self.topic_dict["topicidentifier"], self.topic_dict["title"],\
self.startcgi, self.section_dict["courseidentifier"], self.section_dict["topicidentifier"], self.section_dict["sectionidentifier"], self.section_dict["title"], resource)
elif place=="unit":
crumb = "<H3><a href=%s?cmd=view_course&courseidentifier=%s>%s</a> ->\
<a href=%s?cmd=view_topic&courseidentifier=%s&topicidentifier=%s>%s</a> ->\
                <a href=%s?cmd=view_section&courseidentifier=%s&topicidentifier=%s&sectionidentifier=%s>%s</a>->\
<a href=%s?cmd=view_unit&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&unitidentifier=%s>%s</a> ->Edit %s"\
% ( self.startcgi, self.dict["courseidentifier"], self.dict["title"],\
self.startcgi, self.topic_dict["courseidentifier"], self.topic_dict["topicidentifier"], self.topic_dict["title"],\
self.startcgi, self.section_dict["courseidentifier"], self.section_dict["topicidentifier"], self.section_dict["sectionidentifier"], self.section_dict["title"],\
self.startcgi, self.unit_dict["courseidentifier"], self.unit_dict["topicidentifier"], self.unit_dict["sectionidentifier"], self.unit_dict["unitidentifier"], self.unit_dict["title"], resource)
preview = self.previewcgi + "?courseidentifier=%s" % self.dict["courseidentifier"]
outline = self.startcgi + "?cmd=outline_course&courseidentifier=%s" % self.dict["courseidentifier"]
self.showexe( self.theme_template, heading, content, crumb, preview, outline )
return
if debug: print "go to function save_idevice_file( action=%s, resource_dict=%s, resource=%s, place=%s)<br>\n" %( action, resource_dict, resource, place )
self.save_idevice_file( action, resource_dict, resource, place )
if place=="topic":
self.view_topic( form )
elif place=="section":
self.view_section( form )
elif place=="unit":
self.view_unit( form )
def save_idevice_file( self, action, resource_dict, resource, place ):
if place=="topic":
self.idevicexmlfilepath = self.doc_root + self.topic_dict["courseidentifier"] + "/" + self.topic_dict["topicidentifier"]+ "/" + self.idevicexmlfile
elif place=="section":
self.idevicexmlfilepath = self.doc_root + self.section_dict["courseidentifier"] + "/" + self.section_dict["topicidentifier"]+ "/" + self.section_dict["sectionidentifier"] + "/" + self.idevicexmlfile
elif place=="unit":
self.idevicexmlfilepath = self.doc_root + self.unit_dict["courseidentifier"] + "/" + self.unit_dict["topicidentifier"]+ "/" + self.unit_dict["sectionidentifier"] + "/" + self.unit_dict["unitidentifier"] + "/" + self.idevicexmlfile
self.saveidevicexmlfile( action, "idevices", resource_dict, "subunitidentifier", self.idevicexmlfilepath, resource )
def saveidevicexmlfile( self, action, roottag, dict, index_identifier, targetxmlfile, resource ):
x = '<?xml version="1.0" encoding="iso-8859-1"?>\n<%s multiple="true">\n' % roottag
if debug: print "in saveidevicexmlfile(action=%s,roottag=%s, dict=%s,index_identifier=%s, targetxmlfile=%s, resource=%s) <p>\n" %( action, roottag, dict, index_identifier, targetxmlfile, resource )
if exists( targetxmlfile ):
doc = readConfig( targetxmlfile )
try:
item_list = doc[roottag]
if debug:
for item in item_list:
print "%s <br>\n" %item
if action == "up" or action == "down":
found = 0
index = 0
for node in doc[roottag]:
index = index + 1
if node[index_identifier] == dict[index_identifier]:
found = index
if found == 0:
print "Sorry, this %s identifier:%s is not found <br>\n" % ( roottag, index_identifier )
return
elif action == "up" and found == 1:
print "First item can not be moved upward <br>\n"
return
elif action =="down":
if found == index:
print "Last one can not be moved downward <br>\n"
return
i = 1
for node in doc[roottag]:
if node.has_key("idevice_name"):
node["title"] = node["idevice_name"]
elif node.has_key("title"):
node["idevice_name"] = node["title"]
node["idevice_shortname"] = node["idevice_name"].replace( " ", "_" )
if action == "update" and node[index_identifier]==dict[index_identifier]:
xml_template = self.doc_root + self.template_directory + resource + "_xml.template"
t = self.xml_string( xml_template, dict, "escape" )
x = x + t
elif action=="delete" and node[index_identifier]==dict[index_identifier]:
i = i + 1
continue
elif ( action=="up" and i==(found-1) ) or (action=="down" and i==found ) :
xml_template = self.doc_root + self.template_directory + node["idevice_shortname"] + "_xml.template"
up_t = self.xml_string( xml_template, node, "escape" )
elif ( action=="up" and i == found ) or ( action=="down" and i ==(found+1) ):
xml_template = self.doc_root + self.template_directory + node["idevice_shortname"] + "_xml.template"
down_t = self.xml_string( xml_template, node, "escape" )
x = x + down_t + up_t
else:
xml_template = self.doc_root + self.template_directory + node["idevice_shortname"] + "_xml.template"
t = self.xml_string( xml_template, node, "escape" )
x = x + t
i = i + 1
except KeyError:
pass
if action == "add":
xml_template = self.doc_root + self.template_directory + resource + "_xml.template"
t = self.xml_string( xml_template, dict, "escape" )
x = x + t
try:
bkup_targetxmlfile = targetxmlfile + action + dict[index_identifier]
import shutil
shutil.copyfile( targetxmlfile, bkup_targetxmlfile )
except:
pass
x = x + "\n</%s>" %roottag
f = open( targetxmlfile ,"w" )
f.write( "%s" %x )
f.flush()
        f.close()
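
    # How the "up"/"down" branches above reorder items (comment added): the
    # moved item and its neighbour are rendered into up_t/down_t instead of
    # being appended immediately, then written together in swapped order
    # (down_t + up_t) once the second entry of the pair has been reached.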
def show_idevices( self, place, icon_dir="" ):
x = ""
if place=="topic":
self.idevicexmlfilepath= self.doc_root + self.topic_dict["courseidentifier"] + "/" + self.topic_dict["topicidentifier"]+ "/" + self.idevicexmlfile
elif place=="section":
self.idevicexmlfilepath= self.doc_root + self.section_dict["courseidentifier"] + "/" + self.section_dict["topicidentifier"]+ "/" + self.section_dict["sectionidentifier"] + "/" + self.idevicexmlfile
elif place=="unit":
self.idevicexmlfilepath= self.doc_root + self.unit_dict["courseidentifier"] + "/" + self.unit_dict["topicidentifier"]+ "/" + self.unit_dict["sectionidentifier"] + "/" + self.unit_dict["unitidentifier"] + "/" + self.idevicexmlfile
if exists( self.idevicexmlfilepath ):
doc= readConfig( self.idevicexmlfilepath )
if doc.has_key("idevices"):
for node in doc["idevices"]:
if node.has_key("title"):
node["idevice_name"] = node["title"]
node["idevice_shortname"] = node["idevice_name"].replace( " ", "_" )
if place=="topic":
admin_link = '<br><a href="%s?cmd=up_idevice&courseidentifier=%s&topicidentifier=%s&subunitidentifier=%s&idevice_shortname=%s"><img src=/moodle/pix/t/up.gif height="11" width="11" border="0"></a> \
<a href="%s?cmd=down_idevice&courseidentifier=%s&topicidentifier=%s&subunitidentifier=%s&idevice_shortname=%s"><img src=/moodle/pix/t/down.gif height="11" width="11" border="0"></a> \
<a href="%s?cmd=edit_idevice&courseidentifier=%s&topicidentifier=%s&subunitidentifier=%s&idevice_shortname=%s">edit</a> \
<a href="%s?cmd=delete_idevice&courseidentifier=%s&topicidentifier=%s&subunitidentifier=%s&idevice_shortname=%s">delete</a> \
<br>\n' % ( self.startcgi, self.topic_dict["courseidentifier"], self.topic_dict["topicidentifier"], node["subunitidentifier"],node["idevice_shortname"],\
self.startcgi, self.topic_dict["courseidentifier"], self.topic_dict["topicidentifier"], node["subunitidentifier"],node["idevice_shortname"],\
self.startcgi, self.topic_dict["courseidentifier"], self.topic_dict["topicidentifier"], node["subunitidentifier"],node["idevice_shortname"],\
self.startcgi, self.topic_dict["courseidentifier"], self.topic_dict["topicidentifier"], node["subunitidentifier"],node["idevice_shortname"] )
elif place=="section":
admin_link = '<br><a href="%s?cmd=up_idevice&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&subunitidentifier=%s&idevice_shortname=%s"><img src=/moodle/pix/t/up.gif height="11" width="11" border="0"></a> \
<a href="%s?cmd=down_idevice&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&subunitidentifier=%s&idevice_shortname=%s"><img src=/moodle/pix/t/down.gif height="11" width="11" border="0"></a> \
<a href="%s?cmd=edit_idevice&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&subunitidentifier=%s&idevice_shortname=%s">edit</a> \
<a href="%s?cmd=delete_idevice&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&subunitidentifier=%s&idevice_shortname=%s">delete</a> \
<br>\n' % ( self.startcgi, self.section_dict["courseidentifier"], self.section_dict["topicidentifier"], self.section_dict["sectionidentifier"], node["subunitidentifier"],node["idevice_shortname"],\
self.startcgi, self.section_dict["courseidentifier"], self.section_dict["topicidentifier"], self.section_dict["sectionidentifier"], node["subunitidentifier"],node["idevice_shortname"],\
self.startcgi, self.section_dict["courseidentifier"], self.section_dict["topicidentifier"], self.section_dict["sectionidentifier"], node["subunitidentifier"],node["idevice_shortname"],\
self.startcgi, self.section_dict["courseidentifier"], self.section_dict["topicidentifier"], self.section_dict["sectionidentifier"], node["subunitidentifier"],node["idevice_shortname"] )
elif place=="unit":
admin_link = '<br><a href="%s?cmd=up_idevice&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&unitidentifier=%s&subunitidentifier=%s&idevice_shortname=%s"><img src=/moodle/pix/t/up.gif height="11" width="11" border="0"></a> \
<a href="%s?cmd=down_idevice&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&unitidentifier=%s&subunitidentifier=%s&idevice_shortname=%s"><img src=/moodle/pix/t/down.gif height="11" width="11" border="0"></a> \
<a href="%s?cmd=edit_idevice&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&unitidentifier=%s&subunitidentifier=%s&idevice_shortname=%s">edit</a> \
<a href="%s?cmd=delete_idevice&courseidentifier=%s&topicidentifier=%s§ionidentifier=%s&unitidentifier=%s&subunitidentifier=%s&idevice_shortname=%s">delete</a> \
<br>\n' % ( self.startcgi, self.unit_dict["courseidentifier"], self.unit_dict["topicidentifier"], self.unit_dict["sectionidentifier"],self.unit_dict["unitidentifier"], node["subunitidentifier"],node["idevice_shortname"],\
self.startcgi, self.unit_dict["courseidentifier"], self.unit_dict["topicidentifier"], self.unit_dict["sectionidentifier"],self.unit_dict["unitidentifier"], node["subunitidentifier"],node["idevice_shortname"],\
self.startcgi, self.unit_dict["courseidentifier"], self.unit_dict["topicidentifier"], self.unit_dict["sectionidentifier"],self.unit_dict["unitidentifier"], node["subunitidentifier"],node["idevice_shortname"],\
self.startcgi, self.unit_dict["courseidentifier"], self.unit_dict["topicidentifier"], self.unit_dict["sectionidentifier"],self.unit_dict["unitidentifier"], node["subunitidentifier"],node["idevice_shortname"] )
template_file = self.doc_root + self.content_template_directory + node["idevice_shortname"] + "_content.template"
if debug:
print "idevice template file:%s <br>\n" % template_file
if icon_dir<>"":
node["icon_dir"] = icon_dir
else:
node["icon_dir"] = ""
if self.preview_cgi_string=="":
x += admin_link + self.xml_string( template_file, node )
else:
x += self.xml_string( template_file, node )
return x
def extract_h3( self, target_string ):
tmp_list = target_string.split( '<div id="about_nab_sub">' )
if tmp_list.__len__() > 1:
tmp_headers = tmp_list[1].split('</div>')[0]
else:
tmp_headers = ""
if debug: print "previous h3 titles:%s" % tmp_headers
tmp_content = target_string.split( '<h3>' )
if tmp_content.__len__() > 1:
h3_title = [] #use this to store each h3 title#
result = tmp_content[0]
if debug: print " h3 tag 0: %s <br>\n" % result
for subcontent in tmp_content[1:]:
tmp_subcontent = subcontent.split( '</h3>' )
if tmp_subcontent.__len__() > 1 :
tmp_id = "".join( tmp_subcontent[0].split() )
h3_title.append( """<a href="#%s">%s</a>""" %( tmp_id, tmp_subcontent[0] ) )
if debug: print " tmp_id: %s <br>\n" % tmp_id
result += """<h3 id="%s">%s</h3>%s""" %( tmp_id, tmp_subcontent[0], "".join( tmp_subcontent[1:]) )
if debug: print " result: %s <br>\n" % result
else:
result += "<h3>" + subcontent
"""<div id="about_nav_sub">... </div>"""
result = """<div id="about_nav_sub">%s %s</div>""" %( tmp_headers, " ".join( h3_title) ) + result
return result
        else: #no h3 tag, return the original content#
return target_string
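
    # Worked example (added): extract_h3('<h3>My Title</h3><p>body</p>') returns
    # '<div id="about_nav_sub"> <a href="#MyTitle">My Title</a></div>' followed by
    # '<h3 id="MyTitle">My Title</h3><p>body</p>' -- the anchor id is simply the
    # heading text with its whitespace removed.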
def extract_header( self, target_string ):
tmp_list = target_string.split( '<div id="about_nav_sub">' )
if tmp_list.__len__() > 1:
tmp_headers = tmp_list[1].split('</div>')[0]
else:
tmp_headers = ""
if debug: print "previous header titles:%s" % tmp_headers
tmp_content = target_string.split( '<h' )
if tmp_content.__len__() > 1:
header_title = [] #use this to store each header title#
result = tmp_content[0]
for subcontent in tmp_content[1:]:
tmp_subcontent1 = subcontent.split( '>' )
if tmp_subcontent1[0].__len__() == 0:
result += "<h>"
continue #this is not a legal tag <h>#
elif tmp_subcontent1[0][0] in ( "1", "2", "3", "4" ):
tmp_tag = "h" + tmp_subcontent1[0][0] #strip tag : h1 or h2 or h3 ...#
else:
result += "<h" + subcontent
continue
if debug: print "tmp_tag:%s <br>\n" % tmp_tag
try:
tmp_attribute = tmp_subcontent1[0][1:] #others like style=....#
except:
tmp_attribute = ""
if debug: print "tmp_attribute:%s <br>\n" % tmp_attribute
endtmp_tag = "</%s>" % tmp_tag
tmp_subcontent_string = ">".join( tmp_subcontent1[1:])
if debug: print "tmp_subcontent string: %s" % tmp_subcontent_string
tmp_subcontent = tmp_subcontent_string.split( endtmp_tag )
if debug: print "tmp_subcontent list: %s" % tmp_subcontent
if tmp_subcontent.__len__() > 1 :
tmp_id = "".join( tmp_subcontent[0].split() )
header_title.append( """<a href="#%s">%s</a>""" %( tmp_id, tmp_subcontent[0] ) )
if debug: print " tmp_id: %s <br>\n" % tmp_id
result += """<%s %s id="%s">%s</%s>%s""" %( tmp_tag, tmp_attribute, tmp_id, tmp_subcontent[0], tmp_tag, "".join( tmp_subcontent[1:]) )
if debug: print " result: %s <br>\n" % result
else:
result += "<%s %s>" %( tmp_tag, tmp_attribute) + subcontent
"""<div id="about_nav_sub">... </div>"""
result = """<div id="about_nav_sub">%s %s</div><p><p>""" %( tmp_headers, " ".join( header_title) ) + result
return result
        else: #no header tag, return the original content#
return target_string
# ===== file: benchmark/hackernews/testcase/firstcases/testcase8_021.py (repo: Prefest2018/Prefest) =====
#coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
    'platformName' : 'Android',
    'deviceName' : 'Android Emulator',
    'platformVersion' : '4.4',
    'appPackage' : 'io.dwak.holohackernews.app',
    'appActivity' : 'io.dwak.holohackernews.app.ui.storylist.MainActivity',
    'resetKeyboard' : True,
    'androidCoverage' : 'io.dwak.holohackernews.app/io.dwak.holohackernews.app.JacocoInstrumentation',
    'noReset' : True
}
def command(cmd, timeout=5):
    p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
    time.sleep(timeout)
    p.terminate()
    return
def getElememt(driver, str) :
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str)
    return element

def getElememtBack(driver, str1, str2) :
    for i in range(0, 2, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str1)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str2)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str2)
    return element
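
# Note on the helpers above (comment added): each lookup polls once per second
# (5 tries for getElememt, 2 then 5 for getElememtBack); if the element never
# appears, a tap at (50, 50) is issued -- presumably to dismiss an overlay --
# before one last lookup is attempted.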
def swipe(driver, startxper, startyper, endxper, endyper) :
    size = driver.get_window_size()
    width = size["width"]
    height = size["height"]
    try:
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
                     end_y=int(height * endyper), duration=2000)
    except WebDriverException:
        time.sleep(1)
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
                     end_y=int(height * endyper), duration=2000)
    return
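
# Illustrative call (added; not part of the generated test): the arguments are
# fractions of the screen size, so this drags from mid-lower to mid-upper screen:
#   swipe(driver, 0.5, 0.8, 0.5, 0.2)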
# testcase021
try :
    starttime = time.time()
    driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
    element = getElememtBack(driver, "new UiSelector().text(\"37 comments\")", "new UiSelector().className(\"android.widget.TextView\").instance(7)")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"com.android.browser:id/lock\").className(\"android.widget.ImageView\")")
    TouchAction(driver).tap(element).perform()
except Exception, e:
    print 'FAIL'
    print 'str(e):\t\t', str(e)
    print 'repr(e):\t', repr(e)
    print traceback.format_exc()
else:
    print 'OK'
finally:
    cpackage = driver.current_package
    endtime = time.time()
    print 'consumed time:', str(endtime - starttime), 's'
    command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"8_021\"")
    jacocotime = time.time()
    print 'jacoco time:', str(jacocotime - endtime), 's'
    driver.quit()
    if (cpackage != 'io.dwak.holohackernews.app'):
        cpackage = "adb shell am force-stop " + cpackage
        os.popen(cpackage)
# ===== file: sites/top/api/rest/ShoprecommendShopsGetRequest.py (repo: topwinner/topwinner) =====
'''
Created by auto_sdk on 2012-08-26 16:43:44
'''
from top.api.base import RestApi
class ShoprecommendShopsGetRequest(RestApi):
    def __init__(self, domain, port):
        RestApi.__init__(self, domain, port)
        self.count = None
        self.ext = None
        self.recommend_type = None
        self.seller_id = None

    def getapiname(self):
        return 'taobao.shoprecommend.shops.get'
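
# Illustrative usage sketch (added; not part of the generated SDK file). The
# gateway host below is an assumption -- use whatever endpoint the surrounding
# application is configured with:
#   req = ShoprecommendShopsGetRequest('gw.api.taobao.com', 80)
#   req.seller_id = 12345          # hypothetical seller id
#   req.count = 10
#   print(req.getapiname())        # -> 'taobao.shoprecommend.shops.get'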
# ===== file: play22.py (repo: LakshmikanthRavi/guvi-lux) =====
v,m=map(int,input().split())
li=[]
for i in range(1,m+1):
    if v%i==0 and m%i==0:
        li.append(i)
if len(li)==1:
    print(*li)
else:
    print(*li[-1:])
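
# Note (added): li collects every common divisor of v and m in increasing
# order, so both branches print li[-1], i.e. gcd(v, m); math.gcd(v, m)
# computes the same value directly.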
# ===== file: Python_codes/p02381/s394317702.py (repo: Aasthaengg/IBMdataset) =====
import math
while True:
    n=int(input())
    if n==0:
        break
    s=list(map(int,input().split()))
    m=sum(s)/n
    S=0
    for i in range(n):
        S+=(s[i]-m)**2
    a2=math.sqrt(S/n)
    print('{:.8f}'.format(a2))
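
# For reference (added): this is the population standard deviation,
#   sigma = sqrt((1/n) * sum((s_i - m)**2))  with  m = sum(s)/n,
# which the standard library also exposes as statistics.pstdev(s).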
# ===== file: PythonStandardLibrary/itertools11.py (repo: Falonie/Notes) =====
from itertools import accumulate
from operator import mul
a = [3, 4, 6, 2, 1, 9, 1, 7, 5, 8]
print(list(accumulate(a, max)))
print(list(accumulate(a,min)))
print(list(accumulate(a)))
print(list(accumulate(a,mul)))
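
# Expected output for the `a` above (worked out by hand):
#   max: [3, 4, 6, 6, 6, 9, 9, 9, 9, 9]
#   min: [3, 3, 3, 2, 1, 1, 1, 1, 1, 1]
#   sum: [3, 7, 13, 15, 16, 25, 26, 33, 38, 46]
#   mul: [3, 12, 72, 144, 144, 1296, 1296, 9072, 45360, 362880]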
"[email protected]"
]
| |
6643e993132ac4719ee45f52a4fefe2f8cb75fcf | b834dda071bbe33db47c06e4281d6203ad5e4d4d | /tests/conftest.py | 87ed9b0cbdb98c40e1ddf00cbb3d485633c1f6d6 | [
"BSD-2-Clause"
]
| permissive | revolter/invocations | 80caf5e7024379260f710187a166509c1aaaed03 | 07be47051407db4fc5ba4ae03945e75ece593e46 | refs/heads/master | 2023-03-08T10:07:28.305372 | 2020-04-24T22:39:56 | 2020-04-24T22:39:56 | 285,869,144 | 0 | 0 | BSD-2-Clause | 2020-08-07T16:02:19 | 2020-08-07T16:02:18 | null | UTF-8 | Python | false | false | 498 | py | from mock import Mock
from pytest import fixture
from invoke import MockContext, Result
# TODO: figure out how to distribute it in a way not requiring non-testing
# users to have mock installed?!
@fixture
def ctx():
    # TODO: make MockContext more usable in a "don't care about results" mode
    # NOTE: this is ugly but whatever.
    MockContext.run_command = property(lambda self: self.run.call_args[0][0])
    mc = MockContext(run=Result())
    mc._set(run=Mock(wraps=mc.run))
    yield mc
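
# Illustrative use in a test module (added; `my_task` is a hypothetical task):
#   def test_my_task_runs_expected_command(ctx):
#       my_task(ctx)
#       assert ctx.run_command == "the command my_task should have run"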
# ===== file: Lights/adafruit-circuitpython-bundle-6.x-mpy-20210310/examples/htu31d_simpletest.py (repo: IanSMoyes/SpiderPi, license: Apache-2.0) =====
# SPDX-FileCopyrightText: Copyright (c) 2020 ladyada for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import time
import busio
import board
import adafruit_htu31d
i2c = busio.I2C(board.SCL, board.SDA)
htu = adafruit_htu31d.HTU31D(i2c)
print("Found HTU31D with serial number", hex(htu.serial_number))
htu.heater = True
print("Heater is on?", htu.heater)
htu.heater = False
print("Heater is on?", htu.heater)
while True:
    temperature, relative_humidity = htu.measurements
    print("Temperature: %0.1f C" % temperature)
    print("Humidity: %0.1f %%" % relative_humidity)
    print("")
    time.sleep(1)
# ===== file: braindecode/braindecode/venv1/Lib/site-packages/numba/macro.py (repo: sisi2/Masterthesis, license: BSD-3-Clause / BSD-2-Clause) =====
"""
Macro handling.
Macros are expanded block-by-block.
"""
from __future__ import absolute_import, print_function, division
# Expose the Macro object from the corresponding IR rewrite pass
from .rewrites.macros import Macro
| [
"[email protected]"
]
| |
a3849b47ff857c57b806d2cc30f90afdb5bceff3 | a32ca3544bb5a587e5fd7aaa1c73ac0fb918f11e | /hypha/apply/projects/migrations/0051_remove_unnecessary_fields_from_invoice.py | 11ad637d548ba6d70e8286efcb3f98d24703c04d | [
"BSD-3-Clause"
]
| permissive | jvasile/hypha | 87904bf514e7cf5af63c7146eaaa49d3611fd57f | b5ccad20dd3434f53a2b9d711fac510124c70a6e | refs/heads/main | 2023-07-08T04:10:08.233259 | 2023-06-20T05:35:29 | 2023-06-20T05:35:29 | 354,630,183 | 0 | 0 | BSD-3-Clause | 2021-04-04T19:32:38 | 2021-04-04T19:32:38 | null | UTF-8 | Python | false | false | 559 | py | # Generated by Django 3.2.12 on 2022-04-12 05:32
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('application_projects', '0050_add_new_invoice_status'),
]
operations = [
migrations.RemoveField(
model_name='invoice',
name='amount',
),
migrations.RemoveField(
model_name='invoice',
name='date_from',
),
migrations.RemoveField(
model_name='invoice',
name='date_to',
),
]
# ===== file: mapped_seq.py (repo: RobinQi/BioUtils, license: BSD-2-Clause) =====
'''The script reads alignment results from BLAT in PSL format and prints out
the sequences of the mapped reads in FASTA format.
'''
import sys
from Bio import SeqIO
def parse_mapped(pslfile):
    mapped = set()
    for line in open(pslfile):
        name = line.strip().split()[9]  # field 10 of a PSL line is the query name
        mapped.add(name)
    return mapped

def select_seq(mapped_seqs, fasta_file):
    for n, record in enumerate(SeqIO.parse(fasta_file, 'fasta'), start=1):
        if record.id in mapped_seqs:
            yield record
        if n % 1000 == 0:
            print >> sys.stderr, '...', n

if __name__=='__main__':
    try:
        pslfile = sys.argv[1]
        fastafile = sys.argv[2]
    except IndexError:
        print >> sys.stderr, 'mapped_seq.py <psl file> <fasta file>'
        sys.exit(1)  # exit here, otherwise pslfile/fastafile are undefined below
    mapped_seqs = parse_mapped(pslfile)
    print >> sys.stderr, 'Writing mapped sequences ...'
    SeqIO.write(select_seq(mapped_seqs, fastafile), sys.stdout, 'fasta')
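
# Typical invocation (added; this is Python 2 code, given the print >> syntax):
#   python mapped_seq.py alignments.psl reads.fasta > mapped.fasta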
# ===== file: .history/sudoku_20201029173348.py (repo: JensVL96/Puzzle-solver-for-fun) =====
# -*- coding: utf-8 -*-
from __future__ import print_function
import pygame as pg
from random import sample
import pyglet  # needed below for pyglet.window.Window
from pyglet.gl import *
from string import *
import numpy as np
class create_board():
    def __init__(self):
        self.base = 3   # Will generate any size of random sudoku board in O(n^2) time
        self.side = self.base * self.base
        self.nums = sample(range(1, self.side + 1), self.side) # random numbers
        self.board = [[self.nums[(self.base * (r%self.base) + r//self.base + c)%self.side] for c in range(self.side) ] for r in range(self.side)]
        rows = [ r for g in sample(range(self.base),self.base) for r in sample(range(g * self.base,(g + 1) * self.base), self.base) ]
        cols = [ c for g in sample(range(self.base),self.base) for c in sample(range(g * self.base,(g + 1) * self.base), self.base) ]
        self.board = [[self.board[r][c] for c in cols] for r in rows]
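
        # How this works (comment added): the index expression
        # (base*(r%base) + r//base + c) % side is the classic "pattern" for a
        # baseline valid grid -- each row is a shifted copy of the first, so
        # rows, columns and boxes all start out conflict-free. Shuffling rows
        # and columns only *within* a band/stack (the grouped sample(...)
        # calls above) preserves that validity.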
# print("\nInput:")
# for line in self.board: print(line)
squares = self.side * self.side
empties = squares * 3//4
for p in sample(range(squares),empties):
self.board[p//self.side][p%self.side] = 0
self.lines()
def expandLine(self, line):
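        # Expand a 13-character 3x3 template row (e.g. "╔═══╤═══╦═══╗") into
        # a full 9-column border line by repeating the inner cell pattern.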
return line[0]+line[5:9].join([line[1:5]*(self.base-1)]*self.base)+line[9:13]
def lines(self):
self.line0 = self.expandLine("╔═══╤═══╦═══╗")
self.line1 = self.expandLine("║ . │ . ║ . ║")
self.line2 = self.expandLine("╟───┼───╫───╢")
self.line3 = self.expandLine("╠═══╪═══╬═══╣")
self.line4 = self.expandLine("╚═══╧═══╩═══╝")
self.draw()
def draw(self):
symbol = " 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ"
self.nums = [ [""]+[symbol[n] for n in row] for row in self.board ]
print(self.line0)
for r in range(1,self.side+1):
print( "".join(n+s for n,s in zip(self.nums[r-1],self.line1.split("."))) )
print([self.line2,self.line3,self.line4][(r%self.side==0)+(r%self.base==0)])
class solve_board():
def __init__(self, board):
self.row = []
self.col = []
self.cell = []
self.row_list = []
self.col_list = []
self.cell_list = []
for space in range(9):
self.col.append([])
self.cell.append([])
row_idx = 0
for line in board:
self.row.append(line)
cell_idx = 0
if row_idx >= 3:
cell_idx = 3
if row_idx >= 6:
cell_idx = 6
for col_idx in range(9):
self.col[col_idx].insert(row_idx, line[col_idx])
if col_idx % 3 == 0:
for triple in range(0, 3):
self.cell[cell_idx].insert(len(self.cell[row_idx]) + triple, line[col_idx + triple])
cell_idx += 1
self.row_list.append(self.row)
self.col_list.append(self.col)
self.cell_list.append(self.cell)
row_idx += 1
print("\nrow:")
for row in self.row_list[0]:
print(row)
# print("\ncolumn:")
# for col in self.col_list[0]:
# print(col)
# print("\ncell:")
# for cell in self.cell_list[0]:
# print(cell)
def assign_flags(self, board):
self.flags = []
row_idx = 0
cell_idx = 0
print("\n")
for line in board:
cell_idx = 0
if row_idx >= 3:
cell_idx = 3
if row_idx >= 6:
cell_idx = 6
for index in range(9):
# print("position: ", index, "value: ", line[index])
# print("row", row_idx, "col", index, "cell", cell_idx)
if (index % 3 == 0 and index != 0):
cell_idx += 1
if line[index] == 0:
flag_idx = 0
temp_flag = []
for value in range(1, 10):
# print(value)
if self.row_flag(value, row_idx):
# print("found in row")
pass
elif self.col_flag(value, index):
# print("found in column")
pass
elif self.cell_flag(value, cell_idx):
# print("found in cell")
pass
else:
temp_flag.append(value)
flag_idx += 1
print(temp_flag)
self.flags.append(temp_flag)
row_idx += 1
def check_row(self):
pass
def column(self, x):
pass
def cell(self, row, col):
pass
def row_flag(self, index, row_idx):
for row in self.row_list[0][row_idx]:
# print("comparing in row ", row, "with ", index, "row_idx ", row_idx)
if row == index:
return 1
return 0
def col_flag(self, index, col_idx):
for col in self.col_list[0][col_idx]:
# print("comparing in column ", col, "with ", index, "col_idx ", col_idx)
if col == index:
return 1
return 0
def cell_flag(self, index, cell_idx):
for cell in self.cell_list[0][cell_idx]:
# print("comparing in cell ", cell, "with ", index, "cell_idx ", cell_idx)
if cell == index:
return 1
return 0
class Display_board(pyglet.window.Window):
def __init__(self, *args):
super().__init__(*args)
# Set window size
self.set_minimum_size(700,700)
# Set background color
background_color = [255, 255, 255, 255]
background_color = [i / 255 for i in background_color]
        glClearColor(*background_color)
    def on_key_press(self, symbol, modifiers):
        pass

    def on_key_release(self, symbol, modifiers):
        pass

    def on_mouse_press(self, x, y, button, modifiers):
        pass

    def on_draw(self):
        pass

    def update(self, dt):
        # Called by pyglet.clock.schedule_interval with the elapsed time.
        pass
# Start the main window and start a timer to hit the update method
frame_rate = 30
class Main():
def __init__(self):
self.board = []
self.run()
def run(self):
self.board = create_board().board
self.solution = solve_board(self.board)
self.solution.assign_flags(self.board)
if __name__ == '__main__':
    window = Display_board(700, 700, "sudoku")
    pyglet.clock.schedule_interval(window.update, 2 / frame_rate)
    Main()  # build and analyse a board before the blocking event loop starts
    pyglet.app.run()

# ---- secao20_testes_python/intro_unittest.py (repo: RianMarlon/Python-Geek-University) ----
"""
Introduction to the unittest module

Unittest -> Unit Tests

What are unit tests?

A test is a way of exercising individual units of source code.
Individual units can be: functions, methods, classes, modules, etc.

# NOTE: Unit tests are not specific to the Python language.

To create our tests, we write classes that inherit from unittest.TestCase,
and from that point on we gain all the 'assertions' present in the module.

To run the tests, we use unittest.main()

TestCase -> Test cases for your unit

# Getting to know the assertions
https://docs.python.org/3/library/unittest.html

Method                      Checks that
assertEqual(a, b)           a == b
assertNotEqual(a, b)        a != b
assertTrue(x)               x is true
assertFalse(x)              x is false
assertIs(a, b)              a is b
assertIsNot(a, b)           a is not b
assertIsNone(x)             x is None
assertIsNotNone(x)          x is not None
assertIn(a, b)              a is in b
assertNotIn(a, b)           a is not in b
assertIsInstance(a, b)      a is an instance of b
assertNotIsInstance(a, b)   a is not an instance of b

By convention, every test in a test case should have its name start
with test_

# To run the tests with unittest
python module_name.py

# To run the tests with unittest in verbose mode
python module_name.py -v

# Docstrings in tests
We can (and it is recommended to) add docstrings to our tests.
"""

# Practice - Using the TDD approach
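
# Below is a minimal, hedged sketch of the ideas above. The multiply()
# function and the test names are made-up examples for illustration; they
# are not part of the original lesson.
import unittest


def multiply(a, b):
    """Return the product of a and b."""
    return a * b


class MultiplyTestCase(unittest.TestCase):
    """Unit tests for the multiply() example."""

    def test_multiply_two_positives(self):
        """multiply() should return the product of two positive numbers."""
        self.assertEqual(multiply(3, 4), 12)

    def test_multiply_by_zero_is_zero(self):
        """Anything multiplied by zero should be zero."""
        self.assertEqual(multiply(7, 0), 0)

    def test_result_is_int_for_int_inputs(self):
        """multiply() with int inputs should return an int."""
        self.assertIsInstance(multiply(2, 5), int)


if __name__ == '__main__':
    unittest.main()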

# ---- test/preprocess_test.py (repo: kimsoosoo0928/chatbot02) ----
from utils.Preprocess import Preprocess
from tensorflow.keras import preprocessing
sent = "내일 오전 10시에 짬뽕 주문하고 싶어ㅋㅋ"
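# The sentence above means roughly: "I want to order jjamppong tomorrow at
# 10 a.m. lol" (kept in Korean because the preprocessor targets Korean text).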
p = Preprocess(word2index_dic='../train_tools/dict/chatbot_dict.bin',
userdic = '../utils/user_dic.tsv')
pos = p.pos(sent)
keywords = p.get_keywords(pos, without_tag=False)
print(keywords)
# w2i = p.get_wordidx_sequence(keywords)
# sequences = [w2i]
#
# MAX_SEQ_LEN = 15  # embedding vector size
# padded_seqs = preprocessing.sequence.pad_sequences(sequences, maxlen=MAX_SEQ_LEN, padding='post')
#
# print(keywords)
# print(sequences)
# print(padded_seqs)

# ---- electrum/plugins/trustedcoin/trustedcoin.py
# ---- (repo: c4pt000/electrum-ravencoin-lite, license: MIT) ----
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import socket
import json
import base64
import time
import hashlib
from collections import defaultdict
from typing import Dict, Union, Sequence, List
from urllib.parse import urljoin
from urllib.parse import quote
from aiohttp import ClientResponse
from electrum import ecc, constants, keystore, version, bip32, bitcoin
from electrum.bip32 import BIP32Node, xpub_type
from electrum.crypto import sha256
from electrum.transaction import PartialTxOutput, PartialTxInput, PartialTransaction, Transaction
from electrum.mnemonic import Mnemonic, seed_type, is_any_2fa_seed_type
from electrum.wallet import Multisig_Wallet, Deterministic_Wallet
from electrum.i18n import _
from electrum.plugin import BasePlugin, hook
from electrum.util import NotEnoughFunds, UserFacingException
from electrum.storage import StorageEncryptionVersion
from electrum.network import Network
from electrum.base_wizard import BaseWizard, WizardWalletPasswordSetting
from electrum.logging import Logger
def get_signing_xpub(xtype):
if not constants.net.TESTNET:
xpub = "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL"
else:
xpub = "tpubD6NzVbkrYhZ4XdmyJQcCPjQfg6RXVUzGFhPjZ7uvRC8JLcS7Hw1i7UTpyhp9grHpak4TyK2hzBJrujDVLXQ6qB5tNpVx9rC6ixijUXadnmY"
if xtype not in ('standard', 'p2wsh'):
raise NotImplementedError('xtype: {}'.format(xtype))
if xtype == 'standard':
return xpub
node = BIP32Node.from_xkey(xpub)
return node._replace(xtype=xtype).to_xpub()
def get_billing_xpub():
if constants.net.TESTNET:
return "tpubD6NzVbkrYhZ4X11EJFTJujsYbUmVASAYY7gXsEt4sL97AMBdypiH1E9ZVTpdXXEy3Kj9Eqd1UkxdGtvDt5z23DKsh6211CfNJo8bLLyem5r"
else:
return "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU"
DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"It uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. To use this service, you will need a smartphone with "
"Google Authenticator installed."),
_("A small fee will be charged on each transaction that uses the "
"remote server. You may check and modify your billing preferences "
"once the installation is complete."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
_("The next step will generate the seed of your wallet. This seed will "
"NOT be saved in your computer, and it must be stored on paper. "
"To be safe from malware, you may want to do this on an offline "
"computer, and move your wallet later to an online computer."),
]
KIVY_DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"To use it, you must have a separate device with Google Authenticator."),
_("This service uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. A small fee will be charged on each transaction that uses the "
"remote server."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
]
RESTORE_MSG = _("Enter the seed for your 2-factor wallet:")
class TrustedCoinException(Exception):
def __init__(self, message, status_code=0):
Exception.__init__(self, message)
self.status_code = status_code
class ErrorConnectingServer(Exception):
def __init__(self, reason: Union[str, Exception] = None):
self.reason = reason
def __str__(self):
header = _("Error connecting to {} server").format('TrustedCoin')
reason = self.reason
if isinstance(reason, BaseException):
reason = repr(reason)
return f"{header}:\n{reason}" if reason else header
class TrustedCoinCosignerClient(Logger):
def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/'):
self.base_url = base_url
self.debug = False
self.user_agent = user_agent
Logger.__init__(self)
async def handle_response(self, resp: ClientResponse):
if resp.status != 200:
try:
r = await resp.json()
message = r['message']
except:
message = await resp.text()
raise TrustedCoinException(message, resp.status)
try:
return await resp.json()
except:
return await resp.text()
def send_request(self, method, relative_url, data=None, *, timeout=None):
network = Network.get_instance()
if not network:
raise ErrorConnectingServer('You are offline.')
url = urljoin(self.base_url, relative_url)
if self.debug:
self.logger.debug(f'<-- {method} {url} {data}')
headers = {}
if self.user_agent:
headers['user-agent'] = self.user_agent
try:
if method == 'get':
response = Network.send_http_on_proxy(method, url,
params=data,
headers=headers,
on_finish=self.handle_response,
timeout=timeout)
elif method == 'post':
response = Network.send_http_on_proxy(method, url,
json=data,
headers=headers,
on_finish=self.handle_response,
timeout=timeout)
else:
assert False
except TrustedCoinException:
raise
except Exception as e:
raise ErrorConnectingServer(e)
else:
if self.debug:
self.logger.debug(f'--> {response}')
return response
def get_terms_of_service(self, billing_plan='electrum-per-tx-otp'):
"""
Returns the TOS for the given billing plan as a plain/text unicode string.
:param billing_plan: the plan to return the terms for
"""
payload = {'billing_plan': billing_plan}
return self.send_request('get', 'tos', payload)
def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-per-tx-otp'):
"""
Creates a new cosigner resource.
:param xpubkey1: a bip32 extended public key (customarily the hot key)
:param xpubkey2: a bip32 extended public key (customarily the cold key)
:param email: a contact email
:param billing_plan: the billing plan for the cosigner
"""
payload = {
'email': email,
'xpubkey1': xpubkey1,
'xpubkey2': xpubkey2,
'billing_plan': billing_plan,
}
return self.send_request('post', 'cosigner', payload)
def auth(self, id, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param otp: the one time password
"""
payload = {'otp': otp}
return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload)
def get(self, id):
""" Get billing info """
return self.send_request('get', 'cosigner/%s' % quote(id))
def get_challenge(self, id):
""" Get challenge to reset Google Auth secret """
return self.send_request('get', 'cosigner/%s/otp_secret' % quote(id))
def reset_auth(self, id, challenge, signatures):
""" Reset Google Auth secret """
payload = {'challenge':challenge, 'signatures':signatures}
return self.send_request('post', 'cosigner/%s/otp_secret' % quote(id), payload)
def sign(self, id, transaction, otp):
"""
        Attempt to have the cosigning server sign a transaction.
:param id: the id of the cosigner
:param transaction: the hex encoded [partially signed] compact transaction to sign
:param otp: the one time password
"""
payload = {
'otp': otp,
'transaction': transaction
}
return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload,
timeout=60)
def transfer_credit(self, id, recipient, otp, signature_callback):
"""
Transfer a cosigner's credits to another cosigner.
:param id: the id of the sending cosigner
:param recipient: the id of the recipient cosigner
:param otp: the one time password (of the sender)
:param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig
"""
payload = {
'otp': otp,
'recipient': recipient,
'timestamp': int(time.time()),
}
relative_url = 'cosigner/%s/transfer' % quote(id)
full_url = urljoin(self.base_url, relative_url)
headers = {
'x-signature': signature_callback(full_url + '\n' + json.dumps(payload))
}
return self.send_request('post', relative_url, payload, headers)
server = TrustedCoinCosignerClient(user_agent="Electrum/" + version.ELECTRUM_VERSION)
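
# A minimal usage sketch of the cosigner client above (the xpubs, email, id
# and OTP values are made-up placeholders, not real credentials):
#
#   tos = server.get_terms_of_service()
#   r = server.create(xpub1, xpub2, '[email protected]')  # -> {'id', 'otp_secret', ...}
#   server.auth(r['id'], 123456)                          # confirm OTP setup
#   billing = server.get(r['id'])                         # query billing info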
class Wallet_2fa(Multisig_Wallet):
plugin: 'TrustedCoinPlugin'
wallet_type = '2fa'
def __init__(self, db, storage, *, config):
self.m, self.n = 2, 3
Deterministic_Wallet.__init__(self, db, storage, config=config)
self.is_billing = False
self.billing_info = None
self._load_billing_addresses()
def _load_billing_addresses(self):
billing_addresses = {
'standard': self.db.get('trustedcoin_billing_addresses', {}),
}
self._billing_addresses = {} # type: Dict[str, Dict[int, str]] # addr_type -> index -> addr
self._billing_addresses_set = set() # set of addrs
for addr_type, d in list(billing_addresses.items()):
self._billing_addresses[addr_type] = {}
# convert keys from str to int
for index, addr in d.items():
self._billing_addresses[addr_type][int(index)] = addr
self._billing_addresses_set.add(addr)
def can_sign_without_server(self):
return not self.keystores['x2/'].is_watching_only()
def get_user_id(self):
return get_user_id(self.db)
def min_prepay(self):
return min(self.price_per_tx.keys())
def num_prepay(self):
default = self.min_prepay()
n = self.config.get('trustedcoin_prepay', default)
if n not in self.price_per_tx:
n = default
return n
def extra_fee(self):
if self.can_sign_without_server():
return 0
if self.billing_info is None:
self.plugin.start_request_thread(self)
return 0
if self.billing_info.get('tx_remaining'):
return 0
if self.is_billing:
return 0
n = self.num_prepay()
price = int(self.price_per_tx[n])
if price > 100000 * n:
raise Exception('too high trustedcoin fee ({} for {} txns)'.format(price, n))
return price
def make_unsigned_transaction(self, *, coins: Sequence[PartialTxInput],
outputs: List[PartialTxOutput], fee=None,
change_addr: str = None, is_sweep=False) -> PartialTransaction:
mk_tx = lambda o: Multisig_Wallet.make_unsigned_transaction(
self, coins=coins, outputs=o, fee=fee, change_addr=change_addr)
extra_fee = self.extra_fee() if not is_sweep else 0
        if extra_fee:
            # NOTE: upstream used the server-provided segwit billing address
            # here; this fork commented it out, leaving `fee_output` undefined.
            # As a hedged fix, fall back to the locally derived legacy billing
            # address (an assumption about the intended behaviour).
            address = make_billing_address(self, self.billing_info['billing_index'],
                                           addr_type='standard')
            fee_output = PartialTxOutput.from_address_and_value(address, extra_fee)
            try:
                tx = mk_tx(outputs + [fee_output])
except NotEnoughFunds:
# TrustedCoin won't charge if the total inputs is
# lower than their fee
tx = mk_tx(outputs)
if tx.input_value() >= extra_fee:
raise
self.logger.info("not charging for this tx")
else:
tx = mk_tx(outputs)
return tx
def on_otp(self, tx: PartialTransaction, otp):
if not otp:
self.logger.info("sign_transaction: no auth code")
return
otp = int(otp)
long_user_id, short_id = self.get_user_id()
raw_tx = tx.serialize_as_bytes().hex()
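        # "70736274ff" is the PSBT magic prefix, i.e. b"psbt" + 0xff in hex.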
assert raw_tx[:10] == "70736274ff", f"bad magic. {raw_tx[:10]}"
try:
r = server.sign(short_id, raw_tx, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
raise UserFacingException(_('Invalid one-time password.')) from e
else:
raise
if r:
received_raw_tx = r.get('transaction')
received_tx = Transaction(received_raw_tx)
tx.combine_with_other_psbt(received_tx)
self.logger.info(f"twofactor: is complete {tx.is_complete()}")
# reset billing_info
self.billing_info = None
self.plugin.start_request_thread(self)
def add_new_billing_address(self, billing_index: int, address: str, addr_type: str):
billing_addresses_of_this_type = self._billing_addresses[addr_type]
saved_addr = billing_addresses_of_this_type.get(billing_index)
if saved_addr is not None:
if saved_addr == address:
return # already saved this address
else:
raise Exception('trustedcoin billing address inconsistency.. '
'for index {}, already saved {}, now got {}'
.format(billing_index, saved_addr, address))
# do we have all prior indices? (are we synced?)
largest_index_we_have = max(billing_addresses_of_this_type) if billing_addresses_of_this_type else -1
if largest_index_we_have + 1 < billing_index: # need to sync
for i in range(largest_index_we_have + 1, billing_index):
addr = make_billing_address(self, i, addr_type=addr_type)
billing_addresses_of_this_type[i] = addr
self._billing_addresses_set.add(addr)
# save this address; and persist to disk
billing_addresses_of_this_type[billing_index] = address
self._billing_addresses_set.add(address)
self._billing_addresses[addr_type] = billing_addresses_of_this_type
self.db.put('trustedcoin_billing_addresses', self._billing_addresses['standard'])
# FIXME this often runs in a daemon thread, where storage.write will fail
self.db.write(self.storage)
def is_billing_address(self, addr: str) -> bool:
return addr in self._billing_addresses_set
# Utility functions
def get_user_id(db):
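    # long id: sha256 of the two xpubs concatenated in sorted order;
    # short id: hex-encoded sha256 of the long id (used in server URLs).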
def make_long_id(xpub_hot, xpub_cold):
return sha256(''.join(sorted([xpub_hot, xpub_cold])))
xpub1 = db.get('x1/')['xpub']
xpub2 = db.get('x2/')['xpub']
long_id = make_long_id(xpub1, xpub2)
short_id = hashlib.sha256(long_id).hexdigest()
return long_id, short_id
def make_xpub(xpub, s) -> str:
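    # Non-standard single-step BIP32 public derivation: the byte string `s`
    # (the long user id) is used directly as the child index.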
rootnode = BIP32Node.from_xkey(xpub)
child_pubkey, child_chaincode = bip32._CKD_pub(parent_pubkey=rootnode.eckey.get_public_key_bytes(compressed=True),
parent_chaincode=rootnode.chaincode,
child_index=s)
child_node = BIP32Node(xtype=rootnode.xtype,
eckey=ecc.ECPubkey(child_pubkey),
chaincode=child_chaincode)
return child_node.to_xpub()
def make_billing_address(wallet, num, addr_type):
long_id, short_id = wallet.get_user_id()
xpub = make_xpub(get_billing_xpub(), long_id)
usernode = BIP32Node.from_xkey(xpub)
child_node = usernode.subkey_at_public_derivation([num])
pubkey = child_node.eckey.get_public_key_bytes(compressed=True)
if addr_type == 'standard':
return bitcoin.public_key_to_p2pkh(pubkey)
else:
raise ValueError(f'unexpected billing type: {addr_type}')
class TrustedCoinPlugin(BasePlugin):
wallet_class = Wallet_2fa
disclaimer_msg = DISCLAIMER
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.wallet_class.plugin = self
self.requesting = False
@staticmethod
def is_valid_seed(seed):
t = seed_type(seed)
return is_any_2fa_seed_type(t)
def is_available(self):
return True
def is_enabled(self):
return True
def can_user_disable(self):
return False
@hook
def tc_sign_wrapper(self, wallet, tx, on_success, on_failure):
if not isinstance(wallet, self.wallet_class):
return
if tx.is_complete():
return
if wallet.can_sign_without_server():
return
if not wallet.keystores['x3/'].can_sign(tx, ignore_watching_only=True):
self.logger.info("twofactor: xpub3 not needed")
return
def wrapper(tx):
assert tx
self.prompt_user_for_otp(wallet, tx, on_success, on_failure)
return wrapper
def prompt_user_for_otp(self, wallet, tx, on_success, on_failure) -> None:
raise NotImplementedError()
@hook
def get_tx_extra_fee(self, wallet, tx: Transaction):
if type(wallet) != Wallet_2fa:
return
for o in tx.outputs():
if wallet.is_billing_address(o.address):
return o.address, o.value
def finish_requesting(func):
def f(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
finally:
self.requesting = False
return f
@finish_requesting
def request_billing_info(self, wallet: 'Wallet_2fa', *, suppress_connection_error=True):
if wallet.can_sign_without_server():
return
self.logger.info("request billing info")
try:
billing_info = server.get(wallet.get_user_id()[1])
except ErrorConnectingServer as e:
if suppress_connection_error:
self.logger.info(repr(e))
return
raise
billing_index = billing_info['billing_index']
# add segwit billing address; this will be used for actual billing
# billing_address = make_billing_address(wallet, billing_index, addr_type='segwit')
# if billing_address != billing_info['billing_address_segwit']:
# raise Exception(f'unexpected trustedcoin billing address: '
# f'calculated {billing_address}, received {billing_info["billing_address_segwit"]}')
# wallet.add_new_billing_address(billing_index, billing_address, addr_type='segwit')
# also add legacy billing address; only used for detecting past payments in GUI
billing_address = make_billing_address(wallet, billing_index, addr_type='standard')
wallet.add_new_billing_address(billing_index, billing_address, addr_type='standard')
wallet.billing_info = billing_info
wallet.price_per_tx = dict(billing_info['price_per_tx'])
wallet.price_per_tx.pop(1, None)
return True
def start_request_thread(self, wallet):
from threading import Thread
if self.requesting is False:
self.requesting = True
t = Thread(target=self.request_billing_info, args=(wallet,))
t.setDaemon(True)
t.start()
return t
def make_seed(self, seed_type):
if not is_any_2fa_seed_type(seed_type):
raise Exception(f'unexpected seed type: {seed_type}')
return Mnemonic('english').make_seed(seed_type=seed_type, num_bits=128)
@hook
def do_clear(self, window):
window.wallet.is_billing = False
def show_disclaimer(self, wizard: BaseWizard):
wizard.set_icon('trustedcoin-wizard.png')
wizard.reset_stack()
wizard.confirm_dialog(title='Disclaimer', message='\n\n'.join(self.disclaimer_msg), run_next = lambda x: wizard.run('choose_seed'))
def choose_seed(self, wizard):
title = _('Create or restore')
message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
choices = [
('choose_seed_type', _('Create a new seed')),
('restore_wallet', _('I already have a seed')),
]
wizard.choice_dialog(title=title, message=message, choices=choices, run_next=wizard.run)
def choose_seed_type(self, wizard):
choices = [
('create_2fa_seed', _('Standard 2FA')),
]
wizard.choose_seed_type(choices=choices)
def create_2fa_seed(self, wizard): self.create_seed(wizard, '2fa')
def create_seed(self, wizard, seed_type):
seed = self.make_seed(seed_type)
f = lambda x: wizard.request_passphrase(seed, x)
wizard.show_seed_dialog(run_next=f, seed_text=seed)
@classmethod
def get_xkeys(self, seed, t, passphrase, derivation):
assert is_any_2fa_seed_type(t)
xtype = 'standard' if t == '2fa' else 'p2wsh'
bip32_seed = Mnemonic.mnemonic_to_seed(seed, passphrase)
rootnode = BIP32Node.from_rootseed(bip32_seed, xtype=xtype)
child_node = rootnode.subkey_at_private_derivation(derivation)
return child_node.to_xprv(), child_node.to_xpub()
@classmethod
def xkeys_from_seed(self, seed, passphrase):
t = seed_type(seed)
if not is_any_2fa_seed_type(t):
raise Exception(f'unexpected seed type: {t}')
words = seed.split()
n = len(words)
# old version use long seed phrases
if n >= 20:
# note: pre-2.7 2fa seeds were typically 24-25 words, however they
# could probabilistically be arbitrarily shorter due to a bug. (see #3611)
# the probability of it being < 20 words is about 2^(-(256+12-19*11)) = 2^(-59)
if passphrase != '':
raise Exception('old 2fa seed cannot have passphrase')
xprv1, xpub1 = self.get_xkeys(' '.join(words[0:12]), t, '', "m/")
xprv2, xpub2 = self.get_xkeys(' '.join(words[12:]), t, '', "m/")
elif not t == '2fa' or n == 12:
xprv1, xpub1 = self.get_xkeys(seed, t, passphrase, "m/0'/")
xprv2, xpub2 = self.get_xkeys(seed, t, passphrase, "m/1'/")
else:
raise Exception('unrecognized seed length: {} words'.format(n))
return xprv1, xpub1, xprv2, xpub2
def create_keystore(self, wizard, seed, passphrase):
# this overloads the wizard's method
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xpub(xpub2)
wizard.request_password(run_next=lambda pw, encrypt: self.on_password(wizard, pw, encrypt, k1, k2))
def on_password(self, wizard, password, encrypt_storage, k1, k2):
k1.update_password(None, password)
wizard.data['x1/'] = k1.dump()
wizard.data['x2/'] = k2.dump()
wizard.pw_args = WizardWalletPasswordSetting(password=password,
encrypt_storage=encrypt_storage,
storage_enc_version=StorageEncryptionVersion.USER_PASSWORD,
encrypt_keystore=bool(password))
self.go_online_dialog(wizard)
def restore_wallet(self, wizard):
wizard.opt_bip39 = False
wizard.opt_ext = True
title = _("Restore two-factor Wallet")
f = lambda seed, is_bip39, is_ext: wizard.run('on_restore_seed', seed, is_ext)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_restore_seed(self, wizard, seed, is_ext):
f = lambda x: self.restore_choice(wizard, seed, x)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def restore_choice(self, wizard: BaseWizard, seed, passphrase):
wizard.set_icon('trustedcoin-wizard.png')
wizard.reset_stack()
title = _('Restore 2FA wallet')
msg = ' '.join([
'You are going to restore a wallet protected with two-factor authentication.',
'Do you want to keep using two-factor authentication with this wallet,',
'or do you want to disable it, and have two master private keys in your wallet?'
])
choices = [('keep', 'Keep'), ('disable', 'Disable')]
f = lambda x: self.on_choice(wizard, seed, passphrase, x)
wizard.choice_dialog(choices=choices, message=msg, title=title, run_next=f)
def on_choice(self, wizard, seed, passphrase, x):
if x == 'disable':
f = lambda pw, encrypt: wizard.run('on_restore_pw', seed, passphrase, pw, encrypt)
wizard.request_password(run_next=f)
else:
self.create_keystore(wizard, seed, passphrase)
def on_restore_pw(self, wizard, seed, passphrase, password, encrypt_storage):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xprv(xprv2)
k1.add_seed(seed)
k1.update_password(None, password)
k2.update_password(None, password)
wizard.data['x1/'] = k1.dump()
wizard.data['x2/'] = k2.dump()
long_user_id, short_id = get_user_id(wizard.data)
xtype = xpub_type(xpub1)
xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
k3 = keystore.from_xpub(xpub3)
wizard.data['x3/'] = k3.dump()
wizard.pw_args = WizardWalletPasswordSetting(password=password,
encrypt_storage=encrypt_storage,
storage_enc_version=StorageEncryptionVersion.USER_PASSWORD,
encrypt_keystore=bool(password))
wizard.terminate()
def create_remote_key(self, email, wizard):
xpub1 = wizard.data['x1/']['xpub']
xpub2 = wizard.data['x2/']['xpub']
# Generate third key deterministically.
long_user_id, short_id = get_user_id(wizard.data)
xtype = xpub_type(xpub1)
xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
# secret must be sent by the server
try:
r = server.create(xpub1, xpub2, email)
except (socket.error, ErrorConnectingServer):
wizard.show_message('Server not reachable, aborting')
wizard.terminate()
return
except TrustedCoinException as e:
if e.status_code == 409:
r = None
else:
wizard.show_message(str(e))
return
if r is None:
otp_secret = None
else:
otp_secret = r.get('otp_secret')
if not otp_secret:
wizard.show_message(_('Error'))
return
_xpub3 = r['xpubkey_cosigner']
_id = r['id']
if short_id != _id:
wizard.show_message("unexpected trustedcoin short_id: expected {}, received {}"
.format(short_id, _id))
return
if xpub3 != _xpub3:
wizard.show_message("unexpected trustedcoin xpub3: expected {}, received {}"
.format(xpub3, _xpub3))
return
self.request_otp_dialog(wizard, short_id, otp_secret, xpub3)
def check_otp(self, wizard, short_id, otp_secret, xpub3, otp, reset):
if otp:
self.do_auth(wizard, short_id, otp, xpub3)
elif reset:
wizard.opt_bip39 = False
wizard.opt_ext = True
f = lambda seed, is_bip39, is_ext: wizard.run('on_reset_seed', short_id, seed, is_ext, xpub3)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_reset_seed(self, wizard, short_id, seed, is_ext, xpub3):
f = lambda passphrase: wizard.run('on_reset_auth', short_id, seed, passphrase, xpub3)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def do_auth(self, wizard, short_id, otp, xpub3):
try:
server.auth(short_id, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
wizard.show_message(_('Invalid one-time password.'))
# ask again for otp
self.request_otp_dialog(wizard, short_id, None, xpub3)
else:
wizard.show_message(str(e))
wizard.terminate()
except Exception as e:
wizard.show_message(repr(e))
wizard.terminate()
else:
k3 = keystore.from_xpub(xpub3)
wizard.data['x3/'] = k3.dump()
wizard.data['use_trustedcoin'] = True
wizard.terminate()
def on_reset_auth(self, wizard, short_id, seed, passphrase, xpub3):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
if (wizard.data['x1/']['xpub'] != xpub1 or
wizard.data['x2/']['xpub'] != xpub2):
wizard.show_message(_('Incorrect seed'))
return
r = server.get_challenge(short_id)
challenge = r.get('challenge')
message = 'TRUSTEDCOIN CHALLENGE: ' + challenge
def f(xprv):
rootnode = BIP32Node.from_xkey(xprv)
key = rootnode.subkey_at_private_derivation((0, 0)).eckey
sig = key.sign_message(message, True)
return base64.b64encode(sig).decode()
signatures = [f(x) for x in [xprv1, xprv2]]
r = server.reset_auth(short_id, challenge, signatures)
new_secret = r.get('otp_secret')
if not new_secret:
wizard.show_message(_('Request rejected by server'))
return
self.request_otp_dialog(wizard, short_id, new_secret, xpub3)
@hook
def get_action(self, db):
if db.get('wallet_type') != '2fa':
return
if not db.get('x1/'):
return self, 'show_disclaimer'
if not db.get('x2/'):
return self, 'show_disclaimer'
if not db.get('x3/'):
return self, 'accept_terms_of_use'

# ---- djangofiltertest/djangofiltertest/apps/posts/migrations/0002_job_title.py
# ---- (repo: gonzaloamadio/django-filter-test, license: MIT) ----
# Generated by Django 2.0 on 2019-01-24 08:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='job',
name='title',
field=models.CharField(db_index=True, default='asd', max_length=128, verbose_name='name'),
preserve_default=False,
),
]

# ---- lib/googlecloudsdk/third_party/apis/networkmanagement/v1beta1/networkmanagement_v1beta1_messages.py
# ---- (repo: bopopescu/google-cloud-sdk-2, license: Apache-2.0) ----
"""Generated message classes for networkmanagement version v1beta1.
The Network Management API provides a collection of network performance
monitoring and diagnostic capabilities.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'networkmanagement'
class AbortInfo(_messages.Message):
r"""Details of the final state "abort" and associated resource.
Enums:
CauseValueValuesEnum: Causes that the analysis is aborted.
Fields:
cause: Causes that the analysis is aborted.
resourceUri: URI of the resource that caused the abort.
"""
class CauseValueValuesEnum(_messages.Enum):
r"""Causes that the analysis is aborted.
Values:
CAUSE_UNSPECIFIED: Cause is unspecified.
UNKNOWN_NETWORK: Aborted due to unknown network. The reachability
analysis cannot proceed because the user does not have access to the
host project's network configurations, including firewall rules and
routes. This happens when the project is a service project and the
endpoints being traced are in the host project's network.
UNKNOWN_IP: Aborted because the IP address(es) are unknown.
UNKNOWN_PROJECT: Aborted because no project information can be derived
from the test input.
PERMISSION_DENIED: Aborted because the user lacks the permission to
access all or part of the network configurations required to run the
test.
NO_SOURCE_LOCATION: Aborted because no valid source endpoint is derived
from the input test request.
INVALID_ARGUMENT: Aborted because the source and/or destination endpoint
specified in the test are invalid. The possible reasons that an
endpoint is invalid include: malformed IP address; nonexistent
instance or network URI; IP address not in the range of specified
network URI; and instance not owning the network interface in the
specified network.
NO_EXTERNAL_IP: Aborted because traffic is sent from a public IP to an
instance without an external IP.
UNINTENDED_DESTINATION: Aborted because none of the traces matches
destination information specified in the input test request.
TRACE_TOO_LONG: Aborted because the number of steps in the trace
        exceeds a certain limit; this may be caused by a routing loop.
INTERNAL_ERROR: Aborted due to internal server error.
"""
CAUSE_UNSPECIFIED = 0
UNKNOWN_NETWORK = 1
UNKNOWN_IP = 2
UNKNOWN_PROJECT = 3
PERMISSION_DENIED = 4
NO_SOURCE_LOCATION = 5
INVALID_ARGUMENT = 6
NO_EXTERNAL_IP = 7
UNINTENDED_DESTINATION = 8
TRACE_TOO_LONG = 9
INTERNAL_ERROR = 10
cause = _messages.EnumField('CauseValueValuesEnum', 1)
resourceUri = _messages.StringField(2)
class AuditConfig(_messages.Message):
r"""Specifies the audit configuration for a service. The configuration
determines which permission types are logged, and what identities, if any,
are exempted from logging. An AuditConfig must have one or more
AuditLogConfigs. If there are AuditConfigs for both `allServices` and a
specific service, the union of the two AuditConfigs is used for that
service: the log_types specified in each AuditConfig are enabled, and the
exempted_members in each AuditLogConfig are exempted. Example Policy with
multiple AuditConfigs: { "audit_configs": [ {
"service": "allServices", "audit_log_configs": [ {
"log_type": "DATA_READ", "exempted_members": [
"user:[email protected]" ] }, {
"log_type": "DATA_WRITE" }, {
"log_type": "ADMIN_READ" } ] }, {
"service": "sampleservice.googleapis.com", "audit_log_configs": [
{ "log_type": "DATA_READ" }, {
"log_type": "DATA_WRITE", "exempted_members": [
"user:[email protected]" ] } ] }
] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and
ADMIN_READ logging. It also exempts [email protected] from DATA_READ logging,
and [email protected] from DATA_WRITE logging.
Fields:
auditLogConfigs: The configuration for logging of each type of permission.
service: Specifies a service that will be enabled for audit logging. For
example, `storage.googleapis.com`, `cloudsql.googleapis.com`.
`allServices` is a special value that covers all services.
"""
auditLogConfigs = _messages.MessageField('AuditLogConfig', 1, repeated=True)
service = _messages.StringField(2)
class AuditLogConfig(_messages.Message):
r"""Provides the configuration for logging a type of permissions. Example:
{ "audit_log_configs": [ { "log_type": "DATA_READ",
"exempted_members": [ "user:[email protected]" ]
}, { "log_type": "DATA_WRITE" } ] }
This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting
[email protected] from DATA_READ logging.
Enums:
LogTypeValueValuesEnum: The log type that this config enables.
Fields:
exemptedMembers: Specifies the identities that do not cause logging for
this type of permission. Follows the same format of Binding.members.
logType: The log type that this config enables.
"""
class LogTypeValueValuesEnum(_messages.Enum):
r"""The log type that this config enables.
Values:
LOG_TYPE_UNSPECIFIED: Default case. Should never be this.
ADMIN_READ: Admin reads. Example: CloudIAM getIamPolicy
DATA_WRITE: Data writes. Example: CloudSQL Users create
DATA_READ: Data reads. Example: CloudSQL Users list
"""
LOG_TYPE_UNSPECIFIED = 0
ADMIN_READ = 1
DATA_WRITE = 2
DATA_READ = 3
exemptedMembers = _messages.StringField(1, repeated=True)
logType = _messages.EnumField('LogTypeValueValuesEnum', 2)
class Binding(_messages.Message):
r"""Associates `members` with a `role`.
Fields:
condition: The condition that is associated with this binding. If the
condition evaluates to `true`, then this binding applies to the current
request. If the condition evaluates to `false`, then this binding does
not apply to the current request. However, a different role binding
might grant the same role to one or more of the members in this binding.
To learn which resources support conditions in their IAM policies, see
the [IAM
documentation](https://cloud.google.com/iam/help/conditions/resource-
policies).
members: Specifies the identities requesting access for a Cloud Platform
resource. `members` can have the following values: * `allUsers`: A
special identifier that represents anyone who is on the internet;
with or without a Google account. * `allAuthenticatedUsers`: A special
identifier that represents anyone who is authenticated with a Google
account or a service account. * `user:{emailid}`: An email address that
represents a specific Google account. For example,
`[email protected]` . * `serviceAccount:{emailid}`: An email address
that represents a service account. For example, `my-other-
[email protected]`. * `group:{emailid}`: An email address
that represents a Google group. For example, `[email protected]`. *
`deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
identifier) representing a user that has been recently deleted. For
example, `[email protected]?uid=123456789012345678901`. If the user is
recovered, this value reverts to `user:{emailid}` and the recovered user
retains the role in the binding. *
`deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address
(plus unique identifier) representing a service account that has been
recently deleted. For example, `my-other-
[email protected]?uid=123456789012345678901`. If the
service account is undeleted, this value reverts to
`serviceAccount:{emailid}` and the undeleted service account retains the
role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An
email address (plus unique identifier) representing a Google group
that has been recently deleted. For example,
`[email protected]?uid=123456789012345678901`. If the group is
recovered, this value reverts to `group:{emailid}` and the recovered
group retains the role in the binding. * `domain:{domain}`: The G
Suite domain (primary) that represents all the users of that domain.
For example, `google.com` or `example.com`.
role: Role that is assigned to `members`. For example, `roles/viewer`,
`roles/editor`, or `roles/owner`.
"""
condition = _messages.MessageField('Expr', 1)
members = _messages.StringField(2, repeated=True)
role = _messages.StringField(3)
class CancelOperationRequest(_messages.Message):
r"""The request message for Operations.CancelOperation."""
class ConnectivityTest(_messages.Message):
r"""A Connectivity Test for a network reachability analysis.
Messages:
LabelsValue: Resource labels to represent user-provided metadata.
Fields:
createTime: Output only. The time the test was created.
description: The user-supplied description of the Connectivity Test.
Maximum of 512 characters.
destination: Required. Destination specification of the Connectivity Test.
You can use a combination of destination IP address, Compute Engine VM
instance, or VPC network to uniquely identify the destination location.
Even if the destination IP address is not unique, the source IP location
is unique. Usually, the analysis can infer the destination endpoint from
route information. If the destination you specify is a VM instance and
the instance has multiple network interfaces, then you must also specify
either a destination IP address or VPC network to identify the
destination interface. A reachability analysis proceeds even if the
destination location is ambiguous. However, the result can include
endpoints that you don't intend to test.
displayName: Output only. The display name of a Connectivity Test.
labels: Resource labels to represent user-provided metadata.
name: Required. Unique name of the resource using the form:
`projects/{project_id}/locations/global/connectivityTests/{test}`
protocol: IP Protocol of the test. When not provided, "TCP" is assumed.
reachabilityDetails: Output only. The reachability details of this test
from the latest run. The details are updated when creating a new test,
updating an existing test, or triggering a one-time rerun of an existing
test.
relatedProjects: Other projects that may be relevant for reachability
analysis. This is applicable to scenarios where a test can cross project
boundaries.
source: Required. Source specification of the Connectivity Test. You can
use a combination of source IP address, virtual machine (VM) instance,
or Compute Engine network to uniquely identify the source location.
Examples: If the source IP address is an internal IP address within a
Google Cloud Virtual Private Cloud (VPC) network, then you must also
specify the VPC network. Otherwise, specify the VM instance, which
already contains its internal IP address and VPC network information.
If the source of the test is within an on-premises network, then you
must provide the destination VPC network. If the source endpoint is a
Compute Engine VM instance with multiple network interfaces, the
instance itself is not sufficient to identify the endpoint. So, you must
also specify the source IP address or VPC network. A reachability
analysis proceeds even if the source location is ambiguous. However, the
test result may include endpoints that you don't intend to test.
updateTime: Output only. The time the test's configuration was updated.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""Resource labels to represent user-provided metadata.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
createTime = _messages.StringField(1)
description = _messages.StringField(2)
destination = _messages.MessageField('Endpoint', 3)
displayName = _messages.StringField(4)
labels = _messages.MessageField('LabelsValue', 5)
name = _messages.StringField(6)
protocol = _messages.StringField(7)
reachabilityDetails = _messages.MessageField('ReachabilityDetails', 8)
relatedProjects = _messages.StringField(9, repeated=True)
source = _messages.MessageField('Endpoint', 10)
updateTime = _messages.StringField(11)
class DeliverInfo(_messages.Message):
r"""Details of the final state "deliver" and associated resource.
Enums:
TargetValueValuesEnum: Target type where the packet is delivered to.
Fields:
resourceUri: URI of the resource that the packet is delivered to.
target: Target type where the packet is delivered to.
"""
class TargetValueValuesEnum(_messages.Enum):
r"""Target type where the packet is delivered to.
Values:
TARGET_UNSPECIFIED: Target not specified.
INSTANCE: Target is a Compute Engine instance.
INTERNET: Target is the Internet.
GOOGLE_API: Target is a Google API.
"""
TARGET_UNSPECIFIED = 0
INSTANCE = 1
INTERNET = 2
GOOGLE_API = 3
resourceUri = _messages.StringField(1)
target = _messages.EnumField('TargetValueValuesEnum', 2)
class DropInfo(_messages.Message):
r"""Details of the final state "drop" and associated resource.
Enums:
CauseValueValuesEnum: Cause that the packet is dropped.
Fields:
cause: Cause that the packet is dropped.
resourceUri: URI of the resource that caused the drop.
"""
class CauseValueValuesEnum(_messages.Enum):
r"""Cause that the packet is dropped.
Values:
CAUSE_UNSPECIFIED: Cause is unspecified.
UNKNOWN_EXTERNAL_ADDRESS: Destination external address cannot be
resolved to a known target.
      FOREIGN_IP_DISALLOWED: A Compute Engine instance can only send or
        receive a packet with a foreign IP address if ip_forward is
        enabled.
FIREWALL_RULE: Dropped due to a firewall rule unless allowed due to
connection tracking.
NO_ROUTE: Dropped due to no routes.
ROUTE_BLACKHOLE: Dropped due to invalid route. Route's next hop is a
blackhole.
ROUTE_WRONG_NETWORK: Packet is sent to a wrong (unintended) network.
Example: user traces a packet from VM1:Network1 to VM2:Network2,
however, the route configured in Network1 sends the packet destined
        for VM2's IP address to Network3.
PRIVATE_TRAFFIC_TO_INTERNET: Packet with internal destination address
sent to Internet gateway.
PRIVATE_GOOGLE_ACCESS_DISALLOWED: Instance with only an internal IP
tries to access Google API and Services, and private Google access is
not enabled.
NO_EXTERNAL_ADDRESS: Instance with only internal IP tries to access
external hosts, but Cloud NAT is not enabled in the subnet, unless
special configurations on a VM allows this connection. See [Special
Configurations for VM instances](/vpc/docs/special-configurations) for
details.
UNKNOWN_INTERNAL_ADDRESS: Destination internal address cannot be
resolved to a known target.
FORWARDING_RULE_MISMATCH: Forwarding rule's protocol and ports do not
match the packet header.
FORWARDING_RULE_NO_INSTANCES: Forwarding rule does not have backends
configured.
FIREWALL_BLOCKING_LOAD_BALANCER_BACKEND_HEALTH_CHECK: Firewalls block
the health check probes to the backends and cause the backends to be
unavailable for traffic from the load balancer. See [Health check
        firewall rules](/load-balancing/docs/health-checks#firewall_rules)
for more details.
INSTANCE_NOT_RUNNING: Packet is sent from or to a Compute Engine
instance that is not in a running state.
TRAFFIC_TYPE_BLOCKED: The type of traffic is blocked and the user cannot
configure a firewall rule to enable it. See [Always blocked
        traffic](/vpc/docs/firewalls#blockedtraffic) for more details.
GKE_MASTER_UNAUTHORIZED_ACCESS: Access to GKE master's endpoint is not
authorized. See [Access to the cluster endpoints](/kubernetes-
        engine/docs/how-to/private-clusters#access_to_the_cluster_endpoints)
for more details.
"""
CAUSE_UNSPECIFIED = 0
UNKNOWN_EXTERNAL_ADDRESS = 1
FOREIGN_IP_DISALLOWED = 2
FIREWALL_RULE = 3
NO_ROUTE = 4
ROUTE_BLACKHOLE = 5
ROUTE_WRONG_NETWORK = 6
PRIVATE_TRAFFIC_TO_INTERNET = 7
PRIVATE_GOOGLE_ACCESS_DISALLOWED = 8
NO_EXTERNAL_ADDRESS = 9
UNKNOWN_INTERNAL_ADDRESS = 10
FORWARDING_RULE_MISMATCH = 11
FORWARDING_RULE_NO_INSTANCES = 12
FIREWALL_BLOCKING_LOAD_BALANCER_BACKEND_HEALTH_CHECK = 13
INSTANCE_NOT_RUNNING = 14
TRAFFIC_TYPE_BLOCKED = 15
GKE_MASTER_UNAUTHORIZED_ACCESS = 16
cause = _messages.EnumField('CauseValueValuesEnum', 1)
resourceUri = _messages.StringField(2)
class Empty(_messages.Message):
r"""A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo {
rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The
JSON representation for `Empty` is empty JSON object `{}`.
"""
class Endpoint(_messages.Message):
r"""Source or destination of the Connectivity Test.
Enums:
NetworkTypeValueValuesEnum: Type of the network where the endpoint is
located. Applicable only to source endpoint, as destination network type
can be inferred from the source.
Fields:
instance: A Compute Engine instance URI.
ipAddress: The IP address of the endpoint, which can be an external or
internal IP. An IPv6 address is only allowed when the test's destination
is a [global load balancer VIP](/load-balancing/docs/load-balancing-
overview).
network: A Compute Engine network URI.
networkType: Type of the network where the endpoint is located. Applicable
only to source endpoint, as destination network type can be inferred
from the source.
port: The IP protocol port of the endpoint. Only applicable when protocol
is TCP or UDP.
projectId: Project ID where the endpoint is located. The Project ID can be
derived from the URI if you provide a VM instance or network URI. The
following are two cases where you must provide the project ID: 1. Only
the IP address is specified, and the IP address is within a GCP project.
2. When you are using Shared VPC and the IP address that you provide is
from the service project. In this case, the network that the IP address
resides in is defined in the host project.
"""
class NetworkTypeValueValuesEnum(_messages.Enum):
r"""Type of the network where the endpoint is located. Applicable only to
source endpoint, as destination network type can be inferred from the
source.
Values:
NETWORK_TYPE_UNSPECIFIED: Default type if unspecified.
GCP_NETWORK: A network hosted within Google Cloud Platform. To receive
more detailed output, specify the URI for the source or destination
network.
NON_GCP_NETWORK: A network hosted outside of Google Cloud Platform. This
can be an on-premises network, or a network hosted by another cloud
provider.
"""
NETWORK_TYPE_UNSPECIFIED = 0
GCP_NETWORK = 1
NON_GCP_NETWORK = 2
instance = _messages.StringField(1)
ipAddress = _messages.StringField(2)
network = _messages.StringField(3)
networkType = _messages.EnumField('NetworkTypeValueValuesEnum', 4)
port = _messages.IntegerField(5, variant=_messages.Variant.INT32)
projectId = _messages.StringField(6)
class EndpointInfo(_messages.Message):
r"""For display only. The specification of the endpoints for the test.
EndpointInfo is derived from source and destination Endpoint and validated
by the backend data plane model.
Fields:
destinationIp: Destination IP address.
destinationNetworkUri: URI of the network where this packet is sent to.
destinationPort: Destination port. Only valid when protocol is TCP or UDP.
protocol: IP protocol in string format, for example: "TCP", "UDP", "ICMP".
sourceIp: Source IP address.
sourceNetworkUri: URI of the network where this packet originates from.
sourcePort: Source port. Only valid when protocol is TCP or UDP.
"""
destinationIp = _messages.StringField(1)
destinationNetworkUri = _messages.StringField(2)
destinationPort = _messages.IntegerField(3, variant=_messages.Variant.INT32)
protocol = _messages.StringField(4)
sourceIp = _messages.StringField(5)
sourceNetworkUri = _messages.StringField(6)
sourcePort = _messages.IntegerField(7, variant=_messages.Variant.INT32)
class Expr(_messages.Message):
r"""Represents a textual expression in the Common Expression Language (CEL)
syntax. CEL is a C-like expression language. The syntax and semantics of CEL
are documented at https://github.com/google/cel-spec. Example (Comparison):
title: "Summary size limit" description: "Determines if a summary is
less than 100 chars" expression: "document.summary.size() < 100"
Example (Equality): title: "Requestor is owner" description:
"Determines if requestor is the document owner" expression:
"document.owner == request.auth.claims.email" Example (Logic): title:
"Public documents" description: "Determine whether the document should
be publicly visible" expression: "document.type != 'private' &&
document.type != 'internal'" Example (Data Manipulation): title:
"Notification string" description: "Create a notification string with a
timestamp." expression: "'New message received at ' +
string(document.create_time)" The exact variables and functions that may be
referenced within an expression are determined by the service that evaluates
it. See the service documentation for additional information.
Fields:
description: Optional. Description of the expression. This is a longer
text which describes the expression, e.g. when hovered over it in a UI.
expression: Textual representation of an expression in Common Expression
Language syntax.
location: Optional. String indicating the location of the expression for
error reporting, e.g. a file name and a position in the file.
title: Optional. Title for the expression, i.e. a short string describing
its purpose. This can be used e.g. in UIs which allow to enter the
expression.
"""
description = _messages.StringField(1)
expression = _messages.StringField(2)
location = _messages.StringField(3)
title = _messages.StringField(4)
class FirewallInfo(_messages.Message):
r"""For display only. Metadata associated with a Compute Engine firewall
rule.
Fields:
action: Possible values: ALLOW, DENY
direction: Possible values: INGRESS, EGRESS
displayName: Name of a Compute Engine firewall rule.
networkUri: URI of a Compute Engine network.
priority: Priority of the firewall rule.
targetServiceAccounts: Target service accounts of the firewall rule.
targetTags: Target tags of the firewall rule.
uri: URI of a Compute Engine firewall rule. Implied default rule does not
have URI.
"""
action = _messages.StringField(1)
direction = _messages.StringField(2)
displayName = _messages.StringField(3)
networkUri = _messages.StringField(4)
priority = _messages.IntegerField(5, variant=_messages.Variant.INT32)
targetServiceAccounts = _messages.StringField(6, repeated=True)
targetTags = _messages.StringField(7, repeated=True)
uri = _messages.StringField(8)
class ForwardInfo(_messages.Message):
r"""Details of the final state "forward" and associated resource.
Enums:
TargetValueValuesEnum: Target type where this packet is forwarded to.
Fields:
resourceUri: URI of the resource that the packet is forwarded to.
target: Target type where this packet is forwarded to.
"""
class TargetValueValuesEnum(_messages.Enum):
r"""Target type where this packet is forwarded to.
Values:
TARGET_UNSPECIFIED: Target not specified.
PEERING_VPC: Forwarded to a VPC peering network.
VPN_GATEWAY: Forwarded to a Cloud VPN gateway.
      INTERCONNECT: Forwarded to a Cloud Interconnect connection.
GKE_MASTER: Forwarded to a Google Kubernetes Engine Container cluster
master.
IMPORTED_CUSTOM_ROUTE_NEXT_HOP: Forwarded to the next hop of a custom
route imported from a peering VPC.
"""
TARGET_UNSPECIFIED = 0
PEERING_VPC = 1
VPN_GATEWAY = 2
INTERCONNECT = 3
GKE_MASTER = 4
IMPORTED_CUSTOM_ROUTE_NEXT_HOP = 5
resourceUri = _messages.StringField(1)
target = _messages.EnumField('TargetValueValuesEnum', 2)
class ForwardingRuleInfo(_messages.Message):
r"""For display only. Metadata associated with a Compute Engine forwarding
rule.
Fields:
displayName: Name of a Compute Engine forwarding rule.
matchedPortRange: Port range defined in the forwarding rule that matches
the test.
matchedProtocol: Protocol defined in the forwarding rule that matches the
test.
networkUri: Network URI. Only valid for Internal Load Balancer.
target: Target type of the forwarding rule.
uri: URI of a Compute Engine forwarding rule.
vip: VIP of the forwarding rule.
"""
displayName = _messages.StringField(1)
matchedPortRange = _messages.StringField(2)
matchedProtocol = _messages.StringField(3)
networkUri = _messages.StringField(4)
target = _messages.StringField(5)
uri = _messages.StringField(6)
vip = _messages.StringField(7)
class InstanceInfo(_messages.Message):
r"""For display only. Metadata associated with a Compute Engine instance.
Fields:
displayName: Name of a Compute Engine instance.
externalIp: External IP address of the network interface.
interface: Name of the network interface of a Compute Engine instance.
internalIp: Internal IP address of the network interface.
networkTags: Network tags configured on the instance.
networkUri: URI of a Compute Engine network.
serviceAccount: Service account authorized for the instance.
uri: URI of a Compute Engine instance.
"""
displayName = _messages.StringField(1)
externalIp = _messages.StringField(2)
interface = _messages.StringField(3)
internalIp = _messages.StringField(4)
networkTags = _messages.StringField(5, repeated=True)
networkUri = _messages.StringField(6)
serviceAccount = _messages.StringField(7)
uri = _messages.StringField(8)
class ListConnectivityTestsResponse(_messages.Message):
r"""Response for the `ListConnectivityTests` method.
Fields:
nextPageToken: Page token to fetch the next set of Connectivity Tests.
resources: List of Connectivity Tests.
unreachable: Locations that could not be reached (when querying all
locations with `-`).
"""
nextPageToken = _messages.StringField(1)
resources = _messages.MessageField('ConnectivityTest', 2, repeated=True)
unreachable = _messages.StringField(3, repeated=True)
class ListLocationsResponse(_messages.Message):
r"""The response message for Locations.ListLocations.
Fields:
locations: A list of locations that matches the specified filter in the
request.
nextPageToken: The standard List next-page token.
"""
locations = _messages.MessageField('Location', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class ListOperationsResponse(_messages.Message):
r"""The response message for Operations.ListOperations.
Fields:
nextPageToken: The standard List next-page token.
operations: A list of operations that matches the specified filter in the
request.
"""
nextPageToken = _messages.StringField(1)
operations = _messages.MessageField('Operation', 2, repeated=True)
class LoadBalancerBackend(_messages.Message):
r"""For display only. Metadata associated with a specific load balancer
backend.
Enums:
HealthCheckFirewallStateValueValuesEnum: State of the health check
firewall configuration.
Fields:
displayName: Name of a Compute Engine instance or network endpoint.
healthCheckAllowingFirewallRules: A list of firewall rule URIs allowing
probes from health check IP ranges.
healthCheckBlockingFirewallRules: A list of firewall rule URIs blocking
probes from health check IP ranges.
healthCheckFirewallState: State of the health check firewall
configuration.
uri: URI of a Compute Engine instance or network endpoint.
"""
class HealthCheckFirewallStateValueValuesEnum(_messages.Enum):
r"""State of the health check firewall configuration.
Values:
HEALTH_CHECK_FIREWALL_STATE_UNSPECIFIED: State is unspecified. Default
state if not populated.
CONFIGURED: There are configured firewall rules to allow health check
probes to the backend.
MISCONFIGURED: There are firewall rules configured to allow partial
health check ranges or block all health check ranges. If a health
check probe is sent from denied IP ranges, the health check to the
backend will fail. Then, the backend will be marked unhealthy and will
not receive traffic sent to the load balancer.
"""
HEALTH_CHECK_FIREWALL_STATE_UNSPECIFIED = 0
CONFIGURED = 1
MISCONFIGURED = 2
displayName = _messages.StringField(1)
healthCheckAllowingFirewallRules = _messages.StringField(2, repeated=True)
healthCheckBlockingFirewallRules = _messages.StringField(3, repeated=True)
healthCheckFirewallState = _messages.EnumField('HealthCheckFirewallStateValueValuesEnum', 4)
uri = _messages.StringField(5)
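# Illustrative sketch (assumption: `lb_info` is a LoadBalancerInfo instance,
# defined below, taken from a trace step): listing the firewall rules that
# block health-check probes for misconfigured backends.
#
#   _MISCONFIGURED = (LoadBalancerBackend
#                     .HealthCheckFirewallStateValueValuesEnum.MISCONFIGURED)
#   for backend in lb_info.backends or []:
#       if backend.healthCheckFirewallState == _MISCONFIGURED:
#           print(backend.displayName, backend.healthCheckBlockingFirewallRules)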
class LoadBalancerInfo(_messages.Message):
r"""For display only. Metadata associated with a load balancer.
Enums:
BackendTypeValueValuesEnum: Type of load balancer's backend configuration.
LoadBalancerTypeValueValuesEnum: Type of the load balancer.
Fields:
backendType: Type of load balancer's backend configuration.
backendUri: Backend configuration URI.
backends: Information for the loadbalancer backends.
healthCheckUri: URI of the health check for the load balancer.
loadBalancerType: Type of the load balancer.
"""
class BackendTypeValueValuesEnum(_messages.Enum):
r"""Type of load balancer's backend configuration.
Values:
BACKEND_TYPE_UNSPECIFIED: Type is unspecified.
BACKEND_SERVICE: Backend Service as the load balancer's backend.
TARGET_POOL: Target Pool as the load balancer's backend.
"""
BACKEND_TYPE_UNSPECIFIED = 0
BACKEND_SERVICE = 1
TARGET_POOL = 2
class LoadBalancerTypeValueValuesEnum(_messages.Enum):
r"""Type of the load balancer.
Values:
LOAD_BALANCER_TYPE_UNSPECIFIED: Type is unspecified.
INTERNAL_TCP_UDP: Internal TCP/UDP load balancer.
NETWORK_TCP_UDP: Network TCP/UDP load balancer.
HTTP_PROXY: HTTP(S) proxy load balancer.
TCP_PROXY: TCP proxy load balancer.
SSL_PROXY: SSL proxy load balancer.
"""
LOAD_BALANCER_TYPE_UNSPECIFIED = 0
INTERNAL_TCP_UDP = 1
NETWORK_TCP_UDP = 2
HTTP_PROXY = 3
TCP_PROXY = 4
SSL_PROXY = 5
backendType = _messages.EnumField('BackendTypeValueValuesEnum', 1)
backendUri = _messages.StringField(2)
backends = _messages.MessageField('LoadBalancerBackend', 3, repeated=True)
healthCheckUri = _messages.StringField(4)
loadBalancerType = _messages.EnumField('LoadBalancerTypeValueValuesEnum', 5)
class Location(_messages.Message):
r"""A resource that represents Google Cloud Platform location.
Messages:
LabelsValue: Cross-service attributes for the location. For example
{"cloud.googleapis.com/region": "us-east1"}
MetadataValue: Service-specific metadata. For example the available
capacity at the given location.
Fields:
displayName: The friendly name for this location, typically a nearby city
name. For example, "Tokyo".
labels: Cross-service attributes for the location. For example
{"cloud.googleapis.com/region": "us-east1"}
locationId: The canonical id for this location. For example: `"us-east1"`.
metadata: Service-specific metadata. For example the available capacity at
the given location.
name: Resource name for the location, which may vary between
implementations. For example: `"projects/example-project/locations/us-
east1"`
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""Cross-service attributes for the location. For example
{"cloud.googleapis.com/region": "us-east1"}
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
r"""Service-specific metadata. For example the available capacity at the
given location.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
        value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
displayName = _messages.StringField(1)
labels = _messages.MessageField('LabelsValue', 2)
locationId = _messages.StringField(3)
metadata = _messages.MessageField('MetadataValue', 4)
name = _messages.StringField(5)
class NetworkInfo(_messages.Message):
r"""For display only. Metadata associated with a Compute Engine network.
Fields:
displayName: Name of a Compute Engine network.
matchedIpRange: The IP range that matches the test.
uri: URI of a Compute Engine network.
"""
displayName = _messages.StringField(1)
matchedIpRange = _messages.StringField(2)
uri = _messages.StringField(3)
class NetworkmanagementProjectsLocationsGetRequest(_messages.Message):
r"""A NetworkmanagementProjectsLocationsGetRequest object.
Fields:
name: Resource name for the location.
"""
name = _messages.StringField(1, required=True)
class NetworkmanagementProjectsLocationsGlobalConnectivityTestsCreateRequest(_messages.Message):
r"""A NetworkmanagementProjectsLocationsGlobalConnectivityTestsCreateRequest
object.
Fields:
connectivityTest: A ConnectivityTest resource to be passed as the request
body.
parent: Required. The parent resource of the Connectivity Test to create:
`projects/{project_id}/locations/global`
testId: Required. The logical name of the Connectivity Test in your
project with the following restrictions: * Must contain only lowercase
letters, numbers, and hyphens. * Must start with a letter. * Must be
between 1-40 characters. * Must end with a number or a letter. * Must be
unique within the customer project
"""
connectivityTest = _messages.MessageField('ConnectivityTest', 1)
parent = _messages.StringField(2, required=True)
testId = _messages.StringField(3)
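# Illustrative sketch: a client-side check of the testId restrictions listed
# above (lowercase letters, digits and hyphens; starts with a letter; ends
# with a letter or digit; 1-40 characters). The regex is an assumption
# derived from the docstring, not part of the API.
#
#   import re
#   _TEST_ID_RE = re.compile(r'^[a-z]([a-z0-9-]{0,38}[a-z0-9])?$')
#   assert _TEST_ID_RE.match('my-test-1') is not None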
class NetworkmanagementProjectsLocationsGlobalConnectivityTestsDeleteRequest(_messages.Message):
r"""A NetworkmanagementProjectsLocationsGlobalConnectivityTestsDeleteRequest
object.
Fields:
name: Required. Connectivity Test resource name using the form:
`projects/{project_id}/locations/global/connectivityTests/{test_id}`
"""
name = _messages.StringField(1, required=True)
class NetworkmanagementProjectsLocationsGlobalConnectivityTestsGetIamPolicyRequest(_messages.Message):
r"""A
NetworkmanagementProjectsLocationsGlobalConnectivityTestsGetIamPolicyRequest
object.
Fields:
options_requestedPolicyVersion: Optional. The policy format version to be
returned. Valid values are 0, 1, and 3. Requests specifying an invalid
value will be rejected. Requests for policies with any conditional
bindings must specify version 3. Policies without any conditional
bindings may specify any valid value or leave the field unset. To learn
which resources support conditions in their IAM policies, see the [IAM
documentation](https://cloud.google.com/iam/help/conditions/resource-
policies).
resource: REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this
field.
"""
options_requestedPolicyVersion = _messages.IntegerField(1, variant=_messages.Variant.INT32)
resource = _messages.StringField(2, required=True)
class NetworkmanagementProjectsLocationsGlobalConnectivityTestsGetRequest(_messages.Message):
r"""A NetworkmanagementProjectsLocationsGlobalConnectivityTestsGetRequest
object.
Fields:
name: Required. `ConnectivityTest` resource name using the form:
`projects/{project_id}/locations/global/connectivityTests/{test_id}`
"""
name = _messages.StringField(1, required=True)
class NetworkmanagementProjectsLocationsGlobalConnectivityTestsListRequest(_messages.Message):
r"""A NetworkmanagementProjectsLocationsGlobalConnectivityTestsListRequest
object.
Fields:
filter: Lists the `ConnectivityTests` that match the filter expression. A
filter expression filters the resources listed in the response. The
expression must be of the form `<field> <operator> <value>` where
operators: `<`, `>`, `<=`, `>=`, `!=`, `=`, `:` are supported (colon `:`
represents a HAS operator which is roughly synonymous with equality).
<field> can refer to a proto or JSON field, or a synthetic field. Field
names can be camelCase or snake_case. Examples: - Filter by name:
name = "projects/proj-1/locations/global/connectivityTests/test-1 -
Filter by labels: - Resources that have a key called `foo`
labels.foo:* - Resources that have a key called `foo` whose value is
`bar` labels.foo = bar
orderBy: Field to use to sort the list.
pageSize: Number of `ConnectivityTests` to return.
pageToken: Page token from an earlier query, as returned in
`next_page_token`.
parent: Required. The parent resource of the Connectivity Tests:
`projects/{project_id}/locations/global`
"""
filter = _messages.StringField(1)
orderBy = _messages.StringField(2)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
parent = _messages.StringField(5, required=True)
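# Illustrative sketch (assumption: `tests_service` wraps the generated List
# method; only the request/response classes come from this module): a filter
# string in the grammar described above, plus paging via the page token.
#
#   request = NetworkmanagementProjectsLocationsGlobalConnectivityTestsListRequest(
#       parent='projects/my-project/locations/global',
#       filter='labels.env = prod',
#       pageSize=50)
#   while True:
#       response = tests_service.List(request)
#       for test in response.resources or []:
#           handle(test)  # hypothetical handler
#       if not response.nextPageToken:
#           break
#       request.pageToken = response.nextPageToken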
class NetworkmanagementProjectsLocationsGlobalConnectivityTestsPatchRequest(_messages.Message):
r"""A NetworkmanagementProjectsLocationsGlobalConnectivityTestsPatchRequest
object.
Fields:
connectivityTest: A ConnectivityTest resource to be passed as the request
body.
name: Required. Unique name of the resource using the form:
`projects/{project_id}/locations/global/connectivityTests/{test}`
updateMask: Required. Mask of fields to update. At least one path must be
supplied in this field.
"""
connectivityTest = _messages.MessageField('ConnectivityTest', 1)
name = _messages.StringField(2, required=True)
updateMask = _messages.StringField(3)
class NetworkmanagementProjectsLocationsGlobalConnectivityTestsRerunRequest(_messages.Message):
r"""A NetworkmanagementProjectsLocationsGlobalConnectivityTestsRerunRequest
object.
Fields:
name: Required. Connectivity Test resource name using the form:
`projects/{project_id}/locations/global/connectivityTests/{test_id}`
rerunConnectivityTestRequest: A RerunConnectivityTestRequest resource to
be passed as the request body.
"""
name = _messages.StringField(1, required=True)
rerunConnectivityTestRequest = _messages.MessageField('RerunConnectivityTestRequest', 2)
class NetworkmanagementProjectsLocationsGlobalConnectivityTestsSetIamPolicyRequest(_messages.Message):
r"""A
NetworkmanagementProjectsLocationsGlobalConnectivityTestsSetIamPolicyRequest
object.
Fields:
resource: REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this
field.
setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
request body.
"""
resource = _messages.StringField(1, required=True)
setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class NetworkmanagementProjectsLocationsGlobalConnectivityTestsTestIamPermissionsRequest(_messages.Message):
r"""A NetworkmanagementProjectsLocationsGlobalConnectivityTestsTestIamPermis
sionsRequest object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. See the operation documentation for the appropriate value for
this field.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class NetworkmanagementProjectsLocationsGlobalOperationsCancelRequest(_messages.Message):
r"""A NetworkmanagementProjectsLocationsGlobalOperationsCancelRequest
object.
Fields:
cancelOperationRequest: A CancelOperationRequest resource to be passed as
the request body.
name: The name of the operation resource to be cancelled.
"""
cancelOperationRequest = _messages.MessageField('CancelOperationRequest', 1)
name = _messages.StringField(2, required=True)
class NetworkmanagementProjectsLocationsGlobalOperationsDeleteRequest(_messages.Message):
r"""A NetworkmanagementProjectsLocationsGlobalOperationsDeleteRequest
object.
Fields:
name: The name of the operation resource to be deleted.
"""
name = _messages.StringField(1, required=True)
class NetworkmanagementProjectsLocationsGlobalOperationsGetRequest(_messages.Message):
r"""A NetworkmanagementProjectsLocationsGlobalOperationsGetRequest object.
Fields:
name: The name of the operation resource.
"""
name = _messages.StringField(1, required=True)
class NetworkmanagementProjectsLocationsGlobalOperationsListRequest(_messages.Message):
r"""A NetworkmanagementProjectsLocationsGlobalOperationsListRequest object.
Fields:
filter: The standard list filter.
name: The name of the operation's parent resource.
pageSize: The standard list page size.
pageToken: The standard list page token.
"""
filter = _messages.StringField(1)
name = _messages.StringField(2, required=True)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
class NetworkmanagementProjectsLocationsListRequest(_messages.Message):
r"""A NetworkmanagementProjectsLocationsListRequest object.
Fields:
filter: The standard list filter.
name: The resource that owns the locations collection, if applicable.
pageSize: The standard list page size.
pageToken: The standard list page token.
"""
filter = _messages.StringField(1)
name = _messages.StringField(2, required=True)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
class Operation(_messages.Message):
r"""This resource represents a long-running operation that is the result of
a network API call.
Messages:
MetadataValue: Service-specific metadata associated with the operation.
It typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
ResponseValue: The normal response of the operation in case of success.
If the original method returns no data on success, such as `Delete`, the
response is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
Fields:
done: If the value is `false`, it means the operation is still in
progress. If `true`, the operation is completed, and either `error` or
`response` is available.
error: The error result of the operation in case of failure or
cancellation.
metadata: Service-specific metadata associated with the operation. It
typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
name: The server-assigned name, which is only unique within the same
service that originally returns it. If you use the default HTTP mapping,
the `name` should be a resource name ending with
`operations/{unique_id}`.
response: The normal response of the operation in case of success. If the
original method returns no data on success, such as `Delete`, the
response is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
r"""Service-specific metadata associated with the operation. It typically
contains progress information and common metadata such as create time.
Some services might not provide such metadata. Any method that returns a
long-running operation should document the metadata type, if any.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
        value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class ResponseValue(_messages.Message):
r"""The normal response of the operation in case of success. If the
original method returns no data on success, such as `Delete`, the response
is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
Messages:
AdditionalProperty: An additional property for a ResponseValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ResponseValue object.
Fields:
key: Name of the additional property.
        value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
done = _messages.BooleanField(1)
error = _messages.MessageField('Status', 2)
metadata = _messages.MessageField('MetadataValue', 3)
name = _messages.StringField(4)
response = _messages.MessageField('ResponseValue', 5)
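# Illustrative sketch (assumption: `operations_service` wraps the generated
# operations Get method; names are hypothetical): polling a long-running
# Operation until `done` is set, then inspecting `error` or `response`.
#
#   import time
#   def wait_for_operation(operations_service, name, poll_seconds=5):
#       while True:
#           op = operations_service.Get(
#               NetworkmanagementProjectsLocationsGlobalOperationsGetRequest(
#                   name=name))
#           if op.done:
#               return op
#           time.sleep(poll_seconds)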
class OperationMetadata(_messages.Message):
r"""Metadata describing an Operation
Fields:
apiVersion: API version.
cancelRequested: Specifies if cancellation was requested for the
operation.
createTime: The time the operation was created.
endTime: The time the operation finished running.
statusDetail: Human-readable status of the operation, if any.
target: Target of the operation - for example
projects/project-1/locations/global/connectivityTests/test-1
verb: Name of the verb executed by the operation.
"""
apiVersion = _messages.StringField(1)
cancelRequested = _messages.BooleanField(2)
createTime = _messages.StringField(3)
endTime = _messages.StringField(4)
statusDetail = _messages.StringField(5)
target = _messages.StringField(6)
verb = _messages.StringField(7)
class Policy(_messages.Message):
r"""An Identity and Access Management (IAM) policy, which specifies access
controls for Google Cloud resources. A `Policy` is a collection of
`bindings`. A `binding` binds one or more `members` to a single `role`.
Members can be user accounts, service accounts, Google groups, and domains
(such as G Suite). A `role` is a named list of permissions; each `role` can
be an IAM predefined role or a user-created custom role. For some types of
Google Cloud resources, a `binding` can also specify a `condition`, which is
a logical expression that allows access to a resource only if the expression
evaluates to `true`. A condition can add constraints based on attributes of
the request, the resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
documentation](https://cloud.google.com/iam/help/conditions/resource-
policies). **JSON example:** { "bindings": [ {
"role": "roles/resourcemanager.organizationAdmin", "members": [
"user:[email protected]", "group:[email protected]",
"domain:google.com", "serviceAccount:my-project-
[email protected]" ] }, {
"role": "roles/resourcemanager.organizationViewer", "members": [
"user:[email protected]" ], "condition": {
"title": "expirable access", "description": "Does not grant
access after Sep 2020", "expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')", } } ],
"etag": "BwWWja0YfJA=", "version": 3 } **YAML example:**
bindings: - members: - user:[email protected] -
group:[email protected] - domain:google.com -
serviceAccount:[email protected] role:
roles/resourcemanager.organizationAdmin - members: -
user:[email protected] role: roles/resourcemanager.organizationViewer
condition: title: expirable access description: Does not
grant access after Sep 2020 expression: request.time <
timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= -
version: 3 For a description of IAM and its features, see the [IAM
documentation](https://cloud.google.com/iam/docs/).
Fields:
auditConfigs: Specifies cloud audit logging configuration for this policy.
bindings: Associates a list of `members` to a `role`. Optionally, may
specify a `condition` that determines how and when the `bindings` are
applied. Each of the `bindings` must contain at least one member.
etag: `etag` is used for optimistic concurrency control as a way to help
prevent simultaneous updates of a policy from overwriting each other. It
is strongly suggested that systems make use of the `etag` in the read-
modify-write cycle to perform policy updates in order to avoid race
conditions: An `etag` is returned in the response to `getIamPolicy`, and
systems are expected to put that etag in the request to `setIamPolicy`
to ensure that their change will be applied to the same version of the
policy. **Important:** If you use IAM Conditions, you must include the
`etag` field whenever you call `setIamPolicy`. If you omit this field,
then IAM allows you to overwrite a version `3` policy with a version `1`
policy, and all of the conditions in the version `3` policy are lost.
version: Specifies the format of the policy. Valid values are `0`, `1`,
and `3`. Requests that specify an invalid value are rejected. Any
operation that affects conditional role bindings must specify version
`3`. This requirement applies to the following operations: * Getting a
policy that includes a conditional role binding * Adding a conditional
role binding to a policy * Changing a conditional role binding in a
policy * Removing any role binding, with or without a condition, from a
policy that includes conditions **Important:** If you use IAM
Conditions, you must include the `etag` field whenever you call
`setIamPolicy`. If you omit this field, then IAM allows you to overwrite
a version `3` policy with a version `1` policy, and all of the
conditions in the version `3` policy are lost. If a policy does not
include any conditions, operations on that policy may specify any valid
version or leave the field unset. To learn which resources support
conditions in their IAM policies, see the [IAM
documentation](https://cloud.google.com/iam/help/conditions/resource-
policies).
"""
auditConfigs = _messages.MessageField('AuditConfig', 1, repeated=True)
bindings = _messages.MessageField('Binding', 2, repeated=True)
etag = _messages.BytesField(3)
version = _messages.IntegerField(4, variant=_messages.Variant.INT32)
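# Illustrative sketch (assumption: `tests_service` wraps the generated
# GetIamPolicy/SetIamPolicy methods; `Binding` is defined elsewhere in this
# module): the read-modify-write cycle recommended by the `etag` docs. The
# etag returned by getIamPolicy travels back in setIamPolicy, so a concurrent
# update is rejected instead of silently overwritten.
#
#   policy = tests_service.GetIamPolicy(
#       NetworkmanagementProjectsLocationsGlobalConnectivityTestsGetIamPolicyRequest(
#           resource=resource, options_requestedPolicyVersion=3))
#   policy.bindings.append(new_binding)  # a Binding message built by the caller
#   tests_service.SetIamPolicy(
#       NetworkmanagementProjectsLocationsGlobalConnectivityTestsSetIamPolicyRequest(
#           resource=resource,
#           setIamPolicyRequest=SetIamPolicyRequest(policy=policy)))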
class ReachabilityDetails(_messages.Message):
r"""The details of reachability state from the latest run.
Enums:
ResultValueValuesEnum: The overall reachability result of the test.
Fields:
error: The details of a failure or a cancellation of reachability
analysis.
result: The overall reachability result of the test.
    traces: Result may contain a list of traces if a test has multiple
      possible paths in the network, such as when the destination endpoint is
      a load balancer with multiple backends.
verifyTime: The time the reachability state was verified.
"""
class ResultValueValuesEnum(_messages.Enum):
r"""The overall reachability result of the test.
Values:
RESULT_UNSPECIFIED: Result is not specified.
REACHABLE: Packet originating from source is expected to reach
destination.
UNREACHABLE: Packet originating from source is expected to be dropped
before reaching destination.
      AMBIGUOUS: If the source and destination endpoints do not uniquely
        identify the test location in the network, and the reachability result
        contains multiple traces with mixed reachable and unreachable states,
        then this result is returned.
UNDETERMINED: The reachability could not be determined. Possible reasons
are: * Analysis is aborted due to permission error. User does not
have read permission to the projects listed in the test. * Analysis
is aborted due to internal errors. * Analysis is partially complete
based on configurations where the user has permission. The Final
state indicates that the packet is forwarded to another network where
the user has no permission to access the configurations.
"""
RESULT_UNSPECIFIED = 0
REACHABLE = 1
UNREACHABLE = 2
AMBIGUOUS = 3
UNDETERMINED = 4
error = _messages.MessageField('Status', 1)
result = _messages.EnumField('ResultValueValuesEnum', 2)
traces = _messages.MessageField('Trace', 3, repeated=True)
verifyTime = _messages.StringField(4)
class RerunConnectivityTestRequest(_messages.Message):
r"""Request for the `RerunConnectivityTest` method."""
class RouteInfo(_messages.Message):
r"""For display only. Metadata associated with a Compute Engine route.
Enums:
NextHopTypeValueValuesEnum: Type of next hop.
RouteTypeValueValuesEnum: Type of route.
Fields:
destIpRange: Destination IP range of the route.
displayName: Name of a Compute Engine route.
instanceTags: Instance tags of the route.
networkUri: URI of a Compute Engine network.
nextHop: Next hop of the route.
nextHopType: Type of next hop.
priority: Priority of the route.
routeType: Type of route.
uri: URI of a Compute Engine route. Dynamic route from cloud router does
not have a URI. Advertised route from Google Cloud VPC to on-premises
network also does not have a URI.
"""
class NextHopTypeValueValuesEnum(_messages.Enum):
r"""Type of next hop.
Values:
NEXT_HOP_TYPE_UNSPECIFIED: Unspecified type. Default value.
NEXT_HOP_IP: Next hop is an IP address.
NEXT_HOP_INSTANCE: Next hop is a Compute Engine instance.
NEXT_HOP_NETWORK: Next hop is a VPC network gateway.
NEXT_HOP_PEERING: Next hop is a peering VPC.
NEXT_HOP_INTERCONNECT: Next hop is an interconnect.
NEXT_HOP_VPN_TUNNEL: Next hop is a VPN tunnel.
NEXT_HOP_VPN_GATEWAY: Next hop is a VPN Gateway. This scenario only
happens when tracing connectivity from an on-premises network to GCP
through a VPN. The analysis simulates a packet departing from the on-
premises network through a VPN tunnel and arrives at a Cloud VPN
gateway.
NEXT_HOP_INTERNET_GATEWAY: Next hop is an internet gateway.
NEXT_HOP_BLACKHOLE: Next hop is blackhole; that is, the next hop either
does not exist or is not running.
NEXT_HOP_ILB: Next hop is the forwarding rule of an Internal Load
Balancer.
"""
NEXT_HOP_TYPE_UNSPECIFIED = 0
NEXT_HOP_IP = 1
NEXT_HOP_INSTANCE = 2
NEXT_HOP_NETWORK = 3
NEXT_HOP_PEERING = 4
NEXT_HOP_INTERCONNECT = 5
NEXT_HOP_VPN_TUNNEL = 6
NEXT_HOP_VPN_GATEWAY = 7
NEXT_HOP_INTERNET_GATEWAY = 8
NEXT_HOP_BLACKHOLE = 9
NEXT_HOP_ILB = 10
class RouteTypeValueValuesEnum(_messages.Enum):
r"""Type of route.
Values:
ROUTE_TYPE_UNSPECIFIED: Unspecified type. Default value.
SUBNET: Route is a subnet route automatically created by the system.
STATIC: Static route created by the user including the default route to
the Internet.
DYNAMIC: Dynamic route exchanged between BGP peers.
      PEERING_SUBNET: A subnet route received from a peering network.
      PEERING_STATIC: A static route received from a peering network.
      PEERING_DYNAMIC: A dynamic route received from a peering network.
"""
ROUTE_TYPE_UNSPECIFIED = 0
SUBNET = 1
STATIC = 2
DYNAMIC = 3
PEERING_SUBNET = 4
PEERING_STATIC = 5
PEERING_DYNAMIC = 6
destIpRange = _messages.StringField(1)
displayName = _messages.StringField(2)
instanceTags = _messages.StringField(3, repeated=True)
networkUri = _messages.StringField(4)
nextHop = _messages.StringField(5)
nextHopType = _messages.EnumField('NextHopTypeValueValuesEnum', 6)
priority = _messages.IntegerField(7, variant=_messages.Variant.INT32)
routeType = _messages.EnumField('RouteTypeValueValuesEnum', 8)
uri = _messages.StringField(9)
class SetIamPolicyRequest(_messages.Message):
r"""Request message for `SetIamPolicy` method.
Fields:
policy: REQUIRED: The complete policy to be applied to the `resource`. The
size of the policy is limited to a few 10s of KB. An empty policy is a
valid policy but certain Cloud Platform services (such as Projects)
might reject them.
updateMask: OPTIONAL: A FieldMask specifying which fields of the policy to
modify. Only the fields in the mask will be modified. If no mask is
provided, the following default mask is used: `paths: "bindings, etag"`
"""
policy = _messages.MessageField('Policy', 1)
updateMask = _messages.StringField(2)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
r"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default='json')
callback = _messages.StringField(4)
fields = _messages.StringField(5)
key = _messages.StringField(6)
oauth_token = _messages.StringField(7)
prettyPrint = _messages.BooleanField(8, default=True)
quotaUser = _messages.StringField(9)
trace = _messages.StringField(10)
uploadType = _messages.StringField(11)
upload_protocol = _messages.StringField(12)
class Status(_messages.Message):
r"""The `Status` type defines a logical error model that is suitable for
different programming environments, including REST APIs and RPC APIs. It is
used by [gRPC](https://github.com/grpc). Each `Status` message contains
three pieces of data: error code, error message, and error details. You can
find out more about this error model and how to work with it in the [API
Design Guide](https://cloud.google.com/apis/design/errors).
Messages:
DetailsValueListEntry: A DetailsValueListEntry object.
Fields:
code: The status code, which should be an enum value of google.rpc.Code.
details: A list of messages that carry the error details. There is a
common set of message types for APIs to use.
message: A developer-facing error message, which should be in English. Any
user-facing error message should be localized and sent in the
google.rpc.Status.details field, or localized by the client.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class DetailsValueListEntry(_messages.Message):
r"""A DetailsValueListEntry object.
Messages:
AdditionalProperty: An additional property for a DetailsValueListEntry
object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a DetailsValueListEntry object.
Fields:
key: Name of the additional property.
        value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
message = _messages.StringField(3)
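# Illustrative sketch: turning a failed Operation's Status into a Python
# exception. `code` follows google.rpc.Code, where 0 means OK.
#
#   def raise_on_error(operation):
#       if operation.done and operation.error is not None:
#           raise RuntimeError('operation %s failed: [%d] %s' % (
#               operation.name, operation.error.code, operation.error.message))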
class Step(_messages.Message):
r"""A simulated forwarding path is composed of multiple steps. Each step has
a well-defined state and an associated configuration.
Enums:
StateValueValuesEnum: Each step is in one of the pre-defined states.
Fields:
abort: Display info of the final state "abort" and reason.
causesDrop: This is a step that leads to the final state Drop.
deliver: Display info of the final state "deliver" and reason.
description: A description of the step. Usually this is a summary of the
state.
drop: Display info of the final state "drop" and reason.
    endpoint: Display info of the source and destination under analysis. The
      endpoint info in an intermediate state may differ from the initial
      input, as it might be modified by states such as NAT or Connection Proxy.
firewall: Display info of a Compute Engine firewall rule.
forward: Display info of the final state "forward" and reason.
forwardingRule: Display info of a Compute Engine forwarding rule.
instance: Display info of a Compute Engine instance.
loadBalancer: Display info of the load balancers.
network: Display info of a GCP network.
projectId: Project ID that contains the configuration this step is
validating.
route: Display info of a Compute Engine route.
state: Each step is in one of the pre-defined states.
vpnGateway: Display info of a Compute Engine VPN gateway.
vpnTunnel: Display info of a Compute Engine VPN tunnel.
"""
class StateValueValuesEnum(_messages.Enum):
r"""Each step is in one of the pre-defined states.
Values:
STATE_UNSPECIFIED: Unspecified state.
START_FROM_INSTANCE: Initial state: packet originating from a Compute
Engine instance. An InstanceInfo will be populated with starting
instance info.
START_FROM_INTERNET: Initial state: packet originating from Internet.
The endpoint info will be populated.
START_FROM_PRIVATE_NETWORK: Initial state: packet originating from a VPC
or on-premises network with internal source IP. If the source is a VPC
network visible to the user, a NetworkInfo will be populated with
details of the network.
APPLY_INGRESS_FIREWALL_RULE: Config checking state: verify ingress
firewall rule.
APPLY_EGRESS_FIREWALL_RULE: Config checking state: verify egress
firewall rule.
APPLY_ROUTE: Config checking state: verify route.
APPLY_FORWARDING_RULE: Config checking state: match forwarding rule.
SPOOFING_APPROVED: Config checking state: packet sent or received under
foreign IP address and allowed.
ARRIVE_AT_INSTANCE: Forwarding state: arriving at a Compute Engine
instance.
ARRIVE_AT_INTERNAL_LOAD_BALANCER: Forwarding state: arriving at a
Compute Engine internal load balancer.
ARRIVE_AT_EXTERNAL_LOAD_BALANCER: Forwarding state: arriving at a
Compute Engine external load balancer.
ARRIVE_AT_VPN_GATEWAY: Forwarding state: arriving at a Cloud VPN
gateway.
ARRIVE_AT_VPN_TUNNEL: Forwarding state: arriving at a Cloud VPN tunnel.
NAT: Transition state: packet header translated.
PROXY_CONNECTION: Transition state: original connection is terminated
and a new proxied connection is initiated.
DELIVER: Final state: packet delivered.
DROP: Final state: packet dropped.
FORWARD: Final state: packet forwarded to a network with an unknown
configuration.
ABORT: Final state: analysis is aborted.
VIEWER_PERMISSION_MISSING: Special state: viewer of the test result does
not have permission to see the configuration in this step.
"""
STATE_UNSPECIFIED = 0
START_FROM_INSTANCE = 1
START_FROM_INTERNET = 2
START_FROM_PRIVATE_NETWORK = 3
APPLY_INGRESS_FIREWALL_RULE = 4
APPLY_EGRESS_FIREWALL_RULE = 5
APPLY_ROUTE = 6
APPLY_FORWARDING_RULE = 7
SPOOFING_APPROVED = 8
ARRIVE_AT_INSTANCE = 9
ARRIVE_AT_INTERNAL_LOAD_BALANCER = 10
ARRIVE_AT_EXTERNAL_LOAD_BALANCER = 11
ARRIVE_AT_VPN_GATEWAY = 12
ARRIVE_AT_VPN_TUNNEL = 13
NAT = 14
PROXY_CONNECTION = 15
DELIVER = 16
DROP = 17
FORWARD = 18
ABORT = 19
VIEWER_PERMISSION_MISSING = 20
abort = _messages.MessageField('AbortInfo', 1)
causesDrop = _messages.BooleanField(2)
deliver = _messages.MessageField('DeliverInfo', 3)
description = _messages.StringField(4)
drop = _messages.MessageField('DropInfo', 5)
endpoint = _messages.MessageField('EndpointInfo', 6)
firewall = _messages.MessageField('FirewallInfo', 7)
forward = _messages.MessageField('ForwardInfo', 8)
forwardingRule = _messages.MessageField('ForwardingRuleInfo', 9)
instance = _messages.MessageField('InstanceInfo', 10)
loadBalancer = _messages.MessageField('LoadBalancerInfo', 11)
network = _messages.MessageField('NetworkInfo', 12)
projectId = _messages.StringField(13)
route = _messages.MessageField('RouteInfo', 14)
state = _messages.EnumField('StateValueValuesEnum', 15)
vpnGateway = _messages.MessageField('VpnGatewayInfo', 16)
vpnTunnel = _messages.MessageField('VpnTunnelInfo', 17)
class TestIamPermissionsRequest(_messages.Message):
r"""Request message for `TestIamPermissions` method.
Fields:
permissions: The set of permissions to check for the `resource`.
Permissions with wildcards (such as '*' or 'storage.*') are not allowed.
For more information see [IAM
Overview](https://cloud.google.com/iam/docs/overview#permissions).
"""
permissions = _messages.StringField(1, repeated=True)
class TestIamPermissionsResponse(_messages.Message):
r"""Response message for `TestIamPermissions` method.
Fields:
permissions: A subset of `TestPermissionsRequest.permissions` that the
caller is allowed.
"""
permissions = _messages.StringField(1, repeated=True)
class Trace(_messages.Message):
r"""Trace represents one simulated packet forwarding path. <ul> <li>Each
trace contains multiple ordered steps.</li> <li>Each step is in a
particular state and has an associated configuration.</li> <li>State is
categorized as a final or non-final state.</li> <li>Each final state has a
reason associated with it.</li> <li>Each trace must end with a final state
(the last step).</li> </ul> <pre><code>
|---------------------Trace----------------------| Step1(State)
Step2(State) --- StepN(State(final)) </code></pre>
Fields:
endpointInfo: Derived from the source and destination endpoints
definition, and validated by the data plane model. If there are multiple
traces starting from different source locations, then the endpoint_info
may be different between traces.
steps: A trace of a test contains multiple steps from the initial state to
the final state (delivered, dropped, forwarded, or aborted). The steps
are ordered by the processing sequence within the simulated network
state machine. It is critical to preserve the order of the steps and
avoid reordering or sorting them.
"""
endpointInfo = _messages.MessageField('EndpointInfo', 1)
steps = _messages.MessageField('Step', 2, repeated=True)
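# Illustrative sketch: the steps of a Trace are already ordered by the
# simulated state machine and must not be re-sorted; the last step carries
# the final state.
#
#   def final_state(trace):
#       last = trace.steps[-1]
#       return last.state, last.description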
class VpnGatewayInfo(_messages.Message):
r"""For display only. Metadata associated with a Compute Engine VPN gateway.
Fields:
displayName: Name of a VPN gateway.
ipAddress: IP address of the VPN gateway.
networkUri: URI of a Compute Engine network where the VPN gateway is
configured.
region: Name of a GCP region where this VPN gateway is configured.
uri: URI of a VPN gateway.
vpnTunnelUri: A VPN tunnel that is associated with this VPN gateway. There
may be multiple VPN tunnels configured on a VPN gateway, and only the
one relevant to the test is displayed.
"""
displayName = _messages.StringField(1)
ipAddress = _messages.StringField(2)
networkUri = _messages.StringField(3)
region = _messages.StringField(4)
uri = _messages.StringField(5)
vpnTunnelUri = _messages.StringField(6)
class VpnTunnelInfo(_messages.Message):
r"""For display only. Metadata associated with a Compute Engine VPN tunnel.
Enums:
RoutingTypeValueValuesEnum: Type of the routing policy.
Fields:
displayName: Name of a VPN tunnel.
networkUri: URI of a Compute Engine network where the VPN tunnel is
configured.
region: Name of a GCP region where this VPN tunnel is configured.
remoteGateway: URI of a VPN gateway at remote end of the tunnel.
remoteGatewayIp: Remote VPN gateway's IP address.
routingType: Type of the routing policy.
sourceGateway: URI of the VPN gateway at local end of the tunnel.
sourceGatewayIp: Local VPN gateway's IP address.
uri: URI of a VPN tunnel.
"""
class RoutingTypeValueValuesEnum(_messages.Enum):
r"""Type of the routing policy.
Values:
ROUTING_TYPE_UNSPECIFIED: Unspecified type. Default value.
ROUTE_BASED: Route based VPN.
POLICY_BASED: Policy based routing.
DYNAMIC: Dynamic (BGP) routing.
"""
ROUTING_TYPE_UNSPECIFIED = 0
ROUTE_BASED = 1
POLICY_BASED = 2
DYNAMIC = 3
displayName = _messages.StringField(1)
networkUri = _messages.StringField(2)
region = _messages.StringField(3)
remoteGateway = _messages.StringField(4)
remoteGatewayIp = _messages.StringField(5)
routingType = _messages.EnumField('RoutingTypeValueValuesEnum', 6)
sourceGateway = _messages.StringField(7)
sourceGatewayIp = _messages.StringField(8)
uri = _messages.StringField(9)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
| [
"[email protected]"
]
| |
07d011bae38803dbb97917a30438280459506518 | 8cfcfc59bfcf4255954ebac81d6bb9c183c407e7 | /orders/migrations/0006_alter_person_user.py | 87ebf260909b8dc6fb3d3e1eb129ed4193b405b7 | []
| no_license | elisaZeneli/OrderPlacement | 4928e61e1a60ea992571709e526c50ce4c8deffe | 581d06ec4ddab9a0afe9bdc28d9efe8e8d801b87 | refs/heads/main | 2023-07-24T13:37:04.613726 | 2021-07-16T20:44:26 | 2021-07-16T20:44:26 | 386,264,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # Generated by Django 3.2.5 on 2021-07-15 21:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orders', '0005_alter_person_user'),
]
operations = [
migrations.AlterField(
model_name='person',
name='user',
field=models.CharField(blank=True, max_length=100, null=True),
),
]
| [
"[email protected]"
]
| |
abb6ccb0fe0cc0e772bd2d54cb73fd93c0131ac4 | 5fee6afe91711fbb1ca87845f502776fbfab7851 | /examples/contoursSSEDemo.py | b62444dff30e3d02184741940dc6fbfc1a7a247c | [
"MIT"
]
| permissive | chenxofhit/pyprobml | f66ad4c1186f0ba22e520e14700ac0bd6fee400d | fe48d6111bd121e01cfbdefe3361a993fa14abe1 | refs/heads/master | 2021-01-24T09:39:29.828935 | 2016-09-17T03:34:59 | 2016-09-17T03:34:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 969 | py | #!/usr/bin/env python3
# Error surface for linear regression model.
import matplotlib.pyplot as pl
import numpy as np
import utils.util as util
from mpl_toolkits.mplot3d import Axes3D
def contoursSSEDemo():
N = 21
x,y,_,_,_,_ = util.poly_data_make(sampling='thibaux', n=N)
X = util.add_ones(x)
return X,y
if __name__ == '__main__':
X,y = contoursSSEDemo()
N = len(y)
w = np.linalg.lstsq(X, y)[0]
v = np.arange(-6, 6, .1)
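    # Evaluate the sum-of-squared-errors of the linear model w0*x0 + w1*x1
    # at every (w0, w1) pair on the grid, giving the error surface.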
W0, W1 = np.meshgrid(v, v)
SS = np.array([sum((w0*X[:,0] + w1*X[:,1] - y)**2) for w0, w1 in zip(np.ravel(W0), np.ravel(W1))])
SS = SS.reshape(W0.shape)
fig = pl.figure()
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(W0, W1, SS)
pl.savefig('linregSurfSSE.png')
pl.show()
fig,ax = pl.subplots()
ax.set_title('Sum of squares error contours for linear regression')
CS = pl.contour(W0, W1, SS)
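    # Mark a reference point on the contours (presumably the least-squares
    # weights w computed above; the values are hardcoded in the original demo).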
pl.plot([-4.351],[0.5377],'x')
pl.savefig('linregContoursSSE.png')
pl.show()
| [
"[email protected]"
]
| |
d244d27e34895e0c317a9a52c2c75875afd40e4a | a5a71451ad2380f63a81e50ff119a17e1019d1f6 | /tests/test_get_finder/tests.py | 8849328d27e08d36f7ffcdd5a3217478a08c8cf0 | [
"ISC"
]
| permissive | gears/django-gears | 3a3ed5c7444771d7f14dd33d8655189d06a66143 | 80a5cb085c5c4edac481228765edcfda3d6309f7 | refs/heads/develop | 2021-01-01T19:24:02.617522 | 2015-01-21T08:00:18 | 2015-01-21T08:00:18 | 2,822,397 | 15 | 3 | ISC | 2023-08-23T00:22:29 | 2011-11-21T19:31:28 | Python | UTF-8 | Python | false | false | 581 | py | from __future__ import with_statement
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django_gears.utils import get_finder
from . import finders
class GetFinderTests(TestCase):
def test_if_it_is_a_subclass_of_base_finder(self):
finder = get_finder('test_get_finder.finders.GoodFinder')
self.assertIsInstance(finder, finders.GoodFinder)
def test_if_it_is_not_a_subclass_of_base_finder(self):
with self.assertRaises(ImproperlyConfigured):
get_finder('test_get_finder.finders.BadFinder')
| [
"[email protected]"
]
| |
62bcdafd6fdf8fe92d355ad201ca963132513bd9 | 074421d31af92ae29c7c78bdb7e50f199a38eb9b | /weixin/code/rfid_plt/base_platform/cluster/cluster_thread.py | d1a29d36f26b56567f9b378b4d51e150ccca9123 | []
| no_license | allenforrest/wxbiz | 3f49ce66b37e281fc375f548610aa54a0f73268f | e78df71fbc5d73dd93ba9452d4b54183fe1e7e1f | refs/heads/master | 2016-09-06T15:17:49.420934 | 2013-08-05T13:13:40 | 2013-08-05T13:13:40 | null | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 34,724 | py | #coding=gbk
"""
Copyright (C), 2012-2015, Anything Connected Possibilities
Author: ACP2013
Version: 1.0
Date:
Description: This file implements the thread that maintains the state of cluster nodes
Others:
Key Class&Method List:
1. ClusterServerEventHandler: EventHandler used when nodes communicate with each other over callacp
2. MasterNodeInfo: information about the master node
3. ClusterThread: thread that maintains the state of cluster nodes
History:
1. Date:
Author:
Modification:
"""
import os.path
import threading
import copy
import time
import multiprocessing
import tracelog
import pycallacp
import err_code_mgr
import utility
from cluster_const import *
from cluster.cluster_node_info import ClusterNodeInfo
from cluster.cluster_mit import ClusterMit
from cluster import cluster_cmd_code
from cluster.cluster_struct_def import ClusterStateMsg
from cluster.virtual_ip import bind_virtual_ip, unbind_virtual_ip
class ClusterServerEventHandler(pycallacp.AcpEventHandler):
"""
Class: ClusterServerEventHandler
    Description: EventHandler used when cluster nodes communicate with each other over callacp
Base:
Others:
"""
def __init__(self, cluster_thread):
"""
Method: __init__
        Description: constructor
        Parameter:
            cluster_thread: the ClusterThread instance
Return:
Others:
"""
pycallacp.AcpEventHandler.__init__(self)
self.__cluster_thread = cluster_thread
def on_msg_received(self, url_or_srv_name, msg):
"""
Method: on_msg_received
Description: "收到消息"的处理接口
Parameter:
url_or_srv_name: 消息发送者的url
msg: 消息
Return:
Others:
"""
#print "on_msg_received", msg.get_cmd_code()
cmd_code = msg.get_cmd_code()
if (cmd_code != cluster_cmd_code.CMD_CLUSTER_QUERY_STATE
and cmd_code != cluster_cmd_code.CMD_CLUSTER_ACK_STATE):
tracelog.error("ClusterServerEventHandler receved invalid msg:%d" % cmd_code)
return
try:
state_msg = ClusterStateMsg.deserialize(msg.get_data())
if state_msg is None:
tracelog.error("ClusterStateMsg.deserialize failed. "
"msg:%d, %r" % (cmd_code, msg.get_data()))
return
if cmd_code == cluster_cmd_code.CMD_CLUSTER_QUERY_STATE:
self.__cluster_thread.on_query_state(url_or_srv_name, state_msg)
elif cmd_code == cluster_cmd_code.CMD_CLUSTER_ACK_STATE:
self.__cluster_thread.on_ack_state(state_msg)
except:
tracelog.exception("handler msg(%d) failed" % cmd_code)
class MasterNodeInfo:
"""
Class: MasterNodeInfo
    Description: information about the master node
Base:
Others:
"""
def __init__(self):
"""
Method: __init__
Description:
        Parameter: None
        Return:
        Others:
            master_ip: ip of the master
            start_time: start time of the master
"""
self.__master_ip = ""
self.__start_time = ""
def update(self, ip, start_time):
"""
Method: update
        Description: update the master's ip and start time
        Parameter:
            ip: ip of the master
            start_time: start time of the master
        Return: whether the master info has changed
Others:
"""
is_change = False
if self.__master_ip != ip or self.__start_time != start_time:
is_change = True
self.__master_ip = ip
self.__start_time = start_time
return is_change
def get_ip(self):
"""
Method: get_ip
        Description: get the master's IP
        Parameter: None
        Return: IP of the master
Others:
"""
return self.__master_ip
class ClusterThread(threading.Thread):
"""
Class: ClusterThread
    Description: thread that maintains the state of the cluster nodes
Base: threading.Thread
Others:
"""
def __init__(self, cluster_node):
"""
Method: __init__
        Description: thread that maintains the state of the current cluster node
        Parameter:
            cluster_node: a ClusterNode instance
Return:
Others:
"""
threading.Thread.__init__(self)
        self.daemon = True # daemon thread: exits together with the main thread
        self.__cluster_node = cluster_node
        # callacp instances used for communication between cluster nodes
        self.__callacp_srv = None
        self.__callacp_client = None
        # cluster configuration
        self.__cluster_cfg_info = None
        # all other nodes
        self.__other_nodes = []
        # cluster node information is fetched through mit
        self.__mit = None
        # root directory of the software installation
        self.__app_top_path = ""
        self.__lock = threading.RLock()
        # counter for heartbeat query commands
        self.__query_counter = 0
        # current role
        self.__role = CLUSTER_ROLE_UNKNOWN
        # current state
        self.__state = CLUSTER_STATE_STARTING
        # whether this node has been stopped
        self.__stoped_event = multiprocessing.Event()
        # master node information
        self.__mater_node_info = MasterNodeInfo()
        # start time
        self.__start_time = str(time.time())
def __clear(self):
"""
Method: __clear
        Description: clear internal data
        Parameter: None
Return:
Others:
"""
if self.__callacp_srv is not None:
self.__callacp_srv.clear()
self.__callacp_srv = None
if self.__callacp_client is not None:
self.__callacp_client.clear()
self.__callacp_client = None
self.__cluster_cfg_info = None
self.__other_nodes = []
if self.__mit is not None:
self.__mit.close()
self.__mit = None
self.__app_top_path = ""
def is_master(self):
"""
Method: is_master
        Description: check whether the current role is master
        Parameter: None
        Return: whether the current role is master
Others:
"""
with self.__lock:
return self.__role == CLUSTER_ROLE_MASTER
def is_only_master(self):
"""
Method: is_only_master
        Description: check whether the current node is the master and it is
                     the only node (no other slave nodes)
        Parameter: None
        Return: whether the current node is the master and no slave nodes exist
Others:
"""
with self.__lock:
return self.__state == CLUSTER_STATE_ONLY_MASTER
def is_slave(self):
"""
Method: is_slave
        Description: check whether the current node's role is slave
        Parameter: None
        Return: whether the current node's role is slave
Others:
"""
with self.__lock:
return self.__role == CLUSTER_ROLE_SLAVE
def get_role(self):
"""
Method: get_role
        Description: get the role of the current node
        Parameter: None
        Return: role of the current node
Others:
"""
with self.__lock:
return self.__role
def get_master_ip(self):
"""
Method: get_master_ip
        Description: get the ip of the master
        Parameter: None
        Return: ip of the master
Others:
"""
return self.__mater_node_info.get_ip()
def get_all_nodes(self):
"""
Method: get_all_nodes
        Description: get information about all nodes
        Parameter: None
        Return: information about all nodes
        Others:
        """
        # collect the information of all nodes
with self.__lock:
all_nodes = copy.deepcopy(self.__other_nodes)
myself = ClusterNodeInfo(self.__cluster_cfg_info.my_inner_ip)
myself.set_role(self.__role)
myself.set_online(True)
all_nodes.append(myself)
return all_nodes
def rmv_node(self, ip):
"""
Method: rmv_node
        Description: remove the specified node
        Parameter:
            ip: ip of the node to remove
        Return: error code, error message
Others:
"""
online_err = (err_code_mgr.ER_CANNOT_RMV_ONLINE_CLUSTER_NODE
, err_code_mgr.get_error_msg(err_code_mgr.ER_CANNOT_RMV_ONLINE_CLUSTER_NODE))
with self.__lock:
            # only offline nodes are allowed to be removed
if ip == self.__cluster_cfg_info.my_inner_ip:
return online_err
for node in self.__other_nodes:
if node.get_ip() == ip and node.is_online():
return online_err
            # remove the record from mit (the DB layer) first
            ret_code, err_msg = self.__mit.rmv_node(ip)
            if ret_code == 0:
                # then remove it from the in-memory list
                self.__rmv_node(ip)
                tracelog.info("remove node %s." % ip)
            else:
                tracelog.error("remove node %s failed." % ip)
return ret_code, err_msg
def initial_cluster(self, cluster_cfg_info, app_top_path):
"""
Method: initial_cluster
        Description: Initialize the cluster.
        Parameter:
            cluster_cfg_info: cluster configuration info
            app_top_path: root directory of the software installation
        Return: error code
Others:
"""
        # out_NIC: outer (external-facing) Network Interface Card
self.__clear()
self.__cluster_cfg_info = cluster_cfg_info
self.__app_top_path = app_top_path
        # start the callacp server and client
self.__callacp_srv = pycallacp.CallAcpServer()
self.__callacp_client = pycallacp.CallAcpClient()
self.__callacp_srv.set_event_handler(ClusterServerEventHandler(self))
self.__callacp_client.set_event_handler(ClusterServerEventHandler(self))
self.__callacp_srv.set_msg_buf_max_num(3)
self.__callacp_client.set_msg_buf_max_num(3)
my_ip = cluster_cfg_info.my_inner_ip
ret_code = self.__callacp_srv.bind(my_ip, CLUSTER_LISTEN_PORT)
if ret_code != 0:
tracelog.error("cluster: listen on (%s, %d) failed." % (
my_ip
, CLUSTER_LISTEN_PORT))
return ret_code
else:
tracelog.info("cluster: listen on (%s, %d) ok." % (
my_ip
, CLUSTER_LISTEN_PORT))
        # reload the node list
ret_code, cur_node = self.reload_nodes(True)
if ret_code != 0:
tracelog.error("load cluster nodes from DB failed. ret:%d" % ret_code)
return ret_code
        # check whether this node already exists in the DB; insert it if not
        if cur_node is None:
            # fail if the cluster has already reached its maximum number of nodes
if len(self.__other_nodes) >= cluster_cfg_info.max_nodes_num:
tracelog.error("The number of cluster nodes has reached the "
"maximum(%d)" % cluster_cfg_info.max_nodes_num)
return err_code_mgr.ER_CLUSTER_REACH_MAX
ret_code = self.__mit.save_node(my_ip, True)
if ret_code != 0:
tracelog.error("save current nodes to DB failed. ret:%d" % ret_code)
return ret_code
else:
            # check whether this node has been disabled
if not cur_node.is_enable():
tracelog.error("the current node is disabled, can not start.")
return err_code_mgr.ER_CLUSTER_IS_DISABLED
        # make sure no stale virtual IP is left bound to the NIC; the virtual IP
        # is (re)bound only once this node becomes master
        self.__unbind_virtual_ip(False)
return ret_code
def stop_cluster(self):
"""
Method: stop_cluster
        Description: Stop the current node.
        Parameter: None
        Return: None
Others:
"""
        # stop the current node
        self.__stoped_event.set()
        # unbind the virtual IP
        if self.is_master():
            self.__unbind_virtual_ip(True)
def is_node_prior(self, node):
"""
Method: is_node_prior
        Description: Check whether the given node's arbitration priority is higher than this node's.
        Parameter:
            node: the node to compare against
        Return: True if the given node has a higher arbitration priority than this node
Others:
"""
        # a node whose IP string compares lower has the higher priority
return node.get_ip() < self.__cluster_cfg_info.my_inner_ip
def __get_url(self, node_ip):
"""
Method: __get_url
        Description: Build the node URL from its IP.
        Parameter:
            node_ip: IP of the node
        Return: URL of the node
Others:
"""
return "tcp://%s:%d" %(node_ip, CLUSTER_LISTEN_PORT)
def __add_node(self, node_ip, is_enable):
"""
Method: __add_node
        Description: Add a node.
        Parameter:
            node_ip: IP of the node
            is_enable: whether the node is enabled
        Return: error code
Others:
"""
ret = 0
if node_ip == self.__cluster_cfg_info.my_inner_ip:
return ret
for node in self.__other_nodes:
if node.get_ip() == node_ip:
break
else:
node_info = ClusterNodeInfo(node_ip)
if is_enable == 0:
node_info.set_enable(False)
self.__other_nodes.append(node_info)
url = self.__get_url(node_ip)
ret = self.__callacp_client.new_connect(url)
if ret != 0:
tracelog.error("new connection to cluster node failed. %s" % url)
return ret
def __rmv_node(self, node_ip):
"""
Method: __rmv_node
        Description: Remove a node.
        Parameter:
            node_ip: IP of the node
Return:
Others:
"""
for i, node in enumerate(self.__other_nodes):
if node.get_ip() != node_ip:
continue
self.__other_nodes.pop(i)
url = self.__get_url(node_ip)
self.__callacp_client.rmv_connect(url)
def reload_nodes(self, log_all_nodes = False):
"""
Method: reload_nodes
        Description: Reload the node info from the database.
        Parameter:
            log_all_nodes: whether to log every node that is loaded
        Return: error code, info of the current node
Others:
"""
        # read the info of all nodes from the DB
        # returns: error code, current node
cur_node = None
with self.__lock:
if self.__mit is None:
try:
db_file = os.path.join(self.__app_top_path, "data", "sqlite", "cluster.db")
self.__mit = ClusterMit(db_file)
except:
tracelog.exception("reload cluster node failed.")
return err_code_mgr.ER_CLUSTER_START_FAILED, None
            # load the info of all nodes
other_nodes_ips = set([node.get_ip() for node in self.__other_nodes])
nodes = self.__mit.get_all_nodes()
for node in nodes:
if node.ip == self.__cluster_cfg_info.my_inner_ip:
cur_node = ClusterNodeInfo(node.ip)
if node.is_enable == 0:
cur_node.set_enable(False)
else:
other_nodes_ips.discard(node.ip)
ret = self.__add_node(node.ip, node.is_enable)
if ret != 0:
tracelog.error("add cluster node %s failed." % node.ip)
return err_code_mgr.ER_CLUSTER_START_FAILED, None
if log_all_nodes is True:
tracelog.info("load cluster node: %s" % node.ip)
            # remove nodes that no longer exist in the DB
for node_ip in other_nodes_ips:
self.__rmv_node(node_ip)
return 0, cur_node
def __when_starting(self):
"""
Method: __when_starting
        Description: Handler invoked while this node is in the STARTING state.
        Parameter: None
Return:
Others:
"""
has_other_enable_nodes = False
has_other_online_nodes = False
        # check whether any node has replied with an ack
for node in self.__other_nodes:
if not node.is_enable():
continue
has_other_enable_nodes = True
if node.is_online():
                # another node is online, so start as slave
has_other_online_nodes = True
if node.is_role_master() and self.__mater_node_info.get_ip() == "":
self.__mater_node_info.update(node.get_ip(), node.get_start_time())
if has_other_online_nodes is True:
self.__start_with_slave()
return
if has_other_enable_nodes is False:
            # no other enabled node is available, so start as master
tracelog.info("the current cluster node is the only enabled node.")
self.__start_with_master(CLUSTER_STATE_ONLY_MASTER)
return
        # keep sending state queries while the counter is below
        # CLUSTER_JUDGE_STATE_HAERTBEAT; otherwise start as master
if self.__query_counter < CLUSTER_JUDGE_STATE_HAERTBEAT:
self.__query_other_node_state()
else:
tracelog.info("other cluster nodes didn't respond for state query command.")
self.__start_with_master(CLUSTER_STATE_ONLY_MASTER)
def __when_now_master(self):
"""
Method: __when_now_master
        Description: Handler invoked while this node is the master.
        Parameter: None
Return:
Others:
"""
is_any_other_node_online = False
with self.__lock:
for node in self.__other_nodes:
if not node.is_enable():
continue
                # check for node state changes
                state_change = node.fetch_change_flag()
                if state_change == CLUSTER_NODE_STATE_CHANGE_ONLINE:
                    tracelog.info("cluster node %s is online" % node.get_ip())
                    # a node came online; notify the upper layer
                    self.__cluster_node.on_node_online(node.get_ip())
                elif state_change == CLUSTER_NODE_STATE_CHANGE_OFFLINE:
                    tracelog.info("cluster node %s is offline" % node.get_ip())
                    # a node went offline; notify the upper layer
                    self.__cluster_node.on_node_offline(node.get_ip())
if not node.is_online():
continue
is_any_other_node_online = True
if node.is_role_master():
                    # if a query from a master with a smaller IP was received, switch to slave
                    # note: the IP comparison is a plain string comparison, which is
                    # fine as long as every node applies the same rule
                    if self.is_node_prior(node):
                        tracelog.info("cluster node %s is also master, this node will go to slave" % node.get_ip())
                        self.__mater_node_info.update(node.get_ip(), node.get_start_time())
                        self.__switch_to_slave()
                        return
                    else:
                        tracelog.info("cluster node %s is also master, that node will go to slave" % node.get_ip())
else:
if self.__state == CLUSTER_STATE_ONLY_MASTER:
self.__change_state(CLUSTER_STATE_NORMAL)
if is_any_other_node_online is False and self.__state == CLUSTER_STATE_NORMAL:
self.__change_state(CLUSTER_STATE_ONLY_MASTER)
self.__query_other_node_state()
def __when_now_slave(self):
"""
Method: __when_now_slave
        Description: Handler invoked while this node is a slave.
        Parameter: None
Return:
Others:
"""
        # if no query has come from the master, and there is either no other node
        # or no reply from a node with a smaller IP, switch to master
with self.__lock:
for node in self.__other_nodes:
if not node.is_enable():
continue
if node.is_role_master():
node.check_heartbeat()
if node.is_online():
old_master_ip = self.__mater_node_info.get_ip()
if self.__mater_node_info.update(node.get_ip(), node.get_start_time()):
self.__cluster_node.on_master_change(old_master_ip, self.__mater_node_info.get_ip())
return
tracelog.info("the master cluster node is offline.")
            # switch to the no-master state
self.__change_state(CLUSTER_STATE_NO_MASTER)
self.__reset_query_counter(True)
self.reload_nodes()
def __when_now_no_master(self):
"""
Method: __when_now_no_master
        Description: Handler invoked while the cluster has no master.
        Parameter: None
Return:
Others:
"""
        # in the no-master state, if there is no other node, or no reply from a
        # node with a smaller IP, switch to master
        # whether any other node is online
        has_other_online_node = False
        # whether any higher-priority node is online
        has_prior_online_node = False
self.__query_other_node_state()
with self.__lock:
for node in self.__other_nodes:
if not node.is_enable():
continue
if not node.is_online():
continue
has_other_online_node = True
if node.is_role_master():
tracelog.info("the cluster node %s become to master" % node.get_ip())
self.__mater_node_info.update(node.get_ip(), node.get_start_time())
self.__change_state(CLUSTER_STATE_NORMAL)
return
if self.is_node_prior(node):
has_prior_online_node = True
if has_prior_online_node is True:
                # wait for the higher-priority node to become master
return
if self.__query_counter >= CLUSTER_JUDGE_STATE_HAERTBEAT:
tracelog.info("no higher priority cluster node respond for state query command.")
if has_other_online_node:
self.__switch_to_master(CLUSTER_STATE_NORMAL)
else:
self.__switch_to_master(CLUSTER_STATE_ONLY_MASTER)
def run(self):
"""
Method: run
        Description: Thread run() entry point.
        Parameter: None
Return:
Others:
"""
self.__reset_query_counter(True)
reload_counter = 0
reload_times = 30
while 1:
try:
if self.__role == CLUSTER_ROLE_UNKNOWN:
if self.__state == CLUSTER_STATE_STARTING:
self.__when_starting()
elif self.is_master():
self.__when_now_master()
elif self.is_slave():
if self.__state == CLUSTER_STATE_NO_MASTER:
self.__when_now_no_master()
else:
self.__when_now_slave()
except:
tracelog.exception("error occur")
if self.__stoped_event.wait(2) is True:
break
reload_counter += 1
if reload_counter == reload_times:
self.reload_nodes()
if self.is_master():
                    # periodically re-try binding the virtual IP
self.__bind_virtual_ip(False)
elif self.is_slave():
                    # periodically re-try unbinding the virtual IP
self.__unbind_virtual_ip(False)
reload_counter = 0
self.__clear()
tracelog.info("cluster node stoped.")
def __bind_virtual_ip(self, write_log):
"""
Method: __bind_virtual_ip
        Description: Bind the virtual IP address.
        Parameter:
            write_log: whether to log when binding fails
        Return: error code
Others:
"""
if self.__cluster_cfg_info.virtual_cluster_ip == "":
return
ret, msg = bind_virtual_ip(self.__cluster_cfg_info.virtual_cluster_ip
, self.__cluster_cfg_info.virtual_cluster_mask
, self.__cluster_cfg_info.external_NIC)
if ret != 0 and write_log:
tracelog.error("bind_virtual_ip(%s/%s on %s) failed:%d, %s" % (
self.__cluster_cfg_info.virtual_cluster_ip
, self.__cluster_cfg_info.virtual_cluster_mask
, self.__cluster_cfg_info.external_NIC
, ret
, msg))
return ret
def __unbind_virtual_ip(self, write_log):
"""
Method: __unbind_virtual_ip
        Description: Unbind the virtual IP address.
        Parameter:
            write_log: whether to log when unbinding fails
        Return: error code
Others:
"""
if self.__cluster_cfg_info.virtual_cluster_ip == "":
return
ret, msg = unbind_virtual_ip(self.__cluster_cfg_info.virtual_cluster_ip
, self.__cluster_cfg_info.virtual_cluster_mask
, self.__cluster_cfg_info.external_NIC)
if ret != 0 and write_log:
tracelog.error("unbind_virtual_ip(%s/%s on %s) failed:%d, %s" % (
self.__cluster_cfg_info.virtual_cluster_ip
, self.__cluster_cfg_info.virtual_cluster_mask
, self.__cluster_cfg_info.external_NIC
, ret
, msg))
return ret
def __start_with_master(self, state):
"""
Method: __start_with_master
        Description: Start the current node in the master role.
        Parameter:
            state: the cluster state to enter
Return:
Others:
"""
        # start as master
self.__role = CLUSTER_ROLE_MASTER
self.__state = state
self.__mater_node_info.update(self.__cluster_cfg_info.my_inner_ip, self.__start_time)
tracelog.info("the current cluster node %s start with master, state:%d." % (
self.__cluster_cfg_info.my_inner_ip
, state))
self.__cluster_node.on_start(self.__role, state)
        # after entering the master state, reset the recorded state of the other nodes
self.__reset_query_counter(True)
def __start_with_slave(self):
"""
Method: __start_with_slave
        Description: Start the current node in the slave role.
        Parameter: None
Return:
Others:
"""
        # start as slave
self.__role = CLUSTER_ROLE_SLAVE
self.__state = CLUSTER_STATE_NORMAL
self.__mater_node_info.update("", "")
tracelog.info("the current cluster node %s start with slave." % self.__cluster_cfg_info.my_inner_ip)
self.__cluster_node.on_start(self.__role, self.__state)
def __switch_to_master(self, state):
"""
Method: __switch_to_master
        Description: Switch the role to master.
        Parameter:
            state: the cluster state to enter
Return:
Others:
"""
        # switch to master
old_role = self.__role
old_state = self.__state
self.__mater_node_info.update(self.__cluster_cfg_info.my_inner_ip, self.__start_time)
self.__role = CLUSTER_ROLE_MASTER
self.__state = state
tracelog.info("the current cluster node %s switch to master. state:%d" % (
self.__cluster_cfg_info.my_inner_ip
, state))
ret_code = self.__bind_virtual_ip(True)
if ret_code != 0:
tracelog.error("bind virtual ip faild. ret_code:%d" % ret_code)
self.__cluster_node.on_state_change(old_role, old_state, self.__role, state)
        # after entering the master state, reset the recorded state of the other nodes
self.__reset_query_counter(True)
def __switch_to_slave(self):
"""
Method: __switch_to_slave
        Description: Switch the role to slave.
        Parameter: None
Return:
Others:
"""
        # switch to slave
        old_role = self.__role
        old_state = self.__state
        self.__role = CLUSTER_ROLE_SLAVE
        self.__state = CLUSTER_STATE_NORMAL
        tracelog.info("the current cluster node %s switch to slave. state:%d" % (
            self.__cluster_cfg_info.my_inner_ip
            , self.__state))
        ret_code = self.__unbind_virtual_ip(True)
        if ret_code != 0:
            tracelog.error("unbind virtual ip failed. ret_code:%d" % ret_code)
        self.__cluster_node.on_state_change(old_role, old_state, self.__role, self.__state)
def __change_state(self, state):
"""
Method: __change_state
        Description: Change the current state.
        Parameter:
            state: the new cluster state
Return:
Others:
"""
old_state = self.__state
self.__state = state
tracelog.info("the current cluster node %s change state:%d" % (
self.__cluster_cfg_info.my_inner_ip
, state))
self.__cluster_node.on_state_change(self.__role, old_state, self.__role, state)
def __query_other_node_state(self):
"""
Method: __query_other_node_state
        Description: Query the state of the other nodes.
        Parameter: None
Return:
Others:
"""
req_msg = self.__get_state_msg(cluster_cmd_code.CMD_CLUSTER_QUERY_STATE)
with self.__lock:
for node in self.__other_nodes:
                # send a state query command to the node
url = self.__get_url(node.get_ip())
self.__callacp_client.send(url, req_msg)
                # update the node's heartbeat counter
node.check_heartbeat()
self.__query_counter += 1
def __reset_query_counter(self, set_nodes_to_offline):
"""
Method: __reset_query_counter
        Description: Reset the heartbeat query counter.
        Parameter:
            set_nodes_to_offline: whether to also mark the nodes as offline
Return:
Others:
"""
self.__query_counter = 0
for node in self.__other_nodes:
node.reset_heartbeat(set_nodes_to_offline)
def __get_state_msg(self, cmd_code):
"""
Method: __get_state_msg
        Description: Build a state message for the given command code.
        Parameter:
            cmd_code: the command code
        Return: the state message (pycallacp.AcpMessage)
Others:
"""
state = ClusterStateMsg()
state.ip = self.__cluster_cfg_info.my_inner_ip
state.role = self.get_role()
state.start_time = self.__start_time
msg = pycallacp.AcpMessage(cmd_code
, state.serialize())
return msg
def on_query_state(self, url, msg):
"""
Method: on_query_state
        Description: Handler for the "query state" command.
        Parameter:
            url: URL of the peer that sent the query
            msg: the query message
Return:
Others:
"""
try_times = 1
with self.__lock:
while try_times <= 2:
for node in self.__other_nodes:
if node.get_ip() == msg.ip:
node.on_heartbeat(msg)
try_times = 3
break
else:
if try_times == 2:
tracelog.error("the cluster node %s is unknown" % msg.ip)
else:
                        # reload the node info from the DB and retry
                        tracelog.error("receive state query cmd from unknown "
                                       "node:%s, now trying to reload nodes" % msg.ip)
self.reload_nodes()
try_times += 1
        # send the ack message
ack_msg = self.__get_state_msg(cluster_cmd_code.CMD_CLUSTER_ACK_STATE)
self.__callacp_srv.send(url, ack_msg)
def on_ack_state(self, msg):
"""
Method: on_ack_state
        Description: Handle the ack message of a state query.
        Parameter:
            msg: the state-query ack message
Return:
Others:
"""
with self.__lock:
for node in self.__other_nodes:
if node.get_ip() == msg.ip:
node.on_heartbeat(msg)
break
else:
tracelog.error("receive state ack cmd from unknown node:%s" % msg.ip)
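# A minimal usage sketch (hypothetical driver, not part of the original module).
# `node` stands for a ClusterNode implementation providing the callbacks used
# above (on_start, on_state_change, on_node_online, on_node_offline,
# on_master_change), and `cfg` for a config object carrying at least
# my_inner_ip, max_nodes_num, virtual_cluster_ip, virtual_cluster_mask and
# external_NIC - attribute names inferred from this file only.
#
#     thread = ClusterThread(node)
#     if thread.initial_cluster(cfg, "/opt/app") == 0:
#         thread.start()           # threading.Thread entry point -> run()
#         ...
#         thread.stop_cluster()    # sets the stop event; run() exits cleanly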
| [
"[email protected]"
]
| |
55f8b696d41883c2fcef7f58c03e8f9c9a06bf81 | 9869821dbd52df5083e86eac3afa97422ea07f89 | /sevchefs_api/models.py | 5c60cb8bb0ba9bfeda3f29daa0f8c09e65fb29f7 | []
| no_license | sohjunjie/the7chefs_backend | af18afe4b7279526e1717c2a1113cd69b7e6f0cf | a4eec24050fd07db31d76465c42d5434b1f7a177 | refs/heads/master | 2022-12-12T12:13:48.820638 | 2017-11-06T13:26:32 | 2017-11-06T13:26:32 | 101,177,905 | 0 | 0 | null | 2022-12-08T00:40:54 | 2017-08-23T12:32:54 | Python | UTF-8 | Python | false | false | 6,302 | py | import uuid
from django.db import models
from django.contrib.auth.models import User
def user_avatar_directory_path(instance, filename):
ext = filename.split('.')[-1]
filename = "%s.%s" % (uuid.uuid4(), ext)
return 'user/{0}/{1}'.format(instance.id, filename)
def recipe_image_directory_path(instance, filename):
ext = filename.split('.')[-1]
filename = "%s.%s" % (uuid.uuid4(), ext)
return 'recipe/{0}/{1}'.format(instance.id, filename)
def ingredient_image_directory_path(instance, filename):
ext = filename.split('.')[-1]
filename = "%s.%s" % (uuid.uuid4(), ext)
return 'ingredient/{0}'.format(filename)
def recipe_instruction_image_directory_path(instance, filename):
ext = filename.split('.')[-1]
filename = "%s.%s" % (uuid.uuid4(), ext)
return 'recipe/{0}/{1}/{2}/'.format(instance.recipe.id, instance.id, filename)
class UserProfile(models.Model):
user = models.OneToOneField(User, related_name="userprofile")
description = models.TextField(max_length=500, null=True)
avatar = models.ImageField(upload_to=user_avatar_directory_path, blank=True, null=True)
follows = models.ManyToManyField('UserProfile', related_name='followed_by', blank=True)
favourited_recipes = models.ManyToManyField(
'Recipe',
through='UserRecipeFavourites',
through_fields=('userprofile', 'recipe'),
)
def __str__(self):
return self.user.username
def following_count(self):
return self.follows.all().count()
def followers_count(self):
return self.followed_by.all().count()
class ActivityTimeline(models.Model):
user = models.ForeignKey(User, related_name="timeline")
summary_text = models.TextField(max_length=200, null=False, blank=False)
target_user = models.ForeignKey(User, related_name="mentioned_timeline", null=True)
main_object_image = models.ImageField(blank=True, null=True)
target_object_image = models.ImageField(blank=True, null=True)
datetime = models.DateTimeField(auto_now_add=True)
def get_formatted_summary_text(self, user):
# summary text is about following
aboutfollow = True if "follow" in self.summary_text else False
if self.target_user is None:
if self.user.id == user.id:
return self.summary_text.format("you")
            return self.summary_text.format(self.user.username)
# you followed yourself, you favourited your recipe, you commented on your recipe
if (user.id == self.user.id) and (user.id == self.target_user.id):
if aboutfollow:
return self.summary_text.format("you", "yourself")
return self.summary_text.format("you", "your")
# you followed someone, you favourited someone recipe, you commented on someone recipe
elif user.id == self.user.id:
if aboutfollow:
return self.summary_text.format("you", self.target_user.username)
return self.summary_text.format("you", self.target_user.username + "'s")
# someone followed you, someone favourited your recipe, someone commented on your recipe
elif self.target_user.id == user.id:
if aboutfollow:
return self.summary_text.format(self.target_user.username, "you")
return self.summary_text.format(self.target_user.username, "your")
return self.summary_text.format(self.user.username, self.target_user.username)
class Recipe(models.Model):
name = models.TextField(max_length=100, null=False, blank=False)
description = models.TextField(max_length=500, null=False, blank=False)
upload_datetime = models.DateTimeField(auto_now_add=True)
upload_by_user = models.ForeignKey(User, related_name='recipes')
image = models.ImageField(upload_to=recipe_image_directory_path, blank=True, null=True)
difficulty_level = models.IntegerField(default=0)
ingredients_list = models.ManyToManyField(
'Ingredient',
through='RecipeIngredient',
through_fields=('recipe', 'ingredient'),
)
favourited_by = models.ManyToManyField(
UserProfile,
through='UserRecipeFavourites',
through_fields=('recipe', 'userprofile'),
)
tags = models.ManyToManyField(
'RecipeTag',
through='RecipeTagTable',
through_fields=('recipe', 'tag'),
)
def __str__(self):
return self.name
def get_recipe_tags(self):
return self.tags.all()
def get_recipe_ingredients(self):
return self.ingredients.all()
def get_favourited_count(self):
return self.favourited_by.all().count()
def get_image_url(self):
return str(self.image)
class RecipeComment(models.Model):
recipe = models.ForeignKey(Recipe, related_name='comments')
user = models.ForeignKey(User, related_name='recipe_comments')
text = models.TextField(max_length=500, null=False, blank=False)
datetime = models.DateTimeField(auto_now_add=True)
class UserRecipeFavourites(models.Model):
userprofile = models.ForeignKey(UserProfile)
recipe = models.ForeignKey(Recipe)
datetime = models.DateTimeField(auto_now_add=True)
class RecipeInstruction(models.Model):
recipe = models.ForeignKey(Recipe, related_name='instructions')
step_num = models.IntegerField(default=1)
instruction = models.TextField(max_length=140, null=False, blank=False)
time_required = models.DurationField(null=True)
image = models.ImageField(upload_to=recipe_instruction_image_directory_path, blank=True, null=True)
class Meta:
ordering = ['step_num']
class Ingredient(models.Model):
name = models.TextField(max_length=100, blank=False, null=False)
description = models.TextField(max_length=200)
image = models.ImageField(upload_to=ingredient_image_directory_path, null=True, blank=True)
class RecipeIngredient(models.Model):
recipe = models.ForeignKey(Recipe, related_name="ingredients")
ingredient = models.ForeignKey(Ingredient)
serving_size = models.TextField()
class RecipeTag(models.Model):
text = models.TextField(blank=False, null=False, unique=True)
class RecipeTagTable(models.Model):
recipe = models.ForeignKey(Recipe)
tag = models.ForeignKey(RecipeTag)
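# A small usage sketch (assumes a configured Django project; model and field
# names are taken from above, while the concrete objects are made up):
#
#     profile = some_user.userprofile
#     recipe = Recipe.objects.get(pk=1)
#     UserRecipeFavourites.objects.create(userprofile=profile, recipe=recipe)
#     recipe.get_favourited_count()  # through-model rows also record the datetime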
| [
"[email protected]"
]
| |
9e1300463ec95eed050e294dbf1f512cca88b175 | 890aaf5ffd178ad3c601079cddc520b66dfb4130 | /player/models.py | 7217c17dcd4f4dd6b62357a35e1fd5db9ccc1b45 | []
| no_license | FelixTheC/browsergame | 8dbaacee34835b3e84a2ba14a0ff4c005647cddf | 00bb1eef8f28bf46bbb03d882f3c8019b97fd8a5 | refs/heads/master | 2020-03-27T10:21:57.008465 | 2018-08-29T13:29:28 | 2018-08-29T13:29:28 | 146,415,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | from django.contrib.auth.models import User
from django.db import models
from spell.models import Spell
class Player(User):
class Meta:
        ordering = ['pk']  # Django expects a list/tuple here
db_table = 'player'
level = models.IntegerField(default=0, blank=True)
    # DecimalField requires max_digits/decimal_places; the precisions below are
    # assumed, not taken from the original project
    xp = models.DecimalField(max_digits=12, decimal_places=2, default=0.0, blank=True)
    live_points = models.PositiveIntegerField(default=100, blank=True)
    max_live_points = models.PositiveIntegerField(default=100, blank=True)
    mana_points = models.PositiveIntegerField(default=50, blank=True)
    max_mana_points = models.PositiveIntegerField(default=50, blank=True)
    armor_points = models.DecimalField(max_digits=6, decimal_places=2, default=5.0, blank=True)
spells = models.ManyToManyField(Spell, blank=True, null=True)
def __str__(self):
return self.username
def __repr__(self):
return f'Player - {self.pk}' | [
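# A usage sketch (hypothetical values; assumes migrations for this app and the
# spell app have been applied, and `some_spell` is an existing Spell row):
#
#     p = Player.objects.create(username="hero", level=1)
#     p.spells.add(some_spell)  # the M2M declared above
#     p.live_points = min(p.live_points + 10, p.max_live_points)
#     p.save()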
"[email protected]"
]
| |
f4e7c4d0c00235efdf0c1814f151ce92182ab87c | 187ec84de1e03e2fe1e154dcb128b5886b4d0547 | /chapter_03/exercises/04_guest_list.py | 59d42604ab669d68dc242c613f594a8bbfdac314 | []
| no_license | xerifeazeitona/PCC_Basics | fcbc1b8d5bc06e82794cd9ff0061e6ff1a38a64e | 81195f17e7466c416f97acbf7046d8084829f77b | refs/heads/main | 2023-03-01T07:50:02.317941 | 2021-01-27T21:08:28 | 2021-01-27T21:08:28 | 330,748,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | # 3-4. Guest List
# If you could invite anyone, living or deceased, to dinner, who would
# you invite? Make a list that includes at least three people you’d like
# to invite to dinner. Then use your list to print a message to each
# person, inviting them to dinner.
persons = []
persons.append('jenny')
persons.append('grace')
persons.append('fred')
for person in persons:
print(f"Hello {person.title()}, would you like to come for dinner?") | [
"[email protected]"
]
| |
1447230f766afd41a6dd0803b07be0e8a4ba698d | 200839d1368245d23cf522ed007794210dda8e4b | /test_script.py | 30e0b8348eee91f9caec501a8a252561a8fe2c5d | [
"BSD-3-Clause"
]
| permissive | tmcclintock/Halo_catalog_with_projection | 88911c0311b79642627281e065e36faf2ab05b2d | 9c13b996c29dd54225a1626b1cb20598d4dccbed | refs/heads/master | 2020-03-28T15:54:36.400654 | 2018-09-20T17:32:40 | 2018-09-20T17:32:40 | 148,637,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,206 | py | import numpy as np
import convert_catalog as cc
import matplotlib.pyplot as plt
scatters = [0.2] #only this one for now
scatter = scatters[0]
#Values for the RMR for fox
alpha = 0.77
M_pivot = 10**12.63 #Msun/h
M_min = 10**11.24
z = 1.
inpath = "test_catalog.npy"
conv = cc.Converter(scatter, M_min=M_min, M_pivot=M_pivot, alpha=alpha)
data = np.load(inpath)
Ms = data[:,3] #Mass in Msun/h
Nh = len(data)
Nd = len(data[0]) #x, y, z, M, Np
out = np.zeros((Nh, Nd+3)) #x, y, z, M, Np, lambda_true, lambda_real, lambda_obs
out[:, :Nd] = data #copy in x, y, z, M, Np
out[:, Nd] = conv.lambda_true(Ms)
out[:, Nd+1] = conv.lambda_true_realization(Ms)
count = 0
for ltr in out[:, Nd+1]:
try:
        out[count, Nd+2] = conv.Draw_from_CDF(1, ltr, z)  # fill only this halo's row
except ValueError:
x = conv.cdf4interp
y = conv.l_out_grid
dx = x[1:] - x[:-1]
print x
inds = dx <= 0
ind = np.argmin(dx)
print dx[inds]
print dx[ind-2:ind+2]
plt.plot(x[:-1]-x[1:])
plt.show()
plt.plot(x, y)
plt.show()
exit()
count += 1
if count%1 == 0:
print "Finished %d / %d"%(count, len(Ms))
| [
"[email protected]"
]
| |
85b75024cde8366d99fb76cbbd9525b3975ce3d4 | 95aa6f7c87af7728d999e4038b92ee7f9d91a17c | /PAZCOLOMBIA/pruebaserial.py | 7153e90710fd0a88b29005fff70e7a7ed2b26e38 | []
| no_license | Fermec28/HackathonAzureIoT | f7f56ebf7d8cf89405bb1fa83b89e8fe9ce0ea94 | df666a3ce6fb467b25c2f751fcd67e4c4b9addd1 | refs/heads/master | 2021-01-22T21:59:19.385202 | 2017-03-20T14:41:09 | 2017-03-20T14:41:09 | 85,498,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | import serial
import sys
import time
ser= serial.Serial('/dev/ttyACM0',9600)
ser.write("dato")
dato=""
while dato=="":
while ser.inWaiting()>0:
dato=ser.readline()
print dato
ser.close()
| [
"[email protected]"
]
| |
fee6ebb539107d535f7a54a856b21e86decace56 | 39b228e1261882f41deac10870601c722504eccb | /gputools/convolve/__init__.py | 06f134919322a2919d5e9fa1581b600916b19514 | [
"BSD-3-Clause"
]
| permissive | Traecp/gputools | 4831a6a8dd28191994b57784a5eb9e75c56c6e84 | fd25e9457ddc50bbe1c29eff91f397dcf80a4767 | refs/heads/master | 2021-05-12T06:30:29.043605 | 2017-09-27T22:50:40 | 2017-09-27T22:50:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py |
#from .blur import blur
from .convolve_sep import convolve_sep2, convolve_sep3
from .convolve import convolve
from .convolve_spatial2 import convolve_spatial2
from .convolve_spatial3 import convolve_spatial3
#from .minmax_filter import max_filter, min_filter
from .filters import max_filter, min_filter, uniform_filter
| [
"[email protected]"
]
| |
d83a64cc10d5e83cbeb0a511b422a57b15bc9802 | 8372d349be47f85c6650bf81b2e1d87e5fdcd259 | /modules/app.py | 5358408780fa56e4f02ab6e67292951f78247e31 | []
| no_license | pflashpunk/myrecon.py | b2bd6c22b36f83245e3df0fad7644ff66af12345 | 150eeb5e1473e3bef2ccc9ad2f1419d56c120a4d | refs/heads/master | 2020-09-29T20:02:37.780438 | 2019-12-05T10:51:16 | 2019-12-05T10:51:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,733 | py | # I don't believe in license.
# You can do whatever you want with this program.
import os
import sys
import imp
import time
from modules import functions as func
from colored import fg, bg, attr
class App:
config = []
mods = []
d_app = ''
d_mods = ''
d_output = ''
f_report = ''
f_domains = ''
f_hosts = ''
f_tmphosts = ''
f_dead = ''
f_ips = ''
f_urls = ''
domains = []
n_domains = 0
hosts = []
tmphosts = ''
n_hosts = 0
dead = []
n_dead = 0
ips = []
n_ips = 0
urls = []
n_urls = 0
def __init__( self, config ):
self.config = config
self.d_app = os.path.dirname( os.path.dirname( os.path.realpath(__file__) ) )
self.d_mods = self.d_app + '/modules'
def init( self ):
func.parseargs( self )
def run( self ):
for mod_name in self.mods:
if mod_name in self.config['mandatory_mods'] or 'resume' in self.mods or 'report' in self.mods:
# if mod_name in self.config['mandatory_mods'] or 'resume' in self.mods:
self.runMod( mod_name )
else:
self.launchMod( mod_name )
def runMod( self, mod_name ):
mod_file = self.d_mods + '/' + mod_name + '.py'
if not os.path.isfile(mod_file):
sys.stdout.write( "%s[-] error occurred: %s not found%s\n" % (fg('red'),mod_name,attr(0)) )
else:
py_mod = imp.load_source( mod_name.capitalize(), mod_file)
mod = getattr( py_mod, mod_name.capitalize() )()
try:
mod.run( self )
except Exception as e:
sys.stdout.write( "%s[-] error occurred: %s%s\n" % (fg('red'),e,attr(0)) )
# if hasattr(mod,'postrun'):
# mod.postrun( self )
# if hasattr(mod,'report'):
# mod.report( self )
def launchMod( self, mod_name ):
cmd = sys.argv[0] + ' -r -m ' + mod_name + ' 2>&1 &'
# print( cmd )
os.system( cmd )
def wait( self ):
i = 0
t_chars = ['|','/','-','\\','|','/','-']
l = len(t_chars)
sys.stdout.write( "\n\n" )
for n in range(100000):
time.sleep( 0.5 )
sys.stdout.write( ' %s\r' % t_chars[n%l] )
def setMods( self, t_mods ):
self.mods = t_mods
def setOutputDirectory( self, d_output ):
self.d_output = d_output.rstrip('/')
sys.stdout.write( '[+] output directory is: %s\n' % self.d_output )
self.initFilePath()
def initFilePath( self ):
self.f_report = self.d_output + '/report'
self.f_domains = self.d_output + '/domains'
self.f_hosts = self.d_output + '/hosts'
self.f_tmphosts = self.d_output + '/tmp_hosts'
self.f_dead = self.d_output + '/hosts_dead'
self.f_ips = self.d_output + '/ips'
self.f_urls = self.d_output + '/urls'
self.f_urls_ips = self.d_output + '/urls_ips'
def setDomains( self, t_domains ):
self.domains = t_domains
self.n_domains = len(t_domains)
sys.stdout.write( '%s[+] %d domains found.%s\n' % (fg('green'),self.n_domains,attr(0)) )
if self.n_domains:
fp = open( self.f_domains, 'w' )
fp.write( "\n".join(self.domains) )
fp.close()
sys.stdout.write( '[+] saved in %s\n' % self.f_domains )
def setHosts( self, t_hosts ):
self.hosts = t_hosts
self.n_hosts = len(t_hosts)
sys.stdout.write( '%s[+] %d hosts found.%s\n' % (fg('green'),self.n_hosts,attr(0)) )
if self.n_hosts:
fp = open( self.f_hosts, 'w' )
fp.write( "\n".join(self.hosts) )
fp.close()
sys.stdout.write( '[+] saved in %s\n' % self.f_hosts )
def setIps( self, t_ips, tmphosts ):
self.ips = t_ips
self.n_ips = len(t_ips)
sys.stdout.write( '%s[+] %d ips found.%s\n' % (fg('green'),self.n_ips,attr(0)) )
if self.n_ips:
fp = open( self.f_ips, 'w' )
fp.write( "\n".join(t_ips) )
fp.close()
sys.stdout.write( '[+] saved in %s\n' % self.f_ips )
if len(tmphosts):
fp = open( self.f_tmphosts, 'w' )
fp.write( tmphosts )
fp.close()
def setDeadHosts( self, t_dead ):
sys.stdout.write( '[+] %d dead hosts found, cleaning...\n' % len(t_dead) )
if len(t_dead):
for host in t_dead:
self.hosts.remove( host )
fp = open( self.f_dead, 'w' )
fp.write( "\n".join(t_dead) )
fp.close()
def setUrls( self, t_urls ):
self.urls = t_urls
self.n_urls = len(t_urls)
sys.stdout.write( '%s[+] %d urls created.%s\n' % (fg('green'),self.n_urls,attr(0)) )
if self.n_urls:
fp = open( self.f_urls, 'w' )
fp.write( "\n".join(self.urls) )
fp.close()
sys.stdout.write( '[+] saved in %s\n' % self.f_urls )
def setUrlsIps( self, t_new_urls ):
new_urls = len(t_new_urls)
sys.stdout.write( '%s[+] %d urls created.%s\n' % (fg('green'),new_urls,attr(0)) )
if new_urls:
fp = open( self.f_urls_ips, 'w' )
fp.write( "\n".join(t_new_urls) )
fp.close()
sys.stdout.write( '[+] saved in %s\n' % self.f_urls_ips )
def getReportDatas( self ):
t_vars = {}
if os.path.isfile(self.f_domains):
t_vars['n_domains'] = sum(1 for line in open(self.f_domains))
return t_vars
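# A minimal driver sketch (hypothetical `config` dict; the real entry script is
# expected to supply at least the 'mandatory_mods' key used above):
#
#     app = App(config)
#     app.init()   # parses CLI args into app.mods and the output paths
#     app.run()    # runs mandatory mods inline and forks the others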
| [
"[email protected]"
]
| |
f0481fd9b443fa7e04a7487692c816a354e643d0 | 6d04a5a3ed838df588ebd799d6b411ad4a0423d8 | /setup3.py | 377e930b6745c29b6f0386ab4c70eb19a2cbefc0 | []
| no_license | matthew-brett/arraymakers | f2aa7dbb92fe432d717195be9e8600c90b558bd8 | 220bb91ec9fcbf99820d5c5c2866f51edc5c640b | refs/heads/master | 2016-08-07T23:10:24.426500 | 2009-11-17T06:03:31 | 2009-11-17T06:03:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np
sourcefiles = ['sturlabench.pyx']
setup(
cmdclass = {'build_ext': build_ext},
ext_modules = [Extension("sturlabench",
sourcefiles,
include_dirs = [np.get_include()])]
)
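# Typical build invocation (standard distutils/Cython usage; assumes
# sturlabench.pyx sits next to this file):
#
#     python setup3.py build_ext --inplace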
| [
"[email protected]"
]
| |
a30d5296c270e3f997b65094140ee7f36f85b11d | 06c2bc496f9e285f06e4c3c71f14d5716f411d89 | /source/webapp/migrations/0001_initial.py | 2340231d02b94aea805a5292d5b1c67ac6d30ad9 | []
| no_license | Beknasar/Coin_collection | 37a9e77cc00270dfcb9d0cb5916f985cec4c591d | 091860f98e7dc81d460ab0cbcb6ca1d7fdeffda8 | refs/heads/master | 2023-06-09T16:25:30.473134 | 2021-06-25T09:31:13 | 2021-06-25T09:31:13 | 365,229,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,103 | py | # Generated by Django 2.2 on 2021-05-03 10:56
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Country',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='название страны')),
],
options={
'verbose_name': 'страна',
'verbose_name_plural': 'страны',
},
),
migrations.CreateModel(
name='Currency',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, verbose_name='название валюты')),
],
options={
                'verbose_name': 'валюта',
                'verbose_name_plural': 'валюты',
},
),
migrations.CreateModel(
name='Material',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='название материала')),
],
options={
'verbose_name': 'материал',
'verbose_name_plural': 'материалы',
},
),
migrations.CreateModel(
name='Nominal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='название номинала')),
],
options={
'verbose_name': 'номинал',
'verbose_name_plural': 'номиналы',
},
),
]
| [
"[email protected]"
]
| |
05d17b4c1ba3a133b032f6f66a03a7e2c15c5227 | 119437adb7830659307c18b79a9cc3f6bfc6fe40 | /transformers_learning/text_classification/ernie_model_evaluate.py | ea61f8fb38d798d0f7835c7452c00b911801326d | []
| no_license | percent4/PyTorch_Learning | 478bec35422cdc66bf41b4258e29fbcb6d24f60c | 24184d49032c9c9a68142aff89dabe33adc17b52 | refs/heads/master | 2023-03-31T03:01:19.372830 | 2023-03-17T17:02:39 | 2023-03-17T17:02:39 | 171,400,828 | 16 | 7 | null | 2023-09-02T08:53:26 | 2019-02-19T03:47:41 | Jupyter Notebook | UTF-8 | Python | false | false | 2,001 | py | # -*- coding: utf-8 -*-
# @Time : 2021/1/29 15:54
# @Author : Jclian91
# @File : ernie_model_evaluate.py
# @Place : Yangpu, Shanghai
import json
import torch
import numpy as np
import pandas as pd
from sklearn.metrics import classification_report
from transformers import AutoTokenizer, AutoConfig, AutoModelForSequenceClassification
from params import *
from model_train import convert_text_to_ids, seq_padding, test_file
# read label id dict
with open("{}_label2id.json".format(dataset), "r", encoding="utf-8") as g:
label_id_dict = json.loads(g.read())
id_label_dict = {v: k for k, v in label_id_dict.items()}
# load model
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
config = AutoConfig.from_pretrained("../ernie-1.0", num_labels=num_labels, hidden_dropout_prob=hidden_dropout_prob)
model = AutoModelForSequenceClassification.from_pretrained("../ernie-1.0", config=config)
model.to(device)
state_dict = torch.load('{}_ernie_cls.pth'.format(dataset))
model.load_state_dict(state_dict)
tokenizer = AutoTokenizer.from_pretrained("../ernie-1.0")
# read test file
test_df = pd.read_csv(test_file)
contents, true_labels = test_df["content"].tolist(), test_df["label"].tolist()
# model evaluate
pred_labels = []
for i, text in enumerate(contents):
print("predict {} samples".format(i+1))
input_ids, token_type_ids = convert_text_to_ids(tokenizer, [text], max_sequence_length)
# print(input_ids, token_type_ids)
input_ids = seq_padding(tokenizer, input_ids)
token_type_ids = seq_padding(tokenizer, token_type_ids)
input_ids, token_type_ids = input_ids.long(), token_type_ids.long()
input_ids, token_type_ids = input_ids.to(device), token_type_ids.to(device)
output = model(input_ids=input_ids, token_type_ids=token_type_ids)
label_id = np.argmax(output[0].detach().cpu().numpy(), axis=1)[0]
pred_labels.append(id_label_dict[label_id])
# print evaluate output
print(classification_report(true_labels, pred_labels, digits=4)) | [
"[email protected]"
]
| |
2390996443d739e28ca04fe04250dcbfc1c2f21f | f7e93ca894f21cead7ef8c3681f4df548ba0a1a1 | /typings/pymongo/read_concern.pyi | 364db579bd3ea1d3964bf0932ce648a54e46d757 | [
"Apache-2.0"
]
| permissive | GlennOlsson/mongo-types | 20a3429a32528681be645600b5889a449c606b50 | 19a24d99aa8872e384c8017dd2564c84e9ff755a | refs/heads/main | 2023-05-06T14:37:31.069202 | 2021-05-18T15:58:42 | 2021-05-18T15:58:42 | 368,573,598 | 0 | 0 | Apache-2.0 | 2021-05-18T15:58:42 | 2021-05-18T15:06:56 | null | UTF-8 | Python | false | false | 23 | pyi | class ReadConcern: ...
| [
"[email protected]"
]
| |
458263b68266a9818ebc55736b4bc25d6fa981e2 | 6b2db6fca8f31c4e6c96e68cf11e5ca3ce7e8a9b | /src/calPosteriorModifyForIdeal.py | 2836c82b00fcdfe11ca8ba606f41756da73a5ca0 | [
"MIT"
]
| permissive | ningtangla/escapeFromMultipleSuspectors | e04da12488be9767c5b6511355c167fdcf18e723 | e6dcb0f7f9371b7ca6cca8779f69f18095092140 | refs/heads/master | 2022-05-03T05:25:21.556950 | 2022-04-20T13:51:53 | 2022-04-20T13:51:53 | 190,686,484 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,707 | py | import pandas as pd
import numpy as np
import scipy.stats as stats
import math
def calAngleLikelihoodLogModifiedForPiRange(angle, kappa):
return stats.vonmises.logpdf(angle, kappa) + np.log(2)
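# Note on the log(2) term above: stats.vonmises.logpdf normalizes over the full
# circle (-pi, pi]; adding log(2) renormalizes the density to a half circle,
# which matches angle deviations folded into a pi-wide range. This reading is
# inferred from the function name ("ModifiedForPiRange"), not stated elsewhere.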
class CalPosteriorLog():
def __init__(self, minDistance):
self.minDistance = minDistance
def __call__(self, hypothesesInformation, observedData):
hypothesesInformation['chasingLikelihoodLog'] = calAngleLikelihoodLogModifiedForPiRange(observedData['wolfDeviation'], 1/(1/hypothesesInformation.index.get_level_values('chasingPrecision') + 1/hypothesesInformation['perceptionPrecision']))
hypothesesInformation['escapingLikelihoodLog'] = 0
originPrior = np.exp(hypothesesInformation['logP'].values)
normalizedPrior = np.maximum([1e-300] * len(originPrior), originPrior / np.sum(originPrior))
hypothesesInformation['beforeLogPAfterDecay'] = np.log(normalizedPrior) * hypothesesInformation['memoryDecay']
#hypothesesInformation['beforeLogPAfterDecay'] = hypothesesInformation['memoryDecay'] * hypothesesInformation['logP']
#print(np.exp(hypothesesInformation['logP']).values)
#print('***', originPrior)
#print('!!!', normalizedPrior)
#distanceLikelihoodLog = np.array([-50 if distance <= self.minDistance else 0 for distance in observedData['distanceBetweenWolfAndSheep'].values])
distanceLikelihoodLog = 0
hypothesesInformation['logP'] = hypothesesInformation['beforeLogPAfterDecay'] + hypothesesInformation['chasingLikelihoodLog'] \
+ hypothesesInformation['escapingLikelihoodLog'] + distanceLikelihoodLog
return hypothesesInformation
| [
"[email protected]"
]
| |
5abb8da374c2b313c8e9f7bd922bec0aefe29d2f | 9ec109e52db63c3ff60b299d5b441167e5f1403e | /rules/clustering.smk | 88d3d57128d79d28c8155411747df23e7482f154 | [
"MIT"
]
| permissive | metagenome-atlas/genecatalog_atlas | 47edac84cf5c9f391e0a1615e9f54afe9bb12219 | e83d27c6f2ad6a10c9139db73d8e8f517f4fb6dd | refs/heads/master | 2021-11-12T11:07:58.330563 | 2021-11-03T10:26:24 | 2021-11-03T10:26:24 | 235,655,500 | 1 | 1 | MIT | 2020-03-10T15:43:39 | 2020-01-22T20:00:54 | Python | UTF-8 | Python | false | false | 8,829 | smk | import os
localrules: input_genes
rule input_genes:
input:
faa= os.path.abspath(config['input_faa'])
output:
faa= temp("genecatalog/input.faa")
shell:
"ln -s {input} {output}"
rule createdb:
input:
"genecatalog/{catalogname}.faa"
output:
temp(directory("genecatalog/{catalogname}_mmseqdb"))
threads:
1
conda:
"../envs/mmseqs.yaml"
log:
"logs/genecatalog/make_db/{catalogname}.log"
benchmark:
"logs/benchmarks/createdb/{catalogname}.tsv"
shell:
"mkdir {output} 2> {log} ; "
"mmseqs createdb {input} {output}/db >> {log} 2>> {log} "
rule cluster_genes:
input:
db="genecatalog/input_mmseqdb"
output:
clusterdb = temp(directory("genecatalog/clustering/mmseqs"))
conda:
"../envs/mmseqs.yaml"
log:
"logs/genecatalog/clustering/cluster_proteins.log"
threads:
config["threads"]
params:
tmpdir= os.path.join(config['tmpdir'],"mmseqs"),
clustermethod = 'linclust' if config['clustermethod']=='linclust' else 'cluster',
coverage=config['coverage'], #0.8,
minid=config['minid'], # 0.00
extra=config['extra'],
shell:
"""
mkdir -p {params.tmpdir} {output} 2>> {log}
mmseqs {params.clustermethod} -c {params.coverage} \
--min-seq-id {params.minid} {params.extra} \
--threads {threads} {input.db}/db {output.clusterdb}/db {params.tmpdir} >> {log} 2>> {log}
rm -fr {params.tmpdir} 2>> {log}
"""
rule get_rep_proteins:
input:
db= rules.cluster_genes.input.db,
clusterdb = rules.cluster_genes.output.clusterdb,
output:
rep_seqs_db = temp(directory("genecatalog/protein_catalog")),
rep_seqs = temp("genecatalog/representatives_of_clusters.fasta")
conda:
"../envs/mmseqs.yaml"
log:
"logs/genecatalog/clustering/get_rep_proteins.log"
benchmark:
"logs/benchmarks/get_rep_proteins.tsv"
resources:
mem=config['mem']['low']
threads:
1
shell:
"""
mkdir {output.rep_seqs_db} 2>> {log}
mmseqs result2repseq {input.db}/db {input.clusterdb}/db {output.rep_seqs_db}/db >> {log} 2>> {log}
mmseqs result2flat {input.db}/db {input.db}/db {output.rep_seqs_db}/db {output.rep_seqs} >> {log} 2>> {log}
"""
rule get_mapping_original:
input:
db= rules.cluster_genes.input.db,
clusterdb = rules.cluster_genes.output.clusterdb,
output:
cluster_attribution = temp("genecatalog/clustering/cluster_attribution.tsv"),
conda:
"../envs/mmseqs.yaml"
log:
"logs/genecatalog/clustering/get_rep_proteins.log"
benchmark:
"logs/benchmarks/get_mapping_original.tsv"
resources:
time=config['runtime']['short'],
mem=config['mem']['low']
threads:
1
shell:
"""
mmseqs createtsv {input.db}/db {input.db}/db {input.clusterdb}/db {output.cluster_attribution} > {log} 2>> {log}
"""
rule rename_gene_catalog:
input:
faa= "genecatalog/representatives_of_clusters.fasta",
log= "logs/genecatalog/clustering/cluster_proteins.log"
output:
faa= "genecatalog/gene_catalog.faa",
name_mapping = temp("genecatalog/clustering/renamed_genenames.tsv.gz"),
shadow: "minimal"
benchmark:
"logs/benchmarks/rename_catalog.tsv"
resources:
time=config['runtime']['short'],
mem=config['mem']['low']
threads:
1
log:
"logs/genecatalog/clustering/rename_catalog.log"
params:
prefix='Gene'
script:
"../scripts/rename_catalog.py"
rule rename_mapping:
input:
name_mapping = "genecatalog/clustering/renamed_genenames.tsv.gz",
cluster_mapping ="genecatalog/clustering/cluster_attribution.tsv"
output:
"genecatalog/clustering/orf2gene.h5",
params:
headers = ('ORF','Gene')
resources:
time=config['runtime']['short'],
mem=config['mem']['low']
benchmark:
"logs/benchmarks/genecatalog/clustering/rename_mapping.tsv"
threads:
1
log:
"logs/genecatalog/clustering/rename_mapping_clusters.log"
conda:
"../envs/hdf.yaml"
shadow:
"minimal"
script:
"../scripts/rename_mapping.py"
#### SUBCLUSTERING ####
def get_subcluster_id(wildcards):
id= int(wildcards.id)
    assert 30 < id < 100, f"id should be an integer strictly between 30 and 100, got {wildcards.id}"
id = id/100
if id >= float(config['minid']):
logger.error("Id for gene subclustering should be lower than that the gene catalog"
f" {id} is not smaller than {config['minid']}"
)
exit(1)
return id
rule subcluster_genes:
input:
db=ancient("genecatalog/gene_catalog_mmseqdb"),
faa="genecatalog/gene_catalog.faa" # used to update if genecatalog updates
output:
clusterdb = temp(directory("genecatalog/subcluster/GC{id}_mmseqs")),
conda:
"../envs/mmseqs.yaml"
log:
"logs/genecatalog/subcluster/cluster_{id}.log"
threads:
config["threads"]
params:
clustermethod = 'linclust' if config['clustermethod']=='linclust' else 'cluster',
coverage=config['coverage'],
minid= get_subcluster_id,
extra=config['extra'],
tmpdir= directory(os.path.join(config['tmpdir'],"GC{id}_subcluster"))
shell:
"""
mkdir -p {output} {params.tmpdir} 2> {log}
mmseqs {params.clustermethod} -c {params.coverage} \
--min-seq-id {params.minid} {params.extra} \
--threads {threads} {input.db}/db {output.clusterdb}/db {params.tmpdir} >> {log} 2>> {log}
"""
rule get_rep_subclusters:
input:
db=ancient(rules.subcluster_genes.input.db),
clusterdb = rules.subcluster_genes.output.clusterdb,
output:
rep_seqs_db = temp(directory("genecatalog/subcluster/GC{id}_rep")),
rep_seqs = temp("genecatalog/subcluster/GC{id}_representatives.fasta")
conda:
"../envs/mmseqs.yaml"
log:
"logs/genecatalog/subcluster/GC{id}_get_rep_proteins.log"
threads:
config.get("threads", 1)
shell:
"""
mkdir {output.rep_seqs_db} 2>> {log}
mmseqs result2repseq {input.db}/db {input.clusterdb}/db {output.rep_seqs_db}/db >> {log} 2>> {log}
mmseqs result2flat {input.db}/db {input.db}/db {output.rep_seqs_db}/db {output.rep_seqs} >> {log} 2>> {log}
"""
rule rename_subcluster_catalog:
input:
faa= "genecatalog/subcluster/GC{id}_representatives.fasta",
log= "logs/genecatalog/subcluster/cluster_{id}.log"
output:
faa= "genecatalog/subcluster/gc{id}.fasta",
name_mapping = temp("genecatalog/clustering/GC{id}_name_mapping.tsv"),
shadow: "minimal"
benchmark:
"logs/benchmarks/GC{id}_rename_gene_clusters.tsv"
resources:
time=config['runtime']['short'],
mem=config['mem']['low']
threads:
1
log:
"logs/genecatalog/clustering/GC{id}_rename_gene_clusters.log"
params:
prefix='GC{id}_'
script:
"../scripts/rename_catalog.py"
rule get_subcluster_mapping_original:
input:
db= rules.subcluster_genes.input.db,
clusterdb = rules.subcluster_genes.output.clusterdb,
output:
cluster_attribution = temp("genecatalog/clustering/GC{id}_cluster_attribution.tsv"),
conda:
"../envs/mmseqs.yaml"
log:
"logs/genecatalog/clustering/GC{id}_get_rep_proteins.log"
benchmark:
"logs/benchmarks/GC{id}_get_mapping_original.tsv"
resources:
time=config['runtime']['short'],
mem=config['mem']['low']
threads:
1
shell:
"""
mmseqs createtsv {input.db}/db {input.db}/db {input.clusterdb}/db {output.cluster_attribution} > {log} 2>> {log}
"""
rule rename_subcluster_mapping:
input:
name_mapping = "genecatalog/clustering/GC{id}_name_mapping.tsv",
cluster_mapping ="genecatalog/clustering/GC{id}_cluster_attribution.tsv"
output:
"genecatalog/clustering/gene2gc{id}.h5",
threads:
1
log:
"logs/genecatalog/clustering/GC{id}_rename_mapping_clusters.log"
benchmark:
"logs/benchmarks/genecatalog/clustering/rename_mapping_GC{id}.tsv"
params:
headers = lambda wc: ('Gene',f'GC{wc.id}')
resources:
time=config['runtime']['short'],
mem=config['mem']['low']
shadow:
"minimal"
conda:
"../envs/hdf.yaml"
script:
"../scripts/rename_mapping.py"
| [
"[email protected]"
]
| |
ee7f9224ffd1fdcb74a482747ba70aad5787d8a9 | 046207f434966462fff55f634ba5a450d2208534 | /PythonForBeginner/source_code/151.py | 028f9b583b104aa52315e036794c68c63c2389e3 | []
| no_license | sungsikyang92/pythonStudy | e293e1ac8af443809f840ccee7052a8f57480b70 | 26522b5e232ccd9ab25c52122d254aa7249a8fdf | refs/heads/master | 2023-07-04T16:58:40.318976 | 2021-08-04T02:00:27 | 2021-08-04T02:00:27 | 365,398,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | import os
pdir = os.getcwd()
print(pdir)
os.chdir('..')
print(os.getcwd())
os.chdir(pdir)
print(os.getcwd())
| [
"[email protected]"
]
| |
926644d6b51709392d243e98819486b28d544b79 | e6d4a87dcf98e93bab92faa03f1b16253b728ac9 | /algorithms/python/deleteLeavesWithaGivenValue/deleteLeavesWithaGivenValue.py | b5d6b7308887d04f9df27fa9e19a93de3af9356a | []
| no_license | MichelleZ/leetcode | b5a58e1822e3f6ef8021b29d9bc9aca3fd3d416f | a390adeeb71e997b3c1a56c479825d4adda07ef9 | refs/heads/main | 2023-03-06T08:16:54.891699 | 2023-02-26T07:17:47 | 2023-02-26T07:17:47 | 326,904,500 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Source: https://leetcode.com/problems/delete-leaves-with-a-given-value/
# Author: Miao Zhang
# Date: 2021-04-23
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def removeLeafNodes(self, root: TreeNode, target: int) -> TreeNode:
if not root:
return None
root.left = self.removeLeafNodes(root.left, target)
root.right = self.removeLeafNodes(root.right, target)
if not root.left and not root.right and root.val == target:
return None
return root
| [
"[email protected]"
]
| |
6c42d5c74d411827cb4f887bc2345c3d697d5ce6 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02623/s803927291.py | 1bb8ba08366a2d97232987232cea5c363032232f | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | from sys import stdin
import sys
n,m,k = [int(x) for x in stdin.readline().rstrip().split()]
A = [int(x) for x in stdin.readline().rstrip().split()]
B = [int(x) for x in stdin.readline().rstrip().split()]
cc = []
count = 0
sumA = 0
for i in range(n):
sumA_ = sumA + A[i]
if sumA_ > k: break
sumA = sumA_
count = count + 1
maxA = count
sumB = 0
j = 0
for i in range(maxA+1):
if i != 0:
count = count - 1
sumA = sumA - A[maxA-i]
while True:
sumB_ = sumB + B[j]
if sumA + sumB_ > k:
cc.append(count)
break
sumB = sumB_
count = count + 1
j = j + 1
if j == m:
cc.append(count)
break
if j == m:
break
if cc == []:
print (0)
else:
print (max(cc))
| [
"[email protected]"
]
| |
e48db17d4ddd3254f132e9542692a9665eed806f | ddda55fcfc84ac5cd78cfc5c336a3df0b9096157 | /drivers/hal/fm/FM33A0xx_HAL/SConscript | 0a0392c9f48f9aae5ecbdab86877c156ad8ca896 | [
"Apache-2.0"
]
| permissive | liu-delong/lu_xing_xiang_one_os | 701b74fceb82dbb2806518bfb07eb85415fab43a | 0c659cb811792f2e190d5a004a531bab4a9427ad | refs/heads/master | 2023-06-17T03:02:13.426431 | 2021-06-28T08:12:41 | 2021-06-28T08:12:41 | 379,661,507 | 2 | 2 | Apache-2.0 | 2021-06-28T10:08:10 | 2021-06-23T16:11:54 | C | UTF-8 | Python | false | false | 1,519 | import osconfig
from build_tools import *
# get current directory
pwd = PresentDir()
# The set of source files associated with this SConscript file.
src = Split('''
FM33A0xx_HAL_Driver/Src/fm33a0xx_aes.c
FM33A0xx_HAL_Driver/Src/fm33a0xx_anac.c
FM33A0xx_HAL_Driver/Src/fm33a0xx_btim.c
FM33A0xx_HAL_Driver/Src/fm33a0xx_crc.c
FM33A0xx_HAL_Driver/Src/fm33a0xx_dma.c
FM33A0xx_HAL_Driver/Src/fm33a0xx_etim.c
FM33A0xx_HAL_Driver/Src/fm33a0xx_gpio.c
FM33A0xx_HAL_Driver/Src/fm33a0xx_hspi.c
FM33A0xx_HAL_Driver/Src/fm33a0xx_lcd.c
FM33A0xx_HAL_Driver/Src/fm33a0xx_lptim.c
FM33A0xx_HAL_Driver/Src/fm33a0xx_pmu.c
FM33A0xx_HAL_Driver/Src/fm33a0xx_rcc.c
FM33A0xx_HAL_Driver/Src/fm33a0xx_scu.c
FM33A0xx_HAL_Driver/Src/fm33a0xx_trng.c
FM33A0xx_HAL_Driver/Src/fm33a0xx_u7816.c
FM33A0xx_HAL_Driver/Src/fm33a0xx_wwdt.c
FM33A0xx_HAL_Driver/Src/fm33a0xx_flash.c
''')
if IsDefined(['OS_USING_SERIAL']):
src += ['FM33A0xx_HAL_Driver/Src/fm33a0xx_uart.c']
if IsDefined(['OS_USING_I2C']):
src += ['FM33A0xx_HAL_Driver/Src/fm33a0xx_i2c.c']
if IsDefined(['OS_USING_SPI']):
src += ['FM33A0xx_HAL_Driver/Src/fm33a0xx_spi.c']
if IsDefined(['OS_USING_RTC']):
src += ['FM33A0xx_HAL_Driver/Src/fm33a0xx_rtc.c']
if IsDefined(['OS_USING_WDT']):
src += ['FM33A0xx_HAL_Driver/Src/fm33a0xx_iwdt.c']
path = [pwd + '/FM33A0xx_HAL_Driver/Inc',
pwd + '/CMSIS']
CPPDEFINES = ['USE_HAL_DRIVER']
group = AddCodeGroup('hal', src, depend = ['SOC_SERIES_FM33A0'], CPPPATH = path, CPPDEFINES = CPPDEFINES)
Return('group')
| [
"[email protected]"
]
| ||
bdd1e985f8f65ddfde119fad57709df0f8f15b2e | e9e6a89c200f1800bf4fb6f1ef5ec4926efb2783 | /mTRPO/mytrpo/util/__init__.py | 007b858818d9cf8bd810bd4315067b639d345e78 | []
| no_license | wwxFromTju/baselines_trpo | 331d7ed6788ead30313834605a5a92165a4a4d32 | 5c01c5f40cc2f96c7c06da392292d01e3c99f3b8 | refs/heads/master | 2021-04-27T03:58:37.730940 | 2018-02-26T08:23:08 | 2018-02-26T08:23:08 | 122,723,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | from mytrpo.util.normal_util import *
from mytrpo.util.dataset import Dataset
from mytrpo.util.normal_util import *
def explained_variance(ypred,y):
"""
Computes fraction of variance that ypred explains about y.
Returns 1 - Var[y-ypred] / Var[y]
interpretation:
ev=0 => might as well have predicted zero
ev=1 => perfect prediction
ev<0 => worse than just predicting zero
"""
assert y.ndim == 1 and ypred.ndim == 1
vary = np.var(y)
return np.nan if vary==0 else 1 - np.var(y-ypred)/vary
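# Quick self-check for explained_variance (synthetic data, not from the
# original project):
if __name__ == "__main__":
    import numpy as np
    rng = np.random.RandomState(0)
    y = rng.randn(1000)
    noise = 0.1 * rng.randn(1000)
    print(explained_variance(y + noise, y))  # close to 1: predictions explain y
    print(explained_variance(noise, y))      # near 0 or below: no signal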
| [
"[email protected]"
]
| |
f27edd291982fde6ab630cad2358fd98bfae178f | a35b24c8c3c5bdf861f3cda9396f2fa6795ec929 | /abc/093/A.py | de1bd8b4e45dfc6460741234fe47dc34d4c5658d | []
| no_license | Msksgm/atcoder_msksgm_practice | 92a19e2d6c034d95e1cfaf963aff5739edb4ab6e | 3ae2dcb7d235a480cdfdfcd6a079e183936979b4 | refs/heads/master | 2021-08-18T16:08:08.551718 | 2020-09-24T07:01:11 | 2020-09-24T07:01:11 | 224,743,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | def main():
s = list(input())
s_num = len(set(s))
if s_num == 3:
print('Yes')
else:
print('No')
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
97c4cf3325b10b0c2e3d9b74f8bc1b85d3ada71f | 48d820d4bd6a433c2b0fdb0dcb7657b62db050bf | /Training_Work/odoo_backup_wfo_till_april/custom_addons/college_portal/models/clg_students.py | 905875d5741165343d2a34ec60aba5c89829b26e | []
| no_license | dhruv-aktiv/training_task_data | 1a30580a512aa4831fb547b250faffff11f7e008 | 3d8b25ca812e876a484d387fc57272257322c85f | refs/heads/master | 2023-06-07T07:06:04.193576 | 2021-07-01T04:37:13 | 2021-07-01T04:37:13 | 381,908,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,032 | py | import datetime
import math
import random
import string
from odoo import api, fields, models
from odoo.exceptions import UserError, ValidationError
class Clg_Student(models.Model):
_name = 'clg.student.detail'
_description = 'college student detail'
user_name = fields.Char(string='User Name')
name = fields.Char(string='Student Name',
default="your name")
mobile_no = fields.Char(string='Student Mo No.')
email_id = fields.Char(string='Student email id.')
address = fields.Text(string='Student Address')
dob = fields.Date(string='Student Date of Birth')
gender = fields.Selection(
[('male', 'Male'), ('female', 'Female'), ], 'Gender', default='male')
id_no = fields.Integer(compute='compute_id_no', store=True)
todos = fields.Many2one(
'todos.detail', string="Todo")
age = fields.Integer()
courses = fields.Many2one('courses.detail', string="Course")
spend_amt = fields.Float(string='Amount to spend', compute='compute_amt')
res = fields.Char()
# def name_get(self):
# name = []
# for rec in self:
# name = f"{rec.name} ({rec.user_name})"
# return name
# @api.onchange('dob')
# def calc_age(self):
# for rec in self:
# if rec.dob:
# your_date = rec.dob
# today_date = datetime.date.today()
# rec.age = abs(((today_date - your_date) // 365).days)
# @api.model
# def create(self, vals):
# print(f"\n\n\n student input dt : {vals}\n\n\n")
# # clg_student = super(Clg_Student, self).create(vals)
# task_dt = self.env['todos.detail'].create(
# {'title': 'new to dfdod'})
# # # vals['todos'] = task_dt[0]
# # # clg_student.write(vals)
# print(f"\n\n\n\n {task_dt.id} \n\n\n")
# vals['name'] = 'xyzw'
# vals['todos'] = task_dt.id
# return super(Clg_Student, self).create(vals)
# @api.model
# def create(self, vals):
# print(f"\n\n\n student input dt : {vals}\n\n\n")
# clg_student = super(Clg_Student, self).create(vals)
# # task_dt = self.env['todos.detail'].create(
# # {'title': 'make a programming language'})
# # vals['todos'] = task_dt[0]
# # clg_student.write(vals)
# return clg_student
@api.model
def create(self, vals):
print(f"student vals {vals}")
clg_student = super(Clg_Student, self).create(vals)
course_dt = self.env['courses.detail'].create(
{'name': 'DsManthan course '})
vals['courses'] = course_dt[0]
clg_student.write(vals)
print(course_dt)
return clg_student
def write(self, vals):
vals['mobile_no'] = 8945631274
clg_up_student = super(Clg_Student, self).write(vals)
print(f"n\n\n{type(clg_up_student)}\n\n\n")
return clg_up_student
# def write(self, vals):
# vals['email_id'] = '[email protected]'
# clg_up_student = super(Clg_Student, self).write(vals)
# print(f"n\n\n{type(clg_up_student)}\n\n\n")
# return clg_up_student
# def unlink(self):
# print("\n\n\nlink done sucessfully\n\n\n")
# print(f"\n\n\n {super(Clg_Student, self).unlink()} \n\n\n")
# return super(Clg_Student, self).unlink()
def search_read_func(self):
# read
read_res = self.env['clg.student.detail'].search(
[('name', '=', "fdefd")]).read(['name'])
print(f"\n\n\nread() res : {read_res}\n\n\n")
self.res = "read res : " + str(read_res)
# read
search_read_res = self.env['clg.student.detail'].search_read(
[('gender', '=', "male")], ['name'])
self.res += "<br>" + "search_read res : " + str(search_read_res)
print(f"\n\n\nsearch_read() res : {search_read_res}\n\n\n")
def search_func(self):
# search
search_res = self.env['clg.student.detail'].search(
[('gender', '=', 'female')])
print(f"\n\n\n search() res : {search_res} \n\n\n")
# search_count
search_cnt = self.env['clg.student.detail'].search_count(
[('gender', '=', 'male')])
print(f"\n\n\nsearch_count() res : {search_cnt}\n\n\n")
# #browse
browse_res = self.env['clg.student.detail'].browse([5])
print(f"\n\n\nbrowse() res : {browse_res}\n\n\n")
# @api.model
# def create(self, vals):
# print(f"values that we get before: {vals}")
# vals['mobile_no'] = str(8945128910)
# print(f"values that we get after: {vals}")
# clg_student = super(Clg_Student, self).create(vals)
# print(f"return vals : {clg_student}")
# print(type(clg_student))
# return clg_student
# else:
# raise ValidationError("user name is required.")
@api.depends('courses')
def compute_amt(self):
discount_per = 15
for rec in self:
if rec.courses.rate > 500:
# print(f"\n\n\n{rec.courses.rate * (discount_per //
# 100)}\n\n\n")
rec.spend_amt = rec.courses.rate - \
(rec.courses.rate * (discount_per / 100))
else:
discount_per = 5
rec.spend_amt = rec.courses.rate - \
(rec.courses.rate * (discount_per / 100))
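        # Worked example: a course rate of 1000 gets the 15% discount
        # (spend_amt = 1000 - 150 = 850), while a rate of 400 falls back to
        # the 5% discount (spend_amt = 400 - 20 = 380).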
@api.depends('todos')
def compute_id_no(self):
for rec in self:
rec.id_no = 0
            if rec.todos.is_done:
rec.id_no = 5
else:
rec.id_no = 15
# @api.depends('name')
# def compute_id_no(self):
# for rec in self:
# self.id_no = 0
# if rec.name == 'manthan':
# rec.id_no = 5
# else:
# rec.id_no = 15
# @api.model
# def name_create(self, name):
# for i in self:
# rec = i.name + i.user_name
# return rec
    # def generate_user_name(self):
    #     self.user_name = ''.join(random.choice(
    #         string.ascii_uppercase + string.ascii_lowercase + string.digits)
    #         for _ in range(random.randint(9, 15)))
    #     for rec in self:
    #         rec.write({'user_name': ''.join(random.choice(
    #             string.ascii_uppercase + string.ascii_lowercase + string.digits)
    #             for _ in range(random.randint(9, 15)))})
# @api.constrains("mobile_no")
# def check_mobile_no(self):
# if str(self.mobile_no).strip() != 'False':
# print("\n\n\n True \n\n\n")
# if not str(self.mobile_no).isdigit():
# raise ValidationError("Please enter valid mobile no.")
# else:
# if len(str(self.mobile_no).strip()) != 10:
# raise ValidationError("mobile no. size must be 10.")
# def write(self, vals):
# vals['email_id'] = '[email protected]'
# return super(Clg_Student, self).write(vals)
| [
"[email protected]"
]
| |
06b13d5e00ab9ca1dbae55b35b9bdac815c1d6dc | 448ae937e516e6c55f425068bad3da49d22fa138 | /splendor/splendor_agents/utils/value_function.py | cfb39a2c5934f48661a6b37bf8227be35e585ee3 | []
| no_license | TomaszOdrzygozdz/alpha_splendor | a1ef5048e4abb3794bffc40ef63591f8d0afe545 | 599f8c2969c4b544d2c28dfef0720ecc7b025884 | refs/heads/master | 2022-07-16T19:01:06.821146 | 2020-05-16T09:44:05 | 2020-05-16T09:44:05 | 255,910,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,219 | py | import numpy as np
from splendor.envs.mechanics.enums import GemColor
from splendor.envs.mechanics.gems_collection import GemsCollection
from splendor.envs.mechanics.players_hand import PlayersHand
from splendor.envs.mechanics.state import State
class SplendorValueFunction:
def __init__(self, points_to_win):
self.weights = [1000, 800, 500, 650, 0, -2000, 0, 0, 0, 0, 0, 0, 150, 180, 10, 10, 0, 0, 0, 0]
self.scaling_factor = 1/30000
self.points_to_win = points_to_win
def set_weights(self, weights):
self.weights = weights
def pre_card_frac_value(self, gems : GemsCollection, price : GemsCollection):
s = 0
total_price = sum(list(price.gems_dict.values()))
for color in GemColor:
if price.gems_dict[color] > 0:
s += (gems.gems_dict[color] - price.gems_dict[color]) / total_price
s += gems.gems_dict[GemColor.GOLD] / total_price
return s
def cards_stats(self, state, active: bool):
p_hand = state.active_players_hand() if active else state.other_players_hand()
discount = p_hand.discount()
cards_affordability = sum([int(p_hand.can_afford_card(card, discount)) for card in state.board.cards_on_board]) + \
sum([int(p_hand.can_afford_card(card, discount)) for card in
p_hand.cards_reserved])
value_affordability = sum([card.victory_points*int(p_hand.can_afford_card(card, discount)) for card in state.board.cards_on_board]) + \
sum([card.victory_points * int(
p_hand.can_afford_card(card, discount)) for card in
p_hand.cards_reserved])
cards_frac_value = sum([self.pre_card_frac_value(p_hand.discount() + p_hand.gems_possessed, card.price) for card in
state.board.cards_on_board])
nobles_frac_value = sum([self.pre_card_frac_value(p_hand.discount(), noble.price) for noble in state.board.nobles_on_board])
return [cards_affordability, value_affordability, cards_frac_value, nobles_frac_value]
def hand_features(self, players_hand : PlayersHand):
points = players_hand.number_of_my_points()
cards_possessed = len(players_hand.cards_possessed)
nobles_possessed = len(players_hand.nobles_possessed)
total_gems_non_gold = players_hand.gems_possessed.sum()
gem_gold = players_hand.gems_possessed.gems_dict[GemColor.GOLD]
winner = int(points >= self.points_to_win)
return [winner, points, cards_possessed, nobles_possessed, total_gems_non_gold, gem_gold]
def state_to_features(self, state : State):
my_hand = self.hand_features(state.active_players_hand())
opp_hand = self.hand_features(state.other_players_hand())
my_cards_stats = self.cards_stats(state, True)
opp_cards_stats = self.cards_stats(state, False)
return my_hand + opp_hand + my_cards_stats + opp_cards_stats
def evaluate(self, state: State):
value = np.dot(np.array(self.weights), np.array(self.state_to_features(state)))
return value*self.scaling_factor | [
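
# Minimal usage sketch (hedged): given a State instance built elsewhere by the
# splendor engine, the heuristic value of the position for the active player is:
#
#     vf = SplendorValueFunction(points_to_win=15)
#     score = vf.evaluate(state)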
"[email protected]"
]
| |
cb0fe887184287fd77edf97b0f52282926ab0e76 | c7f98de17088cb4df6c171f1e76614beb1f4e0f7 | /arachni.py | 5a6680d220178ee70763a537f8274dbfa4ca8c90 | []
| no_license | fryjustinc/ptf | 6262ca5b94a43a51e984d3eee1649a16584b597b | ba85f9e867b65b4aa4f06b6232207aadac9782c9 | refs/heads/master | 2020-03-31T09:43:44.474563 | 2018-10-08T18:39:03 | 2018-10-08T18:39:03 | 152,107,950 | 0 | 0 | null | 2018-10-08T16:00:37 | 2018-10-08T16:00:37 | null | UTF-8 | Python | false | false | 247 | py | #!/usr/bin/env python
#####################################
# Installation module for arachni
#####################################
AUTHOR="Justin Fry"
INSTALL_TYPE="GIT"
REPOSITORY_LOCATION="https://github.com/arachni/arachni"
LAUNCHER="arachni"
| [
"[email protected]"
]
| |
bac416402c5c026b1181313d94a98c6fdfb8be29 | 19f52d4aeffe31e697532f08710498789a46dd6e | /keras_to_tensorflow.py | c6ea297a3d533457339e44a2ff04a0e0ec8b1990 | []
| no_license | maxwellchang7777/keras_to_tensorflow | 4ae6b9aa6e62f0ccf66c35f84cb0aeebaa274f39 | efbf5c9148d76ffba0174c2dd8856d687f14666b | refs/heads/master | 2020-05-07T16:22:37.678241 | 2018-08-02T05:52:14 | 2018-08-02T05:52:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,849 | py | # This file shows how to save a keras model to tensorflow pb file
# and how to use tensorflow to reload the model and inferece by the model
from keras.models import Sequential
from keras.layers import Dense
import tensorflow as tf
import numpy as np
np.random.seed(0)
# parameter ==========================
wkdir = '/home/pipidog/keras_to_tensorflow'
pb_filename = 'model.pb'
# build a keras model ================
x = np.vstack((np.random.rand(1000,10),-np.random.rand(1000,10)))
y = np.vstack((np.ones((1000,1)),np.zeros((1000,1))))
print(x.shape)
print(y.shape)
model = Sequential()
model.add(Dense(units = 32, input_shape=(10,), activation ='relu'))
model.add(Dense(units = 16, activation ='relu'))
model.add(Dense(units = 1, activation ='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='Adam', metrics=['binary_accuracy'])
model.fit(x = x, y=y, epochs = 2, validation_split=0.2)
# save model to pb ====================
def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):
"""
Freezes the state of a session into a pruned computation graph.
Creates a new computation graph where variable nodes are replaced by
constants taking their current value in the session. The new graph will be
pruned so subgraphs that are not necessary to compute the requested
outputs are removed.
@param session The TensorFlow session to be frozen.
@param keep_var_names A list of variable names that should not be frozen,
or None to freeze all the variables in the graph.
@param output_names Names of the relevant graph outputs.
@param clear_devices Remove the device directives from the graph for better portability.
@return The frozen graph definition.
"""
from tensorflow.python.framework.graph_util import convert_variables_to_constants
graph = session.graph
with graph.as_default():
freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))
output_names = output_names or []
output_names += [v.op.name for v in tf.global_variables()]
input_graph_def = graph.as_graph_def()
if clear_devices:
for node in input_graph_def.node:
node.device = ""
frozen_graph = convert_variables_to_constants(session, input_graph_def,
output_names, freeze_var_names)
return frozen_graph
# save keras model as tf pb files ===============
from keras import backend as K
frozen_graph = freeze_session(K.get_session(),
output_names=[out.op.name for out in model.outputs])
tf.train.write_graph(frozen_graph, wkdir, pb_filename, as_text=False)
# load & run inference with the model ==================
from tensorflow.python.platform import gfile
with tf.Session() as sess:
# load model from pb file
with gfile.FastGFile(wkdir+'/'+pb_filename,'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
sess.graph.as_default()
g_in = tf.import_graph_def(graph_def)
# write to tensorboard (check tensorboard for each op names)
writer = tf.summary.FileWriter(wkdir+'/log/')
writer.add_graph(sess.graph)
writer.flush()
writer.close()
# print all operation names
    print('\n===== output operation names =====\n')
for op in sess.graph.get_operations():
print(op)
# inference by the model (op name must comes with :0 to specify the index of its output)
tensor_output = sess.graph.get_tensor_by_name('import/dense_3/Sigmoid:0')
tensor_input = sess.graph.get_tensor_by_name('import/dense_1_input:0')
predictions = sess.run(tensor_output, {tensor_input: x})
print('\n===== output predicted results =====\n')
print(predictions) | [
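
# Note (assumption): the tensor names above ('import/dense_1_input:0',
# 'import/dense_3/Sigmoid:0') follow Keras' default layer naming for this
# three-Dense model; if your layers are named differently, pick the right names
# from the operation list printed above (the ':0' suffix selects the op's first output).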
"[email protected]"
]
| |
1917d5e820f4f39ad62dbc6f420e55495043a206 | 47deebe6fefedb01fdce5d4e82f58bb08f8e1e92 | /python core/Lesson_5/for_7.py | 839845eb70264bad5d73d981ad0125b5f2b7abcd | []
| no_license | developeryuldashev/python-core | 5bb162603bdb5782acf05e3fb25ca5dd6347067a | 08fca77c9cfde69d93a7875b3fb65b98f3dabd78 | refs/heads/main | 2023-08-21T03:33:12.160133 | 2021-10-19T04:56:53 | 2021-10-19T04:56:53 | 393,383,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py | a,b=2,5
s=0
for i in range(a,b+1):
s+=i
print(s) | [
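# Equivalent closed form (Gauss): (a + b) * (b - a + 1) // 2 == (2 + 5) * 4 // 2 == 14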
"[email protected]"
]
| |
09506b373f9c63284be54f45bcd1193d7bbc4926 | 0a06c52144f184e939ed8a3ec16af601447e4247 | /course13/cnn.py | 7f1575f6de3a4f40aaaea75e8aa37756b056dc47 | [
"Apache-2.0"
]
| permissive | fengxiang2/PaddlePaddleCourse | 7dd88dc13e9b5f5f7f27db2b155fe4f1adcf22e4 | 1b94da406884f8a0da22e471e6b9b6a4dec80e45 | refs/heads/master | 2022-08-01T11:25:39.915680 | 2020-05-22T13:25:23 | 2020-05-22T13:25:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | import paddle
class CNN(paddle.nn.Layer):
def __init__(self):
super(CNN, self).__init__()
        # define the structure of each layer
self.conv1 = paddle.nn.Conv2D(num_channels=1, num_filters=20, filter_size=5, act="relu")
self.conv2 = paddle.nn.Conv2D(num_channels=20, num_filters=50, filter_size=5, act="relu")
self.pool1 = paddle.nn.Pool2D(pool_size=2, pool_type='max', pool_stride=2)
self.input_dim = 50 * 4 * 4
self.fc = paddle.nn.Linear(input_dim=self.input_dim, output_dim=10, act='softmax')
def forward(self, inputs):
        # chain the layers together in the forward pass
x = self.conv1(inputs)
x = self.pool1(x)
x = self.conv2(x)
x = self.pool1(x)
x = paddle.reshape(x, shape=[-1, self.input_dim])
x = self.fc(x)
return x
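
# Shape walk-through (assuming 28x28 MNIST-style inputs, which is what
# input_dim = 50*4*4 implies): [N,1,28,28] -conv5-> [N,20,24,24] -pool2->
# [N,20,12,12] -conv5-> [N,50,8,8] -pool2-> [N,50,4,4] -flatten-> [N,800] -fc-> [N,10].
# Note: the num_channels/num_filters/filter_size/act keyword style matches the
# Paddle 1.8 dygraph API; Paddle 2.x renamed these to in_channels/out_channels/
# kernel_size, with activations applied as separate layers.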
| [
"[email protected]"
]
| |
9d69e20c1c5183ae8488d661cee4a8cbe6a71acb | eeb4752a22ef99152784c0ef6f720f8e4f2dd9d9 | /myrest/app_one/migrations/0005_citycount_town.py | cd723eb747bb4c43d5b22164ea2eae5ccf036ceb | []
| no_license | borko81/django-rest-test | 9a63d328fea8155029bb3d1d29ab624ea4a0027b | e21d41494154622c2472b679df40d5f42d8ab356 | refs/heads/main | 2023-08-05T22:36:10.099746 | 2021-09-10T17:54:20 | 2021-09-10T17:54:20 | 318,290,143 | 0 | 0 | null | 2021-08-23T18:51:22 | 2020-12-03T18:53:44 | Python | UTF-8 | Python | false | false | 509 | py | # Generated by Django 3.2.5 on 2021-08-24 11:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app_one', '0004_citycount'),
]
operations = [
migrations.AddField(
model_name='citycount',
name='town',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='app_one.country'),
preserve_default=False,
),
]
| [
"[email protected]"
]
| |
a6e43e86f8947ff4ae23fbc52d8d92d2a3caa805 | e9747b8f6c487e983e9c1fe866ddf3c149130b73 | /corefit/__init__.py | 521bece7ac2076d22cbd3745beb878d221f05695 | []
| no_license | redcliver/corefit | f25974edfb2f7e697b7ced92a7617cc3004a5276 | ed9713f48588db7c2598c3ba4a9bda17054eea85 | refs/heads/master | 2022-12-10T04:11:03.646857 | 2018-07-31T15:18:59 | 2018-07-31T15:18:59 | 136,400,693 | 0 | 0 | null | 2022-06-27T16:58:03 | 2018-06-07T00:32:43 | CSS | UTF-8 | Python | false | false | 29 | py | """
Package for corefit.
"""
| [
"[email protected]"
]
| |
7710833890b0049ceaa60b65b07edcb3846bd339 | fbcdb3e66f9fce9bf8596ae9f28e14ad23da30a2 | /template/office/OleFileIO_PL.py | 10d50608d141383a986f4fa7917fcd9809c961d1 | [
"BSD-2-Clause"
]
| permissive | arizvisa/syringe | 38349e6ff81bc1d709d520b8a8d949a47a3b5f6c | e02b014dc764ed822288210248c9438a843af8a9 | refs/heads/master | 2023-08-18T11:44:50.096141 | 2023-08-16T21:15:58 | 2023-08-16T21:15:58 | 22,565,979 | 36 | 9 | BSD-2-Clause | 2021-05-24T19:38:31 | 2014-08-03T03:24:16 | Python | UTF-8 | Python | false | false | 69,855 | py | #!/usr/local/bin/python
# -*- coding: latin-1 -*-
"""
OleFileIO_PL:
Module to read Microsoft OLE2 files (also called Structured Storage or
Microsoft Compound Document File Format), such as Microsoft Office
documents, Image Composer and FlashPix files, Outlook messages, ...
version 0.22 2012-02-16 Philippe Lagadec - http://www.decalage.info
Project website: http://www.decalage.info/python/olefileio
Improved version of the OleFileIO module from PIL library v1.1.6
See: http://www.pythonware.com/products/pil/index.htm
The Python Imaging Library (PIL) is
Copyright (c) 1997-2005 by Secret Labs AB
Copyright (c) 1995-2005 by Fredrik Lundh
OleFileIO_PL changes are Copyright (c) 2005-2012 by Philippe Lagadec
See source code and LICENSE.txt for information on usage and redistribution.
WARNING: THIS IS (STILL) WORK IN PROGRESS.
"""
__author__ = "Fredrik Lundh (Secret Labs AB), Philippe Lagadec"
__date__ = "2012-02-16"
__version__ = '0.22'
#--- LICENSE ------------------------------------------------------------------
# OleFileIO_PL is an improved version of the OleFileIO module from the
# Python Imaging Library (PIL).
# OleFileIO_PL changes are Copyright (c) 2005-2012 by Philippe Lagadec
#
# The Python Imaging Library (PIL) is
# Copyright (c) 1997-2005 by Secret Labs AB
# Copyright (c) 1995-2005 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its associated
# documentation, you agree that you have read, understood, and will comply with
# the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and its
# associated documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appears in all copies, and that both
# that copyright notice and this permission notice appear in supporting
# documentation, and that the name of Secret Labs AB or the author(s) not be used
# in advertising or publicity pertaining to distribution of the software
# without specific, written prior permission.
#
# SECRET LABS AB AND THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
# IN NO EVENT SHALL SECRET LABS AB OR THE AUTHORS BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#-----------------------------------------------------------------------------
# CHANGELOG: (only OleFileIO_PL changes compared to PIL 1.1.6)
# 2005-05-11 v0.10 PL: - a few fixes for Python 2.4 compatibility
# (all changes flagged with [PL])
# 2006-02-22 v0.11 PL: - a few fixes for some Office 2003 documents which raise
# exceptions in _OleStream.__init__()
# 2006-06-09 v0.12 PL: - fixes for files above 6.8MB (DIFAT in loadfat)
# - added some constants
# - added header values checks
# - added some docstrings
# - getsect: bugfix in case sectors >512 bytes
# - getsect: added conformity checks
# - DEBUG_MODE constant to activate debug display
# 2007-09-04 v0.13 PL: - improved/translated (lots of) comments
# - updated license
# - converted tabs to 4 spaces
# 2007-11-19 v0.14 PL: - added OleFileIO._raise_defect() to adapt sensitivity
# - improved _unicode() to use Python 2.x unicode support
# - fixed bug in _OleDirectoryEntry
# 2007-11-25 v0.15 PL: - added safety checks to detect FAT loops
# - fixed _OleStream which didn't check stream size
# - added/improved many docstrings and comments
# - moved helper functions _unicode and _clsid out of
# OleFileIO class
# - improved OleFileIO._find() to add Unix path syntax
# - OleFileIO._find() is now case-insensitive
# - added get_type() and get_rootentry_name()
# - rewritten loaddirectory and _OleDirectoryEntry
# 2007-11-27 v0.16 PL: - added _OleDirectoryEntry.kids_dict
# - added detection of duplicate filenames in storages
# - added detection of duplicate references to streams
# - added get_size() and exists() to _OleDirectoryEntry
# - added isOleFile to check header before parsing
# - added __all__ list to control public keywords in pydoc
# 2007-12-04 v0.17 PL: - added _load_direntry to fix a bug in loaddirectory
# - improved _unicode(), added workarounds for Python <2.3
# - added set_debug_mode and -d option to set debug mode
# - fixed bugs in OleFileIO.open and _OleDirectoryEntry
# - added safety check in main for large or binary
# properties
# - allow size>0 for storages for some implementations
# 2007-12-05 v0.18 PL: - fixed several bugs in handling of FAT, MiniFAT and
# streams
# - added option '-c' in main to check all streams
# 2009-12-10 v0.19 PL: - bugfix for 32 bit arrays on 64 bits platforms
# (thanks to Ben G. and Martijn for reporting the bug)
# 2009-12-11 v0.20 PL: - bugfix in OleFileIO.open when filename is not plain str
# 2010-01-22 v0.21 PL: - added support for big-endian CPUs such as PowerPC Macs
# 2012-02-16 v0.22 PL: - fixed bug in getproperties, patch by chuckleberryfinn
# (https://bitbucket.org/decalage/olefileio_pl/issue/7)
# - added close method to OleFileIO (fixed issue #2)
#-----------------------------------------------------------------------------
# TODO (for version 1.0):
# - TESTS with Linux, MacOSX, Python 1.5.2, various files, PIL, ...
# - add underscore to each private method, to avoid their display in
# pydoc/epydoc documentation
# - replace all raised exceptions with _raise_defect (at least in OleFileIO)
# - merge code from _OleStream and OleFileIO.getsect to read sectors
# (maybe add a class for FAT and MiniFAT ?)
# - add method to check all streams (follow sectors chains without storing all
# stream in memory, and report anomalies)
# - use _OleDirectoryEntry.kids_dict to improve _find and _list ?
# - fix Unicode names handling (find some way to stay compatible with Py1.5.2)
# => if possible avoid converting names to Latin-1
# - review DIFAT code: fix handling of DIFSECT blocks in FAT (not stop)
# - rewrite OleFileIO.getproperties
# - improve docstrings to show more sample uses
# - see also original notes and FIXME below
# - remove all obsolete FIXMEs
# IDEAS:
# - allow _raise_defect to raise different exceptions, not only IOError
# - provide a class with named attributes to get well-known properties of
# MS Office documents (title, author, ...) ?
# - in OleFileIO._open and _OleStream, use size=None instead of 0x7FFFFFFF for
# streams with unknown size
# - use arrays of int instead of long integers for FAT/MiniFAT, to improve
# performance and reduce memory usage ? (possible issue with values >2^31)
# - provide tests with unittest (may need write support to create samples)
# - move all debug code (and maybe dump methods) to a separate module, with
# a class which inherits OleFileIO ?
# - fix docstrings to follow epydoc format
# - add support for 4K sectors ?
# - add support for big endian byte order ?
# - create a simple OLE explorer with wxPython
# FUTURE EVOLUTIONS to add write support:
# 1) add ability to write a stream back on disk from StringIO (same size, no
# change in FAT/MiniFAT).
# 2) rename a stream/storage if it doesn't change the RB tree
# 3) use rbtree module to update the red-black tree + any rename
# 4) remove a stream/storage: free sectors in FAT/MiniFAT
# 5) allocate new sectors in FAT/MiniFAT
# 6) create new storage/stream
#-----------------------------------------------------------------------------
#
# THIS IS WORK IN PROGRESS
#
# The Python Imaging Library
# $Id: OleFileIO.py 2339 2005-03-25 08:02:17Z fredrik $
#
# stuff to deal with OLE2 Structured Storage files. this module is
# used by PIL to read Image Composer and FlashPix files, but can also
# be used to read other files of this type.
#
# History:
# 1997-01-20 fl Created
# 1997-01-22 fl Fixed 64-bit portability quirk
# 2003-09-09 fl Fixed typo in OleFileIO.loadfat (noted by Daniel Haertle)
# 2004-02-29 fl Changed long hex constants to signed integers
#
# Notes:
# FIXME: sort out sign problem (eliminate long hex constants)
# FIXME: change filename to use "a/b/c" instead of ["a", "b", "c"]
# FIXME: provide a glob mechanism function (using fnmatchcase)
#
# Literature:
#
# "FlashPix Format Specification, Appendix A", Kodak and Microsoft,
# September 1996.
#
# Quotes:
#
# "If this document and functionality of the Software conflict,
# the actual functionality of the Software represents the correct
# functionality" -- Microsoft, in the OLE format specification
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
#------------------------------------------------------------------------------
import string, StringIO, struct, array, os.path, sys
#[PL] Define explicitly the public API to avoid private objects in pydoc:
__all__ = ['OleFileIO', 'isOleFile']
#[PL] workaround to fix an issue with array item size on 64 bits systems:
if array.array('L').itemsize == 4:
# on 32 bits platforms, long integers in an array are 32 bits:
UINT32 = 'L'
elif array.array('I').itemsize == 4:
# on 64 bits platforms, integers in an array are 32 bits:
UINT32 = 'I'
else:
raise ValueError, 'Need to fix a bug with 32 bit arrays, please contact author...'
#[PL] These workarounds were inspired from the Path module
# (see http://www.jorendorff.com/articles/python/path/)
#TODO: test with old Python versions
# Pre-2.3 workaround for booleans
try:
True, False
except NameError:
True, False = 1, 0
# Pre-2.3 workaround for basestring.
try:
basestring
except NameError:
try:
# is Unicode supported (Python >2.0 or >1.6 ?)
basestring = (str, unicode)
except NameError:
basestring = str
#[PL] Experimental setting: if True, OLE filenames will be kept in Unicode
# if False (default PIL behaviour), all filenames are converted to Latin-1.
KEEP_UNICODE_NAMES = False
#[PL] DEBUG display mode: False by default, use set_debug_mode() or "-d" on
# command line to change it.
DEBUG_MODE = False
def debug_print(msg):
print msg
def debug_pass(msg):
pass
debug = debug_pass
def set_debug_mode(debug_mode):
"""
Set debug mode on or off, to control display of debugging messages.
mode: True or False
"""
global DEBUG_MODE, debug
DEBUG_MODE = debug_mode
if debug_mode:
debug = debug_print
else:
debug = debug_pass
#TODO: convert this to hex
MAGIC = '\320\317\021\340\241\261\032\341'
#[PL]: added constants for Sector IDs (from AAF specifications)
MAXREGSECT = 0xFFFFFFFAL; # maximum SECT
DIFSECT = 0xFFFFFFFCL; # (-4) denotes a DIFAT sector in a FAT
FATSECT = 0xFFFFFFFDL; # (-3) denotes a FAT sector in a FAT
ENDOFCHAIN = 0xFFFFFFFEL; # (-2) end of a virtual stream chain
FREESECT = 0xFFFFFFFFL; # (-1) unallocated sector
#[PL]: added constants for Directory Entry IDs (from AAF specifications)
MAXREGSID = 0xFFFFFFFAL; # maximum directory entry ID
NOSTREAM = 0xFFFFFFFFL; # (-1) unallocated directory entry
#[PL] object types in storage (from AAF specifications)
STGTY_EMPTY = 0 # empty directory entry (according to OpenOffice.org doc)
STGTY_STORAGE = 1 # element is a storage object
STGTY_STREAM = 2 # element is a stream object
STGTY_LOCKBYTES = 3 # element is an ILockBytes object
STGTY_PROPERTY = 4 # element is an IPropertyStorage object
STGTY_ROOT = 5 # element is a root storage
#
# --------------------------------------------------------------------
# property types
VT_EMPTY=0; VT_NULL=1; VT_I2=2; VT_I4=3; VT_R4=4; VT_R8=5; VT_CY=6;
VT_DATE=7; VT_BSTR=8; VT_DISPATCH=9; VT_ERROR=10; VT_BOOL=11;
VT_VARIANT=12; VT_UNKNOWN=13; VT_DECIMAL=14; VT_I1=16; VT_UI1=17;
VT_UI2=18; VT_UI4=19; VT_I8=20; VT_UI8=21; VT_INT=22; VT_UINT=23;
VT_VOID=24; VT_HRESULT=25; VT_PTR=26; VT_SAFEARRAY=27; VT_CARRAY=28;
VT_USERDEFINED=29; VT_LPSTR=30; VT_LPWSTR=31; VT_FILETIME=64;
VT_BLOB=65; VT_STREAM=66; VT_STORAGE=67; VT_STREAMED_OBJECT=68;
VT_STORED_OBJECT=69; VT_BLOB_OBJECT=70; VT_CF=71; VT_CLSID=72;
VT_VECTOR=0x1000;
# map property id to name (for debugging purposes)
VT = {}
for keyword, var in vars().items():
if keyword[:3] == "VT_":
VT[var] = keyword
#
# --------------------------------------------------------------------
# Some common document types (root.clsid fields)
WORD_CLSID = "00020900-0000-0000-C000-000000000046"
#TODO: check Excel, PPT, ...
#[PL]: Defect levels to classify parsing errors - see OleFileIO._raise_defect()
DEFECT_UNSURE = 10 # a case which looks weird, but not sure it's a defect
DEFECT_POTENTIAL = 20 # a potential defect
DEFECT_INCORRECT = 30 # an error according to specifications, but parsing
# can go on
DEFECT_FATAL = 40 # an error which cannot be ignored, parsing is
# impossible
#[PL] add useful constants to __all__:
for key in vars().keys():
if key.startswith('STGTY_') or key.startswith('DEFECT_'):
__all__.append(key)
#--- FUNCTIONS ----------------------------------------------------------------
def isOleFile (filename):
"""
Test if file is an OLE container (according to its header).
filename: file name or path (str, unicode)
return: True if OLE, False otherwise.
"""
    f = open(filename, 'rb')
    header = f.read(len(MAGIC))
    f.close()
    return header == MAGIC
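
# Example (hedged): a quick pre-check before full parsing, assuming the file
# 'test.doc' exists on disk:
#     if isOleFile('test.doc'):
#         ole = OleFileIO('test.doc')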
#TODO: replace i16 and i32 with more readable struct.unpack equivalent
def i16(c, o = 0):
"""
Converts a 2-bytes (16 bits) string to an integer.
c: string containing bytes to convert
o: offset of bytes to convert in string
"""
return ord(c[o])+(ord(c[o+1])<<8)
def i32(c, o = 0):
"""
Converts a 4-bytes (32 bits) string to an integer.
c: string containing bytes to convert
o: offset of bytes to convert in string
"""
return int(ord(c[o])+(ord(c[o+1])<<8)+(ord(c[o+2])<<16)+(ord(c[o+3])<<24))
# [PL]: added int() because "<<" gives long int since Python 2.4
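    # Example: i32('\x01\x02\x03\x04') == 0x04030201 == 67305985 (little-endian),
    # the same value as struct.unpack('<I', '\x01\x02\x03\x04')[0].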
def _clsid(clsid):
"""
Converts a CLSID to a human-readable string.
clsid: string of length 16.
"""
assert len(clsid) == 16
if clsid == "\0" * len(clsid):
return ""
return (("%08X-%04X-%04X-%02X%02X-" + "%02X" * 6) %
((i32(clsid, 0), i16(clsid, 4), i16(clsid, 6)) +
tuple(map(ord, clsid[8:16]))))
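
# Example: _clsid('\x00\x09\x02\x00' + '\x00'*4 + '\xC0' + '\x00'*6 + '\x46')
# returns '00020900-0000-0000-C000-000000000046', i.e. WORD_CLSID defined below.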
# UNICODE support for Old Python versions:
# (necessary to handle storages/streams names which use Unicode)
try:
# is Unicode supported ?
unicode
def _unicode(s, errors='replace'):
"""
Map unicode string to Latin 1. (Python with Unicode support)
s: UTF-16LE unicode string to convert to Latin-1
errors: 'replace', 'ignore' or 'strict'. See Python doc for unicode()
"""
#TODO: test if it OleFileIO works with Unicode strings, instead of
# converting to Latin-1.
try:
# First the string is converted to plain Unicode:
# (assuming it is encoded as UTF-16 little-endian)
u = s.decode('UTF-16LE', errors)
if KEEP_UNICODE_NAMES:
return u
else:
# Second the unicode string is converted to Latin-1
return u.encode('latin_1', errors)
except:
# there was an error during Unicode to Latin-1 conversion:
raise IOError, 'incorrect Unicode name'
except NameError:
def _unicode(s, errors='replace'):
"""
Map unicode string to Latin 1. (Python without native Unicode support)
s: UTF-16LE unicode string to convert to Latin-1
errors: 'replace', 'ignore' or 'strict'. (ignored in this version)
"""
# If the unicode function does not exist, we assume this is an old
# Python version without Unicode support.
# Null bytes are simply removed (this only works with usual Latin-1
# strings which do not contain unicode characters>256):
return filter(ord, s)
#=== CLASSES ==================================================================
#--- _OleStream ---------------------------------------------------------------
class _OleStream(StringIO.StringIO):
"""
OLE2 Stream
Returns a read-only file object which can be used to read
the contents of a OLE stream (instance of the StringIO class).
To open a stream, use the openstream method in the OleFile class.
This function can be used with either ordinary streams,
or ministreams, depending on the offset, sectorsize, and
fat table arguments.
Attributes:
- size: actual size of data stream, after it was opened.
"""
# FIXME: should store the list of sects obtained by following
# the fat chain, and load new sectors on demand instead of
# loading it all in one go.
def __init__(self, fp, sect, size, offset, sectorsize, fat):
"""
Constructor for _OleStream class.
fp : file object, the OLE container or the MiniFAT stream
sect : sector index of first sector in the stream
size : total size of the stream
offset : offset in bytes for the first FAT or MiniFAT sector
sectorsize: size of one sector
fat : array/list of sector indexes (FAT or MiniFAT)
return : a StringIO instance containing the OLE stream
"""
debug('_OleStream.__init__:')
debug(' sect=%d (%X), size=%d, offset=%d, sectorsize=%d, len(fat)=%d, fp=%s'
%(sect,sect,size,offset,sectorsize,len(fat), repr(fp)))
# for debugging messages, size of file where stream is read:
if isinstance(fp, StringIO.StringIO):
filesize = len(fp.getvalue()) # file in MiniFAT
else:
filesize = os.path.getsize(fp.name) # file on disk
#[PL] To detect malformed documents with FAT loops, we compute the
# expected number of sectors in the stream:
unknown_size = False
if size==0x7FFFFFFF:
# this is the case when called from OleFileIO._open(), and stream
# size is not known in advance (for example when reading the
# Directory stream). Then we can only guess maximum size:
size = len(fat)*sectorsize
# and we keep a record that size was unknown:
unknown_size = True
debug(' stream with UNKNOWN SIZE')
nb_sectors = (size + (sectorsize-1)) / sectorsize
debug('nb_sectors = %d' % nb_sectors)
# This number should (at least) be less than the total number of
# sectors in the given FAT:
if nb_sectors > len(fat):
raise IOError, 'malformed OLE document, stream too large'
# optimization(?): data is first a list of strings, and join() is called
# at the end to concatenate all in one string.
# (this may not be really useful with recent Python versions)
data = []
# if size is zero, then first sector index should be ENDOFCHAIN:
if size == 0 and sect != ENDOFCHAIN:
debug('size == 0 and sect != ENDOFCHAIN:')
raise IOError, 'incorrect OLE sector index for empty stream'
#[PL] A fixed-length for loop is used instead of an undefined while
# loop to avoid DoS attacks:
for i in xrange(nb_sectors):
# Sector index may be ENDOFCHAIN, but only if size was unknown
if sect == ENDOFCHAIN:
if unknown_size:
break
else:
# else this means that the stream is smaller than declared:
debug('sect=ENDOFCHAIN before expected size')
raise IOError, 'incomplete OLE stream'
# sector index should be within FAT:
if sect<0 or sect>=len(fat):
debug('sect=%d (%X) / len(fat)=%d' % (sect, sect, len(fat)))
debug('i=%d / nb_sectors=%d' %(i, nb_sectors))
## tmp_data = string.join(data, "")
## f = open('test_debug.bin', 'wb')
## f.write(tmp_data)
## f.close()
## debug('data read so far: %d bytes' % len(tmp_data))
raise IOError, 'incorrect OLE FAT, sector index out of range'
#TODO: merge this code with OleFileIO.getsect() ?
#TODO: check if this works with 4K sectors:
try:
fp.seek(offset + sectorsize * sect)
except:
debug('sect=%d, seek=%d, filesize=%d' %
(sect, offset+sectorsize*sect, filesize))
raise IOError, 'OLE sector index out of range'
sector_data = fp.read(sectorsize)
# [PL] check if there was enough data:
# Note: if sector is the last of the file, sometimes it is not a
# complete sector (of 512 or 4K), so we may read less than
# sectorsize.
if len(sector_data)!=sectorsize and sect!=(len(fat)-1):
debug('sect=%d / len(fat)=%d, seek=%d / filesize=%d, len read=%d' %
(sect, len(fat), offset+sectorsize*sect, filesize, len(sector_data)))
debug('seek+len(read)=%d' % (offset+sectorsize*sect+len(sector_data)))
raise IOError, 'incomplete OLE sector'
data.append(sector_data)
# jump to next sector in the FAT:
try:
sect = fat[sect]
except IndexError:
# [PL] if pointer is out of the FAT an exception is raised
raise IOError, 'incorrect OLE FAT, sector index out of range'
#[PL] Last sector should be a "end of chain" marker:
if sect != ENDOFCHAIN:
raise IOError, 'incorrect last sector index in OLE stream'
data = string.join(data, "")
# Data is truncated to the actual stream size:
if len(data) >= size:
data = data[:size]
# actual stream size is stored for future use:
self.size = size
elif unknown_size:
# actual stream size was not known, now we know the size of read
# data:
self.size = len(data)
else:
# read data is less than expected:
debug('len(data)=%d, size=%d' % (len(data), size))
raise IOError, 'OLE stream size is less than declared'
# when all data is read in memory, StringIO constructor is called
StringIO.StringIO.__init__(self, data)
# Then the _OleStream object can be used as a read-only file object.
#--- _OleDirectoryEntry -------------------------------------------------------
class _OleDirectoryEntry:
"""
OLE2 Directory Entry
"""
#[PL] parsing code moved from OleFileIO.loaddirectory
# struct to parse directory entries:
# <: little-endian byte order
# 64s: string containing entry name in unicode (max 31 chars) + null char
# H: uint16, number of bytes used in name buffer, including null = (len+1)*2
# B: uint8, dir entry type (between 0 and 5)
# B: uint8, color: 0=black, 1=red
# I: uint32, index of left child node in the red-black tree, NOSTREAM if none
# I: uint32, index of right child node in the red-black tree, NOSTREAM if none
# I: uint32, index of child root node if it is a storage, else NOSTREAM
# 16s: CLSID, unique identifier (only used if it is a storage)
# I: uint32, user flags
# 8s: uint64, creation timestamp or zero
# 8s: uint64, modification timestamp or zero
# I: uint32, SID of first sector if stream or ministream, SID of 1st sector
# of stream containing ministreams if root entry, 0 otherwise
# I: uint32, total stream size in bytes if stream (low 32 bits), 0 otherwise
# I: uint32, total stream size in bytes if stream (high 32 bits), 0 otherwise
STRUCT_DIRENTRY = '<64sHBBIII16sI8s8sIII'
# size of a directory entry: 128 bytes
DIRENTRY_SIZE = 128
assert struct.calcsize(STRUCT_DIRENTRY) == DIRENTRY_SIZE
def __init__(self, entry, sid, olefile):
"""
Constructor for an _OleDirectoryEntry object.
Parses a 128-bytes entry from the OLE Directory stream.
entry : string (must be 128 bytes long)
sid : index of this directory entry in the OLE file directory
olefile: OleFileIO containing this directory entry
"""
self.sid = sid
# ref to olefile is stored for future use
self.olefile = olefile
# kids is a list of children entries, if this entry is a storage:
# (list of _OleDirectoryEntry objects)
self.kids = []
# kids_dict is a dictionary of children entries, indexed by their
# name in lowercase: used to quickly find an entry, and to detect
# duplicates
self.kids_dict = {}
# flag used to detect if the entry is referenced more than once in
# directory:
self.used = False
# decode DirEntry
(
name,
namelength,
self.entry_type,
self.color,
self.sid_left,
self.sid_right,
self.sid_child,
clsid,
self.dwUserFlags,
self.createTime,
self.modifyTime,
self.isectStart,
sizeLow,
sizeHigh
) = struct.unpack(_OleDirectoryEntry.STRUCT_DIRENTRY, entry)
if self.entry_type not in [STGTY_ROOT, STGTY_STORAGE, STGTY_STREAM, STGTY_EMPTY]:
olefile._raise_defect(DEFECT_INCORRECT, 'unhandled OLE storage type')
# only first directory entry can (and should) be root:
if self.entry_type == STGTY_ROOT and sid != 0:
olefile._raise_defect(DEFECT_INCORRECT, 'duplicate OLE root entry')
if sid == 0 and self.entry_type != STGTY_ROOT:
olefile._raise_defect(DEFECT_INCORRECT, 'incorrect OLE root entry')
#debug (struct.unpack(fmt_entry, entry[:len_entry]))
# name should be at most 31 unicode characters + null character,
# so 64 bytes in total (31*2 + 2):
if namelength>64:
olefile._raise_defect(DEFECT_INCORRECT, 'incorrect DirEntry name length')
# if exception not raised, namelength is set to the maximum value:
namelength = 64
# only characters without ending null char are kept:
name = name[:(namelength-2)]
# name is converted from unicode to Latin-1:
self.name = _unicode(name)
debug('DirEntry SID=%d: %s' % (self.sid, repr(self.name)))
debug(' - type: %d' % self.entry_type)
debug(' - sect: %d' % self.isectStart)
debug(' - SID left: %d, right: %d, child: %d' % (self.sid_left,
self.sid_right, self.sid_child))
# sizeHigh is only used for 4K sectors, it should be zero for 512 bytes
# sectors, BUT apparently some implementations set it as 0xFFFFFFFFL, 1
# or some other value so it cannot be raised as a defect in general:
if olefile.sectorsize == 512:
if sizeHigh != 0 and sizeHigh != 0xFFFFFFFFL:
debug('sectorsize=%d, sizeLow=%d, sizeHigh=%d (%X)' %
(olefile.sectorsize, sizeLow, sizeHigh, sizeHigh))
olefile._raise_defect(DEFECT_UNSURE, 'incorrect OLE stream size')
self.size = sizeLow
else:
self.size = sizeLow + (long(sizeHigh)<<32)
debug(' - size: %d (sizeLow=%d, sizeHigh=%d)' % (self.size, sizeLow, sizeHigh))
self.clsid = _clsid(clsid)
# a storage should have a null size, BUT some implementations such as
# Word 8 for Mac seem to allow non-null values => Potential defect:
if self.entry_type == STGTY_STORAGE and self.size != 0:
olefile._raise_defect(DEFECT_POTENTIAL, 'OLE storage with size>0')
# check if stream is not already referenced elsewhere:
if self.entry_type in (STGTY_ROOT, STGTY_STREAM) and self.size>0:
if self.size < olefile.minisectorcutoff \
and self.entry_type==STGTY_STREAM: # only streams can be in MiniFAT
# ministream object
minifat = True
else:
minifat = False
olefile._check_duplicate_stream(self.isectStart, minifat)
def build_storage_tree(self):
"""
Read and build the red-black tree attached to this _OleDirectoryEntry
object, if it is a storage.
Note that this method builds a tree of all subentries, so it should
only be called for the root object once.
"""
debug('build_storage_tree: SID=%d - %s - sid_child=%d'
% (self.sid, repr(self.name), self.sid_child))
if self.sid_child != NOSTREAM:
# if child SID is not NOSTREAM, then this entry is a storage.
# Let's walk through the tree of children to fill the kids list:
self.append_kids(self.sid_child)
# Note from OpenOffice documentation: the safest way is to
# recreate the tree because some implementations may store broken
# red-black trees...
# in the OLE file, entries are sorted on (length, name).
# for convenience, we sort them on name instead:
# (see __cmp__ method in this class)
self.kids.sort()
def append_kids(self, child_sid):
"""
Walk through red-black tree of children of this directory entry to add
all of them to the kids list. (recursive method)
child_sid : index of child directory entry to use, or None when called
first time for the root. (only used during recursion)
"""
#[PL] this method was added to use simple recursion instead of a complex
# algorithm.
# if this is not a storage or a leaf of the tree, nothing to do:
if child_sid == NOSTREAM:
return
# check if child SID is in the proper range:
if child_sid<0 or child_sid>=len(self.olefile.direntries):
self.olefile._raise_defect(DEFECT_FATAL, 'OLE DirEntry index out of range')
# get child direntry:
child = self.olefile._load_direntry(child_sid) #direntries[child_sid]
debug('append_kids: child_sid=%d - %s - sid_left=%d, sid_right=%d, sid_child=%d'
% (child.sid, repr(child.name), child.sid_left, child.sid_right, child.sid_child))
# the directory entries are organized as a red-black tree.
# (cf. Wikipedia for details)
# First walk through left side of the tree:
self.append_kids(child.sid_left)
# Check if its name is not already used (case-insensitive):
name_lower = child.name.lower()
if self.kids_dict.has_key(name_lower):
self.olefile._raise_defect(DEFECT_INCORRECT,
"Duplicate filename in OLE storage")
# Then the child_sid _OleDirectoryEntry object is appended to the
# kids list and dictionary:
self.kids.append(child)
self.kids_dict[name_lower] = child
# Check if kid was not already referenced in a storage:
if child.used:
self.olefile._raise_defect(DEFECT_INCORRECT,
'OLE Entry referenced more than once')
child.used = True
# Finally walk through right side of the tree:
self.append_kids(child.sid_right)
# Afterwards build kid's own tree if it's also a storage:
child.build_storage_tree()
def __cmp__(self, other):
"Compare entries by name"
return cmp(self.name, other.name)
#TODO: replace by the same function as MS implementation ?
# (order by name length first, then case-insensitive order)
def dump(self, tab = 0):
"Dump this entry, and all its subentries (for debug purposes only)"
TYPES = ["(invalid)", "(storage)", "(stream)", "(lockbytes)",
"(property)", "(root)"]
print " "*tab + repr(self.name), TYPES[self.entry_type],
if self.entry_type in (STGTY_STREAM, STGTY_ROOT):
print self.size, "bytes",
print
if self.entry_type in (STGTY_STORAGE, STGTY_ROOT) and self.clsid:
print " "*tab + "{%s}" % self.clsid
for kid in self.kids:
kid.dump(tab + 2)
#--- OleFileIO ----------------------------------------------------------------
class OleFileIO:
"""
OLE container object
This class encapsulates the interface to an OLE 2 structured
storage file. Use the {@link listdir} and {@link openstream} methods to
access the contents of this file.
Object names are given as a list of strings, one for each subentry
level. The root entry should be omitted. For example, the following
code extracts all image streams from a Microsoft Image Composer file:
ole = OleFileIO("fan.mic")
for entry in ole.listdir():
            if entry[:1] == ["Image"]:
                fin = ole.openstream(entry)
                fout = open(entry[-1], "wb")
                while 1:
                    s = fin.read(8192)
                    if not s:
                        break
                    fout.write(s)
You can use the viewer application provided with the Python Imaging
Library to view the resulting files (which happens to be standard
TIFF files).
"""
def __init__(self, filename = None, raise_defects=DEFECT_FATAL):
"""
Constructor for OleFileIO class.
filename: file to open.
raise_defects: minimal level for defects to be raised as exceptions.
(use DEFECT_FATAL for a typical application, DEFECT_INCORRECT for a
security-oriented application, see source code for details)
"""
self._raise_defects_level = raise_defects
if filename:
self.open(filename)
def _raise_defect(self, defect_level, message):
"""
This method should be called for any defect found during file parsing.
It may raise an IOError exception according to the minimal level chosen
for the OleFileIO object.
defect_level: defect level, possible values are:
DEFECT_UNSURE : a case which looks weird, but not sure it's a defect
DEFECT_POTENTIAL : a potential defect
DEFECT_INCORRECT : an error according to specifications, but parsing can go on
DEFECT_FATAL : an error which cannot be ignored, parsing is impossible
message: string describing the defect, used with raised exception.
"""
# added by [PL]
if defect_level >= self._raise_defects_level:
raise IOError, message
def open(self, filename):
"""
Open an OLE2 file.
Reads the header, FAT and directory.
filename: string-like or file-like object
"""
#[PL] check if filename is a string-like or file-like object:
# (it is better to check for a read() method)
if hasattr(filename, 'read'):
# file-like object
self.fp = filename
else:
# string-like object
self.fp = open(filename, "rb")
# old code fails if filename is not a plain string:
#if type(filename) == type(""):
# self.fp = open(filename, "rb")
#else:
# self.fp = filename
# lists of streams in FAT and MiniFAT, to detect duplicate references
# (list of indexes of first sectors of each stream)
self._used_streams_fat = []
self._used_streams_minifat = []
header = self.fp.read(512)
if len(header) != 512 or header[:8] != MAGIC:
self._raise_defect(DEFECT_FATAL, "not an OLE2 structured storage file")
# [PL] header structure according to AAF specifications:
##Header
##struct StructuredStorageHeader { // [offset from start (bytes), length (bytes)]
##BYTE _abSig[8]; // [00H,08] {0xd0, 0xcf, 0x11, 0xe0, 0xa1, 0xb1,
## // 0x1a, 0xe1} for current version
##CLSID _clsid; // [08H,16] reserved must be zero (WriteClassStg/
## // GetClassFile uses root directory class id)
##USHORT _uMinorVersion; // [18H,02] minor version of the format: 33 is
## // written by reference implementation
##USHORT _uDllVersion; // [1AH,02] major version of the dll/format: 3 for
## // 512-byte sectors, 4 for 4 KB sectors
##USHORT _uByteOrder; // [1CH,02] 0xFFFE: indicates Intel byte-ordering
##USHORT _uSectorShift; // [1EH,02] size of sectors in power-of-two;
## // typically 9 indicating 512-byte sectors
##USHORT _uMiniSectorShift; // [20H,02] size of mini-sectors in power-of-two;
## // typically 6 indicating 64-byte mini-sectors
##USHORT _usReserved; // [22H,02] reserved, must be zero
##ULONG _ulReserved1; // [24H,04] reserved, must be zero
##FSINDEX _csectDir; // [28H,04] must be zero for 512-byte sectors,
## // number of SECTs in directory chain for 4 KB
## // sectors
##FSINDEX _csectFat; // [2CH,04] number of SECTs in the FAT chain
##SECT _sectDirStart; // [30H,04] first SECT in the directory chain
##DFSIGNATURE _signature; // [34H,04] signature used for transactions; must
## // be zero. The reference implementation
## // does not support transactions
##ULONG _ulMiniSectorCutoff; // [38H,04] maximum size for a mini stream;
## // typically 4096 bytes
##SECT _sectMiniFatStart; // [3CH,04] first SECT in the MiniFAT chain
##FSINDEX _csectMiniFat; // [40H,04] number of SECTs in the MiniFAT chain
##SECT _sectDifStart; // [44H,04] first SECT in the DIFAT chain
##FSINDEX _csectDif; // [48H,04] number of SECTs in the DIFAT chain
##SECT _sectFat[109]; // [4CH,436] the SECTs of first 109 FAT sectors
##};
# [PL] header decoding:
# '<' indicates little-endian byte ordering for Intel (cf. struct module help)
fmt_header = '<8s16sHHHHHHLLLLLLLLLL'
header_size = struct.calcsize(fmt_header)
debug( "fmt_header size = %d, +FAT = %d" % (header_size, header_size + 109*4) )
header1 = header[:header_size]
(
self.Sig,
self.clsid,
self.MinorVersion,
self.DllVersion,
self.ByteOrder,
self.SectorShift,
self.MiniSectorShift,
self.Reserved, self.Reserved1,
self.csectDir,
self.csectFat,
self.sectDirStart,
self.signature,
self.MiniSectorCutoff,
self.MiniFatStart,
self.csectMiniFat,
self.sectDifStart,
self.csectDif
) = struct.unpack(fmt_header, header1)
debug( struct.unpack(fmt_header, header1))
if self.Sig != '\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1':
# OLE signature should always be present
self._raise_defect(DEFECT_FATAL, "incorrect OLE signature")
if self.clsid != '\x00'*16:
# according to AAF specs, CLSID should always be zero
self._raise_defect(DEFECT_INCORRECT, "incorrect CLSID in OLE header")
debug( "MinorVersion = %d" % self.MinorVersion )
debug( "DllVersion = %d" % self.DllVersion )
if self.DllVersion not in [3, 4]:
# version 3: usual format, 512 bytes per sector
# version 4: large format, 4K per sector
self._raise_defect(DEFECT_INCORRECT, "incorrect DllVersion in OLE header")
debug( "ByteOrder = %X" % self.ByteOrder )
if self.ByteOrder != 0xFFFE:
# For now only common little-endian documents are handled correctly
self._raise_defect(DEFECT_FATAL, "incorrect ByteOrder in OLE header")
# TODO: add big-endian support for documents created on Mac ?
self.SectorSize = 2**self.SectorShift
debug( "SectorSize = %d" % self.SectorSize )
if self.SectorSize not in [512, 4096]:
self._raise_defect(DEFECT_INCORRECT, "incorrect SectorSize in OLE header")
if (self.DllVersion==3 and self.SectorSize!=512) \
or (self.DllVersion==4 and self.SectorSize!=4096):
self._raise_defect(DEFECT_INCORRECT, "SectorSize does not match DllVersion in OLE header")
self.MiniSectorSize = 2**self.MiniSectorShift
debug( "MiniSectorSize = %d" % self.MiniSectorSize )
if self.MiniSectorSize not in [64]:
self._raise_defect(DEFECT_INCORRECT, "incorrect MiniSectorSize in OLE header")
if self.Reserved != 0 or self.Reserved1 != 0:
self._raise_defect(DEFECT_INCORRECT, "incorrect OLE header (non-null reserved bytes)")
debug( "csectDir = %d" % self.csectDir )
if self.SectorSize==512 and self.csectDir!=0:
self._raise_defect(DEFECT_INCORRECT, "incorrect csectDir in OLE header")
debug( "csectFat = %d" % self.csectFat )
debug( "sectDirStart = %X" % self.sectDirStart )
debug( "signature = %d" % self.signature )
# Signature should be zero, BUT some implementations do not follow this
# rule => only a potential defect:
if self.signature != 0:
self._raise_defect(DEFECT_POTENTIAL, "incorrect OLE header (signature>0)")
debug( "MiniSectorCutoff = %d" % self.MiniSectorCutoff )
debug( "MiniFatStart = %X" % self.MiniFatStart )
debug( "csectMiniFat = %d" % self.csectMiniFat )
debug( "sectDifStart = %X" % self.sectDifStart )
debug( "csectDif = %d" % self.csectDif )
        # calculate the number of sectors in the file
        # (-1 because header doesn't count)
        # note: os.path.getsize() needs a path, so for a file-like object
        # the size is measured by seeking to the end instead:
        if hasattr(filename, 'read'):
            self.fp.seek(0, 2)
            filesize = self.fp.tell()
        else:
            filesize = os.path.getsize(filename)
self.nb_sect = ( (filesize + self.SectorSize-1) / self.SectorSize) - 1
debug( "Number of sectors in the file: %d" % self.nb_sect )
# file clsid (probably never used, so we don't store it)
clsid = _clsid(header[8:24])
self.sectorsize = self.SectorSize #1 << i16(header, 30)
self.minisectorsize = self.MiniSectorSize #1 << i16(header, 32)
self.minisectorcutoff = self.MiniSectorCutoff # i32(header, 56)
# check known streams for duplicate references (these are always in FAT,
# never in MiniFAT):
self._check_duplicate_stream(self.sectDirStart)
# check MiniFAT only if it is not empty:
if self.csectMiniFat:
self._check_duplicate_stream(self.MiniFatStart)
# check DIFAT only if it is not empty:
if self.csectDif:
self._check_duplicate_stream(self.sectDifStart)
# Load file allocation tables
self.loadfat(header)
# Load direcory. This sets both the direntries list (ordered by sid)
# and the root (ordered by hierarchy) members.
self.loaddirectory(self.sectDirStart)#i32(header, 48))
self.ministream = None
self.minifatsect = self.MiniFatStart #i32(header, 60)
def close(self):
"""
close the OLE file, to release the file object
"""
self.fp.close()
def _check_duplicate_stream(self, first_sect, minifat=False):
"""
Checks if a stream has not been already referenced elsewhere.
This method should only be called once for each known stream, and only
if stream size is not null.
first_sect: index of first sector of the stream in FAT
minifat: if True, stream is located in the MiniFAT, else in the FAT
"""
if minifat:
debug('_check_duplicate_stream: sect=%d in MiniFAT' % first_sect)
used_streams = self._used_streams_minifat
else:
debug('_check_duplicate_stream: sect=%d in FAT' % first_sect)
# some values can be safely ignored (not a real stream):
if first_sect in (DIFSECT,FATSECT,ENDOFCHAIN,FREESECT):
return
used_streams = self._used_streams_fat
#TODO: would it be more efficient using a dict or hash values, instead
# of a list of long ?
if first_sect in used_streams:
self._raise_defect(DEFECT_INCORRECT, 'Stream referenced twice')
else:
used_streams.append(first_sect)
def dumpfat(self, fat, firstindex=0):
"Displays a part of FAT in human-readable form for debugging purpose"
# [PL] added only for debug
if not DEBUG_MODE:
return
# dictionary to convert special FAT values in human-readable strings
        VPL=8 # values per line (8+1 * 8+1 = 81)
fatnames = {
FREESECT: "..free..",
ENDOFCHAIN: "[ END. ]",
FATSECT: "FATSECT ",
DIFSECT: "DIFSECT "
}
nbsect = len(fat)
nlines = (nbsect+VPL-1)/VPL
print "index",
for i in range(VPL):
print ("%8X" % i),
print ""
for l in range(nlines):
index = l*VPL
print ("%8X:" % (firstindex+index)),
for i in range(index, index+VPL):
if i>=nbsect:
break
sect = fat[i]
if sect in fatnames:
nom = fatnames[sect]
else:
if sect == i+1:
nom = " --->"
else:
nom = "%8X" % sect
print nom,
print ""
def dumpsect(self, sector, firstindex=0):
"Displays a sector in a human-readable form, for debugging purpose."
if not DEBUG_MODE:
return
VPL=8 # number of values per line (8+1 * 8+1 = 81)
tab = array.array(UINT32, sector)
nbsect = len(tab)
nlines = (nbsect+VPL-1)/VPL
print "index",
for i in range(VPL):
print ("%8X" % i),
print ""
for l in range(nlines):
index = l*VPL
print ("%8X:" % (firstindex+index)),
for i in range(index, index+VPL):
if i>=nbsect:
break
sect = tab[i]
nom = "%8X" % sect
print nom,
print ""
def sect2array(self, sect):
"""
convert a sector to an array of 32 bits unsigned integers,
swapping bytes on big endian CPUs such as PowerPC (old Macs)
"""
a = array.array(UINT32, sect)
# if CPU is big endian, swap bytes:
if sys.byteorder == 'big':
a.byteswap()
return a
def loadfat_sect(self, sect):
"""
Adds the indexes of the given sector to the FAT
sect: string containing the first FAT sector, or array of long integers
return: index of last FAT sector.
"""
# a FAT sector is an array of ulong integers.
if isinstance(sect, array.array):
# if sect is already an array it is directly used
fat1 = sect
else:
# if it's a raw sector, it is parsed in an array
fat1 = self.sect2array(sect)
self.dumpsect(sect)
# The FAT is a sector chain starting at the first index of itself.
for isect in fat1:
#print "isect = %X" % isect
if isect == ENDOFCHAIN or isect == FREESECT:
# the end of the sector chain has been reached
break
# read the FAT sector
s = self.getsect(isect)
# parse it as an array of 32 bits integers, and add it to the
# global FAT array
nextfat = self.sect2array(s)
self.fat = self.fat + nextfat
return isect
def loadfat(self, header):
"""
Load the FAT table.
"""
        # The header contains the sector numbers
        # of the first 109 FAT sectors. Additional sectors are
        # described by DIFAT blocks
sect = header[76:512]
debug( "len(sect)=%d, so %d integers" % (len(sect), len(sect)/4) )
#fat = []
# [PL] FAT is an array of 32 bits unsigned ints, it's more effective
# to use an array than a list in Python.
# It's initialized as empty first:
self.fat = array.array(UINT32)
self.loadfat_sect(sect)
#self.dumpfat(self.fat)
## for i in range(0, len(sect), 4):
## ix = i32(sect, i)
## #[PL] if ix == -2 or ix == -1: # ix == 0xFFFFFFFEL or ix == 0xFFFFFFFFL:
## if ix == 0xFFFFFFFEL or ix == 0xFFFFFFFFL:
## break
## s = self.getsect(ix)
## #fat = fat + map(lambda i, s=s: i32(s, i), range(0, len(s), 4))
## fat = fat + array.array(UINT32, s)
if self.csectDif != 0:
# [PL] There's a DIFAT because file is larger than 6.8MB
# some checks just in case:
if self.csectFat <= 109:
# there must be at least 109 blocks in header and the rest in
# DIFAT, so number of sectors must be >109.
self._raise_defect(DEFECT_INCORRECT, 'incorrect DIFAT, not enough sectors')
if self.sectDifStart >= self.nb_sect:
# initial DIFAT block index must be valid
self._raise_defect(DEFECT_FATAL, 'incorrect DIFAT, first index out of range')
debug( "DIFAT analysis..." )
            # We compute the necessary number of DIFAT sectors:
            # (each DIFAT sector = 127 pointers + 1 towards the next DIFAT sector)
nb_difat = (self.csectFat-109 + 126)/127
debug( "nb_difat = %d" % nb_difat )
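            # worked example: with csectFat = 300 FAT sectors, the header
            # holds 109 of them, leaving 191; (191+126)/127 = 2 DIFAT
            # sectors (integer division)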
if self.csectDif != nb_difat:
raise IOError, 'incorrect DIFAT'
isect_difat = self.sectDifStart
for i in xrange(nb_difat):
debug( "DIFAT block %d, sector %X" % (i, isect_difat) )
#TODO: check if corresponding FAT SID = DIFSECT
sector_difat = self.getsect(isect_difat)
difat = self.sect2array(sector_difat)
self.dumpsect(sector_difat)
self.loadfat_sect(difat[:127])
# last DIFAT pointer is next DIFAT sector:
isect_difat = difat[127]
debug( "next DIFAT sector: %X" % isect_difat )
# checks:
if isect_difat not in [ENDOFCHAIN, FREESECT]:
# last DIFAT pointer value must be ENDOFCHAIN or FREESECT
raise IOError, 'incorrect end of DIFAT'
## if len(self.fat) != self.csectFat:
## # FAT should contain csectFat blocks
## print "FAT length: %d instead of %d" % (len(self.fat), self.csectFat)
## raise IOError, 'incorrect DIFAT'
# since FAT is read from fixed-size sectors, it may contain more values
# than the actual number of sectors in the file.
# Keep only the relevant sector indexes:
if len(self.fat) > self.nb_sect:
debug('len(fat)=%d, shrunk to nb_sect=%d' % (len(self.fat), self.nb_sect))
self.fat = self.fat[:self.nb_sect]
debug('\nFAT:')
self.dumpfat(self.fat)
def loadminifat(self):
"""
Load the MiniFAT table.
"""
# MiniFAT is stored in a standard sub-stream, pointed to by a header
# field.
# NOTE: there are two sizes to take into account for this stream:
# 1) Stream size is calculated according to the number of sectors
# declared in the OLE header. This allocated stream may be more than
# needed to store the actual sector indexes.
# (self.csectMiniFat is the number of sectors of size self.SectorSize)
stream_size = self.csectMiniFat * self.SectorSize
# 2) Actually used size is calculated by dividing the MiniStream size
# (given by root entry size) by the size of mini sectors, *4 for
# 32 bits indexes:
nb_minisectors = (self.root.size + self.MiniSectorSize-1) / self.MiniSectorSize
used_size = nb_minisectors * 4
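        # worked example (values assumed for illustration): a MiniStream of
        # root.size = 10000 bytes with 64-byte mini sectors needs
        # nb_minisectors = (10000+63)/64 = 157, so used_size = 157*4 = 628
        # bytes of the MiniFAT are actually used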
debug('loadminifat(): minifatsect=%d, nb FAT sectors=%d, used_size=%d, stream_size=%d, nb MiniSectors=%d' %
(self.minifatsect, self.csectMiniFat, used_size, stream_size, nb_minisectors))
if used_size > stream_size:
# This is not really a problem, but may indicate a wrong implementation:
self._raise_defect(DEFECT_INCORRECT, 'OLE MiniStream is larger than MiniFAT')
# In any case, first read stream_size:
s = self._open(self.minifatsect, stream_size, force_FAT=True).read()
#[PL] Old code replaced by an array:
#self.minifat = map(lambda i, s=s: i32(s, i), range(0, len(s), 4))
self.minifat = self.sect2array(s)
# Then shrink the array to used size, to avoid indexes out of MiniStream:
debug('MiniFAT shrunk from %d to %d sectors' % (len(self.minifat), nb_minisectors))
self.minifat = self.minifat[:nb_minisectors]
debug('loadminifat(): len=%d' % len(self.minifat))
debug('\nMiniFAT:')
self.dumpfat(self.minifat)
def getsect(self, sect):
"""
Read given sector from file on disk.
sect: sector index
returns a string containing the sector data.
"""
# [PL] this original code was wrong when sectors are 4KB instead of
# 512 bytes:
#self.fp.seek(512 + self.sectorsize * sect)
#[PL]: added safety checks:
#print "getsect(%X)" % sect
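        # e.g. with 512-byte sectors, sector 0 starts at file offset 512,
        # right after the header (which in effect occupies "sector -1"),
        # so sector N starts at sectorsize*(N+1)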
try:
self.fp.seek(self.sectorsize * (sect+1))
except:
debug('getsect(): sect=%X, seek=%d, filesize=%d' %
(sect, self.sectorsize*(sect+1), os.path.getsize(self.fp.name)))
self._raise_defect(DEFECT_FATAL, 'OLE sector index out of range')
sector = self.fp.read(self.sectorsize)
if len(sector) != self.sectorsize:
debug('getsect(): sect=%X, read=%d, sectorsize=%d' %
(sect, len(sector), self.sectorsize))
self._raise_defect(DEFECT_FATAL, 'incomplete OLE sector')
return sector
def loaddirectory(self, sect):
"""
Load the directory.
sect: sector index of directory stream.
"""
# The directory is stored in a standard
# substream, independent of its size.
# open directory stream as a read-only file:
# (stream size is not known in advance)
self.directory_fp = self._open(sect)
#[PL] to detect malformed documents and avoid DoS attacks, the maximum
# number of directory entries can be calculated:
max_entries = self.directory_fp.size / 128
debug('loaddirectory: size=%d, max_entries=%d' %
(self.directory_fp.size, max_entries))
# Create list of directory entries
#self.direntries = []
        # We start with a list of "None" objects
self.direntries = [None] * max_entries
## for sid in xrange(max_entries):
## entry = fp.read(128)
## if not entry:
## break
## self.direntries.append(_OleDirectoryEntry(entry, sid, self))
# load root entry:
root_entry = self._load_direntry(0)
# Root entry is the first entry:
self.root = self.direntries[0]
# read and build all storage trees, starting from the root:
self.root.build_storage_tree()
def _load_direntry (self, sid):
"""
Load a directory entry from the directory.
This method should only be called once for each storage/stream when
loading the directory.
sid: index of storage/stream in the directory.
return: a _OleDirectoryEntry object
        raise: IOError if the entry has already been referenced.
"""
# check if SID is OK:
if sid<0 or sid>=len(self.direntries):
self._raise_defect(DEFECT_FATAL, "OLE directory index out of range")
# check if entry was already referenced:
if self.direntries[sid] is not None:
self._raise_defect(DEFECT_INCORRECT,
"double reference for OLE stream/storage")
            # if exception not raised, return the object
            return self.direntries[sid]
self.directory_fp.seek(sid * 128)
entry = self.directory_fp.read(128)
self.direntries[sid] = _OleDirectoryEntry(entry, sid, self)
return self.direntries[sid]
def dumpdirectory(self):
"""
Dump directory (for debugging only)
"""
self.root.dump()
def _open(self, start, size = 0x7FFFFFFF, force_FAT=False):
"""
Open a stream, either in FAT or MiniFAT according to its size.
(openstream helper)
start: index of first sector
size: size of stream (or nothing if size is unknown)
force_FAT: if False (default), stream will be opened in FAT or MiniFAT
according to size. If True, it will always be opened in FAT.
"""
debug('OleFileIO.open(): sect=%d, size=%d, force_FAT=%s' %
(start, size, str(force_FAT)))
# stream size is compared to the MiniSectorCutoff threshold:
if size < self.minisectorcutoff and not force_FAT:
# ministream object
if not self.ministream:
# load MiniFAT if it wasn't already done:
self.loadminifat()
# The first sector index of the miniFAT stream is stored in the
# root directory entry:
size_ministream = self.root.size
debug('Opening MiniStream: sect=%d, size=%d' %
(self.root.isectStart, size_ministream))
self.ministream = self._open(self.root.isectStart,
size_ministream, force_FAT=True)
return _OleStream(self.ministream, start, size, 0,
self.minisectorsize, self.minifat)
else:
# standard stream
return _OleStream(self.fp, start, size, 512,
self.sectorsize, self.fat)
def _list(self, files, prefix, node):
"""
(listdir helper)
files: list of files to fill in
prefix: current location in storage tree (list of names)
node: current node (_OleDirectoryEntry object)
"""
prefix = prefix + [node.name]
for entry in node.kids:
if entry.kids:
self._list(files, prefix, entry)
else:
files.append(prefix[1:] + [entry.name])
def listdir(self):
"""
Return a list of streams stored in this file
"""
files = []
self._list(files, [], self.root)
return files
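    # Example output (sketch; actual stream names depend on the file): for a
    # typical Word document this may look like
    #   [['WordDocument'], ['\x05SummaryInformation'], ...]
    # i.e. each entry is the path of one stream as a list of names.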
def _find(self, filename):
"""
        Finds the directory entry of the given filename and returns its sid.
        (openstream helper)
Note: this method is case-insensitive.
filename: path of stream in storage tree (except root entry), either:
- a string using Unix path syntax, for example:
'storage_1/storage_1.2/stream'
- a list of storage filenames, path to the desired stream/storage.
Example: ['storage_1', 'storage_1.2', 'stream']
return: sid of requested filename
raise IOError if file not found
"""
# if filename is a string instead of a list, split it on slashes to
# convert to a list:
if isinstance(filename, basestring):
filename = filename.split('/')
# walk across storage tree, following given path:
node = self.root
for name in filename:
for kid in node.kids:
if kid.name.lower() == name.lower():
break
else:
raise IOError, "file not found"
node = kid
return node.sid
def openstream(self, filename):
"""
Open a stream as a read-only file object (StringIO).
filename: path of stream in storage tree (except root entry), either:
- a string using Unix path syntax, for example:
'storage_1/storage_1.2/stream'
- a list of storage filenames, path to the desired stream/storage.
Example: ['storage_1', 'storage_1.2', 'stream']
return: file object (read-only)
raise IOError if filename not found, or if this is not a stream.
"""
sid = self._find(filename)
entry = self.direntries[sid]
if entry.entry_type != STGTY_STREAM:
raise IOError, "this file is not a stream"
return self._open(entry.isectStart, entry.size)
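    # Usage sketch (file and stream names are assumed, not guaranteed to
    # exist; matching is case-insensitive):
    #   ole = OleFileIO('document.doc')
    #   if ole.exists('worddocument'):
    #       data = ole.openstream('worddocument').read()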
def get_type(self, filename):
"""
Test if given filename exists as a stream or a storage in the OLE
container, and return its type.
filename: path of stream in storage tree. (see openstream for syntax)
return: False if object does not exist, its entry type (>0) otherwise:
- STGTY_STREAM: a stream
- STGTY_STORAGE: a storage
- STGTY_ROOT: the root entry
"""
try:
sid = self._find(filename)
entry = self.direntries[sid]
return entry.entry_type
except:
return False
def exists(self, filename):
"""
Test if given filename exists as a stream or a storage in the OLE
container.
filename: path of stream in storage tree. (see openstream for syntax)
        return: True if object exists, else False.
"""
try:
sid = self._find(filename)
return True
except:
return False
def get_size(self, filename):
"""
Return size of a stream in the OLE container, in bytes.
filename: path of stream in storage tree (see openstream for syntax)
return: size in bytes (long integer)
raise: IOError if file not found, TypeError if this is not a stream.
"""
sid = self._find(filename)
entry = self.direntries[sid]
if entry.entry_type != STGTY_STREAM:
#TODO: Should it return zero instead of raising an exception ?
raise TypeError, 'object is not an OLE stream'
return entry.size
def get_rootentry_name(self):
"""
Return root entry name. Should usually be 'Root Entry' or 'R' in most
implementations.
"""
return self.root.name
def getproperties(self, filename):
"""
Return properties described in substream.
filename: path of stream in storage tree (see openstream for syntax)
return: a dictionary of values indexed by id (integer)
"""
fp = self.openstream(filename)
data = {}
# header
s = fp.read(28)
clsid = _clsid(s[8:24])
# format id
s = fp.read(20)
fmtid = _clsid(s[:16])
fp.seek(i32(s, 16))
# get section
s = "****" + fp.read(i32(fp.read(4))-4)
for i in range(i32(s, 4)):
id = i32(s, 8+i*8)
offset = i32(s, 12+i*8)
type = i32(s, offset)
debug ('property id=%d: type=%d offset=%X' % (id, type, offset))
# test for common types first (should perhaps use
# a dictionary instead?)
if type == VT_I2:
value = i16(s, offset+4)
if value >= 32768:
value = value - 65536
elif type == VT_UI2:
value = i16(s, offset+4)
elif type in (VT_I4, VT_ERROR):
value = i32(s, offset+4)
elif type == VT_UI4:
value = i32(s, offset+4) # FIXME
elif type in (VT_BSTR, VT_LPSTR):
count = i32(s, offset+4)
value = s[offset+8:offset+8+count-1]
elif type == VT_BLOB:
count = i32(s, offset+4)
value = s[offset+8:offset+8+count]
elif type == VT_LPWSTR:
count = i32(s, offset+4)
value = _unicode(s[offset+8:offset+8+count*2])
elif type == VT_FILETIME:
value = long(i32(s, offset+4)) + (long(i32(s, offset+8))<<32)
# FIXME: this is a 64-bit int: "number of 100ns periods
# since Jan 1,1601". Should map this to Python time
value = value / 10000000L # seconds
elif type == VT_UI1:
value = ord(s[offset+4])
elif type == VT_CLSID:
value = _clsid(s[offset+4:offset+20])
elif type == VT_CF:
count = i32(s, offset+4)
value = s[offset+8:offset+8+count]
else:
value = None # everything else yields "None"
# FIXME: add support for VT_VECTOR
#print "%08x" % id, repr(value),
#print "(%s)" % VT[i32(s, offset) & 0xFFF]
data[id] = value
return data
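    # Usage sketch ('\x05SummaryInformation' is the usual name of the summary
    # property stream, assumed to be present in the file):
    #   props = ole.getproperties('\x05SummaryInformation')
    #   # -> dict of values keyed by integer property id, e.g. {2: 'Title', ...}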
#
# --------------------------------------------------------------------
# This script can be used to dump the directory of any OLE2 structured
# storage file.
if __name__ == "__main__":
import sys
# [PL] display quick usage info if launched from command-line
if len(sys.argv) <= 1:
print __doc__
print """
Launched from command line, this script parses OLE files and prints info.
Usage: OleFileIO_PL.py [-d] [-c] <file> [file2 ...]
Options:
-d : debug mode (display a lot of debug information, for developers only)
-c : check all streams (for debugging purposes)
"""
sys.exit()
check_streams = False
for filename in sys.argv[1:]:
## try:
# OPTIONS:
if filename == '-d':
# option to switch debug mode on:
set_debug_mode(True)
continue
if filename == '-c':
# option to switch check streams mode on:
check_streams = True
continue
ole = OleFileIO(filename, raise_defects=DEFECT_INCORRECT)
print "-" * 68
print filename
print "-" * 68
ole.dumpdirectory()
for streamname in ole.listdir():
if streamname[-1][0] == "\005":
print streamname, ": properties"
props = ole.getproperties(streamname)
props = props.items()
props.sort()
for k, v in props:
                #[PL]: avoid displaying values that are too large or binary:
if isinstance(v, basestring):
if len(v) > 50:
v = v[:50]
# quick and dirty binary check:
for c in (1,2,3,4,5,6,7,11,12,14,15,16,17,18,19,20,
21,22,23,24,25,26,27,28,29,30,31):
if chr(c) in v:
v = '(binary data)'
break
print " ", k, v
if check_streams:
# Read all streams to check if there are errors:
print '\nChecking streams...'
for streamname in ole.listdir():
# print name using repr() to convert binary chars to \xNN:
print '-', repr('/'.join(streamname)),'-',
st_type = ole.get_type(streamname)
if st_type == STGTY_STREAM:
print 'size %d' % ole.get_size(streamname)
# just try to read stream in memory:
ole.openstream(streamname)
else:
print 'NOT a stream : type=%d' % st_type
print ''
#[PL] Test a few new methods:
root = ole.get_rootentry_name()
print 'Root entry name: "%s"' % root
if ole.exists('worddocument'):
print "This is a Word document."
print "type of stream 'WordDocument':", ole.get_type('worddocument')
print "size :", ole.get_size('worddocument')
if ole.exists('macros/vba'):
print "This document may contain VBA macros."
## except IOError, v:
## print "***", "cannot read", file, "-", v
| [
"[email protected]"
]
| |
68a84d7927a983eaa34d6441082611ceb533e83c | ce661026009d622db924080d85ab529f1cae6b60 | /projecteuler.net/23.py | f7409566c21916e771943acbeec9b7d44db05cd5 | []
| no_license | predavlad/projecteuler | d54f5d85ab0133b19b54b4168990b90f09a0184c | 58e1637733bb7e01e44bfac707353ecfe84d9b19 | refs/heads/master | 2021-01-23T15:29:26.257019 | 2019-02-09T10:11:23 | 2019-02-09T10:11:23 | 12,952,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,447 | py | import time
import math
# 0.13 seconds
start_time = time.time()
def proper_divisors(n):
"""
Get the proper divisors from a number
"""
yield 1
for i in xrange(2, int(math.sqrt(n)) + 1):
if n % i == 0:
yield i
if i != n / i:
yield n / i
def is_abundant(n):
"""
Calculates if n is abundant (the sum of its proper divisors is larger than the number)
"""
if n % 2 != 0 and n % 3 != 0:
return False
return sum(proper_divisors(n)) > n
def is_abundant_sum(n):
"""
    At this stage n is odd (and over 48), so it has to be written as the
    sum of an odd and an even abundant number. Since odd abundant numbers
    are far rarer, we iterate over those.
"""
    global odd_abundant, abundant
for i in odd_abundant:
if i > n:
continue
if (n - i) in abundant:
return True
return False
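# worked example: the smallest odd abundant number is 945, so the smallest
# odd number expressible as such a sum is 945 + 12 = 957; every odd n below
# 957 makes is_abundant_sum(n) return False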
# set up initial values we will need later on:
# numbers under 49 that ARE expressible as a sum of two abundant numbers
# (the smallest such sum is 12 + 12 = 24)
abundant_sums_under_49 = [24, 30, 32, 36, 38, 40, 42, 44, 48]
non_abundant_sum = sum([i for i in xrange(1, 49) if i not in abundant_sums_under_49])
abundant = set(i for i in xrange(1, 20162) if is_abundant(i))
odd_abundant = set(i for i in abundant if i % 2 == 1)
# main loop: every even number >= 48 is a sum of two abundant numbers,
# so only the odd numbers need to be tested here
non_abundant_sum += sum([i for i in xrange(49, 20162, 2) if not is_abundant_sum(i)])
print non_abundant_sum
print time.time() - start_time, "seconds"
| [
"[email protected]"
]
| |
33f4a2ddbfd17a19e3cf63627ca85e66d784c4d3 | 2e8d5422aba03edc10154225db2fc39af5e98660 | /Code/GUI/OnscreenDebugger.py | 89a939ac9c3029852a4b2d118d46dc23f8bd26d2 | [
"MIT"
]
| permissive | MYheavyGo/RenderPipeline | f500611bef020f45ac63023df206f978be887fc5 | 70002e71c25ba93f05c73d041943d07eb639641c | refs/heads/master | 2021-01-15T14:49:58.756014 | 2016-01-18T15:59:14 | 2016-01-18T15:59:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,818 | py | """
RenderPipeline
Copyright (c) 2014-2015 tobspr <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import division
from six.moves import range
from functools import partial
from panda3d.core import Vec4, Vec3, Vec2, RenderState, TransformState
from direct.gui.DirectFrame import DirectFrame
from direct.interval.IntervalGlobal import Parallel, Sequence
from .BetterOnscreenImage import BetterOnscreenImage
from .BufferViewer import BufferViewer
from .PipeViewer import PipeViewer
from .BetterOnscreenText import BetterOnscreenText
from .BetterLabeledCheckbox import BetterLabeledCheckbox
from .CheckboxCollection import CheckboxCollection
from .FastTextNode import FastTextNode
from .ErrorMessageDisplay import ErrorMessageDisplay
from .ExposureWidget import ExposureWidget
from ..Util.DebugObject import DebugObject
from ..Globals import Globals
from ..BaseManager import BaseManager
from ..Native import NATIVE_CXX_LOADED
from ..RenderTarget import RenderTarget
from ..Util.Image import Image
class OnscreenDebugger(BaseManager):
""" This class manages the onscreen gui """
def __init__(self, pipeline):
BaseManager.__init__(self)
self.debug("Creating debugger")
self._pipeline = pipeline
self._fullscreen_node = Globals.base.pixel2d.attach_new_node(
"PipelineDebugger")
self._create_components()
self._init_keybindings()
self._init_notify()
def _create_components(self):
""" Creates the gui components """
# When using small resolutions, scale the GUI so its still useable,
# otherwise the sub-windows are bigger than the main window
scale_factor = min(1.0, Globals.base.win.get_x_size() / 1920.0)
self._fullscreen_node.set_scale(scale_factor)
self._gui_scale = scale_factor
# Component values
self._debugger_width = 460
# Create states
self._debugger_visible = False
# Create intervals
self._debugger_interval = None
# Create the actual GUI
self._create_debugger()
self._create_topbar()
self._create_stats()
self._create_hints()
self._buffer_viewer = BufferViewer(self._pipeline, self._fullscreen_node)
self._pipe_viewer = PipeViewer(self._pipeline, self._fullscreen_node)
self._exposure_node = self._fullscreen_node.attach_new_node("ExposureWidget")
self._exposure_node.set_pos((Globals.base.win.get_x_size()) / self._gui_scale - 200,
1, -(Globals.base.win.get_y_size()) / self._gui_scale + 120)
self._exposure_widget = ExposureWidget(self._pipeline, self._exposure_node)
def _init_notify(self):
""" Inits the notify stream which gets all output from panda and parses
it """
self._error_msg_handler = ErrorMessageDisplay()
def do_update(self):
""" Updates the gui """
self._update_stats()
self._error_msg_handler.update()
def get_error_msg_handler(self):
""" Returns the error message handler """
return self._error_msg_handler
def _create_topbar(self):
""" Creates the topbar """
self._pipeline_logo = BetterOnscreenImage(
image="Data/GUI/OnscreenDebugger/PipelineLogoText.png", x=30, y=30,
parent=self._fullscreen_node)
def _create_stats(self):
""" Creates the stats overlay """
self._overlay_node = Globals.base.aspect2d.attach_new_node("Overlay")
self._overlay_node.set_pos(Globals.base.getAspectRatio() - 0.07, 1, 1.0 - 0.07)
self._debug_lines = []
for i in range(4):
self._debug_lines.append(FastTextNode(
pos=Vec2(0, -i * 0.046), parent=self._overlay_node,
pixel_size=16, align="right", color=Vec3(1)))
def _create_hints(self):
""" Creates the hints like keybindings and when reloading shaders """
self._hint_reloading = BetterOnscreenImage(
image="Data/GUI/OnscreenDebugger/ShaderReloadHint.png",
x=float((Globals.base.win.get_x_size()) // 2) / self._gui_scale - 465 // 2, y=220,
parent=self._fullscreen_node)
self.set_reload_hint_visible(False)
if not NATIVE_CXX_LOADED:
# Warning when using the python version
python_warning = BetterOnscreenImage(
image="Data/GUI/OnscreenDebugger/PythonWarning.png",
x=((Globals.base.win.get_x_size()/self._gui_scale) - 1054) // 2,
y=(Globals.base.win.get_y_size()/self._gui_scale) - 118 - 40, parent=self._fullscreen_node)
Sequence(
python_warning.color_scale_interval(0.7, Vec4(0.3, 1, 1, 0.7), blendType="easeOut"),
python_warning.color_scale_interval(0.7, Vec4(1, 1, 1, 1.0), blendType="easeOut"),
).loop()
# Keybinding hints
self._keybinding_instructions = BetterOnscreenImage(
image="Data/GUI/OnscreenDebugger/KeyBindings.png", x=30,
y=Globals.base.win.get_y_size()//self._gui_scale - 510.0,
parent=self._fullscreen_node, any_filter=False)
def _update_stats(self):
""" Updates the stats overlay """
clock = Globals.clock
self._debug_lines[0].set_text("{:3.0f} fps | {:3.1f} ms | {:3.1f} ms max".format(
clock.get_average_frame_rate(),
1000.0 / max(0.001, clock.get_average_frame_rate()),
clock.get_max_frame_duration() * 1000.0))
text = "{:4d} render states | {:4d} transforms"
text += " | {:4d} commands | {:6d} lights | {:5d} shadow sources"
self._debug_lines[1].set_text(text.format(
RenderState.get_num_states(), TransformState.get_num_states(),
self._pipeline.light_mgr.get_cmd_queue().get_num_processed_commands(),
self._pipeline.light_mgr.get_num_lights(),
self._pipeline.light_mgr.get_num_shadow_sources(),
))
text = "{:3.0f} MiB VRAM usage | {:5d} images | {:5d} textures | "
text += "{:5d} render targets | {:3d} plugins"
tex_info = self._buffer_viewer.get_stage_information()
self._debug_lines[2].set_text(text.format(
tex_info["memory"] / (1024**2) ,
Image._NUM_IMAGES,
tex_info["count"],
RenderTarget._NUM_BUFFERS_ALLOCATED,
self._pipeline.plugin_mgr.get_interface().get_active_plugin_count()
))
text = "{} ({:1.3f}) | {:3d} active constraints"
self._debug_lines[3].set_text(text.format(
self._pipeline.daytime_mgr.get_time_str(),
self._pipeline.daytime_mgr.get_time(),
self._pipeline.daytime_mgr.get_num_constraints()
))
def _create_debugger(self):
""" Creates the debugger contents """
debugger_opacity = 1.0
self._debugger_node = self._fullscreen_node.attach_new_node("DebuggerNode")
self._debugger_node.set_pos(30, 0, -Globals.base.win.get_y_size()//self._gui_scale + 820.0)
self._debugger_bg_img = BetterOnscreenImage(
image="Data/GUI/OnscreenDebugger/DebuggerBackground.png", x=0, y=0,
parent=self._debugger_node, any_filter=False
)
self._create_debugger_content()
def set_reload_hint_visible(self, flag):
""" Sets whether the shader reload hint is visible """
if flag:
self._hint_reloading.show()
else:
self._hint_reloading.hide()
def _create_debugger_content(self):
""" Internal method to create the content of the debugger """
debugger_content = self._debugger_node.attach_new_node("DebuggerContent")
debugger_content.set_z(-20)
debugger_content.set_x(20)
heading_color = Vec3(0.7, 0.7, 0.24) * 1.2
render_modes = [
("Default", "", False, ""),
("Diffuse", "DIFFUSE", False, ""),
("Roughness", "ROUGHNESS", False, ""),
("Specular", "SPECULAR", False, ""),
("Normal", "NORMAL", False, ""),
("Metallic", "METALLIC", False, ""),
("Translucency", "TRANSLUCENCY", False, ""),
("PSSM Splits", "PSSM_SPLITS", True , "PSSM"),
("Ambient Occlusion", "OCCLUSION", False, "AO")
]
row_width = 200
collection = CheckboxCollection()
for idx, (mode, mode_id, requires_cxx, requires_plugin) in enumerate(render_modes):
offs_y = idx * 24 + 45
offs_x = 0
enabled = True
if requires_cxx and not NATIVE_CXX_LOADED:
enabled = False
if requires_plugin:
if not self._pipeline.plugin_mgr.get_interface().is_plugin_enabled(requires_plugin):
enabled = False
box = BetterLabeledCheckbox(
parent=debugger_content, x=offs_x, y=offs_y, text=mode.upper(),
text_color=Vec3(0.4), radio=True, chb_checked=(mode == "Default"),
chb_callback=partial(self._set_render_mode, mode_id),
text_size=14, expand_width=230, enabled=enabled)
collection.add(box.get_checkbox())
def _set_render_mode(self, mode_id, value):
""" Callback which gets called when a render mode got selected """
if not value:
return
# Clear old defines
self._pipeline.stage_mgr.remove_define_if(lambda name: name.startswith("_RM__"))
if mode_id == "":
self._pipeline.stage_mgr.define("ANY_DEBUG_MODE", 0)
else:
self._pipeline.stage_mgr.define("ANY_DEBUG_MODE", 1)
self._pipeline.stage_mgr.define("_RM__" + mode_id, 1)
# Reload all shaders
self._pipeline.reload_shaders()
def _init_keybindings(self):
""" Inits the debugger keybindings """
Globals.base.accept("g", self._toggle_debugger)
Globals.base.accept("v", self._buffer_viewer.toggle)
Globals.base.accept("c", self._pipe_viewer.toggle)
Globals.base.accept("f5", self._toggle_gui_visible)
def _toggle_gui_visible(self):
""" Shows / Hides the gui """
if not Globals.base.pixel2d.is_hidden():
Globals.base.pixel2d.hide()
Globals.base.aspect2d.hide()
Globals.base.render2d.hide()
else:
Globals.base.pixel2d.show()
Globals.base.aspect2d.show()
Globals.base.render2d.show()
def _toggle_debugger(self):
""" Internal method to hide or show the debugger """
# TODO
return
| [
"[email protected]"
]
| |
ee87b98fe00950d2fccbf4df732138303f77f39e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03072/s818698477.py | c7275aa061f307cff2975d269d354c5e089d5fde | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | N = int(input())
H = list(map(int, input().split()))
# count the elements that are at least as large as every element before
# them (i.e. the prefix maxima)
M = 0    # running maximum so far
ans = 0
for h in H:
    if h >= M:
        ans += 1
    M = max(M, h)
print(ans) | [
"[email protected]"
]
| |
1f9d57a3e79928061f3a9b6b6b38bb612649af94 | 84a3adb95d4c3340c266fd9ec0d19000f362e11f | /infoarena/ninja/tests.py | 7e527c09e2860e9312fbfcc8f8d5391b343fb02b | []
| no_license | lavandalia/work | 50b4b3eb4a63a5a0d0ab8ab4670d14ecb3d71293 | a591adfd2fd2ff0fa8c65dc5829a15bc8cc60245 | refs/heads/master | 2020-05-01T02:25:31.229379 | 2017-12-17T08:39:32 | 2017-12-17T08:39:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | TESTS = [
(5, 5, 5),
(300, 300, 300),
(2000, 2000, 2000),
(20000, 20000, 20000),
(30000, 30000, 30000),
(40000, 40000, 40000),
(50000, 50000, 50000),
(60000, 60000, 60000),
(100000, 100000, 100000),
(100000, 100000, 100000)
]
from subprocess import call
for index, test in enumerate(TESTS):
N, M, K = test
call("./generator " + str(N) + " " + str(M) + " " + str(K) + " > ninja.in", shell=True)
call("./main")
call("cp ninja.in grader_test" + str(index + 1) + ".in", shell=True)
call("cp ninja.out grader_test" + str(index + 1) + ".ok", shell=True)
print("Done test ", index + 1)
| [
"[email protected]"
]
| |
a4256da30bcf07a2710b53136e6c4e96dbe16327 | bfe6c95fa8a2aae3c3998bd59555583fed72900a | /construct2DArray.py | 9453894a5ac8b86e076004c0a69fce6ab2f1a457 | []
| no_license | zzz136454872/leetcode | f9534016388a1ba010599f4771c08a55748694b2 | b5ea6c21bff317884bdb3d7e873aa159b8c30215 | refs/heads/master | 2023-09-01T17:26:57.624117 | 2023-08-29T03:18:56 | 2023-08-29T03:18:56 | 240,464,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | from typing import List
class Solution:
def construct2DArray(self, original: List[int], m: int,
n: int) -> List[List[int]]:
if len(original) != n * m:
return []
out = []
idx = 0
for i in range(m):
tmp = []
for j in range(n):
tmp.append(original[idx])
idx += 1
out.append(tmp)
return out
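# Example: the input is consumed row by row (row-major order), so
# construct2DArray([1, 2, 3, 4], 2, 2) -> [[1, 2], [3, 4]], while a size
# mismatch such as construct2DArray([1, 2], 1, 1) returns [].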
original = [1, 2, 3, 4]
m = 2
n = 2
original = [1, 2, 3]
m = 1
n = 3
original = [1, 2]
m = 1
n = 1
original = [3]
m = 1
n = 2
print(Solution().construct2DArray(original, m, n))
| [
"[email protected]"
]
| |
384064fb13891aac627fd125777d42fd016ce307 | 237fe532664c70630da4ca9e668cd18e2f28e6d4 | /epi/palindrome_decomposition.py | e40e36693a88f293dc2913198ca385d9c1f1fe15 | []
| no_license | mkebrahimpour/DataStructures_Python | 5ef889dbaa2f3754cd787866420bd36b9856404a | f18f6683f07b5d68f736bbc70908655a3939bcdc | refs/heads/master | 2022-01-09T07:09:08.989769 | 2019-06-24T20:46:07 | 2019-06-24T20:46:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 16:38:52 2019
@author: sbk
"""
def palindrome_decompositions(input):
def directed_palindrome_decompositions(offset, partial_partition):
if offset == len(input):
result.append(list(partial_partition))
print(result)
return
for i in range(offset + 1, len(input) + 1):
prefix = input[offset:i]
if prefix == prefix[::-1]:
directed_palindrome_decompositions(i, partial_partition + [prefix])
result = []
directed_palindrome_decompositions(0, [])
return result
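# Example: for 'aab' the palindromic decompositions are
# [['a', 'a', 'b'], ['aa', 'b']] (a prefix is extended only if it reads
# the same backwards).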
input_raw = '0204451881'
palindrome_decompositions(input_raw) | [
"[email protected]"
]
| |
c1d53722ccc6b61714dbf2b08bf85faf6024b8cf | 0c325cf7a68ef51067ed8db566d525a20de5b635 | /other/panda365/panda365/migrations/versions/1f74ea45e765_add_is_active_and_publish_at_to_post.py | a706c6531a52ee8d8ae265bcd2ab9bf7b0d0dfa2 | []
| no_license | alinzel/NOTES | 2ab6aa1ef1d601a9ae8c0d23c0df2bca7e1aa241 | 3e0594641a605580e920d0b08a251fbc99f34e2f | refs/heads/master | 2023-01-08T22:48:30.762625 | 2020-01-17T09:14:47 | 2020-01-17T09:14:47 | 175,339,492 | 0 | 0 | null | 2022-12-27T15:01:19 | 2019-03-13T03:28:08 | HTML | UTF-8 | Python | false | false | 800 | py | """add is_active and publish_at to post
Revision ID: 1f74ea45e765
Revises: 5ad67785ca96
Create Date: 2017-04-26 17:28:40.186993
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils import ArrowType
# revision identifiers, used by Alembic.
revision = '1f74ea45e765'
down_revision = '5ad67785ca96'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('post', sa.Column('is_active', sa.Boolean(), nullable=True))
op.execute('UPDATE post SET is_active = true')
op.alter_column('post', 'is_active', nullable=False)
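    # (pattern: add the column as nullable, backfill existing rows, then
    # tighten it to NOT NULL so the migration also works on populated tables)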
op.add_column('post', sa.Column('publish_at', ArrowType(), nullable=True))
op.execute('UPDATE post SET publish_at = created_at')
def downgrade():
op.drop_column('post', 'publish_at')
op.drop_column('post', 'is_active')
| [
"[email protected]"
]
| |
6d8a82a5a8833a9f27e3ea2ad21a97d0aa04612c | 0b83750815203a0b0ce46e9d7d7baa474042072b | /helper/sidebar.py | c26d4b0dd0ba08d6fd589b832722626dea780890 | [
"LicenseRef-scancode-public-domain"
]
| permissive | hanya/SidebarByMacros | 735ff808b0cb122613c7e2ab7d53b238cef82a08 | a7bbf394d868d29f094fefcef612231558260832 | refs/heads/master | 2021-01-01T05:02:11.767717 | 2016-05-13T13:27:06 | 2016-05-13T13:27:06 | 58,472,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,958 | py |
import uno
import unohelper
from com.sun.star.beans import PropertyValue
from com.sun.star.beans.PropertyState import DIRECT_VALUE
from com.sun.star.container import XNameContainer, NoSuchElementException, ElementExistException
from com.sun.star.lang import XServiceInfo, \
IllegalArgumentException
from com.sun.star.ui import XUIElementFactory, XUIElement, XToolPanel, XSidebarPanel, \
LayoutSize
from com.sun.star.ui.UIElementType import TOOLPANEL as UET_TOOLPANEL
from com.sun.star.uno import RuntimeException
class SidebarHelperForMacros(unohelper.Base, XServiceInfo, XUIElementFactory):
    """ Helps to implement sidebar components from macros.
        The factory for the UI element has to be registered under
        /org.openoffice.Office.UI.Factories/Registered/UIElementFactories,
        and the components have to be implemented according to the
        css.ui.UIElementFactory service.
    """
IMPLE_NAME = "mytools.ui.SidebarHelperForMacros"
SERVICE_NAMES = IMPLE_NAME,
CONFIG = "/mytools.UI.SidebarsByMacros/Content/Imples"
@staticmethod
def get():
klass = SidebarHelperForMacros
return klass, klass.IMPLE_NAME, klass.SERVICE_NAMES
def __init__(self, ctx, *args):
self.ctx = ctx
# XServiceInfo
def getImplementationName(self):
return self.IMPLE_NAME
def supportsService(self, name):
return name in self.SERVICE_NAMES
def getSupportedServiceNames(self):
return self.SERVICE_NAMES
# XUIElementFactory
def createUIElement(self, res_url, args):
# see css.ui.XUIElementFactory
# check the res_url is in the configuration
settings = self._get_settings(res_url)
if not settings:
# no UI element found
raise NoSuchElementException()
frame = parent = None
for arg in args:
if arg.Name == "Frame":
frame = arg.Value
elif arg.Name == "ParentWindow":
parent = arg.Value
#elif arg.Name == "Sidebar":
# If you need css.ui.XSidebar interface to request to
# re-layout, keep it.
#elif arg.Name == "SfxBindings":
# This is just pointer address, not useful for extensions.
if not frame:
raise IllegalArgumentException()
if not parent:
raise IllegalArgumentException()
try:
# new instance of requested UI element
return SidebarUIElement(self.ctx, frame, parent, res_url, settings)
except Exception as e:
print("Error in SidebarUIElement.ctor: " + str(e))
def _create(self, name):
return self.ctx.getServiceManager().createInstanceWithContext(name, self.ctx)
def _create_configuration_reader(self, nodepath, res_url):
cp = self._create("com.sun.star.configuration.ConfigurationProvider")
try:
return cp.createInstanceWithArguments(
"com.sun.star.configuration.ConfigurationAccess",
(PropertyValue("nodepath", -1, nodepath, DIRECT_VALUE),))
except:
pass
return None
def _get_settings(self, res_url):
reader = self._create_configuration_reader(self.CONFIG, res_url)
if reader and reader.hasByName(res_url):
try:
return reader.getByName(res_url)
except:
pass
return None
g_ImplementationHelper = unohelper.ImplementationHelper()
g_ImplementationHelper.addImplementation(*SidebarHelperForMacros.get())
class SidebarUIElement(unohelper.Base, XUIElement, XToolPanel, XSidebarPanel, XNameContainer):
    """ Individual panel element in a deck of the sidebar.
        Should be implemented according to the css.ui.UIElement service.
        In the case of a tool panel element, you need additional interfaces:
- css.ui.XToolPanel: describes panel
- css.ui.XSidebarPanel: panel (optional, but if not, unusable)
"""
def __init__(self, ctx, frame, parent, res_url, settings):
self.res_url = res_url
self.ctx = ctx
self.frame = frame
self.parent = parent
self._values = {}
try:
self.window = self._call_macro(settings.Initialize, (self, self.parent))
except Exception as e:
print(e)
raise RuntimeException("Error while calling Initialize for " + self.res_url, None)
# XUIElement
@property
def Frame(self):
return self.frame
@property
def ResourceURL(self):
return self.res_url
@property
def Type(self):
return UET_TOOLPANEL
def getRealInterface(self):
return self # ToDo weakref?
# XToolPanel
def createAccessible(self, parent):
return None
@property
def Window(self):
return self.window
# XSidebarPanel
def getHeightForWidth(self, width):
v = self._values.get("XSidebarPanel", None)
if v:
try:
return v.getHeightForWidth(width)
except:
pass
return LayoutSize(0, -1, 0)
# LO5.1-
def getMinimalWidth(self):
return 50
#
def _call_macro(self, uri, args=()):
script = self._create_script_provider().getScript(uri)
if script:
try:
r ,_ ,_ = script.invoke(args, (), ())
return r
except Exception as e:
print(e)
return None
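        # Example script URI (sketch; the Basic module/function names are
        # assumed, not provided by this extension):
        #   vnd.sun.star.script:Standard.Sidebar.Initialize?language=Basic&location=application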
def _create_script_provider(self):
mspf = self.ctx.getValueByName(
"/singletons/com.sun.star.script.provider.theMasterScriptProviderFactory")
return mspf.createScriptProvider("")
# ToDo language specific script provider
# XNameContainer
    # this interface is not required by the panel; it is just a helper
def insertByName(self, name, value):
if name in self._values:
raise ElementExistException(name, self)
else:
self._values[name] = value
def removeByName(self, name):
if name in self._values:
self._values.pop(name)
else:
raise NoSuchElementException(name, self)
def replaceByName(self, name, value):
if name in self._values:
self._values[name] = value
else:
raise NoSuchElementException(name, self)
def getByName(self, name):
try:
return self._values[name]
except:
raise NoSuchElementException(name, self)
def getElementNames(self):
        # dict has no names() method; keys() gives the element names
        return tuple(self._values.keys())
def hasByName(self, name):
return name in self._values
def getElementType(self):
return uno.getTypeByName("any")
def hasElements(self):
return len(self._values) != 0
| [
"[email protected]"
]
| |
4f487f55673637fbd76eb531e993577af21b5650 | ea378480ba678eb123ef826e3ca0c3eb8f4e538f | /paused/05. bk old/1.05 rule is class again/bw/field.py | 28716f9f153ed2ee767cb7a4f1ab450d64d25325 | []
| no_license | msarch/py | 67235643666b1ed762d418263f7eed3966d3f522 | dcd25e633a87cdb3710e90224e5387d3516c1cd3 | refs/heads/master | 2021-01-01T05:21:58.175043 | 2017-05-25T08:15:26 | 2017-05-25T08:15:26 | 87,453,820 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,514 | py | #!/usr/bin/python
# -*- coding: iso-8859-1 -*-
'''
[email protected] * aug 2014 * bw-rev105
this is the pyglet engine.
- displays cells on windows redraws
- cycle through rules at each clock tick
'''
##---IMPORTS ------------------------------------------------------------------
from itertools import izip
from pyglet import clock
import pyglet.gl
from pyglet.gl import *
from pyglet.window import key, get_platform
from pyglet.gl import(
glLoadIdentity,
glMatrixMode,
gluOrtho2D,
GL_MODELVIEW, GL_PROJECTION,
)
from colors import *
from shapes import *
from rules import *
#--- CONSTANTS ----------------------------------------------------------------
FRAMERATE = 1.0/30
MOVIE_FRAMERATE = 1.0 / 25 # framerate for movie export
CLOCKDISPLAY = clock.ClockDisplay()
_screen = get_platform().get_default_display().get_default_screen()
WIDTH, HEIGHT = _screen.width*1.0 ,_screen.height*1.0
CENTX, CENTY = WIDTH*0.5, HEIGHT*0.5
SCREEN = AABB(-CENTX, -CENTY, CENTX, CENTY)
ASPECT = WIDTH / HEIGHT
# @TODO : check how to go bigger than screen, then resize to fullscreen
#--- GLOBALS ------------------------------------------------------------------
paused = False
show_fps = True
fullscreen = True
field_color = white
#--- PYGLET Window setup ------------------------------------------------------
VIEW = pyglet.window.Window(resizable = True)
VIEW.set_fullscreen(fullscreen)
VIEW.set_mouse_visible(not fullscreen)
def set_field_color(new_color): # change background color
global field_color
field_color = new_color
glClearColor(new_color.r,new_color.g,new_color.b,new_color.a)
def view_setup(): # general GL setup
glMatrixMode(GL_PROJECTION)
glMatrixMode(GL_MODELVIEW)
gluOrtho2D(0, WIDTH, 0, HEIGHT)
glLoadIdentity()
glTranslatef(CENTX, CENTY, 0)
#--- VIEW key handling --------------------------------------------------------
@VIEW.event
def on_key_press(symbol, modifiers): # override pyglet window's
global paused, show_fps, fullscreen
if symbol == key.ESCAPE:
exit(0)
elif symbol == key.F:
fullscreen = (True,False)[fullscreen]
print fullscreen
VIEW.set_fullscreen(fullscreen)
VIEW.set_mouse_visible(not fullscreen)
else:
paused = (True,False)[paused]
show_fps = (True,False)[show_fps]
#--- PYGLET ENGINE ------------------------------------------------------------
@VIEW.event
def on_draw():
glClear(GL_COLOR_BUFFER_BIT)
# @TODO : inserer le facteur d'adaptation a la taille de l'ecran,
for shp in setup:
if shp.peg:
shp.paint(shp.peg)
else:
pass
if show_fps:
CLOCKDISPLAY.draw()
#--- scene update -------------------------------------------------------------
def tick(dt):
if paused:
pass
else:
for rule in ruleset:
rule.tick(dt)
#--- run mode options 1 : fullscreen animation --------------------------------
def animate():
set_field_color(field_color)
view_setup()
clock.schedule_interval(tick, FRAMERATE) # infinite loop ------------
pyglet.app.run()
#--- NOTES --------------------------------------------------------------------
'''
from ThinkingParticles, reuse:
- IDS/ODS : input data stream, output data stream
- memory node : allows the storage of any kind of data.
- IN/OUT volume testing algorithm has been added
- PSearch node, search the nearest/furthest particle in a specific radius
'''
| [
"[email protected]"
]
| |
a6853dc9a6c90720932e56e043d48372eb710b4b | 681566c88f834bd05cfb85a6acfd218429a24edd | /notes/middleware.py | 98ceffd6d154480c43355f781a62309c74f05891 | []
| no_license | cmwaura/social-site | 7eaaa03ee2aef0a5a8ef4612ee0c15a135b344f7 | d90b34334b873ac2fecb7e30b40e3d295b2dd5b7 | refs/heads/master | 2023-07-06T11:05:55.273126 | 2016-12-08T03:09:10 | 2016-12-08T03:09:10 | 70,009,809 | 1 | 0 | null | 2023-09-04T23:24:30 | 2016-10-04T21:56:54 | Python | UTF-8 | Python | false | false | 737 | py | from django.contrib.auth.decorators import login_required
from django.conf import settings
EXCEPTION_URL_SUFFIX_LIST = getattr(settings, 'LOGIN_EXCEPTION_URL_SUFFIX_LIST', ())
class LoginRequiredMiddleware(object):
def process_view(self, request, view_func, view_args, view_kwargs):
# This middleware is meant to handle all the login required webpages and give access to the
# anonymous users based on approved views.
path = request.path
for exception_url in EXCEPTION_URL_SUFFIX_LIST:
if path.startswith(exception_url):
return None
is_login_required = getattr(view_func, 'login_required', True)
if not is_login_required:
return None
return login_required(view_func)(request, *view_args, **view_kwargs)
| [
"[email protected]"
]
| |
105190afe7758cee2983dd93d88db6cc7c9e51c6 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_stares.py | ef0bb31379fcba9a8dc76e5d0d0f105e0183914f | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py |
from xai.brain.wordbase.verbs._stare import _STARE
#calss header
class _STARES(_STARE, ):
def __init__(self,):
_STARE.__init__(self)
self.name = "STARES"
self.specie = 'verbs'
self.basic = "stare"
self.jsondata = {}
| [
"[email protected]"
]
| |
d1d77c85b19504b9143576ea2c21d7802b398570 | 54d970eadb0dca9889a6bf4c7fa103031783a43d | /leetcode/169.majority_element.py | 099a6c0c837a09c6c067064062153bc6cf2c4c55 | []
| no_license | swq90/python | 54229319dd578a2f889bf47da2263bb35e1bc2b9 | b1f659aa01bb8409c96509a0debcdfaf38a19192 | refs/heads/master | 2021-06-27T14:44:46.699735 | 2021-02-16T05:53:39 | 2021-02-16T05:53:39 | 185,103,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | class Solution(object):
def majorityElement(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
l = set(nums)
c = len(nums)/2
for n in l:
count = 0
for x in nums:
if n == x:
count += 1
if count > c:
return n
def majorityElement2(self, nums):
return sorted(nums)[len(nums)/2]
def majorityElement3(self, nums):
major = nums[0]
count = 1
for i in range(1,len(nums)):
if nums[i] == major:
count += 1
elif count == 0:
major = nums[i]
count +=1
else:
count -= 1
return major
# 1.自己的笨方法
# 2.小聪明,但是时间……
# 3.别人的答案,O(n) | [
"[email protected]"
]
| |
52d16af1565f1f746240cb09fe927511550c7b61 | d24a7cbbff1dea9d9a53b10138e73d05eafe0f4e | /mezzanine/__init__.py | 72097f1b321486ba998344049174d5535189ada3 | [
"BSD-2-Clause",
"BSD-3-Clause"
]
| permissive | mminarick/mezzanine | d2d805bad00948421a3ca9bd60beaab131ac9165 | 840e776f5c88ba9529a0a9b1ffcd7ee35f356d1c | refs/heads/master | 2020-12-24T19:50:19.141277 | 2013-04-09T13:47:45 | 2013-04-09T13:47:45 | 9,339,503 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23 | py |
__version__ = "1.4.5"
| [
"[email protected]"
]
| |
25ebc922e6616ba71700b599a476ca5be37faf97 | 97aa5f503e420422a29fb7ffcf0a7cd3f871915d | /beetween_eyes.py | 6783d751cb3929b5deb3db01badc3c178b499a94 | []
| no_license | LeGrosLezard/synergo_depot_2 | d3e82656f15141b2cee25c312ec942727d0cabfa | c751200ccad2cf4b6503be529bc0ec3b43f57e2d | refs/heads/master | 2021-02-13T22:19:15.560484 | 2020-03-08T01:47:36 | 2020-03-08T01:47:36 | 244,739,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,088 | py | import cv2
import numpy as np
import threading
import math
def extremums(c):
xe = tuple(c[c[:, :, 0].argmin()][0]) #left
ye = tuple(c[c[:, :, 1].argmin()][0]) #right
we = tuple(c[c[:, :, 0].argmax()][0])
he = tuple(c[c[:, :, 1].argmax()][0]) #bottom
return xe, ye, we, he
def make_line(thresh):
"""We make line for detect more than one area
with border, on eyelashes is paste to the border"""
cv2.line(thresh, (0, 0), (0, thresh.shape[0]), (255, 255, 255), 2)
cv2.line(thresh, (0, 0), (thresh.shape[1], 0), (255, 255, 255), 2)
cv2.line(thresh, (thresh.shape[1], 0), (thresh.shape[1], thresh.shape[0]), (255, 255, 255), 2)
cv2.line(thresh, (0, thresh.shape[0]), (thresh.shape[1], thresh.shape[0]), (255, 255, 255), 2)
return thresh
def recuperate_landmarks(landmarks_head, head_box_head):
_, _, width_head, height_head = head_box_head
adding_height = int(height_head * 0.09) #5 de 74
adding_width = int(width_head * 0.015) #1 de 90
area_landmarks1 = (landmarks_head.part(21).x - adding_width,
landmarks_head.part(21).y - adding_height)
area_landmarks2 = (landmarks_head.part(22).x + adding_width,
landmarks_head.part(22).y - adding_height)
area_landmarks3 = (landmarks_head.part(27).x,
landmarks_head.part(27).y - adding_height)
area_landmarks = [area_landmarks1, area_landmarks2, area_landmarks3]
return area_landmarks
def masks(area_landmarks, threshold, frame_head):
#Make a box of the region.
box_crop = cv2.boundingRect(np.array(area_landmarks))
x ,y, w, h = box_crop
#cv2.rectangle(frame_head, (x, y), (x+w, y+h), (0, 0, 255), 1)
crop_threhsold = threshold[y:y+h, x:x+w]
crop_threhsold = make_line(crop_threhsold)
crop_frame = frame_head[y:y+h, x:x+w]
return crop_threhsold, crop_frame, box_crop
def localisation_wrinkle(crop_threhsold, crop_frame, box_crop):
x ,y, w, h = box_crop
contours, _ = cv2.findContours(crop_threhsold, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
wrinkle = 0
wrinkle_list = []
for c in contours:
max_contour = int((w * h) * 0.5)
min_contour = int((w * h) * 0.075)
if min_contour < cv2.contourArea(c) < max_contour:
xe, ye, we, he = extremums(c)
largeur = we[0] - xe[0]
longueur = he[1] - ye[1]
if longueur > largeur:
wrinkle += 1
wrinkle_list.append((he, ye))
if wrinkle == 2:
[cv2.line(crop_frame, i[0], i[1], (0, 0, 255), 1) for i in wrinkle_list]
def wrinkle_lion(frame_head, landmarks_head, gray, threshold, head_box_head):
area_landmarks = recuperate_landmarks(landmarks_head, head_box_head)
crop_threhsold, crop_frame, box_crop = masks(area_landmarks, threshold, frame_head)
localisation_wrinkle(crop_threhsold, crop_frame, box_crop)
| [
"[email protected]"
]
| |
e26eb4ebcbe9aeb7189a193c9f0a67a9fb994c6d | aa4f2cfb001026c17e89af2b304acc4b80a3c638 | /assignment1/q4_sentiment.py | 42d9bc7a3528797d36c22a03aed74b5e548a7236 | []
| no_license | PanXiebit/standford-cs224d | 3b15077f99908708605758c592eebc1515fb2440 | e68497823213eb71467eaac09effff9f1b4bba20 | refs/heads/master | 2020-03-15T06:51:47.063509 | 2018-05-11T01:03:26 | 2018-05-11T01:03:26 | 132,016,982 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,225 | py | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from cs224d.glove import *
from cs224d.data_utils import *
from q3_sgd import load_saved_params, sgd
from q4_softmaxreg import softmaxRegression, getSentenceFeature, accuracy, softmax_wrapper
# Try different regularizations and pick the best!
# NOTE: fill in one more "your code here" below before running!
# REGULARIZATION = None # Assign a list of floats in the block below
### YOUR CODE HERE
REGULARIZATION = np.logspace(-3,0,6)
REGULARIZATION = np.hstack([0,REGULARIZATION])
# REGULARIZATION = [0.0, 0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
### END YOUR CODE
# Load the dataset
dataset = StanfordSentiment()
tokens = dataset.tokens()
nWords = len(tokens)
def wordVectors(pretrain=False):
# Load the word vectors we trained earlier 自己训练的词向量
if not pretrain:
_, wordVectors0, _ = load_saved_params()
wordVectors = (wordVectors0[:nWords, :] + wordVectors0[nWords:, :])
else:
wordVectors = loadWordVectors(tokens)
return wordVectors
wordVectors = wordVectors(pretrain=False)
dimVectors = wordVectors.shape[1]
# Load the train set
trainset = dataset.getTrainSentences()
nTrain = len(trainset)
trainFeatures = np.zeros((nTrain, dimVectors))
trainLabels = np.zeros((nTrain,), dtype=np.int32)
for i in range(nTrain):
words, trainLabels[i] = trainset[i]
trainFeatures[i, :] = getSentenceFeature(tokens, wordVectors, words) # 用平均值~。。
# Prepare dev set features
devset = dataset.getDevSentences()
nDev = len(devset)
devFeatures = np.zeros((nDev, dimVectors))
devLabels = np.zeros((nDev,), dtype=np.int32)
for i in range(nDev):
words, devLabels[i] = devset[i]
devFeatures[i, :] = getSentenceFeature(tokens, wordVectors, words)
# Try our regularization parameters
results = []
for regularization in REGULARIZATION:
random.seed(3141)
np.random.seed(59265)
weights = np.random.randn(dimVectors, 5) # 0,1,2,3,4 总共5类
print("Training for reg=%f" % regularization )
# We will do batch optimization
# 使用sgd来训练参数
weights = sgd(lambda weights: softmax_wrapper(trainFeatures, trainLabels,
weights, regularization), weights, 3.0, 10000, PRINT_EVERY=1000)
# Test on train set
_, _, pred = softmaxRegression(trainFeatures, trainLabels, weights)
trainAccuracy = accuracy(trainLabels, pred)
print ("Train accuracy (%%): %f" % trainAccuracy)
# Test on dev set
_, _, pred = softmaxRegression(devFeatures, devLabels, weights)
devAccuracy = accuracy(devLabels, pred)
print ("Dev accuracy (%%): %f" % devAccuracy)
# Save the results and weights
results.append({
"reg" : regularization,
"weights" : weights,
"train" : trainAccuracy,
"dev" : devAccuracy})
# Print the accuracies
print ("")
print ("=== Recap ===")
print ("Reg\t\tTrain\t\tDev")
for result in results:
print ("%E\t%f\t%f" % (
result["reg"],
result["train"],
result["dev"]))
print ("")
# Pick the best regularization parameters
BEST_REGULARIZATION = None
BEST_WEIGHTS = None
### YOUR CODE HERE
bestdev = 0
for result in results:
if result['dev'] > bestdev:
BEST_REGULARIZATION = result['reg']
BEST_WEIGHTS = result['weights']
bestdev = result['dev']
### END YOUR CODE
# Test your findings on the test set
testset = dataset.getTestSentences()
nTest = len(testset)
testFeatures = np.zeros((nTest, dimVectors))
testLabels = np.zeros((nTest,), dtype=np.int32)
for i in range(nTest):
words, testLabels[i] = testset[i]
testFeatures[i, :] = getSentenceFeature(tokens, wordVectors, words)
_, _, pred = softmaxRegression(testFeatures, testLabels, BEST_WEIGHTS)
print ("Best regularization value: %E" % BEST_REGULARIZATION)
print ("Test accuracy (%%): %f" % accuracy(testLabels, pred))
# Make a plot of regularization vs accuracy
plt.plot(REGULARIZATION, [x["train"] for x in results])
plt.plot(REGULARIZATION, [x["dev"] for x in results])
plt.xscale('log')
plt.xlabel("regularization")
plt.ylabel("accuracy")
plt.legend(['train', 'dev'], loc='upper left')
plt.savefig("q4_reg_v_acc.png")
plt.show()
| [
"[email protected]"
]
| |
0c01af511f45a461139ffb51b4b91df3b226045d | 1d892928c70ee9ddf66f2a37a8e083d2632c6e38 | /nova/virt/vmwareapi/vim.py | d8684ce7c9956a1181363d0dcada19589839e258 | [
"Apache-2.0"
]
| permissive | usc-isi/essex-baremetal-support | 74196c3f1332ee3cdeba9c263faff0ac0567d3cf | a77daf8ef56cf41e38de36621eda25ed3f180156 | refs/heads/master | 2021-05-19T03:12:11.929550 | 2020-07-24T14:15:26 | 2020-07-24T14:15:26 | 4,702,421 | 0 | 1 | Apache-2.0 | 2020-07-24T14:15:27 | 2012-06-18T15:19:41 | null | UTF-8 | Python | false | false | 7,619 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Classes for making VMware VI SOAP calls.
"""
import httplib
try:
import suds
except ImportError:
suds = None
from nova import flags
from nova.openstack.common import cfg
from nova.virt.vmwareapi import error_util
RESP_NOT_XML_ERROR = 'Response is "text/html", not "text/xml"'
CONN_ABORT_ERROR = 'Software caused connection abort'
ADDRESS_IN_USE_ERROR = 'Address already in use'
vmwareapi_wsdl_loc_opt = cfg.StrOpt('vmwareapi_wsdl_loc',
default=None,
help='VIM Service WSDL Location '
'e.g http://<server>/vimService.wsdl. '
'Due to a bug in vSphere ESX 4.1 default wsdl. '
'Refer readme-vmware to setup')
FLAGS = flags.FLAGS
FLAGS.register_opt(vmwareapi_wsdl_loc_opt)
if suds:
class VIMMessagePlugin(suds.plugin.MessagePlugin):
def addAttributeForValue(self, node):
# suds does not handle AnyType properly.
# VI SDK requires type attribute to be set when AnyType is used
if node.name == 'value':
node.set('xsi:type', 'xsd:string')
def marshalled(self, context):
"""suds will send the specified soap envelope.
Provides the plugin with the opportunity to prune empty
nodes and fixup nodes before sending it to the server.
"""
# suds builds the entire request object based on the wsdl schema.
# VI SDK throws server errors if optional SOAP nodes are sent
# without values, e.g. <test/> as opposed to <test>test</test>
context.envelope.prune()
context.envelope.walk(self.addAttributeForValue)
class Vim:
"""The VIM Object."""
def __init__(self,
protocol="https",
host="localhost"):
"""
Creates the necessary Communication interfaces and gets the
ServiceContent for initiating SOAP transactions.
protocol: http or https
host : ESX IPAddress[:port] or ESX Hostname[:port]
"""
if not suds:
raise Exception(_("Unable to import suds."))
self._protocol = protocol
self._host_name = host
wsdl_url = FLAGS.vmwareapi_wsdl_loc
if wsdl_url is None:
raise Exception(_("Must specify vmwareapi_wsdl_loc"))
# TODO(sateesh): Use this when VMware fixes their faulty wsdl
#wsdl_url = '%s://%s/sdk/vimService.wsdl' % (self._protocol,
# self._host_name)
url = '%s://%s/sdk' % (self._protocol, self._host_name)
self.client = suds.client.Client(wsdl_url, location=url,
plugins=[VIMMessagePlugin()])
self._service_content = self.RetrieveServiceContent("ServiceInstance")
def get_service_content(self):
"""Gets the service content object."""
return self._service_content
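    # Usage sketch (host and credentials assumed; "Login" is resolved
    # dynamically by __getattr__ below and sent as a SOAP call):
    #   vim = Vim(protocol="https", host="esx.example.com")
    #   content = vim.get_service_content()
    #   vim.Login(content.sessionManager, userName="user", password="secret")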
def __getattr__(self, attr_name):
"""Makes the API calls and gets the result."""
try:
return object.__getattr__(self, attr_name)
except AttributeError:
def vim_request_handler(managed_object, **kwargs):
"""
Builds the SOAP message and parses the response for fault
checking and other errors.
managed_object : Managed Object Reference or Managed
Object Name
**kwargs : Keyword arguments of the call
"""
# Dynamic handler for VI SDK Calls
try:
request_mo = self._request_managed_object_builder(
managed_object)
request = getattr(self.client.service, attr_name)
response = request(request_mo, **kwargs)
# To check for the faults that are part of the message body
# and not returned as Fault object response from the ESX
# SOAP server
if hasattr(error_util.FaultCheckers,
attr_name.lower() + "_fault_checker"):
fault_checker = getattr(error_util.FaultCheckers,
attr_name.lower() + "_fault_checker")
fault_checker(response)
return response
# Catch the VimFaultException that is raised by the fault
# check of the SOAP response
except error_util.VimFaultException, excep:
raise
except suds.WebFault, excep:
doc = excep.document
detail = doc.childAtPath("/Envelope/Body/Fault/detail")
fault_list = []
for child in detail.getChildren():
fault_list.append(child.get("type"))
raise error_util.VimFaultException(fault_list, excep)
except AttributeError, excep:
raise error_util.VimAttributeError(_("No such SOAP method "
"'%s' provided by VI SDK") % (attr_name), excep)
except (httplib.CannotSendRequest,
httplib.ResponseNotReady,
httplib.CannotSendHeader), excep:
raise error_util.SessionOverLoadException(_("httplib "
"error in %s: ") % (attr_name), excep)
except Exception, excep:
# Socket errors which need special handling for they
# might be caused by ESX API call overload
                    # NOTE: the original parenthesization applied "!= -1" to
                    # the whole or-expression, making the test always true
                    if (str(excep).find(ADDRESS_IN_USE_ERROR) != -1 or
                        str(excep).find(CONN_ABORT_ERROR) != -1):
raise error_util.SessionOverLoadException(_("Socket "
"error in %s: ") % (attr_name), excep)
# Type error that needs special handling for it might be
# caused by ESX host API call overload
elif str(excep).find(RESP_NOT_XML_ERROR) != -1:
raise error_util.SessionOverLoadException(_("Type "
"error in %s: ") % (attr_name), excep)
else:
raise error_util.VimException(
_("Exception in %s ") % (attr_name), excep)
return vim_request_handler
def _request_managed_object_builder(self, managed_object):
"""Builds the request managed object."""
# Request Managed Object Builder
if isinstance(managed_object, str):
mo = suds.sudsobject.Property(managed_object)
mo._type = managed_object
else:
mo = managed_object
return mo
def __repr__(self):
return "VIM Object"
def __str__(self):
return "VIM Object"
| [
"[email protected]"
]
| |
c9f2f3fecb1bc630117de97f47fd2588970698d8 | d5fcdea2f2f78bc3fcf39bdf34c8067238edce2c | /qxh/homework20201104/code.py | ede33d99b15778f5153a4cf55dc37b5382d8011f | []
| no_license | ophwsjtu18/ohw20f | a5a6f4d262f48ddc8332838f344c86ec0636b7d4 | 03d7067f0e645338b84410ff13a77d495bec661e | refs/heads/main | 2023-02-04T21:20:21.123617 | 2020-12-25T09:00:04 | 2020-12-25T09:00:04 | 303,941,346 | 4 | 6 | null | 2020-10-14T10:51:15 | 2020-10-14T07:49:18 | null | UTF-8 | Python | false | false | 728 | py | import cv2
import random
import numpy as np
print("hello")
img=cv2.imread("cat.png")
imgold = img.copy()
point = 0
def mouse(event,x,y,flags,param):
    global point
    if event == cv2.EVENT_LBUTTONDOWN:
        # score a point when the click lands inside the cell currently showing the head
        if i*50<=x<=(i+1)*50 and j*50<=y<=(j+1)*50:
            point = point+1
            print(point)
cv2.namedWindow('qxh')
cv2.setMouseCallback('qxh',mouse)
while True:
img = imgold.copy()
head= img[400:450,500:550].copy()
i = random.randint(0,2)
j = random.randint(0,2)
img[i*50:(i+1)*50,j*50:(j+1)*50]=head
for a in range(0,3):
for b in range(0,3):
cv2.rectangle(img,(50*a,50*b),(50+50*a,50+50*b),(0,255,0),3)
cv2.imshow('qxh',img)
cv2.waitKey(500)
| [
"[email protected]"
]
| |
da8acf861f954643451cf1d9718c382e4e8c9b8d | 0881fa0aa942f556466cb62f2b3936b1da96e99f | /IPTVplayer/icomponents/ihost.py | 2e675be096008fb8453a66d142bc676138ff2694 | []
| no_license | persianpros/crossplatform_iptvplayer | da12ca83dbef81baff7cb77e3c3070890dcbc4c5 | 89543b9e04ac6aa2bd050355f8b853d6e2aa76f6 | refs/heads/master | 2020-03-31T13:32:19.606428 | 2018-06-27T08:36:37 | 2018-06-27T08:36:37 | 152,259,962 | 2 | 0 | null | 2018-10-09T13:54:11 | 2018-10-09T13:54:11 | null | UTF-8 | Python | false | false | 34,143 | py | ## @file ihost.py
#
###################################################
# E2 GUI COMMPONENTS
###################################################
from Plugins.Extensions.IPTVPlayer.dToolsSet.iptvplayerinit import TranslateTXT as _, GetIPTVNotify
from Plugins.Extensions.IPTVPlayer.icomponents.asynccall import MainSessionWrapper
from Plugins.Extensions.IPTVPlayer.libs.pCommon import common, CParsingHelper
from Plugins.Extensions.IPTVPlayer.libs.urlparser import urlparser
from Plugins.Extensions.IPTVPlayer.dToolsSet.iptvtools import CSearchHistoryHelper, GetCookieDir, printDBG, printExc, GetLogoDir, byteify
from Plugins.Extensions.IPTVPlayer.libs.youtube_dl.utils import clean_html
from Components.config import config
from skin import parseColor
try: import json
except Exception: import simplejson as json
from urlparse import urljoin
class CUrlItem:
def __init__(self, name = "", url = "", urlNeedsResolve = 0):
if isinstance(name, basestring): self.name = name
else: self.name = str(name)
# used only for TYPE_VIDEO item
if isinstance(url, basestring): self.url = url
else: self.url = str(url)
        self.urlNeedsResolve = urlNeedsResolve # an additional request to the host is needed to resolve this url (it is not a direct link)
## class CDisplayListItem
# defines attributes for an item of the display list
# used to communicate between the display layer and the host
#
class CDisplayListItem:
TYPE_CATEGORY = "CATEGORY"
TYPE_VIDEO = "VIDEO"
TYPE_AUDIO = "AUDIO"
TYPE_SEARCH = "SEARCH"
TYPE_ARTICLE = "ARTICLE"
TYPE_PICTURE = "PICTURE"
TYPE_DATA = "DATA"
TYPE_MORE = "MORE"
TYPE_MARKER = "MARKER"
TYPE_SUBTITLE = "SUBTITLE"
TYPE_SUB_PROVIDER = "SUB_PROVIDER"
TYPE_UNKNOWN = "UNKNOWN"
def __init__(self, name = "", \
description = "", \
type = TYPE_UNKNOWN, \
urlItems = [], \
urlSeparateRequest = 0, \
iconimage = '', \
possibleTypesOfSearch = None, \
pinLocked = False, \
isGoodForFavourites = False, \
isWatched = False, \
textColor = '', \
pinCode = ''):
if isinstance(name, basestring): self.name = name
else: self.name = str(name)
if isinstance(description, basestring): self.description = description
else: self.description = str(description)
if isinstance(type, basestring): self.type = type
else: self.type = str(type)
if isinstance(iconimage, basestring): self.iconimage = iconimage
else: self.iconimage = str(iconimage)
if pinLocked: self.pinLocked = True
else: self.pinLocked = False
self.pinCode = str(pinCode)
if isGoodForFavourites: self.isGoodForFavourites = True
else: self.isGoodForFavourites = False
if isWatched: self.isWatched = True
else: self.isWatched = False
self.textColor = str(textColor)
# used only for TYPE_VIDEO item
self.urlItems = urlItems # url to VIDEO
        # links are not available; a separate request is needed to get them
self.urlSeparateRequest = urlSeparateRequest
# used only for TYPE_SEARCH item
self.possibleTypesOfSearch = possibleTypesOfSearch
self.privateData = None
self.itemIdx = -1
def getDisplayTitle(self):
if self.isWatched:
return '*' + self.name
else:
return self.name
def getTextColor(self):
try:
if self.textColor != '':
return parseColor(self.textColor).argb()
if self.isWatched:
return parseColor(config.plugins.iptvplayer.watched_item_color.value).argb()
except Exception:
printExc()
return None
class ArticleContent:
VISUALIZER_DEFAULT = 'DEFAULT'
    # Possible args and values for richDescParams:
RICH_DESC_PARAMS = ["alternate_title", "original_title", "station", "price", "age_limit", "views", "status", "type", "first_air_date", "last_air_date", "seasons", "episodes", "country", "language", "duration", "quality", "subtitles", "year", "imdb_rating", "tmdb_rating",\
"released", "broadcast", "remaining", "rating", "rated", "genre", "genres", "category", "categories", "production", "director", "directors", "writer", "writers", \
"creator", "creators", "cast", "actors", "stars", "awards", "budget", "translation",]
# labels here must be in english language
# translation should be done before presentation using "locals" mechanism
RICH_DESC_LABELS = {"alternate_title": "Alternate Title:",
"original_title": "Original Title:",
"station": "Station:",
"price": "Price:",
"status": "Status:",
"type": "Type:",
"age_limit": "Age limit:",
"first_air_date": "First air date:",
"last_air_date": "Last air date:",
"seasons": "Seasons:",
"episodes": "Episodes:",
"quality": "Quality:",
"subtitles": "Subtitles:",
"country": "Country:",
"language": "Language",
"year": "Year:",
"released": "Released:",
"broadcast": "Broadcast:",
"remaining": "Remaining:",
"imdb_rating": "IMDb Rating:",
"tmdb_rating": "TMDb Rating:",
"rating": "Rating:",
"rated": "Rated:",
"duration": "Duration:",
"genre": "Genre:",
"genres": "Genres:",
"category": "Category:",
"categories": "Categories:",
"production": "Production:",
"director": "Director:",
"directors": "Directors:",
"writer": "Writer:",
"writers": "Writers:",
"creator": "Creator:",
"creators": "Creators:",
"cast": "Cast:",
"actors": "Actors:",
"stars": "Stars:",
"awards": "Awards:",
"views": "Views:",
"budget": "Budget:",
"translation": "Translation:"
}
def __init__(self, title = '', text = '', images = [], trailers = [], richDescParams = {}, visualizer=None):
self.title = title
self.text = text
self.images = images
self.trailers = trailers
self.richDescParams = richDescParams
if None == visualizer:
self.visualizer = ArticleContent.VISUALIZER_DEFAULT
else:
self.visualizer = visualizer
class CFavItem:
RESOLVER_DIRECT_LINK = 'DIRECT_LINK'
RESOLVER_SELF = 'SELF'
RESOLVER_URLLPARSER = 'URLLPARSER'
TYPE_UNKNOWN = CDisplayListItem.TYPE_UNKNOWN
def __init__( self, name = '', \
description = '', \
type = TYPE_UNKNOWN, \
iconimage = '', \
data = '', \
resolver = RESOLVER_SELF ):
self.name = name
self.description = description
self.type = type
self.iconimage = iconimage
self.data = data
self.resolver = resolver
self.hostName = ''
def fromDisplayListItem(self, dispItem):
self.name = dispItem.name
self.description = dispItem.description
self.type = dispItem.type
self.iconimage = dispItem.iconimage
return self
def setFromDict(self, data):
for key in data:
setattr(self, key, data[key])
return self
def getAsDict(self):
return vars(self)
class CHostsGroupItem:
def __init__( self, name = '', title = ''):
self.name = name
self.title = title
class RetHost:
OK = "OK"
ERROR = "ERROR"
NOT_IMPLEMENTED = "NOT_IMPLEMENTED"
def __init__(self, status , value, message = ''):
self.status = status
self.value = value
self.message = message
## class IHost
# interface base class with method used to
# communicate display layer with host
#
class IHost:
def isProtectedByPinCode(self):
return False
# return list of types which can be added as favourite
def getSupportedFavoritesTypes(self):
return RetHost(RetHost.NOT_IMPLEMENTED, value = [])
# get favourite item CFavItem for item with given index
def getFavouriteItem(self, Index=0):
return RetHost(RetHost.NOT_IMPLEMENTED, value = [])
    # similar to getLinksForItem; returns links
# for given CFavItem
def getLinksForFavourite(self, favItem):
return RetHost(RetHost.NOT_IMPLEMENTED, value = [])
def setInitFavouriteItem(self, favItem):
return RetHost(RetHost.NOT_IMPLEMENTED, value = [])
    # return the first available list of items: category, video or link
def getInitList(self):
return RetHost(RetHost.NOT_IMPLEMENTED, value = [])
    # return the list of items for the current list
    # for the given Index
    # 1 == refresh - force reading data from the
    # server instead of the cache, if possible
# item - object of CDisplayListItem for selected item
def getListForItem(self, Index = 0, refresh = 0, item = None):
return RetHost(RetHost.NOT_IMPLEMENTED, value = [])
# return prev requested List of item
# for given Index
# 1 == refresh - force to read data from
# server if possible
def getPrevList(self, refresh = 0):
return RetHost(RetHost.NOT_IMPLEMENTED, value = [])
# return current List
# for given Index
# 1 == refresh - force to read data from
# server if possible
def getCurrentList(self, refresh = 0):
return RetHost(RetHost.NOT_IMPLEMENTED, value = [])
# return current List
# for given Index
def getMoreForItem(self, Index=0):
return RetHost(RetHost.NOT_IMPLEMENTED, value = [])
def getLinksForVideo(self, Index = 0, item = None):
return self.getLinksForItem(Index, item)
# return list of links for AUDIO, VIDEO, PICTURE
# for given Index,
# item - object of CDisplayListItem for selected item
def getLinksForItem(self, Index = 0, item = None):
return RetHost(RetHost.NOT_IMPLEMENTED, value = [])
def getArticleContent(self, Index = 0):
return RetHost(RetHost.NOT_IMPLEMENTED, value = [])
# return resolved url
# for given url
def getResolvedURL(self, url):
return RetHost(RetHost.NOT_IMPLEMENTED, value = [])
# return full path to player logo
def getLogoPath(self):
return RetHost(RetHost.OK, value = [GetLogoDir(getattr(self, '__module__').split('.')[-1][4:] + 'logo.png')])
def getSearchResults(self, pattern, searchType = None):
return RetHost(RetHost.NOT_IMPLEMENTED, value = [])
# return list of custom actions
# for given Index,
# this function is called directly from main theread
# it should be very quick and can not perform long actions,
# like reading file, download web page etc.
def getCustomActions(self, Index = 0):
return RetHost(RetHost.NOT_IMPLEMENTED, value = [])
def performCustomAction(self, privateData):
return RetHost(RetHost.NOT_IMPLEMENTED, value = [])
def markItemAsViewed(self, Index = 0):
return RetHost(RetHost.NOT_IMPLEMENTED, value = [])
'''
CHostBase implements some typical methods
from IHost interface
'''
class CHostBase(IHost):
def __init__( self, host, withSearchHistrory, favouriteTypes=[] ):
self.host = host
self.withSearchHistrory = withSearchHistrory
self.favouriteTypes = favouriteTypes
self.currIndex = -1
self.listOfprevList = []
self.listOfprevItems = []
self.searchPattern = ''
self.searchType = ''
def getSupportedFavoritesTypes(self):
return RetHost(RetHost.OK, value = self.favouriteTypes)
def isValidIndex(self, Index, validTypes=None):
listLen = len(self.host.currList)
if listLen <= Index or Index < 0:
printDBG( "ERROR isValidIndex - current list is to short len: %d, Index: %d" % (listLen, Index) )
return False
if None != validTypes and self.converItem(self.host.currList[Index]).type not in validTypes:
printDBG( "ERROR isValidIndex - current item has wrong type" )
return False
return True
def withArticleContent(self, cItem):
return False
def getArticleContent(self, Index = 0):
retCode = RetHost.ERROR
retlist = []
if not self.isValidIndex(Index): return RetHost(retCode, value=retlist)
cItem = self.host.currList[Index]
if not self.withArticleContent(cItem):
return RetHost(retCode, value=retlist)
hList = self.host.getArticleContent(cItem)
if None == hList:
return RetHost(retCode, value=retlist)
for item in hList:
title = item.get('title', '')
text = item.get('text', '')
images = item.get("images", [])
othersInfo = item.get('other_info', '')
retlist.append( ArticleContent(title = title, text = text, images = images, richDescParams = othersInfo) )
if len(hList): retCode = RetHost.OK
return RetHost(retCode, value = retlist)
# end getArticleContent
def getLinksForItem(self, Index = 0, selItem = None):
retCode = RetHost.ERROR
retlist = []
if not self.isValidIndex(Index): return RetHost(retCode, value=retlist)
urlList = self.host.getLinksForItem(self.host.currList[Index])
if isinstance(urlList, list):
for item in urlList:
need_resolve = item.get("need_resolve", 0)
retlist.append(CUrlItem(item["name"], item["url"], need_resolve))
return RetHost(RetHost.OK, value = retlist)
# end getLinksForVideo
def getResolvedURL(self, url):
# resolve url to get direct url to video file
retlist = []
urlList = self.host.getVideoLinks(url)
if isinstance(urlList, list):
for item in urlList:
need_resolve = 0
retlist.append(CUrlItem(item["name"], item["url"], need_resolve))
return RetHost(RetHost.OK, value = retlist)
# end getResolvedURL
def getFavouriteItem(self, Index=0):
retCode = RetHost.ERROR
retlist = []
if not self.isValidIndex(Index, self.favouriteTypes): RetHost(retCode, value=retlist)
cItem = self.host.currList[Index]
data = self.host.getFavouriteData(cItem)
if None != data:
favItem = CFavItem(data=data)
favItem.fromDisplayListItem(self.converItem(cItem))
retlist = [favItem]
retCode = RetHost.OK
return RetHost(retCode, value=retlist)
# end getFavouriteItem
def getLinksForFavourite(self, favItem):
retlist = []
urlList = self.host.getLinksForFavourite(favItem.data)
if isinstance(urlList, list):
for item in urlList:
need_resolve = item.get("need_resolve", 0)
name = self.host.cleanHtmlStr( item["name"] )
url = item["url"]
retlist.append(CUrlItem(name, url, need_resolve))
return RetHost(RetHost.OK, value = retlist)
def setInitFavouriteItem(self, favItem):
self.currIndex = -1
self.listOfprevList = []
self.listOfprevItems = []
self.host.setCurrList([])
self.host.setCurrItem({})
if self.host.setInitListFromFavouriteItem(favItem.data):
return RetHost(RetHost.OK, value = None)
return RetHost(RetHost.ERROR, value = None)
    # return the first available list of items: category, video or link
def getInitList(self):
self.currIndex = -1
self.listOfprevList = []
self.listOfprevItems = []
self.host.handleService(self.currIndex)
convList = self.convertList(self.host.getCurrList())
return RetHost(RetHost.OK, value = convList)
def getListForItem(self, Index = 0, refresh = 0, selItem = None):
self.listOfprevList.append(self.host.getCurrList())
self.listOfprevItems.append(self.host.getCurrItem())
self.currIndex = Index
if self.withSearchHistrory:
self.setSearchPattern()
try:
if len(self.searchPattern):
sts, prevPattern = CSearchHistoryHelper.loadLastPattern()
if sts and prevPattern != self.searchPattern:
CSearchHistoryHelper.saveLastPattern(self.searchPattern)
except Exception:
printExc()
self.host.handleService(Index, refresh, self.searchPattern, self.searchType)
convList = self.convertList(self.host.getCurrList())
return RetHost(RetHost.OK, value = convList)
def getPrevList(self, refresh = 0):
if(len(self.listOfprevList) > 0):
hostList = self.listOfprevList.pop()
hostCurrItem = self.listOfprevItems.pop()
self.host.setCurrList(hostList)
self.host.setCurrItem(hostCurrItem)
convList = self.convertList(hostList)
return RetHost(RetHost.OK, value = convList)
else:
return RetHost(RetHost.ERROR, value = [])
def getCurrentList(self, refresh = 0):
if refresh == 1:
self.host.handleService(self.currIndex, refresh, self.searchPattern, self.searchType)
convList = self.convertList(self.host.getCurrList())
return RetHost(RetHost.OK, value = convList)
def getMoreForItem(self, Index=0):
self.host.handleService(Index, 2, self.searchPattern, self.searchType)
convList = self.convertList(self.host.getCurrList())
return RetHost(RetHost.OK, value = convList)
def getSearchItemInx(self):
try:
list = self.host.getCurrList()
for i in range( len(list) ):
if list[i]['category'] == 'search':
return i
except Exception:
printDBG('getSearchItemInx EXCEPTION')
return -1
def setSearchPattern(self):
try:
list = self.host.getCurrList()
if 'history' == list[self.currIndex]['name']:
pattern = list[self.currIndex]['title']
search_type = list[self.currIndex]['search_type']
self.host.history.addHistoryItem( pattern, search_type)
self.searchPattern = pattern
self.searchType = search_type
except Exception:
printDBG('setSearchPattern EXCEPTION')
self.searchPattern = ''
self.searchType = ''
return
def convertList(self, cList):
hostList = []
for cItem in cList:
hostItem = self.converItem(cItem)
if None != hostItem: hostList.append(hostItem)
return hostList
# end convertList
def getSearchTypes(self):
searchTypesOptions = []
#searchTypesOptions.append((_("Movies"), "movie"))
#searchTypesOptions.append((_("TV Shows"), "series"))
return searchTypesOptions
def getDefaulIcon(self, cItem):
return self.host.getDefaulIcon(cItem)
def getFullIconUrl(self, url, currUrl=None):
if currUrl != None: return self.host.getFullIconUrl(url, currUrl)
else: return self.host.getFullIconUrl(url)
def converItem(self, cItem, needUrlResolve=1, needUrlSeparateRequest=1):
hostList = []
hostLinks = []
type = CDisplayListItem.TYPE_UNKNOWN
possibleTypesOfSearch = None
if 'category' == cItem['type']:
if cItem.get('search_item', False):
type = CDisplayListItem.TYPE_SEARCH
possibleTypesOfSearch = self.getSearchTypes()
else:
type = CDisplayListItem.TYPE_CATEGORY
elif cItem['type'] == 'video':
type = CDisplayListItem.TYPE_VIDEO
elif 'audio' == cItem['type']:
type = CDisplayListItem.TYPE_AUDIO
elif 'picture' == cItem['type']:
type = CDisplayListItem.TYPE_PICTURE
elif 'article' == cItem['type']:
type = CDisplayListItem.TYPE_ARTICLE
elif 'more' == cItem['type']:
type = CDisplayListItem.TYPE_MORE
elif 'marker' == cItem['type']:
type = CDisplayListItem.TYPE_MARKER
elif 'data' == cItem['type']:
type = CDisplayListItem.TYPE_DATA
if type in [CDisplayListItem.TYPE_AUDIO, CDisplayListItem.TYPE_VIDEO, \
CDisplayListItem.TYPE_PICTURE, CDisplayListItem.TYPE_ARTICLE, \
CDisplayListItem.TYPE_DATA]:
url = cItem.get('url', '')
if '' != url: hostLinks.append(CUrlItem("Link", url, needUrlResolve))
title = cItem.get('title', '')
description = cItem.get('desc', '')
icon = self.getFullIconUrl( cItem.get('icon', '') )
if icon == '': icon = self.getDefaulIcon(cItem)
isGoodForFavourites = cItem.get('good_for_fav', False)
pinLocked = cItem.get('pin_locked', False)
pinCode = cItem.get('pin_code', '')
textColor = cItem.get('text_color', '')
return CDisplayListItem(name = title,
description = description,
type = type,
urlItems = hostLinks,
urlSeparateRequest = needUrlSeparateRequest,
iconimage = icon,
possibleTypesOfSearch = possibleTypesOfSearch,
pinLocked = pinLocked,
isGoodForFavourites = isGoodForFavourites,
textColor = textColor,
pinCode = pinCode)
# end converItem
def getSearchResults(self, searchpattern, searchType = None):
retList = []
if self.withSearchHistrory:
self.host.history.addHistoryItem( searchpattern, searchType )
self.searchPattern = searchpattern
self.searchType = searchType
        # Find the 'Wyszukaj' (search) item
list = self.host.getCurrList()
searchItemIdx = self.getSearchItemInx()
if searchItemIdx > -1:
return self.getListForItem( searchItemIdx )
else:
return RetHost(RetHost.ERROR, value = [])
# end getSearchResults
class CBaseHostClass:
def __init__(self, params={}):
self.sessionEx = MainSessionWrapper()
self.up = urlparser()
proxyURL = params.get('proxyURL', '')
useProxy = params.get('useProxy', False)
if 'MozillaCookieJar' == params.get('cookie_type', ''):
self.cm = common(proxyURL, useProxy, True)
else:
self.cm = common(proxyURL, useProxy)
self.currList = []
self.currItem = {}
if '' != params.get('history', ''):
self.history = CSearchHistoryHelper(params['history'], params.get('history_store_type', False))
if '' != params.get('cookie', ''):
self.COOKIE_FILE = GetCookieDir(params['cookie'])
self.moreMode = False
self.minPyVer = params.get('min_py_ver', 0)
def checkPythonVersion(self, pyVer):
try:
from Screens.MessageBox import MessageBox
import sys
if sys.version_info < pyVer:
hasSNI = False
try:
from ssl import wrap_socket
from inspect import getargspec
if 'server_hostname' in '%s' % [getargspec(wrap_socket)]:
hasSNI = True
except Exception:
pass
if not hasSNI:
message = _('This service requires a new Enigma2 image with a Python version %s or later.') % ('.'.join(str(x) for x in pyVer))
message += '\n' + _('You can also install SNI patch for you python if available.')
self.sessionEx.waitForFinishOpen(MessageBox, message, type = MessageBox.TYPE_INFO, timeout = 10)
except Exception:
printExc()
def informAboutGeoBlockingIfNeeded(self, country, onlyOnce=True):
try:
if onlyOnce and self.isGeoBlockingChecked: return
except Exception:
self.isGeoBlockingChecked = False
sts, data = self.cm.getPage('https://dcinfos.abtasty.com/geolocAndWeather.php')
if not sts: return
try:
data = byteify(json.loads(data.strip()[1:-1]), '', True)
if data['country'] != country:
message = _('%s uses "geo-blocking" measures to prevent you from accessing the services from outside the %s Territory.')
GetIPTVNotify().push(message % (self.getMainUrl(), country), 'info', 5)
self.isGeoBlockingChecked = True
except Exception: printExc()
def listsTab(self, tab, cItem, type='dir'):
defaultType = type
for item in tab:
params = dict(cItem)
params.update(item)
params['name'] = 'category'
type = item.get('type', defaultType)
if type == 'dir': self.addDir(params)
elif type == 'marker': self.addMarker(params)
else: self.addVideo(params)
def listToDir(self, cList, idx):
return self.cm.ph.listToDir(cList, idx)
def getMainUrl(self):
return self.MAIN_URL
def setMainUrl(self, url):
if self.cm.isValidUrl(url):
self.MAIN_URL = self.cm.getBaseUrl(url)
return True
return False
def getFullUrl(self, url, currUrl=None):
if url.startswith('./'):
url = url[1:]
if currUrl == None or not self.cm.isValidUrl(currUrl):
try: mainUrl = self.getMainUrl()
except Exception: mainUrl = 'http://fake'
else:
mainUrl = self.cm.getBaseUrl(currUrl)
if url.startswith('//'):
proto = mainUrl.split('://', 1)[0]
url = proto + ':' + url
elif url.startswith('://'):
proto = mainUrl.split('://', 1)[0]
url = proto + url
elif url.startswith('/'):
url = mainUrl + url[1:]
elif 0 < len(url) and '://' not in url:
if currUrl == None or not self.cm.isValidUrl(currUrl):
url = mainUrl + url
else:
url = urljoin(currUrl, url)
return url
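    # Worked examples for the URL joining above (assuming, for illustration,
    # that self.MAIN_URL == 'http://example.com/'):
    #   getFullUrl('//cdn.host/a.jpg') -> 'http://cdn.host/a.jpg' (inherits the protocol)
    #   getFullUrl('/a/b.html')        -> 'http://example.com/a/b.html'
    #   getFullUrl('b.html', 'http://example.com/a/x.html') -> urljoin of the two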
def getFullIconUrl(self, url, currUrl=None):
if currUrl != None: return self.getFullUrl(url, currUrl)
else: return self.getFullUrl(url)
def getDefaulIcon(self, cItem=None):
try: return self.DEFAULT_ICON_URL
except Exception:
pass
return ''
@staticmethod
def cleanHtmlStr(str):
str = str.replace('<', ' <')
str = str.replace(' ', ' ')
str = str.replace(' ', ' ')
str = clean_html(str)
str = str.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')
return CParsingHelper.removeDoubles(str, ' ').strip()
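    # e.g. cleanHtmlStr('<b>Hello</b>&nbsp;  world\n') would yield 'Hello world':
    # clean_html strips the tags, the replaces above normalize entities and
    # line breaks, and removeDoubles collapses the repeated spaces.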
@staticmethod
def getStr(v, default=''):
if type(v) == type(u''): return v.encode('utf-8')
elif type(v) == type(''): return v
return default
def getCurrList(self):
return self.currList
def setCurrList(self, list):
self.currList = list
def getCurrItem(self):
return self.currItem
def setCurrItem(self, item):
self.currItem = item
def addDir(self, params):
params['type'] = 'category'
self.currList.append(params)
return
def addMore(self, params):
params['type'] = 'more'
self.currList.append(params)
return
def addVideo(self, params):
params['type'] = 'video'
self.currList.append(params)
return
def addAudio(self, params):
params['type'] = 'audio'
self.currList.append(params)
return
def addPicture(self, params):
params['type'] = 'picture'
self.currList.append(params)
return
def addData(self, params):
params['type'] = 'data'
self.currList.append(params)
return
def addArticle(self, params):
params['type'] = 'article'
self.currList.append(params)
return
def addMarker(self, params):
params['type'] = 'marker'
self.currList.append(params)
return
def listsHistory(self, baseItem={'name': 'history', 'category': 'Wyszukaj'}, desc_key='plot', desc_base=(_("Type: ")) ):
list = self.history.getHistoryList()
for histItem in list:
plot = ''
try:
if type(histItem) == type({}):
pattern = histItem.get('pattern', '')
search_type = histItem.get('type', '')
if '' != search_type: plot = desc_base + _(search_type)
else:
pattern = histItem
search_type = None
params = dict(baseItem)
params.update({'title': pattern, 'search_type': search_type, desc_key: plot})
self.addDir(params)
except Exception: printExc()
def getFavouriteData(self, cItem):
try:
return json.dumps(cItem)
except Exception:
printExc()
return ''
def getLinksForFavourite(self, fav_data):
try:
if self.MAIN_URL == None:
self.selectDomain()
except Exception:
printExc()
links = []
try:
cItem = byteify(json.loads(fav_data))
links = self.getLinksForItem(cItem)
except Exception: printExc()
return links
def setInitListFromFavouriteItem(self, fav_data):
try:
if self.MAIN_URL == None:
self.selectDomain()
except Exception:
printExc()
try:
params = byteify(json.loads(fav_data))
except Exception:
params = {}
printExc()
return False
self.currList.append(params)
return True
def getLinksForItem(self, cItem):
return self.getLinksForVideo(cItem)
def handleService(self, index, refresh=0, searchPattern='', searchType=''):
if self.minPyVer > 0:
self.checkPythonVersion(self.minPyVer)
self.minPyVer = 0 # inform only once
self.moreMode = False
if 0 == refresh:
if len(self.currList) <= index:
return
if -1 == index:
self.currItem = { "name": None }
else:
self.currItem = self.currList[index]
if 2 == refresh: # refresh for more items
printDBG(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> endHandleService index[%s]" % index)
# remove item more and store items before and after item more
self.beforeMoreItemList = self.currList[0:index]
self.afterMoreItemList = self.currList[index+1:]
self.moreMode = True
if -1 == index:
self.currItem = { "name": None }
else:
self.currItem = self.currList[index]
def endHandleService(self, index, refresh):
if 2 == refresh: # refresh for more items
currList = self.currList
self.currList = self.beforeMoreItemList
for item in currList:
if 'more' == item['type'] or (item not in self.beforeMoreItemList and item not in self.afterMoreItemList):
self.currList.append(item)
self.currList.extend(self.afterMoreItemList)
self.beforeMoreItemList = []
self.afterMoreItemList = []
self.moreMode = False
| [
"[email protected]"
]
| |
7f260eaba2132e0e7ef19adf23a3ee754a689496 | 54a745510b16111f5e5f610a07be49ea1e79fccf | /ML/03matplotlib.py | 51a928da6ca02165ed7cea4fd31ebabc6c2b90fe | []
| no_license | SonDog0/bigdata | 84a5b7c58ad9680cdc0e49ac6088f482e09118a5 | e6cd1e3bbb0bfec0c89a31b3fb4ef66d50c272be | refs/heads/master | 2020-04-22T02:24:16.469718 | 2019-03-13T08:59:26 | 2019-03-13T08:59:26 | 170,047,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,621 | py | # matplotlib
# Python's best-known visualization tool
# a multi-platform data visualization library built on top of numpy
# over time, however, its interface and style have grown dated
# the arrival of a sleek new tool like R's ggplot was long awaited
# later the seaborn package was born, armed with a clean, modern API
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# drawing graphs with matplotlib in a Jupyter notebook
# if %matplotlib inline is set,
# graphs can be drawn without calling the show function
# a simple line graph
#plt.plot([1,4,9,16,25])
#plt.show()
# => the given data is automatically assigned to the y axis
# => if no x values are given, they default to 0, 1, 2, 3 ...
# i.e. when an np.array object is passed as the argument,
# setting only the y axis lets the x axis be inferred automatically
np.random.seed(181218)
# #y = np.random.standard_normal(20) # normally distributed random numbers
# #print(y)
#
# #x = range(len(y))
#
# # plt.plot(y)
# # plt.show()
# # plt.plot(x,y)
# # plt.show()
#
#
#
# # implicit style specification: color, marker, line
# # colors : r,g,b,c,m,y,k,w
# # markers : . o v ^ 1 p * + d D
# # lines : : -. -- -
#
# plt.plot([1,2,3,4,5],[9,8,7,6,5],'r*:')
# # plt.show()
#
#
# # explicit style specification
# # color, linewidth, linestyle, markersize,
# # marker edge color, marker edge width, marker face color
# plt.plot([1,2,3,4,5],[10,20,30,40,50],
# c='m',lw=3,ls='--',marker='d',
# ms=20,mec='k',mew=5,mfc='r')
# # plt.show()
#
# # controlling the graph axes
# # in matplotlib, a single container (the figure) holds
# # every object: axes, plots, labels, etc.
# fig = plt.figure() # plot container
# # plt.show() # shows only an empty canvas
# ax = plt.axes()
x = np.linspace(0,10,1000) # split the given interval into the given number of points
# print('number of split points',x)
# # sin graph
# fig = plt.figure()
# ax = plt.axes()
# ax.plot(x, np.sin(x))
# plt.grid()
# plt.show()
#
# # cos graph
# fig = plt.figure()
# ax = plt.axes()
# ax.plot(x, np.cos(x))
# plt.grid()
# plt.show()
#
# # tan graph
# fig = plt.figure()
# ax = plt.axes()
# ax.plot(x, np.tan(x/2))
# plt.grid()
# plt.ylim(100,-100)
# plt.xlim(2,4)
# plt.show()
# drawing sin, cos and tan together 1
# fig = plt.figure()
# ax = plt.axes()
# ax.plot(x, np.sin(x), c='r')
# ax.plot(x, np.cos(x), c='b')
# ax.plot(x, np.tan(x/2), c='g')
# plt.grid()
# plt.ylim(2,-2)
# plt.xlim(10,-10)
# plt.show()
# # drawing sin, cos and tan together 2
# fig = plt.figure()
# ax = plt.axes()
# ax.plot(x, np.sin(x), 'r',
# x, np.cos(x), 'b--',
# x, np.tan(x/2),'g-.')
# plt.grid()
# plt.ylim(2,-2)
# plt.xlim(10,-10)
# plt.show()
# # ways to specify graph colors
# fig = plt.figure()
# ax = plt.axes()
# ax.plot(x, np.sin(x-0), c='red') # color name
# ax.plot(x, np.sin(x-1), c='b') # single-letter shorthand
# ax.plot(x, np.sin(x-2), c='0.45') # grayscale, between 0 (black) and 1 (white)
# ax.plot(x, np.sin(x-3), c='#4c0b5f') # hex notation (RRGGBB)
# ax.plot(x, np.sin(x-4), c=(1.0, 0.2, 0.3)) #RGB tuple(0,1)
# ax.plot(x, np.sin(x-5), c='darkred') # HTML color name
# plt.grid()
# plt.show()
# ways to specify graph line styles
# fig = plt.figure()
# ax = plt.axes()
# ax.plot(x, x+2, linestyle='solid') # -
# ax.plot(x, x+2, linestyle='dashed') # --
# ax.plot(x, x+2, linestyle='dotted') # .
# ax.plot(x, x+2, linestyle='dashdot') # -.
# plt.grid()
# plt.show()
# # setting the graph axes, labels and title
# fig = plt.figure()
# ax = plt.axes()
# ax.plot(x, np.sin(x), 'r', label='sin(x)')
# ax.plot(x, np.cos(x), 'b', label='cos(x)')
# plt.legend()
# plt.title('sin&cos function curve')
# plt.xlabel('x value') # x-axis title
# plt.ylabel('y value') # y-axis title
# plt.grid()
# plt.show()
# unified title/axis settings - axes
# fig = plt.figure()
# ax = plt.axes()
# ax.plot(x, np.sin(x), 'r', label='sin(x)')
# ax.plot(x, np.cos(x), 'b', label='cos(x)')
# ax.set(xlim=(0,10),ylim=(-2,2),
# xlabel='x value',ylabel='y value',
# title='sin&cos function curve')
# ax.legend()
# # each can also be set separately, e.g. with ax.set_title()
# ax.grid()
# plt.show()
# using Korean (Hangul) text in matplotlib
import matplotlib as mpl
# ftpath = 'C:/Windows/Fonts/D2Coding-Ver1.3.2-20180524.ttf'
# fname = mpl.font_manager.FontProperties(fname=ftpath).get_name()
# mpl.rc('font',family=fname)
# mpl.rcParams['axes.unicode_minus'] = False
# mpl.rcParams['font.size'] = 20
#
# fig = plt.figure()
# ax = plt.axes()
# ax.plot(x, np.sin(x), label='사인')
# ax.legend()
# plt.show()
# discovering the fonts installed on the system
import matplotlib.font_manager as fm
font_list = fm.findSystemFonts(fontpaths=None, fontext='ttf')
print(font_list[:10]) # print the first 10 entries of the ttf font list
for f in fm.fontManager.ttflist:
    print(f.name) # the font list managed by matplotlib
for f in fm.fontManager.ttflist:
    if 'YTT' in f.fname:
        print(f.name, f.fname) # print names of fonts whose file path contains 'YTT'
mpl.rcParams['font.family'] = 'Yj TEUNTEUN Bold'
mpl.rcParams['axes.unicode_minus'] = False
mpl.rcParams['font.size'] = 20
fig = plt.figure()
ax = plt.axes()
ax.plot(x, np.sin(x), label='사인')
ax.legend()
plt.axhline(y=0, c='b', linewidth=1)
# plt.axvline(y=0, c='b', linewidth=1)
plt.show() | [
"[email protected]"
]
| |
e5398d21f5c7efbfb78f9baabca55243340adfa6 | 6b1b506139088aa30de9fd65cff9e3b6a3a36874 | /sofia_redux/toolkit/fitting/tests/test_polynomial/test_linear_evaluate.py | f32c3f8d35bf59b46ec924a4f754f12ce4960ec4 | [
"BSD-3-Clause"
]
| permissive | SOFIA-USRA/sofia_redux | df2e6ad402b50eb014b574ea561734334d70f84d | 493700340cd34d5f319af6f3a562a82135bb30dd | refs/heads/main | 2023-08-17T11:11:50.559987 | 2023-08-13T19:52:37 | 2023-08-13T19:52:37 | 311,773,000 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,486 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from sofia_redux.toolkit.fitting.polynomial import linear_evaluate
@pytest.fixture
def data():
a = np.array([[3, 4], [5, 6.]])
b = np.array([7., 8])
out = np.array([3.5, 4.5])[None].T
return a, b, out
def test_disallow_errors(data):
result = linear_evaluate(*data, allow_errors=False)
assert np.allclose(result, 5.5)
a, b, out = data
# test datavec
result = linear_evaluate(a[None], b[None], out, allow_errors=False)
assert np.allclose(result, 5.5)
assert result.shape == (1, 1)
with pytest.raises(np.linalg.LinAlgError):
linear_evaluate(a[None] * 0, b[None], out, allow_errors=False)
def test_allow_errors(data):
result = linear_evaluate(*data, allow_errors=True)
assert np.allclose(result, 5.5)
a, b, out = data
# test datavec
result = linear_evaluate(a[None], b[None], out, allow_errors=True)
assert np.allclose(result, 5.5)
assert result.shape == (1, 1)
result = linear_evaluate(a[None] * 0, b[None], out, allow_errors=True)
assert np.isnan(result).all()
a2 = np.stack((a, a))
b2 = np.stack((b, b))
a2[0] *= 0
result = linear_evaluate(a2, b2, out, allow_errors=True)
assert np.allclose(result, [[np.nan], [5.5]], equal_nan=True)
# test no datavec with error
result = linear_evaluate(a * 0, b, out, allow_errors=True)
assert np.isnan(result).all()
| [
"[email protected]"
]
| |
f52d6c9450b0f7d1d43cf363370c7c348683e874 | b2e1f29524122ccc3ff1fd477b2ff99502a9daf8 | /games/spiders/ai.py | 2a44d213de2808a2fb30d5fcdd7469c1addc35da | [
"MIT"
]
| permissive | siggame/Joueur.py | c6b368d4e98b14652ae39640f50e94406696f473 | 02bb5788ed5d88d24eb21869a10c6e7292ee9767 | refs/heads/master | 2022-05-10T02:16:38.136295 | 2022-05-03T22:03:23 | 2022-05-03T22:03:23 | 31,439,144 | 4 | 40 | MIT | 2020-11-08T00:25:54 | 2015-02-27T21:06:17 | Python | UTF-8 | Python | false | false | 3,454 | py | # This is where you build your AI for the Spiders game.
from joueur.base_ai import BaseAI
# <<-- Creer-Merge: imports -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# you can add additional import(s) here
# <<-- /Creer-Merge: imports -->>
class AI(BaseAI):
""" The AI you add and improve code inside to play Spiders. """
@property
def game(self) -> 'games.spiders.game.Game':
"""games.spiders.game.Game: The reference to the Game instance this AI is playing.
"""
return self._game # don't directly touch this "private" variable pls
@property
def player(self) -> 'games.spiders.player.Player':
"""games.spiders.player.Player: The reference to the Player this AI controls in the Game.
"""
return self._player # don't directly touch this "private" variable pls
def get_name(self) -> str:
"""This is the name you send to the server so your AI will control the player named this string.
Returns:
str: The name of your Player.
"""
# <<-- Creer-Merge: get-name -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
return "Spiders Python Player" # REPLACE THIS WITH YOUR TEAM NAME
# <<-- /Creer-Merge: get-name -->>
def start(self) -> None:
"""This is called once the game starts and your AI knows its player and game. You can initialize your AI here.
"""
# <<-- Creer-Merge: start -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# replace with your start logic
# <<-- /Creer-Merge: start -->>
def game_updated(self) -> None:
"""This is called every time the game's state updates, so if you are tracking anything you can update it here.
"""
# <<-- Creer-Merge: game-updated -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# replace with your game updated logic
# <<-- /Creer-Merge: game-updated -->>
def end(self, won: bool, reason: str) -> None:
"""This is called when the game ends, you can clean up your data and dump files here if need be.
Args:
won (bool): True means you won, False means you lost.
reason (str): The human readable string explaining why your AI won or lost.
"""
# <<-- Creer-Merge: end -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# replace with your end logic
# <<-- /Creer-Merge: end -->>
def run_turn(self) -> bool:
"""This is called every time it is this AI.player's turn.
Returns:
bool: Represents if you want to end your turn. True means end your turn, False means to keep your turn going and re-call this function.
"""
# <<-- Creer-Merge: runTurn -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# Put your game logic here for runTurn
return True
# <<-- /Creer-Merge: runTurn -->>
# <<-- Creer-Merge: functions -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# if you need additional functions for your AI you can add them here
# <<-- /Creer-Merge: functions -->>
| [
"[email protected]"
]
| |
410a61537fa54d53fcf8a0c953c45f4eb9b659f9 | 5d23e3a9b39f49d93c65dde4cd17e5e2f786283b | /scholariumat/products/behaviours.py | ddabf3ef553e62d23a8371636c7374fdc475fc2d | [
"MIT"
]
| permissive | MerlinB/scholariumat | aa22b59e6ebd1cde2edf00604d9102fe96b60c8a | 7c4e2bfbf3556877a856101966b591a07b4f809f | refs/heads/master | 2021-06-27T21:34:28.816791 | 2018-12-01T19:42:16 | 2018-12-01T19:42:16 | 135,583,822 | 0 | 0 | null | 2018-05-31T12:57:03 | 2018-05-31T12:57:03 | null | UTF-8 | Python | false | false | 3,918 | py | import logging
from django.db import models
from django.conf import settings
from django_extensions.db.models import TimeStampedModel, TitleSlugDescriptionModel
from framework.behaviours import PermalinkAble
logger = logging.getLogger(__name__)
class ProductBase(TitleSlugDescriptionModel, TimeStampedModel, PermalinkAble):
"""Abstract parent class for all product type classes."""
product = models.OneToOneField('products.Product', on_delete=models.CASCADE, null=True, editable=False)
def __str__(self):
return self.title
def save(self, *args, **kwargs):
if not self.product:
from .models import Product
self.product = Product.objects.create()
super().save(*args, **kwargs)
def delete(self, *args, **kwargs): # TODO: Gets ignored in bulk delete. pre_delete signal better?
self.product.delete()
super().delete(*args, **kwargs)
class Meta:
abstract = True
class AttachmentBase(models.Model):
"""Base class to create downloadable item attachment classes."""
type = models.ForeignKey('products.AttachmentType', on_delete=models.PROTECT)
item = models.ForeignKey('products.Item', on_delete=models.CASCADE)
def get(self):
pass
def __str__(self):
item_type = self.item.type.__str__()
type = self.type.__str__()
return f'{item_type}: {type}' if item_type != type else type
class Meta:
abstract = True
class BalanceMixin(models.Model):
"""Profile mixin for storing and managing user balance"""
balance = models.SmallIntegerField('Guthaben', default=0)
def spend(self, amount):
"""Given an amount, tries to spend from current balance."""
new_balance = self.balance - amount
if new_balance >= 0:
self.balance = new_balance
self.save()
return True
else:
logger.debug('{} tried to spend {} but only owns {}'.format(self, amount, self.balance))
return False
def refill(self, amount):
"""Refills balance."""
self.balance += amount
self.save()
class Meta:
abstract = True
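# Usage sketch (the Profile model below is hypothetical, combining the mixins
# as the docstrings suggest):
#   class Profile(BalanceMixin, CartMixin, models.Model): ...
#   profile.refill(100); profile.spend(30)  # -> True, balance becomes 70
#   profile.spend(100)                      # -> False, balance unchanged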
class CartMixin(models.Model):
"""Profile mixin for storing and processing items in a shopping cart"""
@property
def cart(self):
return self.purchase_set.filter(executed=False).order_by('-created')
@property
def cart_shipping(self):
"""Returns shipping costs of cart."""
return settings.SHIPPING if any([purchase.item.type.shipping for purchase in self.cart]) else 0
@property
def cart_total(self):
"""Sums prices and adds shiping costs"""
return sum([purchase.total for purchase in self.cart]) + self.cart_shipping
@property
def cart_available(self):
return all([purchase.available for purchase in self.cart])
def clean_cart(self):
for purchase in self.purchase_set.filter(executed=False):
if not purchase.available:
purchase.delete()
def execute_cart(self):
if self.balance >= self.cart_total:
for purchase in self.cart:
purchase.execute()
return True
@property
def purchases(self):
return self.purchase_set.filter(executed=True).order_by('-modified')
@property
def items_bought(self):
from .models import Item
return Item.objects.filter(purchase__in=self.purchases).distinct()
@property
def products_bought(self):
from .models import Product
return Product.objects.filter(item__in=self.items_bought).distinct()
@property
def orders(self):
return self.purchases.filter(amount__isnull=False)
@property
def events_booked(self):
return self.purchases.filter(item__type__slug__in=['livestream', 'teilnahme'])
class Meta:
abstract = True
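# Checkout flow sketch, read directly from the methods above: clean_cart()
# drops purchases that are no longer available, cart_total adds SHIPPING once
# if any cart item's type requires shipping, and execute_cart() executes the
# purchases only when balance covers cart_total.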
| [
"[email protected]"
]
| |
f77a2bbf6ec372015fb299a16ac565a2c225233e | fa45fe7eaba7ef7c27ecf95db7c460ca189ce0d4 | /everydays/BookBeingRead/python高级编程/day3.2.py | 4965680592d853a9dd4d1fe9cb00722b7e7f8b18 | []
| no_license | jake20001/Hello | be1a2bb5331f2ad4c1d8f30c6a9a530aff79e605 | 08217871bb17152eb09e68cd154937ebe5d59d2c | refs/heads/master | 2021-07-10T09:48:15.883716 | 2021-04-23T14:49:03 | 2021-04-23T14:49:03 | 56,282,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,445 | py | # -*- coding:utf-8 -*-
# -------------------------------
# ProjectName : autoDemo
# Author : zhangjk
# CreateTime : 2020/12/3 16:02
# FileName : day3.2
# Description :
# --------------------------------
class User(object):
def __init__(self,roles):
self.roles = roles
class Unauthorized(Exception):
pass
def inroles(irole,roles):
for role in irole:
if role in roles:
return True
return False
def protect(irole):
def _protect(function):
def __protect(*args,**kwargs):
user = globals().get('user')
if user is None or not inroles(irole,user.roles):
raise Unauthorized("I won't tell you")
return function(*args,**kwargs)
return __protect
return _protect
def protect2(role):
def _protect(function):
def __protect(*args,**kwargs):
user = globals().get('user')
if user is None or role not in user.roles:
raise Unauthorized("I won't tell you")
return function(*args,**kwargs)
return __protect
return _protect
tarek = User(('admin','user'))
bill = User(('user',))
visit = User(('visit',))
class MySecrets(object):
@protect(['admin','user'])
def waffle_recipe(self):
print('use tons of butter')
these_are = MySecrets()
user = tarek
these_are.waffle_recipe()
user = bill
these_are.waffle_recipe()
user = visit
try:
    these_are.waffle_recipe()  # 'visit' matches neither role, so this raises
except Unauthorized as exc:
    print('blocked:', exc)
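# Note: the @protect decorator line is equivalent to wrapping by hand:
#   waffle_recipe = protect(['admin','user'])(waffle_recipe)
# protect2 shows the same parametrized-decorator pattern specialised to a
# single role.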
"[email protected]"
]
| |
f6e12755ec63191bca0bd30de980e0252d7aa5c0 | 34e3eaae20e90851331f591d01c30fa0b9e764ef | /tools/tensorflow_docs/api_generator/report/linter.py | 75d0fee81dc277881ec98d195ef944f01319766a | [
"Apache-2.0"
]
| permissive | nherlianto/docs | 9860eeda0fe779709c455b2636c7337282a35d72 | be8ac03d73a77d3b71ba2b42849d2169c1d7d100 | refs/heads/master | 2023-01-04T01:12:01.782815 | 2020-10-29T17:09:04 | 2020-10-29T17:09:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,474 | py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lints the docstring of an API symbol."""
import ast
import inspect
import re
import textwrap
from typing import Optional, Any, List, Tuple
import astor
from tensorflow_docs.api_generator import parser
from tensorflow_docs.api_generator.report.schema import api_report_generated_pb2 as api_report_pb2
def _get_source(py_object: Any) -> Optional[str]:
if py_object is not None:
try:
source = textwrap.dedent(inspect.getsource(py_object))
return source
except Exception: # pylint: disable=broad-except
return None
return None
def _count_empty_param(items: List[Tuple[str, str]]) -> int:
count = 0
for item in items:
if not item[1].strip():
count += 1
return count
def lint_params(page_info: parser.PageInfo) -> api_report_pb2.ParameterLint:
"""Lints the parameters of a docstring.
Args:
page_info: A `PageInfo` object containing the information of a page
generated via the api generation.
Returns:
A filled `DescriptionLint` proto object.
"""
param_lint = api_report_pb2.ParameterLint()
for part in page_info.doc.docstring_parts:
if isinstance(part, parser.TitleBlock):
if part.title.lower().startswith('args'):
param_lint.total_args_param = len(part.items)
param_lint.num_empty_param_desc_args = _count_empty_param(part.items)
if part.title.lower().startswith('attr'):
param_lint.total_attr_param = len(part.items)
param_lint.num_empty_param_desc_attr = _count_empty_param(part.items)
return param_lint
def lint_description(
page_info: parser.PageInfo) -> api_report_pb2.DescriptionLint:
"""Lints the description of a docstring.
If a field in the proto is assigned 0, then it means that that field doesn't
exist.
Args:
page_info: A `PageInfo` object containing the information of a page
generated via the api generation.
Returns:
A filled `DescriptionLint` proto object.
"""
len_brief = 0
if page_info.doc.brief:
len_brief = len(page_info.doc.brief.split())
len_long_desc = 0
for part in page_info.doc.docstring_parts:
if not isinstance(part, parser.TitleBlock):
len_long_desc += len(part.split())
return api_report_pb2.DescriptionLint(
len_brief=len_brief, len_long_desc=len_long_desc)
_EXAMPLE_RE = re.compile(
r"""
(?P<indent>\ *)(?P<content>```.*?\n\s*?```)
""", re.VERBOSE | re.DOTALL)
def lint_usage_example(
page_info: parser.PageInfo) -> api_report_pb2.UsageExampleLint:
"""Counts the number of doctests and untested examples in a docstring.
Args:
page_info: A `PageInfo` object containing the information of a page
generated via the api generation.
Returns:
A filled `UsageExampleLint` proto object.
"""
description = []
for part in page_info.doc.docstring_parts:
if isinstance(part, parser.TitleBlock):
description.append(str(part))
else:
description.append(part)
desc_str = ''.join(description)
num_doctest = 0
num_untested_examples = 0
# The doctests are wrapped in backticks (```).
for match in _EXAMPLE_RE.finditer(desc_str):
if '>>>' in match.groupdict()['content']:
num_doctest += 1
else:
num_untested_examples += 1
return api_report_pb2.UsageExampleLint(
num_doctest=num_doctest, num_untested_examples=num_untested_examples)
class ReturnVisitor(ast.NodeVisitor):
"""Visits the Returns node in an AST."""
def __init__(self) -> None:
self.total_returns = []
def visit_Return(self, node) -> None: # pylint: disable=invalid-name
if node.value is None:
self.total_returns.append('None')
else:
self.total_returns.append(astor.to_source(node.value))
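# Example: after ReturnVisitor().visit(ast.parse("def f(x): return x + 1")),
# total_returns holds one entry, the source text of "x + 1"; a bare "return"
# records the string 'None' instead.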
def lint_returns(
page_info: parser.PageInfo) -> Optional[api_report_pb2.ReturnLint]:
""""Lints the returns block in the docstring.
This linter only checks if a `Returns` block exists in the docstring
if it finds `return` keyword in the source code.
Args:
page_info: A `PageInfo` object containing the information of a page
generated via the api generation.
Returns:
A filled `ReturnLint` proto object.
"""
source = _get_source(page_info.py_object)
return_visitor = ReturnVisitor()
if source is not None:
try:
return_visitor.visit(ast.parse(source))
except Exception: # pylint: disable=broad-except
pass
if source is not None and 'return' in source:
for item in page_info.doc.docstring_parts:
if isinstance(item, parser.TitleBlock):
if item.title.lower().startswith('return'):
return api_report_pb2.ReturnLint(returns_defined=True)
# If "Returns" word is present in the brief docstring then having a separate
# `Returns` section is not needed.
if 'return' in page_info.doc.brief.lower():
return api_report_pb2.ReturnLint(returns_defined=True)
# If the code only returns None then `Returns` section in the docstring is
# not required.
if all(return_val == 'None' for return_val in return_visitor.total_returns):
return None
return api_report_pb2.ReturnLint(returns_defined=False)
return None
class RaiseVisitor(ast.NodeVisitor):
"""Visits the Raises node in an AST."""
def __init__(self) -> None:
self.total_raises = []
def visit_Raise(self, node) -> None: # pylint: disable=invalid-name
# This `if` block means that there is a bare raise in the code.
if node.exc is None:
return
self.total_raises.append(astor.to_source(node.exc.func).strip())
def lint_raises(page_info: parser.PageInfo) -> api_report_pb2.RaisesLint:
"""Lints the raises block in the docstring.
The total raises in code are extracted via an AST and compared against those
extracted from the docstring.
Args:
page_info: A `PageInfo` object containing the information of a page
generated via the api generation.
Returns:
A filled `RaisesLint` proto object.
"""
raises_lint = api_report_pb2.RaisesLint()
# Extract the raises from the source code.
raise_visitor = RaiseVisitor()
source = _get_source(page_info.py_object)
if source is not None:
try:
raise_visitor.visit(ast.parse(source))
except Exception: # pylint: disable=broad-except
pass
raises_lint.total_raises_in_code = len(raise_visitor.total_raises)
# Extract the raises defined in the docstring.
raises_defined_in_doc = []
for part in page_info.doc.docstring_parts:
if isinstance(part, parser.TitleBlock):
if part.title.lower().startswith('raises'):
raises_lint.num_raises_defined = len(part.items)
if part.items:
raises_defined_in_doc.extend(list(zip(*part.items))[0])
break
else:
raises_lint.num_raises_defined = 0
return raises_lint
| [
"[email protected]"
]
| |
ce8b66aa9979fcf3c606a1dbef6946caea8ede6c | 213ded9295d50b8133ae31ad2682240338406b60 | /while.py | f6ad2f212463b5f02ce32ba506038c9fb7876caf | []
| no_license | Rayhun/python-learning-repository | 16b15f87089c02cb7ff2a109fe17301618c05794 | 45d72da422554f66a4673e105ee7d066264efd7b | refs/heads/master | 2022-12-13T16:27:15.708590 | 2020-09-18T19:13:12 | 2020-09-18T19:13:12 | 294,159,873 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | ''' while loop '''
# i = 1 # loop starting value
# summation = 0 # 15, 5
# while i < 5:
# summation += i
# i = i + 1 # in, dec
# print(summation)
st_num = 0
# while st_num < 5:
# print("bd cse solved")
# st_num = st_num + 1
# while st_num < 5:
# st_num = st_num + 1
# if st_num == 4:
# continue
# print("rayhan" ,st_num)
result = 0  # result after the first pass: 3
num = 3  # num after the first pass: 4
for i in range(2):
result = result + num
num = num + 1
print(num) | [
"[email protected]"
]
| |
d2ddfb5a4d5f689f2730abb4fadaa996d34416e4 | 3fa4a77e75738d00835dcca1c47d4b99d371b2d8 | /backend/pyrogram/raw/functions/channels/delete_history.py | 88e0739de6c429f863509cd3616f2468ef250217 | [
"Apache-2.0"
]
| permissive | appheap/social-media-analyzer | 1711f415fcd094bff94ac4f009a7a8546f53196f | 0f9da098bfb0b4f9eb38e0244aa3a168cf97d51c | refs/heads/master | 2023-06-24T02:13:45.150791 | 2021-07-22T07:32:40 | 2021-07-22T07:32:40 | 287,000,778 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,338 | py | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class DeleteHistory(TLObject): # type: ignore
"""Telegram API method.
Details:
- Layer: ``123``
- ID: ``0xaf369d42``
Parameters:
channel: :obj:`InputChannel <pyrogram.raw.base.InputChannel>`
max_id: ``int`` ``32-bit``
Returns:
``bool``
"""
__slots__: List[str] = ["channel", "max_id"]
ID = 0xaf369d42
QUALNAME = "functions.channels.DeleteHistory"
def __init__(self, *, channel: "raw.base.InputChannel", max_id: int) -> None:
self.channel = channel # InputChannel
self.max_id = max_id # int
@staticmethod
def read(data: BytesIO, *args: Any) -> "DeleteHistory":
# No flags
channel = TLObject.read(data)
max_id = Int.read(data)
return DeleteHistory(channel=channel, max_id=max_id)
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
# No flags
data.write(self.channel.write())
data.write(Int(self.max_id))
return data.getvalue()
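# Usage sketch (the client call below is an assumption for illustration and is
# not defined in this file): raw functions are typically handed to the
# client's raw-invoke method, e.g.
#   await app.send(raw.functions.channels.DeleteHistory(channel=peer, max_id=0))
# where `peer` would come from something like app.resolve_peer(chat_id).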
| [
"[email protected]"
]
| |
514a5ddb17a13c84b2e606804ea6b986afb8f6b8 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/305/usersdata/281/69192/submittedfiles/formula.py | 5f040c2823eaf58bd6a6efc57b12488619d593ba | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | # -*- coding: utf-8 -*-
print('Enter three numbers')
p = float(input('1st number: '))
print(p)
i = float(input('2nd number: '))
print(i)
n = float(input('3rd number: '))
print(n)
# future value of an annuity: payment p, rate i, n periods
v = p * (((1 + i) ** n) - 1) / i
print('%.2f' % v)
| [
"[email protected]"
]
| |
b09b228f492342f49e0bc8e90b2c5a1e1527fb2d | 5cbb0d3f89450fd1ef4b1fddbbb1ef1a8fb7fd16 | /tdd/code/show_cov.py | 93195abf586d237dffc5acbe393b03909c09dc41 | []
| no_license | AndreaCrotti/ep2013 | e259eb6223faf74bb39324c9aa1101f439cf09a2 | 964b20ed9a331d0d59fe538ba47d7e0bc96ebc2b | refs/heads/master | 2016-09-06T19:35:51.876432 | 2013-10-22T10:43:27 | 2013-10-22T10:43:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | import unittest
def smart_division(a, b):
"""Run a 'smart' division
"""
if b == 0:
raise Exception("Can not divide by 0")
res = a / b
back_res = res * b
if back_res != a:
return a / float(b)
else:
return res
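# Behavior sketch (Python 2 semantics, matching the nosetests-2.7 note below):
#   smart_division(10, 2) -> 5         (exact division stays integral)
#   smart_division(2, 3)  -> 0.666...  (falls back to float division)
#   smart_division(1, 0)  -> raises Exception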
class TestSmartDivision(unittest.TestCase):
pass
# def test_division_by_0(self):
# """Test that dividing by 0 raises an exception
# """
# with self.assertRaises(Exception):
# smart_division(10, 0)
# def test_float_division(self):
# """Check that the float division returns a correct result (with approximation)
# """
# self.assertAlmostEqual(smart_division(2, 3), 0.66, places=1)
# def test_int_division(self):
# self.assertEqual(smart_division(1, 1), 1)
# self.assertEqual(smart_division(10, 2), 5)
if __name__ == '__main__':
unittest.main()
# Use "nosetests-2.7 show_cov.py -sv --with-cov --cov-report=html"
# and open htmlcov/index.html to see the html report
| [
"[email protected]"
]
| |
d941bcaa3bac8031b9b9e6017e3c41b7d18d1beb | c3cd0262e200926cf0f9e37091557da85e0de85e | /py/ch02/src/vm/__init__.py | 55c98feca717c7b8dce711c34303f2cc8f3897a7 | []
| no_license | luningcowboy/writelua | b860bc4cc701f9a96baf3d40a1995fe1cbef6523 | 323cdb99043ab0f9f7ec1c6650f23bf94117df8c | refs/heads/master | 2022-12-18T21:48:45.821121 | 2020-09-25T08:19:22 | 2020-09-25T08:19:22 | 292,487,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | #!/usr/bin/env python
# -*- coding=utf8 -*-
"""
# Author: luning
# Created Time : 一 9/14 13:34:42 2020
# File Name: src/vm/__init__.py
# Description:
"""
| [
"[email protected]"
]
| |
b255e83582ef7fd606ae36ae4b594bf82c58a6bc | 462137348c3013fd1f389ae23557425d22497b36 | /24_days/Day 13/Question47.py | fd0d75f8edc99d6668feb2b0a2443d0585def719 | []
| no_license | wmemon/python_playground | f05b70c50c2889acd6353ba199fd725b75f48bb1 | 3b424388f92f81d82621645ee7fdbd4ac164da79 | refs/heads/master | 2022-11-23T05:09:30.828726 | 2020-07-28T12:47:03 | 2020-07-28T12:47:03 | 283,192,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | """
Define a class named Circle which can be constructed by a radius.
The Circle class has a method which can compute the area.
"""
class Circle():
def __init__(self, radius):
self.radius = float(radius)
def get_area(self):
return float(3.14*self.radius ** 2)
c1 = Circle(5)
print(c1.get_area())
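# Expected output: 78.5 (3.14 * 5**2; radius 5 chosen as an illustrative value)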
| [
"[email protected]"
]
| |
4d962f5ac69eb471a8b246a29da9df3570e14bfc | 04c9243ddf81011fe980ffffd016f1770444b503 | /send-mail/send_mail.py | 96ac21e95fb2f6b98408e32f51eb0a07e97d9623 | []
| no_license | wcl6005/Linux-Automated-Scripts | 9880ed68fbcfdb9440b20186e784367a8dcc0a69 | 359e1673b47174c8b3edf9f0c57a4f9a45343481 | refs/heads/master | 2022-11-14T20:54:41.469457 | 2020-07-10T09:02:50 | 2020-07-10T09:02:50 | 278,578,879 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,801 | py | # -*- coding: UTF-8 -*-
import smtplib
from email.mime.text import MIMEText
def send_mail(email_user, email_pwd, recv, title, content, mail_host='smtp.163.com', port=25):
'''
    Send an email.
    email_user: sender mailbox account, e.g. [email protected]
    email_pwd: mailbox authorization code (not the login password; see
               https://jingyan.baidu.com/article/c275f6ba33a95de33d7567d9.html)
    recv: recipient address; separate multiple accounts with commas
    title: mail subject
    content: mail body
    mail_host: mail server; the 163 mailbox host is smtp.163.com
    port: mail server port; the 163 default is 25
    Test:
    $ python send_mail.py
    Verified with Python 3.7.5
'''
    msg = MIMEText(content)  # mail body
    msg['Subject'] = title  # mail subject
    msg['From'] = email_user  # sender account
    msg['To'] = recv  # recipient account list
    smtp = smtplib.SMTP(mail_host, port=port)  # connect to the mail server; the SMTP port for 163 is 25
    smtp.login(email_user, email_pwd)  # log in with the sender account and authorization code
    # arguments: sender, recipients, and the message serialized to a string
    smtp.sendmail(email_user, recv, msg.as_string())
    smtp.quit()  # close the SMTP session once sending is done
if __name__ == '__main__':
import time
email_user = '[email protected]' # 发送者账号
email_pwd = 'wcl6005' # 发送者授权码
maillist = '[email protected]' # 邮箱接收人地址
title = '邮件标题'
content = '邮件内容: 从%s发来的邮件。%s'%(email_user, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())))
send_mail(email_user, email_pwd, maillist, title, content)
print('OK! Email send success.')
| [
"[email protected]"
]
| |
009c46ffdc0c2959f6a1fda0333f12363dd53a6c | 1916dc66aa9710d9a7d4fab42c28c1c49e2f630c | /app_owner/migrations/0003_auto_20210316_1712.py | a588828181da6c46475f4d327bd773d18d5ef637 | []
| no_license | robert8888/portfolio | 8ce6c1be774666a58ee44379621c6a4515f64d37 | a640b1a62810e4a5dd4f7e20fefc86dc3ca7851f | refs/heads/master | 2023-06-03T23:28:53.888863 | 2021-06-25T13:11:28 | 2021-06-25T13:11:28 | 333,259,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | # Generated by Django 3.1.5 on 2021-03-16 16:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app_owner', '0002_auto_20210316_1702'),
]
operations = [
migrations.AlterField(
model_name='contactimage',
name='contact',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='app_owner.contact'),
),
]
| [
"[email protected]"
]
| |
d9ffaffb2494e34a6811abee8d013faca8719020 | 5e4a25bce2f60ee87f580b64f62697ad9bbbaced | /0082-Remove-Duplicates-from-Sorted-List-II/solution.py | e7986f3383d5e0979f48e908d21137e835714085 | []
| no_license | Glitch95/InterviewPrep | e729b88826a257699da9ea07493d705490da7373 | b3a27d34518d77e1e4896af061f8581885ccfed0 | refs/heads/master | 2020-08-02T12:19:58.783302 | 2019-10-09T16:47:34 | 2019-10-09T16:47:34 | 211,348,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,018 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def deleteDuplicates(self, head):
        # Let's use a two-pointer technique to keep track of the start and end
        # of each distinct region. A region is a section of the list in which
        # all nodes share the same value.
        # We will also use a dummy head to handle edge cases cleanly.
        # Let's visualize this:
# | C | D | 1 | 2 | 3 | 3 | 4 | 4 | 5 | c != n
# p c n
# | C | D | 1 | 2 | 3 | 3 | 4 | 4 | 5 | c != n
# p c n
# | C | D | 1 | 2 | 3 | 3 | 4 | 4 | 5 | c != n
# p c n
# | C | D | 1 | 2 | 3 | 3 | 4 | 4 | 5 | c == n
# p c n
# | C | D | 1 | 2 | 3 | 3 | 4 | 4 | 5 | c != n and c.next != n
# p c n
# | C | D | 1 | 2 | 4 | 4 | 5 | p.next = n, c = p.next, n = c.next
# p c n
# | C | D | 1 | 2 | 4 | 4 | 5 | c == n
# p c n
# | C | D | 1 | 2 | 4 | 4 | 5 | c != n and c.next != n
# p c n
# | C | D | 1 | 2 | 4 | 4 | 5 | p.next = n
# p c n
# | C | D | 1 | 2 | 5 |
# p
n1, n2 = ListNode(None), ListNode(None)
n1.next, n2.next = n2, head
prev = n1
while prev.next and prev.next.next:
curr, next = prev.next, prev.next.next
while curr and next and curr.val == next.val:
next = next.next
if curr.next is not next:
prev.next = next
else:
prev = prev.next
return n1.next.next
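        # Sanity check (conceptual): 1->2->3->3->4->4->5 becomes 1->2->5,
        # matching the walkthrough above.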
| [
"[email protected]"
]
| |
4b1da38a8480fbb833641d31f5880d580bc9abed | c3ff891e0e23c5f9488508d30349259cc6b64b4d | /python练习/老王开枪/Demo37.py | 277cd136e886303cb3e3572d5b36cebc8f598013 | []
| no_license | JacksonMike/python_exercise | 2af2b8913ec8aded8a17a98aaa0fc9c6ccd7ba53 | 7698f8ce260439abb3cbdf478586fa1888791a61 | refs/heads/master | 2020-07-14T18:16:39.265372 | 2019-08-30T11:56:29 | 2019-08-30T11:56:29 | 205,370,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | import time as tt
tt.sleep(4)
# main.py is usually the core module of a program
"[email protected]"
]
| |
193164d2c30cf5429adfce910325ef2fb3d82ef2 | 42c63d5f9c724c99ba93f77bdead51891fcf8623 | /OpenStack-Mitaka-src/cinder/cinder/volume/drivers/emc/xtremio.py | d85b8763d22fc01cf8c9085eabb8ca52e7a6a366 | [
"Apache-2.0"
]
| permissive | liyongle/openstack-mitaka | 115ae819d42ed9bf0922a8c0ab584fa99a3daf92 | 5ccd31c6c3b9aa68b9db1bdafcf1b029e8e37b33 | refs/heads/master | 2021-07-13T04:57:53.488114 | 2019-03-07T13:26:25 | 2019-03-07T13:26:25 | 174,311,782 | 0 | 1 | null | 2020-07-24T01:44:47 | 2019-03-07T09:18:55 | Python | UTF-8 | Python | false | false | 41,939 | py | # Copyright (c) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver for EMC XtremIO Storage.
Supports XtremIO versions 2.4 and up.
1.0.0 - initial release
1.0.1 - enable volume extend
1.0.2 - added FC support, improved error handling
1.0.3 - update logging level, add translation
1.0.4 - support for FC zones
1.0.5 - add support for XtremIO 4.0
1.0.6 - add support for iSCSI multipath, CA validation, consistency groups,
R/O snapshots, CHAP discovery authentication
1.0.7 - cache glance images on the array
"""
import json
import math
import random
import requests
import string
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
import six
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import objects
from cinder.objects import fields
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.san import san
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
DEFAULT_PROVISIONING_FACTOR = 20.0
XTREMIO_OPTS = [
cfg.StrOpt('xtremio_cluster_name',
default='',
help='XMS cluster id in multi-cluster environment'),
cfg.IntOpt('xtremio_array_busy_retry_count',
default=5,
help='Number of retries in case array is busy'),
cfg.IntOpt('xtremio_array_busy_retry_interval',
default=5,
help='Interval between retries in case array is busy'),
cfg.IntOpt('xtremio_volumes_per_glance_cache',
default=100,
help='Number of volumes created from each cached glance image')]
CONF.register_opts(XTREMIO_OPTS)
RANDOM = random.Random()
OBJ_NOT_FOUND_ERR = 'obj_not_found'
VOL_NOT_UNIQUE_ERR = 'vol_obj_name_not_unique'
VOL_OBJ_NOT_FOUND_ERR = 'vol_obj_not_found'
ALREADY_MAPPED_ERR = 'already_mapped'
SYSTEM_BUSY = 'system_is_busy'
TOO_MANY_OBJECTS = 'too_many_objs'
TOO_MANY_SNAPSHOTS_PER_VOL = 'too_many_snapshots_per_vol'
XTREMIO_OID_NAME = 1
XTREMIO_OID_INDEX = 2
class XtremIOClient(object):
def __init__(self, configuration, cluster_id):
self.configuration = configuration
self.cluster_id = cluster_id
self.verify = (self.configuration.
safe_get('driver_ssl_cert_verify') or False)
if self.verify:
verify_path = (self.configuration.
safe_get('driver_ssl_cert_path') or None)
if verify_path:
self.verify = verify_path
def get_base_url(self, ver):
if ver == 'v1':
return 'https://%s/api/json/types' % self.configuration.san_ip
elif ver == 'v2':
return 'https://%s/api/json/v2/types' % self.configuration.san_ip
@utils.retry(exception.XtremIOArrayBusy,
CONF.xtremio_array_busy_retry_count,
CONF.xtremio_array_busy_retry_interval, 1)
def req(self, object_type='volumes', method='GET', data=None,
name=None, idx=None, ver='v1'):
if not data:
data = {}
if name and idx:
msg = _("can't handle both name and index in req")
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
url = '%s/%s' % (self.get_base_url(ver), object_type)
params = {}
key = None
if name:
params['name'] = name
key = name
elif idx:
url = '%s/%d' % (url, idx)
key = str(idx)
if method in ('GET', 'DELETE'):
params.update(data)
self.update_url(params, self.cluster_id)
if method != 'GET':
self.update_data(data, self.cluster_id)
LOG.debug('data: %s', data)
LOG.debug('%(type)s %(url)s', {'type': method, 'url': url})
try:
response = requests.request(method, url, params=params,
data=json.dumps(data),
verify=self.verify,
auth=(self.configuration.san_login,
self.configuration.san_password))
except requests.exceptions.RequestException as exc:
msg = (_('Exception: %s') % six.text_type(exc))
raise exception.VolumeDriverException(message=msg)
if 200 <= response.status_code < 300:
if method in ('GET', 'POST'):
return response.json()
else:
return ''
self.handle_errors(response, key, object_type)
def handle_errors(self, response, key, object_type):
if response.status_code == 400:
error = response.json()
err_msg = error.get('message')
if err_msg.endswith(OBJ_NOT_FOUND_ERR):
LOG.warning(_LW("object %(key)s of "
"type %(typ)s not found, %(err_msg)s"),
{'key': key, 'typ': object_type,
'err_msg': err_msg, })
raise exception.NotFound()
elif err_msg == VOL_NOT_UNIQUE_ERR:
LOG.error(_LE("can't create 2 volumes with the same name, %s"),
err_msg)
msg = (_('Volume by this name already exists'))
raise exception.VolumeBackendAPIException(data=msg)
elif err_msg == VOL_OBJ_NOT_FOUND_ERR:
LOG.error(_LE("Can't find volume to map %(key)s, %(msg)s"),
{'key': key, 'msg': err_msg, })
raise exception.VolumeNotFound(volume_id=key)
elif ALREADY_MAPPED_ERR in err_msg:
raise exception.XtremIOAlreadyMappedError()
elif err_msg == SYSTEM_BUSY:
raise exception.XtremIOArrayBusy()
elif err_msg in (TOO_MANY_OBJECTS, TOO_MANY_SNAPSHOTS_PER_VOL):
raise exception.XtremIOSnapshotsLimitExceeded()
msg = _('Bad response from XMS, %s') % response.text
LOG.error(msg)
raise exception.VolumeBackendAPIException(message=msg)
def update_url(self, data, cluster_id):
return
def update_data(self, data, cluster_id):
return
def get_cluster(self):
return self.req('clusters', idx=1)['content']
def create_snapshot(self, src, dest, ro=False):
"""Create a snapshot of a volume on the array.
XtreamIO array snapshots are also volumes.
:src: name of the source volume to be cloned
:dest: name for the new snapshot
:ro: new snapshot type ro/regular. only applicable to Client4
"""
raise NotImplementedError()
def get_extra_capabilities(self):
return {}
def get_initiator(self, port_address):
raise NotImplementedError()
def add_vol_to_cg(self, vol_id, cg_id):
pass
class XtremIOClient3(XtremIOClient):
def __init__(self, configuration, cluster_id):
super(XtremIOClient3, self).__init__(configuration, cluster_id)
self._portals = []
def find_lunmap(self, ig_name, vol_name):
try:
lun_mappings = self.req('lun-maps')['lun-maps']
except exception.NotFound:
raise (exception.VolumeDriverException
(_("can't find lun-map, ig:%(ig)s vol:%(vol)s") %
{'ig': ig_name, 'vol': vol_name}))
for lm_link in lun_mappings:
idx = lm_link['href'].split('/')[-1]
# NOTE(geguileo): There can be races so mapped elements retrieved
# in the listing may no longer exist.
try:
lm = self.req('lun-maps', idx=int(idx))['content']
except exception.NotFound:
continue
if lm['ig-name'] == ig_name and lm['vol-name'] == vol_name:
return lm
return None
def num_of_mapped_volumes(self, initiator):
cnt = 0
for lm_link in self.req('lun-maps')['lun-maps']:
idx = lm_link['href'].split('/')[-1]
# NOTE(geguileo): There can be races so mapped elements retrieved
# in the listing may no longer exist.
try:
lm = self.req('lun-maps', idx=int(idx))['content']
except exception.NotFound:
continue
if lm['ig-name'] == initiator:
cnt += 1
return cnt
def get_iscsi_portals(self):
if self._portals:
return self._portals
iscsi_portals = [t['name'] for t in self.req('iscsi-portals')
['iscsi-portals']]
for portal_name in iscsi_portals:
try:
self._portals.append(self.req('iscsi-portals',
name=portal_name)['content'])
except exception.NotFound:
raise (exception.VolumeBackendAPIException
(data=_("iscsi portal, %s, not found") % portal_name))
return self._portals
def create_snapshot(self, src, dest, ro=False):
data = {'snap-vol-name': dest, 'ancestor-vol-id': src}
self.req('snapshots', 'POST', data)
def get_initiator(self, port_address):
try:
return self.req('initiators', 'GET', name=port_address)['content']
except exception.NotFound:
pass
class XtremIOClient4(XtremIOClient):
def __init__(self, configuration, cluster_id):
super(XtremIOClient4, self).__init__(configuration, cluster_id)
self._cluster_name = None
def req(self, object_type='volumes', method='GET', data=None,
name=None, idx=None, ver='v2'):
return super(XtremIOClient4, self).req(object_type, method, data,
name, idx, ver)
def get_extra_capabilities(self):
return {'consistencygroup_support': True}
def find_lunmap(self, ig_name, vol_name):
try:
return (self.req('lun-maps',
data={'full': 1,
'filter': ['vol-name:eq:%s' % vol_name,
'ig-name:eq:%s' % ig_name]})
['lun-maps'][0])
except (KeyError, IndexError):
raise exception.VolumeNotFound(volume_id=vol_name)
def num_of_mapped_volumes(self, initiator):
return len(self.req('lun-maps',
data={'filter': 'ig-name:eq:%s' % initiator})
['lun-maps'])
def update_url(self, data, cluster_id):
if cluster_id:
data['cluster-name'] = cluster_id
def update_data(self, data, cluster_id):
if cluster_id:
data['cluster-id'] = cluster_id
def get_iscsi_portals(self):
return self.req('iscsi-portals',
data={'full': 1})['iscsi-portals']
def get_cluster(self):
if not self.cluster_id:
self.cluster_id = self.req('clusters')['clusters'][0]['name']
return self.req('clusters', name=self.cluster_id)['content']
def create_snapshot(self, src, dest, ro=False):
data = {'snapshot-set-name': dest, 'snap-suffix': dest,
'volume-list': [src],
'snapshot-type': 'readonly' if ro else 'regular'}
res = self.req('snapshots', 'POST', data, ver='v2')
typ, idx = res['links'][0]['href'].split('/')[-2:]
# rename the snapshot
data = {'name': dest}
try:
self.req(typ, 'PUT', data, idx=int(idx))
except exception.VolumeBackendAPIException:
# reverting
msg = _LE('Failed to rename the created snapshot, reverting.')
LOG.error(msg)
self.req(typ, 'DELETE', idx=int(idx))
raise
def add_vol_to_cg(self, vol_id, cg_id):
add_data = {'vol-id': vol_id, 'cg-id': cg_id}
self.req('consistency-group-volumes', 'POST', add_data, ver='v2')
def get_initiator(self, port_address):
inits = self.req('initiators',
data={'filter': 'port-address:eq:' + port_address,
'full': 1})['initiators']
if len(inits) == 1:
return inits[0]
else:
pass
class XtremIOVolumeDriver(san.SanDriver):
"""Executes commands relating to Volumes."""
VERSION = '1.0.7'
driver_name = 'XtremIO'
MIN_XMS_VERSION = [3, 0, 0]
def __init__(self, *args, **kwargs):
super(XtremIOVolumeDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(XTREMIO_OPTS)
self.protocol = None
self.backend_name = (self.configuration.safe_get('volume_backend_name')
or self.driver_name)
self.cluster_id = (self.configuration.safe_get('xtremio_cluster_name')
or '')
self.provisioning_factor = (self.configuration.
safe_get('max_over_subscription_ratio')
or DEFAULT_PROVISIONING_FACTOR)
self._stats = {}
self.client = XtremIOClient3(self.configuration, self.cluster_id)
def _obj_from_result(self, res):
typ, idx = res['links'][0]['href'].split('/')[-2:]
return self.client.req(typ, idx=int(idx))['content']
def check_for_setup_error(self):
try:
name = self.client.req('clusters')['clusters'][0]['name']
cluster = self.client.req('clusters', name=name)['content']
version_text = cluster['sys-sw-version']
except exception.NotFound:
msg = _("XtremIO not initialized correctly, no clusters found")
raise (exception.VolumeBackendAPIException
(data=msg))
ver = [int(n) for n in version_text.split('-')[0].split('.')]
if ver < self.MIN_XMS_VERSION:
msg = (_('Invalid XtremIO version %(cur)s,'
' version %(min)s or up is required') %
{'min': self.MIN_XMS_VERSION,
'cur': ver})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
LOG.info(_LI('XtremIO SW version %s'), version_text)
if ver[0] >= 4:
self.client = XtremIOClient4(self.configuration, self.cluster_id)
def create_volume(self, volume):
"Creates a volume"
data = {'vol-name': volume['id'],
'vol-size': str(volume['size']) + 'g'
}
self.client.req('volumes', 'POST', data)
if volume.get('consistencygroup_id'):
self.client.add_vol_to_cg(volume['id'],
volume['consistencygroup_id'])
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
if snapshot.get('cgsnapshot_id'):
# get array snapshot id from CG snapshot
snap_by_anc = self._get_snapset_ancestors(snapshot.cgsnapshot)
snapshot_id = snap_by_anc[snapshot['volume_id']]
else:
snapshot_id = snapshot['id']
self.client.create_snapshot(snapshot_id, volume['id'])
# add new volume to consistency group
if (volume.get('consistencygroup_id') and
                isinstance(self.client, XtremIOClient4)):
self.client.add_vol_to_cg(volume['id'],
snapshot['consistencygroup_id'])
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
vol = self.client.req('volumes', name=src_vref['id'])['content']
ctxt = context.get_admin_context()
cache = self.db.image_volume_cache_get_by_volume_id(ctxt,
src_vref['id'])
limit = self.configuration.safe_get('xtremio_volumes_per_glance_cache')
if cache and limit and limit > 0 and limit <= vol['num-of-dest-snaps']:
raise exception.CinderException('Exceeded the configured limit of '
'%d snapshots per volume' % limit)
try:
self.client.create_snapshot(src_vref['id'], volume['id'])
except exception.XtremIOSnapshotsLimitExceeded as e:
raise exception.CinderException(e.message)
        if volume.get('consistencygroup_id') and isinstance(self.client, XtremIOClient4):
self.client.add_vol_to_cg(volume['id'],
volume['consistencygroup_id'])
def delete_volume(self, volume):
"""Deletes a volume."""
try:
self.client.req('volumes', 'DELETE', name=volume['id'])
except exception.NotFound:
LOG.info(_LI("volume %s doesn't exist"), volume['id'])
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self.client.create_snapshot(snapshot.volume_id, snapshot.id, True)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
try:
self.client.req('volumes', 'DELETE', name=snapshot.id)
except exception.NotFound:
LOG.info(_LI("snapshot %s doesn't exist"), snapshot.id)
def _update_volume_stats(self):
sys = self.client.get_cluster()
physical_space = int(sys["ud-ssd-space"]) / units.Mi
used_physical_space = int(sys["ud-ssd-space-in-use"]) / units.Mi
free_physical = physical_space - used_physical_space
actual_prov = int(sys["vol-size"]) / units.Mi
self._stats = {'volume_backend_name': self.backend_name,
'vendor_name': 'EMC',
'driver_version': self.VERSION,
'storage_protocol': self.protocol,
'total_capacity_gb': physical_space,
'free_capacity_gb': (free_physical *
self.provisioning_factor),
'provisioned_capacity_gb': actual_prov,
'max_over_subscription_ratio': self.provisioning_factor,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'reserved_percentage':
self.configuration.reserved_percentage,
'QoS_support': False,
'multiattach': True,
}
self._stats.update(self.client.get_extra_capabilities())
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, run update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def manage_existing(self, volume, existing_ref):
"""Manages an existing LV."""
lv_name = existing_ref['source-name']
# Attempt to locate the volume.
try:
vol_obj = self.client.req('volumes', name=lv_name)['content']
except exception.NotFound:
kwargs = {'existing_ref': lv_name,
'reason': 'Specified logical volume does not exist.'}
raise exception.ManageExistingInvalidReference(**kwargs)
# Attempt to rename the LV to match the OpenStack internal name.
self.client.req('volumes', 'PUT', data={'vol-name': volume['id']},
idx=vol_obj['index'])
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of an existing LV for manage_existing."""
# Check that the reference is valid
if 'source-name' not in existing_ref:
reason = _('Reference must contain source-name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
lv_name = existing_ref['source-name']
# Attempt to locate the volume.
try:
vol_obj = self.client.req('volumes', name=lv_name)['content']
except exception.NotFound:
kwargs = {'existing_ref': lv_name,
'reason': 'Specified logical volume does not exist.'}
raise exception.ManageExistingInvalidReference(**kwargs)
# LV size is returned in gigabytes. Attempt to parse size as a float
# and round up to the next integer.
lv_size = int(math.ceil(int(vol_obj['vol-size']) / units.Mi))
return lv_size
def unmanage(self, volume):
"""Removes the specified volume from Cinder management."""
        # try to rename the volume to [cinder name]-unmanaged
try:
self.client.req('volumes', 'PUT', name=volume['id'],
                            data={'vol-name': volume['name'] + '-unmanaged'})
except exception.NotFound:
LOG.info(_LI("Volume with the name %s wasn't found,"
" can't unmanage"),
volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
def extend_volume(self, volume, new_size):
"""Extend an existing volume's size."""
data = {'vol-size': six.text_type(new_size) + 'g'}
try:
self.client.req('volumes', 'PUT', data, name=volume['id'])
except exception.NotFound:
msg = _("can't find the volume to extend")
raise exception.VolumeDriverException(message=msg)
def check_for_export(self, context, volume_id):
"""Make sure volume is exported."""
pass
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector"""
tg = self.client.req('target-groups', name='Default')['content']
vol = self.client.req('volumes', name=volume['id'])['content']
for ig_idx in self._get_ig_indexes_from_initiators(connector):
lm_name = '%s_%s_%s' % (six.text_type(vol['index']),
six.text_type(ig_idx),
six.text_type(tg['index']))
LOG.debug('Removing lun map %s.', lm_name)
try:
self.client.req('lun-maps', 'DELETE', name=lm_name)
except exception.NotFound:
LOG.warning(_LW("terminate_connection: lun map not found"))
def _get_password(self):
return ''.join(RANDOM.choice
(string.ascii_uppercase + string.digits)
for _ in range(12))
def create_lun_map(self, volume, ig, lun_num=None):
try:
data = {'ig-id': ig, 'vol-id': volume['id']}
if lun_num:
data['lun'] = lun_num
res = self.client.req('lun-maps', 'POST', data)
lunmap = self._obj_from_result(res)
LOG.info(_LI('Created lun-map:\n%s'), lunmap)
except exception.XtremIOAlreadyMappedError:
LOG.info(_LI('Volume already mapped, retrieving %(ig)s, %(vol)s'),
{'ig': ig, 'vol': volume['id']})
lunmap = self.client.find_lunmap(ig, volume['id'])
return lunmap
def _get_ig_name(self, connector):
raise NotImplementedError()
def _get_ig_indexes_from_initiators(self, connector):
initiator_names = self._get_initiator_names(connector)
ig_indexes = set()
for initiator_name in initiator_names:
initiator = self.client.get_initiator(initiator_name)
ig_indexes.add(initiator['ig-id'][XTREMIO_OID_INDEX])
return list(ig_indexes)
def _get_initiator_names(self, connector):
raise NotImplementedError()
def create_consistencygroup(self, context, group):
"""Creates a consistency group.
:param context: the context
:param group: the group object to be created
:returns: dict -- modelUpdate = {'status': 'available'}
:raises: VolumeBackendAPIException
"""
create_data = {'consistency-group-name': group['id']}
self.client.req('consistency-groups', 'POST', data=create_data,
ver='v2')
return {'status': fields.ConsistencyGroupStatus.AVAILABLE}
def delete_consistencygroup(self, context, group, volumes):
"""Deletes a consistency group."""
self.client.req('consistency-groups', 'DELETE', name=group['id'],
ver='v2')
volumes = self.db.volume_get_all_by_group(context, group['id'])
for volume in volumes:
self.delete_volume(volume)
volume.status = 'deleted'
model_update = {'status': group['status']}
return model_update, volumes
def _get_snapset_ancestors(self, snapset_name):
snapset = self.client.req('snapshot-sets',
name=snapset_name)['content']
volume_ids = [s[XTREMIO_OID_INDEX] for s in snapset['vol-list']]
return {v['ancestor-vol-id'][XTREMIO_OID_NAME]: v['name'] for v
in self.client.req('volumes',
data={'full': 1,
'props':
'ancestor-vol-id'})['volumes']
if v['index'] in volume_ids}
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot=None, snapshots=None,
source_cg=None, source_vols=None):
"""Creates a consistencygroup from source.
:param context: the context of the caller.
:param group: the dictionary of the consistency group to be created.
:param volumes: a list of volume dictionaries in the group.
:param cgsnapshot: the dictionary of the cgsnapshot as source.
:param snapshots: a list of snapshot dictionaries in the cgsnapshot.
:param source_cg: the dictionary of a consistency group as source.
:param source_vols: a list of volume dictionaries in the source_cg.
:returns model_update, volumes_model_update
"""
if not (cgsnapshot and snapshots and not source_cg or
source_cg and source_vols and not cgsnapshot):
msg = _("create_consistencygroup_from_src only supports a "
"cgsnapshot source or a consistency group source. "
"Multiple sources cannot be used.")
raise exception.InvalidInput(msg)
if cgsnapshot:
snap_name = self._get_cgsnap_name(cgsnapshot)
snap_by_anc = self._get_snapset_ancestors(snap_name)
for volume, snapshot in zip(volumes, snapshots):
real_snap = snap_by_anc[snapshot['volume_id']]
self.create_volume_from_snapshot(volume, {'id': real_snap})
elif source_cg:
data = {'consistency-group-id': source_cg['id'],
'snapshot-set-name': group['id']}
self.client.req('snapshots', 'POST', data, ver='v2')
snap_by_anc = self._get_snapset_ancestors(group['id'])
for volume, src_vol in zip(volumes, source_vols):
snap_vol_name = snap_by_anc[src_vol['id']]
self.client.req('volumes', 'PUT', {'name': volume['id']},
name=snap_vol_name)
create_data = {'consistency-group-name': group['id'],
'vol-list': [v['id'] for v in volumes]}
self.client.req('consistency-groups', 'POST', data=create_data,
ver='v2')
return None, None
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates a consistency group.
:param context: the context of the caller.
:param group: the dictionary of the consistency group to be updated.
:param add_volumes: a list of volume dictionaries to be added.
:param remove_volumes: a list of volume dictionaries to be removed.
:returns: model_update, add_volumes_update, remove_volumes_update
"""
add_volumes = add_volumes if add_volumes else []
remove_volumes = remove_volumes if remove_volumes else []
for vol in add_volumes:
add_data = {'vol-id': vol['id'], 'cg-id': group['id']}
self.client.req('consistency-group-volumes', 'POST', add_data,
ver='v2')
for vol in remove_volumes:
remove_data = {'vol-id': vol['id'], 'cg-id': group['id']}
self.client.req('consistency-group-volumes', 'DELETE', remove_data,
name=group['id'], ver='v2')
return None, None, None
def _get_cgsnap_name(self, cgsnapshot):
return '%(cg)s%(snap)s' % {'cg': cgsnapshot['consistencygroup_id']
.replace('-', ''),
'snap': cgsnapshot['id'].replace('-', '')}
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Creates a cgsnapshot."""
data = {'consistency-group-id': cgsnapshot['consistencygroup_id'],
'snapshot-set-name': self._get_cgsnap_name(cgsnapshot)}
self.client.req('snapshots', 'POST', data, ver='v2')
snapshots = objects.SnapshotList().get_all_for_cgsnapshot(
context, cgsnapshot['id'])
for snapshot in snapshots:
snapshot.status = 'available'
model_update = {'status': 'available'}
return model_update, snapshots
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Deletes a cgsnapshot."""
self.client.req('snapshot-sets', 'DELETE',
name=self._get_cgsnap_name(cgsnapshot), ver='v2')
snapshots = objects.SnapshotList().get_all_for_cgsnapshot(
context, cgsnapshot['id'])
for snapshot in snapshots:
snapshot.status = 'deleted'
model_update = {'status': cgsnapshot.status}
return model_update, snapshots
def _get_ig(self, name):
try:
return self.client.req('initiator-groups', 'GET',
name=name)['content']
except exception.NotFound:
pass
def _create_ig(self, name):
# create an initiator group to hold the initiator
data = {'ig-name': name}
self.client.req('initiator-groups', 'POST', data)
try:
return self.client.req('initiator-groups', name=name)['content']
except exception.NotFound:
raise (exception.VolumeBackendAPIException
(data=_("Failed to create IG, %s") % name))
class XtremIOISCSIDriver(XtremIOVolumeDriver, driver.ISCSIDriver):
"""Executes commands relating to ISCSI volumes.
We make use of model provider properties as follows:
``provider_location``
if present, contains the iSCSI target information in the same
format as an ietadm discovery
i.e. '<ip>:<port>,<portal> <target IQN>'
``provider_auth``
if present, contains a space-separated triple:
'<auth method> <auth username> <auth password>'.
`CHAP` is the only auth_method in use at the moment.
"""
driver_name = 'XtremIO_ISCSI'
def __init__(self, *args, **kwargs):
super(XtremIOISCSIDriver, self).__init__(*args, **kwargs)
self.protocol = 'iSCSI'
def _add_auth(self, data, login_chap, discovery_chap):
login_passwd, discovery_passwd = None, None
if login_chap:
data['initiator-authentication-user-name'] = 'chap_user'
login_passwd = self._get_password()
data['initiator-authentication-password'] = login_passwd
if discovery_chap:
data['initiator-discovery-user-name'] = 'chap_user'
discovery_passwd = self._get_password()
data['initiator-discovery-password'] = discovery_passwd
return login_passwd, discovery_passwd
def _create_initiator(self, connector, login_chap, discovery_chap):
initiator = self._get_initiator_names(connector)[0]
# create an initiator
data = {'initiator-name': initiator,
'ig-id': initiator,
'port-address': initiator}
l, d = self._add_auth(data, login_chap, discovery_chap)
self.client.req('initiators', 'POST', data)
return l, d
def initialize_connection(self, volume, connector):
try:
sys = self.client.get_cluster()
except exception.NotFound:
msg = _("XtremIO not initialized correctly, no clusters found")
raise exception.VolumeBackendAPIException(data=msg)
login_chap = (sys.get('chap-authentication-mode', 'disabled') !=
'disabled')
discovery_chap = (sys.get('chap-discovery-mode', 'disabled') !=
'disabled')
initiator_name = self._get_initiator_names(connector)[0]
initiator = self.client.get_initiator(initiator_name)
if initiator:
login_passwd = initiator['chap-authentication-initiator-password']
discovery_passwd = initiator['chap-discovery-initiator-password']
ig = self._get_ig(initiator['ig-id'][XTREMIO_OID_NAME])
else:
ig = self._get_ig(self._get_ig_name(connector))
if not ig:
ig = self._create_ig(self._get_ig_name(connector))
(login_passwd,
discovery_passwd) = self._create_initiator(connector,
login_chap,
discovery_chap)
        # if CHAP was enabled after the initiator was created
if login_chap and not login_passwd:
LOG.info(_LI('initiator has no password while using chap,'
'adding it'))
data = {}
(login_passwd,
d_passwd) = self._add_auth(data, login_chap, discovery_chap and
not discovery_passwd)
discovery_passwd = (discovery_passwd if discovery_passwd
else d_passwd)
self.client.req('initiators', 'PUT', data, idx=initiator['index'])
        # lun mapping
lunmap = self.create_lun_map(volume, ig['ig-id'][XTREMIO_OID_NAME])
properties = self._get_iscsi_properties(lunmap)
if login_chap:
properties['auth_method'] = 'CHAP'
properties['auth_username'] = 'chap_user'
properties['auth_password'] = login_passwd
if discovery_chap:
properties['discovery_auth_method'] = 'CHAP'
properties['discovery_auth_username'] = 'chap_user'
properties['discovery_auth_password'] = discovery_passwd
LOG.debug('init conn params:\n%s', properties)
return {
'driver_volume_type': 'iscsi',
'data': properties
}
def _get_iscsi_properties(self, lunmap):
"""Gets iscsi configuration.
:target_discovered: boolean indicating whether discovery was used
:target_iqn: the IQN of the iSCSI target
:target_portal: the portal of the iSCSI target
:target_lun: the lun of the iSCSI target
:volume_id: the id of the volume (currently used by xen)
:auth_method:, :auth_username:, :auth_password:
the authentication details. Right now, either auth_method is not
present meaning no authentication, or auth_method == `CHAP`
meaning use CHAP with the specified credentials.
        For multiple connections it also returns
        :target_iqns, :target_portals, :target_luns, which contain lists of
multiple values. The main portal information is also returned in
:target_iqn, :target_portal, :target_lun for backward compatibility.
"""
portals = self.client.get_iscsi_portals()
if not portals:
msg = _("XtremIO not configured correctly, no iscsi portals found")
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
portal = RANDOM.choice(portals)
portal_addr = ('%(ip)s:%(port)d' %
{'ip': portal['ip-addr'].split('/')[0],
'port': portal['ip-port']})
tg_portals = ['%(ip)s:%(port)d' % {'ip': p['ip-addr'].split('/')[0],
'port': p['ip-port']}
for p in portals]
properties = {'target_discovered': False,
'target_iqn': portal['port-address'],
'target_lun': lunmap['lun'],
'target_portal': portal_addr,
'target_iqns': [p['port-address'] for p in portals],
'target_portals': tg_portals,
'target_luns': [lunmap['lun']] * len(portals)}
return properties
def _get_initiator_names(self, connector):
return [connector['initiator']]
def _get_ig_name(self, connector):
return connector['initiator']
class XtremIOFibreChannelDriver(XtremIOVolumeDriver,
driver.FibreChannelDriver):
def __init__(self, *args, **kwargs):
super(XtremIOFibreChannelDriver, self).__init__(*args, **kwargs)
self.protocol = 'FC'
self._targets = None
def get_targets(self):
if not self._targets:
try:
target_list = self.client.req('targets')["targets"]
targets = [self.client.req('targets',
name=target['name'])['content']
for target in target_list
if '-fc' in target['name']]
self._targets = [target['port-address'].replace(':', '')
for target in targets
if target['port-state'] == 'up']
except exception.NotFound:
raise (exception.VolumeBackendAPIException
(data=_("Failed to get targets")))
return self._targets
def _get_free_lun(self, igs):
luns = []
for ig in igs:
luns.extend(lm['lun'] for lm in
self.client.req('lun-maps',
data={'full': 1, 'prop': 'lun',
'filter': 'ig-name:eq:%s' % ig})
['lun-maps'])
uniq_luns = set(luns + [0])
seq = range(len(uniq_luns) + 1)
return min(set(seq) - uniq_luns)
@fczm_utils.AddFCZone
def initialize_connection(self, volume, connector):
wwpns = self._get_initiator_names(connector)
ig_name = self._get_ig_name(connector)
i_t_map = {}
found = []
new = []
for wwpn in wwpns:
init = self.client.get_initiator(wwpn)
if init:
found.append(init)
else:
new.append(wwpn)
i_t_map[wwpn.replace(':', '')] = self.get_targets()
# get or create initiator group
if new:
ig = self._get_ig(ig_name)
if not ig:
ig = self._create_ig(ig_name)
for wwpn in new:
data = {'initiator-name': wwpn, 'ig-id': ig_name,
'port-address': wwpn}
self.client.req('initiators', 'POST', data)
igs = list(set([i['ig-id'][XTREMIO_OID_NAME] for i in found]))
if new and ig['ig-id'][XTREMIO_OID_NAME] not in igs:
igs.append(ig['ig-id'][XTREMIO_OID_NAME])
if len(igs) > 1:
lun_num = self._get_free_lun(igs)
else:
lun_num = None
for ig in igs:
lunmap = self.create_lun_map(volume, ig, lun_num)
lun_num = lunmap['lun']
return {'driver_volume_type': 'fibre_channel',
'data': {
'target_discovered': False,
'target_lun': lun_num,
'target_wwn': self.get_targets(),
'initiator_target_map': i_t_map}}
@fczm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
(super(XtremIOFibreChannelDriver, self)
.terminate_connection(volume, connector, **kwargs))
num_vols = (self.client
.num_of_mapped_volumes(self._get_ig_name(connector)))
if num_vols > 0:
data = {}
else:
i_t_map = {}
for initiator in self._get_initiator_names(connector):
i_t_map[initiator.replace(':', '')] = self.get_targets()
data = {'target_wwn': self.get_targets(),
'initiator_target_map': i_t_map}
return {'driver_volume_type': 'fibre_channel',
'data': data}
def _get_initiator_names(self, connector):
return [wwpn if ':' in wwpn else
':'.join(wwpn[i:i + 2] for i in range(0, len(wwpn), 2))
for wwpn in connector['wwpns']]
def _get_ig_name(self, connector):
return connector['host']
| [
"[email protected]"
]
| |
9a5c1a76f2d449757f4893cf7b1e2dd10c04f45d | 3406886ecbbed36bb47288a38eaab001a2b30417 | /ya_glm/solver/FistaSolver.py | 7ae58227df0224fd51239a55280bac13a3b70976 | [
"MIT"
]
| permissive | thomaskeefe/ya_glm | 8d953f7444e51dfeaa28dcd92aaf946112ebc677 | e6e1bbb915d15c530d10a4776ea848b331c99c3b | refs/heads/main | 2023-06-21T15:08:33.590892 | 2021-07-30T00:57:49 | 2021-07-30T00:57:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,563 | py | from copy import deepcopy
from ya_glm.GlmSolver import GlmSolver
from ya_glm.opt.penalty.vec import LassoPenalty, RidgePenalty, \
WithIntercept, TikhonovPenalty
from ya_glm.opt.penalty.GroupLasso import GroupLasso
from ya_glm.opt.penalty.mat_penalty import MultiTaskLasso, NuclearNorm, \
MatricizeEntrywisePen, \
MatWithIntercept
from ya_glm.opt.utils import decat_coef_inter_vec, decat_coef_inter_mat
from ya_glm.opt.fista import solve_fista
from ya_glm.opt.base import Sum, Func
from ya_glm.opt.glm_loss.get import get_glm_loss, _LOSS_FUNC_CLS2STR
from ya_glm.solver.utils import process_param_path
from ya_glm.autoassign import autoassign
class FistaSolver(GlmSolver):
"""
Solves a penalized GLM problem using the FISTA algorithm.
Parameters
----------
max_iter: int
Maximum number of iterations.
xtol: float, None
        Stopping criterion based on the max norm of successive iteration differences, i.e. stop if max(|x_new - x_prev|) < xtol.
rtol: float, None
Stopping criterion based on the relative difference of successive loss function values i.e. stop if abs(loss_new - loss_prev)/loss_new < rtol.
atol: float, None
Stopping criterion based on the absolute difference of successive loss function values i.e. stop if abs(loss_new - loss_prev) < atol.
bt_max_steps: int
Maximum number of backtracking steps to take.
bt_shrink: float
How much to shrink the step size in each backtracking step. Should lie strictly in the unit interval.
bt_grow: float, None
        (Optional) How much to grow the step size each iteration when using backtracking.
tracking_level: int
How much data to track.
"""
@autoassign
def __init__(self,
max_iter=1000,
xtol=1e-4,
rtol=None,
atol=None,
bt_max_steps=20,
bt_shrink=0.5,
bt_grow=1.1,
tracking_level=0): pass
def get_solve_kws(self):
return deepcopy(self.__dict__)
def _get_avail_losses(self):
return ['lin_reg', 'huber',
'log_reg', 'multinomial',
'poisson'] # no quantile
# TODO: compute lipchitz constant,
# possibly do stuff with tikhonov
def setup(self, X, y, loss, penalty, sample_weight=None):
pass
def solve(self, X, y, loss, penalty,
fit_intercept=True,
sample_weight=None,
coef_init=None,
intercept_init=None
):
"""
Solves a penalized GLM problem. See docs for ya_glm.GlmSolver.
"""
if loss.name not in self._get_avail_losses():
raise ValueError("{} loss not available; this solver only"
"implements {}".format(loss.name,
self._get_avail_losses()))
return solve_glm(X=X, y=y,
loss=loss,
fit_intercept=fit_intercept,
sample_weight=sample_weight,
coef_init=coef_init,
intercept_init=intercept_init,
**penalty.get_solve_kws(),
**self.get_solve_kws())
def solve_path(self, X, y, loss, penalty_seq,
fit_intercept=True,
sample_weight=None,
coef_init=None,
intercept_init=None):
"""
        Solves a sequence of penalized GLM problems using a path algorithm. See docs for ya_glm.GlmSolver.
"""
if loss.name not in self._get_avail_losses():
raise ValueError("{} loss not available; this solver only"
"implements {}".format(loss.name,
self._get_avail_losses()))
if coef_init is not None or intercept_init is not None:
# TODO do we want to allow this? perhaps warn?
raise NotImplementedError
return solve_glm_path(X=X, y=y,
loss=loss,
fit_intercept=fit_intercept,
sample_weight=sample_weight,
**penalty_seq.get_solve_kws(),
**self.get_solve_kws())
def has_path_algo(self, loss, penalty):
"""
Yes this solver has an available path algorithm!
"""
return True
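# Usage sketch (illustrative; the loss/penalty config objects come from ya_glm
# and are assumptions here, not defined in this module):
#   solver = FistaSolver(max_iter=500, xtol=1e-5)
#   coef, intercept, opt_data = solver.solve(X, y, loss=loss_cfg,
#                                            penalty=penalty_cfg,
#                                            fit_intercept=True)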
def solve_glm(X, y,
loss,
fit_intercept=True,
sample_weight=None,
lasso_pen_val=None,
lasso_weights=None,
groups=None,
multi_task=False,
nuc=False,
ridge_pen_val=None,
ridge_weights=None,
tikhonov=None,
coef_init=None,
intercept_init=None,
xtol=1e-4,
rtol=None,
atol=None,
max_iter=1000,
bt_max_steps=20,
bt_shrink=0.5,
bt_grow=1.1,
tracking_level=0):
"""
Sets up and solves a penalized GLM problem using fista.
For documentation of the GLM arguments see TODO.
    For documentation of the optimization arguments see ya_glm.opt.fista.solve_fista.
"""
#######################
# setup loss function #
#######################
is_mr = y.ndim > 1 and y.shape[1] > 1
if not isinstance(loss, Func):
# get loss function object
loss_func = get_glm_loss(X=X, y=y, loss=loss,
fit_intercept=fit_intercept,
sample_weight=sample_weight)
# precompute Lipchitz constant
loss_func.setup()
else:
loss_func = loss
if _LOSS_FUNC_CLS2STR[type(loss_func)] == 'quantile':
raise NotImplementedError("fista solver does not support quantile loss")
    # in case we passed a loss_func object, make sure fit_intercept
# agrees with loss_func
fit_intercept = loss_func.fit_intercept
#####################
# set initial value #
#####################
if coef_init is None or intercept_init is None:
init_val = loss_func.default_init()
else:
init_val = loss_func.cat_intercept_coef(intercept_init, coef_init)
#############################
# pre process penalty input #
#############################
is_already_mat_pen = False
# check
if is_mr:
# not currently supported for multi response
assert groups is None
assert tikhonov is None
else:
if nuc:
raise ValueError("Nuclear norm not applicable"
" to vector coefficients")
#################
# Lasso penalty #
#################
if lasso_pen_val is None:
lasso = None
elif groups is not None:
lasso = GroupLasso(groups=groups,
mult=lasso_pen_val, weights=lasso_weights)
elif is_mr and multi_task:
lasso = MultiTaskLasso(mult=lasso_pen_val, weights=lasso_weights)
is_already_mat_pen = True
elif is_mr and nuc:
lasso = NuclearNorm(mult=lasso_pen_val, weights=lasso_weights)
is_already_mat_pen = True
else:
lasso = LassoPenalty(mult=lasso_pen_val, weights=lasso_weights)
##############
# L2 penalty #
##############
if ridge_pen_val is None:
ridge = None
elif tikhonov is not None:
        assert ridge_weights is None  # can't have both ridge_weights and tikhonov
ridge = TikhonovPenalty(mult=ridge_pen_val, mat=tikhonov)
else:
ridge = RidgePenalty(mult=ridge_pen_val, weights=ridge_weights)
# possibly format penalties for matrix coefficients
if is_mr:
if ridge is not None:
ridge = MatricizeEntrywisePen(func=ridge)
if lasso is not None and not is_already_mat_pen:
lasso = MatricizeEntrywisePen(func=lasso)
# possibly add intercept
if fit_intercept:
if ridge is not None:
if is_mr:
ridge = MatWithIntercept(func=ridge)
else:
ridge = WithIntercept(func=ridge)
if lasso is not None:
if is_mr:
lasso = MatWithIntercept(func=lasso)
else:
lasso = WithIntercept(func=lasso)
# if there is a ridge penalty add it to the loss funcion
# TODO: do we want to do this for vanilla ridge + lasso?
# or should we put lasso and ride together
if ridge is not None:
loss_func = Sum([loss_func, ridge])
# setup step size/backtracking
if loss_func.grad_lip is not None:
# use Lipchtiz constant if it is available
step = 'lip'
backtracking = False
else:
step = 1 # TODO: perhaps smarter base step size?
backtracking = True
############################
# solve problem with FISTA #
############################
coef, out = solve_fista(smooth_func=loss_func,
init_val=init_val,
non_smooth_func=lasso,
step=step,
backtracking=backtracking,
accel=True,
restart=True,
max_iter=max_iter,
xtol=xtol,
rtol=rtol,
atol=atol,
bt_max_steps=bt_max_steps,
bt_shrink=bt_shrink,
bt_grow=bt_grow,
tracking_level=tracking_level)
# format output
if fit_intercept:
if is_mr:
coef, intercept = decat_coef_inter_mat(coef)
else:
coef, intercept = decat_coef_inter_vec(coef)
else:
intercept = None
return coef, intercept, out
def solve_glm_path(X, y, loss,
lasso_pen_seq=None, ridge_pen_seq=None,
fit_intercept=True,
sample_weight=None,
# generator=True,
check_decr=True,
**kws):
"""
Fits a GLM along a tuning parameter path using the homotopy method.
Each subproblem is solved using FISTA.
Parameters
-----------
    All arguments are the same as solve_glm except the following.
    lasso_pen_seq: None, array-like
        The L1 penalty parameter tuning sequence.
    ridge_pen_seq: None, array-like
        The L2 penalty parameter tuning sequence. If both lasso_pen_seq and ridge_pen_seq are provided they should be the same length.
    check_decr: bool
        Whether or not to check that lasso_pen_seq/ridge_pen_seq are monotonically decreasing.
Output
------
either a generator yielding
fit_out: dict
param_setting: dict
or a list whith these entries.
"""
    # TODO: possibly re-write this so we can do tikhonov precomputation stuff once
param_path = process_param_path(lasso_pen_seq=lasso_pen_seq,
ridge_pen_seq=ridge_pen_seq,
check_decr=check_decr)
# get the GLm loss object
loss_func = get_glm_loss(X=X, y=y, loss=loss,
fit_intercept=fit_intercept,
sample_weight=sample_weight)
# precompute Lipchitz constant
loss_func.setup()
# possibly get initializers
if 'coef_init' in kws:
coef = kws['coef_init']
del kws['coef_init']
else:
coef = None
if 'intercept_init' in kws:
intercept = kws['intercept_init']
del kws['intercept_init']
else:
intercept = None
# fit each path value
for params in param_path:
# Solve this path element!
coef, intercept, opt_data = solve_glm(X=X, y=y,
loss=loss_func,
coef_init=coef,
intercept_init=intercept,
**params,
**kws)
# format output
fit_out = {'coef': coef,
'intercept': intercept,
'opt_data': opt_data}
# if generator:
yield fit_out, params
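# Path usage sketch (illustrative; loss_cfg is a ya_glm loss config and the
# penalty sequence values are made up):
#   for fit_out, params in solve_glm_path(X, y, loss=loss_cfg,
#                                         lasso_pen_seq=[1.0, 0.1, 0.01]):
#       print(params, fit_out['opt_data'])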
| [
"[email protected]"
]
| |
f957d3f5869f27eab08c8095549202efedb9f536 | 34455fa21dd12b18dc4a352b85463c3db15f6bdf | /Ch02/02_04/02_05_Finish.py | cb7ceee14d6869054f1ffe62f1b16736ebcb801e | []
| no_license | CodedQuen/The-Python-3-Standard-Library | 771abd26c0c39aa925d06b204bb600104a5bdd8f | 5a9facc9fb2c1085e12fe4284fe593ef17609e13 | refs/heads/master | 2022-06-14T01:32:54.088505 | 2020-05-05T10:38:28 | 2020-05-05T10:38:28 | 261,428,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | # Statistics Module
import statistics
import math
agesData = [10, 13, 14, 12, 11, 10, 11, 10, 15]
print(statistics.mean(agesData))
print(statistics.mode(agesData))
print(statistics.median(agesData))
print(sorted(agesData))
print(statistics.variance(agesData))
print(statistics.stdev(agesData))
print(math.sqrt(statistics.variance(agesData)))
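# Expected output (approximate) for agesData above:
#   mean ~ 11.78, mode = 10, median = 11
#   variance ~ 3.44, stdev ~ 1.86 (equal to sqrt(variance))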
| [
"[email protected]"
]
| |
b5fe0395c5ac075049b045069e7b33185d734a95 | 367d2670c75d385d122bca60b9f550ca5b3888c1 | /gem5/src/cpu/InstPBTrace.py | 6f40f3a5e72b4ce051cc501b1858440b154a8243 | [
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
]
| permissive | Anish-Saxena/aqua_rowhammer_mitigation | 4f060037d50fb17707338a6edcaa0ac33c39d559 | 3fef5b6aa80c006a4bd6ed4bedd726016142a81c | refs/heads/main | 2023-04-13T05:35:20.872581 | 2023-01-05T21:10:39 | 2023-01-05T21:10:39 | 519,395,072 | 4 | 3 | Unlicense | 2023-01-05T21:10:40 | 2022-07-30T02:03:02 | C++ | UTF-8 | Python | false | false | 1,849 | py | # Copyright (c) 2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.SimObject import SimObject
from m5.params import *
from m5.objects.InstTracer import InstTracer
class InstPBTrace(InstTracer):
type = 'InstPBTrace'
cxx_class = 'Trace::InstPBTrace'
cxx_header = 'cpu/inst_pb_trace.hh'
file_name = Param.String("Instruction trace output file")
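# Illustrative config usage (assumes a CPU object in a gem5 config script and
# that its tracer parameter accepts an InstTracer subclass):
#   cpu.tracer = InstPBTrace(file_name='inst_trace.pb.gz')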
| [
"[email protected]"
]
| |
b76c6dbea5edc63bb1f6f5b25d92ee0136bb98fb | 3344f02a57ec704acd58449502925b8f1ffc564b | /app/models.py | 3748a6a8f3e0bc35adc59bd1c3d980946d372008 | [
"MIT"
]
| permissive | Jackson-coder-arch/One-minute-pitches | dcfad8f4bab22c133f15cb712df2d2f2b36a0ef0 | 2bbb9db1d709594f5465fce187e52198858bf5b6 | refs/heads/master | 2023-03-11T20:58:44.295780 | 2021-03-05T04:45:08 | 2021-03-05T04:45:08 | 342,477,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,493 | py | from . import db
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin
from . import login_manager
from datetime import datetime
class User(UserMixin,db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer,primary_key = True)
username = db.Column(db.String(255),index = True)
email = db.Column(db.String(255),unique = True,index = True)
bio = db.Column(db.String(255))
profile_pic_path = db.Column(db.String())
pass_secure = db.Column(db.String(255))
pitches = db.relationship('Pitch',backref = 'user',lazy="dynamic")
comments = db.relationship('Comment',backref = 'user',lazy="dynamic")
@property
def password(self):
raise AttributeError('You cannot read the password attribute')
@password.setter
def password(self, password):
self.pass_secure = generate_password_hash(password)
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
def verify_password(self,password):
return check_password_hash(self.pass_secure,password)
def save_user(self):
db.session.add(self)
db.session.commit()
def __repr__(self):
return f'User {self.username}'
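# Usage sketch (illustrative; assumes an application context and an
# initialized SQLAlchemy db):
#   user = User(username='alice', email='alice@example.com', password='pw')
#   user.save_user()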
class Pitch(db.Model):
__tablename__ = 'pitches'
id = db.Column(db.Integer,primary_key = True)
title = db.Column(db.String(255))
pitch_content = db.Column(db.String(255))
author = db.Column(db.String(255))
category = db.Column(db.String(255))
upvote = db.Column(db.Integer)
downvote = db.Column(db.Integer)
published_at = db.Column(db.DateTime, default = datetime.utcnow)
user_id = db.Column(db.Integer,db.ForeignKey('users.id'))
comments = db.relationship('Comment',backref = 'pitch',lazy="dynamic")
def save_pitch(self):
db.session.add(self)
db.session.commit()
def __repr__(self):
        return f'Pitch {self.title}'
class Comment(db.Model):
__tablename__ = 'comments'
id = db.Column(db.Integer,primary_key = True)
body = db.Column(db.String(255))
user_id = db.Column(db.Integer,db.ForeignKey('users.id'))
pitch_id = db.Column(db.Integer,db.ForeignKey('pitches.id'))
published_at = db.Column(db.DateTime, default = datetime.utcnow)
def save_comment(self):
db.session.add(self)
db.session.commit()
def __repr__(self):
        return f'Comment {self.body}' | [
"[email protected]"
]
| |
81514d43a8d83c10edcfafd29865f09868f39d2f | f47fe8a7d8cd87b3bfa2e172b4a9fc93e3a4abc2 | /2015/AST1/vezbovni/Ivan/drugi.py | 2b511e0abfef1e8c327b2cdf308726281e5c262f | []
| no_license | ispastlibrary/Titan | a4a7e4bb56544d28b884a336db488488e81402e0 | f60e5c6dc43876415b36ad76ab0322a1f709b14d | refs/heads/master | 2021-01-17T19:23:32.839966 | 2016-06-03T13:47:44 | 2016-06-03T13:47:44 | 60,350,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | def obim(a, b):
    O = 2*a + 2*b
    print("perimeter of a rectangle with sides", a, "and", b, "is", O)
    return O
prvi = obim(1, 2)
drugi = obim(25, 36)
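# Expected output:
#   perimeter of a rectangle with sides 1 and 2 is 6
#   perimeter of a rectangle with sides 25 and 36 is 122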
| [
"[email protected]"
]
|