code | order_type | original_example | step_ids |
---|---|---|---|
# -*- coding=utf-8 -*-

# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Default configs."""

from .base import BaseConfig
from zeus.common import ConfigSerializable


class CityscapesCommonConfig(BaseConfig):
    """Default Dataset config for Cityscapes."""

    batch_size = 1
    root_path = None
    num_parallel_batches = 64
    fixed_size = True
    train_portion = 1.0

    @classmethod
    def rules(cls):
        """Return rules for checking."""
        rules_CityscapesConfig = {"batch_size": {"type": int},
                                  "root_path": {"type": str},
                                  "num_parallel_batches": {"type": int},
                                  "fixed_size": {"type": bool}
                                  }
        return rules_CityscapesConfig


class CityscapesTrainConfig(CityscapesCommonConfig):
    """Default Dataset config for Cityscapes."""

    batch_size = 1
    list_path = 'train.txt'

    @classmethod
    def rules(cls):
        """Return rules for checking."""
        rules_CityscapesTrainConfig = {"batch_size": {"type": int},
                                       "list_path": {"type": str}
                                       }
        return rules_CityscapesTrainConfig


class CityscapesValConfig(CityscapesCommonConfig):
    """Default Dataset config for Cityscapes."""

    batch_size = 1
    list_path = 'val.txt'

    @classmethod
    def rules(cls):
        """Return rules for checking."""
        rules_CityscapesValConfig = {"batch_size": {"type": int},
                                     "list_path": {"type": str}
                                     }
        return rules_CityscapesValConfig


class CityscapesTestConfig(CityscapesCommonConfig):
    """Default Dataset config for Cityscapes."""

    batch_size = 1
    list_path = 'val.txt'

    @classmethod
    def rules(cls):
        """Return rules for checking."""
        rules_CityscapesTestConfig = {"batch_size": {"type": int},
                                      "list_path": {"type": str}
                                      }
        return rules_CityscapesTestConfig


class CityscapesConfig(ConfigSerializable):
    """Default Dataset config for Cityscapes."""

    common = CityscapesCommonConfig
    train = CityscapesTrainConfig
    val = CityscapesValConfig
    test = CityscapesTestConfig

    @classmethod
    def rules(cls):
        """Return rules for checking."""
        rules_Cityscapes = {"common": {"type": dict},
                            "train": {"type": dict},
                            "val": {"type": dict},
                            "test": {"type": dict}
                            }
        return rules_Cityscapes

    @classmethod
    def get_config(cls):
        """Get sub config."""
        return {'common': cls.common,
                'train': cls.train,
                'val': cls.val,
                'test': cls.test
                }
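
# --- Editor's note: a minimal, hypothetical usage sketch, not part of the
# original module. It relies only on the classes defined above; the name
# `_check_defaults` is illustrative.
def _check_defaults(config_cls=CityscapesConfig):
    """Cross-check each section's class attributes against its `rules()`."""
    for name, section in config_cls.get_config().items():
        for attr, rule in section.rules().items():
            value = getattr(section, attr, None)
            # `root_path` defaults to None, so unset values are skipped.
            if value is not None and not isinstance(value, rule["type"]):
                raise TypeError("{}.{} should be {}".format(name, attr, rule["type"]))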
|
normal
|
{
"blob_id": "f3da38f2c4fda0a1d54e79c2c21070f98002b88d",
"index": 3351,
"step-1": "<mask token>\n\n\nclass CityscapesTestConfig(CityscapesCommonConfig):\n <mask token>\n batch_size = 1\n list_path = 'val.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesTestConfig = {'batch_size': {'type': int},\n 'list_path': {'type': str}}\n return rules_CityscapesTestConfig\n\n\nclass CityscapesConfig(ConfigSerializable):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n common = CityscapesCommonConfig\n train = CityscapesTrainConfig\n val = CityscapesValConfig\n test = CityscapesTestConfig\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_Cityscapes = {'common': {'type': dict}, 'train': {'type':\n dict}, 'val': {'type': dict}, 'test': {'type': dict}}\n return rules_Cityscapes\n\n @classmethod\n def get_config(cls):\n \"\"\"Get sub config.\"\"\"\n return {'common': cls.common, 'train': cls.train, 'val': cls.val,\n 'test': cls.test}\n",
"step-2": "<mask token>\n\n\nclass CityscapesCommonConfig(BaseConfig):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass CityscapesTrainConfig(CityscapesCommonConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n batch_size = 1\n list_path = 'train.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesTrainConfig = {'batch_size': {'type': int},\n 'list_path': {'type': str}}\n return rules_CityscapesTrainConfig\n\n\nclass CityscapesValConfig(CityscapesCommonConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n batch_size = 1\n list_path = 'val.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesValConfig = {'batch_size': {'type': int},\n 'list_path': {'type': str}}\n return rules_CityscapesValConfig\n\n\nclass CityscapesTestConfig(CityscapesCommonConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n batch_size = 1\n list_path = 'val.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesTestConfig = {'batch_size': {'type': int},\n 'list_path': {'type': str}}\n return rules_CityscapesTestConfig\n\n\nclass CityscapesConfig(ConfigSerializable):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n common = CityscapesCommonConfig\n train = CityscapesTrainConfig\n val = CityscapesValConfig\n test = CityscapesTestConfig\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_Cityscapes = {'common': {'type': dict}, 'train': {'type':\n dict}, 'val': {'type': dict}, 'test': {'type': dict}}\n return rules_Cityscapes\n\n @classmethod\n def get_config(cls):\n \"\"\"Get sub config.\"\"\"\n return {'common': cls.common, 'train': cls.train, 'val': cls.val,\n 'test': cls.test}\n",
"step-3": "<mask token>\n\n\nclass CityscapesCommonConfig(BaseConfig):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesConfig = {'batch_size': {'type': int}, 'root_path':\n {'type': str}, 'num_parallel_batches': {'type': int},\n 'fixed_size': {'type': bool}}\n return rules_CityscapesConfig\n\n\nclass CityscapesTrainConfig(CityscapesCommonConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n batch_size = 1\n list_path = 'train.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesTrainConfig = {'batch_size': {'type': int},\n 'list_path': {'type': str}}\n return rules_CityscapesTrainConfig\n\n\nclass CityscapesValConfig(CityscapesCommonConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n batch_size = 1\n list_path = 'val.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesValConfig = {'batch_size': {'type': int},\n 'list_path': {'type': str}}\n return rules_CityscapesValConfig\n\n\nclass CityscapesTestConfig(CityscapesCommonConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n batch_size = 1\n list_path = 'val.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesTestConfig = {'batch_size': {'type': int},\n 'list_path': {'type': str}}\n return rules_CityscapesTestConfig\n\n\nclass CityscapesConfig(ConfigSerializable):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n common = CityscapesCommonConfig\n train = CityscapesTrainConfig\n val = CityscapesValConfig\n test = CityscapesTestConfig\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_Cityscapes = {'common': {'type': dict}, 'train': {'type':\n dict}, 'val': {'type': dict}, 'test': {'type': dict}}\n return rules_Cityscapes\n\n @classmethod\n def get_config(cls):\n \"\"\"Get sub config.\"\"\"\n return {'common': cls.common, 'train': cls.train, 'val': cls.val,\n 'test': cls.test}\n",
"step-4": "<mask token>\n\n\nclass CityscapesCommonConfig(BaseConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n batch_size = 1\n root_path = None\n num_parallel_batches = 64\n fixed_size = True\n train_portion = 1.0\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesConfig = {'batch_size': {'type': int}, 'root_path':\n {'type': str}, 'num_parallel_batches': {'type': int},\n 'fixed_size': {'type': bool}}\n return rules_CityscapesConfig\n\n\nclass CityscapesTrainConfig(CityscapesCommonConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n batch_size = 1\n list_path = 'train.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesTrainConfig = {'batch_size': {'type': int},\n 'list_path': {'type': str}}\n return rules_CityscapesTrainConfig\n\n\nclass CityscapesValConfig(CityscapesCommonConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n batch_size = 1\n list_path = 'val.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesValConfig = {'batch_size': {'type': int},\n 'list_path': {'type': str}}\n return rules_CityscapesValConfig\n\n\nclass CityscapesTestConfig(CityscapesCommonConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n batch_size = 1\n list_path = 'val.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesTestConfig = {'batch_size': {'type': int},\n 'list_path': {'type': str}}\n return rules_CityscapesTestConfig\n\n\nclass CityscapesConfig(ConfigSerializable):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n common = CityscapesCommonConfig\n train = CityscapesTrainConfig\n val = CityscapesValConfig\n test = CityscapesTestConfig\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_Cityscapes = {'common': {'type': dict}, 'train': {'type':\n dict}, 'val': {'type': dict}, 'test': {'type': dict}}\n return rules_Cityscapes\n\n @classmethod\n def get_config(cls):\n \"\"\"Get sub config.\"\"\"\n return {'common': cls.common, 'train': cls.train, 'val': cls.val,\n 'test': cls.test}\n",
"step-5": "# -*- coding=utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# MIT License for more details.\n\"\"\"Default configs.\"\"\"\n\nfrom .base import BaseConfig\nfrom zeus.common import ConfigSerializable\n\n\nclass CityscapesCommonConfig(BaseConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n\n batch_size = 1\n root_path = None\n num_parallel_batches = 64\n fixed_size = True\n train_portion = 1.0\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesConfig = {\"batch_size\": {\"type\": int},\n \"root_path\": {\"type\": str},\n \"num_parallel_batches\": {\"type\": int},\n \"fixed_size\": {\"type\": bool}\n }\n return rules_CityscapesConfig\n\n\nclass CityscapesTrainConfig(CityscapesCommonConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n\n batch_size = 1\n list_path = 'train.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesTrainConfig = {\"batch_size\": {\"type\": int},\n \"list_path\": {\"type\": str}\n }\n return rules_CityscapesTrainConfig\n\n\nclass CityscapesValConfig(CityscapesCommonConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n\n batch_size = 1\n list_path = 'val.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesValConfig = {\"batch_size\": {\"type\": int},\n \"list_path\": {\"type\": str}\n }\n return rules_CityscapesValConfig\n\n\nclass CityscapesTestConfig(CityscapesCommonConfig):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n\n batch_size = 1\n list_path = 'val.txt'\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_CityscapesTestConfig = {\"batch_size\": {\"type\": int},\n \"list_path\": {\"type\": str}\n }\n return rules_CityscapesTestConfig\n\n\nclass CityscapesConfig(ConfigSerializable):\n \"\"\"Default Dataset config for Cityscapes.\"\"\"\n\n common = CityscapesCommonConfig\n train = CityscapesTrainConfig\n val = CityscapesValConfig\n test = CityscapesTestConfig\n\n @classmethod\n def rules(cls):\n \"\"\"Return rules for checking.\"\"\"\n rules_Cityscapes = {\"common\": {\"type\": dict},\n \"train\": {\"type\": dict},\n \"val\": {\"type\": dict},\n \"test\": {\"type\": dict}\n }\n return rules_Cityscapes\n\n @classmethod\n def get_config(cls):\n \"\"\"Get sub config.\"\"\"\n return {'common': cls.common,\n 'train': cls.train,\n 'val': cls.val,\n 'test': cls.test\n }\n",
"step-ids": [
8,
18,
19,
21,
23
]
}
|
[
8,
18,
19,
21,
23
] |
from skimage.measure import structural_similarity as ssim
import matplotlib.pyplot as plt
import numpy as np
import cv2
import os
import pathlib
import warnings
from PIL import Image
from numpy import array

source_path = "/home/justin/Desktop/FeatureClustering/"

feature_length = len(os.listdir(source_path))
vector_data = []
recorded_lines = []
labels = []
for folder in os.listdir(source_path):
    for filename in os.listdir(source_path + folder + "/"):
        if filename != "---.png":
            linename = filename.split("-")
            linename = linename[0] + "-" + linename[1]
            if linename not in recorded_lines:
                vector = np.zeros(shape=(feature_length))
                label = 0 if "G" in filename else 1
                vector_data.append(vector)
                labels.append(label)
                recorded_lines.append(linename)
            else:
                index = recorded_lines.index(linename)
                vector_data[index][int(folder)] += 1

# print(np.c_[recorded_lines, vector_data])
np.save("/home/justin/Desktop/vector_data.npy", vector_data)
np.save("/home/justin/Desktop/label_data.npy", labels)
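
# --- Editor's note: a hypothetical follow-up, not in the original script.
# The saved arrays could be reloaded for a downstream classifier, e.g.:
#     X = np.load("/home/justin/Desktop/vector_data.npy")
#     y = np.load("/home/justin/Desktop/label_data.npy")
# Note that the loop above only increments a folder count from a line's
# second sighting onward; the first sighting just registers a zero vector.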
|
normal
|
{
"blob_id": "ff1346060141ee3504aa5ee9de3a6ec196bcc216",
"index": 3918,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor folder in os.listdir(source_path):\n for filename in os.listdir(source_path + folder + '/'):\n if filename != '---.png':\n linename = filename.split('-')\n linename = linename[0] + '-' + linename[1]\n if linename not in recorded_lines:\n vector = np.zeros(shape=feature_length)\n label = 0 if 'G' in filename else 1\n vector_data.append(vector)\n labels.append(label)\n recorded_lines.append(linename)\n else:\n index = recorded_lines.index(linename)\n vector_data[index][int(folder)] += 1\nnp.save('/home/justin/Desktop/vector_data.npy', vector_data)\nnp.save('/home/justin/Desktop/label_data.npy', labels)\n",
"step-3": "<mask token>\nsource_path = '/home/justin/Desktop/FeatureClustering/'\nfeature_length = len(os.listdir(source_path))\nvector_data = []\nrecorded_lines = []\nlabels = []\nfor folder in os.listdir(source_path):\n for filename in os.listdir(source_path + folder + '/'):\n if filename != '---.png':\n linename = filename.split('-')\n linename = linename[0] + '-' + linename[1]\n if linename not in recorded_lines:\n vector = np.zeros(shape=feature_length)\n label = 0 if 'G' in filename else 1\n vector_data.append(vector)\n labels.append(label)\n recorded_lines.append(linename)\n else:\n index = recorded_lines.index(linename)\n vector_data[index][int(folder)] += 1\nnp.save('/home/justin/Desktop/vector_data.npy', vector_data)\nnp.save('/home/justin/Desktop/label_data.npy', labels)\n",
"step-4": "from skimage.measure import structural_similarity as ssim\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport os\nimport pathlib\nimport warnings\nfrom PIL import Image\nfrom numpy import array\nsource_path = '/home/justin/Desktop/FeatureClustering/'\nfeature_length = len(os.listdir(source_path))\nvector_data = []\nrecorded_lines = []\nlabels = []\nfor folder in os.listdir(source_path):\n for filename in os.listdir(source_path + folder + '/'):\n if filename != '---.png':\n linename = filename.split('-')\n linename = linename[0] + '-' + linename[1]\n if linename not in recorded_lines:\n vector = np.zeros(shape=feature_length)\n label = 0 if 'G' in filename else 1\n vector_data.append(vector)\n labels.append(label)\n recorded_lines.append(linename)\n else:\n index = recorded_lines.index(linename)\n vector_data[index][int(folder)] += 1\nnp.save('/home/justin/Desktop/vector_data.npy', vector_data)\nnp.save('/home/justin/Desktop/label_data.npy', labels)\n",
"step-5": "from skimage.measure import structural_similarity as ssim\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport os\nimport pathlib\nimport warnings\nfrom PIL import Image\nfrom numpy import array\n\nsource_path = \"/home/justin/Desktop/FeatureClustering/\"\n\nfeature_length = len(os.listdir(source_path))\nvector_data = []\nrecorded_lines = []\nlabels =[]\nfor folder in os.listdir(source_path):\n for filename in os.listdir(source_path + folder +\"/\"):\n if(filename != \"---.png\"):\n linename = filename.split(\"-\")\n linename = linename[0]+\"-\"+linename[1]\n \n if(linename not in recorded_lines):\n vector = np.zeros(shape=(feature_length))\n label = 0 if \"G\" in filename else 1 \n vector_data.append(vector)\n labels.append(label)\n recorded_lines.append(linename)\n else:\n index = recorded_lines.index(linename)\n vector_data[index][int(folder)] += 1\n\n#print(np.c_[recorded_lines,vector_data])\nnp.save(\"/home/justin/Desktop/vector_data.npy\", vector_data)\nnp.save(\"/home/justin/Desktop/label_data.npy\", labels)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Uses python3
from decimal import Decimal


def gcd_naive(a, b):
    """Euclidean algorithm; `x` is just a loop flag cleared once b divides a."""
    x = 5
    while x > 1:
        if a % b != 0:
            c = a % b
            a = b
            b = c
        else:
            x = 1
    return b


there = input()
store = [int(token) for token in there.split()]  # compare numerically, not lexicographically
a = max(store)
b = min(store)
factor = gcd_naive(a, b)
if factor > 1:
    multiple = (Decimal(a) * Decimal(b)) / Decimal(factor)
else:
    multiple = Decimal(a * b)

print(int(multiple))
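
# --- Editor's note: a quick worked example, not in the original.
# For input "6 8": gcd_naive(8, 6) computes 8 % 6 = 2, then 6 % 2 = 0,
# so the GCD is 2 and the printed least common multiple is 6 * 8 / 2 = 24.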
|
normal
|
{
"blob_id": "c70681f5ff8d49a243b7d26164aa5430739354f4",
"index": 6936,
"step-1": "<mask token>\n\n\ndef gcd_naive(a, b):\n x = 5\n while x > 1:\n if a % b != 0:\n c = a % b\n a = b\n b = c\n else:\n x = 1\n return b\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef gcd_naive(a, b):\n x = 5\n while x > 1:\n if a % b != 0:\n c = a % b\n a = b\n b = c\n else:\n x = 1\n return b\n\n\n<mask token>\nif factor > 1:\n multiple = Decimal(a) * Decimal(b) / Decimal(factor)\nelse:\n multiple = Decimal(a * b)\nprint(int(multiple))\n",
"step-3": "<mask token>\n\n\ndef gcd_naive(a, b):\n x = 5\n while x > 1:\n if a % b != 0:\n c = a % b\n a = b\n b = c\n else:\n x = 1\n return b\n\n\nthere = input()\nstore = there.split()\na = int(max(store))\nb = int(min(store))\nfactor = gcd_naive(a, b)\nif factor > 1:\n multiple = Decimal(a) * Decimal(b) / Decimal(factor)\nelse:\n multiple = Decimal(a * b)\nprint(int(multiple))\n",
"step-4": "from decimal import Decimal\n\n\ndef gcd_naive(a, b):\n x = 5\n while x > 1:\n if a % b != 0:\n c = a % b\n a = b\n b = c\n else:\n x = 1\n return b\n\n\nthere = input()\nstore = there.split()\na = int(max(store))\nb = int(min(store))\nfactor = gcd_naive(a, b)\nif factor > 1:\n multiple = Decimal(a) * Decimal(b) / Decimal(factor)\nelse:\n multiple = Decimal(a * b)\nprint(int(multiple))\n",
"step-5": "# Uses python3\nfrom decimal import Decimal\ndef gcd_naive(a, b):\n x = 5\n while x > 1:\n if a % b != 0:\n c = a % b\n a = b\n b = c\n else:\n x = 1\n return b\n\nthere = input()\nstore = there.split()\na = int(max(store))\nb = int(min(store))\nfactor = gcd_naive(a,b)\nif factor > 1:\n multiple = (Decimal(a) * Decimal(b)) / Decimal(factor)\nelse:\n multiple = Decimal(a * b)\n\nprint(int(multiple))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/python3

###################################################
### Euler project
### zdrassvouitie @ 10/2016
###################################################

file_name = '013_largeSum_data'
tot = 0
with open(file_name, "r") as f:
    stop = 1  # loop flag; the loop actually exits via `break` on an empty line
    while stop != 0:
        line = f.readline()
        if len(line) < 1:
            break
        tot += float(line)  # float keeps only ~15-17 significant digits of each 50-digit number

print(tot)
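
# --- Editor's note: a hypothetical exact variant, not in the original.
# Project Euler 13 asks for the first ten digits of the sum, and Python's
# arbitrary-precision integers avoid float rounding:
#     with open(file_name) as f:
#         total = sum(int(line) for line in f if line.strip())
#     print(str(total)[:10])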
|
normal
|
{
"blob_id": "bcdf1c03d996520f3d4d8d12ec4ef34ea63ef3cf",
"index": 3936,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(file_name, 'r') as f:\n stop = 1\n while stop != 0:\n line = f.readline()\n if len(line) < 1:\n break\n tot += float(line)\nprint(tot)\n",
"step-3": "file_name = '013_largeSum_data'\ntot = 0\nwith open(file_name, 'r') as f:\n stop = 1\n while stop != 0:\n line = f.readline()\n if len(line) < 1:\n break\n tot += float(line)\nprint(tot)\n",
"step-4": "#!/usr/bin/python3\n\n###################################################\n### Euler project\n### zdrassvouitie @ 10/2016\n###################################################\n\nfile_name = '013_largeSum_data'\ntot = 0\nwith open(file_name, \"r\") as f:\n stop = 1\n while stop != 0:\n line = f.readline()\n if len(line) < 1:\n break\n tot += float(line)\n\nprint(tot)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from auth_passwordreset_reset import auth_passwordreset_reset
from auth_passwordreset_request import auth_passwordreset_request
from auth_register import auth_register
from data import *
import pytest


# invalid reset code
def test_auth_passwordreset_reset1():

    # create a test account
    register = auth_register("[email protected]", "Hello123", "First", "Last")

    # call password reset request
    auth_passwordreset_request("[email protected]")

    # assuming that the code from the email was "WER123",
    # this should not work as the code "ABS124" doesn't match "WER123"
    with pytest.raises(ValueError, match='Incorrect Reset Code'):
        auth_passwordreset_reset("ABS124", "SomePass")


# invalid password
def test_auth_passwordreset_reset2():

    # create a test account
    register = auth_register("[email protected]", "Hello123", "First", "Last")

    # call password reset request
    auth_passwordreset_request("[email protected]")

    # assume that the code generated was "AUW624";
    # these should not work as the new password lengths are < 5
    with pytest.raises(ValueError, match='Invalid Password Length'):
        auth_passwordreset_reset("AUW624", "")
        auth_passwordreset_reset("AUW624", "nope")  # only reached if the line above fails to raise


# valid case
def test_auth_passwordreset_reset3():

    # create a test account
    register = auth_register("[email protected]", "Hello123", "First", "Last")

    # call password reset request
    auth_passwordreset_request("[email protected]")

    # assume that the code generated was "AUW624"
    auth_passwordreset_reset("AUW624", "Valispass12")

    # test to see if the password was updated
    assert new_user_password == "Valispass12"
    # this sequence should successfully reset the password
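
# --- Editor's note, not part of the original tests: pytest.raises(..., match=...)
# passes the pattern to re.search, so a glob-style '*Incorrect Reset Code*'
# would itself raise re.error ("nothing to repeat"); a plain substring such as
# 'Incorrect Reset Code' is sufficient, as used above.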
|
normal
|
{
"blob_id": "a315d01f0fb16f0c74c447c07b76f33e6ff6427d",
"index": 9742,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_auth_passwordreset_reset1():\n register = auth_register('[email protected]', 'Hello123',\n 'First', 'Last')\n auth_passwordreset_request('[email protected]')\n with pytest.raises(ValueError, match='*Incorrect Reset Code*'):\n auth_passwordreset_reset('ABS124', 'SomePass')\n\n\n<mask token>\n\n\ndef test_auth_passwordreset_reset3():\n register = auth_register('[email protected]', 'Hello123',\n 'First', 'Last')\n auth_passwordreset_request('[email protected]')\n auth_passwordreset_reset('AUW624', 'Valispass12')\n assert new_user_password == 'Valispass12'\n",
"step-3": "<mask token>\n\n\ndef test_auth_passwordreset_reset1():\n register = auth_register('[email protected]', 'Hello123',\n 'First', 'Last')\n auth_passwordreset_request('[email protected]')\n with pytest.raises(ValueError, match='*Incorrect Reset Code*'):\n auth_passwordreset_reset('ABS124', 'SomePass')\n\n\ndef test_auth_passwordreset_reset2():\n register = auth_register('[email protected]', 'Hello123',\n 'First', 'Last')\n auth_passwordreset_request('[email protected]')\n with pytest.raises(ValueError, match='*Invalid Password Length*'):\n auth_passwordreset_reset('AUW624', '')\n auth_passwordreset_reset('AUW624', 'nope')\n\n\ndef test_auth_passwordreset_reset3():\n register = auth_register('[email protected]', 'Hello123',\n 'First', 'Last')\n auth_passwordreset_request('[email protected]')\n auth_passwordreset_reset('AUW624', 'Valispass12')\n assert new_user_password == 'Valispass12'\n",
"step-4": "from auth_passwordreset_reset import auth_passwordreset_reset\nfrom auth_register import auth_register\nfrom data import *\nimport pytest\n\n\ndef test_auth_passwordreset_reset1():\n register = auth_register('[email protected]', 'Hello123',\n 'First', 'Last')\n auth_passwordreset_request('[email protected]')\n with pytest.raises(ValueError, match='*Incorrect Reset Code*'):\n auth_passwordreset_reset('ABS124', 'SomePass')\n\n\ndef test_auth_passwordreset_reset2():\n register = auth_register('[email protected]', 'Hello123',\n 'First', 'Last')\n auth_passwordreset_request('[email protected]')\n with pytest.raises(ValueError, match='*Invalid Password Length*'):\n auth_passwordreset_reset('AUW624', '')\n auth_passwordreset_reset('AUW624', 'nope')\n\n\ndef test_auth_passwordreset_reset3():\n register = auth_register('[email protected]', 'Hello123',\n 'First', 'Last')\n auth_passwordreset_request('[email protected]')\n auth_passwordreset_reset('AUW624', 'Valispass12')\n assert new_user_password == 'Valispass12'\n",
"step-5": "from auth_passwordreset_reset import auth_passwordreset_reset\nfrom auth_register import auth_register\nfrom data import *\nimport pytest\n\n\n#invalid reset code\ndef test_auth_passwordreset_reset1():\n \n #create a test account\n register = auth_register(\"[email protected]\", \"Hello123\", \"First\", \"Last\")\n \n #call password reset request\n auth_passwordreset_request(\"[email protected]\")\n \n #assuming that the code from the email was \"WER123\"\n \n #this should not work as the code \"ABS124\" doesnt match \"WER123\"\n with pytest.raises(ValueError, match='*Incorrect Reset Code*'):\n auth_passwordreset_reset(\"ABS124\", \"SomePass\")\n \n#invalid password\ndef test_auth_passwordreset_reset2():\n\n #create a test account\n register = auth_register(\"[email protected]\", \"Hello123\", \"First\", \"Last\")\n \n #call password reset request\n auth_passwordreset_request(\"[email protected]\")\n \n #assume that the code generated was \"AUW624\"\n \n #these should not work as the new passowrd lengths are <5\n with pytest.raises(ValueError, match='*Invalid Password Length*'):\n auth_passwordreset_reset(\"AUW624\", \"\")\n auth_passwordreset_reset(\"AUW624\", \"nope\")\n \n#valid case\ndef test_auth_passwordreset_reset3():\n \n #create a test account\n register = auth_register(\"[email protected]\", \"Hello123\", \"First\", \"Last\")\n \n #call password reset request\n auth_passwordreset_request(\"[email protected]\")\n \n #assume that the code generated was \"AUW624\"\n auth_passwordreset_reset(\"AUW624\", \"Valispass12\") \n \n #test to see if password updated\n assert new_user_password == \"Valispass12\"\n #this sequence should successfully reset the password\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as tick
from statistics import mean
from tqdm import tqdm
import multiprocessing as mp

from . import model as dymod


class Filter:
    """Count incorrect vectors and filter files by their incorrect-vector count."""

    @classmethod
    def get_incorrect_vector_example(cls, file_list, example_number):
        """Return the incorrect-vector count of the first `example_number` instantaneous data files."""
        incorrect_vector_list = []
        try:
            file_list = file_list[0:example_number]
        except:
            pass
        for i, file in enumerate(tqdm(file_list)):
            total_incorrect_vector = cls.get_total_incorrect_vector(file)
            incorrect_vector_list.append(total_incorrect_vector)
        return incorrect_vector_list

    @classmethod
    def get_incorrect_vector_all(cls, file_list):
        """Return the incorrect-vector count of every instantaneous data file."""
        incorrect_vector_list = []
        for i, file in enumerate(tqdm(file_list)):
            total_incorrect_vector = cls.get_total_incorrect_vector(file)
            incorrect_vector_list.append(total_incorrect_vector)
        return incorrect_vector_list

    @classmethod
    def show_incorrect_vector_example(cls, file_list, example_number):
        """Plot the incorrect-vector count of the first `example_number` instantaneous data files."""
        incorrect_vector_list = []
        try:
            file_list = file_list[0:example_number]
        except:
            pass
        for i, file in enumerate(tqdm(file_list)):
            total_incorrect_vector = cls.get_total_incorrect_vector(file)
            incorrect_vector_list.append(total_incorrect_vector)
        incorrect_vector_mean = mean(incorrect_vector_list)

        # plot
        plt.title('incorrect vector NO. of first {} data'.format(example_number))
        plt.scatter(range(len(incorrect_vector_list)), incorrect_vector_list)
        plt.axhline(incorrect_vector_mean, color='black')
        plt.text(0, incorrect_vector_mean + 50, 'mean value = ' + str(incorrect_vector_mean))
        plt.gca().yaxis.set_minor_locator(tick.MultipleLocator(100))
        plt.grid(which='minor')
        plt.show()

    @classmethod
    def show_incorrect_vector_all(cls, file_list):
        """Plot the incorrect-vector count of every instantaneous data file."""
        incorrect_vector_list = []
        for i, file in enumerate(tqdm(file_list)):
            total_incorrect_vector = cls.get_total_incorrect_vector(file)
            incorrect_vector_list.append(total_incorrect_vector)
        incorrect_vector_mean = mean(incorrect_vector_list)

        # plot
        plt.title('incorrect vector NO. of all data')
        plt.scatter(range(len(incorrect_vector_list)), incorrect_vector_list)
        plt.axhline(incorrect_vector_mean, color='black')
        plt.text(0, incorrect_vector_mean + 50, 'mean value = ' + str(incorrect_vector_mean))
        plt.grid()
        plt.show()

    @staticmethod
    def filter_incorrect_vector(file_list, filter_value):
        """Drop from `file_list` every file whose incorrect-vector count is at least `filter_value`."""
        before = len(file_list)
        print('Filtering...')
        total_core = mp.cpu_count()
        pool = mp.Pool(total_core)
        args = [(file_list, total_core, i, filter_value) for i in range(total_core)]
        callback = pool.map(parallel_task, args)
        error_index_list = []
        for each_error_index_list in callback:
            for error_index in each_error_index_list:
                error_index_list.append(error_index)
        # delete from the back so earlier indices stay valid
        error_index_list.sort(reverse=True)
        for error_index in error_index_list:
            del file_list[error_index]
        after = len(file_list)
        print('Finish!\nFiltered data:', str(before - after) + '/' + str(before))
        return file_list

    @staticmethod
    def get_total_incorrect_vector(file):
        """Return the number of incorrect vectors in one instantaneous data file."""
        data = dymod.InstantData(file)
        status = data.get_data('Status')
        return np.sum((status == 1) | (status == 17))


def parallel_task(args):
    """Worker task for parallel filtering."""
    file_list, total_core, current_core, filter_value = args
    file_count = len(file_list)
    start = int(file_count * current_core / total_core)
    end = int(file_count * (current_core + 1) / total_core)  # exclusive bound of this worker's chunk
    header = dymod.InstantData.get_header_row(file_list[0])
    error_file_index_list = []
    text = 'filtering task ' + str(current_core + 1) + '/' + str(total_core)
    for i in tqdm(range(start, end), desc=text):
        status = pd.read_csv(file_list[i], header=header)['Status']
        if np.sum((status == 1) | (status == 17)) >= filter_value:
            error_file_index_list.append(i)
    return error_file_index_list


filtering = Filter()
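
# --- Editor's note: a hypothetical usage sketch, not part of the module.
# `file_list` is assumed to hold CSV exports readable by dymod.InstantData:
#     from glob import glob
#     files = sorted(glob('data/*.csv'))
#     filtering.show_incorrect_vector_example(files, example_number=100)
#     files = filtering.filter_incorrect_vector(files, filter_value=500)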
|
normal
|
{
"blob_id": "5d4585dc96d4ebdbc15b7382038cfea959c9a6f3",
"index": 2495,
"step-1": "<mask token>\n\n\nclass Filter:\n <mask token>\n\n @classmethod\n def get_incorrect_vector_example(cls, file_list, example_number):\n \"\"\"含まれる瞬時データの内指定した個数のデータがそれぞれ持つ誤ベクトル数\"\"\"\n incorrect_vector_list = []\n try:\n file_list = file_list[0:example_number]\n except:\n pass\n for i, file in enumerate(tqdm(file_list)):\n total_incorrect_vector = cls.get_total_incorrect_vector(file)\n incorrect_vector_list.append(total_incorrect_vector)\n return incorrect_vector_list\n <mask token>\n\n @classmethod\n def show_incorrect_vector_example(cls, file_list, example_number):\n \"\"\"含まれる瞬時データの内指定した個数のデータがそれぞれ持つ誤ベクトル数を表示する\"\"\"\n incorrect_vector_list = []\n try:\n file_list = file_list[0:example_number]\n except:\n pass\n for i, file in enumerate(tqdm(file_list)):\n total_incorrect_vector = cls.get_total_incorrect_vector(file)\n incorrect_vector_list.append(total_incorrect_vector)\n incorrect_vector_mean = mean(incorrect_vector_list)\n plt.title('incorrect vector NO. of first {} data'.format(\n example_number))\n plt.scatter(range(len(incorrect_vector_list)), incorrect_vector_list)\n plt.axhline(incorrect_vector_mean, color='black')\n plt.text(0, incorrect_vector_mean + 50, 'mean value = ' + str(\n incorrect_vector_mean))\n plt.gca().yaxis.set_minor_locator(tick.MultipleLocator(100))\n plt.grid(which='minor')\n plt.show()\n\n @classmethod\n def show_incorrect_vector_all(cls, file_list):\n \"\"\"含まれる瞬時データ全てがそれぞれ持つ誤ベクトル数を表示する\"\"\"\n incorrect_vector_list = []\n for i, file in enumerate(tqdm(file_list)):\n total_incorrect_vector = cls.get_total_incorrect_vector(file)\n incorrect_vector_list.append(total_incorrect_vector)\n incorrect_vector_mean = mean(incorrect_vector_list)\n plt.title('incorrect vector NO. of all data')\n plt.scatter(range(len(incorrect_vector_list)), incorrect_vector_list)\n plt.axhline(incorrect_vector_mean, color='black')\n plt.text(0, incorrect_vector_mean + 50, 'mean value = ' + str(\n incorrect_vector_mean))\n plt.grid()\n plt.show()\n <mask token>\n\n @staticmethod\n def get_total_incorrect_vector(file):\n \"\"\"瞬時データに含まれる誤ベクトルの数を返す\"\"\"\n data = dymod.InstantData(file)\n status = data.get_data('Status')\n return np.sum((status == 1) | (status == 17))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Filter:\n \"\"\"誤ベクトル数の確認,誤ベクトル数によるフィルタリング処理\"\"\"\n\n @classmethod\n def get_incorrect_vector_example(cls, file_list, example_number):\n \"\"\"含まれる瞬時データの内指定した個数のデータがそれぞれ持つ誤ベクトル数\"\"\"\n incorrect_vector_list = []\n try:\n file_list = file_list[0:example_number]\n except:\n pass\n for i, file in enumerate(tqdm(file_list)):\n total_incorrect_vector = cls.get_total_incorrect_vector(file)\n incorrect_vector_list.append(total_incorrect_vector)\n return incorrect_vector_list\n\n @classmethod\n def get_incorrect_vector_all(cls, file_list):\n \"\"\"含まれる瞬時データ全てがそれぞれ持つ誤ベクトル数を表示する\"\"\"\n incorrect_vector_list = []\n for i, file in enumerate(tqdm(file_list)):\n total_incorrect_vector = cls.get_total_incorrect_vector(file)\n incorrect_vector_list.append(total_incorrect_vector)\n return incorrect_vector_list\n\n @classmethod\n def show_incorrect_vector_example(cls, file_list, example_number):\n \"\"\"含まれる瞬時データの内指定した個数のデータがそれぞれ持つ誤ベクトル数を表示する\"\"\"\n incorrect_vector_list = []\n try:\n file_list = file_list[0:example_number]\n except:\n pass\n for i, file in enumerate(tqdm(file_list)):\n total_incorrect_vector = cls.get_total_incorrect_vector(file)\n incorrect_vector_list.append(total_incorrect_vector)\n incorrect_vector_mean = mean(incorrect_vector_list)\n plt.title('incorrect vector NO. of first {} data'.format(\n example_number))\n plt.scatter(range(len(incorrect_vector_list)), incorrect_vector_list)\n plt.axhline(incorrect_vector_mean, color='black')\n plt.text(0, incorrect_vector_mean + 50, 'mean value = ' + str(\n incorrect_vector_mean))\n plt.gca().yaxis.set_minor_locator(tick.MultipleLocator(100))\n plt.grid(which='minor')\n plt.show()\n\n @classmethod\n def show_incorrect_vector_all(cls, file_list):\n \"\"\"含まれる瞬時データ全てがそれぞれ持つ誤ベクトル数を表示する\"\"\"\n incorrect_vector_list = []\n for i, file in enumerate(tqdm(file_list)):\n total_incorrect_vector = cls.get_total_incorrect_vector(file)\n incorrect_vector_list.append(total_incorrect_vector)\n incorrect_vector_mean = mean(incorrect_vector_list)\n plt.title('incorrect vector NO. 
of all data')\n plt.scatter(range(len(incorrect_vector_list)), incorrect_vector_list)\n plt.axhline(incorrect_vector_mean, color='black')\n plt.text(0, incorrect_vector_mean + 50, 'mean value = ' + str(\n incorrect_vector_mean))\n plt.grid()\n plt.show()\n\n @staticmethod\n def filter_incorrect_vector(file_list, filter_value):\n \"\"\"ファイル名のリストから,誤ベクトル数がfilter_value以上のファイルの名前を除外する\"\"\"\n before = len(file_list)\n print('Filtering...')\n total_core = mp.cpu_count()\n pool = mp.Pool(total_core)\n args = [(file_list, total_core, i, filter_value) for i in range(\n total_core)]\n callback = pool.map(parallel_task, args)\n error_index_list = []\n for each_error_index_list in callback:\n for error_index in each_error_index_list:\n error_index_list.append(error_index)\n error_index_list.sort(reverse=True)\n for error_index in error_index_list:\n del file_list[error_index]\n after = len(file_list)\n print('Finish!\\nFiltered data:', str(before - after) + '/' + str(\n before))\n return file_list\n\n @staticmethod\n def get_total_incorrect_vector(file):\n \"\"\"瞬時データに含まれる誤ベクトルの数を返す\"\"\"\n data = dymod.InstantData(file)\n status = data.get_data('Status')\n return np.sum((status == 1) | (status == 17))\n\n\ndef parallel_task(args):\n \"\"\"並列計算タスク\"\"\"\n file_list, total_core, current_core, filter_value = args\n file_count = len(file_list)\n start = int(file_count * current_core / total_core)\n end = int(file_count * (current_core + 1) / total_core) - 1\n header = dymod.InstantData.get_header_row(file_list[0])\n error_file_index_list = []\n text = 'filtering task ' + str(current_core + 1) + '/' + str(total_core)\n for i in tqdm(range(start, end), desc=text):\n status = pd.read_csv(file_list[i], header=header)['Status']\n if np.sum((status == 1) | (status == 17)) >= filter_value:\n error_file_index_list.append(i)\n return error_file_index_list\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Filter:\n \"\"\"誤ベクトル数の確認,誤ベクトル数によるフィルタリング処理\"\"\"\n\n @classmethod\n def get_incorrect_vector_example(cls, file_list, example_number):\n \"\"\"含まれる瞬時データの内指定した個数のデータがそれぞれ持つ誤ベクトル数\"\"\"\n incorrect_vector_list = []\n try:\n file_list = file_list[0:example_number]\n except:\n pass\n for i, file in enumerate(tqdm(file_list)):\n total_incorrect_vector = cls.get_total_incorrect_vector(file)\n incorrect_vector_list.append(total_incorrect_vector)\n return incorrect_vector_list\n\n @classmethod\n def get_incorrect_vector_all(cls, file_list):\n \"\"\"含まれる瞬時データ全てがそれぞれ持つ誤ベクトル数を表示する\"\"\"\n incorrect_vector_list = []\n for i, file in enumerate(tqdm(file_list)):\n total_incorrect_vector = cls.get_total_incorrect_vector(file)\n incorrect_vector_list.append(total_incorrect_vector)\n return incorrect_vector_list\n\n @classmethod\n def show_incorrect_vector_example(cls, file_list, example_number):\n \"\"\"含まれる瞬時データの内指定した個数のデータがそれぞれ持つ誤ベクトル数を表示する\"\"\"\n incorrect_vector_list = []\n try:\n file_list = file_list[0:example_number]\n except:\n pass\n for i, file in enumerate(tqdm(file_list)):\n total_incorrect_vector = cls.get_total_incorrect_vector(file)\n incorrect_vector_list.append(total_incorrect_vector)\n incorrect_vector_mean = mean(incorrect_vector_list)\n plt.title('incorrect vector NO. of first {} data'.format(\n example_number))\n plt.scatter(range(len(incorrect_vector_list)), incorrect_vector_list)\n plt.axhline(incorrect_vector_mean, color='black')\n plt.text(0, incorrect_vector_mean + 50, 'mean value = ' + str(\n incorrect_vector_mean))\n plt.gca().yaxis.set_minor_locator(tick.MultipleLocator(100))\n plt.grid(which='minor')\n plt.show()\n\n @classmethod\n def show_incorrect_vector_all(cls, file_list):\n \"\"\"含まれる瞬時データ全てがそれぞれ持つ誤ベクトル数を表示する\"\"\"\n incorrect_vector_list = []\n for i, file in enumerate(tqdm(file_list)):\n total_incorrect_vector = cls.get_total_incorrect_vector(file)\n incorrect_vector_list.append(total_incorrect_vector)\n incorrect_vector_mean = mean(incorrect_vector_list)\n plt.title('incorrect vector NO. 
of all data')\n plt.scatter(range(len(incorrect_vector_list)), incorrect_vector_list)\n plt.axhline(incorrect_vector_mean, color='black')\n plt.text(0, incorrect_vector_mean + 50, 'mean value = ' + str(\n incorrect_vector_mean))\n plt.grid()\n plt.show()\n\n @staticmethod\n def filter_incorrect_vector(file_list, filter_value):\n \"\"\"ファイル名のリストから,誤ベクトル数がfilter_value以上のファイルの名前を除外する\"\"\"\n before = len(file_list)\n print('Filtering...')\n total_core = mp.cpu_count()\n pool = mp.Pool(total_core)\n args = [(file_list, total_core, i, filter_value) for i in range(\n total_core)]\n callback = pool.map(parallel_task, args)\n error_index_list = []\n for each_error_index_list in callback:\n for error_index in each_error_index_list:\n error_index_list.append(error_index)\n error_index_list.sort(reverse=True)\n for error_index in error_index_list:\n del file_list[error_index]\n after = len(file_list)\n print('Finish!\\nFiltered data:', str(before - after) + '/' + str(\n before))\n return file_list\n\n @staticmethod\n def get_total_incorrect_vector(file):\n \"\"\"瞬時データに含まれる誤ベクトルの数を返す\"\"\"\n data = dymod.InstantData(file)\n status = data.get_data('Status')\n return np.sum((status == 1) | (status == 17))\n\n\ndef parallel_task(args):\n \"\"\"並列計算タスク\"\"\"\n file_list, total_core, current_core, filter_value = args\n file_count = len(file_list)\n start = int(file_count * current_core / total_core)\n end = int(file_count * (current_core + 1) / total_core) - 1\n header = dymod.InstantData.get_header_row(file_list[0])\n error_file_index_list = []\n text = 'filtering task ' + str(current_core + 1) + '/' + str(total_core)\n for i in tqdm(range(start, end), desc=text):\n status = pd.read_csv(file_list[i], header=header)['Status']\n if np.sum((status == 1) | (status == 17)) >= filter_value:\n error_file_index_list.append(i)\n return error_file_index_list\n\n\nfiltering = Filter()\n",
"step-4": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as tick\nfrom statistics import mean\nfrom tqdm import tqdm\nimport multiprocessing as mp\nfrom . import model as dymod\n\n\nclass Filter:\n \"\"\"誤ベクトル数の確認,誤ベクトル数によるフィルタリング処理\"\"\"\n\n @classmethod\n def get_incorrect_vector_example(cls, file_list, example_number):\n \"\"\"含まれる瞬時データの内指定した個数のデータがそれぞれ持つ誤ベクトル数\"\"\"\n incorrect_vector_list = []\n try:\n file_list = file_list[0:example_number]\n except:\n pass\n for i, file in enumerate(tqdm(file_list)):\n total_incorrect_vector = cls.get_total_incorrect_vector(file)\n incorrect_vector_list.append(total_incorrect_vector)\n return incorrect_vector_list\n\n @classmethod\n def get_incorrect_vector_all(cls, file_list):\n \"\"\"含まれる瞬時データ全てがそれぞれ持つ誤ベクトル数を表示する\"\"\"\n incorrect_vector_list = []\n for i, file in enumerate(tqdm(file_list)):\n total_incorrect_vector = cls.get_total_incorrect_vector(file)\n incorrect_vector_list.append(total_incorrect_vector)\n return incorrect_vector_list\n\n @classmethod\n def show_incorrect_vector_example(cls, file_list, example_number):\n \"\"\"含まれる瞬時データの内指定した個数のデータがそれぞれ持つ誤ベクトル数を表示する\"\"\"\n incorrect_vector_list = []\n try:\n file_list = file_list[0:example_number]\n except:\n pass\n for i, file in enumerate(tqdm(file_list)):\n total_incorrect_vector = cls.get_total_incorrect_vector(file)\n incorrect_vector_list.append(total_incorrect_vector)\n incorrect_vector_mean = mean(incorrect_vector_list)\n plt.title('incorrect vector NO. of first {} data'.format(\n example_number))\n plt.scatter(range(len(incorrect_vector_list)), incorrect_vector_list)\n plt.axhline(incorrect_vector_mean, color='black')\n plt.text(0, incorrect_vector_mean + 50, 'mean value = ' + str(\n incorrect_vector_mean))\n plt.gca().yaxis.set_minor_locator(tick.MultipleLocator(100))\n plt.grid(which='minor')\n plt.show()\n\n @classmethod\n def show_incorrect_vector_all(cls, file_list):\n \"\"\"含まれる瞬時データ全てがそれぞれ持つ誤ベクトル数を表示する\"\"\"\n incorrect_vector_list = []\n for i, file in enumerate(tqdm(file_list)):\n total_incorrect_vector = cls.get_total_incorrect_vector(file)\n incorrect_vector_list.append(total_incorrect_vector)\n incorrect_vector_mean = mean(incorrect_vector_list)\n plt.title('incorrect vector NO. 
of all data')\n plt.scatter(range(len(incorrect_vector_list)), incorrect_vector_list)\n plt.axhline(incorrect_vector_mean, color='black')\n plt.text(0, incorrect_vector_mean + 50, 'mean value = ' + str(\n incorrect_vector_mean))\n plt.grid()\n plt.show()\n\n @staticmethod\n def filter_incorrect_vector(file_list, filter_value):\n \"\"\"ファイル名のリストから,誤ベクトル数がfilter_value以上のファイルの名前を除外する\"\"\"\n before = len(file_list)\n print('Filtering...')\n total_core = mp.cpu_count()\n pool = mp.Pool(total_core)\n args = [(file_list, total_core, i, filter_value) for i in range(\n total_core)]\n callback = pool.map(parallel_task, args)\n error_index_list = []\n for each_error_index_list in callback:\n for error_index in each_error_index_list:\n error_index_list.append(error_index)\n error_index_list.sort(reverse=True)\n for error_index in error_index_list:\n del file_list[error_index]\n after = len(file_list)\n print('Finish!\\nFiltered data:', str(before - after) + '/' + str(\n before))\n return file_list\n\n @staticmethod\n def get_total_incorrect_vector(file):\n \"\"\"瞬時データに含まれる誤ベクトルの数を返す\"\"\"\n data = dymod.InstantData(file)\n status = data.get_data('Status')\n return np.sum((status == 1) | (status == 17))\n\n\ndef parallel_task(args):\n \"\"\"並列計算タスク\"\"\"\n file_list, total_core, current_core, filter_value = args\n file_count = len(file_list)\n start = int(file_count * current_core / total_core)\n end = int(file_count * (current_core + 1) / total_core) - 1\n header = dymod.InstantData.get_header_row(file_list[0])\n error_file_index_list = []\n text = 'filtering task ' + str(current_core + 1) + '/' + str(total_core)\n for i in tqdm(range(start, end), desc=text):\n status = pd.read_csv(file_list[i], header=header)['Status']\n if np.sum((status == 1) | (status == 17)) >= filter_value:\n error_file_index_list.append(i)\n return error_file_index_list\n\n\nfiltering = Filter()\n",
"step-5": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as tick\nfrom statistics import mean\nfrom tqdm import tqdm\nimport multiprocessing as mp\n\nfrom . import model as dymod\n\n\nclass Filter:\n \"\"\"誤ベクトル数の確認,誤ベクトル数によるフィルタリング処理\"\"\"\n\n @classmethod\n def get_incorrect_vector_example(cls, file_list, example_number):\n \"\"\"含まれる瞬時データの内指定した個数のデータがそれぞれ持つ誤ベクトル数\"\"\"\n incorrect_vector_list = []\n try:\n file_list = file_list[0:example_number]\n except:\n pass\n for i, file in enumerate(tqdm(file_list)):\n total_incorrect_vector = cls.get_total_incorrect_vector(file)\n incorrect_vector_list.append(total_incorrect_vector)\n return incorrect_vector_list\n\n @classmethod\n def get_incorrect_vector_all(cls, file_list):\n \"\"\"含まれる瞬時データ全てがそれぞれ持つ誤ベクトル数を表示する\"\"\"\n incorrect_vector_list = []\n for i, file in enumerate(tqdm(file_list)):\n total_incorrect_vector = cls.get_total_incorrect_vector(file)\n incorrect_vector_list.append(total_incorrect_vector)\n return incorrect_vector_list\n\n @classmethod\n def show_incorrect_vector_example(cls, file_list, example_number):\n \"\"\"含まれる瞬時データの内指定した個数のデータがそれぞれ持つ誤ベクトル数を表示する\"\"\"\n incorrect_vector_list = []\n try:\n file_list = file_list[0:example_number]\n except:\n pass\n for i, file in enumerate(tqdm(file_list)):\n total_incorrect_vector = cls.get_total_incorrect_vector(file)\n incorrect_vector_list.append(total_incorrect_vector)\n incorrect_vector_mean = mean(incorrect_vector_list)\n\n # plot\n plt.title('incorrect vector NO. of first {} data'.format(example_number))\n plt.scatter(range(len(incorrect_vector_list)), incorrect_vector_list)\n plt.axhline(incorrect_vector_mean, color='black')\n plt.text(0, incorrect_vector_mean + 50, 'mean value = ' + str(incorrect_vector_mean))\n plt.gca().yaxis.set_minor_locator(tick.MultipleLocator(100))\n plt.grid(which='minor')\n plt.show()\n\n @classmethod\n def show_incorrect_vector_all(cls, file_list):\n \"\"\"含まれる瞬時データ全てがそれぞれ持つ誤ベクトル数を表示する\"\"\"\n incorrect_vector_list = []\n for i, file in enumerate(tqdm(file_list)):\n total_incorrect_vector = cls.get_total_incorrect_vector(file)\n incorrect_vector_list.append(total_incorrect_vector)\n incorrect_vector_mean = mean(incorrect_vector_list)\n\n # plot\n plt.title('incorrect vector NO. 
of all data')\n plt.scatter(range(len(incorrect_vector_list)), incorrect_vector_list)\n plt.axhline(incorrect_vector_mean, color='black')\n plt.text(0, incorrect_vector_mean + 50, 'mean value = ' + str(incorrect_vector_mean))\n plt.grid()\n plt.show()\n\n @staticmethod\n def filter_incorrect_vector(file_list, filter_value):\n \"\"\"ファイル名のリストから,誤ベクトル数がfilter_value以上のファイルの名前を除外する\"\"\"\n before = len(file_list)\n print('Filtering...')\n total_core = mp.cpu_count()\n pool = mp.Pool(total_core)\n args = [(file_list, total_core, i, filter_value) for i in range(total_core)]\n callback = pool.map(parallel_task, args)\n error_index_list = []\n for each_error_index_list in callback:\n for error_index in each_error_index_list:\n error_index_list.append(error_index)\n error_index_list.sort(reverse=True)\n for error_index in error_index_list:\n del file_list[error_index]\n after = len(file_list)\n print('Finish!\\nFiltered data:', str(before - after) + '/' + str(before))\n return file_list\n\n @staticmethod\n def get_total_incorrect_vector(file):\n \"\"\"瞬時データに含まれる誤ベクトルの数を返す\"\"\"\n data = dymod.InstantData(file)\n status = data.get_data('Status')\n return np.sum((status == 1) | (status == 17))\n\n\ndef parallel_task(args):\n \"\"\"並列計算タスク\"\"\"\n file_list, total_core, current_core, filter_value = args\n file_count = len(file_list)\n start = int(file_count * current_core / total_core)\n end = int(file_count * (current_core + 1) / total_core) - 1\n header = dymod.InstantData.get_header_row(file_list[0])\n error_file_index_list = []\n text = 'filtering task ' + str(current_core + 1) + '/' + str(total_core)\n for i in tqdm(range(start, end), desc=text):\n status = pd.read_csv(file_list[i], header=header)['Status']\n if np.sum((status == 1) | (status == 17)) >= filter_value:\n error_file_index_list.append(i)\n return error_file_index_list\n\n\nfiltering = Filter()\n",
"step-ids": [
5,
9,
10,
11,
12
]
}
|
[
5,
9,
10,
11,
12
] |
import cv2 as cv

#! THESE ARE IMAGES THAT AREN'T DOWNSIZED
#original_image_1 = cv.imread("hamburger_face.JPG")
#original_image_2 = cv.imread("hammock_reading.JPG")
#original_image_3 = cv.imread("sofa_face.JPG")
#original_image_4 = cv.imread("frisbee_team.JPG")
original_image_5 = cv.imread("mans_face.JPG")

# TO PRINT OUT ARRAY AND DIMENSIONS
# print(original_image)
# print(original_image.shape)

#grayscale_image = cv.cvtColor(original_image_1, cv.COLOR_BGR2GRAY)
#grayscale_image = cv.cvtColor(original_image_2, cv.COLOR_BGR2GRAY)
#grayscale_image = cv.cvtColor(original_image_3, cv.COLOR_BGR2GRAY)
#grayscale_image = cv.cvtColor(original_image_4, cv.COLOR_BGR2GRAY)
grayscale_image = cv.cvtColor(original_image_5, cv.COLOR_BGR2GRAY)

# TO PRINT OUT GRAYSCALE IMG
#cv.imshow("gray_img", grayscale_image)
#cv.waitKey(0)
#cv.destroyAllWindows()

face_cascade = cv.CascadeClassifier('haar_cascade_front.xml')
detected_faces = face_cascade.detectMultiScale(grayscale_image)

# PRINTS COORDINATES OF FACES
#print(detected_faces)

for face in detected_faces:
    x, y, w, h = face
    cv.rectangle(original_image_5, (x, y), (x + w, y + h), (0, 255, 0), 2)

cv.imshow("orig_img", original_image_5)
cv.waitKey(0)
cv.destroyAllWindows()
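
# --- Editor's note: a hypothetical variation, not in the original.
# detectMultiScale accepts tuning parameters that often cut false positives
# on cluttered photos:
#     detected_faces = face_cascade.detectMultiScale(
#         grayscale_image, scaleFactor=1.1, minNeighbors=5)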
|
normal
|
{
"blob_id": "d0bd08bea65878f5fccfc4affecdf53cc36179df",
"index": 6633,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor face in detected_faces:\n x, y, w, h = face\n cv.rectangle(original_image_5, (x, y), (x + w, y + h), (0, 255, 0), 2)\ncv.imshow('orig_img', original_image_5)\ncv.waitKey(0)\ncv.destroyAllWindows()\n",
"step-3": "<mask token>\noriginal_image_5 = cv.imread('mans_face.JPG')\ngrayscale_image = cv.cvtColor(original_image_5, cv.COLOR_BGR2GRAY)\nface_cascade = cv.CascadeClassifier('haar_cascade_front.xml')\ndetected_faces = face_cascade.detectMultiScale(grayscale_image)\nfor face in detected_faces:\n x, y, w, h = face\n cv.rectangle(original_image_5, (x, y), (x + w, y + h), (0, 255, 0), 2)\ncv.imshow('orig_img', original_image_5)\ncv.waitKey(0)\ncv.destroyAllWindows()\n",
"step-4": "import cv2 as cv\noriginal_image_5 = cv.imread('mans_face.JPG')\ngrayscale_image = cv.cvtColor(original_image_5, cv.COLOR_BGR2GRAY)\nface_cascade = cv.CascadeClassifier('haar_cascade_front.xml')\ndetected_faces = face_cascade.detectMultiScale(grayscale_image)\nfor face in detected_faces:\n x, y, w, h = face\n cv.rectangle(original_image_5, (x, y), (x + w, y + h), (0, 255, 0), 2)\ncv.imshow('orig_img', original_image_5)\ncv.waitKey(0)\ncv.destroyAllWindows()\n",
"step-5": "import cv2 as cv\r\n\r\n#! THESE ARE IMAGES THAT AREN'T DOWNSIZED\r\n#original_image_1 = cv.imread(\"hamburger_face.JPG\")\r\n#original_image_2 = cv.imread(\"hammock_reading.JPG\")\r\n#original_image_3 = cv.imread(\"sofa_face.JPG\")\r\n#original_image_4 = cv.imread(\"frisbee_team.JPG\")\r\noriginal_image_5 = cv.imread(\"mans_face.JPG\")\r\n\r\n# TO PRINT OUT ARRAY AND DIMENSIONS\r\n# print(original_image)\r\n# print(original_image.shape)\r\n\r\n#grayscale_image = cv.cvtColor(original_image_1, cv.COLOR_BGR2GRAY)\r\n#grayscale_image = cv.cvtColor(original_image_2, cv.COLOR_BGR2GRAY)\r\n#grayscale_image = cv.cvtColor(original_image_3, cv.COLOR_BGR2GRAY)\r\n#grayscale_image = cv.cvtColor(original_image_4, cv.COLOR_BGR2GRAY)\r\ngrayscale_image = cv.cvtColor(original_image_5, cv.COLOR_BGR2GRAY)\r\n\r\n# TO PRINT OUT GRAYSCALE IMG\r\n#cv.imshow(\"gray_img\", grayscale_image)\r\n#cv.waitKey(0)\r\n#cv.destroyAllWindows()\r\n\r\nface_cascade = cv.CascadeClassifier('haar_cascade_front.xml')\r\ndetected_faces = face_cascade.detectMultiScale(grayscale_image)\r\n\r\n# PRINTS COORDINATES OF FACES\r\n#print(detected_faces)\r\n\r\nfor face in detected_faces:\r\n x , y , w , h = face\r\n cv.rectangle(original_image_5, (x, y), (x + w , y + h ), (0 , 255 , 0), 2)\r\n\r\ncv.imshow(\"orig_img\", original_image_5)\r\ncv.waitKey(0)\r\ncv.destroyAllWindows()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
print("rap.sweeps.data_management level init")
|
normal
|
{
"blob_id": "7d138a0ad7e4d8f7047dd73ae503bdc7ae5aa065",
"index": 9801,
"step-1": "<mask token>\n",
"step-2": "print('rap.sweeps.data_management level init')\n",
"step-3": "print(\"rap.sweeps.data_management level init\")",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import unittest
import userinput


class Testing(unittest.TestCase):
    def test_creation(self):
        x = userinput.UserInput()
        self.assertNotEqual(x, None)

    def test_charset_initialization(self):
        x = userinput.UserInput()
        self.assertEqual(x.character_set, userinput.CHARACTERS)

    def test_charset_display(self):
        x = userinput.UserInput()
        self.assertEqual(str(x.character_set), str(x.display_characters()))

    def test_charset_remove(self):
        x = userinput.UserInput()
        # my favourite character :)
        x.remove_character('پ')
        self.assertNotIn('پ', x.character_set)

    def test_charset_remove_missing(self):
        x = userinput.UserInput()
        # '+' is not in the character set, so removal should raise KeyError
        try:
            x.remove_character('+')
            self.fail('expected KeyError when removing a missing character')
        except KeyError:
            pass


if __name__ == '__main__':
    unittest.main()
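
# --- Editor's note: an equivalent, more idiomatic assertion, not in the
# original, using unittest's context manager instead of try/except:
#     with self.assertRaises(KeyError):
#         x.remove_character('+')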
|
normal
|
{
"blob_id": "4745d81558130440d35d277b586572f5d3f85c06",
"index": 7366,
"step-1": "<mask token>\n\n\nclass Testing(unittest.TestCase):\n\n def test_creation(self):\n x = userinput.UserInput()\n self.assertNotEqual(x, None)\n\n def test_charset_initialization(self):\n x = userinput.UserInput()\n self.assertEqual(x.character_set, userinput.CHARACTERS)\n\n def test_charset_display(self):\n x = userinput.UserInput()\n self.assertEqual(str(x.character_set), str(x.display_characters()))\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Testing(unittest.TestCase):\n\n def test_creation(self):\n x = userinput.UserInput()\n self.assertNotEqual(x, None)\n\n def test_charset_initialization(self):\n x = userinput.UserInput()\n self.assertEqual(x.character_set, userinput.CHARACTERS)\n\n def test_charset_display(self):\n x = userinput.UserInput()\n self.assertEqual(str(x.character_set), str(x.display_characters()))\n <mask token>\n\n def test_charset_remove_missing(self):\n x = userinput.UserInput()\n try:\n x.remove_character('+')\n self.assertFalse(False)\n except KeyError:\n self.assertTrue(True)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Testing(unittest.TestCase):\n\n def test_creation(self):\n x = userinput.UserInput()\n self.assertNotEqual(x, None)\n\n def test_charset_initialization(self):\n x = userinput.UserInput()\n self.assertEqual(x.character_set, userinput.CHARACTERS)\n\n def test_charset_display(self):\n x = userinput.UserInput()\n self.assertEqual(str(x.character_set), str(x.display_characters()))\n\n def test_charset_remove(self):\n x = userinput.UserInput()\n x.remove_character('پ')\n self.assertNotIn('پ', x.character_set)\n\n def test_charset_remove_missing(self):\n x = userinput.UserInput()\n try:\n x.remove_character('+')\n self.assertFalse(False)\n except KeyError:\n self.assertTrue(True)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nimport userinput\n\n\nclass Testing(unittest.TestCase):\n\n def test_creation(self):\n x = userinput.UserInput()\n self.assertNotEqual(x, None)\n\n def test_charset_initialization(self):\n x = userinput.UserInput()\n self.assertEqual(x.character_set, userinput.CHARACTERS)\n\n def test_charset_display(self):\n x = userinput.UserInput()\n self.assertEqual(str(x.character_set), str(x.display_characters()))\n\n def test_charset_remove(self):\n x = userinput.UserInput()\n x.remove_character('پ')\n self.assertNotIn('پ', x.character_set)\n\n def test_charset_remove_missing(self):\n x = userinput.UserInput()\n try:\n x.remove_character('+')\n self.assertFalse(False)\n except KeyError:\n self.assertTrue(True)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\nimport userinput\n\n\nclass Testing(unittest.TestCase):\n def test_creation(self):\n x = userinput.UserInput()\n self.assertNotEqual(x, None)\n\n def test_charset_initialization(self):\n x = userinput.UserInput()\n self.assertEqual(x.character_set, userinput.CHARACTERS)\n\n def test_charset_display(self):\n x = userinput.UserInput()\n self.assertEqual(str(x.character_set), str(x.display_characters()))\n\n def test_charset_remove(self):\n x = userinput.UserInput()\n # my favourite character :)\n x.remove_character('پ')\n self.assertNotIn('پ', x.character_set)\n\n def test_charset_remove_missing(self):\n x = userinput.UserInput()\n # my favourite character :)\n try:\n x.remove_character('+')\n self.assertFalse(False)\n except KeyError:\n self.assertTrue(True)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
"""A simple script to create a motion plan."""
import os
import json
import logging
from logging.config import dictConfig
import argparse
import numpy as np
from opentrons_hardware.hardware_control.motion_planning import move_manager
from opentrons_hardware.hardware_control.motion_planning.types import (
AxisConstraints,
SystemConstraints,
MoveTarget,
vectorize,
Coordinates,
)
from typing import Dict, Any, List, cast
AXIS_NAMES = ["X", "Y", "Z", "A", "B", "C"]
log = logging.getLogger(__name__)
LOG_CONFIG: Dict[str, Any] = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"basic": {"format": "%(asctime)s %(name)s %(levelname)s %(message)s"}
},
"handlers": {
"stream_handler": {
"class": "logging.StreamHandler",
"formatter": "basic",
"level": logging.INFO,
},
},
"loggers": {
"": {
"handlers": ["stream_handler"],
"level": logging.DEBUG,
},
},
}
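# Expected shape of motion_params.json, inferred from the reads in main()
# below (illustrative values, not taken from a real file):
# {
#   "constraints": {"X": {...}, "Y": {...}, "Z": {...}, "A": {...}, "B": {...}, "C": {...}},
#   "origin": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
#   "target_list": [{"coordinates": [10.0, 20.0, 0.0, 0.0, 0.0, 0.0], "max_speed": 500}],
#   "iteration_limit": 10
# }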
def main() -> None:
"""Entry point."""
parser = argparse.ArgumentParser(description="Motion planning script.")
    parser.add_argument(
        "--params-file-path",
        "-p",
        type=str,
        required=False,
        default=os.path.join(os.path.dirname(__file__), "motion_params.json"),
        help="the parameter file path",
    )
    parser.add_argument(
        "--debug",
        "-d",
        action="store_true",
        help="set logging level to debug",
    )
    parser.add_argument(
        "--output",
        "-o",
        type=str,
        required=False,
        default=os.path.join(os.path.dirname(__file__), "motion_output.json"),
        help="the output file path",
    )
parser.add_argument(
"--blend-log",
"-b",
choices=["last", "all"],
required=False,
default="last",
help="output the last list or all of the blend log",
)
args = parser.parse_args()
if args.debug:
LOG_CONFIG["handlers"]["stream_handler"]["level"] = logging.DEBUG
LOG_CONFIG["loggers"][""]["level"] = logging.DEBUG
dictConfig(LOG_CONFIG)
with open(args.params_file_path, "r") as f:
params = json.load(f)
constraints: SystemConstraints[str] = {
axis: AxisConstraints.build(**params["constraints"][axis])
for axis in AXIS_NAMES
}
origin_from_file: List[float] = cast(List[float], params["origin"])
origin: Coordinates[str, np.float64] = dict(
zip(AXIS_NAMES, (np.float64(c) for c in origin_from_file))
)
target_list = [
MoveTarget.build(
dict(zip(AXIS_NAMES, target["coordinates"])), target["max_speed"]
)
for target in params["target_list"]
]
manager = move_manager.MoveManager(constraints=constraints)
_, blend_log = manager.plan_motion(
origin=origin,
target_list=target_list,
iteration_limit=params["iteration_limit"],
)
output = {
"moves": [v.to_dict() for v in blend_log[-1]],
"origin": list(vectorize(origin)),
}
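    # json.dump cannot serialize numpy scalar types, so coerce them to
    # native Python int/float before writing.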
def myconverter(obj: Any) -> Any:
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
return obj
with open(args.output, "w") as f:
json.dump(output, f, indent=2, default=myconverter)
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "b7d75c2523dba0baaf06ba270045a4a344b8156c",
"index": 3023,
"step-1": "<mask token>\n\n\ndef main() ->None:\n \"\"\"Entry point.\"\"\"\n parser = argparse.ArgumentParser(description='Motion planning script.')\n parser.add_argument('--params-file-path', '-p', type=str, required=\n False, default=os.path.join(os.path.dirname(__file__) +\n '/motion_params.json'), help='the parameter file path')\n parser.add_argument('--debug', '-d', type=bool, required=False, default\n =False, help='set logging level to debug')\n parser.add_argument('--output', '-o', type=str, required=False, default\n =os.path.join(os.path.dirname(__file__) + '/motion_output.json'),\n help='the output file path')\n parser.add_argument('--blend-log', '-b', choices=['last', 'all'],\n required=False, default='last', help=\n 'output the last list or all of the blend log')\n args = parser.parse_args()\n if args.debug:\n LOG_CONFIG['handlers']['stream_handler']['level'] = logging.DEBUG\n LOG_CONFIG['loggers']['']['level'] = logging.DEBUG\n dictConfig(LOG_CONFIG)\n with open(args.params_file_path, 'r') as f:\n params = json.load(f)\n constraints: SystemConstraints[str] = {axis: AxisConstraints.build(**\n params['constraints'][axis]) for axis in AXIS_NAMES}\n origin_from_file: List[float] = cast(List[float], params['origin'])\n origin: Coordinates[str, np.float64] = dict(zip(AXIS_NAMES, (np.float64\n (c) for c in origin_from_file)))\n target_list = [MoveTarget.build(dict(zip(AXIS_NAMES, target[\n 'coordinates'])), target['max_speed']) for target in params[\n 'target_list']]\n manager = move_manager.MoveManager(constraints=constraints)\n _, blend_log = manager.plan_motion(origin=origin, target_list=\n target_list, iteration_limit=params['iteration_limit'])\n output = {'moves': [v.to_dict() for v in blend_log[-1]], 'origin': list\n (vectorize(origin))}\n\n def myconverter(obj: Any) ->Any:\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n return obj\n with open(args.output, 'w') as f:\n json.dump(output, f, indent=2, default=myconverter)\n\n\n<mask token>\n",
"step-2": "<mask token>\nLOG_CONFIG: Dict[str, Any] = {'version': 1, 'disable_existing_loggers': \n False, 'formatters': {'basic': {'format':\n '%(asctime)s %(name)s %(levelname)s %(message)s'}}, 'handlers': {\n 'stream_handler': {'class': 'logging.StreamHandler', 'formatter':\n 'basic', 'level': logging.INFO}}, 'loggers': {'': {'handlers': [\n 'stream_handler'], 'level': logging.DEBUG}}}\n\n\ndef main() ->None:\n \"\"\"Entry point.\"\"\"\n parser = argparse.ArgumentParser(description='Motion planning script.')\n parser.add_argument('--params-file-path', '-p', type=str, required=\n False, default=os.path.join(os.path.dirname(__file__) +\n '/motion_params.json'), help='the parameter file path')\n parser.add_argument('--debug', '-d', type=bool, required=False, default\n =False, help='set logging level to debug')\n parser.add_argument('--output', '-o', type=str, required=False, default\n =os.path.join(os.path.dirname(__file__) + '/motion_output.json'),\n help='the output file path')\n parser.add_argument('--blend-log', '-b', choices=['last', 'all'],\n required=False, default='last', help=\n 'output the last list or all of the blend log')\n args = parser.parse_args()\n if args.debug:\n LOG_CONFIG['handlers']['stream_handler']['level'] = logging.DEBUG\n LOG_CONFIG['loggers']['']['level'] = logging.DEBUG\n dictConfig(LOG_CONFIG)\n with open(args.params_file_path, 'r') as f:\n params = json.load(f)\n constraints: SystemConstraints[str] = {axis: AxisConstraints.build(**\n params['constraints'][axis]) for axis in AXIS_NAMES}\n origin_from_file: List[float] = cast(List[float], params['origin'])\n origin: Coordinates[str, np.float64] = dict(zip(AXIS_NAMES, (np.float64\n (c) for c in origin_from_file)))\n target_list = [MoveTarget.build(dict(zip(AXIS_NAMES, target[\n 'coordinates'])), target['max_speed']) for target in params[\n 'target_list']]\n manager = move_manager.MoveManager(constraints=constraints)\n _, blend_log = manager.plan_motion(origin=origin, target_list=\n target_list, iteration_limit=params['iteration_limit'])\n output = {'moves': [v.to_dict() for v in blend_log[-1]], 'origin': list\n (vectorize(origin))}\n\n def myconverter(obj: Any) ->Any:\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n return obj\n with open(args.output, 'w') as f:\n json.dump(output, f, indent=2, default=myconverter)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nAXIS_NAMES = ['X', 'Y', 'Z', 'A', 'B', 'C']\nlog = logging.getLogger(__name__)\nLOG_CONFIG: Dict[str, Any] = {'version': 1, 'disable_existing_loggers': \n False, 'formatters': {'basic': {'format':\n '%(asctime)s %(name)s %(levelname)s %(message)s'}}, 'handlers': {\n 'stream_handler': {'class': 'logging.StreamHandler', 'formatter':\n 'basic', 'level': logging.INFO}}, 'loggers': {'': {'handlers': [\n 'stream_handler'], 'level': logging.DEBUG}}}\n\n\ndef main() ->None:\n \"\"\"Entry point.\"\"\"\n parser = argparse.ArgumentParser(description='Motion planning script.')\n parser.add_argument('--params-file-path', '-p', type=str, required=\n False, default=os.path.join(os.path.dirname(__file__) +\n '/motion_params.json'), help='the parameter file path')\n parser.add_argument('--debug', '-d', type=bool, required=False, default\n =False, help='set logging level to debug')\n parser.add_argument('--output', '-o', type=str, required=False, default\n =os.path.join(os.path.dirname(__file__) + '/motion_output.json'),\n help='the output file path')\n parser.add_argument('--blend-log', '-b', choices=['last', 'all'],\n required=False, default='last', help=\n 'output the last list or all of the blend log')\n args = parser.parse_args()\n if args.debug:\n LOG_CONFIG['handlers']['stream_handler']['level'] = logging.DEBUG\n LOG_CONFIG['loggers']['']['level'] = logging.DEBUG\n dictConfig(LOG_CONFIG)\n with open(args.params_file_path, 'r') as f:\n params = json.load(f)\n constraints: SystemConstraints[str] = {axis: AxisConstraints.build(**\n params['constraints'][axis]) for axis in AXIS_NAMES}\n origin_from_file: List[float] = cast(List[float], params['origin'])\n origin: Coordinates[str, np.float64] = dict(zip(AXIS_NAMES, (np.float64\n (c) for c in origin_from_file)))\n target_list = [MoveTarget.build(dict(zip(AXIS_NAMES, target[\n 'coordinates'])), target['max_speed']) for target in params[\n 'target_list']]\n manager = move_manager.MoveManager(constraints=constraints)\n _, blend_log = manager.plan_motion(origin=origin, target_list=\n target_list, iteration_limit=params['iteration_limit'])\n output = {'moves': [v.to_dict() for v in blend_log[-1]], 'origin': list\n (vectorize(origin))}\n\n def myconverter(obj: Any) ->Any:\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n return obj\n with open(args.output, 'w') as f:\n json.dump(output, f, indent=2, default=myconverter)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport os\nimport json\nimport logging\nfrom logging.config import dictConfig\nimport argparse\nimport numpy as np\nfrom opentrons_hardware.hardware_control.motion_planning import move_manager\nfrom opentrons_hardware.hardware_control.motion_planning.types import AxisConstraints, SystemConstraints, MoveTarget, vectorize, Coordinates\nfrom typing import Dict, Any, List, cast\nAXIS_NAMES = ['X', 'Y', 'Z', 'A', 'B', 'C']\nlog = logging.getLogger(__name__)\nLOG_CONFIG: Dict[str, Any] = {'version': 1, 'disable_existing_loggers': \n False, 'formatters': {'basic': {'format':\n '%(asctime)s %(name)s %(levelname)s %(message)s'}}, 'handlers': {\n 'stream_handler': {'class': 'logging.StreamHandler', 'formatter':\n 'basic', 'level': logging.INFO}}, 'loggers': {'': {'handlers': [\n 'stream_handler'], 'level': logging.DEBUG}}}\n\n\ndef main() ->None:\n \"\"\"Entry point.\"\"\"\n parser = argparse.ArgumentParser(description='Motion planning script.')\n parser.add_argument('--params-file-path', '-p', type=str, required=\n False, default=os.path.join(os.path.dirname(__file__) +\n '/motion_params.json'), help='the parameter file path')\n parser.add_argument('--debug', '-d', type=bool, required=False, default\n =False, help='set logging level to debug')\n parser.add_argument('--output', '-o', type=str, required=False, default\n =os.path.join(os.path.dirname(__file__) + '/motion_output.json'),\n help='the output file path')\n parser.add_argument('--blend-log', '-b', choices=['last', 'all'],\n required=False, default='last', help=\n 'output the last list or all of the blend log')\n args = parser.parse_args()\n if args.debug:\n LOG_CONFIG['handlers']['stream_handler']['level'] = logging.DEBUG\n LOG_CONFIG['loggers']['']['level'] = logging.DEBUG\n dictConfig(LOG_CONFIG)\n with open(args.params_file_path, 'r') as f:\n params = json.load(f)\n constraints: SystemConstraints[str] = {axis: AxisConstraints.build(**\n params['constraints'][axis]) for axis in AXIS_NAMES}\n origin_from_file: List[float] = cast(List[float], params['origin'])\n origin: Coordinates[str, np.float64] = dict(zip(AXIS_NAMES, (np.float64\n (c) for c in origin_from_file)))\n target_list = [MoveTarget.build(dict(zip(AXIS_NAMES, target[\n 'coordinates'])), target['max_speed']) for target in params[\n 'target_list']]\n manager = move_manager.MoveManager(constraints=constraints)\n _, blend_log = manager.plan_motion(origin=origin, target_list=\n target_list, iteration_limit=params['iteration_limit'])\n output = {'moves': [v.to_dict() for v in blend_log[-1]], 'origin': list\n (vectorize(origin))}\n\n def myconverter(obj: Any) ->Any:\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n return obj\n with open(args.output, 'w') as f:\n json.dump(output, f, indent=2, default=myconverter)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"A simple script to create a motion plan.\"\"\"\nimport os\nimport json\nimport logging\nfrom logging.config import dictConfig\nimport argparse\nimport numpy as np\n\nfrom opentrons_hardware.hardware_control.motion_planning import move_manager\nfrom opentrons_hardware.hardware_control.motion_planning.types import (\n AxisConstraints,\n SystemConstraints,\n MoveTarget,\n vectorize,\n Coordinates,\n)\nfrom typing import Dict, Any, List, cast\n\nAXIS_NAMES = [\"X\", \"Y\", \"Z\", \"A\", \"B\", \"C\"]\n\nlog = logging.getLogger(__name__)\n\nLOG_CONFIG: Dict[str, Any] = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"basic\": {\"format\": \"%(asctime)s %(name)s %(levelname)s %(message)s\"}\n },\n \"handlers\": {\n \"stream_handler\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"basic\",\n \"level\": logging.INFO,\n },\n },\n \"loggers\": {\n \"\": {\n \"handlers\": [\"stream_handler\"],\n \"level\": logging.DEBUG,\n },\n },\n}\n\n\ndef main() -> None:\n \"\"\"Entry point.\"\"\"\n parser = argparse.ArgumentParser(description=\"Motion planning script.\")\n parser.add_argument(\n \"--params-file-path\",\n \"-p\",\n type=str,\n required=False,\n default=os.path.join(os.path.dirname(__file__) + \"/motion_params.json\"),\n help=\"the parameter file path\",\n )\n parser.add_argument(\n \"--debug\",\n \"-d\",\n type=bool,\n required=False,\n default=False,\n help=\"set logging level to debug\",\n )\n parser.add_argument(\n \"--output\",\n \"-o\",\n type=str,\n required=False,\n default=os.path.join(os.path.dirname(__file__) + \"/motion_output.json\"),\n help=\"the output file path\",\n )\n parser.add_argument(\n \"--blend-log\",\n \"-b\",\n choices=[\"last\", \"all\"],\n required=False,\n default=\"last\",\n help=\"output the last list or all of the blend log\",\n )\n args = parser.parse_args()\n\n if args.debug:\n LOG_CONFIG[\"handlers\"][\"stream_handler\"][\"level\"] = logging.DEBUG\n LOG_CONFIG[\"loggers\"][\"\"][\"level\"] = logging.DEBUG\n dictConfig(LOG_CONFIG)\n\n with open(args.params_file_path, \"r\") as f:\n params = json.load(f)\n\n constraints: SystemConstraints[str] = {\n axis: AxisConstraints.build(**params[\"constraints\"][axis])\n for axis in AXIS_NAMES\n }\n origin_from_file: List[float] = cast(List[float], params[\"origin\"])\n origin: Coordinates[str, np.float64] = dict(\n zip(AXIS_NAMES, (np.float64(c) for c in origin_from_file))\n )\n target_list = [\n MoveTarget.build(\n dict(zip(AXIS_NAMES, target[\"coordinates\"])), target[\"max_speed\"]\n )\n for target in params[\"target_list\"]\n ]\n\n manager = move_manager.MoveManager(constraints=constraints)\n _, blend_log = manager.plan_motion(\n origin=origin,\n target_list=target_list,\n iteration_limit=params[\"iteration_limit\"],\n )\n\n output = {\n \"moves\": [v.to_dict() for v in blend_log[-1]],\n \"origin\": list(vectorize(origin)),\n }\n\n def myconverter(obj: Any) -> Any:\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n return obj\n\n with open(args.output, \"w\") as f:\n json.dump(output, f, indent=2, default=myconverter)\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-27 21:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import phonenumber_field.modelfields
class Migration(migrations.Migration):
dependencies = [
('regions', '0002_auto_20171024_1707'),
]
operations = [
migrations.AlterField(
model_name='region',
name='email',
field=models.EmailField(max_length=100, null=True, verbose_name='email'),
),
migrations.AlterField(
model_name='region',
name='governor',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='public_servants.PublicServant', verbose_name='governor'),
),
migrations.AlterField(
model_name='region',
name='phone',
field=phonenumber_field.modelfields.PhoneNumberField(max_length=128, null=True, verbose_name='phone'),
),
migrations.AlterField(
model_name='region',
name='twitter',
field=models.CharField(blank=True, max_length=50, null=True),
),
]
|
normal
|
{
"blob_id": "1330addd53c6187a41dfea6957bf47aaecca1135",
"index": 7180,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('regions', '0002_auto_20171024_1707')]\n operations = [migrations.AlterField(model_name='region', name='email',\n field=models.EmailField(max_length=100, null=True, verbose_name=\n 'email')), migrations.AlterField(model_name='region', name=\n 'governor', field=models.ForeignKey(null=True, on_delete=django.db.\n models.deletion.CASCADE, to='public_servants.PublicServant',\n verbose_name='governor')), migrations.AlterField(model_name=\n 'region', name='phone', field=phonenumber_field.modelfields.\n PhoneNumberField(max_length=128, null=True, verbose_name='phone')),\n migrations.AlterField(model_name='region', name='twitter', field=\n models.CharField(blank=True, max_length=50, null=True))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport phonenumber_field.modelfields\n\n\nclass Migration(migrations.Migration):\n dependencies = [('regions', '0002_auto_20171024_1707')]\n operations = [migrations.AlterField(model_name='region', name='email',\n field=models.EmailField(max_length=100, null=True, verbose_name=\n 'email')), migrations.AlterField(model_name='region', name=\n 'governor', field=models.ForeignKey(null=True, on_delete=django.db.\n models.deletion.CASCADE, to='public_servants.PublicServant',\n verbose_name='governor')), migrations.AlterField(model_name=\n 'region', name='phone', field=phonenumber_field.modelfields.\n PhoneNumberField(max_length=128, null=True, verbose_name='phone')),\n migrations.AlterField(model_name='region', name='twitter', field=\n models.CharField(blank=True, max_length=50, null=True))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.6 on 2017-10-27 21:59\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport phonenumber_field.modelfields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('regions', '0002_auto_20171024_1707'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='region',\n name='email',\n field=models.EmailField(max_length=100, null=True, verbose_name='email'),\n ),\n migrations.AlterField(\n model_name='region',\n name='governor',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='public_servants.PublicServant', verbose_name='governor'),\n ),\n migrations.AlterField(\n model_name='region',\n name='phone',\n field=phonenumber_field.modelfields.PhoneNumberField(max_length=128, null=True, verbose_name='phone'),\n ),\n migrations.AlterField(\n model_name='region',\n name='twitter',\n field=models.CharField(blank=True, max_length=50, null=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django import forms
from .models import File, Sample, Plate, Well, Machine, Project
class MachineForm(forms.ModelForm):
class Meta:
model = Machine
fields = ['name', 'author', 'status', 'comments']
class ProjectForm(forms.ModelForm):
class Meta:
model = Project
fields = ['name', 'author', 'collaborators', 'status', 'comments']
class FileForm(forms.ModelForm):
class Meta:
model = File
fields = ['name', 'script', 'author', 'file']
class SampleForm(forms.ModelForm):
class Meta:
model = Sample
fields = ['name', 'alias', 'sample_type', 'description', 'project',
'author', 'sequence', 'length', 'genbank', 'source_reference',
'comments', 'parent_id', 'organism', 'genus_specie', 'marker',
'application', 'strategy', 'seq_verified', 'origin_rep',
'cloning_system', 'strand', 'order_number', 'part_type',
'moclo_type', 'sub_sample_id', 'primer_id', 'end', 'direction',
'tm']
class PlateForm(forms.ModelForm):
class Meta:
model = Plate
fields = ['name', 'barcode', 'type', 'contents', 'location',
'num_cols', 'num_rows', 'num_well', 'function', 'project',
'active', 'status']
class WellForm(forms.ModelForm):
class Meta:
model = Well
fields = ['name', 'volume', 'concentration', 'plate', 'samples',
'active', 'status']
|
normal
|
{
"blob_id": "5bb894feaf9293bf70b3f831e33be555f74efde8",
"index": 6901,
"step-1": "<mask token>\n\n\nclass SampleForm(forms.ModelForm):\n\n\n class Meta:\n model = Sample\n fields = ['name', 'alias', 'sample_type', 'description', 'project',\n 'author', 'sequence', 'length', 'genbank', 'source_reference',\n 'comments', 'parent_id', 'organism', 'genus_specie', 'marker',\n 'application', 'strategy', 'seq_verified', 'origin_rep',\n 'cloning_system', 'strand', 'order_number', 'part_type',\n 'moclo_type', 'sub_sample_id', 'primer_id', 'end', 'direction',\n 'tm']\n\n\nclass PlateForm(forms.ModelForm):\n\n\n class Meta:\n model = Plate\n fields = ['name', 'barcode', 'type', 'contents', 'location',\n 'num_cols', 'num_rows', 'num_well', 'function', 'project',\n 'active', 'status']\n\n\nclass WellForm(forms.ModelForm):\n\n\n class Meta:\n model = Well\n fields = ['name', 'volume', 'concentration', 'plate', 'samples',\n 'active', 'status']\n",
"step-2": "<mask token>\n\n\nclass ProjectForm(forms.ModelForm):\n\n\n class Meta:\n model = Project\n fields = ['name', 'author', 'collaborators', 'status', 'comments']\n\n\nclass FileForm(forms.ModelForm):\n\n\n class Meta:\n model = File\n fields = ['name', 'script', 'author', 'file']\n\n\nclass SampleForm(forms.ModelForm):\n\n\n class Meta:\n model = Sample\n fields = ['name', 'alias', 'sample_type', 'description', 'project',\n 'author', 'sequence', 'length', 'genbank', 'source_reference',\n 'comments', 'parent_id', 'organism', 'genus_specie', 'marker',\n 'application', 'strategy', 'seq_verified', 'origin_rep',\n 'cloning_system', 'strand', 'order_number', 'part_type',\n 'moclo_type', 'sub_sample_id', 'primer_id', 'end', 'direction',\n 'tm']\n\n\nclass PlateForm(forms.ModelForm):\n\n\n class Meta:\n model = Plate\n fields = ['name', 'barcode', 'type', 'contents', 'location',\n 'num_cols', 'num_rows', 'num_well', 'function', 'project',\n 'active', 'status']\n\n\nclass WellForm(forms.ModelForm):\n\n\n class Meta:\n model = Well\n fields = ['name', 'volume', 'concentration', 'plate', 'samples',\n 'active', 'status']\n",
"step-3": "<mask token>\n\n\nclass MachineForm(forms.ModelForm):\n\n\n class Meta:\n model = Machine\n fields = ['name', 'author', 'status', 'comments']\n\n\nclass ProjectForm(forms.ModelForm):\n\n\n class Meta:\n model = Project\n fields = ['name', 'author', 'collaborators', 'status', 'comments']\n\n\nclass FileForm(forms.ModelForm):\n\n\n class Meta:\n model = File\n fields = ['name', 'script', 'author', 'file']\n\n\nclass SampleForm(forms.ModelForm):\n\n\n class Meta:\n model = Sample\n fields = ['name', 'alias', 'sample_type', 'description', 'project',\n 'author', 'sequence', 'length', 'genbank', 'source_reference',\n 'comments', 'parent_id', 'organism', 'genus_specie', 'marker',\n 'application', 'strategy', 'seq_verified', 'origin_rep',\n 'cloning_system', 'strand', 'order_number', 'part_type',\n 'moclo_type', 'sub_sample_id', 'primer_id', 'end', 'direction',\n 'tm']\n\n\nclass PlateForm(forms.ModelForm):\n\n\n class Meta:\n model = Plate\n fields = ['name', 'barcode', 'type', 'contents', 'location',\n 'num_cols', 'num_rows', 'num_well', 'function', 'project',\n 'active', 'status']\n\n\nclass WellForm(forms.ModelForm):\n\n\n class Meta:\n model = Well\n fields = ['name', 'volume', 'concentration', 'plate', 'samples',\n 'active', 'status']\n",
"step-4": "from django import forms\nfrom .models import File, Sample, Plate, Well, Machine, Project\n\n\nclass MachineForm(forms.ModelForm):\n\n\n class Meta:\n model = Machine\n fields = ['name', 'author', 'status', 'comments']\n\n\nclass ProjectForm(forms.ModelForm):\n\n\n class Meta:\n model = Project\n fields = ['name', 'author', 'collaborators', 'status', 'comments']\n\n\nclass FileForm(forms.ModelForm):\n\n\n class Meta:\n model = File\n fields = ['name', 'script', 'author', 'file']\n\n\nclass SampleForm(forms.ModelForm):\n\n\n class Meta:\n model = Sample\n fields = ['name', 'alias', 'sample_type', 'description', 'project',\n 'author', 'sequence', 'length', 'genbank', 'source_reference',\n 'comments', 'parent_id', 'organism', 'genus_specie', 'marker',\n 'application', 'strategy', 'seq_verified', 'origin_rep',\n 'cloning_system', 'strand', 'order_number', 'part_type',\n 'moclo_type', 'sub_sample_id', 'primer_id', 'end', 'direction',\n 'tm']\n\n\nclass PlateForm(forms.ModelForm):\n\n\n class Meta:\n model = Plate\n fields = ['name', 'barcode', 'type', 'contents', 'location',\n 'num_cols', 'num_rows', 'num_well', 'function', 'project',\n 'active', 'status']\n\n\nclass WellForm(forms.ModelForm):\n\n\n class Meta:\n model = Well\n fields = ['name', 'volume', 'concentration', 'plate', 'samples',\n 'active', 'status']\n",
"step-5": null,
"step-ids": [
3,
5,
6,
7
]
}
|
[
3,
5,
6,
7
] |
#Sample Python Code
print("Different Code!!!")
#print("Hello World!")
|
normal
|
{
"blob_id": "1e24952006afebb7bf10a83077fc4effd5cc9c58",
"index": 1301,
"step-1": "<mask token>\n",
"step-2": "print('Different Code!!!')\n",
"step-3": "#Sample Python Code\nprint(\"Different Code!!!\")\n#print(\"Hello World!\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import uvicore
from uvicore.support import module
from uvicore.typing import Dict, List
from uvicore.support.dumper import dump, dd
from uvicore.contracts import Email
@uvicore.service()
class Mail:
def __init__(self, *,
mailer: str = None,
mailer_options: Dict = None,
to: List = [],
cc: List = [],
bcc: List = [],
from_name: str = None,
from_address: str = None,
subject: str = None,
html: str = None,
text: str = None,
attachments: List = [],
) -> None:
# Get mailer and options from config
self._config = uvicore.config.app.mail.clone()
self._mailer = mailer or self._config.default
self._mailer_options = self._config.mailers[self._mailer].clone().merge(mailer_options)
# New message superdict
self._message: Email = Email()
self._message.to = to
self._message.cc = cc
self._message.bcc = bcc
self._message.from_name = from_name or self._config.from_name
self._message.from_address = from_address or self._config.from_address
self._message.subject = subject
self._message.html = html
self._message.text = text
self._message.attachments = attachments
def mailer(self, mailer: str):
self._mailer = mailer
self._mailer_options = self._config.mailers[self._mailer].clone()
return self
def mailer_options(self, options: Dict):
self._mailer_options.merge(Dict(options))
return self
def to(self, to: List):
self._message.to = to
return self
def cc(self, cc: List):
self._message.cc = cc
return self
def bcc(self, bcc: List):
self._message.bcc = bcc
return self
def from_name(self, from_name: str):
self._message.from_name = from_name
return self
def from_address(self, from_address: str):
self._message.from_address = from_address
return self
def subject(self, subject: str):
self._message.subject = subject
return self
def html(self, html: str):
self._message.html = html
return self
def text(self, text: str):
self._message.text = text
return self
def attachments(self, attachments: List):
self._message.attachments = attachments
return self
async def send(self):
# Use dynamic module based on mailer driver
driver = module.load(self._mailer_options.driver).object
await driver.send(self._message, self._mailer_options)
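# Example usage (a sketch; the recipient address is made up, and this assumes
# a default mailer is configured under uvicore's app.mail config):
#     await Mail() \
#         .to(['[email protected]']) \
#         .subject('Hello') \
#         .html('<p>Hello!</p>') \
#         .send()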
|
normal
|
{
"blob_id": "c87ede0e3c6d4cc305450f68b4cf61fb63986760",
"index": 8676,
"step-1": "<mask token>\n\n\[email protected]()\nclass Mail:\n\n def __init__(self, *, mailer: str=None, mailer_options: Dict=None, to:\n List=[], cc: List=[], bcc: List=[], from_name: str=None,\n from_address: str=None, subject: str=None, html: str=None, text:\n str=None, attachments: List=[]) ->None:\n self._config = uvicore.config.app.mail.clone()\n self._mailer = mailer or self._config.default\n self._mailer_options = self._config.mailers[self._mailer].clone(\n ).merge(mailer_options)\n self._message: Email = Email()\n self._message.to = to\n self._message.cc = cc\n self._message.bcc = bcc\n self._message.from_name = from_name or self._config.from_name\n self._message.from_address = from_address or self._config.from_address\n self._message.subject = subject\n self._message.html = html\n self._message.text = text\n self._message.attachments = attachments\n\n def mailer(self, mailer: str):\n self._mailer = mailer\n self._mailer_options = self._config.mailers[self._mailer].clone()\n return self\n <mask token>\n\n def to(self, to: List):\n self._message.to = to\n return self\n\n def cc(self, cc: List):\n self._message.cc = cc\n return self\n <mask token>\n\n def from_name(self, from_name: str):\n self._message.from_name = from_name\n return self\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n async def send(self):\n driver = module.load(self._mailer_options.driver).object\n await driver.send(self._message, self._mailer_options)\n",
"step-2": "<mask token>\n\n\[email protected]()\nclass Mail:\n\n def __init__(self, *, mailer: str=None, mailer_options: Dict=None, to:\n List=[], cc: List=[], bcc: List=[], from_name: str=None,\n from_address: str=None, subject: str=None, html: str=None, text:\n str=None, attachments: List=[]) ->None:\n self._config = uvicore.config.app.mail.clone()\n self._mailer = mailer or self._config.default\n self._mailer_options = self._config.mailers[self._mailer].clone(\n ).merge(mailer_options)\n self._message: Email = Email()\n self._message.to = to\n self._message.cc = cc\n self._message.bcc = bcc\n self._message.from_name = from_name or self._config.from_name\n self._message.from_address = from_address or self._config.from_address\n self._message.subject = subject\n self._message.html = html\n self._message.text = text\n self._message.attachments = attachments\n\n def mailer(self, mailer: str):\n self._mailer = mailer\n self._mailer_options = self._config.mailers[self._mailer].clone()\n return self\n <mask token>\n\n def to(self, to: List):\n self._message.to = to\n return self\n\n def cc(self, cc: List):\n self._message.cc = cc\n return self\n <mask token>\n\n def from_name(self, from_name: str):\n self._message.from_name = from_name\n return self\n <mask token>\n <mask token>\n\n def html(self, html: str):\n self._message.html = html\n return self\n <mask token>\n\n def attachments(self, attachments: List):\n self._message.attachments = attachments\n return self\n\n async def send(self):\n driver = module.load(self._mailer_options.driver).object\n await driver.send(self._message, self._mailer_options)\n",
"step-3": "<mask token>\n\n\[email protected]()\nclass Mail:\n\n def __init__(self, *, mailer: str=None, mailer_options: Dict=None, to:\n List=[], cc: List=[], bcc: List=[], from_name: str=None,\n from_address: str=None, subject: str=None, html: str=None, text:\n str=None, attachments: List=[]) ->None:\n self._config = uvicore.config.app.mail.clone()\n self._mailer = mailer or self._config.default\n self._mailer_options = self._config.mailers[self._mailer].clone(\n ).merge(mailer_options)\n self._message: Email = Email()\n self._message.to = to\n self._message.cc = cc\n self._message.bcc = bcc\n self._message.from_name = from_name or self._config.from_name\n self._message.from_address = from_address or self._config.from_address\n self._message.subject = subject\n self._message.html = html\n self._message.text = text\n self._message.attachments = attachments\n\n def mailer(self, mailer: str):\n self._mailer = mailer\n self._mailer_options = self._config.mailers[self._mailer].clone()\n return self\n <mask token>\n\n def to(self, to: List):\n self._message.to = to\n return self\n\n def cc(self, cc: List):\n self._message.cc = cc\n return self\n\n def bcc(self, bcc: List):\n self._message.bcc = bcc\n return self\n\n def from_name(self, from_name: str):\n self._message.from_name = from_name\n return self\n <mask token>\n <mask token>\n\n def html(self, html: str):\n self._message.html = html\n return self\n <mask token>\n\n def attachments(self, attachments: List):\n self._message.attachments = attachments\n return self\n\n async def send(self):\n driver = module.load(self._mailer_options.driver).object\n await driver.send(self._message, self._mailer_options)\n",
"step-4": "<mask token>\n\n\[email protected]()\nclass Mail:\n\n def __init__(self, *, mailer: str=None, mailer_options: Dict=None, to:\n List=[], cc: List=[], bcc: List=[], from_name: str=None,\n from_address: str=None, subject: str=None, html: str=None, text:\n str=None, attachments: List=[]) ->None:\n self._config = uvicore.config.app.mail.clone()\n self._mailer = mailer or self._config.default\n self._mailer_options = self._config.mailers[self._mailer].clone(\n ).merge(mailer_options)\n self._message: Email = Email()\n self._message.to = to\n self._message.cc = cc\n self._message.bcc = bcc\n self._message.from_name = from_name or self._config.from_name\n self._message.from_address = from_address or self._config.from_address\n self._message.subject = subject\n self._message.html = html\n self._message.text = text\n self._message.attachments = attachments\n\n def mailer(self, mailer: str):\n self._mailer = mailer\n self._mailer_options = self._config.mailers[self._mailer].clone()\n return self\n\n def mailer_options(self, options: Dict):\n self._mailer_options.merge(Dict(options))\n return self\n\n def to(self, to: List):\n self._message.to = to\n return self\n\n def cc(self, cc: List):\n self._message.cc = cc\n return self\n\n def bcc(self, bcc: List):\n self._message.bcc = bcc\n return self\n\n def from_name(self, from_name: str):\n self._message.from_name = from_name\n return self\n <mask token>\n <mask token>\n\n def html(self, html: str):\n self._message.html = html\n return self\n <mask token>\n\n def attachments(self, attachments: List):\n self._message.attachments = attachments\n return self\n\n async def send(self):\n driver = module.load(self._mailer_options.driver).object\n await driver.send(self._message, self._mailer_options)\n",
"step-5": "import uvicore\nfrom uvicore.support import module\nfrom uvicore.typing import Dict, List\nfrom uvicore.support.dumper import dump, dd\nfrom uvicore.contracts import Email\n\n\[email protected]()\nclass Mail:\n\n def __init__(self, *,\n mailer: str = None,\n mailer_options: Dict = None,\n to: List = [],\n cc: List = [],\n bcc: List = [],\n from_name: str = None,\n from_address: str = None,\n subject: str = None,\n html: str = None,\n text: str = None,\n attachments: List = [],\n ) -> None:\n # Get mailer and options from config\n self._config = uvicore.config.app.mail.clone()\n self._mailer = mailer or self._config.default\n self._mailer_options = self._config.mailers[self._mailer].clone().merge(mailer_options)\n\n # New message superdict\n self._message: Email = Email()\n self._message.to = to\n self._message.cc = cc\n self._message.bcc = bcc\n self._message.from_name = from_name or self._config.from_name\n self._message.from_address = from_address or self._config.from_address\n self._message.subject = subject\n self._message.html = html\n self._message.text = text\n self._message.attachments = attachments\n\n def mailer(self, mailer: str):\n self._mailer = mailer\n self._mailer_options = self._config.mailers[self._mailer].clone()\n return self\n\n def mailer_options(self, options: Dict):\n self._mailer_options.merge(Dict(options))\n return self\n\n def to(self, to: List):\n self._message.to = to\n return self\n\n def cc(self, cc: List):\n self._message.cc = cc\n return self\n\n def bcc(self, bcc: List):\n self._message.bcc = bcc\n return self\n\n def from_name(self, from_name: str):\n self._message.from_name = from_name\n return self\n\n def from_address(self, from_address: str):\n self._message.from_address = from_address\n return self\n\n def subject(self, subject: str):\n self._message.subject = subject\n return self\n\n def html(self, html: str):\n self._message.html = html\n return self\n\n def text(self, text: str):\n self._message.text = text\n return self\n\n def attachments(self, attachments: List):\n self._message.attachments = attachments\n return self\n\n async def send(self):\n # Use dynamic module based on mailer driver\n driver = module.load(self._mailer_options.driver).object\n await driver.send(self._message, self._mailer_options)\n",
"step-ids": [
6,
8,
9,
10,
15
]
}
|
[
6,
8,
9,
10,
15
] |
#!/usr/bin/env python
from anytree import Node, RenderTree
webtest = Node("WebappTest")
registration = Node("Registration", parent=webtest)
smsconfirm = Node("SMSconfirm", parent=registration)
login = Node("Login", parent=smsconfirm)
useruploadCV = Node("UserUploadCV", parent=login)
usermatchJD = Node("UserMatchJD", parent=useruploadCV)
bemember = Node("BeMember", parent=login)
addprj = Node("AddProject", parent=bemember)
memuploadCV = Node("MemberUploadCV", parent=addprj)
memupfollowupCV = Node("MemberFollowupCV", parent=memuploadCV)
previewCV = Node("PreviewCV", parent=memuploadCV)
addbid = Node("AddBidding", parent=addprj)
modbid = Node("ModifyBidding", parent=addbid)
addcus = Node("AddCustomer", parent=addbid)
addJD = Node("AddJD", parent=addcus)
JDmatchCV = Node("JDmatchCV", parent=addJD)
JDmatchCVMultiDB = Node("JDmatchCVMultiDB", parent=JDmatchCV)
previewMatchCV = Node("previewMatchCV", parent=JDmatchCVMultiDB)
CVraderChart = Node("CVraderChart", parent=JDmatchCVMultiDB)
from anytree.exporter import DotExporter
DotExporter(webtest).to_picture("webtest.png")
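# RenderTree is imported above but unused; it gives a quick ASCII rendering
# of the same tree without Graphviz:
#     for pre, _, node in RenderTree(webtest):
#         print(f"{pre}{node.name}")
# Note: DotExporter.to_picture() shells out to Graphviz's `dot`, which must be
# installed and on PATH for the PNG export to work.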
|
normal
|
{
"blob_id": "33ac328b2bf16380b50c58013bd0d4d888dc3952",
"index": 4693,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nDotExporter(webtest).to_picture('webtest.png')\n",
"step-3": "<mask token>\nwebtest = Node('WebappTest')\nregistration = Node('Registration', parent=webtest)\nsmsconfirm = Node('SMSconfirm', parent=registration)\nlogin = Node('Login', parent=smsconfirm)\nuseruploadCV = Node('UserUploadCV', parent=login)\nusermatchJD = Node('UserMatchJD', parent=useruploadCV)\nbemember = Node('BeMember', parent=login)\naddprj = Node('AddProject', parent=bemember)\nmemuploadCV = Node('MemberUploadCV', parent=addprj)\nmemupfollowupCV = Node('MemberFollowupCV', parent=memuploadCV)\npreviewCV = Node('PreviewCV', parent=memuploadCV)\naddbid = Node('AddBidding', parent=addprj)\nmodbid = Node('ModifyBidding', parent=addbid)\naddcus = Node('AddCustomer', parent=addbid)\naddJD = Node('AddJD', parent=addcus)\nJDmatchCV = Node('JDmatchCV', parent=addJD)\nJDmatchCVMultiDB = Node('JDmatchCVMultiDB', parent=JDmatchCV)\npreviewMatchCV = Node('previewMatchCV', parent=JDmatchCVMultiDB)\nCVraderChart = Node('CVraderChart', parent=JDmatchCVMultiDB)\n<mask token>\nDotExporter(webtest).to_picture('webtest.png')\n",
"step-4": "from anytree import Node, RenderTree\nwebtest = Node('WebappTest')\nregistration = Node('Registration', parent=webtest)\nsmsconfirm = Node('SMSconfirm', parent=registration)\nlogin = Node('Login', parent=smsconfirm)\nuseruploadCV = Node('UserUploadCV', parent=login)\nusermatchJD = Node('UserMatchJD', parent=useruploadCV)\nbemember = Node('BeMember', parent=login)\naddprj = Node('AddProject', parent=bemember)\nmemuploadCV = Node('MemberUploadCV', parent=addprj)\nmemupfollowupCV = Node('MemberFollowupCV', parent=memuploadCV)\npreviewCV = Node('PreviewCV', parent=memuploadCV)\naddbid = Node('AddBidding', parent=addprj)\nmodbid = Node('ModifyBidding', parent=addbid)\naddcus = Node('AddCustomer', parent=addbid)\naddJD = Node('AddJD', parent=addcus)\nJDmatchCV = Node('JDmatchCV', parent=addJD)\nJDmatchCVMultiDB = Node('JDmatchCVMultiDB', parent=JDmatchCV)\npreviewMatchCV = Node('previewMatchCV', parent=JDmatchCVMultiDB)\nCVraderChart = Node('CVraderChart', parent=JDmatchCVMultiDB)\nfrom anytree.exporter import DotExporter\nDotExporter(webtest).to_picture('webtest.png')\n",
"step-5": "#!/usr/bin/env python\n\nfrom anytree import Node, RenderTree\n\n\nwebtest = Node(\"WebappTest\")\nregistration = Node(\"Registration\", parent=webtest)\nsmsconfirm = Node(\"SMSconfirm\", parent=registration)\nlogin = Node(\"Login\", parent=smsconfirm)\nuseruploadCV = Node(\"UserUploadCV\", parent=login)\nusermatchJD = Node(\"UserMatchJD\", parent=useruploadCV)\nbemember = Node(\"BeMember\", parent=login)\naddprj = Node(\"AddProject\", parent=bemember)\nmemuploadCV = Node(\"MemberUploadCV\", parent=addprj)\nmemupfollowupCV = Node(\"MemberFollowupCV\", parent=memuploadCV)\npreviewCV = Node(\"PreviewCV\", parent=memuploadCV)\naddbid = Node(\"AddBidding\", parent=addprj)\nmodbid = Node(\"ModifyBidding\", parent=addbid)\naddcus = Node(\"AddCustomer\", parent=addbid)\naddJD = Node(\"AddJD\", parent=addcus)\nJDmatchCV = Node(\"JDmatchCV\", parent=addJD)\nJDmatchCVMultiDB = Node(\"JDmatchCVMultiDB\", parent=JDmatchCV)\npreviewMatchCV = Node(\"previewMatchCV\", parent=JDmatchCVMultiDB)\nCVraderChart = Node(\"CVraderChart\", parent=JDmatchCVMultiDB)\n\n\nfrom anytree.exporter import DotExporter\nDotExporter(webtest).to_picture(\"webtest.png\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.contrib import admin
from .models import Invite
class InviteAdmin(admin.ModelAdmin):
list_display = ('invitee', 'inviter', 'created_on', 'approved',
'rejected', 'used')
admin.site.register(Invite, InviteAdmin)
|
normal
|
{
"blob_id": "fcb13b087b9c967ab16b64885411cc4aae98583c",
"index": 2130,
"step-1": "<mask token>\n\n\nclass InviteAdmin(admin.ModelAdmin):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass InviteAdmin(admin.ModelAdmin):\n list_display = ('invitee', 'inviter', 'created_on', 'approved',\n 'rejected', 'used')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass InviteAdmin(admin.ModelAdmin):\n list_display = ('invitee', 'inviter', 'created_on', 'approved',\n 'rejected', 'used')\n\n\nadmin.site.register(Invite, InviteAdmin)\n",
"step-4": "from django.contrib import admin\nfrom .models import Invite\n\n\nclass InviteAdmin(admin.ModelAdmin):\n list_display = ('invitee', 'inviter', 'created_on', 'approved',\n 'rejected', 'used')\n\n\nadmin.site.register(Invite, InviteAdmin)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
from operator import itemgetter
import math
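# tf uses augmented frequency (0.5 + 0.5 * f / max_f) and idf the smoothed
# form log(1 + N / df); terms absent from the index fall back to tf = 0 and
# idf = log(N), so they contribute a zero weight.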
def get_tf_idf_map(document, max_freq, n_docs, index):
tf_idf_map = {}
for term in document:
tf = 0
idf = math.log(n_docs)
if term in index and term not in tf_idf_map:
posting_list = index[term]
freq_term = sum([post[1] for post in posting_list])
tf = 0.5 + 0.5*(freq_term/max_freq)
idf = math.log(1 + (n_docs/len(posting_list)))
if term not in tf_idf_map:
tf_idf_map[term] = tf * idf
return tf_idf_map
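# Scores how strongly a document's tf-idf terms overlap a keyword set. Note
# the denominator is sqrt(sum(w^2)) + sqrt(|keywords|) (a sum of norms),
# which differs from the textbook cosine denominator ||a|| * ||b||.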
def get_cosinus_simularity(tf_idf_map, key_words):
sum_common_terms = 0
sum_tf_idf_terms = 0
for term in tf_idf_map:
if term in key_words:
sum_common_terms += tf_idf_map[term]
sum_tf_idf_terms += math.pow(tf_idf_map[term],2)
cosinus_similarity = sum_common_terms/(math.sqrt(sum_tf_idf_terms)+math.sqrt(len(key_words)))
return cosinus_similarity
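# Ranks documents by the product of their similarity to the reference words
# and to the context words, highest first; documents with zero reference
# similarity are skipped without computing the context score.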
def get_cosinus_ranked_documents(category, tf_idf_map, reference_words, context_words):
ranked_documents = []
for document in tf_idf_map:
referens_simularity = get_cosinus_simularity(tf_idf_map[document],reference_words)
context_simularity = 0
if not referens_simularity == 0:
context_simularity = get_cosinus_simularity(tf_idf_map[document], context_words)
simularity = context_simularity*referens_simularity
if(simularity != 0):
ranked_documents.append((document,simularity))
ranked_documents = sorted(ranked_documents, key=itemgetter(1), reverse=True)
return ranked_documents
|
normal
|
{
"blob_id": "39197b3f9f85d94457584d7e488ca376e52207f1",
"index": 5832,
"step-1": "<mask token>\n\n\ndef get_cosinus_simularity(tf_idf_map, key_words):\n sum_common_terms = 0\n sum_tf_idf_terms = 0\n for term in tf_idf_map:\n if term in key_words:\n sum_common_terms += tf_idf_map[term]\n sum_tf_idf_terms += math.pow(tf_idf_map[term], 2)\n cosinus_similarity = sum_common_terms / (math.sqrt(sum_tf_idf_terms) +\n math.sqrt(len(key_words)))\n return cosinus_similarity\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_cosinus_simularity(tf_idf_map, key_words):\n sum_common_terms = 0\n sum_tf_idf_terms = 0\n for term in tf_idf_map:\n if term in key_words:\n sum_common_terms += tf_idf_map[term]\n sum_tf_idf_terms += math.pow(tf_idf_map[term], 2)\n cosinus_similarity = sum_common_terms / (math.sqrt(sum_tf_idf_terms) +\n math.sqrt(len(key_words)))\n return cosinus_similarity\n\n\ndef get_cosinus_ranked_documents(category, tf_idf_map, reference_words,\n context_words):\n ranked_documents = []\n for document in tf_idf_map:\n referens_simularity = get_cosinus_simularity(tf_idf_map[document],\n reference_words)\n context_simularity = 0\n if not referens_simularity == 0:\n context_simularity = get_cosinus_simularity(tf_idf_map[document\n ], context_words)\n simularity = context_simularity * referens_simularity\n if simularity != 0:\n ranked_documents.append((document, simularity))\n ranked_documents = sorted(ranked_documents, key=itemgetter(1), reverse=True\n )\n return ranked_documents\n",
"step-3": "<mask token>\n\n\ndef get_tf_idf_map(document, max_freq, n_docs, index):\n tf_idf_map = {}\n for term in document:\n tf = 0\n idf = math.log(n_docs)\n if term in index and term not in tf_idf_map:\n posting_list = index[term]\n freq_term = sum([post[1] for post in posting_list])\n tf = 0.5 + 0.5 * (freq_term / max_freq)\n idf = math.log(1 + n_docs / len(posting_list))\n if term not in tf_idf_map:\n tf_idf_map[term] = tf * idf\n return tf_idf_map\n\n\ndef get_cosinus_simularity(tf_idf_map, key_words):\n sum_common_terms = 0\n sum_tf_idf_terms = 0\n for term in tf_idf_map:\n if term in key_words:\n sum_common_terms += tf_idf_map[term]\n sum_tf_idf_terms += math.pow(tf_idf_map[term], 2)\n cosinus_similarity = sum_common_terms / (math.sqrt(sum_tf_idf_terms) +\n math.sqrt(len(key_words)))\n return cosinus_similarity\n\n\ndef get_cosinus_ranked_documents(category, tf_idf_map, reference_words,\n context_words):\n ranked_documents = []\n for document in tf_idf_map:\n referens_simularity = get_cosinus_simularity(tf_idf_map[document],\n reference_words)\n context_simularity = 0\n if not referens_simularity == 0:\n context_simularity = get_cosinus_simularity(tf_idf_map[document\n ], context_words)\n simularity = context_simularity * referens_simularity\n if simularity != 0:\n ranked_documents.append((document, simularity))\n ranked_documents = sorted(ranked_documents, key=itemgetter(1), reverse=True\n )\n return ranked_documents\n",
"step-4": "from operator import itemgetter\nimport math\n\n\ndef get_tf_idf_map(document, max_freq, n_docs, index):\n tf_idf_map = {}\n for term in document:\n tf = 0\n idf = math.log(n_docs)\n if term in index and term not in tf_idf_map:\n posting_list = index[term]\n freq_term = sum([post[1] for post in posting_list])\n tf = 0.5 + 0.5 * (freq_term / max_freq)\n idf = math.log(1 + n_docs / len(posting_list))\n if term not in tf_idf_map:\n tf_idf_map[term] = tf * idf\n return tf_idf_map\n\n\ndef get_cosinus_simularity(tf_idf_map, key_words):\n sum_common_terms = 0\n sum_tf_idf_terms = 0\n for term in tf_idf_map:\n if term in key_words:\n sum_common_terms += tf_idf_map[term]\n sum_tf_idf_terms += math.pow(tf_idf_map[term], 2)\n cosinus_similarity = sum_common_terms / (math.sqrt(sum_tf_idf_terms) +\n math.sqrt(len(key_words)))\n return cosinus_similarity\n\n\ndef get_cosinus_ranked_documents(category, tf_idf_map, reference_words,\n context_words):\n ranked_documents = []\n for document in tf_idf_map:\n referens_simularity = get_cosinus_simularity(tf_idf_map[document],\n reference_words)\n context_simularity = 0\n if not referens_simularity == 0:\n context_simularity = get_cosinus_simularity(tf_idf_map[document\n ], context_words)\n simularity = context_simularity * referens_simularity\n if simularity != 0:\n ranked_documents.append((document, simularity))\n ranked_documents = sorted(ranked_documents, key=itemgetter(1), reverse=True\n )\n return ranked_documents\n",
"step-5": "from operator import itemgetter\nimport math\n\ndef get_tf_idf_map(document, max_freq, n_docs, index):\n tf_idf_map = {}\n \n for term in document:\n tf = 0\n idf = math.log(n_docs)\n if term in index and term not in tf_idf_map: \n posting_list = index[term]\n freq_term = sum([post[1] for post in posting_list]) \n tf = 0.5 + 0.5*(freq_term/max_freq)\n idf = math.log(1 + (n_docs/len(posting_list)))\n if term not in tf_idf_map:\n tf_idf_map[term] = tf * idf\n\n return tf_idf_map\n\ndef get_cosinus_simularity(tf_idf_map, key_words):\n sum_common_terms = 0\n sum_tf_idf_terms = 0\n for term in tf_idf_map:\n if term in key_words:\n sum_common_terms += tf_idf_map[term]\n sum_tf_idf_terms += math.pow(tf_idf_map[term],2)\n cosinus_similarity = sum_common_terms/(math.sqrt(sum_tf_idf_terms)+math.sqrt(len(key_words)))\n return cosinus_similarity \n\ndef get_cosinus_ranked_documents(category, tf_idf_map, reference_words, context_words):\n ranked_documents = [] \n for document in tf_idf_map:\n referens_simularity = get_cosinus_simularity(tf_idf_map[document],reference_words)\n context_simularity = 0\n if not referens_simularity == 0:\n context_simularity = get_cosinus_simularity(tf_idf_map[document], context_words)\n simularity = context_simularity*referens_simularity\n if(simularity != 0):\n ranked_documents.append((document,simularity)) \n ranked_documents = sorted(ranked_documents, key=itemgetter(1), reverse=True)\n return ranked_documents",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
print("Hello world! im in github")
|
normal
|
{
"blob_id": "2db6f88b733c23063803c374d7a5b651e8443bd5",
"index": 6135,
"step-1": "<mask token>\n",
"step-2": "print('Hello world! im in github')\n",
"step-3": "print(\"Hello world! im in github\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django.db import models
from django.utils import timezone
# Create your models here.
class URL(models.Model):
label = models.CharField(null=True, blank=True, max_length=30)
address = models.URLField()
slug = models.SlugField(unique=True, max_length=8)
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.label
|
normal
|
{
"blob_id": "2dcb02ea2f36dd31eda13c1d666201f861c117e7",
"index": 4027,
"step-1": "<mask token>\n\n\nclass URL(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass URL(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.label\n",
"step-3": "<mask token>\n\n\nclass URL(models.Model):\n label = models.CharField(null=True, blank=True, max_length=30)\n address = models.URLField()\n slug = models.SlugField(unique=True, max_length=8)\n created = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.label\n",
"step-4": "from django.db import models\nfrom django.utils import timezone\n\n\nclass URL(models.Model):\n label = models.CharField(null=True, blank=True, max_length=30)\n address = models.URLField()\n slug = models.SlugField(unique=True, max_length=8)\n created = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.label\n",
"step-5": "from django.db import models\nfrom django.utils import timezone\n\n# Create your models here.\n\nclass URL(models.Model):\n label = models.CharField(null=True, blank=True, max_length=30)\n address = models.URLField()\n slug = models.SlugField(unique=True, max_length=8)\n created = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.label",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
def non_dupulicates_lette(word):
    # prints the letters of `word` that occur exactly once
    # (intent inferred from the function name; the original body referenced
    # an undefined variable `c` and raised a NameError)
    text = list(word)
    print(text)
    for k in text:
        if text.count(k) == 1:
            print(k)
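# Builds a character -> count histogram for `word`, prints singletons alone
# and repeated characters with their counts, then returns the dict.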
def has_dupulicates(word):
    d = dict()
for c in word:
if c not in d:
d[c]=1
else:
d[c]+=1
for k in d:
if d[k]==1:
print(k)
else:
print(k,d[k])
return d
#count=0
#othercount=1
#sizeword=len(word)-1
#while count<sizeword:
#letter=word[count]
#while othercount<sizeword:
#if letter == word[othercount]:
#return True
#othercount= othercount+1
#count+=1
#return False
A = 'bccata'  # ['a', 'b', 'b', 'c']
non_dupulicates_lette(A)
#result=has_dupulicates(A)
#print(result)
|
normal
|
{
"blob_id": "8cd234c2ec1b36abd992cc1a46147376cc241ede",
"index": 3276,
"step-1": "<mask token>\n\n\ndef has_dupulicates(word):\n d = dict()\n for c in word:\n if c not in d:\n d[c] = 1\n else:\n d[c] += 1\n for k in d:\n if d[k] == 1:\n print(k)\n else:\n print(k, d[k])\n return d\n\n\n<mask token>\n",
"step-2": "def non_dupulicates_lette(word):\n text = list(word)\n print(text)\n i = 0\n for i in range(len(text)):\n for k in text:\n print(c)\n\n\ndef has_dupulicates(word):\n d = dict()\n for c in word:\n if c not in d:\n d[c] = 1\n else:\n d[c] += 1\n for k in d:\n if d[k] == 1:\n print(k)\n else:\n print(k, d[k])\n return d\n\n\n<mask token>\n",
"step-3": "def non_dupulicates_lette(word):\n text = list(word)\n print(text)\n i = 0\n for i in range(len(text)):\n for k in text:\n print(c)\n\n\ndef has_dupulicates(word):\n d = dict()\n for c in word:\n if c not in d:\n d[c] = 1\n else:\n d[c] += 1\n for k in d:\n if d[k] == 1:\n print(k)\n else:\n print(k, d[k])\n return d\n\n\n<mask token>\nnon_dupulicates_lette(A)\n",
"step-4": "def non_dupulicates_lette(word):\n text = list(word)\n print(text)\n i = 0\n for i in range(len(text)):\n for k in text:\n print(c)\n\n\ndef has_dupulicates(word):\n d = dict()\n for c in word:\n if c not in d:\n d[c] = 1\n else:\n d[c] += 1\n for k in d:\n if d[k] == 1:\n print(k)\n else:\n print(k, d[k])\n return d\n\n\nA = 'bccata'\nnon_dupulicates_lette(A)\n",
"step-5": "def non_dupulicates_lette(word):\n text = list(word);\n print(text)\n i=0\n for i in range(len(text)):\n for k in text:\n print(c)\n \ndef has_dupulicates(word):\n d= dict()\n for c in word:\n if c not in d:\n d[c]=1\n \n else:\n d[c]+=1\n\n\n for k in d:\n if d[k]==1:\n print(k)\n \n else:\n print(k,d[k])\n \n \n \n return d\n #count=0\n #othercount=1\n #sizeword=len(word)-1\n #while count<sizeword:\n #letter=word[count]\n #while othercount<sizeword:\n #if letter == word[othercount]:\n #return True\n #othercount= othercount+1\n\n #count+=1\n\n\n #return False\nA='bccata'#['a','b','b','c']\nnon_dupulicates_lette(A)\n#result=has_dupulicates(A)\n#print(result)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
import sys, io,re
import regex
from collections import defaultdict
import datetime
import json
def update_key(data_base, url, kkey):
    keys_saved = regex.get_data(r'<key>\s(.+?)\s<', data_base[url]['key'])
    if kkey not in keys_saved:
        data_base[url]['key'] = data_base[url]['key'][:-1]
        data_base[url]['key'] += ' <key> ' + kkey + ' <\\key>\n'
        return True
    return False
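# Scans every record for the newest value stored under `key_word` (dates are
# ISO strings like '2021-01-31'), then returns the day *before* that date as
# a compact YYYYMMDD int, or 0 if no parsable date was found.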
def check_date(data_base, key_word):
    # Scan every url's entry stored under key_word (ISO 'YYYY-MM-DD'
    # strings), keep the newest one, and return the day before it as an
    # int in YYYYMMDD form; return 0 when no parsable date is found.
    date = 0
    for url in data_base:
        for key in data_base[url]:
            if key_word == key:
                try:
                    d = int(re.sub(r'-', '', data_base[url][key]))
                    if date < d:
                        date = d
                except ValueError:
                    continue
    if date == 0:
        return 0
    latest = datetime.datetime.strptime(str(date), '%Y%m%d').date()
    return int((latest - datetime.timedelta(1)).strftime('%Y%m%d'))
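# Worked example (illustrative): if the newest date stored under key_word
# is '2020-04-17', check_date returns 20200416, the previous day as an int.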
def load_keywords_info():
    # A missing or corrupt keywords.json falls back to an empty mapping.
    try:
        with open('keywords.json', 'r') as fp:
            data = json.load(fp)
        return data
    except (FileNotFoundError, json.decoder.JSONDecodeError):
        return defaultdict(str)
def save_keywords_info(data):
with open('keywords.json', 'w') as fp:
json.dump(data, fp)
def load_url_info():
    # A missing or corrupt urls.json falls back to an empty mapping.
    try:
        with open('urls.json', 'r') as fp:
            data = json.load(fp)
        return data
    except (FileNotFoundError, json.decoder.JSONDecodeError):
        return defaultdict(list)
def save_url_info(data):
with open('urls.json', 'w') as fp:
json.dump(data, fp)
def load_previous(data_base):
    # Replay news.bank into data_base. Each saved article spans 10 lines;
    # the keyword sits on line i+1 and the URL on line i+4, both wrapped
    # as "> value <". (An earlier draft, kept here as commented-out code,
    # also parsed title/source/date/author/content fields per URL; only
    # the keyword -> URLs mapping is used now.)
    previous = []
    try:
        with open("news.bank", "r", encoding='utf8') as file:
            for line in file:
                previous.append(line)
        i = 0
        while i < len(previous):
            url = regex.get_data(r'>\s(.+?)\s<', previous[i + 4])[0]
            key = regex.get_data(r'>\s(.+?)\s<', previous[i + 1])[0]
            data_base[key].append(url)
            i += 10
    except FileNotFoundError:
        pass
def check_last_update(url, date):
    # Return the index of the first URL whose embedded YYYY/MM/DD path
    # segment is older than `date` (an int in YYYYMMDD form), or -1.
    count = 0
    for u in url:
        d = regex.get_data(r'\S+\/(\d+\/\d+\/\d+)\S+', u)[0]
        d = int(re.sub(r'/', '', d))
        if d < date:
            return count
        count += 1
    return -1
def MinEditDist(s1, s2):
    # Levenshtein distance via the classic single-row dynamic programme:
    # sweep over s2, keeping only the previous row of edit distances.
    if len(s1) > len(s2):
        s1, s2 = s2, s1
    distances = range(len(s1) + 1)
    for i2, c2 in enumerate(s2):
        distances_ = [i2 + 1]
        for i1, c1 in enumerate(s1):
            if c1 == c2:
                distances_.append(distances[i1])
            else:
                distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))
        distances = distances_
    return distances[-1]
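if __name__ == '__main__':
    # Smoke tests added for illustration; they are not part of the module.
    assert MinEditDist('kitten', 'sitting') == 3   # classic 3-edit example
    assert MinEditDist('', 'abc') == 3             # inserts only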
|
normal
|
{
"blob_id": "50a5d3431693b402c15b557357eaf9a85fc02b0b",
"index": 2921,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef update_key(data_base, url, kkey):\n keys_saved = regex.get_data('<key>\\\\s(.+?)\\\\s<', data_base[url]['key'])\n if kkey not in keys_saved:\n data_base[url]['key'] = data_base[url]['key'][:-1]\n data_base[url]['key'] += ' <key> ' + kkey + ' <\\\\key>\\n'\n return True\n return False\n\n\ndef check_date(data_base, key_word):\n date = 0\n for url in data_base:\n for key in data_base[url]:\n if key_word == key:\n try:\n d = int(re.sub('-', '', data_base[url][key]))\n if date < d:\n date = d\n except ValueError:\n continue\n if date != 0:\n date = str(date)\n year = int(date[0:4])\n if date[4] != '0':\n month = int(date[4:6])\n elif date[4] == '0':\n month = int(date[5])\n if date[6] != '0':\n day = int(date[6:8])\n elif date[6] == '0':\n day = int(date[7])\n date = (datetime.date(year, month, day) - datetime.timedelta(1)\n ).isoformat()\n return int(re.sub('-', '', date))\n else:\n return 0\n\n\ndef load_keywords_info():\n try:\n with open('keywords.json', 'r') as fp:\n data = json.load(fp)\n return data\n except json.decoder.JSONDecodeError:\n return defaultdict(str)\n\n\n<mask token>\n\n\ndef load_url_info():\n try:\n with open('urls.json', 'r') as fp:\n data = json.load(fp)\n return data\n except json.decoder.JSONDecodeError:\n return defaultdict(list)\n\n\ndef save_url_info(data):\n with open('urls.json', 'w') as fp:\n json.dump(data, fp)\n\n\ndef load_previous(data_base):\n previous = []\n try:\n file = open('news.bank', 'r', encoding='utf8')\n for line in file:\n previous.append(line)\n i = 0\n while i < len(previous):\n url = regex.get_data('>\\\\s(.+?)\\\\s<', previous[i + 4])[0]\n key = regex.get_data('>\\\\s(.+?)\\\\s<', previous[i + 1])[0]\n data_base[key].append(url)\n i += 10\n except FileNotFoundError:\n pass\n\n\n<mask token>\n\n\ndef MinEditDist(s1, s2):\n if len(s1) > len(s2):\n s1, s2 = s2, s1\n distances = range(len(s1) + 1)\n for i2, c2 in enumerate(s2):\n distances_ = [i2 + 1]\n for i1, c1 in enumerate(s1):\n if c1 == c2:\n distances_.append(distances[i1])\n else:\n distances_.append(1 + min((distances[i1], distances[i1 + 1],\n distances_[-1])))\n distances = distances_\n return distances[-1]\n",
"step-3": "<mask token>\n\n\ndef update_key(data_base, url, kkey):\n keys_saved = regex.get_data('<key>\\\\s(.+?)\\\\s<', data_base[url]['key'])\n if kkey not in keys_saved:\n data_base[url]['key'] = data_base[url]['key'][:-1]\n data_base[url]['key'] += ' <key> ' + kkey + ' <\\\\key>\\n'\n return True\n return False\n\n\ndef check_date(data_base, key_word):\n date = 0\n for url in data_base:\n for key in data_base[url]:\n if key_word == key:\n try:\n d = int(re.sub('-', '', data_base[url][key]))\n if date < d:\n date = d\n except ValueError:\n continue\n if date != 0:\n date = str(date)\n year = int(date[0:4])\n if date[4] != '0':\n month = int(date[4:6])\n elif date[4] == '0':\n month = int(date[5])\n if date[6] != '0':\n day = int(date[6:8])\n elif date[6] == '0':\n day = int(date[7])\n date = (datetime.date(year, month, day) - datetime.timedelta(1)\n ).isoformat()\n return int(re.sub('-', '', date))\n else:\n return 0\n\n\ndef load_keywords_info():\n try:\n with open('keywords.json', 'r') as fp:\n data = json.load(fp)\n return data\n except json.decoder.JSONDecodeError:\n return defaultdict(str)\n\n\ndef save_keywords_info(data):\n with open('keywords.json', 'w') as fp:\n json.dump(data, fp)\n\n\ndef load_url_info():\n try:\n with open('urls.json', 'r') as fp:\n data = json.load(fp)\n return data\n except json.decoder.JSONDecodeError:\n return defaultdict(list)\n\n\ndef save_url_info(data):\n with open('urls.json', 'w') as fp:\n json.dump(data, fp)\n\n\ndef load_previous(data_base):\n previous = []\n try:\n file = open('news.bank', 'r', encoding='utf8')\n for line in file:\n previous.append(line)\n i = 0\n while i < len(previous):\n url = regex.get_data('>\\\\s(.+?)\\\\s<', previous[i + 4])[0]\n key = regex.get_data('>\\\\s(.+?)\\\\s<', previous[i + 1])[0]\n data_base[key].append(url)\n i += 10\n except FileNotFoundError:\n pass\n\n\n<mask token>\n\n\ndef MinEditDist(s1, s2):\n if len(s1) > len(s2):\n s1, s2 = s2, s1\n distances = range(len(s1) + 1)\n for i2, c2 in enumerate(s2):\n distances_ = [i2 + 1]\n for i1, c1 in enumerate(s1):\n if c1 == c2:\n distances_.append(distances[i1])\n else:\n distances_.append(1 + min((distances[i1], distances[i1 + 1],\n distances_[-1])))\n distances = distances_\n return distances[-1]\n",
"step-4": "<mask token>\n\n\ndef update_key(data_base, url, kkey):\n keys_saved = regex.get_data('<key>\\\\s(.+?)\\\\s<', data_base[url]['key'])\n if kkey not in keys_saved:\n data_base[url]['key'] = data_base[url]['key'][:-1]\n data_base[url]['key'] += ' <key> ' + kkey + ' <\\\\key>\\n'\n return True\n return False\n\n\ndef check_date(data_base, key_word):\n date = 0\n for url in data_base:\n for key in data_base[url]:\n if key_word == key:\n try:\n d = int(re.sub('-', '', data_base[url][key]))\n if date < d:\n date = d\n except ValueError:\n continue\n if date != 0:\n date = str(date)\n year = int(date[0:4])\n if date[4] != '0':\n month = int(date[4:6])\n elif date[4] == '0':\n month = int(date[5])\n if date[6] != '0':\n day = int(date[6:8])\n elif date[6] == '0':\n day = int(date[7])\n date = (datetime.date(year, month, day) - datetime.timedelta(1)\n ).isoformat()\n return int(re.sub('-', '', date))\n else:\n return 0\n\n\ndef load_keywords_info():\n try:\n with open('keywords.json', 'r') as fp:\n data = json.load(fp)\n return data\n except json.decoder.JSONDecodeError:\n return defaultdict(str)\n\n\ndef save_keywords_info(data):\n with open('keywords.json', 'w') as fp:\n json.dump(data, fp)\n\n\ndef load_url_info():\n try:\n with open('urls.json', 'r') as fp:\n data = json.load(fp)\n return data\n except json.decoder.JSONDecodeError:\n return defaultdict(list)\n\n\ndef save_url_info(data):\n with open('urls.json', 'w') as fp:\n json.dump(data, fp)\n\n\ndef load_previous(data_base):\n previous = []\n try:\n file = open('news.bank', 'r', encoding='utf8')\n for line in file:\n previous.append(line)\n i = 0\n while i < len(previous):\n url = regex.get_data('>\\\\s(.+?)\\\\s<', previous[i + 4])[0]\n key = regex.get_data('>\\\\s(.+?)\\\\s<', previous[i + 1])[0]\n data_base[key].append(url)\n i += 10\n except FileNotFoundError:\n pass\n\n\ndef check_last_update(url, date):\n count = 0\n for u in url:\n d = regex.get_data('\\\\S+\\\\/(\\\\d+\\\\/\\\\d+\\\\/\\\\d+)\\\\S+', u)[0]\n d = int(re.sub('/', '', d))\n if d < date:\n return count\n count += 1\n return -1\n\n\ndef MinEditDist(s1, s2):\n if len(s1) > len(s2):\n s1, s2 = s2, s1\n distances = range(len(s1) + 1)\n for i2, c2 in enumerate(s2):\n distances_ = [i2 + 1]\n for i1, c1 in enumerate(s1):\n if c1 == c2:\n distances_.append(distances[i1])\n else:\n distances_.append(1 + min((distances[i1], distances[i1 + 1],\n distances_[-1])))\n distances = distances_\n return distances[-1]\n",
"step-5": "# -*- coding: utf-8 -*-\r\nimport sys, io,re\r\nimport regex\r\nfrom collections import defaultdict\r\nimport datetime\r\nimport json\r\n\r\n\r\ndef update_key(data_base, url,kkey):\r\n keys_saved = regex.get_data('<key>\\s(.+?)\\s<',data_base[url]['key'])\r\n\r\n if kkey not in keys_saved:\r\n data_base[url]['key'] = data_base[url]['key'][:-1]\r\n data_base[url]['key'] += ' <key> ' + kkey + ' <\\key>\\n'\r\n return True\r\n\r\n return False\r\n\r\ndef check_date(data_base,key_word):\r\n date = 0\r\n\r\n for url in data_base:\r\n for key in data_base[url]:\r\n if key_word == key:\r\n try:\r\n d = int(re.sub(r'-', '', data_base[url][key]))\r\n if date < d:\r\n date = d\r\n except ValueError:\r\n continue\r\n\r\n\r\n if date != 0:\r\n date = str(date)\r\n year = int(date[0:4])\r\n if date[4] != '0':\r\n month = int(date[4:6])\r\n elif date[4] == '0':\r\n month = int(date[5])\r\n if date[6] != '0':\r\n day = int(date[6:8])\r\n elif date[6] == '0':\r\n day = int(date[7])\r\n\r\n\r\n\r\n date = (datetime.date(year, month, day) - datetime.timedelta(1)).isoformat()\r\n return int(re.sub(r'-', '', date))\r\n else:\r\n return 0\r\n \r\ndef load_keywords_info():\r\n try:\r\n with open('keywords.json', 'r') as fp:\r\n data = json.load(fp)\r\n return data\r\n except json.decoder.JSONDecodeError:\r\n return defaultdict(str)\r\n\r\n\r\ndef save_keywords_info(data):\r\n with open('keywords.json', 'w') as fp:\r\n json.dump(data, fp)\r\n\r\n\r\ndef load_url_info():\r\n try:\r\n with open('urls.json', 'r') as fp:\r\n data = json.load(fp)\r\n return data\r\n except json.decoder.JSONDecodeError:\r\n return defaultdict(list)\r\n\r\n\r\ndef save_url_info(data):\r\n with open('urls.json', 'w') as fp:\r\n json.dump(data, fp)\r\n\r\ndef load_previous(data_base):\r\n previous = []\r\n try:\r\n file = open(\"news.bank\",\"r\",encoding='utf8');\r\n for line in file:\r\n previous.append(line)\r\n\r\n\r\n \r\n i = 0\r\n while i < len(previous):\r\n\r\n url = regex.get_data('>\\s(.+?)\\s<',previous[i+4])[0]\r\n key = regex.get_data('>\\s(.+?)\\s<',previous[i+1])[0] \r\n #date = regex.get_data('>\\s(.+?)\\s<',previous[i+5])[0] \r\n\r\n data_base[key].append(url)\r\n\r\n #data_base[url][key] = date\r\n #data_base[url] = defaultdict(str)\r\n #data_base[id]['id'] = previous[i]\r\n #data_base[key]['key'] = previous[i]\r\n #data_base[url]['title'] = previous[i+1]\r\n #data_base[url]['source'] = previous[i+2]\r\n #data_base[url]['url'] = previous[i+3]\r\n #data_base[url]['date'] = previous[i+4]\r\n #data_base[url]['author'] = previous[i+5]\r\n #data_base[url]['content1'] = previous[i+6]\r\n #data_base[url]['content2'] = previous[i+7]\r\n\r\n i += 10\r\n\r\n\r\n except FileNotFoundError:\r\n pass\r\n\r\ndef check_last_update(url,date):\r\n count = 0\r\n for u in url:\r\n d = regex.get_data('\\S+\\/(\\d+\\/\\d+\\/\\d+)\\S+',u)[0]\r\n d = int(re.sub(r'/', '', d))\r\n if d < date:\r\n return count\r\n\r\n count += 1\r\n\r\n return -1\r\n\r\n\r\ndef MinEditDist(s1, s2):\r\n if len(s1) > len(s2):\r\n s1, s2 = s2, s1\r\n\r\n distances = range(len(s1) + 1)\r\n for i2, c2 in enumerate(s2):\r\n distances_ = [i2+1]\r\n for i1, c1 in enumerate(s1):\r\n if c1 == c2:\r\n distances_.append(distances[i1])\r\n else:\r\n distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))\r\n distances = distances_\r\n return distances[-1]\r\n",
"step-ids": [
0,
7,
8,
9,
11
]
}
|
[
0,
7,
8,
9,
11
] |
import csv
import us
from flask import abort, Flask, request, render_template
app = Flask(__name__) # pylint: disable=invalid-name
@app.route('/')
def root():
return render_template('index.html')
@app.route('/api')
def index():
return render_template('index.html')
@app.route('/api/total/counties')
def total_counties():
return process_counties_total(read_macro('county'), get_args())
@app.route('/api/total/counties/<state>')
def total_counties_state(state):
return process_state_counties_total(read_macro('county'), state, None, get_args())
@app.route('/api/total/counties/<state>/<county>')
def total_counties_state_county(state, county):
return process_state_counties_total(read_macro('county'), state, county, get_args())
@app.route('/api/total/states')
def total_states():
return country_view_total(read_macro('country'), get_args())
@app.route('/api/total/states/<state>')
def total_states_state(state):
return state_view_total(read_macro('country'), state, get_args())
@app.route('/api/total/states/<state>/counties')
def total_states_state_counties(state):
return process_state_counties_total(read_macro('county'), state, None, get_args())
@app.route('/api/total/states/<state>/counties/<county>')
def total_states_state_counties_county(state, county):
return process_state_counties_total(read_macro('county'), state, county, get_args())
@app.route('/api/timeline/counties')
def timeline_counties():
return process_country_county(read_macro('county'), get_args())
@app.route('/api/timeline/counties/<state>')
def timeline_counties_state(state):
return process_state_county(read_macro('county'), state, None, get_args())
@app.route('/api/timeline/counties/<state>/<county>')
def timeline_counties_state_county(state, county):
return process_state_county(read_macro('county'), state, county, get_args())
@app.route('/api/timeline/states')
def timeline_states():
return country_view(read_macro('country'), get_args())
@app.route('/api/timeline/states/<state>')
def timeline_state(state):
return state_view(read_macro('country'), state, get_args())
@app.route('/api/timeline/states/<state>/counties')
def timeline_state_counties(state):
return process_state_county(read_macro('county'), state, None, get_args())
@app.route('/api/timeline/states/<state>/counties/<county>')
def timeline_state_county(state, county):
return process_state_county(read_macro('county'), state, county, get_args())
def state_view_total(data, state_filter, args):
    # Latest totals for one state; bare ints are stringified because a
    # Flask view cannot return an int response body.
    data = filter_country_state(data, state_filter)
    result = process_mode(args, data[-1][3], data[-1][4])
    result = str(result) if isinstance(result, int) else result
    return result
def state_view(data, state_filter, args):
result = {}
data = filter_country_state(data, state_filter)
for row in data:
result[row[0]] = process_mode(args, row[3], row[4])
return result
def country_view_total(data, args):
dataset = {}
key_row = get_key_row(args, 'country')
for row in reversed(data):
if row[key_row] not in dataset:
dataset[row[key_row]] = process_mode(args, row[3], row[4])
return dataset
def country_view(data, args):
dataset = {}
key_row = get_key_row(args, 'country')
for row in data:
if row[key_row] not in dataset:
dataset[row[key_row]] = {}
dataset[row[key_row]][row[0]] = process_mode(args, row[3], row[4])
return dataset
def process_state_counties_total(data, state_filter, county_filter, args):
data = filter_state(data, state_filter)
if county_filter:
result = process_county_data_total(data, county_filter, args)
if isinstance(result, int):
result = str(result)
return result
return process_state_data_total(data, args)
def process_state_data_total(data, args):
dataset = {}
key_row = get_key_row(args, 'state')
for row in reversed(data):
if row[key_row] and row[key_row] not in dataset:
dataset[row[key_row]] = process_mode(args, row[4], row[5])
return dataset
def process_state_county(data, state_filter, county_filter, args):
data = filter_state(data, state_filter)
if county_filter:
return process_county_data(data, county_filter, args)
return process_state_data(data, args)
def process_county_data_total(data, county_filter, args):
for row in reversed(data):
if compare_county(county_filter, row[1], row[3]):
return process_mode(args, row[4], row[5])
return None
def process_county_data(data, county_filter, args):
dataset = {}
for row in data:
if compare_county(county_filter, row[1], row[3]):
dataset[row[0]] = process_mode(args, row[4], row[5])
return dataset
def process_state_data(data, args):
dataset = {}
key_row = get_key_row(args, 'state')
for row in data:
if row[key_row]:
if row[key_row] not in dataset:
dataset[row[key_row]] = {}
dataset[row[key_row]][row[0]] = process_mode(args, row[4], row[5])
return dataset
def process_counties_total(data, args):
dataset = {}
key_row = get_key_row(args, 'state')
for row in reversed(data):
state_key = get_state_key(args, row[2])
if state_key not in dataset:
dataset[state_key] = {}
if row[key_row] not in dataset[state_key]:
dataset[state_key][row[key_row]] = process_mode(args, row[4], row[5])
return dataset
def process_country_county(data, args):
dataset = {}
key_row = get_key_row(args, 'state')
for row in data:
state_key = get_state_key(args, row[2])
if state_key not in dataset:
dataset[state_key] = {}
if row[key_row] not in dataset[state_key]:
dataset[state_key][row[key_row]] = {}
dataset[state_key][row[key_row]][row[0]] = process_mode(args, row[4], row[5])
return dataset
def process_mode(args, cases, deaths):
    # Reduce a (cases, deaths) pair according to the requested mode;
    # without a mode, return both values in a dict.
    if args['mode'] == 'cases':
        return int(cases)
    if args['mode'] == 'deaths':
        return int(deaths)
    return {'cases': cases, 'deaths': deaths}
def filter_state(data, state_filter):
result = []
for row in data:
if compare_state(state_filter, row[2]):
result.append(row)
return result
def filter_country_state(data, state_filter):
result = []
for row in data:
if compare_state(state_filter, row[1]):
result.append(row)
return result
def read_macro(macro):
cv_data = []
with open(get_macro_file(macro), newline='') as data_file:
data_reader = csv.reader(data_file)
for row in data_reader:
cv_data.append(row)
cv_data.pop(0)
return cv_data
def get_macro_file(macro):
    # Both the 'county' and 'state' macros read the county-level CSV;
    # 'country' reads the state-level CSV. Unknown macros abort with 500.
    file = None
    if macro == 'county':
        file = 'county.csv'
    elif macro == 'state':
        file = 'county.csv'
    elif macro == 'country':
        file = 'state.csv'
    if not file:
        abort(500)
    return file
def get_args():
    # mode is 'cases', 'deaths', or None; fips is the raw fipsKey query
    # value when present (any non-empty value is truthy), else False.
    return {'mode': request.args.get('mode', None),
            'fips': request.args.get('fipsKey', False)}
def compare_state(state_filter, entry):
if str_normalize(entry) == str_normalize(state_filter):
return True
if us.states.lookup(state_filter) and us.states.lookup(state_filter).name == entry:
return True
return False
def compare_county(county_filter, entry, fips_entry):
if str_normalize(entry) == str_normalize(county_filter):
return True
if county_filter == fips_entry:
return True
return False
def str_normalize(words):
return words.replace(' ', '').lower().capitalize()
def get_key_row(args, locale):
    # Column that identifies the entity: the FIPS code when fipsKey was
    # passed, otherwise the human-readable name. The county file keeps
    # FIPS in column 3, the state file in column 2; names sit in column 1.
    if locale == 'state':
        key_row = 3 if args['fips'] else 1
    else:
        key_row = 2 if args['fips'] else 1
    return key_row
def get_state_key(args, state):
if args['fips']:
return us.states.lookup(state).fips
return state
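# Usage sketch (added; file names and host are assumptions, not from the
# source). The row indices above line up with the NYT covid-19 CSVs, so
# saving us-states.csv as state.csv and us-counties.csv as county.csv next
# to this module should feed the API. Then, for example:
#   FLASK_APP=app.py flask run
#   curl 'http://localhost:5000/api/total/states?mode=cases'
#   curl 'http://localhost:5000/api/timeline/counties/Ohio/Franklin?fipsKey=1'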
|
normal
|
{
"blob_id": "af00c6f443426b1f61e1816d7d14ebc7e6871a82",
"index": 5562,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef root():\n return render_template('index.html')\n\n\[email protected]('/api')\ndef index():\n return render_template('index.html')\n\n\[email protected]('/api/total/counties')\ndef total_counties():\n return process_counties_total(read_macro('county'), get_args())\n\n\[email protected]('/api/total/counties/<state>')\ndef total_counties_state(state):\n return process_state_counties_total(read_macro('county'), state, None,\n get_args())\n\n\n<mask token>\n\n\[email protected]('/api/total/states/<state>')\ndef total_states_state(state):\n return state_view_total(read_macro('country'), state, get_args())\n\n\[email protected]('/api/total/states/<state>/counties')\ndef total_states_state_counties(state):\n return process_state_counties_total(read_macro('county'), state, None,\n get_args())\n\n\[email protected]('/api/total/states/<state>/counties/<county>')\ndef total_states_state_counties_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county,\n get_args())\n\n\[email protected]('/api/timeline/counties')\ndef timeline_counties():\n return process_country_county(read_macro('county'), get_args())\n\n\[email protected]('/api/timeline/counties/<state>')\ndef timeline_counties_state(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\n<mask token>\n\n\[email protected]('/api/timeline/states')\ndef timeline_states():\n return country_view(read_macro('country'), get_args())\n\n\[email protected]('/api/timeline/states/<state>')\ndef timeline_state(state):\n return state_view(read_macro('country'), state, get_args())\n\n\[email protected]('/api/timeline/states/<state>/counties')\ndef timeline_state_counties(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\[email protected]('/api/timeline/states/<state>/counties/<county>')\ndef timeline_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args()\n )\n\n\ndef state_view_total(data, state_filter, args):\n data = filter_country_state(data, state_filter)\n result = process_mode(args, data[-1][3], data[-1][4])\n result = str(result) if isinstance(result, int) else result\n return result\n\n\ndef state_view(data, state_filter, args):\n result = {}\n data = filter_country_state(data, state_filter)\n for row in data:\n result[row[0]] = process_mode(args, row[3], row[4])\n return result\n\n\ndef country_view_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in reversed(data):\n if row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[3], row[4])\n return dataset\n\n\n<mask token>\n\n\ndef process_state_counties_total(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n result = process_county_data_total(data, county_filter, args)\n if isinstance(result, int):\n result = str(result)\n return result\n return process_state_data_total(data, args)\n\n\ndef process_state_data_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n if row[key_row] and row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_county(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n return process_county_data(data, county_filter, args)\n return process_state_data(data, args)\n\n\ndef 
process_county_data_total(data, county_filter, args):\n for row in reversed(data):\n if compare_county(county_filter, row[1], row[3]):\n return process_mode(args, row[4], row[5])\n return None\n\n\ndef process_county_data(data, county_filter, args):\n dataset = {}\n for row in data:\n if compare_county(county_filter, row[1], row[3]):\n dataset[row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_data(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n if row[key_row]:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_counties_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = process_mode(args, row[4],\n row[5])\n return dataset\n\n\ndef process_country_county(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = {}\n dataset[state_key][row[key_row]][row[0]] = process_mode(args, row[4\n ], row[5])\n return dataset\n\n\ndef process_mode(args, cases, deaths):\n if args['mode'] == 'cases':\n return int(cases)\n if args['mode'] == 'deaths':\n return int(deaths)\n return {'cases': cases, 'deaths': deaths}\n\n\ndef filter_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[2]):\n result.append(row)\n return result\n\n\ndef filter_country_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[1]):\n result.append(row)\n return result\n\n\ndef read_macro(macro):\n cv_data = []\n with open(get_macro_file(macro), newline='') as data_file:\n data_reader = csv.reader(data_file)\n for row in data_reader:\n cv_data.append(row)\n cv_data.pop(0)\n return cv_data\n\n\ndef get_macro_file(macro):\n file = None\n if macro == 'county':\n file = 'county.csv'\n elif macro == 'state':\n file = 'county.csv'\n elif macro == 'country':\n file = 'state.csv'\n if not file:\n abort(500)\n return file\n\n\ndef get_args():\n return {'mode': request.args.get('mode', None), 'fips': request.args.\n get('fipsKey', False)}\n\n\n<mask token>\n\n\ndef compare_county(county_filter, entry, fips_entry):\n if str_normalize(entry) == str_normalize(county_filter):\n return True\n if county_filter == fips_entry:\n return True\n return False\n\n\ndef str_normalize(words):\n return words.replace(' ', '').lower().capitalize()\n\n\ndef get_key_row(args, locale):\n if locale == 'state':\n key_row = 3 if args['fips'] else 1\n else:\n key_row = 2 if args['fips'] else 1\n return key_row\n\n\ndef get_state_key(args, state):\n if args['fips']:\n return us.states.lookup(state).fips\n return state\n",
"step-2": "<mask token>\n\n\[email protected]('/')\ndef root():\n return render_template('index.html')\n\n\[email protected]('/api')\ndef index():\n return render_template('index.html')\n\n\[email protected]('/api/total/counties')\ndef total_counties():\n return process_counties_total(read_macro('county'), get_args())\n\n\[email protected]('/api/total/counties/<state>')\ndef total_counties_state(state):\n return process_state_counties_total(read_macro('county'), state, None,\n get_args())\n\n\[email protected]('/api/total/counties/<state>/<county>')\ndef total_counties_state_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county,\n get_args())\n\n\[email protected]('/api/total/states')\ndef total_states():\n return country_view_total(read_macro('country'), get_args())\n\n\[email protected]('/api/total/states/<state>')\ndef total_states_state(state):\n return state_view_total(read_macro('country'), state, get_args())\n\n\[email protected]('/api/total/states/<state>/counties')\ndef total_states_state_counties(state):\n return process_state_counties_total(read_macro('county'), state, None,\n get_args())\n\n\[email protected]('/api/total/states/<state>/counties/<county>')\ndef total_states_state_counties_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county,\n get_args())\n\n\[email protected]('/api/timeline/counties')\ndef timeline_counties():\n return process_country_county(read_macro('county'), get_args())\n\n\[email protected]('/api/timeline/counties/<state>')\ndef timeline_counties_state(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\[email protected]('/api/timeline/counties/<state>/<county>')\ndef timeline_counties_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args()\n )\n\n\[email protected]('/api/timeline/states')\ndef timeline_states():\n return country_view(read_macro('country'), get_args())\n\n\[email protected]('/api/timeline/states/<state>')\ndef timeline_state(state):\n return state_view(read_macro('country'), state, get_args())\n\n\[email protected]('/api/timeline/states/<state>/counties')\ndef timeline_state_counties(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\[email protected]('/api/timeline/states/<state>/counties/<county>')\ndef timeline_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args()\n )\n\n\ndef state_view_total(data, state_filter, args):\n data = filter_country_state(data, state_filter)\n result = process_mode(args, data[-1][3], data[-1][4])\n result = str(result) if isinstance(result, int) else result\n return result\n\n\ndef state_view(data, state_filter, args):\n result = {}\n data = filter_country_state(data, state_filter)\n for row in data:\n result[row[0]] = process_mode(args, row[3], row[4])\n return result\n\n\ndef country_view_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in reversed(data):\n if row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[3], row[4])\n return dataset\n\n\ndef country_view(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in data:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[3], row[4])\n return dataset\n\n\ndef process_state_counties_total(data, state_filter, county_filter, args):\n data = 
filter_state(data, state_filter)\n if county_filter:\n result = process_county_data_total(data, county_filter, args)\n if isinstance(result, int):\n result = str(result)\n return result\n return process_state_data_total(data, args)\n\n\ndef process_state_data_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n if row[key_row] and row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_county(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n return process_county_data(data, county_filter, args)\n return process_state_data(data, args)\n\n\ndef process_county_data_total(data, county_filter, args):\n for row in reversed(data):\n if compare_county(county_filter, row[1], row[3]):\n return process_mode(args, row[4], row[5])\n return None\n\n\ndef process_county_data(data, county_filter, args):\n dataset = {}\n for row in data:\n if compare_county(county_filter, row[1], row[3]):\n dataset[row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_data(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n if row[key_row]:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_counties_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = process_mode(args, row[4],\n row[5])\n return dataset\n\n\ndef process_country_county(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = {}\n dataset[state_key][row[key_row]][row[0]] = process_mode(args, row[4\n ], row[5])\n return dataset\n\n\ndef process_mode(args, cases, deaths):\n if args['mode'] == 'cases':\n return int(cases)\n if args['mode'] == 'deaths':\n return int(deaths)\n return {'cases': cases, 'deaths': deaths}\n\n\ndef filter_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[2]):\n result.append(row)\n return result\n\n\ndef filter_country_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[1]):\n result.append(row)\n return result\n\n\ndef read_macro(macro):\n cv_data = []\n with open(get_macro_file(macro), newline='') as data_file:\n data_reader = csv.reader(data_file)\n for row in data_reader:\n cv_data.append(row)\n cv_data.pop(0)\n return cv_data\n\n\ndef get_macro_file(macro):\n file = None\n if macro == 'county':\n file = 'county.csv'\n elif macro == 'state':\n file = 'county.csv'\n elif macro == 'country':\n file = 'state.csv'\n if not file:\n abort(500)\n return file\n\n\ndef get_args():\n return {'mode': request.args.get('mode', None), 'fips': request.args.\n get('fipsKey', False)}\n\n\ndef compare_state(state_filter, entry):\n if str_normalize(entry) == str_normalize(state_filter):\n return True\n if us.states.lookup(state_filter) and us.states.lookup(state_filter\n ).name == entry:\n return True\n return False\n\n\ndef compare_county(county_filter, entry, 
fips_entry):\n if str_normalize(entry) == str_normalize(county_filter):\n return True\n if county_filter == fips_entry:\n return True\n return False\n\n\ndef str_normalize(words):\n return words.replace(' ', '').lower().capitalize()\n\n\ndef get_key_row(args, locale):\n if locale == 'state':\n key_row = 3 if args['fips'] else 1\n else:\n key_row = 2 if args['fips'] else 1\n return key_row\n\n\ndef get_state_key(args, state):\n if args['fips']:\n return us.states.lookup(state).fips\n return state\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\[email protected]('/')\ndef root():\n return render_template('index.html')\n\n\[email protected]('/api')\ndef index():\n return render_template('index.html')\n\n\[email protected]('/api/total/counties')\ndef total_counties():\n return process_counties_total(read_macro('county'), get_args())\n\n\[email protected]('/api/total/counties/<state>')\ndef total_counties_state(state):\n return process_state_counties_total(read_macro('county'), state, None,\n get_args())\n\n\[email protected]('/api/total/counties/<state>/<county>')\ndef total_counties_state_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county,\n get_args())\n\n\[email protected]('/api/total/states')\ndef total_states():\n return country_view_total(read_macro('country'), get_args())\n\n\[email protected]('/api/total/states/<state>')\ndef total_states_state(state):\n return state_view_total(read_macro('country'), state, get_args())\n\n\[email protected]('/api/total/states/<state>/counties')\ndef total_states_state_counties(state):\n return process_state_counties_total(read_macro('county'), state, None,\n get_args())\n\n\[email protected]('/api/total/states/<state>/counties/<county>')\ndef total_states_state_counties_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county,\n get_args())\n\n\[email protected]('/api/timeline/counties')\ndef timeline_counties():\n return process_country_county(read_macro('county'), get_args())\n\n\[email protected]('/api/timeline/counties/<state>')\ndef timeline_counties_state(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\[email protected]('/api/timeline/counties/<state>/<county>')\ndef timeline_counties_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args()\n )\n\n\[email protected]('/api/timeline/states')\ndef timeline_states():\n return country_view(read_macro('country'), get_args())\n\n\[email protected]('/api/timeline/states/<state>')\ndef timeline_state(state):\n return state_view(read_macro('country'), state, get_args())\n\n\[email protected]('/api/timeline/states/<state>/counties')\ndef timeline_state_counties(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\[email protected]('/api/timeline/states/<state>/counties/<county>')\ndef timeline_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args()\n )\n\n\ndef state_view_total(data, state_filter, args):\n data = filter_country_state(data, state_filter)\n result = process_mode(args, data[-1][3], data[-1][4])\n result = str(result) if isinstance(result, int) else result\n return result\n\n\ndef state_view(data, state_filter, args):\n result = {}\n data = filter_country_state(data, state_filter)\n for row in data:\n result[row[0]] = process_mode(args, row[3], row[4])\n return result\n\n\ndef country_view_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in reversed(data):\n if row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[3], row[4])\n return dataset\n\n\ndef country_view(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in data:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[3], row[4])\n return dataset\n\n\ndef process_state_counties_total(data, state_filter, 
county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n result = process_county_data_total(data, county_filter, args)\n if isinstance(result, int):\n result = str(result)\n return result\n return process_state_data_total(data, args)\n\n\ndef process_state_data_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n if row[key_row] and row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_county(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n return process_county_data(data, county_filter, args)\n return process_state_data(data, args)\n\n\ndef process_county_data_total(data, county_filter, args):\n for row in reversed(data):\n if compare_county(county_filter, row[1], row[3]):\n return process_mode(args, row[4], row[5])\n return None\n\n\ndef process_county_data(data, county_filter, args):\n dataset = {}\n for row in data:\n if compare_county(county_filter, row[1], row[3]):\n dataset[row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_data(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n if row[key_row]:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_counties_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = process_mode(args, row[4],\n row[5])\n return dataset\n\n\ndef process_country_county(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = {}\n dataset[state_key][row[key_row]][row[0]] = process_mode(args, row[4\n ], row[5])\n return dataset\n\n\ndef process_mode(args, cases, deaths):\n if args['mode'] == 'cases':\n return int(cases)\n if args['mode'] == 'deaths':\n return int(deaths)\n return {'cases': cases, 'deaths': deaths}\n\n\ndef filter_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[2]):\n result.append(row)\n return result\n\n\ndef filter_country_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[1]):\n result.append(row)\n return result\n\n\ndef read_macro(macro):\n cv_data = []\n with open(get_macro_file(macro), newline='') as data_file:\n data_reader = csv.reader(data_file)\n for row in data_reader:\n cv_data.append(row)\n cv_data.pop(0)\n return cv_data\n\n\ndef get_macro_file(macro):\n file = None\n if macro == 'county':\n file = 'county.csv'\n elif macro == 'state':\n file = 'county.csv'\n elif macro == 'country':\n file = 'state.csv'\n if not file:\n abort(500)\n return file\n\n\ndef get_args():\n return {'mode': request.args.get('mode', None), 'fips': request.args.\n get('fipsKey', False)}\n\n\ndef compare_state(state_filter, entry):\n if str_normalize(entry) == str_normalize(state_filter):\n return True\n if us.states.lookup(state_filter) and us.states.lookup(state_filter\n ).name == entry:\n return True\n return False\n\n\ndef 
compare_county(county_filter, entry, fips_entry):\n if str_normalize(entry) == str_normalize(county_filter):\n return True\n if county_filter == fips_entry:\n return True\n return False\n\n\ndef str_normalize(words):\n return words.replace(' ', '').lower().capitalize()\n\n\ndef get_key_row(args, locale):\n if locale == 'state':\n key_row = 3 if args['fips'] else 1\n else:\n key_row = 2 if args['fips'] else 1\n return key_row\n\n\ndef get_state_key(args, state):\n if args['fips']:\n return us.states.lookup(state).fips\n return state\n",
"step-4": "import csv\nimport us\nfrom flask import abort, Flask, request, render_template\napp = Flask(__name__)\n\n\[email protected]('/')\ndef root():\n return render_template('index.html')\n\n\[email protected]('/api')\ndef index():\n return render_template('index.html')\n\n\[email protected]('/api/total/counties')\ndef total_counties():\n return process_counties_total(read_macro('county'), get_args())\n\n\[email protected]('/api/total/counties/<state>')\ndef total_counties_state(state):\n return process_state_counties_total(read_macro('county'), state, None,\n get_args())\n\n\[email protected]('/api/total/counties/<state>/<county>')\ndef total_counties_state_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county,\n get_args())\n\n\[email protected]('/api/total/states')\ndef total_states():\n return country_view_total(read_macro('country'), get_args())\n\n\[email protected]('/api/total/states/<state>')\ndef total_states_state(state):\n return state_view_total(read_macro('country'), state, get_args())\n\n\[email protected]('/api/total/states/<state>/counties')\ndef total_states_state_counties(state):\n return process_state_counties_total(read_macro('county'), state, None,\n get_args())\n\n\[email protected]('/api/total/states/<state>/counties/<county>')\ndef total_states_state_counties_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county,\n get_args())\n\n\[email protected]('/api/timeline/counties')\ndef timeline_counties():\n return process_country_county(read_macro('county'), get_args())\n\n\[email protected]('/api/timeline/counties/<state>')\ndef timeline_counties_state(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\[email protected]('/api/timeline/counties/<state>/<county>')\ndef timeline_counties_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args()\n )\n\n\[email protected]('/api/timeline/states')\ndef timeline_states():\n return country_view(read_macro('country'), get_args())\n\n\[email protected]('/api/timeline/states/<state>')\ndef timeline_state(state):\n return state_view(read_macro('country'), state, get_args())\n\n\[email protected]('/api/timeline/states/<state>/counties')\ndef timeline_state_counties(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\[email protected]('/api/timeline/states/<state>/counties/<county>')\ndef timeline_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args()\n )\n\n\ndef state_view_total(data, state_filter, args):\n data = filter_country_state(data, state_filter)\n result = process_mode(args, data[-1][3], data[-1][4])\n result = str(result) if isinstance(result, int) else result\n return result\n\n\ndef state_view(data, state_filter, args):\n result = {}\n data = filter_country_state(data, state_filter)\n for row in data:\n result[row[0]] = process_mode(args, row[3], row[4])\n return result\n\n\ndef country_view_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in reversed(data):\n if row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[3], row[4])\n return dataset\n\n\ndef country_view(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in data:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[3], row[4])\n return 
dataset\n\n\ndef process_state_counties_total(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n result = process_county_data_total(data, county_filter, args)\n if isinstance(result, int):\n result = str(result)\n return result\n return process_state_data_total(data, args)\n\n\ndef process_state_data_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n if row[key_row] and row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_county(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n return process_county_data(data, county_filter, args)\n return process_state_data(data, args)\n\n\ndef process_county_data_total(data, county_filter, args):\n for row in reversed(data):\n if compare_county(county_filter, row[1], row[3]):\n return process_mode(args, row[4], row[5])\n return None\n\n\ndef process_county_data(data, county_filter, args):\n dataset = {}\n for row in data:\n if compare_county(county_filter, row[1], row[3]):\n dataset[row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_data(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n if row[key_row]:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_counties_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = process_mode(args, row[4],\n row[5])\n return dataset\n\n\ndef process_country_county(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = {}\n dataset[state_key][row[key_row]][row[0]] = process_mode(args, row[4\n ], row[5])\n return dataset\n\n\ndef process_mode(args, cases, deaths):\n if args['mode'] == 'cases':\n return int(cases)\n if args['mode'] == 'deaths':\n return int(deaths)\n return {'cases': cases, 'deaths': deaths}\n\n\ndef filter_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[2]):\n result.append(row)\n return result\n\n\ndef filter_country_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[1]):\n result.append(row)\n return result\n\n\ndef read_macro(macro):\n cv_data = []\n with open(get_macro_file(macro), newline='') as data_file:\n data_reader = csv.reader(data_file)\n for row in data_reader:\n cv_data.append(row)\n cv_data.pop(0)\n return cv_data\n\n\ndef get_macro_file(macro):\n file = None\n if macro == 'county':\n file = 'county.csv'\n elif macro == 'state':\n file = 'county.csv'\n elif macro == 'country':\n file = 'state.csv'\n if not file:\n abort(500)\n return file\n\n\ndef get_args():\n return {'mode': request.args.get('mode', None), 'fips': request.args.\n get('fipsKey', False)}\n\n\ndef compare_state(state_filter, entry):\n if str_normalize(entry) == str_normalize(state_filter):\n return True\n if us.states.lookup(state_filter) and us.states.lookup(state_filter\n 
).name == entry:\n return True\n return False\n\n\ndef compare_county(county_filter, entry, fips_entry):\n if str_normalize(entry) == str_normalize(county_filter):\n return True\n if county_filter == fips_entry:\n return True\n return False\n\n\ndef str_normalize(words):\n return words.replace(' ', '').lower().capitalize()\n\n\ndef get_key_row(args, locale):\n if locale == 'state':\n key_row = 3 if args['fips'] else 1\n else:\n key_row = 2 if args['fips'] else 1\n return key_row\n\n\ndef get_state_key(args, state):\n if args['fips']:\n return us.states.lookup(state).fips\n return state\n",
"step-5": "import csv\nimport us\n\nfrom flask import abort, Flask, request, render_template\n\napp = Flask(__name__) # pylint: disable=invalid-name\n\n\[email protected]('/')\ndef root():\n return render_template('index.html')\n\n\[email protected]('/api')\ndef index():\n return render_template('index.html')\n\n\[email protected]('/api/total/counties')\ndef total_counties():\n return process_counties_total(read_macro('county'), get_args())\n\n\[email protected]('/api/total/counties/<state>')\ndef total_counties_state(state):\n return process_state_counties_total(read_macro('county'), state, None, get_args())\n\n\[email protected]('/api/total/counties/<state>/<county>')\ndef total_counties_state_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county, get_args())\n\n\[email protected]('/api/total/states')\ndef total_states():\n return country_view_total(read_macro('country'), get_args())\n\n\[email protected]('/api/total/states/<state>')\ndef total_states_state(state):\n return state_view_total(read_macro('country'), state, get_args())\n\n\[email protected]('/api/total/states/<state>/counties')\ndef total_states_state_counties(state):\n return process_state_counties_total(read_macro('county'), state, None, get_args())\n\n\[email protected]('/api/total/states/<state>/counties/<county>')\ndef total_states_state_counties_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county, get_args())\n\n\[email protected]('/api/timeline/counties')\ndef timeline_counties():\n return process_country_county(read_macro('county'), get_args())\n\n\[email protected]('/api/timeline/counties/<state>')\ndef timeline_counties_state(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\[email protected]('/api/timeline/counties/<state>/<county>')\ndef timeline_counties_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args())\n\n\[email protected]('/api/timeline/states')\ndef timeline_states():\n return country_view(read_macro('country'), get_args())\n\n\[email protected]('/api/timeline/states/<state>')\ndef timeline_state(state):\n return state_view(read_macro('country'), state, get_args())\n\n\[email protected]('/api/timeline/states/<state>/counties')\ndef timeline_state_counties(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\[email protected]('/api/timeline/states/<state>/counties/<county>')\ndef timeline_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args())\n\n\ndef state_view_total(data, state_filter, args):\n data = filter_country_state(data, state_filter)\n result = process_mode(args, data[-1][3], data[-1][4])\n result = str(result) if isinstance(result, int) else result\n return result\n\n\ndef state_view(data, state_filter, args):\n result = {}\n data = filter_country_state(data, state_filter)\n for row in data:\n result[row[0]] = process_mode(args, row[3], row[4])\n return result\n\n\ndef country_view_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in reversed(data):\n if row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[3], row[4])\n return dataset\n\n\ndef country_view(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in data:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[3], 
row[4])\n return dataset\n\n\ndef process_state_counties_total(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n result = process_county_data_total(data, county_filter, args)\n if isinstance(result, int):\n result = str(result)\n return result\n return process_state_data_total(data, args)\n\n\ndef process_state_data_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n if row[key_row] and row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_county(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n return process_county_data(data, county_filter, args)\n return process_state_data(data, args)\n\n\ndef process_county_data_total(data, county_filter, args):\n for row in reversed(data):\n if compare_county(county_filter, row[1], row[3]):\n return process_mode(args, row[4], row[5])\n return None\n\n\ndef process_county_data(data, county_filter, args):\n dataset = {}\n for row in data:\n if compare_county(county_filter, row[1], row[3]):\n dataset[row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_data(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n if row[key_row]:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_counties_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_country_county(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = {}\n dataset[state_key][row[key_row]][row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_mode(args, cases, deaths):\n if args['mode'] == 'cases':\n return int(cases)\n if args['mode'] == 'deaths':\n return int(deaths)\n return {'cases': cases, 'deaths': deaths}\n\n\ndef filter_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[2]):\n result.append(row)\n return result\n\n\ndef filter_country_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[1]):\n result.append(row)\n return result\n\n\ndef read_macro(macro):\n cv_data = []\n with open(get_macro_file(macro), newline='') as data_file:\n data_reader = csv.reader(data_file)\n for row in data_reader:\n cv_data.append(row)\n cv_data.pop(0)\n return cv_data\n\n\ndef get_macro_file(macro):\n file = None\n if macro == 'county':\n file = 'county.csv'\n elif macro == 'state':\n file = 'county.csv'\n elif macro == 'country':\n file = 'state.csv'\n if not file:\n abort(500)\n return file\n\n\ndef get_args():\n return {'mode': request.args.get('mode', None),\n 'fips': request.args.get('fipsKey', False)}\n\n\ndef compare_state(state_filter, entry):\n if str_normalize(entry) == str_normalize(state_filter):\n return True\n if us.states.lookup(state_filter) and 
us.states.lookup(state_filter).name == entry:\n return True\n return False\n\n\ndef compare_county(county_filter, entry, fips_entry):\n if str_normalize(entry) == str_normalize(county_filter):\n return True\n if county_filter == fips_entry:\n return True\n return False\n\n\ndef str_normalize(words):\n return words.replace(' ', '').lower().capitalize()\n\n\ndef get_key_row(args, locale):\n if locale == 'state':\n key_row = 3 if args['fips'] else 1\n else:\n key_row = 2 if args['fips'] else 1\n return key_row\n\n\ndef get_state_key(args, state):\n if args['fips']:\n return us.states.lookup(state).fips\n return state\n",
"step-ids": [
34,
39,
40,
41,
42
]
}
|
[
34,
39,
40,
41,
42
] |
from calc1 import LispTranslator, RPNTranslator, Parser, Lexer
import unittest
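

# Regression tests for the two translator back-ends built on the shared
# Lexer/Parser pipeline from calc1: infix -> RPN and infix -> Lisp-style notation.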
class TestTranslators(unittest.TestCase):

    def init_rpn(self, program):
        return RPNTranslator(Parser(Lexer(program)))

    def init_lisp(self, program):
        return LispTranslator(Parser(Lexer(program)))

    def test_simple_rpn(self):
        self.assertEqual(self.init_rpn('2 + 3').interpret(), '2 3 +')
        self.assertEqual(self.init_rpn('2 + 3 + 5').interpret(), '2 3 + 5 +')
        self.assertEqual(self.init_rpn('2 + 3 * 5').interpret(), '2 3 5 * +')
        self.assertEqual(self.init_rpn('(2 + 3) * 5').interpret(), '2 3 + 5 *')

    def test_simple_lisp(self):
        self.assertEqual(self.init_lisp('2 + 3').interpret(), '(+ 2 3)')
        self.assertEqual(self.init_lisp('2 + 3 + 5').interpret(),
                         '(+ (+ 2 3) 5)')
        self.assertEqual(self.init_lisp('2 + 3 * 5').interpret(),
                         '(+ 2 (* 3 5))')
        self.assertEqual(self.init_lisp('(2 + 3) * 5').interpret(),
                         '(* (+ 2 3) 5)')

    def test_examples_chapter_seven(self):
        self.assertEqual(self.init_rpn('(5 + 3) * 12 DIV 3').interpret(),
                         '5 3 + 12 * 3 DIV')
        self.assertEqual(self.init_lisp('2 + 3').interpret(), '(+ 2 3)')
        self.assertEqual(self.init_lisp('(2 + 3 * 5)').interpret(),
                         '(+ 2 (* 3 5))')


if __name__ == '__main__':
    unittest.main()
|
normal
|
{
"blob_id": "d0e957abfe5646fb84aed69902f2382d554dc825",
"index": 4401,
"step-1": "<mask token>\n\n\nclass TestTranslators(unittest.TestCase):\n <mask token>\n\n def init_lisp(self, program):\n return LispTranslator(Parser(Lexer(program)))\n <mask token>\n <mask token>\n\n def test_examples_chapter_seven(self):\n self.assertEqual(self.init_rpn('(5 + 3) * 12 DIV 3').interpret(),\n '5 3 + 12 * 3 DIV')\n self.assertEqual(self.init_lisp('2 + 3').interpret(), '(+ 2 3)')\n self.assertEqual(self.init_lisp('(2 + 3 * 5)').interpret(),\n '(+ 2 (* 3 5))')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestTranslators(unittest.TestCase):\n <mask token>\n\n def init_lisp(self, program):\n return LispTranslator(Parser(Lexer(program)))\n\n def test_simple_rpn(self):\n self.assertEqual(self.init_rpn('2 + 3').interpret(), '2 3 +')\n self.assertEqual(self.init_rpn('2 + 3 + 5').interpret(), '2 3 + 5 +')\n self.assertEqual(self.init_rpn('2 + 3 * 5').interpret(), '2 3 5 * +')\n self.assertEqual(self.init_rpn('(2 + 3) * 5').interpret(), '2 3 + 5 *')\n\n def test_simple_lisp(self):\n self.assertEqual(self.init_lisp('2 + 3').interpret(), '(+ 2 3)')\n self.assertEqual(self.init_lisp('2 + 3 + 5').interpret(),\n '(+ (+ 2 3) 5)')\n self.assertEqual(self.init_lisp('2 + 3 * 5').interpret(),\n '(+ 2 (* 3 5))')\n self.assertEqual(self.init_lisp('(2 + 3) * 5').interpret(),\n '(* (+ 2 3) 5)')\n\n def test_examples_chapter_seven(self):\n self.assertEqual(self.init_rpn('(5 + 3) * 12 DIV 3').interpret(),\n '5 3 + 12 * 3 DIV')\n self.assertEqual(self.init_lisp('2 + 3').interpret(), '(+ 2 3)')\n self.assertEqual(self.init_lisp('(2 + 3 * 5)').interpret(),\n '(+ 2 (* 3 5))')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestTranslators(unittest.TestCase):\n\n def init_rpn(self, program):\n return RPNTranslator(Parser(Lexer(program)))\n\n def init_lisp(self, program):\n return LispTranslator(Parser(Lexer(program)))\n\n def test_simple_rpn(self):\n self.assertEqual(self.init_rpn('2 + 3').interpret(), '2 3 +')\n self.assertEqual(self.init_rpn('2 + 3 + 5').interpret(), '2 3 + 5 +')\n self.assertEqual(self.init_rpn('2 + 3 * 5').interpret(), '2 3 5 * +')\n self.assertEqual(self.init_rpn('(2 + 3) * 5').interpret(), '2 3 + 5 *')\n\n def test_simple_lisp(self):\n self.assertEqual(self.init_lisp('2 + 3').interpret(), '(+ 2 3)')\n self.assertEqual(self.init_lisp('2 + 3 + 5').interpret(),\n '(+ (+ 2 3) 5)')\n self.assertEqual(self.init_lisp('2 + 3 * 5').interpret(),\n '(+ 2 (* 3 5))')\n self.assertEqual(self.init_lisp('(2 + 3) * 5').interpret(),\n '(* (+ 2 3) 5)')\n\n def test_examples_chapter_seven(self):\n self.assertEqual(self.init_rpn('(5 + 3) * 12 DIV 3').interpret(),\n '5 3 + 12 * 3 DIV')\n self.assertEqual(self.init_lisp('2 + 3').interpret(), '(+ 2 3)')\n self.assertEqual(self.init_lisp('(2 + 3 * 5)').interpret(),\n '(+ 2 (* 3 5))')\n\n\n<mask token>\n",
"step-4": "from calc1 import LispTranslator, RPNTranslator, Parser, Lexer\nimport unittest\n\n\nclass TestTranslators(unittest.TestCase):\n\n def init_rpn(self, program):\n return RPNTranslator(Parser(Lexer(program)))\n\n def init_lisp(self, program):\n return LispTranslator(Parser(Lexer(program)))\n\n def test_simple_rpn(self):\n self.assertEqual(self.init_rpn('2 + 3').interpret(), '2 3 +')\n self.assertEqual(self.init_rpn('2 + 3 + 5').interpret(), '2 3 + 5 +')\n self.assertEqual(self.init_rpn('2 + 3 * 5').interpret(), '2 3 5 * +')\n self.assertEqual(self.init_rpn('(2 + 3) * 5').interpret(), '2 3 + 5 *')\n\n def test_simple_lisp(self):\n self.assertEqual(self.init_lisp('2 + 3').interpret(), '(+ 2 3)')\n self.assertEqual(self.init_lisp('2 + 3 + 5').interpret(),\n '(+ (+ 2 3) 5)')\n self.assertEqual(self.init_lisp('2 + 3 * 5').interpret(),\n '(+ 2 (* 3 5))')\n self.assertEqual(self.init_lisp('(2 + 3) * 5').interpret(),\n '(* (+ 2 3) 5)')\n\n def test_examples_chapter_seven(self):\n self.assertEqual(self.init_rpn('(5 + 3) * 12 DIV 3').interpret(),\n '5 3 + 12 * 3 DIV')\n self.assertEqual(self.init_lisp('2 + 3').interpret(), '(+ 2 3)')\n self.assertEqual(self.init_lisp('(2 + 3 * 5)').interpret(),\n '(+ 2 (* 3 5))')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": null,
"step-ids": [
3,
5,
6,
8
]
}
|
[
3,
5,
6,
8
] |
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import numpy as np
import matplotlib.pyplot as plt
import netCDF4
import xarray as xr
import metpy
from datetime import datetime
import datetime as dt
from metpy.units import units
import scipy.ndimage as ndimage
from metpy.plots import USCOUNTIES
import cartopy
from scipy.ndimage.filters import generic_filter as gf
def mkdir_p(mypath):
'''Creates a directory. equivalent to using mkdir -p on the command line'''
from errno import EEXIST
from os import makedirs,path
try:
makedirs(mypath)
except OSError as exc: # Python >2.5
if exc.errno == EEXIST and path.isdir(mypath):
pass
else: raise
startTime = datetime.now()
m_date='20200903'
m_hour='12'
year = startTime.year
if startTime.month <10:
month = '0'+str(startTime.month)
else:
month = str(startTime.month)
if startTime.day <10:
day = '0'+str(startTime.day)
else:
day = str(startTime.day)
if startTime.hour <10:
hour = '0'+str(startTime.hour)
else:
hour = str(startTime.hour)
mdate = str(year)+str(month)+str(day)
def get_init_hr(hour):
if int(hour) <6:
init_hour = '00'
elif int(hour) <11:
init_hour = '06'
elif int(hour) <17:
init_hour = '12'
elif int(hour) <22:
init_hour = '18'
else:
init_hour = '00'
return(init_hour)
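
# OPeNDAP (dods) endpoint on NOMADS for the chosen 0.25-degree hourly GFS run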
url = 'http://nomads.ncep.noaa.gov:80/dods/gfs_0p25_1hr/gfs'+mdate+'/gfs_0p25_1hr_'+get_init_hr(hour)+'z'
init_hour = get_init_hr(hour)
'''
for i in range(119):
fhr = i+1
'''
# Create new directory
output_dir = str(year)+str(month)+str(day)+'_'+str(init_hour)+'00'
mkdir_p(output_dir)
mkdir_p(output_dir+'/GFS')
#Parse data using MetPy
ds = xr.open_dataset(url)
init_hr = dt.datetime(int(year),int(month),int(day),int(init_hour))
times = ds['tmp2m'].metpy.time
init_time = ds['time'][0]
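
# subset grid: 15-70N, 220-330E at 0.25-degree spacing (North America and adjacent oceans)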
lats = np.arange(15,70,0.25)
lons = np.arange(220,330,0.25)
for i in range(1,120):
fc_hr = init_hr+dt.timedelta(hours=1*i)
forecast_hour = times[0].values
data = ds.metpy.parse_cf()
data = data.isel(time=i)
#Rename variables to useful things
data = data.rename({
'absvprs':'avort',
'hgtprs':'gph',
'rhprs':'rh',
'tmpprs':'temp',
'ugrdprs':'u',
'vgrdprs': 'v',
})
vertical, = data['temp'].metpy.coordinates('vertical')
time = data['temp'].metpy.time
zH5_crs = data['temp'].metpy.cartopy_crs
t5 = data['temp'].sel(lev=500.0,lat=lats,lon=lons)
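    # 1.94384449 converts m/s to knots; multiplying vorticity by 1e5 expresses it
    # in units of 10^-5 s^-1 to match the contour levels below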
u5 = data['u'].sel(lev=500.0,lat=lats,lon=lons).squeeze()*1.94384449
v5 = data['v'].sel(lev=500.0,lat=lats,lon=lons).squeeze()*1.94384449
av5 = data['avort'].sel(lev=500.0,lat=lats,lon=lons).squeeze()*1e5
rh5 = data['rh'].sel(lev=500.0,lat=lats,lon=lons).squeeze()
h5 = data['gph'].sel(lev=500.0,lat=lats,lon=lons).squeeze()
x, y = t5.metpy.coordinates('x', 'y')
lat, lon = xr.broadcast(y, x)
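    # thin the wind barbs: every 5th grid point, trimming 5 points from each edge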
wind_slice = slice(5,-5,5)
########## SET UP FIGURE ##################################################
fig = plt.figure(figsize=(15,15))
ax1 = fig.add_subplot(111, projection = zH5_crs)
ax1.coastlines(resolution='10m')
ax1.add_feature(cfeature.BORDERS.with_scale('10m'))
ax1.add_feature(cfeature.STATES.with_scale('10m'))
#fig.suptitle("NAM Forecast valid at " + time[0].dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=36)
########## PLOTTING #######################################################
h5c = ax1.contour(x,y,h5,colors='dimgray', levels = range(4800,6200,60),linewidths=1.5)
t5c = ax1.contour(x,y,t5,colors='r', levels = range(-60,0,5),linestyles='dashed',linewidths=1)
a5c = ax1.contourf(x,y,av5,cmap='autumn_r',levels=range(10,60,2),alpha=0.8,extend='max')
a5cb = fig.colorbar(a5c, orientation = 'horizontal', aspect = 80, ax = ax1, pad = 0.01,
extendrect=False, ticks = range(10,61,5))
    a5cb.set_label('500mb Absolute Vorticity ($10^{-5} s^{-1}$)', fontsize = 12)
ax1.barbs(x[wind_slice],y[wind_slice],u5[wind_slice,wind_slice],v5[wind_slice,wind_slice], length=7)
#h_contour = ax1.contour(x, y, mslpc, colors='dimgray', levels=range(940,1040,4),linewidths=2)
#h_contour.clabel(fontsize=14, colors='dimgray', inline=1, inline_spacing=4, fmt='%i mb', rightside_up=True, use_clabeltext=True)
    ax1.set_title('500mb Heights (m) and Absolute Vorticity ($10^{-5} s^{-1}$)',fontsize=16)
ax1.set_title('\n Valid: '+time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='right')
ax1.set_title('\n GFS Init: '+init_time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='left')
    ax1.set_extent((265, 300, 25, 50))  # lon_min, lon_max, lat_min, lat_max
plt.savefig(output_dir+'/GFS/gfs_hrly_h5vort_'+str(i)+'.png')
plt.clf()
plt.close()
########## PLOT 2 #######################################################
wind_slice_s = slice (10,-10,10)
fig2 = plt.figure(figsize=(15,15))
ax2 = fig2.add_subplot(111,projection=zH5_crs)
ax2.coastlines(resolution='50m')
ax2.add_feature(cfeature.BORDERS.with_scale('50m'))
ax2.add_feature(cfeature.STATES.with_scale('50m'))
h5c2 = ax2.contour(x,y,h5,colors='dimgray', levels = range(4800,6200,60),linewidths=1.5)
t5c2 = ax2.contour(x,y,t5,colors='r', levels = range(-60,0,5),linestyles='dashed',linewidths=1)
a5c2 = ax2.contourf(x,y,av5,cmap='autumn_r',levels=range(10,65,2),alpha=0.8)
a5cb2 = fig2.colorbar(a5c2, orientation = 'horizontal', aspect = 80, ax = ax2, pad = 0.01,
extendrect=False, ticks = range(10,60,5))
    a5cb2.set_label('500mb Absolute Vorticity ($10^{-5} s^{-1}$)', fontsize = 12)
ax2.barbs(x[wind_slice_s],y[wind_slice_s],u5[wind_slice_s,wind_slice_s],v5[wind_slice_s,wind_slice_s], length=7)
#h_contour = ax1.contour(x, y, mslpc, colors='dimgray', levels=range(940,1040,4),linewidths=2)
#h_contour.clabel(fontsize=14, colors='dimgray', inline=1, inline_spacing=4, fmt='%i mb', rightside_up=True, use_clabeltext=True)
    ax2.set_title('500mb Heights (m) and Absolute Vorticity ($10^{-5} s^{-1}$)',fontsize=16)
ax2.set_title('\n Valid: '+time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='right')
ax2.set_title('\n GFS Init: '+init_time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='left')
    ax2.set_extent((225, 300, 20, 65))  # lon_min, lon_max, lat_min, lat_max
plt.savefig(output_dir+'/GFS/gfs_hrly_h5vortCONUS_v2_'+str(i)+'.png')
########## PLOT 3 #######################################################
wind_slice_s = slice (10,-10,10)
fig3 = plt.figure(figsize=(15,15))
ax3 = fig3.add_subplot(111,projection=zH5_crs)
ax3.coastlines(resolution='50m')
ax3.add_feature(cfeature.BORDERS.with_scale('50m'))
ax3.add_feature(cfeature.STATES.with_scale('50m'))
h5c2 = ax3.contour(x,y,h5,colors='dimgray', levels = range(4800,6200,60),linewidths=1.5)
t5c2 = ax3.contour(x,y,t5,colors='r', levels = range(-60,0,5),linestyles='dashed',linewidths=1)
a5c2 = ax3.contourf(x,y,av5,cmap='autumn_r',levels=range(10,65,2),alpha=0.8)
a5cb2 = fig3.colorbar(a5c2, orientation = 'horizontal', aspect = 80, ax = ax3, pad = 0.01,
extendrect=False, ticks = range(10,60,5))
    a5cb2.set_label('500mb Absolute Vorticity ($10^{-5} s^{-1}$)', fontsize = 12)
ax3.barbs(x[wind_slice_s],y[wind_slice_s],u5[wind_slice_s,wind_slice_s],v5[wind_slice_s,wind_slice_s], length=7)
#h_contour = ax1.contour(x, y, mslpc, colors='dimgray', levels=range(940,1040,4),linewidths=2)
#h_contour.clabel(fontsize=14, colors='dimgray', inline=1, inline_spacing=4, fmt='%i mb', rightside_up=True, use_clabeltext=True)
    ax3.set_title('500mb Heights (m) and Absolute Vorticity ($10^{-5} s^{-1}$)',fontsize=16)
ax3.set_title('\n Valid: '+time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='right')
ax3.set_title('\n GFS Init: '+init_time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='left')
    ax3.set_extent((260, 320, 20, 65))  # lon_min, lon_max, lat_min, lat_max
plt.savefig(output_dir+'/GFS/gfs_hrly_h5vortC_ec_v1_'+str(i)+'.png')
fcst_hr = str(0)
print('Hour '+str(i)+' completed!')
    plt.close('all')  # close fig2 and fig3 as well, so figures do not accumulate across iterations
timeelapsed = datetime.now()-startTime
print(timeelapsed)
'''
url= 'http://nomads.ncep.noaa.gov:80/dods/gfs_0p25_1hr/gfs20200903/gfs_0p25_1hr_12z'
ds = xr.open_dataset(url)
t2m_ds = ds['tmp2m']
init_hr = t2m_ds['time'][0].values
#fc_hr = t2m.ds['time'][i].values
lats = np.arange(20,50,0.25)
lons = np.arange(240,300,0.25)
t2m = t2m_ds.sel(time = init_hr, lat = lats, lon = lons)
print(t2m)
fig = plt.figure(figsize = (12,12))
fig.clf()
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
ax.set_extent((240,300, 20, 50), crs = ccrs.PlateCarree())
t2m_c = ax.contourf(t2m, cmap='RdPu')
plt.savefig('testingnomads6.png')
'''
|
normal
|
{
"blob_id": "8771f71a69f3afdc5de4d38db6efe61b553ae880",
"index": 9396,
"step-1": "<mask token>\n\n\ndef mkdir_p(mypath):\n \"\"\"Creates a directory. equivalent to using mkdir -p on the command line\"\"\"\n from errno import EEXIST\n from os import makedirs, path\n try:\n makedirs(mypath)\n except OSError as exc:\n if exc.errno == EEXIST and path.isdir(mypath):\n pass\n else:\n raise\n\n\n<mask token>\n\n\ndef get_init_hr(hour):\n if int(hour) < 6:\n init_hour = '00'\n elif int(hour) < 11:\n init_hour = '06'\n elif int(hour) < 17:\n init_hour = '12'\n elif int(hour) < 22:\n init_hour = '18'\n else:\n init_hour = '00'\n return init_hour\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef mkdir_p(mypath):\n \"\"\"Creates a directory. equivalent to using mkdir -p on the command line\"\"\"\n from errno import EEXIST\n from os import makedirs, path\n try:\n makedirs(mypath)\n except OSError as exc:\n if exc.errno == EEXIST and path.isdir(mypath):\n pass\n else:\n raise\n\n\n<mask token>\nif startTime.month < 10:\n month = '0' + str(startTime.month)\nelse:\n month = str(startTime.month)\nif startTime.day < 10:\n day = '0' + str(startTime.day)\nelse:\n day = str(startTime.day)\nif startTime.hour < 10:\n hour = '0' + str(startTime.hour)\nelse:\n hour = str(startTime.hour)\n<mask token>\n\n\ndef get_init_hr(hour):\n if int(hour) < 6:\n init_hour = '00'\n elif int(hour) < 11:\n init_hour = '06'\n elif int(hour) < 17:\n init_hour = '12'\n elif int(hour) < 22:\n init_hour = '18'\n else:\n init_hour = '00'\n return init_hour\n\n\n<mask token>\nmkdir_p(output_dir)\nmkdir_p(output_dir + '/GFS')\n<mask token>\nfor i in range(1, 120):\n fc_hr = init_hr + dt.timedelta(hours=1 * i)\n forecast_hour = times[0].values\n data = ds.metpy.parse_cf()\n data = data.isel(time=i)\n data = data.rename({'absvprs': 'avort', 'hgtprs': 'gph', 'rhprs': 'rh',\n 'tmpprs': 'temp', 'ugrdprs': 'u', 'vgrdprs': 'v'})\n vertical, = data['temp'].metpy.coordinates('vertical')\n time = data['temp'].metpy.time\n zH5_crs = data['temp'].metpy.cartopy_crs\n t5 = data['temp'].sel(lev=500.0, lat=lats, lon=lons)\n u5 = data['u'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 1.94384449\n v5 = data['v'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 1.94384449\n av5 = data['avort'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 100000.0\n rh5 = data['rh'].sel(lev=500.0, lat=lats, lon=lons).squeeze()\n h5 = data['gph'].sel(lev=500.0, lat=lats, lon=lons).squeeze()\n x, y = t5.metpy.coordinates('x', 'y')\n lat, lon = xr.broadcast(y, x)\n wind_slice = slice(5, -5, 5)\n fig = plt.figure(figsize=(15, 15))\n ax1 = fig.add_subplot(111, projection=zH5_crs)\n ax1.coastlines(resolution='10m')\n ax1.add_feature(cfeature.BORDERS.with_scale('10m'))\n ax1.add_feature(cfeature.STATES.with_scale('10m'))\n h5c = ax1.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200, \n 60), linewidths=1.5)\n t5c = ax1.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),\n linestyles='dashed', linewidths=1)\n a5c = ax1.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 60, 2),\n alpha=0.8, extend='max')\n a5cb = fig.colorbar(a5c, orientation='horizontal', aspect=80, ax=ax1,\n pad=0.01, extendrect=False, ticks=range(10, 61, 5))\n a5cb.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)\n ax1.barbs(x[wind_slice], y[wind_slice], u5[wind_slice, wind_slice], v5[\n wind_slice, wind_slice], length=7)\n ax1.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',\n fontsize=16)\n ax1.set_title('\\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),\n fontsize=11, loc='right')\n ax1.set_title('\\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'\n ).item(), fontsize=11, loc='left')\n ax1.set_extent((265, 300, 25, 50))\n plt.savefig(output_dir + '/GFS/gfs_hrly_h5vort_' + str(i) + '.png')\n plt.clf()\n plt.close()\n wind_slice_s = slice(10, -10, 10)\n fig2 = plt.figure(figsize=(15, 15))\n ax2 = fig2.add_subplot(111, projection=zH5_crs)\n ax2.coastlines(resolution='50m')\n ax2.add_feature(cfeature.BORDERS.with_scale('50m'))\n ax2.add_feature(cfeature.STATES.with_scale('50m'))\n h5c2 = ax2.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200,\n 60), linewidths=1.5)\n t5c2 = ax2.contour(x, y, 
t5, colors='r', levels=range(-60, 0, 5),\n linestyles='dashed', linewidths=1)\n a5c2 = ax2.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 65, 2),\n alpha=0.8)\n a5cb2 = fig2.colorbar(a5c2, orientation='horizontal', aspect=80, ax=ax2,\n pad=0.01, extendrect=False, ticks=range(10, 60, 5))\n a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)\n ax2.barbs(x[wind_slice_s], y[wind_slice_s], u5[wind_slice_s,\n wind_slice_s], v5[wind_slice_s, wind_slice_s], length=7)\n ax2.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',\n fontsize=16)\n ax2.set_title('\\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),\n fontsize=11, loc='right')\n ax2.set_title('\\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'\n ).item(), fontsize=11, loc='left')\n ax2.set_extent((225, 300, 20, 65))\n plt.savefig(output_dir + '/GFS/gfs_hrly_h5vortCONUS_v2_' + str(i) + '.png')\n wind_slice_s = slice(10, -10, 10)\n fig3 = plt.figure(figsize=(15, 15))\n ax3 = fig3.add_subplot(111, projection=zH5_crs)\n ax3.coastlines(resolution='50m')\n ax3.add_feature(cfeature.BORDERS.with_scale('50m'))\n ax3.add_feature(cfeature.STATES.with_scale('50m'))\n h5c2 = ax3.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200,\n 60), linewidths=1.5)\n t5c2 = ax3.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),\n linestyles='dashed', linewidths=1)\n a5c2 = ax3.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 65, 2),\n alpha=0.8)\n a5cb2 = fig3.colorbar(a5c2, orientation='horizontal', aspect=80, ax=ax3,\n pad=0.01, extendrect=False, ticks=range(10, 60, 5))\n a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)\n ax3.barbs(x[wind_slice_s], y[wind_slice_s], u5[wind_slice_s,\n wind_slice_s], v5[wind_slice_s, wind_slice_s], length=7)\n ax3.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',\n fontsize=16)\n ax3.set_title('\\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),\n fontsize=11, loc='right')\n ax3.set_title('\\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'\n ).item(), fontsize=11, loc='left')\n ax3.set_extent((260, 320, 20, 65))\n plt.savefig(output_dir + '/GFS/gfs_hrly_h5vortC_ec_v1_' + str(i) + '.png')\n fcst_hr = str(0)\n print('Hour ' + str(i) + ' completed!')\n plt.close()\n timeelapsed = datetime.now() - startTime\n print(timeelapsed)\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef mkdir_p(mypath):\n \"\"\"Creates a directory. equivalent to using mkdir -p on the command line\"\"\"\n from errno import EEXIST\n from os import makedirs, path\n try:\n makedirs(mypath)\n except OSError as exc:\n if exc.errno == EEXIST and path.isdir(mypath):\n pass\n else:\n raise\n\n\nstartTime = datetime.now()\nm_date = '20200903'\nm_hour = '12'\nyear = startTime.year\nif startTime.month < 10:\n month = '0' + str(startTime.month)\nelse:\n month = str(startTime.month)\nif startTime.day < 10:\n day = '0' + str(startTime.day)\nelse:\n day = str(startTime.day)\nif startTime.hour < 10:\n hour = '0' + str(startTime.hour)\nelse:\n hour = str(startTime.hour)\nmdate = str(year) + str(month) + str(day)\n\n\ndef get_init_hr(hour):\n if int(hour) < 6:\n init_hour = '00'\n elif int(hour) < 11:\n init_hour = '06'\n elif int(hour) < 17:\n init_hour = '12'\n elif int(hour) < 22:\n init_hour = '18'\n else:\n init_hour = '00'\n return init_hour\n\n\nurl = ('http://nomads.ncep.noaa.gov:80/dods/gfs_0p25_1hr/gfs' + mdate +\n '/gfs_0p25_1hr_' + get_init_hr(hour) + 'z')\ninit_hour = get_init_hr(hour)\n<mask token>\noutput_dir = str(year) + str(month) + str(day) + '_' + str(init_hour) + '00'\nmkdir_p(output_dir)\nmkdir_p(output_dir + '/GFS')\nds = xr.open_dataset(url)\ninit_hr = dt.datetime(int(year), int(month), int(day), int(init_hour))\ntimes = ds['tmp2m'].metpy.time\ninit_time = ds['time'][0]\nlats = np.arange(15, 70, 0.25)\nlons = np.arange(220, 330, 0.25)\nfor i in range(1, 120):\n fc_hr = init_hr + dt.timedelta(hours=1 * i)\n forecast_hour = times[0].values\n data = ds.metpy.parse_cf()\n data = data.isel(time=i)\n data = data.rename({'absvprs': 'avort', 'hgtprs': 'gph', 'rhprs': 'rh',\n 'tmpprs': 'temp', 'ugrdprs': 'u', 'vgrdprs': 'v'})\n vertical, = data['temp'].metpy.coordinates('vertical')\n time = data['temp'].metpy.time\n zH5_crs = data['temp'].metpy.cartopy_crs\n t5 = data['temp'].sel(lev=500.0, lat=lats, lon=lons)\n u5 = data['u'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 1.94384449\n v5 = data['v'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 1.94384449\n av5 = data['avort'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 100000.0\n rh5 = data['rh'].sel(lev=500.0, lat=lats, lon=lons).squeeze()\n h5 = data['gph'].sel(lev=500.0, lat=lats, lon=lons).squeeze()\n x, y = t5.metpy.coordinates('x', 'y')\n lat, lon = xr.broadcast(y, x)\n wind_slice = slice(5, -5, 5)\n fig = plt.figure(figsize=(15, 15))\n ax1 = fig.add_subplot(111, projection=zH5_crs)\n ax1.coastlines(resolution='10m')\n ax1.add_feature(cfeature.BORDERS.with_scale('10m'))\n ax1.add_feature(cfeature.STATES.with_scale('10m'))\n h5c = ax1.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200, \n 60), linewidths=1.5)\n t5c = ax1.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),\n linestyles='dashed', linewidths=1)\n a5c = ax1.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 60, 2),\n alpha=0.8, extend='max')\n a5cb = fig.colorbar(a5c, orientation='horizontal', aspect=80, ax=ax1,\n pad=0.01, extendrect=False, ticks=range(10, 61, 5))\n a5cb.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)\n ax1.barbs(x[wind_slice], y[wind_slice], u5[wind_slice, wind_slice], v5[\n wind_slice, wind_slice], length=7)\n ax1.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',\n fontsize=16)\n ax1.set_title('\\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),\n fontsize=11, loc='right')\n ax1.set_title('\\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'\n ).item(), 
fontsize=11, loc='left')\n ax1.set_extent((265, 300, 25, 50))\n plt.savefig(output_dir + '/GFS/gfs_hrly_h5vort_' + str(i) + '.png')\n plt.clf()\n plt.close()\n wind_slice_s = slice(10, -10, 10)\n fig2 = plt.figure(figsize=(15, 15))\n ax2 = fig2.add_subplot(111, projection=zH5_crs)\n ax2.coastlines(resolution='50m')\n ax2.add_feature(cfeature.BORDERS.with_scale('50m'))\n ax2.add_feature(cfeature.STATES.with_scale('50m'))\n h5c2 = ax2.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200,\n 60), linewidths=1.5)\n t5c2 = ax2.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),\n linestyles='dashed', linewidths=1)\n a5c2 = ax2.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 65, 2),\n alpha=0.8)\n a5cb2 = fig2.colorbar(a5c2, orientation='horizontal', aspect=80, ax=ax2,\n pad=0.01, extendrect=False, ticks=range(10, 60, 5))\n a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)\n ax2.barbs(x[wind_slice_s], y[wind_slice_s], u5[wind_slice_s,\n wind_slice_s], v5[wind_slice_s, wind_slice_s], length=7)\n ax2.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',\n fontsize=16)\n ax2.set_title('\\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),\n fontsize=11, loc='right')\n ax2.set_title('\\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'\n ).item(), fontsize=11, loc='left')\n ax2.set_extent((225, 300, 20, 65))\n plt.savefig(output_dir + '/GFS/gfs_hrly_h5vortCONUS_v2_' + str(i) + '.png')\n wind_slice_s = slice(10, -10, 10)\n fig3 = plt.figure(figsize=(15, 15))\n ax3 = fig3.add_subplot(111, projection=zH5_crs)\n ax3.coastlines(resolution='50m')\n ax3.add_feature(cfeature.BORDERS.with_scale('50m'))\n ax3.add_feature(cfeature.STATES.with_scale('50m'))\n h5c2 = ax3.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200,\n 60), linewidths=1.5)\n t5c2 = ax3.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),\n linestyles='dashed', linewidths=1)\n a5c2 = ax3.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 65, 2),\n alpha=0.8)\n a5cb2 = fig3.colorbar(a5c2, orientation='horizontal', aspect=80, ax=ax3,\n pad=0.01, extendrect=False, ticks=range(10, 60, 5))\n a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)\n ax3.barbs(x[wind_slice_s], y[wind_slice_s], u5[wind_slice_s,\n wind_slice_s], v5[wind_slice_s, wind_slice_s], length=7)\n ax3.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',\n fontsize=16)\n ax3.set_title('\\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),\n fontsize=11, loc='right')\n ax3.set_title('\\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'\n ).item(), fontsize=11, loc='left')\n ax3.set_extent((260, 320, 20, 65))\n plt.savefig(output_dir + '/GFS/gfs_hrly_h5vortC_ec_v1_' + str(i) + '.png')\n fcst_hr = str(0)\n print('Hour ' + str(i) + ' completed!')\n plt.close()\n timeelapsed = datetime.now() - startTime\n print(timeelapsed)\n<mask token>\n",
"step-4": "import cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport netCDF4\nimport xarray as xr\nimport metpy\nfrom datetime import datetime\nimport datetime as dt\nfrom metpy.units import units\nimport scipy.ndimage as ndimage\nfrom metpy.plots import USCOUNTIES\nimport cartopy\nfrom scipy.ndimage.filters import generic_filter as gf\n\n\ndef mkdir_p(mypath):\n \"\"\"Creates a directory. equivalent to using mkdir -p on the command line\"\"\"\n from errno import EEXIST\n from os import makedirs, path\n try:\n makedirs(mypath)\n except OSError as exc:\n if exc.errno == EEXIST and path.isdir(mypath):\n pass\n else:\n raise\n\n\nstartTime = datetime.now()\nm_date = '20200903'\nm_hour = '12'\nyear = startTime.year\nif startTime.month < 10:\n month = '0' + str(startTime.month)\nelse:\n month = str(startTime.month)\nif startTime.day < 10:\n day = '0' + str(startTime.day)\nelse:\n day = str(startTime.day)\nif startTime.hour < 10:\n hour = '0' + str(startTime.hour)\nelse:\n hour = str(startTime.hour)\nmdate = str(year) + str(month) + str(day)\n\n\ndef get_init_hr(hour):\n if int(hour) < 6:\n init_hour = '00'\n elif int(hour) < 11:\n init_hour = '06'\n elif int(hour) < 17:\n init_hour = '12'\n elif int(hour) < 22:\n init_hour = '18'\n else:\n init_hour = '00'\n return init_hour\n\n\nurl = ('http://nomads.ncep.noaa.gov:80/dods/gfs_0p25_1hr/gfs' + mdate +\n '/gfs_0p25_1hr_' + get_init_hr(hour) + 'z')\ninit_hour = get_init_hr(hour)\n<mask token>\noutput_dir = str(year) + str(month) + str(day) + '_' + str(init_hour) + '00'\nmkdir_p(output_dir)\nmkdir_p(output_dir + '/GFS')\nds = xr.open_dataset(url)\ninit_hr = dt.datetime(int(year), int(month), int(day), int(init_hour))\ntimes = ds['tmp2m'].metpy.time\ninit_time = ds['time'][0]\nlats = np.arange(15, 70, 0.25)\nlons = np.arange(220, 330, 0.25)\nfor i in range(1, 120):\n fc_hr = init_hr + dt.timedelta(hours=1 * i)\n forecast_hour = times[0].values\n data = ds.metpy.parse_cf()\n data = data.isel(time=i)\n data = data.rename({'absvprs': 'avort', 'hgtprs': 'gph', 'rhprs': 'rh',\n 'tmpprs': 'temp', 'ugrdprs': 'u', 'vgrdprs': 'v'})\n vertical, = data['temp'].metpy.coordinates('vertical')\n time = data['temp'].metpy.time\n zH5_crs = data['temp'].metpy.cartopy_crs\n t5 = data['temp'].sel(lev=500.0, lat=lats, lon=lons)\n u5 = data['u'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 1.94384449\n v5 = data['v'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 1.94384449\n av5 = data['avort'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 100000.0\n rh5 = data['rh'].sel(lev=500.0, lat=lats, lon=lons).squeeze()\n h5 = data['gph'].sel(lev=500.0, lat=lats, lon=lons).squeeze()\n x, y = t5.metpy.coordinates('x', 'y')\n lat, lon = xr.broadcast(y, x)\n wind_slice = slice(5, -5, 5)\n fig = plt.figure(figsize=(15, 15))\n ax1 = fig.add_subplot(111, projection=zH5_crs)\n ax1.coastlines(resolution='10m')\n ax1.add_feature(cfeature.BORDERS.with_scale('10m'))\n ax1.add_feature(cfeature.STATES.with_scale('10m'))\n h5c = ax1.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200, \n 60), linewidths=1.5)\n t5c = ax1.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),\n linestyles='dashed', linewidths=1)\n a5c = ax1.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 60, 2),\n alpha=0.8, extend='max')\n a5cb = fig.colorbar(a5c, orientation='horizontal', aspect=80, ax=ax1,\n pad=0.01, extendrect=False, ticks=range(10, 61, 5))\n a5cb.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)\n 
ax1.barbs(x[wind_slice], y[wind_slice], u5[wind_slice, wind_slice], v5[\n wind_slice, wind_slice], length=7)\n ax1.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',\n fontsize=16)\n ax1.set_title('\\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),\n fontsize=11, loc='right')\n ax1.set_title('\\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'\n ).item(), fontsize=11, loc='left')\n ax1.set_extent((265, 300, 25, 50))\n plt.savefig(output_dir + '/GFS/gfs_hrly_h5vort_' + str(i) + '.png')\n plt.clf()\n plt.close()\n wind_slice_s = slice(10, -10, 10)\n fig2 = plt.figure(figsize=(15, 15))\n ax2 = fig2.add_subplot(111, projection=zH5_crs)\n ax2.coastlines(resolution='50m')\n ax2.add_feature(cfeature.BORDERS.with_scale('50m'))\n ax2.add_feature(cfeature.STATES.with_scale('50m'))\n h5c2 = ax2.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200,\n 60), linewidths=1.5)\n t5c2 = ax2.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),\n linestyles='dashed', linewidths=1)\n a5c2 = ax2.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 65, 2),\n alpha=0.8)\n a5cb2 = fig2.colorbar(a5c2, orientation='horizontal', aspect=80, ax=ax2,\n pad=0.01, extendrect=False, ticks=range(10, 60, 5))\n a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)\n ax2.barbs(x[wind_slice_s], y[wind_slice_s], u5[wind_slice_s,\n wind_slice_s], v5[wind_slice_s, wind_slice_s], length=7)\n ax2.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',\n fontsize=16)\n ax2.set_title('\\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),\n fontsize=11, loc='right')\n ax2.set_title('\\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'\n ).item(), fontsize=11, loc='left')\n ax2.set_extent((225, 300, 20, 65))\n plt.savefig(output_dir + '/GFS/gfs_hrly_h5vortCONUS_v2_' + str(i) + '.png')\n wind_slice_s = slice(10, -10, 10)\n fig3 = plt.figure(figsize=(15, 15))\n ax3 = fig3.add_subplot(111, projection=zH5_crs)\n ax3.coastlines(resolution='50m')\n ax3.add_feature(cfeature.BORDERS.with_scale('50m'))\n ax3.add_feature(cfeature.STATES.with_scale('50m'))\n h5c2 = ax3.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200,\n 60), linewidths=1.5)\n t5c2 = ax3.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),\n linestyles='dashed', linewidths=1)\n a5c2 = ax3.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 65, 2),\n alpha=0.8)\n a5cb2 = fig3.colorbar(a5c2, orientation='horizontal', aspect=80, ax=ax3,\n pad=0.01, extendrect=False, ticks=range(10, 60, 5))\n a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)\n ax3.barbs(x[wind_slice_s], y[wind_slice_s], u5[wind_slice_s,\n wind_slice_s], v5[wind_slice_s, wind_slice_s], length=7)\n ax3.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',\n fontsize=16)\n ax3.set_title('\\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),\n fontsize=11, loc='right')\n ax3.set_title('\\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'\n ).item(), fontsize=11, loc='left')\n ax3.set_extent((260, 320, 20, 65))\n plt.savefig(output_dir + '/GFS/gfs_hrly_h5vortC_ec_v1_' + str(i) + '.png')\n fcst_hr = str(0)\n print('Hour ' + str(i) + ' completed!')\n plt.close()\n timeelapsed = datetime.now() - startTime\n print(timeelapsed)\n<mask token>\n",
"step-5": "import cartopy.crs as ccrs\r\nimport cartopy.feature as cfeature\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport netCDF4\r\nimport xarray as xr\r\nimport metpy\r\nfrom datetime import datetime\r\nimport datetime as dt\r\nfrom metpy.units import units\r\nimport scipy.ndimage as ndimage\r\nfrom metpy.plots import USCOUNTIES\r\nimport cartopy\r\nfrom scipy.ndimage.filters import generic_filter as gf\r\n\r\n\r\ndef mkdir_p(mypath):\r\n '''Creates a directory. equivalent to using mkdir -p on the command line'''\r\n\r\n from errno import EEXIST\r\n from os import makedirs,path\r\n\r\n try:\r\n makedirs(mypath)\r\n except OSError as exc: # Python >2.5\r\n if exc.errno == EEXIST and path.isdir(mypath):\r\n pass\r\n else: raise\r\n\r\nstartTime=datetime.now()\r\n\r\nm_date='20200903'\r\nm_hour='12'\r\n\r\nyear = startTime.year\r\n\r\nif startTime.month <10:\r\n month = '0'+str(startTime.month)\r\nelse:\r\n month = str(startTime.month)\r\n\r\nif startTime.day <10:\r\n day = '0'+str(startTime.day)\r\nelse:\r\n day = str(startTime.day)\r\n\r\nif startTime.hour <10:\r\n hour = '0'+str(startTime.hour)\r\nelse:\r\n hour = str(startTime.hour)\r\n\r\nmdate = str(year)+str(month)+str(day)\r\n\r\ndef get_init_hr(hour):\r\n if int(hour) <6:\r\n init_hour = '00'\r\n elif int(hour) <11:\r\n init_hour = '06'\r\n elif int(hour) <17:\r\n init_hour = '12'\r\n elif int(hour) <22:\r\n init_hour = '18'\r\n else:\r\n init_hour = '00'\r\n return(init_hour)\r\n\r\nurl = 'http://nomads.ncep.noaa.gov:80/dods/gfs_0p25_1hr/gfs'+mdate+'/gfs_0p25_1hr_'+get_init_hr(hour)+'z'\r\ninit_hour = get_init_hr(hour)\r\n'''\r\nfor i in range(119):\r\n fhr = i+1\r\n'''\r\n# Create new directory\r\noutput_dir = str(year)+str(month)+str(day)+'_'+str(init_hour)+'00'\r\nmkdir_p(output_dir)\r\nmkdir_p(output_dir+'/GFS')\r\n#Parse data using MetPy\r\nds = xr.open_dataset(url)\r\ninit_hr = dt.datetime(int(year),int(month),int(day),int(init_hour))\r\ntimes = ds['tmp2m'].metpy.time\r\ninit_time = ds['time'][0]\r\n\r\nlats = np.arange(15,70,0.25)\r\nlons = np.arange(220,330,0.25)\r\n\r\nfor i in range(1,120):\r\n fc_hr = init_hr+dt.timedelta(hours=1*i)\r\n forecast_hour = times[0].values\r\n\r\n data = ds.metpy.parse_cf()\r\n data = data.isel(time=i)\r\n #Rename variables to useful things\r\n data = data.rename({\r\n 'absvprs':'avort',\r\n 'hgtprs':'gph',\r\n 'rhprs':'rh',\r\n 'tmpprs':'temp',\r\n 'ugrdprs':'u',\r\n 'vgrdprs': 'v',\r\n })\r\n\r\n vertical, = data['temp'].metpy.coordinates('vertical')\r\n time = data['temp'].metpy.time\r\n zH5_crs = data['temp'].metpy.cartopy_crs\r\n\r\n t5 = data['temp'].sel(lev=500.0,lat=lats,lon=lons)\r\n u5 = data['u'].sel(lev=500.0,lat=lats,lon=lons).squeeze()*1.94384449\r\n v5 = data['v'].sel(lev=500.0,lat=lats,lon=lons).squeeze()*1.94384449\r\n av5 = data['avort'].sel(lev=500.0,lat=lats,lon=lons).squeeze()*1e5\r\n rh5 = data['rh'].sel(lev=500.0,lat=lats,lon=lons).squeeze()\r\n h5 = data['gph'].sel(lev=500.0,lat=lats,lon=lons).squeeze()\r\n x, y = t5.metpy.coordinates('x', 'y')\r\n lat, lon = xr.broadcast(y, x)\r\n wind_slice = slice(5,-5,5)\r\n ########## SET UP FIGURE ##################################################\r\n fig = plt.figure(figsize=(15,15))\r\n ax1 = fig.add_subplot(111, projection = zH5_crs)\r\n\r\n ax1.coastlines(resolution='10m')\r\n ax1.add_feature(cfeature.BORDERS.with_scale('10m'))\r\n ax1.add_feature(cfeature.STATES.with_scale('10m'))\r\n\r\n #fig.suptitle(\"NAM Forecast valid at \" + time[0].dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=36)\r\n\r\n 
########## PLOTTING #######################################################\r\n h5c = ax1.contour(x,y,h5,colors='dimgray', levels = range(4800,6200,60),linewidths=1.5)\r\n t5c = ax1.contour(x,y,t5,colors='r', levels = range(-60,0,5),linestyles='dashed',linewidths=1)\r\n a5c = ax1.contourf(x,y,av5,cmap='autumn_r',levels=range(10,60,2),alpha=0.8,extend='max')\r\n a5cb = fig.colorbar(a5c, orientation = 'horizontal', aspect = 80, ax = ax1, pad = 0.01,\r\n extendrect=False, ticks = range(10,61,5))\r\n a5cb.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize = 12)\r\n ax1.barbs(x[wind_slice],y[wind_slice],u5[wind_slice,wind_slice],v5[wind_slice,wind_slice], length=7)\r\n\r\n #h_contour = ax1.contour(x, y, mslpc, colors='dimgray', levels=range(940,1040,4),linewidths=2)\r\n #h_contour.clabel(fontsize=14, colors='dimgray', inline=1, inline_spacing=4, fmt='%i mb', rightside_up=True, use_clabeltext=True)\r\n ax1.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',fontsize=16)\r\n ax1.set_title('\\n Valid: '+time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='right')\r\n ax1.set_title('\\n GFS Init: '+init_time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='left')\r\n ax1.set_extent((265, 300, 25, 50))#, crs = zH5_crs) # Set a title and show the plot\r\n plt.savefig(output_dir+'/GFS/gfs_hrly_h5vort_'+str(i)+'.png')\r\n plt.clf()\r\n plt.close()\r\n ########## PLOT 2 #######################################################\r\n wind_slice_s = slice (10,-10,10)\r\n fig2 = plt.figure(figsize=(15,15))\r\n ax2 = fig2.add_subplot(111,projection=zH5_crs)\r\n ax2.coastlines(resolution='50m')\r\n ax2.add_feature(cfeature.BORDERS.with_scale('50m'))\r\n ax2.add_feature(cfeature.STATES.with_scale('50m'))\r\n h5c2 = ax2.contour(x,y,h5,colors='dimgray', levels = range(4800,6200,60),linewidths=1.5)\r\n t5c2 = ax2.contour(x,y,t5,colors='r', levels = range(-60,0,5),linestyles='dashed',linewidths=1)\r\n a5c2 = ax2.contourf(x,y,av5,cmap='autumn_r',levels=range(10,65,2),alpha=0.8)\r\n a5cb2 = fig2.colorbar(a5c2, orientation = 'horizontal', aspect = 80, ax = ax2, pad = 0.01,\r\n extendrect=False, ticks = range(10,60,5))\r\n a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize = 12)\r\n ax2.barbs(x[wind_slice_s],y[wind_slice_s],u5[wind_slice_s,wind_slice_s],v5[wind_slice_s,wind_slice_s], length=7)\r\n\r\n #h_contour = ax1.contour(x, y, mslpc, colors='dimgray', levels=range(940,1040,4),linewidths=2)\r\n #h_contour.clabel(fontsize=14, colors='dimgray', inline=1, inline_spacing=4, fmt='%i mb', rightside_up=True, use_clabeltext=True)\r\n ax2.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',fontsize=16)\r\n ax2.set_title('\\n Valid: '+time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='right')\r\n ax2.set_title('\\n GFS Init: '+init_time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='left')\r\n ax2.set_extent((225, 300, 20, 65))#, crs = zH5_crs) # Set a title and show the plot\r\n plt.savefig(output_dir+'/GFS/gfs_hrly_h5vortCONUS_v2_'+str(i)+'.png')\r\n\r\n ########## PLOT 3 #######################################################\r\n wind_slice_s = slice (10,-10,10)\r\n fig3 = plt.figure(figsize=(15,15))\r\n ax3 = fig3.add_subplot(111,projection=zH5_crs)\r\n ax3.coastlines(resolution='50m')\r\n ax3.add_feature(cfeature.BORDERS.with_scale('50m'))\r\n ax3.add_feature(cfeature.STATES.with_scale('50m'))\r\n h5c2 = ax3.contour(x,y,h5,colors='dimgray', levels = range(4800,6200,60),linewidths=1.5)\r\n t5c2 = ax3.contour(x,y,t5,colors='r', levels = 
range(-60,0,5),linestyles='dashed',linewidths=1)\r\n a5c2 = ax3.contourf(x,y,av5,cmap='autumn_r',levels=range(10,65,2),alpha=0.8)\r\n a5cb2 = fig3.colorbar(a5c2, orientation = 'horizontal', aspect = 80, ax = ax3, pad = 0.01,\r\n extendrect=False, ticks = range(10,60,5))\r\n a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize = 12)\r\n ax3.barbs(x[wind_slice_s],y[wind_slice_s],u5[wind_slice_s,wind_slice_s],v5[wind_slice_s,wind_slice_s], length=7)\r\n\r\n #h_contour = ax1.contour(x, y, mslpc, colors='dimgray', levels=range(940,1040,4),linewidths=2)\r\n #h_contour.clabel(fontsize=14, colors='dimgray', inline=1, inline_spacing=4, fmt='%i mb', rightside_up=True, use_clabeltext=True)\r\n ax3.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',fontsize=16)\r\n ax3.set_title('\\n Valid: '+time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='right')\r\n ax3.set_title('\\n GFS Init: '+init_time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='left')\r\n ax3.set_extent((260, 320, 20, 65))#, crs = zH5_crs) # Set a title and show the plot\r\n plt.savefig(output_dir+'/GFS/gfs_hrly_h5vortC_ec_v1_'+str(i)+'.png')\r\n\r\n fcst_hr = str(0)\r\n print('Hour '+str(i)+' completed!')\r\n plt.close()\r\n timeelapsed = datetime.now()-startTime\r\n print(timeelapsed)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n'''\r\nurl= 'http://nomads.ncep.noaa.gov:80/dods/gfs_0p25_1hr/gfs20200903/gfs_0p25_1hr_12z'\r\nds = xr.open_dataset(url)\r\nt2m_ds = ds['tmp2m']\r\ninit_hr = t2m_ds['time'][0].values\r\n#fc_hr = t2m.ds['time'][i].values\r\nlats = np.arange(20,50,0.25)\r\nlons = np.arange(240,300,0.25)\r\nt2m = t2m_ds.sel(time = init_hr, lat = lats, lon = lons)\r\nprint(t2m)\r\n\r\nfig = plt.figure(figsize = (12,12))\r\nfig.clf()\r\nax = plt.axes(projection=ccrs.PlateCarree())\r\nax.coastlines()\r\nax.set_extent((240,300, 20, 50), crs = ccrs.PlateCarree())\r\nt2m_c = ax.contourf(t2m, cmap='RdPu')\r\nplt.savefig('testingnomads6.png')\r\n'''\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#from tkinter import Tk, Text, INSERT
import mnemonicos as mne
class Ensambler(object):
def __init__(self, fileName):
		# name of the source file
		self.fileName = fileName
		# lines of the file
		self.fileLines = []
		# location counter
		self.cl = 0
		# size of the current instruction
		self.size = 0
		# opcode
		self.code = ""
		# instruction
		self.instruction = ""
		# operand count
		self.num_ope = 0
		# operands
		self.operands = []
		# symbol table
		self.TS = {}
		# object code
		self.CO = []
		# auxiliary value (holds resolved label addresses)
		self.x = 0
#self.window = Tk()
#self.window.geometry('400x50')
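
	# read the source file into memory, stripping newline and tab characters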
def leerArchivo(self):
file = open(self.fileName, "r")
for line in file:
line = line.replace("\n", "")
line = line.replace("\t", "")
self.fileLines.append(line)
file.close()
	# First pass: build the symbol table
def first_pass(self):
for line in self.fileLines:
self.clean_line(line)
self.get_label()
self.get_operands()
if self.num_ope == 1:
if self.instruction in mne.v_jump:
if self.instruction == "JP":
						self.x = self.TS[self.operands[0]]
print("l")
print(self.x)
if self.operands[0] in mne.v_jump:
self.instruction = self.instruction + " " + self.operands[0]+","+self.operands[1]
if self.operands[0][1:-1].isnumeric():
self.instruction = self.instruction + " " + self.operands[0]+","+self.operands[1]
if self.num_ope == 1:
if self.instruction in mne.v_jump:
self.operands[0] = "nn"
self.instruction = self.instruction + " " + self.operands[0]
code, size = mne.map_mnem.get(self.instruction,"Error")("0000")
self.cl += size
else:
			# validate when it is not a valid opcode
print(self.instruction)
#code, size = mne.map_mnem.get(self.instruction,"Error")()
#lst = "CL: " + str(self.cl) + " Code: " + code
#self.CO.append(code)
print(self.CO)
print(self.cl)
print(self.TS)
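
	# Second pass: resolve label references against the symbol table and emit object code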
def Second_pass(self):
for line in self.fileLines:
self.clean_line(line)
self.get_label()
self.get_operands()
if self.instruction in mne.v_jump:
if len(self.operands) == 2:
aux = self.operands[1]
else:
aux = self.operands[0]
if aux in self.TS.keys():
self.x = self.TS[aux]
self.instruction = self.instruction + " " + "nn"
code, size = mne.map_mnem.get(self.instruction,"Error")(str(self.x))
self.CO.append(code)
else:
print("Error")
else:
if self.num_ope == 2:
self.instruction = self.instruction + " " + self.operands[0]+","+self.operands[1]
if self.num_ope == 1:
self.instruction = self.instruction + " " + self.operands[0]
code, size = mne.map_mnem.get(self.instruction,"Error")()
self.CO.append(code)
print(self.CO)
	# strip comments
def clean_line(self,line):
line = line.split(";")
self.instruction = line[0].upper().replace(",","")
	# extract and store the label, if one exists
def get_label(self):
label = self.instruction.split(":")
if len(label) > 1:
if label[0] in mne.v_ops or label[0] in mne.map_mnem:
				print("Error: invalid label")
			# strip leading whitespace
self.TS[label[0].strip()] = self.cl
del label[0]
self.instruction = label[0]
	# extract the operands and the instruction
def get_operands(self):
line = self.instruction.split()
self.operands = [operand for operand in line]
self.instruction = self.operands[0]
del self.operands[0]
self.num_ope = len(self.operands)
aux = Ensambler("1.txt")
aux.leerArchivo()
aux.first_pass()
aux.Second_pass()
|
normal
|
{
"blob_id": "3bc009271c7dd34ad09bcef81214387b63dfac59",
"index": 2549,
"step-1": "<mask token>\n\n\nclass Ensambler(object):\n\n def __init__(self, fileName):\n self.fileName = fileName\n self.fileLines = []\n self.cl = 0\n self.size = 0\n self.code = ''\n self.instruction = ''\n self.num_ope = 0\n self.operands = []\n self.TS = {}\n self.CO = []\n self.x = 0\n\n def leerArchivo(self):\n file = open(self.fileName, 'r')\n for line in file:\n line = line.replace('\\n', '')\n line = line.replace('\\t', '')\n self.fileLines.append(line)\n file.close()\n\n def first_pass(self):\n for line in self.fileLines:\n self.clean_line(line)\n self.get_label()\n self.get_operands()\n if self.num_ope == 1:\n if self.instruction in mne.v_jump:\n if self.instruction == 'JP':\n self.x = self.TS[operands[0]]\n print('l')\n print(self.x)\n if self.operands[0] in mne.v_jump:\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.operands[0][1:-1].isnumeric():\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.num_ope == 1:\n if self.instruction in mne.v_jump:\n self.operands[0] = 'nn'\n self.instruction = (self.instruction + ' ' + self.\n operands[0])\n code, size = mne.map_mnem.get(self.instruction, 'Error'\n )('0000')\n self.cl += size\n else:\n print(self.instruction)\n print(self.CO)\n print(self.cl)\n print(self.TS)\n\n def Second_pass(self):\n for line in self.fileLines:\n self.clean_line(line)\n self.get_label()\n self.get_operands()\n if self.instruction in mne.v_jump:\n if len(self.operands) == 2:\n aux = self.operands[1]\n else:\n aux = self.operands[0]\n if aux in self.TS.keys():\n self.x = self.TS[aux]\n self.instruction = self.instruction + ' ' + 'nn'\n code, size = mne.map_mnem.get(self.instruction, 'Error')(\n str(self.x))\n self.CO.append(code)\n else:\n print('Error')\n else:\n if self.num_ope == 2:\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.num_ope == 1:\n self.instruction = self.instruction + ' ' + self.operands[0\n ]\n code, size = mne.map_mnem.get(self.instruction, 'Error')()\n self.CO.append(code)\n print(self.CO)\n <mask token>\n\n def get_label(self):\n label = self.instruction.split(':')\n if len(label) > 1:\n if label[0] in mne.v_ops or label[0] in mne.map_mnem:\n print('Error etiqueta invalida')\n self.TS[label[0].strip()] = self.cl\n del label[0]\n self.instruction = label[0]\n\n def get_operands(self):\n line = self.instruction.split()\n self.operands = [operand for operand in line]\n self.instruction = self.operands[0]\n del self.operands[0]\n self.num_ope = len(self.operands)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Ensambler(object):\n\n def __init__(self, fileName):\n self.fileName = fileName\n self.fileLines = []\n self.cl = 0\n self.size = 0\n self.code = ''\n self.instruction = ''\n self.num_ope = 0\n self.operands = []\n self.TS = {}\n self.CO = []\n self.x = 0\n\n def leerArchivo(self):\n file = open(self.fileName, 'r')\n for line in file:\n line = line.replace('\\n', '')\n line = line.replace('\\t', '')\n self.fileLines.append(line)\n file.close()\n\n def first_pass(self):\n for line in self.fileLines:\n self.clean_line(line)\n self.get_label()\n self.get_operands()\n if self.num_ope == 1:\n if self.instruction in mne.v_jump:\n if self.instruction == 'JP':\n self.x = self.TS[operands[0]]\n print('l')\n print(self.x)\n if self.operands[0] in mne.v_jump:\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.operands[0][1:-1].isnumeric():\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.num_ope == 1:\n if self.instruction in mne.v_jump:\n self.operands[0] = 'nn'\n self.instruction = (self.instruction + ' ' + self.\n operands[0])\n code, size = mne.map_mnem.get(self.instruction, 'Error'\n )('0000')\n self.cl += size\n else:\n print(self.instruction)\n print(self.CO)\n print(self.cl)\n print(self.TS)\n\n def Second_pass(self):\n for line in self.fileLines:\n self.clean_line(line)\n self.get_label()\n self.get_operands()\n if self.instruction in mne.v_jump:\n if len(self.operands) == 2:\n aux = self.operands[1]\n else:\n aux = self.operands[0]\n if aux in self.TS.keys():\n self.x = self.TS[aux]\n self.instruction = self.instruction + ' ' + 'nn'\n code, size = mne.map_mnem.get(self.instruction, 'Error')(\n str(self.x))\n self.CO.append(code)\n else:\n print('Error')\n else:\n if self.num_ope == 2:\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.num_ope == 1:\n self.instruction = self.instruction + ' ' + self.operands[0\n ]\n code, size = mne.map_mnem.get(self.instruction, 'Error')()\n self.CO.append(code)\n print(self.CO)\n\n def clean_line(self, line):\n line = line.split(';')\n self.instruction = line[0].upper().replace(',', '')\n\n def get_label(self):\n label = self.instruction.split(':')\n if len(label) > 1:\n if label[0] in mne.v_ops or label[0] in mne.map_mnem:\n print('Error etiqueta invalida')\n self.TS[label[0].strip()] = self.cl\n del label[0]\n self.instruction = label[0]\n\n def get_operands(self):\n line = self.instruction.split()\n self.operands = [operand for operand in line]\n self.instruction = self.operands[0]\n del self.operands[0]\n self.num_ope = len(self.operands)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Ensambler(object):\n\n def __init__(self, fileName):\n self.fileName = fileName\n self.fileLines = []\n self.cl = 0\n self.size = 0\n self.code = ''\n self.instruction = ''\n self.num_ope = 0\n self.operands = []\n self.TS = {}\n self.CO = []\n self.x = 0\n\n def leerArchivo(self):\n file = open(self.fileName, 'r')\n for line in file:\n line = line.replace('\\n', '')\n line = line.replace('\\t', '')\n self.fileLines.append(line)\n file.close()\n\n def first_pass(self):\n for line in self.fileLines:\n self.clean_line(line)\n self.get_label()\n self.get_operands()\n if self.num_ope == 1:\n if self.instruction in mne.v_jump:\n if self.instruction == 'JP':\n self.x = self.TS[operands[0]]\n print('l')\n print(self.x)\n if self.operands[0] in mne.v_jump:\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.operands[0][1:-1].isnumeric():\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.num_ope == 1:\n if self.instruction in mne.v_jump:\n self.operands[0] = 'nn'\n self.instruction = (self.instruction + ' ' + self.\n operands[0])\n code, size = mne.map_mnem.get(self.instruction, 'Error'\n )('0000')\n self.cl += size\n else:\n print(self.instruction)\n print(self.CO)\n print(self.cl)\n print(self.TS)\n\n def Second_pass(self):\n for line in self.fileLines:\n self.clean_line(line)\n self.get_label()\n self.get_operands()\n if self.instruction in mne.v_jump:\n if len(self.operands) == 2:\n aux = self.operands[1]\n else:\n aux = self.operands[0]\n if aux in self.TS.keys():\n self.x = self.TS[aux]\n self.instruction = self.instruction + ' ' + 'nn'\n code, size = mne.map_mnem.get(self.instruction, 'Error')(\n str(self.x))\n self.CO.append(code)\n else:\n print('Error')\n else:\n if self.num_ope == 2:\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.num_ope == 1:\n self.instruction = self.instruction + ' ' + self.operands[0\n ]\n code, size = mne.map_mnem.get(self.instruction, 'Error')()\n self.CO.append(code)\n print(self.CO)\n\n def clean_line(self, line):\n line = line.split(';')\n self.instruction = line[0].upper().replace(',', '')\n\n def get_label(self):\n label = self.instruction.split(':')\n if len(label) > 1:\n if label[0] in mne.v_ops or label[0] in mne.map_mnem:\n print('Error etiqueta invalida')\n self.TS[label[0].strip()] = self.cl\n del label[0]\n self.instruction = label[0]\n\n def get_operands(self):\n line = self.instruction.split()\n self.operands = [operand for operand in line]\n self.instruction = self.operands[0]\n del self.operands[0]\n self.num_ope = len(self.operands)\n\n\n<mask token>\naux.leerArchivo()\naux.first_pass()\naux.Second_pass()\n",
"step-4": "<mask token>\n\n\nclass Ensambler(object):\n\n def __init__(self, fileName):\n self.fileName = fileName\n self.fileLines = []\n self.cl = 0\n self.size = 0\n self.code = ''\n self.instruction = ''\n self.num_ope = 0\n self.operands = []\n self.TS = {}\n self.CO = []\n self.x = 0\n\n def leerArchivo(self):\n file = open(self.fileName, 'r')\n for line in file:\n line = line.replace('\\n', '')\n line = line.replace('\\t', '')\n self.fileLines.append(line)\n file.close()\n\n def first_pass(self):\n for line in self.fileLines:\n self.clean_line(line)\n self.get_label()\n self.get_operands()\n if self.num_ope == 1:\n if self.instruction in mne.v_jump:\n if self.instruction == 'JP':\n self.x = self.TS[operands[0]]\n print('l')\n print(self.x)\n if self.operands[0] in mne.v_jump:\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.operands[0][1:-1].isnumeric():\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.num_ope == 1:\n if self.instruction in mne.v_jump:\n self.operands[0] = 'nn'\n self.instruction = (self.instruction + ' ' + self.\n operands[0])\n code, size = mne.map_mnem.get(self.instruction, 'Error'\n )('0000')\n self.cl += size\n else:\n print(self.instruction)\n print(self.CO)\n print(self.cl)\n print(self.TS)\n\n def Second_pass(self):\n for line in self.fileLines:\n self.clean_line(line)\n self.get_label()\n self.get_operands()\n if self.instruction in mne.v_jump:\n if len(self.operands) == 2:\n aux = self.operands[1]\n else:\n aux = self.operands[0]\n if aux in self.TS.keys():\n self.x = self.TS[aux]\n self.instruction = self.instruction + ' ' + 'nn'\n code, size = mne.map_mnem.get(self.instruction, 'Error')(\n str(self.x))\n self.CO.append(code)\n else:\n print('Error')\n else:\n if self.num_ope == 2:\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.num_ope == 1:\n self.instruction = self.instruction + ' ' + self.operands[0\n ]\n code, size = mne.map_mnem.get(self.instruction, 'Error')()\n self.CO.append(code)\n print(self.CO)\n\n def clean_line(self, line):\n line = line.split(';')\n self.instruction = line[0].upper().replace(',', '')\n\n def get_label(self):\n label = self.instruction.split(':')\n if len(label) > 1:\n if label[0] in mne.v_ops or label[0] in mne.map_mnem:\n print('Error etiqueta invalida')\n self.TS[label[0].strip()] = self.cl\n del label[0]\n self.instruction = label[0]\n\n def get_operands(self):\n line = self.instruction.split()\n self.operands = [operand for operand in line]\n self.instruction = self.operands[0]\n del self.operands[0]\n self.num_ope = len(self.operands)\n\n\naux = Ensambler('1.txt')\naux.leerArchivo()\naux.first_pass()\naux.Second_pass()\n",
"step-5": "\n#from tkinter import Tk, Text, INSERT\nimport mnemonicos as mne\n\n\nclass Ensambler(object):\n\n\n\tdef __init__(self, fileName):\n\t\n\t\t#Nombre del archivo\n\t\tself.fileName = fileName\n\t\t#Lineas del Archivo\n\t\tself.fileLines = []\n\t\t#Contador de Localidades\n\t\tself.cl = 0\n\t\t#Tamaño\n\t\tself.size = 0\n\t\t#Opcode\n\t\tself.code = \"\"\n\t\t#Intruccion\n\t\tself.instruction = \"\"\n\t\t#Contador de operadores\n\t\tself.num_ope = 0\n\t\t#Operandos\n\t\tself.operands = []\n\t\t# Tabla de simbolos\n\t\tself.TS = {}\n\t\t# Codigo Objeto\n\t\tself.CO = []\n\t\t#Aux\n\t\tself.x = 0\n\n\t\t#self.window = Tk()\n\t\t#self.window.geometry('400x50')\n\n\tdef leerArchivo(self):\n\t\tfile = open(self.fileName, \"r\")\n\t\tfor line in file:\n\t\t\tline = line.replace(\"\\n\", \"\")\n\t\t\tline = line.replace(\"\\t\", \"\")\n\t\t\tself.fileLines.append(line)\n\t\tfile.close()\n\n\t#Primera Pasada\n\tdef first_pass(self):\n\t\tfor line in self.fileLines:\n\t\t\tself.clean_line(line)\n\t\t\tself.get_label()\n\t\t\tself.get_operands()\n\t\t\tif self.num_ope == 1:\n\t\t\t\tif self.instruction in mne.v_jump:\n\t\t\t\t\tif self.instruction == \"JP\":\n\t\t\t\t\t\tself.x = self.TS[operands[0]]\n\t\t\t\t\t\tprint(\"l\")\n\t\t\t\t\t\tprint(self.x)\n\n\n\t\t\t\tif self.operands[0] in mne.v_jump:\n\t\t\t\t\tself.instruction = self.instruction + \" \" + self.operands[0]+\",\"+self.operands[1]\n\n\t\t\t\tif self.operands[0][1:-1].isnumeric():\n\t\t\t\t\tself.instruction = self.instruction + \" \" + self.operands[0]+\",\"+self.operands[1]\n\n\n\t\t\t\tif self.num_ope == 1:\n\t\t\t\t\tif self.instruction in mne.v_jump:\n\t\t\t\t\t\tself.operands[0] = \"nn\"\n\t\t\t\t\t\tself.instruction = self.instruction + \" \" + self.operands[0]\n\t\t\t\t\t\tcode, size = mne.map_mnem.get(self.instruction,\"Error\")(\"0000\")\n\t\t\t\t\t\tself.cl += size \n\t\t\telse:\n\t\t\t\t\n\t\t\t#Valida si no es opcode valido\n\t\t\t\tprint(self.instruction)\n\t\t\t#code, size = mne.map_mnem.get(self.instruction,\"Error\")()\n\t\t\t\n\t\t\t#lst = \"CL: \" + str(self.cl) + \" Code: \" + code\n\t\t\t#self.CO.append(code)\n\t\tprint(self.CO)\n\t\tprint(self.cl)\n\t\tprint(self.TS)\n\n\n\tdef Second_pass(self):\n\t\tfor line in self.fileLines:\n\t\t\tself.clean_line(line)\n\t\t\tself.get_label()\n\t\t\tself.get_operands()\n\t\t\t\n\t\t\tif self.instruction in mne.v_jump:\n\n\t\t\t\tif len(self.operands) == 2:\n\t\t\t\t\taux = self.operands[1]\n\t\t\t\telse:\n\t\t\t\t\taux = self.operands[0]\n\n\t\t\t\tif aux in self.TS.keys():\n\t\t\t\t\tself.x = self.TS[aux]\n\t\t\t\t\tself.instruction = self.instruction + \" \" + \"nn\"\n\t\t\t\t\tcode, size = mne.map_mnem.get(self.instruction,\"Error\")(str(self.x))\n\t\t\t\t\tself.CO.append(code)\n\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Error\")\n\t\t\telse:\n\t\t\t\tif self.num_ope == 2:\n\t\t\t\t\tself.instruction = self.instruction + \" \" + self.operands[0]+\",\"+self.operands[1]\n\t\t\t\tif self.num_ope == 1:\n\t\t\t\t\tself.instruction = self.instruction + \" \" + self.operands[0]\n\t\t\t\tcode, size = mne.map_mnem.get(self.instruction,\"Error\")()\n\t\t\t\tself.CO.append(code)\n\t\tprint(self.CO)\n\n\n\t#Quitar Comentarios\n\tdef clean_line(self,line):\n\t\tline = line.split(\";\")\n\t\tself.instruction = line[0].upper().replace(\",\",\"\")\n\n\t# Obtener y guardar etiqueta si existe\n\tdef get_label(self):\n\n\t\tlabel = self.instruction.split(\":\")\n\n\t\tif len(label) > 1:\n\n\t\t\tif label[0] in mne.v_ops or label[0] in mne.map_mnem:\n\t\t\t\tprint(\"Error etiqueta 
invalida\")\n\t\t\t#Quitar espacio al inicio\n\t\t\tself.TS[label[0].strip()] = self.cl\n\n\t\t\tdel label[0]\n\n\n\t\tself.instruction = label[0]\n\n\t#Obtener los operandos y la instruccion\n\tdef get_operands(self):\n\t\tline = self.instruction.split()\n\t\tself.operands = [operand for operand in line]\n\t\tself.instruction = self.operands[0]\n\t\tdel self.operands[0]\n\t\tself.num_ope = len(self.operands)\n\n\t\t\n\t\naux = Ensambler(\"1.txt\")\naux.leerArchivo()\naux.first_pass()\naux.Second_pass()\n\n",
"step-ids": [
7,
8,
9,
10,
12
]
}
|
[
7,
8,
9,
10,
12
] |
print('\n')
# First variant: dispatching functions stored in a dict
def fn1():
print("One")
def fn2():
print("Two")
def fn3():
print("Three")
fndict = {"A": fn1, "B": fn2, "C": fn3}
keynames = ["A", "B", "C"]
fndict[keynames[1]]()
fndict['C']()
# Second variant: dict-dispatched functions that take arguments
def add(one,two):
c = one+two
print(c)
print(type(c))
def sub(one,two):
c = one-two
print(c)
print(type(c))
trainee = {1:add, 2:sub}
trainee[1](10,4)
print('\n PROVERKA TIPA', type(trainee[1]))
print('\n PROVERKA TIPA', type(trainee[1](10,4)))
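# NOTE (editorial): trainee[1](10, 4) inside the last print runs add() a
# second time (printing 14 and <class 'int'>) and returns None, so the line
# above reports <class 'NoneType'> rather than the type of the sum.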
|
normal
|
{
"blob_id": "dc226a646af32d052c6d51832b95a340d6986e08",
"index": 489,
"step-1": "<mask token>\n\n\ndef fn1():\n print('One')\n\n\ndef fn2():\n print('Two')\n\n\ndef fn3():\n print('Three')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef fn1():\n print('One')\n\n\ndef fn2():\n print('Two')\n\n\ndef fn3():\n print('Three')\n\n\n<mask token>\n\n\ndef sub(one, two):\n c = one - two\n print(c)\n print(type(c))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef fn1():\n print('One')\n\n\ndef fn2():\n print('Two')\n\n\ndef fn3():\n print('Three')\n\n\n<mask token>\n\n\ndef add(one, two):\n c = one + two\n print(c)\n print(type(c))\n\n\ndef sub(one, two):\n c = one - two\n print(c)\n print(type(c))\n\n\n<mask token>\n",
"step-4": "print('\\n')\n\n\ndef fn1():\n print('One')\n\n\ndef fn2():\n print('Two')\n\n\ndef fn3():\n print('Three')\n\n\n<mask token>\nfndict[keynames[1]]()\nfndict['C']()\n\n\ndef add(one, two):\n c = one + two\n print(c)\n print(type(c))\n\n\ndef sub(one, two):\n c = one - two\n print(c)\n print(type(c))\n\n\n<mask token>\ntrainee[1](10, 4)\nprint('\\n PROVERKA TIPA', type(trainee[1]))\nprint('\\n PROVERKA TIPA', type(trainee[1](10, 4)))\n",
"step-5": "\nprint('\\n')\n\n#\tПервый вариант\n\ndef fn1():\n print(\"One\")\n\ndef fn2():\n print(\"Two\")\n\ndef fn3():\n print(\"Three\")\n\nfndict = {\"A\": fn1, \"B\": fn2, \"C\": fn3}\n\nkeynames = [\"A\", \"B\", \"C\"]\n\nfndict[keynames[1]]()\nfndict['C']()\n\n\n#\t\tВторой вариант\n\ndef add(one,two):\n\tc = one+two\n\tprint(c)\n\tprint(type(c))\n\ndef sub(one,two):\n\tc = one-two\n\tprint(c)\n\tprint(type(c))\n\ntrainee = {1:add, 2:sub}\n\ntrainee[1](10,4)\nprint('\\n PROVERKA TIPA', type(trainee[1]))\nprint('\\n PROVERKA TIPA', type(trainee[1](10,4)))\n",
"step-ids": [
3,
4,
5,
6,
8
]
}
|
[
3,
4,
5,
6,
8
] |
import pkgutil
import mimetypes
import time
from datetime import datetime
from pywb.utils.wbexception import NotFoundException
from pywb.utils.loaders import BlockLoader
from pywb.utils.statusandheaders import StatusAndHeaders
from pywb.framework.basehandlers import BaseHandler, WbUrlHandler
from pywb.framework.wbrequestresponse import WbResponse
from pywb.warc.recordloader import ArcWarcRecordLoader
from pywb.warc.resolvingloader import ResolvingLoader
from views import J2TemplateView
from replay_views import ReplayView
from pywb.framework.memento import MementoResponse
from pywb.utils.timeutils import datetime_to_timestamp
#=================================================================
class SearchPageWbUrlHandler(WbUrlHandler):
"""
Loads a default search page html template to be shown when
the wb_url is empty
"""
def __init__(self, config):
self.search_view = (J2TemplateView.
create_template(config.get('search_html'),
'Search Page'))
self.is_frame_mode = config.get('framed_replay', False)
self.response_class = WbResponse
if self.is_frame_mode:
html = config.get('frame_insert_html', 'ui/frame_insert.html')
self.frame_insert_view = (J2TemplateView.
create_template(html, 'Frame Insert'))
self.banner_html = config.get('banner_html', 'banner.html')
if config.get('enable_memento', False):
self.response_class = MementoResponse
else:
self.frame_insert_view = None
self.banner_html = None
def render_search_page(self, wbrequest, **kwargs):
if self.search_view:
return self.search_view.render_response(wbrequest=wbrequest,
prefix=wbrequest.wb_prefix,
**kwargs)
else:
return WbResponse.text_response('No Lookup Url Specified')
def __call__(self, wbrequest):
# root search page
if wbrequest.wb_url_str == '/':
return self.render_search_page(wbrequest)
# render top level frame if in frame mode
# (not supported in proxy mode)
if (self.is_frame_mode and wbrequest.wb_url and
not wbrequest.wb_url.is_query() and
not wbrequest.options['is_proxy']):
if wbrequest.wb_url.is_top_frame:
return self.get_top_frame_response(wbrequest)
else:
wbrequest.final_mod = 'tf_'
return self.handle_request(wbrequest)
def get_top_frame_params(self, wbrequest):
embed_url = wbrequest.wb_url.to_str(mod='')
if wbrequest.wb_url.timestamp:
timestamp = wbrequest.wb_url.timestamp
else:
timestamp = datetime_to_timestamp(datetime.utcnow())
params = dict(embed_url=embed_url,
wbrequest=wbrequest,
timestamp=timestamp,
url=wbrequest.wb_url.url,
banner_html=self.banner_html)
return params
def get_top_frame_response(self, wbrequest):
params = self.get_top_frame_params(wbrequest)
headers = [('Content-Type', 'text/html; charset=utf-8')]
status_headers = StatusAndHeaders('200 OK', headers)
template_result = self.frame_insert_view.render_to_string(**params)
body = template_result.encode('utf-8')
return self.response_class(status_headers, [body], wbrequest=wbrequest)
#=================================================================
# Standard WB Handler
#=================================================================
class WBHandler(SearchPageWbUrlHandler):
def __init__(self, query_handler, config=None):
super(WBHandler, self).__init__(config)
self.index_reader = query_handler
cookie_maker = config.get('cookie_maker')
record_loader = ArcWarcRecordLoader(cookie_maker=cookie_maker)
paths = config.get('archive_paths')
resolving_loader = ResolvingLoader(paths=paths,
record_loader=record_loader)
self.replay = ReplayView(resolving_loader, config)
self.fallback_handler = None
self.fallback_name = config.get('fallback')
def resolve_refs(self, handler_dict):
if self.fallback_name:
self.fallback_handler = handler_dict.get(self.fallback_name)
def handle_request(self, wbrequest):
try:
cdx_lines, output = self.index_reader.load_for_request(wbrequest)
except NotFoundException as nfe:
return self.handle_not_found(wbrequest, nfe)
if output != 'text' and wbrequest.wb_url.is_replay():
return self.handle_replay(wbrequest, cdx_lines)
else:
return self.handle_query(wbrequest, cdx_lines, output)
def handle_query(self, wbrequest, cdx_lines, output):
return self.index_reader.make_cdx_response(wbrequest,
cdx_lines,
output)
def handle_replay(self, wbrequest, cdx_lines):
cdx_callback = self.index_reader.cdx_load_callback(wbrequest)
return self.replay.render_content(wbrequest,
cdx_lines,
cdx_callback)
def handle_not_found(self, wbrequest, nfe):
if (not self.fallback_handler or
wbrequest.wb_url.is_query() or
wbrequest.wb_url.is_identity):
raise
return self.fallback_handler(wbrequest)
def __str__(self):
return 'Web Archive Replay Handler'
#=================================================================
# Static Content Handler
#=================================================================
class StaticHandler(BaseHandler):
def __init__(self, static_path):
mimetypes.init()
self.static_path = static_path
self.block_loader = BlockLoader()
def __call__(self, wbrequest):
url = wbrequest.wb_url_str.split('?')[0]
full_path = self.static_path + url
try:
data = self.block_loader.load(full_path)
try:
data.seek(0, 2)
size = data.tell()
data.seek(0)
headers = [('Content-Length', str(size))]
except IOError:
headers = None
if 'wsgi.file_wrapper' in wbrequest.env:
reader = wbrequest.env['wsgi.file_wrapper'](data)
else:
reader = iter(lambda: data.read(), '')
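            # NOTE (editorial): iter(callable, sentinel) keeps calling
            # data.read() until it returns ''; with no size argument the first
            # call already consumes the rest of the file, so this fallback
            # yields the whole body as one chunk. Note also that 'reader' is
            # built here but text_stream() below is handed 'data' directly.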
content_type, _ = mimetypes.guess_type(full_path)
return WbResponse.text_stream(data,
content_type=content_type,
headers=headers)
except IOError:
raise NotFoundException('Static File Not Found: ' +
wbrequest.wb_url_str)
def __str__(self): # pragma: no cover
return 'Static files from ' + self.static_path
#=================================================================
# Debug Handlers
#=================================================================
class DebugEchoEnvHandler(BaseHandler): # pragma: no cover
def __call__(self, wbrequest):
return WbResponse.text_response(str(wbrequest.env))
#=================================================================
class DebugEchoHandler(BaseHandler): # pragma: no cover
def __call__(self, wbrequest):
return WbResponse.text_response(str(wbrequest))
|
normal
|
{
"blob_id": "df1486afcc99e03510512ed6ed3e8b3471459d50",
"index": 5343,
"step-1": "<mask token>\n\n\nclass WBHandler(SearchPageWbUrlHandler):\n <mask token>\n <mask token>\n <mask token>\n\n def handle_query(self, wbrequest, cdx_lines, output):\n return self.index_reader.make_cdx_response(wbrequest, cdx_lines, output\n )\n <mask token>\n <mask token>\n <mask token>\n\n\nclass StaticHandler(BaseHandler):\n\n def __init__(self, static_path):\n mimetypes.init()\n self.static_path = static_path\n self.block_loader = BlockLoader()\n\n def __call__(self, wbrequest):\n url = wbrequest.wb_url_str.split('?')[0]\n full_path = self.static_path + url\n try:\n data = self.block_loader.load(full_path)\n try:\n data.seek(0, 2)\n size = data.tell()\n data.seek(0)\n headers = [('Content-Length', str(size))]\n except IOError:\n headers = None\n if 'wsgi.file_wrapper' in wbrequest.env:\n reader = wbrequest.env['wsgi.file_wrapper'](data)\n else:\n reader = iter(lambda : data.read(), '')\n content_type, _ = mimetypes.guess_type(full_path)\n return WbResponse.text_stream(data, content_type=content_type,\n headers=headers)\n except IOError:\n raise NotFoundException('Static File Not Found: ' + wbrequest.\n wb_url_str)\n\n def __str__(self):\n return 'Static files from ' + self.static_path\n\n\nclass DebugEchoEnvHandler(BaseHandler):\n\n def __call__(self, wbrequest):\n return WbResponse.text_response(str(wbrequest.env))\n\n\nclass DebugEchoHandler(BaseHandler):\n\n def __call__(self, wbrequest):\n return WbResponse.text_response(str(wbrequest))\n",
"step-2": "<mask token>\n\n\nclass SearchPageWbUrlHandler(WbUrlHandler):\n <mask token>\n <mask token>\n\n def render_search_page(self, wbrequest, **kwargs):\n if self.search_view:\n return self.search_view.render_response(wbrequest=wbrequest,\n prefix=wbrequest.wb_prefix, **kwargs)\n else:\n return WbResponse.text_response('No Lookup Url Specified')\n\n def __call__(self, wbrequest):\n if wbrequest.wb_url_str == '/':\n return self.render_search_page(wbrequest)\n if (self.is_frame_mode and wbrequest.wb_url and not wbrequest.\n wb_url.is_query() and not wbrequest.options['is_proxy']):\n if wbrequest.wb_url.is_top_frame:\n return self.get_top_frame_response(wbrequest)\n else:\n wbrequest.final_mod = 'tf_'\n return self.handle_request(wbrequest)\n <mask token>\n <mask token>\n\n\nclass WBHandler(SearchPageWbUrlHandler):\n\n def __init__(self, query_handler, config=None):\n super(WBHandler, self).__init__(config)\n self.index_reader = query_handler\n cookie_maker = config.get('cookie_maker')\n record_loader = ArcWarcRecordLoader(cookie_maker=cookie_maker)\n paths = config.get('archive_paths')\n resolving_loader = ResolvingLoader(paths=paths, record_loader=\n record_loader)\n self.replay = ReplayView(resolving_loader, config)\n self.fallback_handler = None\n self.fallback_name = config.get('fallback')\n\n def resolve_refs(self, handler_dict):\n if self.fallback_name:\n self.fallback_handler = handler_dict.get(self.fallback_name)\n\n def handle_request(self, wbrequest):\n try:\n cdx_lines, output = self.index_reader.load_for_request(wbrequest)\n except NotFoundException as nfe:\n return self.handle_not_found(wbrequest, nfe)\n if output != 'text' and wbrequest.wb_url.is_replay():\n return self.handle_replay(wbrequest, cdx_lines)\n else:\n return self.handle_query(wbrequest, cdx_lines, output)\n\n def handle_query(self, wbrequest, cdx_lines, output):\n return self.index_reader.make_cdx_response(wbrequest, cdx_lines, output\n )\n\n def handle_replay(self, wbrequest, cdx_lines):\n cdx_callback = self.index_reader.cdx_load_callback(wbrequest)\n return self.replay.render_content(wbrequest, cdx_lines, cdx_callback)\n\n def handle_not_found(self, wbrequest, nfe):\n if not self.fallback_handler or wbrequest.wb_url.is_query(\n ) or wbrequest.wb_url.is_identity:\n raise\n return self.fallback_handler(wbrequest)\n\n def __str__(self):\n return 'Web Archive Replay Handler'\n\n\nclass StaticHandler(BaseHandler):\n\n def __init__(self, static_path):\n mimetypes.init()\n self.static_path = static_path\n self.block_loader = BlockLoader()\n\n def __call__(self, wbrequest):\n url = wbrequest.wb_url_str.split('?')[0]\n full_path = self.static_path + url\n try:\n data = self.block_loader.load(full_path)\n try:\n data.seek(0, 2)\n size = data.tell()\n data.seek(0)\n headers = [('Content-Length', str(size))]\n except IOError:\n headers = None\n if 'wsgi.file_wrapper' in wbrequest.env:\n reader = wbrequest.env['wsgi.file_wrapper'](data)\n else:\n reader = iter(lambda : data.read(), '')\n content_type, _ = mimetypes.guess_type(full_path)\n return WbResponse.text_stream(data, content_type=content_type,\n headers=headers)\n except IOError:\n raise NotFoundException('Static File Not Found: ' + wbrequest.\n wb_url_str)\n\n def __str__(self):\n return 'Static files from ' + self.static_path\n\n\nclass DebugEchoEnvHandler(BaseHandler):\n\n def __call__(self, wbrequest):\n return WbResponse.text_response(str(wbrequest.env))\n\n\nclass DebugEchoHandler(BaseHandler):\n\n def __call__(self, wbrequest):\n return 
WbResponse.text_response(str(wbrequest))\n",
"step-3": "<mask token>\n\n\nclass SearchPageWbUrlHandler(WbUrlHandler):\n <mask token>\n <mask token>\n\n def render_search_page(self, wbrequest, **kwargs):\n if self.search_view:\n return self.search_view.render_response(wbrequest=wbrequest,\n prefix=wbrequest.wb_prefix, **kwargs)\n else:\n return WbResponse.text_response('No Lookup Url Specified')\n\n def __call__(self, wbrequest):\n if wbrequest.wb_url_str == '/':\n return self.render_search_page(wbrequest)\n if (self.is_frame_mode and wbrequest.wb_url and not wbrequest.\n wb_url.is_query() and not wbrequest.options['is_proxy']):\n if wbrequest.wb_url.is_top_frame:\n return self.get_top_frame_response(wbrequest)\n else:\n wbrequest.final_mod = 'tf_'\n return self.handle_request(wbrequest)\n\n def get_top_frame_params(self, wbrequest):\n embed_url = wbrequest.wb_url.to_str(mod='')\n if wbrequest.wb_url.timestamp:\n timestamp = wbrequest.wb_url.timestamp\n else:\n timestamp = datetime_to_timestamp(datetime.utcnow())\n params = dict(embed_url=embed_url, wbrequest=wbrequest, timestamp=\n timestamp, url=wbrequest.wb_url.url, banner_html=self.banner_html)\n return params\n\n def get_top_frame_response(self, wbrequest):\n params = self.get_top_frame_params(wbrequest)\n headers = [('Content-Type', 'text/html; charset=utf-8')]\n status_headers = StatusAndHeaders('200 OK', headers)\n template_result = self.frame_insert_view.render_to_string(**params)\n body = template_result.encode('utf-8')\n return self.response_class(status_headers, [body], wbrequest=wbrequest)\n\n\nclass WBHandler(SearchPageWbUrlHandler):\n\n def __init__(self, query_handler, config=None):\n super(WBHandler, self).__init__(config)\n self.index_reader = query_handler\n cookie_maker = config.get('cookie_maker')\n record_loader = ArcWarcRecordLoader(cookie_maker=cookie_maker)\n paths = config.get('archive_paths')\n resolving_loader = ResolvingLoader(paths=paths, record_loader=\n record_loader)\n self.replay = ReplayView(resolving_loader, config)\n self.fallback_handler = None\n self.fallback_name = config.get('fallback')\n\n def resolve_refs(self, handler_dict):\n if self.fallback_name:\n self.fallback_handler = handler_dict.get(self.fallback_name)\n\n def handle_request(self, wbrequest):\n try:\n cdx_lines, output = self.index_reader.load_for_request(wbrequest)\n except NotFoundException as nfe:\n return self.handle_not_found(wbrequest, nfe)\n if output != 'text' and wbrequest.wb_url.is_replay():\n return self.handle_replay(wbrequest, cdx_lines)\n else:\n return self.handle_query(wbrequest, cdx_lines, output)\n\n def handle_query(self, wbrequest, cdx_lines, output):\n return self.index_reader.make_cdx_response(wbrequest, cdx_lines, output\n )\n\n def handle_replay(self, wbrequest, cdx_lines):\n cdx_callback = self.index_reader.cdx_load_callback(wbrequest)\n return self.replay.render_content(wbrequest, cdx_lines, cdx_callback)\n\n def handle_not_found(self, wbrequest, nfe):\n if not self.fallback_handler or wbrequest.wb_url.is_query(\n ) or wbrequest.wb_url.is_identity:\n raise\n return self.fallback_handler(wbrequest)\n\n def __str__(self):\n return 'Web Archive Replay Handler'\n\n\nclass StaticHandler(BaseHandler):\n\n def __init__(self, static_path):\n mimetypes.init()\n self.static_path = static_path\n self.block_loader = BlockLoader()\n\n def __call__(self, wbrequest):\n url = wbrequest.wb_url_str.split('?')[0]\n full_path = self.static_path + url\n try:\n data = self.block_loader.load(full_path)\n try:\n data.seek(0, 2)\n size = data.tell()\n data.seek(0)\n headers = 
[('Content-Length', str(size))]\n except IOError:\n headers = None\n if 'wsgi.file_wrapper' in wbrequest.env:\n reader = wbrequest.env['wsgi.file_wrapper'](data)\n else:\n reader = iter(lambda : data.read(), '')\n content_type, _ = mimetypes.guess_type(full_path)\n return WbResponse.text_stream(data, content_type=content_type,\n headers=headers)\n except IOError:\n raise NotFoundException('Static File Not Found: ' + wbrequest.\n wb_url_str)\n\n def __str__(self):\n return 'Static files from ' + self.static_path\n\n\nclass DebugEchoEnvHandler(BaseHandler):\n\n def __call__(self, wbrequest):\n return WbResponse.text_response(str(wbrequest.env))\n\n\nclass DebugEchoHandler(BaseHandler):\n\n def __call__(self, wbrequest):\n return WbResponse.text_response(str(wbrequest))\n",
"step-4": "<mask token>\n\n\nclass SearchPageWbUrlHandler(WbUrlHandler):\n <mask token>\n\n def __init__(self, config):\n self.search_view = J2TemplateView.create_template(config.get(\n 'search_html'), 'Search Page')\n self.is_frame_mode = config.get('framed_replay', False)\n self.response_class = WbResponse\n if self.is_frame_mode:\n html = config.get('frame_insert_html', 'ui/frame_insert.html')\n self.frame_insert_view = J2TemplateView.create_template(html,\n 'Frame Insert')\n self.banner_html = config.get('banner_html', 'banner.html')\n if config.get('enable_memento', False):\n self.response_class = MementoResponse\n else:\n self.frame_insert_view = None\n self.banner_html = None\n\n def render_search_page(self, wbrequest, **kwargs):\n if self.search_view:\n return self.search_view.render_response(wbrequest=wbrequest,\n prefix=wbrequest.wb_prefix, **kwargs)\n else:\n return WbResponse.text_response('No Lookup Url Specified')\n\n def __call__(self, wbrequest):\n if wbrequest.wb_url_str == '/':\n return self.render_search_page(wbrequest)\n if (self.is_frame_mode and wbrequest.wb_url and not wbrequest.\n wb_url.is_query() and not wbrequest.options['is_proxy']):\n if wbrequest.wb_url.is_top_frame:\n return self.get_top_frame_response(wbrequest)\n else:\n wbrequest.final_mod = 'tf_'\n return self.handle_request(wbrequest)\n\n def get_top_frame_params(self, wbrequest):\n embed_url = wbrequest.wb_url.to_str(mod='')\n if wbrequest.wb_url.timestamp:\n timestamp = wbrequest.wb_url.timestamp\n else:\n timestamp = datetime_to_timestamp(datetime.utcnow())\n params = dict(embed_url=embed_url, wbrequest=wbrequest, timestamp=\n timestamp, url=wbrequest.wb_url.url, banner_html=self.banner_html)\n return params\n\n def get_top_frame_response(self, wbrequest):\n params = self.get_top_frame_params(wbrequest)\n headers = [('Content-Type', 'text/html; charset=utf-8')]\n status_headers = StatusAndHeaders('200 OK', headers)\n template_result = self.frame_insert_view.render_to_string(**params)\n body = template_result.encode('utf-8')\n return self.response_class(status_headers, [body], wbrequest=wbrequest)\n\n\nclass WBHandler(SearchPageWbUrlHandler):\n\n def __init__(self, query_handler, config=None):\n super(WBHandler, self).__init__(config)\n self.index_reader = query_handler\n cookie_maker = config.get('cookie_maker')\n record_loader = ArcWarcRecordLoader(cookie_maker=cookie_maker)\n paths = config.get('archive_paths')\n resolving_loader = ResolvingLoader(paths=paths, record_loader=\n record_loader)\n self.replay = ReplayView(resolving_loader, config)\n self.fallback_handler = None\n self.fallback_name = config.get('fallback')\n\n def resolve_refs(self, handler_dict):\n if self.fallback_name:\n self.fallback_handler = handler_dict.get(self.fallback_name)\n\n def handle_request(self, wbrequest):\n try:\n cdx_lines, output = self.index_reader.load_for_request(wbrequest)\n except NotFoundException as nfe:\n return self.handle_not_found(wbrequest, nfe)\n if output != 'text' and wbrequest.wb_url.is_replay():\n return self.handle_replay(wbrequest, cdx_lines)\n else:\n return self.handle_query(wbrequest, cdx_lines, output)\n\n def handle_query(self, wbrequest, cdx_lines, output):\n return self.index_reader.make_cdx_response(wbrequest, cdx_lines, output\n )\n\n def handle_replay(self, wbrequest, cdx_lines):\n cdx_callback = self.index_reader.cdx_load_callback(wbrequest)\n return self.replay.render_content(wbrequest, cdx_lines, cdx_callback)\n\n def handle_not_found(self, wbrequest, nfe):\n if not 
self.fallback_handler or wbrequest.wb_url.is_query(\n ) or wbrequest.wb_url.is_identity:\n raise\n return self.fallback_handler(wbrequest)\n\n def __str__(self):\n return 'Web Archive Replay Handler'\n\n\nclass StaticHandler(BaseHandler):\n\n def __init__(self, static_path):\n mimetypes.init()\n self.static_path = static_path\n self.block_loader = BlockLoader()\n\n def __call__(self, wbrequest):\n url = wbrequest.wb_url_str.split('?')[0]\n full_path = self.static_path + url\n try:\n data = self.block_loader.load(full_path)\n try:\n data.seek(0, 2)\n size = data.tell()\n data.seek(0)\n headers = [('Content-Length', str(size))]\n except IOError:\n headers = None\n if 'wsgi.file_wrapper' in wbrequest.env:\n reader = wbrequest.env['wsgi.file_wrapper'](data)\n else:\n reader = iter(lambda : data.read(), '')\n content_type, _ = mimetypes.guess_type(full_path)\n return WbResponse.text_stream(data, content_type=content_type,\n headers=headers)\n except IOError:\n raise NotFoundException('Static File Not Found: ' + wbrequest.\n wb_url_str)\n\n def __str__(self):\n return 'Static files from ' + self.static_path\n\n\nclass DebugEchoEnvHandler(BaseHandler):\n\n def __call__(self, wbrequest):\n return WbResponse.text_response(str(wbrequest.env))\n\n\nclass DebugEchoHandler(BaseHandler):\n\n def __call__(self, wbrequest):\n return WbResponse.text_response(str(wbrequest))\n",
"step-5": "import pkgutil\nimport mimetypes\nimport time\n\nfrom datetime import datetime\n\nfrom pywb.utils.wbexception import NotFoundException\nfrom pywb.utils.loaders import BlockLoader\nfrom pywb.utils.statusandheaders import StatusAndHeaders\n\nfrom pywb.framework.basehandlers import BaseHandler, WbUrlHandler\nfrom pywb.framework.wbrequestresponse import WbResponse\n\nfrom pywb.warc.recordloader import ArcWarcRecordLoader\nfrom pywb.warc.resolvingloader import ResolvingLoader\n\nfrom views import J2TemplateView\nfrom replay_views import ReplayView\nfrom pywb.framework.memento import MementoResponse\nfrom pywb.utils.timeutils import datetime_to_timestamp\n\n\n#=================================================================\nclass SearchPageWbUrlHandler(WbUrlHandler):\n \"\"\"\n Loads a default search page html template to be shown when\n the wb_url is empty\n \"\"\"\n def __init__(self, config):\n self.search_view = (J2TemplateView.\n create_template(config.get('search_html'),\n 'Search Page'))\n\n self.is_frame_mode = config.get('framed_replay', False)\n self.response_class = WbResponse\n\n if self.is_frame_mode:\n html = config.get('frame_insert_html', 'ui/frame_insert.html')\n self.frame_insert_view = (J2TemplateView.\n create_template(html, 'Frame Insert'))\n\n self.banner_html = config.get('banner_html', 'banner.html')\n \n if config.get('enable_memento', False):\n self.response_class = MementoResponse\n\n else:\n self.frame_insert_view = None\n self.banner_html = None\n\n def render_search_page(self, wbrequest, **kwargs):\n if self.search_view:\n return self.search_view.render_response(wbrequest=wbrequest,\n prefix=wbrequest.wb_prefix,\n **kwargs)\n else:\n return WbResponse.text_response('No Lookup Url Specified')\n\n def __call__(self, wbrequest):\n # root search page\n if wbrequest.wb_url_str == '/':\n return self.render_search_page(wbrequest)\n\n # render top level frame if in frame mode\n # (not supported in proxy mode)\n if (self.is_frame_mode and wbrequest.wb_url and\n not wbrequest.wb_url.is_query() and\n not wbrequest.options['is_proxy']):\n\n if wbrequest.wb_url.is_top_frame:\n return self.get_top_frame_response(wbrequest)\n else:\n wbrequest.final_mod = 'tf_'\n\n return self.handle_request(wbrequest)\n\n def get_top_frame_params(self, wbrequest):\n embed_url = wbrequest.wb_url.to_str(mod='')\n\n if wbrequest.wb_url.timestamp:\n timestamp = wbrequest.wb_url.timestamp\n else:\n timestamp = datetime_to_timestamp(datetime.utcnow())\n\n params = dict(embed_url=embed_url,\n wbrequest=wbrequest,\n timestamp=timestamp,\n url=wbrequest.wb_url.url,\n banner_html=self.banner_html)\n\n return params\n\n def get_top_frame_response(self, wbrequest):\n params = self.get_top_frame_params(wbrequest)\n\n headers = [('Content-Type', 'text/html; charset=utf-8')]\n status_headers = StatusAndHeaders('200 OK', headers)\n\n template_result = self.frame_insert_view.render_to_string(**params)\n body = template_result.encode('utf-8')\n\n return self.response_class(status_headers, [body], wbrequest=wbrequest)\n\n\n#=================================================================\n# Standard WB Handler\n#=================================================================\nclass WBHandler(SearchPageWbUrlHandler):\n def __init__(self, query_handler, config=None):\n super(WBHandler, self).__init__(config)\n\n self.index_reader = query_handler\n\n cookie_maker = config.get('cookie_maker')\n record_loader = ArcWarcRecordLoader(cookie_maker=cookie_maker)\n\n paths = config.get('archive_paths')\n\n 
resolving_loader = ResolvingLoader(paths=paths,\n record_loader=record_loader)\n\n self.replay = ReplayView(resolving_loader, config)\n\n self.fallback_handler = None\n self.fallback_name = config.get('fallback')\n\n def resolve_refs(self, handler_dict):\n if self.fallback_name:\n self.fallback_handler = handler_dict.get(self.fallback_name)\n\n def handle_request(self, wbrequest):\n try:\n cdx_lines, output = self.index_reader.load_for_request(wbrequest)\n except NotFoundException as nfe:\n return self.handle_not_found(wbrequest, nfe)\n\n if output != 'text' and wbrequest.wb_url.is_replay():\n return self.handle_replay(wbrequest, cdx_lines)\n else:\n return self.handle_query(wbrequest, cdx_lines, output)\n\n def handle_query(self, wbrequest, cdx_lines, output):\n return self.index_reader.make_cdx_response(wbrequest,\n cdx_lines,\n output)\n\n def handle_replay(self, wbrequest, cdx_lines):\n cdx_callback = self.index_reader.cdx_load_callback(wbrequest)\n\n return self.replay.render_content(wbrequest,\n cdx_lines,\n cdx_callback)\n\n def handle_not_found(self, wbrequest, nfe):\n if (not self.fallback_handler or\n wbrequest.wb_url.is_query() or\n wbrequest.wb_url.is_identity):\n raise\n\n return self.fallback_handler(wbrequest)\n\n def __str__(self):\n return 'Web Archive Replay Handler'\n\n\n#=================================================================\n# Static Content Handler\n#=================================================================\nclass StaticHandler(BaseHandler):\n def __init__(self, static_path):\n mimetypes.init()\n\n self.static_path = static_path\n self.block_loader = BlockLoader()\n\n def __call__(self, wbrequest):\n url = wbrequest.wb_url_str.split('?')[0]\n full_path = self.static_path + url\n\n try:\n data = self.block_loader.load(full_path)\n\n try:\n data.seek(0, 2)\n size = data.tell()\n data.seek(0)\n headers = [('Content-Length', str(size))]\n except IOError:\n headers = None\n\n if 'wsgi.file_wrapper' in wbrequest.env:\n reader = wbrequest.env['wsgi.file_wrapper'](data)\n else:\n reader = iter(lambda: data.read(), '')\n\n content_type, _ = mimetypes.guess_type(full_path)\n\n return WbResponse.text_stream(data,\n content_type=content_type,\n headers=headers)\n\n except IOError:\n raise NotFoundException('Static File Not Found: ' +\n wbrequest.wb_url_str)\n\n def __str__(self): # pragma: no cover\n return 'Static files from ' + self.static_path\n\n\n#=================================================================\n# Debug Handlers\n#=================================================================\nclass DebugEchoEnvHandler(BaseHandler): # pragma: no cover\n def __call__(self, wbrequest):\n return WbResponse.text_response(str(wbrequest.env))\n\n\n#=================================================================\nclass DebugEchoHandler(BaseHandler): # pragma: no cover\n def __call__(self, wbrequest):\n return WbResponse.text_response(str(wbrequest))\n",
"step-ids": [
10,
19,
21,
22,
25
]
}
|
[
10,
19,
21,
22,
25
] |
def mysum(*c):
print(sum([x for x in c]))
mysum(1,2,3,4,0xB)
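# NOTE (editorial): 0xB is hexadecimal for 11, so this call prints 21.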
|
normal
|
{
"blob_id": "2c4fa92b28fa46a26f21ada8826474baac204e00",
"index": 1234,
"step-1": "<mask token>\n",
"step-2": "def mysum(*c):\n print(sum([x for x in c]))\n\n\n<mask token>\n",
"step-3": "def mysum(*c):\n print(sum([x for x in c]))\n\n\nmysum(1, 2, 3, 4, 11)\n",
"step-4": "def mysum(*c):\n print(sum([x for x in c]))\n\nmysum(1,2,3,4,0xB)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<<<<<<< HEAD
{'_data': [['Common', [['Skin', u'Ospecifika hud-reakti oner'], ['General', u'Tr\xf6tthet']]],
['Uncommon',
[['GI',
u'Buksm\xe4rta, diarr\xe9, f\xf6r-stoppnin g, illam\xe5ende (dessa symptom g\xe5r vanligt-vis \xf6ver vid fortsatt behandling).']]],
['Rare',
[['Blood', u'Hemolytisk anemi'],
['Immune system',
u'\xd6verk\xe4nslighets-reaktioner (urtikaria, angioneurotiskt \xf6dem, feber, dyspn\xe9, tr\xe5nghetsk\xe4nsla i svalget, bronkospasm, hypotension och br\xf6stsm\xe4rta). Dessa h\xe4ndelser har rapporterats efter singeldos. L\xe4kemedels-\xf6verk \xe4nslighet Hepatit'],
=======
{'_data': [['Common', [['Skin', u'Ospecifika hud-reaktioner'], ['General', u'Tr\xf6tthet']]],
['Uncommon',
[['GI',
u'Buksm\xe4rta, diarr\xe9, f\xf6r-stoppning, illam\xe5ende (dessa symptom g\xe5r vanligt-vis \xf6ver vid fortsatt behandling).']]],
['Rare',
[['Blood', u'Hemolytisk anemi'],
['Immune system',
u'\xd6verk\xe4nslighets-reaktioner (urtikaria, angioneurotiskt \xf6dem, feber, dyspn\xe9, tr\xe5nghetsk\xe4nsla i svalget, bronkospasm, hypotension och br\xf6stsm\xe4rta). Dessa h\xe4ndelser har rapporterats efter singeldos. L\xe4kemedels-\xf6verk\xe4nslighet Hepatit'],
>>>>>>> eb0dbf7cfbd3e1c8a568eedcf6ca5658233104cc
['Hepato',
u'Leversvikt, ibland med d\xf6dlig utg\xe5ng, \xf6verg\xe5ende och reversibla f\xf6r\xe4ndringar av leverfunktionstest.'],
['Skin', u'Hudutslag'],
['Renal',
u'F\xf6rh\xf6jt plasma-kreatinin (vanligtvis ringa; normaliseras under fortsatt behandling)'],
['Reproductive system', u'Erektil dysfunktion'],
['General', u'Feber']]],
['Very rare',
[['Blood',
u'F\xf6r\xe4ndringar i blodbilden (leukopeni, Trombo-cytopeni). Detta \xe4r normalt reversibelt. Agranulocytos eller pancytopeni ibland med benm\xe4rgs-hypoplasi eller aplasi.'],
['Immune system', u'Anafylaktisk chock (rapporterat efter singeldos).'],
['Psychiatric',
u'Mental f\xf6rvirring (reversibel), depression och hallucinationer, s\xe4rskilt hos \xe4ldre och sv\xe5rt sjuka.'],
['Nervous system',
u'Huvudv\xe4rk (ibland allvarlig), yrsel och reversibla tillst\xe5nd med ofrivilliga r\xf6relser'],
<<<<<<< HEAD
['Eye',
u'Dimsyn (reversibel), troligen orsakade av ackommodations-st \xf6rningar'],
['Cardiac', u'Som med andra H2-receptor-antago nister: bradykardi och AV-block'],
=======
['Eye', u'Dimsyn (reversibel), troligen orsakade av ackommodations-st\xf6rningar'],
['Cardiac', u'Som med andra H2-receptor-antagonister: bradykardi och AV-block'],
>>>>>>> eb0dbf7cfbd3e1c8a568eedcf6ca5658233104cc
['Vascular', u'Vaskulit'],
['GI', u'Akut pankreatit'],
['Hepato',
u'Hepatit (hepatocellul\xe4r, kanalikul\xe4r eller blandad art) med eller utan gulsot, Detta \xe4r vanligtvis reversibelt.'],
['Skin', u'Erythema multiforme, alopeci'],
['Musculoskeletal', u'Artralgi, myalgi'],
['Renal', u'Akut interstitiell nefrit'],
['Reproductive system',
u'Reversibel impotens, br\xf6stsymptom och andra tillst\xe5nd (s\xe5som gynekomasti och galaktorr\xe9)']]]],
'_note': u' ?MSFU',
'_pages': [4, 6],
u'_rank': 23,
u'_type': u'MSFU'}
|
normal
|
{
"blob_id": "efe13de4ed5a3f42a9f2ece68fd329d8e3147ca2",
"index": 4869,
"step-1": "<<<<<<< HEAD\n{'_data': [['Common', [['Skin', u'Ospecifika hud-reakti oner'], ['General', u'Tr\\xf6tthet']]],\n ['Uncommon',\n [['GI',\n u'Buksm\\xe4rta, diarr\\xe9, f\\xf6r-stoppnin g, illam\\xe5ende (dessa symptom g\\xe5r vanligt-vis \\xf6ver vid fortsatt behandling).']]],\n ['Rare',\n [['Blood', u'Hemolytisk anemi'],\n ['Immune system',\n u'\\xd6verk\\xe4nslighets-reaktioner (urtikaria, angioneurotiskt \\xf6dem, feber, dyspn\\xe9, tr\\xe5nghetsk\\xe4nsla i svalget, bronkospasm, hypotension och br\\xf6stsm\\xe4rta). Dessa h\\xe4ndelser har rapporterats efter singeldos. L\\xe4kemedels-\\xf6verk \\xe4nslighet Hepatit'],\n=======\n{'_data': [['Common', [['Skin', u'Ospecifika hud-reaktioner'], ['General', u'Tr\\xf6tthet']]],\n ['Uncommon',\n [['GI',\n u'Buksm\\xe4rta, diarr\\xe9, f\\xf6r-stoppning, illam\\xe5ende (dessa symptom g\\xe5r vanligt-vis \\xf6ver vid fortsatt behandling).']]],\n ['Rare',\n [['Blood', u'Hemolytisk anemi'],\n ['Immune system',\n u'\\xd6verk\\xe4nslighets-reaktioner (urtikaria, angioneurotiskt \\xf6dem, feber, dyspn\\xe9, tr\\xe5nghetsk\\xe4nsla i svalget, bronkospasm, hypotension och br\\xf6stsm\\xe4rta). Dessa h\\xe4ndelser har rapporterats efter singeldos. L\\xe4kemedels-\\xf6verk\\xe4nslighet Hepatit'],\n>>>>>>> eb0dbf7cfbd3e1c8a568eedcf6ca5658233104cc\n ['Hepato',\n u'Leversvikt, ibland med d\\xf6dlig utg\\xe5ng, \\xf6verg\\xe5ende och reversibla f\\xf6r\\xe4ndringar av leverfunktionstest.'],\n ['Skin', u'Hudutslag'],\n ['Renal',\n u'F\\xf6rh\\xf6jt plasma-kreatinin (vanligtvis ringa; normaliseras under fortsatt behandling)'],\n ['Reproductive system', u'Erektil dysfunktion'],\n ['General', u'Feber']]],\n ['Very rare',\n [['Blood',\n u'F\\xf6r\\xe4ndringar i blodbilden (leukopeni, Trombo-cytopeni). Detta \\xe4r normalt reversibelt. Agranulocytos eller pancytopeni ibland med benm\\xe4rgs-hypoplasi eller aplasi.'],\n ['Immune system', u'Anafylaktisk chock (rapporterat efter singeldos).'],\n ['Psychiatric',\n u'Mental f\\xf6rvirring (reversibel), depression och hallucinationer, s\\xe4rskilt hos \\xe4ldre och sv\\xe5rt sjuka.'],\n ['Nervous system',\n u'Huvudv\\xe4rk (ibland allvarlig), yrsel och reversibla tillst\\xe5nd med ofrivilliga r\\xf6relser'],\n<<<<<<< HEAD\n ['Eye',\n u'Dimsyn (reversibel), troligen orsakade av ackommodations-st \\xf6rningar'],\n ['Cardiac', u'Som med andra H2-receptor-antago nister: bradykardi och AV-block'],\n=======\n ['Eye', u'Dimsyn (reversibel), troligen orsakade av ackommodations-st\\xf6rningar'],\n ['Cardiac', u'Som med andra H2-receptor-antagonister: bradykardi och AV-block'],\n>>>>>>> eb0dbf7cfbd3e1c8a568eedcf6ca5658233104cc\n ['Vascular', u'Vaskulit'],\n ['GI', u'Akut pankreatit'],\n ['Hepato',\n u'Hepatit (hepatocellul\\xe4r, kanalikul\\xe4r eller blandad art) med eller utan gulsot, Detta \\xe4r vanligtvis reversibelt.'],\n ['Skin', u'Erythema multiforme, alopeci'],\n ['Musculoskeletal', u'Artralgi, myalgi'],\n ['Renal', u'Akut interstitiell nefrit'],\n ['Reproductive system',\n u'Reversibel impotens, br\\xf6stsymptom och andra tillst\\xe5nd (s\\xe5som gynekomasti och galaktorr\\xe9)']]]],\n '_note': u' ?MSFU',\n '_pages': [4, 6],\n u'_rank': 23,\n u'_type': u'MSFU'}",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from __future__ import absolute_import
import itertools
from django.contrib import messages
from django.core.context_processors import csrf
from django.db import transaction
from django.http import HttpResponseRedirect
from django.views.decorators.cache import never_cache
from django.utils.decorators import method_decorator
from sudo.decorators import sudo_required
from sentry.models import (Project, ProjectStatus, Organization, OrganizationStatus)
from sentry.plugins import plugins
from sentry.web.forms.accounts import (
ProjectEmailOptionsForm, NotificationSettingsForm, NotificationReportSettingsForm,
NotificationDeploySettingsForm
)
from sentry.web.decorators import login_required
from sentry.web.frontend.base import BaseView
from sentry.web.helpers import render_to_response
from sentry.utils.auth import get_auth_providers
from sentry.utils.safe import safe_execute
class AccountNotificationView(BaseView):
notification_settings_form = NotificationSettingsForm
@method_decorator(never_cache)
@method_decorator(login_required)
@method_decorator(sudo_required)
@method_decorator(transaction.atomic)
def handle(self, request):
settings_form = self.notification_settings_form(request.user, request.POST or None)
reports_form = NotificationReportSettingsForm(
request.user, request.POST or None, prefix='reports'
)
org_list = list(
Organization.objects.filter(
status=OrganizationStatus.VISIBLE,
member_set__user=request.user,
).distinct()
)
org_forms = [
(
org, NotificationDeploySettingsForm(
request.user, org, request.POST or None, prefix='deploys-org-%s' % (org.id, )
)
) for org in sorted(org_list, key=lambda o: o.name)
]
project_list = list(
Project.objects.filter(
team__organizationmemberteam__organizationmember__user=request.user,
team__organizationmemberteam__is_active=True,
status=ProjectStatus.VISIBLE,
).distinct()
)
project_forms = [
(
project, ProjectEmailOptionsForm(
project,
request.user,
request.POST or None,
prefix='project-%s' % (project.id, )
)
) for project in sorted(project_list, key=lambda x: (x.organization.name, x.name))
]
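        # NOTE (editorial): each bound sub-form is namespaced with a unique
        # 'prefix' so that identically named fields from different
        # organizations, projects, and plugins do not collide when every form
        # is validated against the same request.POST payload.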
ext_forms = []
for plugin in plugins.all():
for form in safe_execute(plugin.get_notification_forms, _with_transaction=False) or ():
form = safe_execute(
form,
plugin,
request.user,
request.POST or None,
prefix=plugin.slug,
_with_transaction=False
)
if not form:
continue
ext_forms.append(form)
if request.POST:
all_forms = list(
itertools.chain(
[settings_form, reports_form], ext_forms, (f for _, f in project_forms),
(f for _, f in org_forms)
)
)
if all(f.is_valid() for f in all_forms):
for form in all_forms:
form.save()
messages.add_message(request, messages.SUCCESS, 'Your settings were saved.')
return HttpResponseRedirect(request.path)
context = csrf(request)
context.update(
{
'settings_form': settings_form,
'project_forms': project_forms,
'org_forms': org_forms,
'reports_form': reports_form,
'ext_forms': ext_forms,
'page': 'notifications',
'AUTH_PROVIDERS': get_auth_providers(),
}
)
return render_to_response('sentry/account/notifications.html', context, request)
|
normal
|
{
"blob_id": "46f218829e1bf324d4c50ea0ff7003bc48b64e2a",
"index": 4258,
"step-1": "<mask token>\n\n\nclass AccountNotificationView(BaseView):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass AccountNotificationView(BaseView):\n <mask token>\n\n @method_decorator(never_cache)\n @method_decorator(login_required)\n @method_decorator(sudo_required)\n @method_decorator(transaction.atomic)\n def handle(self, request):\n settings_form = self.notification_settings_form(request.user, \n request.POST or None)\n reports_form = NotificationReportSettingsForm(request.user, request\n .POST or None, prefix='reports')\n org_list = list(Organization.objects.filter(status=\n OrganizationStatus.VISIBLE, member_set__user=request.user).\n distinct())\n org_forms = [(org, NotificationDeploySettingsForm(request.user, org,\n request.POST or None, prefix='deploys-org-%s' % (org.id,))) for\n org in sorted(org_list, key=lambda o: o.name)]\n project_list = list(Project.objects.filter(\n team__organizationmemberteam__organizationmember__user=request.\n user, team__organizationmemberteam__is_active=True, status=\n ProjectStatus.VISIBLE).distinct())\n project_forms = [(project, ProjectEmailOptionsForm(project, request\n .user, request.POST or None, prefix='project-%s' % (project.id,\n ))) for project in sorted(project_list, key=lambda x: (x.\n organization.name, x.name))]\n ext_forms = []\n for plugin in plugins.all():\n for form in (safe_execute(plugin.get_notification_forms,\n _with_transaction=False) or ()):\n form = safe_execute(form, plugin, request.user, request.\n POST or None, prefix=plugin.slug, _with_transaction=False)\n if not form:\n continue\n ext_forms.append(form)\n if request.POST:\n all_forms = list(itertools.chain([settings_form, reports_form],\n ext_forms, (f for _, f in project_forms), (f for _, f in\n org_forms)))\n if all(f.is_valid() for f in all_forms):\n for form in all_forms:\n form.save()\n messages.add_message(request, messages.SUCCESS,\n 'Your settings were saved.')\n return HttpResponseRedirect(request.path)\n context = csrf(request)\n context.update({'settings_form': settings_form, 'project_forms':\n project_forms, 'org_forms': org_forms, 'reports_form':\n reports_form, 'ext_forms': ext_forms, 'page': 'notifications',\n 'AUTH_PROVIDERS': get_auth_providers()})\n return render_to_response('sentry/account/notifications.html',\n context, request)\n",
"step-3": "<mask token>\n\n\nclass AccountNotificationView(BaseView):\n notification_settings_form = NotificationSettingsForm\n\n @method_decorator(never_cache)\n @method_decorator(login_required)\n @method_decorator(sudo_required)\n @method_decorator(transaction.atomic)\n def handle(self, request):\n settings_form = self.notification_settings_form(request.user, \n request.POST or None)\n reports_form = NotificationReportSettingsForm(request.user, request\n .POST or None, prefix='reports')\n org_list = list(Organization.objects.filter(status=\n OrganizationStatus.VISIBLE, member_set__user=request.user).\n distinct())\n org_forms = [(org, NotificationDeploySettingsForm(request.user, org,\n request.POST or None, prefix='deploys-org-%s' % (org.id,))) for\n org in sorted(org_list, key=lambda o: o.name)]\n project_list = list(Project.objects.filter(\n team__organizationmemberteam__organizationmember__user=request.\n user, team__organizationmemberteam__is_active=True, status=\n ProjectStatus.VISIBLE).distinct())\n project_forms = [(project, ProjectEmailOptionsForm(project, request\n .user, request.POST or None, prefix='project-%s' % (project.id,\n ))) for project in sorted(project_list, key=lambda x: (x.\n organization.name, x.name))]\n ext_forms = []\n for plugin in plugins.all():\n for form in (safe_execute(plugin.get_notification_forms,\n _with_transaction=False) or ()):\n form = safe_execute(form, plugin, request.user, request.\n POST or None, prefix=plugin.slug, _with_transaction=False)\n if not form:\n continue\n ext_forms.append(form)\n if request.POST:\n all_forms = list(itertools.chain([settings_form, reports_form],\n ext_forms, (f for _, f in project_forms), (f for _, f in\n org_forms)))\n if all(f.is_valid() for f in all_forms):\n for form in all_forms:\n form.save()\n messages.add_message(request, messages.SUCCESS,\n 'Your settings were saved.')\n return HttpResponseRedirect(request.path)\n context = csrf(request)\n context.update({'settings_form': settings_form, 'project_forms':\n project_forms, 'org_forms': org_forms, 'reports_form':\n reports_form, 'ext_forms': ext_forms, 'page': 'notifications',\n 'AUTH_PROVIDERS': get_auth_providers()})\n return render_to_response('sentry/account/notifications.html',\n context, request)\n",
"step-4": "from __future__ import absolute_import\nimport itertools\nfrom django.contrib import messages\nfrom django.core.context_processors import csrf\nfrom django.db import transaction\nfrom django.http import HttpResponseRedirect\nfrom django.views.decorators.cache import never_cache\nfrom django.utils.decorators import method_decorator\nfrom sudo.decorators import sudo_required\nfrom sentry.models import Project, ProjectStatus, Organization, OrganizationStatus\nfrom sentry.plugins import plugins\nfrom sentry.web.forms.accounts import ProjectEmailOptionsForm, NotificationSettingsForm, NotificationReportSettingsForm, NotificationDeploySettingsForm\nfrom sentry.web.decorators import login_required\nfrom sentry.web.frontend.base import BaseView\nfrom sentry.web.helpers import render_to_response\nfrom sentry.utils.auth import get_auth_providers\nfrom sentry.utils.safe import safe_execute\n\n\nclass AccountNotificationView(BaseView):\n notification_settings_form = NotificationSettingsForm\n\n @method_decorator(never_cache)\n @method_decorator(login_required)\n @method_decorator(sudo_required)\n @method_decorator(transaction.atomic)\n def handle(self, request):\n settings_form = self.notification_settings_form(request.user, \n request.POST or None)\n reports_form = NotificationReportSettingsForm(request.user, request\n .POST or None, prefix='reports')\n org_list = list(Organization.objects.filter(status=\n OrganizationStatus.VISIBLE, member_set__user=request.user).\n distinct())\n org_forms = [(org, NotificationDeploySettingsForm(request.user, org,\n request.POST or None, prefix='deploys-org-%s' % (org.id,))) for\n org in sorted(org_list, key=lambda o: o.name)]\n project_list = list(Project.objects.filter(\n team__organizationmemberteam__organizationmember__user=request.\n user, team__organizationmemberteam__is_active=True, status=\n ProjectStatus.VISIBLE).distinct())\n project_forms = [(project, ProjectEmailOptionsForm(project, request\n .user, request.POST or None, prefix='project-%s' % (project.id,\n ))) for project in sorted(project_list, key=lambda x: (x.\n organization.name, x.name))]\n ext_forms = []\n for plugin in plugins.all():\n for form in (safe_execute(plugin.get_notification_forms,\n _with_transaction=False) or ()):\n form = safe_execute(form, plugin, request.user, request.\n POST or None, prefix=plugin.slug, _with_transaction=False)\n if not form:\n continue\n ext_forms.append(form)\n if request.POST:\n all_forms = list(itertools.chain([settings_form, reports_form],\n ext_forms, (f for _, f in project_forms), (f for _, f in\n org_forms)))\n if all(f.is_valid() for f in all_forms):\n for form in all_forms:\n form.save()\n messages.add_message(request, messages.SUCCESS,\n 'Your settings were saved.')\n return HttpResponseRedirect(request.path)\n context = csrf(request)\n context.update({'settings_form': settings_form, 'project_forms':\n project_forms, 'org_forms': org_forms, 'reports_form':\n reports_form, 'ext_forms': ext_forms, 'page': 'notifications',\n 'AUTH_PROVIDERS': get_auth_providers()})\n return render_to_response('sentry/account/notifications.html',\n context, request)\n",
"step-5": "from __future__ import absolute_import\n\nimport itertools\n\nfrom django.contrib import messages\nfrom django.core.context_processors import csrf\nfrom django.db import transaction\nfrom django.http import HttpResponseRedirect\nfrom django.views.decorators.cache import never_cache\nfrom django.utils.decorators import method_decorator\n\nfrom sudo.decorators import sudo_required\n\nfrom sentry.models import (Project, ProjectStatus, Organization, OrganizationStatus)\nfrom sentry.plugins import plugins\nfrom sentry.web.forms.accounts import (\n ProjectEmailOptionsForm, NotificationSettingsForm, NotificationReportSettingsForm,\n NotificationDeploySettingsForm\n)\nfrom sentry.web.decorators import login_required\nfrom sentry.web.frontend.base import BaseView\nfrom sentry.web.helpers import render_to_response\nfrom sentry.utils.auth import get_auth_providers\nfrom sentry.utils.safe import safe_execute\n\n\nclass AccountNotificationView(BaseView):\n notification_settings_form = NotificationSettingsForm\n\n @method_decorator(never_cache)\n @method_decorator(login_required)\n @method_decorator(sudo_required)\n @method_decorator(transaction.atomic)\n def handle(self, request):\n settings_form = self.notification_settings_form(request.user, request.POST or None)\n reports_form = NotificationReportSettingsForm(\n request.user, request.POST or None, prefix='reports'\n )\n\n org_list = list(\n Organization.objects.filter(\n status=OrganizationStatus.VISIBLE,\n member_set__user=request.user,\n ).distinct()\n )\n\n org_forms = [\n (\n org, NotificationDeploySettingsForm(\n request.user, org, request.POST or None, prefix='deploys-org-%s' % (org.id, )\n )\n ) for org in sorted(org_list, key=lambda o: o.name)\n ]\n\n project_list = list(\n Project.objects.filter(\n team__organizationmemberteam__organizationmember__user=request.user,\n team__organizationmemberteam__is_active=True,\n status=ProjectStatus.VISIBLE,\n ).distinct()\n )\n\n project_forms = [\n (\n project, ProjectEmailOptionsForm(\n project,\n request.user,\n request.POST or None,\n prefix='project-%s' % (project.id, )\n )\n ) for project in sorted(project_list, key=lambda x: (x.organization.name, x.name))\n ]\n\n ext_forms = []\n for plugin in plugins.all():\n for form in safe_execute(plugin.get_notification_forms, _with_transaction=False) or ():\n form = safe_execute(\n form,\n plugin,\n request.user,\n request.POST or None,\n prefix=plugin.slug,\n _with_transaction=False\n )\n if not form:\n continue\n ext_forms.append(form)\n\n if request.POST:\n all_forms = list(\n itertools.chain(\n [settings_form, reports_form], ext_forms, (f for _, f in project_forms),\n (f for _, f in org_forms)\n )\n )\n if all(f.is_valid() for f in all_forms):\n for form in all_forms:\n form.save()\n messages.add_message(request, messages.SUCCESS, 'Your settings were saved.')\n return HttpResponseRedirect(request.path)\n\n context = csrf(request)\n context.update(\n {\n 'settings_form': settings_form,\n 'project_forms': project_forms,\n 'org_forms': org_forms,\n 'reports_form': reports_form,\n 'ext_forms': ext_forms,\n 'page': 'notifications',\n 'AUTH_PROVIDERS': get_auth_providers(),\n }\n )\n return render_to_response('sentry/account/notifications.html', context, request)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#
# PySNMP MIB module CISCO-LWAPP-CLIENT-ROAMING-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-LWAPP-CLIENT-ROAMING-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:04:56 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint")
cLApDot11IfSlotId, cLApSysMacAddress = mibBuilder.importSymbols("CISCO-LWAPP-AP-MIB", "cLApDot11IfSlotId", "cLApSysMacAddress")
CLDot11RfParamMode, CLDot11Channel = mibBuilder.importSymbols("CISCO-LWAPP-TC-MIB", "CLDot11RfParamMode", "CLDot11Channel")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
Integer32, IpAddress, MibIdentifier, NotificationType, TimeTicks, Bits, ObjectIdentity, Counter64, ModuleIdentity, iso, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "IpAddress", "MibIdentifier", "NotificationType", "TimeTicks", "Bits", "ObjectIdentity", "Counter64", "ModuleIdentity", "iso", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "Unsigned32")
DisplayString, MacAddress, TextualConvention, TimeInterval = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "MacAddress", "TextualConvention", "TimeInterval")
ciscoLwappClRoamMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 523))
ciscoLwappClRoamMIB.setRevisions(('2010-01-29 00:00', '2006-04-11 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ciscoLwappClRoamMIB.setRevisionsDescriptions(('Deprecated following attributes:- clcrDot11aMinRssi, clcrDot11aHysteresis, clcrDot11aAdaptiveScanThreshold, clcrDot11aTransitionTime, clcrDot11bMinRssi, clcrDot11bHysteresis, clcrDot11bAdaptiveScanThreshold, clcrDot11bTransitionTime. clcrMIBCompliance, ciscoLwappClRoamDot11aRfParamsGroup, ciscoLwappClRoamDot11bRfParamsGroup Added following attributes:- clcrDot11aMinRssiV2, clcrDot11aHysteresisV2, clcrDot11aAdaptiveScanThresholdV2, clcrDot11aTransitionTimeV2, clcrDot11bMinRssiV2, clcrDot11bHysteresisV2, clcrDot11bAdaptiveScanThresholdV2, clcrDot11bTransitionTimeV2. clcrMIBComplianceRev1, ciscoLwappClRoamDot11aRfParamsGroupSup1, ciscoLwappClRoamDot11bRfParamsGroupSup1', 'Initial version of this MIB module.',))
if mibBuilder.loadTexts: ciscoLwappClRoamMIB.setLastUpdated('201001290000Z')
if mibBuilder.loadTexts: ciscoLwappClRoamMIB.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoLwappClRoamMIB.setContactInfo('Cisco Systems, Customer Service Postal: 170 West Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS Email: [email protected]')
if mibBuilder.loadTexts: ciscoLwappClRoamMIB.setDescription("This MIB is intended to be implemented on all those devices operating as Central controllers, that terminate the Light Weight Access Point Protocol tunnel from Cisco Light-weight LWAPP Access Points. Information provided by this MIB is for CCX related features as specified in the CCX specifications. This MIB covers roaming RF parameters for CCX clients. The relationship between CC and the LWAPP APs can be depicted as follows: +......+ +......+ +......+ + + + + + + + CC + + CC + + CC + + + + + + + +......+ +......+ +......+ .. . . .. . . . . . . . . . . . . . . . . . . +......+ +......+ +......+ +......+ + + + + + + + + + AP + + AP + + AP + + AP + + + + + + + + + +......+ +......+ +......+ +......+ . . . . . . . . . . . . . . . . . . . +......+ +......+ +......+ +......+ + + + + + + + + + MN + + MN + + MN + + MN + + + + + + + + + +......+ +......+ +......+ +......+ The LWAPP tunnel exists between the controller and the APs. The MNs communicate with the APs through the protocol defined by the 802.11 standard. LWAPP APs, upon bootup, discover and join one of the controllers and the controller pushes the configuration, that includes the WLAN parameters, to the LWAPP APs. The APs then encapsulate all the 802.11 frames from wireless clients inside LWAPP frames and forward the LWAPP frames to the controller. GLOSSARY Access Point ( AP ) An entity that contains an 802.11 medium access control ( MAC ) and physical layer ( PHY ) interface and provides access to the distribution services via the wireless medium for associated clients. LWAPP APs encapsulate all the 802.11 frames in LWAPP frames and sends them to the controller to which it is logically connected. Basic Service Set ( BSS ) The IEEE 802.11 BSS of an AP comprises of the stations directly associating with the AP. Central Controller ( CC ) The central entity that terminates the LWAPP protocol tunnel from the LWAPP APs. Throughout this MIB, this entity is also referred to as 'controller'. Cisco Compatible eXtensions (CCX) Wireless LAN Access Points (APs) manufactured by Cisco Systems have features and capabilities beyond those in related standards (e.g., IEEE 802.11 suite of standards ,Wi-Fi recommendations by WECA, 802.1X security suite,etc). A number of features provide higher performance.For example, Cisco AP transmits a specific Information Element, which the clients adapt to for enhanced performance. Similarly, a number of features are implemented by means of proprietary Information Elements, which Cisco clients use in specific ways to carry out tasks above and beyond the standard. Other examples of feature categories are roaming and power saving. Client Roaming A client may decide to reassociate with another AP for reasons of its own choosing. The decision of whether or not to use the information contained in the AP list is up to the discretion of the implementor, as long as the roam time requirement is met. Light Weight Access Point Protocol ( LWAPP ) This is a generic protocol that defines the communication between the Access Points and the Central Controller. Mobile Node ( MN ) A roaming 802.11 wireless device in a wireless network associated with an access point. Mobile Node and client are used interchangeably. REFERENCE [1] Wireless LAN Medium Access Control ( MAC ) and Physical Layer ( PHY ) Specifications [2] Draft-obara-capwap-lwapp-00.txt, IETF Light Weight Access Point Protocol")
ciscoLwappClRoamMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 0))
ciscoLwappClRoamMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 1))
ciscoLwappClRoamMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 2))
clcrRoamDot11aRfParamConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1))
clcrRoamDot11bRfParamConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2))
clcrRoamReasonReport = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3))
clcrRoamDot11Stats = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 4))
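# For orientation, the OID subtree rooted at ciscoLwappClRoamMIB
# (1.3.6.1.4.1.9.9.523) laid out above is:
#   .0  ciscoLwappClRoamMIBNotifs    - notifications (none defined in this module)
#   .1  ciscoLwappClRoamMIBObjects
#       .1.1  clcrRoamDot11aRfParamConfig  - 802.11a roaming RF parameters
#       .1.2  clcrRoamDot11bRfParamConfig  - 802.11b/g roaming RF parameters
#       .1.3  clcrRoamReasonReport         - CCX roam reason reports
#       .1.4  clcrRoamDot11Stats           - per-radio roaming counters
#   .2  ciscoLwappClRoamMIBConform   - compliance statements and object groups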
clcrDot11aMode = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 1), CLDot11RfParamMode().clone('default')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11aMode.setStatus('current')
if mibBuilder.loadTexts: clcrDot11aMode.setDescription('This object represents how the controller chooses the values of the RF parameters needed to manage roaming in 802.11a networks.')
clcrDot11aMinRssi = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-90, -80)).clone(-85)).setUnits('dBm').setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11aMinRssi.setStatus('deprecated')
if mibBuilder.loadTexts: clcrDot11aMinRssi.setDescription("This object indicates the Minimum Received Signal Strength Indication (RSSI) in dBm required to associate with the AP. It also defines the edge of coverage for the BSS. If the client's average received signal power dips below this threshold, clients must have roamed to another AP with a stronger signal. This object is superseded by clcrDot11aMinRssiV2")
clcrDot11aHysteresis = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2, 4)).clone(2)).setUnits('dB').setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11aHysteresis.setStatus('deprecated')
if mibBuilder.loadTexts: clcrDot11aHysteresis.setDescription('This object indicates how much stronger the signal strength (dB) of a neighbor AP must be, in order for the client to roam to it. The use of roaming hysteresis is intended to reduce the amount of clients roaming back and forth between BSSs if the client is physically located on or near the border between two BSSs. This object is superseded by clcrDot11aHysteresisV2')
clcrDot11aAdaptiveScanThreshold = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-77, -70)).clone(-72)).setUnits('dBm').setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11aAdaptiveScanThreshold.setStatus('deprecated')
if mibBuilder.loadTexts: clcrDot11aAdaptiveScanThreshold.setDescription('This object configures the threshold for the strength of the signals received(RSSI) from an AP, as seen by an associated client, below which the client must be able to roam to a neighbor AP within the specified Transition Time configured through clcrDot11aTransitionTime. This object is superseded by clcrDot11aAdaptiveScanThresholdV2')
clcrDot11aTransitionTime = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 5), TimeInterval().subtype(subtypeSpec=ValueRangeConstraint(100, 10000)).clone(500)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11aTransitionTime.setStatus('deprecated')
if mibBuilder.loadTexts: clcrDot11aTransitionTime.setDescription("This object configures the maximum time duration permitted for the client to detect a suitable neighbor AP to roam to and to complete the roam, whenever the RSSI from the client's associated AP is below the adaptive scan threshold configured through clcrDot11aAdaptiveScanThreshold. The time is expressed in 100ths of a second. This object is superseded by clcrDot11aTransitionTimeV2")
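# The five scalars above are the original 802.11a roaming knobs; note their
# narrow value ranges (e.g. MinRssi -90..-80 dBm, Hysteresis 2..4 dB). They
# are deprecated in favor of the V2 objects defined next, which widen the
# ranges to -255..255 dBm / 0..255 dB / 0..10000 hundredths of a second.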
clcrDot11aMinRssiV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-255, 255))).setUnits('dBm').setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11aMinRssiV2.setStatus('current')
if mibBuilder.loadTexts: clcrDot11aMinRssiV2.setDescription("This object indicates the Minimum Received Signal Strength Indication (RSSI) in dBm required to associate with the AP. It also defines the edge of coverage for the BSS. If the client's average received signal power dips below this threshold, clients must have roamed to another AP with a stronger signal.")
clcrDot11aHysteresisV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setUnits('dB').setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11aHysteresisV2.setStatus('current')
if mibBuilder.loadTexts: clcrDot11aHysteresisV2.setDescription('This object indicates how much stronger the signal strength (dB) of a neighbor AP must be, in order for the client to roam to it. The use of roaming hysteresis is intended to reduce the amount of clients roaming back and forth between BSSs if the client is physically located on or near the border between two BSSs.')
clcrDot11aAdaptiveScanThresholdV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-255, 255))).setUnits('dBm').setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11aAdaptiveScanThresholdV2.setStatus('current')
if mibBuilder.loadTexts: clcrDot11aAdaptiveScanThresholdV2.setDescription('This object configures the threshold for the strength of the signals received(RSSI) from an AP, as seen by an associated client, below which the client must be able to roam to a neighbor AP within the specified Transition Time configured through clcrDot11aTransitionTime.')
clcrDot11aTransitionTimeV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 9), TimeInterval().subtype(subtypeSpec=ValueRangeConstraint(0, 10000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11aTransitionTimeV2.setStatus('current')
if mibBuilder.loadTexts: clcrDot11aTransitionTimeV2.setDescription("This object configures the maximum time duration permitted for the client to detect a suitable neighbor AP to roam to and to complete the roam, whenever the RSSI from the client's associated AP is below the adaptive scan threshold configured through clcrDot11aAdaptiveScanThreshold. The time is expressed in 100ths of a second.")
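# A minimal manager-side sketch of writing one of the V2 scalars via the
# pysnmp high-level API. This is illustrative only and not part of the
# generated module; the controller address 'wlc.example.com' and the
# community string 'private' are hypothetical placeholders. The code is
# commented out so that importing this module stays side-effect free.
#
# from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                           ContextData, ObjectType, ObjectIdentity, setCmd)
#
# errorIndication, errorStatus, errorIndex, varBinds = next(setCmd(
#     SnmpEngine(),
#     CommunityData('private'),                      # hypothetical write community
#     UdpTransportTarget(('wlc.example.com', 161)),  # hypothetical controller
#     ContextData(),
#     # scalar instances take the .0 suffix; -78 dBm is an arbitrary example value
#     ObjectType(ObjectIdentity('CISCO-LWAPP-CLIENT-ROAMING-MIB',
#                               'clcrDot11aMinRssiV2', 0), -78)))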
clcrDot11bMode = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 1), CLDot11RfParamMode().clone('default')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11bMode.setStatus('current')
if mibBuilder.loadTexts: clcrDot11bMode.setDescription('This object represents how the controller chooses the values of the RF parameters needed to manage roaming in 802.11b/g networks.')
clcrDot11bMinRssi = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-90, -80)).clone(-85)).setUnits('dBm').setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11bMinRssi.setStatus('deprecated')
if mibBuilder.loadTexts: clcrDot11bMinRssi.setDescription("This object indicates the minimum Received Signal Strength Indication (RSSI) in dBm required to associate with the AP. It also defines the edge of coverage for the BSS. If the client's average received signal power dips below this threshold, clients must have roamed to another AP with a stronger signal. This object is superseded by clcrDot11bMinRssiV2")
clcrDot11bHysteresis = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2, 4)).clone(2)).setUnits('dB').setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11bHysteresis.setStatus('deprecated')
if mibBuilder.loadTexts: clcrDot11bHysteresis.setDescription('This object indicates how much stronger the signal strength (dB) of a neighbor AP must be, in order for the client to roam to it. The use of roaming hysteresis is intended to reduce the amount of clients roaming back and forth between BSSs if the client is physically located on or near the border between two BSSs. This object is superseded by clcrDot11bHysteresisV2')
clcrDot11bAdaptiveScanThreshold = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-77, -70)).clone(-72)).setUnits('dBm').setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11bAdaptiveScanThreshold.setStatus('deprecated')
if mibBuilder.loadTexts: clcrDot11bAdaptiveScanThreshold.setDescription('This object configures the threshold for the strength of the signals received(RSSI) from an AP, as seen by an associated client, below which the client must be able to roam to a neighbor AP within the specified Transition Time configured through clcrDot11bTransitionTime. This object is superseded by clcrDot11bAdaptiveScanThresholdV2')
clcrDot11bTransitionTime = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 5), TimeInterval().subtype(subtypeSpec=ValueRangeConstraint(100, 10000)).clone(500)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11bTransitionTime.setStatus('deprecated')
if mibBuilder.loadTexts: clcrDot11bTransitionTime.setDescription("This object configures the maximum time duration permitted for the client to detect a suitable neighbor AP to roam to and to complete the roam, whenever the RSSI from the client's associated AP is below the adaptive scan threshold configured through clcrDot11bAdaptiveScanThreshold. The time is expressed in 100ths of a second. This object is superseded by clcrDot11bTransitionTimeV2")
clcrDot11bMinRssiV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-255, 255))).setUnits('dBm').setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11bMinRssiV2.setStatus('current')
if mibBuilder.loadTexts: clcrDot11bMinRssiV2.setDescription("This object indicates the minimum Received Signal Strength Indication (RSSI) in dBm required to associate with the AP. It also defines the edge of coverage for the BSS. If the client's average received signal power dips below this threshold, clients must have roamed to another AP with a stronger signal.")
clcrDot11bHysteresisV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setUnits('dB').setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11bHysteresisV2.setStatus('current')
if mibBuilder.loadTexts: clcrDot11bHysteresisV2.setDescription('This object indicates how much stronger the signal strength (dB) of a neighbor AP must be, in order for the client to roam to it. The use of roaming hysteresis is intended to reduce the amount of clients roaming back and forth between BSSs if the client is physically located on or near the border between two BSSs.')
clcrDot11bAdaptiveScanThresholdV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-255, 255))).setUnits('dBm').setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11bAdaptiveScanThresholdV2.setStatus('current')
if mibBuilder.loadTexts: clcrDot11bAdaptiveScanThresholdV2.setDescription('This object configures the threshold for the strength of the signals received(RSSI) from an AP, as seen by an associated client, below which the client must be able to roam to a neighbor AP within the specified Transition Time configured through clcrDot11bTransitionTime.')
clcrDot11bTransitionTimeV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 9), TimeInterval().subtype(subtypeSpec=ValueRangeConstraint(0, 10000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11bTransitionTimeV2.setStatus('current')
if mibBuilder.loadTexts: clcrDot11bTransitionTimeV2.setDescription("This object configures the maximum time duration permitted for the client to detect a suitable neighbor AP to roam to and to complete the roam, whenever the RSSI from the client's associated AP is below the adaptive scan threshold configured through clcrDot11bAdaptiveScanThreshold. The time is expressed in 100ths of a second.")
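# The 802.11b/g objects above mirror the 802.11a set one-for-one. Reading
# the effective V2 values could look like the following sketch (same
# hypothetical target and community as the earlier example; commented out
# on purpose):
#
# from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                           ContextData, ObjectType, ObjectIdentity, getCmd)
#
# errorIndication, errorStatus, errorIndex, varBinds = next(getCmd(
#     SnmpEngine(), CommunityData('public'),
#     UdpTransportTarget(('wlc.example.com', 161)), ContextData(),
#     ObjectType(ObjectIdentity('CISCO-LWAPP-CLIENT-ROAMING-MIB',
#                               'clcrDot11bMinRssiV2', 0)),
#     ObjectType(ObjectIdentity('CISCO-LWAPP-CLIENT-ROAMING-MIB',
#                               'clcrDot11bHysteresisV2', 0))))
# for varBind in varBinds:
#     print(varBind)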
clcrRoamReasonReportTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1), )
if mibBuilder.loadTexts: clcrRoamReasonReportTable.setStatus('current')
if mibBuilder.loadTexts: clcrRoamReasonReportTable.setDescription('This table provides the reasons for CCX clients roaming from one AP to another. When a CCX client associates to an AP, it will always send an IAPP information packet to the new AP listing the characteristics of the previous AP. An entry is added to this table when a roam reason report is sent by a CCX client when it roams to a new AP.')
clcrRoamReasonReportEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1), ).setIndexNames((0, "CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrRoamClientMacAddress"), (0, "CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrRoamClientTimeStamp"))
if mibBuilder.loadTexts: clcrRoamReasonReportEntry.setStatus('current')
if mibBuilder.loadTexts: clcrRoamReasonReportEntry.setDescription('Each entry corresponds to the roam reason report sent by a CCX client to the new AP to which the client associates.')
clcrRoamClientMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 1), MacAddress())
if mibBuilder.loadTexts: clcrRoamClientMacAddress.setStatus('current')
if mibBuilder.loadTexts: clcrRoamClientMacAddress.setDescription('This object indicates the mac address of the client which has roamed to a new AP.')
clcrRoamClientTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 2), TimeTicks())
if mibBuilder.loadTexts: clcrRoamClientTimeStamp.setStatus('current')
if mibBuilder.loadTexts: clcrRoamClientTimeStamp.setDescription("This object indicates the time instance at which this report was received by the new AP to which the client roamed. This represents the number of seconds elapsed since 00:00:00 on January 1, 1970, Coordinated Universal Time (UTC). So a value of '1131362704' means 'Mon Nov 7 16:55:04 2005'.")
clcrRoamNewApMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 3), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: clcrRoamNewApMacAddress.setStatus('current')
if mibBuilder.loadTexts: clcrRoamNewApMacAddress.setDescription('This object indicates the mac address of the current AP to which the client has roamed. This AP receives the roam reason report.')
clcrRoamPrevApMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 4), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: clcrRoamPrevApMacAddress.setStatus('current')
if mibBuilder.loadTexts: clcrRoamPrevApMacAddress.setDescription('This object indicates the mac address of the previous AP to which client was associated.')
clcrRoamPrevApChannel = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 5), CLDot11Channel()).setMaxAccess("readonly")
if mibBuilder.loadTexts: clcrRoamPrevApChannel.setStatus('current')
if mibBuilder.loadTexts: clcrRoamPrevApChannel.setDescription('This object indicates the channel number at which the client was associated to the previous AP.')
clcrRoamPrevApSsid = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: clcrRoamPrevApSsid.setStatus('current')
if mibBuilder.loadTexts: clcrRoamPrevApSsid.setDescription('This object indicates the SSID at which the client was associated to the previous AP.')
clcrRoamDisassocTimeInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 7), TimeInterval()).setMaxAccess("readonly")
if mibBuilder.loadTexts: clcrRoamDisassocTimeInterval.setStatus('current')
if mibBuilder.loadTexts: clcrRoamDisassocTimeInterval.setDescription('This object indicates the time elapsed since the client disassociated, in hundredths of a second.')
clcrRoamReason = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("clcrUnspecified", 0), ("clcrPoorLink", 1), ("clcrLoadBalancing", 2), ("clcrInsufficientCapacity", 3), ("clcrDirectedRoam", 4), ("clcrFirstAssociation", 5), ("clcrRoamingIn", 6), ("clcrRoamingOut", 7), ("clcrBetterAp", 8), ("clcrDisassociated", 9)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: clcrRoamReason.setStatus('current')
if mibBuilder.loadTexts: clcrRoamReason.setDescription("This object indicates the reason for a client to roam to a new AP. The semantics are as follows. clcrUnspecified - The reason is not known or can't be found. clcrPoorLink - Normal roam due to poor link (excessive retries, too much interference, RSSI too low, etc.) clcrLoadBalancing - Normal roam due to load balancing clcrInsufficientCapacity - Roaming occurred due to the insufficient capacity on the previous AP (TSPEC rejected) clcrDirectedRoam - Roaming is directed by the 802.11 wireless Infrastructure clcrFirstAssociation - This is the first association to a particular WLAN clcrRoamingIn - Roaming in from cellular or other WAN clcrRoamingOut - Roaming out to cellular or other WAN clcrBetterAp - Normal roam due to better AP found clcrDisassociated - Deauthenticated or Disassociated from the previous AP.")
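# Walking the roam reason report table from a manager could look like the
# sketch below (hypothetical target/community again; commented out).
# lexicographicMode=False makes nextCmd stop at the end of the subtree, so
# the loop yields one clcrRoamReason per (client MAC, timestamp) index pair.
#
# from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                           ContextData, ObjectType, ObjectIdentity, nextCmd)
#
# for errorIndication, errorStatus, errorIndex, varBinds in nextCmd(
#         SnmpEngine(), CommunityData('public'),
#         UdpTransportTarget(('wlc.example.com', 161)), ContextData(),
#         ObjectType(ObjectIdentity('CISCO-LWAPP-CLIENT-ROAMING-MIB',
#                                   'clcrRoamReason')),
#         lexicographicMode=False):
#     if errorIndication or errorStatus:
#         break
#     for varBind in varBinds:
#         print(varBind)  # e.g. ...clcrRoamReason.<mac>.<ticks> = clcrBetterAp(8)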
clcrDot11StatsTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 4, 1), )
if mibBuilder.loadTexts: clcrDot11StatsTable.setStatus('current')
if mibBuilder.loadTexts: clcrDot11StatsTable.setDescription('This table contains the statistics collected when clients roam in the WLAN. There exists a row in this table for each conceptual row in cLApDot11IfTable that represents a dot11 interface of an AP.')
clcrDot11StatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 4, 1, 1), ).setIndexNames((0, "CISCO-LWAPP-AP-MIB", "cLApSysMacAddress"), (0, "CISCO-LWAPP-AP-MIB", "cLApDot11IfSlotId"))
if mibBuilder.loadTexts: clcrDot11StatsEntry.setStatus('current')
if mibBuilder.loadTexts: clcrDot11StatsEntry.setDescription('Each entry represents a conceptual row in clcrDot11StatsTable and corresponds to the roam reason report sent by a CCX client to the new AP to which the client associates.')
clcrDot11NeighborRequestRx = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 4, 1, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: clcrDot11NeighborRequestRx.setStatus('current')
if mibBuilder.loadTexts: clcrDot11NeighborRequestRx.setDescription('This object indicates the count of the number of requests received from an E2E client for neighbor updates.')
clcrDot11NeighborReplySent = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 4, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: clcrDot11NeighborReplySent.setStatus('current')
if mibBuilder.loadTexts: clcrDot11NeighborReplySent.setDescription('This object indicates the count of the number of replies sent to the client in reply to the request for neighbor updates received from the client.')
clcrDot11RoamReasonReportRx = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 4, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: clcrDot11RoamReasonReportRx.setStatus('current')
if mibBuilder.loadTexts: clcrDot11RoamReasonReportRx.setDescription('This object reports the count of the number of roam reason reports received from CCX clients.')
clcrDot11BcastUpdatesSent = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 4, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: clcrDot11BcastUpdatesSent.setStatus('current')
if mibBuilder.loadTexts: clcrDot11BcastUpdatesSent.setDescription('This object indicates the count of the number of broadcast neighbor updates sent by an AP.')
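# Note: clcrDot11StatsTable is indexed by (cLApSysMacAddress,
# cLApDot11IfSlotId) imported from CISCO-LWAPP-AP-MIB, i.e. there is one row
# of the four read-only Counter32 objects above per dot11 radio of each
# joined AP.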
ciscoLwappClRoamMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 1))
ciscoLwappClRoamMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 2))
clcrMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 1, 1)).setObjects(("CISCO-LWAPP-CLIENT-ROAMING-MIB", "ciscoLwappClRoamDot11aRfParamsGroup"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "ciscoLwappClRoamDot11bRfParamsGroup"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "ciscoLwappClRoamroamReasonGroup"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "ciscoLwappClRoamroamingStatsGroup"))
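# The getattr(mibBuilder, 'version', ...) > (4, 4, 0) guards that follow are
# emitted by the pysmi code generator; they appear to skip setStatus() on
# compliance/group objects under pysnmp releases older than 4.4.1, where
# those node types did not accept a status. This is an observation about the
# generated pattern, not a documented contract.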
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clcrMIBCompliance = clcrMIBCompliance.setStatus('deprecated')
if mibBuilder.loadTexts: clcrMIBCompliance.setDescription('The compliance statement for the SNMP entities that implement the ciscoLwappClRoamMIB module.')
clcrMIBComplianceRev1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 1, 2)).setObjects(("CISCO-LWAPP-CLIENT-ROAMING-MIB", "ciscoLwappClRoamDot11aRfParamsGroupSup1"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "ciscoLwappClRoamDot11bRfParamsGroupSup1"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "ciscoLwappClRoamroamReasonGroup"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "ciscoLwappClRoamroamingStatsGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clcrMIBComplianceRev1 = clcrMIBComplianceRev1.setStatus('current')
if mibBuilder.loadTexts: clcrMIBComplianceRev1.setDescription('The compliance statement for the SNMP entities that implement the ciscoLwappClRoamMIB module.')
ciscoLwappClRoamDot11aRfParamsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 2, 1)).setObjects(("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11aMode"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11aMinRssi"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11aHysteresis"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11aAdaptiveScanThreshold"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11aTransitionTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoLwappClRoamDot11aRfParamsGroup = ciscoLwappClRoamDot11aRfParamsGroup.setStatus('deprecated')
if mibBuilder.loadTexts: ciscoLwappClRoamDot11aRfParamsGroup.setDescription('This collection of objects represents the radio parameters for the 802.11a networks.')
ciscoLwappClRoamDot11bRfParamsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 2, 2)).setObjects(("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11bMode"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11bMinRssi"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11bHysteresis"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11bAdaptiveScanThreshold"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11bTransitionTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoLwappClRoamDot11bRfParamsGroup = ciscoLwappClRoamDot11bRfParamsGroup.setStatus('deprecated')
if mibBuilder.loadTexts: ciscoLwappClRoamDot11bRfParamsGroup.setDescription('This collection of objects represents the radio parameters for the 802.11b/g bands.')
ciscoLwappClRoamroamReasonGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 2, 3)).setObjects(("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrRoamNewApMacAddress"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrRoamPrevApMacAddress"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrRoamPrevApChannel"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrRoamPrevApSsid"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrRoamDisassocTimeInterval"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrRoamReason"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoLwappClRoamroamReasonGroup = ciscoLwappClRoamroamReasonGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoLwappClRoamroamReasonGroup.setDescription('This collection of objects provides the reasons for clients roaming between APs.')
ciscoLwappClRoamroamingStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 2, 4)).setObjects(("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11NeighborRequestRx"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11NeighborReplySent"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11RoamReasonReportRx"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11BcastUpdatesSent"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoLwappClRoamroamingStatsGroup = ciscoLwappClRoamroamingStatsGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoLwappClRoamroamingStatsGroup.setDescription('This collection of objects provides the counters related to roaming.')
ciscoLwappClRoamDot11aRfParamsGroupSup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 2, 5)).setObjects(("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11aMode"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11aMinRssiV2"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11aHysteresisV2"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11aAdaptiveScanThresholdV2"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11aTransitionTimeV2"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoLwappClRoamDot11aRfParamsGroupSup1 = ciscoLwappClRoamDot11aRfParamsGroupSup1.setStatus('current')
if mibBuilder.loadTexts: ciscoLwappClRoamDot11aRfParamsGroupSup1.setDescription('This collection of objects represents the radio parameters for the 802.11a networks.')
ciscoLwappClRoamDot11bRfParamsGroupSup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 2, 6)).setObjects(("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11bMode"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11bMinRssiV2"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11bHysteresisV2"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11bAdaptiveScanThresholdV2"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11bTransitionTimeV2"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoLwappClRoamDot11bRfParamsGroupSup1 = ciscoLwappClRoamDot11bRfParamsGroupSup1.setStatus('current')
if mibBuilder.loadTexts: ciscoLwappClRoamDot11bRfParamsGroupSup1.setDescription('This collection of objects represents the radio parameters for the 802.11b/g bands.')
mibBuilder.exportSymbols("CISCO-LWAPP-CLIENT-ROAMING-MIB", clcrDot11aMinRssi=clcrDot11aMinRssi, clcrRoamClientMacAddress=clcrRoamClientMacAddress, ciscoLwappClRoamroamingStatsGroup=ciscoLwappClRoamroamingStatsGroup, clcrDot11bTransitionTimeV2=clcrDot11bTransitionTimeV2, clcrRoamNewApMacAddress=clcrRoamNewApMacAddress, clcrMIBCompliance=clcrMIBCompliance, clcrRoamDot11aRfParamConfig=clcrRoamDot11aRfParamConfig, clcrDot11BcastUpdatesSent=clcrDot11BcastUpdatesSent, clcrRoamPrevApSsid=clcrRoamPrevApSsid, clcrMIBComplianceRev1=clcrMIBComplianceRev1, clcrDot11bHysteresisV2=clcrDot11bHysteresisV2, ciscoLwappClRoamMIBConform=ciscoLwappClRoamMIBConform, clcrDot11aTransitionTime=clcrDot11aTransitionTime, clcrDot11aHysteresis=clcrDot11aHysteresis, ciscoLwappClRoamDot11bRfParamsGroupSup1=ciscoLwappClRoamDot11bRfParamsGroupSup1, PYSNMP_MODULE_ID=ciscoLwappClRoamMIB, clcrDot11bHysteresis=clcrDot11bHysteresis, clcrDot11StatsEntry=clcrDot11StatsEntry, clcrRoamDisassocTimeInterval=clcrRoamDisassocTimeInterval, ciscoLwappClRoamDot11aRfParamsGroupSup1=ciscoLwappClRoamDot11aRfParamsGroupSup1, clcrDot11bAdaptiveScanThreshold=clcrDot11bAdaptiveScanThreshold, clcrDot11NeighborRequestRx=clcrDot11NeighborRequestRx, clcrRoamClientTimeStamp=clcrRoamClientTimeStamp, clcrRoamReason=clcrRoamReason, clcrDot11bMode=clcrDot11bMode, clcrDot11aAdaptiveScanThreshold=clcrDot11aAdaptiveScanThreshold, clcrDot11RoamReasonReportRx=clcrDot11RoamReasonReportRx, clcrDot11bAdaptiveScanThresholdV2=clcrDot11bAdaptiveScanThresholdV2, ciscoLwappClRoamDot11bRfParamsGroup=ciscoLwappClRoamDot11bRfParamsGroup, ciscoLwappClRoamMIBNotifs=ciscoLwappClRoamMIBNotifs, clcrRoamReasonReportTable=clcrRoamReasonReportTable, clcrDot11aMinRssiV2=clcrDot11aMinRssiV2, ciscoLwappClRoamMIBObjects=ciscoLwappClRoamMIBObjects, clcrDot11NeighborReplySent=clcrDot11NeighborReplySent, clcrDot11aAdaptiveScanThresholdV2=clcrDot11aAdaptiveScanThresholdV2, ciscoLwappClRoamroamReasonGroup=ciscoLwappClRoamroamReasonGroup, clcrDot11StatsTable=clcrDot11StatsTable, clcrRoamDot11Stats=clcrRoamDot11Stats, clcrRoamDot11bRfParamConfig=clcrRoamDot11bRfParamConfig, clcrDot11bMinRssi=clcrDot11bMinRssi, clcrRoamReasonReport=clcrRoamReasonReport, clcrRoamPrevApMacAddress=clcrRoamPrevApMacAddress, ciscoLwappClRoamDot11aRfParamsGroup=ciscoLwappClRoamDot11aRfParamsGroup, clcrRoamReasonReportEntry=clcrRoamReasonReportEntry, ciscoLwappClRoamMIBGroups=ciscoLwappClRoamMIBGroups, clcrDot11bMinRssiV2=clcrDot11bMinRssiV2, ciscoLwappClRoamMIBCompliances=ciscoLwappClRoamMIBCompliances, clcrDot11aMode=clcrDot11aMode, clcrDot11aTransitionTimeV2=clcrDot11aTransitionTimeV2, clcrRoamPrevApChannel=clcrRoamPrevApChannel, clcrDot11bTransitionTime=clcrDot11bTransitionTime, ciscoLwappClRoamMIB=ciscoLwappClRoamMIB, clcrDot11aHysteresisV2=clcrDot11aHysteresisV2)
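# How a pysnmp entity might consume this compiled module, assuming it has
# been placed in a directory on the MIB builder's search path (the path
# below is a hypothetical placeholder; commented out to keep this module
# free of import-time side effects):
#
# from pysnmp.smi import builder, view
# mibBuilder = builder.MibBuilder()
# mibBuilder.addMibSources(builder.DirMibSource('/path/to/compiled/mibs'))
# mibBuilder.loadModules('CISCO-LWAPP-CLIENT-ROAMING-MIB')
# mibView = view.MibViewController(mibBuilder)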
|
normal
|
{
"blob_id": "76fbe055b53af9321cc0d57a210cfffe9188f800",
"index": 6531,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nciscoLwappClRoamMIB.setRevisions(('2010-01-29 00:00', '2006-04-11 00:00'))\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n if mibBuilder.loadTexts:\n ciscoLwappClRoamMIB.setRevisionsDescriptions((\n 'Deprecated following attributes:- clcrDot11aMinRssi, clcrDot11aHysteresis, clcrDot11aAdaptiveScanThreshold, clcrDot11aTransitionTime, clcrDot11bMinRssi, clcrDot11bHysteresis, clcrDot11bAdaptiveScanThreshold, clcrDot11bTransitionTime. clcrMIBCompliance, ciscoLwappClRoamDot11aRfParamsGroup, ciscoLwappClRoamDot11bRfParamsGroup Added following attributes:- clcrDot11aMinRssiV2, clcrDot11aHysteresisV2, clcrDot11aAdaptiveScanThresholdV2, clcrDot11aTransitionTimeV2, clcrDot11bMinRssiV2, clcrDot11bHysteresisV2, clcrDot11bAdaptiveScanThresholdV2, clcrDot11bTransitionTimeV2. clcrMIBComplianceRev1, ciscoLwappClRoamDot11aRfParamsGroupSup1, ciscoLwappClRoamDot11bRfParamsGroupSup1'\n , 'Initial version of this MIB module.'))\nif mibBuilder.loadTexts:\n ciscoLwappClRoamMIB.setLastUpdated('201001290000Z')\nif mibBuilder.loadTexts:\n ciscoLwappClRoamMIB.setOrganization('Cisco Systems, Inc.')\nif mibBuilder.loadTexts:\n ciscoLwappClRoamMIB.setContactInfo(\n 'Cisco Systems, Customer Service Postal: 170 West Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS Email: [email protected]'\n )\nif mibBuilder.loadTexts:\n ciscoLwappClRoamMIB.setDescription(\n \"This MIB is intended to be implemented on all those devices operating as Central controllers, that terminate the Light Weight Access Point Protocol tunnel from Cisco Light-weight LWAPP Access Points. Information provided by this MIB is for CCX related features as specified in the CCX specifications. This MIB covers roaming RF parameters for CCX clients. The relationship between CC and the LWAPP APs can be depicted as follows: +......+ +......+ +......+ + + + + + + + CC + + CC + + CC + + + + + + + +......+ +......+ +......+ .. . . .. . . . . . . . . . . . . . . . . . . +......+ +......+ +......+ +......+ + + + + + + + + + AP + + AP + + AP + + AP + + + + + + + + + +......+ +......+ +......+ +......+ . . . . . . . . . . . . . . . . . . . +......+ +......+ +......+ +......+ + + + + + + + + + MN + + MN + + MN + + MN + + + + + + + + + +......+ +......+ +......+ +......+ The LWAPP tunnel exists between the controller and the APs. The MNs communicate with the APs through the protocol defined by the 802.11 standard. LWAPP APs, upon bootup, discover and join one of the controllers and the controller pushes the configuration, that includes the WLAN parameters, to the LWAPP APs. The APs then encapsulate all the 802.11 frames from wireless clients inside LWAPP frames and forward the LWAPP frames to the controller. GLOSSARY Access Point ( AP ) An entity that contains an 802.11 medium access control ( MAC ) and physical layer ( PHY ) interface and provides access to the distribution services via the wireless medium for associated clients. LWAPP APs encapsulate all the 802.11 frames in LWAPP frames and sends them to the controller to which it is logically connected. Basic Service Set ( BSS ) The IEEE 802.11 BSS of an AP comprises of the stations directly associating with the AP. Central Controller ( CC ) The central entity that terminates the LWAPP protocol tunnel from the LWAPP APs. Throughout this MIB, this entity is also referred to as 'controller'. 
Cisco Compatible eXtensions (CCX) Wireless LAN Access Points (APs) manufactured by Cisco Systems have features and capabilities beyond those in related standards (e.g., IEEE 802.11 suite of standards ,Wi-Fi recommendations by WECA, 802.1X security suite,etc). A number of features provide higher performance.For example, Cisco AP transmits a specific Information Element, which the clients adapt to for enhanced performance. Similarly, a number of features are implemented by means of proprietary Information Elements, which Cisco clients use in specific ways to carry out tasks above and beyond the standard. Other examples of feature categories are roaming and power saving. Client Roaming A client may decide to reassociate with another AP for reasons of its own choosing. The decision of whether or not to use the information contained in the AP list is up to the discretion of the implementor, as long as the roam time requirement is met. Light Weight Access Point Protocol ( LWAPP ) This is a generic protocol that defines the communication between the Access Points and the Central Controller. Mobile Node ( MN ) A roaming 802.11 wireless device in a wireless network associated with an access point. Mobile Node and client are used interchangeably. REFERENCE [1] Wireless LAN Medium Access Control ( MAC ) and Physical Layer ( PHY ) Specifications [2] Draft-obara-capwap-lwapp-00.txt, IETF Light Weight Access Point Protocol\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrDot11aMode.setStatus('current')\nif mibBuilder.loadTexts:\n clcrDot11aMode.setDescription(\n 'This object represents how the controller chooses the values of the RF parameters needed to manage roaming in 802.11a networks.'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrDot11aMinRssi.setStatus('deprecated')\nif mibBuilder.loadTexts:\n clcrDot11aMinRssi.setDescription(\n \"This object indicates the Minimum Received Signal Strength Indication (RSSI) in dBm required to associate with the AP. It also defines the edge of coverage for the BSS. If the client's average received signal power dips below this threshold, clients must have roamed to another AP with a stronger signal. This object is superceded by clcrDot11aMinRssiV2\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrDot11aHysteresis.setStatus('deprecated')\nif mibBuilder.loadTexts:\n clcrDot11aHysteresis.setDescription(\n 'This object indicates how much stronger the signal strength (dB) of a neighbor AP must be, in order for the client to roam to it. The use of roaming hysteresis is intended to reduce the amount of clients roaming back and forth between BSSs if the client is physically located on or near the border between two BSSs. This object is superceded by clcrDot11aHysteresisV2'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrDot11aAdaptiveScanThreshold.setStatus('deprecated')\nif mibBuilder.loadTexts:\n clcrDot11aAdaptiveScanThreshold.setDescription(\n 'This object configures the threshold for the strength of the signals received(RSSI) from an AP, as seen by an associated client, below which the client must be able to roam to a neighbor AP within the specified Transition Time configured through clcrDot11aTransitionTime. 
This object is superceded by clcrDot11aAdaptiveScanThresholdV2'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrDot11aTransitionTime.setStatus('deprecated')\nif mibBuilder.loadTexts:\n clcrDot11aTransitionTime.setDescription(\n 'This object configures the maximum time duration permitted for the client to detect a suitable neighbor AP to roam to and to complete the roam, whenever the RSSI from the client?s associated AP is below the adaptive scan threshold configured through clcrDot11aAdaptiveScanThreshold. The time is expressed in 100th of a second. This object is superceded by clcrDot11aTransitionTimeV2'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrDot11aMinRssiV2.setStatus('current')\nif mibBuilder.loadTexts:\n clcrDot11aMinRssiV2.setDescription(\n \"This object indicates the Minimum Received Signal Strength Indication (RSSI) in dBm required to associate with the AP. It also defines the edge of coverage for the BSS. If the client's average received signal power dips below this threshold, clients must have roamed to another AP with a stronger signal.\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrDot11aHysteresisV2.setStatus('current')\nif mibBuilder.loadTexts:\n clcrDot11aHysteresisV2.setDescription(\n 'This object indicates how much stronger the signal strength (dB) of a neighbor AP must be, in order for the client to roam to it. The use of roaming hysteresis is intended to reduce the amount of clients roaming back and forth between BSSs if the client is physically located on or near the border between two BSSs.'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrDot11aAdaptiveScanThresholdV2.setStatus('current')\nif mibBuilder.loadTexts:\n clcrDot11aAdaptiveScanThresholdV2.setDescription(\n 'This object configures the threshold for the strength of the signals received(RSSI) from an AP, as seen by an associated client, below which the client must be able to roam to a neighbor AP within the specified Transition Time configured through clcrDot11aTransitionTime.'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrDot11aTransitionTimeV2.setStatus('current')\nif mibBuilder.loadTexts:\n clcrDot11aTransitionTimeV2.setDescription(\n 'This object configures the maximum time duration permitted for the client to detect a suitable neighbor AP to roam to and to complete the roam, whenever the RSSI from the clients associated AP is below the adaptive scan threshold configured through clcrDot11aAdaptiveScanThreshold. The time is expressed in 100th of a second.'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrDot11bMode.setStatus('current')\nif mibBuilder.loadTexts:\n clcrDot11bMode.setDescription(\n 'This object represents how the controller chooses the values of the RF parameters needed to manage roaming in 802.11b/g networks.'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrDot11bMinRssi.setStatus('deprecated')\nif mibBuilder.loadTexts:\n clcrDot11bMinRssi.setDescription(\n \"This object indicates the minimum Received Signal Strength Indication (RSSI) in dBm required to associate with the AP. It also defines the edge of coverage for the BSS. If the client's average received signal power dips below this threshold, clients must have roamed to another AP with a stronger signal. 
This object is superceded by clcrDot11bMinRssiV2\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrDot11bHysteresis.setStatus('deprecated')\nif mibBuilder.loadTexts:\n clcrDot11bHysteresis.setDescription(\n 'This object indicates how much stronger the signal strength (dB) of a neighbor AP must be, in order for the client to roam to it. The use of roaming hysteresis is intended to reduce the amount of clients roaming back and forth between BSSs if the client is physically located on or near the border between two BSSs. This object is superceded by clcrDot11bHysteresisV2'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrDot11bAdaptiveScanThreshold.setStatus('deprecated')\nif mibBuilder.loadTexts:\n clcrDot11bAdaptiveScanThreshold.setDescription(\n 'This object configures the threshold for the strength of the signals received(RSSI) from an AP, as seen by an associated client, below which the client must be able to roam to a neighbor AP within the specified Transition Time configured through clcrDot11bTransitionTime. This object is superceded by clcrDot11bAdaptiveScanThresholdV2'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrDot11bTransitionTime.setStatus('deprecated')\nif mibBuilder.loadTexts:\n clcrDot11bTransitionTime.setDescription(\n 'This object configures the maximum time duration permitted for the client to detect a suitable neighbor AP to roam to and to complete the roam, whenever the RSSI from the client is associated AP is below the adaptive scan threshold configured through clcrDot11aAdaptiveScanThreshold. The time is expressed in 100th of a second. This object is superceded by clcrDot11bTransitionTimeV2'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrDot11bMinRssiV2.setStatus('current')\nif mibBuilder.loadTexts:\n clcrDot11bMinRssiV2.setDescription(\n \"This object indicates the minimum Received Signal Strength Indication (RSSI) in dBm required to associate with the AP. It also defines the edge of coverage for the BSS. If the client's average received signal power dips below this threshold, clients must have roamed to another AP with a stronger signal.\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrDot11bHysteresisV2.setStatus('current')\nif mibBuilder.loadTexts:\n clcrDot11bHysteresisV2.setDescription(\n 'This object indicates how much stronger the signal strength (dB) of a neighbor AP must be, in order for the client to roam to it. The use of roaming hysteresis is intended to reduce the amount of clients roaming back and forth between BSSs if the client is physically located on or near the border between two BSSs.'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrDot11bAdaptiveScanThresholdV2.setStatus('current')\nif mibBuilder.loadTexts:\n clcrDot11bAdaptiveScanThresholdV2.setDescription(\n 'This object configures the threshold for the strength of the signals received(RSSI) from an AP, as seen by an associated client, below which the client must be able to roam to a neighbor AP within the specified Transition Time configured through clcrDot11bTransitionTime.'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrDot11bTransitionTimeV2.setStatus('current')\nif mibBuilder.loadTexts:\n clcrDot11bTransitionTimeV2.setDescription(\n 'This object configures the maximum time duration permitted for the client to detect a suitable neighbor AP to roam to and to complete the roam, whenever the RSSI from the client is associated AP is below the adaptive scan threshold configured through clcrDot11aAdaptiveScanThreshold. 
The time is expressed in 100th of a second.'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrRoamReasonReportTable.setStatus('current')\nif mibBuilder.loadTexts:\n clcrRoamReasonReportTable.setDescription(\n 'This table provides the reasons for CCX clients roaming from one AP to another. When a CCX client associates to an AP, it will always send an IAPP information packet to the new AP listing the characteristics of the previous AP. An entry is added to this table when a roam reason report is sent by a CCX client when it roams to a new AP.'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrRoamReasonReportEntry.setStatus('current')\nif mibBuilder.loadTexts:\n clcrRoamReasonReportEntry.setDescription(\n 'Each entry corresponds to the roam reason report sent by a CCX client to the new AP to which client associates.'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrRoamClientMacAddress.setStatus('current')\nif mibBuilder.loadTexts:\n clcrRoamClientMacAddress.setDescription(\n 'This object indicates the mac address of the client which has roamed to a new AP.'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrRoamClientTimeStamp.setStatus('current')\nif mibBuilder.loadTexts:\n clcrRoamClientTimeStamp.setDescription(\n \"This object indicates the time instance at which this report was received by the new AP, to which client roamed to. This represents number of seconds elapsed since 00:00:00 on January 1, 1970, Coordinated Universal Time (UTC). So a value of '1131362704' means 'Mon Nov 7 16:55:04 2005'.\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrRoamNewApMacAddress.setStatus('current')\nif mibBuilder.loadTexts:\n clcrRoamNewApMacAddress.setDescription(\n 'This object indicates the mac address of the current AP to which client has roamed to. This AP receives the roam reason report.'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrRoamPrevApMacAddress.setStatus('current')\nif mibBuilder.loadTexts:\n clcrRoamPrevApMacAddress.setDescription(\n 'This object indicates the mac address of the previous AP to which client was associated.'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrRoamPrevApChannel.setStatus('current')\nif mibBuilder.loadTexts:\n clcrRoamPrevApChannel.setDescription(\n 'This object indicates the channel number at which the client was associated to the previous AP.'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrRoamPrevApSsid.setStatus('current')\nif mibBuilder.loadTexts:\n clcrRoamPrevApSsid.setDescription(\n 'This object indicates the SSID at which the client was associated to the previous AP.'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrRoamDisassocTimeInterval.setStatus('current')\nif mibBuilder.loadTexts:\n clcrRoamDisassocTimeInterval.setDescription(\n 'This object indicates the time elapsed since the client disassociated, in hundredth of a second.'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrRoamReason.setStatus('current')\nif mibBuilder.loadTexts:\n clcrRoamReason.setDescription(\n \"This object indicates the reason for a client to roam to a new AP. The semantics are as follows. clcrUnspecified - The reason is not known or can't be found. clcrPoorLink - Normal roam due to poor link (excessive retries, too much interference, RSSI too low, etc.) 
clcrLoadBalancing - Normal roam due to load balancing clcrInsufficientCapacity - Roaming occured due to the insufficient capacity on the previous AP (TSPEC rejected) clcrDirectedRoam - Roaming is directed by the 802.11 wireless Infrastructure clcrFirstAssociation - This is the first association to a particular WLAN clcrRoamingIn - Roaming in from cellular or other WAN clcrRoamingOut - Roaming out to cellular or other WAN clcrBetterAp - Normal roam due to better AP found clcrDisassociated - Deauthenticated or Disassociated from the previous AP.\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrDot11StatsTable.setStatus('current')\nif mibBuilder.loadTexts:\n clcrDot11StatsTable.setDescription(\n 'This table populates the statistics collected when the client roamed in the WLAN. There exists a row in this table for each conceptual row in cLApDot11IfTable that represents a dot11 interface of an AP.'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrDot11StatsEntry.setStatus('current')\nif mibBuilder.loadTexts:\n clcrDot11StatsEntry.setDescription(\n 'Each entry represents a conceptual row in clcrDot11StatsTable and corresponds to the roam reason report sent by a CCX client to the new AP which the client associates to.'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrDot11NeighborRequestRx.setStatus('current')\nif mibBuilder.loadTexts:\n clcrDot11NeighborRequestRx.setDescription(\n 'This object indicates the count of the number of requests received from an E2E client for neighbor updates.'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrDot11NeighborReplySent.setStatus('current')\nif mibBuilder.loadTexts:\n clcrDot11NeighborReplySent.setDescription(\n 'This object indicates the count of the number of replies sent to the client in reply to the request for neighbor updates received from the client.'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrDot11RoamReasonReportRx.setStatus('current')\nif mibBuilder.loadTexts:\n clcrDot11RoamReasonReportRx.setDescription(\n 'This object reports the count of the number of roam reason reports received from CCX clients.'\n )\n<mask token>\nif mibBuilder.loadTexts:\n clcrDot11BcastUpdatesSent.setStatus('current')\nif mibBuilder.loadTexts:\n clcrDot11BcastUpdatesSent.setDescription(\n 'This object indicates the count of the number of broadcast neighbor updates sent by an AP.'\n )\n<mask token>\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n clcrMIBCompliance = clcrMIBCompliance.setStatus('deprecated')\nif mibBuilder.loadTexts:\n clcrMIBCompliance.setDescription(\n 'The compliance statement for the SNMP entities that implement the ciscoLwappRoamMIB module.'\n )\n<mask token>\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n clcrMIBComplianceRev1 = clcrMIBComplianceRev1.setStatus('current')\nif mibBuilder.loadTexts:\n clcrMIBComplianceRev1.setDescription(\n 'The compliance statement for the SNMP entities that implement the ciscoLwappRoamMIB module.'\n )\n<mask token>\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n ciscoLwappClRoamDot11aRfParamsGroup = (ciscoLwappClRoamDot11aRfParamsGroup\n .setStatus('deprecated'))\nif mibBuilder.loadTexts:\n ciscoLwappClRoamDot11aRfParamsGroup.setDescription(\n 'This collection of objects represent the radio parameters for the 802.11a networks.'\n )\n<mask token>\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n ciscoLwappClRoamDot11bRfParamsGroup = (ciscoLwappClRoamDot11bRfParamsGroup\n .setStatus('deprecated'))\nif mibBuilder.loadTexts:\n 
ciscoLwappClRoamDot11bRfParamsGroup.setDescription(\n 'This collection of objects represent the radio parameters for the 802.11b/g bands.'\n )\n<mask token>\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n ciscoLwappClRoamroamReasonGroup = (ciscoLwappClRoamroamReasonGroup.\n setStatus('current'))\nif mibBuilder.loadTexts:\n ciscoLwappClRoamroamReasonGroup.setDescription(\n 'This collection of objects provide the reasons for clients roaming between APs.'\n )\n<mask token>\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n ciscoLwappClRoamroamingStatsGroup = (ciscoLwappClRoamroamingStatsGroup.\n setStatus('current'))\nif mibBuilder.loadTexts:\n ciscoLwappClRoamroamingStatsGroup.setDescription(\n 'This collection of objects provide the counters related to roaming.')\n<mask token>\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n ciscoLwappClRoamDot11aRfParamsGroupSup1 = (\n ciscoLwappClRoamDot11aRfParamsGroupSup1.setStatus('current'))\nif mibBuilder.loadTexts:\n ciscoLwappClRoamDot11aRfParamsGroupSup1.setDescription(\n 'This collection of objects represent the radio parameters for the 802.11a networks.'\n )\n<mask token>\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n ciscoLwappClRoamDot11bRfParamsGroupSup1 = (\n ciscoLwappClRoamDot11bRfParamsGroupSup1.setStatus('current'))\nif mibBuilder.loadTexts:\n ciscoLwappClRoamDot11bRfParamsGroupSup1.setDescription(\n 'This collection of objects represent the radio parameters for the 802.11b/g bands.'\n )\nmibBuilder.exportSymbols('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n clcrDot11aMinRssi=clcrDot11aMinRssi, clcrRoamClientMacAddress=\n clcrRoamClientMacAddress, ciscoLwappClRoamroamingStatsGroup=\n ciscoLwappClRoamroamingStatsGroup, clcrDot11bTransitionTimeV2=\n clcrDot11bTransitionTimeV2, clcrRoamNewApMacAddress=\n clcrRoamNewApMacAddress, clcrMIBCompliance=clcrMIBCompliance,\n clcrRoamDot11aRfParamConfig=clcrRoamDot11aRfParamConfig,\n clcrDot11BcastUpdatesSent=clcrDot11BcastUpdatesSent, clcrRoamPrevApSsid\n =clcrRoamPrevApSsid, clcrMIBComplianceRev1=clcrMIBComplianceRev1,\n clcrDot11bHysteresisV2=clcrDot11bHysteresisV2,\n ciscoLwappClRoamMIBConform=ciscoLwappClRoamMIBConform,\n clcrDot11aTransitionTime=clcrDot11aTransitionTime, clcrDot11aHysteresis\n =clcrDot11aHysteresis, ciscoLwappClRoamDot11bRfParamsGroupSup1=\n ciscoLwappClRoamDot11bRfParamsGroupSup1, PYSNMP_MODULE_ID=\n ciscoLwappClRoamMIB, clcrDot11bHysteresis=clcrDot11bHysteresis,\n clcrDot11StatsEntry=clcrDot11StatsEntry, clcrRoamDisassocTimeInterval=\n clcrRoamDisassocTimeInterval, ciscoLwappClRoamDot11aRfParamsGroupSup1=\n ciscoLwappClRoamDot11aRfParamsGroupSup1,\n clcrDot11bAdaptiveScanThreshold=clcrDot11bAdaptiveScanThreshold,\n clcrDot11NeighborRequestRx=clcrDot11NeighborRequestRx,\n clcrRoamClientTimeStamp=clcrRoamClientTimeStamp, clcrRoamReason=\n clcrRoamReason, clcrDot11bMode=clcrDot11bMode,\n clcrDot11aAdaptiveScanThreshold=clcrDot11aAdaptiveScanThreshold,\n clcrDot11RoamReasonReportRx=clcrDot11RoamReasonReportRx,\n clcrDot11bAdaptiveScanThresholdV2=clcrDot11bAdaptiveScanThresholdV2,\n ciscoLwappClRoamDot11bRfParamsGroup=ciscoLwappClRoamDot11bRfParamsGroup,\n ciscoLwappClRoamMIBNotifs=ciscoLwappClRoamMIBNotifs,\n clcrRoamReasonReportTable=clcrRoamReasonReportTable,\n clcrDot11aMinRssiV2=clcrDot11aMinRssiV2, ciscoLwappClRoamMIBObjects=\n ciscoLwappClRoamMIBObjects, clcrDot11NeighborReplySent=\n clcrDot11NeighborReplySent, clcrDot11aAdaptiveScanThresholdV2=\n clcrDot11aAdaptiveScanThresholdV2, ciscoLwappClRoamroamReasonGroup=\n 
ciscoLwappClRoamroamReasonGroup, clcrDot11StatsTable=\n clcrDot11StatsTable, clcrRoamDot11Stats=clcrRoamDot11Stats,\n clcrRoamDot11bRfParamConfig=clcrRoamDot11bRfParamConfig,\n clcrDot11bMinRssi=clcrDot11bMinRssi, clcrRoamReasonReport=\n clcrRoamReasonReport, clcrRoamPrevApMacAddress=clcrRoamPrevApMacAddress,\n ciscoLwappClRoamDot11aRfParamsGroup=ciscoLwappClRoamDot11aRfParamsGroup,\n clcrRoamReasonReportEntry=clcrRoamReasonReportEntry,\n ciscoLwappClRoamMIBGroups=ciscoLwappClRoamMIBGroups,\n clcrDot11bMinRssiV2=clcrDot11bMinRssiV2, ciscoLwappClRoamMIBCompliances\n =ciscoLwappClRoamMIBCompliances, clcrDot11aMode=clcrDot11aMode,\n clcrDot11aTransitionTimeV2=clcrDot11aTransitionTimeV2,\n clcrRoamPrevApChannel=clcrRoamPrevApChannel, clcrDot11bTransitionTime=\n clcrDot11bTransitionTime, ciscoLwappClRoamMIB=ciscoLwappClRoamMIB,\n clcrDot11aHysteresisV2=clcrDot11aHysteresisV2)\n",
"step-3": "ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols('ASN1',\n 'ObjectIdentifier', 'OctetString', 'Integer')\nNamedValues, = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')\n(ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion,\n ConstraintsIntersection, ValueRangeConstraint) = (mibBuilder.\n importSymbols('ASN1-REFINEMENT', 'ValueSizeConstraint',\n 'SingleValueConstraint', 'ConstraintsUnion', 'ConstraintsIntersection',\n 'ValueRangeConstraint'))\ncLApDot11IfSlotId, cLApSysMacAddress = mibBuilder.importSymbols(\n 'CISCO-LWAPP-AP-MIB', 'cLApDot11IfSlotId', 'cLApSysMacAddress')\nCLDot11RfParamMode, CLDot11Channel = mibBuilder.importSymbols(\n 'CISCO-LWAPP-TC-MIB', 'CLDot11RfParamMode', 'CLDot11Channel')\nciscoMgmt, = mibBuilder.importSymbols('CISCO-SMI', 'ciscoMgmt')\nObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols(\n 'SNMPv2-CONF', 'ObjectGroup', 'NotificationGroup', 'ModuleCompliance')\n(Integer32, IpAddress, MibIdentifier, NotificationType, TimeTicks, Bits,\n ObjectIdentity, Counter64, ModuleIdentity, iso, Gauge32, MibScalar,\n MibTable, MibTableRow, MibTableColumn, Counter32, Unsigned32) = (mibBuilder\n .importSymbols('SNMPv2-SMI', 'Integer32', 'IpAddress', 'MibIdentifier',\n 'NotificationType', 'TimeTicks', 'Bits', 'ObjectIdentity', 'Counter64',\n 'ModuleIdentity', 'iso', 'Gauge32', 'MibScalar', 'MibTable',\n 'MibTableRow', 'MibTableColumn', 'Counter32', 'Unsigned32'))\nDisplayString, MacAddress, TextualConvention, TimeInterval = (mibBuilder.\n importSymbols('SNMPv2-TC', 'DisplayString', 'MacAddress',\n 'TextualConvention', 'TimeInterval'))\nciscoLwappClRoamMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 523))\nciscoLwappClRoamMIB.setRevisions(('2010-01-29 00:00', '2006-04-11 00:00'))\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n if mibBuilder.loadTexts:\n ciscoLwappClRoamMIB.setRevisionsDescriptions((\n 'Deprecated following attributes:- clcrDot11aMinRssi, clcrDot11aHysteresis, clcrDot11aAdaptiveScanThreshold, clcrDot11aTransitionTime, clcrDot11bMinRssi, clcrDot11bHysteresis, clcrDot11bAdaptiveScanThreshold, clcrDot11bTransitionTime. clcrMIBCompliance, ciscoLwappClRoamDot11aRfParamsGroup, ciscoLwappClRoamDot11bRfParamsGroup Added following attributes:- clcrDot11aMinRssiV2, clcrDot11aHysteresisV2, clcrDot11aAdaptiveScanThresholdV2, clcrDot11aTransitionTimeV2, clcrDot11bMinRssiV2, clcrDot11bHysteresisV2, clcrDot11bAdaptiveScanThresholdV2, clcrDot11bTransitionTimeV2. clcrMIBComplianceRev1, ciscoLwappClRoamDot11aRfParamsGroupSup1, ciscoLwappClRoamDot11bRfParamsGroupSup1'\n , 'Initial version of this MIB module.'))\nif mibBuilder.loadTexts:\n ciscoLwappClRoamMIB.setLastUpdated('201001290000Z')\nif mibBuilder.loadTexts:\n ciscoLwappClRoamMIB.setOrganization('Cisco Systems, Inc.')\nif mibBuilder.loadTexts:\n ciscoLwappClRoamMIB.setContactInfo(\n 'Cisco Systems, Customer Service Postal: 170 West Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS Email: [email protected]'\n )\nif mibBuilder.loadTexts:\n ciscoLwappClRoamMIB.setDescription(\n \"This MIB is intended to be implemented on all those devices operating as Central controllers, that terminate the Light Weight Access Point Protocol tunnel from Cisco Light-weight LWAPP Access Points. Information provided by this MIB is for CCX related features as specified in the CCX specifications. This MIB covers roaming RF parameters for CCX clients. 
The relationship between CC and the LWAPP APs can be depicted as follows: +......+ +......+ +......+ + + + + + + + CC + + CC + + CC + + + + + + + +......+ +......+ +......+ .. . . .. . . . . . . . . . . . . . . . . . . +......+ +......+ +......+ +......+ + + + + + + + + + AP + + AP + + AP + + AP + + + + + + + + + +......+ +......+ +......+ +......+ . . . . . . . . . . . . . . . . . . . +......+ +......+ +......+ +......+ + + + + + + + + + MN + + MN + + MN + + MN + + + + + + + + + +......+ +......+ +......+ +......+ The LWAPP tunnel exists between the controller and the APs. The MNs communicate with the APs through the protocol defined by the 802.11 standard. LWAPP APs, upon bootup, discover and join one of the controllers and the controller pushes the configuration, that includes the WLAN parameters, to the LWAPP APs. The APs then encapsulate all the 802.11 frames from wireless clients inside LWAPP frames and forward the LWAPP frames to the controller. GLOSSARY Access Point ( AP ) An entity that contains an 802.11 medium access control ( MAC ) and physical layer ( PHY ) interface and provides access to the distribution services via the wireless medium for associated clients. LWAPP APs encapsulate all the 802.11 frames in LWAPP frames and sends them to the controller to which it is logically connected. Basic Service Set ( BSS ) The IEEE 802.11 BSS of an AP comprises of the stations directly associating with the AP. Central Controller ( CC ) The central entity that terminates the LWAPP protocol tunnel from the LWAPP APs. Throughout this MIB, this entity is also referred to as 'controller'. Cisco Compatible eXtensions (CCX) Wireless LAN Access Points (APs) manufactured by Cisco Systems have features and capabilities beyond those in related standards (e.g., IEEE 802.11 suite of standards ,Wi-Fi recommendations by WECA, 802.1X security suite,etc). A number of features provide higher performance.For example, Cisco AP transmits a specific Information Element, which the clients adapt to for enhanced performance. Similarly, a number of features are implemented by means of proprietary Information Elements, which Cisco clients use in specific ways to carry out tasks above and beyond the standard. Other examples of feature categories are roaming and power saving. Client Roaming A client may decide to reassociate with another AP for reasons of its own choosing. The decision of whether or not to use the information contained in the AP list is up to the discretion of the implementor, as long as the roam time requirement is met. Light Weight Access Point Protocol ( LWAPP ) This is a generic protocol that defines the communication between the Access Points and the Central Controller. Mobile Node ( MN ) A roaming 802.11 wireless device in a wireless network associated with an access point. Mobile Node and client are used interchangeably. 
REFERENCE [1] Wireless LAN Medium Access Control ( MAC ) and Physical Layer ( PHY ) Specifications [2] Draft-obara-capwap-lwapp-00.txt, IETF Light Weight Access Point Protocol\"\n )\nciscoLwappClRoamMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 0))\nciscoLwappClRoamMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 1))\nciscoLwappClRoamMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 2))\nclcrRoamDot11aRfParamConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1)\n )\nclcrRoamDot11bRfParamConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2)\n )\nclcrRoamReasonReport = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3))\nclcrRoamDot11Stats = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 4))\nclcrDot11aMode = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 1),\n CLDot11RfParamMode().clone('default')).setMaxAccess('readwrite')\nif mibBuilder.loadTexts:\n    clcrDot11aMode.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrDot11aMode.setDescription(\n 'This object represents how the controller chooses the values of the RF parameters needed to manage roaming in 802.11a networks.'\n )\nclcrDot11aMinRssi = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 2),\n Integer32().subtype(subtypeSpec=ValueRangeConstraint(-90, -80)).clone(-85)\n ).setUnits('dBm').setMaxAccess('readwrite')\nif mibBuilder.loadTexts:\n    clcrDot11aMinRssi.setStatus('deprecated')\nif mibBuilder.loadTexts:\n    clcrDot11aMinRssi.setDescription(\n \"This object indicates the Minimum Received Signal Strength Indication (RSSI) in dBm required to associate with the AP. It also defines the edge of coverage for the BSS. If the client's average received signal power dips below this threshold, clients must have roamed to another AP with a stronger signal. This object is superseded by clcrDot11aMinRssiV2\"\n )\nclcrDot11aHysteresis = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 3),\n Integer32().subtype(subtypeSpec=ValueRangeConstraint(2, 4)).clone(2)\n ).setUnits('dB').setMaxAccess('readwrite')\nif mibBuilder.loadTexts:\n    clcrDot11aHysteresis.setStatus('deprecated')\nif mibBuilder.loadTexts:\n    clcrDot11aHysteresis.setDescription(\n 'This object indicates how much stronger the signal strength (dB) of a neighbor AP must be, in order for the client to roam to it. The use of roaming hysteresis is intended to reduce the number of clients roaming back and forth between BSSs if the client is physically located on or near the border between two BSSs. This object is superseded by clcrDot11aHysteresisV2'\n )\nclcrDot11aAdaptiveScanThreshold = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1,\n 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-77, -70)).\n clone(-72)).setUnits('dBm').setMaxAccess('readwrite')\nif mibBuilder.loadTexts:\n    clcrDot11aAdaptiveScanThreshold.setStatus('deprecated')\nif mibBuilder.loadTexts:\n    clcrDot11aAdaptiveScanThreshold.setDescription(\n 'This object configures the threshold for the strength of the signals received (RSSI) from an AP, as seen by an associated client, below which the client must be able to roam to a neighbor AP within the specified Transition Time configured through clcrDot11aTransitionTime. 
This object is superseded by clcrDot11aAdaptiveScanThresholdV2'\n )\nclcrDot11aTransitionTime = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 5),\n TimeInterval().subtype(subtypeSpec=ValueRangeConstraint(100, 10000)).\n clone(500)).setMaxAccess('readwrite')\nif mibBuilder.loadTexts:\n    clcrDot11aTransitionTime.setStatus('deprecated')\nif mibBuilder.loadTexts:\n    clcrDot11aTransitionTime.setDescription(\n 'This object configures the maximum time duration permitted for the client to detect a suitable neighbor AP to roam to and to complete the roam, whenever the RSSI from the currently associated AP is below the adaptive scan threshold configured through clcrDot11aAdaptiveScanThreshold. The time is expressed in hundredths of a second. This object is superseded by clcrDot11aTransitionTimeV2'\n )\nclcrDot11aMinRssiV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 6),\n Integer32().subtype(subtypeSpec=ValueRangeConstraint(-255, 255))).setUnits(\n 'dBm').setMaxAccess('readwrite')\nif mibBuilder.loadTexts:\n    clcrDot11aMinRssiV2.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrDot11aMinRssiV2.setDescription(\n \"This object indicates the Minimum Received Signal Strength Indication (RSSI) in dBm required to associate with the AP. It also defines the edge of coverage for the BSS. If the client's average received signal power dips below this threshold, clients must have roamed to another AP with a stronger signal.\"\n )\nclcrDot11aHysteresisV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 7),\n Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setUnits(\n 'dB').setMaxAccess('readwrite')\nif mibBuilder.loadTexts:\n    clcrDot11aHysteresisV2.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrDot11aHysteresisV2.setDescription(\n 'This object indicates how much stronger the signal strength (dB) of a neighbor AP must be, in order for the client to roam to it. The use of roaming hysteresis is intended to reduce the number of clients roaming back and forth between BSSs if the client is physically located on or near the border between two BSSs.'\n )\nclcrDot11aAdaptiveScanThresholdV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523,\n 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-255, 255))\n ).setUnits('dBm').setMaxAccess('readwrite')\nif mibBuilder.loadTexts:\n    clcrDot11aAdaptiveScanThresholdV2.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrDot11aAdaptiveScanThresholdV2.setDescription(\n 'This object configures the threshold for the strength of the signals received (RSSI) from an AP, as seen by an associated client, below which the client must be able to roam to a neighbor AP within the specified Transition Time configured through clcrDot11aTransitionTime.'\n )\nclcrDot11aTransitionTimeV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, \n 9), TimeInterval().subtype(subtypeSpec=ValueRangeConstraint(0, 10000))\n ).setMaxAccess('readwrite')\nif mibBuilder.loadTexts:\n    clcrDot11aTransitionTimeV2.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrDot11aTransitionTimeV2.setDescription(\n 'This object configures the maximum time duration permitted for the client to detect a suitable neighbor AP to roam to and to complete the roam, whenever the RSSI from the currently associated AP is below the adaptive scan threshold configured through clcrDot11aAdaptiveScanThreshold. 
The time is expressed in hundredths of a second.'\n )\nclcrDot11bMode = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 1),\n CLDot11RfParamMode().clone('default')).setMaxAccess('readwrite')\nif mibBuilder.loadTexts:\n    clcrDot11bMode.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrDot11bMode.setDescription(\n 'This object represents how the controller chooses the values of the RF parameters needed to manage roaming in 802.11b/g networks.'\n )\nclcrDot11bMinRssi = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 2),\n Integer32().subtype(subtypeSpec=ValueRangeConstraint(-90, -80)).clone(-85)\n ).setUnits('dBm').setMaxAccess('readwrite')\nif mibBuilder.loadTexts:\n    clcrDot11bMinRssi.setStatus('deprecated')\nif mibBuilder.loadTexts:\n    clcrDot11bMinRssi.setDescription(\n \"This object indicates the minimum Received Signal Strength Indication (RSSI) in dBm required to associate with the AP. It also defines the edge of coverage for the BSS. If the client's average received signal power dips below this threshold, clients must have roamed to another AP with a stronger signal. This object is superseded by clcrDot11bMinRssiV2\"\n )\nclcrDot11bHysteresis = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 3),\n Integer32().subtype(subtypeSpec=ValueRangeConstraint(2, 4)).clone(2)\n ).setUnits('dB').setMaxAccess('readwrite')\nif mibBuilder.loadTexts:\n    clcrDot11bHysteresis.setStatus('deprecated')\nif mibBuilder.loadTexts:\n    clcrDot11bHysteresis.setDescription(\n 'This object indicates how much stronger the signal strength (dB) of a neighbor AP must be, in order for the client to roam to it. The use of roaming hysteresis is intended to reduce the number of clients roaming back and forth between BSSs if the client is physically located on or near the border between two BSSs. This object is superseded by clcrDot11bHysteresisV2'\n )\nclcrDot11bAdaptiveScanThreshold = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1,\n 2, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-77, -70)).\n clone(-72)).setUnits('dBm').setMaxAccess('readwrite')\nif mibBuilder.loadTexts:\n    clcrDot11bAdaptiveScanThreshold.setStatus('deprecated')\nif mibBuilder.loadTexts:\n    clcrDot11bAdaptiveScanThreshold.setDescription(\n 'This object configures the threshold for the strength of the signals received (RSSI) from an AP, as seen by an associated client, below which the client must be able to roam to a neighbor AP within the specified Transition Time configured through clcrDot11bTransitionTime. This object is superseded by clcrDot11bAdaptiveScanThresholdV2'\n )\nclcrDot11bTransitionTime = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 5),\n TimeInterval().subtype(subtypeSpec=ValueRangeConstraint(100, 10000)).\n clone(500)).setMaxAccess('readwrite')\nif mibBuilder.loadTexts:\n    clcrDot11bTransitionTime.setStatus('deprecated')\nif mibBuilder.loadTexts:\n    clcrDot11bTransitionTime.setDescription(\n 'This object configures the maximum time duration permitted for the client to detect a suitable neighbor AP to roam to and to complete the roam, whenever the RSSI from the currently associated AP is below the adaptive scan threshold configured through clcrDot11bAdaptiveScanThreshold. The time is expressed in hundredths of a second. 
This object is superseded by clcrDot11bTransitionTimeV2'\n )\nclcrDot11bMinRssiV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 6),\n Integer32().subtype(subtypeSpec=ValueRangeConstraint(-255, 255))).setUnits(\n 'dBm').setMaxAccess('readwrite')\nif mibBuilder.loadTexts:\n    clcrDot11bMinRssiV2.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrDot11bMinRssiV2.setDescription(\n \"This object indicates the minimum Received Signal Strength Indication (RSSI) in dBm required to associate with the AP. It also defines the edge of coverage for the BSS. If the client's average received signal power dips below this threshold, clients must have roamed to another AP with a stronger signal.\"\n )\nclcrDot11bHysteresisV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 7),\n Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setUnits(\n 'dB').setMaxAccess('readwrite')\nif mibBuilder.loadTexts:\n    clcrDot11bHysteresisV2.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrDot11bHysteresisV2.setDescription(\n 'This object indicates how much stronger the signal strength (dB) of a neighbor AP must be, in order for the client to roam to it. The use of roaming hysteresis is intended to reduce the number of clients roaming back and forth between BSSs if the client is physically located on or near the border between two BSSs.'\n )\nclcrDot11bAdaptiveScanThresholdV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523,\n 1, 2, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-255, 255))\n ).setUnits('dBm').setMaxAccess('readwrite')\nif mibBuilder.loadTexts:\n    clcrDot11bAdaptiveScanThresholdV2.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrDot11bAdaptiveScanThresholdV2.setDescription(\n 'This object configures the threshold for the strength of the signals received (RSSI) from an AP, as seen by an associated client, below which the client must be able to roam to a neighbor AP within the specified Transition Time configured through clcrDot11bTransitionTime.'\n )\nclcrDot11bTransitionTimeV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, \n 9), TimeInterval().subtype(subtypeSpec=ValueRangeConstraint(0, 10000))\n ).setMaxAccess('readwrite')\nif mibBuilder.loadTexts:\n    clcrDot11bTransitionTimeV2.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrDot11bTransitionTimeV2.setDescription(\n 'This object configures the maximum time duration permitted for the client to detect a suitable neighbor AP to roam to and to complete the roam, whenever the RSSI from the currently associated AP is below the adaptive scan threshold configured through clcrDot11bAdaptiveScanThreshold. The time is expressed in hundredths of a second.'\n )\nclcrRoamReasonReportTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1))\nif mibBuilder.loadTexts:\n    clcrRoamReasonReportTable.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrRoamReasonReportTable.setDescription(\n 'This table provides the reasons for CCX clients roaming from one AP to another. When a CCX client associates to an AP, it will always send an IAPP information packet to the new AP listing the characteristics of the previous AP. 
An entry is added to this table when a roam reason report is sent by a CCX client as it roams to a new AP.'\n )\nclcrRoamReasonReportEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3,\n 1, 1)).setIndexNames((0, 'CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrRoamClientMacAddress'), (0, 'CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrRoamClientTimeStamp'))\nif mibBuilder.loadTexts:\n    clcrRoamReasonReportEntry.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrRoamReasonReportEntry.setDescription(\n 'Each entry corresponds to the roam reason report sent by a CCX client to the new AP to which the client associates.'\n )\nclcrRoamClientMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, \n 3, 1, 1, 1), MacAddress())\nif mibBuilder.loadTexts:\n    clcrRoamClientMacAddress.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrRoamClientMacAddress.setDescription(\n 'This object indicates the MAC address of the client which has roamed to a new AP.'\n )\nclcrRoamClientTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3,\n 1, 1, 2), TimeTicks())\nif mibBuilder.loadTexts:\n    clcrRoamClientTimeStamp.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrRoamClientTimeStamp.setDescription(\n \"This object indicates the time instance at which this report was received by the new AP to which the client roamed. This represents the number of seconds elapsed since 00:00:00 on January 1, 1970, Coordinated Universal Time (UTC). So a value of '1131362704' means 'Mon Nov 7 16:55:04 2005'.\"\n )\nclcrRoamNewApMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3,\n 1, 1, 3), MacAddress()).setMaxAccess('readonly')\nif mibBuilder.loadTexts:\n    clcrRoamNewApMacAddress.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrRoamNewApMacAddress.setDescription(\n 'This object indicates the MAC address of the current AP to which the client has roamed. 
This AP receives the roam reason report.'\n )\nclcrRoamPrevApMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, \n 3, 1, 1, 4), MacAddress()).setMaxAccess('readonly')\nif mibBuilder.loadTexts:\n    clcrRoamPrevApMacAddress.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrRoamPrevApMacAddress.setDescription(\n 'This object indicates the MAC address of the previous AP to which the client was associated.'\n )\nclcrRoamPrevApChannel = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, \n 1, 1, 5), CLDot11Channel()).setMaxAccess('readonly')\nif mibBuilder.loadTexts:\n    clcrRoamPrevApChannel.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrRoamPrevApChannel.setDescription(\n 'This object indicates the channel number at which the client was associated to the previous AP.'\n )\nclcrRoamPrevApSsid = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, \n 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))\n ).setMaxAccess('readonly')\nif mibBuilder.loadTexts:\n    clcrRoamPrevApSsid.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrRoamPrevApSsid.setDescription(\n 'This object indicates the SSID at which the client was associated to the previous AP.'\n )\nclcrRoamDisassocTimeInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523,\n 1, 3, 1, 1, 7), TimeInterval()).setMaxAccess('readonly')\nif mibBuilder.loadTexts:\n    clcrRoamDisassocTimeInterval.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrRoamDisassocTimeInterval.setDescription(\n 'This object indicates the time elapsed since the client disassociated, in hundredths of a second.'\n )\nclcrRoamReason = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 8\n ), Integer32().subtype(subtypeSpec=ConstraintsUnion(\n SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues\n =NamedValues(('clcrUnspecified', 0), ('clcrPoorLink', 1), (\n 'clcrLoadBalancing', 2), ('clcrInsufficientCapacity', 3), (\n 'clcrDirectedRoam', 4), ('clcrFirstAssociation', 5), ('clcrRoamingIn', \n 6), ('clcrRoamingOut', 7), ('clcrBetterAp', 8), ('clcrDisassociated', 9)))\n ).setMaxAccess('readonly')\nif mibBuilder.loadTexts:\n    clcrRoamReason.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrRoamReason.setDescription(\n \"This object indicates the reason for a client to roam to a new AP. The semantics are as follows. clcrUnspecified - The reason is not known or can't be found. clcrPoorLink - Normal roam due to poor link (excessive retries, too much interference, RSSI too low, etc.) clcrLoadBalancing - Normal roam due to load balancing clcrInsufficientCapacity - Roaming occurred due to insufficient capacity on the previous AP (TSPEC rejected) clcrDirectedRoam - Roaming is directed by the 802.11 wireless Infrastructure clcrFirstAssociation - This is the first association to a particular WLAN clcrRoamingIn - Roaming in from cellular or other WAN clcrRoamingOut - Roaming out to cellular or other WAN clcrBetterAp - Normal roam due to better AP found clcrDisassociated - Deauthenticated or Disassociated from the previous AP.\"\n )\nclcrDot11StatsTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 4, 1))\nif mibBuilder.loadTexts:\n    clcrDot11StatsTable.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrDot11StatsTable.setDescription(\n 'This table provides the statistics collected when clients roam in the WLAN. 
There exists a row in this table for each conceptual row in cLApDot11IfTable that represents a dot11 interface of an AP.'\n )\nclcrDot11StatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 4, 1, 1)\n ).setIndexNames((0, 'CISCO-LWAPP-AP-MIB', 'cLApSysMacAddress'), (0,\n 'CISCO-LWAPP-AP-MIB', 'cLApDot11IfSlotId'))\nif mibBuilder.loadTexts:\n    clcrDot11StatsEntry.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrDot11StatsEntry.setDescription(\n 'Each entry represents a conceptual row in clcrDot11StatsTable and corresponds to the roam reason report sent by a CCX client to the new AP to which the client associates.'\n )\nclcrDot11NeighborRequestRx = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1,\n 4, 1, 1, 1), Counter32()).setMaxAccess('readonly')\nif mibBuilder.loadTexts:\n    clcrDot11NeighborRequestRx.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrDot11NeighborRequestRx.setDescription(\n 'This object indicates the number of requests received from an E2E client for neighbor updates.'\n )\nclcrDot11NeighborReplySent = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1,\n 4, 1, 1, 2), Counter32()).setMaxAccess('readonly')\nif mibBuilder.loadTexts:\n    clcrDot11NeighborReplySent.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrDot11NeighborReplySent.setDescription(\n 'This object indicates the number of replies sent to the client in reply to the request for neighbor updates received from the client.'\n )\nclcrDot11RoamReasonReportRx = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, \n 1, 4, 1, 1, 3), Counter32()).setMaxAccess('readonly')\nif mibBuilder.loadTexts:\n    clcrDot11RoamReasonReportRx.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrDot11RoamReasonReportRx.setDescription(\n 'This object reports the number of roam reason reports received from CCX clients.'\n )\nclcrDot11BcastUpdatesSent = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1,\n 4, 1, 1, 4), Counter32()).setMaxAccess('readonly')\nif mibBuilder.loadTexts:\n    clcrDot11BcastUpdatesSent.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrDot11BcastUpdatesSent.setDescription(\n 'This object indicates the number of broadcast neighbor updates sent by an AP.'\n )\nciscoLwappClRoamMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523,\n 2, 1))\nciscoLwappClRoamMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 2))\nclcrMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 1, 1)\n ).setObjects(('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'ciscoLwappClRoamDot11aRfParamsGroup'), (\n 'CISCO-LWAPP-CLIENT-ROAMING-MIB', 'ciscoLwappClRoamDot11bRfParamsGroup'\n ), ('CISCO-LWAPP-CLIENT-ROAMING-MIB', 'ciscoLwappClRoamroamReasonGroup'\n ), ('CISCO-LWAPP-CLIENT-ROAMING-MIB', 'ciscoLwappClRoamroamingStatsGroup'))\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n    clcrMIBCompliance = clcrMIBCompliance.setStatus('deprecated')\nif mibBuilder.loadTexts:\n    clcrMIBCompliance.setDescription(\n 'The compliance statement for the SNMP entities that implement the ciscoLwappRoamMIB module.'\n )\nclcrMIBComplianceRev1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 1, 2)\n ).setObjects(('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'ciscoLwappClRoamDot11aRfParamsGroupSup1'), (\n 'CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'ciscoLwappClRoamDot11bRfParamsGroupSup1'), (\n 'CISCO-LWAPP-CLIENT-ROAMING-MIB', 'ciscoLwappClRoamroamReasonGroup'), (\n 'CISCO-LWAPP-CLIENT-ROAMING-MIB', 'ciscoLwappClRoamroamingStatsGroup'))\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n
    clcrMIBComplianceRev1 = clcrMIBComplianceRev1.setStatus('current')\nif mibBuilder.loadTexts:\n    clcrMIBComplianceRev1.setDescription(\n 'The compliance statement for the SNMP entities that implement the ciscoLwappRoamMIB module.'\n )\nciscoLwappClRoamDot11aRfParamsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, \n 523, 2, 2, 1)).setObjects(('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrDot11aMode'), ('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrDot11aMinRssi'), ('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrDot11aHysteresis'), ('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrDot11aAdaptiveScanThreshold'), ('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrDot11aTransitionTime'))\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n    ciscoLwappClRoamDot11aRfParamsGroup = (ciscoLwappClRoamDot11aRfParamsGroup\n .setStatus('deprecated'))\nif mibBuilder.loadTexts:\n    ciscoLwappClRoamDot11aRfParamsGroup.setDescription(\n 'This collection of objects represents the radio parameters for the 802.11a networks.'\n )\nciscoLwappClRoamDot11bRfParamsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, \n 523, 2, 2, 2)).setObjects(('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrDot11bMode'), ('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrDot11bMinRssi'), ('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrDot11bHysteresis'), ('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrDot11bAdaptiveScanThreshold'), ('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrDot11bTransitionTime'))\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n    ciscoLwappClRoamDot11bRfParamsGroup = (ciscoLwappClRoamDot11bRfParamsGroup\n .setStatus('deprecated'))\nif mibBuilder.loadTexts:\n    ciscoLwappClRoamDot11bRfParamsGroup.setDescription(\n 'This collection of objects represents the radio parameters for the 802.11b/g bands.'\n )\nciscoLwappClRoamroamReasonGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 523,\n 2, 2, 3)).setObjects(('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrRoamNewApMacAddress'), ('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrRoamPrevApMacAddress'), ('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrRoamPrevApChannel'), ('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrRoamPrevApSsid'), ('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrRoamDisassocTimeInterval'), ('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrRoamReason'))\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n    ciscoLwappClRoamroamReasonGroup = (ciscoLwappClRoamroamReasonGroup.\n setStatus('current'))\nif mibBuilder.loadTexts:\n    ciscoLwappClRoamroamReasonGroup.setDescription(\n 'This collection of objects provides the reasons for clients roaming between APs.'\n )\nciscoLwappClRoamroamingStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, \n 523, 2, 2, 4)).setObjects(('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrDot11NeighborRequestRx'), ('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrDot11NeighborReplySent'), ('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrDot11RoamReasonReportRx'), ('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrDot11BcastUpdatesSent'))\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n    ciscoLwappClRoamroamingStatsGroup = (ciscoLwappClRoamroamingStatsGroup.\n setStatus('current'))\nif mibBuilder.loadTexts:\n    ciscoLwappClRoamroamingStatsGroup.setDescription(\n 'This collection of objects provides the counters related to roaming.')\nciscoLwappClRoamDot11aRfParamsGroupSup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 9,\n 9, 523, 2, 2, 5)).setObjects(('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrDot11aMode'), ('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrDot11aMinRssiV2'), ('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrDot11aHysteresisV2'), ('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n
'clcrDot11aAdaptiveScanThresholdV2'), ('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrDot11aTransitionTimeV2'))\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n    ciscoLwappClRoamDot11aRfParamsGroupSup1 = (\n ciscoLwappClRoamDot11aRfParamsGroupSup1.setStatus('current'))\nif mibBuilder.loadTexts:\n    ciscoLwappClRoamDot11aRfParamsGroupSup1.setDescription(\n 'This collection of objects represents the radio parameters for the 802.11a networks.'\n )\nciscoLwappClRoamDot11bRfParamsGroupSup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 9,\n 9, 523, 2, 2, 6)).setObjects(('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrDot11bMode'), ('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrDot11bMinRssiV2'), ('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrDot11bHysteresisV2'), ('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrDot11bAdaptiveScanThresholdV2'), ('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n 'clcrDot11bTransitionTimeV2'))\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n    ciscoLwappClRoamDot11bRfParamsGroupSup1 = (\n ciscoLwappClRoamDot11bRfParamsGroupSup1.setStatus('current'))\nif mibBuilder.loadTexts:\n    ciscoLwappClRoamDot11bRfParamsGroupSup1.setDescription(\n 'This collection of objects represents the radio parameters for the 802.11b/g bands.'\n )\nmibBuilder.exportSymbols('CISCO-LWAPP-CLIENT-ROAMING-MIB',\n clcrDot11aMinRssi=clcrDot11aMinRssi, clcrRoamClientMacAddress=\n clcrRoamClientMacAddress, ciscoLwappClRoamroamingStatsGroup=\n ciscoLwappClRoamroamingStatsGroup, clcrDot11bTransitionTimeV2=\n clcrDot11bTransitionTimeV2, clcrRoamNewApMacAddress=\n clcrRoamNewApMacAddress, clcrMIBCompliance=clcrMIBCompliance,\n clcrRoamDot11aRfParamConfig=clcrRoamDot11aRfParamConfig,\n clcrDot11BcastUpdatesSent=clcrDot11BcastUpdatesSent, clcrRoamPrevApSsid\n =clcrRoamPrevApSsid, clcrMIBComplianceRev1=clcrMIBComplianceRev1,\n clcrDot11bHysteresisV2=clcrDot11bHysteresisV2,\n ciscoLwappClRoamMIBConform=ciscoLwappClRoamMIBConform,\n clcrDot11aTransitionTime=clcrDot11aTransitionTime, clcrDot11aHysteresis\n =clcrDot11aHysteresis, ciscoLwappClRoamDot11bRfParamsGroupSup1=\n ciscoLwappClRoamDot11bRfParamsGroupSup1, PYSNMP_MODULE_ID=\n ciscoLwappClRoamMIB, clcrDot11bHysteresis=clcrDot11bHysteresis,\n clcrDot11StatsEntry=clcrDot11StatsEntry, clcrRoamDisassocTimeInterval=\n clcrRoamDisassocTimeInterval, ciscoLwappClRoamDot11aRfParamsGroupSup1=\n ciscoLwappClRoamDot11aRfParamsGroupSup1,\n clcrDot11bAdaptiveScanThreshold=clcrDot11bAdaptiveScanThreshold,\n clcrDot11NeighborRequestRx=clcrDot11NeighborRequestRx,\n clcrRoamClientTimeStamp=clcrRoamClientTimeStamp, clcrRoamReason=\n clcrRoamReason, clcrDot11bMode=clcrDot11bMode,\n clcrDot11aAdaptiveScanThreshold=clcrDot11aAdaptiveScanThreshold,\n clcrDot11RoamReasonReportRx=clcrDot11RoamReasonReportRx,\n clcrDot11bAdaptiveScanThresholdV2=clcrDot11bAdaptiveScanThresholdV2,\n ciscoLwappClRoamDot11bRfParamsGroup=ciscoLwappClRoamDot11bRfParamsGroup,\n ciscoLwappClRoamMIBNotifs=ciscoLwappClRoamMIBNotifs,\n clcrRoamReasonReportTable=clcrRoamReasonReportTable,\n clcrDot11aMinRssiV2=clcrDot11aMinRssiV2, ciscoLwappClRoamMIBObjects=\n ciscoLwappClRoamMIBObjects, clcrDot11NeighborReplySent=\n clcrDot11NeighborReplySent, clcrDot11aAdaptiveScanThresholdV2=\n clcrDot11aAdaptiveScanThresholdV2, ciscoLwappClRoamroamReasonGroup=\n ciscoLwappClRoamroamReasonGroup, clcrDot11StatsTable=\n clcrDot11StatsTable, clcrRoamDot11Stats=clcrRoamDot11Stats,\n clcrRoamDot11bRfParamConfig=clcrRoamDot11bRfParamConfig,\n clcrDot11bMinRssi=clcrDot11bMinRssi, clcrRoamReasonReport=\n clcrRoamReasonReport, 
clcrRoamPrevApMacAddress=clcrRoamPrevApMacAddress,\n ciscoLwappClRoamDot11aRfParamsGroup=ciscoLwappClRoamDot11aRfParamsGroup,\n clcrRoamReasonReportEntry=clcrRoamReasonReportEntry,\n ciscoLwappClRoamMIBGroups=ciscoLwappClRoamMIBGroups,\n clcrDot11bMinRssiV2=clcrDot11bMinRssiV2, ciscoLwappClRoamMIBCompliances\n =ciscoLwappClRoamMIBCompliances, clcrDot11aMode=clcrDot11aMode,\n clcrDot11aTransitionTimeV2=clcrDot11aTransitionTimeV2,\n clcrRoamPrevApChannel=clcrRoamPrevApChannel, clcrDot11bTransitionTime=\n clcrDot11bTransitionTime, ciscoLwappClRoamMIB=ciscoLwappClRoamMIB,\n clcrDot11aHysteresisV2=clcrDot11aHysteresisV2)\n",
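For context, a minimal usage sketch (not part of the generated module above): the "step-3" loader registers the MIB's managed objects with a pysnmp mibBuilder, after which the objects are usually read over SNMP via pysnmp's high-level API. The controller address 192.0.2.1 and the 'public' community string below are placeholder assumptions, and name-based OID resolution assumes the compiled CISCO-LWAPP-CLIENT-ROAMING-MIB module is discoverable on pysnmp's MIB search path.

```python
# Hedged sketch: read the clcrDot11aMode scalar (OID 1.3.6.1.4.1.9.9.523.1.1.1)
# from a hypothetical controller with pysnmp's high-level API (pysnmp 4.x).
from pysnmp.hlapi import (
    CommunityData, ContextData, ObjectIdentity, ObjectType, SnmpEngine,
    UdpTransportTarget, getCmd,
)

error_indication, error_status, error_index, var_binds = next(
    getCmd(
        SnmpEngine(),
        CommunityData('public', mpModel=1),       # SNMPv2c; placeholder community
        UdpTransportTarget(('192.0.2.1', 161)),   # placeholder controller address
        ContextData(),
        # The trailing 0 addresses the scalar instance of clcrDot11aMode.
        ObjectType(ObjectIdentity('CISCO-LWAPP-CLIENT-ROAMING-MIB',
                                  'clcrDot11aMode', 0)),
    )
)

if error_indication:        # transport-level problem (e.g. timeout)
    print(error_indication)
elif error_status:          # SNMP protocol error reported by the agent
    print('%s at %s' % (error_status.prettyPrint(), error_index))
else:
    for var_bind in var_binds:
        print(' = '.join(x.prettyPrint() for x in var_bind))
```

If the compiled module is not on the MIB path, the same query can be issued against the numeric OID directly, e.g. ObjectIdentity('1.3.6.1.4.1.9.9.523.1.1.1.0'), at the cost of losing the symbolic name and enumeration labels in the output.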
"step-4": "#\n# PySNMP MIB module CISCO-LWAPP-CLIENT-ROAMING-MIB (http://snmplabs.com/pysmi)\n# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-LWAPP-CLIENT-ROAMING-MIB\n# Produced by pysmi-0.3.4 at Wed May 1 12:04:56 2019\n# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4\n# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) \n#\nObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols(\"ASN1\", \"ObjectIdentifier\", \"OctetString\", \"Integer\")\nNamedValues, = mibBuilder.importSymbols(\"ASN1-ENUMERATION\", \"NamedValues\")\nValueSizeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols(\"ASN1-REFINEMENT\", \"ValueSizeConstraint\", \"SingleValueConstraint\", \"ConstraintsUnion\", \"ConstraintsIntersection\", \"ValueRangeConstraint\")\ncLApDot11IfSlotId, cLApSysMacAddress = mibBuilder.importSymbols(\"CISCO-LWAPP-AP-MIB\", \"cLApDot11IfSlotId\", \"cLApSysMacAddress\")\nCLDot11RfParamMode, CLDot11Channel = mibBuilder.importSymbols(\"CISCO-LWAPP-TC-MIB\", \"CLDot11RfParamMode\", \"CLDot11Channel\")\nciscoMgmt, = mibBuilder.importSymbols(\"CISCO-SMI\", \"ciscoMgmt\")\nObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols(\"SNMPv2-CONF\", \"ObjectGroup\", \"NotificationGroup\", \"ModuleCompliance\")\nInteger32, IpAddress, MibIdentifier, NotificationType, TimeTicks, Bits, ObjectIdentity, Counter64, ModuleIdentity, iso, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, Unsigned32 = mibBuilder.importSymbols(\"SNMPv2-SMI\", \"Integer32\", \"IpAddress\", \"MibIdentifier\", \"NotificationType\", \"TimeTicks\", \"Bits\", \"ObjectIdentity\", \"Counter64\", \"ModuleIdentity\", \"iso\", \"Gauge32\", \"MibScalar\", \"MibTable\", \"MibTableRow\", \"MibTableColumn\", \"Counter32\", \"Unsigned32\")\nDisplayString, MacAddress, TextualConvention, TimeInterval = mibBuilder.importSymbols(\"SNMPv2-TC\", \"DisplayString\", \"MacAddress\", \"TextualConvention\", \"TimeInterval\")\nciscoLwappClRoamMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 523))\nciscoLwappClRoamMIB.setRevisions(('2010-01-29 00:00', '2006-04-11 00:00',))\n\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n if mibBuilder.loadTexts: ciscoLwappClRoamMIB.setRevisionsDescriptions(('Deprecated following attributes:- clcrDot11aMinRssi, clcrDot11aHysteresis, clcrDot11aAdaptiveScanThreshold, clcrDot11aTransitionTime, clcrDot11bMinRssi, clcrDot11bHysteresis, clcrDot11bAdaptiveScanThreshold, clcrDot11bTransitionTime. clcrMIBCompliance, ciscoLwappClRoamDot11aRfParamsGroup, ciscoLwappClRoamDot11bRfParamsGroup Added following attributes:- clcrDot11aMinRssiV2, clcrDot11aHysteresisV2, clcrDot11aAdaptiveScanThresholdV2, clcrDot11aTransitionTimeV2, clcrDot11bMinRssiV2, clcrDot11bHysteresisV2, clcrDot11bAdaptiveScanThresholdV2, clcrDot11bTransitionTimeV2. 
clcrMIBComplianceRev1, ciscoLwappClRoamDot11aRfParamsGroupSup1, ciscoLwappClRoamDot11bRfParamsGroupSup1', 'Initial version of this MIB module.',))\nif mibBuilder.loadTexts: ciscoLwappClRoamMIB.setLastUpdated('201001290000Z')\nif mibBuilder.loadTexts: ciscoLwappClRoamMIB.setOrganization('Cisco Systems, Inc.')\nif mibBuilder.loadTexts: ciscoLwappClRoamMIB.setContactInfo('Cisco Systems, Customer Service Postal: 170 West Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS Email: [email protected]')\nif mibBuilder.loadTexts: ciscoLwappClRoamMIB.setDescription(\"This MIB is intended to be implemented on all those devices operating as Central controllers, that terminate the Light Weight Access Point Protocol tunnel from Cisco Light-weight LWAPP Access Points. Information provided by this MIB is for CCX related features as specified in the CCX specifications. This MIB covers roaming RF parameters for CCX clients. The relationship between CC and the LWAPP APs can be depicted as follows: +......+ +......+ +......+ + + + + + + + CC + + CC + + CC + + + + + + + +......+ +......+ +......+ .. . . .. . . . . . . . . . . . . . . . . . . +......+ +......+ +......+ +......+ + + + + + + + + + AP + + AP + + AP + + AP + + + + + + + + + +......+ +......+ +......+ +......+ . . . . . . . . . . . . . . . . . . . +......+ +......+ +......+ +......+ + + + + + + + + + MN + + MN + + MN + + MN + + + + + + + + + +......+ +......+ +......+ +......+ The LWAPP tunnel exists between the controller and the APs. The MNs communicate with the APs through the protocol defined by the 802.11 standard. LWAPP APs, upon bootup, discover and join one of the controllers and the controller pushes the configuration, that includes the WLAN parameters, to the LWAPP APs. The APs then encapsulate all the 802.11 frames from wireless clients inside LWAPP frames and forward the LWAPP frames to the controller. GLOSSARY Access Point ( AP ) An entity that contains an 802.11 medium access control ( MAC ) and physical layer ( PHY ) interface and provides access to the distribution services via the wireless medium for associated clients. LWAPP APs encapsulate all the 802.11 frames in LWAPP frames and sends them to the controller to which it is logically connected. Basic Service Set ( BSS ) The IEEE 802.11 BSS of an AP comprises of the stations directly associating with the AP. Central Controller ( CC ) The central entity that terminates the LWAPP protocol tunnel from the LWAPP APs. Throughout this MIB, this entity is also referred to as 'controller'. Cisco Compatible eXtensions (CCX) Wireless LAN Access Points (APs) manufactured by Cisco Systems have features and capabilities beyond those in related standards (e.g., IEEE 802.11 suite of standards ,Wi-Fi recommendations by WECA, 802.1X security suite,etc). A number of features provide higher performance.For example, Cisco AP transmits a specific Information Element, which the clients adapt to for enhanced performance. Similarly, a number of features are implemented by means of proprietary Information Elements, which Cisco clients use in specific ways to carry out tasks above and beyond the standard. Other examples of feature categories are roaming and power saving. Client Roaming A client may decide to reassociate with another AP for reasons of its own choosing. The decision of whether or not to use the information contained in the AP list is up to the discretion of the implementor, as long as the roam time requirement is met. 
Light Weight Access Point Protocol ( LWAPP ) This is a generic protocol that defines the communication between the Access Points and the Central Controller. Mobile Node ( MN ) A roaming 802.11 wireless device in a wireless network associated with an access point. Mobile Node and client are used interchangeably. REFERENCE [1] Wireless LAN Medium Access Control ( MAC ) and Physical Layer ( PHY ) Specifications [2] Draft-obara-capwap-lwapp-00.txt, IETF Light Weight Access Point Protocol\")\nciscoLwappClRoamMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 0))\nciscoLwappClRoamMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 1))\nciscoLwappClRoamMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 2))\nclcrRoamDot11aRfParamConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1))\nclcrRoamDot11bRfParamConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2))\nclcrRoamReasonReport = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3))\nclcrRoamDot11Stats = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 4))\nclcrDot11aMode = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 1), CLDot11RfParamMode().clone('default')).setMaxAccess(\"readwrite\")\nif mibBuilder.loadTexts: clcrDot11aMode.setStatus('current')\nif mibBuilder.loadTexts: clcrDot11aMode.setDescription('This object represents how the controller chooses the values of the RF parameters needed to manage roaming in 802.11a networks.')\nclcrDot11aMinRssi = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-90, -80)).clone(-85)).setUnits('dBm').setMaxAccess(\"readwrite\")\nif mibBuilder.loadTexts: clcrDot11aMinRssi.setStatus('deprecated')\nif mibBuilder.loadTexts: clcrDot11aMinRssi.setDescription(\"This object indicates the Minimum Received Signal Strength Indication (RSSI) in dBm required to associate with the AP. It also defines the edge of coverage for the BSS. If the client's average received signal power dips below this threshold, clients must have roamed to another AP with a stronger signal. This object is superseded by clcrDot11aMinRssiV2\")\nclcrDot11aHysteresis = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2, 4)).clone(2)).setUnits('dB').setMaxAccess(\"readwrite\")\nif mibBuilder.loadTexts: clcrDot11aHysteresis.setStatus('deprecated')\nif mibBuilder.loadTexts: clcrDot11aHysteresis.setDescription('This object indicates how much stronger the signal strength (dB) of a neighbor AP must be, in order for the client to roam to it. The use of roaming hysteresis is intended to reduce the number of clients roaming back and forth between BSSs if the client is physically located on or near the border between two BSSs. This object is superseded by clcrDot11aHysteresisV2')\nclcrDot11aAdaptiveScanThreshold = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-77, -70)).clone(-72)).setUnits('dBm').setMaxAccess(\"readwrite\")\nif mibBuilder.loadTexts: clcrDot11aAdaptiveScanThreshold.setStatus('deprecated')\nif mibBuilder.loadTexts: clcrDot11aAdaptiveScanThreshold.setDescription('This object configures the threshold for the strength of the signals received (RSSI) from an AP, as seen by an associated client, below which the client must be able to roam to a neighbor AP within the specified Transition Time configured through clcrDot11aTransitionTime. 
This object is superseded by clcrDot11aAdaptiveScanThresholdV2')\nclcrDot11aTransitionTime = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 5), TimeInterval().subtype(subtypeSpec=ValueRangeConstraint(100, 10000)).clone(500)).setMaxAccess(\"readwrite\")\nif mibBuilder.loadTexts: clcrDot11aTransitionTime.setStatus('deprecated')\nif mibBuilder.loadTexts: clcrDot11aTransitionTime.setDescription('This object configures the maximum time duration permitted for the client to detect a suitable neighbor AP to roam to and to complete the roam, whenever the RSSI from the currently associated AP is below the adaptive scan threshold configured through clcrDot11aAdaptiveScanThreshold. The time is expressed in hundredths of a second. This object is superseded by clcrDot11aTransitionTimeV2')\nclcrDot11aMinRssiV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-255, 255))).setUnits('dBm').setMaxAccess(\"readwrite\")\nif mibBuilder.loadTexts: clcrDot11aMinRssiV2.setStatus('current')\nif mibBuilder.loadTexts: clcrDot11aMinRssiV2.setDescription(\"This object indicates the Minimum Received Signal Strength Indication (RSSI) in dBm required to associate with the AP. It also defines the edge of coverage for the BSS. If the client's average received signal power dips below this threshold, clients must have roamed to another AP with a stronger signal.\")\nclcrDot11aHysteresisV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setUnits('dB').setMaxAccess(\"readwrite\")\nif mibBuilder.loadTexts: clcrDot11aHysteresisV2.setStatus('current')\nif mibBuilder.loadTexts: clcrDot11aHysteresisV2.setDescription('This object indicates how much stronger the signal strength (dB) of a neighbor AP must be, in order for the client to roam to it. The use of roaming hysteresis is intended to reduce the number of clients roaming back and forth between BSSs if the client is physically located on or near the border between two BSSs.')\nclcrDot11aAdaptiveScanThresholdV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-255, 255))).setUnits('dBm').setMaxAccess(\"readwrite\")\nif mibBuilder.loadTexts: clcrDot11aAdaptiveScanThresholdV2.setStatus('current')\nif mibBuilder.loadTexts: clcrDot11aAdaptiveScanThresholdV2.setDescription('This object configures the threshold for the strength of the signals received (RSSI) from an AP, as seen by an associated client, below which the client must be able to roam to a neighbor AP within the specified Transition Time configured through clcrDot11aTransitionTime.')\nclcrDot11aTransitionTimeV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 9), TimeInterval().subtype(subtypeSpec=ValueRangeConstraint(0, 10000))).setMaxAccess(\"readwrite\")\nif mibBuilder.loadTexts: clcrDot11aTransitionTimeV2.setStatus('current')\nif mibBuilder.loadTexts: clcrDot11aTransitionTimeV2.setDescription('This object configures the maximum time duration permitted for the client to detect a suitable neighbor AP to roam to and to complete the roam, whenever the RSSI from the currently associated AP is below the adaptive scan threshold configured through clcrDot11aAdaptiveScanThreshold. 
The time is expressed in hundredths of a second.')\nclcrDot11bMode = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 1), CLDot11RfParamMode().clone('default')).setMaxAccess(\"readwrite\")\nif mibBuilder.loadTexts: clcrDot11bMode.setStatus('current')\nif mibBuilder.loadTexts: clcrDot11bMode.setDescription('This object represents how the controller chooses the values of the RF parameters needed to manage roaming in 802.11b/g networks.')\nclcrDot11bMinRssi = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-90, -80)).clone(-85)).setUnits('dBm').setMaxAccess(\"readwrite\")\nif mibBuilder.loadTexts: clcrDot11bMinRssi.setStatus('deprecated')\nif mibBuilder.loadTexts: clcrDot11bMinRssi.setDescription(\"This object indicates the minimum Received Signal Strength Indication (RSSI) in dBm required to associate with the AP. It also defines the edge of coverage for the BSS. If the client's average received signal power dips below this threshold, clients must have roamed to another AP with a stronger signal. This object is superseded by clcrDot11bMinRssiV2\")\nclcrDot11bHysteresis = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2, 4)).clone(2)).setUnits('dB').setMaxAccess(\"readwrite\")\nif mibBuilder.loadTexts: clcrDot11bHysteresis.setStatus('deprecated')\nif mibBuilder.loadTexts: clcrDot11bHysteresis.setDescription('This object indicates how much stronger the signal strength (dB) of a neighbor AP must be, in order for the client to roam to it. The use of roaming hysteresis is intended to reduce the number of clients roaming back and forth between BSSs if the client is physically located on or near the border between two BSSs. This object is superseded by clcrDot11bHysteresisV2')\nclcrDot11bAdaptiveScanThreshold = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-77, -70)).clone(-72)).setUnits('dBm').setMaxAccess(\"readwrite\")\nif mibBuilder.loadTexts: clcrDot11bAdaptiveScanThreshold.setStatus('deprecated')\nif mibBuilder.loadTexts: clcrDot11bAdaptiveScanThreshold.setDescription('This object configures the threshold for the strength of the signals received (RSSI) from an AP, as seen by an associated client, below which the client must be able to roam to a neighbor AP within the specified Transition Time configured through clcrDot11bTransitionTime. This object is superseded by clcrDot11bAdaptiveScanThresholdV2')\nclcrDot11bTransitionTime = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 5), TimeInterval().subtype(subtypeSpec=ValueRangeConstraint(100, 10000)).clone(500)).setMaxAccess(\"readwrite\")\nif mibBuilder.loadTexts: clcrDot11bTransitionTime.setStatus('deprecated')\nif mibBuilder.loadTexts: clcrDot11bTransitionTime.setDescription('This object configures the maximum time duration permitted for the client to detect a suitable neighbor AP to roam to and to complete the roam, whenever the RSSI from the currently associated AP is below the adaptive scan threshold configured through clcrDot11bAdaptiveScanThreshold. The time is expressed in hundredths of a second. 
This object is superseded by clcrDot11bTransitionTimeV2')\nclcrDot11bMinRssiV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-255, 255))).setUnits('dBm').setMaxAccess(\"readwrite\")\nif mibBuilder.loadTexts: clcrDot11bMinRssiV2.setStatus('current')\nif mibBuilder.loadTexts: clcrDot11bMinRssiV2.setDescription(\"This object indicates the minimum Received Signal Strength Indication (RSSI) in dBm required to associate with the AP. It also defines the edge of coverage for the BSS. If the client's average received signal power dips below this threshold, clients must have roamed to another AP with a stronger signal.\")\nclcrDot11bHysteresisV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setUnits('dB').setMaxAccess(\"readwrite\")\nif mibBuilder.loadTexts: clcrDot11bHysteresisV2.setStatus('current')\nif mibBuilder.loadTexts: clcrDot11bHysteresisV2.setDescription('This object indicates how much stronger the signal strength (dB) of a neighbor AP must be, in order for the client to roam to it. The use of roaming hysteresis is intended to reduce the number of clients roaming back and forth between BSSs if the client is physically located on or near the border between two BSSs.')\nclcrDot11bAdaptiveScanThresholdV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-255, 255))).setUnits('dBm').setMaxAccess(\"readwrite\")\nif mibBuilder.loadTexts: clcrDot11bAdaptiveScanThresholdV2.setStatus('current')\nif mibBuilder.loadTexts: clcrDot11bAdaptiveScanThresholdV2.setDescription('This object configures the threshold for the strength of the signals received (RSSI) from an AP, as seen by an associated client, below which the client must be able to roam to a neighbor AP within the specified Transition Time configured through clcrDot11bTransitionTime.')\nclcrDot11bTransitionTimeV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 9), TimeInterval().subtype(subtypeSpec=ValueRangeConstraint(0, 10000))).setMaxAccess(\"readwrite\")\nif mibBuilder.loadTexts: clcrDot11bTransitionTimeV2.setStatus('current')\nif mibBuilder.loadTexts: clcrDot11bTransitionTimeV2.setDescription('This object configures the maximum time duration permitted for the client to detect a suitable neighbor AP to roam to and to complete the roam, whenever the RSSI from the currently associated AP is below the adaptive scan threshold configured through clcrDot11bAdaptiveScanThreshold. The time is expressed in hundredths of a second.')\nclcrRoamReasonReportTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1), )\nif mibBuilder.loadTexts: clcrRoamReasonReportTable.setStatus('current')\nif mibBuilder.loadTexts: clcrRoamReasonReportTable.setDescription('This table provides the reasons for CCX clients roaming from one AP to another. When a CCX client associates to an AP, it will always send an IAPP information packet to the new AP listing the characteristics of the previous AP. 
An entry is added to this table when a roam reason report is sent by a CCX client as it roams to a new AP.')\nclcrRoamReasonReportEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1), ).setIndexNames((0, \"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrRoamClientMacAddress\"), (0, \"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrRoamClientTimeStamp\"))\nif mibBuilder.loadTexts: clcrRoamReasonReportEntry.setStatus('current')\nif mibBuilder.loadTexts: clcrRoamReasonReportEntry.setDescription('Each entry corresponds to the roam reason report sent by a CCX client to the new AP to which the client associates.')\nclcrRoamClientMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 1), MacAddress())\nif mibBuilder.loadTexts: clcrRoamClientMacAddress.setStatus('current')\nif mibBuilder.loadTexts: clcrRoamClientMacAddress.setDescription('This object indicates the MAC address of the client which has roamed to a new AP.')\nclcrRoamClientTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 2), TimeTicks())\nif mibBuilder.loadTexts: clcrRoamClientTimeStamp.setStatus('current')\nif mibBuilder.loadTexts: clcrRoamClientTimeStamp.setDescription(\"This object indicates the time instance at which this report was received by the new AP to which the client roamed. This represents the number of seconds elapsed since 00:00:00 on January 1, 1970, Coordinated Universal Time (UTC). So a value of '1131362704' means 'Mon Nov 7 16:55:04 2005'.\")\nclcrRoamNewApMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 3), MacAddress()).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: clcrRoamNewApMacAddress.setStatus('current')\nif mibBuilder.loadTexts: clcrRoamNewApMacAddress.setDescription('This object indicates the MAC address of the current AP to which the client has roamed. 
This AP receives the roam reason report.')\nclcrRoamPrevApMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 4), MacAddress()).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: clcrRoamPrevApMacAddress.setStatus('current')\nif mibBuilder.loadTexts: clcrRoamPrevApMacAddress.setDescription('This object indicates the MAC address of the previous AP to which the client was associated.')\nclcrRoamPrevApChannel = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 5), CLDot11Channel()).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: clcrRoamPrevApChannel.setStatus('current')\nif mibBuilder.loadTexts: clcrRoamPrevApChannel.setDescription('This object indicates the channel number at which the client was associated to the previous AP.')\nclcrRoamPrevApSsid = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: clcrRoamPrevApSsid.setStatus('current')\nif mibBuilder.loadTexts: clcrRoamPrevApSsid.setDescription('This object indicates the SSID at which the client was associated to the previous AP.')\nclcrRoamDisassocTimeInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 7), TimeInterval()).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: clcrRoamDisassocTimeInterval.setStatus('current')\nif mibBuilder.loadTexts: clcrRoamDisassocTimeInterval.setDescription('This object indicates the time elapsed since the client disassociated, in hundredths of a second.')\nclcrRoamReason = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues((\"clcrUnspecified\", 0), (\"clcrPoorLink\", 1), (\"clcrLoadBalancing\", 2), (\"clcrInsufficientCapacity\", 3), (\"clcrDirectedRoam\", 4), (\"clcrFirstAssociation\", 5), (\"clcrRoamingIn\", 6), (\"clcrRoamingOut\", 7), (\"clcrBetterAp\", 8), (\"clcrDisassociated\", 9)))).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: clcrRoamReason.setStatus('current')\nif mibBuilder.loadTexts: clcrRoamReason.setDescription(\"This object indicates the reason for a client to roam to a new AP. The semantics are as follows. clcrUnspecified - The reason is not known or can't be found. clcrPoorLink - Normal roam due to poor link (excessive retries, too much interference, RSSI too low, etc.) clcrLoadBalancing - Normal roam due to load balancing clcrInsufficientCapacity - Roaming occurred due to insufficient capacity on the previous AP (TSPEC rejected) clcrDirectedRoam - Roaming is directed by the 802.11 wireless Infrastructure clcrFirstAssociation - This is the first association to a particular WLAN clcrRoamingIn - Roaming in from cellular or other WAN clcrRoamingOut - Roaming out to cellular or other WAN clcrBetterAp - Normal roam due to better AP found clcrDisassociated - Deauthenticated or Disassociated from the previous AP.\")\nclcrDot11StatsTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 4, 1), )\nif mibBuilder.loadTexts: clcrDot11StatsTable.setStatus('current')\nif mibBuilder.loadTexts: clcrDot11StatsTable.setDescription('This table provides the statistics collected when clients roam in the WLAN. 
There exists a row in this table for each conceptual row in cLApDot11IfTable that represents a dot11 interface of an AP.')\nclcrDot11StatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 4, 1, 1), ).setIndexNames((0, \"CISCO-LWAPP-AP-MIB\", \"cLApSysMacAddress\"), (0, \"CISCO-LWAPP-AP-MIB\", \"cLApDot11IfSlotId\"))\nif mibBuilder.loadTexts: clcrDot11StatsEntry.setStatus('current')\nif mibBuilder.loadTexts: clcrDot11StatsEntry.setDescription('Each entry represents a conceptual row in clcrDot11StatsTable and corresponds to the roam reason report sent by a CCX client to the new AP to which the client associates.')\nclcrDot11NeighborRequestRx = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 4, 1, 1, 1), Counter32()).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: clcrDot11NeighborRequestRx.setStatus('current')\nif mibBuilder.loadTexts: clcrDot11NeighborRequestRx.setDescription('This object indicates the number of requests received from an E2E client for neighbor updates.')\nclcrDot11NeighborReplySent = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 4, 1, 1, 2), Counter32()).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: clcrDot11NeighborReplySent.setStatus('current')\nif mibBuilder.loadTexts: clcrDot11NeighborReplySent.setDescription('This object indicates the number of replies sent to the client in reply to the request for neighbor updates received from the client.')\nclcrDot11RoamReasonReportRx = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 4, 1, 1, 3), Counter32()).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: clcrDot11RoamReasonReportRx.setStatus('current')\nif mibBuilder.loadTexts: clcrDot11RoamReasonReportRx.setDescription('This object reports the number of roam reason reports received from CCX clients.')\nclcrDot11BcastUpdatesSent = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 4, 1, 1, 4), Counter32()).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: clcrDot11BcastUpdatesSent.setStatus('current')\nif mibBuilder.loadTexts: clcrDot11BcastUpdatesSent.setDescription('This object indicates the number of broadcast neighbor updates sent by an AP.')\nciscoLwappClRoamMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 1))\nciscoLwappClRoamMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 2))\nclcrMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 1, 1)).setObjects((\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"ciscoLwappClRoamDot11aRfParamsGroup\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"ciscoLwappClRoamDot11bRfParamsGroup\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"ciscoLwappClRoamroamReasonGroup\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"ciscoLwappClRoamroamingStatsGroup\"))\n\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n    clcrMIBCompliance = clcrMIBCompliance.setStatus('deprecated')\nif mibBuilder.loadTexts: clcrMIBCompliance.setDescription('The compliance statement for the SNMP entities that implement the ciscoLwappRoamMIB module.')\nclcrMIBComplianceRev1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 1, 2)).setObjects((\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"ciscoLwappClRoamDot11aRfParamsGroupSup1\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"ciscoLwappClRoamDot11bRfParamsGroupSup1\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"ciscoLwappClRoamroamReasonGroup\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"ciscoLwappClRoamroamingStatsGroup\"))\n\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n    clcrMIBComplianceRev1 = clcrMIBComplianceRev1.setStatus('current')\nif
mibBuilder.loadTexts: clcrMIBComplianceRev1.setDescription('The compliance statement for the SNMP entities that implement the ciscoLwappRoamMIB module.')\nciscoLwappClRoamDot11aRfParamsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 2, 1)).setObjects((\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrDot11aMode\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrDot11aMinRssi\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrDot11aHysteresis\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrDot11aAdaptiveScanThreshold\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrDot11aTransitionTime\"))\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n    ciscoLwappClRoamDot11aRfParamsGroup = ciscoLwappClRoamDot11aRfParamsGroup.setStatus('deprecated')\nif mibBuilder.loadTexts: ciscoLwappClRoamDot11aRfParamsGroup.setDescription('This collection of objects represents the radio parameters for the 802.11a networks.')\nciscoLwappClRoamDot11bRfParamsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 2, 2)).setObjects((\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrDot11bMode\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrDot11bMinRssi\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrDot11bHysteresis\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrDot11bAdaptiveScanThreshold\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrDot11bTransitionTime\"))\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n    ciscoLwappClRoamDot11bRfParamsGroup = ciscoLwappClRoamDot11bRfParamsGroup.setStatus('deprecated')\nif mibBuilder.loadTexts: ciscoLwappClRoamDot11bRfParamsGroup.setDescription('This collection of objects represents the radio parameters for the 802.11b/g bands.')\nciscoLwappClRoamroamReasonGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 2, 3)).setObjects((\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrRoamNewApMacAddress\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrRoamPrevApMacAddress\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrRoamPrevApChannel\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrRoamPrevApSsid\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrRoamDisassocTimeInterval\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrRoamReason\"))\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n    ciscoLwappClRoamroamReasonGroup = ciscoLwappClRoamroamReasonGroup.setStatus('current')\nif mibBuilder.loadTexts: ciscoLwappClRoamroamReasonGroup.setDescription('This collection of objects provides the reasons for clients roaming between APs.')\nciscoLwappClRoamroamingStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 2, 4)).setObjects((\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrDot11NeighborRequestRx\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrDot11NeighborReplySent\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrDot11RoamReasonReportRx\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrDot11BcastUpdatesSent\"))\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n    ciscoLwappClRoamroamingStatsGroup = ciscoLwappClRoamroamingStatsGroup.setStatus('current')\nif mibBuilder.loadTexts: ciscoLwappClRoamroamingStatsGroup.setDescription('This collection of objects provides the counters related to roaming.')\nciscoLwappClRoamDot11aRfParamsGroupSup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 2, 5)).setObjects((\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrDot11aMode\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrDot11aMinRssiV2\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrDot11aHysteresisV2\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrDot11aAdaptiveScanThresholdV2\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\",
\"clcrDot11aTransitionTimeV2\"))\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n ciscoLwappClRoamDot11aRfParamsGroupSup1 = ciscoLwappClRoamDot11aRfParamsGroupSup1.setStatus('current')\nif mibBuilder.loadTexts: ciscoLwappClRoamDot11aRfParamsGroupSup1.setDescription('This collection of objects represent the radio parameters for the 802.11a networks.')\nciscoLwappClRoamDot11bRfParamsGroupSup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 2, 6)).setObjects((\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrDot11bMode\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrDot11bMinRssiV2\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrDot11bHysteresisV2\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrDot11bAdaptiveScanThresholdV2\"), (\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", \"clcrDot11bTransitionTimeV2\"))\nif getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):\n ciscoLwappClRoamDot11bRfParamsGroupSup1 = ciscoLwappClRoamDot11bRfParamsGroupSup1.setStatus('current')\nif mibBuilder.loadTexts: ciscoLwappClRoamDot11bRfParamsGroupSup1.setDescription('This collection of objects represent the radio parameters for the 802.11b/g bands.')\nmibBuilder.exportSymbols(\"CISCO-LWAPP-CLIENT-ROAMING-MIB\", clcrDot11aMinRssi=clcrDot11aMinRssi, clcrRoamClientMacAddress=clcrRoamClientMacAddress, ciscoLwappClRoamroamingStatsGroup=ciscoLwappClRoamroamingStatsGroup, clcrDot11bTransitionTimeV2=clcrDot11bTransitionTimeV2, clcrRoamNewApMacAddress=clcrRoamNewApMacAddress, clcrMIBCompliance=clcrMIBCompliance, clcrRoamDot11aRfParamConfig=clcrRoamDot11aRfParamConfig, clcrDot11BcastUpdatesSent=clcrDot11BcastUpdatesSent, clcrRoamPrevApSsid=clcrRoamPrevApSsid, clcrMIBComplianceRev1=clcrMIBComplianceRev1, clcrDot11bHysteresisV2=clcrDot11bHysteresisV2, ciscoLwappClRoamMIBConform=ciscoLwappClRoamMIBConform, clcrDot11aTransitionTime=clcrDot11aTransitionTime, clcrDot11aHysteresis=clcrDot11aHysteresis, ciscoLwappClRoamDot11bRfParamsGroupSup1=ciscoLwappClRoamDot11bRfParamsGroupSup1, PYSNMP_MODULE_ID=ciscoLwappClRoamMIB, clcrDot11bHysteresis=clcrDot11bHysteresis, clcrDot11StatsEntry=clcrDot11StatsEntry, clcrRoamDisassocTimeInterval=clcrRoamDisassocTimeInterval, ciscoLwappClRoamDot11aRfParamsGroupSup1=ciscoLwappClRoamDot11aRfParamsGroupSup1, clcrDot11bAdaptiveScanThreshold=clcrDot11bAdaptiveScanThreshold, clcrDot11NeighborRequestRx=clcrDot11NeighborRequestRx, clcrRoamClientTimeStamp=clcrRoamClientTimeStamp, clcrRoamReason=clcrRoamReason, clcrDot11bMode=clcrDot11bMode, clcrDot11aAdaptiveScanThreshold=clcrDot11aAdaptiveScanThreshold, clcrDot11RoamReasonReportRx=clcrDot11RoamReasonReportRx, clcrDot11bAdaptiveScanThresholdV2=clcrDot11bAdaptiveScanThresholdV2, ciscoLwappClRoamDot11bRfParamsGroup=ciscoLwappClRoamDot11bRfParamsGroup, ciscoLwappClRoamMIBNotifs=ciscoLwappClRoamMIBNotifs, clcrRoamReasonReportTable=clcrRoamReasonReportTable, clcrDot11aMinRssiV2=clcrDot11aMinRssiV2, ciscoLwappClRoamMIBObjects=ciscoLwappClRoamMIBObjects, clcrDot11NeighborReplySent=clcrDot11NeighborReplySent, clcrDot11aAdaptiveScanThresholdV2=clcrDot11aAdaptiveScanThresholdV2, ciscoLwappClRoamroamReasonGroup=ciscoLwappClRoamroamReasonGroup, clcrDot11StatsTable=clcrDot11StatsTable, clcrRoamDot11Stats=clcrRoamDot11Stats, clcrRoamDot11bRfParamConfig=clcrRoamDot11bRfParamConfig, clcrDot11bMinRssi=clcrDot11bMinRssi, clcrRoamReasonReport=clcrRoamReasonReport, clcrRoamPrevApMacAddress=clcrRoamPrevApMacAddress, ciscoLwappClRoamDot11aRfParamsGroup=ciscoLwappClRoamDot11aRfParamsGroup, clcrRoamReasonReportEntry=clcrRoamReasonReportEntry, 
ciscoLwappClRoamMIBGroups=ciscoLwappClRoamMIBGroups, clcrDot11bMinRssiV2=clcrDot11bMinRssiV2, ciscoLwappClRoamMIBCompliances=ciscoLwappClRoamMIBCompliances, clcrDot11aMode=clcrDot11aMode, clcrDot11aTransitionTimeV2=clcrDot11aTransitionTimeV2, clcrRoamPrevApChannel=clcrRoamPrevApChannel, clcrDot11bTransitionTime=clcrDot11bTransitionTime, ciscoLwappClRoamMIB=ciscoLwappClRoamMIB, clcrDot11aHysteresisV2=clcrDot11aHysteresisV2)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
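The MIB row above only defines and exports symbols; a minimal sketch of resolving one of them with pysnmp, assuming the module has been compiled to CISCO-LWAPP-CLIENT-ROAMING-MIB.py on the builder's MIB search path:

from pysnmp.smi import builder, view
from pysnmp.smi.rfc1902 import ObjectIdentity

mibBuilder = builder.MibBuilder()
mibBuilder.loadModules('CISCO-LWAPP-CLIENT-ROAMING-MIB')  # assumes the compiled module is on the search path
mibView = view.MibViewController(mibBuilder)

# Resolve one exported symbol to its numeric OID
oid = ObjectIdentity('CISCO-LWAPP-CLIENT-ROAMING-MIB',
                     'clcrDot11BcastUpdatesSent').resolveWithMib(mibView)
print(oid.getOid())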
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QLabel, QComboBox
class ChoiceTargetNumbers(QWidget):
"""Виджет с выбором номеров целей"""
def __init__(self, parent=None) -> None:
QWidget.__init__(self, parent)
        # Required components
        label = QLabel(text="Select target numbers:")
self.first_target_number_combo_box = QComboBox()
self.first_target_number_combo_box.addItems(["1", "2", "3"])
self.second_target_number_combo_box = QComboBox()
self.second_target_number_combo_box.addItems(["1", "2", "3"])
        # Main container
layout = QHBoxLayout(self)
layout.addWidget(label)
layout.addWidget(self.first_target_number_combo_box)
layout.addWidget(self.second_target_number_combo_box)
|
normal
|
{
"blob_id": "291cd789ac3ab7b794be8feafe0f608ad0c081d7",
"index": 9674,
"step-1": "<mask token>\n\n\nclass ChoiceTargetNumbers(QWidget):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ChoiceTargetNumbers(QWidget):\n <mask token>\n\n def __init__(self, parent=None) ->None:\n QWidget.__init__(self, parent)\n label = QLabel(text='Выберите номера целей:')\n self.first_target_number_combo_box = QComboBox()\n self.first_target_number_combo_box.addItems(['1', '2', '3'])\n self.second_target_number_combo_box = QComboBox()\n self.second_target_number_combo_box.addItems(['1', '2', '3'])\n layout = QHBoxLayout(self)\n layout.addWidget(label)\n layout.addWidget(self.first_target_number_combo_box)\n layout.addWidget(self.second_target_number_combo_box)\n",
"step-3": "<mask token>\n\n\nclass ChoiceTargetNumbers(QWidget):\n \"\"\"Виджет с выбором номеров целей\"\"\"\n\n def __init__(self, parent=None) ->None:\n QWidget.__init__(self, parent)\n label = QLabel(text='Выберите номера целей:')\n self.first_target_number_combo_box = QComboBox()\n self.first_target_number_combo_box.addItems(['1', '2', '3'])\n self.second_target_number_combo_box = QComboBox()\n self.second_target_number_combo_box.addItems(['1', '2', '3'])\n layout = QHBoxLayout(self)\n layout.addWidget(label)\n layout.addWidget(self.first_target_number_combo_box)\n layout.addWidget(self.second_target_number_combo_box)\n",
"step-4": "from PyQt5.QtWidgets import QWidget, QHBoxLayout, QLabel, QComboBox\n\n\nclass ChoiceTargetNumbers(QWidget):\n \"\"\"Виджет с выбором номеров целей\"\"\"\n\n def __init__(self, parent=None) ->None:\n QWidget.__init__(self, parent)\n label = QLabel(text='Выберите номера целей:')\n self.first_target_number_combo_box = QComboBox()\n self.first_target_number_combo_box.addItems(['1', '2', '3'])\n self.second_target_number_combo_box = QComboBox()\n self.second_target_number_combo_box.addItems(['1', '2', '3'])\n layout = QHBoxLayout(self)\n layout.addWidget(label)\n layout.addWidget(self.first_target_number_combo_box)\n layout.addWidget(self.second_target_number_combo_box)\n",
"step-5": "from PyQt5.QtWidgets import QWidget, QHBoxLayout, QLabel, QComboBox\n\n\nclass ChoiceTargetNumbers(QWidget):\n \"\"\"Виджет с выбором номеров целей\"\"\"\n def __init__(self, parent=None) -> None:\n QWidget.__init__(self, parent)\n\n # Нужные компоненты\n label = QLabel(text=\"Выберите номера целей:\")\n\n self.first_target_number_combo_box = QComboBox()\n self.first_target_number_combo_box.addItems([\"1\", \"2\", \"3\"])\n\n self.second_target_number_combo_box = QComboBox()\n self.second_target_number_combo_box.addItems([\"1\", \"2\", \"3\"])\n\n # Основной контейнер\n layout = QHBoxLayout(self)\n layout.addWidget(label)\n layout.addWidget(self.first_target_number_combo_box)\n layout.addWidget(self.second_target_number_combo_box)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
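A minimal sketch of running the ChoiceTargetNumbers widget above on its own; the QApplication scaffolding is standard PyQt5 and not part of the original snippet:

import sys
from PyQt5.QtWidgets import QApplication

app = QApplication(sys.argv)
widget = ChoiceTargetNumbers()  # the widget defined above
widget.show()
sys.exit(app.exec_())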
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Global configuration."""
# ----------------------------------------------------------------------------
# Paths.
from facegan import ROOT_PATH
result_dir = 'results'
data_dir = 'datasets'
cache_dir = f'{ROOT_PATH}/data/cache'
run_dir_ignore = ['results', 'datasets', 'cache']
# experimental - replace Dense layers with TreeConnect
use_treeconnect = False
treeconnect_threshold = 1024
# ----------------------------------------------------------------------------
vgg16 = 'vgg16_zhang_perceptual.pkl'
model = 'stylegan2-ffhq-config-f.pkl'
networks_urls = {
'european': [
'https://drive.google.com/uc?id=1--kh2Em5U1qh-H7Lin9FzppkZCQ18c4W',
'generator_model-stylegan2-config-f.pkl'
],
'asian': [
'https://drive.google.com/uc?id=1-3XU6KzIVywFoKXx2zG1hW8mH4OYpyO9',
'generator_yellow-stylegan2-config-f.pkl'
],
'asian beauty': [
'https://drive.google.com/uc?id=1-04v78_pI59M0IvhcKxsm3YhK2-plnbj',
'generator_star-stylegan2-config-f.pkl'
],
'baby': [
'https://drive.google.com/uc?id=1--684mANXSgC3aDhLc7lPM7OBHWuVRXa',
'generator_baby-stylegan2-config-f.pkl'
],
}
|
normal
|
{
"blob_id": "cb904408486ad9ea8cc0c8ff2ec393e480309a57",
"index": 2403,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nresult_dir = 'results'\ndata_dir = 'datasets'\ncache_dir = f'{ROOT_PATH}/data/cache'\nrun_dir_ignore = ['results', 'datasets', 'cache']\nuse_treeconnect = False\ntreeconnect_threshold = 1024\nvgg16 = 'vgg16_zhang_perceptual.pkl'\nmodel = 'stylegan2-ffhq-config-f.pkl'\nnetworks_urls = {'european': [\n 'https://drive.google.com/uc?id=1--kh2Em5U1qh-H7Lin9FzppkZCQ18c4W',\n 'generator_model-stylegan2-config-f.pkl'], 'asian': [\n 'https://drive.google.com/uc?id=1-3XU6KzIVywFoKXx2zG1hW8mH4OYpyO9',\n 'generator_yellow-stylegan2-config-f.pkl'], 'asian beauty': [\n 'https://drive.google.com/uc?id=1-04v78_pI59M0IvhcKxsm3YhK2-plnbj',\n 'generator_star-stylegan2-config-f.pkl'], 'baby': [\n 'https://drive.google.com/uc?id=1--684mANXSgC3aDhLc7lPM7OBHWuVRXa',\n 'generator_baby-stylegan2-config-f.pkl']}\n",
"step-3": "<mask token>\nfrom facegan import ROOT_PATH\nresult_dir = 'results'\ndata_dir = 'datasets'\ncache_dir = f'{ROOT_PATH}/data/cache'\nrun_dir_ignore = ['results', 'datasets', 'cache']\nuse_treeconnect = False\ntreeconnect_threshold = 1024\nvgg16 = 'vgg16_zhang_perceptual.pkl'\nmodel = 'stylegan2-ffhq-config-f.pkl'\nnetworks_urls = {'european': [\n 'https://drive.google.com/uc?id=1--kh2Em5U1qh-H7Lin9FzppkZCQ18c4W',\n 'generator_model-stylegan2-config-f.pkl'], 'asian': [\n 'https://drive.google.com/uc?id=1-3XU6KzIVywFoKXx2zG1hW8mH4OYpyO9',\n 'generator_yellow-stylegan2-config-f.pkl'], 'asian beauty': [\n 'https://drive.google.com/uc?id=1-04v78_pI59M0IvhcKxsm3YhK2-plnbj',\n 'generator_star-stylegan2-config-f.pkl'], 'baby': [\n 'https://drive.google.com/uc?id=1--684mANXSgC3aDhLc7lPM7OBHWuVRXa',\n 'generator_baby-stylegan2-config-f.pkl']}\n",
"step-4": "# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n#\n# This work is licensed under the Creative Commons Attribution-NonCommercial\n# 4.0 International License. To view a copy of this license, visit\n# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to\n# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.\n\"\"\"Global configuration.\"\"\"\n\n# ----------------------------------------------------------------------------\n# Paths.\nfrom facegan import ROOT_PATH\n\nresult_dir = 'results'\ndata_dir = 'datasets'\ncache_dir = f'{ROOT_PATH}/data/cache'\nrun_dir_ignore = ['results', 'datasets', 'cache']\n\n# experimental - replace Dense layers with TreeConnect\nuse_treeconnect = False\ntreeconnect_threshold = 1024\n\n# ----------------------------------------------------------------------------\n\nvgg16 = 'vgg16_zhang_perceptual.pkl'\nmodel = 'stylegan2-ffhq-config-f.pkl'\n\nnetworks_urls = {\n 'european': [\n 'https://drive.google.com/uc?id=1--kh2Em5U1qh-H7Lin9FzppkZCQ18c4W',\n 'generator_model-stylegan2-config-f.pkl'\n ],\n 'asian': [\n 'https://drive.google.com/uc?id=1-3XU6KzIVywFoKXx2zG1hW8mH4OYpyO9',\n 'generator_yellow-stylegan2-config-f.pkl'\n ],\n 'asian beauty': [\n 'https://drive.google.com/uc?id=1-04v78_pI59M0IvhcKxsm3YhK2-plnbj',\n 'generator_star-stylegan2-config-f.pkl'\n ],\n 'baby': [\n 'https://drive.google.com/uc?id=1--684mANXSgC3aDhLc7lPM7OBHWuVRXa',\n 'generator_baby-stylegan2-config-f.pkl'\n ],\n}\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
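A minimal sketch of one way the networks_urls table above might be consumed, assuming it runs alongside the config module (networks_urls and cache_dir in scope); gdown is an assumed dependency and the helper name is hypothetical:

import os
import gdown  # assumed dependency: pip install gdown

def fetch_network(kind):  # hypothetical helper name
    """Download the chosen generator pickle into cache_dir and return its path."""
    url, filename = networks_urls[kind]
    path = os.path.join(cache_dir, filename)
    if not os.path.exists(path):
        os.makedirs(cache_dir, exist_ok=True)
        gdown.download(url, path, quiet=False)
    return path

# fetch_network('european') -> '<cache_dir>/generator_model-stylegan2-config-f.pkl'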
# Create a pandas dataframe from the DarkSage output fields in G
import pandas as pd
import numpy as np
# Convert the multi-dimensional fields into object-dtype pd.Series so they can be loaded into the pandas dataframe
Pos = []
for p in G['Pos']:
Pos.append(p)
Pos_df = pd.Series(Pos, dtype=np.dtype("object"))
Vel = []
for v in G['Vel']:
Vel.append(v)
Vel_df = pd.Series(Vel, dtype=np.dtype("object"))
Spin = []
for s in G['Spin']:
Spin.append(s)
Spin_df = pd.Series(Spin, dtype=np.dtype("object"))
Disc_r = []
for d in G['DiscRadii']:
Disc_r.append(d)
Disc_df = pd.Series(Disc_r, dtype=np.dtype("object"))
Disc_gas = []
for g in G['DiscGas']:
Disc_gas.append(g)
Disc_gas_df = pd.Series(Disc_gas, dtype=np.dtype("object"))
Disc_stars = []
for g in G['DiscStars']:
Disc_stars.append(g)
Disc_stars_df = pd.Series(Disc_stars, dtype=np.dtype("object"))
SpinStars = []
for g in G['SpinStars']:
SpinStars.append(g)
SpinStars_df = pd.Series(SpinStars, dtype=np.dtype("object"))
SpinGas = []
for g in G['SpinGas']:
SpinGas.append(g)
SpinGas_df = pd.Series(SpinGas , dtype=np.dtype("object"))
SpinClassicalBulge = []
for g in G['SpinClassicalBulge']:
SpinClassicalBulge.append(g)
SpinClassicalBulge_df = pd.Series(SpinClassicalBulge, dtype=np.dtype("object"))
DiscHI = []
for g in G['DiscHI']:
DiscHI.append(g)
DiscHI_df = pd.Series(DiscHI, dtype=np.dtype("object"))
DiscH2 = []
for g in G['DiscH2']:
DiscH2.append(g)
DiscH2_df = pd.Series(DiscH2, dtype=np.dtype("object"))
DiscSFR = []
for g in G['DiscSFR']:
DiscSFR.append(g)
DiscSFR_df = pd.Series(DiscSFR, dtype=np.dtype("object"))
DiscGasMetals = []
for g in G['DiscGasMetals']:
DiscGasMetals.append(g)
DiscGasMetals_df = pd.Series(DiscGasMetals, dtype=np.dtype("object"))
DiscStarsMetals = []
for g in G['DiscStarsMetals']:
DiscStarsMetals.append(g)
DiscStarsMetals_df = pd.Series(DiscStarsMetals, dtype=np.dtype("object"))
######################################
DS = pd.DataFrame({'Type' : G['Type' ],
'GalaxyIndex' : G['GalaxyIndex' ],
'HaloIndex' : G['HaloIndex' ],
'SimulationHaloIndex' : G['SimulationHaloIndex' ],
'TreeIndex' : G['TreeIndex' ],
'SnapNum' : G['SnapNum' ],
'CentralGalaxyIndex' : G['CentralGalaxyIndex' ],
'CentralMvir' : G['CentralMvir' ],
'mergeType' : G['mergeType' ],
'mergeIntoID' : G['mergeIntoID' ],
'mergeIntoSnapNum' : G['mergeIntoSnapNum' ],
'dT' : G['dT' ],
'Pos' : Pos_df,
'Vel' : Vel_df ,
'Spin' : Spin_df ,
'Len' : G['Len' ],
'LenMax' : G['LenMax' ],
'Mvir' : G['Mvir' ],
'Rvir' : G['Rvir' ],
'Vvir' : G['Vvir' ],
'Vmax' : G['Vmax' ],
'VelDisp' : G['VelDisp' ],
'DiscRadii' : Disc_df,
'ColdGas' : G['ColdGas' ],
'StellarMass' : G['StellarMass' ],
'MergerBulgeMass' : G['MergerBulgeMass' ],
'InstabilityBulgeMass' : G['InstabilityBulgeMass' ],
'HotGas' : G['HotGas' ],
'EjectedMass' : G['EjectedMass' ],
'BlackHoleMass' : G['BlackHoleMass' ],
'IntraClusterStars' : G['IntraClusterStars' ],
'DiscGas' : Disc_gas_df,
'DiscStars' : Disc_stars_df,
'SpinStars' : SpinStars_df,
'SpinGas' : SpinGas_df,
'SpinClassicalBulge' : SpinClassicalBulge_df,
'StarsInSitu' : G['StarsInSitu' ],
'StarsInstability' : G['StarsInstability' ],
'StarsMergeBurst' : G['StarsMergeBurst' ],
'DiscHI' : DiscHI_df,
'DiscH2' : DiscH2_df,
'DiscSFR' : DiscSFR_df,
'MetalsColdGas' : G['MetalsColdGas' ],
'MetalsStellarMass' : G['MetalsStellarMass' ],
'ClassicalMetalsBulgeMass' : G['ClassicalMetalsBulgeMass' ],
'SecularMetalsBulgeMass' : G['SecularMetalsBulgeMass' ],
'MetalsHotGas' : G['MetalsHotGas' ],
'MetalsEjectedMass' : G['MetalsEjectedMass' ],
'MetalsIntraClusterStars' : G['MetalsIntraClusterStars' ],
'DiscGasMetals' : DiscGasMetals_df,
'DiscStarsMetals' : DiscStarsMetals_df,
'SfrFromH2' : G['SfrFromH2' ],
'SfrInstab' : G['SfrInstab' ],
'SfrMergeBurst' : G['SfrMergeBurst' ],
'SfrDiskZ' : G['SfrDiskZ' ],
'SfrBulgeZ' : G['SfrBulgeZ' ],
'DiskScaleRadius' : G['DiskScaleRadius' ],
'CoolScaleRadius' : G['CoolScaleRadius' ],
'StellarDiscScaleRadius' : G['StellarDiscScaleRadius' ],
'Cooling' : G['Cooling' ],
'Heating' : G['Heating' ],
'LastMajorMerger' : G['LastMajorMerger' ],
'LastMinorMerger' : G['LastMinorMerger' ],
'OutflowRate' : G['OutflowRate' ],
'infallMvir' : G['infallMvir' ],
'infallVvir' : G['infallVvir' ],
'infallVmax' : G['infallVmax' ]})
|
normal
|
{
"blob_id": "0d565c9f92a60d25f28c903c0a27e7b93d547a4f",
"index": 2971,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor p in G['Pos']:\n Pos.append(p)\n<mask token>\nfor v in G['Vel']:\n Vel.append(v)\n<mask token>\nfor s in G['Spin']:\n Spin.append(s)\n<mask token>\nfor d in G['DiscRadii']:\n Disc_r.append(d)\n<mask token>\nfor g in G['DiscGas']:\n Disc_gas.append(g)\n<mask token>\nfor g in G['DiscStars']:\n Disc_stars.append(g)\n<mask token>\nfor g in G['SpinStars']:\n SpinStars.append(g)\n<mask token>\nfor g in G['SpinGas']:\n SpinGas.append(g)\n<mask token>\nfor g in G['SpinClassicalBulge']:\n SpinClassicalBulge.append(g)\n<mask token>\nfor g in G['DiscHI']:\n DiscHI.append(g)\n<mask token>\nfor g in G['DiscH2']:\n DiscH2.append(g)\n<mask token>\nfor g in G['DiscSFR']:\n DiscSFR.append(g)\n<mask token>\nfor g in G['DiscGasMetals']:\n DiscGasMetals.append(g)\n<mask token>\nfor g in G['DiscStarsMetals']:\n DiscStarsMetals.append(g)\n<mask token>\n",
"step-3": "<mask token>\nPos = []\nfor p in G['Pos']:\n Pos.append(p)\nPos_df = pd.Series(Pos, dtype=np.dtype('object'))\nVel = []\nfor v in G['Vel']:\n Vel.append(v)\nVel_df = pd.Series(Vel, dtype=np.dtype('object'))\nSpin = []\nfor s in G['Spin']:\n Spin.append(s)\nSpin_df = pd.Series(Spin, dtype=np.dtype('object'))\nDisc_r = []\nfor d in G['DiscRadii']:\n Disc_r.append(d)\nDisc_df = pd.Series(Disc_r, dtype=np.dtype('object'))\nDisc_gas = []\nfor g in G['DiscGas']:\n Disc_gas.append(g)\nDisc_gas_df = pd.Series(Disc_gas, dtype=np.dtype('object'))\nDisc_stars = []\nfor g in G['DiscStars']:\n Disc_stars.append(g)\nDisc_stars_df = pd.Series(Disc_stars, dtype=np.dtype('object'))\nSpinStars = []\nfor g in G['SpinStars']:\n SpinStars.append(g)\nSpinStars_df = pd.Series(SpinStars, dtype=np.dtype('object'))\nSpinGas = []\nfor g in G['SpinGas']:\n SpinGas.append(g)\nSpinGas_df = pd.Series(SpinGas, dtype=np.dtype('object'))\nSpinClassicalBulge = []\nfor g in G['SpinClassicalBulge']:\n SpinClassicalBulge.append(g)\nSpinClassicalBulge_df = pd.Series(SpinClassicalBulge, dtype=np.dtype('object'))\nDiscHI = []\nfor g in G['DiscHI']:\n DiscHI.append(g)\nDiscHI_df = pd.Series(DiscHI, dtype=np.dtype('object'))\nDiscH2 = []\nfor g in G['DiscH2']:\n DiscH2.append(g)\nDiscH2_df = pd.Series(DiscH2, dtype=np.dtype('object'))\nDiscSFR = []\nfor g in G['DiscSFR']:\n DiscSFR.append(g)\nDiscSFR_df = pd.Series(DiscSFR, dtype=np.dtype('object'))\nDiscGasMetals = []\nfor g in G['DiscGasMetals']:\n DiscGasMetals.append(g)\nDiscGasMetals_df = pd.Series(DiscGasMetals, dtype=np.dtype('object'))\nDiscStarsMetals = []\nfor g in G['DiscStarsMetals']:\n DiscStarsMetals.append(g)\nDiscStarsMetals_df = pd.Series(DiscStarsMetals, dtype=np.dtype('object'))\nDS = pd.DataFrame({'Type': G['Type'], 'GalaxyIndex': G['GalaxyIndex'],\n 'HaloIndex': G['HaloIndex'], 'SimulationHaloIndex': G[\n 'SimulationHaloIndex'], 'TreeIndex': G['TreeIndex'], 'SnapNum': G[\n 'SnapNum'], 'CentralGalaxyIndex': G['CentralGalaxyIndex'],\n 'CentralMvir': G['CentralMvir'], 'mergeType': G['mergeType'],\n 'mergeIntoID': G['mergeIntoID'], 'mergeIntoSnapNum': G[\n 'mergeIntoSnapNum'], 'dT': G['dT'], 'Pos': Pos_df, 'Vel': Vel_df,\n 'Spin': Spin_df, 'Len': G['Len'], 'LenMax': G['LenMax'], 'Mvir': G[\n 'Mvir'], 'Rvir': G['Rvir'], 'Vvir': G['Vvir'], 'Vmax': G['Vmax'],\n 'VelDisp': G['VelDisp'], 'DiscRadii': Disc_df, 'ColdGas': G['ColdGas'],\n 'StellarMass': G['StellarMass'], 'MergerBulgeMass': G['MergerBulgeMass'\n ], 'InstabilityBulgeMass': G['InstabilityBulgeMass'], 'HotGas': G[\n 'HotGas'], 'EjectedMass': G['EjectedMass'], 'BlackHoleMass': G[\n 'BlackHoleMass'], 'IntraClusterStars': G['IntraClusterStars'],\n 'DiscGas': Disc_gas_df, 'DiscStars': Disc_stars_df, 'SpinStars':\n SpinStars_df, 'SpinGas': SpinGas_df, 'SpinClassicalBulge':\n SpinClassicalBulge_df, 'StarsInSitu': G['StarsInSitu'],\n 'StarsInstability': G['StarsInstability'], 'StarsMergeBurst': G[\n 'StarsMergeBurst'], 'DiscHI': DiscHI_df, 'DiscH2': DiscH2_df, 'DiscSFR':\n DiscSFR_df, 'MetalsColdGas': G['MetalsColdGas'], 'MetalsStellarMass': G\n ['MetalsStellarMass'], 'ClassicalMetalsBulgeMass': G[\n 'ClassicalMetalsBulgeMass'], 'SecularMetalsBulgeMass': G[\n 'SecularMetalsBulgeMass'], 'MetalsHotGas': G['MetalsHotGas'],\n 'MetalsEjectedMass': G['MetalsEjectedMass'], 'MetalsIntraClusterStars':\n G['MetalsIntraClusterStars'], 'DiscGasMetals': DiscGasMetals_df,\n 'DiscStarsMetals': DiscStarsMetals_df, 'SfrFromH2': G['SfrFromH2'],\n 'SfrInstab': G['SfrInstab'], 'SfrMergeBurst': G['SfrMergeBurst'],\n 
'SfrDiskZ': G['SfrDiskZ'], 'SfrBulgeZ': G['SfrBulgeZ'],\n 'DiskScaleRadius': G['DiskScaleRadius'], 'CoolScaleRadius': G[\n 'CoolScaleRadius'], 'StellarDiscScaleRadius': G[\n 'StellarDiscScaleRadius'], 'Cooling': G['Cooling'], 'Heating': G[\n 'Heating'], 'LastMajorMerger': G['LastMajorMerger'], 'LastMinorMerger':\n G['LastMinorMerger'], 'OutflowRate': G['OutflowRate'], 'infallMvir': G[\n 'infallMvir'], 'infallVvir': G['infallVvir'], 'infallVmax': G[\n 'infallVmax']})\n",
"step-4": "import pandas as pd\nimport numpy as np\nPos = []\nfor p in G['Pos']:\n Pos.append(p)\nPos_df = pd.Series(Pos, dtype=np.dtype('object'))\nVel = []\nfor v in G['Vel']:\n Vel.append(v)\nVel_df = pd.Series(Vel, dtype=np.dtype('object'))\nSpin = []\nfor s in G['Spin']:\n Spin.append(s)\nSpin_df = pd.Series(Spin, dtype=np.dtype('object'))\nDisc_r = []\nfor d in G['DiscRadii']:\n Disc_r.append(d)\nDisc_df = pd.Series(Disc_r, dtype=np.dtype('object'))\nDisc_gas = []\nfor g in G['DiscGas']:\n Disc_gas.append(g)\nDisc_gas_df = pd.Series(Disc_gas, dtype=np.dtype('object'))\nDisc_stars = []\nfor g in G['DiscStars']:\n Disc_stars.append(g)\nDisc_stars_df = pd.Series(Disc_stars, dtype=np.dtype('object'))\nSpinStars = []\nfor g in G['SpinStars']:\n SpinStars.append(g)\nSpinStars_df = pd.Series(SpinStars, dtype=np.dtype('object'))\nSpinGas = []\nfor g in G['SpinGas']:\n SpinGas.append(g)\nSpinGas_df = pd.Series(SpinGas, dtype=np.dtype('object'))\nSpinClassicalBulge = []\nfor g in G['SpinClassicalBulge']:\n SpinClassicalBulge.append(g)\nSpinClassicalBulge_df = pd.Series(SpinClassicalBulge, dtype=np.dtype('object'))\nDiscHI = []\nfor g in G['DiscHI']:\n DiscHI.append(g)\nDiscHI_df = pd.Series(DiscHI, dtype=np.dtype('object'))\nDiscH2 = []\nfor g in G['DiscH2']:\n DiscH2.append(g)\nDiscH2_df = pd.Series(DiscH2, dtype=np.dtype('object'))\nDiscSFR = []\nfor g in G['DiscSFR']:\n DiscSFR.append(g)\nDiscSFR_df = pd.Series(DiscSFR, dtype=np.dtype('object'))\nDiscGasMetals = []\nfor g in G['DiscGasMetals']:\n DiscGasMetals.append(g)\nDiscGasMetals_df = pd.Series(DiscGasMetals, dtype=np.dtype('object'))\nDiscStarsMetals = []\nfor g in G['DiscStarsMetals']:\n DiscStarsMetals.append(g)\nDiscStarsMetals_df = pd.Series(DiscStarsMetals, dtype=np.dtype('object'))\nDS = pd.DataFrame({'Type': G['Type'], 'GalaxyIndex': G['GalaxyIndex'],\n 'HaloIndex': G['HaloIndex'], 'SimulationHaloIndex': G[\n 'SimulationHaloIndex'], 'TreeIndex': G['TreeIndex'], 'SnapNum': G[\n 'SnapNum'], 'CentralGalaxyIndex': G['CentralGalaxyIndex'],\n 'CentralMvir': G['CentralMvir'], 'mergeType': G['mergeType'],\n 'mergeIntoID': G['mergeIntoID'], 'mergeIntoSnapNum': G[\n 'mergeIntoSnapNum'], 'dT': G['dT'], 'Pos': Pos_df, 'Vel': Vel_df,\n 'Spin': Spin_df, 'Len': G['Len'], 'LenMax': G['LenMax'], 'Mvir': G[\n 'Mvir'], 'Rvir': G['Rvir'], 'Vvir': G['Vvir'], 'Vmax': G['Vmax'],\n 'VelDisp': G['VelDisp'], 'DiscRadii': Disc_df, 'ColdGas': G['ColdGas'],\n 'StellarMass': G['StellarMass'], 'MergerBulgeMass': G['MergerBulgeMass'\n ], 'InstabilityBulgeMass': G['InstabilityBulgeMass'], 'HotGas': G[\n 'HotGas'], 'EjectedMass': G['EjectedMass'], 'BlackHoleMass': G[\n 'BlackHoleMass'], 'IntraClusterStars': G['IntraClusterStars'],\n 'DiscGas': Disc_gas_df, 'DiscStars': Disc_stars_df, 'SpinStars':\n SpinStars_df, 'SpinGas': SpinGas_df, 'SpinClassicalBulge':\n SpinClassicalBulge_df, 'StarsInSitu': G['StarsInSitu'],\n 'StarsInstability': G['StarsInstability'], 'StarsMergeBurst': G[\n 'StarsMergeBurst'], 'DiscHI': DiscHI_df, 'DiscH2': DiscH2_df, 'DiscSFR':\n DiscSFR_df, 'MetalsColdGas': G['MetalsColdGas'], 'MetalsStellarMass': G\n ['MetalsStellarMass'], 'ClassicalMetalsBulgeMass': G[\n 'ClassicalMetalsBulgeMass'], 'SecularMetalsBulgeMass': G[\n 'SecularMetalsBulgeMass'], 'MetalsHotGas': G['MetalsHotGas'],\n 'MetalsEjectedMass': G['MetalsEjectedMass'], 'MetalsIntraClusterStars':\n G['MetalsIntraClusterStars'], 'DiscGasMetals': DiscGasMetals_df,\n 'DiscStarsMetals': DiscStarsMetals_df, 'SfrFromH2': G['SfrFromH2'],\n 'SfrInstab': G['SfrInstab'], 'SfrMergeBurst': 
G['SfrMergeBurst'],\n 'SfrDiskZ': G['SfrDiskZ'], 'SfrBulgeZ': G['SfrBulgeZ'],\n 'DiskScaleRadius': G['DiskScaleRadius'], 'CoolScaleRadius': G[\n 'CoolScaleRadius'], 'StellarDiscScaleRadius': G[\n 'StellarDiscScaleRadius'], 'Cooling': G['Cooling'], 'Heating': G[\n 'Heating'], 'LastMajorMerger': G['LastMajorMerger'], 'LastMinorMerger':\n G['LastMinorMerger'], 'OutflowRate': G['OutflowRate'], 'infallMvir': G[\n 'infallMvir'], 'infallVvir': G['infallVvir'], 'infallVmax': G[\n 'infallVmax']})\n",
"step-5": "#Create Pandas dataframe from the DarkSage output G['']\n\nimport pandas as pd\nimport numpy as np\n\n\n# This is a way to converte multi dimensional data into pd.Series and then load these into the pandas dataframe\nPos = []\nfor p in G['Pos']:\n Pos.append(p)\nPos_df = pd.Series(Pos, dtype=np.dtype(\"object\"))\n\nVel = []\nfor v in G['Vel']:\n Vel.append(v)\nVel_df = pd.Series(Vel, dtype=np.dtype(\"object\"))\n\nSpin = []\nfor s in G['Spin']:\n Spin.append(s)\nSpin_df = pd.Series(Spin, dtype=np.dtype(\"object\"))\n\nDisc_r = []\nfor d in G['DiscRadii']:\n Disc_r.append(d)\nDisc_df = pd.Series(Disc_r, dtype=np.dtype(\"object\"))\n\nDisc_gas = []\nfor g in G['DiscGas']:\n Disc_gas.append(g)\nDisc_gas_df = pd.Series(Disc_gas, dtype=np.dtype(\"object\"))\n\nDisc_stars = []\nfor g in G['DiscStars']:\n Disc_stars.append(g)\nDisc_stars_df = pd.Series(Disc_stars, dtype=np.dtype(\"object\"))\n\nSpinStars = []\nfor g in G['SpinStars']:\n SpinStars.append(g)\nSpinStars_df = pd.Series(SpinStars, dtype=np.dtype(\"object\"))\n\nSpinGas = []\nfor g in G['SpinGas']:\n SpinGas.append(g)\nSpinGas_df = pd.Series(SpinGas , dtype=np.dtype(\"object\"))\n\nSpinClassicalBulge = []\nfor g in G['SpinClassicalBulge']:\n SpinClassicalBulge.append(g)\nSpinClassicalBulge_df = pd.Series(SpinClassicalBulge, dtype=np.dtype(\"object\"))\n\nDiscHI = []\nfor g in G['DiscHI']:\n DiscHI.append(g)\nDiscHI_df = pd.Series(DiscHI, dtype=np.dtype(\"object\"))\n\nDiscH2 = []\nfor g in G['DiscH2']:\n DiscH2.append(g)\nDiscH2_df = pd.Series(DiscH2, dtype=np.dtype(\"object\"))\n\nDiscSFR = []\nfor g in G['DiscSFR']:\n DiscSFR.append(g)\nDiscSFR_df = pd.Series(DiscSFR, dtype=np.dtype(\"object\"))\n\nDiscGasMetals = []\nfor g in G['DiscGasMetals']:\n DiscGasMetals.append(g)\nDiscGasMetals_df = pd.Series(DiscGasMetals, dtype=np.dtype(\"object\"))\n\nDiscStarsMetals = []\nfor g in G['DiscStarsMetals']:\n DiscStarsMetals.append(g)\nDiscStarsMetals_df = pd.Series(DiscStarsMetals, dtype=np.dtype(\"object\"))\n\n\n\n\n######################################\n\n\nDS = pd.DataFrame({'Type' : G['Type' ],\n'GalaxyIndex' : G['GalaxyIndex' ],\n'HaloIndex' : G['HaloIndex' ],\n'SimulationHaloIndex' : G['SimulationHaloIndex' ],\n'TreeIndex' : G['TreeIndex' ],\n'SnapNum' : G['SnapNum' ],\n'CentralGalaxyIndex' : G['CentralGalaxyIndex' ],\n'CentralMvir' : G['CentralMvir' ],\n'mergeType' : G['mergeType' ],\n'mergeIntoID' : G['mergeIntoID' ],\n'mergeIntoSnapNum' : G['mergeIntoSnapNum' ],\n'dT' : G['dT' ],\n'Pos' : Pos_df,\n'Vel' : Vel_df ,\n'Spin' : Spin_df ,\n'Len' : G['Len' ],\n'LenMax' : G['LenMax' ],\n'Mvir' : G['Mvir' ],\n'Rvir' : G['Rvir' ],\n'Vvir' : G['Vvir' ],\n'Vmax' : G['Vmax' ],\n'VelDisp' : G['VelDisp' ],\n'DiscRadii' : Disc_df,\n'ColdGas' : G['ColdGas' ],\n'StellarMass' : G['StellarMass' ],\n'MergerBulgeMass' : G['MergerBulgeMass' ],\n'InstabilityBulgeMass' : G['InstabilityBulgeMass' ],\n'HotGas' : G['HotGas' ],\n'EjectedMass' : G['EjectedMass' ],\n'BlackHoleMass' : G['BlackHoleMass' ],\n'IntraClusterStars' : G['IntraClusterStars' ],\n'DiscGas' : Disc_gas_df,\n'DiscStars' : Disc_stars_df,\n'SpinStars' : SpinStars_df,\n'SpinGas' : SpinGas_df,\n'SpinClassicalBulge' : SpinClassicalBulge_df,\n'StarsInSitu' : G['StarsInSitu' ],\n'StarsInstability' : G['StarsInstability' ],\n'StarsMergeBurst' : G['StarsMergeBurst' ],\n'DiscHI' : DiscHI_df,\n'DiscH2' : DiscH2_df,\n'DiscSFR' : DiscSFR_df,\n'MetalsColdGas' : G['MetalsColdGas' ],\n'MetalsStellarMass' : G['MetalsStellarMass' ],\n'ClassicalMetalsBulgeMass' : G['ClassicalMetalsBulgeMass' 
],\n'SecularMetalsBulgeMass' : G['SecularMetalsBulgeMass' ],\n'MetalsHotGas' : G['MetalsHotGas' ],\n'MetalsEjectedMass' : G['MetalsEjectedMass' ],\n'MetalsIntraClusterStars' : G['MetalsIntraClusterStars' ],\n'DiscGasMetals' : DiscGasMetals_df,\n'DiscStarsMetals' : DiscStarsMetals_df,\n'SfrFromH2' : G['SfrFromH2' ],\n'SfrInstab' : G['SfrInstab' ],\n'SfrMergeBurst' : G['SfrMergeBurst' ],\n'SfrDiskZ' : G['SfrDiskZ' ],\n'SfrBulgeZ' : G['SfrBulgeZ' ],\n'DiskScaleRadius' : G['DiskScaleRadius' ],\n'CoolScaleRadius' : G['CoolScaleRadius' ],\n'StellarDiscScaleRadius' : G['StellarDiscScaleRadius' ],\n'Cooling' : G['Cooling' ],\n'Heating' : G['Heating' ],\n'LastMajorMerger' : G['LastMajorMerger' ],\n'LastMinorMerger' : G['LastMinorMerger' ],\n'OutflowRate' : G['OutflowRate' ],\n'infallMvir' : G['infallMvir' ],\n'infallVvir' : G['infallVvir' ],\n'infallVmax' : G['infallVmax' ]})\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
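The fourteen near-identical loops in the row above can be collapsed; a minimal sketch of a helper producing the same object-dtype Series, assuming G is the loaded DarkSage output:

import numpy as np
import pandas as pd

def as_object_series(field):
    """Wrap each galaxy's array from G[field] in an object-dtype Series."""
    return pd.Series(list(G[field]), dtype=np.dtype("object"))

# e.g. Pos_df = as_object_series('Pos')
#      DiscSFR_df = as_object_series('DiscSFR')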
from django import forms
from .models import Note
class NoteForm(forms.ModelForm):
class Meta:
model = Note
fields = ['title', 'text']
class NoteFullForm(NoteForm):
note_id = forms.IntegerField(required=False)
images = forms.FileField(widget=forms.ClearableFileInput(attrs={
'multiple': True}), required=False)
tags = forms.CharField(max_length=50, required=False)
class Meta(NoteForm.Meta):
fields = NoteForm.Meta.fields + ['images', 'tags', 'note_id']
|
normal
|
{
"blob_id": "e0fd9663a5635873f4ffc0f73aff5106c0933781",
"index": 9180,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass NoteFullForm(NoteForm):\n note_id = forms.IntegerField(required=False)\n images = forms.FileField(widget=forms.ClearableFileInput(attrs={\n 'multiple': True}), required=False)\n tags = forms.CharField(max_length=50, required=False)\n\n\n class Meta(NoteForm.Meta):\n fields = NoteForm.Meta.fields + ['images', 'tags', 'note_id']\n",
"step-3": "<mask token>\n\n\nclass NoteForm(forms.ModelForm):\n\n\n class Meta:\n model = Note\n fields = ['title', 'text']\n\n\nclass NoteFullForm(NoteForm):\n note_id = forms.IntegerField(required=False)\n images = forms.FileField(widget=forms.ClearableFileInput(attrs={\n 'multiple': True}), required=False)\n tags = forms.CharField(max_length=50, required=False)\n\n\n class Meta(NoteForm.Meta):\n fields = NoteForm.Meta.fields + ['images', 'tags', 'note_id']\n",
"step-4": "from django import forms\nfrom .models import Note\n\n\nclass NoteForm(forms.ModelForm):\n\n\n class Meta:\n model = Note\n fields = ['title', 'text']\n\n\nclass NoteFullForm(NoteForm):\n note_id = forms.IntegerField(required=False)\n images = forms.FileField(widget=forms.ClearableFileInput(attrs={\n 'multiple': True}), required=False)\n tags = forms.CharField(max_length=50, required=False)\n\n\n class Meta(NoteForm.Meta):\n fields = NoteForm.Meta.fields + ['images', 'tags', 'note_id']\n",
"step-5": null,
"step-ids": [
0,
2,
3,
4
]
}
|
[
0,
2,
3,
4
] |
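A minimal sketch of handling NoteFullForm in a view; the view function, template path, and redirect target are hypothetical:

from django.shortcuts import redirect, render

def create_note(request):  # hypothetical view name
    form = NoteFullForm(request.POST or None, request.FILES or None)
    if request.method == 'POST' and form.is_valid():
        form.save()  # ModelForm saves the Note fields; the extra fields need custom handling
        return redirect('note_list')  # hypothetical URL name
    return render(request, 'notes/note_form.html', {'form': form})  # hypothetical template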
import pytest
from django_swagger_utils.drf_server.exceptions import NotFound
from unittest.mock import create_autospec
from content_management_portal.constants.enums import TextType
from content_management_portal.interactors.storages.storage_interface \
import StorageInterface
from content_management_portal.interactors.presenters. \
question_presenter_interface import PresenterInterface
from content_management_portal.interactors.question_creation_interactor \
import QuestionCreateInteractor
from content_management_portal.interactors.question_updation_interactor \
import QuestionUpdateInteractor
from content_management_portal.interactors.question_deletion_interactor \
import QuestionDeletionInteractor
class TestQuestionInteractor:
def test_question_create(self,questiondto):
user_id=1
short_title="hello"
content_type="HTML"
content="hi"
storage=create_autospec(StorageInterface)
presenter=create_autospec(PresenterInterface)
interactor = QuestionCreateInteractor(storage=storage,presenter=presenter)
interactor.question_creation(user_id=user_id,short_title=short_title, \
content_type=content_type, content=content)
# Assert
storage.question_creation.assert_called_once_with( \
user_id=user_id,
short_title=short_title,
content_type=content_type,
content=content
)
presenter.get_question_dto_response(questiondto=questiondto)
def test_question_update(self,questiondto):
user_id=1
question_id=1
short_title="hello"
content_type="HTML"
content="hi"
storage=create_autospec(StorageInterface)
presenter=create_autospec(PresenterInterface)
interactor = QuestionUpdateInteractor(storage=storage,presenter=presenter)
interactor.question_updation(user_id=user_id,
short_title=short_title,
content_type=content_type,
content=content,
question_id=question_id
)
# Assert
storage.question_updation.assert_called_once_with( \
user_id=user_id,
short_title=short_title,
content_type=content_type,
content=content,
question_id=question_id
)
presenter.get_question_dto_response(questiondto=questiondto)
def test_question_deletion(self):
# Arrange
question_id=1
storage=create_autospec(StorageInterface)
interactor = QuestionDeletionInteractor(storage=storage)
# Act
interactor.question_deletion(question_id=question_id)
# Assert
storage.question_deletion.assert_called_once_with(question_id=question_id)
|
normal
|
{
"blob_id": "1c66ccb80383feeee96b3fb492ff63be1a67a796",
"index": 5496,
"step-1": "<mask token>\n\n\nclass TestQuestionInteractor:\n\n def test_question_create(self, questiondto):\n user_id = 1\n short_title = 'hello'\n content_type = 'HTML'\n content = 'hi'\n storage = create_autospec(StorageInterface)\n presenter = create_autospec(PresenterInterface)\n interactor = QuestionCreateInteractor(storage=storage, presenter=\n presenter)\n interactor.question_creation(user_id=user_id, short_title=\n short_title, content_type=content_type, content=content)\n storage.question_creation.assert_called_once_with(user_id=user_id,\n short_title=short_title, content_type=content_type, content=content\n )\n presenter.get_question_dto_response(questiondto=questiondto)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestQuestionInteractor:\n\n def test_question_create(self, questiondto):\n user_id = 1\n short_title = 'hello'\n content_type = 'HTML'\n content = 'hi'\n storage = create_autospec(StorageInterface)\n presenter = create_autospec(PresenterInterface)\n interactor = QuestionCreateInteractor(storage=storage, presenter=\n presenter)\n interactor.question_creation(user_id=user_id, short_title=\n short_title, content_type=content_type, content=content)\n storage.question_creation.assert_called_once_with(user_id=user_id,\n short_title=short_title, content_type=content_type, content=content\n )\n presenter.get_question_dto_response(questiondto=questiondto)\n <mask token>\n\n def test_question_deletion(self):\n question_id = 1\n storage = create_autospec(StorageInterface)\n interactor = QuestionDeletionInteractor(storage=storage)\n interactor.question_deletion(question_id=question_id)\n storage.question_deletion.assert_called_once_with(question_id=\n question_id)\n",
"step-3": "<mask token>\n\n\nclass TestQuestionInteractor:\n\n def test_question_create(self, questiondto):\n user_id = 1\n short_title = 'hello'\n content_type = 'HTML'\n content = 'hi'\n storage = create_autospec(StorageInterface)\n presenter = create_autospec(PresenterInterface)\n interactor = QuestionCreateInteractor(storage=storage, presenter=\n presenter)\n interactor.question_creation(user_id=user_id, short_title=\n short_title, content_type=content_type, content=content)\n storage.question_creation.assert_called_once_with(user_id=user_id,\n short_title=short_title, content_type=content_type, content=content\n )\n presenter.get_question_dto_response(questiondto=questiondto)\n\n def test_question_update(self, questiondto):\n user_id = 1\n question_id = 1\n short_title = 'hello'\n content_type = 'HTML'\n content = 'hi'\n storage = create_autospec(StorageInterface)\n presenter = create_autospec(PresenterInterface)\n interactor = QuestionUpdateInteractor(storage=storage, presenter=\n presenter)\n interactor.question_updation(user_id=user_id, short_title=\n short_title, content_type=content_type, content=content,\n question_id=question_id)\n storage.question_updation.assert_called_once_with(user_id=user_id,\n short_title=short_title, content_type=content_type, content=\n content, question_id=question_id)\n presenter.get_question_dto_response(questiondto=questiondto)\n\n def test_question_deletion(self):\n question_id = 1\n storage = create_autospec(StorageInterface)\n interactor = QuestionDeletionInteractor(storage=storage)\n interactor.question_deletion(question_id=question_id)\n storage.question_deletion.assert_called_once_with(question_id=\n question_id)\n",
"step-4": "import pytest\nfrom django_swagger_utils.drf_server.exceptions import NotFound\nfrom unittest.mock import create_autospec\nfrom content_management_portal.constants.enums import TextType\nfrom content_management_portal.interactors.storages.storage_interface import StorageInterface\nfrom content_management_portal.interactors.presenters.question_presenter_interface import PresenterInterface\nfrom content_management_portal.interactors.question_creation_interactor import QuestionCreateInteractor\nfrom content_management_portal.interactors.question_updation_interactor import QuestionUpdateInteractor\nfrom content_management_portal.interactors.question_deletion_interactor import QuestionDeletionInteractor\n\n\nclass TestQuestionInteractor:\n\n def test_question_create(self, questiondto):\n user_id = 1\n short_title = 'hello'\n content_type = 'HTML'\n content = 'hi'\n storage = create_autospec(StorageInterface)\n presenter = create_autospec(PresenterInterface)\n interactor = QuestionCreateInteractor(storage=storage, presenter=\n presenter)\n interactor.question_creation(user_id=user_id, short_title=\n short_title, content_type=content_type, content=content)\n storage.question_creation.assert_called_once_with(user_id=user_id,\n short_title=short_title, content_type=content_type, content=content\n )\n presenter.get_question_dto_response(questiondto=questiondto)\n\n def test_question_update(self, questiondto):\n user_id = 1\n question_id = 1\n short_title = 'hello'\n content_type = 'HTML'\n content = 'hi'\n storage = create_autospec(StorageInterface)\n presenter = create_autospec(PresenterInterface)\n interactor = QuestionUpdateInteractor(storage=storage, presenter=\n presenter)\n interactor.question_updation(user_id=user_id, short_title=\n short_title, content_type=content_type, content=content,\n question_id=question_id)\n storage.question_updation.assert_called_once_with(user_id=user_id,\n short_title=short_title, content_type=content_type, content=\n content, question_id=question_id)\n presenter.get_question_dto_response(questiondto=questiondto)\n\n def test_question_deletion(self):\n question_id = 1\n storage = create_autospec(StorageInterface)\n interactor = QuestionDeletionInteractor(storage=storage)\n interactor.question_deletion(question_id=question_id)\n storage.question_deletion.assert_called_once_with(question_id=\n question_id)\n",
"step-5": "import pytest\nfrom django_swagger_utils.drf_server.exceptions import NotFound\nfrom unittest.mock import create_autospec\n\nfrom content_management_portal.constants.enums import TextType\nfrom content_management_portal.interactors.storages.storage_interface \\\n import StorageInterface\nfrom content_management_portal.interactors.presenters. \\\n question_presenter_interface import PresenterInterface\nfrom content_management_portal.interactors.question_creation_interactor \\\n import QuestionCreateInteractor\nfrom content_management_portal.interactors.question_updation_interactor \\\n import QuestionUpdateInteractor\nfrom content_management_portal.interactors.question_deletion_interactor \\\n import QuestionDeletionInteractor\n\n\nclass TestQuestionInteractor:\n\n def test_question_create(self,questiondto):\n user_id=1\n short_title=\"hello\"\n content_type=\"HTML\"\n content=\"hi\"\n\n storage=create_autospec(StorageInterface)\n presenter=create_autospec(PresenterInterface)\n\n interactor = QuestionCreateInteractor(storage=storage,presenter=presenter)\n interactor.question_creation(user_id=user_id,short_title=short_title, \\\n content_type=content_type, content=content)\n\n # Assert\n storage.question_creation.assert_called_once_with( \\\n user_id=user_id,\n short_title=short_title,\n content_type=content_type,\n content=content\n )\n presenter.get_question_dto_response(questiondto=questiondto)\n\n def test_question_update(self,questiondto):\n user_id=1\n question_id=1\n short_title=\"hello\"\n content_type=\"HTML\"\n content=\"hi\"\n\n storage=create_autospec(StorageInterface)\n presenter=create_autospec(PresenterInterface)\n\n interactor = QuestionUpdateInteractor(storage=storage,presenter=presenter)\n interactor.question_updation(user_id=user_id,\n short_title=short_title,\n content_type=content_type,\n content=content,\n question_id=question_id\n )\n\n\n # Assert\n storage.question_updation.assert_called_once_with( \\\n user_id=user_id,\n short_title=short_title,\n content_type=content_type,\n content=content,\n question_id=question_id\n )\n presenter.get_question_dto_response(questiondto=questiondto)\n \n def test_question_deletion(self):\n\n # Arrange\n question_id=1\n storage=create_autospec(StorageInterface)\n interactor = QuestionDeletionInteractor(storage=storage)\n \n # Act\n interactor.question_deletion(question_id=question_id)\n \n # Assert\n storage.question_deletion.assert_called_once_with(question_id=question_id)\n \n \n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
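The tests above depend on a questiondto fixture that is not shown; a minimal conftest.py sketch using a Mock stand-in, since the real DTO fields are unknown:

import pytest
from unittest.mock import Mock

@pytest.fixture
def questiondto():
    # Hypothetical stand-in; the real DTO class and its fields live
    # elsewhere in content_management_portal and are not shown here.
    return Mock(name="questiondto")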
with open("file.txt", 'r') as fh:
data = fh.readline()
lis= data.split(' ')
my_dict={}
for key in lis:
if key in my_dict.keys():
my_dict[key] += 1
else:
my_dict[key] = 1
print(my_dict)
|
normal
|
{
"blob_id": "8cd582915c5abd96a4ef8a3a5309311f2a73a156",
"index": 460,
"step-1": "<mask token>\n",
"step-2": "with open('file.txt', 'r') as fh:\n data = fh.readline()\n<mask token>\nfor key in lis:\n if key in my_dict.keys():\n my_dict[key] += 1\n else:\n my_dict[key] = 1\nprint(my_dict)\n",
"step-3": "with open('file.txt', 'r') as fh:\n data = fh.readline()\nlis = data.split(' ')\nmy_dict = {}\nfor key in lis:\n if key in my_dict.keys():\n my_dict[key] += 1\n else:\n my_dict[key] = 1\nprint(my_dict)\n",
"step-4": "\n\nwith open(\"file.txt\", 'r') as fh:\n data = fh.readline()\n\nlis= data.split(' ')\nmy_dict={}\n\nfor key in lis:\n if key in my_dict.keys():\n my_dict[key] += 1\n else:\n my_dict[key] = 1\n\nprint(my_dict)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
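The hand-rolled counting loop above can be written in one pass with collections.Counter; a minimal sketch assuming the same file.txt:

from collections import Counter

with open("file.txt", 'r') as fh:
    counts = Counter(fh.read().split())

print(dict(counts))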
from django.apps import AppConfig
class NombreaplicacionConfig(AppConfig):
name = 'nombreAplicacion'
|
normal
|
{
"blob_id": "0c7efa99dc22154f9835b277cba5057b213a28e7",
"index": 2414,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass NombreaplicacionConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass NombreaplicacionConfig(AppConfig):\n name = 'nombreAplicacion'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass NombreaplicacionConfig(AppConfig):\n name = 'nombreAplicacion'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
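For the AppConfig above to take effect the app must be registered in settings; a minimal settings.py excerpt, with the rest of the project assumed:

# settings.py (excerpt); the surrounding project is assumed
INSTALLED_APPS = [
    "django.contrib.contenttypes",
    "django.contrib.auth",
    "nombreAplicacion.apps.NombreaplicacionConfig",  # registers the app via its AppConfig
]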
from rest_framework.pagination import PageNumberPagination
class QuoteListPagination(PageNumberPagination):
page_size = 30
|
normal
|
{
"blob_id": "4245da12eb7f9dd08c863e368efbd0bcf0b8fa04",
"index": 6816,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass QuoteListPagination(PageNumberPagination):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass QuoteListPagination(PageNumberPagination):\n page_size = 30\n",
"step-4": "from rest_framework.pagination import PageNumberPagination\n\n\nclass QuoteListPagination(PageNumberPagination):\n page_size = 30\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
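A minimal sketch of attaching QuoteListPagination to a DRF view; the Quote model and QuoteSerializer are hypothetical names assumed to exist elsewhere in the project:

from rest_framework import generics

class QuoteListView(generics.ListAPIView):
    queryset = Quote.objects.all()          # hypothetical Quote model
    serializer_class = QuoteSerializer      # hypothetical serializer
    pagination_class = QuoteListPagination  # pages of 30 results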
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 19 17:15:58 2021
@author: Professional
"""
#son = int(input("Enter a number: "))
#print(son, "squared is", son*son)
#print(son, "cubed is", son*son*son)
#yosh = int(input("How old are you: "))
#print("You were born in", 2021 - yosh)
a = int(input("Enter the first number: "))
b = int(input("Enter the second number: "))
print("sum ", a + b)
print("difference ", a - b)
print("quotient ", a/b)
print("product ", a*b)
|
normal
|
{
"blob_id": "0d32fe36f71ffb3df56738664c5dbd0b8ae585e3",
"index": 3303,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\"yig'indisi \", a + b)\nprint('ayirmasi ', a - b)\nprint(\"bo'linmasi \", a / b)\nprint(\"ko'paytmasi \", a * b)\n",
"step-3": "<mask token>\na = int(input('Birinchi sonni kiriting: '))\nb = int(input('Ikkinchi sonni kiriting: '))\nprint(\"yig'indisi \", a + b)\nprint('ayirmasi ', a - b)\nprint(\"bo'linmasi \", a / b)\nprint(\"ko'paytmasi \", a * b)\n",
"step-4": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Sep 19 17:15:58 2021\r\n\r\n@author: Professional\r\n\"\"\"\r\n\r\n#son = int(input(\"Biror son kiriting: \") )\r\n#print(son, \"ning kvadrati\", son*son, \"ga teng\")\r\n#print (son, \"ning kubi\", son*son*son, \"ga teng\")\r\n\r\n#yosh = int(input(\"Yoshingiz nechida: \"))\r\n#print(\"Siz\", 2021 - yosh, \"yilda tug'ilgansz\")\r\n\r\na = int(input(\"Birinchi sonni kiriting: \"))\r\nb = int(input(\"Ikkinchi sonni kiriting: \"))\r\nprint(\"yig'indisi \", a + b)\r\nprint(\"ayirmasi \", a - b)\r\nprint(\"bo'linmasi \", a/b)\r\nprint(\"ko'paytmasi \", a*b)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
o = input()  # 'S' prints the sum, 'M' the mean
v = []
s = 0
for i in range(12):  # read a 12x12 matrix, one value per line
    col = []
    for j in range(12):
        col.append(float(input()))
    v.append(col)
a = 1
for i in range(1, 12):  # sum the 66 elements strictly below the main diagonal
    for j in range(a):  # row i contributes its first i columns
        s += v[i][j]
    a += 1
if o == 'S':
    print("%.1f" % s)
if o == 'M':
    print("%.1f" % (s / 66))
|
normal
|
{
"blob_id": "0df20722fba6223c9d4fc9f72bfb399b479db6ac",
"index": 7917,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(12):\n col = []\n for j in range(12):\n col.append(float(input()))\n v.append(col)\n<mask token>\nfor i in range(1, 12):\n for j in range(a):\n s += v[i][j]\n a += 1\nif o == 'S':\n print('%.1f' % s)\nif o == 'M':\n print('%.1f' % (s / 66))\n",
"step-3": "o = input()\nv = []\ns = 0\nfor i in range(12):\n col = []\n for j in range(12):\n col.append(float(input()))\n v.append(col)\na = 1\nfor i in range(1, 12):\n for j in range(a):\n s += v[i][j]\n a += 1\nif o == 'S':\n print('%.1f' % s)\nif o == 'M':\n print('%.1f' % (s / 66))\n",
"step-4": "o = input()\nv = []\ns = 0\nfor i in range(12):\n col = []\n for j in range(12):\n col.append(float(input()))\n v.append(col)\na = 1\nfor i in range(1, 12):\n for j in range(a):\n s += v[i][j]\n a+=1\nif o == 'S':\n print(\"%.1f\"%s)\nif o == 'M':\n print(\"%.1f\"%(s/66))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
my_order = ['spam', 'eggs', 'sausage', 'spam', 'bacon', 'spam']
while 'spam' in my_order:
print("I don't like spam!")
my_order.remove('spam')
print(my_order)
|
normal
|
{
"blob_id": "8e8629dd2d4bb601347694b18d7cb6a94880201d",
"index": 8192,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile 'spam' in my_order:\n print(\"I don't like spam!\")\n my_order.remove('spam')\nprint(my_order)\n",
"step-3": "my_order = ['spam', 'eggs', 'sausage', 'spam', 'bacon', 'spam']\nwhile 'spam' in my_order:\n print(\"I don't like spam!\")\n my_order.remove('spam')\nprint(my_order)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
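Calling remove() inside a while loop rescans the list on every pass; a minimal sketch of the same filtering done in a single pass:

my_order = ['spam', 'eggs', 'sausage', 'spam', 'bacon', 'spam']
my_order = [item for item in my_order if item != 'spam']  # drop every 'spam' in one pass
print(my_order)  # ['eggs', 'sausage', 'bacon']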
import requests
save_result = requests.post('http://localhost:5000/save', json={'value':
'witam'})
print(save_result.text)
read_result = requests.get('http://localhost:5000/read')
print(read_result.text)
|
normal
|
{
"blob_id": "43362c564be0dfbc8f246a0589bcebde245ab7b5",
"index": 7015,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(save_result.text)\n<mask token>\nprint(read_result.text)\n",
"step-3": "<mask token>\nsave_result = requests.post('http://localhost:5000/save', json={'value':\n 'witam'})\nprint(save_result.text)\nread_result = requests.get('http://localhost:5000/read')\nprint(read_result.text)\n",
"step-4": "import requests\nsave_result = requests.post('http://localhost:5000/save', json={'value':\n 'witam'})\nprint(save_result.text)\nread_result = requests.get('http://localhost:5000/read')\nprint(read_result.text)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
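The client above assumes a server exposing /save and /read on port 5000; a minimal Flask sketch of endpoints that would satisfy it (the storage scheme is a guess, since the real server is not shown):

from flask import Flask, jsonify, request

app = Flask(__name__)
saved = []  # naive in-memory storage; the real backend is unknown

@app.route('/save', methods=['POST'])
def save():
    saved.append(request.get_json()['value'])
    return 'saved'

@app.route('/read', methods=['GET'])
def read():
    return jsonify(saved)

if __name__ == '__main__':
    app.run(port=5000)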
providers = {
'provider-1': {
'name': 'provider-1',
'roles': ['licensor', 'producer'],
'description': 'This is a full description of the provider',
'url': 'https://www.provider.com'
},
'provider-2': {
'name': 'provider-2',
'roles': ['licensor'],
'description': 'This is a full description of a second provider',
'url': 'https://www.provider.com/provider-2'
},
'provider-3': {
'name': 'provider-3',
}
}
providers_invalid = {
'provider-invalid': {
'name': 'provider invalid ', 'roles': ['Test'], 'url': 'This is not an url'
},
}
links = {
'link-1': {
'rel': 'describedBy',
'href': 'https://www.example.com/described-by',
'title': 'This is an extra link',
'link_type': 'description'
}
}
links_invalid = {
'link-invalid': {
'rel': 'invalid relation',
'href': 'not a url',
}
}
collections = {
'collection-1': {
'name': 'collection-1',
'description': 'This a collection description',
'title': 'My collection 1',
'license': 'proprietary',
'providers': providers.values(),
'links': links.values()
},
'collection-2': {
'name': 'collection-2',
'description': 'This a second open source collection description',
'title': 'My collection 2',
'license': 'MIT',
'providers': [providers['provider-2']]
},
'collection-3': {
'name': 'collection-3',
'description': 'This a third open source collection description',
'title': 'My collection 3',
'license': 'MIT',
'links': [links['link-1']]
},
'collection-4': {
'name': 'collection-3',
'description': 'This a fourth open source collection description',
'title': 'My collection 4',
'license': 'MIT'
},
'collection-invalid': {
'name': 'collection invalid name',
'description': 45,
'title': 34,
'license': ['proprietary'],
},
'collection-missing-mandatory-fields': {
'name': 'collection-missing-mandatory-fields'
},
'collection-invalid-links': {
'name': 'collection-invalid-link',
'description': 'This is a collection with invalid user link',
'license': 'proprietary',
'links': [links_invalid['link-invalid']]
},
'collection-invalid-providers': {
'name': 'collection-invalid-provider',
'description': 'This is a collection with invalid provider',
'license': 'proprietary',
'providers': providers_invalid.values()
},
}
|
normal
|
{
"blob_id": "7801676df91a7ded6f123113acc62f3955dfe6cb",
"index": 7113,
"step-1": "<mask token>\n",
"step-2": "providers = {'provider-1': {'name': 'provider-1', 'roles': ['licensor',\n 'producer'], 'description':\n 'This is a full description of the provider', 'url':\n 'https://www.provider.com'}, 'provider-2': {'name': 'provider-2',\n 'roles': ['licensor'], 'description':\n 'This is a full description of a second provider', 'url':\n 'https://www.provider.com/provider-2'}, 'provider-3': {'name':\n 'provider-3'}}\nproviders_invalid = {'provider-invalid': {'name': 'provider invalid ',\n 'roles': ['Test'], 'url': 'This is not an url'}}\nlinks = {'link-1': {'rel': 'describedBy', 'href':\n 'https://www.example.com/described-by', 'title':\n 'This is an extra link', 'link_type': 'description'}}\nlinks_invalid = {'link-invalid': {'rel': 'invalid relation', 'href':\n 'not a url'}}\ncollections = {'collection-1': {'name': 'collection-1', 'description':\n 'This a collection description', 'title': 'My collection 1', 'license':\n 'proprietary', 'providers': providers.values(), 'links': links.values()\n }, 'collection-2': {'name': 'collection-2', 'description':\n 'This a second open source collection description', 'title':\n 'My collection 2', 'license': 'MIT', 'providers': [providers[\n 'provider-2']]}, 'collection-3': {'name': 'collection-3', 'description':\n 'This a third open source collection description', 'title':\n 'My collection 3', 'license': 'MIT', 'links': [links['link-1']]},\n 'collection-4': {'name': 'collection-3', 'description':\n 'This a fourth open source collection description', 'title':\n 'My collection 4', 'license': 'MIT'}, 'collection-invalid': {'name':\n 'collection invalid name', 'description': 45, 'title': 34, 'license': [\n 'proprietary']}, 'collection-missing-mandatory-fields': {'name':\n 'collection-missing-mandatory-fields'}, 'collection-invalid-links': {\n 'name': 'collection-invalid-link', 'description':\n 'This is a collection with invalid user link', 'license': 'proprietary',\n 'links': [links_invalid['link-invalid']]},\n 'collection-invalid-providers': {'name': 'collection-invalid-provider',\n 'description': 'This is a collection with invalid provider', 'license':\n 'proprietary', 'providers': providers_invalid.values()}}\n",
"step-3": "providers = {\n 'provider-1': {\n 'name': 'provider-1',\n 'roles': ['licensor', 'producer'],\n 'description': 'This is a full description of the provider',\n 'url': 'https://www.provider.com'\n },\n 'provider-2': {\n 'name': 'provider-2',\n 'roles': ['licensor'],\n 'description': 'This is a full description of a second provider',\n 'url': 'https://www.provider.com/provider-2'\n },\n 'provider-3': {\n 'name': 'provider-3',\n }\n}\n\nproviders_invalid = {\n 'provider-invalid': {\n 'name': 'provider invalid ', 'roles': ['Test'], 'url': 'This is not an url'\n },\n}\n\nlinks = {\n 'link-1': {\n 'rel': 'describedBy',\n 'href': 'https://www.example.com/described-by',\n 'title': 'This is an extra link',\n 'link_type': 'description'\n }\n}\n\nlinks_invalid = {\n 'link-invalid': {\n 'rel': 'invalid relation',\n 'href': 'not a url',\n }\n}\n\ncollections = {\n 'collection-1': {\n 'name': 'collection-1',\n 'description': 'This a collection description',\n 'title': 'My collection 1',\n 'license': 'proprietary',\n 'providers': providers.values(),\n 'links': links.values()\n },\n 'collection-2': {\n 'name': 'collection-2',\n 'description': 'This a second open source collection description',\n 'title': 'My collection 2',\n 'license': 'MIT',\n 'providers': [providers['provider-2']]\n },\n 'collection-3': {\n 'name': 'collection-3',\n 'description': 'This a third open source collection description',\n 'title': 'My collection 3',\n 'license': 'MIT',\n 'links': [links['link-1']]\n },\n 'collection-4': {\n 'name': 'collection-3',\n 'description': 'This a fourth open source collection description',\n 'title': 'My collection 4',\n 'license': 'MIT'\n },\n 'collection-invalid': {\n 'name': 'collection invalid name',\n 'description': 45,\n 'title': 34,\n 'license': ['proprietary'],\n },\n 'collection-missing-mandatory-fields': {\n 'name': 'collection-missing-mandatory-fields'\n },\n 'collection-invalid-links': {\n 'name': 'collection-invalid-link',\n 'description': 'This is a collection with invalid user link',\n 'license': 'proprietary',\n 'links': [links_invalid['link-invalid']]\n },\n 'collection-invalid-providers': {\n 'name': 'collection-invalid-provider',\n 'description': 'This is a collection with invalid provider',\n 'license': 'proprietary',\n 'providers': providers_invalid.values()\n },\n}\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import pygame
from pygame.locals import *
pygame.init()
ttt = pygame.display.set_mode((300,325)) #create the game window
pygame.display.set_caption("Trips-Traps-Trull")
võitja = None
def init_tabel(ttt):
taust = pygame.Surface(ttt.get_size())
taust = taust.convert()
taust.fill((250,250,250))
    #draw the grid lines
    pygame.draw.line (taust, (0,0,0), (100,0), (100,300), 2) #vertical lines
    pygame.draw.line (taust, (0,0,0), (200,0), (200,300), 2)
    pygame.draw.line (taust, (0,0,0), (0,100), (300,100), 2) #horizontal lines
pygame.draw.line (taust, (0,0,0), (0,200), (300,200), 2)
return taust
def näita_tabelit (ttt, tabel):
hetkeseis(tabel)
ttt.blit (tabel, (0,0))
pygame.display.flip()
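#map pixel coordinates to a (row, column) cell in the grid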
def hiire_positsioon_tabelis (Xkoordinaat, Ykoordinaat):
    if (Ykoordinaat < 100): #which row was clicked
rida = 0
elif (Ykoordinaat < 200):
rida = 1
else:
rida = 2
    if (Xkoordinaat < 100): #which column was clicked
veerg = 0
elif (Xkoordinaat < 200):
veerg = 1
else:
veerg = 2
return (rida, veerg)
def klikk_tabelis (tabel): #determine where the click landed
global joonestik, XO
(Xkoordinaat, Ykoordinaat) = pygame.mouse.get_pos()
(rida, veerg) = hiire_positsioon_tabelis (Xkoordinaat, Ykoordinaat)
    if joonestik[rida][veerg] == 'X' or joonestik[rida][veerg] == 'O': #check whether the cell is already taken
        return #cell is already in use
    joonistamine (tabel, rida, veerg, XO) #draw an X or an O
    if (XO == 'X'):
        XO = 'O' #hand the turn over to the other player
    else:
        XO = 'X'
def joonistamine (tabel, tabelirida, tabeliveerg, Tähis):
Xkeskkoht = tabeliveerg * 100 + 50
    #find the centre of the cell
    Ykeskkoht = tabelirida * 100 + 50
    if (Tähis == 'O'): #draw an O
        pygame.draw.circle (tabel, (0,0,0), (Xkeskkoht, Ykeskkoht), 44, 2)
    else:
        pygame.draw.line (tabel, (0,0,0), (Xkeskkoht - 22, Ykeskkoht - 22), (Xkeskkoht + 22, Ykeskkoht + 22), 2)
        #draw an X
        pygame.draw.line (tabel, (0,0,0), (Xkeskkoht + 22, Ykeskkoht - 22), (Xkeskkoht - 22, Ykeskkoht + 22), 2)
    joonestik[tabelirida][tabeliveerg] = Tähis #mark the cell as used
def mängu_võitja(tabel): #checks whether either player has won
global joonestik, võitja
    for rida in range (0, 3): #check the rows
        if joonestik [rida][0] == joonestik[rida][1] == joonestik[rida][2] and joonestik [rida][0] is not None:
            võitja = joonestik[rida][0] #this row won
pygame.draw.line (tabel, (250,0,0), (0, (rida + 1)*100 - 50), (300, (rida + 1)*100 - 50), 2)
break
    for veerg in range (0, 3): #check the columns
        if joonestik[0][veerg] == joonestik[1][veerg] == joonestik[2][veerg] and joonestik[0][veerg] is not None:
            võitja = joonestik[0][veerg] #this column won
pygame.draw.line (tabel, (250,0,0), ((veerg + 1)* 100 - 50, 0), ((veerg + 1)* 100 - 50, 300), 2)
break
    if joonestik[0][0] == joonestik[1][1] == joonestik[2][2] and joonestik[0][0] is not None: #check the diagonals
        võitja = joonestik[0][0] #the left-to-right diagonal won
pygame.draw.line (tabel, (250,0,0), (50, 50), (250, 250), 2)
if joonestik[0][2] == joonestik[1][1] == joonestik[2][0] and joonestik[0][2] is not None:
        võitja = joonestik[0][2] #the right-to-left diagonal won
pygame.draw.line (tabel, (250,0,0), (250, 50), (50, 250), 2)
def hetkeseis (tabel): #show the current state (whose turn / who won)
global XO, võitja
if võitja is None:
sõnum = XO + " käib"
else:
sõnum = võitja + " võitis!"
font = pygame.font.Font(None, 24)
tekst = font.render(sõnum, 1, (0,0,0))
    #copy the message onto the game window
tabel.fill ((250, 250, 250), (0, 300, 300, 25))
tabel.blit (tekst, (10, 300))
XO = 'X' #X starts
joonestik = [ [ None, None, None ], #empty cells
[ None, None, None ],
[ None, None, None ] ]
tabel = init_tabel(ttt)
jooksutab = 1
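#main event loop: runs until the window is closed or someone wins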
while jooksutab == 1:
for event in pygame.event.get():
        if event.type == QUIT:
            jooksutab = 0
        elif event.type == MOUSEBUTTONDOWN:
            klikk_tabelis(tabel)
    mängu_võitja(tabel) #check for a winner after every move
    näita_tabelit(ttt,tabel) #redraw the board
if võitja is not None:
break
|
normal
|
{
"blob_id": "a667c4cb0a30ee67fe982bb96ece6bb75f25f110",
"index": 7084,
"step-1": "<mask token>\n\n\ndef näita_tabelit(ttt, tabel):\n hetkeseis(tabel)\n ttt.blit(tabel, (0, 0))\n pygame.display.flip()\n\n\ndef hiire_positsioon_tabelis(Xkoordinaat, Ykoordinaat):\n if Ykoordinaat < 100:\n rida = 0\n elif Ykoordinaat < 200:\n rida = 1\n else:\n rida = 2\n if Xkoordinaat < 100:\n veerg = 0\n elif Xkoordinaat < 200:\n veerg = 1\n else:\n veerg = 2\n return rida, veerg\n\n\ndef klikk_tabelis(tabel):\n global joonestik, XO\n Xkoordinaat, Ykoordinaat = pygame.mouse.get_pos()\n rida, veerg = hiire_positsioon_tabelis(Xkoordinaat, Ykoordinaat)\n if joonestik[rida][veerg] == 'X' or joonestik[rida][veerg] == 'O':\n return\n joonistamine(tabel, rida, veerg, XO)\n if XO == 'X':\n XO = 'O'\n else:\n XO = 'X'\n\n\ndef joonistamine(tabel, tabelirida, tabeliveerg, Tähis):\n Xkeskkoht = tabeliveerg * 100 + 50\n Ykeskkoht = tabelirida * 100 + 50\n if Tähis == 'O':\n pygame.draw.circle(tabel, (0, 0, 0), (Xkeskkoht, Ykeskkoht), 44, 2)\n else:\n pygame.draw.line(tabel, (0, 0, 0), (Xkeskkoht - 22, Ykeskkoht - 22),\n (Xkeskkoht + 22, Ykeskkoht + 22), 2)\n pygame.draw.line(tabel, (0, 0, 0), (Xkeskkoht + 22, Ykeskkoht - 22),\n (Xkeskkoht - 22, Ykeskkoht + 22), 2)\n joonestik[tabelirida][tabeliveerg] = Tähis\n\n\ndef mängu_võitja(tabel):\n global joonestik, võitja\n for rida in range(0, 3):\n if joonestik[rida][0] == joonestik[rida][1] == joonestik[rida][2\n ] and joonestik[rida][0] is not None:\n võitja = joonestik[rida][0]\n pygame.draw.line(tabel, (250, 0, 0), (0, (rida + 1) * 100 - 50),\n (300, (rida + 1) * 100 - 50), 2)\n break\n for veerg in range(0, 3):\n if joonestik[0][veerg] == joonestik[1][veerg] == joonestik[2][veerg\n ] and joonestik[0][veerg] is not None:\n võitja = joonestik[0][veerg]\n pygame.draw.line(tabel, (250, 0, 0), ((veerg + 1) * 100 - 50, 0\n ), ((veerg + 1) * 100 - 50, 300), 2)\n break\n if joonestik[0][0] == joonestik[1][1] == joonestik[2][2] and joonestik[0][0\n ] is not None:\n võitja = joonestik[0][0]\n pygame.draw.line(tabel, (250, 0, 0), (50, 50), (250, 250), 2)\n if joonestik[0][2] == joonestik[1][1] == joonestik[2][0] and joonestik[0][2\n ] is not None:\n võitja = joonestik[0][2]\n pygame.draw.line(tabel, (250, 0, 0), (250, 50), (50, 250), 2)\n\n\ndef hetkeseis(tabel):\n global XO, võitja\n if võitja is None:\n sõnum = XO + ' käib'\n else:\n sõnum = võitja + ' võitis!'\n font = pygame.font.Font(None, 24)\n tekst = font.render(sõnum, 1, (0, 0, 0))\n tabel.fill((250, 250, 250), (0, 300, 300, 25))\n tabel.blit(tekst, (10, 300))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef init_tabel(ttt):\n taust = pygame.Surface(ttt.get_size())\n taust = taust.convert()\n taust.fill((250, 250, 250))\n pygame.draw.line(taust, (0, 0, 0), (100, 0), (100, 300), 2)\n pygame.draw.line(taust, (0, 0, 0), (200, 0), (200, 300), 2)\n pygame.draw.line(taust, (0, 0, 0), (0, 100), (300, 100), 2)\n pygame.draw.line(taust, (0, 0, 0), (0, 200), (300, 200), 2)\n return taust\n\n\ndef näita_tabelit(ttt, tabel):\n hetkeseis(tabel)\n ttt.blit(tabel, (0, 0))\n pygame.display.flip()\n\n\ndef hiire_positsioon_tabelis(Xkoordinaat, Ykoordinaat):\n if Ykoordinaat < 100:\n rida = 0\n elif Ykoordinaat < 200:\n rida = 1\n else:\n rida = 2\n if Xkoordinaat < 100:\n veerg = 0\n elif Xkoordinaat < 200:\n veerg = 1\n else:\n veerg = 2\n return rida, veerg\n\n\ndef klikk_tabelis(tabel):\n global joonestik, XO\n Xkoordinaat, Ykoordinaat = pygame.mouse.get_pos()\n rida, veerg = hiire_positsioon_tabelis(Xkoordinaat, Ykoordinaat)\n if joonestik[rida][veerg] == 'X' or joonestik[rida][veerg] == 'O':\n return\n joonistamine(tabel, rida, veerg, XO)\n if XO == 'X':\n XO = 'O'\n else:\n XO = 'X'\n\n\ndef joonistamine(tabel, tabelirida, tabeliveerg, Tähis):\n Xkeskkoht = tabeliveerg * 100 + 50\n Ykeskkoht = tabelirida * 100 + 50\n if Tähis == 'O':\n pygame.draw.circle(tabel, (0, 0, 0), (Xkeskkoht, Ykeskkoht), 44, 2)\n else:\n pygame.draw.line(tabel, (0, 0, 0), (Xkeskkoht - 22, Ykeskkoht - 22),\n (Xkeskkoht + 22, Ykeskkoht + 22), 2)\n pygame.draw.line(tabel, (0, 0, 0), (Xkeskkoht + 22, Ykeskkoht - 22),\n (Xkeskkoht - 22, Ykeskkoht + 22), 2)\n joonestik[tabelirida][tabeliveerg] = Tähis\n\n\ndef mängu_võitja(tabel):\n global joonestik, võitja\n for rida in range(0, 3):\n if joonestik[rida][0] == joonestik[rida][1] == joonestik[rida][2\n ] and joonestik[rida][0] is not None:\n võitja = joonestik[rida][0]\n pygame.draw.line(tabel, (250, 0, 0), (0, (rida + 1) * 100 - 50),\n (300, (rida + 1) * 100 - 50), 2)\n break\n for veerg in range(0, 3):\n if joonestik[0][veerg] == joonestik[1][veerg] == joonestik[2][veerg\n ] and joonestik[0][veerg] is not None:\n võitja = joonestik[0][veerg]\n pygame.draw.line(tabel, (250, 0, 0), ((veerg + 1) * 100 - 50, 0\n ), ((veerg + 1) * 100 - 50, 300), 2)\n break\n if joonestik[0][0] == joonestik[1][1] == joonestik[2][2] and joonestik[0][0\n ] is not None:\n võitja = joonestik[0][0]\n pygame.draw.line(tabel, (250, 0, 0), (50, 50), (250, 250), 2)\n if joonestik[0][2] == joonestik[1][1] == joonestik[2][0] and joonestik[0][2\n ] is not None:\n võitja = joonestik[0][2]\n pygame.draw.line(tabel, (250, 0, 0), (250, 50), (50, 250), 2)\n\n\ndef hetkeseis(tabel):\n global XO, võitja\n if võitja is None:\n sõnum = XO + ' käib'\n else:\n sõnum = võitja + ' võitis!'\n font = pygame.font.Font(None, 24)\n tekst = font.render(sõnum, 1, (0, 0, 0))\n tabel.fill((250, 250, 250), (0, 300, 300, 25))\n tabel.blit(tekst, (10, 300))\n\n\n<mask token>\n",
"step-3": "<mask token>\npygame.init()\nttt = pygame.display.set_mode((300, 325))\npygame.display.set_caption = 'Trips-Traps-Trull'\nvõitja = None\n\n\ndef init_tabel(ttt):\n taust = pygame.Surface(ttt.get_size())\n taust = taust.convert()\n taust.fill((250, 250, 250))\n pygame.draw.line(taust, (0, 0, 0), (100, 0), (100, 300), 2)\n pygame.draw.line(taust, (0, 0, 0), (200, 0), (200, 300), 2)\n pygame.draw.line(taust, (0, 0, 0), (0, 100), (300, 100), 2)\n pygame.draw.line(taust, (0, 0, 0), (0, 200), (300, 200), 2)\n return taust\n\n\ndef näita_tabelit(ttt, tabel):\n hetkeseis(tabel)\n ttt.blit(tabel, (0, 0))\n pygame.display.flip()\n\n\ndef hiire_positsioon_tabelis(Xkoordinaat, Ykoordinaat):\n if Ykoordinaat < 100:\n rida = 0\n elif Ykoordinaat < 200:\n rida = 1\n else:\n rida = 2\n if Xkoordinaat < 100:\n veerg = 0\n elif Xkoordinaat < 200:\n veerg = 1\n else:\n veerg = 2\n return rida, veerg\n\n\ndef klikk_tabelis(tabel):\n global joonestik, XO\n Xkoordinaat, Ykoordinaat = pygame.mouse.get_pos()\n rida, veerg = hiire_positsioon_tabelis(Xkoordinaat, Ykoordinaat)\n if joonestik[rida][veerg] == 'X' or joonestik[rida][veerg] == 'O':\n return\n joonistamine(tabel, rida, veerg, XO)\n if XO == 'X':\n XO = 'O'\n else:\n XO = 'X'\n\n\ndef joonistamine(tabel, tabelirida, tabeliveerg, Tähis):\n Xkeskkoht = tabeliveerg * 100 + 50\n Ykeskkoht = tabelirida * 100 + 50\n if Tähis == 'O':\n pygame.draw.circle(tabel, (0, 0, 0), (Xkeskkoht, Ykeskkoht), 44, 2)\n else:\n pygame.draw.line(tabel, (0, 0, 0), (Xkeskkoht - 22, Ykeskkoht - 22),\n (Xkeskkoht + 22, Ykeskkoht + 22), 2)\n pygame.draw.line(tabel, (0, 0, 0), (Xkeskkoht + 22, Ykeskkoht - 22),\n (Xkeskkoht - 22, Ykeskkoht + 22), 2)\n joonestik[tabelirida][tabeliveerg] = Tähis\n\n\ndef mängu_võitja(tabel):\n global joonestik, võitja\n for rida in range(0, 3):\n if joonestik[rida][0] == joonestik[rida][1] == joonestik[rida][2\n ] and joonestik[rida][0] is not None:\n võitja = joonestik[rida][0]\n pygame.draw.line(tabel, (250, 0, 0), (0, (rida + 1) * 100 - 50),\n (300, (rida + 1) * 100 - 50), 2)\n break\n for veerg in range(0, 3):\n if joonestik[0][veerg] == joonestik[1][veerg] == joonestik[2][veerg\n ] and joonestik[0][veerg] is not None:\n võitja = joonestik[0][veerg]\n pygame.draw.line(tabel, (250, 0, 0), ((veerg + 1) * 100 - 50, 0\n ), ((veerg + 1) * 100 - 50, 300), 2)\n break\n if joonestik[0][0] == joonestik[1][1] == joonestik[2][2] and joonestik[0][0\n ] is not None:\n võitja = joonestik[0][0]\n pygame.draw.line(tabel, (250, 0, 0), (50, 50), (250, 250), 2)\n if joonestik[0][2] == joonestik[1][1] == joonestik[2][0] and joonestik[0][2\n ] is not None:\n võitja = joonestik[0][2]\n pygame.draw.line(tabel, (250, 0, 0), (250, 50), (50, 250), 2)\n\n\ndef hetkeseis(tabel):\n global XO, võitja\n if võitja is None:\n sõnum = XO + ' käib'\n else:\n sõnum = võitja + ' võitis!'\n font = pygame.font.Font(None, 24)\n tekst = font.render(sõnum, 1, (0, 0, 0))\n tabel.fill((250, 250, 250), (0, 300, 300, 25))\n tabel.blit(tekst, (10, 300))\n\n\nXO = 'X'\njoonestik = [[None, None, None], [None, None, None], [None, None, None]]\ntabel = init_tabel(ttt)\njooksutab = 1\nwhile jooksutab == 1:\n for event in pygame.event.get():\n if event.type is QUIT:\n jooksutab = 0\n elif event.type is MOUSEBUTTONDOWN:\n klikk_tabelis(tabel)\n mängu_võitja(tabel)\n näita_tabelit(ttt, tabel)\n if võitja is not None:\n break\n",
"step-4": "import pygame\nfrom pygame.locals import *\npygame.init()\nttt = pygame.display.set_mode((300, 325))\npygame.display.set_caption = 'Trips-Traps-Trull'\nvõitja = None\n\n\ndef init_tabel(ttt):\n taust = pygame.Surface(ttt.get_size())\n taust = taust.convert()\n taust.fill((250, 250, 250))\n pygame.draw.line(taust, (0, 0, 0), (100, 0), (100, 300), 2)\n pygame.draw.line(taust, (0, 0, 0), (200, 0), (200, 300), 2)\n pygame.draw.line(taust, (0, 0, 0), (0, 100), (300, 100), 2)\n pygame.draw.line(taust, (0, 0, 0), (0, 200), (300, 200), 2)\n return taust\n\n\ndef näita_tabelit(ttt, tabel):\n hetkeseis(tabel)\n ttt.blit(tabel, (0, 0))\n pygame.display.flip()\n\n\ndef hiire_positsioon_tabelis(Xkoordinaat, Ykoordinaat):\n if Ykoordinaat < 100:\n rida = 0\n elif Ykoordinaat < 200:\n rida = 1\n else:\n rida = 2\n if Xkoordinaat < 100:\n veerg = 0\n elif Xkoordinaat < 200:\n veerg = 1\n else:\n veerg = 2\n return rida, veerg\n\n\ndef klikk_tabelis(tabel):\n global joonestik, XO\n Xkoordinaat, Ykoordinaat = pygame.mouse.get_pos()\n rida, veerg = hiire_positsioon_tabelis(Xkoordinaat, Ykoordinaat)\n if joonestik[rida][veerg] == 'X' or joonestik[rida][veerg] == 'O':\n return\n joonistamine(tabel, rida, veerg, XO)\n if XO == 'X':\n XO = 'O'\n else:\n XO = 'X'\n\n\ndef joonistamine(tabel, tabelirida, tabeliveerg, Tähis):\n Xkeskkoht = tabeliveerg * 100 + 50\n Ykeskkoht = tabelirida * 100 + 50\n if Tähis == 'O':\n pygame.draw.circle(tabel, (0, 0, 0), (Xkeskkoht, Ykeskkoht), 44, 2)\n else:\n pygame.draw.line(tabel, (0, 0, 0), (Xkeskkoht - 22, Ykeskkoht - 22),\n (Xkeskkoht + 22, Ykeskkoht + 22), 2)\n pygame.draw.line(tabel, (0, 0, 0), (Xkeskkoht + 22, Ykeskkoht - 22),\n (Xkeskkoht - 22, Ykeskkoht + 22), 2)\n joonestik[tabelirida][tabeliveerg] = Tähis\n\n\ndef mängu_võitja(tabel):\n global joonestik, võitja\n for rida in range(0, 3):\n if joonestik[rida][0] == joonestik[rida][1] == joonestik[rida][2\n ] and joonestik[rida][0] is not None:\n võitja = joonestik[rida][0]\n pygame.draw.line(tabel, (250, 0, 0), (0, (rida + 1) * 100 - 50),\n (300, (rida + 1) * 100 - 50), 2)\n break\n for veerg in range(0, 3):\n if joonestik[0][veerg] == joonestik[1][veerg] == joonestik[2][veerg\n ] and joonestik[0][veerg] is not None:\n võitja = joonestik[0][veerg]\n pygame.draw.line(tabel, (250, 0, 0), ((veerg + 1) * 100 - 50, 0\n ), ((veerg + 1) * 100 - 50, 300), 2)\n break\n if joonestik[0][0] == joonestik[1][1] == joonestik[2][2] and joonestik[0][0\n ] is not None:\n võitja = joonestik[0][0]\n pygame.draw.line(tabel, (250, 0, 0), (50, 50), (250, 250), 2)\n if joonestik[0][2] == joonestik[1][1] == joonestik[2][0] and joonestik[0][2\n ] is not None:\n võitja = joonestik[0][2]\n pygame.draw.line(tabel, (250, 0, 0), (250, 50), (50, 250), 2)\n\n\ndef hetkeseis(tabel):\n global XO, võitja\n if võitja is None:\n sõnum = XO + ' käib'\n else:\n sõnum = võitja + ' võitis!'\n font = pygame.font.Font(None, 24)\n tekst = font.render(sõnum, 1, (0, 0, 0))\n tabel.fill((250, 250, 250), (0, 300, 300, 25))\n tabel.blit(tekst, (10, 300))\n\n\nXO = 'X'\njoonestik = [[None, None, None], [None, None, None], [None, None, None]]\ntabel = init_tabel(ttt)\njooksutab = 1\nwhile jooksutab == 1:\n for event in pygame.event.get():\n if event.type is QUIT:\n jooksutab = 0\n elif event.type is MOUSEBUTTONDOWN:\n klikk_tabelis(tabel)\n mängu_võitja(tabel)\n näita_tabelit(ttt, tabel)\n if võitja is not None:\n break\n",
"step-5": "import pygame\n\nfrom pygame.locals import *\n\npygame.init()\nttt = pygame.display.set_mode((300,325)) #loome mänguakna\npygame.display.set_caption = (\"Trips-Traps-Trull\")\n\nvõitja = None\n\n\n\ndef init_tabel(ttt):\n taust = pygame.Surface(ttt.get_size())\n taust = taust.convert()\n taust.fill((250,250,250))\n \n #tõmbame jooned\n \n pygame.draw.line (taust, (0,0,0), (100,0), (100,300), 2) #vertikaalsed jooned\n pygame.draw.line (taust, (0,0,0), (200,0), (200,300), 2)\n\n pygame.draw.line (taust, (0,0,0), (0,100), (300,100), 2) #horisontaalsed jooned\n pygame.draw.line (taust, (0,0,0), (0,200), (300,200), 2)\n return taust\n\n\ndef näita_tabelit (ttt, tabel):\n hetkeseis(tabel)\n ttt.blit (tabel, (0,0))\n pygame.display.flip()\n\ndef hiire_positsioon_tabelis (Xkoordinaat, Ykoordinaat):\n if (Ykoordinaat < 100): #millisele reale klikib\n rida = 0\n elif (Ykoordinaat < 200):\n rida = 1\n else:\n rida = 2\n if (Xkoordinaat < 100): #millisele veerule klikib\n veerg = 0\n elif (Xkoordinaat < 200):\n veerg = 1\n else:\n veerg = 2\n return (rida, veerg)\n\ndef klikk_tabelis (tabel): #teeme kindlaks kuhu klikiti\n global joonestik, XO\n\n (Xkoordinaat, Ykoordinaat) = pygame.mouse.get_pos()\n\n (rida, veerg) = hiire_positsioon_tabelis (Xkoordinaat, Ykoordinaat)\n\n if joonestik[rida][veerg] == 'X' or joonestik[rida][veerg] == 'O': #kontrollime kas lahter on kasutusel\n return #lahter on juba kasutusel\n\n joonistamine (tabel, rida, veerg, XO) #joonista X või O\n \n if (XO == 'X'):\n XO = 'O' #käigu üleandmine teisele inimesele\n else:\n XO = 'X'\n\n\ndef joonistamine (tabel, tabelirida, tabeliveerg, Tähis):\n Xkeskkoht = tabeliveerg * 100 + 50\n #leiame keskkoha\n Ykeskkoht = tabelirida * 100 + 50\n\n if (Tähis == 'O'): #joonistame O\n pygame.draw.circle (tabel, (0,0,0), (Xkeskkoht, Ykeskkoht), 44, 2)\n\n else:\n pygame.draw.line (tabel, (0,0,0), (Xkeskkoht - 22, Ykeskkoht - 22), (Xkeskkoht + 22, Ykeskkoht + 22), 2)\n #joonistame X\n pygame.draw.line (tabel, (0,0,0), (Xkeskkoht + 22, Ykeskkoht - 22), (Xkeskkoht - 22, Ykeskkoht + 22), 2)\n\n joonestik[tabelirida][tabeliveerg] = Tähis #märgime lahtri kasutatuks\n\n\ndef mängu_võitja(tabel): #kontrollib, kas kumbki võitis\n global joonestik, võitja\n\n for rida in range (0, 3): #kontrollime ridu\n if joonestik [rida][0] == joonestik[rida][1] == joonestik[rida][2] and joonestik [rida][0] is not None:\n võitja = joonestik[rida][0] #see rida võitis\n pygame.draw.line (tabel, (250,0,0), (0, (rida + 1)*100 - 50), (300, (rida + 1)*100 - 50), 2)\n break\n\n for veerg in range (0, 3): #kontrollime veerge\n if joonestik[0][veerg] == joonestik[1][veerg] == joonestik[2][veerg] and joonestik[0][veerg] is not None:\n võitja = joonestik[0][veerg] #see veerg võitis\n pygame.draw.line (tabel, (250,0,0), ((veerg + 1)* 100 - 50, 0), ((veerg + 1)* 100 - 50, 300), 2)\n break\n\n if joonestik[0][0] == joonestik[1][1] == joonestik[2][2] and joonestik[0][0] is not None: #kontrollime diagonaale\n võitja = joonestik[0][0] #vasakult paremale diagonaal võitis\n pygame.draw.line (tabel, (250,0,0), (50, 50), (250, 250), 2)\n\n if joonestik[0][2] == joonestik[1][1] == joonestik[2][0] and joonestik[0][2] is not None:\n võitja = joonestik[0][2] #paremalt vasakule diagonaal võitis\n pygame.draw.line (tabel, (250,0,0), (250, 50), (50, 250), 2)\n\n\ndef hetkeseis (tabel): #kuva hetkeseis(kelle käik/kes võitis)\n global XO, võitja\n if võitja is None:\n sõnum = XO + \" käib\"\n else:\n sõnum = võitja + \" võitis!\"\n font = pygame.font.Font(None, 24)\n tekst = 
font.render(sõnum, 1, (0,0,0))\n#kopeerime sõnumi mänguaknas\n tabel.fill ((250, 250, 250), (0, 300, 300, 25))\n tabel.blit (tekst, (10, 300))\n\n\nXO = 'X' #X alustab\n\njoonestik = [ [ None, None, None ], #tühjad lahtrid\n\n [ None, None, None ],\n\n [ None, None, None ] ]\n\ntabel = init_tabel(ttt)\njooksutab = 1\nwhile jooksutab == 1:\n for event in pygame.event.get():\n if event.type is QUIT:\n jooksutab = 0\n elif event.type is MOUSEBUTTONDOWN:\n klikk_tabelis(tabel)\n\n mängu_võitja(tabel) #kontrollib võitjat peale igat käiku\n\n näita_tabelit(ttt,tabel) #uuendab mängulauda\n if võitja is not None:\n break\n",
"step-ids": [
6,
7,
9,
10,
11
]
}
|
[
6,
7,
9,
10,
11
] |
#!/usr/bin/python2
import unittest
import luna_utils as luna
import time
API_URL = "com.webos.service.videooutput/"
VERBOSE_LOG = True
SUPPORT_REGISTER = False
SINK_MAIN = "MAIN"
SINK_SUB = "SUB0"
#TODO(ekwang): connecting SUB triggers a HAL error, so only MAIN is tested for now
#SINK_LIST = [SINK_MAIN, SINK_SUB]
SINK_LIST = [SINK_MAIN]
PID1 = "pipeline1"
PID2 = "pipeline2"
PID_LIST = [PID1, PID2]
INPUT_RECT = {'X':0, 'Y':0, 'W':1920, 'H':1080}
OUTPUT_RECT = {'X':400, 'Y':400, 'W':1920, 'H':1080}
#Choose source type VDEC or HDMI for test input
#SOURCE_NAME = "VDEC"
#SOURCE_PORT = 0
SOURCE_NAME = "HDMI"
SOURCE_PORT = 3
SOURCE_WIDTH = 1920
SOURCE_HEIGHT = 1080
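#seconds to keep the picture on screen at the end of the visual tests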
SLEEP_TIME = 1
class TestVideoMethods(luna.TestBase):
def vlog(self, message):
if VERBOSE_LOG:
print(message)
def setUp(self):
self.vlog("setUp")
if SUPPORT_REGISTER:
for pid in PID_LIST:
self.vlog("register " + pid)
luna.call(API_URL + "register", { "context": pid })
self.statusSub = luna.subscribe(API_URL + "getStatus", {"subscribe":True})
def tearDown(self):
self.vlog("tearDown")
for sink in SINK_LIST:
self.vlog("disconnect " + sink)
luna.call(API_URL + "disconnect", { "sink": sink })
if SUPPORT_REGISTER:
for pid in PID_LIST:
self.vlog("unregister " + pid)
luna.call(API_URL + "unregister", { "context": pid })
luna.cancelSubscribe(self.statusSub)
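    #helper wrappers: each issues the luna call and verifies the expected
    #getStatus subscription update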
def connect(self, sink, source, port, pid):
self.vlog("connect " + sink)
self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + "connect",
{ "outputMode": "DISPLAY", "sink": sink, "source": source, "sourcePort": port },
self.statusSub,
{"video":[{"sink": sink, "connectedSource": source, "connectedSourcePort": port}]})
def mute(self, sink, blank):
self.vlog("- Mute" + sink)
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "blankVideo",
{"sink": sink, "blank": blank},
self.statusSub,
{"video":[{"sink": sink, "muted": blank}]})
def disconnect(self, sink, pid):
self.vlog("disconnect " + sink)
self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + "disconnect", { "sink": sink },
self.statusSub,
{"video": [{"sink": sink, "connectedSource": None}]})
def testConnectDisconnect(self):
print("[testConnectDisconnect]")
for source, ports in {"VDEC":[0,1], "HDMI":[0,1,2]}.iteritems():
for port in ports:
for sink in SINK_LIST:
for i in range(3):
self.connect(sink, source, port, "")
self.disconnect(sink, "")
def testDualConnect(self):
print("[testDualConnect]")
self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, "")
if len(SINK_LIST) > 1:
self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + "connect",
{"outputMode": "DISPLAY", "sink": SINK_SUB, "source": SOURCE_NAME, "sourcePort": SOURCE_PORT},
self.statusSub,
{"video": [{"sink": SINK_MAIN, "connectedSource": SOURCE_NAME, "connectedSourcePort": SOURCE_PORT},
{"sink": SINK_SUB, "connectedSource": SOURCE_NAME, "connectedSourcePort": SOURCE_PORT}]})
self.disconnect(SINK_MAIN, "")
if len(SINK_LIST) > 1:
self.disconnect(SINK_SUB, "")
def testMute(self):
print("[testMute]")
for sink in SINK_LIST:
self.connect(sink, SOURCE_NAME, SOURCE_PORT, "")
for blank in [False, True]:
self.mute(sink, blank)
#test different orders of display window and media data
def testSetDisplayWindowAndVideoData(self):
print("[testSetDisplayWindowAndVideoData]")
self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, "")
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "display/setDisplayWindow",
{"sink": SINK_MAIN,
"fullScreen": False,
"sourceInput": {"x":INPUT_RECT['X'], "y":INPUT_RECT['Y'], "width":INPUT_RECT['W'], "height":INPUT_RECT['H']},
"displayOutput": {"x":OUTPUT_RECT['X'], "y":OUTPUT_RECT['Y'], "width":OUTPUT_RECT['W'], "height":OUTPUT_RECT['H']}},
self.statusSub,
{"video":[{"sink": "MAIN",
"fullScreen": False,
"width":0,
"height":0,
"frameRate":0,
"sourceInput": {"x":0, "y":0, "width":0, "height":0}, # no media data yet so can't determine appliedsourceInput yet
"displayOutput": {"x":OUTPUT_RECT['X'], "y":OUTPUT_RECT['Y'], "width":OUTPUT_RECT['W'], "height":OUTPUT_RECT['H']}
}]})
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "setVideoData",
{"sink": SINK_MAIN,
"contentType": "media",
"frameRate":29.5,
"width":SOURCE_WIDTH,
"height":SOURCE_HEIGHT,
"scanType":"progressive",
"adaptive": False},
self.statusSub,
{"video":[{"sink": "MAIN",
"fullScreen": False,
"width":SOURCE_WIDTH,
"height":SOURCE_HEIGHT,
"frameRate":29.5,
"sourceInput": {"x":0, "y":0, "width":SOURCE_WIDTH, "height":SOURCE_HEIGHT},
"displayOutput": {"x":OUTPUT_RECT['X'], "y":OUTPUT_RECT['Y'], "width":OUTPUT_RECT['W'], "height":OUTPUT_RECT['H']}
}]})
self.mute(SINK_MAIN, False)
time.sleep(SLEEP_TIME)
def testSetVideoDataAndDisplayWindow(self):
print("[testSetVideoDataAndDisplayWindow]")
self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, "")
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "setVideoData",
{"sink": SINK_MAIN,
"contentType": "media",
"frameRate":29.5,
"width":SOURCE_WIDTH,
"height":SOURCE_HEIGHT,
"scanType":"progressive",
"adaptive": False},
self.statusSub,
{"video":[{"sink": SINK_MAIN,
"fullScreen": False,
"width":SOURCE_WIDTH,
"height":SOURCE_HEIGHT,
"frameRate":29.5,
"sourceInput": {"x":0, "y":0, "width":0, "height":0},
"displayOutput": {"x":0, "y":0, "width":0, "height":0}
}]})
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "display/setDisplayWindow",
{"sink": "MAIN",
"fullScreen": False,
"sourceInput": {"x":INPUT_RECT['X'], "y":INPUT_RECT['Y'], "width":INPUT_RECT['W'], "height":INPUT_RECT['H']},
"displayOutput": {"x":OUTPUT_RECT['X'], "y":OUTPUT_RECT['Y'], "width":OUTPUT_RECT['W'], "height":OUTPUT_RECT['H']}},
self.statusSub,
{"video":[{"sink": SINK_MAIN,
"fullScreen": False,
"width":SOURCE_WIDTH,
"height":SOURCE_HEIGHT,
"frameRate":29.5,
"sourceInput": {"x":INPUT_RECT['X'], "y":INPUT_RECT['Y'], "width":INPUT_RECT['W'], "height":INPUT_RECT['H']},
"displayOutput": {"x":OUTPUT_RECT['X'], "y":OUTPUT_RECT['Y'], "width":OUTPUT_RECT['W'], "height":OUTPUT_RECT['H']}
}]})
self.mute(SINK_MAIN, False)
time.sleep(SLEEP_TIME)
def testSetFullscreen(self):
print("[testSetFullscreen]")
self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, "")
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "setVideoData",
{"sink": SINK_MAIN,
"contentType": "media",
"frameRate":29.5,
"width":SOURCE_WIDTH,
"height":SOURCE_HEIGHT,
"scanType":"progressive",
"adaptive": False},
self.statusSub,
{"video":[{"sink": SINK_MAIN,
"fullScreen": False,
"width":SOURCE_WIDTH,
"height":SOURCE_HEIGHT,
"frameRate":29.5,
"sourceInput": {"x":0, "y":0, "width":0, "height":0},
"displayOutput": {"x":0, "y":0, "width":0, "height":0}
}]})
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "display/setDisplayWindow",
{"sink": SINK_MAIN,
"fullScreen": True,
"sourceInput": {"x":0, "y":0, "width":SOURCE_WIDTH, "height":SOURCE_HEIGHT}},
self.statusSub,
{"video":[{"sink": SINK_MAIN,
"fullScreen": True,
"width":SOURCE_WIDTH,
"height":SOURCE_HEIGHT,
"frameRate":29.5,
"sourceInput": {"x":0, "y":0, "width":SOURCE_WIDTH, "height":SOURCE_HEIGHT},
"displayOutput": {"x":0, "y":0, "width":3840, "height":2160}
}]})
self.mute(SINK_MAIN, False)
time.sleep(SLEEP_TIME)
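    #test opacity and z-order handling via setCompositing and per-window setDisplayWindow overrides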
def testSetCompositing(self):
print("[testSetCompositing]")
self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, "")
if len(SINK_LIST) > 1:
self.connect(SINK_SUB, SOURCE_NAME, SOURCE_PORT, "")
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "display/setCompositing",
{"composeOrder": [{"sink":SINK_MAIN, "opacity":20, "zOrder":1},
{"sink":SINK_SUB, "opacity":31, "zOrder":0}]},
self.statusSub, {"video":[{"sink": "MAIN", "opacity":20, "zOrder":1}]})
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "display/setDisplayWindow",
{"sink": SINK_MAIN, "fullScreen":True, "opacity":130},
self.statusSub, {"video":[{"sink": SINK_MAIN, "opacity":130, "zOrder":1}]})
if len(SINK_LIST) > 1:
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "display/setDisplayWindow",
{"sink": SINK_SUB, "fullScreen":True, "opacity":200},
self.statusSub, {"video":[{"sink": "SUB0", "opacity":200, "zOrder":0}]})
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "display/setDisplayWindow",
{"sink": SINK_SUB, "fullScreen":True, "opacity":230},
self.statusSub, {"video":[{"sink": "MAIN", "opacity":130, "zOrder":0}, {"sink": "SUB0", "opacity":230, "zOrder":1}]})
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "display/setDisplayWindow",
{"sink": SINK_SUB, "fullScreen":True, "opacity":30, "zOrder": 1},
self.statusSub, {"video":[{"sink": "MAIN", "opacity":130, "zOrder":0}, {"sink": "SUB0", "opacity":30, "zOrder":1}]})
if __name__ == '__main__':
luna.VERBOSE = False
unittest.main()
|
normal
|
{
"blob_id": "27e66b2a03bc626d5babd804e736a4652ba030d5",
"index": 8624,
"step-1": "<mask token>\n\n\nclass TestVideoMethods(luna.TestBase):\n\n def vlog(self, message):\n if VERBOSE_LOG:\n print(message)\n\n def setUp(self):\n self.vlog('setUp')\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog('register ' + pid)\n luna.call(API_URL + 'register', {'context': pid})\n self.statusSub = luna.subscribe(API_URL + 'getStatus', {'subscribe':\n True})\n\n def tearDown(self):\n self.vlog('tearDown')\n for sink in SINK_LIST:\n self.vlog('disconnect ' + sink)\n luna.call(API_URL + 'disconnect', {'sink': sink})\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog('unregister ' + pid)\n luna.call(API_URL + 'unregister', {'context': pid})\n luna.cancelSubscribe(self.statusSub)\n <mask token>\n\n def mute(self, sink, blank):\n self.vlog('- Mute' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'blankVideo', {'sink': sink, 'blank': blank}, self.statusSub, {\n 'video': [{'sink': sink, 'muted': blank}]})\n\n def disconnect(self, sink, pid):\n self.vlog('disconnect ' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'disconnect', {'sink': sink}, self.statusSub, {'video': [{\n 'sink': sink, 'connectedSource': None}]})\n\n def testConnectDisconnect(self):\n print('[testConnectDisconnect]')\n for source, ports in {'VDEC': [0, 1], 'HDMI': [0, 1, 2]}.iteritems():\n for port in ports:\n for sink in SINK_LIST:\n for i in range(3):\n self.connect(sink, source, port, '')\n self.disconnect(sink, '')\n <mask token>\n\n def testMute(self):\n print('[testMute]')\n for sink in SINK_LIST:\n self.connect(sink, SOURCE_NAME, SOURCE_PORT, '')\n for blank in [False, True]:\n self.mute(sink, blank)\n <mask token>\n\n def testSetVideoDataAndDisplayWindow(self):\n print('[testSetVideoDataAndDisplayWindow]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': \n False, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT,\n 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}, 'displayOutput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': 'MAIN', 'fullScreen': \n False, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT[\n 'Y'], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}, self.\n statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': False,\n 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT, 'frameRate': \n 29.5, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT['Y'\n ], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}]})\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetFullscreen(self):\n print('[testSetFullscreen]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': 
SINK_MAIN, 'fullScreen': \n False, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT,\n 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}, 'displayOutput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n True, 'sourceInput': {'x': 0, 'y': 0, 'width': SOURCE_WIDTH,\n 'height': SOURCE_HEIGHT}}, self.statusSub, {'video': [{'sink':\n SINK_MAIN, 'fullScreen': True, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': \n 0, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT},\n 'displayOutput': {'x': 0, 'y': 0, 'width': 3840, 'height': 2160}}]}\n )\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetCompositing(self):\n print('[testSetCompositing]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n if len(SINK_LIST) > 1:\n self.connect(SINK_SUB, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setCompositing', {'composeOrder': [{'sink': SINK_MAIN,\n 'opacity': 20, 'zOrder': 1}, {'sink': SINK_SUB, 'opacity': 31,\n 'zOrder': 0}]}, self.statusSub, {'video': [{'sink': 'MAIN',\n 'opacity': 20, 'zOrder': 1}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n True, 'opacity': 130}, self.statusSub, {'video': [{'sink':\n SINK_MAIN, 'opacity': 130, 'zOrder': 1}]})\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 200}, self.statusSub, {'video': [{'sink':\n 'SUB0', 'opacity': 200, 'zOrder': 0}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 230}, self.statusSub, {'video': [{'sink':\n 'MAIN', 'opacity': 130, 'zOrder': 0}, {'sink': 'SUB0',\n 'opacity': 230, 'zOrder': 1}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 30, 'zOrder': 1}, self.statusSub, {'video':\n [{'sink': 'MAIN', 'opacity': 130, 'zOrder': 0}, {'sink':\n 'SUB0', 'opacity': 30, 'zOrder': 1}]})\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestVideoMethods(luna.TestBase):\n\n def vlog(self, message):\n if VERBOSE_LOG:\n print(message)\n\n def setUp(self):\n self.vlog('setUp')\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog('register ' + pid)\n luna.call(API_URL + 'register', {'context': pid})\n self.statusSub = luna.subscribe(API_URL + 'getStatus', {'subscribe':\n True})\n\n def tearDown(self):\n self.vlog('tearDown')\n for sink in SINK_LIST:\n self.vlog('disconnect ' + sink)\n luna.call(API_URL + 'disconnect', {'sink': sink})\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog('unregister ' + pid)\n luna.call(API_URL + 'unregister', {'context': pid})\n luna.cancelSubscribe(self.statusSub)\n\n def connect(self, sink, source, port, pid):\n self.vlog('connect ' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + 'connect',\n {'outputMode': 'DISPLAY', 'sink': sink, 'source': source,\n 'sourcePort': port}, self.statusSub, {'video': [{'sink': sink,\n 'connectedSource': source, 'connectedSourcePort': port}]})\n\n def mute(self, sink, blank):\n self.vlog('- Mute' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'blankVideo', {'sink': sink, 'blank': blank}, self.statusSub, {\n 'video': [{'sink': sink, 'muted': blank}]})\n\n def disconnect(self, sink, pid):\n self.vlog('disconnect ' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'disconnect', {'sink': sink}, self.statusSub, {'video': [{\n 'sink': sink, 'connectedSource': None}]})\n\n def testConnectDisconnect(self):\n print('[testConnectDisconnect]')\n for source, ports in {'VDEC': [0, 1], 'HDMI': [0, 1, 2]}.iteritems():\n for port in ports:\n for sink in SINK_LIST:\n for i in range(3):\n self.connect(sink, source, port, '')\n self.disconnect(sink, '')\n\n def testDualConnect(self):\n print('[testDualConnect]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'connect', {'outputMode': 'DISPLAY', 'sink': SINK_SUB,\n 'source': SOURCE_NAME, 'sourcePort': SOURCE_PORT}, self.\n statusSub, {'video': [{'sink': SINK_MAIN, 'connectedSource':\n SOURCE_NAME, 'connectedSourcePort': SOURCE_PORT}, {'sink':\n SINK_SUB, 'connectedSource': SOURCE_NAME,\n 'connectedSourcePort': SOURCE_PORT}]})\n self.disconnect(SINK_MAIN, '')\n if len(SINK_LIST) > 1:\n self.disconnect(SINK_SUB, '')\n\n def testMute(self):\n print('[testMute]')\n for sink in SINK_LIST:\n self.connect(sink, SOURCE_NAME, SOURCE_PORT, '')\n for blank in [False, True]:\n self.mute(sink, blank)\n\n def testSetDisplayWindowAndVideoData(self):\n print('[testSetDisplayWindowAndVideoData]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n False, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT[\n 'Y'], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}, self.\n statusSub, {'video': [{'sink': 'MAIN', 'fullScreen': False,\n 'width': 0, 'height': 0, 'frameRate': 0, 'sourceInput': {'x': 0,\n 'y': 0, 'width': 0, 'height': 0}, 'displayOutput': {'x':\n OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'], 'width': OUTPUT_RECT[\n 'W'], 'height': OUTPUT_RECT['H']}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 
'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': 'MAIN', 'fullScreen': False,\n 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT, 'frameRate': \n 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': SOURCE_WIDTH,\n 'height': SOURCE_HEIGHT}, 'displayOutput': {'x': OUTPUT_RECT[\n 'X'], 'y': OUTPUT_RECT['Y'], 'width': OUTPUT_RECT['W'],\n 'height': OUTPUT_RECT['H']}}]})\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetVideoDataAndDisplayWindow(self):\n print('[testSetVideoDataAndDisplayWindow]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': \n False, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT,\n 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}, 'displayOutput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': 'MAIN', 'fullScreen': \n False, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT[\n 'Y'], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}, self.\n statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': False,\n 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT, 'frameRate': \n 29.5, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT['Y'\n ], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}]})\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetFullscreen(self):\n print('[testSetFullscreen]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': \n False, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT,\n 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}, 'displayOutput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n True, 'sourceInput': {'x': 0, 'y': 0, 'width': SOURCE_WIDTH,\n 'height': SOURCE_HEIGHT}}, self.statusSub, {'video': [{'sink':\n SINK_MAIN, 'fullScreen': True, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': \n 0, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT},\n 'displayOutput': {'x': 0, 'y': 0, 'width': 3840, 'height': 2160}}]}\n )\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetCompositing(self):\n print('[testSetCompositing]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n if len(SINK_LIST) > 1:\n self.connect(SINK_SUB, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setCompositing', {'composeOrder': [{'sink': SINK_MAIN,\n 'opacity': 20, 'zOrder': 1}, {'sink': SINK_SUB, 'opacity': 
31,\n 'zOrder': 0}]}, self.statusSub, {'video': [{'sink': 'MAIN',\n 'opacity': 20, 'zOrder': 1}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n True, 'opacity': 130}, self.statusSub, {'video': [{'sink':\n SINK_MAIN, 'opacity': 130, 'zOrder': 1}]})\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 200}, self.statusSub, {'video': [{'sink':\n 'SUB0', 'opacity': 200, 'zOrder': 0}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 230}, self.statusSub, {'video': [{'sink':\n 'MAIN', 'opacity': 130, 'zOrder': 0}, {'sink': 'SUB0',\n 'opacity': 230, 'zOrder': 1}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 30, 'zOrder': 1}, self.statusSub, {'video':\n [{'sink': 'MAIN', 'opacity': 130, 'zOrder': 0}, {'sink':\n 'SUB0', 'opacity': 30, 'zOrder': 1}]})\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestVideoMethods(luna.TestBase):\n\n def vlog(self, message):\n if VERBOSE_LOG:\n print(message)\n\n def setUp(self):\n self.vlog('setUp')\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog('register ' + pid)\n luna.call(API_URL + 'register', {'context': pid})\n self.statusSub = luna.subscribe(API_URL + 'getStatus', {'subscribe':\n True})\n\n def tearDown(self):\n self.vlog('tearDown')\n for sink in SINK_LIST:\n self.vlog('disconnect ' + sink)\n luna.call(API_URL + 'disconnect', {'sink': sink})\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog('unregister ' + pid)\n luna.call(API_URL + 'unregister', {'context': pid})\n luna.cancelSubscribe(self.statusSub)\n\n def connect(self, sink, source, port, pid):\n self.vlog('connect ' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + 'connect',\n {'outputMode': 'DISPLAY', 'sink': sink, 'source': source,\n 'sourcePort': port}, self.statusSub, {'video': [{'sink': sink,\n 'connectedSource': source, 'connectedSourcePort': port}]})\n\n def mute(self, sink, blank):\n self.vlog('- Mute' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'blankVideo', {'sink': sink, 'blank': blank}, self.statusSub, {\n 'video': [{'sink': sink, 'muted': blank}]})\n\n def disconnect(self, sink, pid):\n self.vlog('disconnect ' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'disconnect', {'sink': sink}, self.statusSub, {'video': [{\n 'sink': sink, 'connectedSource': None}]})\n\n def testConnectDisconnect(self):\n print('[testConnectDisconnect]')\n for source, ports in {'VDEC': [0, 1], 'HDMI': [0, 1, 2]}.iteritems():\n for port in ports:\n for sink in SINK_LIST:\n for i in range(3):\n self.connect(sink, source, port, '')\n self.disconnect(sink, '')\n\n def testDualConnect(self):\n print('[testDualConnect]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'connect', {'outputMode': 'DISPLAY', 'sink': SINK_SUB,\n 'source': SOURCE_NAME, 'sourcePort': SOURCE_PORT}, self.\n statusSub, {'video': [{'sink': SINK_MAIN, 'connectedSource':\n SOURCE_NAME, 'connectedSourcePort': SOURCE_PORT}, {'sink':\n SINK_SUB, 'connectedSource': SOURCE_NAME,\n 'connectedSourcePort': SOURCE_PORT}]})\n self.disconnect(SINK_MAIN, '')\n if len(SINK_LIST) > 1:\n self.disconnect(SINK_SUB, '')\n\n def testMute(self):\n print('[testMute]')\n for sink in SINK_LIST:\n self.connect(sink, SOURCE_NAME, SOURCE_PORT, '')\n for blank in [False, True]:\n self.mute(sink, blank)\n\n def testSetDisplayWindowAndVideoData(self):\n print('[testSetDisplayWindowAndVideoData]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n False, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT[\n 'Y'], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}, self.\n statusSub, {'video': [{'sink': 'MAIN', 'fullScreen': False,\n 'width': 0, 'height': 0, 'frameRate': 0, 'sourceInput': {'x': 0,\n 'y': 0, 'width': 0, 'height': 0}, 'displayOutput': {'x':\n OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'], 'width': OUTPUT_RECT[\n 'W'], 'height': OUTPUT_RECT['H']}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 
'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': 'MAIN', 'fullScreen': False,\n 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT, 'frameRate': \n 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': SOURCE_WIDTH,\n 'height': SOURCE_HEIGHT}, 'displayOutput': {'x': OUTPUT_RECT[\n 'X'], 'y': OUTPUT_RECT['Y'], 'width': OUTPUT_RECT['W'],\n 'height': OUTPUT_RECT['H']}}]})\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetVideoDataAndDisplayWindow(self):\n print('[testSetVideoDataAndDisplayWindow]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': \n False, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT,\n 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}, 'displayOutput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': 'MAIN', 'fullScreen': \n False, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT[\n 'Y'], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}, self.\n statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': False,\n 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT, 'frameRate': \n 29.5, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT['Y'\n ], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}]})\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetFullscreen(self):\n print('[testSetFullscreen]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': \n False, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT,\n 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}, 'displayOutput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n True, 'sourceInput': {'x': 0, 'y': 0, 'width': SOURCE_WIDTH,\n 'height': SOURCE_HEIGHT}}, self.statusSub, {'video': [{'sink':\n SINK_MAIN, 'fullScreen': True, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': \n 0, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT},\n 'displayOutput': {'x': 0, 'y': 0, 'width': 3840, 'height': 2160}}]}\n )\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetCompositing(self):\n print('[testSetCompositing]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n if len(SINK_LIST) > 1:\n self.connect(SINK_SUB, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setCompositing', {'composeOrder': [{'sink': SINK_MAIN,\n 'opacity': 20, 'zOrder': 1}, {'sink': SINK_SUB, 'opacity': 
31,\n 'zOrder': 0}]}, self.statusSub, {'video': [{'sink': 'MAIN',\n 'opacity': 20, 'zOrder': 1}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n True, 'opacity': 130}, self.statusSub, {'video': [{'sink':\n SINK_MAIN, 'opacity': 130, 'zOrder': 1}]})\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 200}, self.statusSub, {'video': [{'sink':\n 'SUB0', 'opacity': 200, 'zOrder': 0}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 230}, self.statusSub, {'video': [{'sink':\n 'MAIN', 'opacity': 130, 'zOrder': 0}, {'sink': 'SUB0',\n 'opacity': 230, 'zOrder': 1}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 30, 'zOrder': 1}, self.statusSub, {'video':\n [{'sink': 'MAIN', 'opacity': 130, 'zOrder': 0}, {'sink':\n 'SUB0', 'opacity': 30, 'zOrder': 1}]})\n\n\nif __name__ == '__main__':\n luna.VERBOSE = False\n unittest.main()\n",
"step-4": "import unittest\nimport luna_utils as luna\nimport time\nAPI_URL = 'com.webos.service.videooutput/'\nVERBOSE_LOG = True\nSUPPORT_REGISTER = False\nSINK_MAIN = 'MAIN'\nSINK_SUB = 'SUB0'\nSINK_LIST = [SINK_MAIN]\nPID1 = 'pipeline1'\nPID2 = 'pipeline2'\nPID_LIST = [PID1, PID2]\nINPUT_RECT = {'X': 0, 'Y': 0, 'W': 1920, 'H': 1080}\nOUTPUT_RECT = {'X': 400, 'Y': 400, 'W': 1920, 'H': 1080}\nSOURCE_NAME = 'HDMI'\nSOURCE_PORT = 3\nSOURCE_WIDTH = 1920\nSOURCE_HEIGHT = 1080\nSLEEP_TIME = 1\n\n\nclass TestVideoMethods(luna.TestBase):\n\n def vlog(self, message):\n if VERBOSE_LOG:\n print(message)\n\n def setUp(self):\n self.vlog('setUp')\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog('register ' + pid)\n luna.call(API_URL + 'register', {'context': pid})\n self.statusSub = luna.subscribe(API_URL + 'getStatus', {'subscribe':\n True})\n\n def tearDown(self):\n self.vlog('tearDown')\n for sink in SINK_LIST:\n self.vlog('disconnect ' + sink)\n luna.call(API_URL + 'disconnect', {'sink': sink})\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog('unregister ' + pid)\n luna.call(API_URL + 'unregister', {'context': pid})\n luna.cancelSubscribe(self.statusSub)\n\n def connect(self, sink, source, port, pid):\n self.vlog('connect ' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + 'connect',\n {'outputMode': 'DISPLAY', 'sink': sink, 'source': source,\n 'sourcePort': port}, self.statusSub, {'video': [{'sink': sink,\n 'connectedSource': source, 'connectedSourcePort': port}]})\n\n def mute(self, sink, blank):\n self.vlog('- Mute' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'blankVideo', {'sink': sink, 'blank': blank}, self.statusSub, {\n 'video': [{'sink': sink, 'muted': blank}]})\n\n def disconnect(self, sink, pid):\n self.vlog('disconnect ' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'disconnect', {'sink': sink}, self.statusSub, {'video': [{\n 'sink': sink, 'connectedSource': None}]})\n\n def testConnectDisconnect(self):\n print('[testConnectDisconnect]')\n for source, ports in {'VDEC': [0, 1], 'HDMI': [0, 1, 2]}.iteritems():\n for port in ports:\n for sink in SINK_LIST:\n for i in range(3):\n self.connect(sink, source, port, '')\n self.disconnect(sink, '')\n\n def testDualConnect(self):\n print('[testDualConnect]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'connect', {'outputMode': 'DISPLAY', 'sink': SINK_SUB,\n 'source': SOURCE_NAME, 'sourcePort': SOURCE_PORT}, self.\n statusSub, {'video': [{'sink': SINK_MAIN, 'connectedSource':\n SOURCE_NAME, 'connectedSourcePort': SOURCE_PORT}, {'sink':\n SINK_SUB, 'connectedSource': SOURCE_NAME,\n 'connectedSourcePort': SOURCE_PORT}]})\n self.disconnect(SINK_MAIN, '')\n if len(SINK_LIST) > 1:\n self.disconnect(SINK_SUB, '')\n\n def testMute(self):\n print('[testMute]')\n for sink in SINK_LIST:\n self.connect(sink, SOURCE_NAME, SOURCE_PORT, '')\n for blank in [False, True]:\n self.mute(sink, blank)\n\n def testSetDisplayWindowAndVideoData(self):\n print('[testSetDisplayWindowAndVideoData]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n False, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT[\n 'Y'], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 
'height': OUTPUT_RECT['H']}}, self.\n statusSub, {'video': [{'sink': 'MAIN', 'fullScreen': False,\n 'width': 0, 'height': 0, 'frameRate': 0, 'sourceInput': {'x': 0,\n 'y': 0, 'width': 0, 'height': 0}, 'displayOutput': {'x':\n OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'], 'width': OUTPUT_RECT[\n 'W'], 'height': OUTPUT_RECT['H']}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': 'MAIN', 'fullScreen': False,\n 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT, 'frameRate': \n 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': SOURCE_WIDTH,\n 'height': SOURCE_HEIGHT}, 'displayOutput': {'x': OUTPUT_RECT[\n 'X'], 'y': OUTPUT_RECT['Y'], 'width': OUTPUT_RECT['W'],\n 'height': OUTPUT_RECT['H']}}]})\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetVideoDataAndDisplayWindow(self):\n print('[testSetVideoDataAndDisplayWindow]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': \n False, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT,\n 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}, 'displayOutput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': 'MAIN', 'fullScreen': \n False, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT[\n 'Y'], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}, self.\n statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': False,\n 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT, 'frameRate': \n 29.5, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT['Y'\n ], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}]})\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetFullscreen(self):\n print('[testSetFullscreen]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': \n False, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT,\n 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}, 'displayOutput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n True, 'sourceInput': {'x': 0, 'y': 0, 'width': SOURCE_WIDTH,\n 'height': SOURCE_HEIGHT}}, self.statusSub, {'video': [{'sink':\n SINK_MAIN, 'fullScreen': True, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': \n 0, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT},\n 'displayOutput': {'x': 0, 'y': 0, 'width': 
3840, 'height': 2160}}]}\n )\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetCompositing(self):\n print('[testSetCompositing]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n if len(SINK_LIST) > 1:\n self.connect(SINK_SUB, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setCompositing', {'composeOrder': [{'sink': SINK_MAIN,\n 'opacity': 20, 'zOrder': 1}, {'sink': SINK_SUB, 'opacity': 31,\n 'zOrder': 0}]}, self.statusSub, {'video': [{'sink': 'MAIN',\n 'opacity': 20, 'zOrder': 1}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n True, 'opacity': 130}, self.statusSub, {'video': [{'sink':\n SINK_MAIN, 'opacity': 130, 'zOrder': 1}]})\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 200}, self.statusSub, {'video': [{'sink':\n 'SUB0', 'opacity': 200, 'zOrder': 0}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 230}, self.statusSub, {'video': [{'sink':\n 'MAIN', 'opacity': 130, 'zOrder': 0}, {'sink': 'SUB0',\n 'opacity': 230, 'zOrder': 1}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 30, 'zOrder': 1}, self.statusSub, {'video':\n [{'sink': 'MAIN', 'opacity': 130, 'zOrder': 0}, {'sink':\n 'SUB0', 'opacity': 30, 'zOrder': 1}]})\n\n\nif __name__ == '__main__':\n luna.VERBOSE = False\n unittest.main()\n",
"step-5": "#!/usr/bin/python2\nimport unittest\nimport luna_utils as luna\nimport time\n\nAPI_URL = \"com.webos.service.videooutput/\"\n\nVERBOSE_LOG = True\nSUPPORT_REGISTER = False\n\nSINK_MAIN = \"MAIN\"\nSINK_SUB = \"SUB0\"\n\n#TODO(ekwang): If you connect SUB, HAL error occurs. Just test MAIN in the current state\n#SINK_LIST = [SINK_MAIN, SINK_SUB]\nSINK_LIST = [SINK_MAIN]\n\nPID1 = \"pipeline1\"\nPID2 = \"pipeline2\"\n\nPID_LIST = [PID1, PID2]\n\nINPUT_RECT = {'X':0, 'Y':0, 'W':1920, 'H':1080}\nOUTPUT_RECT = {'X':400, 'Y':400, 'W':1920, 'H':1080}\n\n#Choose source type VDEC or HDMI for test input\n#SOURCE_NAME = SOURCE_NAME\n#SOURCE_PORT = 0\nSOURCE_NAME = \"HDMI\"\nSOURCE_PORT = 3\n\nSOURCE_WIDTH = 1920\nSOURCE_HEIGHT = 1080\n\nSLEEP_TIME = 1\n\nclass TestVideoMethods(luna.TestBase):\n\n def vlog(self, message):\n if VERBOSE_LOG:\n print(message)\n\n def setUp(self):\n self.vlog(\"setUp\")\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog(\"register \" + pid)\n luna.call(API_URL + \"register\", { \"context\": pid })\n\n self.statusSub = luna.subscribe(API_URL + \"getStatus\", {\"subscribe\":True})\n\n def tearDown(self):\n self.vlog(\"tearDown\")\n for sink in SINK_LIST:\n self.vlog(\"disconnect \" + sink)\n luna.call(API_URL + \"disconnect\", { \"sink\": sink })\n\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog(\"unregister \" + pid)\n luna.call(API_URL + \"unregister\", { \"context\": pid })\n\n luna.cancelSubscribe(self.statusSub)\n\n def connect(self, sink, source, port, pid):\n self.vlog(\"connect \" + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + \"connect\",\n { \"outputMode\": \"DISPLAY\", \"sink\": sink, \"source\": source, \"sourcePort\": port },\n self.statusSub,\n {\"video\":[{\"sink\": sink, \"connectedSource\": source, \"connectedSourcePort\": port}]})\n\n def mute(self, sink, blank):\n self.vlog(\"- Mute\" + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"blankVideo\",\n {\"sink\": sink, \"blank\": blank},\n self.statusSub,\n {\"video\":[{\"sink\": sink, \"muted\": blank}]})\n\n def disconnect(self, sink, pid):\n self.vlog(\"disconnect \" + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + \"disconnect\", { \"sink\": sink },\n self.statusSub,\n {\"video\": [{\"sink\": sink, \"connectedSource\": None}]})\n\n def testConnectDisconnect(self):\n print(\"[testConnectDisconnect]\")\n for source, ports in {\"VDEC\":[0,1], \"HDMI\":[0,1,2]}.iteritems():\n for port in ports:\n for sink in SINK_LIST:\n for i in range(3):\n self.connect(sink, source, port, \"\")\n self.disconnect(sink, \"\")\n\n def testDualConnect(self):\n print(\"[testDualConnect]\")\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, \"\")\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + \"connect\",\n {\"outputMode\": \"DISPLAY\", \"sink\": SINK_SUB, \"source\": SOURCE_NAME, \"sourcePort\": SOURCE_PORT},\n self.statusSub,\n {\"video\": [{\"sink\": SINK_MAIN, \"connectedSource\": SOURCE_NAME, \"connectedSourcePort\": SOURCE_PORT},\n {\"sink\": SINK_SUB, \"connectedSource\": SOURCE_NAME, \"connectedSourcePort\": SOURCE_PORT}]})\n\n self.disconnect(SINK_MAIN, \"\")\n if len(SINK_LIST) > 1:\n self.disconnect(SINK_SUB, \"\")\n\n def testMute(self):\n print(\"[testMute]\")\n for sink in SINK_LIST:\n self.connect(sink, SOURCE_NAME, SOURCE_PORT, \"\")\n\n for blank in [False, True]:\n self.mute(sink, blank)\n\n #test different orders of display window and media data\n\n def 
testSetDisplayWindowAndVideoData(self):\n print(\"[testSetDisplayWindowAndVideoData]\")\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, \"\")\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"display/setDisplayWindow\",\n {\"sink\": SINK_MAIN,\n \"fullScreen\": False,\n \"sourceInput\": {\"x\":INPUT_RECT['X'], \"y\":INPUT_RECT['Y'], \"width\":INPUT_RECT['W'], \"height\":INPUT_RECT['H']},\n \"displayOutput\": {\"x\":OUTPUT_RECT['X'], \"y\":OUTPUT_RECT['Y'], \"width\":OUTPUT_RECT['W'], \"height\":OUTPUT_RECT['H']}},\n self.statusSub,\n {\"video\":[{\"sink\": \"MAIN\",\n \"fullScreen\": False,\n \"width\":0,\n \"height\":0,\n \"frameRate\":0,\n \"sourceInput\": {\"x\":0, \"y\":0, \"width\":0, \"height\":0}, # no media data yet so can't determine appliedsourceInput yet\n \"displayOutput\": {\"x\":OUTPUT_RECT['X'], \"y\":OUTPUT_RECT['Y'], \"width\":OUTPUT_RECT['W'], \"height\":OUTPUT_RECT['H']}\n }]})\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"setVideoData\",\n {\"sink\": SINK_MAIN,\n \"contentType\": \"media\",\n \"frameRate\":29.5,\n \"width\":SOURCE_WIDTH,\n \"height\":SOURCE_HEIGHT,\n \"scanType\":\"progressive\",\n \"adaptive\": False},\n self.statusSub,\n {\"video\":[{\"sink\": \"MAIN\",\n \"fullScreen\": False,\n \"width\":SOURCE_WIDTH,\n \"height\":SOURCE_HEIGHT,\n \"frameRate\":29.5,\n \"sourceInput\": {\"x\":0, \"y\":0, \"width\":SOURCE_WIDTH, \"height\":SOURCE_HEIGHT},\n \"displayOutput\": {\"x\":OUTPUT_RECT['X'], \"y\":OUTPUT_RECT['Y'], \"width\":OUTPUT_RECT['W'], \"height\":OUTPUT_RECT['H']}\n }]})\n\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetVideoDataAndDisplayWindow(self):\n print(\"[testSetVideoDataAndDisplayWindow]\")\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, \"\")\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"setVideoData\",\n {\"sink\": SINK_MAIN,\n \"contentType\": \"media\",\n \"frameRate\":29.5,\n \"width\":SOURCE_WIDTH,\n \"height\":SOURCE_HEIGHT,\n \"scanType\":\"progressive\",\n \"adaptive\": False},\n self.statusSub,\n {\"video\":[{\"sink\": SINK_MAIN,\n \"fullScreen\": False,\n \"width\":SOURCE_WIDTH,\n \"height\":SOURCE_HEIGHT,\n \"frameRate\":29.5,\n \"sourceInput\": {\"x\":0, \"y\":0, \"width\":0, \"height\":0},\n \"displayOutput\": {\"x\":0, \"y\":0, \"width\":0, \"height\":0}\n }]})\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"display/setDisplayWindow\",\n {\"sink\": \"MAIN\",\n \"fullScreen\": False,\n \"sourceInput\": {\"x\":INPUT_RECT['X'], \"y\":INPUT_RECT['Y'], \"width\":INPUT_RECT['W'], \"height\":INPUT_RECT['H']},\n \"displayOutput\": {\"x\":OUTPUT_RECT['X'], \"y\":OUTPUT_RECT['Y'], \"width\":OUTPUT_RECT['W'], \"height\":OUTPUT_RECT['H']}},\n self.statusSub,\n {\"video\":[{\"sink\": SINK_MAIN,\n \"fullScreen\": False,\n \"width\":SOURCE_WIDTH,\n \"height\":SOURCE_HEIGHT,\n \"frameRate\":29.5,\n \"sourceInput\": {\"x\":INPUT_RECT['X'], \"y\":INPUT_RECT['Y'], \"width\":INPUT_RECT['W'], \"height\":INPUT_RECT['H']},\n \"displayOutput\": {\"x\":OUTPUT_RECT['X'], \"y\":OUTPUT_RECT['Y'], \"width\":OUTPUT_RECT['W'], \"height\":OUTPUT_RECT['H']}\n }]})\n\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetFullscreen(self):\n print(\"[testSetFullscreen]\")\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, \"\")\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"setVideoData\",\n {\"sink\": SINK_MAIN,\n \"contentType\": \"media\",\n \"frameRate\":29.5,\n \"width\":SOURCE_WIDTH,\n \"height\":SOURCE_HEIGHT,\n 
\"scanType\":\"progressive\",\n \"adaptive\": False},\n self.statusSub,\n {\"video\":[{\"sink\": SINK_MAIN,\n \"fullScreen\": False,\n \"width\":SOURCE_WIDTH,\n \"height\":SOURCE_HEIGHT,\n \"frameRate\":29.5,\n \"sourceInput\": {\"x\":0, \"y\":0, \"width\":0, \"height\":0},\n \"displayOutput\": {\"x\":0, \"y\":0, \"width\":0, \"height\":0}\n }]})\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"display/setDisplayWindow\",\n {\"sink\": SINK_MAIN,\n \"fullScreen\": True,\n \"sourceInput\": {\"x\":0, \"y\":0, \"width\":SOURCE_WIDTH, \"height\":SOURCE_HEIGHT}},\n self.statusSub,\n {\"video\":[{\"sink\": SINK_MAIN,\n \"fullScreen\": True,\n \"width\":SOURCE_WIDTH,\n \"height\":SOURCE_HEIGHT,\n \"frameRate\":29.5,\n \"sourceInput\": {\"x\":0, \"y\":0, \"width\":SOURCE_WIDTH, \"height\":SOURCE_HEIGHT},\n \"displayOutput\": {\"x\":0, \"y\":0, \"width\":3840, \"height\":2160}\n }]})\n\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetCompositing(self):\n print(\"[testSetCompositing]\")\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, \"\")\n if len(SINK_LIST) > 1:\n self.connect(SINK_SUB, SOURCE_NAME, SOURCE_PORT, \"\")\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"display/setCompositing\",\n {\"composeOrder\": [{\"sink\":SINK_MAIN, \"opacity\":20, \"zOrder\":1},\n {\"sink\":SINK_SUB, \"opacity\":31, \"zOrder\":0}]},\n self.statusSub, {\"video\":[{\"sink\": \"MAIN\", \"opacity\":20, \"zOrder\":1}]})\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"display/setDisplayWindow\",\n {\"sink\": SINK_MAIN, \"fullScreen\":True, \"opacity\":130},\n self.statusSub, {\"video\":[{\"sink\": SINK_MAIN, \"opacity\":130, \"zOrder\":1}]})\n\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"display/setDisplayWindow\",\n {\"sink\": SINK_SUB, \"fullScreen\":True, \"opacity\":200},\n self.statusSub, {\"video\":[{\"sink\": \"SUB0\", \"opacity\":200, \"zOrder\":0}]})\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"display/setDisplayWindow\",\n {\"sink\": SINK_SUB, \"fullScreen\":True, \"opacity\":230},\n self.statusSub, {\"video\":[{\"sink\": \"MAIN\", \"opacity\":130, \"zOrder\":0}, {\"sink\": \"SUB0\", \"opacity\":230, \"zOrder\":1}]})\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"display/setDisplayWindow\",\n {\"sink\": SINK_SUB, \"fullScreen\":True, \"opacity\":30, \"zOrder\": 1},\n self.statusSub, {\"video\":[{\"sink\": \"MAIN\", \"opacity\":130, \"zOrder\":0}, {\"sink\": \"SUB0\", \"opacity\":30, \"zOrder\":1}]})\n\nif __name__ == '__main__':\n luna.VERBOSE = False\n unittest.main()\n",
"step-ids": [
11,
14,
15,
17,
18
]
}
|
[
11,
14,
15,
17,
18
] |
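The record above pairs every Luna call with an expected getStatus subscription update. A minimal sketch of such a call-then-verify helper, under assumed interfaces (`call` returning a parsed response that follows the Luna `returnValue` convention, and a subscription object with a blocking `next_event()`), since `luna_utils` itself is not shown in the record:

def check_call_and_update(call, sub, url, payload, expected):
    response = call(url, payload)
    assert response.get('returnValue') is True   # Luna convention for success
    status = sub.next_event()                    # wait for the next status push
    for want in expected.get('video', []):
        got = next(s for s in status['video'] if s['sink'] == want['sink'])
        for key, value in want.items():
            assert got[key] == value             # every expected field must match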
class UnknownResponseFormat(Exception):
pass
|
normal
|
{
"blob_id": "e5e460eb704e2ab5f747d1beee05e012ea95fbd2",
"index": 3871,
"step-1": "<mask token>\n",
"step-2": "class UnknownResponseFormat(Exception):\n pass\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
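A bare Exception subclass like this usually wraps parsing failures behind one domain-specific name. An illustrative, hypothetical use — the 'data' envelope key is an assumption, not part of the record:

import json

class UnknownResponseFormat(Exception):
    pass

def parse_payload(raw):
    # Surface a single domain error instead of leaking JSONDecodeError
    # or KeyError to callers.
    try:
        return json.loads(raw)['data']
    except (json.JSONDecodeError, KeyError) as exc:
        raise UnknownResponseFormat(raw[:80]) from exc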
str="mama"
stringlength=len(str)
slicedString=str[stringlength::-1]
print (slicedString)
|
normal
|
{
"blob_id": "5c80561a3344c0240e59500e5dadc1f1ef7f380e",
"index": 7687,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(slicedString)\n",
"step-3": "str = 'mama'\nstringlength = len(str)\nslicedString = str[stringlength::-1]\nprint(slicedString)\n",
"step-4": "str=\"mama\"\r\nstringlength=len(str)\r\nslicedString=str[stringlength::-1]\r\nprint (slicedString)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
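The slice above starts at index len(str), which Python clamps to the last character for a negative step, so it is equivalent to the idiomatic full reverse slice; renaming the variable also avoids shadowing the built-in str:

word = 'mama'                # avoid shadowing the built-in name str
reversed_word = word[::-1]   # same result as word[len(word)::-1]
print(reversed_word)         # -> amam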
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib as mp
data = pd.read_csv("/Users/stevenbaez/Desktop/train.csv")
# In[2]:
data.head()
# In[3]:
subset = data[['Survived','Age', 'Sex']]
# In[5]:
import numpy as np
import matplotlib
# In[20]:
sb.catplot(x="Age", y="Sex",
hue="Survived", col="Embarked",
notch = False,
palette = "Set2",
data=data, kind="box",
height=4, aspect=.7);
# In[17]:
sb.catplot(x="Age", y="Sex",
hue="Survived", col="Pclass",
notch = True,
palette = "Set2",
data=data, kind="box",
height=4, aspect=.7);
# In[ ]:
|
normal
|
{
"blob_id": "41006ff35299aa72b69c6dc1c71a45b44dca7d6c",
"index": 1184,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndata.head()\n<mask token>\nsb.catplot(x='Age', y='Sex', hue='Survived', col='Embarked', notch=False,\n palette='Set2', data=data, kind='box', height=4, aspect=0.7)\nsb.catplot(x='Age', y='Sex', hue='Survived', col='Pclass', notch=True,\n palette='Set2', data=data, kind='box', height=4, aspect=0.7)\n",
"step-3": "<mask token>\ndata = pd.read_csv('/Users/stevenbaez/Desktop/train.csv')\ndata.head()\nsubset = data[['Survived', 'Age', 'Sex']]\n<mask token>\nsb.catplot(x='Age', y='Sex', hue='Survived', col='Embarked', notch=False,\n palette='Set2', data=data, kind='box', height=4, aspect=0.7)\nsb.catplot(x='Age', y='Sex', hue='Survived', col='Pclass', notch=True,\n palette='Set2', data=data, kind='box', height=4, aspect=0.7)\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport seaborn as sb\nimport matplotlib as mp\ndata = pd.read_csv('/Users/stevenbaez/Desktop/train.csv')\ndata.head()\nsubset = data[['Survived', 'Age', 'Sex']]\nimport numpy as np\nimport matplotlib\nsb.catplot(x='Age', y='Sex', hue='Survived', col='Embarked', notch=False,\n palette='Set2', data=data, kind='box', height=4, aspect=0.7)\nsb.catplot(x='Age', y='Sex', hue='Survived', col='Pclass', notch=True,\n palette='Set2', data=data, kind='box', height=4, aspect=0.7)\n",
"step-5": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sb\nimport matplotlib as mp\n\ndata = pd.read_csv(\"/Users/stevenbaez/Desktop/train.csv\")\n\n\n# In[2]:\n\n\ndata.head()\n\n\n# In[3]:\n\n\nsubset = data[['Survived','Age', 'Sex']]\n\n\n# In[5]:\n\n\nimport numpy as np\nimport matplotlib\n\n\n# In[20]:\n\n\nsb.catplot(x=\"Age\", y=\"Sex\",\n hue=\"Survived\", col=\"Embarked\",\n notch = False,\n palette = \"Set2\",\n data=data, kind=\"box\",\n height=4, aspect=.7);\n\n\n# In[17]:\n\n\nsb.catplot(x=\"Age\", y=\"Sex\",\n hue=\"Survived\", col=\"Pclass\",\n notch = True,\n palette = \"Set2\",\n data=data, kind=\"box\",\n height=4, aspect=.7);\n\n\n# In[ ]:\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
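catplot with kind="box" draws one grouped boxplot per Embarked or Pclass facet. A variant under the same assumptions (the Kaggle Titanic train.csv with Age, Sex, Survived, and Pclass columns; the path here is illustrative), swapping the boxes for violins to show the full age distribution:

import pandas as pd
import seaborn as sb

data = pd.read_csv('train.csv')   # hypothetical local copy of the dataset
sb.catplot(x='Age', y='Sex', hue='Survived', col='Pclass',
           palette='Set2', data=data, kind='violin',
           height=4, aspect=0.7)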
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import config
import web
import hashlib
import sys
db = web.database(dbn="mysql", db=config.db, user=config.user, pw=config.passwd)
def signIn(user, pw):
pwhash = hashlib.md5(pw).hexdigest()
uid = db.insert("users", uname=user, passwd=pwhash)
return uid
# def select():
# db.select(, )
def main():
if len(sys.argv) > 1:
user = sys.argv[1]
pw = sys.argv[2]
signIn(user, pw)
if __name__ == "__main__":
main()
r = db.select("users")
for i in r:
print i.uname
# conn = MySQLdb.connect(host=config.host, user=config.user, passwd=config.passwd,
# db=config.db, port=config.port, charset=config.charset)
# conn
|
normal
|
{
"blob_id": "6d032df195854703f36dce7d27524c8f5089c04d",
"index": 2334,
"step-1": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport config\r\nimport web\r\nimport hashlib\r\nimport sys\r\n\r\n\r\ndb = web.database(dbn=\"mysql\", db=config.db, user=config.user, pw=config.passwd)\r\n\r\ndef signIn(user, pw):\r\n pwhash = hashlib.md5(pw).hexdigest()\r\n uid = db.insert(\"users\", uname=user, passwd=pwhash)\r\n return uid\r\n\r\n# def select():\r\n# db.select(, )\r\n\r\ndef main():\r\n if len(sys.argv) > 1:\r\n user = sys.argv[1]\r\n pw = sys.argv[2]\r\n signIn(user, pw)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n r = db.select(\"users\")\r\n for i in r:\r\n print i.uname\r\n # conn = MySQLdb.connect(host=config.host, user=config.user, passwd=config.passwd,\r\n # db=config.db, port=config.port, charset=config.charset)\r\n # conn\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
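signIn above stores an unsalted MD5 digest, which is unsuitable for passwords, and the trailing print statement is Python 2 syntax. A hedged Python 3 sketch of the same insert shape with a salted PBKDF2 hash — the iteration count is illustrative, and db is assumed to be the web.py handle from the record:

import hashlib
import os

def hash_password(pw):
    salt = os.urandom(16)
    digest = hashlib.pbkdf2_hmac('sha256', pw.encode('utf-8'), salt, 100000)
    return salt.hex() + ':' + digest.hex()   # store the salt with the digest

def sign_in(db, user, pw):
    # Same shape as the record's signIn, but persisting a salted hash.
    return db.insert('users', uname=user, passwd=hash_password(pw))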
#!/bin/python
"""
len()
lower()
upper()
str()
"""
parrot = "Norwegian Blue"
print len(parrot)
|
normal
|
{
"blob_id": "cd8d95e2bf433020db2db06a21263f75e3f81331",
"index": 9740,
"step-1": "#!/bin/python\n\n\"\"\"\nlen()\nlower()\nupper()\nstr()\n\"\"\"\n\nparrot = \"Norwegian Blue\"\nprint len(parrot)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
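The docstring names len(), lower(), upper(), and str(), but the print statement is Python 2. A Python 3 pass over all four:

parrot = 'Norwegian Blue'
print(len(parrot))      # 14 -- character count
print(parrot.lower())   # norwegian blue
print(parrot.upper())   # NORWEGIAN BLUE
print(str(3.14))        # 3.14 -- any value rendered as a string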
import requests
import tkinter as tk
from tkinter.font import Font
from time import strptime
class Window(tk.Tk):
def __init__(self):
super().__init__()
        # fetch the data from the web service
res = requests.get('https://flask-robert.herokuapp.com/youbike')
jsonObj = res.json()
areas = jsonObj['areas']
        # build the UI
self.title("台北市行政區")
topFrame = tk.Frame(self,bd=2,relief=tk.GROOVE,padx=20,pady=10)
buttonFont = Font(family='Helvetica', size=20)
for index, area in enumerate(areas):
if index % 6 == 0:
parentframe = tk.Frame(topFrame)
parentframe.pack()
btn = tk.Button(parentframe, text=area, font=buttonFont, padx=5, pady=5)
btn.bind('<Button-1>', self.userClick)
btn.pack(side=tk.LEFT, padx=5)
topFrame.pack(padx=20, pady=30)
        # build the radio-button area below
self.fixedWidthFrame = tk.Frame(self,height=600,bg='red')
self.createdRadioButtonFrame()
self.fixedWidthFrame.pack(padx=20)
        # build the message display area
messageDisplayFrame = tk.Frame(self,bd=2,relief=tk.GROOVE,padx=20,pady=10)
self.mdayLabel = tk.Label(messageDisplayFrame, text="記錄時間:")
self.mdayLabel.pack(anchor=tk.W)
self.snaLabel = tk.Label(messageDisplayFrame,text="站名:")
self.snaLabel.pack(anchor=tk.W)
self.arLabel = tk.Label(messageDisplayFrame,text="地址:")
self.arLabel.pack(anchor=tk.W)
self.bempLabel = tk.Label(messageDisplayFrame, text="空位數量:")
self.bempLabel.pack(anchor=tk.W)
self.sbiLabel = tk.Label(messageDisplayFrame, text="可借車數:")
self.sbiLabel.pack(anchor=tk.W)
self.totLabel = tk.Label(messageDisplayFrame, text="總車數:")
self.totLabel.pack(anchor=tk.W)
messageDisplayFrame.pack(expand=True,fill=tk.BOTH,padx=20,pady=30)
def userClick(self,event):
self.bottomFrame.destroy()
selectedArea = event.widget['text']
urlString = "https://flask-robert.herokuapp.com/youbike/%s" % selectedArea
res = requests.get(urlString)
jsonobj = res.json()
self.areas = jsonobj['data']
snaList = []
for area in self.areas:
snaList.append(area["sna"])
self.createdRadioButtonFrame(data=snaList)
def createdRadioButtonFrame(self,data=None):
self.bottomFrame = tk.Frame(self.fixedWidthFrame, bd=2, relief=tk.GROOVE, padx=20, pady=10)
if data == None:
urlString = "https://flask-robert.herokuapp.com/youbike/南港區"
res = requests.get(urlString)
jsonobj = res.json()
self.areas = jsonobj['data']
snaList = []
for area in self.areas:
snaList.append(area["sna"])
self.radioButtonData = snaList
else:
self.radioButtonData = data
self.var = tk.IntVar()
for index, data in enumerate(self.radioButtonData):
if index % 10 == 0:
parentframe = tk.Frame(self.bottomFrame)
parentframe.pack(side=tk.LEFT,expand=True,fill=tk.Y)
radioButton = tk.Radiobutton(parentframe, text=data, value=index, variable=self.var,command=self.userChoicedRadioButton).pack(anchor=tk.W)
self.bottomFrame.pack()
self.var.set(0)
def userChoicedRadioButton(self):
index = self.var.get()
infomation = self.areas[index]
print(infomation)
datetimeString = infomation["mday"]
datetimeFormat = "%Y%m%d%H%M%S"
structTime = strptime(datetimeString,datetimeFormat)
self.mdayLabel["text"] = "記錄時間:%d年%d月%d日 %d:%d:%d" % (structTime.tm_year,structTime.tm_mon,structTime.tm_mday,structTime.tm_hour,structTime.tm_min,structTime.tm_sec)
self.snaLabel["text"] = "站名:%s" % infomation["sna"]
self.arLabel.configure(text="地址:{0:s}".format(infomation["ar"]))
self.bempLabel["text"] = "空位數量:{0:d}".format(infomation["bemp"])
self.sbiLabel["text"] = "可借車數:{0:d}".format(infomation["sbi"])
self.totLabel["text"] = "總車數:{0:d}".format(infomation["tot"])
if __name__ == "__main__":
window = Window()
window.mainloop()
|
normal
|
{
"blob_id": "f9becdb48583423e7bd3730d1cd74a6a016663dc",
"index": 1768,
"step-1": "<mask token>\n\n\nclass Window(tk.Tk):\n\n def __init__(self):\n super().__init__()\n res = requests.get('https://flask-robert.herokuapp.com/youbike')\n jsonObj = res.json()\n areas = jsonObj['areas']\n self.title('台北市行政區')\n topFrame = tk.Frame(self, bd=2, relief=tk.GROOVE, padx=20, pady=10)\n buttonFont = Font(family='Helvetica', size=20)\n for index, area in enumerate(areas):\n if index % 6 == 0:\n parentframe = tk.Frame(topFrame)\n parentframe.pack()\n btn = tk.Button(parentframe, text=area, font=buttonFont, padx=5,\n pady=5)\n btn.bind('<Button-1>', self.userClick)\n btn.pack(side=tk.LEFT, padx=5)\n topFrame.pack(padx=20, pady=30)\n self.fixedWidthFrame = tk.Frame(self, height=600, bg='red')\n self.createdRadioButtonFrame()\n self.fixedWidthFrame.pack(padx=20)\n messageDisplayFrame = tk.Frame(self, bd=2, relief=tk.GROOVE, padx=\n 20, pady=10)\n self.mdayLabel = tk.Label(messageDisplayFrame, text='記錄時間:')\n self.mdayLabel.pack(anchor=tk.W)\n self.snaLabel = tk.Label(messageDisplayFrame, text='站名:')\n self.snaLabel.pack(anchor=tk.W)\n self.arLabel = tk.Label(messageDisplayFrame, text='地址:')\n self.arLabel.pack(anchor=tk.W)\n self.bempLabel = tk.Label(messageDisplayFrame, text='空位數量:')\n self.bempLabel.pack(anchor=tk.W)\n self.sbiLabel = tk.Label(messageDisplayFrame, text='可借車數:')\n self.sbiLabel.pack(anchor=tk.W)\n self.totLabel = tk.Label(messageDisplayFrame, text='總車數:')\n self.totLabel.pack(anchor=tk.W)\n messageDisplayFrame.pack(expand=True, fill=tk.BOTH, padx=20, pady=30)\n\n def userClick(self, event):\n self.bottomFrame.destroy()\n selectedArea = event.widget['text']\n urlString = ('https://flask-robert.herokuapp.com/youbike/%s' %\n selectedArea)\n res = requests.get(urlString)\n jsonobj = res.json()\n self.areas = jsonobj['data']\n snaList = []\n for area in self.areas:\n snaList.append(area['sna'])\n self.createdRadioButtonFrame(data=snaList)\n\n def createdRadioButtonFrame(self, data=None):\n self.bottomFrame = tk.Frame(self.fixedWidthFrame, bd=2, relief=tk.\n GROOVE, padx=20, pady=10)\n if data == None:\n urlString = 'https://flask-robert.herokuapp.com/youbike/南港區'\n res = requests.get(urlString)\n jsonobj = res.json()\n self.areas = jsonobj['data']\n snaList = []\n for area in self.areas:\n snaList.append(area['sna'])\n self.radioButtonData = snaList\n else:\n self.radioButtonData = data\n self.var = tk.IntVar()\n for index, data in enumerate(self.radioButtonData):\n if index % 10 == 0:\n parentframe = tk.Frame(self.bottomFrame)\n parentframe.pack(side=tk.LEFT, expand=True, fill=tk.Y)\n radioButton = tk.Radiobutton(parentframe, text=data, value=\n index, variable=self.var, command=self.userChoicedRadioButton\n ).pack(anchor=tk.W)\n self.bottomFrame.pack()\n self.var.set(0)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Window(tk.Tk):\n\n def __init__(self):\n super().__init__()\n res = requests.get('https://flask-robert.herokuapp.com/youbike')\n jsonObj = res.json()\n areas = jsonObj['areas']\n self.title('台北市行政區')\n topFrame = tk.Frame(self, bd=2, relief=tk.GROOVE, padx=20, pady=10)\n buttonFont = Font(family='Helvetica', size=20)\n for index, area in enumerate(areas):\n if index % 6 == 0:\n parentframe = tk.Frame(topFrame)\n parentframe.pack()\n btn = tk.Button(parentframe, text=area, font=buttonFont, padx=5,\n pady=5)\n btn.bind('<Button-1>', self.userClick)\n btn.pack(side=tk.LEFT, padx=5)\n topFrame.pack(padx=20, pady=30)\n self.fixedWidthFrame = tk.Frame(self, height=600, bg='red')\n self.createdRadioButtonFrame()\n self.fixedWidthFrame.pack(padx=20)\n messageDisplayFrame = tk.Frame(self, bd=2, relief=tk.GROOVE, padx=\n 20, pady=10)\n self.mdayLabel = tk.Label(messageDisplayFrame, text='記錄時間:')\n self.mdayLabel.pack(anchor=tk.W)\n self.snaLabel = tk.Label(messageDisplayFrame, text='站名:')\n self.snaLabel.pack(anchor=tk.W)\n self.arLabel = tk.Label(messageDisplayFrame, text='地址:')\n self.arLabel.pack(anchor=tk.W)\n self.bempLabel = tk.Label(messageDisplayFrame, text='空位數量:')\n self.bempLabel.pack(anchor=tk.W)\n self.sbiLabel = tk.Label(messageDisplayFrame, text='可借車數:')\n self.sbiLabel.pack(anchor=tk.W)\n self.totLabel = tk.Label(messageDisplayFrame, text='總車數:')\n self.totLabel.pack(anchor=tk.W)\n messageDisplayFrame.pack(expand=True, fill=tk.BOTH, padx=20, pady=30)\n\n def userClick(self, event):\n self.bottomFrame.destroy()\n selectedArea = event.widget['text']\n urlString = ('https://flask-robert.herokuapp.com/youbike/%s' %\n selectedArea)\n res = requests.get(urlString)\n jsonobj = res.json()\n self.areas = jsonobj['data']\n snaList = []\n for area in self.areas:\n snaList.append(area['sna'])\n self.createdRadioButtonFrame(data=snaList)\n\n def createdRadioButtonFrame(self, data=None):\n self.bottomFrame = tk.Frame(self.fixedWidthFrame, bd=2, relief=tk.\n GROOVE, padx=20, pady=10)\n if data == None:\n urlString = 'https://flask-robert.herokuapp.com/youbike/南港區'\n res = requests.get(urlString)\n jsonobj = res.json()\n self.areas = jsonobj['data']\n snaList = []\n for area in self.areas:\n snaList.append(area['sna'])\n self.radioButtonData = snaList\n else:\n self.radioButtonData = data\n self.var = tk.IntVar()\n for index, data in enumerate(self.radioButtonData):\n if index % 10 == 0:\n parentframe = tk.Frame(self.bottomFrame)\n parentframe.pack(side=tk.LEFT, expand=True, fill=tk.Y)\n radioButton = tk.Radiobutton(parentframe, text=data, value=\n index, variable=self.var, command=self.userChoicedRadioButton\n ).pack(anchor=tk.W)\n self.bottomFrame.pack()\n self.var.set(0)\n\n def userChoicedRadioButton(self):\n index = self.var.get()\n infomation = self.areas[index]\n print(infomation)\n datetimeString = infomation['mday']\n datetimeFormat = '%Y%m%d%H%M%S'\n structTime = strptime(datetimeString, datetimeFormat)\n self.mdayLabel['text'] = '記錄時間:%d年%d月%d日 %d:%d:%d' % (structTime.\n tm_year, structTime.tm_mon, structTime.tm_mday, structTime.\n tm_hour, structTime.tm_min, structTime.tm_sec)\n self.snaLabel['text'] = '站名:%s' % infomation['sna']\n self.arLabel.configure(text='地址:{0:s}'.format(infomation['ar']))\n self.bempLabel['text'] = '空位數量:{0:d}'.format(infomation['bemp'])\n self.sbiLabel['text'] = '可借車數:{0:d}'.format(infomation['sbi'])\n self.totLabel['text'] = '總車數:{0:d}'.format(infomation['tot'])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Window(tk.Tk):\n\n def __init__(self):\n super().__init__()\n res = requests.get('https://flask-robert.herokuapp.com/youbike')\n jsonObj = res.json()\n areas = jsonObj['areas']\n self.title('台北市行政區')\n topFrame = tk.Frame(self, bd=2, relief=tk.GROOVE, padx=20, pady=10)\n buttonFont = Font(family='Helvetica', size=20)\n for index, area in enumerate(areas):\n if index % 6 == 0:\n parentframe = tk.Frame(topFrame)\n parentframe.pack()\n btn = tk.Button(parentframe, text=area, font=buttonFont, padx=5,\n pady=5)\n btn.bind('<Button-1>', self.userClick)\n btn.pack(side=tk.LEFT, padx=5)\n topFrame.pack(padx=20, pady=30)\n self.fixedWidthFrame = tk.Frame(self, height=600, bg='red')\n self.createdRadioButtonFrame()\n self.fixedWidthFrame.pack(padx=20)\n messageDisplayFrame = tk.Frame(self, bd=2, relief=tk.GROOVE, padx=\n 20, pady=10)\n self.mdayLabel = tk.Label(messageDisplayFrame, text='記錄時間:')\n self.mdayLabel.pack(anchor=tk.W)\n self.snaLabel = tk.Label(messageDisplayFrame, text='站名:')\n self.snaLabel.pack(anchor=tk.W)\n self.arLabel = tk.Label(messageDisplayFrame, text='地址:')\n self.arLabel.pack(anchor=tk.W)\n self.bempLabel = tk.Label(messageDisplayFrame, text='空位數量:')\n self.bempLabel.pack(anchor=tk.W)\n self.sbiLabel = tk.Label(messageDisplayFrame, text='可借車數:')\n self.sbiLabel.pack(anchor=tk.W)\n self.totLabel = tk.Label(messageDisplayFrame, text='總車數:')\n self.totLabel.pack(anchor=tk.W)\n messageDisplayFrame.pack(expand=True, fill=tk.BOTH, padx=20, pady=30)\n\n def userClick(self, event):\n self.bottomFrame.destroy()\n selectedArea = event.widget['text']\n urlString = ('https://flask-robert.herokuapp.com/youbike/%s' %\n selectedArea)\n res = requests.get(urlString)\n jsonobj = res.json()\n self.areas = jsonobj['data']\n snaList = []\n for area in self.areas:\n snaList.append(area['sna'])\n self.createdRadioButtonFrame(data=snaList)\n\n def createdRadioButtonFrame(self, data=None):\n self.bottomFrame = tk.Frame(self.fixedWidthFrame, bd=2, relief=tk.\n GROOVE, padx=20, pady=10)\n if data == None:\n urlString = 'https://flask-robert.herokuapp.com/youbike/南港區'\n res = requests.get(urlString)\n jsonobj = res.json()\n self.areas = jsonobj['data']\n snaList = []\n for area in self.areas:\n snaList.append(area['sna'])\n self.radioButtonData = snaList\n else:\n self.radioButtonData = data\n self.var = tk.IntVar()\n for index, data in enumerate(self.radioButtonData):\n if index % 10 == 0:\n parentframe = tk.Frame(self.bottomFrame)\n parentframe.pack(side=tk.LEFT, expand=True, fill=tk.Y)\n radioButton = tk.Radiobutton(parentframe, text=data, value=\n index, variable=self.var, command=self.userChoicedRadioButton\n ).pack(anchor=tk.W)\n self.bottomFrame.pack()\n self.var.set(0)\n\n def userChoicedRadioButton(self):\n index = self.var.get()\n infomation = self.areas[index]\n print(infomation)\n datetimeString = infomation['mday']\n datetimeFormat = '%Y%m%d%H%M%S'\n structTime = strptime(datetimeString, datetimeFormat)\n self.mdayLabel['text'] = '記錄時間:%d年%d月%d日 %d:%d:%d' % (structTime.\n tm_year, structTime.tm_mon, structTime.tm_mday, structTime.\n tm_hour, structTime.tm_min, structTime.tm_sec)\n self.snaLabel['text'] = '站名:%s' % infomation['sna']\n self.arLabel.configure(text='地址:{0:s}'.format(infomation['ar']))\n self.bempLabel['text'] = '空位數量:{0:d}'.format(infomation['bemp'])\n self.sbiLabel['text'] = '可借車數:{0:d}'.format(infomation['sbi'])\n self.totLabel['text'] = '總車數:{0:d}'.format(infomation['tot'])\n\n\nif __name__ == '__main__':\n window = Window()\n 
window.mainloop()\n",
"step-4": "import requests\nimport tkinter as tk\nfrom tkinter.font import Font\nfrom time import strptime\n\n\nclass Window(tk.Tk):\n\n def __init__(self):\n super().__init__()\n res = requests.get('https://flask-robert.herokuapp.com/youbike')\n jsonObj = res.json()\n areas = jsonObj['areas']\n self.title('台北市行政區')\n topFrame = tk.Frame(self, bd=2, relief=tk.GROOVE, padx=20, pady=10)\n buttonFont = Font(family='Helvetica', size=20)\n for index, area in enumerate(areas):\n if index % 6 == 0:\n parentframe = tk.Frame(topFrame)\n parentframe.pack()\n btn = tk.Button(parentframe, text=area, font=buttonFont, padx=5,\n pady=5)\n btn.bind('<Button-1>', self.userClick)\n btn.pack(side=tk.LEFT, padx=5)\n topFrame.pack(padx=20, pady=30)\n self.fixedWidthFrame = tk.Frame(self, height=600, bg='red')\n self.createdRadioButtonFrame()\n self.fixedWidthFrame.pack(padx=20)\n messageDisplayFrame = tk.Frame(self, bd=2, relief=tk.GROOVE, padx=\n 20, pady=10)\n self.mdayLabel = tk.Label(messageDisplayFrame, text='記錄時間:')\n self.mdayLabel.pack(anchor=tk.W)\n self.snaLabel = tk.Label(messageDisplayFrame, text='站名:')\n self.snaLabel.pack(anchor=tk.W)\n self.arLabel = tk.Label(messageDisplayFrame, text='地址:')\n self.arLabel.pack(anchor=tk.W)\n self.bempLabel = tk.Label(messageDisplayFrame, text='空位數量:')\n self.bempLabel.pack(anchor=tk.W)\n self.sbiLabel = tk.Label(messageDisplayFrame, text='可借車數:')\n self.sbiLabel.pack(anchor=tk.W)\n self.totLabel = tk.Label(messageDisplayFrame, text='總車數:')\n self.totLabel.pack(anchor=tk.W)\n messageDisplayFrame.pack(expand=True, fill=tk.BOTH, padx=20, pady=30)\n\n def userClick(self, event):\n self.bottomFrame.destroy()\n selectedArea = event.widget['text']\n urlString = ('https://flask-robert.herokuapp.com/youbike/%s' %\n selectedArea)\n res = requests.get(urlString)\n jsonobj = res.json()\n self.areas = jsonobj['data']\n snaList = []\n for area in self.areas:\n snaList.append(area['sna'])\n self.createdRadioButtonFrame(data=snaList)\n\n def createdRadioButtonFrame(self, data=None):\n self.bottomFrame = tk.Frame(self.fixedWidthFrame, bd=2, relief=tk.\n GROOVE, padx=20, pady=10)\n if data == None:\n urlString = 'https://flask-robert.herokuapp.com/youbike/南港區'\n res = requests.get(urlString)\n jsonobj = res.json()\n self.areas = jsonobj['data']\n snaList = []\n for area in self.areas:\n snaList.append(area['sna'])\n self.radioButtonData = snaList\n else:\n self.radioButtonData = data\n self.var = tk.IntVar()\n for index, data in enumerate(self.radioButtonData):\n if index % 10 == 0:\n parentframe = tk.Frame(self.bottomFrame)\n parentframe.pack(side=tk.LEFT, expand=True, fill=tk.Y)\n radioButton = tk.Radiobutton(parentframe, text=data, value=\n index, variable=self.var, command=self.userChoicedRadioButton\n ).pack(anchor=tk.W)\n self.bottomFrame.pack()\n self.var.set(0)\n\n def userChoicedRadioButton(self):\n index = self.var.get()\n infomation = self.areas[index]\n print(infomation)\n datetimeString = infomation['mday']\n datetimeFormat = '%Y%m%d%H%M%S'\n structTime = strptime(datetimeString, datetimeFormat)\n self.mdayLabel['text'] = '記錄時間:%d年%d月%d日 %d:%d:%d' % (structTime.\n tm_year, structTime.tm_mon, structTime.tm_mday, structTime.\n tm_hour, structTime.tm_min, structTime.tm_sec)\n self.snaLabel['text'] = '站名:%s' % infomation['sna']\n self.arLabel.configure(text='地址:{0:s}'.format(infomation['ar']))\n self.bempLabel['text'] = '空位數量:{0:d}'.format(infomation['bemp'])\n self.sbiLabel['text'] = '可借車數:{0:d}'.format(infomation['sbi'])\n self.totLabel['text'] = 
'總車數:{0:d}'.format(infomation['tot'])\n\n\nif __name__ == '__main__':\n window = Window()\n window.mainloop()\n",
"step-5": "import requests\nimport tkinter as tk\nfrom tkinter.font import Font\nfrom time import strptime\n\n\nclass Window(tk.Tk):\n def __init__(self):\n super().__init__()\n #取得網路上的資料\n res = requests.get('https://flask-robert.herokuapp.com/youbike')\n jsonObj = res.json()\n areas = jsonObj['areas']\n\n #介面\n self.title(\"台北市行政區\")\n topFrame = tk.Frame(self,bd=2,relief=tk.GROOVE,padx=20,pady=10)\n buttonFont = Font(family='Helvetica', size=20)\n\n for index, area in enumerate(areas):\n if index % 6 == 0:\n parentframe = tk.Frame(topFrame)\n parentframe.pack()\n btn = tk.Button(parentframe, text=area, font=buttonFont, padx=5, pady=5)\n btn.bind('<Button-1>', self.userClick)\n btn.pack(side=tk.LEFT, padx=5)\n topFrame.pack(padx=20, pady=30)\n\n\n #建立下方radioButton的介面\n self.fixedWidthFrame = tk.Frame(self,height=600,bg='red')\n self.createdRadioButtonFrame()\n self.fixedWidthFrame.pack(padx=20)\n\n #建立message介面\n messageDisplayFrame = tk.Frame(self,bd=2,relief=tk.GROOVE,padx=20,pady=10)\n self.mdayLabel = tk.Label(messageDisplayFrame, text=\"記錄時間:\")\n self.mdayLabel.pack(anchor=tk.W)\n self.snaLabel = tk.Label(messageDisplayFrame,text=\"站名:\")\n self.snaLabel.pack(anchor=tk.W)\n self.arLabel = tk.Label(messageDisplayFrame,text=\"地址:\")\n self.arLabel.pack(anchor=tk.W)\n self.bempLabel = tk.Label(messageDisplayFrame, text=\"空位數量:\")\n self.bempLabel.pack(anchor=tk.W)\n self.sbiLabel = tk.Label(messageDisplayFrame, text=\"可借車數:\")\n self.sbiLabel.pack(anchor=tk.W)\n self.totLabel = tk.Label(messageDisplayFrame, text=\"總車數:\")\n self.totLabel.pack(anchor=tk.W)\n messageDisplayFrame.pack(expand=True,fill=tk.BOTH,padx=20,pady=30)\n\n\n\n\n def userClick(self,event):\n self.bottomFrame.destroy()\n selectedArea = event.widget['text']\n urlString = \"https://flask-robert.herokuapp.com/youbike/%s\" % selectedArea\n res = requests.get(urlString)\n jsonobj = res.json()\n self.areas = jsonobj['data']\n snaList = []\n for area in self.areas:\n snaList.append(area[\"sna\"])\n self.createdRadioButtonFrame(data=snaList)\n\n\n def createdRadioButtonFrame(self,data=None):\n self.bottomFrame = tk.Frame(self.fixedWidthFrame, bd=2, relief=tk.GROOVE, padx=20, pady=10)\n if data == None:\n urlString = \"https://flask-robert.herokuapp.com/youbike/南港區\"\n res = requests.get(urlString)\n jsonobj = res.json()\n self.areas = jsonobj['data']\n snaList = []\n for area in self.areas:\n snaList.append(area[\"sna\"])\n self.radioButtonData = snaList\n else:\n self.radioButtonData = data\n\n self.var = tk.IntVar()\n for index, data in enumerate(self.radioButtonData):\n if index % 10 == 0:\n parentframe = tk.Frame(self.bottomFrame)\n parentframe.pack(side=tk.LEFT,expand=True,fill=tk.Y)\n radioButton = tk.Radiobutton(parentframe, text=data, value=index, variable=self.var,command=self.userChoicedRadioButton).pack(anchor=tk.W)\n self.bottomFrame.pack()\n self.var.set(0)\n\n def userChoicedRadioButton(self):\n index = self.var.get()\n infomation = self.areas[index]\n print(infomation)\n datetimeString = infomation[\"mday\"]\n datetimeFormat = \"%Y%m%d%H%M%S\"\n structTime = strptime(datetimeString,datetimeFormat)\n\n self.mdayLabel[\"text\"] = \"記錄時間:%d年%d月%d日 %d:%d:%d\" % (structTime.tm_year,structTime.tm_mon,structTime.tm_mday,structTime.tm_hour,structTime.tm_min,structTime.tm_sec)\n self.snaLabel[\"text\"] = \"站名:%s\" % infomation[\"sna\"]\n self.arLabel.configure(text=\"地址:{0:s}\".format(infomation[\"ar\"]))\n self.bempLabel[\"text\"] = \"空位數量:{0:d}\".format(infomation[\"bemp\"])\n self.sbiLabel[\"text\"] = 
\"可借車數:{0:d}\".format(infomation[\"sbi\"])\n self.totLabel[\"text\"] = \"總車數:{0:d}\".format(infomation[\"tot\"])\n\nif __name__ == \"__main__\":\n window = Window()\n window.mainloop()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
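userChoicedRadioButton parses the 14-digit mday timestamp with time.strptime and rebuilds the display string field by field. datetime.strptime gives the same parse plus formatting in one step; the sample value here is illustrative:

from datetime import datetime

mday = '20240131154503'                        # hypothetical mday payload
recorded = datetime.strptime(mday, '%Y%m%d%H%M%S')
print(recorded.strftime('%Y-%m-%d %H:%M:%S'))  # -> 2024-01-31 15:45:03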
from collections import Counter
# Complete the isValid function below.
def isValid(s):
if not s:
return True
x = Counter(s)
print(x)
first_c = x.pop(s[0])
cnt = 0
for k, c in x.items():
if c != first_c:
if first_c == 1:
cnt += 1
first_c = c
else:
cnt += abs(c - first_c) if c != 1 else 1
if cnt >= 2:
return False
return True
if __name__ == '__main__':
s = "ibfdgaeadiaefgbhbdghhhbgdfgeiccbi"
r = isValid(s)
print(r)
|
normal
|
{
"blob_id": "760daa908ca92e7fb1393bdf28fee086dc1648ef",
"index": 6418,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef isValid(s):\n if not s:\n return True\n x = Counter(s)\n print(x)\n first_c = x.pop(s[0])\n cnt = 0\n for k, c in x.items():\n if c != first_c:\n if first_c == 1:\n cnt += 1\n first_c = c\n else:\n cnt += abs(c - first_c) if c != 1 else 1\n if cnt >= 2:\n return False\n return True\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef isValid(s):\n if not s:\n return True\n x = Counter(s)\n print(x)\n first_c = x.pop(s[0])\n cnt = 0\n for k, c in x.items():\n if c != first_c:\n if first_c == 1:\n cnt += 1\n first_c = c\n else:\n cnt += abs(c - first_c) if c != 1 else 1\n if cnt >= 2:\n return False\n return True\n\n\nif __name__ == '__main__':\n s = 'ibfdgaeadiaefgbhbdghhhbgdfgeiccbi'\n r = isValid(s)\n print(r)\n",
"step-4": "from collections import Counter\n\n\ndef isValid(s):\n if not s:\n return True\n x = Counter(s)\n print(x)\n first_c = x.pop(s[0])\n cnt = 0\n for k, c in x.items():\n if c != first_c:\n if first_c == 1:\n cnt += 1\n first_c = c\n else:\n cnt += abs(c - first_c) if c != 1 else 1\n if cnt >= 2:\n return False\n return True\n\n\nif __name__ == '__main__':\n s = 'ibfdgaeadiaefgbhbdghhhbgdfgeiccbi'\n r = isValid(s)\n print(r)\n",
"step-5": "from collections import Counter\n\n\n# Complete the isValid function below.\ndef isValid(s):\n if not s:\n return True\n\n x = Counter(s)\n print(x)\n first_c = x.pop(s[0])\n cnt = 0\n for k, c in x.items():\n if c != first_c:\n if first_c == 1:\n cnt += 1\n first_c = c\n else:\n cnt += abs(c - first_c) if c != 1 else 1\n if cnt >= 2:\n return False\n return True\n\n\nif __name__ == '__main__':\n s = \"ibfdgaeadiaefgbhbdghhhbgdfgeiccbi\"\n r = isValid(s)\n print(r)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
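The running-difference heuristic above is hard to reason about. A common alternative for this kind of "valid string" check counts the frequency of frequencies: the string is valid when all character counts match, or when deleting a single character makes them match:

from collections import Counter

def is_valid(s):
    if not s:
        return True
    freqs = Counter(Counter(s).values())   # frequency -> how many chars have it
    if len(freqs) == 1:
        return True
    if len(freqs) != 2:
        return False
    (f1, n1), (f2, n2) = sorted(freqs.items())
    if f1 == 1 and n1 == 1:                # one singleton char can be removed
        return True
    return f2 == f1 + 1 and n2 == 1        # one char carries one extra copy

print(is_valid('ibfdgaeadiaefgbhbdghhhbgdfgeiccbi'))   # -> False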
"""
opsi-utils
Test utilities
"""
import os
import tempfile
from contextlib import contextmanager
from pathlib import Path
from typing import Generator
@contextmanager
def temp_context() -> Generator[Path, None, None]:
origin = Path().absolute()
try:
with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tempdir:
os.chdir(tempdir)
yield origin # return original path
finally:
os.chdir(origin)
|
normal
|
{
"blob_id": "3c2a611fd001f145703853f5ecfe70d0e93844e4",
"index": 4665,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@contextmanager\ndef temp_context() ->Generator[Path, None, None]:\n origin = Path().absolute()\n try:\n with tempfile.TemporaryDirectory(ignore_cleanup_errors=True\n ) as tempdir:\n os.chdir(tempdir)\n yield origin\n finally:\n os.chdir(origin)\n",
"step-3": "<mask token>\nimport os\nimport tempfile\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom typing import Generator\n\n\n@contextmanager\ndef temp_context() ->Generator[Path, None, None]:\n origin = Path().absolute()\n try:\n with tempfile.TemporaryDirectory(ignore_cleanup_errors=True\n ) as tempdir:\n os.chdir(tempdir)\n yield origin\n finally:\n os.chdir(origin)\n",
"step-4": "\"\"\"\nopsi-utils\n\nTest utilities\n\"\"\"\n\nimport os\nimport tempfile\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom typing import Generator\n\n\n@contextmanager\ndef temp_context() -> Generator[Path, None, None]:\n\torigin = Path().absolute()\n\ttry:\n\t\twith tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tempdir:\n\t\t\tos.chdir(tempdir)\n\t\t\tyield origin # return original path\n\tfinally:\n\t\tos.chdir(origin)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
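temp_context runs the with-body inside a throwaway directory and yields the path it started from. A usage sketch that assumes the temp_context function defined in the record is in scope:

from pathlib import Path

with temp_context() as origin:
    Path('scratch.txt').write_text('temporary')   # created in the temp dir
    assert Path().absolute() != origin            # cwd really changed
# on exit the temp dir is deleted and the original cwd is restored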
from Logic.ProperLogic.helper_classes.reducer import MaxReducer
from Logic.ProperLogic.misc_helpers import log_error
import torch
from itertools import count
import logging
logging.basicConfig(level=logging.INFO)
class Cluster:
metric = 2
def __init__(self, cluster_id, embeddings=None, embeddings_ids=None, label=None, center_point=None):
"""
embeddings must be (flat) iterable of embeddings with len applicable
:param embeddings:
:param embeddings_ids:
"""
if label is None:
label = 'Unknown Person'
self.label = label
self.max_id_reducer = MaxReducer()
if embeddings is None:
self.embeddings_dict = dict()
self.num_embeddings = 0
self.center_point = None
self.max_embedding_id = 0
self.max_id_reducer(self.max_embedding_id)
else:
if embeddings_ids is None:
embeddings_ids = count(1)
# cast embeddings to dict
self.embeddings_dict = dict(zip(embeddings_ids, embeddings))
self.num_embeddings = len(self.embeddings_dict)
if center_point is not None:
self.center_point = center_point
else:
self.center_point = self.sum_embeddings(embeddings) / self.num_embeddings
self.max_id_reducer.process_iterable(self.embeddings_dict.keys())
self.max_embedding_id = self.max_id_reducer.get_state()
self.cluster_id = cluster_id
def __len__(self):
return len(self.embeddings_dict)
def set_label(self, label):
self.label = label
def set_cluster_id(self, cluster_id):
self.cluster_id = cluster_id
@classmethod
def set_metric(cls, metric):
cls.metric = metric
def get_embeddings(self, with_embeddings_ids=False, as_dict=False, as_list=False):
if with_embeddings_ids or as_dict:
if as_dict:
return self.embeddings_dict
return self.embeddings_dict.items()
embeddings = self.embeddings_dict.values()
if as_list:
return list(embeddings)
return embeddings
def get_embeddings_ids(self):
return self.embeddings_dict.keys()
def get_size(self):
return len(self.embeddings_dict)
def add_embedding(self, new_embedding, new_embedding_id=None, overwrite=False):
return self.add_embeddings([new_embedding], [new_embedding_id], overwrite)
def add_embeddings(self, new_embeddings, new_embeddings_ids=None, overwrite=False):
if not new_embeddings:
return
if new_embeddings_ids is None:
next_embedding_id = self.max_embedding_id + 1
new_embeddings_ids = count(start=next_embedding_id)
new_embeddings_dict = dict(zip(new_embeddings_ids, new_embeddings))
if overwrite:
self.embeddings_dict.update(new_embeddings_dict)
else:
new_embeddings_dict.update(self.embeddings_dict)
self.embeddings_dict = new_embeddings_dict
old_num_embeddings = self.num_embeddings
self.num_embeddings = len(self.embeddings_dict)
embeddings = self.get_embeddings(as_list=True)
embeddings_sum = self.sum_embeddings(embeddings)
# TODO: Check the math!!!
if self.center_point is not None:
self.center_point = (old_num_embeddings * self.center_point + embeddings_sum) / self.num_embeddings
else:
self.center_point = embeddings_sum / self.num_embeddings
def remove_embedding_by_id(self, embedding_id):
try:
embedding = self.embeddings_dict.pop(embedding_id)
except KeyError:
log_error(f'embedding with id {embedding_id} not found.')
return
old_num_embeddings = self.num_embeddings
self.num_embeddings -= 1
# TODO: Check the math!!!
# (old_center is a uniformly weighted sum of the old embeddings)
try:
self.center_point = (old_num_embeddings * self.center_point - embedding) / self.num_embeddings
except ZeroDivisionError: # num_embeddings is 0
self.center_point = None
def get_center_point(self):
return self.center_point
def get_embedding(self, embedding_id):
return self.embeddings_dict[embedding_id]
def contains_embedding(self, embedding_id):
return self.embeddings_dict.get(embedding_id) is not None
def compute_dist_to_center(self, embedding):
return self.compute_dist(self.center_point, embedding)
@classmethod
def compute_dist(cls, embedding1, embedding2, metric=None):
if metric is None:
metric = cls.metric
return float(torch.dist(embedding1, embedding2, p=metric))
@staticmethod
def sum_embeddings(embeddings):
# return reduce(torch.add, embeddings)
return torch.sum(torch.stack(embeddings), dim=0)
|
normal
|
{
"blob_id": "265c594b12ea45a2dda12e1157e5ea040f4d6ce4",
"index": 9021,
"step-1": "<mask token>\n\n\nclass Cluster:\n <mask token>\n <mask token>\n\n def __len__(self):\n return len(self.embeddings_dict)\n\n def set_label(self, label):\n self.label = label\n <mask token>\n <mask token>\n <mask token>\n\n def get_embeddings_ids(self):\n return self.embeddings_dict.keys()\n <mask token>\n\n def add_embedding(self, new_embedding, new_embedding_id=None, overwrite\n =False):\n return self.add_embeddings([new_embedding], [new_embedding_id],\n overwrite)\n <mask token>\n <mask token>\n\n def get_center_point(self):\n return self.center_point\n <mask token>\n\n def contains_embedding(self, embedding_id):\n return self.embeddings_dict.get(embedding_id) is not None\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Cluster:\n <mask token>\n\n def __init__(self, cluster_id, embeddings=None, embeddings_ids=None,\n label=None, center_point=None):\n \"\"\"\n embeddings must be (flat) iterable of embeddings with len applicable\n :param embeddings:\n :param embeddings_ids:\n \"\"\"\n if label is None:\n label = 'Unknown Person'\n self.label = label\n self.max_id_reducer = MaxReducer()\n if embeddings is None:\n self.embeddings_dict = dict()\n self.num_embeddings = 0\n self.center_point = None\n self.max_embedding_id = 0\n self.max_id_reducer(self.max_embedding_id)\n else:\n if embeddings_ids is None:\n embeddings_ids = count(1)\n self.embeddings_dict = dict(zip(embeddings_ids, embeddings))\n self.num_embeddings = len(self.embeddings_dict)\n if center_point is not None:\n self.center_point = center_point\n else:\n self.center_point = self.sum_embeddings(embeddings\n ) / self.num_embeddings\n self.max_id_reducer.process_iterable(self.embeddings_dict.keys())\n self.max_embedding_id = self.max_id_reducer.get_state()\n self.cluster_id = cluster_id\n\n def __len__(self):\n return len(self.embeddings_dict)\n\n def set_label(self, label):\n self.label = label\n\n def set_cluster_id(self, cluster_id):\n self.cluster_id = cluster_id\n\n @classmethod\n def set_metric(cls, metric):\n cls.metric = metric\n <mask token>\n\n def get_embeddings_ids(self):\n return self.embeddings_dict.keys()\n\n def get_size(self):\n return len(self.embeddings_dict)\n\n def add_embedding(self, new_embedding, new_embedding_id=None, overwrite\n =False):\n return self.add_embeddings([new_embedding], [new_embedding_id],\n overwrite)\n\n def add_embeddings(self, new_embeddings, new_embeddings_ids=None,\n overwrite=False):\n if not new_embeddings:\n return\n if new_embeddings_ids is None:\n next_embedding_id = self.max_embedding_id + 1\n new_embeddings_ids = count(start=next_embedding_id)\n new_embeddings_dict = dict(zip(new_embeddings_ids, new_embeddings))\n if overwrite:\n self.embeddings_dict.update(new_embeddings_dict)\n else:\n new_embeddings_dict.update(self.embeddings_dict)\n self.embeddings_dict = new_embeddings_dict\n old_num_embeddings = self.num_embeddings\n self.num_embeddings = len(self.embeddings_dict)\n embeddings = self.get_embeddings(as_list=True)\n embeddings_sum = self.sum_embeddings(embeddings)\n if self.center_point is not None:\n self.center_point = (old_num_embeddings * self.center_point +\n embeddings_sum) / self.num_embeddings\n else:\n self.center_point = embeddings_sum / self.num_embeddings\n\n def remove_embedding_by_id(self, embedding_id):\n try:\n embedding = self.embeddings_dict.pop(embedding_id)\n except KeyError:\n log_error(f'embedding with id {embedding_id} not found.')\n return\n old_num_embeddings = self.num_embeddings\n self.num_embeddings -= 1\n try:\n self.center_point = (old_num_embeddings * self.center_point -\n embedding) / self.num_embeddings\n except ZeroDivisionError:\n self.center_point = None\n\n def get_center_point(self):\n return self.center_point\n\n def get_embedding(self, embedding_id):\n return self.embeddings_dict[embedding_id]\n\n def contains_embedding(self, embedding_id):\n return self.embeddings_dict.get(embedding_id) is not None\n\n def compute_dist_to_center(self, embedding):\n return self.compute_dist(self.center_point, embedding)\n\n @classmethod\n def compute_dist(cls, embedding1, embedding2, metric=None):\n if metric is None:\n metric = cls.metric\n return float(torch.dist(embedding1, embedding2, p=metric))\n\n @staticmethod\n def 
sum_embeddings(embeddings):\n return torch.sum(torch.stack(embeddings), dim=0)\n",
"step-3": "<mask token>\nlogging.basicConfig(level=logging.INFO)\n\n\nclass Cluster:\n metric = 2\n\n def __init__(self, cluster_id, embeddings=None, embeddings_ids=None,\n label=None, center_point=None):\n \"\"\"\n embeddings must be (flat) iterable of embeddings with len applicable\n :param embeddings:\n :param embeddings_ids:\n \"\"\"\n if label is None:\n label = 'Unknown Person'\n self.label = label\n self.max_id_reducer = MaxReducer()\n if embeddings is None:\n self.embeddings_dict = dict()\n self.num_embeddings = 0\n self.center_point = None\n self.max_embedding_id = 0\n self.max_id_reducer(self.max_embedding_id)\n else:\n if embeddings_ids is None:\n embeddings_ids = count(1)\n self.embeddings_dict = dict(zip(embeddings_ids, embeddings))\n self.num_embeddings = len(self.embeddings_dict)\n if center_point is not None:\n self.center_point = center_point\n else:\n self.center_point = self.sum_embeddings(embeddings\n ) / self.num_embeddings\n self.max_id_reducer.process_iterable(self.embeddings_dict.keys())\n self.max_embedding_id = self.max_id_reducer.get_state()\n self.cluster_id = cluster_id\n\n def __len__(self):\n return len(self.embeddings_dict)\n\n def set_label(self, label):\n self.label = label\n\n def set_cluster_id(self, cluster_id):\n self.cluster_id = cluster_id\n\n @classmethod\n def set_metric(cls, metric):\n cls.metric = metric\n\n def get_embeddings(self, with_embeddings_ids=False, as_dict=False,\n as_list=False):\n if with_embeddings_ids or as_dict:\n if as_dict:\n return self.embeddings_dict\n return self.embeddings_dict.items()\n embeddings = self.embeddings_dict.values()\n if as_list:\n return list(embeddings)\n return embeddings\n\n def get_embeddings_ids(self):\n return self.embeddings_dict.keys()\n\n def get_size(self):\n return len(self.embeddings_dict)\n\n def add_embedding(self, new_embedding, new_embedding_id=None, overwrite\n =False):\n return self.add_embeddings([new_embedding], [new_embedding_id],\n overwrite)\n\n def add_embeddings(self, new_embeddings, new_embeddings_ids=None,\n overwrite=False):\n if not new_embeddings:\n return\n if new_embeddings_ids is None:\n next_embedding_id = self.max_embedding_id + 1\n new_embeddings_ids = count(start=next_embedding_id)\n new_embeddings_dict = dict(zip(new_embeddings_ids, new_embeddings))\n if overwrite:\n self.embeddings_dict.update(new_embeddings_dict)\n else:\n new_embeddings_dict.update(self.embeddings_dict)\n self.embeddings_dict = new_embeddings_dict\n old_num_embeddings = self.num_embeddings\n self.num_embeddings = len(self.embeddings_dict)\n embeddings = self.get_embeddings(as_list=True)\n embeddings_sum = self.sum_embeddings(embeddings)\n if self.center_point is not None:\n self.center_point = (old_num_embeddings * self.center_point +\n embeddings_sum) / self.num_embeddings\n else:\n self.center_point = embeddings_sum / self.num_embeddings\n\n def remove_embedding_by_id(self, embedding_id):\n try:\n embedding = self.embeddings_dict.pop(embedding_id)\n except KeyError:\n log_error(f'embedding with id {embedding_id} not found.')\n return\n old_num_embeddings = self.num_embeddings\n self.num_embeddings -= 1\n try:\n self.center_point = (old_num_embeddings * self.center_point -\n embedding) / self.num_embeddings\n except ZeroDivisionError:\n self.center_point = None\n\n def get_center_point(self):\n return self.center_point\n\n def get_embedding(self, embedding_id):\n return self.embeddings_dict[embedding_id]\n\n def contains_embedding(self, embedding_id):\n return self.embeddings_dict.get(embedding_id) is 
not None\n\n def compute_dist_to_center(self, embedding):\n return self.compute_dist(self.center_point, embedding)\n\n @classmethod\n def compute_dist(cls, embedding1, embedding2, metric=None):\n if metric is None:\n metric = cls.metric\n return float(torch.dist(embedding1, embedding2, p=metric))\n\n @staticmethod\n def sum_embeddings(embeddings):\n return torch.sum(torch.stack(embeddings), dim=0)\n",
"step-4": "from Logic.ProperLogic.helper_classes.reducer import MaxReducer\nfrom Logic.ProperLogic.misc_helpers import log_error\nimport torch\nfrom itertools import count\nimport logging\nlogging.basicConfig(level=logging.INFO)\n\n\nclass Cluster:\n metric = 2\n\n def __init__(self, cluster_id, embeddings=None, embeddings_ids=None,\n label=None, center_point=None):\n \"\"\"\n embeddings must be (flat) iterable of embeddings with len applicable\n :param embeddings:\n :param embeddings_ids:\n \"\"\"\n if label is None:\n label = 'Unknown Person'\n self.label = label\n self.max_id_reducer = MaxReducer()\n if embeddings is None:\n self.embeddings_dict = dict()\n self.num_embeddings = 0\n self.center_point = None\n self.max_embedding_id = 0\n self.max_id_reducer(self.max_embedding_id)\n else:\n if embeddings_ids is None:\n embeddings_ids = count(1)\n self.embeddings_dict = dict(zip(embeddings_ids, embeddings))\n self.num_embeddings = len(self.embeddings_dict)\n if center_point is not None:\n self.center_point = center_point\n else:\n self.center_point = self.sum_embeddings(embeddings\n ) / self.num_embeddings\n self.max_id_reducer.process_iterable(self.embeddings_dict.keys())\n self.max_embedding_id = self.max_id_reducer.get_state()\n self.cluster_id = cluster_id\n\n def __len__(self):\n return len(self.embeddings_dict)\n\n def set_label(self, label):\n self.label = label\n\n def set_cluster_id(self, cluster_id):\n self.cluster_id = cluster_id\n\n @classmethod\n def set_metric(cls, metric):\n cls.metric = metric\n\n def get_embeddings(self, with_embeddings_ids=False, as_dict=False,\n as_list=False):\n if with_embeddings_ids or as_dict:\n if as_dict:\n return self.embeddings_dict\n return self.embeddings_dict.items()\n embeddings = self.embeddings_dict.values()\n if as_list:\n return list(embeddings)\n return embeddings\n\n def get_embeddings_ids(self):\n return self.embeddings_dict.keys()\n\n def get_size(self):\n return len(self.embeddings_dict)\n\n def add_embedding(self, new_embedding, new_embedding_id=None, overwrite\n =False):\n return self.add_embeddings([new_embedding], [new_embedding_id],\n overwrite)\n\n def add_embeddings(self, new_embeddings, new_embeddings_ids=None,\n overwrite=False):\n if not new_embeddings:\n return\n if new_embeddings_ids is None:\n next_embedding_id = self.max_embedding_id + 1\n new_embeddings_ids = count(start=next_embedding_id)\n new_embeddings_dict = dict(zip(new_embeddings_ids, new_embeddings))\n if overwrite:\n self.embeddings_dict.update(new_embeddings_dict)\n else:\n new_embeddings_dict.update(self.embeddings_dict)\n self.embeddings_dict = new_embeddings_dict\n old_num_embeddings = self.num_embeddings\n self.num_embeddings = len(self.embeddings_dict)\n embeddings = self.get_embeddings(as_list=True)\n embeddings_sum = self.sum_embeddings(embeddings)\n if self.center_point is not None:\n self.center_point = (old_num_embeddings * self.center_point +\n embeddings_sum) / self.num_embeddings\n else:\n self.center_point = embeddings_sum / self.num_embeddings\n\n def remove_embedding_by_id(self, embedding_id):\n try:\n embedding = self.embeddings_dict.pop(embedding_id)\n except KeyError:\n log_error(f'embedding with id {embedding_id} not found.')\n return\n old_num_embeddings = self.num_embeddings\n self.num_embeddings -= 1\n try:\n self.center_point = (old_num_embeddings * self.center_point -\n embedding) / self.num_embeddings\n except ZeroDivisionError:\n self.center_point = None\n\n def get_center_point(self):\n return self.center_point\n\n def 
get_embedding(self, embedding_id):\n return self.embeddings_dict[embedding_id]\n\n def contains_embedding(self, embedding_id):\n return self.embeddings_dict.get(embedding_id) is not None\n\n def compute_dist_to_center(self, embedding):\n return self.compute_dist(self.center_point, embedding)\n\n @classmethod\n def compute_dist(cls, embedding1, embedding2, metric=None):\n if metric is None:\n metric = cls.metric\n return float(torch.dist(embedding1, embedding2, p=metric))\n\n @staticmethod\n def sum_embeddings(embeddings):\n return torch.sum(torch.stack(embeddings), dim=0)\n",
"step-5": "from Logic.ProperLogic.helper_classes.reducer import MaxReducer\nfrom Logic.ProperLogic.misc_helpers import log_error\nimport torch\n\nfrom itertools import count\n\nimport logging\nlogging.basicConfig(level=logging.INFO)\n\n\nclass Cluster:\n metric = 2\n\n def __init__(self, cluster_id, embeddings=None, embeddings_ids=None, label=None, center_point=None):\n \"\"\"\n embeddings must be (flat) iterable of embeddings with len applicable\n :param embeddings:\n :param embeddings_ids:\n \"\"\"\n if label is None:\n label = 'Unknown Person'\n self.label = label\n self.max_id_reducer = MaxReducer()\n if embeddings is None:\n self.embeddings_dict = dict()\n self.num_embeddings = 0\n self.center_point = None\n self.max_embedding_id = 0\n self.max_id_reducer(self.max_embedding_id)\n else:\n if embeddings_ids is None:\n embeddings_ids = count(1)\n # cast embeddings to dict\n self.embeddings_dict = dict(zip(embeddings_ids, embeddings))\n self.num_embeddings = len(self.embeddings_dict)\n if center_point is not None:\n self.center_point = center_point\n else:\n self.center_point = self.sum_embeddings(embeddings) / self.num_embeddings\n self.max_id_reducer.process_iterable(self.embeddings_dict.keys())\n self.max_embedding_id = self.max_id_reducer.get_state()\n\n self.cluster_id = cluster_id\n\n def __len__(self):\n return len(self.embeddings_dict)\n\n def set_label(self, label):\n self.label = label\n\n def set_cluster_id(self, cluster_id):\n self.cluster_id = cluster_id\n\n @classmethod\n def set_metric(cls, metric):\n cls.metric = metric\n\n def get_embeddings(self, with_embeddings_ids=False, as_dict=False, as_list=False):\n if with_embeddings_ids or as_dict:\n if as_dict:\n return self.embeddings_dict\n return self.embeddings_dict.items()\n\n embeddings = self.embeddings_dict.values()\n if as_list:\n return list(embeddings)\n return embeddings\n\n def get_embeddings_ids(self):\n return self.embeddings_dict.keys()\n\n def get_size(self):\n return len(self.embeddings_dict)\n\n def add_embedding(self, new_embedding, new_embedding_id=None, overwrite=False):\n return self.add_embeddings([new_embedding], [new_embedding_id], overwrite)\n\n def add_embeddings(self, new_embeddings, new_embeddings_ids=None, overwrite=False):\n if not new_embeddings:\n return\n\n if new_embeddings_ids is None:\n next_embedding_id = self.max_embedding_id + 1\n new_embeddings_ids = count(start=next_embedding_id)\n\n new_embeddings_dict = dict(zip(new_embeddings_ids, new_embeddings))\n if overwrite:\n self.embeddings_dict.update(new_embeddings_dict)\n else:\n new_embeddings_dict.update(self.embeddings_dict)\n self.embeddings_dict = new_embeddings_dict\n\n old_num_embeddings = self.num_embeddings\n self.num_embeddings = len(self.embeddings_dict)\n embeddings = self.get_embeddings(as_list=True)\n embeddings_sum = self.sum_embeddings(embeddings)\n\n # TODO: Check the math!!!\n if self.center_point is not None:\n self.center_point = (old_num_embeddings * self.center_point + embeddings_sum) / self.num_embeddings\n else:\n self.center_point = embeddings_sum / self.num_embeddings\n\n def remove_embedding_by_id(self, embedding_id):\n try:\n embedding = self.embeddings_dict.pop(embedding_id)\n except KeyError:\n log_error(f'embedding with id {embedding_id} not found.')\n return\n\n old_num_embeddings = self.num_embeddings\n self.num_embeddings -= 1\n\n # TODO: Check the math!!!\n\n # (old_center is a uniformly weighted sum of the old embeddings)\n try:\n self.center_point = (old_num_embeddings * self.center_point - embedding) / 
self.num_embeddings\n except ZeroDivisionError: # num_embeddings is 0\n self.center_point = None\n\n def get_center_point(self):\n return self.center_point\n\n def get_embedding(self, embedding_id):\n return self.embeddings_dict[embedding_id]\n\n def contains_embedding(self, embedding_id):\n return self.embeddings_dict.get(embedding_id) is not None\n\n def compute_dist_to_center(self, embedding):\n return self.compute_dist(self.center_point, embedding)\n\n @classmethod\n def compute_dist(cls, embedding1, embedding2, metric=None):\n if metric is None:\n metric = cls.metric\n return float(torch.dist(embedding1, embedding2, p=metric))\n\n @staticmethod\n def sum_embeddings(embeddings):\n # return reduce(torch.add, embeddings)\n return torch.sum(torch.stack(embeddings), dim=0)\n",
"step-ids": [
7,
17,
20,
21,
22
]
}
|
[
7,
17,
20,
21,
22
] |
#!/usr/bin/env python
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import unittest
# Allow interactive execution from CLI, cd tests; ./test_cli.py
if __package__ is None:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from ksconf.conf.parser import PARSECONF_LOOSE, parse_conf
from ksconf.consts import EXIT_CODE_COMBINE_MARKER_MISSING, EXIT_CODE_SUCCESS
from tests.cli_helper import TestWorkDir, ksconf_cli
class CliKsconfCombineTestCase(unittest.TestCase):
def build_test01(self, twd):
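# Shared fixture: lay down a three-layer default.d structure (10-upstream,
# 20-corp, 60-dept) with overlapping props.conf, alert_actions.conf, and nav XML.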
twd.write_file("etc/apps/Splunk_TA_aws/default.d/10-upstream/props.conf", r"""
[aws:config]
SHOULD_LINEMERGE = false
TRUNCATE = 8388608
TIME_PREFIX = configurationItemCaptureTime"\s*:\s*"
TIME_FORMAT = %Y-%m-%dT%H:%M:%S.%3NZ
TZ = GMT
MAX_TIMESTAMP_LOOKAHEAD = 28
KV_MODE = json
ANNOTATE_PUNCT = false
FIELDALIAS-dest = resourceType AS dest
FIELDALIAS-object = resourceId AS object
FIELDALIAS-object_id = ARN AS object_id
EVAL-change_type = "configuration"
EVAL-dvc = "AWS Config"
EVAL-status="success"
LOOKUP-action= aws_config_action_lookup status AS configurationItemStatus OUTPUT action
LOOKUP-object_category = aws_config_object_category_lookup type AS resourceType OUTPUT object_category
# unify account ID field
FIELDALIAS-aws-account-id = awsAccountId as aws_account_id
FIELDALIAS-region-for-aws-config = awsRegion AS region
""")
twd.write_file("etc/apps/Splunk_TA_aws/default.d/10-upstream/data/ui/nav/default.xml", """
<nav search_view="search" color="#65A637">
<view name="Inputs" default="true" label="Inputs" />
<view name="Configuration" default="false" label="Configuration" />
<view name="search" default="false" label="Search" />
</nav>
""")
# In the future there will be a more efficient way to handle the global 'ANNOTATE_PUNCT' scenario
twd.write_file("etc/apps/Splunk_TA_aws/default.d/20-corp/props.conf", """
[aws:config]
TZ = UTC
# Corp wants punct to be enabled globally
ANNOTATE_PUNCT = true
""")
twd.write_file("etc/apps/Splunk_TA_aws/default.d/60-dept/props.conf", """
[aws:config]
# Our config is bigger than yours!
TRUNCATE = 9999999
""")
twd.write_file("etc/apps/Splunk_TA_aws/default.d/10-upstream/alert_actions.conf", """
[aws_sns_modular_alert]
is_custom = 1
label = AWS SNS Alert
description = Publish search result to AWS SNS
payload_format = json
icon_path = appIcon.png
""")
twd.write_file("etc/apps/Splunk_TA_aws/default.d/60-dept/alert_actions.conf", """
[aws_sns_modular_alert]
param.account = DeptAwsAccount
""")
twd.write_file("etc/apps/Splunk_TA_aws/default.d/60-dept/data/ui/nav/default.xml", """
<nav search_view="search" color="#65A637">
<view name="My custom view" />
<view name="Inputs" default="true" label="Inputs" />
<view name="Configuration" default="false" label="Configuration" />
<view name="search" default="false" label="Search" />
</nav>
""")
def test_combine_3dir(self):
# Note that this test exercises the old-school version of '*.d' processing, but we must preserve this behavior.
# Be aware that we pass in 'default.d/*' as a string and expand the glob ourselves rather than letting the
# shell handle it; this is the _normal_ behavior when dealing with Windows.
twd = TestWorkDir()
self.build_test01(twd)
default = twd.get_path("etc/apps/Splunk_TA_aws/default")
with ksconf_cli:
ko = ksconf_cli("combine", "--dry-run", "--target", default, default + ".d/*")
# Q: Why do we run this once, but not check anything about it? (To ensure dry-run has no side effects?)
ko = ksconf_cli("combine", "--target", default, default + ".d/*")
self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)
cfg = parse_conf(twd.get_path("etc/apps/Splunk_TA_aws/default/props.conf"))
self.assertIn("aws:config", cfg)
self.assertEqual(cfg["aws:config"]["ANNOTATE_PUNCT"], "true")
self.assertEqual(cfg["aws:config"]["EVAL-change_type"], '"configuration"')
self.assertEqual(cfg["aws:config"]["TRUNCATE"], '9999999')
nav_content = twd.read_file("etc/apps/Splunk_TA_aws/default/data/ui/nav/default.xml")
self.assertIn("My custom view", nav_content)
twd.write_conf("etc/apps/Splunk_TA_aws/default.d/99-theforce/props.conf", {
"aws:config": {"TIME_FORMAT": "%Y-%m-%dT%H:%M:%S.%6NZ"}
})
twd.write_file("etc/apps/Splunk_TA_aws/default.d/99-theforce/data/ui/nav/default.xml", """
<nav search_view="search" color="#65A637">
<view name="My custom view" />
<view name="Inputs" default="true" label="Inputs" />
<view name="Configuration" default="false" label="Configuration" />
</nav>
""")
twd.write_file("etc/apps/Splunk_TA_aws/default/data/dead.conf", "# File to remove")
twd.write_file("etc/apps/Splunk_TA_aws/default/data/tags.conf", "# Locally created file")
twd.write_file("etc/apps/Splunk_TA_aws/default.d/99-blah/same.txt", "SAME TEXT")
twd.write_file("etc/apps/Splunk_TA_aws/default/same.txt", "SAME TEXT")
twd.write_file("etc/apps/Splunk_TA_aws/default.d/99-blah/binary.bin", b"#BINARY \xff \x00")
twd.write_file("etc/apps/Splunk_TA_aws/default/binary.bin", b"#BINARY NEW \x00 \xff \xFB")
with ksconf_cli:
ko = ksconf_cli("combine", "--dry-run", "--target", default, default + ".d/*")
self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)
self.assertRegex(ko.stdout, r'[\r\n][-]\s*<view name="search"')
self.assertRegex(ko.stdout, r'[\r\n][-] ?[\r\n]') # Remove empty lines from nav
self.assertRegex(ko.stdout, r"[\r\n][+]TIME_FORMAT = [^\r\n]+%6N")
with ksconf_cli:
ko = ksconf_cli("combine", "--target", default, default + ".d/*")
def test_sort_order(self):
"Confirm that single input files are copied as-is"
twd = TestWorkDir()
default = twd.get_path("input")
target = twd.get_path("output")
unique_conf = [
"z = 1",
" b=? ",
"a = 9"]
twd.write_file("input/unique.conf",
"\n".join(unique_conf))
with ksconf_cli:
ko = ksconf_cli("combine", "--layer-method", "disable", "--banner", "",
"--target", target, default)
self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)
data = twd.read_file("output/unique.conf").splitlines()
self.assertListEqual(unique_conf, data)
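# dir.d layering merges numbered layers in order, so the highest-numbered
# layer (60-dept) should win for keys such as TRUNCATE and param.account below.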
def test_combine_dird(self):
twd = TestWorkDir()
self.build_test01(twd)
default = twd.get_path("etc/apps/Splunk_TA_aws")
target = twd.get_path("etc/apps/Splunk_TA_aws-OUTPUT")
with ksconf_cli:
ko = ksconf_cli("combine", "--layer-method", "dir.d", "--dry-run", "--target", target, default)
ko = ksconf_cli("combine", "--layer-method", "dir.d", "--target", target, default)
self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)
cfg = parse_conf(target + "/default/props.conf")
self.assertIn("aws:config", cfg)
self.assertEqual(cfg["aws:config"]["ANNOTATE_PUNCT"], "true")
self.assertEqual(cfg["aws:config"]["EVAL-change_type"], '"configuration"')
self.assertEqual(cfg["aws:config"]["TRUNCATE"], '9999999')
nav_content = twd.read_file("etc/apps/Splunk_TA_aws-OUTPUT/default/data/ui/nav/default.xml")
self.assertIn("My custom view", nav_content)
alert_action = twd.read_conf("etc/apps/Splunk_TA_aws-OUTPUT/default/alert_actions.conf")
self.assertIn("aws_sns_modular_alert", alert_action)
self.assertEqual(alert_action["aws_sns_modular_alert"]["param.account"], "DeptAwsAccount") # layer 10
self.assertEqual(alert_action["aws_sns_modular_alert"]["label"], "AWS SNS Alert") # layer 60
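# --keep-existing must leave alone files that another process (here, a
# simulated 'splunk reload deploy-server') regenerates inside the target app.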
def test_keep_existing_ds_local_app(self):
twd = TestWorkDir()
src = twd.get_path("repo/apps/Splunk_TA_nix")
target = twd.get_path("etc/deployment-apps/Splunk_TA_nix")
twd.write_file("repo/apps/Splunk_TA_nix/default/app.conf", r"""
[install]
allows_disable = false
is_configured = true
state = enabled
[launcher]
author = Splunk
description = The app is Splunk
version = 7.0.0
""")
# Make parent directories
os.makedirs(twd.get_path("etc/deployment-apps"))
# First run (creates marker file)
with ksconf_cli:
ko = ksconf_cli("combine", "--keep-existing", "local/app.conf",
"--target", target, src)
self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)
# Local folder hasn't been created yet
self.assertFalse(os.path.isdir(twd.get_path("etc/deployment-apps/Splunk_TA_nix/local")))
# Simulate a 'splunk reload deploy-server'
twd.write_file("etc/deployment-apps/Splunk_TA_nix/local/app.conf", "# Autogenerated file")
with ksconf_cli:
ko = ksconf_cli("combine", "--keep-existing", "local/app.conf",
"--target", target, src)
self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)
cfg = parse_conf(os.path.join(target, "default/app.conf"))
self.assertIn("install", cfg)
self.assertEqual(cfg["launcher"]["version"], "7.0.0")
self.assertEqual(twd.read_file("etc/deployment-apps/Splunk_TA_nix/local/app.conf"),
"# Autogenerated file")
# This time the file will be removed
ko = ksconf_cli("combine", "--target", target, src)
self.assertFalse(os.path.isfile(twd.get_path("etc/deployment-apps/Splunk_TA_nix/local/app.conf")),
"local/app.conf should have been removed.")
def test_combine_conf_spec(self):
twd = TestWorkDir()
self.build_test01(twd)
twd.write_file("etc/apps/Splunk_TA_aws/README.d/10-upstream/custom_config.conf.spec", r"""
[<stanza_type1>]
important_field = <str>
* Some notes about the important field.
* Required!
disabled = <bool>
""")
twd.write_file("etc/apps/Splunk_TA_aws/README.d/60-dept/custom_config.conf.spec", r"""
[bookmark::<prefixed_stanza_type>]
resource = <url>
category = <str>
* Label for organization
disabled = <bool>
""")
default = twd.get_path("etc/apps/Splunk_TA_aws")
target = twd.get_path("etc/apps/Splunk_TA_aws-OUTPUT")
with ksconf_cli:
ko = ksconf_cli("combine", "--layer-method", "dir.d", "--target", target, default)
self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)
spec_file = twd.get_path("etc/apps/Splunk_TA_aws-OUTPUT/README/custom_config.conf.spec")
spec = parse_conf(spec_file, profile=PARSECONF_LOOSE)
self.assertIn("bookmark::<prefixed_stanza_type>", spec)
self.assertIn("<stanza_type1>", spec)
def test_require_arg(self):
with ksconf_cli:
ko = ksconf_cli("combine", "source-dir")
self.assertRegex(ko.stderr, "Must provide [^\r\n]+--target")
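# Combining into a pre-existing, unmarked target directory must abort with
# EXIT_CODE_COMBINE_MARKER_MISSING rather than overwrite user files.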
def test_missing_marker(self):
twd = TestWorkDir()
twd.write_file("source-dir/someapp/default/blah.conf", "[entry]\nboring=yes\n")
twd.write_file("dest-dir/someapp/default/blah.conf", "[entry]\nboring=yes\n")
ko = ksconf_cli("combine", twd.get_path("source-dir"), "--target", twd.get_path("dest-dir"))
self.assertEqual(ko.returncode, EXIT_CODE_COMBINE_MARKER_MISSING)
self.assertRegex(ko.stderr, r".*Marker file missing\b.*")
if __name__ == '__main__': # pragma: no cover
unittest.main()
|
normal
|
{
"blob_id": "1bb953b665f48638691986e2fcae73b10a1c2ce0",
"index": 7729,
"step-1": "<mask token>\n\n\nclass CliKsconfCombineTestCase(unittest.TestCase):\n\n def build_test01(self, twd):\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/10-upstream/props.conf',\n \"\"\"\n [aws:config]\n SHOULD_LINEMERGE = false\n TRUNCATE = 8388608\n TIME_PREFIX = configurationItemCaptureTime\"\\\\s*:\\\\s*\"\n TIME_FORMAT = %Y-%m-%dT%H:%M:%S.%3NZ\n TZ = GMT\n MAX_TIMESTAMP_LOOKAHEAD = 28\n KV_MODE = json\n ANNOTATE_PUNCT = false\n\n FIELDALIAS-dest = resourceType AS dest\n FIELDALIAS-object = resourceId AS object\n FIELDALIAS-object_id = ARN AS object_id\n EVAL-change_type = \"configuration\"\n EVAL-dvc = \"AWS Config\"\n EVAL-status=\"success\"\n LOOKUP-action= aws_config_action_lookup status AS configurationItemStatus OUTPUT action\n LOOKUP-object_category = aws_config_object_category_lookup type AS resourceType OUTPUT object_category\n\n # unify account ID field\n FIELDALIAS-aws-account-id = awsAccountId as aws_account_id\n FIELDALIAS-region-for-aws-config = awsRegion AS region\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/10-upstream/data/ui/nav/default.xml'\n ,\n \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n <view name=\"search\" default=\"false\" label=\"Search\" />\n\n </nav>\n \"\"\"\n )\n twd.write_file('etc/apps/Splunk_TA_aws/default.d/20-corp/props.conf',\n \"\"\"\n [aws:config]\n TZ = UTC\n # Corp want's punct to be enabled globally\n ANNOTATE_PUNCT = true\n \"\"\"\n )\n twd.write_file('etc/apps/Splunk_TA_aws/default.d/60-dept/props.conf',\n \"\"\"\n [aws:config]\n # Our config is bigger than yours!\n TRUNCATE = 9999999\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/10-upstream/alert_actions.conf',\n \"\"\"\n [aws_sns_modular_alert]\n is_custom = 1\n label = AWS SNS Alert\n description = Publish search result to AWS SNS\n payload_format = json\n icon_path = appIcon.png\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/60-dept/alert_actions.conf',\n \"\"\"\n [aws_sns_modular_alert]\n param.account = DeptAwsAccount\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/60-dept/data/ui/nav/default.xml',\n \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n\n <view name=\"My custom view\" />\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n <view name=\"search\" default=\"false\" label=\"Search\" />\n\n </nav>\n \"\"\"\n )\n <mask token>\n <mask token>\n\n def test_combine_dird(self):\n twd = TestWorkDir()\n self.build_test01(twd)\n default = twd.get_path('etc/apps/Splunk_TA_aws')\n target = twd.get_path('etc/apps/Splunk_TA_aws-OUTPUT')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--layer-method', 'dir.d',\n '--dry-run', '--target', target, default)\n ko = ksconf_cli('combine', '--layer-method', 'dir.d',\n '--target', target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(target + '/default/props.conf')\n self.assertIn('aws:config', cfg)\n self.assertEqual(cfg['aws:config']['ANNOTATE_PUNCT'], 'true')\n self.assertEqual(cfg['aws:config']['EVAL-change_type'],\n '\"configuration\"')\n self.assertEqual(cfg['aws:config']['TRUNCATE'], '9999999')\n nav_content = twd.read_file(\n 'etc/apps/Splunk_TA_aws-OUTPUT/default/data/ui/nav/default.xml'\n )\n self.assertIn('My custom view', nav_content)\n alert_action = twd.read_conf(\n 
'etc/apps/Splunk_TA_aws-OUTPUT/default/alert_actions.conf')\n self.assertIn('aws_sns_modular_alert', alert_action)\n self.assertEqual(alert_action['aws_sns_modular_alert'][\n 'param.account'], 'DeptAwsAccount')\n self.assertEqual(alert_action['aws_sns_modular_alert']['label'],\n 'AWS SNS Alert')\n\n def test_keep_existing_ds_local_app(self):\n twd = TestWorkDir()\n src = twd.get_path('repo/apps/Splunk_TA_nix')\n target = twd.get_path('etc/deployment-apps/Splunk_TA_nix')\n twd.write_file('repo/apps/Splunk_TA_nix/default/app.conf',\n \"\"\"\n [install]\n allows_disable = false\n is_configured = true\n state = enabled\n\n [launcher]\n author = Splunk\n description = The app is Splunk\n version = 7.0.0\n \"\"\"\n )\n os.makedirs(twd.get_path('etc/deployment-apps'))\n with ksconf_cli:\n ko = ksconf_cli('combine', '--keep-existing', 'local/app.conf',\n '--target', target, src)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n self.assertFalse(os.path.isdir(twd.get_path(\n 'etc/deployment-apps/Splunk_TA_nix/local')))\n twd.write_file('etc/deployment-apps/Splunk_TA_nix/local/app.conf',\n '# Autogenerated file')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--keep-existing', 'local/app.conf',\n '--target', target, src)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(os.path.join(target, 'default/app.conf'))\n self.assertIn('install', cfg)\n self.assertEqual(cfg['launcher']['version'], '7.0.0')\n self.assertEqual(twd.read_file(\n 'etc/deployment-apps/Splunk_TA_nix/local/app.conf'),\n '# Autogenerated file')\n ko = ksconf_cli('combine', '--target', target, src)\n self.assertFalse(os.path.isfile(twd.get_path(\n 'etc/deployment-apps/Splunk_TA_nix/local/app.conf')),\n 'local/app.conf should have been removed.')\n\n def test_combine_conf_spec(self):\n twd = TestWorkDir()\n self.build_test01(twd)\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/README.d/10-upstream/custom_config.conf.spec'\n ,\n \"\"\"\n [<stanza_type1>]\n important_field = <str>\n * Some notes about the important field.\n * Required!\n disabled = <bool>\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/README.d/60-dept/custom_config.conf.spec',\n \"\"\"\n [bookmark::<prefixed_stanza_type>]\n resource = <url>\n category = <str>\n * Label for organization\n disabled = <bool>\n \"\"\"\n )\n default = twd.get_path('etc/apps/Splunk_TA_aws')\n target = twd.get_path('etc/apps/Splunk_TA_aws-OUTPUT')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--layer-method', 'dir.d',\n '--target', target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n spec_file = twd.get_path(\n 'etc/apps/Splunk_TA_aws-OUTPUT/README/custom_config.conf.spec')\n spec = parse_conf(spec_file, profile=PARSECONF_LOOSE)\n self.assertIn('bookmark::<prefixed_stanza_type>', spec)\n self.assertIn('<stanza_type1>', spec)\n\n def test_require_arg(self):\n with ksconf_cli:\n ko = ksconf_cli('combine', 'source-dir')\n self.assertRegex(ko.stderr, 'Must provide [^\\r\\n]+--target')\n\n def test_missing_marker(self):\n twd = TestWorkDir()\n twd.write_file('source-dir/someapp/default/blah.conf',\n '[entry]\\nboring=yes\\n')\n twd.write_file('dest-dir/someapp/default/blah.conf',\n '[entry]\\nboring=yes\\n')\n ko = ksconf_cli('combine', twd.get_path('source-dir'), '--target',\n twd.get_path('dest-dir'))\n self.assertEqual(ko.returncode, EXIT_CODE_COMBINE_MARKER_MISSING)\n self.assertRegex(ko.stderr, '.*Marker file missing\\\\b.*')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CliKsconfCombineTestCase(unittest.TestCase):\n\n def build_test01(self, twd):\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/10-upstream/props.conf',\n \"\"\"\n [aws:config]\n SHOULD_LINEMERGE = false\n TRUNCATE = 8388608\n TIME_PREFIX = configurationItemCaptureTime\"\\\\s*:\\\\s*\"\n TIME_FORMAT = %Y-%m-%dT%H:%M:%S.%3NZ\n TZ = GMT\n MAX_TIMESTAMP_LOOKAHEAD = 28\n KV_MODE = json\n ANNOTATE_PUNCT = false\n\n FIELDALIAS-dest = resourceType AS dest\n FIELDALIAS-object = resourceId AS object\n FIELDALIAS-object_id = ARN AS object_id\n EVAL-change_type = \"configuration\"\n EVAL-dvc = \"AWS Config\"\n EVAL-status=\"success\"\n LOOKUP-action= aws_config_action_lookup status AS configurationItemStatus OUTPUT action\n LOOKUP-object_category = aws_config_object_category_lookup type AS resourceType OUTPUT object_category\n\n # unify account ID field\n FIELDALIAS-aws-account-id = awsAccountId as aws_account_id\n FIELDALIAS-region-for-aws-config = awsRegion AS region\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/10-upstream/data/ui/nav/default.xml'\n ,\n \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n <view name=\"search\" default=\"false\" label=\"Search\" />\n\n </nav>\n \"\"\"\n )\n twd.write_file('etc/apps/Splunk_TA_aws/default.d/20-corp/props.conf',\n \"\"\"\n [aws:config]\n TZ = UTC\n # Corp want's punct to be enabled globally\n ANNOTATE_PUNCT = true\n \"\"\"\n )\n twd.write_file('etc/apps/Splunk_TA_aws/default.d/60-dept/props.conf',\n \"\"\"\n [aws:config]\n # Our config is bigger than yours!\n TRUNCATE = 9999999\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/10-upstream/alert_actions.conf',\n \"\"\"\n [aws_sns_modular_alert]\n is_custom = 1\n label = AWS SNS Alert\n description = Publish search result to AWS SNS\n payload_format = json\n icon_path = appIcon.png\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/60-dept/alert_actions.conf',\n \"\"\"\n [aws_sns_modular_alert]\n param.account = DeptAwsAccount\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/60-dept/data/ui/nav/default.xml',\n \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n\n <view name=\"My custom view\" />\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n <view name=\"search\" default=\"false\" label=\"Search\" />\n\n </nav>\n \"\"\"\n )\n <mask token>\n\n def test_sort_order(self):\n \"\"\"Confirm that single input files are copied as-is\"\"\"\n twd = TestWorkDir()\n default = twd.get_path('input')\n target = twd.get_path('output')\n unique_conf = ['z = 1', ' b=? 
', 'a = 9']\n twd.write_file('input/unique.conf', '\\n'.join(unique_conf))\n with ksconf_cli:\n ko = ksconf_cli('combine', '--layer-method', 'disable',\n '--banner', '', '--target', target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n data = twd.read_file('output/unique.conf').splitlines()\n self.assertListEqual(unique_conf, data)\n\n def test_combine_dird(self):\n twd = TestWorkDir()\n self.build_test01(twd)\n default = twd.get_path('etc/apps/Splunk_TA_aws')\n target = twd.get_path('etc/apps/Splunk_TA_aws-OUTPUT')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--layer-method', 'dir.d',\n '--dry-run', '--target', target, default)\n ko = ksconf_cli('combine', '--layer-method', 'dir.d',\n '--target', target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(target + '/default/props.conf')\n self.assertIn('aws:config', cfg)\n self.assertEqual(cfg['aws:config']['ANNOTATE_PUNCT'], 'true')\n self.assertEqual(cfg['aws:config']['EVAL-change_type'],\n '\"configuration\"')\n self.assertEqual(cfg['aws:config']['TRUNCATE'], '9999999')\n nav_content = twd.read_file(\n 'etc/apps/Splunk_TA_aws-OUTPUT/default/data/ui/nav/default.xml'\n )\n self.assertIn('My custom view', nav_content)\n alert_action = twd.read_conf(\n 'etc/apps/Splunk_TA_aws-OUTPUT/default/alert_actions.conf')\n self.assertIn('aws_sns_modular_alert', alert_action)\n self.assertEqual(alert_action['aws_sns_modular_alert'][\n 'param.account'], 'DeptAwsAccount')\n self.assertEqual(alert_action['aws_sns_modular_alert']['label'],\n 'AWS SNS Alert')\n\n def test_keep_existing_ds_local_app(self):\n twd = TestWorkDir()\n src = twd.get_path('repo/apps/Splunk_TA_nix')\n target = twd.get_path('etc/deployment-apps/Splunk_TA_nix')\n twd.write_file('repo/apps/Splunk_TA_nix/default/app.conf',\n \"\"\"\n [install]\n allows_disable = false\n is_configured = true\n state = enabled\n\n [launcher]\n author = Splunk\n description = The app is Splunk\n version = 7.0.0\n \"\"\"\n )\n os.makedirs(twd.get_path('etc/deployment-apps'))\n with ksconf_cli:\n ko = ksconf_cli('combine', '--keep-existing', 'local/app.conf',\n '--target', target, src)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n self.assertFalse(os.path.isdir(twd.get_path(\n 'etc/deployment-apps/Splunk_TA_nix/local')))\n twd.write_file('etc/deployment-apps/Splunk_TA_nix/local/app.conf',\n '# Autogenerated file')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--keep-existing', 'local/app.conf',\n '--target', target, src)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(os.path.join(target, 'default/app.conf'))\n self.assertIn('install', cfg)\n self.assertEqual(cfg['launcher']['version'], '7.0.0')\n self.assertEqual(twd.read_file(\n 'etc/deployment-apps/Splunk_TA_nix/local/app.conf'),\n '# Autogenerated file')\n ko = ksconf_cli('combine', '--target', target, src)\n self.assertFalse(os.path.isfile(twd.get_path(\n 'etc/deployment-apps/Splunk_TA_nix/local/app.conf')),\n 'local/app.conf should have been removed.')\n\n def test_combine_conf_spec(self):\n twd = TestWorkDir()\n self.build_test01(twd)\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/README.d/10-upstream/custom_config.conf.spec'\n ,\n \"\"\"\n [<stanza_type1>]\n important_field = <str>\n * Some notes about the important field.\n * Required!\n disabled = <bool>\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/README.d/60-dept/custom_config.conf.spec',\n \"\"\"\n [bookmark::<prefixed_stanza_type>]\n resource = <url>\n category = <str>\n * Label for organization\n 
disabled = <bool>\n \"\"\"\n )\n default = twd.get_path('etc/apps/Splunk_TA_aws')\n target = twd.get_path('etc/apps/Splunk_TA_aws-OUTPUT')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--layer-method', 'dir.d',\n '--target', target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n spec_file = twd.get_path(\n 'etc/apps/Splunk_TA_aws-OUTPUT/README/custom_config.conf.spec')\n spec = parse_conf(spec_file, profile=PARSECONF_LOOSE)\n self.assertIn('bookmark::<prefixed_stanza_type>', spec)\n self.assertIn('<stanza_type1>', spec)\n\n def test_require_arg(self):\n with ksconf_cli:\n ko = ksconf_cli('combine', 'source-dir')\n self.assertRegex(ko.stderr, 'Must provide [^\\r\\n]+--target')\n\n def test_missing_marker(self):\n twd = TestWorkDir()\n twd.write_file('source-dir/someapp/default/blah.conf',\n '[entry]\\nboring=yes\\n')\n twd.write_file('dest-dir/someapp/default/blah.conf',\n '[entry]\\nboring=yes\\n')\n ko = ksconf_cli('combine', twd.get_path('source-dir'), '--target',\n twd.get_path('dest-dir'))\n self.assertEqual(ko.returncode, EXIT_CODE_COMBINE_MARKER_MISSING)\n self.assertRegex(ko.stderr, '.*Marker file missing\\\\b.*')\n\n\n<mask token>\n",
"step-3": "<mask token>\nif __package__ is None:\n sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n )\n<mask token>\n\n\nclass CliKsconfCombineTestCase(unittest.TestCase):\n\n def build_test01(self, twd):\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/10-upstream/props.conf',\n \"\"\"\n [aws:config]\n SHOULD_LINEMERGE = false\n TRUNCATE = 8388608\n TIME_PREFIX = configurationItemCaptureTime\"\\\\s*:\\\\s*\"\n TIME_FORMAT = %Y-%m-%dT%H:%M:%S.%3NZ\n TZ = GMT\n MAX_TIMESTAMP_LOOKAHEAD = 28\n KV_MODE = json\n ANNOTATE_PUNCT = false\n\n FIELDALIAS-dest = resourceType AS dest\n FIELDALIAS-object = resourceId AS object\n FIELDALIAS-object_id = ARN AS object_id\n EVAL-change_type = \"configuration\"\n EVAL-dvc = \"AWS Config\"\n EVAL-status=\"success\"\n LOOKUP-action= aws_config_action_lookup status AS configurationItemStatus OUTPUT action\n LOOKUP-object_category = aws_config_object_category_lookup type AS resourceType OUTPUT object_category\n\n # unify account ID field\n FIELDALIAS-aws-account-id = awsAccountId as aws_account_id\n FIELDALIAS-region-for-aws-config = awsRegion AS region\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/10-upstream/data/ui/nav/default.xml'\n ,\n \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n <view name=\"search\" default=\"false\" label=\"Search\" />\n\n </nav>\n \"\"\"\n )\n twd.write_file('etc/apps/Splunk_TA_aws/default.d/20-corp/props.conf',\n \"\"\"\n [aws:config]\n TZ = UTC\n # Corp want's punct to be enabled globally\n ANNOTATE_PUNCT = true\n \"\"\"\n )\n twd.write_file('etc/apps/Splunk_TA_aws/default.d/60-dept/props.conf',\n \"\"\"\n [aws:config]\n # Our config is bigger than yours!\n TRUNCATE = 9999999\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/10-upstream/alert_actions.conf',\n \"\"\"\n [aws_sns_modular_alert]\n is_custom = 1\n label = AWS SNS Alert\n description = Publish search result to AWS SNS\n payload_format = json\n icon_path = appIcon.png\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/60-dept/alert_actions.conf',\n \"\"\"\n [aws_sns_modular_alert]\n param.account = DeptAwsAccount\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/60-dept/data/ui/nav/default.xml',\n \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n\n <view name=\"My custom view\" />\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n <view name=\"search\" default=\"false\" label=\"Search\" />\n\n </nav>\n \"\"\"\n )\n\n def test_combine_3dir(self):\n twd = TestWorkDir()\n self.build_test01(twd)\n default = twd.get_path('etc/apps/Splunk_TA_aws/default')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--dry-run', '--target', default, \n default + '.d/*')\n ko = ksconf_cli('combine', '--target', default, default + '.d/*')\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(twd.get_path(\n 'etc/apps/Splunk_TA_aws/default/props.conf'))\n self.assertIn('aws:config', cfg)\n self.assertEqual(cfg['aws:config']['ANNOTATE_PUNCT'], 'true')\n self.assertEqual(cfg['aws:config']['EVAL-change_type'],\n '\"configuration\"')\n self.assertEqual(cfg['aws:config']['TRUNCATE'], '9999999')\n nav_content = twd.read_file(\n 'etc/apps/Splunk_TA_aws/default/data/ui/nav/default.xml')\n self.assertIn('My custom view', nav_content)\n 
twd.write_conf(\n 'etc/apps/Splunk_TA_aws/default.d/99-theforce/props.conf', {\n 'aws:config': {'TIME_FORMAT': '%Y-%m-%dT%H:%M:%S.%6NZ'}})\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/99-theforce/data/ui/nav/default.xml'\n ,\n \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n <view name=\"My custom view\" />\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n </nav>\n \"\"\"\n )\n twd.write_file('etc/apps/Splunk_TA_aws/default/data/dead.conf',\n '# File to remove')\n twd.write_file('etc/apps/Splunk_TA_aws/default/data/tags.conf',\n '# Locally created file')\n twd.write_file('etc/apps/Splunk_TA_aws/default.d/99-blah/same.txt',\n 'SAME TEXT')\n twd.write_file('etc/apps/Splunk_TA_aws/default/same.txt', 'SAME TEXT')\n twd.write_file('etc/apps/Splunk_TA_aws/default.d/99-blah/binary.bin',\n b'#BINARY \\xff \\x00')\n twd.write_file('etc/apps/Splunk_TA_aws/default/binary.bin',\n b'#BINARY NEW \\x00 \\xff \\xfb')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--dry-run', '--target', default, \n default + '.d/*')\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n self.assertRegex(ko.stdout, '[\\\\r\\\\n][-]\\\\s*<view name=\"search\"')\n self.assertRegex(ko.stdout, '[\\\\r\\\\n][-] ?[\\\\r\\\\n]')\n self.assertRegex(ko.stdout,\n '[\\\\r\\\\n][+]TIME_FORMAT = [^\\\\r\\\\n]+%6N')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--target', default, default + '.d/*')\n\n def test_sort_order(self):\n \"\"\"Confirm that single input files are copied as-is\"\"\"\n twd = TestWorkDir()\n default = twd.get_path('input')\n target = twd.get_path('output')\n unique_conf = ['z = 1', ' b=? ', 'a = 9']\n twd.write_file('input/unique.conf', '\\n'.join(unique_conf))\n with ksconf_cli:\n ko = ksconf_cli('combine', '--layer-method', 'disable',\n '--banner', '', '--target', target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n data = twd.read_file('output/unique.conf').splitlines()\n self.assertListEqual(unique_conf, data)\n\n def test_combine_dird(self):\n twd = TestWorkDir()\n self.build_test01(twd)\n default = twd.get_path('etc/apps/Splunk_TA_aws')\n target = twd.get_path('etc/apps/Splunk_TA_aws-OUTPUT')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--layer-method', 'dir.d',\n '--dry-run', '--target', target, default)\n ko = ksconf_cli('combine', '--layer-method', 'dir.d',\n '--target', target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(target + '/default/props.conf')\n self.assertIn('aws:config', cfg)\n self.assertEqual(cfg['aws:config']['ANNOTATE_PUNCT'], 'true')\n self.assertEqual(cfg['aws:config']['EVAL-change_type'],\n '\"configuration\"')\n self.assertEqual(cfg['aws:config']['TRUNCATE'], '9999999')\n nav_content = twd.read_file(\n 'etc/apps/Splunk_TA_aws-OUTPUT/default/data/ui/nav/default.xml'\n )\n self.assertIn('My custom view', nav_content)\n alert_action = twd.read_conf(\n 'etc/apps/Splunk_TA_aws-OUTPUT/default/alert_actions.conf')\n self.assertIn('aws_sns_modular_alert', alert_action)\n self.assertEqual(alert_action['aws_sns_modular_alert'][\n 'param.account'], 'DeptAwsAccount')\n self.assertEqual(alert_action['aws_sns_modular_alert']['label'],\n 'AWS SNS Alert')\n\n def test_keep_existing_ds_local_app(self):\n twd = TestWorkDir()\n src = twd.get_path('repo/apps/Splunk_TA_nix')\n target = twd.get_path('etc/deployment-apps/Splunk_TA_nix')\n twd.write_file('repo/apps/Splunk_TA_nix/default/app.conf',\n \"\"\"\n [install]\n allows_disable = 
false\n is_configured = true\n state = enabled\n\n [launcher]\n author = Splunk\n description = The app is Splunk\n version = 7.0.0\n \"\"\"\n )\n os.makedirs(twd.get_path('etc/deployment-apps'))\n with ksconf_cli:\n ko = ksconf_cli('combine', '--keep-existing', 'local/app.conf',\n '--target', target, src)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n self.assertFalse(os.path.isdir(twd.get_path(\n 'etc/deployment-apps/Splunk_TA_nix/local')))\n twd.write_file('etc/deployment-apps/Splunk_TA_nix/local/app.conf',\n '# Autogenerated file')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--keep-existing', 'local/app.conf',\n '--target', target, src)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(os.path.join(target, 'default/app.conf'))\n self.assertIn('install', cfg)\n self.assertEqual(cfg['launcher']['version'], '7.0.0')\n self.assertEqual(twd.read_file(\n 'etc/deployment-apps/Splunk_TA_nix/local/app.conf'),\n '# Autogenerated file')\n ko = ksconf_cli('combine', '--target', target, src)\n self.assertFalse(os.path.isfile(twd.get_path(\n 'etc/deployment-apps/Splunk_TA_nix/local/app.conf')),\n 'local/app.conf should have been removed.')\n\n def test_combine_conf_spec(self):\n twd = TestWorkDir()\n self.build_test01(twd)\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/README.d/10-upstream/custom_config.conf.spec'\n ,\n \"\"\"\n [<stanza_type1>]\n important_field = <str>\n * Some notes about the important field.\n * Required!\n disabled = <bool>\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/README.d/60-dept/custom_config.conf.spec',\n \"\"\"\n [bookmark::<prefixed_stanza_type>]\n resource = <url>\n category = <str>\n * Label for organization\n disabled = <bool>\n \"\"\"\n )\n default = twd.get_path('etc/apps/Splunk_TA_aws')\n target = twd.get_path('etc/apps/Splunk_TA_aws-OUTPUT')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--layer-method', 'dir.d',\n '--target', target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n spec_file = twd.get_path(\n 'etc/apps/Splunk_TA_aws-OUTPUT/README/custom_config.conf.spec')\n spec = parse_conf(spec_file, profile=PARSECONF_LOOSE)\n self.assertIn('bookmark::<prefixed_stanza_type>', spec)\n self.assertIn('<stanza_type1>', spec)\n\n def test_require_arg(self):\n with ksconf_cli:\n ko = ksconf_cli('combine', 'source-dir')\n self.assertRegex(ko.stderr, 'Must provide [^\\r\\n]+--target')\n\n def test_missing_marker(self):\n twd = TestWorkDir()\n twd.write_file('source-dir/someapp/default/blah.conf',\n '[entry]\\nboring=yes\\n')\n twd.write_file('dest-dir/someapp/default/blah.conf',\n '[entry]\\nboring=yes\\n')\n ko = ksconf_cli('combine', twd.get_path('source-dir'), '--target',\n twd.get_path('dest-dir'))\n self.assertEqual(ko.returncode, EXIT_CODE_COMBINE_MARKER_MISSING)\n self.assertRegex(ko.stderr, '.*Marker file missing\\\\b.*')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "from __future__ import absolute_import, print_function, unicode_literals\nimport os\nimport sys\nimport unittest\nif __package__ is None:\n sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n )\nfrom ksconf.conf.parser import PARSECONF_LOOSE, parse_conf\nfrom ksconf.consts import EXIT_CODE_COMBINE_MARKER_MISSING, EXIT_CODE_SUCCESS\nfrom tests.cli_helper import TestWorkDir, ksconf_cli\n\n\nclass CliKsconfCombineTestCase(unittest.TestCase):\n\n def build_test01(self, twd):\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/10-upstream/props.conf',\n \"\"\"\n [aws:config]\n SHOULD_LINEMERGE = false\n TRUNCATE = 8388608\n TIME_PREFIX = configurationItemCaptureTime\"\\\\s*:\\\\s*\"\n TIME_FORMAT = %Y-%m-%dT%H:%M:%S.%3NZ\n TZ = GMT\n MAX_TIMESTAMP_LOOKAHEAD = 28\n KV_MODE = json\n ANNOTATE_PUNCT = false\n\n FIELDALIAS-dest = resourceType AS dest\n FIELDALIAS-object = resourceId AS object\n FIELDALIAS-object_id = ARN AS object_id\n EVAL-change_type = \"configuration\"\n EVAL-dvc = \"AWS Config\"\n EVAL-status=\"success\"\n LOOKUP-action= aws_config_action_lookup status AS configurationItemStatus OUTPUT action\n LOOKUP-object_category = aws_config_object_category_lookup type AS resourceType OUTPUT object_category\n\n # unify account ID field\n FIELDALIAS-aws-account-id = awsAccountId as aws_account_id\n FIELDALIAS-region-for-aws-config = awsRegion AS region\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/10-upstream/data/ui/nav/default.xml'\n ,\n \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n <view name=\"search\" default=\"false\" label=\"Search\" />\n\n </nav>\n \"\"\"\n )\n twd.write_file('etc/apps/Splunk_TA_aws/default.d/20-corp/props.conf',\n \"\"\"\n [aws:config]\n TZ = UTC\n # Corp want's punct to be enabled globally\n ANNOTATE_PUNCT = true\n \"\"\"\n )\n twd.write_file('etc/apps/Splunk_TA_aws/default.d/60-dept/props.conf',\n \"\"\"\n [aws:config]\n # Our config is bigger than yours!\n TRUNCATE = 9999999\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/10-upstream/alert_actions.conf',\n \"\"\"\n [aws_sns_modular_alert]\n is_custom = 1\n label = AWS SNS Alert\n description = Publish search result to AWS SNS\n payload_format = json\n icon_path = appIcon.png\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/60-dept/alert_actions.conf',\n \"\"\"\n [aws_sns_modular_alert]\n param.account = DeptAwsAccount\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/60-dept/data/ui/nav/default.xml',\n \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n\n <view name=\"My custom view\" />\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n <view name=\"search\" default=\"false\" label=\"Search\" />\n\n </nav>\n \"\"\"\n )\n\n def test_combine_3dir(self):\n twd = TestWorkDir()\n self.build_test01(twd)\n default = twd.get_path('etc/apps/Splunk_TA_aws/default')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--dry-run', '--target', default, \n default + '.d/*')\n ko = ksconf_cli('combine', '--target', default, default + '.d/*')\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(twd.get_path(\n 'etc/apps/Splunk_TA_aws/default/props.conf'))\n self.assertIn('aws:config', cfg)\n self.assertEqual(cfg['aws:config']['ANNOTATE_PUNCT'], 'true')\n 
self.assertEqual(cfg['aws:config']['EVAL-change_type'],\n '\"configuration\"')\n self.assertEqual(cfg['aws:config']['TRUNCATE'], '9999999')\n nav_content = twd.read_file(\n 'etc/apps/Splunk_TA_aws/default/data/ui/nav/default.xml')\n self.assertIn('My custom view', nav_content)\n twd.write_conf(\n 'etc/apps/Splunk_TA_aws/default.d/99-theforce/props.conf', {\n 'aws:config': {'TIME_FORMAT': '%Y-%m-%dT%H:%M:%S.%6NZ'}})\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/99-theforce/data/ui/nav/default.xml'\n ,\n \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n <view name=\"My custom view\" />\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n </nav>\n \"\"\"\n )\n twd.write_file('etc/apps/Splunk_TA_aws/default/data/dead.conf',\n '# File to remove')\n twd.write_file('etc/apps/Splunk_TA_aws/default/data/tags.conf',\n '# Locally created file')\n twd.write_file('etc/apps/Splunk_TA_aws/default.d/99-blah/same.txt',\n 'SAME TEXT')\n twd.write_file('etc/apps/Splunk_TA_aws/default/same.txt', 'SAME TEXT')\n twd.write_file('etc/apps/Splunk_TA_aws/default.d/99-blah/binary.bin',\n b'#BINARY \\xff \\x00')\n twd.write_file('etc/apps/Splunk_TA_aws/default/binary.bin',\n b'#BINARY NEW \\x00 \\xff \\xfb')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--dry-run', '--target', default, \n default + '.d/*')\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n self.assertRegex(ko.stdout, '[\\\\r\\\\n][-]\\\\s*<view name=\"search\"')\n self.assertRegex(ko.stdout, '[\\\\r\\\\n][-] ?[\\\\r\\\\n]')\n self.assertRegex(ko.stdout,\n '[\\\\r\\\\n][+]TIME_FORMAT = [^\\\\r\\\\n]+%6N')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--target', default, default + '.d/*')\n\n def test_sort_order(self):\n \"\"\"Confirm that single input files are copied as-is\"\"\"\n twd = TestWorkDir()\n default = twd.get_path('input')\n target = twd.get_path('output')\n unique_conf = ['z = 1', ' b=? 
', 'a = 9']\n twd.write_file('input/unique.conf', '\\n'.join(unique_conf))\n with ksconf_cli:\n ko = ksconf_cli('combine', '--layer-method', 'disable',\n '--banner', '', '--target', target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n data = twd.read_file('output/unique.conf').splitlines()\n self.assertListEqual(unique_conf, data)\n\n def test_combine_dird(self):\n twd = TestWorkDir()\n self.build_test01(twd)\n default = twd.get_path('etc/apps/Splunk_TA_aws')\n target = twd.get_path('etc/apps/Splunk_TA_aws-OUTPUT')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--layer-method', 'dir.d',\n '--dry-run', '--target', target, default)\n ko = ksconf_cli('combine', '--layer-method', 'dir.d',\n '--target', target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(target + '/default/props.conf')\n self.assertIn('aws:config', cfg)\n self.assertEqual(cfg['aws:config']['ANNOTATE_PUNCT'], 'true')\n self.assertEqual(cfg['aws:config']['EVAL-change_type'],\n '\"configuration\"')\n self.assertEqual(cfg['aws:config']['TRUNCATE'], '9999999')\n nav_content = twd.read_file(\n 'etc/apps/Splunk_TA_aws-OUTPUT/default/data/ui/nav/default.xml'\n )\n self.assertIn('My custom view', nav_content)\n alert_action = twd.read_conf(\n 'etc/apps/Splunk_TA_aws-OUTPUT/default/alert_actions.conf')\n self.assertIn('aws_sns_modular_alert', alert_action)\n self.assertEqual(alert_action['aws_sns_modular_alert'][\n 'param.account'], 'DeptAwsAccount')\n self.assertEqual(alert_action['aws_sns_modular_alert']['label'],\n 'AWS SNS Alert')\n\n def test_keep_existing_ds_local_app(self):\n twd = TestWorkDir()\n src = twd.get_path('repo/apps/Splunk_TA_nix')\n target = twd.get_path('etc/deployment-apps/Splunk_TA_nix')\n twd.write_file('repo/apps/Splunk_TA_nix/default/app.conf',\n \"\"\"\n [install]\n allows_disable = false\n is_configured = true\n state = enabled\n\n [launcher]\n author = Splunk\n description = The app is Splunk\n version = 7.0.0\n \"\"\"\n )\n os.makedirs(twd.get_path('etc/deployment-apps'))\n with ksconf_cli:\n ko = ksconf_cli('combine', '--keep-existing', 'local/app.conf',\n '--target', target, src)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n self.assertFalse(os.path.isdir(twd.get_path(\n 'etc/deployment-apps/Splunk_TA_nix/local')))\n twd.write_file('etc/deployment-apps/Splunk_TA_nix/local/app.conf',\n '# Autogenerated file')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--keep-existing', 'local/app.conf',\n '--target', target, src)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(os.path.join(target, 'default/app.conf'))\n self.assertIn('install', cfg)\n self.assertEqual(cfg['launcher']['version'], '7.0.0')\n self.assertEqual(twd.read_file(\n 'etc/deployment-apps/Splunk_TA_nix/local/app.conf'),\n '# Autogenerated file')\n ko = ksconf_cli('combine', '--target', target, src)\n self.assertFalse(os.path.isfile(twd.get_path(\n 'etc/deployment-apps/Splunk_TA_nix/local/app.conf')),\n 'local/app.conf should have been removed.')\n\n def test_combine_conf_spec(self):\n twd = TestWorkDir()\n self.build_test01(twd)\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/README.d/10-upstream/custom_config.conf.spec'\n ,\n \"\"\"\n [<stanza_type1>]\n important_field = <str>\n * Some notes about the important field.\n * Required!\n disabled = <bool>\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/README.d/60-dept/custom_config.conf.spec',\n \"\"\"\n [bookmark::<prefixed_stanza_type>]\n resource = <url>\n category = <str>\n * Label for organization\n 
disabled = <bool>\n \"\"\"\n )\n default = twd.get_path('etc/apps/Splunk_TA_aws')\n target = twd.get_path('etc/apps/Splunk_TA_aws-OUTPUT')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--layer-method', 'dir.d',\n '--target', target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n spec_file = twd.get_path(\n 'etc/apps/Splunk_TA_aws-OUTPUT/README/custom_config.conf.spec')\n spec = parse_conf(spec_file, profile=PARSECONF_LOOSE)\n self.assertIn('bookmark::<prefixed_stanza_type>', spec)\n self.assertIn('<stanza_type1>', spec)\n\n def test_require_arg(self):\n with ksconf_cli:\n ko = ksconf_cli('combine', 'source-dir')\n self.assertRegex(ko.stderr, 'Must provide [^\\r\\n]+--target')\n\n def test_missing_marker(self):\n twd = TestWorkDir()\n twd.write_file('source-dir/someapp/default/blah.conf',\n '[entry]\\nboring=yes\\n')\n twd.write_file('dest-dir/someapp/default/blah.conf',\n '[entry]\\nboring=yes\\n')\n ko = ksconf_cli('combine', twd.get_path('source-dir'), '--target',\n twd.get_path('dest-dir'))\n self.assertEqual(ko.returncode, EXIT_CODE_COMBINE_MARKER_MISSING)\n self.assertRegex(ko.stderr, '.*Marker file missing\\\\b.*')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "#!/usr/bin/env python\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nimport os\nimport sys\nimport unittest\n\n# Allow interactive execution from CLI, cd tests; ./test_cli.py\nif __package__ is None:\n sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nfrom ksconf.conf.parser import PARSECONF_LOOSE, parse_conf\nfrom ksconf.consts import EXIT_CODE_COMBINE_MARKER_MISSING, EXIT_CODE_SUCCESS\nfrom tests.cli_helper import TestWorkDir, ksconf_cli\n\n\nclass CliKsconfCombineTestCase(unittest.TestCase):\n\n def build_test01(self, twd):\n twd.write_file(\"etc/apps/Splunk_TA_aws/default.d/10-upstream/props.conf\", r\"\"\"\n [aws:config]\n SHOULD_LINEMERGE = false\n TRUNCATE = 8388608\n TIME_PREFIX = configurationItemCaptureTime\"\\s*:\\s*\"\n TIME_FORMAT = %Y-%m-%dT%H:%M:%S.%3NZ\n TZ = GMT\n MAX_TIMESTAMP_LOOKAHEAD = 28\n KV_MODE = json\n ANNOTATE_PUNCT = false\n\n FIELDALIAS-dest = resourceType AS dest\n FIELDALIAS-object = resourceId AS object\n FIELDALIAS-object_id = ARN AS object_id\n EVAL-change_type = \"configuration\"\n EVAL-dvc = \"AWS Config\"\n EVAL-status=\"success\"\n LOOKUP-action= aws_config_action_lookup status AS configurationItemStatus OUTPUT action\n LOOKUP-object_category = aws_config_object_category_lookup type AS resourceType OUTPUT object_category\n\n # unify account ID field\n FIELDALIAS-aws-account-id = awsAccountId as aws_account_id\n FIELDALIAS-region-for-aws-config = awsRegion AS region\n \"\"\")\n twd.write_file(\"etc/apps/Splunk_TA_aws/default.d/10-upstream/data/ui/nav/default.xml\", \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n <view name=\"search\" default=\"false\" label=\"Search\" />\n\n </nav>\n \"\"\")\n # In the future there will be a more efficient way to handle the global 'ANNOTATE_PUCT' scenario\n twd.write_file(\"etc/apps/Splunk_TA_aws/default.d/20-corp/props.conf\", \"\"\"\n [aws:config]\n TZ = UTC\n # Corp want's punct to be enabled globally\n ANNOTATE_PUNCT = true\n \"\"\")\n twd.write_file(\"etc/apps/Splunk_TA_aws/default.d/60-dept/props.conf\", \"\"\"\n [aws:config]\n # Our config is bigger than yours!\n TRUNCATE = 9999999\n \"\"\")\n\n twd.write_file(\"etc/apps/Splunk_TA_aws/default.d/10-upstream/alert_actions.conf\", \"\"\"\n [aws_sns_modular_alert]\n is_custom = 1\n label = AWS SNS Alert\n description = Publish search result to AWS SNS\n payload_format = json\n icon_path = appIcon.png\n \"\"\")\n twd.write_file(\"etc/apps/Splunk_TA_aws/default.d/60-dept/alert_actions.conf\", \"\"\"\n [aws_sns_modular_alert]\n param.account = DeptAwsAccount\n \"\"\")\n\n twd.write_file(\"etc/apps/Splunk_TA_aws/default.d/60-dept/data/ui/nav/default.xml\", \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n\n <view name=\"My custom view\" />\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n <view name=\"search\" default=\"false\" label=\"Search\" />\n\n </nav>\n \"\"\")\n\n def test_combine_3dir(self):\n # Note that this test tests the old shool version of '*.d' processing. 
But we must preserve this behavior.\n # Be aware that we pass in 'default.d/*' as a string, and expand the glob vs allowing the shell to handle this\n # and this is _normal_ behavior when dealing with Windows.\n twd = TestWorkDir()\n self.build_test01(twd)\n default = twd.get_path(\"etc/apps/Splunk_TA_aws/default\")\n with ksconf_cli:\n ko = ksconf_cli(\"combine\", \"--dry-run\", \"--target\", default, default + \".d/*\")\n # Q: Why do we run this once, but not check anything about it? (To ensure dry-run has no side effects?)\n ko = ksconf_cli(\"combine\", \"--target\", default, default + \".d/*\")\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(twd.get_path(\"etc/apps/Splunk_TA_aws/default/props.conf\"))\n self.assertIn(\"aws:config\", cfg)\n self.assertEqual(cfg[\"aws:config\"][\"ANNOTATE_PUNCT\"], \"true\")\n self.assertEqual(cfg[\"aws:config\"][\"EVAL-change_type\"], '\"configuration\"')\n self.assertEqual(cfg[\"aws:config\"][\"TRUNCATE\"], '9999999')\n nav_content = twd.read_file(\"etc/apps/Splunk_TA_aws/default/data/ui/nav/default.xml\")\n self.assertIn(\"My custom view\", nav_content)\n\n twd.write_conf(\"etc/apps/Splunk_TA_aws/default.d/99-theforce/props.conf\", {\n \"aws:config\": {\"TIME_FORMAT\": \"%Y-%m-%dT%H:%M:%S.%6NZ\"}\n })\n twd.write_file(\"etc/apps/Splunk_TA_aws/default.d/99-theforce/data/ui/nav/default.xml\", \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n <view name=\"My custom view\" />\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n </nav>\n \"\"\")\n twd.write_file(\"etc/apps/Splunk_TA_aws/default/data/dead.conf\", \"# File to remove\")\n twd.write_file(\"etc/apps/Splunk_TA_aws/default/data/tags.conf\", \"# Locally created file\")\n\n twd.write_file(\"etc/apps/Splunk_TA_aws/default.d/99-blah/same.txt\", \"SAME TEXT\")\n twd.write_file(\"etc/apps/Splunk_TA_aws/default/same.txt\", \"SAME TEXT\")\n\n twd.write_file(\"etc/apps/Splunk_TA_aws/default.d/99-blah/binary.bin\", b\"#BINARY \\xff \\x00\")\n twd.write_file(\"etc/apps/Splunk_TA_aws/default/binary.bin\", b\"#BINARY NEW \\x00 \\xff \\xFB\")\n with ksconf_cli:\n ko = ksconf_cli(\"combine\", \"--dry-run\", \"--target\", default, default + \".d/*\")\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n self.assertRegex(ko.stdout, r'[\\r\\n][-]\\s*<view name=\"search\"')\n self.assertRegex(ko.stdout, r'[\\r\\n][-] ?[\\r\\n]') # Remove empty lines from nav\n self.assertRegex(ko.stdout, r\"[\\r\\n][+]TIME_FORMAT = [^\\r\\n]+%6N\")\n with ksconf_cli:\n ko = ksconf_cli(\"combine\", \"--target\", default, default + \".d/*\")\n\n def test_sort_order(self):\n \"Confirm that single input files are copied as-is\"\n twd = TestWorkDir()\n default = twd.get_path(\"input\")\n target = twd.get_path(\"output\")\n unique_conf = [\n \"z = 1\",\n \" b=? 
\",\n \"a = 9\"]\n twd.write_file(\"input/unique.conf\",\n \"\\n\".join(unique_conf))\n with ksconf_cli:\n ko = ksconf_cli(\"combine\", \"--layer-method\", \"disable\", \"--banner\", \"\",\n \"--target\", target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n data = twd.read_file(\"output/unique.conf\").splitlines()\n self.assertListEqual(unique_conf, data)\n\n def test_combine_dird(self):\n twd = TestWorkDir()\n self.build_test01(twd)\n default = twd.get_path(\"etc/apps/Splunk_TA_aws\")\n target = twd.get_path(\"etc/apps/Splunk_TA_aws-OUTPUT\")\n with ksconf_cli:\n ko = ksconf_cli(\"combine\", \"--layer-method\", \"dir.d\", \"--dry-run\", \"--target\", target, default)\n ko = ksconf_cli(\"combine\", \"--layer-method\", \"dir.d\", \"--target\", target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(target + \"/default/props.conf\")\n self.assertIn(\"aws:config\", cfg)\n self.assertEqual(cfg[\"aws:config\"][\"ANNOTATE_PUNCT\"], \"true\")\n self.assertEqual(cfg[\"aws:config\"][\"EVAL-change_type\"], '\"configuration\"')\n self.assertEqual(cfg[\"aws:config\"][\"TRUNCATE\"], '9999999')\n nav_content = twd.read_file(\"etc/apps/Splunk_TA_aws-OUTPUT/default/data/ui/nav/default.xml\")\n self.assertIn(\"My custom view\", nav_content)\n\n alert_action = twd.read_conf(\"etc/apps/Splunk_TA_aws-OUTPUT/default/alert_actions.conf\")\n self.assertIn(\"aws_sns_modular_alert\", alert_action)\n self.assertEqual(alert_action[\"aws_sns_modular_alert\"][\"param.account\"], \"DeptAwsAccount\") # layer 10\n self.assertEqual(alert_action[\"aws_sns_modular_alert\"][\"label\"], \"AWS SNS Alert\") # layer 60\n\n def test_keep_existing_ds_local_app(self):\n twd = TestWorkDir()\n src = twd.get_path(\"repo/apps/Splunk_TA_nix\")\n target = twd.get_path(\"etc/deployment-apps/Splunk_TA_nix\")\n\n twd.write_file(\"repo/apps/Splunk_TA_nix/default/app.conf\", r\"\"\"\n [install]\n allows_disable = false\n is_configured = true\n state = enabled\n\n [launcher]\n author = Splunk\n description = The app is Splunk\n version = 7.0.0\n \"\"\")\n # Make partent diretories\n os.makedirs(twd.get_path(\"etc/deployment-apps\"))\n\n # First run (creates maker file)\n with ksconf_cli:\n ko = ksconf_cli(\"combine\", \"--keep-existing\", \"local/app.conf\",\n \"--target\", target, src)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n # Local folder hasn't been created yet\n self.assertFalse(os.path.isdir(twd.get_path(\"etc/deployment-apps/Splunk_TA_nix/local\")))\n\n # Simulate a 'splunk reload deploy-server'\n twd.write_file(\"etc/deployment-apps/Splunk_TA_nix/local/app.conf\", \"# Autogenerated file\")\n\n with ksconf_cli:\n ko = ksconf_cli(\"combine\", \"--keep-existing\", \"local/app.conf\",\n \"--target\", target, src)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(os.path.join(target, \"default/app.conf\"))\n self.assertIn(\"install\", cfg)\n self.assertEqual(cfg[\"launcher\"][\"version\"], \"7.0.0\")\n\n self.assertEqual(twd.read_file(\"etc/deployment-apps/Splunk_TA_nix/local/app.conf\"),\n \"# Autogenerated file\")\n\n # This time the file will be removed\n ko = ksconf_cli(\"combine\", \"--target\", target, src)\n self.assertFalse(os.path.isfile(twd.get_path(\"etc/deployment-apps/Splunk_TA_nix/local/app.conf\")),\n \"local/app.conf should have been removed.\")\n\n def test_combine_conf_spec(self):\n twd = TestWorkDir()\n self.build_test01(twd)\n\n twd.write_file(\"etc/apps/Splunk_TA_aws/README.d/10-upstream/custom_config.conf.spec\", r\"\"\"\n 
[<stanza_type1>]\n important_field = <str>\n * Some notes about the important field.\n * Required!\n disabled = <bool>\n \"\"\")\n twd.write_file(\"etc/apps/Splunk_TA_aws/README.d/60-dept/custom_config.conf.spec\", r\"\"\"\n [bookmark::<prefixed_stanza_type>]\n resource = <url>\n category = <str>\n * Label for organization\n disabled = <bool>\n \"\"\")\n\n default = twd.get_path(\"etc/apps/Splunk_TA_aws\")\n target = twd.get_path(\"etc/apps/Splunk_TA_aws-OUTPUT\")\n with ksconf_cli:\n ko = ksconf_cli(\"combine\", \"--layer-method\", \"dir.d\", \"--target\", target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n\n spec_file = twd.get_path(\"etc/apps/Splunk_TA_aws-OUTPUT/README/custom_config.conf.spec\")\n spec = parse_conf(spec_file, profile=PARSECONF_LOOSE)\n\n self.assertIn(\"bookmark::<prefixed_stanza_type>\", spec)\n self.assertIn(\"<stanza_type1>\", spec)\n\n def test_require_arg(self):\n with ksconf_cli:\n ko = ksconf_cli(\"combine\", \"source-dir\")\n self.assertRegex(ko.stderr, \"Must provide [^\\r\\n]+--target\")\n\n def test_missing_marker(self):\n twd = TestWorkDir()\n twd.write_file(\"source-dir/someapp/default/blah.conf\", \"[entry]\\nboring=yes\\n\")\n twd.write_file(\"dest-dir/someapp/default/blah.conf\", \"[entry]\\nboring=yes\\n\")\n\n ko = ksconf_cli(\"combine\", twd.get_path(\"source-dir\"), \"--target\", twd.get_path(\"dest-dir\"))\n self.assertEqual(ko.returncode, EXIT_CODE_COMBINE_MARKER_MISSING)\n self.assertRegex(ko.stderr, r\".*Marker file missing\\b.*\")\n\n\nif __name__ == '__main__': # pragma: no cover\n unittest.main()\n",
"step-ids": [
7,
8,
10,
11,
12
]
}
|
[
7,
8,
10,
11,
12
] |
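The ksconf record above exercises layered .conf merging: later layers such as 60-dept override keys contributed by 10-upstream and 20-corp, stanza by stanza. A minimal sketch of that last-layer-wins merge on plain dicts follows; it is illustrative only and is not ksconf's actual implementation, which also handles files, marker files, and diff output.

# Minimal sketch of last-layer-wins conf merging, assuming dict-of-dicts stanzas.
def merge_layers(*layers):
    merged = {}
    for layer in layers:  # ordered: 10-upstream, 20-corp, 60-dept
        for stanza, keys in layer.items():
            merged.setdefault(stanza, {}).update(keys)
    return merged

upstream = {"aws:config": {"TRUNCATE": "8388608", "TZ": "GMT", "ANNOTATE_PUNCT": "false"}}
corp = {"aws:config": {"TZ": "UTC", "ANNOTATE_PUNCT": "true"}}
dept = {"aws:config": {"TRUNCATE": "9999999"}}

cfg = merge_layers(upstream, corp, dept)
assert cfg["aws:config"]["TRUNCATE"] == "9999999"      # 60-dept wins
assert cfg["aws:config"]["ANNOTATE_PUNCT"] == "true"   # 20-corp wins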
# SPDX-FileCopyrightText: 2023 spdx contributors
#
# SPDX-License-Identifier: Apache-2.0
from dataclasses import field
from beartype.typing import List, Optional
from spdx_tools.common.typing.dataclass_with_properties import dataclass_with_properties
from spdx_tools.common.typing.type_checks import check_types_and_set_values
from spdx_tools.spdx3.model import IntegrityMethod
@dataclass_with_properties
class ExternalMap:
external_id: str # anyURI
verified_using: List[IntegrityMethod] = field(default_factory=list)
location_hint: Optional[str] = None # anyURI
defining_document: Optional[str] = None
def __init__(
self,
external_id: str,
verified_using: List[IntegrityMethod] = None,
location_hint: Optional[str] = None,
defining_document: Optional[str] = None,
):
verified_using = [] if verified_using is None else verified_using
check_types_and_set_values(self, locals())
|
normal
|
{
"blob_id": "1c085ea8f9b21ea7bef94ad4ecbb1771a57f697a",
"index": 2208,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@dataclass_with_properties\nclass ExternalMap:\n external_id: str\n verified_using: List[IntegrityMethod] = field(default_factory=list)\n location_hint: Optional[str] = None\n defining_document: Optional[str] = None\n <mask token>\n",
"step-3": "<mask token>\n\n\n@dataclass_with_properties\nclass ExternalMap:\n external_id: str\n verified_using: List[IntegrityMethod] = field(default_factory=list)\n location_hint: Optional[str] = None\n defining_document: Optional[str] = None\n\n def __init__(self, external_id: str, verified_using: List[\n IntegrityMethod]=None, location_hint: Optional[str]=None,\n defining_document: Optional[str]=None):\n verified_using = [] if verified_using is None else verified_using\n check_types_and_set_values(self, locals())\n",
"step-4": "from dataclasses import field\nfrom beartype.typing import List, Optional\nfrom spdx_tools.common.typing.dataclass_with_properties import dataclass_with_properties\nfrom spdx_tools.common.typing.type_checks import check_types_and_set_values\nfrom spdx_tools.spdx3.model import IntegrityMethod\n\n\n@dataclass_with_properties\nclass ExternalMap:\n external_id: str\n verified_using: List[IntegrityMethod] = field(default_factory=list)\n location_hint: Optional[str] = None\n defining_document: Optional[str] = None\n\n def __init__(self, external_id: str, verified_using: List[\n IntegrityMethod]=None, location_hint: Optional[str]=None,\n defining_document: Optional[str]=None):\n verified_using = [] if verified_using is None else verified_using\n check_types_and_set_values(self, locals())\n",
"step-5": "# SPDX-FileCopyrightText: 2023 spdx contributors\n#\n# SPDX-License-Identifier: Apache-2.0\nfrom dataclasses import field\n\nfrom beartype.typing import List, Optional\n\nfrom spdx_tools.common.typing.dataclass_with_properties import dataclass_with_properties\nfrom spdx_tools.common.typing.type_checks import check_types_and_set_values\nfrom spdx_tools.spdx3.model import IntegrityMethod\n\n\n@dataclass_with_properties\nclass ExternalMap:\n external_id: str # anyURI\n verified_using: List[IntegrityMethod] = field(default_factory=list)\n location_hint: Optional[str] = None # anyURI\n defining_document: Optional[str] = None\n\n def __init__(\n self,\n external_id: str,\n verified_using: List[IntegrityMethod] = None,\n location_hint: Optional[str] = None,\n defining_document: Optional[str] = None,\n ):\n verified_using = [] if verified_using is None else verified_using\n check_types_and_set_values(self, locals())\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
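The ExternalMap record above pairs dataclass-style field declarations with a hand-written __init__ that routes through check_types_and_set_values for runtime type validation. A short usage sketch, assuming spdx-tools is installed; the URIs below are illustrative placeholders, not real SPDX identifiers.

# Hypothetical usage of the ExternalMap class from the record above.
ext = ExternalMap(
    external_id="https://example.com/spdx/DocumentRef-other#SPDXRef-Package",
    location_hint="https://example.com/sboms/other.spdx.json",
)
assert ext.verified_using == []   # None was normalized to [] before type checking
assert ext.defining_document is None

try:
    ExternalMap(external_id=42)   # wrong type for an anyURI string field
except Exception as err:          # spdx-tools raises a constructor type error here
    print(err)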
# Created by MechAviv
# [Maestra Fiametta] | [9390220]
# Commerci Republic : San Commerci
if sm.hasItem(4310100, 1):
sm.setSpeakerID(9390220)
sm.sendSayOkay("You can't start your voyage until you finish the tutorial quest!")
else:
sm.setSpeakerID(9390220)
sm.sendNext("What? You threw away the coins without finishing the tutorial? (Sighs) I suppose I can give you some more coins so that you can complete the tutorial.")
sm.setSpeakerID(9390220)
sm.sendSay("Just remember, you can't trade without gold!")
sm.giveItem(4310100, 10)
sm.setSpeakerID(9390220)
sm.sendPrev("Check to make sure there you have coins in your inventory.")
|
normal
|
{
"blob_id": "c4b9fdba9e9eeccc52999dab9232302f159c882a",
"index": 588,
"step-1": "<mask token>\n",
"step-2": "if sm.hasItem(4310100, 1):\n sm.setSpeakerID(9390220)\n sm.sendSayOkay(\n \"You can't start your voyage until you finish the tutorial quest!\")\nelse:\n sm.setSpeakerID(9390220)\n sm.sendNext(\n 'What? You threw away the coins without finishing the tutorial? (Sighs) I suppose I can give you some more coins so that you can complete the tutorial.'\n )\n sm.setSpeakerID(9390220)\n sm.sendSay(\"Just remember, you can't trade without gold!\")\n sm.giveItem(4310100, 10)\n sm.setSpeakerID(9390220)\n sm.sendPrev('Check to make sure there you have coins in your inventory.')\n",
"step-3": "# Created by MechAviv\n# [Maestra Fiametta] | [9390220]\n# Commerci Republic : San Commerci\nif sm.hasItem(4310100, 1):\n sm.setSpeakerID(9390220)\n sm.sendSayOkay(\"You can't start your voyage until you finish the tutorial quest!\")\nelse:\n sm.setSpeakerID(9390220)\n sm.sendNext(\"What? You threw away the coins without finishing the tutorial? (Sighs) I suppose I can give you some more coins so that you can complete the tutorial.\")\n\n\n sm.setSpeakerID(9390220)\n sm.sendSay(\"Just remember, you can't trade without gold!\")\n\n\n sm.giveItem(4310100, 10)\n sm.setSpeakerID(9390220)\n sm.sendPrev(\"Check to make sure there you have coins in your inventory.\")",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
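NPC scripts like the one above run against a ScriptManager (sm) that the game server injects; the script itself imports nothing. A hedged stand-in that stubs only the calls this script uses, so the dialogue flow can be dry-run outside a server; the stub class is hypothetical and not part of any MapleStory emulator API.

class FakeScriptManager:
    """Hypothetical stub mimicking the sm calls used in the script above."""

    def __init__(self, inventory=None):
        self.inventory = dict(inventory or {})
        self.speaker = None

    def hasItem(self, item_id, count):
        return self.inventory.get(item_id, 0) >= count

    def setSpeakerID(self, npc_id):
        self.speaker = npc_id

    def giveItem(self, item_id, count):
        self.inventory[item_id] = self.inventory.get(item_id, 0) + count

    def sendSayOkay(self, text):
        print("[%s] %s" % (self.speaker, text))

    sendNext = sendSay = sendPrev = sendSayOkay  # identical behavior for a dry run

sm = FakeScriptManager()       # empty inventory, so the else branch would run
print(sm.hasItem(4310100, 1))  # False: the NPC hands out 10 tutorial coins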
import testTemplate
def getTests():
tests = []
suite=testTemplate.testSuite("Sample Test Cases")
testcase = testTemplate.testInstance("3\n1 1 1\n1 1 1\n1 1 1" , "6" , "Sample #1")
suite.add(testcase)
testcase = testTemplate.testInstance("11\n1 0 0 1 0 0 0 0 0 1 1 \n1 1 1 1 1 0 1 0 1 0 0 \n1 0 0 1 0 0 1 1 0 1 0 \n1 0 1 1 1 0 1 1 0 1 1 \n0 1 1 1 0 1 0 0 1 1 1 \n1 1 1 0 0 1 0 0 0 0 0 \n0 0 0 0 1 0 1 0 0 0 1 \n1 0 1 1 0 0 0 0 0 0 1 \n0 0 1 0 1 1 0 0 0 1 1 \n1 1 1 0 0 0 1 0 1 0 1 \n1 0 0 0 1 1 1 1 0 0 0" , "7588" , "Sample #2")
suite.add(testcase)
testcase = testTemplate.testInstance("11\n0 1 1 1 0 1 0 0 0 1 0 \n0 0 1 1 1 1 1 1 1 1 1 \n1 1 0 1 0 0 0 0 0 1 0 \n0 1 0 1 0 1 0 1 0 1 1 \n1 0 0 1 0 0 0 0 1 0 1 \n0 0 1 0 1 1 0 0 0 0 1 \n1 0 1 0 1 1 1 0 1 1 0 \n1 0 1 1 0 1 1 0 0 1 0 \n0 0 1 1 0 1 1 1 1 1 1 \n0 1 0 0 0 0 0 0 0 1 1 \n0 1 1 0 0 0 0 0 1 0 1 " , "7426" , "Sample #3")
suite.add(testcase)
tests.append(suite)
return tests
|
normal
|
{
"blob_id": "de4c31ad474b7ce75631214aceafbe4d7334f14b",
"index": 6956,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef getTests():\n tests = []\n suite = testTemplate.testSuite('Sample Test Cases')\n testcase = testTemplate.testInstance('3\\n1 1 1\\n1 1 1\\n1 1 1', '6',\n 'Sample #1')\n suite.add(testcase)\n testcase = testTemplate.testInstance(\n \"\"\"11\n1 0 0 1 0 0 0 0 0 1 1 \n1 1 1 1 1 0 1 0 1 0 0 \n1 0 0 1 0 0 1 1 0 1 0 \n1 0 1 1 1 0 1 1 0 1 1 \n0 1 1 1 0 1 0 0 1 1 1 \n1 1 1 0 0 1 0 0 0 0 0 \n0 0 0 0 1 0 1 0 0 0 1 \n1 0 1 1 0 0 0 0 0 0 1 \n0 0 1 0 1 1 0 0 0 1 1 \n1 1 1 0 0 0 1 0 1 0 1 \n1 0 0 0 1 1 1 1 0 0 0\"\"\"\n , '7588', 'Sample #2')\n suite.add(testcase)\n testcase = testTemplate.testInstance(\n \"\"\"11\n0 1 1 1 0 1 0 0 0 1 0 \n0 0 1 1 1 1 1 1 1 1 1 \n1 1 0 1 0 0 0 0 0 1 0 \n0 1 0 1 0 1 0 1 0 1 1 \n1 0 0 1 0 0 0 0 1 0 1 \n0 0 1 0 1 1 0 0 0 0 1 \n1 0 1 0 1 1 1 0 1 1 0 \n1 0 1 1 0 1 1 0 0 1 0 \n0 0 1 1 0 1 1 1 1 1 1 \n0 1 0 0 0 0 0 0 0 1 1 \n0 1 1 0 0 0 0 0 1 0 1 \"\"\"\n , '7426', 'Sample #3')\n suite.add(testcase)\n tests.append(suite)\n return tests\n",
"step-3": "import testTemplate\n\n\ndef getTests():\n tests = []\n suite = testTemplate.testSuite('Sample Test Cases')\n testcase = testTemplate.testInstance('3\\n1 1 1\\n1 1 1\\n1 1 1', '6',\n 'Sample #1')\n suite.add(testcase)\n testcase = testTemplate.testInstance(\n \"\"\"11\n1 0 0 1 0 0 0 0 0 1 1 \n1 1 1 1 1 0 1 0 1 0 0 \n1 0 0 1 0 0 1 1 0 1 0 \n1 0 1 1 1 0 1 1 0 1 1 \n0 1 1 1 0 1 0 0 1 1 1 \n1 1 1 0 0 1 0 0 0 0 0 \n0 0 0 0 1 0 1 0 0 0 1 \n1 0 1 1 0 0 0 0 0 0 1 \n0 0 1 0 1 1 0 0 0 1 1 \n1 1 1 0 0 0 1 0 1 0 1 \n1 0 0 0 1 1 1 1 0 0 0\"\"\"\n , '7588', 'Sample #2')\n suite.add(testcase)\n testcase = testTemplate.testInstance(\n \"\"\"11\n0 1 1 1 0 1 0 0 0 1 0 \n0 0 1 1 1 1 1 1 1 1 1 \n1 1 0 1 0 0 0 0 0 1 0 \n0 1 0 1 0 1 0 1 0 1 1 \n1 0 0 1 0 0 0 0 1 0 1 \n0 0 1 0 1 1 0 0 0 0 1 \n1 0 1 0 1 1 1 0 1 1 0 \n1 0 1 1 0 1 1 0 0 1 0 \n0 0 1 1 0 1 1 1 1 1 1 \n0 1 0 0 0 0 0 0 0 1 1 \n0 1 1 0 0 0 0 0 1 0 1 \"\"\"\n , '7426', 'Sample #3')\n suite.add(testcase)\n tests.append(suite)\n return tests\n",
"step-4": "import testTemplate \ndef getTests():\n\ttests = []\n\t\n\tsuite=testTemplate.testSuite(\"Sample Test Cases\")\n\ttestcase = testTemplate.testInstance(\"3\\n1 1 1\\n1 1 1\\n1 1 1\" , \"6\" , \"Sample #1\")\n\tsuite.add(testcase)\n\ttestcase = testTemplate.testInstance(\"11\\n1 0 0 1 0 0 0 0 0 1 1 \\n1 1 1 1 1 0 1 0 1 0 0 \\n1 0 0 1 0 0 1 1 0 1 0 \\n1 0 1 1 1 0 1 1 0 1 1 \\n0 1 1 1 0 1 0 0 1 1 1 \\n1 1 1 0 0 1 0 0 0 0 0 \\n0 0 0 0 1 0 1 0 0 0 1 \\n1 0 1 1 0 0 0 0 0 0 1 \\n0 0 1 0 1 1 0 0 0 1 1 \\n1 1 1 0 0 0 1 0 1 0 1 \\n1 0 0 0 1 1 1 1 0 0 0\" , \"7588\" , \"Sample #2\")\n\tsuite.add(testcase)\n\ttestcase = testTemplate.testInstance(\"11\\n0 1 1 1 0 1 0 0 0 1 0 \\n0 0 1 1 1 1 1 1 1 1 1 \\n1 1 0 1 0 0 0 0 0 1 0 \\n0 1 0 1 0 1 0 1 0 1 1 \\n1 0 0 1 0 0 0 0 1 0 1 \\n0 0 1 0 1 1 0 0 0 0 1 \\n1 0 1 0 1 1 1 0 1 1 0 \\n1 0 1 1 0 1 1 0 0 1 0 \\n0 0 1 1 0 1 1 1 1 1 1 \\n0 1 0 0 0 0 0 0 0 1 1 \\n0 1 1 0 0 0 0 0 1 0 1 \" , \"7426\" , \"Sample #3\")\n\tsuite.add(testcase)\n\ttests.append(suite)\n\t\n\treturn tests\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
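getTests() above depends on a local testTemplate module that the record does not include. A guessed-at minimal shape for that module, inferred purely from how testSuite and testInstance are used; the field names are assumptions, not the real module's API.

# Hypothetical reconstruction of the testTemplate module used above.
class testInstance:
    def __init__(self, stdin, expected, label):
        self.stdin = stdin        # text fed to the program on stdin
        self.expected = expected  # expected stdout
        self.label = label        # display name, e.g. "Sample #1"

class testSuite:
    def __init__(self, name):
        self.name = name
        self.cases = []

    def add(self, case):
        self.cases.append(case)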
#Script start
print"This is the two number subtraction python program."
a = 9
b = 2
c = a - b
print c
# Script close
|
normal
|
{
"blob_id": "a045423edd94d985dfc9660bcfe4a88c61bf4574",
"index": 20,
"step-1": "#Script start\nprint\"This is the two number subtraction python program.\"\na = 9\nb = 2\nc = a - b\nprint c\n\n# Scrip close\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
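The subtraction record above is Python 2 (print statement syntax). For reference, the same program as it would read in Python 3:

# Python 3 port of the subtraction example above.
print("This is the two number subtraction python program.")
a = 9
b = 2
c = a - b
print(c)  # 7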
"""------------------------------------------------------------------------
MODULE
FContactRegulatoryInfoBase -
DESCRIPTION:
This file provides the custom instance of RegulatoryInfo on the Contact which has all the RegulatoryInfo related methods
VERSION: 1.0.25(0.25.7)
RESTRICTIONS/ LIMITATIONS:
1. Any modifications to the scripts/ encrypted modules/ clear text code within the core is not supported.
2. This module is not customizable
3. The component may not work as expected with any modifications done to this module at user end
--------------------------------------------------------------------------"""
import string
import acm
import FIntegrationUtils
import FRegulatoryLogger
import ael
import FRegulatoryUtils
import FRegulatoryInfoException
logger = 'FContactRegulatoryInfoBase'
VALUE_NOT_SET = ()
class FContactRegulatoryInfoBase(object):
def __init__(self, contact = None):
"""class that maintains all data related to the regulatory on the FContact"""
try:
self.__contact = contact
if not self.__contact:
FRegulatoryLogger.ERROR(logger, "The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object")
return None
self.__reg_date_of_birth = None
self.__reg_first_name = None
self.__reg_last_name = None
self.__reg_national_id = None
self.__reg_crm_id = None
self.__crm_id_source = None
self.__reg_exchange_id = None
self.__reg_unique_name = None
self.__client_type = None
self.__is_general_partner = None
if contact:
self.__refresh(contact)
self.__integration_utils = FIntegrationUtils.FIntegrationUtils()
except Exception as e :
FRegulatoryLogger.ERROR(logger, str(e))
def __refresh(self, contact):
self.__reg_date_of_birth = FRegulatoryUtils.get_addinfo_value('dateOfBirth', self.__contact)
self.__reg_first_name = FRegulatoryUtils.get_addinfo_value('firstName', self.__contact)
self.__reg_last_name = FRegulatoryUtils.get_addinfo_value('lastName', self.__contact)
self.__reg_national_id = FRegulatoryUtils.get_addinfo_value('nationalId', self.__contact)
self.__is_general_partner = FRegulatoryUtils.get_addinfo_value('regGeneralPartner', self.__contact)
self.__reg_crm_id = FRegulatoryUtils.get_addinfo_value('regContactCrmId', self.__contact)
self.__reg_exchange_id = FRegulatoryUtils.get_addinfo_value('regContExchangeId', self.__contact)
try:
self.__reg_unique_name = self.__contact.UniqueName()
except:
self.__reg_unique_name = FRegulatoryUtils.get_addinfo_value('uniqueName', self.__contact)
def Contact(self):
"""returns the contact for which this wrapper has all the addinfo/column values"""
return self.__contact
def DateOfBirth(self, reg_date_of_birth = VALUE_NOT_SET):
"""Date of birth of the concerned natural person"""
ael_reg_dob = None
if reg_date_of_birth != VALUE_NOT_SET:
try:
ael_reg_dob = ael.date_from_string(reg_date_of_birth)
except:
if reg_date_of_birth not in ['', None]:
msg = "The value <%s> provided for DateOfBirth is invalid and hence will not be set of the DateOfBirth AdditionalInfo"%reg_date_of_birth
FRegulatoryLogger.ERROR(logger, msg)
raise FRegulatoryInfoException.FRegInfoInvalidData(msg)
if ael_reg_dob:
self.__reg_date_of_birth = reg_date_of_birth
else:
self.__reg_date_of_birth = None
try:
self.__contact.AdditionalInfo().DateOfBirth(self.__reg_date_of_birth)
except:
pass
else:
return self.__reg_date_of_birth
def FirstName(self, reg_first_name = VALUE_NOT_SET):
"""First name of the concerned natural person"""
if reg_first_name != VALUE_NOT_SET:
self.__reg_first_name = reg_first_name
try:
self.__contact.AdditionalInfo().FirstName(self.__reg_first_name)
except:
pass
else:
if not self.__reg_first_name:
self.__reg_first_name = None
return self.__reg_first_name
def LastName(self, reg_last_name = VALUE_NOT_SET):
"""Last name of the concerned natural person"""
if reg_last_name != VALUE_NOT_SET:
self.__reg_last_name = reg_last_name
try:
self.__contact.AdditionalInfo().LastName(self.__reg_last_name)
except:
pass
else:
if not self.__reg_last_name:
self.__reg_last_name = None
return self.__reg_last_name
def NationalId(self, reg_national_id = VALUE_NOT_SET):
"""NationalId of the concerned natural person"""
if reg_national_id != VALUE_NOT_SET:
self.__reg_national_id = reg_national_id
try:
self.__contact.AdditionalInfo().NationalId(self.__reg_national_id)
except:
pass
else:
if not self.__reg_national_id:
self.__reg_national_id = None
return self.__reg_national_id
def CrmId(self, crm_id = VALUE_NOT_SET):
"""CrmId of the concerned natural person"""
if crm_id != VALUE_NOT_SET:
self.__reg_crm_id = crm_id
try:
self.__contact.AdditionalInfo().RegContactCrmId(self.__reg_crm_id)
except:
pass
else:
if not self.__reg_crm_id:
self.__reg_crm_id = None
return self.__reg_crm_id
def ExchangeId(self, exchange_id = VALUE_NOT_SET):
"""The identifier used towards/by an exchange to identify a person or legal entity, before the actual national id or the LEI is divulged."""
if exchange_id != VALUE_NOT_SET:
if str(exchange_id).isdigit():
self.__reg_exchange_id = int(exchange_id)
elif str(exchange_id) in ['None', '']:
self.__reg_exchange_id = None
else:
msg = "The ExchangeId provided <%s> is not of the expected integer format"%str(exchange_id)
FRegulatoryLogger.ERROR(logger, msg)
raise FRegulatoryInfoException.FRegInfoInvalidData(msg)
try:
self.__contact.AdditionalInfo().RegContExchangeId(self.__reg_exchange_id)
except:
pass
else:
if not self.__reg_exchange_id:
self.__reg_exchange_id = None
return self.__reg_exchange_id
def UniqueName(self, unique_name = VALUE_NOT_SET):
"""An optional unique name, if specified there can only be one contact with this name for each party."""
if unique_name != VALUE_NOT_SET:
try:
if FIntegrationUtils.FIntegrationUtils.get_acm_version_override() >= 2017.2:
self.__contact.UniqueName(unique_name)
else:
is_unique, contact_name = FRegulatoryUtils.is_unique_name(self.__contact, unique_name)
if is_unique:
try:
self.__contact.AdditionalInfo().UniqueName(unique_name)
except:
pass
else:
msg = "The uniqueName <%s> provided for contact <%s> on party <%s> is not unique. Another contact <%s> already has this unique name."%(unique_name, self.__contact.Fullname(), self.__contact.Party().Name(), contact_name)
FRegulatoryLogger.ERROR(logger, msg)
raise FRegulatoryInfoException.FRegInfoInvalidData(msg)
self.__reg_unique_name = unique_name
except Exception as e:
FRegulatoryLogger.ERROR(logger, str(e))
raise FRegulatoryInfoException.FRegInfoInvalidData(str(e))
else:
if not self.__reg_unique_name:
self.__reg_unique_name = None
return self.__reg_unique_name
def ClientType(self):
"""returns the ClientType based on where the CrmId is found on the linked objects"""
self.__client_type = FRegulatoryUtils.getClientType(self.__contact)
return self.__client_type
def JointAccount(self):
"""Another trader that jointly owns the account with this trader"""
joint_accounts = []
if self.IsGeneralPartner():
for contact in self.__contact.Party().Contacts():
if contact.AdditionalInfo().RegGeneralPartner():
joint_accounts.append(contact)
else:
FRegulatoryLogger.WARN(logger, "<%s> is not a General Partner. Hence JointAccount is None"%self.__contact.Fullname())
joint_accounts = None
return joint_accounts
def IsGeneralPartner(self, is_general_partner = VALUE_NOT_SET):
"""General partner has responsibility for the actions of the business, can legally bind
the business and is personally liable for all the business's debts and obligations."""
if is_general_partner != VALUE_NOT_SET:
self.__is_general_partner = FRegulatoryUtils.get_bool(is_general_partner, 'IsGeneralPartner')
FRegulatoryLogger.DEBUG(logger, "The IsGeneralPartner is being set to <%s>."%(str(self.__is_general_partner)))
try:
self.__contact.AdditionalInfo().RegGeneralPartner(self.__is_general_partner)
except:
pass
else:
if str(self.__is_general_partner) == "None":
FRegulatoryLogger.DEBUG(logger, "The IsGeneralPartner is None. Hence defaulting it to False")
self.__is_general_partner = False
return self.__is_general_partner
def __setattr__(self, attr, val):
if attr.startswith('_'):
super(FContactRegulatoryInfoBase, self).__setattr__(attr, val)
else:
if hasattr(self, attr):
getattr(self, attr)(val)
def Commit(self):
"""Committing this instance will automatically commit all the RegulatorySupport related attributes on the contact"""
try:
acm.BeginTransaction()
self.__contact.Commit()
if FIntegrationUtils.FIntegrationUtils.get_acm_version_override() < 2015.4:
self.__integration_utils.set_additional_info('DateOfBirth', self.__contact, self.__reg_date_of_birth)
self.__integration_utils.set_additional_info('FirstName', self.__contact, self.__reg_first_name)
self.__integration_utils.set_additional_info('LastName', self.__contact, self.__reg_last_name)
self.__integration_utils.set_additional_info('NationalId', self.__contact, self.__reg_national_id)
self.__integration_utils.set_additional_info('RegContactCrmId', self.__contact, self.__reg_crm_id)
self.__integration_utils.set_additional_info('RegContExchangeId', self.__contact, self.__reg_exchange_id)
self.__integration_utils.set_additional_info('UniqueName', self.__contact, self.__reg_unique_name)
self.__integration_utils.set_additional_info('RegGeneralPartner', self.__contact, self.__is_general_partner)
acm.CommitTransaction()
except Exception as e:
FRegulatoryLogger.ERROR(logger, str(e))
FRegulatoryLogger.ERROR(logger, "ABORTING TRANSACTION***********")
acm.AbortTransaction()
def Delete(self):
"""Deleting this instance automatically deletes all the attributes related to the reporting on the instrument or on the ContactRegulatoryInfo in the ADS"""
FRegulatoryUtils.Delete(self.__contact, "Contact")
FRegulatoryLogger.DEBUG(logger, "Deleted all AdditionalInfos on Contact related to Regulatory Reporting")
def Attributes(self):
"""returns the attributes on the FContactRegulatoryInfoBase instance"""
return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)
def RegulatoryInfo(self):
"""returns the FContactRegulatoryInfoBase instance for the given contact"""
conactRegInfo = FContactRegulatoryInfo(self)
return conactRegInfo
def Select(query):
"""Return a collection of FContactRegulatoryInfoBase instances matching constraint specified in the Select query"""
party = None
if query.find('and party') != -1:#it means there is an additional condition added
pos = query.find('and party')
party_name = query[(pos + len('and party')):]
query = query[0:pos]
party_name = party_name.replace('=', '').replace("'", '')
party_name = party_name.strip()
party = acm.FParty[party_name]
return_result = FRegulatoryUtils.Select(query, "FContact", party)
return return_result
|
normal
|
{
"blob_id": "d4e62950f10efeb27d19c3d9c672969342ef8c7c",
"index": 3095,
"step-1": "<mask token>\n\n\nclass FContactRegulatoryInfoBase(object):\n\n def __init__(self, contact=None):\n \"\"\"class that maintains all data related to the regulatory on the FContact\"\"\"\n try:\n self.__contact = contact\n if not self.__contact:\n FRegulatoryLogger.ERROR(logger,\n 'The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object'\n )\n return None\n self.__reg_date_of_birth = None\n self.__reg_first_name = None\n self.__reg_last_name = None\n self.__reg_national_id = None\n self.__reg_crm_id = None\n self.__crm_id_source = None\n self.__reg_exchange_id = None\n self.__reg_unique_name = None\n self.__client_type = None\n self.__is_general_partner = None\n if contact:\n self.__refresh(contact)\n self.__integration_utils = FIntegrationUtils.FIntegrationUtils()\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n <mask token>\n <mask token>\n\n def DateOfBirth(self, reg_date_of_birth=VALUE_NOT_SET):\n \"\"\"Date of birth of the concerned natural person\"\"\"\n ael_reg_dob = None\n if reg_date_of_birth != VALUE_NOT_SET:\n try:\n ael_reg_dob = ael.date_from_string(reg_date_of_birth)\n except:\n if reg_date_of_birth not in ['', None]:\n msg = (\n 'The value <%s> provided for DateOfBirth is invalid and hence will not be set of the DateOfBirth AdditionalInfo'\n % reg_date_of_birth)\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n if ael_reg_dob:\n self.__reg_date_of_birth = reg_date_of_birth\n else:\n self.__reg_date_of_birth = None\n try:\n self.__contact.AdditionalInfo().DateOfBirth(self.\n __reg_date_of_birth)\n except:\n pass\n else:\n return self.__reg_date_of_birth\n\n def FirstName(self, reg_first_name=VALUE_NOT_SET):\n \"\"\"First name of the concerned natural person\"\"\"\n if reg_first_name != VALUE_NOT_SET:\n self.__reg_first_name = reg_first_name\n try:\n self.__contact.AdditionalInfo().FirstName(self.__reg_first_name\n )\n except:\n pass\n else:\n if not self.__reg_first_name:\n self.__reg_first_name = None\n return self.__reg_first_name\n\n def LastName(self, reg_last_name=VALUE_NOT_SET):\n \"\"\"Last name of the concerned natural person\"\"\"\n if reg_last_name != VALUE_NOT_SET:\n self.__reg_last_name = reg_last_name\n try:\n self.__contact.AdditionalInfo().LastName(self.__reg_last_name)\n except:\n pass\n else:\n if not self.__reg_last_name:\n self.__reg_last_name = None\n return self.__reg_last_name\n\n def NationalId(self, reg_national_id=VALUE_NOT_SET):\n \"\"\"NationalId of the concerned natural person\"\"\"\n if reg_national_id != VALUE_NOT_SET:\n self.__reg_national_id = reg_national_id\n try:\n self.__contact.AdditionalInfo().NationalId(self.\n __reg_national_id)\n except:\n pass\n else:\n if not self.__reg_national_id:\n self.__reg_national_id = None\n return self.__reg_national_id\n\n def CrmId(self, crm_id=VALUE_NOT_SET):\n \"\"\"CrmId of the concerned natural person\"\"\"\n if crm_id != VALUE_NOT_SET:\n self.__reg_crm_id = crm_id\n try:\n self.__contact.AdditionalInfo().RegContactCrmId(self.\n __reg_crm_id)\n except:\n pass\n else:\n if not self.__reg_crm_id:\n self.__reg_crm_id = None\n return self.__reg_crm_id\n <mask token>\n\n def UniqueName(self, unique_name=VALUE_NOT_SET):\n \"\"\"An optional unique name, if specified there can only be one contact with this name for each party.\"\"\"\n if unique_name != VALUE_NOT_SET:\n try:\n if (FIntegrationUtils.FIntegrationUtils.\n get_acm_version_override() >= 2017.2):\n 
self.__contact.UniqueName(unique_name)\n else:\n is_unique, contact_name = FRegulatoryUtils.is_unique_name(\n self.__contact, unique_name)\n if is_unique:\n try:\n self.__contact.AdditionalInfo().UniqueName(\n unique_name)\n except:\n pass\n else:\n msg = (\n 'The uniqueName <%s> provided for contact <%s> on party <%s> is not unique. Another contact <%s> already has this unique name.'\n % (unique_name, self.__contact.Fullname(),\n self.__contact.Party().Name(), contact_name))\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n self.__reg_unique_name = unique_name\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n raise FRegulatoryInfoException.FRegInfoInvalidData(str(e))\n else:\n if not self.__reg_unique_name:\n self.__reg_unique_name = None\n return self.__reg_unique_name\n\n def ClientType(self):\n \"\"\"returns the ClientType based on where the CrmId is found on the linked objects\"\"\"\n self.__client_type = FRegulatoryUtils.getClientType(self.__contact)\n return self.__client_type\n <mask token>\n\n def IsGeneralPartner(self, is_general_partner=VALUE_NOT_SET):\n \"\"\"General partner has responsibility for the actions of the business, can legally bind\n the business and is personally liable for all the business's debts and obligations.\"\"\"\n if is_general_partner != VALUE_NOT_SET:\n self.__is_general_partner = FRegulatoryUtils.get_bool(\n is_general_partner, 'IsGeneralPartner')\n FRegulatoryLogger.DEBUG(logger, \n 'The IsGeneralPartner is being set to <%s>.' % str(self.\n __is_general_partner))\n try:\n self.__contact.AdditionalInfo().RegGeneralPartner(self.\n __is_general_partner)\n except:\n pass\n else:\n if str(self.__is_general_partner) == 'None':\n FRegulatoryLogger.DEBUG(logger,\n 'The IsGeneralPartner is None. Hence defaulting it to False'\n )\n self.__is_general_partner = False\n return self.__is_general_partner\n\n def __setattr__(self, attr, val):\n if attr.startswith('_'):\n super(FContactRegulatoryInfoBase, self).__setattr__(attr, val)\n elif hasattr(self, attr):\n getattr(self, attr)(val)\n <mask token>\n\n def Delete(self):\n \"\"\"Deleting this instance automatically deletes all the attributes related to the reporting on the instrument or on the ContactRegulatoryInfo in the ADS\"\"\"\n FRegulatoryUtils.Delete(self.__contact, 'Contact')\n FRegulatoryLogger.DEBUG(logger,\n 'Deleted all AdditionalInfos on Contact related to Regulatory Reporting'\n )\n\n def Attributes(self):\n \"\"\"returns the attributes on the FContactRegulatoryInfoBase instance\"\"\"\n return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass FContactRegulatoryInfoBase(object):\n\n def __init__(self, contact=None):\n \"\"\"class that maintains all data related to the regulatory on the FContact\"\"\"\n try:\n self.__contact = contact\n if not self.__contact:\n FRegulatoryLogger.ERROR(logger,\n 'The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object'\n )\n return None\n self.__reg_date_of_birth = None\n self.__reg_first_name = None\n self.__reg_last_name = None\n self.__reg_national_id = None\n self.__reg_crm_id = None\n self.__crm_id_source = None\n self.__reg_exchange_id = None\n self.__reg_unique_name = None\n self.__client_type = None\n self.__is_general_partner = None\n if contact:\n self.__refresh(contact)\n self.__integration_utils = FIntegrationUtils.FIntegrationUtils()\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n\n def __refresh(self, contact):\n self.__reg_date_of_birth = FRegulatoryUtils.get_addinfo_value(\n 'dateOfBirth', self.__contact)\n self.__reg_first_name = FRegulatoryUtils.get_addinfo_value('firstName',\n self.__contact)\n self.__reg_last_name = FRegulatoryUtils.get_addinfo_value('lastName',\n self.__contact)\n self.__reg_national_id = FRegulatoryUtils.get_addinfo_value(\n 'nationalId', self.__contact)\n self.__is_general_partner = FRegulatoryUtils.get_addinfo_value(\n 'regGeneralPartner', self.__contact)\n self.__reg_crm_id = FRegulatoryUtils.get_addinfo_value(\n 'regContactCrmId', self.__contact)\n self.__reg_exchange_id = FRegulatoryUtils.get_addinfo_value(\n 'regContExchangeId', self.__contact)\n try:\n self.__reg_unique_name = self.__contact.UniqueName()\n except:\n self.__reg_unique_name = FRegulatoryUtils.get_addinfo_value(\n 'uniqueName', self.__contact)\n <mask token>\n\n def DateOfBirth(self, reg_date_of_birth=VALUE_NOT_SET):\n \"\"\"Date of birth of the concerned natural person\"\"\"\n ael_reg_dob = None\n if reg_date_of_birth != VALUE_NOT_SET:\n try:\n ael_reg_dob = ael.date_from_string(reg_date_of_birth)\n except:\n if reg_date_of_birth not in ['', None]:\n msg = (\n 'The value <%s> provided for DateOfBirth is invalid and hence will not be set of the DateOfBirth AdditionalInfo'\n % reg_date_of_birth)\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n if ael_reg_dob:\n self.__reg_date_of_birth = reg_date_of_birth\n else:\n self.__reg_date_of_birth = None\n try:\n self.__contact.AdditionalInfo().DateOfBirth(self.\n __reg_date_of_birth)\n except:\n pass\n else:\n return self.__reg_date_of_birth\n\n def FirstName(self, reg_first_name=VALUE_NOT_SET):\n \"\"\"First name of the concerned natural person\"\"\"\n if reg_first_name != VALUE_NOT_SET:\n self.__reg_first_name = reg_first_name\n try:\n self.__contact.AdditionalInfo().FirstName(self.__reg_first_name\n )\n except:\n pass\n else:\n if not self.__reg_first_name:\n self.__reg_first_name = None\n return self.__reg_first_name\n\n def LastName(self, reg_last_name=VALUE_NOT_SET):\n \"\"\"Last name of the concerned natural person\"\"\"\n if reg_last_name != VALUE_NOT_SET:\n self.__reg_last_name = reg_last_name\n try:\n self.__contact.AdditionalInfo().LastName(self.__reg_last_name)\n except:\n pass\n else:\n if not self.__reg_last_name:\n self.__reg_last_name = None\n return self.__reg_last_name\n\n def NationalId(self, reg_national_id=VALUE_NOT_SET):\n \"\"\"NationalId of the concerned natural person\"\"\"\n if reg_national_id != VALUE_NOT_SET:\n self.__reg_national_id = reg_national_id\n try:\n 
self.__contact.AdditionalInfo().NationalId(self.\n __reg_national_id)\n except:\n pass\n else:\n if not self.__reg_national_id:\n self.__reg_national_id = None\n return self.__reg_national_id\n\n def CrmId(self, crm_id=VALUE_NOT_SET):\n \"\"\"CrmId of the concerned natural person\"\"\"\n if crm_id != VALUE_NOT_SET:\n self.__reg_crm_id = crm_id\n try:\n self.__contact.AdditionalInfo().RegContactCrmId(self.\n __reg_crm_id)\n except:\n pass\n else:\n if not self.__reg_crm_id:\n self.__reg_crm_id = None\n return self.__reg_crm_id\n\n def ExchangeId(self, exchange_id=VALUE_NOT_SET):\n \"\"\"The identifier used towards/by an exchange to identify a person or legal entity, before the actual national id or the LEI is divulged.\"\"\"\n if exchange_id != VALUE_NOT_SET:\n if str(exchange_id).isdigit():\n self.__reg_exchange_id = int(exchange_id)\n elif str(exchange_id) in ['None', '']:\n self.__reg_exchange_id = None\n else:\n msg = (\n 'The ExchangeId provided <%s> is not of the expected integer format'\n % str(exchange_id))\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n try:\n self.__contact.AdditionalInfo().RegContExchangeId(self.\n __reg_exchange_id)\n except:\n pass\n else:\n if not self.__reg_exchange_id:\n self.__reg_exchange_id = None\n return self.__reg_exchange_id\n\n def UniqueName(self, unique_name=VALUE_NOT_SET):\n \"\"\"An optional unique name, if specified there can only be one contact with this name for each party.\"\"\"\n if unique_name != VALUE_NOT_SET:\n try:\n if (FIntegrationUtils.FIntegrationUtils.\n get_acm_version_override() >= 2017.2):\n self.__contact.UniqueName(unique_name)\n else:\n is_unique, contact_name = FRegulatoryUtils.is_unique_name(\n self.__contact, unique_name)\n if is_unique:\n try:\n self.__contact.AdditionalInfo().UniqueName(\n unique_name)\n except:\n pass\n else:\n msg = (\n 'The uniqueName <%s> provided for contact <%s> on party <%s> is not unique. Another contact <%s> already has this unique name.'\n % (unique_name, self.__contact.Fullname(),\n self.__contact.Party().Name(), contact_name))\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n self.__reg_unique_name = unique_name\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n raise FRegulatoryInfoException.FRegInfoInvalidData(str(e))\n else:\n if not self.__reg_unique_name:\n self.__reg_unique_name = None\n return self.__reg_unique_name\n\n def ClientType(self):\n \"\"\"returns the ClientType based on where the CrmId is found on the linked objects\"\"\"\n self.__client_type = FRegulatoryUtils.getClientType(self.__contact)\n return self.__client_type\n <mask token>\n\n def IsGeneralPartner(self, is_general_partner=VALUE_NOT_SET):\n \"\"\"General partner has responsibility for the actions of the business, can legally bind\n the business and is personally liable for all the business's debts and obligations.\"\"\"\n if is_general_partner != VALUE_NOT_SET:\n self.__is_general_partner = FRegulatoryUtils.get_bool(\n is_general_partner, 'IsGeneralPartner')\n FRegulatoryLogger.DEBUG(logger, \n 'The IsGeneralPartner is being set to <%s>.' % str(self.\n __is_general_partner))\n try:\n self.__contact.AdditionalInfo().RegGeneralPartner(self.\n __is_general_partner)\n except:\n pass\n else:\n if str(self.__is_general_partner) == 'None':\n FRegulatoryLogger.DEBUG(logger,\n 'The IsGeneralPartner is None. 
Hence defaulting it to False'\n )\n self.__is_general_partner = False\n return self.__is_general_partner\n\n def __setattr__(self, attr, val):\n if attr.startswith('_'):\n super(FContactRegulatoryInfoBase, self).__setattr__(attr, val)\n elif hasattr(self, attr):\n getattr(self, attr)(val)\n <mask token>\n\n def Delete(self):\n \"\"\"Deleting this instance automatically deletes all the attributes related to the reporting on the instrument or on the ContactRegulatoryInfo in the ADS\"\"\"\n FRegulatoryUtils.Delete(self.__contact, 'Contact')\n FRegulatoryLogger.DEBUG(logger,\n 'Deleted all AdditionalInfos on Contact related to Regulatory Reporting'\n )\n\n def Attributes(self):\n \"\"\"returns the attributes on the FContactRegulatoryInfoBase instance\"\"\"\n return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass FContactRegulatoryInfoBase(object):\n\n def __init__(self, contact=None):\n \"\"\"class that maintains all data related to the regulatory on the FContact\"\"\"\n try:\n self.__contact = contact\n if not self.__contact:\n FRegulatoryLogger.ERROR(logger,\n 'The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object'\n )\n return None\n self.__reg_date_of_birth = None\n self.__reg_first_name = None\n self.__reg_last_name = None\n self.__reg_national_id = None\n self.__reg_crm_id = None\n self.__crm_id_source = None\n self.__reg_exchange_id = None\n self.__reg_unique_name = None\n self.__client_type = None\n self.__is_general_partner = None\n if contact:\n self.__refresh(contact)\n self.__integration_utils = FIntegrationUtils.FIntegrationUtils()\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n\n def __refresh(self, contact):\n self.__reg_date_of_birth = FRegulatoryUtils.get_addinfo_value(\n 'dateOfBirth', self.__contact)\n self.__reg_first_name = FRegulatoryUtils.get_addinfo_value('firstName',\n self.__contact)\n self.__reg_last_name = FRegulatoryUtils.get_addinfo_value('lastName',\n self.__contact)\n self.__reg_national_id = FRegulatoryUtils.get_addinfo_value(\n 'nationalId', self.__contact)\n self.__is_general_partner = FRegulatoryUtils.get_addinfo_value(\n 'regGeneralPartner', self.__contact)\n self.__reg_crm_id = FRegulatoryUtils.get_addinfo_value(\n 'regContactCrmId', self.__contact)\n self.__reg_exchange_id = FRegulatoryUtils.get_addinfo_value(\n 'regContExchangeId', self.__contact)\n try:\n self.__reg_unique_name = self.__contact.UniqueName()\n except:\n self.__reg_unique_name = FRegulatoryUtils.get_addinfo_value(\n 'uniqueName', self.__contact)\n\n def Contact(self):\n \"\"\"returns the contact for which this wrapper has all the addinfo/column values\"\"\"\n return self.__contact\n\n def DateOfBirth(self, reg_date_of_birth=VALUE_NOT_SET):\n \"\"\"Date of birth of the concerned natural person\"\"\"\n ael_reg_dob = None\n if reg_date_of_birth != VALUE_NOT_SET:\n try:\n ael_reg_dob = ael.date_from_string(reg_date_of_birth)\n except:\n if reg_date_of_birth not in ['', None]:\n msg = (\n 'The value <%s> provided for DateOfBirth is invalid and hence will not be set of the DateOfBirth AdditionalInfo'\n % reg_date_of_birth)\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n if ael_reg_dob:\n self.__reg_date_of_birth = reg_date_of_birth\n else:\n self.__reg_date_of_birth = None\n try:\n self.__contact.AdditionalInfo().DateOfBirth(self.\n __reg_date_of_birth)\n except:\n pass\n else:\n return self.__reg_date_of_birth\n\n def FirstName(self, reg_first_name=VALUE_NOT_SET):\n \"\"\"First name of the concerned natural person\"\"\"\n if reg_first_name != VALUE_NOT_SET:\n self.__reg_first_name = reg_first_name\n try:\n self.__contact.AdditionalInfo().FirstName(self.__reg_first_name\n )\n except:\n pass\n else:\n if not self.__reg_first_name:\n self.__reg_first_name = None\n return self.__reg_first_name\n\n def LastName(self, reg_last_name=VALUE_NOT_SET):\n \"\"\"Last name of the concerned natural person\"\"\"\n if reg_last_name != VALUE_NOT_SET:\n self.__reg_last_name = reg_last_name\n try:\n self.__contact.AdditionalInfo().LastName(self.__reg_last_name)\n except:\n pass\n else:\n if not self.__reg_last_name:\n self.__reg_last_name = None\n return self.__reg_last_name\n\n def NationalId(self, reg_national_id=VALUE_NOT_SET):\n \"\"\"NationalId of the 
concerned natural person\"\"\"\n if reg_national_id != VALUE_NOT_SET:\n self.__reg_national_id = reg_national_id\n try:\n self.__contact.AdditionalInfo().NationalId(self.\n __reg_national_id)\n except:\n pass\n else:\n if not self.__reg_national_id:\n self.__reg_national_id = None\n return self.__reg_national_id\n\n def CrmId(self, crm_id=VALUE_NOT_SET):\n \"\"\"CrmId of the concerned natural person\"\"\"\n if crm_id != VALUE_NOT_SET:\n self.__reg_crm_id = crm_id\n try:\n self.__contact.AdditionalInfo().RegContactCrmId(self.\n __reg_crm_id)\n except:\n pass\n else:\n if not self.__reg_crm_id:\n self.__reg_crm_id = None\n return self.__reg_crm_id\n\n def ExchangeId(self, exchange_id=VALUE_NOT_SET):\n \"\"\"The identifier used towards/by an exchange to identify a person or legal entity, before the actual national id or the LEI is divulged.\"\"\"\n if exchange_id != VALUE_NOT_SET:\n if str(exchange_id).isdigit():\n self.__reg_exchange_id = int(exchange_id)\n elif str(exchange_id) in ['None', '']:\n self.__reg_exchange_id = None\n else:\n msg = (\n 'The ExchangeId provided <%s> is not of the expected integer format'\n % str(exchange_id))\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n try:\n self.__contact.AdditionalInfo().RegContExchangeId(self.\n __reg_exchange_id)\n except:\n pass\n else:\n if not self.__reg_exchange_id:\n self.__reg_exchange_id = None\n return self.__reg_exchange_id\n\n def UniqueName(self, unique_name=VALUE_NOT_SET):\n \"\"\"An optional unique name, if specified there can only be one contact with this name for each party.\"\"\"\n if unique_name != VALUE_NOT_SET:\n try:\n if (FIntegrationUtils.FIntegrationUtils.\n get_acm_version_override() >= 2017.2):\n self.__contact.UniqueName(unique_name)\n else:\n is_unique, contact_name = FRegulatoryUtils.is_unique_name(\n self.__contact, unique_name)\n if is_unique:\n try:\n self.__contact.AdditionalInfo().UniqueName(\n unique_name)\n except:\n pass\n else:\n msg = (\n 'The uniqueName <%s> provided for contact <%s> on party <%s> is not unique. Another contact <%s> already has this unique name.'\n % (unique_name, self.__contact.Fullname(),\n self.__contact.Party().Name(), contact_name))\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n self.__reg_unique_name = unique_name\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n raise FRegulatoryInfoException.FRegInfoInvalidData(str(e))\n else:\n if not self.__reg_unique_name:\n self.__reg_unique_name = None\n return self.__reg_unique_name\n\n def ClientType(self):\n \"\"\"returns the ClientType based on where the CrmId is found on the linked objects\"\"\"\n self.__client_type = FRegulatoryUtils.getClientType(self.__contact)\n return self.__client_type\n\n def JointAccount(self):\n \"\"\"Another trader that jointly owns the account with this trader\"\"\"\n joint_accounts = []\n if self.IsGeneralPartner():\n for contact in self.__contact.Party().Contacts():\n if contact.AdditionalInfo().RegGeneralPartner():\n joint_accounts.append(contact)\n else:\n FRegulatoryLogger.WARN(logger, \n '<%s> is not a General Partner. 
Hence JointAccount is None' %\n self.__contact.Fullname())\n joint_accounts = None\n return joint_accounts\n\n def IsGeneralPartner(self, is_general_partner=VALUE_NOT_SET):\n \"\"\"General partner has responsibility for the actions of the business, can legally bind\n the business and is personally liable for all the business's debts and obligations.\"\"\"\n if is_general_partner != VALUE_NOT_SET:\n self.__is_general_partner = FRegulatoryUtils.get_bool(\n is_general_partner, 'IsGeneralPartner')\n FRegulatoryLogger.DEBUG(logger, \n 'The IsGeneralPartner is being set to <%s>.' % str(self.\n __is_general_partner))\n try:\n self.__contact.AdditionalInfo().RegGeneralPartner(self.\n __is_general_partner)\n except:\n pass\n else:\n if str(self.__is_general_partner) == 'None':\n FRegulatoryLogger.DEBUG(logger,\n 'The IsGeneralPartner is None. Hence defaulting it to False'\n )\n self.__is_general_partner = False\n return self.__is_general_partner\n\n def __setattr__(self, attr, val):\n if attr.startswith('_'):\n super(FContactRegulatoryInfoBase, self).__setattr__(attr, val)\n elif hasattr(self, attr):\n getattr(self, attr)(val)\n\n def Commit(self):\n \"\"\"Committing this instance will automatically commit all the RegulatorySupport related attributes on the contact\"\"\"\n try:\n acm.BeginTransaction()\n self.__contact.Commit()\n if FIntegrationUtils.FIntegrationUtils.get_acm_version_override(\n ) < 2015.4:\n self.__integration_utils.set_additional_info('DateOfBirth',\n self.__contact, self.__reg_date_of_birth)\n self.__integration_utils.set_additional_info('FirstName',\n self.__contact, self.__reg_first_name)\n self.__integration_utils.set_additional_info('LastName',\n self.__contact, self.__reg_last_name)\n self.__integration_utils.set_additional_info('NationalId',\n self.__contact, self.__reg_national_id)\n self.__integration_utils.set_additional_info('RegContactCrmId',\n self.__contact, self.__reg_crm_id)\n self.__integration_utils.set_additional_info(\n 'RegContExchangeId', self.__contact, self.__reg_exchange_id\n )\n self.__integration_utils.set_additional_info('UniqueName',\n self.__contact, self.__reg_unique_name)\n self.__integration_utils.set_additional_info(\n 'RegGeneralPartner', self.__contact, self.\n __is_general_partner)\n acm.CommitTransaction()\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n FRegulatoryLogger.ERROR(logger, 'ABORTING TRANSACTION***********')\n acm.AbortTransaction()\n\n def Delete(self):\n \"\"\"Deleting this instance automatically deletes all the attributes related to the reporting on the instrument or on the ContactRegulatoryInfo in the ADS\"\"\"\n FRegulatoryUtils.Delete(self.__contact, 'Contact')\n FRegulatoryLogger.DEBUG(logger,\n 'Deleted all AdditionalInfos on Contact related to Regulatory Reporting'\n )\n\n def Attributes(self):\n \"\"\"returns the attributes on the FContactRegulatoryInfoBase instance\"\"\"\n return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass FContactRegulatoryInfoBase(object):\n\n def __init__(self, contact=None):\n \"\"\"class that maintains all data related to the regulatory on the FContact\"\"\"\n try:\n self.__contact = contact\n if not self.__contact:\n FRegulatoryLogger.ERROR(logger,\n 'The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object'\n )\n return None\n self.__reg_date_of_birth = None\n self.__reg_first_name = None\n self.__reg_last_name = None\n self.__reg_national_id = None\n self.__reg_crm_id = None\n self.__crm_id_source = None\n self.__reg_exchange_id = None\n self.__reg_unique_name = None\n self.__client_type = None\n self.__is_general_partner = None\n if contact:\n self.__refresh(contact)\n self.__integration_utils = FIntegrationUtils.FIntegrationUtils()\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n\n def __refresh(self, contact):\n self.__reg_date_of_birth = FRegulatoryUtils.get_addinfo_value(\n 'dateOfBirth', self.__contact)\n self.__reg_first_name = FRegulatoryUtils.get_addinfo_value('firstName',\n self.__contact)\n self.__reg_last_name = FRegulatoryUtils.get_addinfo_value('lastName',\n self.__contact)\n self.__reg_national_id = FRegulatoryUtils.get_addinfo_value(\n 'nationalId', self.__contact)\n self.__is_general_partner = FRegulatoryUtils.get_addinfo_value(\n 'regGeneralPartner', self.__contact)\n self.__reg_crm_id = FRegulatoryUtils.get_addinfo_value(\n 'regContactCrmId', self.__contact)\n self.__reg_exchange_id = FRegulatoryUtils.get_addinfo_value(\n 'regContExchangeId', self.__contact)\n try:\n self.__reg_unique_name = self.__contact.UniqueName()\n except:\n self.__reg_unique_name = FRegulatoryUtils.get_addinfo_value(\n 'uniqueName', self.__contact)\n\n def Contact(self):\n \"\"\"returns the contact for which this wrapper has all the addinfo/column values\"\"\"\n return self.__contact\n\n def DateOfBirth(self, reg_date_of_birth=VALUE_NOT_SET):\n \"\"\"Date of birth of the concerned natural person\"\"\"\n ael_reg_dob = None\n if reg_date_of_birth != VALUE_NOT_SET:\n try:\n ael_reg_dob = ael.date_from_string(reg_date_of_birth)\n except:\n if reg_date_of_birth not in ['', None]:\n msg = (\n 'The value <%s> provided for DateOfBirth is invalid and hence will not be set of the DateOfBirth AdditionalInfo'\n % reg_date_of_birth)\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n if ael_reg_dob:\n self.__reg_date_of_birth = reg_date_of_birth\n else:\n self.__reg_date_of_birth = None\n try:\n self.__contact.AdditionalInfo().DateOfBirth(self.\n __reg_date_of_birth)\n except:\n pass\n else:\n return self.__reg_date_of_birth\n\n def FirstName(self, reg_first_name=VALUE_NOT_SET):\n \"\"\"First name of the concerned natural person\"\"\"\n if reg_first_name != VALUE_NOT_SET:\n self.__reg_first_name = reg_first_name\n try:\n self.__contact.AdditionalInfo().FirstName(self.__reg_first_name\n )\n except:\n pass\n else:\n if not self.__reg_first_name:\n self.__reg_first_name = None\n return self.__reg_first_name\n\n def LastName(self, reg_last_name=VALUE_NOT_SET):\n \"\"\"Last name of the concerned natural person\"\"\"\n if reg_last_name != VALUE_NOT_SET:\n self.__reg_last_name = reg_last_name\n try:\n self.__contact.AdditionalInfo().LastName(self.__reg_last_name)\n except:\n pass\n else:\n if not self.__reg_last_name:\n self.__reg_last_name = None\n return self.__reg_last_name\n\n def NationalId(self, reg_national_id=VALUE_NOT_SET):\n \"\"\"NationalId of the 
concerned natural person\"\"\"\n if reg_national_id != VALUE_NOT_SET:\n self.__reg_national_id = reg_national_id\n try:\n self.__contact.AdditionalInfo().NationalId(self.\n __reg_national_id)\n except:\n pass\n else:\n if not self.__reg_national_id:\n self.__reg_national_id = None\n return self.__reg_national_id\n\n def CrmId(self, crm_id=VALUE_NOT_SET):\n \"\"\"CrmId of the concerned natural person\"\"\"\n if crm_id != VALUE_NOT_SET:\n self.__reg_crm_id = crm_id\n try:\n self.__contact.AdditionalInfo().RegContactCrmId(self.\n __reg_crm_id)\n except:\n pass\n else:\n if not self.__reg_crm_id:\n self.__reg_crm_id = None\n return self.__reg_crm_id\n\n def ExchangeId(self, exchange_id=VALUE_NOT_SET):\n \"\"\"The identifier used towards/by an exchange to identify a person or legal entity, before the actual national id or the LEI is divulged.\"\"\"\n if exchange_id != VALUE_NOT_SET:\n if str(exchange_id).isdigit():\n self.__reg_exchange_id = int(exchange_id)\n elif str(exchange_id) in ['None', '']:\n self.__reg_exchange_id = None\n else:\n msg = (\n 'The ExchangeId provided <%s> is not of the expected integer format'\n % str(exchange_id))\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n try:\n self.__contact.AdditionalInfo().RegContExchangeId(self.\n __reg_exchange_id)\n except:\n pass\n else:\n if not self.__reg_exchange_id:\n self.__reg_exchange_id = None\n return self.__reg_exchange_id\n\n def UniqueName(self, unique_name=VALUE_NOT_SET):\n \"\"\"An optional unique name, if specified there can only be one contact with this name for each party.\"\"\"\n if unique_name != VALUE_NOT_SET:\n try:\n if (FIntegrationUtils.FIntegrationUtils.\n get_acm_version_override() >= 2017.2):\n self.__contact.UniqueName(unique_name)\n else:\n is_unique, contact_name = FRegulatoryUtils.is_unique_name(\n self.__contact, unique_name)\n if is_unique:\n try:\n self.__contact.AdditionalInfo().UniqueName(\n unique_name)\n except:\n pass\n else:\n msg = (\n 'The uniqueName <%s> provided for contact <%s> on party <%s> is not unique. Another contact <%s> already has this unique name.'\n % (unique_name, self.__contact.Fullname(),\n self.__contact.Party().Name(), contact_name))\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n self.__reg_unique_name = unique_name\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n raise FRegulatoryInfoException.FRegInfoInvalidData(str(e))\n else:\n if not self.__reg_unique_name:\n self.__reg_unique_name = None\n return self.__reg_unique_name\n\n def ClientType(self):\n \"\"\"returns the ClientType based on where the CrmId is found on the linked objects\"\"\"\n self.__client_type = FRegulatoryUtils.getClientType(self.__contact)\n return self.__client_type\n\n def JointAccount(self):\n \"\"\"Another trader that jointly owns the account with this trader\"\"\"\n joint_accounts = []\n if self.IsGeneralPartner():\n for contact in self.__contact.Party().Contacts():\n if contact.AdditionalInfo().RegGeneralPartner():\n joint_accounts.append(contact)\n else:\n FRegulatoryLogger.WARN(logger, \n '<%s> is not a General Partner. 
Hence JointAccount is None' %\n self.__contact.Fullname())\n joint_accounts = None\n return joint_accounts\n\n def IsGeneralPartner(self, is_general_partner=VALUE_NOT_SET):\n \"\"\"General partner has responsibility for the actions of the business, can legally bind\n the business and is personally liable for all the business's debts and obligations.\"\"\"\n if is_general_partner != VALUE_NOT_SET:\n self.__is_general_partner = FRegulatoryUtils.get_bool(\n is_general_partner, 'IsGeneralPartner')\n FRegulatoryLogger.DEBUG(logger, \n 'The IsGeneralPartner is being set to <%s>.' % str(self.\n __is_general_partner))\n try:\n self.__contact.AdditionalInfo().RegGeneralPartner(self.\n __is_general_partner)\n except:\n pass\n else:\n if str(self.__is_general_partner) == 'None':\n FRegulatoryLogger.DEBUG(logger,\n 'The IsGeneralPartner is None. Hence defaulting it to False'\n )\n self.__is_general_partner = False\n return self.__is_general_partner\n\n def __setattr__(self, attr, val):\n if attr.startswith('_'):\n super(FContactRegulatoryInfoBase, self).__setattr__(attr, val)\n elif hasattr(self, attr):\n getattr(self, attr)(val)\n\n def Commit(self):\n \"\"\"Committing this instance will automatically commit all the RegulatorySupport related attributes on the contact\"\"\"\n try:\n acm.BeginTransaction()\n self.__contact.Commit()\n if FIntegrationUtils.FIntegrationUtils.get_acm_version_override(\n ) < 2015.4:\n self.__integration_utils.set_additional_info('DateOfBirth',\n self.__contact, self.__reg_date_of_birth)\n self.__integration_utils.set_additional_info('FirstName',\n self.__contact, self.__reg_first_name)\n self.__integration_utils.set_additional_info('LastName',\n self.__contact, self.__reg_last_name)\n self.__integration_utils.set_additional_info('NationalId',\n self.__contact, self.__reg_national_id)\n self.__integration_utils.set_additional_info('RegContactCrmId',\n self.__contact, self.__reg_crm_id)\n self.__integration_utils.set_additional_info(\n 'RegContExchangeId', self.__contact, self.__reg_exchange_id\n )\n self.__integration_utils.set_additional_info('UniqueName',\n self.__contact, self.__reg_unique_name)\n self.__integration_utils.set_additional_info(\n 'RegGeneralPartner', self.__contact, self.\n __is_general_partner)\n acm.CommitTransaction()\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n FRegulatoryLogger.ERROR(logger, 'ABORTING TRANSACTION***********')\n acm.AbortTransaction()\n\n def Delete(self):\n \"\"\"Deleting this instance automatically deletes all the attributes related to the reporting on the instrument or on the ContactRegulatoryInfo in the ADS\"\"\"\n FRegulatoryUtils.Delete(self.__contact, 'Contact')\n FRegulatoryLogger.DEBUG(logger,\n 'Deleted all AdditionalInfos on Contact related to Regulatory Reporting'\n )\n\n def Attributes(self):\n \"\"\"returns the attributes on the FContactRegulatoryInfoBase instance\"\"\"\n return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)\n\n\ndef RegulatoryInfo(self):\n \"\"\"returns the FContactRegulatoryInfoBase instance for the given contact\"\"\"\n conactRegInfo = FContactRegulatoryInfo(self)\n return conactRegInfo\n\n\ndef Select(query):\n \"\"\"Return a collection of FContactRegulatoryInfoBase instances matching constraint specified in the Select query\"\"\"\n party = None\n if query.find('and party') != -1:\n pos = query.find('and party')\n party_name = query[pos + len('and party'):]\n query = query[0:pos]\n party_name = party_name.replace('=', '').replace(\"'\", '')\n party_name = 
party_name.strip()\n party = acm.FParty[party_name]\n return_result = FRegulatoryUtils.Select(query, 'FContact', party)\n return return_result\n",
"step-5": "\"\"\"------------------------------------------------------------------------\nMODULE\n FContactRegulatoryInfoBase -\nDESCRIPTION:\n This file provides the custom instance of RegulatoryInfo on the Contact which has all the RegulatoryInfo related methods\nVERSION: 1.0.25(0.25.7)\nRESTRICTIONS/ LIMITATIONS:\n 1. Any modifications to the scripts/ encrypted modules/ clear text code within the core is not supported.\n 2. This module is not customizable\n 3. The component may not work as expected with any modifications done to this module at user end\n--------------------------------------------------------------------------\"\"\"\nimport string\nimport acm\nimport FIntegrationUtils\nimport FRegulatoryLogger\nimport ael\nimport FRegulatoryUtils\nimport FRegulatoryInfoException\nlogger = 'FContactRegulatoryInfoBase'\nVALUE_NOT_SET = ()\n\nclass FContactRegulatoryInfoBase(object):\n def __init__(self, contact = None):\n \"\"\"class that maintains all data related to the regulatory on the FContact\"\"\"\n try:\n self.__contact = contact\n if not self.__contact:\n FRegulatoryLogger.ERROR(logger, \"The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object\")\n return None\n self.__reg_date_of_birth = None\n self.__reg_first_name = None\n self.__reg_last_name = None\n self.__reg_national_id = None\n self.__reg_crm_id = None\n self.__crm_id_source = None\n self.__reg_exchange_id = None\n self.__reg_unique_name = None\n self.__client_type = None\n self.__is_general_partner = None\n if contact:\n self.__refresh(contact)\n self.__integration_utils = FIntegrationUtils.FIntegrationUtils()\n except Exception as e :\n FRegulatoryLogger.ERROR(logger, str(e))\n\n def __refresh(self, contact):\n self.__reg_date_of_birth = FRegulatoryUtils.get_addinfo_value('dateOfBirth', self.__contact)\n self.__reg_first_name = FRegulatoryUtils.get_addinfo_value('firstName', self.__contact)\n self.__reg_last_name = FRegulatoryUtils.get_addinfo_value('lastName', self.__contact)\n self.__reg_national_id = FRegulatoryUtils.get_addinfo_value('nationalId', self.__contact)\n self.__is_general_partner = FRegulatoryUtils.get_addinfo_value('regGeneralPartner', self.__contact)\n self.__reg_crm_id = FRegulatoryUtils.get_addinfo_value('regContactCrmId', self.__contact)\n self.__reg_exchange_id = FRegulatoryUtils.get_addinfo_value('regContExchangeId', self.__contact)\n try:\n self.__reg_unique_name = self.__contact.UniqueName()\n except:\n self.__reg_unique_name = FRegulatoryUtils.get_addinfo_value('uniqueName', self.__contact)\n\n def Contact(self):\n \"\"\"returns the contact for which this wrapper has all the addinfo/column values\"\"\"\n return self.__contact\n\n def DateOfBirth(self, reg_date_of_birth = VALUE_NOT_SET):\n \"\"\"Date of birth of the concerned natural person\"\"\"\n ael_reg_dob = None\n if reg_date_of_birth != VALUE_NOT_SET:\n try:\n ael_reg_dob = ael.date_from_string(reg_date_of_birth)\n except:\n if reg_date_of_birth not in ['', None]:\n msg = \"The value <%s> provided for DateOfBirth is invalid and hence will not be set of the DateOfBirth AdditionalInfo\"%reg_date_of_birth\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n if ael_reg_dob:\n self.__reg_date_of_birth = reg_date_of_birth\n else:\n self.__reg_date_of_birth = None\n try:\n self.__contact.AdditionalInfo().DateOfBirth(self.__reg_date_of_birth)\n except:\n pass\n else:\n return self.__reg_date_of_birth\n\n def FirstName(self, reg_first_name = 
VALUE_NOT_SET):\n \"\"\"First name of the concerned natural person\"\"\"\n if reg_first_name != VALUE_NOT_SET:\n self.__reg_first_name = reg_first_name\n try:\n self.__contact.AdditionalInfo().FirstName(self.__reg_first_name)\n except:\n pass\n else:\n if not self.__reg_first_name:\n self.__reg_first_name = None\n return self.__reg_first_name\n\n def LastName(self, reg_last_name = VALUE_NOT_SET):\n \"\"\"Last name of the concerned natural person\"\"\"\n if reg_last_name != VALUE_NOT_SET:\n self.__reg_last_name = reg_last_name\n try:\n self.__contact.AdditionalInfo().LastName(self.__reg_last_name)\n except:\n pass\n else:\n if not self.__reg_last_name:\n self.__reg_last_name = None\n return self.__reg_last_name\n\n def NationalId(self, reg_national_id = VALUE_NOT_SET):\n \"\"\"NationalId of the concerned natural person\"\"\"\n if reg_national_id != VALUE_NOT_SET:\n self.__reg_national_id = reg_national_id\n try:\n self.__contact.AdditionalInfo().NationalId(self.__reg_national_id)\n except:\n pass\n else:\n if not self.__reg_national_id:\n self.__reg_national_id = None\n return self.__reg_national_id\n\n def CrmId(self, crm_id = VALUE_NOT_SET):\n \"\"\"CrmId of the concerned natural person\"\"\"\n if crm_id != VALUE_NOT_SET:\n self.__reg_crm_id = crm_id\n try:\n self.__contact.AdditionalInfo().RegContactCrmId(self.__reg_crm_id)\n except:\n pass\n else:\n if not self.__reg_crm_id:\n self.__reg_crm_id = None\n return self.__reg_crm_id\n\n def ExchangeId(self, exchange_id = VALUE_NOT_SET):\n \"\"\"The identifier used towards/by an exchange to identify a person or legal entity, before the actual national id or the LEI is divulged.\"\"\"\n if exchange_id != VALUE_NOT_SET:\n if str(exchange_id).isdigit():\n self.__reg_exchange_id = int(exchange_id)\n elif str(exchange_id) in ['None', '']:\n self.__reg_exchange_id = None\n else:\n msg = \"The ExchangeId provided <%s> is not of the expected integer format\"%str(exchange_id)\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n try:\n self.__contact.AdditionalInfo().RegContExchangeId(self.__reg_exchange_id)\n except:\n pass\n else:\n if not self.__reg_exchange_id:\n self.__reg_exchange_id = None\n return self.__reg_exchange_id\n\n def UniqueName(self, unique_name = VALUE_NOT_SET):\n \"\"\"An optional unique name, if specified there can only be one contact with this name for each party.\"\"\"\n if unique_name != VALUE_NOT_SET:\n try:\n if FIntegrationUtils.FIntegrationUtils.get_acm_version_override() >= 2017.2:\n self.__contact.UniqueName(unique_name)\n else:\n is_unique, contact_name = FRegulatoryUtils.is_unique_name(self.__contact, unique_name)\n if is_unique:\n try:\n self.__contact.AdditionalInfo().UniqueName(unique_name)\n except:\n pass\n else:\n msg = \"The uniqueName <%s> provided for contact <%s> on party <%s> is not unique. 
Another contact <%s> already has this unique name.\"%(unique_name, self.__contact.Fullname(), self.__contact.Party().Name(), contact_name)\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n self.__reg_unique_name = unique_name\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n raise FRegulatoryInfoException.FRegInfoInvalidData(str(e))\n else:\n if not self.__reg_unique_name:\n self.__reg_unique_name = None\n return self.__reg_unique_name\n\n def ClientType(self):\n \"\"\"returns the ClientType based on where the CrmId is found on the linked objects\"\"\"\n self.__client_type = FRegulatoryUtils.getClientType(self.__contact)\n return self.__client_type\n\n def JointAccount(self):\n \"\"\"Another trader that jointly owns the account with this trader\"\"\"\n joint_accounts = []\n if self.IsGeneralPartner():\n for contact in self.__contact.Party().Contacts(): \n if contact.AdditionalInfo().RegGeneralPartner(): \n joint_accounts.append(contact)\n else:\n FRegulatoryLogger.WARN(logger, \"<%s> is not a General Partner. Hence JointAccount is None\"%self.__contact.Fullname())\n joint_accounts = None\n return joint_accounts \n\n def IsGeneralPartner(self, is_general_partner = VALUE_NOT_SET):\n \"\"\"General partner has responsibility for the actions of the business, can legally bind\n the business and is personally liable for all the business's debts and obligations.\"\"\"\n if is_general_partner != VALUE_NOT_SET:\n self.__is_general_partner = FRegulatoryUtils.get_bool(is_general_partner, 'IsGeneralPartner')\n FRegulatoryLogger.DEBUG(logger, \"The IsGeneralPartner is being set to <%s>.\"%(str(self.__is_general_partner)))\n try:\n self.__contact.AdditionalInfo().RegGeneralPartner(self.__is_general_partner)\n except:\n pass\n\n else:\n if str(self.__is_general_partner) == \"None\":\n FRegulatoryLogger.DEBUG(logger, \"The IsGeneralPartner is None. 
Hence defaulting it to False\")\n self.__is_general_partner = False\n return self.__is_general_partner\n\n def __setattr__(self, attr, val):\n if attr.startswith('_'):\n super(FContactRegulatoryInfoBase, self).__setattr__(attr, val)\n else:\n if hasattr(self, attr):\n getattr(self, attr)(val)\n\n def Commit(self):\n \"\"\"Committing this instance will automatically commit all the RegulatorySupport related attributes on the contact\"\"\"\n try:\n acm.BeginTransaction()\n self.__contact.Commit()\n if FIntegrationUtils.FIntegrationUtils.get_acm_version_override() < 2015.4:\n self.__integration_utils.set_additional_info('DateOfBirth', self.__contact, self.__reg_date_of_birth)\n self.__integration_utils.set_additional_info('FirstName', self.__contact, self.__reg_first_name)\n self.__integration_utils.set_additional_info('LastName', self.__contact, self.__reg_last_name)\n self.__integration_utils.set_additional_info('NationalId', self.__contact, self.__reg_national_id)\n self.__integration_utils.set_additional_info('RegContactCrmId', self.__contact, self.__reg_crm_id)\n self.__integration_utils.set_additional_info('RegContExchangeId', self.__contact, self.__reg_exchange_id)\n self.__integration_utils.set_additional_info('UniqueName', self.__contact, self.__reg_unique_name)\n self.__integration_utils.set_additional_info('RegGeneralPartner', self.__contact, self.__is_general_partner)\n acm.CommitTransaction()\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n FRegulatoryLogger.ERROR(logger, \"ABORTING TRANSACTION***********\")\n acm.AbortTransaction()\n\n def Delete(self):\n \"\"\"Deleting this instance automatically deletes all the attributes related to the reporting on the instrument or on the ContactRegulatoryInfo in the ADS\"\"\"\n FRegulatoryUtils.Delete(self.__contact, \"Contact\")\n FRegulatoryLogger.DEBUG(logger, \"Deleted all AdditionalInfos on Contact related to Regulatory Reporting\")\n\n def Attributes(self):\n \"\"\"returns the attributes on the FContactRegulatoryInfoBase instance\"\"\"\n return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)\n\ndef RegulatoryInfo(self):\n \"\"\"returns the FContactRegulatoryInfoBase instance for the given contact\"\"\"\n conactRegInfo = FContactRegulatoryInfo(self)\n return conactRegInfo\n\ndef Select(query):\n \"\"\"Return a collection of FContactRegulatoryInfoBase instances matching constraint specified in the Select query\"\"\"\n party = None\n if query.find('and party') != -1:#it means there is an additional condition added\n pos = query.find('and party')\n party_name = query[(pos + len('and party')):]\n query = query[0:pos]\n party_name = party_name.replace('=', '').replace(\"'\", '')\n party_name = party_name.strip()\n party = acm.FParty[party_name]\n return_result = FRegulatoryUtils.Select(query, \"FContact\", party)\n return return_result\n\n",
"step-ids": [
13,
15,
18,
20,
23
]
}
|
[
13,
15,
18,
20,
23
] |
from mayan.apps.testing.tests.base import BaseTestCase
from .mixins import AssetTestMixin
class AssetModelTestCase(AssetTestMixin, BaseTestCase):
    def test_asset_get_absolute_url_method(self):
        self._create_test_asset()
        self.test_asset.get_absolute_url()
|
normal
|
{
"blob_id": "42c9e5039e2d5f784bf6405ea8bcaf7d6973ddcb",
"index": 6456,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass AssetModelTestCase(AssetTestMixin, BaseTestCase):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass AssetModelTestCase(AssetTestMixin, BaseTestCase):\n\n def test_asset_get_absolute_url_method(self):\n self._create_test_asset()\n self.test_asset.get_absolute_url()\n",
"step-4": "from mayan.apps.testing.tests.base import BaseTestCase\nfrom .mixins import AssetTestMixin\n\n\nclass AssetModelTestCase(AssetTestMixin, BaseTestCase):\n\n def test_asset_get_absolute_url_method(self):\n self._create_test_asset()\n self.test_asset.get_absolute_url()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# ethermine.py, Copyright (c) 2019, Nicholas Saparoff <[email protected]>: Original implementation
from minermedic.pools.base_pool import BasePool
from phenome_core.util.rest_api import RestAPI
from minermedic.pools.helper import get_algo_index, get_coin_index, get_coin_cost
"""
EtherminePool
This is the main Pool API for Ethermine.
SEE: https://ethermine.org/api/worker#monitoring
"""
class EtherminePool(BasePool):
    # PER WORKER
    _MINER_URL_PER_WORKER = "https://api.ethermine.org/miner/:{MINER}/worker/:{WORKER}/currentStats"
    # PER MINER
    _MINER_URL_PER_MINER = "https://api.ethermine.org/miner/:{MINER}/currentStats"
    # with Ethermine, the coin is usually ETH, but could be ETC or ZCASH
    _DEFAULT_COIN_ = "ETH"

    def __init__(self, pool, pool_attrs):
        super(EtherminePool, self).__init__(pool, pool_attrs)

    def build_creation_parameters(self, pool, pool_attrs, pool_classname):
        # get the default creation parameters
        params = super(EtherminePool, self).build_creation_parameters(pool, pool_attrs, pool_classname)
        server_location = "US"
        if pool.startswith("eu1.etc") or pool.startswith("eu1.eth"):
            server_location = "Europe"
        elif pool.startswith("us1-etc"):
            server_location = "US"
        elif pool.startswith("us1.eth"):
            server_location = "US East"
        elif pool.startswith("us2.eth"):
            server_location = "US West"
        elif pool.startswith("asia1.eth"):
            server_location = "Asia"
        # Set the unique ID of the pool (give it a NAME, as the URL/IP may change)
        # POOL - LOCATION (COIN)
        params['unique_id'] = "ETHERMINE - " + server_location + " (" + self._DEFAULT_COIN_ + ")"
        return params

    def _clean_coin_address(self, miner):
        coin_address = miner.coin_address.lower()
        if coin_address.startswith('0x'):
            coin_address = coin_address[2:]
        elif coin_address.startswith('#0x'):
            coin_address = coin_address[3:]
        return coin_address

    def get_worker_stats(self, miner, worker):
        # build the per-worker URL
        url = self._MINER_URL_PER_WORKER.replace("{MINER}", self._clean_coin_address(miner)).replace("{WORKER}", worker)
        api = RestAPI(url=url, port=80)
        return api.get_json()

    def get_miner_stats(self, miner):
        # build the per-miner URL
        url = self._MINER_URL_PER_MINER.replace("{MINER}", self._clean_coin_address(miner))
        api = RestAPI(url=url, port=80)
        return api.get_json()

    def get_pool_stats(self, results, miner, worker, algo, pool_id, pool_url):
        if algo == 'ethash':
            algo_idx = get_algo_index('daggerhashimoto')
        else:
            algo_idx = get_algo_index(algo)
        if algo_idx == -1:
            return False
        coin_idx = get_coin_index(self._DEFAULT_COIN_)
        # get the cost of the coin
        # TODO - get the currency from the config, do not assume USD
        coin_cost = get_coin_cost(self._DEFAULT_COIN_, 'USD')
        success = False
        json = self.get_worker_stats(miner, worker)
        if json:
            success = self.parse_json(json, results, miner, worker, pool_id, algo, algo_idx, coin_idx, coin_cost)
        return success

    def parse_json(self, json, results, miner, worker, pool, algo, algo_idx, coin_idx, coin_cost):
        # get the record
        record = json['data']
        if record == 'NO DATA':
            # check coin switch?
            miner_coin_idx = None
            if hasattr(miner, 'coin_idx'):
                # we have been mining so far
                miner_coin_idx = miner.coin
            if miner_coin_idx is None or miner_coin_idx != coin_idx:
                # reset the coin address, maybe switched coin
                miner.coin_address = ''
            # no data, just fail
            return False
        # API call results, speed is in units of Hashes
        speed_suffix = 'H'
        try:
            # get accepted hashrate
            speed_accepted = float(record['currentHashrate'])
        except:
            speed_accepted = 0.0
        try:
            # get "reported" hashrate
            speed_reported = float(record['reportedHashrate'])
        except:
            speed_reported = None
        # now get the miner stats for profitability
        json_miner_stats = self.get_miner_stats(miner)
        # get the record
        record_miner_stats = json_miner_stats['data']
        try:
            coins_per_minute = float(record_miner_stats['coinsPerMin'])
        except:
            coins_per_minute = 0.0
        try:
            active_workers = float(record_miner_stats['activeWorkers'])
        except:
            active_workers = 1
        # profitability is a measure of COIN / speed suffix / per DAY
        # ETHERMINE only gives coin estimates per MINER per MINUTE, not per WORKER
        # so we need to average it out by dividing by the # of active workers
        # (guard against a zero hashrate or worker count to avoid division by zero)
        if speed_accepted and active_workers:
            profitability = ((coins_per_minute * (60 * 24)) / speed_accepted) / active_workers
        else:
            profitability = 0.0
        # finally set the API results into the main results object
        results.populate_pool_results(miner, worker, pool, algo, algo_idx, coin_idx, coin_cost, profitability,
                                      speed_accepted, speed_reported, speed_suffix)
        # if we got here, we were successful
        return True
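
# --- Added illustration (not part of the original module) ---
# A minimal sketch of the profitability arithmetic in parse_json, using
# hypothetical numbers in place of real Ethermine API output:
if __name__ == '__main__':
    coins_per_minute = 0.0005      # assumed 'coinsPerMin' from the miner stats
    speed_accepted = 90000000.0    # 90 MH/s expressed in Hashes (speed_suffix 'H')
    active_workers = 2
    # coin per hash per day, averaged across the miner's active workers
    profitability = ((coins_per_minute * (60 * 24)) / speed_accepted) / active_workers
    print(profitability)           # -> 4e-09 ETH per H per day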
|
normal
|
{
"blob_id": "921c7255fad46c767f2ec1030ef9498da05b9bb1",
"index": 9958,
"step-1": "<mask token>\n\n\nclass EtherminePool(BasePool):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def build_creation_parameters(self, pool, pool_attrs, pool_classname):\n params = super(EtherminePool, self).build_creation_parameters(pool,\n pool_attrs, pool_classname)\n server_location = 'US'\n if pool.startswith('eu1.etc') or pool.startswith('eu1.eth'):\n server_location = 'Europe'\n elif pool.startswith('us1-etc'):\n server_location = 'US'\n elif pool.startswith('us1.eth'):\n server_location = 'US East'\n elif pool.startswith('us2.eth'):\n server_location = 'US West'\n elif pool.startswith('asia1.eth'):\n server_location = 'Asia'\n params['unique_id'\n ] = 'ETHERMINE - ' + server_location + ' (' + self._DEFAULT_COIN_ + ')'\n return params\n <mask token>\n\n def get_worker_stats(self, miner, worker):\n url = self._MINER_URL_PER_WORKER.replace('{MINER}', self.\n _clean_coin_address(miner)).replace('{WORKER}', worker)\n api = RestAPI(url=url, port=80)\n return api.get_json()\n\n def get_miner_stats(self, miner):\n url = self._MINER_URL_PER_MINER.replace('{MINER}', self.\n _clean_coin_address(miner))\n api = RestAPI(url=url, port=80)\n return api.get_json()\n\n def get_pool_stats(self, results, miner, worker, algo, pool_id, pool_url):\n if algo == 'ethash':\n algo_idx = get_algo_index('daggerhashimoto')\n else:\n algo_idx = get_algo_index(algo)\n if algo_idx is -1:\n return False\n coin_idx = get_coin_index(self._DEFAULT_COIN_)\n coin_cost = get_coin_cost(self._DEFAULT_COIN_, 'USD')\n success = False\n json = self.get_worker_stats(miner, worker)\n if json:\n success = self.parse_json(json, results, miner, worker, pool_id,\n algo, algo_idx, coin_idx, coin_cost)\n return success\n\n def parse_json(self, json, results, miner, worker, pool, algo, algo_idx,\n coin_idx, coin_cost):\n record = json['data']\n if record == 'NO DATA':\n miner_coin_idx = None\n if hasattr(miner, 'coin_idx'):\n miner_coin_idx = miner.coin\n if miner_coin_idx is None or miner_coin_idx != coin_idx:\n miner.coin_address = ''\n return False\n speed_suffix = 'H'\n try:\n speed_accepted = float(record['currentHashrate'])\n except:\n speed_accepted = 0.0\n try:\n speed_reported = float(record['reportedHashrate'])\n except:\n speed_reported = None\n json_miner_stats = self.get_miner_stats(miner)\n record_miner_stats = json_miner_stats['data']\n try:\n coins_per_minute = float(record_miner_stats['coinsPerMin'])\n except:\n coins_per_minute = 0.0\n try:\n active_workers = float(record_miner_stats['activeWorkers'])\n except:\n active_workers = 1\n profitability = coins_per_minute * (60 * 24\n ) / speed_accepted / active_workers\n results.populate_pool_results(miner, worker, pool, algo, algo_idx,\n coin_idx, coin_cost, profitability, speed_accepted,\n speed_reported, speed_suffix)\n return True\n",
"step-2": "<mask token>\n\n\nclass EtherminePool(BasePool):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, pool, pool_attrs):\n super(EtherminePool, self).__init__(pool, pool_attrs)\n\n def build_creation_parameters(self, pool, pool_attrs, pool_classname):\n params = super(EtherminePool, self).build_creation_parameters(pool,\n pool_attrs, pool_classname)\n server_location = 'US'\n if pool.startswith('eu1.etc') or pool.startswith('eu1.eth'):\n server_location = 'Europe'\n elif pool.startswith('us1-etc'):\n server_location = 'US'\n elif pool.startswith('us1.eth'):\n server_location = 'US East'\n elif pool.startswith('us2.eth'):\n server_location = 'US West'\n elif pool.startswith('asia1.eth'):\n server_location = 'Asia'\n params['unique_id'\n ] = 'ETHERMINE - ' + server_location + ' (' + self._DEFAULT_COIN_ + ')'\n return params\n\n def _clean_coin_address(self, miner):\n coin_address = miner.coin_address.lower()\n if coin_address.startswith('0x'):\n coin_address = coin_address[2:]\n elif coin_address.startswith('#0x'):\n coin_address = coin_address[3:]\n return coin_address\n\n def get_worker_stats(self, miner, worker):\n url = self._MINER_URL_PER_WORKER.replace('{MINER}', self.\n _clean_coin_address(miner)).replace('{WORKER}', worker)\n api = RestAPI(url=url, port=80)\n return api.get_json()\n\n def get_miner_stats(self, miner):\n url = self._MINER_URL_PER_MINER.replace('{MINER}', self.\n _clean_coin_address(miner))\n api = RestAPI(url=url, port=80)\n return api.get_json()\n\n def get_pool_stats(self, results, miner, worker, algo, pool_id, pool_url):\n if algo == 'ethash':\n algo_idx = get_algo_index('daggerhashimoto')\n else:\n algo_idx = get_algo_index(algo)\n if algo_idx is -1:\n return False\n coin_idx = get_coin_index(self._DEFAULT_COIN_)\n coin_cost = get_coin_cost(self._DEFAULT_COIN_, 'USD')\n success = False\n json = self.get_worker_stats(miner, worker)\n if json:\n success = self.parse_json(json, results, miner, worker, pool_id,\n algo, algo_idx, coin_idx, coin_cost)\n return success\n\n def parse_json(self, json, results, miner, worker, pool, algo, algo_idx,\n coin_idx, coin_cost):\n record = json['data']\n if record == 'NO DATA':\n miner_coin_idx = None\n if hasattr(miner, 'coin_idx'):\n miner_coin_idx = miner.coin\n if miner_coin_idx is None or miner_coin_idx != coin_idx:\n miner.coin_address = ''\n return False\n speed_suffix = 'H'\n try:\n speed_accepted = float(record['currentHashrate'])\n except:\n speed_accepted = 0.0\n try:\n speed_reported = float(record['reportedHashrate'])\n except:\n speed_reported = None\n json_miner_stats = self.get_miner_stats(miner)\n record_miner_stats = json_miner_stats['data']\n try:\n coins_per_minute = float(record_miner_stats['coinsPerMin'])\n except:\n coins_per_minute = 0.0\n try:\n active_workers = float(record_miner_stats['activeWorkers'])\n except:\n active_workers = 1\n profitability = coins_per_minute * (60 * 24\n ) / speed_accepted / active_workers\n results.populate_pool_results(miner, worker, pool, algo, algo_idx,\n coin_idx, coin_cost, profitability, speed_accepted,\n speed_reported, speed_suffix)\n return True\n",
"step-3": "<mask token>\n\n\nclass EtherminePool(BasePool):\n _MINER_URL_PER_WORKER = (\n 'https://api.ethermine.org/miner/:{MINER}/worker/:{WORKER}/currentStats'\n )\n _MINER_URL_PER_MINER = (\n 'https://api.ethermine.org/miner/:{MINER}/currentStats')\n _DEFAULT_COIN_ = 'ETH'\n\n def __init__(self, pool, pool_attrs):\n super(EtherminePool, self).__init__(pool, pool_attrs)\n\n def build_creation_parameters(self, pool, pool_attrs, pool_classname):\n params = super(EtherminePool, self).build_creation_parameters(pool,\n pool_attrs, pool_classname)\n server_location = 'US'\n if pool.startswith('eu1.etc') or pool.startswith('eu1.eth'):\n server_location = 'Europe'\n elif pool.startswith('us1-etc'):\n server_location = 'US'\n elif pool.startswith('us1.eth'):\n server_location = 'US East'\n elif pool.startswith('us2.eth'):\n server_location = 'US West'\n elif pool.startswith('asia1.eth'):\n server_location = 'Asia'\n params['unique_id'\n ] = 'ETHERMINE - ' + server_location + ' (' + self._DEFAULT_COIN_ + ')'\n return params\n\n def _clean_coin_address(self, miner):\n coin_address = miner.coin_address.lower()\n if coin_address.startswith('0x'):\n coin_address = coin_address[2:]\n elif coin_address.startswith('#0x'):\n coin_address = coin_address[3:]\n return coin_address\n\n def get_worker_stats(self, miner, worker):\n url = self._MINER_URL_PER_WORKER.replace('{MINER}', self.\n _clean_coin_address(miner)).replace('{WORKER}', worker)\n api = RestAPI(url=url, port=80)\n return api.get_json()\n\n def get_miner_stats(self, miner):\n url = self._MINER_URL_PER_MINER.replace('{MINER}', self.\n _clean_coin_address(miner))\n api = RestAPI(url=url, port=80)\n return api.get_json()\n\n def get_pool_stats(self, results, miner, worker, algo, pool_id, pool_url):\n if algo == 'ethash':\n algo_idx = get_algo_index('daggerhashimoto')\n else:\n algo_idx = get_algo_index(algo)\n if algo_idx is -1:\n return False\n coin_idx = get_coin_index(self._DEFAULT_COIN_)\n coin_cost = get_coin_cost(self._DEFAULT_COIN_, 'USD')\n success = False\n json = self.get_worker_stats(miner, worker)\n if json:\n success = self.parse_json(json, results, miner, worker, pool_id,\n algo, algo_idx, coin_idx, coin_cost)\n return success\n\n def parse_json(self, json, results, miner, worker, pool, algo, algo_idx,\n coin_idx, coin_cost):\n record = json['data']\n if record == 'NO DATA':\n miner_coin_idx = None\n if hasattr(miner, 'coin_idx'):\n miner_coin_idx = miner.coin\n if miner_coin_idx is None or miner_coin_idx != coin_idx:\n miner.coin_address = ''\n return False\n speed_suffix = 'H'\n try:\n speed_accepted = float(record['currentHashrate'])\n except:\n speed_accepted = 0.0\n try:\n speed_reported = float(record['reportedHashrate'])\n except:\n speed_reported = None\n json_miner_stats = self.get_miner_stats(miner)\n record_miner_stats = json_miner_stats['data']\n try:\n coins_per_minute = float(record_miner_stats['coinsPerMin'])\n except:\n coins_per_minute = 0.0\n try:\n active_workers = float(record_miner_stats['activeWorkers'])\n except:\n active_workers = 1\n profitability = coins_per_minute * (60 * 24\n ) / speed_accepted / active_workers\n results.populate_pool_results(miner, worker, pool, algo, algo_idx,\n coin_idx, coin_cost, profitability, speed_accepted,\n speed_reported, speed_suffix)\n return True\n",
"step-4": "from minermedic.pools.base_pool import BasePool\nfrom phenome_core.util.rest_api import RestAPI\nfrom minermedic.pools.helper import get_algo_index, get_coin_index, get_coin_cost\n<mask token>\n\n\nclass EtherminePool(BasePool):\n _MINER_URL_PER_WORKER = (\n 'https://api.ethermine.org/miner/:{MINER}/worker/:{WORKER}/currentStats'\n )\n _MINER_URL_PER_MINER = (\n 'https://api.ethermine.org/miner/:{MINER}/currentStats')\n _DEFAULT_COIN_ = 'ETH'\n\n def __init__(self, pool, pool_attrs):\n super(EtherminePool, self).__init__(pool, pool_attrs)\n\n def build_creation_parameters(self, pool, pool_attrs, pool_classname):\n params = super(EtherminePool, self).build_creation_parameters(pool,\n pool_attrs, pool_classname)\n server_location = 'US'\n if pool.startswith('eu1.etc') or pool.startswith('eu1.eth'):\n server_location = 'Europe'\n elif pool.startswith('us1-etc'):\n server_location = 'US'\n elif pool.startswith('us1.eth'):\n server_location = 'US East'\n elif pool.startswith('us2.eth'):\n server_location = 'US West'\n elif pool.startswith('asia1.eth'):\n server_location = 'Asia'\n params['unique_id'\n ] = 'ETHERMINE - ' + server_location + ' (' + self._DEFAULT_COIN_ + ')'\n return params\n\n def _clean_coin_address(self, miner):\n coin_address = miner.coin_address.lower()\n if coin_address.startswith('0x'):\n coin_address = coin_address[2:]\n elif coin_address.startswith('#0x'):\n coin_address = coin_address[3:]\n return coin_address\n\n def get_worker_stats(self, miner, worker):\n url = self._MINER_URL_PER_WORKER.replace('{MINER}', self.\n _clean_coin_address(miner)).replace('{WORKER}', worker)\n api = RestAPI(url=url, port=80)\n return api.get_json()\n\n def get_miner_stats(self, miner):\n url = self._MINER_URL_PER_MINER.replace('{MINER}', self.\n _clean_coin_address(miner))\n api = RestAPI(url=url, port=80)\n return api.get_json()\n\n def get_pool_stats(self, results, miner, worker, algo, pool_id, pool_url):\n if algo == 'ethash':\n algo_idx = get_algo_index('daggerhashimoto')\n else:\n algo_idx = get_algo_index(algo)\n if algo_idx is -1:\n return False\n coin_idx = get_coin_index(self._DEFAULT_COIN_)\n coin_cost = get_coin_cost(self._DEFAULT_COIN_, 'USD')\n success = False\n json = self.get_worker_stats(miner, worker)\n if json:\n success = self.parse_json(json, results, miner, worker, pool_id,\n algo, algo_idx, coin_idx, coin_cost)\n return success\n\n def parse_json(self, json, results, miner, worker, pool, algo, algo_idx,\n coin_idx, coin_cost):\n record = json['data']\n if record == 'NO DATA':\n miner_coin_idx = None\n if hasattr(miner, 'coin_idx'):\n miner_coin_idx = miner.coin\n if miner_coin_idx is None or miner_coin_idx != coin_idx:\n miner.coin_address = ''\n return False\n speed_suffix = 'H'\n try:\n speed_accepted = float(record['currentHashrate'])\n except:\n speed_accepted = 0.0\n try:\n speed_reported = float(record['reportedHashrate'])\n except:\n speed_reported = None\n json_miner_stats = self.get_miner_stats(miner)\n record_miner_stats = json_miner_stats['data']\n try:\n coins_per_minute = float(record_miner_stats['coinsPerMin'])\n except:\n coins_per_minute = 0.0\n try:\n active_workers = float(record_miner_stats['activeWorkers'])\n except:\n active_workers = 1\n profitability = coins_per_minute * (60 * 24\n ) / speed_accepted / active_workers\n results.populate_pool_results(miner, worker, pool, algo, algo_idx,\n coin_idx, coin_cost, profitability, speed_accepted,\n speed_reported, speed_suffix)\n return True\n",
"step-5": "# ethermine.py, Copyright (c) 2019, Nicholas Saparoff <[email protected]>: Original implementation\n\nfrom minermedic.pools.base_pool import BasePool\nfrom phenome_core.util.rest_api import RestAPI\nfrom minermedic.pools.helper import get_algo_index, get_coin_index, get_coin_cost\n\n\"\"\"\n\nEtherminePool\n\n This is the main Pool API for Ethermine.\n SEE: https://ethermine.org/api/worker#monitoring\n \n\"\"\"\n\n\nclass EtherminePool(BasePool):\n\n # PER WORKER\n _MINER_URL_PER_WORKER = \"https://api.ethermine.org/miner/:{MINER}/worker/:{WORKER}/currentStats\"\n\n # PER MINER\n _MINER_URL_PER_MINER = \"https://api.ethermine.org/miner/:{MINER}/currentStats\"\n\n # with Ethermine, the coin is Usually ETH, but could be ETC or ZCASH\n _DEFAULT_COIN_ = \"ETH\"\n\n def __init__(self, pool, pool_attrs):\n super(EtherminePool, self).__init__(pool, pool_attrs)\n\n def build_creation_parameters(self, pool, pool_attrs, pool_classname):\n\n # get the default creation parameters\n params = super(EtherminePool, self).build_creation_parameters(pool, pool_attrs, pool_classname)\n\n server_location = \"US\"\n\n if pool.startswith(\"eu1.etc\") or pool.startswith(\"eu1.eth\"):\n server_location = \"Europe\"\n elif pool.startswith(\"us1-etc\"):\n server_location = \"US\"\n elif pool.startswith(\"us1.eth\"):\n server_location = \"US East\"\n elif pool.startswith(\"us2.eth\"):\n server_location = \"US West\"\n elif pool.startswith(\"asia1.eth\"):\n server_location = \"Asia\"\n\n # Set the unique ID of the pool (give it a NAME, as the URL/IP may change)\n # POOL - LOCATION (COIN)\n params['unique_id'] = \"ETHERMINE - \" + server_location + \" (\" + self._DEFAULT_COIN_ + \")\"\n\n return params\n\n def _clean_coin_address(self, miner):\n\n coin_address = miner.coin_address.lower()\n if coin_address.startswith('0x'):\n coin_address = coin_address[2:]\n elif coin_address.startswith('#0x'):\n coin_address = coin_address[3:]\n\n return coin_address\n\n def get_worker_stats(self, miner, worker):\n\n # build the miner URL\n url = self._MINER_URL_PER_WORKER.replace(\"{MINER}\",self._clean_coin_address(miner)).replace(\"{WORKER}\",worker)\n\n api = RestAPI(url=url, port=80)\n\n return api.get_json()\n\n def get_miner_stats(self, miner):\n\n # build the miner URL\n url = self._MINER_URL_PER_MINER.replace(\"{MINER}\", self._clean_coin_address(miner))\n\n api = RestAPI(url=url, port=80)\n\n return api.get_json()\n\n def get_pool_stats(self, results, miner, worker, algo, pool_id, pool_url):\n\n if algo == 'ethash':\n algo_idx = get_algo_index('daggerhashimoto')\n else:\n algo_idx = get_algo_index(algo)\n\n if algo_idx is -1:\n return False\n\n coin_idx = get_coin_index(self._DEFAULT_COIN_)\n\n # get the cost of the coin\n # TODO - get the currency from the config, do not assume USD\n coin_cost = get_coin_cost(self._DEFAULT_COIN_,'USD')\n\n success = False\n\n json = self.get_worker_stats(miner, worker)\n\n if json:\n success = self.parse_json(json, results, miner, worker, pool_id, algo, algo_idx, coin_idx, coin_cost)\n\n return success\n\n def parse_json(self, json, results, miner, worker, pool, algo, algo_idx, coin_idx, coin_cost):\n\n # get the record\n record = json['data']\n\n if record == 'NO DATA':\n\n # check coin switch?\n miner_coin_idx = None\n\n if hasattr(miner, 'coin_idx'):\n # we have been mining so far\n miner_coin_idx = miner.coin\n\n if miner_coin_idx is None or miner_coin_idx != coin_idx:\n # reset the coin address, maybe switched coin\n miner.coin_address = ''\n\n # no data, just fail\n return 
False\n\n # API call results, speed is in units of Hashes\n speed_suffix = 'H'\n\n try:\n # get accepted hashrate\n speed_accepted = float(record['currentHashrate'])\n except:\n speed_accepted = 0.0\n\n try:\n # get \"reported\" hashrate\n speed_reported = float(record['reportedHashrate'])\n except:\n speed_reported = None\n\n # now get the miner stats for profitability\n json_miner_stats = self.get_miner_stats(miner)\n\n # get the record\n record_miner_stats = json_miner_stats['data']\n\n try:\n coins_per_minute = float(record_miner_stats['coinsPerMin'])\n except:\n coins_per_minute = 0.0\n\n try:\n active_workers = float(record_miner_stats['activeWorkers'])\n except:\n active_workers = 1\n\n # profitability is a measure of COIN / speed suffix / per DAY\n # ETHERMINE only gives coin estimates per MINER per MINUTE, not per WORKER\n # so we need to average it out by dividing by the # of active workers\n profitability = ((coins_per_minute * (60 * 24))/speed_accepted)/active_workers\n\n # finally set the API results into the main results object\n results.populate_pool_results(miner, worker, pool, algo, algo_idx, coin_idx, coin_cost, profitability,\n speed_accepted, speed_reported, speed_suffix)\n\n # if we got here, we were successful\n return True\n\n",
"step-ids": [
6,
8,
9,
10,
11
]
}
|
[
6,
8,
9,
10,
11
] |
#%% [markdown]
# # Look at intron-less gene enrichment in Cyte biased expressed genes.
# This is a quick look at whether primary spermatocyte biased genes are enriched in intronless genes.
# Yes, this is what we see.
#%%
import os
import pickle
import numpy as np
import pandas as pd
from scipy.stats import fisher_exact, contingency
from IPython.display import display, Markdown
import matplotlib.pyplot as plt
import seaborn as sns
from statsmodels.api import formula as smf
from tabulate import tabulate
from larval_gonad.io import feather_to_cluster_rep_matrix
from larval_gonad.stats import run_chisq
from larval_gonad.plotting import plot_statsmodels_results
try:
    os.chdir(os.path.join(os.getcwd(), "docs"))
    print(os.getcwd())
except:
    pass
#%%
# Get list of intronless FBgns
fbgns_no_intron = pickle.load(open("../output/paper_submission/intron_less_genes.pkl", "rb"))
background = pickle.load(open("../output/paper_submission/background_fbgns.pkl", "rb"))
#%%
# Get list of X chromosome genes
fbgn2chrom = (
    pd.read_feather(
        "../references/gene_annotation_dmel_r6-26.feather", columns=["FBgn", "FB_chrom"]
    )
    .set_index("FBgn")
    .squeeze()
)
chrx_fbgns = fbgn2chrom[fbgn2chrom == "X"].index
#%%
# Get gonia biased and cyte biased genes
bias = (
    pd.read_feather("../output/seurat3-cluster-wf/combined_n3_gonia_vs_cytes.feather")
    .assign(gonia_bias=lambda x: np.where((x.p_val_adj <= 0.01) & (x.avg_logFC > 0), True, False))
    .assign(pct_gonia=lambda x: x["pct.1"])
    .assign(cyte_bias=lambda x: np.where((x.p_val_adj <= 0.01) & (x.avg_logFC < 0), True, False))
    .assign(pct_cyte=lambda x: x["pct.2"])
    .set_index("FBgn")
    .loc[:, ["gonia_bias", "cyte_bias", "pct_gonia", "pct_cyte"]]
    .reindex(background)
    .dropna()
)
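
#%% [markdown]
# The `pct.1`/`pct.2` columns are assumed here to follow the Seurat `FindMarkers`
# convention: the fraction of cells expressing the gene in the first (gonia) and
# second (cyte) group, respectively, hence the renames to `pct_gonia`/`pct_cyte`.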
#%%
# Munge all into a dataframe
df = bias.copy().join(fbgn2chrom)
df["intronless"] = np.where(df.index.isin(fbgns_no_intron), True, False)
df["X"] = np.where(df.index.isin(chrx_fbgns), True, False)
df["bias"] = "NS"
df.loc[df.gonia_bias, "bias"] = "gonia"
df.loc[df.cyte_bias, "bias"] = "cyte"
#%% [markdown]
# ## How are intronless genes expressed in primary spermatocytes?
#%% [markdown]
# ### Intronless genes are expressed in fewer cells than genes with introns.
#%%
# Plot percent cytes with expression by bias*chrom*intronless
g = sns.FacetGrid(
    df,
    row="bias",
    row_order=["cyte", "gonia", "NS"],
    col="FB_chrom",
    col_order=["X", "2L", "2R", "3L", "3R"],
    sharex=True,
    sharey=True,
    margin_titles=True,
)
g.map(sns.boxplot, "intronless", "pct_cyte", order=[False, True])
g.set_ylabels("% Spermatocyte Cells\nWith Expression")
g.savefig("../output/docs/x_escapers_and_intronless_genes.svg", bbox_inches="tight")
#%% [markdown]
# ### However, intronless genes are enriched in genes with primary spermatocyte biased expression.
#%%
# Cross tab of intronless * bias
ct = pd.crosstab(df.intronless, df.bias)
res = run_chisq(ct).loc[(slice(None), ["observed", "adj std residual", "flag_sig"]), :]
print(tabulate(res.reset_index(), headers="keys", showindex=False, tablefmt="github"))
res
#%%
zscores_intronless = (
    feather_to_cluster_rep_matrix("../output/paper_submission/zscore_by_cluster_rep.feather")
    .reindex(fbgns_no_intron)
    .dropna()
)
ax = sns.clustermap(
    zscores_intronless,
    col_cluster=False,
    xticklabels=True,
    yticklabels=False,
    cmap="viridis",
    vmin=-3,
    vmax=3,
    rasterized=True,
)
ax.ax_heatmap.set(xlabel="", ylabel="Intronless Genes")
plt.savefig("../output/docs/x_escapers_and_intronless_genes_heatmap.svg", bbox_inches="tight")
#%% [markdown]
# ## Are intronless genes enriched in X chromosome escapers?
#%% [markdown]
# ### Intronless genes are depleted on the X chromosome.
#%%
# intronless genes across the genome
intronless2chrom = fbgn2chrom.to_frame().query(
    "FB_chrom == ['X', '2L', '2R', '3L', '3R', '4', 'Y']"
)
intronless2chrom["intronless"] = np.where(intronless2chrom.index.isin(fbgns_no_intron), True, False)
ct = pd.crosstab(intronless2chrom.intronless, intronless2chrom.FB_chrom)
res = run_chisq(ct).loc[(slice(None), ["observed", "adj std residual", "flag_sig"]), :]
display(res)
print(tabulate(res.reset_index(), headers="keys", showindex=False, tablefmt="github"))
#%% [markdown]
# ### X chromosome escapers are not enriched for intronless genes.
#%% [markdown]
# #### Main Effects Model Logit(intronless = cyte_biased + X chromosome)
#%%
# Main effects model
model = smf.logit("intronless ~ cyte_bias + X", data=df.replace({True: 1, False: 0}))
results = model.fit()
plot_statsmodels_results(
    "../output/docs/x_escapers_and_intronless_genes_main_effects.png", str(results.summary2())
)
display(results.summary2())
np.exp(results.params).rename("Odds Ratio").to_frame()[results.pvalues <= 0.05]
#%% [markdown]
# #### Full Model Logit(intronless = cyte_biased + X chromosome + cyte_biased * X chromosome)
#%%
# Full model
model = smf.logit("intronless ~ cyte_bias * X", data=df.replace({True: 1, False: 0}))
results = model.fit()
plot_statsmodels_results(
    "../output/docs/x_escapers_and_intronless_genes_full.png", str(results.summary2())
)
display(results.summary2())
np.exp(results.params).rename("Odds Ratio").to_frame()[results.pvalues <= 0.05]
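
#%% [markdown]
# Added illustration of the odds-ratio conversion used above: logit coefficients
# are on the log-odds scale, so `np.exp(coef)` turns them into multiplicative
# effects on the odds of being intronless.

#%%
# e.g., a hypothetical coefficient of 0.5 corresponds to ~1.65x higher odds
print(np.exp(0.5))  # -> 1.6487...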
#%%
|
normal
|
{
"blob_id": "5f4d83aa2b530417ecb1598510fb4778b111700b",
"index": 6489,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n os.chdir(os.path.join(os.getcwd(), 'docs'))\n print(os.getcwd())\nexcept:\n pass\n<mask token>\ng.map(sns.boxplot, 'intronless', 'pct_cyte', order=[False, True])\ng.set_ylabels(\"\"\"% Spermatocyte Cells\nWith Expression\"\"\")\ng.savefig('../output/docs/x_escapers_and_intronless_genes.svg', bbox_inches\n ='tight')\n<mask token>\nprint(tabulate(res.reset_index(), headers='keys', showindex=False, tablefmt\n ='github'))\nres\n<mask token>\nax.ax_heatmap.set(xlabel='', ylabel='Intronless Genes')\nplt.savefig('../output/docs/x_escapers_and_intronless_genes_heatmap.svg',\n bbox_inches='tight')\n<mask token>\ndisplay(res)\nprint(tabulate(res.reset_index(), headers='keys', showindex=False, tablefmt\n ='github'))\n<mask token>\nplot_statsmodels_results(\n '../output/docs/x_escapers_and_intronless_genes_main_effects.png', str(\n results.summary2()))\ndisplay(results.summary2())\nnp.exp(results.params).rename('Odds Ratio').to_frame()[results.pvalues <= 0.05]\n<mask token>\nplot_statsmodels_results(\n '../output/docs/x_escapers_and_intronless_genes_full.png', str(results.\n summary2()))\ndisplay(results.summary2())\nnp.exp(results.params).rename('Odds Ratio').to_frame()[results.pvalues <= 0.05]\n",
"step-3": "<mask token>\ntry:\n os.chdir(os.path.join(os.getcwd(), 'docs'))\n print(os.getcwd())\nexcept:\n pass\nfbgns_no_intron = pickle.load(open(\n '../output/paper_submission/intron_less_genes.pkl', 'rb'))\nbackground = pickle.load(open(\n '../output/paper_submission/background_fbgns.pkl', 'rb'))\nfbgn2chrom = pd.read_feather('../references/gene_annotation_dmel_r6-26.feather'\n , columns=['FBgn', 'FB_chrom']).set_index('FBgn').squeeze()\nchrx_fbgns = fbgn2chrom[fbgn2chrom == 'X'].index\nbias = pd.read_feather(\n '../output/seurat3-cluster-wf/combined_n3_gonia_vs_cytes.feather').assign(\n gonia_bias=lambda x: np.where((x.p_val_adj <= 0.01) & (x.avg_logFC > 0),\n True, False)).assign(pct_gonia=lambda x: x['pct.1']).assign(cyte_bias=\n lambda x: np.where((x.p_val_adj <= 0.01) & (x.avg_logFC < 0), True, False)\n ).assign(pct_cyte=lambda x: x['pct.2']).set_index('FBgn').loc[:, [\n 'gonia_bias', 'cyte_bias', 'pct_gonia', 'pct_cyte']].reindex(background\n ).dropna()\ndf = bias.copy().join(fbgn2chrom)\ndf['intronless'] = np.where(df.index.isin(fbgns_no_intron), True, False)\ndf['X'] = np.where(df.index.isin(chrx_fbgns), True, False)\ndf['bias'] = 'NS'\ndf.loc[df.gonia_bias, 'bias'] = 'gonia'\ndf.loc[df.cyte_bias, 'bias'] = 'cyte'\ng = sns.FacetGrid(df, row='bias', row_order=['cyte', 'gonia', 'NS'], col=\n 'FB_chrom', col_order=['X', '2L', '2R', '3L', '3R'], sharex=True,\n sharey=True, margin_titles=True)\ng.map(sns.boxplot, 'intronless', 'pct_cyte', order=[False, True])\ng.set_ylabels(\"\"\"% Spermatocyte Cells\nWith Expression\"\"\")\ng.savefig('../output/docs/x_escapers_and_intronless_genes.svg', bbox_inches\n ='tight')\nct = pd.crosstab(df.intronless, df.bias)\nres = run_chisq(ct).loc[(slice(None), ['observed', 'adj std residual',\n 'flag_sig']), :]\nprint(tabulate(res.reset_index(), headers='keys', showindex=False, tablefmt\n ='github'))\nres\nzscores_intronless = feather_to_cluster_rep_matrix(\n '../output/paper_submission/zscore_by_cluster_rep.feather').reindex(\n fbgns_no_intron).dropna()\nax = sns.clustermap(zscores_intronless, col_cluster=False, xticklabels=True,\n yticklabels=False, cmap='viridis', vmin=-3, vmax=3, rasterized=True)\nax.ax_heatmap.set(xlabel='', ylabel='Intronless Genes')\nplt.savefig('../output/docs/x_escapers_and_intronless_genes_heatmap.svg',\n bbox_inches='tight')\nintronless2chrom = fbgn2chrom.to_frame().query(\n \"FB_chrom == ['X', '2L', '2R', '3L', '3R', '4', 'Y']\")\nintronless2chrom['intronless'] = np.where(intronless2chrom.index.isin(\n fbgns_no_intron), True, False)\nct = pd.crosstab(intronless2chrom.intronless, intronless2chrom.FB_chrom)\nres = run_chisq(ct).loc[(slice(None), ['observed', 'adj std residual',\n 'flag_sig']), :]\ndisplay(res)\nprint(tabulate(res.reset_index(), headers='keys', showindex=False, tablefmt\n ='github'))\nmodel = smf.logit('intronless ~ cyte_bias + X', data=df.replace({(True): 1,\n (False): 0}))\nresults = model.fit()\nplot_statsmodels_results(\n '../output/docs/x_escapers_and_intronless_genes_main_effects.png', str(\n results.summary2()))\ndisplay(results.summary2())\nnp.exp(results.params).rename('Odds Ratio').to_frame()[results.pvalues <= 0.05]\nmodel = smf.logit('intronless ~ cyte_bias * X', data=df.replace({(True): 1,\n (False): 0}))\nresults = model.fit()\nplot_statsmodels_results(\n '../output/docs/x_escapers_and_intronless_genes_full.png', str(results.\n summary2()))\ndisplay(results.summary2())\nnp.exp(results.params).rename('Odds Ratio').to_frame()[results.pvalues <= 0.05]\n",
"step-4": "import os\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import fisher_exact, contingency\nfrom IPython.display import display, Markdown\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom statsmodels.api import formula as smf\nfrom tabulate import tabulate\nfrom larval_gonad.io import feather_to_cluster_rep_matrix\nfrom larval_gonad.stats import run_chisq\nfrom larval_gonad.plotting import plot_statsmodels_results\ntry:\n os.chdir(os.path.join(os.getcwd(), 'docs'))\n print(os.getcwd())\nexcept:\n pass\nfbgns_no_intron = pickle.load(open(\n '../output/paper_submission/intron_less_genes.pkl', 'rb'))\nbackground = pickle.load(open(\n '../output/paper_submission/background_fbgns.pkl', 'rb'))\nfbgn2chrom = pd.read_feather('../references/gene_annotation_dmel_r6-26.feather'\n , columns=['FBgn', 'FB_chrom']).set_index('FBgn').squeeze()\nchrx_fbgns = fbgn2chrom[fbgn2chrom == 'X'].index\nbias = pd.read_feather(\n '../output/seurat3-cluster-wf/combined_n3_gonia_vs_cytes.feather').assign(\n gonia_bias=lambda x: np.where((x.p_val_adj <= 0.01) & (x.avg_logFC > 0),\n True, False)).assign(pct_gonia=lambda x: x['pct.1']).assign(cyte_bias=\n lambda x: np.where((x.p_val_adj <= 0.01) & (x.avg_logFC < 0), True, False)\n ).assign(pct_cyte=lambda x: x['pct.2']).set_index('FBgn').loc[:, [\n 'gonia_bias', 'cyte_bias', 'pct_gonia', 'pct_cyte']].reindex(background\n ).dropna()\ndf = bias.copy().join(fbgn2chrom)\ndf['intronless'] = np.where(df.index.isin(fbgns_no_intron), True, False)\ndf['X'] = np.where(df.index.isin(chrx_fbgns), True, False)\ndf['bias'] = 'NS'\ndf.loc[df.gonia_bias, 'bias'] = 'gonia'\ndf.loc[df.cyte_bias, 'bias'] = 'cyte'\ng = sns.FacetGrid(df, row='bias', row_order=['cyte', 'gonia', 'NS'], col=\n 'FB_chrom', col_order=['X', '2L', '2R', '3L', '3R'], sharex=True,\n sharey=True, margin_titles=True)\ng.map(sns.boxplot, 'intronless', 'pct_cyte', order=[False, True])\ng.set_ylabels(\"\"\"% Spermatocyte Cells\nWith Expression\"\"\")\ng.savefig('../output/docs/x_escapers_and_intronless_genes.svg', bbox_inches\n ='tight')\nct = pd.crosstab(df.intronless, df.bias)\nres = run_chisq(ct).loc[(slice(None), ['observed', 'adj std residual',\n 'flag_sig']), :]\nprint(tabulate(res.reset_index(), headers='keys', showindex=False, tablefmt\n ='github'))\nres\nzscores_intronless = feather_to_cluster_rep_matrix(\n '../output/paper_submission/zscore_by_cluster_rep.feather').reindex(\n fbgns_no_intron).dropna()\nax = sns.clustermap(zscores_intronless, col_cluster=False, xticklabels=True,\n yticklabels=False, cmap='viridis', vmin=-3, vmax=3, rasterized=True)\nax.ax_heatmap.set(xlabel='', ylabel='Intronless Genes')\nplt.savefig('../output/docs/x_escapers_and_intronless_genes_heatmap.svg',\n bbox_inches='tight')\nintronless2chrom = fbgn2chrom.to_frame().query(\n \"FB_chrom == ['X', '2L', '2R', '3L', '3R', '4', 'Y']\")\nintronless2chrom['intronless'] = np.where(intronless2chrom.index.isin(\n fbgns_no_intron), True, False)\nct = pd.crosstab(intronless2chrom.intronless, intronless2chrom.FB_chrom)\nres = run_chisq(ct).loc[(slice(None), ['observed', 'adj std residual',\n 'flag_sig']), :]\ndisplay(res)\nprint(tabulate(res.reset_index(), headers='keys', showindex=False, tablefmt\n ='github'))\nmodel = smf.logit('intronless ~ cyte_bias + X', data=df.replace({(True): 1,\n (False): 0}))\nresults = model.fit()\nplot_statsmodels_results(\n '../output/docs/x_escapers_and_intronless_genes_main_effects.png', str(\n 
results.summary2()))\ndisplay(results.summary2())\nnp.exp(results.params).rename('Odds Ratio').to_frame()[results.pvalues <= 0.05]\nmodel = smf.logit('intronless ~ cyte_bias * X', data=df.replace({(True): 1,\n (False): 0}))\nresults = model.fit()\nplot_statsmodels_results(\n '../output/docs/x_escapers_and_intronless_genes_full.png', str(results.\n summary2()))\ndisplay(results.summary2())\nnp.exp(results.params).rename('Odds Ratio').to_frame()[results.pvalues <= 0.05]\n",
"step-5": "#%% [markdown]\n# # Look at intron-less gene enrichment in Cyte biased expressed genes.\n\n# This is a quick look at if parimary spermatocyte biased genes are enriched in intronless genes.\n# Yes this is what we see.\n\n#%%\nimport os\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import fisher_exact, contingency\nfrom IPython.display import display, Markdown\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom statsmodels.api import formula as smf\nfrom tabulate import tabulate\n\nfrom larval_gonad.io import feather_to_cluster_rep_matrix\nfrom larval_gonad.stats import run_chisq\nfrom larval_gonad.plotting import plot_statsmodels_results\n\ntry:\n os.chdir(os.path.join(os.getcwd(), \"docs\"))\n print(os.getcwd())\nexcept:\n pass\n\n\n#%%\n# Get list of intronless FBgns\nfbgns_no_intron = pickle.load(open(\"../output/paper_submission/intron_less_genes.pkl\", \"rb\"))\nbackground = pickle.load(open(\"../output/paper_submission/background_fbgns.pkl\", \"rb\"))\n\n#%%\n# Get list of X chromosome genes\nfbgn2chrom = (\n pd.read_feather(\n \"../references/gene_annotation_dmel_r6-26.feather\", columns=[\"FBgn\", \"FB_chrom\"]\n )\n .set_index(\"FBgn\")\n .squeeze()\n)\nchrx_fbgns = fbgn2chrom[fbgn2chrom == \"X\"].index\n\n#%%\n# Get gonia biased and cyte biased genes\nbias = (\n pd.read_feather(\"../output/seurat3-cluster-wf/combined_n3_gonia_vs_cytes.feather\")\n .assign(gonia_bias=lambda x: np.where((x.p_val_adj <= 0.01) & (x.avg_logFC > 0), True, False))\n .assign(pct_gonia=lambda x: x[\"pct.1\"])\n .assign(cyte_bias=lambda x: np.where((x.p_val_adj <= 0.01) & (x.avg_logFC < 0), True, False))\n .assign(pct_cyte=lambda x: x[\"pct.2\"])\n .set_index(\"FBgn\")\n .loc[:, [\"gonia_bias\", \"cyte_bias\", \"pct_gonia\", \"pct_cyte\"]]\n .reindex(background)\n .dropna()\n)\n\n#%%\n# Munge all into a dataframe\ndf = bias.copy().join(fbgn2chrom)\ndf[\"intronless\"] = np.where(df.index.isin(fbgns_no_intron), True, False)\ndf[\"X\"] = np.where(df.index.isin(chrx_fbgns), True, False)\ndf[\"bias\"] = \"NS\"\ndf.loc[df.gonia_bias, \"bias\"] = \"gonia\"\ndf.loc[df.cyte_bias, \"bias\"] = \"cyte\"\n\n#%% [markdown]\n# ## How are intronless genes expressed in primary spermatocytes?\n\n#%% [markdown]\n# ### Intronless genes are expressed in fewer cells than genes with introns.\n\n#%%\n# Plot percent cytes with expression by bias*chrom*intronless\ng = sns.FacetGrid(\n df,\n row=\"bias\",\n row_order=[\"cyte\", \"gonia\", \"NS\"],\n col=\"FB_chrom\",\n col_order=[\"X\", \"2L\", \"2R\", \"3L\", \"3R\"],\n sharex=True,\n sharey=True,\n margin_titles=True,\n)\ng.map(sns.boxplot, \"intronless\", \"pct_cyte\", order=[False, True])\ng.set_ylabels(\"% Spermatocyte Cells\\nWith Expression\")\ng.savefig(\"../output/docs/x_escapers_and_intronless_genes.svg\", bbox_inches=\"tight\")\n\n#%% [markdown]\n# ### However, intronless genes are enriched in genes with primary spermatocyte biased expression.\n\n#%%\n# Cross tab of intronless * bias\nct = pd.crosstab(df.intronless, df.bias)\nres = run_chisq(ct).loc[(slice(None), [\"observed\", \"adj std residual\", \"flag_sig\"]), :]\nprint(tabulate(res.reset_index(), headers=\"keys\", showindex=False, tablefmt=\"github\"))\nres\n\n#%%\nzscores_intronless = (\n feather_to_cluster_rep_matrix(\"../output/paper_submission/zscore_by_cluster_rep.feather\")\n .reindex(fbgns_no_intron)\n .dropna()\n)\n\nax = sns.clustermap(\n zscores_intronless,\n col_cluster=False,\n xticklabels=True,\n yticklabels=False,\n cmap=\"viridis\",\n vmin=-3,\n 
vmax=3,\n rasterized=True,\n)\nax.ax_heatmap.set(xlabel=\"\", ylabel=\"Intronless Genes\")\nplt.savefig(\"../output/docs/x_escapers_and_intronless_genes_heatmap.svg\", bbox_inches=\"tight\")\n\n\n#%% [markdown]\n# ## Are intronless genes enriched in X chromosome escapers?\n\n#%% [markdown]\n# ### Intronless genes are depleted on the X chromosome.\n\n#%%\n# intronless genes across the genome\nintronless2chrom = fbgn2chrom.to_frame().query(\n \"FB_chrom == ['X', '2L', '2R', '3L', '3R', '4', 'Y']\"\n)\nintronless2chrom[\"intronless\"] = np.where(intronless2chrom.index.isin(fbgns_no_intron), True, False)\n\nct = pd.crosstab(intronless2chrom.intronless, intronless2chrom.FB_chrom)\nres = run_chisq(ct).loc[(slice(None), [\"observed\", \"adj std residual\", \"flag_sig\"]), :]\ndisplay(res)\n\nprint(tabulate(res.reset_index(), headers=\"keys\", showindex=False, tablefmt=\"github\"))\n\n#%% [markdown]\n# ### X chromosome escapers are not enriched for intronless genes.\n\n#%% [markdown]\n# #### Main Effects Model Logit(intronless = cyte_biased + X chromosome)\n\n#%%\n# Main effects model\nmodel = smf.logit(\"intronless ~ cyte_bias + X\", data=df.replace({True: 1, False: 0}))\nresults = model.fit()\nplot_statsmodels_results(\n \"../output/docs/x_escapers_and_intronless_genes_main_effects.png\", str(results.summary2())\n)\ndisplay(results.summary2())\n\nnp.exp(results.params).rename(\"Odds Ratio\").to_frame()[results.pvalues <= 0.05]\n\n#%% [markdown]\n# #### Full Model Logit(intronless = cyte_biased + X chromosome + cyte_biased * X chromosome)\n\n#%%\n# FUll Model\nmodel = smf.logit(\"intronless ~ cyte_bias * X\", data=df.replace({True: 1, False: 0}))\nresults = model.fit()\nplot_statsmodels_results(\n \"../output/docs/x_escapers_and_intronless_genes_full.png\", str(results.summary2())\n)\ndisplay(results.summary2())\n\nnp.exp(results.params).rename(\"Odds Ratio\").to_frame()[results.pvalues <= 0.05]\n\n\n#%%\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# coding: utf-8
# Student: Héricles Emanuel
# Student ID: 117110647
# Assignment: Is it a magic square?
def eh_quadrado_magico(m):
    soma_all = []
    # Reference sum: the sum of the first row
    soma = 0
    for e in range(len(m[0])):
        soma += m[0][e]
    # Rows
    for i in range(len(m)):
        somados = 0
        for e in range(len(m[i])):
            somados += m[i][e]
        soma_all.append(somados)
    # Columns
    x = 0
    while x < len(m):
        somados = 0
        for n in range(len(m)):
            somados += m[n][x]
        x += 1
        soma_all.append(somados)
    # Anti-diagonal (top-right to bottom-left)
    x = len(m) - 1
    somados = 0
    for i in range(len(m)):
        somados += m[i][x]
        x -= 1
    soma_all.append(somados)
    # Main diagonal (top-left to bottom-right)
    x = len(m) - 1
    somados = 0
    for i in range(len(m) - 1, -1, -1):
        somados += m[i][x]
        x -= 1
    soma_all.append(somados)
    # Every row, column and diagonal sum must match the reference sum
    for s in soma_all:
        if s != soma:
            return False
    return True

quadrado1 = [[2, 7, 6], [9, 5, 1], [4, 3, 8]]
print(eh_quadrado_magico(quadrado1))
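# One more check (illustrative, not part of the original assignment): a
# non-magic square should return False.
quadrado2 = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
print(eh_quadrado_magico(quadrado2))  # False: row sums 6, 15 and 24 differ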
|
normal
|
{
"blob_id": "f039ab104093eb42c3f5d3c794710a0997e85387",
"index": 8371,
"step-1": "# coding: utf-8\n# Aluno: Héricles Emanuel\n# Matrícula: 117110647\n# Atividade: É quadrado Mágico?\n\ndef eh_quadrado_magico(m):\n\tsomas_all = []\n\teh_magico = True\n\tsoma = 0\n\tfor e in range(len(m[0])):\n\t\tsoma += m[0][e]\n\n# Linhas\n\tfor i in range(len(m)):\n\t\tsomados = 0\n\t\tfor e in range(len(m[i])):\n\t\t\tsomados += (m[i][e])\n\t\tsoma_all.append(somados)\n# Colunas\n\tx = 0\n\twhile x < len(m):\n\t\tsomados = 0\n\t\tfor n in range(len(m)):\n\t\t\tsomados += m[n][x]\n\t\tx += 1\n\t\tsoma_all.append(somados)\n\n# Diagonal1\n\tx = len(m) - 1\n\tsomados = 0\n\tfor i in range(len(m)):\n\t\tsomados += m[i][x]\n\t\tx -= 1\n\tsoma_all.append(somados)\n# Diagonal 2\t\n\tx = len(m) -1\n\tsomados = 0\n\tfor i in range(len(m) -1, -1, -1):\n\t\tsomados += m[i][x]\n\t\tx -= 1\n\tsoma_all.append(somados)\n\nfor i in somados:\n\tif i != soma:\n\t\treturn False\t\t \t \n\t\t \t \n\tif eh_magico:\n\t\treturn True\nquadrado1 = [[2,7,6],[9,5,1],[4,3,8]]\nprint eh_quadrado_magico(quadrado1)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
class Solution:
    '''
    First scan the whole string and find the least frequent character.
    If even that character occurs at least k times, the whole string already satisfies the condition, so return its length.
    Otherwise, let c be a character that occurs fewer than k times. No valid substring can contain c, so every valid substring must lie inside one of the pieces obtained by splitting on c. Split on c, recurse on each piece, and return the maximum result.
    '''
def longestSubstring(self, s: str, k: int) -> int:
def helper(s, k):
if len(s) < k:
return 0
ch = min(set(s), key=s.count)
if s.count(ch) >= k:
return len(s)
else:
return max(helper(t, k) for t in s.split(ch))
return helper(s, k)
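# Quick sanity checks (hypothetical driver, not part of the original solution):
if __name__ == "__main__":
    sol = Solution()
    print(sol.longestSubstring("aaabb", 3))   # 3 -- "aaa"
    print(sol.longestSubstring("ababbc", 2))  # 5 -- "ababb"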
|
normal
|
{
"blob_id": "6ba830aafbe8e4b42a0b927328ebcad1424cda5e",
"index": 8381,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n <mask token>\n",
"step-3": "class Solution:\n <mask token>\n\n def longestSubstring(self, s: str, k: int) ->int:\n\n def helper(s, k):\n if len(s) < k:\n return 0\n ch = min(set(s), key=s.count)\n if s.count(ch) >= k:\n return len(s)\n else:\n return max(helper(t, k) for t in s.split(ch))\n return helper(s, k)\n",
"step-4": "class Solution:\n \"\"\"\n 先遍历整个string,并记录最小的character的出现次数。\n 如果最小character出现次数都不小于k,那么说明整个string就是满足条件的longest substring,返回原string的长度即可;\n 如果character的出现次数小于k,假设这个character是c,因为满足条件的substring永远不会包含c,所以满足条件的substring一定是在以c为分割参考下的某个substring中。所以我们需要做的就是把c当做是split的参考,在得到的String[]中再次调用我们的method,找到最大的返回值即可。\n \"\"\"\n\n def longestSubstring(self, s: str, k: int) ->int:\n\n def helper(s, k):\n if len(s) < k:\n return 0\n ch = min(set(s), key=s.count)\n if s.count(ch) >= k:\n return len(s)\n else:\n return max(helper(t, k) for t in s.split(ch))\n return helper(s, k)\n",
"step-5": "class Solution:\n '''\n 先遍历整个string,并记录最小的character的出现次数。\n 如果最小character出现次数都不小于k,那么说明整个string就是满足条件的longest substring,返回原string的长度即可;\n 如果character的出现次数小于k,假设这个character是c,因为满足条件的substring永远不会包含c,所以满足条件的substring一定是在以c为分割参考下的某个substring中。所以我们需要做的就是把c当做是split的参考,在得到的String[]中再次调用我们的method,找到最大的返回值即可。\n '''\n\n def longestSubstring(self, s: str, k: int) -> int:\n def helper(s, k):\n if len(s) < k:\n return 0\n ch = min(set(s), key=s.count)\n if s.count(ch) >= k:\n return len(s)\n else:\n return max(helper(t, k) for t in s.split(ch))\n return helper(s, k)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'meet.ui'
#
# Created by: PyQt5 UI code generator 5.8.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(607, 723)
self.start = QtWidgets.QLabel(Dialog)
self.start.setGeometry(QtCore.QRect(10, 70, 59, 24))
self.start.setObjectName("start")
self.startDate = QtWidgets.QDateEdit(Dialog)
self.startDate.setGeometry(QtCore.QRect(70, 70, 110, 24))
self.startDate.setDate(QtCore.QDate(2017, 1, 1))
self.startDate.setObjectName("startDate")
self.end = QtWidgets.QLabel(Dialog)
self.end.setGeometry(QtCore.QRect(190, 70, 81, 24))
self.end.setObjectName("end")
self.endDate = QtWidgets.QDateEdit(Dialog)
self.endDate.setGeometry(QtCore.QRect(270, 70, 110, 24))
self.endDate.setDate(QtCore.QDate(2017, 1, 1))
self.endDate.setObjectName("endDate")
self.name = QtWidgets.QLabel(Dialog)
self.name.setGeometry(QtCore.QRect(10, 10, 59, 24))
self.name.setObjectName("name")
self.nameEdit = QtWidgets.QLineEdit(Dialog)
self.nameEdit.setGeometry(QtCore.QRect(80, 10, 511, 24))
self.nameEdit.setObjectName("nameEdit")
self.athletes = QtWidgets.QLabel(Dialog)
self.athletes.setGeometry(QtCore.QRect(10, 130, 141, 16))
self.athletes.setObjectName("athletes")
self.addButton = QtWidgets.QPushButton(Dialog)
self.addButton.setGeometry(QtCore.QRect(285, 220, 31, 24))
self.addButton.setObjectName("addButton")
self.removeButton = QtWidgets.QPushButton(Dialog)
self.removeButton.setGeometry(QtCore.QRect(285, 260, 31, 24))
self.removeButton.setObjectName("removeButton")
self.members = QtWidgets.QLabel(Dialog)
self.members.setGeometry(QtCore.QRect(325, 130, 131, 16))
self.members.setObjectName("members")
self.meetCount = QtWidgets.QLabel(Dialog)
self.meetCount.setGeometry(QtCore.QRect(390, 70, 121, 24))
self.meetCount.setObjectName("meetCount")
self.meetCountEdit = QtWidgets.QLineEdit(Dialog)
self.meetCountEdit.setGeometry(QtCore.QRect(510, 70, 81, 24))
self.meetCountEdit.setObjectName("meetCountEdit")
self.sortitionButton = QtWidgets.QPushButton(Dialog)
self.sortitionButton.setGeometry(QtCore.QRect(490, 360, 100, 24))
self.sortitionButton.setObjectName("sortitionButton")
self.cancel = QtWidgets.QPushButton(Dialog)
self.cancel.setGeometry(QtCore.QRect(492, 690, 100, 24))
self.cancel.setObjectName("cancel")
self.athletesList = QtWidgets.QListWidget(Dialog)
self.athletesList.setGeometry(QtCore.QRect(10, 150, 266, 201))
self.athletesList.setObjectName("athletesList")
self.membersList = QtWidgets.QListWidget(Dialog)
self.membersList.setGeometry(QtCore.QRect(325, 150, 266, 201))
self.membersList.setObjectName("membersList")
self.city = QtWidgets.QLabel(Dialog)
self.city.setGeometry(QtCore.QRect(10, 40, 131, 24))
self.city.setObjectName("city")
self.cityEdit = QtWidgets.QLineEdit(Dialog)
self.cityEdit.setGeometry(QtCore.QRect(140, 40, 451, 24))
self.cityEdit.setObjectName("cityEdit")
self.main_referee = QtWidgets.QLabel(Dialog)
self.main_referee.setGeometry(QtCore.QRect(10, 400, 101, 24))
self.main_referee.setObjectName("main_referee")
self.main_clerk = QtWidgets.QLabel(Dialog)
self.main_clerk.setGeometry(QtCore.QRect(10, 430, 131, 24))
self.main_clerk.setObjectName("main_clerk")
self.mainrefCBox = QtWidgets.QComboBox(Dialog)
self.mainrefCBox.setGeometry(QtCore.QRect(120, 400, 471, 24))
self.mainrefCBox.setObjectName("mainrefCBox")
self.mainclerkCBox = QtWidgets.QComboBox(Dialog)
self.mainclerkCBox.setGeometry(QtCore.QRect(140, 430, 451, 24))
self.mainclerkCBox.setObjectName("mainclerkCBox")
self.refList = QtWidgets.QListWidget(Dialog)
self.refList.setGeometry(QtCore.QRect(10, 480, 266, 201))
self.refList.setObjectName("refList")
self.refereeList = QtWidgets.QLabel(Dialog)
self.refereeList.setGeometry(QtCore.QRect(10, 460, 91, 16))
self.refereeList.setObjectName("refereeList")
self.refColList = QtWidgets.QListWidget(Dialog)
self.refColList.setGeometry(QtCore.QRect(325, 480, 266, 201))
self.refColList.setObjectName("refColList")
self.refereeCol = QtWidgets.QLabel(Dialog)
self.refereeCol.setGeometry(QtCore.QRect(325, 460, 141, 16))
self.refereeCol.setObjectName("refereeCol")
self.raddButton = QtWidgets.QPushButton(Dialog)
self.raddButton.setGeometry(QtCore.QRect(285, 560, 31, 24))
self.raddButton.setObjectName("raddButton")
self.rremoveButton = QtWidgets.QPushButton(Dialog)
self.rremoveButton.setGeometry(QtCore.QRect(285, 600, 31, 24))
self.rremoveButton.setObjectName("rremoveButton")
self.wsortitionButton = QtWidgets.QPushButton(Dialog)
self.wsortitionButton.setEnabled(True)
self.wsortitionButton.setGeometry(QtCore.QRect(360, 690, 121, 24))
self.wsortitionButton.setAutoDefault(True)
self.wsortitionButton.setDefault(False)
self.wsortitionButton.setFlat(False)
self.wsortitionButton.setObjectName("wsortitionButton")
self.divrings = QtWidgets.QCheckBox(Dialog)
self.divrings.setGeometry(QtCore.QRect(390, 100, 201, 24))
self.divrings.setObjectName("divrings")
self.weightcatCBox = QtWidgets.QComboBox(Dialog)
self.weightcatCBox.setGeometry(QtCore.QRect(150, 100, 231, 24))
self.weightcatCBox.setObjectName("weightcatCBox")
self.weigthcat = QtWidgets.QLabel(Dialog)
self.weigthcat.setGeometry(QtCore.QRect(10, 100, 131, 24))
self.weigthcat.setObjectName("weigthcat")
self.round = QtWidgets.QLabel(Dialog)
self.round.setGeometry(QtCore.QRect(220, 130, 61, 16))
self.round.setObjectName("round")
self.stage = QtWidgets.QLabel(Dialog)
self.stage.setGeometry(QtCore.QRect(490, 130, 101, 16))
self.stage.setObjectName("stage")
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
Dialog.setTabOrder(self.nameEdit, self.cityEdit)
Dialog.setTabOrder(self.cityEdit, self.startDate)
Dialog.setTabOrder(self.startDate, self.endDate)
Dialog.setTabOrder(self.endDate, self.meetCountEdit)
Dialog.setTabOrder(self.meetCountEdit, self.weightcatCBox)
Dialog.setTabOrder(self.weightcatCBox, self.divrings)
Dialog.setTabOrder(self.divrings, self.athletesList)
Dialog.setTabOrder(self.athletesList, self.addButton)
Dialog.setTabOrder(self.addButton, self.removeButton)
Dialog.setTabOrder(self.removeButton, self.membersList)
Dialog.setTabOrder(self.membersList, self.sortitionButton)
Dialog.setTabOrder(self.sortitionButton, self.mainrefCBox)
Dialog.setTabOrder(self.mainrefCBox, self.mainclerkCBox)
Dialog.setTabOrder(self.mainclerkCBox, self.refList)
Dialog.setTabOrder(self.refList, self.raddButton)
Dialog.setTabOrder(self.raddButton, self.rremoveButton)
Dialog.setTabOrder(self.rremoveButton, self.refColList)
Dialog.setTabOrder(self.refColList, self.wsortitionButton)
Dialog.setTabOrder(self.wsortitionButton, self.cancel)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Создание соревнования"))
self.start.setText(_translate("Dialog", "Начало"))
self.startDate.setDisplayFormat(_translate("Dialog", "dd.MM.yyyy"))
self.end.setText(_translate("Dialog", "Окончание"))
self.endDate.setDisplayFormat(_translate("Dialog", "dd.MM.yyyy"))
self.name.setText(_translate("Dialog", "Название"))
self.athletes.setText(_translate("Dialog", "Список спортсменов"))
self.addButton.setText(_translate("Dialog", ">>"))
self.removeButton.setText(_translate("Dialog", "<<"))
self.members.setText(_translate("Dialog", "Список участников"))
self.meetCount.setText(_translate("Dialog", "Число боев в день"))
self.sortitionButton.setText(_translate("Dialog", "Жеребьевка"))
self.cancel.setText(_translate("Dialog", "Отмена"))
self.city.setText(_translate("Dialog", "Место проведения"))
self.main_referee.setText(_translate("Dialog", "Главный судья"))
self.main_clerk.setText(_translate("Dialog", "Главный секретарь"))
self.refereeList.setText(_translate("Dialog", "Список судей"))
self.refereeCol.setText(_translate("Dialog", "Судейская коллегия"))
self.raddButton.setText(_translate("Dialog", ">>"))
self.rremoveButton.setText(_translate("Dialog", "<<"))
self.wsortitionButton.setText(_translate("Dialog", "Без жеребьевки"))
self.divrings.setText(_translate("Dialog", "Разбивать по рингам"))
self.weigthcat.setText(_translate("Dialog", "Весовая категория"))
self.round.setText(_translate("Dialog", "раунд"))
self.stage.setText(_translate("Dialog", "стадия"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
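# Generated UI classes are usually subclassed rather than edited directly,
# since this file is regenerated from meet.ui. A minimal sketch of the usual
# pattern (class name and slot wiring are illustrative assumptions):
#
# class MeetDialog(QtWidgets.QDialog, Ui_Dialog):
#     def __init__(self, parent=None):
#         super().__init__(parent)
#         self.setupUi(self)
#         self.cancel.clicked.connect(self.reject)  # close on the cancel button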
|
normal
|
{
"blob_id": "c076aed1bfff51f8edf5ab4ef029b7fa7ca2422c",
"index": 9479,
"step-1": "<mask token>\n\n\nclass Ui_Dialog(object):\n\n def setupUi(self, Dialog):\n Dialog.setObjectName('Dialog')\n Dialog.resize(607, 723)\n self.start = QtWidgets.QLabel(Dialog)\n self.start.setGeometry(QtCore.QRect(10, 70, 59, 24))\n self.start.setObjectName('start')\n self.startDate = QtWidgets.QDateEdit(Dialog)\n self.startDate.setGeometry(QtCore.QRect(70, 70, 110, 24))\n self.startDate.setDate(QtCore.QDate(2017, 1, 1))\n self.startDate.setObjectName('startDate')\n self.end = QtWidgets.QLabel(Dialog)\n self.end.setGeometry(QtCore.QRect(190, 70, 81, 24))\n self.end.setObjectName('end')\n self.endDate = QtWidgets.QDateEdit(Dialog)\n self.endDate.setGeometry(QtCore.QRect(270, 70, 110, 24))\n self.endDate.setDate(QtCore.QDate(2017, 1, 1))\n self.endDate.setObjectName('endDate')\n self.name = QtWidgets.QLabel(Dialog)\n self.name.setGeometry(QtCore.QRect(10, 10, 59, 24))\n self.name.setObjectName('name')\n self.nameEdit = QtWidgets.QLineEdit(Dialog)\n self.nameEdit.setGeometry(QtCore.QRect(80, 10, 511, 24))\n self.nameEdit.setObjectName('nameEdit')\n self.athletes = QtWidgets.QLabel(Dialog)\n self.athletes.setGeometry(QtCore.QRect(10, 130, 141, 16))\n self.athletes.setObjectName('athletes')\n self.addButton = QtWidgets.QPushButton(Dialog)\n self.addButton.setGeometry(QtCore.QRect(285, 220, 31, 24))\n self.addButton.setObjectName('addButton')\n self.removeButton = QtWidgets.QPushButton(Dialog)\n self.removeButton.setGeometry(QtCore.QRect(285, 260, 31, 24))\n self.removeButton.setObjectName('removeButton')\n self.members = QtWidgets.QLabel(Dialog)\n self.members.setGeometry(QtCore.QRect(325, 130, 131, 16))\n self.members.setObjectName('members')\n self.meetCount = QtWidgets.QLabel(Dialog)\n self.meetCount.setGeometry(QtCore.QRect(390, 70, 121, 24))\n self.meetCount.setObjectName('meetCount')\n self.meetCountEdit = QtWidgets.QLineEdit(Dialog)\n self.meetCountEdit.setGeometry(QtCore.QRect(510, 70, 81, 24))\n self.meetCountEdit.setObjectName('meetCountEdit')\n self.sortitionButton = QtWidgets.QPushButton(Dialog)\n self.sortitionButton.setGeometry(QtCore.QRect(490, 360, 100, 24))\n self.sortitionButton.setObjectName('sortitionButton')\n self.cancel = QtWidgets.QPushButton(Dialog)\n self.cancel.setGeometry(QtCore.QRect(492, 690, 100, 24))\n self.cancel.setObjectName('cancel')\n self.athletesList = QtWidgets.QListWidget(Dialog)\n self.athletesList.setGeometry(QtCore.QRect(10, 150, 266, 201))\n self.athletesList.setObjectName('athletesList')\n self.membersList = QtWidgets.QListWidget(Dialog)\n self.membersList.setGeometry(QtCore.QRect(325, 150, 266, 201))\n self.membersList.setObjectName('membersList')\n self.city = QtWidgets.QLabel(Dialog)\n self.city.setGeometry(QtCore.QRect(10, 40, 131, 24))\n self.city.setObjectName('city')\n self.cityEdit = QtWidgets.QLineEdit(Dialog)\n self.cityEdit.setGeometry(QtCore.QRect(140, 40, 451, 24))\n self.cityEdit.setObjectName('cityEdit')\n self.main_referee = QtWidgets.QLabel(Dialog)\n self.main_referee.setGeometry(QtCore.QRect(10, 400, 101, 24))\n self.main_referee.setObjectName('main_referee')\n self.main_clerk = QtWidgets.QLabel(Dialog)\n self.main_clerk.setGeometry(QtCore.QRect(10, 430, 131, 24))\n self.main_clerk.setObjectName('main_clerk')\n self.mainrefCBox = QtWidgets.QComboBox(Dialog)\n self.mainrefCBox.setGeometry(QtCore.QRect(120, 400, 471, 24))\n self.mainrefCBox.setObjectName('mainrefCBox')\n self.mainclerkCBox = QtWidgets.QComboBox(Dialog)\n self.mainclerkCBox.setGeometry(QtCore.QRect(140, 430, 451, 24))\n 
self.mainclerkCBox.setObjectName('mainclerkCBox')\n self.refList = QtWidgets.QListWidget(Dialog)\n self.refList.setGeometry(QtCore.QRect(10, 480, 266, 201))\n self.refList.setObjectName('refList')\n self.refereeList = QtWidgets.QLabel(Dialog)\n self.refereeList.setGeometry(QtCore.QRect(10, 460, 91, 16))\n self.refereeList.setObjectName('refereeList')\n self.refColList = QtWidgets.QListWidget(Dialog)\n self.refColList.setGeometry(QtCore.QRect(325, 480, 266, 201))\n self.refColList.setObjectName('refColList')\n self.refereeCol = QtWidgets.QLabel(Dialog)\n self.refereeCol.setGeometry(QtCore.QRect(325, 460, 141, 16))\n self.refereeCol.setObjectName('refereeCol')\n self.raddButton = QtWidgets.QPushButton(Dialog)\n self.raddButton.setGeometry(QtCore.QRect(285, 560, 31, 24))\n self.raddButton.setObjectName('raddButton')\n self.rremoveButton = QtWidgets.QPushButton(Dialog)\n self.rremoveButton.setGeometry(QtCore.QRect(285, 600, 31, 24))\n self.rremoveButton.setObjectName('rremoveButton')\n self.wsortitionButton = QtWidgets.QPushButton(Dialog)\n self.wsortitionButton.setEnabled(True)\n self.wsortitionButton.setGeometry(QtCore.QRect(360, 690, 121, 24))\n self.wsortitionButton.setAutoDefault(True)\n self.wsortitionButton.setDefault(False)\n self.wsortitionButton.setFlat(False)\n self.wsortitionButton.setObjectName('wsortitionButton')\n self.divrings = QtWidgets.QCheckBox(Dialog)\n self.divrings.setGeometry(QtCore.QRect(390, 100, 201, 24))\n self.divrings.setObjectName('divrings')\n self.weightcatCBox = QtWidgets.QComboBox(Dialog)\n self.weightcatCBox.setGeometry(QtCore.QRect(150, 100, 231, 24))\n self.weightcatCBox.setObjectName('weightcatCBox')\n self.weigthcat = QtWidgets.QLabel(Dialog)\n self.weigthcat.setGeometry(QtCore.QRect(10, 100, 131, 24))\n self.weigthcat.setObjectName('weigthcat')\n self.round = QtWidgets.QLabel(Dialog)\n self.round.setGeometry(QtCore.QRect(220, 130, 61, 16))\n self.round.setObjectName('round')\n self.stage = QtWidgets.QLabel(Dialog)\n self.stage.setGeometry(QtCore.QRect(490, 130, 101, 16))\n self.stage.setObjectName('stage')\n self.retranslateUi(Dialog)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n Dialog.setTabOrder(self.nameEdit, self.cityEdit)\n Dialog.setTabOrder(self.cityEdit, self.startDate)\n Dialog.setTabOrder(self.startDate, self.endDate)\n Dialog.setTabOrder(self.endDate, self.meetCountEdit)\n Dialog.setTabOrder(self.meetCountEdit, self.weightcatCBox)\n Dialog.setTabOrder(self.weightcatCBox, self.divrings)\n Dialog.setTabOrder(self.divrings, self.athletesList)\n Dialog.setTabOrder(self.athletesList, self.addButton)\n Dialog.setTabOrder(self.addButton, self.removeButton)\n Dialog.setTabOrder(self.removeButton, self.membersList)\n Dialog.setTabOrder(self.membersList, self.sortitionButton)\n Dialog.setTabOrder(self.sortitionButton, self.mainrefCBox)\n Dialog.setTabOrder(self.mainrefCBox, self.mainclerkCBox)\n Dialog.setTabOrder(self.mainclerkCBox, self.refList)\n Dialog.setTabOrder(self.refList, self.raddButton)\n Dialog.setTabOrder(self.raddButton, self.rremoveButton)\n Dialog.setTabOrder(self.rremoveButton, self.refColList)\n Dialog.setTabOrder(self.refColList, self.wsortitionButton)\n Dialog.setTabOrder(self.wsortitionButton, self.cancel)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Ui_Dialog(object):\n\n def setupUi(self, Dialog):\n Dialog.setObjectName('Dialog')\n Dialog.resize(607, 723)\n self.start = QtWidgets.QLabel(Dialog)\n self.start.setGeometry(QtCore.QRect(10, 70, 59, 24))\n self.start.setObjectName('start')\n self.startDate = QtWidgets.QDateEdit(Dialog)\n self.startDate.setGeometry(QtCore.QRect(70, 70, 110, 24))\n self.startDate.setDate(QtCore.QDate(2017, 1, 1))\n self.startDate.setObjectName('startDate')\n self.end = QtWidgets.QLabel(Dialog)\n self.end.setGeometry(QtCore.QRect(190, 70, 81, 24))\n self.end.setObjectName('end')\n self.endDate = QtWidgets.QDateEdit(Dialog)\n self.endDate.setGeometry(QtCore.QRect(270, 70, 110, 24))\n self.endDate.setDate(QtCore.QDate(2017, 1, 1))\n self.endDate.setObjectName('endDate')\n self.name = QtWidgets.QLabel(Dialog)\n self.name.setGeometry(QtCore.QRect(10, 10, 59, 24))\n self.name.setObjectName('name')\n self.nameEdit = QtWidgets.QLineEdit(Dialog)\n self.nameEdit.setGeometry(QtCore.QRect(80, 10, 511, 24))\n self.nameEdit.setObjectName('nameEdit')\n self.athletes = QtWidgets.QLabel(Dialog)\n self.athletes.setGeometry(QtCore.QRect(10, 130, 141, 16))\n self.athletes.setObjectName('athletes')\n self.addButton = QtWidgets.QPushButton(Dialog)\n self.addButton.setGeometry(QtCore.QRect(285, 220, 31, 24))\n self.addButton.setObjectName('addButton')\n self.removeButton = QtWidgets.QPushButton(Dialog)\n self.removeButton.setGeometry(QtCore.QRect(285, 260, 31, 24))\n self.removeButton.setObjectName('removeButton')\n self.members = QtWidgets.QLabel(Dialog)\n self.members.setGeometry(QtCore.QRect(325, 130, 131, 16))\n self.members.setObjectName('members')\n self.meetCount = QtWidgets.QLabel(Dialog)\n self.meetCount.setGeometry(QtCore.QRect(390, 70, 121, 24))\n self.meetCount.setObjectName('meetCount')\n self.meetCountEdit = QtWidgets.QLineEdit(Dialog)\n self.meetCountEdit.setGeometry(QtCore.QRect(510, 70, 81, 24))\n self.meetCountEdit.setObjectName('meetCountEdit')\n self.sortitionButton = QtWidgets.QPushButton(Dialog)\n self.sortitionButton.setGeometry(QtCore.QRect(490, 360, 100, 24))\n self.sortitionButton.setObjectName('sortitionButton')\n self.cancel = QtWidgets.QPushButton(Dialog)\n self.cancel.setGeometry(QtCore.QRect(492, 690, 100, 24))\n self.cancel.setObjectName('cancel')\n self.athletesList = QtWidgets.QListWidget(Dialog)\n self.athletesList.setGeometry(QtCore.QRect(10, 150, 266, 201))\n self.athletesList.setObjectName('athletesList')\n self.membersList = QtWidgets.QListWidget(Dialog)\n self.membersList.setGeometry(QtCore.QRect(325, 150, 266, 201))\n self.membersList.setObjectName('membersList')\n self.city = QtWidgets.QLabel(Dialog)\n self.city.setGeometry(QtCore.QRect(10, 40, 131, 24))\n self.city.setObjectName('city')\n self.cityEdit = QtWidgets.QLineEdit(Dialog)\n self.cityEdit.setGeometry(QtCore.QRect(140, 40, 451, 24))\n self.cityEdit.setObjectName('cityEdit')\n self.main_referee = QtWidgets.QLabel(Dialog)\n self.main_referee.setGeometry(QtCore.QRect(10, 400, 101, 24))\n self.main_referee.setObjectName('main_referee')\n self.main_clerk = QtWidgets.QLabel(Dialog)\n self.main_clerk.setGeometry(QtCore.QRect(10, 430, 131, 24))\n self.main_clerk.setObjectName('main_clerk')\n self.mainrefCBox = QtWidgets.QComboBox(Dialog)\n self.mainrefCBox.setGeometry(QtCore.QRect(120, 400, 471, 24))\n self.mainrefCBox.setObjectName('mainrefCBox')\n self.mainclerkCBox = QtWidgets.QComboBox(Dialog)\n self.mainclerkCBox.setGeometry(QtCore.QRect(140, 430, 451, 24))\n 
self.mainclerkCBox.setObjectName('mainclerkCBox')\n self.refList = QtWidgets.QListWidget(Dialog)\n self.refList.setGeometry(QtCore.QRect(10, 480, 266, 201))\n self.refList.setObjectName('refList')\n self.refereeList = QtWidgets.QLabel(Dialog)\n self.refereeList.setGeometry(QtCore.QRect(10, 460, 91, 16))\n self.refereeList.setObjectName('refereeList')\n self.refColList = QtWidgets.QListWidget(Dialog)\n self.refColList.setGeometry(QtCore.QRect(325, 480, 266, 201))\n self.refColList.setObjectName('refColList')\n self.refereeCol = QtWidgets.QLabel(Dialog)\n self.refereeCol.setGeometry(QtCore.QRect(325, 460, 141, 16))\n self.refereeCol.setObjectName('refereeCol')\n self.raddButton = QtWidgets.QPushButton(Dialog)\n self.raddButton.setGeometry(QtCore.QRect(285, 560, 31, 24))\n self.raddButton.setObjectName('raddButton')\n self.rremoveButton = QtWidgets.QPushButton(Dialog)\n self.rremoveButton.setGeometry(QtCore.QRect(285, 600, 31, 24))\n self.rremoveButton.setObjectName('rremoveButton')\n self.wsortitionButton = QtWidgets.QPushButton(Dialog)\n self.wsortitionButton.setEnabled(True)\n self.wsortitionButton.setGeometry(QtCore.QRect(360, 690, 121, 24))\n self.wsortitionButton.setAutoDefault(True)\n self.wsortitionButton.setDefault(False)\n self.wsortitionButton.setFlat(False)\n self.wsortitionButton.setObjectName('wsortitionButton')\n self.divrings = QtWidgets.QCheckBox(Dialog)\n self.divrings.setGeometry(QtCore.QRect(390, 100, 201, 24))\n self.divrings.setObjectName('divrings')\n self.weightcatCBox = QtWidgets.QComboBox(Dialog)\n self.weightcatCBox.setGeometry(QtCore.QRect(150, 100, 231, 24))\n self.weightcatCBox.setObjectName('weightcatCBox')\n self.weigthcat = QtWidgets.QLabel(Dialog)\n self.weigthcat.setGeometry(QtCore.QRect(10, 100, 131, 24))\n self.weigthcat.setObjectName('weigthcat')\n self.round = QtWidgets.QLabel(Dialog)\n self.round.setGeometry(QtCore.QRect(220, 130, 61, 16))\n self.round.setObjectName('round')\n self.stage = QtWidgets.QLabel(Dialog)\n self.stage.setGeometry(QtCore.QRect(490, 130, 101, 16))\n self.stage.setObjectName('stage')\n self.retranslateUi(Dialog)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n Dialog.setTabOrder(self.nameEdit, self.cityEdit)\n Dialog.setTabOrder(self.cityEdit, self.startDate)\n Dialog.setTabOrder(self.startDate, self.endDate)\n Dialog.setTabOrder(self.endDate, self.meetCountEdit)\n Dialog.setTabOrder(self.meetCountEdit, self.weightcatCBox)\n Dialog.setTabOrder(self.weightcatCBox, self.divrings)\n Dialog.setTabOrder(self.divrings, self.athletesList)\n Dialog.setTabOrder(self.athletesList, self.addButton)\n Dialog.setTabOrder(self.addButton, self.removeButton)\n Dialog.setTabOrder(self.removeButton, self.membersList)\n Dialog.setTabOrder(self.membersList, self.sortitionButton)\n Dialog.setTabOrder(self.sortitionButton, self.mainrefCBox)\n Dialog.setTabOrder(self.mainrefCBox, self.mainclerkCBox)\n Dialog.setTabOrder(self.mainclerkCBox, self.refList)\n Dialog.setTabOrder(self.refList, self.raddButton)\n Dialog.setTabOrder(self.raddButton, self.rremoveButton)\n Dialog.setTabOrder(self.rremoveButton, self.refColList)\n Dialog.setTabOrder(self.refColList, self.wsortitionButton)\n Dialog.setTabOrder(self.wsortitionButton, self.cancel)\n\n def retranslateUi(self, Dialog):\n _translate = QtCore.QCoreApplication.translate\n Dialog.setWindowTitle(_translate('Dialog', 'Создание соревнования'))\n self.start.setText(_translate('Dialog', 'Начало'))\n self.startDate.setDisplayFormat(_translate('Dialog', 'dd.MM.yyyy'))\n self.end.setText(_translate('Dialog', 
'Окончание'))\n self.endDate.setDisplayFormat(_translate('Dialog', 'dd.MM.yyyy'))\n self.name.setText(_translate('Dialog', 'Название'))\n self.athletes.setText(_translate('Dialog', 'Список спортсменов'))\n self.addButton.setText(_translate('Dialog', '>>'))\n self.removeButton.setText(_translate('Dialog', '<<'))\n self.members.setText(_translate('Dialog', 'Список участников'))\n self.meetCount.setText(_translate('Dialog', 'Число боев в день'))\n self.sortitionButton.setText(_translate('Dialog', 'Жеребьевка'))\n self.cancel.setText(_translate('Dialog', 'Отмена'))\n self.city.setText(_translate('Dialog', 'Место проведения'))\n self.main_referee.setText(_translate('Dialog', 'Главный судья'))\n self.main_clerk.setText(_translate('Dialog', 'Главный секретарь'))\n self.refereeList.setText(_translate('Dialog', 'Список судей'))\n self.refereeCol.setText(_translate('Dialog', 'Судейская коллегия'))\n self.raddButton.setText(_translate('Dialog', '>>'))\n self.rremoveButton.setText(_translate('Dialog', '<<'))\n self.wsortitionButton.setText(_translate('Dialog', 'Без жеребьевки'))\n self.divrings.setText(_translate('Dialog', 'Разбивать по рингам'))\n self.weigthcat.setText(_translate('Dialog', 'Весовая категория'))\n self.round.setText(_translate('Dialog', 'раунд'))\n self.stage.setText(_translate('Dialog', 'стадия'))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Ui_Dialog(object):\n\n def setupUi(self, Dialog):\n Dialog.setObjectName('Dialog')\n Dialog.resize(607, 723)\n self.start = QtWidgets.QLabel(Dialog)\n self.start.setGeometry(QtCore.QRect(10, 70, 59, 24))\n self.start.setObjectName('start')\n self.startDate = QtWidgets.QDateEdit(Dialog)\n self.startDate.setGeometry(QtCore.QRect(70, 70, 110, 24))\n self.startDate.setDate(QtCore.QDate(2017, 1, 1))\n self.startDate.setObjectName('startDate')\n self.end = QtWidgets.QLabel(Dialog)\n self.end.setGeometry(QtCore.QRect(190, 70, 81, 24))\n self.end.setObjectName('end')\n self.endDate = QtWidgets.QDateEdit(Dialog)\n self.endDate.setGeometry(QtCore.QRect(270, 70, 110, 24))\n self.endDate.setDate(QtCore.QDate(2017, 1, 1))\n self.endDate.setObjectName('endDate')\n self.name = QtWidgets.QLabel(Dialog)\n self.name.setGeometry(QtCore.QRect(10, 10, 59, 24))\n self.name.setObjectName('name')\n self.nameEdit = QtWidgets.QLineEdit(Dialog)\n self.nameEdit.setGeometry(QtCore.QRect(80, 10, 511, 24))\n self.nameEdit.setObjectName('nameEdit')\n self.athletes = QtWidgets.QLabel(Dialog)\n self.athletes.setGeometry(QtCore.QRect(10, 130, 141, 16))\n self.athletes.setObjectName('athletes')\n self.addButton = QtWidgets.QPushButton(Dialog)\n self.addButton.setGeometry(QtCore.QRect(285, 220, 31, 24))\n self.addButton.setObjectName('addButton')\n self.removeButton = QtWidgets.QPushButton(Dialog)\n self.removeButton.setGeometry(QtCore.QRect(285, 260, 31, 24))\n self.removeButton.setObjectName('removeButton')\n self.members = QtWidgets.QLabel(Dialog)\n self.members.setGeometry(QtCore.QRect(325, 130, 131, 16))\n self.members.setObjectName('members')\n self.meetCount = QtWidgets.QLabel(Dialog)\n self.meetCount.setGeometry(QtCore.QRect(390, 70, 121, 24))\n self.meetCount.setObjectName('meetCount')\n self.meetCountEdit = QtWidgets.QLineEdit(Dialog)\n self.meetCountEdit.setGeometry(QtCore.QRect(510, 70, 81, 24))\n self.meetCountEdit.setObjectName('meetCountEdit')\n self.sortitionButton = QtWidgets.QPushButton(Dialog)\n self.sortitionButton.setGeometry(QtCore.QRect(490, 360, 100, 24))\n self.sortitionButton.setObjectName('sortitionButton')\n self.cancel = QtWidgets.QPushButton(Dialog)\n self.cancel.setGeometry(QtCore.QRect(492, 690, 100, 24))\n self.cancel.setObjectName('cancel')\n self.athletesList = QtWidgets.QListWidget(Dialog)\n self.athletesList.setGeometry(QtCore.QRect(10, 150, 266, 201))\n self.athletesList.setObjectName('athletesList')\n self.membersList = QtWidgets.QListWidget(Dialog)\n self.membersList.setGeometry(QtCore.QRect(325, 150, 266, 201))\n self.membersList.setObjectName('membersList')\n self.city = QtWidgets.QLabel(Dialog)\n self.city.setGeometry(QtCore.QRect(10, 40, 131, 24))\n self.city.setObjectName('city')\n self.cityEdit = QtWidgets.QLineEdit(Dialog)\n self.cityEdit.setGeometry(QtCore.QRect(140, 40, 451, 24))\n self.cityEdit.setObjectName('cityEdit')\n self.main_referee = QtWidgets.QLabel(Dialog)\n self.main_referee.setGeometry(QtCore.QRect(10, 400, 101, 24))\n self.main_referee.setObjectName('main_referee')\n self.main_clerk = QtWidgets.QLabel(Dialog)\n self.main_clerk.setGeometry(QtCore.QRect(10, 430, 131, 24))\n self.main_clerk.setObjectName('main_clerk')\n self.mainrefCBox = QtWidgets.QComboBox(Dialog)\n self.mainrefCBox.setGeometry(QtCore.QRect(120, 400, 471, 24))\n self.mainrefCBox.setObjectName('mainrefCBox')\n self.mainclerkCBox = QtWidgets.QComboBox(Dialog)\n self.mainclerkCBox.setGeometry(QtCore.QRect(140, 430, 451, 24))\n 
self.mainclerkCBox.setObjectName('mainclerkCBox')\n self.refList = QtWidgets.QListWidget(Dialog)\n self.refList.setGeometry(QtCore.QRect(10, 480, 266, 201))\n self.refList.setObjectName('refList')\n self.refereeList = QtWidgets.QLabel(Dialog)\n self.refereeList.setGeometry(QtCore.QRect(10, 460, 91, 16))\n self.refereeList.setObjectName('refereeList')\n self.refColList = QtWidgets.QListWidget(Dialog)\n self.refColList.setGeometry(QtCore.QRect(325, 480, 266, 201))\n self.refColList.setObjectName('refColList')\n self.refereeCol = QtWidgets.QLabel(Dialog)\n self.refereeCol.setGeometry(QtCore.QRect(325, 460, 141, 16))\n self.refereeCol.setObjectName('refereeCol')\n self.raddButton = QtWidgets.QPushButton(Dialog)\n self.raddButton.setGeometry(QtCore.QRect(285, 560, 31, 24))\n self.raddButton.setObjectName('raddButton')\n self.rremoveButton = QtWidgets.QPushButton(Dialog)\n self.rremoveButton.setGeometry(QtCore.QRect(285, 600, 31, 24))\n self.rremoveButton.setObjectName('rremoveButton')\n self.wsortitionButton = QtWidgets.QPushButton(Dialog)\n self.wsortitionButton.setEnabled(True)\n self.wsortitionButton.setGeometry(QtCore.QRect(360, 690, 121, 24))\n self.wsortitionButton.setAutoDefault(True)\n self.wsortitionButton.setDefault(False)\n self.wsortitionButton.setFlat(False)\n self.wsortitionButton.setObjectName('wsortitionButton')\n self.divrings = QtWidgets.QCheckBox(Dialog)\n self.divrings.setGeometry(QtCore.QRect(390, 100, 201, 24))\n self.divrings.setObjectName('divrings')\n self.weightcatCBox = QtWidgets.QComboBox(Dialog)\n self.weightcatCBox.setGeometry(QtCore.QRect(150, 100, 231, 24))\n self.weightcatCBox.setObjectName('weightcatCBox')\n self.weigthcat = QtWidgets.QLabel(Dialog)\n self.weigthcat.setGeometry(QtCore.QRect(10, 100, 131, 24))\n self.weigthcat.setObjectName('weigthcat')\n self.round = QtWidgets.QLabel(Dialog)\n self.round.setGeometry(QtCore.QRect(220, 130, 61, 16))\n self.round.setObjectName('round')\n self.stage = QtWidgets.QLabel(Dialog)\n self.stage.setGeometry(QtCore.QRect(490, 130, 101, 16))\n self.stage.setObjectName('stage')\n self.retranslateUi(Dialog)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n Dialog.setTabOrder(self.nameEdit, self.cityEdit)\n Dialog.setTabOrder(self.cityEdit, self.startDate)\n Dialog.setTabOrder(self.startDate, self.endDate)\n Dialog.setTabOrder(self.endDate, self.meetCountEdit)\n Dialog.setTabOrder(self.meetCountEdit, self.weightcatCBox)\n Dialog.setTabOrder(self.weightcatCBox, self.divrings)\n Dialog.setTabOrder(self.divrings, self.athletesList)\n Dialog.setTabOrder(self.athletesList, self.addButton)\n Dialog.setTabOrder(self.addButton, self.removeButton)\n Dialog.setTabOrder(self.removeButton, self.membersList)\n Dialog.setTabOrder(self.membersList, self.sortitionButton)\n Dialog.setTabOrder(self.sortitionButton, self.mainrefCBox)\n Dialog.setTabOrder(self.mainrefCBox, self.mainclerkCBox)\n Dialog.setTabOrder(self.mainclerkCBox, self.refList)\n Dialog.setTabOrder(self.refList, self.raddButton)\n Dialog.setTabOrder(self.raddButton, self.rremoveButton)\n Dialog.setTabOrder(self.rremoveButton, self.refColList)\n Dialog.setTabOrder(self.refColList, self.wsortitionButton)\n Dialog.setTabOrder(self.wsortitionButton, self.cancel)\n\n def retranslateUi(self, Dialog):\n _translate = QtCore.QCoreApplication.translate\n Dialog.setWindowTitle(_translate('Dialog', 'Создание соревнования'))\n self.start.setText(_translate('Dialog', 'Начало'))\n self.startDate.setDisplayFormat(_translate('Dialog', 'dd.MM.yyyy'))\n self.end.setText(_translate('Dialog', 
'Окончание'))\n self.endDate.setDisplayFormat(_translate('Dialog', 'dd.MM.yyyy'))\n self.name.setText(_translate('Dialog', 'Название'))\n self.athletes.setText(_translate('Dialog', 'Список спортсменов'))\n self.addButton.setText(_translate('Dialog', '>>'))\n self.removeButton.setText(_translate('Dialog', '<<'))\n self.members.setText(_translate('Dialog', 'Список участников'))\n self.meetCount.setText(_translate('Dialog', 'Число боев в день'))\n self.sortitionButton.setText(_translate('Dialog', 'Жеребьевка'))\n self.cancel.setText(_translate('Dialog', 'Отмена'))\n self.city.setText(_translate('Dialog', 'Место проведения'))\n self.main_referee.setText(_translate('Dialog', 'Главный судья'))\n self.main_clerk.setText(_translate('Dialog', 'Главный секретарь'))\n self.refereeList.setText(_translate('Dialog', 'Список судей'))\n self.refereeCol.setText(_translate('Dialog', 'Судейская коллегия'))\n self.raddButton.setText(_translate('Dialog', '>>'))\n self.rremoveButton.setText(_translate('Dialog', '<<'))\n self.wsortitionButton.setText(_translate('Dialog', 'Без жеребьевки'))\n self.divrings.setText(_translate('Dialog', 'Разбивать по рингам'))\n self.weigthcat.setText(_translate('Dialog', 'Весовая категория'))\n self.round.setText(_translate('Dialog', 'раунд'))\n self.stage.setText(_translate('Dialog', 'стадия'))\n\n\nif __name__ == '__main__':\n import sys\n app = QtWidgets.QApplication(sys.argv)\n Dialog = QtWidgets.QDialog()\n ui = Ui_Dialog()\n ui.setupUi(Dialog)\n Dialog.show()\n sys.exit(app.exec_())\n",
"step-4": "from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_Dialog(object):\n\n def setupUi(self, Dialog):\n Dialog.setObjectName('Dialog')\n Dialog.resize(607, 723)\n self.start = QtWidgets.QLabel(Dialog)\n self.start.setGeometry(QtCore.QRect(10, 70, 59, 24))\n self.start.setObjectName('start')\n self.startDate = QtWidgets.QDateEdit(Dialog)\n self.startDate.setGeometry(QtCore.QRect(70, 70, 110, 24))\n self.startDate.setDate(QtCore.QDate(2017, 1, 1))\n self.startDate.setObjectName('startDate')\n self.end = QtWidgets.QLabel(Dialog)\n self.end.setGeometry(QtCore.QRect(190, 70, 81, 24))\n self.end.setObjectName('end')\n self.endDate = QtWidgets.QDateEdit(Dialog)\n self.endDate.setGeometry(QtCore.QRect(270, 70, 110, 24))\n self.endDate.setDate(QtCore.QDate(2017, 1, 1))\n self.endDate.setObjectName('endDate')\n self.name = QtWidgets.QLabel(Dialog)\n self.name.setGeometry(QtCore.QRect(10, 10, 59, 24))\n self.name.setObjectName('name')\n self.nameEdit = QtWidgets.QLineEdit(Dialog)\n self.nameEdit.setGeometry(QtCore.QRect(80, 10, 511, 24))\n self.nameEdit.setObjectName('nameEdit')\n self.athletes = QtWidgets.QLabel(Dialog)\n self.athletes.setGeometry(QtCore.QRect(10, 130, 141, 16))\n self.athletes.setObjectName('athletes')\n self.addButton = QtWidgets.QPushButton(Dialog)\n self.addButton.setGeometry(QtCore.QRect(285, 220, 31, 24))\n self.addButton.setObjectName('addButton')\n self.removeButton = QtWidgets.QPushButton(Dialog)\n self.removeButton.setGeometry(QtCore.QRect(285, 260, 31, 24))\n self.removeButton.setObjectName('removeButton')\n self.members = QtWidgets.QLabel(Dialog)\n self.members.setGeometry(QtCore.QRect(325, 130, 131, 16))\n self.members.setObjectName('members')\n self.meetCount = QtWidgets.QLabel(Dialog)\n self.meetCount.setGeometry(QtCore.QRect(390, 70, 121, 24))\n self.meetCount.setObjectName('meetCount')\n self.meetCountEdit = QtWidgets.QLineEdit(Dialog)\n self.meetCountEdit.setGeometry(QtCore.QRect(510, 70, 81, 24))\n self.meetCountEdit.setObjectName('meetCountEdit')\n self.sortitionButton = QtWidgets.QPushButton(Dialog)\n self.sortitionButton.setGeometry(QtCore.QRect(490, 360, 100, 24))\n self.sortitionButton.setObjectName('sortitionButton')\n self.cancel = QtWidgets.QPushButton(Dialog)\n self.cancel.setGeometry(QtCore.QRect(492, 690, 100, 24))\n self.cancel.setObjectName('cancel')\n self.athletesList = QtWidgets.QListWidget(Dialog)\n self.athletesList.setGeometry(QtCore.QRect(10, 150, 266, 201))\n self.athletesList.setObjectName('athletesList')\n self.membersList = QtWidgets.QListWidget(Dialog)\n self.membersList.setGeometry(QtCore.QRect(325, 150, 266, 201))\n self.membersList.setObjectName('membersList')\n self.city = QtWidgets.QLabel(Dialog)\n self.city.setGeometry(QtCore.QRect(10, 40, 131, 24))\n self.city.setObjectName('city')\n self.cityEdit = QtWidgets.QLineEdit(Dialog)\n self.cityEdit.setGeometry(QtCore.QRect(140, 40, 451, 24))\n self.cityEdit.setObjectName('cityEdit')\n self.main_referee = QtWidgets.QLabel(Dialog)\n self.main_referee.setGeometry(QtCore.QRect(10, 400, 101, 24))\n self.main_referee.setObjectName('main_referee')\n self.main_clerk = QtWidgets.QLabel(Dialog)\n self.main_clerk.setGeometry(QtCore.QRect(10, 430, 131, 24))\n self.main_clerk.setObjectName('main_clerk')\n self.mainrefCBox = QtWidgets.QComboBox(Dialog)\n self.mainrefCBox.setGeometry(QtCore.QRect(120, 400, 471, 24))\n self.mainrefCBox.setObjectName('mainrefCBox')\n self.mainclerkCBox = QtWidgets.QComboBox(Dialog)\n self.mainclerkCBox.setGeometry(QtCore.QRect(140, 430, 451, 24))\n 
self.mainclerkCBox.setObjectName('mainclerkCBox')\n self.refList = QtWidgets.QListWidget(Dialog)\n self.refList.setGeometry(QtCore.QRect(10, 480, 266, 201))\n self.refList.setObjectName('refList')\n self.refereeList = QtWidgets.QLabel(Dialog)\n self.refereeList.setGeometry(QtCore.QRect(10, 460, 91, 16))\n self.refereeList.setObjectName('refereeList')\n self.refColList = QtWidgets.QListWidget(Dialog)\n self.refColList.setGeometry(QtCore.QRect(325, 480, 266, 201))\n self.refColList.setObjectName('refColList')\n self.refereeCol = QtWidgets.QLabel(Dialog)\n self.refereeCol.setGeometry(QtCore.QRect(325, 460, 141, 16))\n self.refereeCol.setObjectName('refereeCol')\n self.raddButton = QtWidgets.QPushButton(Dialog)\n self.raddButton.setGeometry(QtCore.QRect(285, 560, 31, 24))\n self.raddButton.setObjectName('raddButton')\n self.rremoveButton = QtWidgets.QPushButton(Dialog)\n self.rremoveButton.setGeometry(QtCore.QRect(285, 600, 31, 24))\n self.rremoveButton.setObjectName('rremoveButton')\n self.wsortitionButton = QtWidgets.QPushButton(Dialog)\n self.wsortitionButton.setEnabled(True)\n self.wsortitionButton.setGeometry(QtCore.QRect(360, 690, 121, 24))\n self.wsortitionButton.setAutoDefault(True)\n self.wsortitionButton.setDefault(False)\n self.wsortitionButton.setFlat(False)\n self.wsortitionButton.setObjectName('wsortitionButton')\n self.divrings = QtWidgets.QCheckBox(Dialog)\n self.divrings.setGeometry(QtCore.QRect(390, 100, 201, 24))\n self.divrings.setObjectName('divrings')\n self.weightcatCBox = QtWidgets.QComboBox(Dialog)\n self.weightcatCBox.setGeometry(QtCore.QRect(150, 100, 231, 24))\n self.weightcatCBox.setObjectName('weightcatCBox')\n self.weigthcat = QtWidgets.QLabel(Dialog)\n self.weigthcat.setGeometry(QtCore.QRect(10, 100, 131, 24))\n self.weigthcat.setObjectName('weigthcat')\n self.round = QtWidgets.QLabel(Dialog)\n self.round.setGeometry(QtCore.QRect(220, 130, 61, 16))\n self.round.setObjectName('round')\n self.stage = QtWidgets.QLabel(Dialog)\n self.stage.setGeometry(QtCore.QRect(490, 130, 101, 16))\n self.stage.setObjectName('stage')\n self.retranslateUi(Dialog)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n Dialog.setTabOrder(self.nameEdit, self.cityEdit)\n Dialog.setTabOrder(self.cityEdit, self.startDate)\n Dialog.setTabOrder(self.startDate, self.endDate)\n Dialog.setTabOrder(self.endDate, self.meetCountEdit)\n Dialog.setTabOrder(self.meetCountEdit, self.weightcatCBox)\n Dialog.setTabOrder(self.weightcatCBox, self.divrings)\n Dialog.setTabOrder(self.divrings, self.athletesList)\n Dialog.setTabOrder(self.athletesList, self.addButton)\n Dialog.setTabOrder(self.addButton, self.removeButton)\n Dialog.setTabOrder(self.removeButton, self.membersList)\n Dialog.setTabOrder(self.membersList, self.sortitionButton)\n Dialog.setTabOrder(self.sortitionButton, self.mainrefCBox)\n Dialog.setTabOrder(self.mainrefCBox, self.mainclerkCBox)\n Dialog.setTabOrder(self.mainclerkCBox, self.refList)\n Dialog.setTabOrder(self.refList, self.raddButton)\n Dialog.setTabOrder(self.raddButton, self.rremoveButton)\n Dialog.setTabOrder(self.rremoveButton, self.refColList)\n Dialog.setTabOrder(self.refColList, self.wsortitionButton)\n Dialog.setTabOrder(self.wsortitionButton, self.cancel)\n\n def retranslateUi(self, Dialog):\n _translate = QtCore.QCoreApplication.translate\n Dialog.setWindowTitle(_translate('Dialog', 'Создание соревнования'))\n self.start.setText(_translate('Dialog', 'Начало'))\n self.startDate.setDisplayFormat(_translate('Dialog', 'dd.MM.yyyy'))\n self.end.setText(_translate('Dialog', 
'Окончание'))\n self.endDate.setDisplayFormat(_translate('Dialog', 'dd.MM.yyyy'))\n self.name.setText(_translate('Dialog', 'Название'))\n self.athletes.setText(_translate('Dialog', 'Список спортсменов'))\n self.addButton.setText(_translate('Dialog', '>>'))\n self.removeButton.setText(_translate('Dialog', '<<'))\n self.members.setText(_translate('Dialog', 'Список участников'))\n self.meetCount.setText(_translate('Dialog', 'Число боев в день'))\n self.sortitionButton.setText(_translate('Dialog', 'Жеребьевка'))\n self.cancel.setText(_translate('Dialog', 'Отмена'))\n self.city.setText(_translate('Dialog', 'Место проведения'))\n self.main_referee.setText(_translate('Dialog', 'Главный судья'))\n self.main_clerk.setText(_translate('Dialog', 'Главный секретарь'))\n self.refereeList.setText(_translate('Dialog', 'Список судей'))\n self.refereeCol.setText(_translate('Dialog', 'Судейская коллегия'))\n self.raddButton.setText(_translate('Dialog', '>>'))\n self.rremoveButton.setText(_translate('Dialog', '<<'))\n self.wsortitionButton.setText(_translate('Dialog', 'Без жеребьевки'))\n self.divrings.setText(_translate('Dialog', 'Разбивать по рингам'))\n self.weigthcat.setText(_translate('Dialog', 'Весовая категория'))\n self.round.setText(_translate('Dialog', 'раунд'))\n self.stage.setText(_translate('Dialog', 'стадия'))\n\n\nif __name__ == '__main__':\n import sys\n app = QtWidgets.QApplication(sys.argv)\n Dialog = QtWidgets.QDialog()\n ui = Ui_Dialog()\n ui.setupUi(Dialog)\n Dialog.show()\n sys.exit(app.exec_())\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'meet.ui'\n#\n# Created by: PyQt5 UI code generator 5.8.2\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_Dialog(object):\n def setupUi(self, Dialog):\n Dialog.setObjectName(\"Dialog\")\n Dialog.resize(607, 723)\n self.start = QtWidgets.QLabel(Dialog)\n self.start.setGeometry(QtCore.QRect(10, 70, 59, 24))\n self.start.setObjectName(\"start\")\n self.startDate = QtWidgets.QDateEdit(Dialog)\n self.startDate.setGeometry(QtCore.QRect(70, 70, 110, 24))\n self.startDate.setDate(QtCore.QDate(2017, 1, 1))\n self.startDate.setObjectName(\"startDate\")\n self.end = QtWidgets.QLabel(Dialog)\n self.end.setGeometry(QtCore.QRect(190, 70, 81, 24))\n self.end.setObjectName(\"end\")\n self.endDate = QtWidgets.QDateEdit(Dialog)\n self.endDate.setGeometry(QtCore.QRect(270, 70, 110, 24))\n self.endDate.setDate(QtCore.QDate(2017, 1, 1))\n self.endDate.setObjectName(\"endDate\")\n self.name = QtWidgets.QLabel(Dialog)\n self.name.setGeometry(QtCore.QRect(10, 10, 59, 24))\n self.name.setObjectName(\"name\")\n self.nameEdit = QtWidgets.QLineEdit(Dialog)\n self.nameEdit.setGeometry(QtCore.QRect(80, 10, 511, 24))\n self.nameEdit.setObjectName(\"nameEdit\")\n self.athletes = QtWidgets.QLabel(Dialog)\n self.athletes.setGeometry(QtCore.QRect(10, 130, 141, 16))\n self.athletes.setObjectName(\"athletes\")\n self.addButton = QtWidgets.QPushButton(Dialog)\n self.addButton.setGeometry(QtCore.QRect(285, 220, 31, 24))\n self.addButton.setObjectName(\"addButton\")\n self.removeButton = QtWidgets.QPushButton(Dialog)\n self.removeButton.setGeometry(QtCore.QRect(285, 260, 31, 24))\n self.removeButton.setObjectName(\"removeButton\")\n self.members = QtWidgets.QLabel(Dialog)\n self.members.setGeometry(QtCore.QRect(325, 130, 131, 16))\n self.members.setObjectName(\"members\")\n self.meetCount = QtWidgets.QLabel(Dialog)\n self.meetCount.setGeometry(QtCore.QRect(390, 70, 121, 24))\n self.meetCount.setObjectName(\"meetCount\")\n self.meetCountEdit = QtWidgets.QLineEdit(Dialog)\n self.meetCountEdit.setGeometry(QtCore.QRect(510, 70, 81, 24))\n self.meetCountEdit.setObjectName(\"meetCountEdit\")\n self.sortitionButton = QtWidgets.QPushButton(Dialog)\n self.sortitionButton.setGeometry(QtCore.QRect(490, 360, 100, 24))\n self.sortitionButton.setObjectName(\"sortitionButton\")\n self.cancel = QtWidgets.QPushButton(Dialog)\n self.cancel.setGeometry(QtCore.QRect(492, 690, 100, 24))\n self.cancel.setObjectName(\"cancel\")\n self.athletesList = QtWidgets.QListWidget(Dialog)\n self.athletesList.setGeometry(QtCore.QRect(10, 150, 266, 201))\n self.athletesList.setObjectName(\"athletesList\")\n self.membersList = QtWidgets.QListWidget(Dialog)\n self.membersList.setGeometry(QtCore.QRect(325, 150, 266, 201))\n self.membersList.setObjectName(\"membersList\")\n self.city = QtWidgets.QLabel(Dialog)\n self.city.setGeometry(QtCore.QRect(10, 40, 131, 24))\n self.city.setObjectName(\"city\")\n self.cityEdit = QtWidgets.QLineEdit(Dialog)\n self.cityEdit.setGeometry(QtCore.QRect(140, 40, 451, 24))\n self.cityEdit.setObjectName(\"cityEdit\")\n self.main_referee = QtWidgets.QLabel(Dialog)\n self.main_referee.setGeometry(QtCore.QRect(10, 400, 101, 24))\n self.main_referee.setObjectName(\"main_referee\")\n self.main_clerk = QtWidgets.QLabel(Dialog)\n self.main_clerk.setGeometry(QtCore.QRect(10, 430, 131, 24))\n self.main_clerk.setObjectName(\"main_clerk\")\n self.mainrefCBox = QtWidgets.QComboBox(Dialog)\n 
self.mainrefCBox.setGeometry(QtCore.QRect(120, 400, 471, 24))\n self.mainrefCBox.setObjectName(\"mainrefCBox\")\n self.mainclerkCBox = QtWidgets.QComboBox(Dialog)\n self.mainclerkCBox.setGeometry(QtCore.QRect(140, 430, 451, 24))\n self.mainclerkCBox.setObjectName(\"mainclerkCBox\")\n self.refList = QtWidgets.QListWidget(Dialog)\n self.refList.setGeometry(QtCore.QRect(10, 480, 266, 201))\n self.refList.setObjectName(\"refList\")\n self.refereeList = QtWidgets.QLabel(Dialog)\n self.refereeList.setGeometry(QtCore.QRect(10, 460, 91, 16))\n self.refereeList.setObjectName(\"refereeList\")\n self.refColList = QtWidgets.QListWidget(Dialog)\n self.refColList.setGeometry(QtCore.QRect(325, 480, 266, 201))\n self.refColList.setObjectName(\"refColList\")\n self.refereeCol = QtWidgets.QLabel(Dialog)\n self.refereeCol.setGeometry(QtCore.QRect(325, 460, 141, 16))\n self.refereeCol.setObjectName(\"refereeCol\")\n self.raddButton = QtWidgets.QPushButton(Dialog)\n self.raddButton.setGeometry(QtCore.QRect(285, 560, 31, 24))\n self.raddButton.setObjectName(\"raddButton\")\n self.rremoveButton = QtWidgets.QPushButton(Dialog)\n self.rremoveButton.setGeometry(QtCore.QRect(285, 600, 31, 24))\n self.rremoveButton.setObjectName(\"rremoveButton\")\n self.wsortitionButton = QtWidgets.QPushButton(Dialog)\n self.wsortitionButton.setEnabled(True)\n self.wsortitionButton.setGeometry(QtCore.QRect(360, 690, 121, 24))\n self.wsortitionButton.setAutoDefault(True)\n self.wsortitionButton.setDefault(False)\n self.wsortitionButton.setFlat(False)\n self.wsortitionButton.setObjectName(\"wsortitionButton\")\n self.divrings = QtWidgets.QCheckBox(Dialog)\n self.divrings.setGeometry(QtCore.QRect(390, 100, 201, 24))\n self.divrings.setObjectName(\"divrings\")\n self.weightcatCBox = QtWidgets.QComboBox(Dialog)\n self.weightcatCBox.setGeometry(QtCore.QRect(150, 100, 231, 24))\n self.weightcatCBox.setObjectName(\"weightcatCBox\")\n self.weigthcat = QtWidgets.QLabel(Dialog)\n self.weigthcat.setGeometry(QtCore.QRect(10, 100, 131, 24))\n self.weigthcat.setObjectName(\"weigthcat\")\n self.round = QtWidgets.QLabel(Dialog)\n self.round.setGeometry(QtCore.QRect(220, 130, 61, 16))\n self.round.setObjectName(\"round\")\n self.stage = QtWidgets.QLabel(Dialog)\n self.stage.setGeometry(QtCore.QRect(490, 130, 101, 16))\n self.stage.setObjectName(\"stage\")\n\n self.retranslateUi(Dialog)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n Dialog.setTabOrder(self.nameEdit, self.cityEdit)\n Dialog.setTabOrder(self.cityEdit, self.startDate)\n Dialog.setTabOrder(self.startDate, self.endDate)\n Dialog.setTabOrder(self.endDate, self.meetCountEdit)\n Dialog.setTabOrder(self.meetCountEdit, self.weightcatCBox)\n Dialog.setTabOrder(self.weightcatCBox, self.divrings)\n Dialog.setTabOrder(self.divrings, self.athletesList)\n Dialog.setTabOrder(self.athletesList, self.addButton)\n Dialog.setTabOrder(self.addButton, self.removeButton)\n Dialog.setTabOrder(self.removeButton, self.membersList)\n Dialog.setTabOrder(self.membersList, self.sortitionButton)\n Dialog.setTabOrder(self.sortitionButton, self.mainrefCBox)\n Dialog.setTabOrder(self.mainrefCBox, self.mainclerkCBox)\n Dialog.setTabOrder(self.mainclerkCBox, self.refList)\n Dialog.setTabOrder(self.refList, self.raddButton)\n Dialog.setTabOrder(self.raddButton, self.rremoveButton)\n Dialog.setTabOrder(self.rremoveButton, self.refColList)\n Dialog.setTabOrder(self.refColList, self.wsortitionButton)\n Dialog.setTabOrder(self.wsortitionButton, self.cancel)\n\n def retranslateUi(self, Dialog):\n _translate = 
QtCore.QCoreApplication.translate\n Dialog.setWindowTitle(_translate(\"Dialog\", \"Создание соревнования\"))\n self.start.setText(_translate(\"Dialog\", \"Начало\"))\n self.startDate.setDisplayFormat(_translate(\"Dialog\", \"dd.MM.yyyy\"))\n self.end.setText(_translate(\"Dialog\", \"Окончание\"))\n self.endDate.setDisplayFormat(_translate(\"Dialog\", \"dd.MM.yyyy\"))\n self.name.setText(_translate(\"Dialog\", \"Название\"))\n self.athletes.setText(_translate(\"Dialog\", \"Список спортсменов\"))\n self.addButton.setText(_translate(\"Dialog\", \">>\"))\n self.removeButton.setText(_translate(\"Dialog\", \"<<\"))\n self.members.setText(_translate(\"Dialog\", \"Список участников\"))\n self.meetCount.setText(_translate(\"Dialog\", \"Число боев в день\"))\n self.sortitionButton.setText(_translate(\"Dialog\", \"Жеребьевка\"))\n self.cancel.setText(_translate(\"Dialog\", \"Отмена\"))\n self.city.setText(_translate(\"Dialog\", \"Место проведения\"))\n self.main_referee.setText(_translate(\"Dialog\", \"Главный судья\"))\n self.main_clerk.setText(_translate(\"Dialog\", \"Главный секретарь\"))\n self.refereeList.setText(_translate(\"Dialog\", \"Список судей\"))\n self.refereeCol.setText(_translate(\"Dialog\", \"Судейская коллегия\"))\n self.raddButton.setText(_translate(\"Dialog\", \">>\"))\n self.rremoveButton.setText(_translate(\"Dialog\", \"<<\"))\n self.wsortitionButton.setText(_translate(\"Dialog\", \"Без жеребьевки\"))\n self.divrings.setText(_translate(\"Dialog\", \"Разбивать по рингам\"))\n self.weigthcat.setText(_translate(\"Dialog\", \"Весовая категория\"))\n self.round.setText(_translate(\"Dialog\", \"раунд\"))\n self.stage.setText(_translate(\"Dialog\", \"стадия\"))\n\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n Dialog = QtWidgets.QDialog()\n ui = Ui_Dialog()\n ui.setupUi(Dialog)\n Dialog.show()\n sys.exit(app.exec_())\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import sys
import networkx as nx
import bcube.generator_bcube as bcube
import dcell.generate_dcell as dcell
import fat_tree.generate_fat_tree as fatTree
import cayley_graphs.generate_bubble_sort as bubbleSort
import cayley_graphs.generate_hypercube as hypercube
import cayley_graphs.generate_pancake as pancake
import cayley_graphs.generate_transposition as transposition
import cayley_graphs.generate_star as star
import cayley_graphs.generate_butterfly as butterfly
import slim_fly.generate_slim_fly as slimFly
graph_name = sys.argv[1]
p = int(sys.argv[2])
q = int(sys.argv[3])
path = 'temp/' + sys.argv[4]
# integers q, p
if graph_name == "bcube":
    # constraints: q < p and q < 4
    G = bcube.generate_bcube(q, p)
elif graph_name == "dcell":
    # constraints: q < p and q < 4
    G = dcell.generate_dcell(p, q)
elif graph_name == "fat_tree":
    # integer p
    # constraint: p must be even
G = fatTree.generate_fat_tree(p)
elif graph_name == "bubble_sort":
G = bubbleSort.generate_bubble_sort(p)
elif graph_name == "hypercube":
G = hypercube.generate_hypercube(p)
elif graph_name == "pancake":
G = pancake.generate_pancake_graph(p)
elif graph_name == "transposition":
G = transposition.generate_transposition_graph(p)
elif graph_name == "star":
G = star.generate_star_graph(p)
elif graph_name == "butterfly":
G = butterfly.generate_butterfly(p)
elif graph_name == "slim_fly":
# p = 5,7,11,17,19,25,29,35,43,47,55,79
G = slimFly.generate_slim_fly(p)
edges = G.edges()
# print(G.nodes(data=True))
H = nx.from_edgelist(edges)
# changing the color of a node:
# H.node[1]['co'] = 'red'
nodes = len(H.nodes(data=True))
print(nodes)
nx.write_graphml(H, path + str(nodes))
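# Example invocation (script and prefix names are illustrative; the script
# expects: graph name, p, q, and an output-file prefix under temp/):
#   python generate_topology.py hypercube 4 0 topo_
# Assuming generate_hypercube(4) builds the 4-dimensional hypercube, this
# writes the 16-node graph to temp/topo_16 in GraphML format.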
|
normal
|
{
"blob_id": "12d59697d5c2ec69d019c64dac762385c8c0cb66",
"index": 7224,
"step-1": "import sys\nimport networkx as nx\nimport bcube.generator_bcube as bcube\nimport dcell.generate_dcell as dcell\nimport fat_tree.generate_fat_tree as fatTree\nimport cayley_graphs.generate_bubble_sort as bubbleSort\nimport cayley_graphs.generate_hypercube as hypercube\nimport cayley_graphs.generate_pancake as pancake\nimport cayley_graphs.generate_transposition as transposition\nimport cayley_graphs.generate_star as star\nimport cayley_graphs.generate_butterfly as butterfly\nimport slim_fly.generate_slim_fly as slimFly\n\ngraph_name = sys.argv[1]\np = int(sys.argv[2])\nq = int(sys.argv[3])\npath = 'temp/' + sys.argv[4]\n#integers q, p\nif graph_name == \"bcube\":\n #constrains q < p and q < 4\n G = bcube.generate_bcube(q,p)\nelif graph_name == \"dcell\":\n #constrains q < p and q < 4\n G = dcell.generate_dcell(p,q)\nelif graph_name == \"fat_tree\":\n #integer p\n #constrains p must be even\n G = fatTree.generate_fat_tree(p)\nelif graph_name == \"bubble_sort\":\n G = bubbleSort.generate_bubble_sort(p)\nelif graph_name == \"hypercube\":\n G = hypercube.generate_hypercube(p)\nelif graph_name == \"pancake\":\n G = pancake.generate_pancake_graph(p)\nelif graph_name == \"transposition\":\n G = transposition.generate_transposition_graph(p)\nelif graph_name == \"star\":\n G = star.generate_star_graph(p)\nelif graph_name == \"butterfly\":\n G = butterfly.generate_butterfly(p)\nelif graph_name == \"slim_fly\":\n # p = 5,7,11,17,19,25,29,35,43,47,55,79\n G = slimFly.generate_slim_fly(p)\n\nedges = G.edges()\n#print G.nodes(data=True)\nH = nx.from_edgelist(edges)\n#changing color of nodes\n#H.node[1]['co']='red'\nnodes = len(H.nodes(data=True))\nprint nodes\nnx.write_graphml(H, path + str(nodes))",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding:utf-8 -*-
#
from django.core.paginator import Paginator, EmptyPage
def pagination(request, queryset, display_amount=15, after_range_num=5, bevor_range_num=4):
    # Paginate the queryset with the given page size
    paginator = Paginator(queryset, display_amount)
    try:
        # Read the 'page' parameter from the request
        page = int(request.GET['page'])
    except:
        # Default to the first page
        page = 1
    try:
        # Try to fetch the requested page
        objects = paginator.page(page)
    # If the page number is out of range
    except EmptyPage:
        # Fall back to the last page
        objects = paginator.page(paginator.num_pages)
    # If the page number is not an integer
    except:
        # Fall back to the first page
        objects = paginator.page(1)
    # Build the navigation range around the current page
    if page >= after_range_num:
        page_range = paginator.page_range[page-after_range_num:page+bevor_range_num]
    else:
        page_range = paginator.page_range[0:page+bevor_range_num]
    return objects, page_range
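# Usage sketch (hypothetical view; 'Article' is an assumed model):
# def article_list(request):
#     articles, page_range = pagination(request, Article.objects.all())
#     return render(request, 'articles.html',
#                   {'articles': articles, 'page_range': page_range})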
|
normal
|
{
"blob_id": "7a2b33d1763e66335c6a72a35082e20725cab03d",
"index": 3318,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef pagination(request, queryset, display_amount=15, after_range_num=5,\n bevor_range_num=4):\n paginator = Paginator(queryset, display_amount)\n try:\n page = int(request.GET['page'])\n except:\n page = 1\n try:\n objects = paginator.page(page)\n except paginator.EmptyPage:\n objects = paginator.page(paginator.num_pages)\n except:\n objects = paginator.page(1)\n if page >= after_range_num:\n page_range = paginator.page_range[page - after_range_num:page +\n bevor_range_num]\n else:\n page_range = paginator.page_range[0:page + bevor_range_num]\n return objects, page_range\n",
"step-3": "from django.core.paginator import Paginator\n\n\ndef pagination(request, queryset, display_amount=15, after_range_num=5,\n bevor_range_num=4):\n paginator = Paginator(queryset, display_amount)\n try:\n page = int(request.GET['page'])\n except:\n page = 1\n try:\n objects = paginator.page(page)\n except paginator.EmptyPage:\n objects = paginator.page(paginator.num_pages)\n except:\n objects = paginator.page(1)\n if page >= after_range_num:\n page_range = paginator.page_range[page - after_range_num:page +\n bevor_range_num]\n else:\n page_range = paginator.page_range[0:page + bevor_range_num]\n return objects, page_range\n",
"step-4": "# -*- coding:utf-8 -*-\n#\nfrom django.core.paginator import Paginator\n\ndef pagination(request, queryset, display_amount=15, after_range_num=5, bevor_range_num=4):\n # 按参数分页\n paginator = Paginator(queryset, display_amount)\n try:\n # 得到request中的page参数\n page = int(request.GET['page'])\n except:\n # 默认为1\n page = 1\n try:\n # 尝试获得分页列表\n objects = paginator.page(page)\n # 如果页数不存在\n except paginator.EmptyPage:\n # 获得最后一页\n objects = paginator.page(paginator.num_pages)\n # 如果不是一个整数\n except:\n # 获得第一页\n objects = paginator.page(1)\n # 根据参数配置导航显示范围\n if page >= after_range_num:\n page_range = paginator.page_range[page-after_range_num:page+bevor_range_num]\n else:\n page_range = paginator.page_range[0:page+bevor_range_num]\n return objects, page_range\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import oneflow as flow
import torch
def convert_torch_to_flow(model, torch_weight_path, save_path):
parameters = torch.load(torch_weight_path)
new_parameters = dict()
for key, value in parameters.items():
if "num_batches_tracked" not in key:
val = value.detach().cpu().numpy()
new_parameters[key] = val
model.load_state_dict(new_parameters)
flow.save(model.state_dict(), save_path)
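# Usage sketch (assumed model and paths): the flow model must expose the same
# parameter names as the torch checkpoint for load_state_dict to succeed.
# from flowvision.models import resnet50  # assumed oneflow model zoo
# model = resnet50()
# convert_torch_to_flow(model, 'resnet50_torch.pth', 'resnet50_flow_dir')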
|
normal
|
{
"blob_id": "8a3cf65550893367b9001369111fa19a3e998d82",
"index": 9589,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef convert_torch_to_flow(model, torch_weight_path, save_path):\n parameters = torch.load(torch_weight_path)\n new_parameters = dict()\n for key, value in parameters.items():\n if 'num_batches_tracked' not in key:\n val = value.detach().cpu().numpy()\n new_parameters[key] = val\n model.load_state_dict(new_parameters)\n flow.save(model.state_dict(), save_path)\n",
"step-3": "import oneflow as flow\nimport torch\n\n\ndef convert_torch_to_flow(model, torch_weight_path, save_path):\n parameters = torch.load(torch_weight_path)\n new_parameters = dict()\n for key, value in parameters.items():\n if 'num_batches_tracked' not in key:\n val = value.detach().cpu().numpy()\n new_parameters[key] = val\n model.load_state_dict(new_parameters)\n flow.save(model.state_dict(), save_path)\n",
"step-4": "import oneflow as flow\nimport torch\n\ndef convert_torch_to_flow(model, torch_weight_path, save_path):\n parameters = torch.load(torch_weight_path)\n new_parameters = dict()\n for key, value in parameters.items():\n if \"num_batches_tracked\" not in key:\n val = value.detach().cpu().numpy()\n new_parameters[key] = val\n model.load_state_dict(new_parameters)\n flow.save(model.state_dict(), save_path)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""byte - property model module."""
from __future__ import absolute_import, division, print_function
class BaseProperty(object):
"""Base class for properties."""
def get(self, obj):
"""Get property value from object.
:param obj: Item
:type obj: byte.model.Model
"""
raise NotImplementedError
def set(self, obj, value):
"""Set property value on object.
:param obj: Item
:type obj: byte.model.Model
:param value: Value
:type value: any
"""
raise NotImplementedError
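class AttributeProperty(BaseProperty):
    """Illustrative sketch (not part of the original module): backs the
    property with a plain attribute on the item."""
    def __init__(self, name):
        self.name = name
    def get(self, obj):
        return getattr(obj, self.name, None)
    def set(self, obj, value):
        setattr(obj, self.name, value)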
|
normal
|
{
"blob_id": "382f7119beba81087c497baf170eb6814c26c03e",
"index": 5458,
"step-1": "<mask token>\n\n\nclass BaseProperty(object):\n <mask token>\n <mask token>\n\n def set(self, obj, value):\n \"\"\"Set property value on object.\n\n :param obj: Item\n :type obj: byte.model.Model\n\n :param value: Value\n :type value: any\n \"\"\"\n raise NotImplementedError\n",
"step-2": "<mask token>\n\n\nclass BaseProperty(object):\n <mask token>\n\n def get(self, obj):\n \"\"\"Get property value from object.\n\n :param obj: Item\n :type obj: byte.model.Model\n \"\"\"\n raise NotImplementedError\n\n def set(self, obj, value):\n \"\"\"Set property value on object.\n\n :param obj: Item\n :type obj: byte.model.Model\n\n :param value: Value\n :type value: any\n \"\"\"\n raise NotImplementedError\n",
"step-3": "<mask token>\n\n\nclass BaseProperty(object):\n \"\"\"Base class for properties.\"\"\"\n\n def get(self, obj):\n \"\"\"Get property value from object.\n\n :param obj: Item\n :type obj: byte.model.Model\n \"\"\"\n raise NotImplementedError\n\n def set(self, obj, value):\n \"\"\"Set property value on object.\n\n :param obj: Item\n :type obj: byte.model.Model\n\n :param value: Value\n :type value: any\n \"\"\"\n raise NotImplementedError\n",
"step-4": "<mask token>\nfrom __future__ import absolute_import, division, print_function\n\n\nclass BaseProperty(object):\n \"\"\"Base class for properties.\"\"\"\n\n def get(self, obj):\n \"\"\"Get property value from object.\n\n :param obj: Item\n :type obj: byte.model.Model\n \"\"\"\n raise NotImplementedError\n\n def set(self, obj, value):\n \"\"\"Set property value on object.\n\n :param obj: Item\n :type obj: byte.model.Model\n\n :param value: Value\n :type value: any\n \"\"\"\n raise NotImplementedError\n",
"step-5": "\"\"\"byte - property model module.\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\n\nclass BaseProperty(object):\n \"\"\"Base class for properties.\"\"\"\n\n def get(self, obj):\n \"\"\"Get property value from object.\n\n :param obj: Item\n :type obj: byte.model.Model\n \"\"\"\n raise NotImplementedError\n\n def set(self, obj, value):\n \"\"\"Set property value on object.\n\n :param obj: Item\n :type obj: byte.model.Model\n\n :param value: Value\n :type value: any\n \"\"\"\n raise NotImplementedError\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File: software/jetson/fastmot/utils/sot.py
# By: Samuel Duclos
# For: Myself
# Description: This file returns detection results from an image.
from cvlib.object_detection import draw_bbox
class ObjectCenter(object):
def __init__(self, args):
"""Initialize variables."""
self.args = args
def load_classes(self, path):
with open(path, 'r') as names_file:
names = names_file.read().split('\n')
return list(filter(None, names))
def _filter_(self, frame, predictions):
"""Apply object detection."""
if not self.args.no_filter_object_category:
names = self.load_classes(self.args.names)
object_category = names.index(self.args.object_category)
predictions = self.filter_inference_results(predictions,
object_category=object_category)
return predictions
def filter_inference_results(self, predictions, object_category='person'):
"""Return bounding box of biggest object of selected category."""
if predictions is not None and len(predictions) == 3:
bboxes, labels, confs = predictions
# Only return bounding boxes for the selected object category.
category_bboxes = [(bbox,
label,
conf) for (bbox,
label,
conf) in zip(bboxes,
labels,
confs) if (label == object_category).any()]
if len(category_bboxes) > 0:
# Choose biggest object of selected category.
biggest_bbox = None
biggest_label = None
biggest_conf = None
most_pixels = 0
for (bbox, label, conf) in category_bboxes:
(x, y, w, h) = bbox
n_pixels = w * h
if n_pixels > most_pixels:
most_pixels = n_pixels
biggest_bbox = bbox
biggest_label = label
biggest_conf = conf
category_bboxes = ([biggest_bbox], [biggest_label], [biggest_conf])
predictions = category_bboxes
return predictions
def update(self, predictions, frame, frameCenter):
"""Asynchronous update of detection results to return object center."""
if len(predictions) > 0:
(x, y, w, h) = predictions[0][0]
objectX = int(x + (w / 2.0))
objectY = int(y + (h / 2.0))
return ((objectX, objectY), predictions)
else:
return (frameCenter, None)
def filter_objects(self, frame, predictions, object_x=None, object_y=None, center_x=None, center_y=None):
"""Apply object detection."""
predictions = self._filter_(frame, predictions)
if predictions is not None and len(predictions) > 0:
if predictions[0][0] is not None and len(predictions) == 3:
                bboxes, labels, confs = predictions
# Calculate the center of the frame since we will be trying to keep the object there.
(H, W) = frame.shape[:2]
center_x.value = W // 2
center_y.value = H // 2
object_location = self.update(predictions, frame, (center_x.value, center_y.value))
((object_x.value, object_y.value), predictions) = object_location
if self.args.no_show:
return None
else:
# Draw bounding box over detected objects.
                inferred_image = draw_bbox(frame, bboxes, labels, confs, write_conf=True)
return inferred_image
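# Wiring sketch (assumed detector and shared state): cvlib's
# detect_common_objects returns (bboxes, labels, confs), the 'predictions'
# shape expected above, and the shared scalars come from multiprocessing.
# import cvlib
# from multiprocessing import Value
# oc = ObjectCenter(args)  # 'args' parsed elsewhere
# predictions = cvlib.detect_common_objects(frame)
# out = oc.filter_objects(frame, predictions,
#                         object_x=Value('i', 0), object_y=Value('i', 0),
#                         center_x=Value('i', 0), center_y=Value('i', 0))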
|
normal
|
{
"blob_id": "8f14bbab8b2a4bc0758c6b48feb20f8b0e3e348b",
"index": 5460,
"step-1": "<mask token>\n\n\nclass ObjectCenter(object):\n\n def __init__(self, args):\n \"\"\"Initialize variables.\"\"\"\n self.args = args\n\n def load_classes(self, path):\n with open(path, 'r') as names_file:\n names = names_file.read().split('\\n')\n return list(filter(None, names))\n <mask token>\n <mask token>\n <mask token>\n\n def filter_objects(self, frame, predictions, object_x=None, object_y=\n None, center_x=None, center_y=None):\n \"\"\"Apply object detection.\"\"\"\n predictions = self._filter_(frame, predictions)\n if predictions is not None and len(predictions) > 0:\n if predictions[0][0] is not None and len(predictions) == 3:\n bbox, label, conf = predictions[0][0]\n H, W = frame.shape[:2]\n center_x.value = W // 2\n center_y.value = H // 2\n object_location = self.update(predictions, frame, (center_x\n .value, center_y.value))\n (object_x.value, object_y.value), predictions = object_location\n if self.args.no_show:\n return None\n else:\n inferred_image = draw_bbox(frame, bbox, label, conf,\n write_conf=True)\n return inferred_image\n",
"step-2": "<mask token>\n\n\nclass ObjectCenter(object):\n\n def __init__(self, args):\n \"\"\"Initialize variables.\"\"\"\n self.args = args\n\n def load_classes(self, path):\n with open(path, 'r') as names_file:\n names = names_file.read().split('\\n')\n return list(filter(None, names))\n <mask token>\n <mask token>\n\n def update(self, predictions, frame, frameCenter):\n \"\"\"Asynchronous update of detection results to return object center.\"\"\"\n if len(predictions) > 0:\n x, y, w, h = predictions[0][0]\n objectX = int(x + w / 2.0)\n objectY = int(y + h / 2.0)\n return (objectX, objectY), predictions\n else:\n return frameCenter, None\n\n def filter_objects(self, frame, predictions, object_x=None, object_y=\n None, center_x=None, center_y=None):\n \"\"\"Apply object detection.\"\"\"\n predictions = self._filter_(frame, predictions)\n if predictions is not None and len(predictions) > 0:\n if predictions[0][0] is not None and len(predictions) == 3:\n bbox, label, conf = predictions[0][0]\n H, W = frame.shape[:2]\n center_x.value = W // 2\n center_y.value = H // 2\n object_location = self.update(predictions, frame, (center_x\n .value, center_y.value))\n (object_x.value, object_y.value), predictions = object_location\n if self.args.no_show:\n return None\n else:\n inferred_image = draw_bbox(frame, bbox, label, conf,\n write_conf=True)\n return inferred_image\n",
"step-3": "<mask token>\n\n\nclass ObjectCenter(object):\n\n def __init__(self, args):\n \"\"\"Initialize variables.\"\"\"\n self.args = args\n\n def load_classes(self, path):\n with open(path, 'r') as names_file:\n names = names_file.read().split('\\n')\n return list(filter(None, names))\n\n def _filter_(self, frame, predictions):\n \"\"\"Apply object detection.\"\"\"\n if not self.args.no_filter_object_category:\n names = self.load_classes(self.args.names)\n object_category = names.index(self.args.object_category)\n predictions = self.filter_inference_results(predictions,\n object_category=object_category)\n return predictions\n <mask token>\n\n def update(self, predictions, frame, frameCenter):\n \"\"\"Asynchronous update of detection results to return object center.\"\"\"\n if len(predictions) > 0:\n x, y, w, h = predictions[0][0]\n objectX = int(x + w / 2.0)\n objectY = int(y + h / 2.0)\n return (objectX, objectY), predictions\n else:\n return frameCenter, None\n\n def filter_objects(self, frame, predictions, object_x=None, object_y=\n None, center_x=None, center_y=None):\n \"\"\"Apply object detection.\"\"\"\n predictions = self._filter_(frame, predictions)\n if predictions is not None and len(predictions) > 0:\n if predictions[0][0] is not None and len(predictions) == 3:\n bbox, label, conf = predictions[0][0]\n H, W = frame.shape[:2]\n center_x.value = W // 2\n center_y.value = H // 2\n object_location = self.update(predictions, frame, (center_x\n .value, center_y.value))\n (object_x.value, object_y.value), predictions = object_location\n if self.args.no_show:\n return None\n else:\n inferred_image = draw_bbox(frame, bbox, label, conf,\n write_conf=True)\n return inferred_image\n",
"step-4": "from cvlib.object_detection import draw_bbox\n\n\nclass ObjectCenter(object):\n\n def __init__(self, args):\n \"\"\"Initialize variables.\"\"\"\n self.args = args\n\n def load_classes(self, path):\n with open(path, 'r') as names_file:\n names = names_file.read().split('\\n')\n return list(filter(None, names))\n\n def _filter_(self, frame, predictions):\n \"\"\"Apply object detection.\"\"\"\n if not self.args.no_filter_object_category:\n names = self.load_classes(self.args.names)\n object_category = names.index(self.args.object_category)\n predictions = self.filter_inference_results(predictions,\n object_category=object_category)\n return predictions\n\n def filter_inference_results(self, predictions, object_category='person'):\n \"\"\"Return bounding box of biggest object of selected category.\"\"\"\n if predictions is not None and len(predictions) == 3:\n bboxes, labels, confs = predictions\n category_bboxes = [(bbox, label, conf) for bbox, label, conf in\n zip(bboxes, labels, confs) if (label == object_category).any()]\n if len(category_bboxes) > 0:\n biggest_bbox = None\n biggest_label = None\n biggest_conf = None\n most_pixels = 0\n for bbox, label, conf in category_bboxes:\n x, y, w, h = bbox\n n_pixels = w * h\n if n_pixels > most_pixels:\n most_pixels = n_pixels\n biggest_bbox = bbox\n biggest_label = label\n biggest_conf = conf\n category_bboxes = [biggest_bbox], [biggest_label], [\n biggest_conf]\n predictions = category_bboxes\n return predictions\n\n def update(self, predictions, frame, frameCenter):\n \"\"\"Asynchronous update of detection results to return object center.\"\"\"\n if len(predictions) > 0:\n x, y, w, h = predictions[0][0]\n objectX = int(x + w / 2.0)\n objectY = int(y + h / 2.0)\n return (objectX, objectY), predictions\n else:\n return frameCenter, None\n\n def filter_objects(self, frame, predictions, object_x=None, object_y=\n None, center_x=None, center_y=None):\n \"\"\"Apply object detection.\"\"\"\n predictions = self._filter_(frame, predictions)\n if predictions is not None and len(predictions) > 0:\n if predictions[0][0] is not None and len(predictions) == 3:\n bbox, label, conf = predictions[0][0]\n H, W = frame.shape[:2]\n center_x.value = W // 2\n center_y.value = H // 2\n object_location = self.update(predictions, frame, (center_x\n .value, center_y.value))\n (object_x.value, object_y.value), predictions = object_location\n if self.args.no_show:\n return None\n else:\n inferred_image = draw_bbox(frame, bbox, label, conf,\n write_conf=True)\n return inferred_image\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# File: software/jetson/fastmot/utils/sot.py\n# By: Samuel Duclos\n# For: Myself\n# Description: This file returns detection results from an image.\n\nfrom cvlib.object_detection import draw_bbox\n\nclass ObjectCenter(object):\n def __init__(self, args):\n \"\"\"Initialize variables.\"\"\"\n self.args = args\n\n def load_classes(self, path):\n with open(path, 'r') as names_file:\n names = names_file.read().split('\\n')\n return list(filter(None, names))\n\n def _filter_(self, frame, predictions):\n \"\"\"Apply object detection.\"\"\"\n\n if not self.args.no_filter_object_category:\n names = self.load_classes(self.args.names)\n object_category = names.index(self.args.object_category)\n predictions = self.filter_inference_results(predictions, \n object_category=object_category)\n\n return predictions\n\n def filter_inference_results(self, predictions, object_category='person'):\n \"\"\"Return bounding box of biggest object of selected category.\"\"\"\n if predictions is not None and len(predictions) == 3:\n bboxes, labels, confs = predictions\n\n # Only return bounding boxes for the selected object category.\n category_bboxes = [(bbox, \n label, \n conf) for (bbox, \n label, \n conf) in zip(bboxes, \n labels, \n confs) if (label == object_category).any()]\n\n if len(category_bboxes) > 0:\n # Choose biggest object of selected category.\n biggest_bbox = None\n biggest_label = None\n biggest_conf = None\n most_pixels = 0\n\n for (bbox, label, conf) in category_bboxes:\n (x, y, w, h) = bbox\n n_pixels = w * h\n\n if n_pixels > most_pixels:\n most_pixels = n_pixels\n biggest_bbox = bbox\n biggest_label = label\n biggest_conf = conf\n\n category_bboxes = ([biggest_bbox], [biggest_label], [biggest_conf])\n\n predictions = category_bboxes\n\n return predictions\n\n def update(self, predictions, frame, frameCenter):\n \"\"\"Asynchronous update of detection results to return object center.\"\"\"\n if len(predictions) > 0:\n (x, y, w, h) = predictions[0][0]\n objectX = int(x + (w / 2.0))\n objectY = int(y + (h / 2.0))\n return ((objectX, objectY), predictions)\n\n else:\n return (frameCenter, None)\n\n def filter_objects(self, frame, predictions, object_x=None, object_y=None, center_x=None, center_y=None):\n \"\"\"Apply object detection.\"\"\"\n\n predictions = self._filter_(frame, predictions)\n\n if predictions is not None and len(predictions) > 0:\n if predictions[0][0] is not None and len(predictions) == 3:\n bbox, label, conf = predictions[0][0]\n\n # Calculate the center of the frame since we will be trying to keep the object there.\n (H, W) = frame.shape[:2]\n center_x.value = W // 2\n center_y.value = H // 2\n\n object_location = self.update(predictions, frame, (center_x.value, center_y.value))\n ((object_x.value, object_y.value), predictions) = object_location\n\n if self.args.no_show:\n return None\n\n else:\n # Draw bounding box over detected objects.\n inferred_image = draw_bbox(frame, bbox, label, conf, write_conf=True)\n return inferred_image\n",
"step-ids": [
4,
5,
6,
8,
9
]
}
|
[
4,
5,
6,
8,
9
] |
#!/usr/bin/env conda-execute
# conda execute
# env:
# - python >=3
# - requests
# run_with: python
from configparser import NoOptionError
from configparser import SafeConfigParser
import argparse
import base64
import inspect
import ipaddress
import json
import logging
import logging.config
import os
import socket
import sys
import time
import requests
requests.packages.urllib3.disable_warnings()
""" McAfee ESM <=> ServiceNow
This script can be called as an alarm action on the McAfee ESM to send data
to ServiceNow via the API to create tickets. Optionally, ticket data is
transmitted back to the ESM via syslog and referenced as an event. The event
allows for contextual linking directly to the ticket from the ESM.
The script requires Python 3 and was tested with 3.5.2 for Windows and Linux.
Other modules, requests and configparser, are also required.
The script requires a config.ini file for the credentials. The filename and
path can be set from the command line.
An example config.ini is available at:
https://raw.githubusercontent.com/andywalden/mfe2snow/config.ini
Example:
$ python mfe2snow.py alarm="This is my alarm" severity="50"
This is intended to be called as an alarm action to Execute a Script. In the ESM,
go to System Properties | Profile Management | Remote Commands and add a profile for
"Create ServiceNow Ticket". The script can be called using any combination of fields and
values however 'alarm', 'eventdescription', 'severity', 'sourceip' and 'destip' are
mapped to ServiceNow fields. Remaining fields=values are mapped to SNOW field
"Additional Info".
This is an example of the script being called:
mfe2snow.py alarm="[$Alarm Name]" eventdescription="[$Rule Message]" severity="[$Average Severity]"
devicename="[$Device Name]" message_key="[$Event ID]" category="[$Normalized Rule]" sourceip="[$Source IP]"
destip="[$Destination IP]" sourceport="[$Source Port]" destport="[$Destination Port]" host="[$%HostID]"
domain="[$%DomainID]" command="[$%CommandID]" object="[$%ObjectID]" application="[$%AppID]"
deviceaction="[$%Device_Action]" targetuser="[$%UserIDDst]" threatcategory="[$%Threat_Category]"
threathandled="[$%Threat_Handled]" geosrc="[$Geolocation Source]" geodest="[$Geolocation Destination]"
The output is also written to a file that is overwritten each time the script is run.
Make sure the permissions on the config.ini file are secure as not to expose any credentials.
"""
__author__ = "Andy Walden"
__version__ = "1.2"
class Args(object):
"""
Handles any args and passes them back as a dict
"""
def __init__(self, args):
self.log_levels = ["quiet", "error", "warning", "info", "debug"]
self.formatter_class = argparse.RawDescriptionHelpFormatter
self.parser = argparse.ArgumentParser(
formatter_class=self.formatter_class,
description="Send McAfee ESM Alarm data to ServiceNow"
)
self.args = args
self.parser.add_argument("-v", "--version",
action="version",
help="Show version",
version="%(prog)s {}".format(__version__))
self.parser.add_argument("-l", "--level",
default=None, dest="level",
choices=self.log_levels, metavar='',
help="Logging output level. Default: warning")
self.parser.add_argument("-c", "--config",
default=None, dest="cfgfile", metavar='',
help="Path to config file. Default: config.ini")
self.parser.add_argument("fields", nargs='*', metavar='',
help="Key=Values for the query. Example: \n \
alarm=\"The milk has spilled\" sourceip=\"1.1.1.1\", destip=\"2.2.2.2\" \
The following keys are mapped to fields in SNOW: \
alarm - Description \
sourceip/destip - Node \
severity - Severity,
recordid = Message_Key")
self.pargs = self.parser.parse_args()
def get_args(self):
return self.pargs
class Config(object):
""" Creates object for provided configfile/section settings """
def __init__(self, filename, header):
config = SafeConfigParser()
cfgfile = config.read(filename)
if not cfgfile:
raise ValueError('Config file not found:', filename)
self.__dict__.update(config.items(header))
def logging_init():
filename = get_filename()
logfile = filename + ".log"
hostname = socket.gethostname()
formatter = logging.Formatter('%(asctime)s {} %(module)s: %(message)s'.format(hostname),
datefmt='%b %d %H:%M:%S')
logger = logging.getLogger()
fh = logging.FileHandler(logfile, mode='w')
fh.setFormatter(formatter)
logger.addHandler(fh)
ch = logging.StreamHandler()
ch.setFormatter(formatter)
logger.addHandler(ch)
def get_filename():
filename = (inspect.getfile(inspect.currentframe()).split("\\", -1)[-1]).rsplit(".", 1)[0]
return filename
class Syslog(object):
"""
Open TCP socket using supplied server IP and port.
Returns socket or None on failure
"""
def __init__(self,
server,
port=514):
logging.debug("Function: open_socket: %s: %s", server, port)
self.server = server
self.port = int(port)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.connect((self.server, self.port))
def send(self, data):
"""
Sends data to the established connection
"""
self.data = data
self.sock.sendall(data.encode())
logging.info("Syslog feedback sent")
class SNOW(object):
"""
Send to ServiceNow API
Initialize with host, user and passwd to create connection.
send() sends JSON query to SNOW.
"""
def __init__(self, host, user, passwd):
self.host = host
self.user = user
self.passwd = passwd
self.url = "https://" + host
self.auth_string = '{}'.format(base64.b64encode('{}:{}'
.format(user,passwd)
.encode('utf-8'))
.decode('ascii'))
self.headers = {'Authorization':'Basic '+ self.auth_string, 'Content-Type': 'application/json'}
def send(self, query_conf, uri_string):
"""
Sends URI method and JSON query string
Runs query and returns result object.
"""
self.query_conf = query_conf
self.uri_string = uri_string
result = requests.post(self.url + self.uri_string,
headers=self.headers,
data=query_conf, verify=False)
if result.status_code != 200:
logging.error("SNOW said: Status Code: %s, Headers: %s, \
Mesg: %s", result.status_code, result.headers,
result.json())
sys.exit(1)
return result
class Query(object):
"""
Returns JSON query from provided dict
"""
def __init__(self):
self.qconf = []
def create(self, **kwargs):
self.query_dict = kwargs
self.alarm = self.query_dict.pop('alarm', 'McAfee ESM Alarm')
self.node = self.query_dict.pop('node', '0.0.0.0')
self.severity = self.query_dict.pop('severity', '25')
self.id = self.query_dict.pop('id', "No key")
self.info = ", ".join(["=".join([key, str(val)])
for key, val in self.query_dict.items()])
self.qconf = {
"active" : "false",
"classification" : "1",
"description" : self.alarm,
"source" : "McAfee ESM",
"node" : self.node,
"type" : "Security" ,
"message_key" : "id",
"additional_info" : self.info,
"severity" : self.severity,
"state" : "Ready",
"sys_class_name" : "em_event",
"sys_created_by" : "mcafee.integration"
}
return(json.dumps(self.qconf))
def main():
""" Main function """
# Process any command line args
args = Args(sys.argv)
pargs = args.get_args()
logging_init()
if pargs.level:
logging.getLogger().setLevel(getattr(logging, pargs.level.upper()))
try:
fields = dict(x.split('=', 1) for x in pargs.fields)
except ValueError:
logging.error("Invalid input. Format is field=value")
sys.exit(1)
configfile = pargs.cfgfile if pargs.cfgfile else 'config.ini'
try:
c = Config(configfile, "DEFAULT")
except ValueError:
logging.error("Config file not found: %s", configfile)
sys.exit(1)
# Strip empty values
fields = {k:v for k,v in fields.items() if v is not None}
    # Figure out which IP should be 'node'. 'homenet' is assumed to be a
    # comma-separated list of internal subnets supplied in config.ini.
    homenet = [s.strip() for s in getattr(c, 'homenet', '').split(',') if s.strip()]
    destip = fields.get('destip', None)
    sourceip = fields.get('sourceip', None)
    if sourceip:
        fields['node'] = sourceip
        for subnet in homenet:
            if ipaddress.ip_address(sourceip) in ipaddress.ip_network(subnet):
                fields['node'] = sourceip
                break
            if destip and ipaddress.ip_address(destip) in ipaddress.ip_network(subnet):
                fields['node'] = destip
                break
# Check for severity in arguments. Map ESM severity (1-100) to SNOW (1-5)
s = int(fields.get('severity', 25))
if 90 <= s <= 100: fields['severity'] = 1 # Critical
if 75 <= s <= 89: fields['severity'] = 2 # Major
if 65 <= s <= 74: fields['severity'] = 3 # Minor
if 50 <= s <= 64: fields['severity'] = 4 # Warning
if 0 <= s <= 49: fields['severity'] = 5 # Info
try:
snowhost = SNOW(c.snowhost, c.snowuser, c.snowpass)
except AttributeError:
print("{} is missing a required field:".format(configfile))
        raise
new_ticket = Query()
new_ticket_q = new_ticket.create(**fields)
result = snowhost.send(new_ticket_q, '/api/now/table/em_event')
# Syslog feedback to ESM
try:
        syslog_host = c.sysloghost
        syslog_port = c.syslogport
        syslog = Syslog(syslog_host, syslog_port)
        syslog.send(result.text)
    except (NoOptionError, AttributeError):
logging.debug("Syslog feedback disabled. Settings not detected.")
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
logging.warning("Control-C Pressed, stopping...")
sys.exit()
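# Example config.ini (assumed keys; 'homenet' feeds the node-selection logic
# in main and is a comma-separated list of internal subnets):
# [DEFAULT]
# snowhost = myinstance.service-now.com
# snowuser = api.user
# snowpass = secret
# sysloghost = 10.0.0.5
# syslogport = 514
# homenet = 10.0.0.0/8, 192.168.0.0/16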
|
normal
|
{
"blob_id": "dd91ba13177aefacc24ef4a004acae0bffafadf0",
"index": 8889,
"step-1": "#!/usr/bin/env conda-execute\r\n\r\n# conda execute\r\n# env:\r\n# - python >=3\r\n# - requests\r\n# run_with: python\r\n\r\nfrom configparser import NoOptionError\r\nfrom configparser import SafeConfigParser\r\nimport argparse\r\nimport base64\r\nimport inspect\r\nimport ipaddress\r\nimport json\r\nimport logging\r\nimport logging.config\r\nimport os\r\nimport socket\r\nimport sys\r\nimport time\r\nimport requests\r\nrequests.packages.urllib3.disable_warnings()\r\n\r\n\"\"\" McAfee ESM <=> ServiceNow\r\n\r\nThis script can be called as an alarm action on the McAfee ESM to send data\r\nto ServiceNow via the API to create tickets. Optionally, ticket data is\r\ntransmitted back to the ESM via syslog and referenced as an event. The event\r\nallows for contextual linking directly to the ticket from the ESM.\r\n\r\nThe script requires Python 3 and was tested with 3.5.2 for Windows and Linux.\r\n\r\nOther modules, requests and configparser, are also required.\r\n\r\nThe script requires a config.ini file for the credentials. The filename and\r\npath can be set from the command line.\r\n\r\n\r\nAn example config.ini is available at:\r\nhttps://raw.githubusercontent.com/andywalden/mfe2snow/config.ini\r\n\r\nExample:\r\n\r\n $ python mfe2snow.py alarm=\"This is my alarm\" severity=\"50\"\r\n\r\nThis is intended to be called as an alarm action to Execute a Script. In the ESM,\r\ngo to System Properties | Profile Management | Remote Commands and add a profile for\r\n\"Create ServiceNow Ticket\". The script can be called using any combination of fields and\r\nvalues however 'alarm', 'eventdescription', 'severity', 'sourceip' and 'destip' are\r\nmapped to ServiceNow fields. Remaining fields=values are mapped to SNOW field\r\n\"Additional Info\".\r\n\r\nThis is an example of the script being called:\r\n\r\nmfe2snow.py alarm=\"[$Alarm Name]\" eventdescription=\"[$Rule Message]\" severity=\"[$Average Severity]\"\r\ndevicename=\"[$Device Name]\" message_key=\"[$Event ID]\" category=\"[$Normalized Rule]\" sourceip=\"[$Source IP]\"\r\ndestip=\"[$Destination IP]\" sourceport=\"[$Source Port]\" destport=\"[$Destination Port]\" host=\"[$%HostID]\"\r\ndomain=\"[$%DomainID]\" command=\"[$%CommandID]\" object=\"[$%ObjectID]\" application=\"[$%AppID]\"\r\ndeviceaction=\"[$%Device_Action]\" targetuser=\"[$%UserIDDst]\" threatcategory=\"[$%Threat_Category]\"\r\nthreathandled=\"[$%Threat_Handled]\" geosrc=\"[$Geolocation Source]\" geodest=\"[$Geolocation Destination]\"\r\n\r\nThe output is also written to a file that is overwritten each time the script is run.\r\n\r\nMake sure the permissions on the config.ini file are secure as not to expose any credentials.\r\n\r\n\"\"\"\r\n\r\n__author__ = \"Andy Walden\"\r\n__version__ = \"1.2\"\r\n\r\nclass Args(object):\r\n \"\"\"\r\n Handles any args and passes them back as a dict\r\n \"\"\"\r\n\r\n def __init__(self, args):\r\n self.log_levels = [\"quiet\", \"error\", \"warning\", \"info\", \"debug\"]\r\n self.formatter_class = argparse.RawDescriptionHelpFormatter\r\n self.parser = argparse.ArgumentParser(\r\n formatter_class=self.formatter_class,\r\n description=\"Send McAfee ESM Alarm data to ServiceNow\"\r\n )\r\n self.args = args\r\n\r\n self.parser.add_argument(\"-v\", \"--version\",\r\n action=\"version\",\r\n help=\"Show version\",\r\n version=\"%(prog)s {}\".format(__version__))\r\n\r\n self.parser.add_argument(\"-l\", \"--level\",\r\n default=None, dest=\"level\",\r\n choices=self.log_levels, metavar='',\r\n help=\"Logging output level. 
Default: warning\")\r\n\r\n self.parser.add_argument(\"-c\", \"--config\",\r\n default=None, dest=\"cfgfile\", metavar='',\r\n help=\"Path to config file. Default: config.ini\")\r\n\r\n self.parser.add_argument(\"fields\", nargs='*', metavar='',\r\n\r\n help=\"Key=Values for the query. Example: \\n \\\r\n alarm=\\\"The milk has spilled\\\" sourceip=\\\"1.1.1.1\\\", destip=\\\"2.2.2.2\\\" \\\r\n The following keys are mapped to fields in SNOW: \\\r\n alarm - Description \\\r\n sourceip/destip - Node \\\r\n severity - Severity,\r\n recordid = Message_Key\")\r\n\r\n self.pargs = self.parser.parse_args()\r\n\r\n def get_args(self):\r\n return self.pargs\r\n\r\n\r\nclass Config(object):\r\n \"\"\" Creates object for provided configfile/section settings \"\"\"\r\n\r\n def __init__(self, filename, header):\r\n config = SafeConfigParser()\r\n cfgfile = config.read(filename)\r\n if not cfgfile:\r\n raise ValueError('Config file not found:', filename)\r\n self.__dict__.update(config.items(header))\r\n\r\n\r\ndef logging_init():\r\n filename = get_filename()\r\n logfile = filename + \".log\"\r\n hostname = socket.gethostname()\r\n formatter = logging.Formatter('%(asctime)s {} %(module)s: %(message)s'.format(hostname),\r\n datefmt='%b %d %H:%M:%S')\r\n logger = logging.getLogger()\r\n fh = logging.FileHandler(logfile, mode='w')\r\n fh.setFormatter(formatter)\r\n logger.addHandler(fh)\r\n ch = logging.StreamHandler()\r\n ch.setFormatter(formatter)\r\n logger.addHandler(ch)\r\n\r\ndef get_filename():\r\n filename = (inspect.getfile(inspect.currentframe()).split(\"\\\\\", -1)[-1]).rsplit(\".\", 1)[0]\r\n return filename\r\n\r\n\r\nclass Syslog(object):\r\n \"\"\"\r\n Open TCP socket using supplied server IP and port.\r\n\r\n Returns socket or None on failure\r\n \"\"\"\r\n\r\n def __init__(self,\r\n server,\r\n port=514):\r\n logging.debug(\"Function: open_socket: %s: %s\", server, port)\r\n self.server = server\r\n self.port = int(port)\r\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n self.sock.connect((self.server, self.port))\r\n\r\n def send(self, data):\r\n \"\"\"\r\n Sends data to the established connection\r\n \"\"\"\r\n\r\n self.data = data\r\n self.sock.sendall(data.encode())\r\n logging.info(\"Syslog feedback sent\")\r\n\r\n\r\nclass SNOW(object):\r\n \"\"\"\r\n Send to ServiceNow API\r\n Initialize with host, user and passwd to create connection.\r\n send() sends JSON query to SNOW.\r\n \"\"\"\r\n\r\n def __init__(self, host, user, passwd):\r\n self.host = host\r\n self.user = user\r\n self.passwd = passwd\r\n self.url = \"https://\" + host\r\n\r\n self.auth_string = '{}'.format(base64.b64encode('{}:{}'\r\n .format(user,passwd)\r\n .encode('utf-8'))\r\n .decode('ascii'))\r\n\r\n self.headers = {'Authorization':'Basic '+ self.auth_string, 'Content-Type': 'application/json'}\r\n\r\n\r\n def send(self, query_conf, uri_string):\r\n \"\"\"\r\n Sends URI method and JSON query string\r\n Runs query and returns result object.\r\n \"\"\"\r\n\r\n self.query_conf = query_conf\r\n self.uri_string = uri_string\r\n result = requests.post(self.url + self.uri_string,\r\n headers=self.headers,\r\n data=query_conf, verify=False)\r\n\r\n if result.status_code != 200:\r\n logging.error(\"SNOW said: Status Code: %s, Headers: %s, \\\r\n Mesg: %s\", result.status_code, result.headers,\r\n result.json())\r\n sys.exit(1)\r\n return result\r\n\r\nclass Query(object):\r\n \"\"\"\r\n Returns JSON query from provided dict\r\n 
\"\"\"\r\n\r\n def __init__(self):\r\n self.qconf = []\r\n\r\n def create(self, **kwargs):\r\n self.query_dict = kwargs\r\n self.alarm = self.query_dict.pop('alarm', 'McAfee ESM Alarm')\r\n self.node = self.query_dict.pop('node', '0.0.0.0')\r\n self.severity = self.query_dict.pop('severity', '25')\r\n self.id = self.query_dict.pop('id', \"No key\")\r\n self.info = \", \".join([\"=\".join([key, str(val)])\r\n for key, val in self.query_dict.items()])\r\n\r\n self.qconf = {\r\n \"active\" : \"false\",\r\n \"classification\" : \"1\",\r\n \"description\" : self.alarm,\r\n \"source\" : \"McAfee ESM\",\r\n \"node\" : self.node,\r\n \"type\" : \"Security\" ,\r\n \"message_key\" : \"id\",\r\n \"additional_info\" : self.info,\r\n \"severity\" : self.severity,\r\n \"state\" : \"Ready\",\r\n \"sys_class_name\" : \"em_event\",\r\n \"sys_created_by\" : \"mcafee.integration\"\r\n }\r\n\r\n return(json.dumps(self.qconf))\r\n\r\n\r\n\r\ndef main():\r\n \"\"\" Main function \"\"\"\r\n\r\n # Process any command line args\r\n args = Args(sys.argv)\r\n pargs = args.get_args()\r\n\r\n logging_init()\r\n\r\n if pargs.level:\r\n logging.getLogger().setLevel(getattr(logging, pargs.level.upper()))\r\n\r\n try:\r\n fields = dict(x.split('=', 1) for x in pargs.fields)\r\n except ValueError:\r\n logging.error(\"Invalid input. Format is field=value\")\r\n sys.exit(1)\r\n\r\n configfile = pargs.cfgfile if pargs.cfgfile else 'config.ini'\r\n try:\r\n c = Config(configfile, \"DEFAULT\")\r\n except ValueError:\r\n logging.error(\"Config file not found: %s\", configfile)\r\n sys.exit(1)\r\n\r\n # Strip empty values\r\n fields = {k:v for k,v in fields.items() if v is not None}\r\n\r\n # Figure out which IP should be 'node'\r\n destip = fields.get('destip', None)\r\n sourceip = fields.get('sourceip', None)\r\n if sourceip:\r\n for subnet in homenet:\r\n if ipaddress.ip_address(sourceip) in ipaddress.ip_network(subnet):\r\n fields['node'] = sourceip\r\n elif ipaddress.ip_address(destip) in ipaddress.ip_network(subnet):\r\n fields['node'] = destip\r\n else:\r\n fields['node'] = sourceip\r\n\r\n # Check for severity in arguments. Map ESM severity (1-100) to SNOW (1-5)\r\n s = int(fields.get('severity', 25))\r\n if 90 <= s <= 100: fields['severity'] = 1 # Critical\r\n if 75 <= s <= 89: fields['severity'] = 2 # Major\r\n if 65 <= s <= 74: fields['severity'] = 3 # Minor\r\n if 50 <= s <= 64: fields['severity'] = 4 # Warning\r\n if 0 <= s <= 49: fields['severity'] = 5 # Info\r\n\r\n try:\r\n snowhost = SNOW(c.snowhost, c.snowuser, c.snowpass)\r\n except AttributeError:\r\n print(\"{} is missing a required field:\".format(configfile))\r\n raise\r\n sys.exit(1)\r\n\r\n new_ticket = Query()\r\n new_ticket_q = new_ticket.create(**fields)\r\n result = snowhost.send(new_ticket_q, '/api/now/table/em_event')\r\n\r\n # Syslog feedback to ESM\r\n try:\r\n syslog_host = c.get('sysloghost')\r\n syslog_port = c.get('syslogport')\r\n syslog = Syslog(syslog_host, syslog_port)\r\n\r\n syslog.send(result.text)\r\n except NoOptionError:\r\n logging.debug(\"Syslog feedback disabled. Settings not detected.\")\r\n\r\nif __name__ == \"__main__\":\r\n try:\r\n main()\r\n except KeyboardInterrupt:\r\n logging.warning(\"Control-C Pressed, stopping...\")\r\n sys.exit()\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import math
n, m, a = map(int, input().split())
top = math.ceil(n / a)
bottom = math.ceil(m / a)
print(top * bottom)
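# Worked example: covering a 6 x 6 area with 4 x 4 tiles needs
# ceil(6/4) * ceil(6/4) = 2 * 2 = 4 tiles, so the input "6 6 4" prints 4.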
|
normal
|
{
"blob_id": "6c426d2b165e01a7cec9f7ddbd96113ae05668f6",
"index": 4898,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(top * bottom)\n",
"step-3": "<mask token>\nn, m, a = map(int, input().split())\ntop = math.ceil(n / a)\nbottom = math.ceil(m / a)\nprint(top * bottom)\n",
"step-4": "import math\nn, m, a = map(int, input().split())\ntop = math.ceil(n / a)\nbottom = math.ceil(m / a)\nprint(top * bottom)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
a = list(range(1, 501))
b = list(range(1, 501))
c = list(range(1, 501))
for i in a:
for j in b:
for k in c:
if i + k + j == 1000 and i < j < k and j ** 2 + i ** 2 == k ** 2:
print(i)
print(j)
print(k)
break
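# Equivalent but faster sketch: derive k instead of looping over it. The only
# triple with i < j < k and i + j + k == 1000 is (200, 375, 425).
# for i in range(1, 334):
#     for j in range(i + 1, 500):
#         k = 1000 - i - j
#         if j < k and i * i + j * j == k * k:
#             print(i, j, k)  # -> 200 375 425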
|
normal
|
{
"blob_id": "34947b7ed300f2cbcbf9042fee3902458921d603",
"index": 2912,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in a:\n for j in b:\n for k in c:\n if i + k + j == 1000 and i < j < k and j ** 2 + i ** 2 == k ** 2:\n print(i)\n print(j)\n print(k)\n break\n",
"step-3": "a = list(range(1, 501))\nb = list(range(1, 501))\nc = list(range(1, 501))\nfor i in a:\n for j in b:\n for k in c:\n if i + k + j == 1000 and i < j < k and j ** 2 + i ** 2 == k ** 2:\n print(i)\n print(j)\n print(k)\n break\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
def selectionSort(arr, low, high):
    # Sort arr[low..high] (inclusive bounds) in place and return arr; the
    # original ignored low/high and always sorted the whole list.
    for i in range(low, high + 1):
        mini = i
        for j in range(i + 1, high + 1):
            if arr[mini] > arr[j]:
                mini = j
        arr[i], arr[mini] = arr[mini], arr[i]
    return arr
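# Usage sketch (inclusive bounds, matching the signature above):
# print(selectionSort([5, 2, 9, 1], 0, 3))  # -> [1, 2, 5, 9]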
|
normal
|
{
"blob_id": "c91be6cc332139c5b1e7ee5a3512482d0f8620b1",
"index": 7322,
"step-1": "<mask token>\n",
"step-2": "def selectionSort(arr, low, high):\n for i in range(len(arr)):\n mini = i\n for j in range(i + 1, len(arr)):\n if arr[mini] > arr[j]:\n mini = j\n arr[i], arr[mini] = arr[mini], arr[i]\n return arr\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
workdir = './model/adamW-BCE/model_seresnext50_32x4d_i768_runmila_2fold_50ep'
seed = 300
n_fold = 2
epoch = 50
resume_from = None
batch_size = 32
num_workers = 32
imgsize = (768, 768) #(height, width)
loss = dict(
name='BCEWithLogitsLoss',
params=dict(),
)
optim = dict(
name='AdamW',
params=dict(
lr=0.0003,
betas=(0.9, 0.999),
eps=1e-08,
weight_decay=0.01,
),
)
model = dict(
name='se_resnext50_32x4d'
)
normalize = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225],}
totensor = dict(name='ToTensor', params=dict(normalize=normalize))
crop = dict(name='RandomResizedCrop', params=dict(height=imgsize[0], width=imgsize[1], scale=(0.7,1.0), p=1.0))
crop_test = dict(name='RandomResizedCrop', params=dict(height=imgsize[0], width=imgsize[1], scale=(0.7,1.0), p=1.0))
rotate_test = dict(name='Rotate', params=dict(limit=25, border_mode=0, p=0.7))
hflip = dict(name='HorizontalFlip', params=dict(p=0.5))
'''
Additional augmentations
------------------------
vflip = dict(name='VerticalFlip', params=dict(p=0.5,))
random_brightness_contrast = dict(name='RandomBrightnessContrast', params=dict(brightness_limit=0.2, contrast_limit=0.2, p=0.5))
#gaussian_blur = dict(name='GaussianBlur', params=dict(blur_limit=7, always_apply=False, p=0.5))
#iaa_emboss = dict(name='IAAEmboss', params=dict(alpha=(0.2, 0.5), strength=(0.2, 0.7), always_apply=False, p=0.5))
#iaa_sharpen = dict(name='IAASharpen', params=dict(alpha=(0.2, 0.5), lightness=(0.5, 1.0), always_apply=False, p=0.5))
hue_saturation_value = dict(name='HueSaturationValue', params=dict(hue_shift_limit=20, sat_shift_limit=50, val_shift_limit=50, p=0.4))
cut_out = dict(name='Cutout', params=dict(num_holes=8, max_h_size=546//8, max_w_size=546//8, fill_value=0, p=0.3))
blur = dict(name='Blur', params=dict(blur_limit=4, p=.25))
shift_scale_rotate = dict(name='ShiftScaleRotate', params=dict(shift_limit=0.2, scale_limit=0.2, rotate_limit=20, p=1))
'''
rotate = dict(name='Rotate', params=dict(limit=30, border_mode=0, p=0.7))
dicomnoise = dict(name='RandomDicomNoise', params=dict(limit_ratio=0.06, p=0.9))
dicomnoise_test = dict(name='RandomDicomNoise', params=dict(limit_ratio=0.05, p=0.7))
elastic_transform = dict(name='ElasticTransform', params=dict(alpha=1, sigma=50, p=0.5))
grid_distortion = dict(name='GridDistortion', params=dict(p=0.5))
window_policy = 1
data = dict(
train=dict(
dataset_type='CustomDataset',
annotations='./cache/train-runmila_2folds_seed123.pkl',
imgdir='./input/runmila_i768',
imgsize=imgsize,
n_grad_acc=2,
loader=dict(
shuffle=True,
batch_size=batch_size,
drop_last=True,
num_workers=num_workers,
pin_memory=False,
),
transforms=[crop, hflip, rotate, dicomnoise, totensor],
dataset_policy=1,
window_policy=window_policy,
),
valid = dict(
dataset_type='CustomDataset',
annotations='./cache/train-runmila_2folds_seed123.pkl',
imgdir='./input/runmila_i768',
imgsize=imgsize,
loader=dict(
shuffle=False,
batch_size=batch_size,
drop_last=False,
num_workers=num_workers,
pin_memory=False,
),
transforms=[crop_test, hflip, rotate_test, dicomnoise_test, totensor],
dataset_policy=1,
window_policy=window_policy,
),
test = dict(
dataset_type='CustomDataset',
annotations='./cache/test.pkl',
imgdir='./input/test_runmila_i768',
imgsize=imgsize,
loader=dict(
shuffle=False,
batch_size=batch_size,
drop_last=False,
num_workers=num_workers,
pin_memory=False,
),
transforms=[crop_test, hflip, rotate_test, dicomnoise_test, totensor],
dataset_policy=1,
window_policy=window_policy,
),
)
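'''
Consumption sketch (assumed pipeline, not part of this config): most transform
dicts map onto albumentations classes of the same name, with 'params' passed
as kwargs; custom ones such as RandomDicomNoise are assumed to be registered
alongside them.

import albumentations as A

def build_transforms(confs):
    return A.Compose([getattr(A, c['name'])(**c['params']) for c in confs])
'''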
|
normal
|
{
"blob_id": "8030bdb6c9f0b7114916d7abc245ff680d1fc917",
"index": 6790,
"step-1": "<mask token>\n",
"step-2": "workdir = './model/adamW-BCE/model_seresnext50_32x4d_i768_runmila_2fold_50ep'\nseed = 300\nn_fold = 2\nepoch = 50\nresume_from = None\nbatch_size = 32\nnum_workers = 32\nimgsize = 768, 768\nloss = dict(name='BCEWithLogitsLoss', params=dict())\noptim = dict(name='AdamW', params=dict(lr=0.0003, betas=(0.9, 0.999), eps=\n 1e-08, weight_decay=0.01))\nmodel = dict(name='se_resnext50_32x4d')\nnormalize = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}\ntotensor = dict(name='ToTensor', params=dict(normalize=normalize))\ncrop = dict(name='RandomResizedCrop', params=dict(height=imgsize[0], width=\n imgsize[1], scale=(0.7, 1.0), p=1.0))\ncrop_test = dict(name='RandomResizedCrop', params=dict(height=imgsize[0],\n width=imgsize[1], scale=(0.7, 1.0), p=1.0))\nrotate_test = dict(name='Rotate', params=dict(limit=25, border_mode=0, p=0.7))\nhflip = dict(name='HorizontalFlip', params=dict(p=0.5))\n<mask token>\nrotate = dict(name='Rotate', params=dict(limit=30, border_mode=0, p=0.7))\ndicomnoise = dict(name='RandomDicomNoise', params=dict(limit_ratio=0.06, p=0.9)\n )\ndicomnoise_test = dict(name='RandomDicomNoise', params=dict(limit_ratio=\n 0.05, p=0.7))\nelastic_transform = dict(name='ElasticTransform', params=dict(alpha=1,\n sigma=50, p=0.5))\ngrid_distortion = dict(name='GridDistortion', params=dict(), p=0.5)\nwindow_policy = 1\ndata = dict(train=dict(dataset_type='CustomDataset', annotations=\n './cache/train-runmila_2folds_seed123.pkl', imgdir=\n './input/runmila_i768', imgsize=imgsize, n_grad_acc=2, loader=dict(\n shuffle=True, batch_size=batch_size, drop_last=True, num_workers=\n num_workers, pin_memory=False), transforms=[crop, hflip, rotate,\n dicomnoise, totensor], dataset_policy=1, window_policy=window_policy),\n valid=dict(dataset_type='CustomDataset', annotations=\n './cache/train-runmila_2folds_seed123.pkl', imgdir=\n './input/runmila_i768', imgsize=imgsize, loader=dict(shuffle=False,\n batch_size=batch_size, drop_last=False, num_workers=num_workers,\n pin_memory=False), transforms=[crop_test, hflip, rotate_test,\n dicomnoise_test, totensor], dataset_policy=1, window_policy=\n window_policy), test=dict(dataset_type='CustomDataset', annotations=\n './cache/test.pkl', imgdir='./input/test_runmila_i768', imgsize=imgsize,\n loader=dict(shuffle=False, batch_size=batch_size, drop_last=False,\n num_workers=num_workers, pin_memory=False), transforms=[crop_test,\n hflip, rotate_test, dicomnoise_test, totensor], dataset_policy=1,\n window_policy=window_policy))\n",
"step-3": "workdir = './model/adamW-BCE/model_seresnext50_32x4d_i768_runmila_2fold_50ep'\nseed = 300\n\nn_fold = 2\nepoch = 50\nresume_from = None\n\nbatch_size = 32\nnum_workers = 32\nimgsize = (768, 768) #(height, width)\n\nloss = dict(\n name='BCEWithLogitsLoss',\n params=dict(),\n)\n\noptim = dict(\n name='AdamW',\n params=dict(\n lr=0.0003,\n betas=(0.9, 0.999),\n eps=1e-08,\n weight_decay=0.01,\n ),\n)\n\nmodel = dict(\n name='se_resnext50_32x4d'\n)\n\n\nnormalize = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225],}\ntotensor = dict(name='ToTensor', params=dict(normalize=normalize))\ncrop = dict(name='RandomResizedCrop', params=dict(height=imgsize[0], width=imgsize[1], scale=(0.7,1.0), p=1.0))\ncrop_test = dict(name='RandomResizedCrop', params=dict(height=imgsize[0], width=imgsize[1], scale=(0.7,1.0), p=1.0))\nrotate_test = dict(name='Rotate', params=dict(limit=25, border_mode=0, p=0.7))\nhflip = dict(name='HorizontalFlip', params=dict(p=0.5))\n\n'''\nAdditional augmentarions\n------------------------\n\nvflip = dict(name='VerticalFlip', params=dict(p=0.5,))\nrandom_brightness_contrast = dict(name='RandomBrightnessContrast', params=dict(brightness_limit=0.2, contrast_limit=0.2, p=0.5))\n#gaussian_blur = dict(name='GaussianBlur', params=dict(blur_limit=7, always_apply=False, p=0.5))\n#iaa_emboss = dict(name='IAAEmboss', params=dict(alpha=(0.2, 0.5), strength=(0.2, 0.7), always_apply=False, p=0.5))\n#iaa_sharpen = dict(name='IAASharpen', params=dict(alpha=(0.2, 0.5), lightness=(0.5, 1.0), always_apply=False, p=0.5))\nhue_saturation_value = dict(name='HueSaturationValue', params=dict(hue_shift_limit=20, sat_shift_limit=50, val_shift_limit=50, p=0.4))\ncut_out = dict(name='Cutout', params=dict(num_holes=8, max_h_size=546//8, max_w_size=546//8, fill_value=0, p=0.3))\nblur = dict(name='Blur', params=dict(blur_limit=4, p=.25))\nshift_scale_rotate = dict(name='ShiftScaleRotate', params=dict(shift_limit=0.2, scale_limit=0.2, rotate_limit=20, p=1))\n'''\nrotate = dict(name='Rotate', params=dict(limit=30, border_mode=0, p=0.7))\ndicomnoise = dict(name='RandomDicomNoise', params=dict(limit_ratio=0.06, p=0.9))\ndicomnoise_test = dict(name='RandomDicomNoise', params=dict(limit_ratio=0.05, p=0.7))\nelastic_transform = dict(name='ElasticTransform', params=dict(alpha=1, sigma=50, p=0.5))\ngrid_distortion = dict(name='GridDistortion', params=dict(), p=0.5)\n\n\nwindow_policy = 1\n\ndata = dict(\n train=dict(\n dataset_type='CustomDataset',\n annotations='./cache/train-runmila_2folds_seed123.pkl',\n imgdir='./input/runmila_i768',\n imgsize=imgsize,\n n_grad_acc=2,\n loader=dict(\n shuffle=True,\n batch_size=batch_size,\n drop_last=True,\n num_workers=num_workers,\n pin_memory=False,\n ),\n transforms=[crop, hflip, rotate, dicomnoise, totensor],\n dataset_policy=1,\n window_policy=window_policy,\n ),\n valid = dict(\n dataset_type='CustomDataset',\n annotations='./cache/train-runmila_2folds_seed123.pkl',\n imgdir='./input/runmila_i768',\n imgsize=imgsize,\n loader=dict(\n shuffle=False,\n batch_size=batch_size,\n drop_last=False,\n num_workers=num_workers,\n pin_memory=False,\n ),\n transforms=[crop_test, hflip, rotate_test, dicomnoise_test, totensor],\n dataset_policy=1,\n window_policy=window_policy,\n ),\n test = dict(\n dataset_type='CustomDataset',\n annotations='./cache/test.pkl',\n imgdir='./input/test_runmila_i768',\n imgsize=imgsize,\n loader=dict(\n shuffle=False,\n batch_size=batch_size,\n drop_last=False,\n num_workers=num_workers,\n pin_memory=False,\n ),\n 
transforms=[crop_test, hflip, rotate_test, dicomnoise_test, totensor],\n dataset_policy=1,\n window_policy=window_policy,\n ),\n)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import os
def mini100(videopath, minipath,mod='train'):
with open(videopath, 'r') as video_f:
all_videos = video_f.readlines()
#if mod=='train':
# count = [400 for _ in range(0,100)]
#else:
# count = [25 for _ in range(0,100)]
count = [0 for _ in range(0,100)]
with open(minipath,'w') as f:
for video in all_videos:
#print(video)
path, label = video.split(',')
label = int(label)
if label<100:
#if count[label]>0:
# count[label] -= 1
count[label] +=1
f.write(video)
for cls,i in enumerate(count):
#if i!=0:
print("{} class have : {}".format(cls,i))
print("total {}".format(sum(count)))
# assert i==0
def mini200(videopath, minipath,mod='train'):
with open(videopath, 'r') as video_f:
all_videos = video_f.readlines()
#if mod=='train':
# count = [400 for _ in range(0,100)]
#else:
# count = [25 for _ in range(0,100)]
count = [0 for _ in range(0,200)]
with open(minipath,'w') as f:
for video in all_videos:
#print(video)
path, label = video.split(',')
label = int(label)
if label<200:
#if count[label]>0:
# count[label] -= 1
count[label] +=1
f.write(video)
for cls,i in enumerate(count):
#if i!=0:
print("{} class have : {}".format(cls,i))
print("total {}".format(sum(count)))
# assert i==0
def exist_or_not(ann):
with open(ann, 'r') as f:
all = f.readlines()
for video in all:
path =video.split(',')[0]
if not os.path.isfile(path):
print(path)
print("all done!")
if __name__ == "__main__":
import fire
fire.Fire()
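# Example CLI usage via python-fire (functions are exposed by name; the file
# name below is a placeholder):
#   python make_mini.py mini100 train_full.txt train_mini100.txt
#   python make_mini.py exist_or_not train_mini100.txt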
|
normal
|
{
"blob_id": "f6d4208afee7aacd96ea5ae6c9e38d2876466703",
"index": 7417,
"step-1": "<mask token>\n\n\ndef mini200(videopath, minipath, mod='train'):\n with open(videopath, 'r') as video_f:\n all_videos = video_f.readlines()\n count = [(0) for _ in range(0, 200)]\n with open(minipath, 'w') as f:\n for video in all_videos:\n path, label = video.split(',')\n label = int(label)\n if label < 200:\n count[label] += 1\n f.write(video)\n for cls, i in enumerate(count):\n print('{} class have : {}'.format(cls, i))\n print('total {}'.format(sum(count)))\n\n\ndef exist_or_not(ann):\n with open(ann, 'r') as f:\n all = f.readlines()\n for video in all:\n path = video.split(',')[0]\n if not os.path.isfile(path):\n print(path)\n print('all done!')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef mini100(videopath, minipath, mod='train'):\n with open(videopath, 'r') as video_f:\n all_videos = video_f.readlines()\n count = [(0) for _ in range(0, 100)]\n with open(minipath, 'w') as f:\n for video in all_videos:\n path, label = video.split(',')\n label = int(label)\n if label < 100:\n count[label] += 1\n f.write(video)\n for cls, i in enumerate(count):\n print('{} class have : {}'.format(cls, i))\n print('total {}'.format(sum(count)))\n\n\ndef mini200(videopath, minipath, mod='train'):\n with open(videopath, 'r') as video_f:\n all_videos = video_f.readlines()\n count = [(0) for _ in range(0, 200)]\n with open(minipath, 'w') as f:\n for video in all_videos:\n path, label = video.split(',')\n label = int(label)\n if label < 200:\n count[label] += 1\n f.write(video)\n for cls, i in enumerate(count):\n print('{} class have : {}'.format(cls, i))\n print('total {}'.format(sum(count)))\n\n\ndef exist_or_not(ann):\n with open(ann, 'r') as f:\n all = f.readlines()\n for video in all:\n path = video.split(',')[0]\n if not os.path.isfile(path):\n print(path)\n print('all done!')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef mini100(videopath, minipath, mod='train'):\n with open(videopath, 'r') as video_f:\n all_videos = video_f.readlines()\n count = [(0) for _ in range(0, 100)]\n with open(minipath, 'w') as f:\n for video in all_videos:\n path, label = video.split(',')\n label = int(label)\n if label < 100:\n count[label] += 1\n f.write(video)\n for cls, i in enumerate(count):\n print('{} class have : {}'.format(cls, i))\n print('total {}'.format(sum(count)))\n\n\ndef mini200(videopath, minipath, mod='train'):\n with open(videopath, 'r') as video_f:\n all_videos = video_f.readlines()\n count = [(0) for _ in range(0, 200)]\n with open(minipath, 'w') as f:\n for video in all_videos:\n path, label = video.split(',')\n label = int(label)\n if label < 200:\n count[label] += 1\n f.write(video)\n for cls, i in enumerate(count):\n print('{} class have : {}'.format(cls, i))\n print('total {}'.format(sum(count)))\n\n\ndef exist_or_not(ann):\n with open(ann, 'r') as f:\n all = f.readlines()\n for video in all:\n path = video.split(',')[0]\n if not os.path.isfile(path):\n print(path)\n print('all done!')\n\n\nif __name__ == '__main__':\n import fire\n fire.Fire()\n",
"step-4": "import os\n\n\ndef mini100(videopath, minipath, mod='train'):\n with open(videopath, 'r') as video_f:\n all_videos = video_f.readlines()\n count = [(0) for _ in range(0, 100)]\n with open(minipath, 'w') as f:\n for video in all_videos:\n path, label = video.split(',')\n label = int(label)\n if label < 100:\n count[label] += 1\n f.write(video)\n for cls, i in enumerate(count):\n print('{} class have : {}'.format(cls, i))\n print('total {}'.format(sum(count)))\n\n\ndef mini200(videopath, minipath, mod='train'):\n with open(videopath, 'r') as video_f:\n all_videos = video_f.readlines()\n count = [(0) for _ in range(0, 200)]\n with open(minipath, 'w') as f:\n for video in all_videos:\n path, label = video.split(',')\n label = int(label)\n if label < 200:\n count[label] += 1\n f.write(video)\n for cls, i in enumerate(count):\n print('{} class have : {}'.format(cls, i))\n print('total {}'.format(sum(count)))\n\n\ndef exist_or_not(ann):\n with open(ann, 'r') as f:\n all = f.readlines()\n for video in all:\n path = video.split(',')[0]\n if not os.path.isfile(path):\n print(path)\n print('all done!')\n\n\nif __name__ == '__main__':\n import fire\n fire.Fire()\n",
"step-5": "import os\n\ndef mini100(videopath, minipath,mod='train'):\n with open(videopath, 'r') as video_f:\n all_videos = video_f.readlines()\n #if mod=='train':\n # count = [400 for _ in range(0,100)]\n #else:\n # count = [25 for _ in range(0,100)]\n count = [0 for _ in range(0,100)]\n with open(minipath,'w') as f:\n for video in all_videos:\n #print(video)\n path, label = video.split(',')\n label = int(label)\n if label<100:\n #if count[label]>0:\n # count[label] -= 1\n count[label] +=1\n \n f.write(video)\n \n for cls,i in enumerate(count):\n #if i!=0:\n print(\"{} class have : {}\".format(cls,i))\n print(\"total {}\".format(sum(count)))\n # assert i==0\n\ndef mini200(videopath, minipath,mod='train'):\n with open(videopath, 'r') as video_f:\n all_videos = video_f.readlines()\n #if mod=='train':\n # count = [400 for _ in range(0,100)]\n #else:\n # count = [25 for _ in range(0,100)]\n count = [0 for _ in range(0,200)]\n with open(minipath,'w') as f:\n for video in all_videos:\n #print(video)\n path, label = video.split(',')\n label = int(label)\n if label<200:\n #if count[label]>0:\n # count[label] -= 1\n count[label] +=1\n \n f.write(video)\n \n for cls,i in enumerate(count):\n #if i!=0:\n print(\"{} class have : {}\".format(cls,i))\n print(\"total {}\".format(sum(count)))\n # assert i==0\n\ndef exist_or_not(ann,):\n with open(ann, 'r') as f:\n all = f.readlines()\n for video in all:\n path =video.split(',')[0]\n if not os.path.isfile(path):\n print(path)\n print(\"all done!\")\n \nif __name__ == \"__main__\":\n import fire\n fire.Fire()\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
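mini100 and mini200 in the record above differ only in the label cutoff. A minimal sketch of one parameterized helper covering both, assuming the same "path,label" annotation format; mini_subset and num_classes are names introduced here for illustration:

```python
import os


def mini_subset(videopath, minipath, num_classes, mod='train'):
    """Keep only annotation lines whose label is below num_classes (sketch)."""
    count = [0] * num_classes
    with open(videopath, 'r') as video_f, open(minipath, 'w') as f:
        for video in video_f:  # one 'path,label' line per video
            path, label = video.split(',')
            if int(label) < num_classes:
                count[int(label)] += 1
                f.write(video)
    for cls, i in enumerate(count):
        print('{} class have : {}'.format(cls, i))
    print('total {}'.format(sum(count)))
```

Calling mini_subset(videopath, minipath, 100) reproduces mini100, and passing 200 reproduces mini200.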
from .fieldmatrix import *
|
normal
|
{
"blob_id": "fc4fafe4e29a7f116c38be265fce8e4fb6638330",
"index": 6848,
"step-1": "<mask token>\n",
"step-2": "from .fieldmatrix import *\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
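The one-line record above is a package __init__.py that star-imports a submodule. A sketch of the more explicit equivalent; FieldMatrix is a hypothetical name standing in for whatever fieldmatrix.py actually defines:

```python
# __init__.py: explicit re-export keeps the package namespace predictable
from .fieldmatrix import FieldMatrix  # hypothetical symbol from fieldmatrix.py

# __all__ limits what 'from package import *' exposes downstream
__all__ = ['FieldMatrix']
```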
import json
import re
from bs4 import BeautifulSoup
from bs4.element import NavigableString, Tag
from common import dir_path
def is_element(el, tag):
return isinstance(el, Tag) and el.name == tag
class ElemIterator():
def __init__(self, els):
self.els = els
self.i = 0
def peek(self):
try:
return self.els[self.i]
except IndexError:
return None
def __next__(self):
self.i += 1
return self.els[self.i - 1]
def hasNext(self):
return len(self.els) > (self.i)
def peek_till(self, tag):
while not is_element(self.peek(), tag):
self.__next__()
def next_till(self, tag):
self.peek_till(tag)
self.__next__()
def parse_lines(iter_):
iter_.peek_till('strong')
county = []
while iter_.hasNext():
county += [iter_.__next__()]
if is_element(iter_.peek(), 'strong'):
yield ElemIterator(county)
county = []
yield ElemIterator(county)
county = []
def parse_emails_url(iter_):
emails = []
url = None
try:
while True:
iter_.peek_till('a')
email = iter_.__next__()
href = email['href']
if href.startswith('mailto:'):
if href[7:]:
emails += [href[7:]]
else:
emails += [email.text]
else:
url = href
except IndexError:
pass
return emails, url
def parse_url(iter_):
iter_.peek_till('a')
link = iter_.__next__()
href = link['href']
assert not href.startswith('mailto:')
return [href]
def parse_county(iter_):
county_title = iter_.__next__().text.strip().title()
locale = re.match('(.*) (City|County)', county_title).group(0)
if county_title.startswith('Clark County Elections Mailing Address'):
emails, url = parse_emails_url(iter_)
return {
'locale': locale,
'county': locale,
'emails': emails,
}
while True:
el = iter_.__next__()
if isinstance(el, NavigableString):
if 'Clerk' in el or 'Registrar' in el:
official = el.strip().split(',')[0]
break
address = []
while True:
el = iter_.__next__()
if isinstance(el, NavigableString):
address += [el.strip()]
if re.search(r'Nevada \d{5}', el) or re.search(r'NV \d{5}', el):
break
el = iter_.__next__()
el = iter_.__next__()
if isinstance(el, NavigableString):
el = el.replace(u'\xa0', ' ') # replace non-breaking space
matches1 = re.search(r'(\(\d{3}\) \d{3}-\d{4}) FAX (\(\d{3}\) \d{3}-\d{4})', el)
matches2 = re.search(r'(\(\d{3}\) \d{3}-VOTE \(\d{4}\)) FAX (\(\d{3}\) \d{3}-\d{4})', el)
if matches1:
phone = matches1.group(1)
fax = matches1.group(2)
elif matches2:
phone = matches2.group(1)
fax = matches2.group(2)
else:
print(county_title)
print(el)
print(re.search(r'(\(\d{3}\) \d{3}-\d{4}) FAX', el))
assert False
emails, url = parse_emails_url(iter_)
init = {'city': locale} if locale.endswith('City') else {'county': locale}
return {
**init,
'locale': locale,
'official': official,
'address': ', '.join(address),
'emails': list(set(emails)),
'phones': [phone],
'faxes': [fax],
'url': url,
}
def main():
# Actually this file: https://www.nvsos.gov/sos/elections/voters/county-clerk-contact-information
# But it's behind a javascript test
with open(dir_path(__file__) + '/cache/Nevada.htm') as fh:
page = fh.read()
soup = BeautifulSoup(page, 'lxml')
ps = soup.select('div.content_area > p')
iter_ = ElemIterator([x for p in ps for x in p.children])
raw_counties = [parse_county(county) for county in parse_lines(iter_)]
merge_counties = {}
for county in raw_counties:
locale = county['locale']
if locale in merge_counties:
merge_counties[locale]['emails'] += county['emails']
else:
merge_counties[locale] = county
counties = list(merge_counties.values())
assert len(counties) == len(raw_counties) - 1
with open('public/nevada.json', 'w') as fh:
json.dump(counties, fh)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "cb08f64d1ad7e53f1041684d4ca4ef65036c138d",
"index": 44,
"step-1": "<mask token>\n\n\ndef is_element(el, tag):\n return isinstance(el, Tag) and el.name == tag\n\n\nclass ElemIterator:\n\n def __init__(self, els):\n self.els = els\n self.i = 0\n\n def peek(self):\n try:\n return self.els[self.i]\n except IndexError:\n return None\n\n def __next__(self):\n self.i += 1\n return self.els[self.i - 1]\n\n def hasNext(self):\n return len(self.els) > self.i\n\n def peek_till(self, tag):\n while not is_element(self.peek(), tag):\n self.__next__()\n\n def next_till(self, tag):\n self.peek_till(tag)\n self.__next__()\n\n\ndef parse_lines(iter_):\n iter_.peek_till('strong')\n county = []\n while iter_.hasNext():\n county += [iter_.__next__()]\n if is_element(iter_.peek(), 'strong'):\n yield ElemIterator(county)\n county = []\n yield ElemIterator(county)\n county = []\n\n\n<mask token>\n\n\ndef parse_url(iter_):\n iter_.peek_till('a')\n link = iter_.__next__()\n href = link['href']\n assert not href.startswith('mailto:')\n return [href]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef is_element(el, tag):\n return isinstance(el, Tag) and el.name == tag\n\n\nclass ElemIterator:\n\n def __init__(self, els):\n self.els = els\n self.i = 0\n\n def peek(self):\n try:\n return self.els[self.i]\n except IndexError:\n return None\n\n def __next__(self):\n self.i += 1\n return self.els[self.i - 1]\n\n def hasNext(self):\n return len(self.els) > self.i\n\n def peek_till(self, tag):\n while not is_element(self.peek(), tag):\n self.__next__()\n\n def next_till(self, tag):\n self.peek_till(tag)\n self.__next__()\n\n\ndef parse_lines(iter_):\n iter_.peek_till('strong')\n county = []\n while iter_.hasNext():\n county += [iter_.__next__()]\n if is_element(iter_.peek(), 'strong'):\n yield ElemIterator(county)\n county = []\n yield ElemIterator(county)\n county = []\n\n\n<mask token>\n\n\ndef parse_url(iter_):\n iter_.peek_till('a')\n link = iter_.__next__()\n href = link['href']\n assert not href.startswith('mailto:')\n return [href]\n\n\ndef parse_county(iter_):\n county_title = iter_.__next__().text.strip().title()\n locale = re.match('(.*) (City|County)', county_title).group(0)\n if county_title.startswith('Clark County Elections Mailing Address'):\n emails, url = parse_emails_url(iter_)\n return {'locale': locale, 'county': locale, 'emails': emails}\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n if 'Clerk' in el or 'Registrar' in el:\n official = el.strip().split(',')[0]\n break\n address = []\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n address += [el.strip()]\n if re.search('Nevada \\\\d{5}', el) or re.search('NV \\\\d{5}', el):\n break\n el = iter_.__next__()\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n el = el.replace(u'\\xa0', ' ')\n matches1 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})', el\n )\n matches2 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-VOTE \\\\(\\\\d{4}\\\\)) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})'\n , el)\n if matches1:\n phone = matches1.group(1)\n fax = matches1.group(2)\n elif matches2:\n phone = matches2.group(1)\n fax = matches2.group(2)\n else:\n print(county_title)\n print(el)\n print(re.search('(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX', el))\n assert False\n emails, url = parse_emails_url(iter_)\n init = {'city': locale} if locale.endswith('City') else {'county': locale}\n return {**init, 'locale': locale, 'official': official, 'address': ', '\n .join(address), 'emails': list(set(emails)), 'phones': [phone],\n 'faxes': [fax], 'url': url}\n\n\ndef main():\n with open(dir_path(__file__) + '/cache/Nevada.htm') as fh:\n page = fh.read()\n soup = BeautifulSoup(page, 'lxml')\n ps = soup.select('div.content_area > p')\n iter_ = ElemIterator([x for p in ps for x in p.children])\n raw_counties = [parse_county(county) for county in parse_lines(iter_)]\n merge_counties = {}\n for county in raw_counties:\n locale = county['locale']\n if locale in merge_counties:\n merge_counties[locale]['emails'] += county['emails']\n else:\n merge_counties[locale] = county\n counties = list(merge_counties.values())\n assert len(counties) == len(raw_counties) - 1\n with open('public/nevada.json', 'w') as fh:\n json.dump(counties, fh)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef is_element(el, tag):\n return isinstance(el, Tag) and el.name == tag\n\n\nclass ElemIterator:\n\n def __init__(self, els):\n self.els = els\n self.i = 0\n\n def peek(self):\n try:\n return self.els[self.i]\n except IndexError:\n return None\n\n def __next__(self):\n self.i += 1\n return self.els[self.i - 1]\n\n def hasNext(self):\n return len(self.els) > self.i\n\n def peek_till(self, tag):\n while not is_element(self.peek(), tag):\n self.__next__()\n\n def next_till(self, tag):\n self.peek_till(tag)\n self.__next__()\n\n\ndef parse_lines(iter_):\n iter_.peek_till('strong')\n county = []\n while iter_.hasNext():\n county += [iter_.__next__()]\n if is_element(iter_.peek(), 'strong'):\n yield ElemIterator(county)\n county = []\n yield ElemIterator(county)\n county = []\n\n\ndef parse_emails_url(iter_):\n emails = []\n url = None\n try:\n while True:\n iter_.peek_till('a')\n email = iter_.__next__()\n href = email['href']\n if href.startswith('mailto:'):\n if href[7:]:\n emails += [href[7:]]\n else:\n emails += [email.text]\n else:\n url = href\n except IndexError:\n pass\n return emails, url\n\n\ndef parse_url(iter_):\n iter_.peek_till('a')\n link = iter_.__next__()\n href = link['href']\n assert not href.startswith('mailto:')\n return [href]\n\n\ndef parse_county(iter_):\n county_title = iter_.__next__().text.strip().title()\n locale = re.match('(.*) (City|County)', county_title).group(0)\n if county_title.startswith('Clark County Elections Mailing Address'):\n emails, url = parse_emails_url(iter_)\n return {'locale': locale, 'county': locale, 'emails': emails}\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n if 'Clerk' in el or 'Registrar' in el:\n official = el.strip().split(',')[0]\n break\n address = []\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n address += [el.strip()]\n if re.search('Nevada \\\\d{5}', el) or re.search('NV \\\\d{5}', el):\n break\n el = iter_.__next__()\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n el = el.replace(u'\\xa0', ' ')\n matches1 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})', el\n )\n matches2 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-VOTE \\\\(\\\\d{4}\\\\)) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})'\n , el)\n if matches1:\n phone = matches1.group(1)\n fax = matches1.group(2)\n elif matches2:\n phone = matches2.group(1)\n fax = matches2.group(2)\n else:\n print(county_title)\n print(el)\n print(re.search('(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX', el))\n assert False\n emails, url = parse_emails_url(iter_)\n init = {'city': locale} if locale.endswith('City') else {'county': locale}\n return {**init, 'locale': locale, 'official': official, 'address': ', '\n .join(address), 'emails': list(set(emails)), 'phones': [phone],\n 'faxes': [fax], 'url': url}\n\n\ndef main():\n with open(dir_path(__file__) + '/cache/Nevada.htm') as fh:\n page = fh.read()\n soup = BeautifulSoup(page, 'lxml')\n ps = soup.select('div.content_area > p')\n iter_ = ElemIterator([x for p in ps for x in p.children])\n raw_counties = [parse_county(county) for county in parse_lines(iter_)]\n merge_counties = {}\n for county in raw_counties:\n locale = county['locale']\n if locale in merge_counties:\n merge_counties[locale]['emails'] += county['emails']\n else:\n merge_counties[locale] = county\n counties = list(merge_counties.values())\n assert len(counties) == len(raw_counties) - 1\n with open('public/nevada.json', 'w') as 
fh:\n json.dump(counties, fh)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import json\nimport re\nfrom bs4 import BeautifulSoup\nfrom bs4.element import NavigableString, Tag\nfrom common import dir_path\n\n\ndef is_element(el, tag):\n return isinstance(el, Tag) and el.name == tag\n\n\nclass ElemIterator:\n\n def __init__(self, els):\n self.els = els\n self.i = 0\n\n def peek(self):\n try:\n return self.els[self.i]\n except IndexError:\n return None\n\n def __next__(self):\n self.i += 1\n return self.els[self.i - 1]\n\n def hasNext(self):\n return len(self.els) > self.i\n\n def peek_till(self, tag):\n while not is_element(self.peek(), tag):\n self.__next__()\n\n def next_till(self, tag):\n self.peek_till(tag)\n self.__next__()\n\n\ndef parse_lines(iter_):\n iter_.peek_till('strong')\n county = []\n while iter_.hasNext():\n county += [iter_.__next__()]\n if is_element(iter_.peek(), 'strong'):\n yield ElemIterator(county)\n county = []\n yield ElemIterator(county)\n county = []\n\n\ndef parse_emails_url(iter_):\n emails = []\n url = None\n try:\n while True:\n iter_.peek_till('a')\n email = iter_.__next__()\n href = email['href']\n if href.startswith('mailto:'):\n if href[7:]:\n emails += [href[7:]]\n else:\n emails += [email.text]\n else:\n url = href\n except IndexError:\n pass\n return emails, url\n\n\ndef parse_url(iter_):\n iter_.peek_till('a')\n link = iter_.__next__()\n href = link['href']\n assert not href.startswith('mailto:')\n return [href]\n\n\ndef parse_county(iter_):\n county_title = iter_.__next__().text.strip().title()\n locale = re.match('(.*) (City|County)', county_title).group(0)\n if county_title.startswith('Clark County Elections Mailing Address'):\n emails, url = parse_emails_url(iter_)\n return {'locale': locale, 'county': locale, 'emails': emails}\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n if 'Clerk' in el or 'Registrar' in el:\n official = el.strip().split(',')[0]\n break\n address = []\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n address += [el.strip()]\n if re.search('Nevada \\\\d{5}', el) or re.search('NV \\\\d{5}', el):\n break\n el = iter_.__next__()\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n el = el.replace(u'\\xa0', ' ')\n matches1 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})', el\n )\n matches2 = re.search(\n '(\\\\(\\\\d{3}\\\\) \\\\d{3}-VOTE \\\\(\\\\d{4}\\\\)) FAX (\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4})'\n , el)\n if matches1:\n phone = matches1.group(1)\n fax = matches1.group(2)\n elif matches2:\n phone = matches2.group(1)\n fax = matches2.group(2)\n else:\n print(county_title)\n print(el)\n print(re.search('(\\\\(\\\\d{3}\\\\) \\\\d{3}-\\\\d{4}) FAX', el))\n assert False\n emails, url = parse_emails_url(iter_)\n init = {'city': locale} if locale.endswith('City') else {'county': locale}\n return {**init, 'locale': locale, 'official': official, 'address': ', '\n .join(address), 'emails': list(set(emails)), 'phones': [phone],\n 'faxes': [fax], 'url': url}\n\n\ndef main():\n with open(dir_path(__file__) + '/cache/Nevada.htm') as fh:\n page = fh.read()\n soup = BeautifulSoup(page, 'lxml')\n ps = soup.select('div.content_area > p')\n iter_ = ElemIterator([x for p in ps for x in p.children])\n raw_counties = [parse_county(county) for county in parse_lines(iter_)]\n merge_counties = {}\n for county in raw_counties:\n locale = county['locale']\n if locale in merge_counties:\n merge_counties[locale]['emails'] += county['emails']\n else:\n merge_counties[locale] = county\n counties = 
list(merge_counties.values())\n assert len(counties) == len(raw_counties) - 1\n with open('public/nevada.json', 'w') as fh:\n json.dump(counties, fh)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import json\nimport re\nfrom bs4 import BeautifulSoup\nfrom bs4.element import NavigableString, Tag\n\nfrom common import dir_path\n\n\ndef is_element(el, tag):\n return isinstance(el, Tag) and el.name == tag\n\n\nclass ElemIterator():\n def __init__(self, els):\n self.els = els\n self.i = 0\n\n def peek(self):\n try:\n return self.els[self.i]\n except IndexError:\n return None\n\n def __next__(self):\n self.i += 1\n return self.els[self.i - 1]\n\n def hasNext(self):\n return len(self.els) > (self.i)\n\n def peek_till(self, tag):\n while not is_element(self.peek(), tag):\n self.__next__()\n\n def next_till(self, tag):\n self.peek_till(tag)\n self.__next__()\n\n\ndef parse_lines(iter_):\n iter_.peek_till('strong')\n\n county = []\n while iter_.hasNext():\n county += [iter_.__next__()]\n\n if is_element(iter_.peek(), 'strong'):\n yield ElemIterator(county)\n county = []\n\n yield ElemIterator(county)\n county = []\n\n\ndef parse_emails_url(iter_):\n emails = []\n url = None\n\n try:\n while True:\n iter_.peek_till('a')\n email = iter_.__next__()\n href = email['href']\n if href.startswith('mailto:'):\n if href[7:]:\n emails += [href[7:]]\n else:\n emails += [email.text]\n else:\n url = href\n except IndexError:\n pass\n return emails, url\n\n\ndef parse_url(iter_):\n iter_.peek_till('a')\n link = iter_.__next__()\n href = link['href']\n assert not href.startswith('mailto:')\n return [href]\n\n\ndef parse_county(iter_):\n county_title = iter_.__next__().text.strip().title()\n locale = re.match('(.*) (City|County)', county_title).group(0)\n\n if county_title.startswith('Clark County Elections Mailing Address'):\n emails, url = parse_emails_url(iter_)\n return {\n 'locale': locale,\n 'county': locale,\n 'emails': emails,\n }\n\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n if 'Clerk' in el or 'Registrar' in el:\n official = el.strip().split(',')[0]\n break\n\n address = []\n while True:\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n address += [el.strip()]\n if re.search(r'Nevada \\d{5}', el) or re.search(r'NV \\d{5}', el):\n break\n\n el = iter_.__next__()\n el = iter_.__next__()\n if isinstance(el, NavigableString):\n el = el.replace(u'\\xa0', ' ') # replace non-breaking space\n matches1 = re.search(r'(\\(\\d{3}\\) \\d{3}-\\d{4}) FAX (\\(\\d{3}\\) \\d{3}-\\d{4})', el)\n matches2 = re.search(r'(\\(\\d{3}\\) \\d{3}-VOTE \\(\\d{4}\\)) FAX (\\(\\d{3}\\) \\d{3}-\\d{4})', el)\n if matches1:\n phone = matches1.group(1)\n fax = matches1.group(2)\n elif matches2:\n phone = matches2.group(1)\n fax = matches2.group(2)\n else:\n print(county_title)\n print(el)\n print(re.search(r'(\\(\\d{3}\\) \\d{3}-\\d{4}) FAX', el))\n assert False\n\n emails, url = parse_emails_url(iter_)\n\n init = {'city': locale} if locale.endswith('City') else {'county': locale}\n\n return {\n **init,\n 'locale': locale,\n 'official': official,\n 'address': ', '.join(address),\n 'emails': list(set(emails)),\n 'phones': [phone],\n 'faxes': [fax],\n 'url': url,\n }\n\n\ndef main():\n # Actually this file: https://www.nvsos.gov/sos/elections/voters/county-clerk-contact-information\n # But it's behind a javascript test\n with open(dir_path(__file__) + '/cache/Nevada.htm') as fh:\n page = fh.read()\n soup = BeautifulSoup(page, 'lxml')\n ps = soup.select('div.content_area > p')\n iter_ = ElemIterator([x for p in ps for x in p.children])\n raw_counties = [parse_county(county) for county in parse_lines(iter_)]\n\n merge_counties = {}\n for county in raw_counties:\n locale = 
county['locale']\n if locale in merge_counties:\n merge_counties[locale]['emails'] += county['emails']\n else:\n merge_counties[locale] = county\n\n counties = list(merge_counties.values())\n assert len(counties) == len(raw_counties) - 1\n\n with open('public/nevada.json', 'w') as fh:\n json.dump(counties, fh)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
10,
12,
14,
15,
16
]
}
|
[
10,
12,
14,
15,
16
] |
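The phone/fax split in the scraper record above hinges on the two alternate patterns tried in parse_county. A small sketch exercising them on invented sample strings (the numbers are made up for illustration):

```python
import re

# same two patterns as parse_county: plain number, and the '-VOTE (xxxx)' variant
PHONE_FAX = re.compile(r'(\(\d{3}\) \d{3}-\d{4}) FAX (\(\d{3}\) \d{3}-\d{4})')
VOTE_FAX = re.compile(r'(\(\d{3}\) \d{3}-VOTE \(\d{4}\)) FAX (\(\d{3}\) \d{3}-\d{4})')

for sample in ('(775) 555-1234 FAX (775) 555-9876',
               '(702) 455-VOTE (8683) FAX (702) 455-0000'):
    m = PHONE_FAX.search(sample) or VOTE_FAX.search(sample)
    print('phone:', m.group(1), '| fax:', m.group(2))
```

Trying the plain pattern first and falling back to the VOTE variant mirrors the matches1/matches2 order in the original function.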
# Generated by Django 3.1.3 on 2020-11-19 06:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myems', '0004_auto_20201118_1446'),
]
operations = [
migrations.RenameField(
model_name='dg',
old_name='sn',
new_name='id',
),
migrations.AddField(
model_name='dg',
name='code_ean13',
field=models.CharField(default=0, max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='dg',
name='commercial_designation_in_english',
field=models.CharField(default=0, max_length=100),
preserve_default=False,
),
migrations.AlterModelTable(
name='dg',
table='dg_gen',
),
]
|
normal
|
{
"blob_id": "11d96a8a400afb0861b92d8900e003826614c99a",
"index": 7502,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('myems', '0004_auto_20201118_1446')]\n operations = [migrations.RenameField(model_name='dg', old_name='sn',\n new_name='id'), migrations.AddField(model_name='dg', name=\n 'code_ean13', field=models.CharField(default=0, max_length=50),\n preserve_default=False), migrations.AddField(model_name='dg', name=\n 'commercial_designation_in_english', field=models.CharField(default\n =0, max_length=100), preserve_default=False), migrations.\n AlterModelTable(name='dg', table='dg_gen')]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('myems', '0004_auto_20201118_1446')]\n operations = [migrations.RenameField(model_name='dg', old_name='sn',\n new_name='id'), migrations.AddField(model_name='dg', name=\n 'code_ean13', field=models.CharField(default=0, max_length=50),\n preserve_default=False), migrations.AddField(model_name='dg', name=\n 'commercial_designation_in_english', field=models.CharField(default\n =0, max_length=100), preserve_default=False), migrations.\n AlterModelTable(name='dg', table='dg_gen')]\n",
"step-5": "# Generated by Django 3.1.3 on 2020-11-19 06:19\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('myems', '0004_auto_20201118_1446'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='dg',\n old_name='sn',\n new_name='id',\n ),\n migrations.AddField(\n model_name='dg',\n name='code_ean13',\n field=models.CharField(default=0, max_length=50),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='dg',\n name='commercial_designation_in_english',\n field=models.CharField(default=0, max_length=100),\n preserve_default=False,\n ),\n migrations.AlterModelTable(\n name='dg',\n table='dg_gen',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
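The migration record above renames sn to id, adds two required CharFields (the default=0 with preserve_default=False is only used to backfill existing rows and is dropped from the model afterwards), and repoints the table. A sketch of the model state it implies; fields from the earlier 0001-0004 migrations are omitted because they are not shown here:

```python
from django.db import models


class DG(models.Model):
    # state after this migration: fields it adds (earlier fields omitted)
    code_ean13 = models.CharField(max_length=50)
    commercial_designation_in_english = models.CharField(max_length=100)

    class Meta:
        db_table = 'dg_gen'  # set by the AlterModelTable operation
```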
"""
리스트에 있는 숫자들의 최빈값을 구하는 프로그램을 만들어라.
[12, 17, 19, 17, 23] = 17
[26, 37, 26, 37, 91] = 26, 37
[28, 30, 32, 34, 144] = 없다
최빈값 : 자료의 값 중에서 가장 많이 나타난 값
① 자료의 값이 모두 같거나 모두 다르면 최빈값은 없다.
② 자료의 값이 모두 다를 때, 도수가 가장 큰 값이 1개 이상 있으면 그 값은 모두 최빈값이다.
"""
n_list = [[12, 17, 19, 17, 23],
[26, 37, 26, 37, 91],
[28, 30, 32, 34, 144],
[10, 10, 10, 10, 10]]
for numbers in n_list:
n_dict = {}
for n in numbers:
if n in n_dict:
n_dict[n] += 1
else:
n_dict[n] = 1
mode = []
if len(n_dict) == 1 or len(n_dict) == len(numbers):
print(numbers, '= 없다')
else:
mode_count = max(n_dict.values())
for e in n_dict.keys():
if n_dict[e] == mode_count:
mode.append(e)
print(numbers, '=', mode)
|
normal
|
{
"blob_id": "39f9341313e29a22ec5e05ce9371bf65e89c91bd",
"index": 25,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor numbers in n_list:\n n_dict = {}\n for n in numbers:\n if n in n_dict:\n n_dict[n] += 1\n else:\n n_dict[n] = 1\n mode = []\n if len(n_dict) == 1 or len(n_dict) == len(numbers):\n print(numbers, '= 없다')\n else:\n mode_count = max(n_dict.values())\n for e in n_dict.keys():\n if n_dict[e] == mode_count:\n mode.append(e)\n print(numbers, '=', mode)\n",
"step-3": "<mask token>\nn_list = [[12, 17, 19, 17, 23], [26, 37, 26, 37, 91], [28, 30, 32, 34, 144],\n [10, 10, 10, 10, 10]]\nfor numbers in n_list:\n n_dict = {}\n for n in numbers:\n if n in n_dict:\n n_dict[n] += 1\n else:\n n_dict[n] = 1\n mode = []\n if len(n_dict) == 1 or len(n_dict) == len(numbers):\n print(numbers, '= 없다')\n else:\n mode_count = max(n_dict.values())\n for e in n_dict.keys():\n if n_dict[e] == mode_count:\n mode.append(e)\n print(numbers, '=', mode)\n",
"step-4": "\"\"\"\n리스트에 있는 숫자들의 최빈값을 구하는 프로그램을 만들어라.\n\n[12, 17, 19, 17, 23] = 17\n[26, 37, 26, 37, 91] = 26, 37\n[28, 30, 32, 34, 144] = 없다\n\n최빈값 : 자료의 값 중에서 가장 많이 나타난 값 \n① 자료의 값이 모두 같거나 모두 다르면 최빈값은 없다.\n② 자료의 값이 모두 다를 때, 도수가 가장 큰 값이 1개 이상 있으면 그 값은 모두 최빈값이다.\n\"\"\"\n\nn_list = [[12, 17, 19, 17, 23],\n [26, 37, 26, 37, 91],\n [28, 30, 32, 34, 144],\n [10, 10, 10, 10, 10]]\n \nfor numbers in n_list:\n n_dict = {}\n for n in numbers:\n if n in n_dict:\n n_dict[n] += 1\n else:\n n_dict[n] = 1\n mode = []\n if len(n_dict) == 1 or len(n_dict) == len(numbers):\n print(numbers, '= 없다')\n else:\n mode_count = max(n_dict.values())\n for e in n_dict.keys():\n if n_dict[e] == mode_count:\n mode.append(e)\n print(numbers, '=', mode)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
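The manual dictionary counting in the record above is exactly what collections.Counter provides. A minimal re-implementation of the same mode rules using it:

```python
from collections import Counter


def modes(numbers):
    counts = Counter(numbers)
    # all-identical or all-distinct values -> no mode, as in the record above
    if len(counts) in (1, len(numbers)):
        return []
    top = max(counts.values())
    return [n for n, c in counts.items() if c == top]


for numbers in [[12, 17, 19, 17, 23], [26, 37, 26, 37, 91],
                [28, 30, 32, 34, 144], [10, 10, 10, 10, 10]]:
    print(numbers, '=', modes(numbers) or 'none')
```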
from flask import Flask, jsonify, request
import requests, json, random
from bs4 import BeautifulSoup
import gspread
import pandas as pd
import dataservices as dss
from oauth2client.service_account import ServiceAccountCredentials
# page = requests.get("https://www.worldometers.info/coronavirus/")
# soup = BeautifulSoup(page.content, 'html.parser')
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
# Initialize application
app = Flask(__name__)
@app.route("/")
def hello():
return "Flask setup"
def sheets_row_writer(data_list):
print("sheets method invoked")
credentials = ServiceAccountCredentials.from_json_keyfile_name('mechnepal-test-54c4387178d9.json', scope)
client = gspread.authorize(credentials)
sh = client.open('corona-help-resource-management')
worksheet = sh.get_worksheet(1)
# worksheet = client.open('corona-help-resource-management').BloodPal
worksheet.append_row(data_list)
print("Write complete")
def sheets_row_writer_donor(data_list_donor):
print("donor sheets method invoked")
credentials = ServiceAccountCredentials.from_json_keyfile_name('mechnepal-test-54c4387178d9.json', scope)
client = gspread.authorize(credentials)
sh = client.open('corona-help-resource-management')
worksheet = sh.get_worksheet(2)
# worksheet = client.open('corona-help-resource-management').BloodPal
worksheet.append_row(data_list_donor)
print("Write complete")
def death_global():
page = requests.get("https://www.worldometers.info/coronavirus/")
soup = BeautifulSoup(page.content, 'html.parser')
result = soup.find_all("div", {"class":"maincounter-number"})
cases_list = []
active = soup.find("div", {"class":"number-table-main"})
active_cases = active.text
for res in result:
cases_list.append(res.text)
return "There are"+cases_list[0]+" Total cases out of which"+cases_list[1]+" have died and"+cases_list[2]+" have recovered . There are still "+active_cases+" active cases."
app.route("/death/global", methods=['POST'])
def death_global_api():
data = request.get_json(silent=True)
page = requests.get("https://www.worldometers.info/coronavirus/")
response = death_global()
reply = { "fulfillmentText": response }
return jsonify(reply)
def death_country(id):
idu = id.upper()
page = requests.get("https://www.worldometers.info/coronavirus/country/"+id+"/")
soup = BeautifulSoup(page.content, 'html.parser')
result = soup.find_all("div", {"class":"maincounter-number"})
active = soup.find("div", {"class":"number-table-main"})
active_cases = active.text
cases_list = []
for res in result:
cases_list.append(res.text)
return "In " +idu+" There are"+cases_list[0]+"Total cases out of which"+cases_list[1]+"are dead and"+cases_list[2]+"have already recovered . There are still "+active_cases+ " active cases ."
@app.route('/get_country_detail', methods=['POST'])
def get_country_detail():
data = request.get_json(silent=True)
intent = data['queryResult']['intent']['displayName']
print (intent)
def news_nepal_int():
url = "https://nepalcorona.info/api/v1/news"
response = requests.get(url)
news = json.loads(response.text)
data = news['data']
data1 = data[0]
data2 = data[1]
data3 = data[2]
response2 = [{
"card":{
"title":data1['title'],
"subtitle":"Source: "+data1['source']+" >>",
"imageUri":data1['image_url'],
"buttons":[
{
"text":"Read Full Story",
"postback":data1['url']
},
{
"text":"Corona Symptoms",
"postback":"symptoms"
}
]
},
"platform":"FACEBOOK"
},
{
"card":{
"title":data2['title'],
"subtitle":"Source "+data2['source']+" >>",
"imageUri":data2['image_url'],
"buttons":[
{
"text":"Read Full Story",
"postback":data2['url']
},
{
"text":"Live Nepal Data",
"postback":"live-nepal-data"
}
]
},
"platform":"FACEBOOK"
},
{
"card":{
"title":data3['title'],
"subtitle":"Source "+data3['source']+" >>",
"imageUri":data3['image_url'],
"buttons":[
{
"text":"Read Full Story",
"postback":data3['url']
},
{
"text":"Self Isolation",
"postback":"self isolation"
}
]
},
"platform":"FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
},
]
reply = { "fulfillmentMessages": response2 }
return reply
def i_need_help_yes():
name = data['queryResult']['parameters']['name-people']
place = data['queryResult']['parameters']['name-place']
item_required = data['queryResult']['parameters']['help-ent']
phone = data['queryResult']['parameters']['phone-number']
ilist = [item_required[0],name[0],phone[0],place[0]]
sheets_row_writer(ilist)
response2 = "Hello "+name[0]+" so you are looking for "+item_required[0]+" Your location is "+place[0]+" One of our Team will contact you @ " +phone[0]+" soon !"
response = [
{
"quickReplies": {
"title": response2,
"quickReplies": [
"Call a Doctor",
"Get Online Support"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
def faq_ques_ans():
ff = data['originalDetectIntentRequest']['payload']['data']['message']['text']
url = "https://nepalcorona.info/api/v1/faqs"
response = requests.get(url)
todos = json.loads(response.text)
rand = random.randrange(0, 45, 1)
opt3 = ["Live Nepali Data","Latest Nepali News","Symptoms","Preventions","Self Isolation","Play Corona Quiz"]
faqs = todos['data']
faq = faqs[rand]
if(ff=="English FAQ" or ff =="More Quizzles" or ff =="भाषा परिवर्तन"):
randq= faq['question']
randa = faq['answer']
opt1 = "More Quizzles"
opt2 = "Switch Language"
else:
randq = faq['question_np']
randa = faq['answer_np']
opt1 = "अरु देखाउनुहोस >>"
opt2 = "भाषा परिवर्तन"
response2 = "Q. "+randq+"\n A. "+randa+"\n"
response = [{
"text": {
"text": [
randq
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
},
{
"quickReplies": {
"title": randa,
"quickReplies": [
opt1,
opt2,
random.choice(opt3)
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
def blood_pal_yes():
print (intent)
print (data)
blood_group = data['queryResult']['parameters']['blood-group']
blood_amount = data['queryResult']['parameters']['blood-pint']
location = data['queryResult']['parameters']['blood-location']
case = data['queryResult']['parameters']['blood-case']
date = data['queryResult']['parameters']['blood-date']
phone = data['queryResult']['parameters']['blood-number']
ilist = [blood_group,blood_amount,location,case,date,phone]
sheets_row_writer(ilist)
response3 = "For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!"
response = "The following request has been sent. We will contact you shortly. "+blood_group+" blood ("+str(blood_amount)+" ) required for "+case+" at "+location+" On "+date+" - "+phone+" Thank you ."
response2 = [{
"text": {
"text": [
response
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
},
{
"text": {
"text": [
response3
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response2 }
return reply
def blood_pal_donor_yes():
print (intent)
print (data)
permananet_address = data['queryResult']['parameters']['permananet-address']
height = data['queryResult']['parameters']['height']
gender = data['queryResult']['parameters']['gender']
age = data['queryResult']['parameters']['age']
blood = data['queryResult']['parameters']['blood']
current_address = data['queryResult']['parameters']['current-address']
email = data['queryResult']['parameters']['email']
name = data['queryResult']['parameters']['name']
last_donation= data['queryResult']['parameters']['last-donation']
weight = data['queryResult']['parameters']['weight']
number = data['queryResult']['parameters']['number']
ilist = [name,number,email,current_address,permananet_address,age,height,weight,gender,blood,last_donation]
sheets_row_writer_donor(ilist)
response3 = "For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!"
response = "Thank you "+name+" for registration as a blood donor We will contact you at the time of urgency in your area."
response2 = [{
"text": {
"text": [
response
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
},
{
"text": {
"text": [
response3
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response2 }
return reply
def world_data_live():
text = death_global()
response = [
{
"quickReplies": {
"title": text,
"quickReplies": [
"Provience Data",
"Nepali News",
"World Data",
"Symptoms",
"Corona FAQ's",
"Corona Quiz"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
#district summary all
def district_all_summary():
text = dss.district_all_summary()
response = [
{
"quickReplies": {
"title": text,
"quickReplies": [
"Provience Summary",
"Nepali News",
"World Data",
"Symptoms",
"Corona FAQ's",
"Corona Quiz"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
#provience summary all should remove
def province_all_summary():
text = dss.provience_all_summary()
print(text)
response = [
{
"quickReplies": {
"title": text,
"quickReplies": [
"District-Summary",
"Province-Data",
"World Data",
"Preventions",
"Corona FAQ's",
"Corona Quiz"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
def proviencewise_detail():
#get provience name
#return dss.ard(provience)
#card
pcode = data['queryResult']['parameters']['custom-province-ent']
province = int(pcode)
print(type(province))
response_summary = dss.ardp(province)
print(response_summary)
response = [
{
"card":{
"title": "Covid-19 Provience: "+str(province)+" | Details",
"subtitle":response_summary,
"imageUri": "https://setopati.net/wp-content/uploads/2018/02/province6.jpg",
"buttons":[
{
"text":"Prov "+str(province)+" District Data",
"postback":"dis-vdc data detail int"
},
{
"text":"Prov "+str(province)+" Vdc-Mun Data",
"postback":"dis-vdc data detail int"
},
{
"text":"Latest Nepali News",
"postback":"news-nepal-int"
}
]
},
"platform":"FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
},
]
reply = { "fulfillmentMessages": response }
return reply
def dis_vdc_detail():
cod = data['queryResult']['parameters']['custom-province-ent']
dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']
print(type(dvdc))
print(dvdc)
code = int(cod)
print(type(code))
# provincecode = pcode
if(dvdc=="vdc"):
print('inside vdc')
typ = "vdc"
else:
print('inside district')
typ = "district"
data_return = dss.ard(code,typ)
response = [
{
"quickReplies": {
"title": data_return,
"quickReplies": [
"District Summary",
"Province Summary",
"Nepali News",
"World Data",
"Preventions",
"Corona FAQ's",
"Corona Quiz"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
def nepal_data_new_main_int():
url = "https://nepalcorona.info/api/v1/data/nepal"
response = requests.get(url)
todos = json.loads(response.text)
covid_df = dss.create_covid_df()
response2 = "Nepal Cases \n Positive :"+str(todos["tested_positive"])+" | Recovered: "+str(todos["recovered"])+"| Deaths:"+str(todos["deaths"])+" "+"\n"
print(response2)
response_summary = dss.affected_summary()
response = [
{
"text": {
"text": [
response2
]
},
"platform": "FACEBOOK"
},
{
"text": {
"text": [
""
]
}
},
{
"card":{
"title": "Covid-19 Nepal | Stats",
"subtitle":response_summary,
# "subtitle": "Find details by Province, Municipals and Districts for Nepal",
"imageUri": "https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png",
"buttons":[
{
"text":"Province Summary",
"postback":"province data int"
},
{
"text":"District-Summary",
"postback":"district data int"
},
{
"text":"Latest Nepali News",
"postback":"news-nepal-int"
}
]
},
"platform":"FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
},
]
reply = { "fulfillmentMessages": response }
return reply
def batti_update():
url = "https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM"
response = requests.get(url)
todos = json.loads(response.text)
feeds = todos["feeds"][0]
response2 = "Batti Status Now :"+str(feeds["field1"]+"\n Last Updated: "+str(feeds["created_at"]))
print(response2)
reply = { "fulfillmentText": response2 }
return reply
def default():
return "Incorrect Data"
switcher = {
"nepal data int": nepal_data_new_main_int,
"news-nepal-int": news_nepal_int,
"i need help main int - yes": i_need_help_yes,
"faq-que-ans-int": faq_ques_ans,
"bloodpal-need-blood-main-int - yes": blood_pal_yes,
"data world int": world_data_live,
"district data int": district_all_summary,
"province data int": province_all_summary,
"province-wise-data": proviencewise_detail,
"dis-vdc data detail int": dis_vdc_detail,
"bloodpal-become-donor-main-int":blood_pal_donor_yes,
"batti-update-intent":batti_update
}
def switch(intentname):
return switcher.get(intentname, default)()
reply = switch(intent)
return jsonify(reply)
if __name__ == '__main__':
app.run()
|
normal
|
{
"blob_id": "267cb37f2ccad5b02a809d9b85327eacd9a49515",
"index": 1061,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef hello():\n return 'Flask setup'\n\n\ndef sheets_row_writer(data_list):\n print('sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(1)\n worksheet.append_row(data_list)\n print('Write complete')\n\n\ndef sheets_row_writer_donor(data_list_donor):\n print('donor sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n worksheet.append_row(data_list_donor)\n print('Write complete')\n\n\n<mask token>\n\n\ndef death_global_api():\n data = request.get_json(silent=True)\n page = requests.get('https://www.worldometers.info/coronavirus/')\n response = death_global()\n reply = {'fulfillmentText': response}\n return jsonify(reply)\n\n\n<mask token>\n\n\[email protected]('/get_country_detail', methods=['POST'])\ndef get_country_detail():\n data = request.get_json(silent=True)\n intent = data['queryResult']['intent']['displayName']\n print(intent)\n\n def news_nepal_int():\n url = 'https://nepalcorona.info/api/v1/news'\n response = requests.get(url)\n news = json.loads(response.text)\n data = news['data']\n data1 = data[0]\n data2 = data[1]\n data3 = data[2]\n response2 = [{'card': {'title': data1['title'], 'subtitle': \n 'Source: ' + data1['source'] + ' >>', 'imageUri': data1[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data1['url']}, {'text': 'Corona Symptoms',\n 'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {\n 'title': data2['title'], 'subtitle': 'Source ' + data2['source'\n ] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':\n 'Read Full Story', 'postback': data2['url']}, {'text':\n 'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':\n 'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle': \n 'Source ' + data3['source'] + ' >>', 'imageUri': data3[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data3['url']}, {'text': 'Self Isolation',\n 'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def i_need_help_yes():\n name = data['queryResult']['parameters']['name-people']\n place = data['queryResult']['parameters']['name-place']\n item_required = data['queryResult']['parameters']['help-ent']\n phone = data['queryResult']['parameters']['phone-number']\n ilist = [item_required[0], name[0], phone[0], place[0]]\n sheets_row_writer(ilist)\n response2 = 'Hello ' + name[0\n ] + ' so you are looking for ' + item_required[0\n ] + ' Your location is ' + place[0\n ] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'\n response = [{'quickReplies': {'title': response2, 'quickReplies': [\n 'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'\n }, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def faq_ques_ans():\n ff = data['originalDetectIntentRequest']['payload']['data']['message'][\n 'text']\n url = 'https://nepalcorona.info/api/v1/faqs'\n response = requests.get(url)\n todos = json.loads(response.text)\n rand = random.randrange(0, 45, 1)\n opt3 = ['Live Nepali Data', 
'Latest Nepali News', 'Symptoms',\n 'Preventions', 'Self Isolation', 'Play Corona Quiz']\n faqs = todos['data']\n faq = faqs[rand]\n if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==\n 'भाषा परिवर्तन'):\n randq = faq['question']\n randa = faq['answer']\n opt1 = 'More Quizzles'\n opt2 = 'Switch Language'\n else:\n randq = faq['question_np']\n randa = faq['answer_np']\n opt1 = 'अरु देखाउनुहोस >>'\n opt2 = 'भाषा परिवर्तन'\n response2 = 'Q. ' + randq + '\\n A. ' + randa + '\\n'\n response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':\n randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def blood_pal_yes():\n print(intent)\n print(data)\n blood_group = data['queryResult']['parameters']['blood-group']\n blood_amount = data['queryResult']['parameters']['blood-pint']\n location = data['queryResult']['parameters']['blood-location']\n case = data['queryResult']['parameters']['blood-case']\n date = data['queryResult']['parameters']['blood-date']\n phone = data['queryResult']['parameters']['blood-number']\n ilist = [blood_group, blood_amount, location, case, date, phone]\n sheets_row_writer(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = (\n 'The following request has been sent. We will contact you shortly. '\n + blood_group + ' blood (' + str(blood_amount) +\n ' ) required for ' + case + ' at ' + location + ' On ' + date +\n ' - ' + phone + ' Thank you .')\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def blood_pal_donor_yes():\n print(intent)\n print(data)\n permananet_address = data['queryResult']['parameters'][\n 'permananet-address']\n height = data['queryResult']['parameters']['height']\n gender = data['queryResult']['parameters']['gender']\n age = data['queryResult']['parameters']['age']\n blood = data['queryResult']['parameters']['blood']\n current_address = data['queryResult']['parameters']['current-address']\n email = data['queryResult']['parameters']['email']\n name = data['queryResult']['parameters']['name']\n last_donation = data['queryResult']['parameters']['last-donation']\n weight = data['queryResult']['parameters']['weight']\n number = data['queryResult']['parameters']['number']\n ilist = [name, number, email, current_address, permananet_address,\n age, height, weight, gender, blood, last_donation]\n sheets_row_writer_donor(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = ('Thank you ' + name +\n ' for registration as a blood donor We will contact you at the time of urgency in your area.'\n )\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = 
{'fulfillmentMessages': response2}\n return reply\n\n def world_data_live():\n text = death_global()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Data', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def district_all_summary():\n text = dss.district_all_summary()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def province_all_summary():\n text = dss.provience_all_summary()\n print(text)\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'District-Summary', 'Province-Data', 'World Data',\n 'Preventions', \"Corona FAQ's\", 'Corona Quiz']}, 'platform':\n 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def proviencewise_detail():\n pcode = data['queryResult']['parameters']['custom-province-ent']\n province = int(pcode)\n print(type(province))\n response_summary = dss.ardp(province)\n print(response_summary)\n response = [{'card': {'title': 'Covid-19 Provience: ' + str(\n province) + ' | Details', 'subtitle': response_summary,\n 'imageUri':\n 'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',\n 'buttons': [{'text': 'Prov ' + str(province) + ' District Data',\n 'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(\n province) + ' Vdc-Mun Data', 'postback':\n 'dis-vdc data detail int'}, {'text': 'Latest Nepali News',\n 'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def dis_vdc_detail():\n cod = data['queryResult']['parameters']['custom-province-ent']\n dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']\n print(type(dvdc))\n print(dvdc)\n code = int(cod)\n print(type(code))\n if dvdc == 'vdc':\n print('inside vdc')\n typ = 'vdc'\n else:\n print('inside district')\n typ = 'district'\n data_return = dss.ard(code, typ)\n response = [{'quickReplies': {'title': data_return, 'quickReplies':\n ['District Summary', 'Province Summary', 'Nepali News',\n 'World Data', 'Preventions', \"Corona FAQ's\", 'Corona Quiz']},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def nepal_data_new_main_int():\n url = 'https://nepalcorona.info/api/v1/data/nepal'\n response = requests.get(url)\n todos = json.loads(response.text)\n covid_df = dss.create_covid_df()\n response2 = 'Nepal Cases \\n Positive :' + str(todos['tested_positive']\n ) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(\n todos['deaths']) + ' ' + '\\n'\n print(response2)\n response_summary = dss.affected_summary()\n response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['']}}, {'card': {'title':\n 'Covid-19 Nepal | Stats', 'subtitle': response_summary,\n 'imageUri':\n 'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'\n , 'buttons': [{'text': 'Province Summary', 'postback':\n 'province data int'}, {'text': 'District-Summary', 'postback':\n 'district data int'}, {'text': 'Latest Nepali News', 'postback':\n 
'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':\n ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def batti_update():\n url = (\n 'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'\n )\n response = requests.get(url)\n todos = json.loads(response.text)\n feeds = todos['feeds'][0]\n response2 = 'Batti Status Now :' + str(feeds['field1'] +\n '\\n Last Updated: ' + str(feeds['created_at']))\n print(response2)\n reply = {'fulfillmentText': response2}\n return reply\n\n def default():\n return 'Incorrect Data'\n switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':\n news_nepal_int, 'i need help main int - yes': i_need_help_yes,\n 'faq-que-ans-int': faq_ques_ans,\n 'bloodpal-need-blood-main-int - yes': blood_pal_yes,\n 'data world int': world_data_live, 'district data int':\n district_all_summary, 'province data int': province_all_summary,\n 'province-wise-data': proviencewise_detail,\n 'dis-vdc data detail int': dis_vdc_detail,\n 'bloodpal-become-donor-main-int': blood_pal_donor_yes,\n 'batti-update-intent': batti_update}\n\n def switch(intentname):\n return switcher.get(intentname, default)()\n reply = switch(intent)\n return jsonify(reply)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/')\ndef hello():\n return 'Flask setup'\n\n\ndef sheets_row_writer(data_list):\n print('sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(1)\n worksheet.append_row(data_list)\n print('Write complete')\n\n\ndef sheets_row_writer_donor(data_list_donor):\n print('donor sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n worksheet.append_row(data_list_donor)\n print('Write complete')\n\n\n<mask token>\n\n\ndef death_global_api():\n data = request.get_json(silent=True)\n page = requests.get('https://www.worldometers.info/coronavirus/')\n response = death_global()\n reply = {'fulfillmentText': response}\n return jsonify(reply)\n\n\ndef death_country(id):\n idu = id.upper()\n page = requests.get(\n 'https://www.worldometers.info/coronavirus/country/' + id + '/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n cases_list = []\n for res in result:\n cases_list.append(res.text)\n return ('In ' + idu + ' There are' + cases_list[0] +\n 'Total cases out of which' + cases_list[1] + 'are dead and' +\n cases_list[2] + 'have already recovered . There are still ' +\n active_cases + ' active cases .')\n\n\[email protected]('/get_country_detail', methods=['POST'])\ndef get_country_detail():\n data = request.get_json(silent=True)\n intent = data['queryResult']['intent']['displayName']\n print(intent)\n\n def news_nepal_int():\n url = 'https://nepalcorona.info/api/v1/news'\n response = requests.get(url)\n news = json.loads(response.text)\n data = news['data']\n data1 = data[0]\n data2 = data[1]\n data3 = data[2]\n response2 = [{'card': {'title': data1['title'], 'subtitle': \n 'Source: ' + data1['source'] + ' >>', 'imageUri': data1[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data1['url']}, {'text': 'Corona Symptoms',\n 'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {\n 'title': data2['title'], 'subtitle': 'Source ' + data2['source'\n ] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':\n 'Read Full Story', 'postback': data2['url']}, {'text':\n 'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':\n 'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle': \n 'Source ' + data3['source'] + ' >>', 'imageUri': data3[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data3['url']}, {'text': 'Self Isolation',\n 'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def i_need_help_yes():\n name = data['queryResult']['parameters']['name-people']\n place = data['queryResult']['parameters']['name-place']\n item_required = data['queryResult']['parameters']['help-ent']\n phone = data['queryResult']['parameters']['phone-number']\n ilist = [item_required[0], name[0], phone[0], place[0]]\n sheets_row_writer(ilist)\n response2 = 'Hello ' + name[0\n ] + ' so you are looking for ' + item_required[0\n ] + ' Your 
location is ' + place[0\n ] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'\n response = [{'quickReplies': {'title': response2, 'quickReplies': [\n 'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'\n }, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def faq_ques_ans():\n ff = data['originalDetectIntentRequest']['payload']['data']['message'][\n 'text']\n url = 'https://nepalcorona.info/api/v1/faqs'\n response = requests.get(url)\n todos = json.loads(response.text)\n rand = random.randrange(0, 45, 1)\n opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',\n 'Preventions', 'Self Isolation', 'Play Corona Quiz']\n faqs = todos['data']\n faq = faqs[rand]\n if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==\n 'भाषा परिवर्तन'):\n randq = faq['question']\n randa = faq['answer']\n opt1 = 'More Quizzles'\n opt2 = 'Switch Language'\n else:\n randq = faq['question_np']\n randa = faq['answer_np']\n opt1 = 'अरु देखाउनुहोस >>'\n opt2 = 'भाषा परिवर्तन'\n response2 = 'Q. ' + randq + '\\n A. ' + randa + '\\n'\n response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':\n randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def blood_pal_yes():\n print(intent)\n print(data)\n blood_group = data['queryResult']['parameters']['blood-group']\n blood_amount = data['queryResult']['parameters']['blood-pint']\n location = data['queryResult']['parameters']['blood-location']\n case = data['queryResult']['parameters']['blood-case']\n date = data['queryResult']['parameters']['blood-date']\n phone = data['queryResult']['parameters']['blood-number']\n ilist = [blood_group, blood_amount, location, case, date, phone]\n sheets_row_writer(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = (\n 'The following request has been sent. We will contact you shortly. 
'\n + blood_group + ' blood (' + str(blood_amount) +\n ' ) required for ' + case + ' at ' + location + ' On ' + date +\n ' - ' + phone + ' Thank you .')\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def blood_pal_donor_yes():\n print(intent)\n print(data)\n permananet_address = data['queryResult']['parameters'][\n 'permananet-address']\n height = data['queryResult']['parameters']['height']\n gender = data['queryResult']['parameters']['gender']\n age = data['queryResult']['parameters']['age']\n blood = data['queryResult']['parameters']['blood']\n current_address = data['queryResult']['parameters']['current-address']\n email = data['queryResult']['parameters']['email']\n name = data['queryResult']['parameters']['name']\n last_donation = data['queryResult']['parameters']['last-donation']\n weight = data['queryResult']['parameters']['weight']\n number = data['queryResult']['parameters']['number']\n ilist = [name, number, email, current_address, permananet_address,\n age, height, weight, gender, blood, last_donation]\n sheets_row_writer_donor(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = ('Thank you ' + name +\n ' for registration as a blood donor We will contact you at the time of urgency in your area.'\n )\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def world_data_live():\n text = death_global()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Data', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def district_all_summary():\n text = dss.district_all_summary()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def province_all_summary():\n text = dss.provience_all_summary()\n print(text)\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'District-Summary', 'Province-Data', 'World Data',\n 'Preventions', \"Corona FAQ's\", 'Corona Quiz']}, 'platform':\n 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def proviencewise_detail():\n pcode = data['queryResult']['parameters']['custom-province-ent']\n province = int(pcode)\n print(type(province))\n response_summary = dss.ardp(province)\n print(response_summary)\n response = [{'card': {'title': 'Covid-19 Provience: ' + str(\n province) + ' | Details', 'subtitle': response_summary,\n 'imageUri':\n 'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',\n 'buttons': [{'text': 'Prov ' + str(province) + ' District Data',\n 'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + 
str(\n province) + ' Vdc-Mun Data', 'postback':\n 'dis-vdc data detail int'}, {'text': 'Latest Nepali News',\n 'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def dis_vdc_detail():\n cod = data['queryResult']['parameters']['custom-province-ent']\n dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']\n print(type(dvdc))\n print(dvdc)\n code = int(cod)\n print(type(code))\n if dvdc == 'vdc':\n print('inside vdc')\n typ = 'vdc'\n else:\n print('inside district')\n typ = 'district'\n data_return = dss.ard(code, typ)\n response = [{'quickReplies': {'title': data_return, 'quickReplies':\n ['District Summary', 'Province Summary', 'Nepali News',\n 'World Data', 'Preventions', \"Corona FAQ's\", 'Corona Quiz']},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def nepal_data_new_main_int():\n url = 'https://nepalcorona.info/api/v1/data/nepal'\n response = requests.get(url)\n todos = json.loads(response.text)\n covid_df = dss.create_covid_df()\n response2 = 'Nepal Cases \\n Positive :' + str(todos['tested_positive']\n ) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(\n todos['deaths']) + ' ' + '\\n'\n print(response2)\n response_summary = dss.affected_summary()\n response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['']}}, {'card': {'title':\n 'Covid-19 Nepal | Stats', 'subtitle': response_summary,\n 'imageUri':\n 'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'\n , 'buttons': [{'text': 'Province Summary', 'postback':\n 'province data int'}, {'text': 'District-Summary', 'postback':\n 'district data int'}, {'text': 'Latest Nepali News', 'postback':\n 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':\n ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def batti_update():\n url = (\n 'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'\n )\n response = requests.get(url)\n todos = json.loads(response.text)\n feeds = todos['feeds'][0]\n response2 = 'Batti Status Now :' + str(feeds['field1'] +\n '\\n Last Updated: ' + str(feeds['created_at']))\n print(response2)\n reply = {'fulfillmentText': response2}\n return reply\n\n def default():\n return 'Incorrect Data'\n switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':\n news_nepal_int, 'i need help main int - yes': i_need_help_yes,\n 'faq-que-ans-int': faq_ques_ans,\n 'bloodpal-need-blood-main-int - yes': blood_pal_yes,\n 'data world int': world_data_live, 'district data int':\n district_all_summary, 'province data int': province_all_summary,\n 'province-wise-data': proviencewise_detail,\n 'dis-vdc data detail int': dis_vdc_detail,\n 'bloodpal-become-donor-main-int': blood_pal_donor_yes,\n 'batti-update-intent': batti_update}\n\n def switch(intentname):\n return switcher.get(intentname, default)()\n reply = switch(intent)\n return jsonify(reply)\n\n\n<mask token>\n",
"step-3": "<mask token>\nscope = ['https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/drive']\napp = Flask(__name__)\n\n\[email protected]('/')\ndef hello():\n return 'Flask setup'\n\n\ndef sheets_row_writer(data_list):\n print('sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(1)\n worksheet.append_row(data_list)\n print('Write complete')\n\n\ndef sheets_row_writer_donor(data_list_donor):\n print('donor sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n worksheet.append_row(data_list_donor)\n print('Write complete')\n\n\ndef death_global():\n page = requests.get('https://www.worldometers.info/coronavirus/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n cases_list = []\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n for res in result:\n cases_list.append(res.text)\n return 'There are' + cases_list[0\n ] + ' Total cases out of which' + cases_list[1\n ] + ' have died and' + cases_list[2\n ] + ' have recovered . There are still ' + active_cases + ' active cases.'\n\n\napp.route('/death/global', methods=['POST'])\n\n\ndef death_global_api():\n data = request.get_json(silent=True)\n page = requests.get('https://www.worldometers.info/coronavirus/')\n response = death_global()\n reply = {'fulfillmentText': response}\n return jsonify(reply)\n\n\ndef death_country(id):\n idu = id.upper()\n page = requests.get(\n 'https://www.worldometers.info/coronavirus/country/' + id + '/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n cases_list = []\n for res in result:\n cases_list.append(res.text)\n return ('In ' + idu + ' There are' + cases_list[0] +\n 'Total cases out of which' + cases_list[1] + 'are dead and' +\n cases_list[2] + 'have already recovered . 
There are still ' +\n active_cases + ' active cases .')\n\n\[email protected]('/get_country_detail', methods=['POST'])\ndef get_country_detail():\n data = request.get_json(silent=True)\n intent = data['queryResult']['intent']['displayName']\n print(intent)\n\n def news_nepal_int():\n url = 'https://nepalcorona.info/api/v1/news'\n response = requests.get(url)\n news = json.loads(response.text)\n data = news['data']\n data1 = data[0]\n data2 = data[1]\n data3 = data[2]\n response2 = [{'card': {'title': data1['title'], 'subtitle': \n 'Source: ' + data1['source'] + ' >>', 'imageUri': data1[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data1['url']}, {'text': 'Corona Symptoms',\n 'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {\n 'title': data2['title'], 'subtitle': 'Source ' + data2['source'\n ] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':\n 'Read Full Story', 'postback': data2['url']}, {'text':\n 'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':\n 'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle': \n 'Source ' + data3['source'] + ' >>', 'imageUri': data3[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data3['url']}, {'text': 'Self Isolation',\n 'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def i_need_help_yes():\n name = data['queryResult']['parameters']['name-people']\n place = data['queryResult']['parameters']['name-place']\n item_required = data['queryResult']['parameters']['help-ent']\n phone = data['queryResult']['parameters']['phone-number']\n ilist = [item_required[0], name[0], phone[0], place[0]]\n sheets_row_writer(ilist)\n response2 = 'Hello ' + name[0\n ] + ' so you are looking for ' + item_required[0\n ] + ' Your location is ' + place[0\n ] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'\n response = [{'quickReplies': {'title': response2, 'quickReplies': [\n 'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'\n }, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def faq_ques_ans():\n ff = data['originalDetectIntentRequest']['payload']['data']['message'][\n 'text']\n url = 'https://nepalcorona.info/api/v1/faqs'\n response = requests.get(url)\n todos = json.loads(response.text)\n rand = random.randrange(0, 45, 1)\n opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',\n 'Preventions', 'Self Isolation', 'Play Corona Quiz']\n faqs = todos['data']\n faq = faqs[rand]\n if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==\n 'भाषा परिवर्तन'):\n randq = faq['question']\n randa = faq['answer']\n opt1 = 'More Quizzles'\n opt2 = 'Switch Language'\n else:\n randq = faq['question_np']\n randa = faq['answer_np']\n opt1 = 'अरु देखाउनुहोस >>'\n opt2 = 'भाषा परिवर्तन'\n response2 = 'Q. ' + randq + '\\n A. 
' + randa + '\\n'\n response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':\n randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def blood_pal_yes():\n print(intent)\n print(data)\n blood_group = data['queryResult']['parameters']['blood-group']\n blood_amount = data['queryResult']['parameters']['blood-pint']\n location = data['queryResult']['parameters']['blood-location']\n case = data['queryResult']['parameters']['blood-case']\n date = data['queryResult']['parameters']['blood-date']\n phone = data['queryResult']['parameters']['blood-number']\n ilist = [blood_group, blood_amount, location, case, date, phone]\n sheets_row_writer(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = (\n 'The following request has been sent. We will contact you shortly. '\n + blood_group + ' blood (' + str(blood_amount) +\n ' ) required for ' + case + ' at ' + location + ' On ' + date +\n ' - ' + phone + ' Thank you .')\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def blood_pal_donor_yes():\n print(intent)\n print(data)\n permananet_address = data['queryResult']['parameters'][\n 'permananet-address']\n height = data['queryResult']['parameters']['height']\n gender = data['queryResult']['parameters']['gender']\n age = data['queryResult']['parameters']['age']\n blood = data['queryResult']['parameters']['blood']\n current_address = data['queryResult']['parameters']['current-address']\n email = data['queryResult']['parameters']['email']\n name = data['queryResult']['parameters']['name']\n last_donation = data['queryResult']['parameters']['last-donation']\n weight = data['queryResult']['parameters']['weight']\n number = data['queryResult']['parameters']['number']\n ilist = [name, number, email, current_address, permananet_address,\n age, height, weight, gender, blood, last_donation]\n sheets_row_writer_donor(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = ('Thank you ' + name +\n ' for registration as a blood donor We will contact you at the time of urgency in your area.'\n )\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def world_data_live():\n text = death_global()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Data', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def district_all_summary():\n text = dss.district_all_summary()\n response = 
[{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def province_all_summary():\n text = dss.provience_all_summary()\n print(text)\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'District-Summary', 'Province-Data', 'World Data',\n 'Preventions', \"Corona FAQ's\", 'Corona Quiz']}, 'platform':\n 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def proviencewise_detail():\n pcode = data['queryResult']['parameters']['custom-province-ent']\n province = int(pcode)\n print(type(province))\n response_summary = dss.ardp(province)\n print(response_summary)\n response = [{'card': {'title': 'Covid-19 Provience: ' + str(\n province) + ' | Details', 'subtitle': response_summary,\n 'imageUri':\n 'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',\n 'buttons': [{'text': 'Prov ' + str(province) + ' District Data',\n 'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(\n province) + ' Vdc-Mun Data', 'postback':\n 'dis-vdc data detail int'}, {'text': 'Latest Nepali News',\n 'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def dis_vdc_detail():\n cod = data['queryResult']['parameters']['custom-province-ent']\n dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']\n print(type(dvdc))\n print(dvdc)\n code = int(cod)\n print(type(code))\n if dvdc == 'vdc':\n print('inside vdc')\n typ = 'vdc'\n else:\n print('inside district')\n typ = 'district'\n data_return = dss.ard(code, typ)\n response = [{'quickReplies': {'title': data_return, 'quickReplies':\n ['District Summary', 'Province Summary', 'Nepali News',\n 'World Data', 'Preventions', \"Corona FAQ's\", 'Corona Quiz']},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def nepal_data_new_main_int():\n url = 'https://nepalcorona.info/api/v1/data/nepal'\n response = requests.get(url)\n todos = json.loads(response.text)\n covid_df = dss.create_covid_df()\n response2 = 'Nepal Cases \\n Positive :' + str(todos['tested_positive']\n ) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(\n todos['deaths']) + ' ' + '\\n'\n print(response2)\n response_summary = dss.affected_summary()\n response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['']}}, {'card': {'title':\n 'Covid-19 Nepal | Stats', 'subtitle': response_summary,\n 'imageUri':\n 'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'\n , 'buttons': [{'text': 'Province Summary', 'postback':\n 'province data int'}, {'text': 'District-Summary', 'postback':\n 'district data int'}, {'text': 'Latest Nepali News', 'postback':\n 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':\n ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def batti_update():\n url = (\n 'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'\n )\n response = requests.get(url)\n todos = json.loads(response.text)\n feeds = todos['feeds'][0]\n response2 = 'Batti Status Now :' + 
str(feeds['field1'] +\n '\\n Last Updated: ' + str(feeds['created_at']))\n print(response2)\n reply = {'fulfillmentText': response2}\n return reply\n\n def default():\n return 'Incorrect Data'\n switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':\n news_nepal_int, 'i need help main int - yes': i_need_help_yes,\n 'faq-que-ans-int': faq_ques_ans,\n 'bloodpal-need-blood-main-int - yes': blood_pal_yes,\n 'data world int': world_data_live, 'district data int':\n district_all_summary, 'province data int': province_all_summary,\n 'province-wise-data': proviencewise_detail,\n 'dis-vdc data detail int': dis_vdc_detail,\n 'bloodpal-become-donor-main-int': blood_pal_donor_yes,\n 'batti-update-intent': batti_update}\n\n def switch(intentname):\n return switcher.get(intentname, default)()\n reply = switch(intent)\n return jsonify(reply)\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-4": "from flask import Flask, jsonify, request\nimport requests, json, random\nfrom bs4 import BeautifulSoup\nimport gspread\nimport pandas as pd\nimport dataservices as dss\nfrom oauth2client.service_account import ServiceAccountCredentials\nscope = ['https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/drive']\napp = Flask(__name__)\n\n\[email protected]('/')\ndef hello():\n return 'Flask setup'\n\n\ndef sheets_row_writer(data_list):\n print('sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(1)\n worksheet.append_row(data_list)\n print('Write complete')\n\n\ndef sheets_row_writer_donor(data_list_donor):\n print('donor sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n worksheet.append_row(data_list_donor)\n print('Write complete')\n\n\ndef death_global():\n page = requests.get('https://www.worldometers.info/coronavirus/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n cases_list = []\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n for res in result:\n cases_list.append(res.text)\n return 'There are' + cases_list[0\n ] + ' Total cases out of which' + cases_list[1\n ] + ' have died and' + cases_list[2\n ] + ' have recovered . There are still ' + active_cases + ' active cases.'\n\n\napp.route('/death/global', methods=['POST'])\n\n\ndef death_global_api():\n data = request.get_json(silent=True)\n page = requests.get('https://www.worldometers.info/coronavirus/')\n response = death_global()\n reply = {'fulfillmentText': response}\n return jsonify(reply)\n\n\ndef death_country(id):\n idu = id.upper()\n page = requests.get(\n 'https://www.worldometers.info/coronavirus/country/' + id + '/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n cases_list = []\n for res in result:\n cases_list.append(res.text)\n return ('In ' + idu + ' There are' + cases_list[0] +\n 'Total cases out of which' + cases_list[1] + 'are dead and' +\n cases_list[2] + 'have already recovered . 
There are still ' +\n active_cases + ' active cases .')\n\n\[email protected]('/get_country_detail', methods=['POST'])\ndef get_country_detail():\n data = request.get_json(silent=True)\n intent = data['queryResult']['intent']['displayName']\n print(intent)\n\n def news_nepal_int():\n url = 'https://nepalcorona.info/api/v1/news'\n response = requests.get(url)\n news = json.loads(response.text)\n data = news['data']\n data1 = data[0]\n data2 = data[1]\n data3 = data[2]\n response2 = [{'card': {'title': data1['title'], 'subtitle': \n 'Source: ' + data1['source'] + ' >>', 'imageUri': data1[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data1['url']}, {'text': 'Corona Symptoms',\n 'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {\n 'title': data2['title'], 'subtitle': 'Source ' + data2['source'\n ] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':\n 'Read Full Story', 'postback': data2['url']}, {'text':\n 'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':\n 'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle': \n 'Source ' + data3['source'] + ' >>', 'imageUri': data3[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data3['url']}, {'text': 'Self Isolation',\n 'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def i_need_help_yes():\n name = data['queryResult']['parameters']['name-people']\n place = data['queryResult']['parameters']['name-place']\n item_required = data['queryResult']['parameters']['help-ent']\n phone = data['queryResult']['parameters']['phone-number']\n ilist = [item_required[0], name[0], phone[0], place[0]]\n sheets_row_writer(ilist)\n response2 = 'Hello ' + name[0\n ] + ' so you are looking for ' + item_required[0\n ] + ' Your location is ' + place[0\n ] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'\n response = [{'quickReplies': {'title': response2, 'quickReplies': [\n 'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'\n }, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def faq_ques_ans():\n ff = data['originalDetectIntentRequest']['payload']['data']['message'][\n 'text']\n url = 'https://nepalcorona.info/api/v1/faqs'\n response = requests.get(url)\n todos = json.loads(response.text)\n rand = random.randrange(0, 45, 1)\n opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',\n 'Preventions', 'Self Isolation', 'Play Corona Quiz']\n faqs = todos['data']\n faq = faqs[rand]\n if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==\n 'भाषा परिवर्तन'):\n randq = faq['question']\n randa = faq['answer']\n opt1 = 'More Quizzles'\n opt2 = 'Switch Language'\n else:\n randq = faq['question_np']\n randa = faq['answer_np']\n opt1 = 'अरु देखाउनुहोस >>'\n opt2 = 'भाषा परिवर्तन'\n response2 = 'Q. ' + randq + '\\n A. 
' + randa + '\\n'\n response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':\n randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def blood_pal_yes():\n print(intent)\n print(data)\n blood_group = data['queryResult']['parameters']['blood-group']\n blood_amount = data['queryResult']['parameters']['blood-pint']\n location = data['queryResult']['parameters']['blood-location']\n case = data['queryResult']['parameters']['blood-case']\n date = data['queryResult']['parameters']['blood-date']\n phone = data['queryResult']['parameters']['blood-number']\n ilist = [blood_group, blood_amount, location, case, date, phone]\n sheets_row_writer(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = (\n 'The following request has been sent. We will contact you shortly. '\n + blood_group + ' blood (' + str(blood_amount) +\n ' ) required for ' + case + ' at ' + location + ' On ' + date +\n ' - ' + phone + ' Thank you .')\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def blood_pal_donor_yes():\n print(intent)\n print(data)\n permananet_address = data['queryResult']['parameters'][\n 'permananet-address']\n height = data['queryResult']['parameters']['height']\n gender = data['queryResult']['parameters']['gender']\n age = data['queryResult']['parameters']['age']\n blood = data['queryResult']['parameters']['blood']\n current_address = data['queryResult']['parameters']['current-address']\n email = data['queryResult']['parameters']['email']\n name = data['queryResult']['parameters']['name']\n last_donation = data['queryResult']['parameters']['last-donation']\n weight = data['queryResult']['parameters']['weight']\n number = data['queryResult']['parameters']['number']\n ilist = [name, number, email, current_address, permananet_address,\n age, height, weight, gender, blood, last_donation]\n sheets_row_writer_donor(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = ('Thank you ' + name +\n ' for registration as a blood donor We will contact you at the time of urgency in your area.'\n )\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def world_data_live():\n text = death_global()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Data', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def district_all_summary():\n text = dss.district_all_summary()\n response = 
[{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def province_all_summary():\n text = dss.provience_all_summary()\n print(text)\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'District-Summary', 'Province-Data', 'World Data',\n 'Preventions', \"Corona FAQ's\", 'Corona Quiz']}, 'platform':\n 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def proviencewise_detail():\n pcode = data['queryResult']['parameters']['custom-province-ent']\n province = int(pcode)\n print(type(province))\n response_summary = dss.ardp(province)\n print(response_summary)\n response = [{'card': {'title': 'Covid-19 Provience: ' + str(\n province) + ' | Details', 'subtitle': response_summary,\n 'imageUri':\n 'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',\n 'buttons': [{'text': 'Prov ' + str(province) + ' District Data',\n 'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(\n province) + ' Vdc-Mun Data', 'postback':\n 'dis-vdc data detail int'}, {'text': 'Latest Nepali News',\n 'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def dis_vdc_detail():\n cod = data['queryResult']['parameters']['custom-province-ent']\n dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']\n print(type(dvdc))\n print(dvdc)\n code = int(cod)\n print(type(code))\n if dvdc == 'vdc':\n print('inside vdc')\n typ = 'vdc'\n else:\n print('inside district')\n typ = 'district'\n data_return = dss.ard(code, typ)\n response = [{'quickReplies': {'title': data_return, 'quickReplies':\n ['District Summary', 'Province Summary', 'Nepali News',\n 'World Data', 'Preventions', \"Corona FAQ's\", 'Corona Quiz']},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def nepal_data_new_main_int():\n url = 'https://nepalcorona.info/api/v1/data/nepal'\n response = requests.get(url)\n todos = json.loads(response.text)\n covid_df = dss.create_covid_df()\n response2 = 'Nepal Cases \\n Positive :' + str(todos['tested_positive']\n ) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(\n todos['deaths']) + ' ' + '\\n'\n print(response2)\n response_summary = dss.affected_summary()\n response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['']}}, {'card': {'title':\n 'Covid-19 Nepal | Stats', 'subtitle': response_summary,\n 'imageUri':\n 'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'\n , 'buttons': [{'text': 'Province Summary', 'postback':\n 'province data int'}, {'text': 'District-Summary', 'postback':\n 'district data int'}, {'text': 'Latest Nepali News', 'postback':\n 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':\n ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def batti_update():\n url = (\n 'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'\n )\n response = requests.get(url)\n todos = json.loads(response.text)\n feeds = todos['feeds'][0]\n response2 = 'Batti Status Now :' + 
str(feeds['field1'] +\n '\\n Last Updated: ' + str(feeds['created_at']))\n print(response2)\n reply = {'fulfillmentText': response2}\n return reply\n\n def default():\n return 'Incorrect Data'\n switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':\n news_nepal_int, 'i need help main int - yes': i_need_help_yes,\n 'faq-que-ans-int': faq_ques_ans,\n 'bloodpal-need-blood-main-int - yes': blood_pal_yes,\n 'data world int': world_data_live, 'district data int':\n district_all_summary, 'province data int': province_all_summary,\n 'province-wise-data': proviencewise_detail,\n 'dis-vdc data detail int': dis_vdc_detail,\n 'bloodpal-become-donor-main-int': blood_pal_donor_yes,\n 'batti-update-intent': batti_update}\n\n def switch(intentname):\n return switcher.get(intentname, default)()\n reply = switch(intent)\n return jsonify(reply)\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-5": "from flask import Flask, jsonify, request\nimport requests, json, random\nfrom bs4 import BeautifulSoup\nimport gspread\nimport pandas as pd\nimport dataservices as dss\nfrom oauth2client.service_account import ServiceAccountCredentials\n# page = requests.get(\"https://www.worldometers.info/coronavirus/\")\n# soup = BeautifulSoup(page.content, 'html.parser')\nscope = ['https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/drive']\n\n# Initialize application\napp = Flask(__name__)\n\n\[email protected](\"/\")\ndef hello():\n return \"Flask setup\"\n\ndef sheets_row_writer(data_list):\n print(\"sheets method invoked\")\n credentials = ServiceAccountCredentials.from_json_keyfile_name('mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(1)\n # worksheet = client.open('corona-help-resource-management').BloodPal\n worksheet.append_row(data_list) \n print(\"Write complete\")\n\ndef sheets_row_writer_donor(data_list_donor):\n print(\"donor sheets method invoked\")\n credentials = ServiceAccountCredentials.from_json_keyfile_name('mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n # worksheet = client.open('corona-help-resource-management').BloodPal\n worksheet.append_row(data_list_donor) \n print(\"Write complete\")\n\ndef death_global():\n page = requests.get(\"https://www.worldometers.info/coronavirus/\")\n soup = BeautifulSoup(page.content, 'html.parser')\n \n result = soup.find_all(\"div\", {\"class\":\"maincounter-number\"})\n cases_list = []\n\n active = soup.find(\"div\", {\"class\":\"number-table-main\"})\n active_cases = active.text\n\n for res in result:\n cases_list.append(res.text)\n\n return \"There are\"+cases_list[0]+\" Total cases out of which\"+cases_list[1]+\" have died and\"+cases_list[2]+\" have recovered . There are still \"+active_cases+\" active cases.\"\n\napp.route(\"/death/global\", methods=['POST'])\ndef death_global_api():\n data = request.get_json(silent=True)\n page = requests.get(\"https://www.worldometers.info/coronavirus/\")\n response = death_global()\n reply = { \"fulfillmentText\": response } \n return jsonify(reply)\n \n\ndef death_country(id):\n idu = id.upper()\n page = requests.get(\"https://www.worldometers.info/coronavirus/country/\"+id+\"/\")\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all(\"div\", {\"class\":\"maincounter-number\"})\n \n active = soup.find(\"div\", {\"class\":\"number-table-main\"})\n active_cases = active.text\n cases_list = []\n for res in result:\n cases_list.append(res.text)\n\n return \"In \" +idu+\" There are\"+cases_list[0]+\"Total cases out of which\"+cases_list[1]+\"are dead and\"+cases_list[2]+\"have already recovered . 
There are still \"+active_cases+ \" active cases .\"\n\[email protected]('/get_country_detail', methods=['POST'])\ndef get_country_detail():\n data = request.get_json(silent=True)\n intent = data['queryResult']['intent']['displayName']\n print (intent)\n \n def news_nepal_int():\n url = \"https://nepalcorona.info/api/v1/news\"\n response = requests.get(url)\n news = json.loads(response.text)\n data = news['data']\n data1 = data[0]\n data2 = data[1]\n data3 = data[2]\n \n response2 = [{\n \"card\":{\n \"title\":data1['title'],\n \"subtitle\":\"Source: \"+data1['source']+\" >>\",\n \"imageUri\":data1['image_url'],\n \"buttons\":[\n {\n \"text\":\"Read Full Story\",\n \"postback\":data1['url']\n },\n {\n \"text\":\"Corona Symptoms\",\n \"postback\":\"symptoms\"\n }\n ]\n },\n \"platform\":\"FACEBOOK\"\n },\n {\n \"card\":{\n \"title\":data2['title'],\n \"subtitle\":\"Source \"+data2['source']+\" >>\",\n \"imageUri\":data2['image_url'],\n \"buttons\":[\n {\n \"text\":\"Read Full Story\",\n \"postback\":data2['url']\n },\n {\n \"text\":\"Live Nepal Data\",\n \"postback\":\"live-nepal-data\"\n }\n ]\n },\n \"platform\":\"FACEBOOK\"\n },\n {\n \"card\":{\n \"title\":data3['title'],\n \"subtitle\":\"Source \"+data3['source']+\" >>\",\n \"imageUri\":data3['image_url'],\n \"buttons\":[\n {\n \"text\":\"Read Full Story\",\n \"postback\":data3['url']\n },\n {\n \"text\":\"Self Isolation\",\n \"postback\":\"self isolation\"\n }\n ]\n },\n \"platform\":\"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n },\n\n ]\n\n reply = { \"fulfillmentMessages\": response2 }\n return reply\n \n def i_need_help_yes():\n name = data['queryResult']['parameters']['name-people']\n place = data['queryResult']['parameters']['name-place']\n item_required = data['queryResult']['parameters']['help-ent']\n phone = data['queryResult']['parameters']['phone-number']\n ilist = [item_required[0],name[0],phone[0],place[0]]\n sheets_row_writer(ilist)\n response2 = \"Hello \"+name[0]+\" so you are looking for \"+item_required[0]+\" Your location is \"+place[0]+\" One of our Team will contact you @ \" +phone[0]+\" soon !\"\n response = [\n\n {\n \"quickReplies\": {\n \"title\": response2,\n \"quickReplies\": [\n \"Call a Doctor\",\n \"Get Online Support\"\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n }\n \n ]\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n\n def faq_ques_ans():\n ff = data['originalDetectIntentRequest']['payload']['data']['message']['text']\n url = \"https://nepalcorona.info/api/v1/faqs\"\n response = requests.get(url)\n todos = json.loads(response.text)\n rand = random.randrange(0, 45, 1)\n opt3 = [\"Live Nepali Data\",\"Latest Nepali News\",\"Symptoms\",\"Preventions\",\"Self Isolation\",\"Play Corona Quiz\"]\n faqs = todos['data']\n faq = faqs[rand]\n if(ff==\"English FAQ\" or ff ==\"More Quizzles\" or ff ==\"भाषा परिवर्तन\"):\n randq= faq['question']\n randa = faq['answer']\n opt1 = \"More Quizzles\"\n opt2 = \"Switch Language\"\n else:\n randq = faq['question_np']\n randa = faq['answer_np']\n opt1 = \"अरु देखाउनुहोस >>\"\n opt2 = \"भाषा परिवर्तन\"\n\n response2 = \"Q. \"+randq+\"\\n A. 
\"+randa+\"\\n\"\n response = [{\n \"text\": {\n \"text\": [\n randq\n ]\n },\n \"platform\": \"FACEBOOK\"\n },{\n \"text\":{\"text\":[\"Dummy text\"]}\n },\n\n {\n \"quickReplies\": {\n \"title\": randa,\n \"quickReplies\": [\n opt1,\n opt2,\n random.choice(opt3)\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n }\n \n ]\n reply = { \"fulfillmentMessages\": response }\n\n return reply\n \n def blood_pal_yes():\n print (intent)\n print (data)\n blood_group = data['queryResult']['parameters']['blood-group']\n blood_amount = data['queryResult']['parameters']['blood-pint']\n location = data['queryResult']['parameters']['blood-location']\n case = data['queryResult']['parameters']['blood-case']\n date = data['queryResult']['parameters']['blood-date']\n phone = data['queryResult']['parameters']['blood-number']\n ilist = [blood_group,blood_amount,location,case,date,phone]\n sheets_row_writer(ilist)\n response3 = \"For critical case, please contact \\n Kathmandu 9880998523 \\n Bhaktapur 9880998525 \\n Kavre 9869294490 \\n Purwanchal 9862176689 \\n Chitwan 9801070746 \\n Butwal 9807522664 \\n Dang 9801920169 \\n Stay connected with BloodPal!\"\n response = \"The following request has been sent. We will contact you shortly. \"+blood_group+\" blood (\"+str(blood_amount)+\" ) required for \"+case+\" at \"+location+\" On \"+date+\" - \"+phone+\" Thank you .\"\n response2 = [{\n \"text\": {\n \"text\": [\n response\n ]\n },\n \"platform\": \"FACEBOOK\"\n },{\n \"text\":{\"text\":[\"Dummy text\"]}\n },\n {\n \"text\": {\n \"text\": [\n response3\n ]\n },\n \"platform\": \"FACEBOOK\"\n },{\n \"text\":{\"text\":[\"Dummy text\"]}\n }\n \n ]\n reply = { \"fulfillmentMessages\": response2 }\n return reply\n \n def blood_pal_donor_yes():\n print (intent)\n print (data)\n permananet_address = data['queryResult']['parameters']['permananet-address']\n height = data['queryResult']['parameters']['height']\n gender = data['queryResult']['parameters']['gender']\n age = data['queryResult']['parameters']['age']\n blood = data['queryResult']['parameters']['blood']\n current_address = data['queryResult']['parameters']['current-address']\n email = data['queryResult']['parameters']['email']\n name = data['queryResult']['parameters']['name']\n last_donation= data['queryResult']['parameters']['last-donation']\n weight = data['queryResult']['parameters']['weight']\n number = data['queryResult']['parameters']['number']\n ilist = [name,number,email,current_address,permananet_address,age,height,weight,gender,blood,last_donation]\n sheets_row_writer_donor(ilist)\n response3 = \"For critical case, please contact \\n Kathmandu 9880998523 \\n Bhaktapur 9880998525 \\n Kavre 9869294490 \\n Purwanchal 9862176689 \\n Chitwan 9801070746 \\n Butwal 9807522664 \\n Dang 9801920169 \\n Stay connected with BloodPal!\"\n response = \"Thank you \"+name+\" for registration as a blood donor We will contact you at the time of urgency in your area.\"\n response2 = [{\n \"text\": {\n \"text\": [\n response\n ]\n },\n \"platform\": \"FACEBOOK\"\n },{\n \"text\":{\"text\":[\"Dummy text\"]}\n },\n {\n \"text\": {\n \"text\": [\n response3\n ]\n },\n \"platform\": \"FACEBOOK\"\n },{\n \"text\":{\"text\":[\"Dummy text\"]}\n }\n \n ]\n reply = { \"fulfillmentMessages\": response2 }\n return reply\n\n def world_data_live():\n text = death_global()\n response = [\n {\n \"quickReplies\": {\n \"title\": text,\n \"quickReplies\": [\n \"Provience Data\",\n \"Nepali News\",\n \"World Data\",\n \"Symptoms\",\n \"Corona 
FAQ's\",\n \"Corona Quiz\"\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n } \n ]\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n \n #district summary all\n def district_all_summary():\n text = dss.district_all_summary()\n response = [\n {\n \"quickReplies\": {\n \"title\": text,\n \"quickReplies\": [\n \"Provience Summary\",\n \"Nepali News\",\n \"World Data\",\n \"Symptoms\",\n \"Corona FAQ's\",\n \"Corona Quiz\"\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n } \n ]\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n \n #provience summary all should remove \n def province_all_summary():\n text = dss.provience_all_summary()\n print(text)\n response = [\n {\n \"quickReplies\": {\n \"title\": text,\n \"quickReplies\": [\n \"District-Summary\",\n \"Province-Data\",\n \"World Data\",\n \"Preventions\",\n \"Corona FAQ's\",\n \"Corona Quiz\"\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n } \n ]\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n\n def proviencewise_detail():\n #get provience name\n #return dss.ard(provience)\n #card \n pcode = data['queryResult']['parameters']['custom-province-ent']\n province = int(pcode)\n print(type(province))\n response_summary = dss.ardp(province)\n print(response_summary)\n\n response = [\n {\n \"card\":{\n \"title\": \"Covid-19 Provience: \"+str(province)+\" | Details\",\n \"subtitle\":response_summary,\n \"imageUri\": \"https://setopati.net/wp-content/uploads/2018/02/province6.jpg\",\n \"buttons\":[\n {\n \"text\":\"Prov \"+str(province)+\" District Data\",\n \"postback\":\"dis-vdc data detail int\"\n },\n {\n \"text\":\"Prov \"+str(province)+\" Vdc-Mun Data\",\n \"postback\":\"dis-vdc data detail int\"\n },\n {\n \"text\":\"Latest Nepali News\",\n \"postback\":\"news-nepal-int\"\n }\n ]\n },\n \"platform\":\"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n },\n ]\n\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n \n def dis_vdc_detail():\n cod = data['queryResult']['parameters']['custom-province-ent']\n dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']\n \n print(type(dvdc))\n print(dvdc)\n code = int(cod)\n print(type(code))\n\n\n # provincecode = pcode\n if(dvdc==\"vdc\"):\n print('inside vdc')\n typ = \"vdc\" \n else:\n print('inside district')\n typ = \"district\"\n\n data_return = dss.ard(code,typ)\n response = [\n {\n \"quickReplies\": {\n \"title\": data_return,\n \"quickReplies\": [\n \"District Summary\",\n \"Province Summary\",\n \"Nepali News\",\n \"World Data\",\n \"Preventions\",\n \"Corona FAQ's\",\n \"Corona Quiz\"\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n } \n ]\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n\n def nepal_data_new_main_int():\n url = \"https://nepalcorona.info/api/v1/data/nepal\"\n response = requests.get(url)\n todos = json.loads(response.text)\n covid_df = dss.create_covid_df()\n\n \n response2 = \"Nepal Cases \\n Positive :\"+str(todos[\"tested_positive\"])+\" | Recovered: \"+str(todos[\"recovered\"])+\"| Deaths:\"+str(todos[\"deaths\"])+\" \"+\"\\n\"\n print(response2)\n response_summary = dss.affected_summary()\n\n response = [\n {\n \"text\": {\n \"text\": [\n response2\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\": {\n \"text\": [\n \"\"\n ]\n }\n },\n {\n \"card\":{\n \"title\": \"Covid-19 Nepal | Stats\",\n 
\"subtitle\":response_summary,\n # \"subtitle\": \"Find details by Province, Municipals and Districts for Nepal\",\n \"imageUri\": \"https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png\",\n \"buttons\":[\n {\n \"text\":\"Province Summary\",\n \"postback\":\"province data int\"\n },\n {\n \"text\":\"District-Summary\",\n \"postback\":\"district data int\"\n },\n {\n \"text\":\"Latest Nepali News\",\n \"postback\":\"news-nepal-int\"\n }\n ]\n },\n \"platform\":\"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n },\n ]\n\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n\n def batti_update():\n url = \"https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM\"\n response = requests.get(url)\n todos = json.loads(response.text)\n feeds = todos[\"feeds\"][0]\n \n response2 = \"Batti Status Now :\"+str(feeds[\"field1\"]+\"\\n Last Updated: \"+str(feeds[\"created_at\"]))\n print(response2)\n reply = { \"fulfillmentText\": response2 }\n return reply\n\n\n def default():\n return \"Incorrect Data\"\n\n switcher = {\n \"nepal data int\": nepal_data_new_main_int,\n \"news-nepal-int\": news_nepal_int,\n \"i need help main int - yes\": i_need_help_yes,\n \"faq-que-ans-int\": faq_ques_ans,\n \"bloodpal-need-blood-main-int - yes\": blood_pal_yes,\n \"data world int\": world_data_live,\n \"district data int\": district_all_summary,\n \"province data int\": province_all_summary,\n \"province-wise-data\": proviencewise_detail,\n \"dis-vdc data detail int\": dis_vdc_detail,\n \"bloodpal-become-donor-main-int\":blood_pal_donor_yes,\n \"batti-update-intent\":batti_update\n }\n \n def switch(intentname):\n return switcher.get(intentname, default)()\n\n reply = switch(intent)\n return jsonify(reply)\n \n\nif __name__ == '__main__':\n \n app.run()\n",
"step-ids": [
5,
6,
9,
10,
11
]
}
|
[
5,
6,
9,
10,
11
] |
import cv2

# Intended capture size (not currently applied to the capture device)
FRAME_WIDTH = 320
FRAME_HEIGHT = 240

cv2.namedWindow('Measure Angle with centerline')

# WebCam Initialize (device index 1; use 0 for the default camera)
vidCapture = cv2.VideoCapture(1)

# Record to an XVID-encoded AVI at 20 fps. The writer's frame size must
# match the size of the frames actually captured, or the output file may
# be unplayable.
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('webcam_record.avi', fourcc, 20.0, (640, 480))

while True:
    ret, frame = vidCapture.read()
    if not ret:
        break
    out.write(frame)
    cv2.imshow('frame', frame)
    # Press 'q' to stop recording
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

vidCapture.release()
out.release()
cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "500d6f473f07b35bf2d075d3061ac2e54eab702a",
"index": 4156,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncv2.namedWindow('Measure Angle with centerline')\n<mask token>\nwhile True:\n ret, frame = vidCapture.read()\n if ret == True:\n out.write(frame)\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n break\nvidCapture.release()\nout.release()\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nFRAME_WIDTH = 320\nFRAME_HEIGHT = 240\ncv2.namedWindow('Measure Angle with centerline')\nvidCapture = cv2.VideoCapture(1)\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nout = cv2.VideoWriter('webcam_record.avi', fourcc, 20.0, (640, 480))\nwhile True:\n ret, frame = vidCapture.read()\n if ret == True:\n out.write(frame)\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n break\nvidCapture.release()\nout.release()\ncv2.destroyAllWindows()\n",
"step-4": "import numpy as np\nimport cv2\nFRAME_WIDTH = 320\nFRAME_HEIGHT = 240\ncv2.namedWindow('Measure Angle with centerline')\nvidCapture = cv2.VideoCapture(1)\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nout = cv2.VideoWriter('webcam_record.avi', fourcc, 20.0, (640, 480))\nwhile True:\n ret, frame = vidCapture.read()\n if ret == True:\n out.write(frame)\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n break\nvidCapture.release()\nout.release()\ncv2.destroyAllWindows()\n",
"step-5": "import numpy as np\r\nimport cv2\r\n\r\nFRAME_WIDTH = 320\r\nFRAME_HEIGHT = 240\r\n\r\ncv2.namedWindow('Measure Angle with centerline')\r\n\r\n# WebCam Initialize\r\nvidCapture = cv2.VideoCapture(1)\r\n\r\nfourcc = cv2.VideoWriter_fourcc(*'XVID') \r\nout = cv2.VideoWriter('webcam_record.avi', fourcc, 20.0, (640, 480)) \r\n\r\nwhile True:\r\n\r\n\t# key = cv2.waitKey(1) & 0xFF\r\n\t# if key == 27:\r\n\t# \tbreak\r\n\r\n\tret, frame = vidCapture.read()\r\n\t\r\n\tif ret==True:\r\n\t\t# frame = cv2.flip(frame,0)\r\n\r\n # write the flipped frame\r\n\t\tout.write(frame)\r\n\r\n\t\tcv2.imshow('frame',frame)\r\n\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\r\n\t\t\tbreak\r\n\telse:\r\n\t\tbreak\r\n\t# img = np.zeros((512, 512, 3), np.uint8)\r\n\t# cv2.line(frame, (160, 0), (160, 240), (255, 0, 0), 2)\r\n\t# cv2.line(frame, (0, 120), (320, 120), (255, 0, 0), 2)\r\n\r\n\t# cv2.imshow('frame', frame)\r\n\r\nvidCapture.release()\r\nout.release()\r\ncv2.destroyAllWindows()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
auto_duration_sec = 15
teleop_duration_sec = 135
|
normal
|
{
"blob_id": "5229002103379ff10969e64289d5a0f36641c0a3",
"index": 3497,
"step-1": "<mask token>\n",
"step-2": "auto_duration_sec = 15\nteleop_duration_sec = 135\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from bs4 import BeautifulSoup
from pprint import pprint
from scraper.sas.sas_models import SASEvent, SASCategory, SASCategoryStage, SASEventStage
from scraper.base_models.models import Event, Category, CategoryStage, EventStage, Participant, Result
from scraper.sas.sas_config import DESTINATION_URL, MTB_EVENT_TYPE, YEARS
from scraper import db
from datetime import datetime
import urllib.request
import urllib.error
import json
import time
def scrape_sas():
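    """Top-level scrape: fetch SAS events, then categories, stages and
    results for anything not already stored in the database."""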
pprint("Scraping Events")
get_mtb_events()
pprint("Getting categories and stages")
for event in db.session.query(SASEvent):
pprint(event.event_id)
get_categories_and_stages(event.event_reference, event.event_id)
        # time.sleep(2)  # optional: throttle requests between events
for event_stage in db.session.query(SASEventStage):
pprint("Getting event stage results")
base_event_stage = db.session.query(EventStage).filter(EventStage.id==event_stage.event_stage_id).first()
if (base_event_stage.results):
pprint("Event has results")
else:
write_stage_results(event_stage.stage_reference, event_stage.event_stage_id, "event")
for category_stage in db.session.query(SASCategoryStage):
pprint("Getting category stage results")
base_category_stage = db.session.query(CategoryStage).filter(CategoryStage.id==category_stage.category_stage_id).first()
if (base_category_stage.results):
pprint("Category stage has results")
else:
write_stage_results(category_stage.stage_reference, category_stage.category_stage_id, "category")
for category in db.session.query(SASCategory):
pprint("Getting category results")
base_category = db.session.query(Category).filter(Category.id==category.category_id).first()
if (base_category.results):
pprint("Category has results")
else:
if (not base_category.category_stages):
write_category_results(category.stage_reference, category.id)
else:
pprint("No results but has category stages")
pprint("Scrape Complete")
def get_mtb_events():
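    """Fetch the MTB event listing for each configured year and store any
    new events together with their SAS event references."""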
for year in YEARS:
url = ("%s/participants/event-results/fetch-series-by-type?event_type=%s&event_year=%d" %
(DESTINATION_URL, MTB_EVENT_TYPE, year))
        try:
            page = urllib.request.urlopen(url)
            content = page.read().decode("utf-8")
            json_content = json.loads(content)
            soup = BeautifulSoup(json_content['HTML'], "html.parser")
            anchors = soup.find_all('a')
        except (urllib.error.HTTPError, ConnectionResetError):
            # ConnectionResetError is a builtin, not part of urllib.error.
            # Skip this year on failure; 'anchors' would otherwise be unbound.
            continue
for anchor in anchors:
event_reference = anchor["href"]
divs = anchor.find_all('div')
for div in divs:
if ("event-date" in div["class"]):
event_date = (div.find(text=True))
elif ("event-title" in div["class"]):
event_name = (div.find(text=True))
db_date = datetime.strptime(event_date, '%d %b %Y')
db_event = Event(event_name, db_date)
db_check = db.session.query(Event.title).filter(Event.title==event_name)
if not (db.session.query(db_check.exists()).scalar()):
db.session.add(db_event)
db.session.flush()
sas_event = SASEvent(db_event.id, event_reference)
db.session.add(sas_event)
db.session.commit()
def get_categories_and_stages(event_reference, event_id):
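    """Load an event's results page and populate its categories and stages,
    skipping events that already have either."""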
event = db.session.query(Event).filter(Event.id==event_id).first()
if (event.categories or event.event_stages):
pprint("Event Exists")
else:
url = (DESTINATION_URL + event_reference)
try:
page = urllib.request.urlopen(url)
except (urllib.error.HTTPError, urllib.error.URLError):
return
soup = BeautifulSoup(page, "html.parser")
        get_categories(soup, event_id)
def get_categories(soup, event_id):
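    """Parse the category container: create categories (and any per-category
    stages) if present, otherwise fall back to event-level stages."""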
category_div = soup.find('div', attrs={"id" : "category_container"})
#Check to see if event has categories first
if category_div:
divs = category_div.find_all('div')
for div in divs:
if div.has_attr("data-event-category-id"):
#Event has categories
category_reference = div["data-event-category-id"]
category_name = div["data-loading-text"]
category_own_stage_reference = div["data-event-stage-id"]
db_category = Category(category_name, event_id)
#Check both name and event id to allow duplicate names
db_category_check = db.session.query(Category.name).filter(
(Category.name==category_name) &
(Category.event_id==event_id))
#Check SAS category for duplicates as well
db_sas_category_check = db.session.query(SASCategory).filter(
(SASCategory.category_reference==category_reference) &
(SASCategory.stage_reference==category_own_stage_reference))
if not (db.session.query(db_category_check.exists()).scalar()):
db.session.add(db_category)
db.session.flush()
if not (db.session.query(db_sas_category_check.exists()).scalar()):
db_sas_category = SASCategory(category_reference, category_own_stage_reference, db_category.id)
db.session.add(db_sas_category)
db.session.flush()
db.session.commit()
if (div["data-multiple-event-stages"] == "1"):
#Event has stages with their own categories
get_category_stages(soup, db_category.id, category_reference)
else:
#Event does not have categories
get_event_stages(soup, event_id)
def get_category_stages(soup, category_id, category_reference):
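	"""Store the stages that belong to a single category."""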
stage_group_div = soup.find('div', attrs={"id" : ("ec_" + category_reference)})
stage_divs = stage_group_div.find_all('div')
for stage_div in stage_divs:
if stage_div.has_attr("data-stage-id"):
category_stage_reference = stage_div["data-stage-id"]
category_stage_name = stage_div["data-loading-text"]
db_category_stage = CategoryStage(category_stage_name, category_id)
#Check both name and category id to allow duplicate names
db_category_stage_check = db.session.query(CategoryStage.name).filter(
(CategoryStage.name==category_stage_name) &
(CategoryStage.category_id==category_id))
if not (db.session.query(db_category_stage_check.exists()).scalar()):
db.session.add(db_category_stage)
db.session.flush()
db_sas_category_stage = SASCategoryStage(db_category_stage.id, category_stage_reference)
db.session.add(db_sas_category_stage)
db.session.flush()
db.session.commit()
def get_event_stages(soup, event_id):
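	"""Store event-level stages, or a single Overall Results stage when none exist."""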
all_event_stage_divs = soup.find('div', class_ = "row categories_stages event-sub-types")
#Check if event has stages
if all_event_stage_divs:
        event_stage_divs = all_event_stage_divs.find_all('div')
for event_stage_div in event_stage_divs:
if event_stage_div.has_attr("data-stage-id"):
#Event has stages and no categories
event_stage_reference = event_stage_div["data-stage-id"]
event_stage_name = event_stage_div["data-loading-text"]
db_event_stage = EventStage(event_stage_name, event_id)
#Check if it exists by name and ID and add if it doesn't
db_event_stage_check = db.session.query(EventStage.name).filter(
(EventStage.name==event_stage_name) &
(EventStage.event_id==event_id))
if not (db.session.query(db_event_stage_check.exists()).scalar()):
db.session.add(db_event_stage)
db.session.flush()
db_sas_event_stage = SASEventStage(db_event_stage.id, event_stage_reference)
db.session.add(db_sas_event_stage)
db.session.flush()
db.session.commit()
else:
#Event has no stages or categories
#create new stage for just the overall results, unless event has no results
event_stage_reference_div = soup.find('div', class_ = "result-row load-results")
if event_stage_reference_div:
if event_stage_reference_div.has_attr("data-stage"):
event_stage_reference = event_stage_reference_div["data-stage"]
sas_event = db.session.query(SASEvent).filter(SASEvent.event_id==event_id).first()
db_event_stage_check = db.session.query(EventStage.name).filter(
(EventStage.name=="Overall Results") &
(EventStage.event_id==sas_event.event_id))
if not (db.session.query(db_event_stage_check.exists()).scalar()):
db_event_stage = EventStage("Overall Results", sas_event.event_id)
db.session.add(db_event_stage)
db.session.flush()
db_sas_event_stage = SASEventStage(db_event_stage.id, event_stage_reference)
db.session.add(db_sas_event_stage)
db.session.commit()
def get_results(event_reference):
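	"""Fetch the result rows for a stage reference; return None on a network error."""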
url = ("%s/participants/event-results/add-results?stage_id=%s&from=0&count=9999" %
(DESTINATION_URL, event_reference))
pprint(url)
try:
page = urllib.request.urlopen(url)
    except (urllib.error.HTTPError, ConnectionResetError):
return
content = page.read().decode("utf-8")
json_content = json.loads(content)
json_results = json_content['rows']
return json_results
def write_stage_results(stage_reference, stage_id, stage_type):
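	"""Write results for an event stage or a category stage, skipping duplicates."""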
results = get_results(stage_reference)
category_stage_id = None
event_stage_id = None
if (stage_type=="event"):
event_stage_id = stage_id
elif (stage_type=="category"):
category_stage_id = stage_id
if results:
for result in results:
participant_id = get_participant(result)
db_result_check = db.session.query(Result).filter(
(Result.position==result['overall_pos']) &
(Result.gender_position==result['gender_pos']) &
(Result.time==result['time_taken_seconds']) &
(Result.event_stage_id==event_stage_id) &
(Result.category_stage_id==category_stage_id))
if not (db.session.query(db_result_check.exists()).scalar()):
if (stage_type=="category"):
db_result = Result(result['overall_pos'], participant_id, result['gender_pos'],
result['time_taken_seconds'], None, category_stage_id, None)
elif (stage_type=="event"):
db_result = Result(result['overall_pos'], participant_id, result['gender_pos'],
result['time_taken_seconds'], event_stage_id, None, None)
db.session.add(db_result)
db.session.commit()
def write_category_results(category_reference, category_id):
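	"""Write results attached directly to a category, skipping duplicates."""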
    results = get_results(category_reference)
    if not results:
        #get_results returns None on a network error
        return
    for result in results:
participant_id = get_participant(result)
db_result_check = db.session.query(Result).filter(
(Result.position==result['overall_pos']) &
(Result.gender_position==result['gender_pos']) &
(Result.time==result['time_taken_seconds']) &
(Result.category_id==category_id)).first()
if not db_result_check:
db_category_result = Result(result['overall_pos'], participant_id,
result['gender_pos'], result['time_taken_seconds'], None, None, category_id)
db.session.add(db_category_result)
db.session.commit()
def get_participant(result):
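	"""Find or create the participant for a result row and return its id."""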
if result['date_of_birth']:
birth_date = datetime.strptime(result['date_of_birth'], '%Y-%m-%d').date()
else:
birth_date = None
db_participant_check = db.session.query(Participant).filter(
(Participant.first_name==result['first_name']) &
(Participant.last_name==result['last_name']) &
(Participant.sex==result['person_sex']) &
(Participant.birth_date==birth_date))
if not (db.session.query(db_participant_check.exists()).scalar()):
db_participant = Participant(result['first_name'], result['last_name'],
result['person_sex'], birth_date)
db.session.add(db_participant)
db.session.commit()
return db_participant.id
else:
return db_participant_check.first().id
|
normal
|
{
"blob_id": "ecc351cf95254e0bbc5021eff11c500fa0950bd3",
"index": 2653,
"step-1": "<mask token>\n\n\ndef scrape_sas():\n pprint('Scraping Events')\n get_mtb_events()\n pprint('Getting categories and stages')\n for event in db.session.query(SASEvent):\n pprint(event.event_id)\n get_categories_and_stages(event.event_reference, event.event_id)\n for event_stage in db.session.query(SASEventStage):\n pprint('Getting event stage results')\n base_event_stage = db.session.query(EventStage).filter(EventStage.\n id == event_stage.event_stage_id).first()\n if base_event_stage.results:\n pprint('Event has results')\n else:\n write_stage_results(event_stage.stage_reference, event_stage.\n event_stage_id, 'event')\n for category_stage in db.session.query(SASCategoryStage):\n pprint('Getting category stage results')\n base_category_stage = db.session.query(CategoryStage).filter(\n CategoryStage.id == category_stage.category_stage_id).first()\n if base_category_stage.results:\n pprint('Category stage has results')\n else:\n write_stage_results(category_stage.stage_reference,\n category_stage.category_stage_id, 'category')\n for category in db.session.query(SASCategory):\n pprint('Getting category results')\n base_category = db.session.query(Category).filter(Category.id ==\n category.category_id).first()\n if base_category.results:\n pprint('Category has results')\n elif not base_category.category_stages:\n write_category_results(category.stage_reference, category.id)\n else:\n pprint('No results but has category stages')\n pprint('Scrape Complete')\n\n\n<mask token>\n\n\ndef get_categories_and_stages(event_reference, event_id):\n event = db.session.query(Event).filter(Event.id == event_id).first()\n if event.categories or event.event_stages:\n pprint('Event Exists')\n else:\n url = DESTINATION_URL + event_reference\n try:\n page = urllib.request.urlopen(url)\n except (urllib.error.HTTPError, urllib.error.URLError):\n return\n soup = BeautifulSoup(page, 'html.parser')\n check_stages = get_categories(soup, event_id)\n\n\ndef get_categories(soup, event_id):\n category_div = soup.find('div', attrs={'id': 'category_container'})\n if category_div:\n divs = category_div.find_all('div')\n for div in divs:\n if div.has_attr('data-event-category-id'):\n category_reference = div['data-event-category-id']\n category_name = div['data-loading-text']\n category_own_stage_reference = div['data-event-stage-id']\n db_category = Category(category_name, event_id)\n db_category_check = db.session.query(Category.name).filter(\n (Category.name == category_name) & (Category.event_id ==\n event_id))\n db_sas_category_check = db.session.query(SASCategory).filter(\n (SASCategory.category_reference == category_reference) &\n (SASCategory.stage_reference ==\n category_own_stage_reference))\n if not db.session.query(db_category_check.exists()).scalar():\n db.session.add(db_category)\n db.session.flush()\n if not db.session.query(db_sas_category_check.exists()\n ).scalar():\n db_sas_category = SASCategory(category_reference,\n category_own_stage_reference, db_category.id)\n db.session.add(db_sas_category)\n db.session.flush()\n db.session.commit()\n if div['data-multiple-event-stages'] == '1':\n get_category_stages(soup, db_category.id,\n category_reference)\n else:\n get_event_stages(soup, event_id)\n\n\n<mask token>\n\n\ndef get_event_stages(soup, event_id):\n all_event_stage_divs = soup.find('div', class_=\n 'row categories_stages event-sub-types')\n if all_event_stage_divs:\n event_stage_divs = all_event_stage_divs.find_all('div')\n for event_stage_div in event_stage_divs:\n if 
event_stage_div.has_attr('data-stage-id'):\n event_stage_reference = event_stage_div['data-stage-id']\n event_stage_name = event_stage_div['data-loading-text']\n db_event_stage = EventStage(event_stage_name, event_id)\n db_event_stage_check = db.session.query(EventStage.name\n ).filter((EventStage.name == event_stage_name) & (\n EventStage.event_id == event_id))\n if not db.session.query(db_event_stage_check.exists()).scalar(\n ):\n db.session.add(db_event_stage)\n db.session.flush()\n db_sas_event_stage = SASEventStage(db_event_stage.id,\n event_stage_reference)\n db.session.add(db_sas_event_stage)\n db.session.flush()\n db.session.commit()\n else:\n event_stage_reference_div = soup.find('div', class_=\n 'result-row load-results')\n if event_stage_reference_div:\n if event_stage_reference_div.has_attr('data-stage'):\n event_stage_reference = event_stage_reference_div['data-stage']\n sas_event = db.session.query(SASEvent).filter(SASEvent.\n event_id == event_id).first()\n db_event_stage_check = db.session.query(EventStage.name\n ).filter((EventStage.name == 'Overall Results') & (\n EventStage.event_id == sas_event.event_id))\n if not db.session.query(db_event_stage_check.exists()).scalar(\n ):\n db_event_stage = EventStage('Overall Results',\n sas_event.event_id)\n db.session.add(db_event_stage)\n db.session.flush()\n db_sas_event_stage = SASEventStage(db_event_stage.id,\n event_stage_reference)\n db.session.add(db_sas_event_stage)\n db.session.commit()\n\n\ndef get_results(event_reference):\n url = (\n '%s/participants/event-results/add-results?stage_id=%s&from=0&count=9999'\n % (DESTINATION_URL, event_reference))\n pprint(url)\n try:\n page = urllib.request.urlopen(url)\n except (urllib.error.HTTPError, urllib.error.ConnectionResetError):\n return\n content = page.read().decode('utf-8')\n json_content = json.loads(content)\n json_results = json_content['rows']\n return json_results\n\n\ndef write_stage_results(stage_reference, stage_id, stage_type):\n results = get_results(stage_reference)\n category_stage_id = None\n event_stage_id = None\n if stage_type == 'event':\n event_stage_id = stage_id\n elif stage_type == 'category':\n category_stage_id = stage_id\n if results:\n for result in results:\n participant_id = get_participant(result)\n db_result_check = db.session.query(Result).filter((Result.\n position == result['overall_pos']) & (Result.\n gender_position == result['gender_pos']) & (Result.time ==\n result['time_taken_seconds']) & (Result.event_stage_id ==\n event_stage_id) & (Result.category_stage_id ==\n category_stage_id))\n if not db.session.query(db_result_check.exists()).scalar():\n if stage_type == 'category':\n db_result = Result(result['overall_pos'],\n participant_id, result['gender_pos'], result[\n 'time_taken_seconds'], None, category_stage_id, None)\n elif stage_type == 'event':\n db_result = Result(result['overall_pos'],\n participant_id, result['gender_pos'], result[\n 'time_taken_seconds'], event_stage_id, None, None)\n db.session.add(db_result)\n db.session.commit()\n\n\ndef write_category_results(category_reference, category_id):\n results = get_results(category_reference)\n for result in results:\n participant_id = get_participant(result)\n db_result_check = db.session.query(Result).filter((Result.position ==\n result['overall_pos']) & (Result.gender_position == result[\n 'gender_pos']) & (Result.time == result['time_taken_seconds']) &\n (Result.category_id == category_id)).first()\n if not db_result_check:\n db_category_result = Result(result['overall_pos'],\n 
participant_id, result['gender_pos'], result[\n 'time_taken_seconds'], None, None, category_id)\n db.session.add(db_category_result)\n db.session.commit()\n\n\ndef get_participant(result):\n if result['date_of_birth']:\n birth_date = datetime.strptime(result['date_of_birth'], '%Y-%m-%d'\n ).date()\n else:\n birth_date = None\n db_participant_check = db.session.query(Participant).filter((\n Participant.first_name == result['first_name']) & (Participant.\n last_name == result['last_name']) & (Participant.sex == result[\n 'person_sex']) & (Participant.birth_date == birth_date))\n if not db.session.query(db_participant_check.exists()).scalar():\n db_participant = Participant(result['first_name'], result[\n 'last_name'], result['person_sex'], birth_date)\n db.session.add(db_participant)\n db.session.commit()\n return db_participant.id\n else:\n return db_participant_check.first().id\n",
"step-2": "<mask token>\n\n\ndef scrape_sas():\n pprint('Scraping Events')\n get_mtb_events()\n pprint('Getting categories and stages')\n for event in db.session.query(SASEvent):\n pprint(event.event_id)\n get_categories_and_stages(event.event_reference, event.event_id)\n for event_stage in db.session.query(SASEventStage):\n pprint('Getting event stage results')\n base_event_stage = db.session.query(EventStage).filter(EventStage.\n id == event_stage.event_stage_id).first()\n if base_event_stage.results:\n pprint('Event has results')\n else:\n write_stage_results(event_stage.stage_reference, event_stage.\n event_stage_id, 'event')\n for category_stage in db.session.query(SASCategoryStage):\n pprint('Getting category stage results')\n base_category_stage = db.session.query(CategoryStage).filter(\n CategoryStage.id == category_stage.category_stage_id).first()\n if base_category_stage.results:\n pprint('Category stage has results')\n else:\n write_stage_results(category_stage.stage_reference,\n category_stage.category_stage_id, 'category')\n for category in db.session.query(SASCategory):\n pprint('Getting category results')\n base_category = db.session.query(Category).filter(Category.id ==\n category.category_id).first()\n if base_category.results:\n pprint('Category has results')\n elif not base_category.category_stages:\n write_category_results(category.stage_reference, category.id)\n else:\n pprint('No results but has category stages')\n pprint('Scrape Complete')\n\n\n<mask token>\n\n\ndef get_categories_and_stages(event_reference, event_id):\n event = db.session.query(Event).filter(Event.id == event_id).first()\n if event.categories or event.event_stages:\n pprint('Event Exists')\n else:\n url = DESTINATION_URL + event_reference\n try:\n page = urllib.request.urlopen(url)\n except (urllib.error.HTTPError, urllib.error.URLError):\n return\n soup = BeautifulSoup(page, 'html.parser')\n check_stages = get_categories(soup, event_id)\n\n\ndef get_categories(soup, event_id):\n category_div = soup.find('div', attrs={'id': 'category_container'})\n if category_div:\n divs = category_div.find_all('div')\n for div in divs:\n if div.has_attr('data-event-category-id'):\n category_reference = div['data-event-category-id']\n category_name = div['data-loading-text']\n category_own_stage_reference = div['data-event-stage-id']\n db_category = Category(category_name, event_id)\n db_category_check = db.session.query(Category.name).filter(\n (Category.name == category_name) & (Category.event_id ==\n event_id))\n db_sas_category_check = db.session.query(SASCategory).filter(\n (SASCategory.category_reference == category_reference) &\n (SASCategory.stage_reference ==\n category_own_stage_reference))\n if not db.session.query(db_category_check.exists()).scalar():\n db.session.add(db_category)\n db.session.flush()\n if not db.session.query(db_sas_category_check.exists()\n ).scalar():\n db_sas_category = SASCategory(category_reference,\n category_own_stage_reference, db_category.id)\n db.session.add(db_sas_category)\n db.session.flush()\n db.session.commit()\n if div['data-multiple-event-stages'] == '1':\n get_category_stages(soup, db_category.id,\n category_reference)\n else:\n get_event_stages(soup, event_id)\n\n\ndef get_category_stages(soup, category_id, category_reference):\n stage_group_div = soup.find('div', attrs={'id': 'ec_' + category_reference}\n )\n stage_divs = stage_group_div.find_all('div')\n for stage_div in stage_divs:\n if stage_div.has_attr('data-stage-id'):\n category_stage_reference = 
stage_div['data-stage-id']\n category_stage_name = stage_div['data-loading-text']\n db_category_stage = CategoryStage(category_stage_name, category_id)\n db_category_stage_check = db.session.query(CategoryStage.name\n ).filter((CategoryStage.name == category_stage_name) & (\n CategoryStage.category_id == category_id))\n if not db.session.query(db_category_stage_check.exists()).scalar():\n db.session.add(db_category_stage)\n db.session.flush()\n db_sas_category_stage = SASCategoryStage(db_category_stage.\n id, category_stage_reference)\n db.session.add(db_sas_category_stage)\n db.session.flush()\n db.session.commit()\n\n\ndef get_event_stages(soup, event_id):\n all_event_stage_divs = soup.find('div', class_=\n 'row categories_stages event-sub-types')\n if all_event_stage_divs:\n event_stage_divs = all_event_stage_divs.find_all('div')\n for event_stage_div in event_stage_divs:\n if event_stage_div.has_attr('data-stage-id'):\n event_stage_reference = event_stage_div['data-stage-id']\n event_stage_name = event_stage_div['data-loading-text']\n db_event_stage = EventStage(event_stage_name, event_id)\n db_event_stage_check = db.session.query(EventStage.name\n ).filter((EventStage.name == event_stage_name) & (\n EventStage.event_id == event_id))\n if not db.session.query(db_event_stage_check.exists()).scalar(\n ):\n db.session.add(db_event_stage)\n db.session.flush()\n db_sas_event_stage = SASEventStage(db_event_stage.id,\n event_stage_reference)\n db.session.add(db_sas_event_stage)\n db.session.flush()\n db.session.commit()\n else:\n event_stage_reference_div = soup.find('div', class_=\n 'result-row load-results')\n if event_stage_reference_div:\n if event_stage_reference_div.has_attr('data-stage'):\n event_stage_reference = event_stage_reference_div['data-stage']\n sas_event = db.session.query(SASEvent).filter(SASEvent.\n event_id == event_id).first()\n db_event_stage_check = db.session.query(EventStage.name\n ).filter((EventStage.name == 'Overall Results') & (\n EventStage.event_id == sas_event.event_id))\n if not db.session.query(db_event_stage_check.exists()).scalar(\n ):\n db_event_stage = EventStage('Overall Results',\n sas_event.event_id)\n db.session.add(db_event_stage)\n db.session.flush()\n db_sas_event_stage = SASEventStage(db_event_stage.id,\n event_stage_reference)\n db.session.add(db_sas_event_stage)\n db.session.commit()\n\n\ndef get_results(event_reference):\n url = (\n '%s/participants/event-results/add-results?stage_id=%s&from=0&count=9999'\n % (DESTINATION_URL, event_reference))\n pprint(url)\n try:\n page = urllib.request.urlopen(url)\n except (urllib.error.HTTPError, urllib.error.ConnectionResetError):\n return\n content = page.read().decode('utf-8')\n json_content = json.loads(content)\n json_results = json_content['rows']\n return json_results\n\n\ndef write_stage_results(stage_reference, stage_id, stage_type):\n results = get_results(stage_reference)\n category_stage_id = None\n event_stage_id = None\n if stage_type == 'event':\n event_stage_id = stage_id\n elif stage_type == 'category':\n category_stage_id = stage_id\n if results:\n for result in results:\n participant_id = get_participant(result)\n db_result_check = db.session.query(Result).filter((Result.\n position == result['overall_pos']) & (Result.\n gender_position == result['gender_pos']) & (Result.time ==\n result['time_taken_seconds']) & (Result.event_stage_id ==\n event_stage_id) & (Result.category_stage_id ==\n category_stage_id))\n if not db.session.query(db_result_check.exists()).scalar():\n if stage_type == 
'category':\n db_result = Result(result['overall_pos'],\n participant_id, result['gender_pos'], result[\n 'time_taken_seconds'], None, category_stage_id, None)\n elif stage_type == 'event':\n db_result = Result(result['overall_pos'],\n participant_id, result['gender_pos'], result[\n 'time_taken_seconds'], event_stage_id, None, None)\n db.session.add(db_result)\n db.session.commit()\n\n\ndef write_category_results(category_reference, category_id):\n results = get_results(category_reference)\n for result in results:\n participant_id = get_participant(result)\n db_result_check = db.session.query(Result).filter((Result.position ==\n result['overall_pos']) & (Result.gender_position == result[\n 'gender_pos']) & (Result.time == result['time_taken_seconds']) &\n (Result.category_id == category_id)).first()\n if not db_result_check:\n db_category_result = Result(result['overall_pos'],\n participant_id, result['gender_pos'], result[\n 'time_taken_seconds'], None, None, category_id)\n db.session.add(db_category_result)\n db.session.commit()\n\n\ndef get_participant(result):\n if result['date_of_birth']:\n birth_date = datetime.strptime(result['date_of_birth'], '%Y-%m-%d'\n ).date()\n else:\n birth_date = None\n db_participant_check = db.session.query(Participant).filter((\n Participant.first_name == result['first_name']) & (Participant.\n last_name == result['last_name']) & (Participant.sex == result[\n 'person_sex']) & (Participant.birth_date == birth_date))\n if not db.session.query(db_participant_check.exists()).scalar():\n db_participant = Participant(result['first_name'], result[\n 'last_name'], result['person_sex'], birth_date)\n db.session.add(db_participant)\n db.session.commit()\n return db_participant.id\n else:\n return db_participant_check.first().id\n",
"step-3": "<mask token>\n\n\ndef scrape_sas():\n pprint('Scraping Events')\n get_mtb_events()\n pprint('Getting categories and stages')\n for event in db.session.query(SASEvent):\n pprint(event.event_id)\n get_categories_and_stages(event.event_reference, event.event_id)\n for event_stage in db.session.query(SASEventStage):\n pprint('Getting event stage results')\n base_event_stage = db.session.query(EventStage).filter(EventStage.\n id == event_stage.event_stage_id).first()\n if base_event_stage.results:\n pprint('Event has results')\n else:\n write_stage_results(event_stage.stage_reference, event_stage.\n event_stage_id, 'event')\n for category_stage in db.session.query(SASCategoryStage):\n pprint('Getting category stage results')\n base_category_stage = db.session.query(CategoryStage).filter(\n CategoryStage.id == category_stage.category_stage_id).first()\n if base_category_stage.results:\n pprint('Category stage has results')\n else:\n write_stage_results(category_stage.stage_reference,\n category_stage.category_stage_id, 'category')\n for category in db.session.query(SASCategory):\n pprint('Getting category results')\n base_category = db.session.query(Category).filter(Category.id ==\n category.category_id).first()\n if base_category.results:\n pprint('Category has results')\n elif not base_category.category_stages:\n write_category_results(category.stage_reference, category.id)\n else:\n pprint('No results but has category stages')\n pprint('Scrape Complete')\n\n\ndef get_mtb_events():\n for year in YEARS:\n url = (\n '%s/participants/event-results/fetch-series-by-type?event_type=%s&event_year=%d'\n % (DESTINATION_URL, MTB_EVENT_TYPE, year))\n try:\n page = urllib.request.urlopen(url)\n content = page.read().decode('utf-8')\n json_content = json.loads(content)\n soup = BeautifulSoup(json_content['HTML'], 'html.parser')\n anchors = soup.find_all('a')\n except (urllib.error.HTTPError, urllib.error.ConnectionResetError):\n pass\n for anchor in anchors:\n event_reference = anchor['href']\n divs = anchor.find_all('div')\n for div in divs:\n if 'event-date' in div['class']:\n event_date = div.find(text=True)\n elif 'event-title' in div['class']:\n event_name = div.find(text=True)\n db_date = datetime.strptime(event_date, '%d %b %Y')\n db_event = Event(event_name, db_date)\n db_check = db.session.query(Event.title).filter(Event.title ==\n event_name)\n if not db.session.query(db_check.exists()).scalar():\n db.session.add(db_event)\n db.session.flush()\n sas_event = SASEvent(db_event.id, event_reference)\n db.session.add(sas_event)\n db.session.commit()\n\n\ndef get_categories_and_stages(event_reference, event_id):\n event = db.session.query(Event).filter(Event.id == event_id).first()\n if event.categories or event.event_stages:\n pprint('Event Exists')\n else:\n url = DESTINATION_URL + event_reference\n try:\n page = urllib.request.urlopen(url)\n except (urllib.error.HTTPError, urllib.error.URLError):\n return\n soup = BeautifulSoup(page, 'html.parser')\n check_stages = get_categories(soup, event_id)\n\n\ndef get_categories(soup, event_id):\n category_div = soup.find('div', attrs={'id': 'category_container'})\n if category_div:\n divs = category_div.find_all('div')\n for div in divs:\n if div.has_attr('data-event-category-id'):\n category_reference = div['data-event-category-id']\n category_name = div['data-loading-text']\n category_own_stage_reference = div['data-event-stage-id']\n db_category = Category(category_name, event_id)\n db_category_check = db.session.query(Category.name).filter(\n 
(Category.name == category_name) & (Category.event_id ==\n event_id))\n db_sas_category_check = db.session.query(SASCategory).filter(\n (SASCategory.category_reference == category_reference) &\n (SASCategory.stage_reference ==\n category_own_stage_reference))\n if not db.session.query(db_category_check.exists()).scalar():\n db.session.add(db_category)\n db.session.flush()\n if not db.session.query(db_sas_category_check.exists()\n ).scalar():\n db_sas_category = SASCategory(category_reference,\n category_own_stage_reference, db_category.id)\n db.session.add(db_sas_category)\n db.session.flush()\n db.session.commit()\n if div['data-multiple-event-stages'] == '1':\n get_category_stages(soup, db_category.id,\n category_reference)\n else:\n get_event_stages(soup, event_id)\n\n\ndef get_category_stages(soup, category_id, category_reference):\n stage_group_div = soup.find('div', attrs={'id': 'ec_' + category_reference}\n )\n stage_divs = stage_group_div.find_all('div')\n for stage_div in stage_divs:\n if stage_div.has_attr('data-stage-id'):\n category_stage_reference = stage_div['data-stage-id']\n category_stage_name = stage_div['data-loading-text']\n db_category_stage = CategoryStage(category_stage_name, category_id)\n db_category_stage_check = db.session.query(CategoryStage.name\n ).filter((CategoryStage.name == category_stage_name) & (\n CategoryStage.category_id == category_id))\n if not db.session.query(db_category_stage_check.exists()).scalar():\n db.session.add(db_category_stage)\n db.session.flush()\n db_sas_category_stage = SASCategoryStage(db_category_stage.\n id, category_stage_reference)\n db.session.add(db_sas_category_stage)\n db.session.flush()\n db.session.commit()\n\n\ndef get_event_stages(soup, event_id):\n all_event_stage_divs = soup.find('div', class_=\n 'row categories_stages event-sub-types')\n if all_event_stage_divs:\n event_stage_divs = all_event_stage_divs.find_all('div')\n for event_stage_div in event_stage_divs:\n if event_stage_div.has_attr('data-stage-id'):\n event_stage_reference = event_stage_div['data-stage-id']\n event_stage_name = event_stage_div['data-loading-text']\n db_event_stage = EventStage(event_stage_name, event_id)\n db_event_stage_check = db.session.query(EventStage.name\n ).filter((EventStage.name == event_stage_name) & (\n EventStage.event_id == event_id))\n if not db.session.query(db_event_stage_check.exists()).scalar(\n ):\n db.session.add(db_event_stage)\n db.session.flush()\n db_sas_event_stage = SASEventStage(db_event_stage.id,\n event_stage_reference)\n db.session.add(db_sas_event_stage)\n db.session.flush()\n db.session.commit()\n else:\n event_stage_reference_div = soup.find('div', class_=\n 'result-row load-results')\n if event_stage_reference_div:\n if event_stage_reference_div.has_attr('data-stage'):\n event_stage_reference = event_stage_reference_div['data-stage']\n sas_event = db.session.query(SASEvent).filter(SASEvent.\n event_id == event_id).first()\n db_event_stage_check = db.session.query(EventStage.name\n ).filter((EventStage.name == 'Overall Results') & (\n EventStage.event_id == sas_event.event_id))\n if not db.session.query(db_event_stage_check.exists()).scalar(\n ):\n db_event_stage = EventStage('Overall Results',\n sas_event.event_id)\n db.session.add(db_event_stage)\n db.session.flush()\n db_sas_event_stage = SASEventStage(db_event_stage.id,\n event_stage_reference)\n db.session.add(db_sas_event_stage)\n db.session.commit()\n\n\ndef get_results(event_reference):\n url = (\n 
'%s/participants/event-results/add-results?stage_id=%s&from=0&count=9999'\n % (DESTINATION_URL, event_reference))\n pprint(url)\n try:\n page = urllib.request.urlopen(url)\n except (urllib.error.HTTPError, urllib.error.ConnectionResetError):\n return\n content = page.read().decode('utf-8')\n json_content = json.loads(content)\n json_results = json_content['rows']\n return json_results\n\n\ndef write_stage_results(stage_reference, stage_id, stage_type):\n results = get_results(stage_reference)\n category_stage_id = None\n event_stage_id = None\n if stage_type == 'event':\n event_stage_id = stage_id\n elif stage_type == 'category':\n category_stage_id = stage_id\n if results:\n for result in results:\n participant_id = get_participant(result)\n db_result_check = db.session.query(Result).filter((Result.\n position == result['overall_pos']) & (Result.\n gender_position == result['gender_pos']) & (Result.time ==\n result['time_taken_seconds']) & (Result.event_stage_id ==\n event_stage_id) & (Result.category_stage_id ==\n category_stage_id))\n if not db.session.query(db_result_check.exists()).scalar():\n if stage_type == 'category':\n db_result = Result(result['overall_pos'],\n participant_id, result['gender_pos'], result[\n 'time_taken_seconds'], None, category_stage_id, None)\n elif stage_type == 'event':\n db_result = Result(result['overall_pos'],\n participant_id, result['gender_pos'], result[\n 'time_taken_seconds'], event_stage_id, None, None)\n db.session.add(db_result)\n db.session.commit()\n\n\ndef write_category_results(category_reference, category_id):\n results = get_results(category_reference)\n for result in results:\n participant_id = get_participant(result)\n db_result_check = db.session.query(Result).filter((Result.position ==\n result['overall_pos']) & (Result.gender_position == result[\n 'gender_pos']) & (Result.time == result['time_taken_seconds']) &\n (Result.category_id == category_id)).first()\n if not db_result_check:\n db_category_result = Result(result['overall_pos'],\n participant_id, result['gender_pos'], result[\n 'time_taken_seconds'], None, None, category_id)\n db.session.add(db_category_result)\n db.session.commit()\n\n\ndef get_participant(result):\n if result['date_of_birth']:\n birth_date = datetime.strptime(result['date_of_birth'], '%Y-%m-%d'\n ).date()\n else:\n birth_date = None\n db_participant_check = db.session.query(Participant).filter((\n Participant.first_name == result['first_name']) & (Participant.\n last_name == result['last_name']) & (Participant.sex == result[\n 'person_sex']) & (Participant.birth_date == birth_date))\n if not db.session.query(db_participant_check.exists()).scalar():\n db_participant = Participant(result['first_name'], result[\n 'last_name'], result['person_sex'], birth_date)\n db.session.add(db_participant)\n db.session.commit()\n return db_participant.id\n else:\n return db_participant_check.first().id\n",
"step-4": "from bs4 import BeautifulSoup\nfrom pprint import pprint\nfrom scraper.sas.sas_models import SASEvent, SASCategory, SASCategoryStage, SASEventStage\nfrom scraper.base_models.models import Event, Category, CategoryStage, EventStage, Participant, Result\nfrom scraper.sas.sas_config import DESTINATION_URL, MTB_EVENT_TYPE, YEARS\nfrom scraper import db\nfrom datetime import datetime\nimport urllib\nimport json\nimport time\n\n\ndef scrape_sas():\n pprint('Scraping Events')\n get_mtb_events()\n pprint('Getting categories and stages')\n for event in db.session.query(SASEvent):\n pprint(event.event_id)\n get_categories_and_stages(event.event_reference, event.event_id)\n for event_stage in db.session.query(SASEventStage):\n pprint('Getting event stage results')\n base_event_stage = db.session.query(EventStage).filter(EventStage.\n id == event_stage.event_stage_id).first()\n if base_event_stage.results:\n pprint('Event has results')\n else:\n write_stage_results(event_stage.stage_reference, event_stage.\n event_stage_id, 'event')\n for category_stage in db.session.query(SASCategoryStage):\n pprint('Getting category stage results')\n base_category_stage = db.session.query(CategoryStage).filter(\n CategoryStage.id == category_stage.category_stage_id).first()\n if base_category_stage.results:\n pprint('Category stage has results')\n else:\n write_stage_results(category_stage.stage_reference,\n category_stage.category_stage_id, 'category')\n for category in db.session.query(SASCategory):\n pprint('Getting category results')\n base_category = db.session.query(Category).filter(Category.id ==\n category.category_id).first()\n if base_category.results:\n pprint('Category has results')\n elif not base_category.category_stages:\n write_category_results(category.stage_reference, category.id)\n else:\n pprint('No results but has category stages')\n pprint('Scrape Complete')\n\n\ndef get_mtb_events():\n for year in YEARS:\n url = (\n '%s/participants/event-results/fetch-series-by-type?event_type=%s&event_year=%d'\n % (DESTINATION_URL, MTB_EVENT_TYPE, year))\n try:\n page = urllib.request.urlopen(url)\n content = page.read().decode('utf-8')\n json_content = json.loads(content)\n soup = BeautifulSoup(json_content['HTML'], 'html.parser')\n anchors = soup.find_all('a')\n except (urllib.error.HTTPError, urllib.error.ConnectionResetError):\n pass\n for anchor in anchors:\n event_reference = anchor['href']\n divs = anchor.find_all('div')\n for div in divs:\n if 'event-date' in div['class']:\n event_date = div.find(text=True)\n elif 'event-title' in div['class']:\n event_name = div.find(text=True)\n db_date = datetime.strptime(event_date, '%d %b %Y')\n db_event = Event(event_name, db_date)\n db_check = db.session.query(Event.title).filter(Event.title ==\n event_name)\n if not db.session.query(db_check.exists()).scalar():\n db.session.add(db_event)\n db.session.flush()\n sas_event = SASEvent(db_event.id, event_reference)\n db.session.add(sas_event)\n db.session.commit()\n\n\ndef get_categories_and_stages(event_reference, event_id):\n event = db.session.query(Event).filter(Event.id == event_id).first()\n if event.categories or event.event_stages:\n pprint('Event Exists')\n else:\n url = DESTINATION_URL + event_reference\n try:\n page = urllib.request.urlopen(url)\n except (urllib.error.HTTPError, urllib.error.URLError):\n return\n soup = BeautifulSoup(page, 'html.parser')\n check_stages = get_categories(soup, event_id)\n\n\ndef get_categories(soup, event_id):\n category_div = soup.find('div', attrs={'id': 
'category_container'})\n if category_div:\n divs = category_div.find_all('div')\n for div in divs:\n if div.has_attr('data-event-category-id'):\n category_reference = div['data-event-category-id']\n category_name = div['data-loading-text']\n category_own_stage_reference = div['data-event-stage-id']\n db_category = Category(category_name, event_id)\n db_category_check = db.session.query(Category.name).filter(\n (Category.name == category_name) & (Category.event_id ==\n event_id))\n db_sas_category_check = db.session.query(SASCategory).filter(\n (SASCategory.category_reference == category_reference) &\n (SASCategory.stage_reference ==\n category_own_stage_reference))\n if not db.session.query(db_category_check.exists()).scalar():\n db.session.add(db_category)\n db.session.flush()\n if not db.session.query(db_sas_category_check.exists()\n ).scalar():\n db_sas_category = SASCategory(category_reference,\n category_own_stage_reference, db_category.id)\n db.session.add(db_sas_category)\n db.session.flush()\n db.session.commit()\n if div['data-multiple-event-stages'] == '1':\n get_category_stages(soup, db_category.id,\n category_reference)\n else:\n get_event_stages(soup, event_id)\n\n\ndef get_category_stages(soup, category_id, category_reference):\n stage_group_div = soup.find('div', attrs={'id': 'ec_' + category_reference}\n )\n stage_divs = stage_group_div.find_all('div')\n for stage_div in stage_divs:\n if stage_div.has_attr('data-stage-id'):\n category_stage_reference = stage_div['data-stage-id']\n category_stage_name = stage_div['data-loading-text']\n db_category_stage = CategoryStage(category_stage_name, category_id)\n db_category_stage_check = db.session.query(CategoryStage.name\n ).filter((CategoryStage.name == category_stage_name) & (\n CategoryStage.category_id == category_id))\n if not db.session.query(db_category_stage_check.exists()).scalar():\n db.session.add(db_category_stage)\n db.session.flush()\n db_sas_category_stage = SASCategoryStage(db_category_stage.\n id, category_stage_reference)\n db.session.add(db_sas_category_stage)\n db.session.flush()\n db.session.commit()\n\n\ndef get_event_stages(soup, event_id):\n all_event_stage_divs = soup.find('div', class_=\n 'row categories_stages event-sub-types')\n if all_event_stage_divs:\n event_stage_divs = all_event_stage_divs.find_all('div')\n for event_stage_div in event_stage_divs:\n if event_stage_div.has_attr('data-stage-id'):\n event_stage_reference = event_stage_div['data-stage-id']\n event_stage_name = event_stage_div['data-loading-text']\n db_event_stage = EventStage(event_stage_name, event_id)\n db_event_stage_check = db.session.query(EventStage.name\n ).filter((EventStage.name == event_stage_name) & (\n EventStage.event_id == event_id))\n if not db.session.query(db_event_stage_check.exists()).scalar(\n ):\n db.session.add(db_event_stage)\n db.session.flush()\n db_sas_event_stage = SASEventStage(db_event_stage.id,\n event_stage_reference)\n db.session.add(db_sas_event_stage)\n db.session.flush()\n db.session.commit()\n else:\n event_stage_reference_div = soup.find('div', class_=\n 'result-row load-results')\n if event_stage_reference_div:\n if event_stage_reference_div.has_attr('data-stage'):\n event_stage_reference = event_stage_reference_div['data-stage']\n sas_event = db.session.query(SASEvent).filter(SASEvent.\n event_id == event_id).first()\n db_event_stage_check = db.session.query(EventStage.name\n ).filter((EventStage.name == 'Overall Results') & (\n EventStage.event_id == sas_event.event_id))\n if not 
db.session.query(db_event_stage_check.exists()).scalar(\n ):\n db_event_stage = EventStage('Overall Results',\n sas_event.event_id)\n db.session.add(db_event_stage)\n db.session.flush()\n db_sas_event_stage = SASEventStage(db_event_stage.id,\n event_stage_reference)\n db.session.add(db_sas_event_stage)\n db.session.commit()\n\n\ndef get_results(event_reference):\n url = (\n '%s/participants/event-results/add-results?stage_id=%s&from=0&count=9999'\n % (DESTINATION_URL, event_reference))\n pprint(url)\n try:\n page = urllib.request.urlopen(url)\n except (urllib.error.HTTPError, urllib.error.ConnectionResetError):\n return\n content = page.read().decode('utf-8')\n json_content = json.loads(content)\n json_results = json_content['rows']\n return json_results\n\n\ndef write_stage_results(stage_reference, stage_id, stage_type):\n results = get_results(stage_reference)\n category_stage_id = None\n event_stage_id = None\n if stage_type == 'event':\n event_stage_id = stage_id\n elif stage_type == 'category':\n category_stage_id = stage_id\n if results:\n for result in results:\n participant_id = get_participant(result)\n db_result_check = db.session.query(Result).filter((Result.\n position == result['overall_pos']) & (Result.\n gender_position == result['gender_pos']) & (Result.time ==\n result['time_taken_seconds']) & (Result.event_stage_id ==\n event_stage_id) & (Result.category_stage_id ==\n category_stage_id))\n if not db.session.query(db_result_check.exists()).scalar():\n if stage_type == 'category':\n db_result = Result(result['overall_pos'],\n participant_id, result['gender_pos'], result[\n 'time_taken_seconds'], None, category_stage_id, None)\n elif stage_type == 'event':\n db_result = Result(result['overall_pos'],\n participant_id, result['gender_pos'], result[\n 'time_taken_seconds'], event_stage_id, None, None)\n db.session.add(db_result)\n db.session.commit()\n\n\ndef write_category_results(category_reference, category_id):\n results = get_results(category_reference)\n for result in results:\n participant_id = get_participant(result)\n db_result_check = db.session.query(Result).filter((Result.position ==\n result['overall_pos']) & (Result.gender_position == result[\n 'gender_pos']) & (Result.time == result['time_taken_seconds']) &\n (Result.category_id == category_id)).first()\n if not db_result_check:\n db_category_result = Result(result['overall_pos'],\n participant_id, result['gender_pos'], result[\n 'time_taken_seconds'], None, None, category_id)\n db.session.add(db_category_result)\n db.session.commit()\n\n\ndef get_participant(result):\n if result['date_of_birth']:\n birth_date = datetime.strptime(result['date_of_birth'], '%Y-%m-%d'\n ).date()\n else:\n birth_date = None\n db_participant_check = db.session.query(Participant).filter((\n Participant.first_name == result['first_name']) & (Participant.\n last_name == result['last_name']) & (Participant.sex == result[\n 'person_sex']) & (Participant.birth_date == birth_date))\n if not db.session.query(db_participant_check.exists()).scalar():\n db_participant = Participant(result['first_name'], result[\n 'last_name'], result['person_sex'], birth_date)\n db.session.add(db_participant)\n db.session.commit()\n return db_participant.id\n else:\n return db_participant_check.first().id\n",
"step-5": "from bs4 import BeautifulSoup\nfrom pprint import pprint \nfrom scraper.sas.sas_models import SASEvent, SASCategory, SASCategoryStage, SASEventStage\nfrom scraper.base_models.models import Event, Category, CategoryStage, EventStage, Participant, Result\nfrom scraper.sas.sas_config import DESTINATION_URL, MTB_EVENT_TYPE, YEARS\nfrom scraper import db\nfrom datetime import datetime\nimport urllib\nimport json \nimport time\n\ndef scrape_sas():\n\tpprint(\"Scraping Events\")\n\tget_mtb_events()\n\tpprint(\"Getting categories and stages\")\n\tfor event in db.session.query(SASEvent):\n\t\tpprint(event.event_id)\n\t\tget_categories_and_stages(event.event_reference, event.event_id)\n\t\t#time.sleep(2)\n\tfor event_stage in db.session.query(SASEventStage):\n\t\tpprint(\"Getting event stage results\")\n\t\tbase_event_stage = db.session.query(EventStage).filter(EventStage.id==event_stage.event_stage_id).first()\n\t\tif (base_event_stage.results):\n\t\t\tpprint(\"Event has results\")\n\t\telse:\n\t\t\twrite_stage_results(event_stage.stage_reference, event_stage.event_stage_id, \"event\")\n\tfor category_stage in db.session.query(SASCategoryStage):\n\t\tpprint(\"Getting category stage results\")\n\t\tbase_category_stage = db.session.query(CategoryStage).filter(CategoryStage.id==category_stage.category_stage_id).first()\n\t\tif (base_category_stage.results):\n\t\t\tpprint(\"Category stage has results\")\n\t\telse: \n\t\t\twrite_stage_results(category_stage.stage_reference, category_stage.category_stage_id, \"category\")\n\tfor category in db.session.query(SASCategory):\n\t\tpprint(\"Getting category results\")\n\t\tbase_category = db.session.query(Category).filter(Category.id==category.category_id).first()\n\t\tif (base_category.results):\n\t\t\tpprint(\"Category has results\")\n\t\telse: \n\t\t\tif (not base_category.category_stages):\n\t\t\t\twrite_category_results(category.stage_reference, category.id)\n\t\t\telse:\n\t\t\t\tpprint(\"No results but has category stages\")\n\tpprint(\"Scrape Complete\")\n\ndef get_mtb_events(): \n\tfor year in YEARS: \n\t\turl = (\"%s/participants/event-results/fetch-series-by-type?event_type=%s&event_year=%d\" % \n\t\t\t (DESTINATION_URL, MTB_EVENT_TYPE, year))\n\t\ttry: \n\t\t\tpage = urllib.request.urlopen(url)\n\t\t\tcontent = page.read().decode(\"utf-8\")\n\t\t\tjson_content = json.loads(content)\n\t\t\tsoup = BeautifulSoup(json_content['HTML'], \"html.parser\")\n\t\t\tanchors = soup.find_all('a')\n\t\texcept (urllib.error.HTTPError, urllib.error.ConnectionResetError):\n\t\t\tpass\n\t\tfor anchor in anchors: \n\t\t\tevent_reference = anchor[\"href\"]\n\t\t\tdivs = anchor.find_all('div')\n\t\t\tfor div in divs:\n\t\t\t\tif (\"event-date\" in div[\"class\"]):\n\t\t\t\t\tevent_date = (div.find(text=True))\n\t\t\t\telif (\"event-title\" in div[\"class\"]):\n\t\t\t\t\tevent_name = (div.find(text=True))\n\t\t\tdb_date = datetime.strptime(event_date, '%d %b %Y')\n\t\t\tdb_event = Event(event_name, db_date)\n\t\t\tdb_check = db.session.query(Event.title).filter(Event.title==event_name)\n\t\t\tif not (db.session.query(db_check.exists()).scalar()):\n\t\t\t\tdb.session.add(db_event)\n\t\t\t\tdb.session.flush()\n\t\t\t\tsas_event = SASEvent(db_event.id, event_reference)\n\t\t\t\tdb.session.add(sas_event)\n\t\t\t\tdb.session.commit()\n\ndef get_categories_and_stages(event_reference, event_id):\n\tevent = db.session.query(Event).filter(Event.id==event_id).first()\n\tif (event.categories or event.event_stages):\n\t\tpprint(\"Event Exists\")\n\telse: \n\t\turl = 
(DESTINATION_URL + event_reference)\n\t\ttry: \n\t\t\tpage = urllib.request.urlopen(url)\n\t\texcept (urllib.error.HTTPError, urllib.error.URLError):\n\t\t\treturn\n\t\tsoup = BeautifulSoup(page, \"html.parser\")\n\t\tcheck_stages = get_categories(soup, event_id)\n\ndef get_categories(soup, event_id):\n\tcategory_div = soup.find('div', attrs={\"id\" : \"category_container\"})\n\t#Check to see if event has categories first\n\tif category_div:\n\t\tdivs = category_div.find_all('div')\n\t\tfor div in divs: \n\t\t\tif div.has_attr(\"data-event-category-id\"):\n\t\t\t\t#Event has categories\n\t\t\t\tcategory_reference = div[\"data-event-category-id\"]\n\t\t\t\tcategory_name = div[\"data-loading-text\"]\n\t\t\t\tcategory_own_stage_reference = div[\"data-event-stage-id\"]\n\t\t\t\tdb_category = Category(category_name, event_id)\n\t\t\t\t#Check both name and event id to allow duplicate names \n\t\t\t\tdb_category_check = db.session.query(Category.name).filter(\n\t\t\t\t(Category.name==category_name) &\n\t\t\t\t(Category.event_id==event_id))\n\t\t\t\t#Check SAS category for duplicates as well \n\t\t\t\tdb_sas_category_check = db.session.query(SASCategory).filter(\n\t\t\t\t(SASCategory.category_reference==category_reference) &\n\t\t\t\t(SASCategory.stage_reference==category_own_stage_reference))\n\t\t\t\tif not (db.session.query(db_category_check.exists()).scalar()):\n\t\t\t\t\tdb.session.add(db_category)\n\t\t\t\t\tdb.session.flush()\n\t\t\t\t\tif not (db.session.query(db_sas_category_check.exists()).scalar()):\n\t\t\t\t\t\tdb_sas_category = SASCategory(category_reference, category_own_stage_reference, db_category.id)\n\t\t\t\t\t\tdb.session.add(db_sas_category)\n\t\t\t\t\t\tdb.session.flush()\n\t\t\t\t\t\tdb.session.commit()\t\t\t\n\t\t\t\t\tif (div[\"data-multiple-event-stages\"] == \"1\"):\n\t\t\t\t\t\t#Event has stages with their own categories\n\t\t\t\t\t\tget_category_stages(soup, db_category.id, category_reference)\n\telse:\n\t\t#Event does not have categories\n\t\tget_event_stages(soup, event_id)\n\n\ndef get_category_stages(soup, category_id, category_reference):\n\tstage_group_div = soup.find('div', attrs={\"id\" : (\"ec_\" + category_reference)})\n\tstage_divs = stage_group_div.find_all('div')\n\tfor stage_div in stage_divs: \n\t\tif stage_div.has_attr(\"data-stage-id\"):\n\t\t\tcategory_stage_reference = stage_div[\"data-stage-id\"]\n\t\t\tcategory_stage_name = stage_div[\"data-loading-text\"]\n\t\t\tdb_category_stage = CategoryStage(category_stage_name, category_id)\n\t\t\t#Check both name and category id to allow duplicate names \n\t\t\tdb_category_stage_check = db.session.query(CategoryStage.name).filter(\n\t\t\t\t(CategoryStage.name==category_stage_name) &\n\t\t\t\t(CategoryStage.category_id==category_id))\n\t\t\tif not (db.session.query(db_category_stage_check.exists()).scalar()):\n\t\t\t\tdb.session.add(db_category_stage)\n\t\t\t\tdb.session.flush()\n\t\t\t\tdb_sas_category_stage = SASCategoryStage(db_category_stage.id, category_stage_reference)\n\t\t\t\tdb.session.add(db_sas_category_stage)\n\t\t\t\tdb.session.flush()\n\t\t\t\tdb.session.commit()\n\ndef get_event_stages(soup, event_id):\n\tall_event_stage_divs = soup.find('div', class_ = \"row categories_stages event-sub-types\")\n\t#Check if event has stages\n\tif all_event_stage_divs:\n\t\tevent_stage_divs = all_event_stage_divs.find_all ('div')\n\t\tfor event_stage_div in event_stage_divs: \n\t\t\tif event_stage_div.has_attr(\"data-stage-id\"):\n\t\t\t\t#Event has stages and no categories\n\t\t\t\tevent_stage_reference = 
event_stage_div[\"data-stage-id\"]\n\t\t\t\tevent_stage_name = event_stage_div[\"data-loading-text\"]\n\t\t\t\tdb_event_stage = EventStage(event_stage_name, event_id)\n\t\t\t\t#Check if it exists by name and ID and add if it doesn't\n\t\t\t\tdb_event_stage_check = db.session.query(EventStage.name).filter(\n\t\t\t\t\t(EventStage.name==event_stage_name) &\n\t\t\t\t\t(EventStage.event_id==event_id))\n\t\t\t\tif not (db.session.query(db_event_stage_check.exists()).scalar()):\n\t\t\t\t\tdb.session.add(db_event_stage)\n\t\t\t\t\tdb.session.flush()\n\t\t\t\t\tdb_sas_event_stage = SASEventStage(db_event_stage.id, event_stage_reference)\n\t\t\t\t\tdb.session.add(db_sas_event_stage)\n\t\t\t\t\tdb.session.flush()\n\t\t\t\t\tdb.session.commit()\n\telse: \n\t\t#Event has no stages or categories\n\t\t#create new stage for just the overall results, unless event has no results\n\t\tevent_stage_reference_div = soup.find('div', class_ = \"result-row load-results\")\n\t\tif event_stage_reference_div:\n\t\t\tif event_stage_reference_div.has_attr(\"data-stage\"):\n\t\t\t\tevent_stage_reference = event_stage_reference_div[\"data-stage\"]\n\t\t\t\tsas_event = db.session.query(SASEvent).filter(SASEvent.event_id==event_id).first()\n\t\t\t\tdb_event_stage_check = db.session.query(EventStage.name).filter(\n\t\t\t\t\t(EventStage.name==\"Overall Results\") &\n\t\t\t\t\t(EventStage.event_id==sas_event.event_id))\n\t\t\t\tif not (db.session.query(db_event_stage_check.exists()).scalar()):\n\t\t\t\t\tdb_event_stage = EventStage(\"Overall Results\", sas_event.event_id)\n\t\t\t\t\tdb.session.add(db_event_stage)\n\t\t\t\t\tdb.session.flush()\n\t\t\t\t\tdb_sas_event_stage = SASEventStage(db_event_stage.id, event_stage_reference)\n\t\t\t\t\tdb.session.add(db_sas_event_stage)\n\t\t\t\t\tdb.session.commit()\n\ndef get_results(event_reference): \n\turl = (\"%s/participants/event-results/add-results?stage_id=%s&from=0&count=9999\" % \n\t\t\t (DESTINATION_URL, event_reference))\n\tpprint(url)\n\ttry: \n\t\tpage = urllib.request.urlopen(url)\n\texcept (urllib.error.HTTPError, urllib.error.ConnectionResetError):\n\t\treturn\n\tcontent = page.read().decode(\"utf-8\")\n\tjson_content = json.loads(content)\n\tjson_results = json_content['rows']\n\treturn json_results\n\ndef write_stage_results(stage_reference, stage_id, stage_type):\n\tresults = get_results(stage_reference)\n\tcategory_stage_id = None\n\tevent_stage_id = None\n\tif (stage_type==\"event\"):\n\t\tevent_stage_id = stage_id\n\telif (stage_type==\"category\"):\n\t\tcategory_stage_id = stage_id\n\tif results:\n\t\tfor result in results: \n\t\t\tparticipant_id = get_participant(result)\n\t\t\tdb_result_check = db.session.query(Result).filter(\n\t\t\t\t(Result.position==result['overall_pos']) &\n\t\t\t\t(Result.gender_position==result['gender_pos']) & \n\t\t\t\t(Result.time==result['time_taken_seconds']) & \n\t\t\t\t(Result.event_stage_id==event_stage_id) &\n\t\t\t\t(Result.category_stage_id==category_stage_id))\n\t\t\tif not (db.session.query(db_result_check.exists()).scalar()):\n\t\t\t\tif (stage_type==\"category\"): \n\t\t\t\t\tdb_result = Result(result['overall_pos'], participant_id, result['gender_pos'],\n\t\t\t\t\tresult['time_taken_seconds'], None, category_stage_id, None)\n\t\t\t\telif (stage_type==\"event\"):\n\t\t\t\t\tdb_result = Result(result['overall_pos'], participant_id, result['gender_pos'],\n\t\t\t\t result['time_taken_seconds'], event_stage_id, None, None)\n\t\t\t\tdb.session.add(db_result)\n\t\t\t\tdb.session.commit()\n\ndef 
write_category_results(category_reference, category_id):\n\tresults = get_results(category_reference)\n\tfor result in results: \n\t\tparticipant_id = get_participant(result)\n\n\t\tdb_result_check = db.session.query(Result).filter(\n\t\t\t(Result.position==result['overall_pos']) &\n\t\t\t(Result.gender_position==result['gender_pos']) & \n\t\t\t(Result.time==result['time_taken_seconds']) & \n\t\t\t(Result.category_id==category_id)).first()\n\t\tif not db_result_check:\n\t\t\tdb_category_result = Result(result['overall_pos'], participant_id,\n\t\t\tresult['gender_pos'], result['time_taken_seconds'], None, None, category_id)\n\t\t\tdb.session.add(db_category_result)\n\t\t\tdb.session.commit()\n\ndef get_participant(result):\n\tif result['date_of_birth']:\n\t\tbirth_date = datetime.strptime(result['date_of_birth'], '%Y-%m-%d').date()\n\telse:\n\t\tbirth_date = None\n\tdb_participant_check = db.session.query(Participant).filter(\n\t\t(Participant.first_name==result['first_name']) &\n\t\t(Participant.last_name==result['last_name']) & \n\t\t(Participant.sex==result['person_sex']) & \n\t\t(Participant.birth_date==birth_date))\n\tif not (db.session.query(db_participant_check.exists()).scalar()):\n\t\tdb_participant = Participant(result['first_name'], result['last_name'],\n\t\tresult['person_sex'], birth_date)\n\t\tdb.session.add(db_participant)\n\t\tdb.session.commit()\n\t\treturn db_participant.id\n\telse: \n\t\treturn db_participant_check.first().id\n\n\n\n",
"step-ids": [
8,
9,
10,
11,
12
]
}
|
[
8,
9,
10,
11,
12
] |
from ..translators.translator import Translator
|
normal
|
{
"blob_id": "ab844143ceddf32982682f5092762af0c97db577",
"index": 391,
"step-1": "<mask token>\n",
"step-2": "from ..translators.translator import Translator\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
__author__ = 'rhyschris'
""" Defines the set of actions.
This functions exactly the same as
Actions.cs in the Unity game.
"""
from enum import Enum
class Actions(Enum):
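    # Bit-packed action codes: movement values occupy bits 2-3,
    # block/attack values occupy bits 4-6, mirroring Actions.cs.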
doNothing = 0
crouch = 1
jump = 3
walkTowards = 0x1 << 2
runTowards = 0x2 << 2
moveAway = 0x3 << 2
blockUp = 0x1 << 4
blockDown = 0x2 << 4
attack1 = 0x3 << 4
attack2 = 0x4 << 4
attack3 = 0x5 << 4
attack4 = 0x6 << 4
if __name__ == '__main__':
    print("Contents of actions:")
    for act in Actions:
        print(repr(act))
|
normal
|
{
"blob_id": "bc0bfb0ff8eaf21b15b06eea2ea333381c70bc75",
"index": 6775,
"step-1": "__author__='rhyschris'\n\n\"\"\" Defines the set of actions.\n This functions exactly the same as \n Actions.cs in the Unity game.\n\"\"\"\nfrom enum import Enum\n\n\nclass Actions(Enum):\n doNothing = 0\n crouch = 1\n jump = 3\n walkTowards = 0x1 << 2\n runTowards = 0x2 << 2\n moveAway = 0x3 << 2\n blockUp = 0x1 << 4\n blockDown = 0x2 << 4\n attack1 = 0x3 << 4\n attack2 = 0x4 << 4\n attack3 = 0x5 << 4\n attack4 = 0x6 << 4\n\nif __name__ == '__main__':\n print \"Contents of actions:\"\n \n for act in Actions:\n print repr(act)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/local/bin/python3
def printGrid(grid):
for row in grid:
print(row)
print("")
def validFormatting(grid):
if (type(grid) is not list):
return False
elif (len(grid) != 9):
return False
else:
for row in grid:
if (type(row) is not list):
return False
elif (len(row) != 9):
return False
else:
for item in row:
if (type(item) is not int or item < 0 or item > 9):
return False
return True
def validRows(grid):
found_zero = False
for row in range(9):
bit_dict = {}
for col in range(9):
current_item = grid[row][col]
if (current_item != 0 and current_item in bit_dict):
#print("{0} was duplicated in row {1}".format(current_item, row))
return False
else:
bit_dict[current_item] = True
return True
def validCols(grid):
found_zero = False
for col in range(9):
bit_dict = {}
for row in range(len(grid)):
current_item = grid[row][col]
if (current_item != 0 and current_item in bit_dict):
#print("{0} was duplicated in column {1}".format(current_item, row))
return False
else:
bit_dict[current_item] = True
return True
def validBoxes(grid):
start_positions = [[0,0],
[0,3],
[0,6],
[3,0],
[3,3],
[3,6],
[6,0],
[6,3],
[6,6]]
for i in range(9):
x = start_positions[i][0]
y = start_positions[i][1]
bit_dict = {}
for row in range(3):
for col in range(3):
current_item = grid[y+row][x+col]
if (current_item != 0 and current_item in bit_dict):
#print("{0} was duplicated in box ({1},{2})".format(current_item, x//3, y//3))
return False
else:
bit_dict[current_item] = True
return True
def getOpenSpot(grid):
for row in range(9):
for col in range(9):
if (grid[row][col] == 0):
return (row,col)
return None
def checkInRow(grid, row, num):
for col in range(9):
if (grid[row][col] == num):
return True
return False
def checkInCol(grid, col, num):
for row in range(9):
if (grid[row][col] == num):
return True
return False
def checkInBox(grid, startRow, startCol, num):
for row in range(3):
for col in range(3):
if (grid[startRow+row][startCol+col] == num):
return True
return False
def checkIsOkay(grid, row, col, val):
inRow = checkInRow(grid, row, val)
inCol = checkInCol(grid, col, val)
inBox = checkInBox(grid, row - (row%3), col - (col%3), val)
if (not inRow and not inCol and not inBox):
return True
else:
return False
def validGrid(grid):
if (not validFormatting(grid)):
return None
elif (
validRows(grid) and
validCols(grid) and
validBoxes(grid)
):
return True
else:
return False
def solveSudoku(grid):
nextSpot = getOpenSpot(grid)
if (nextSpot == None):
return True
row = nextSpot[0]
col = nextSpot[1]
for digit in range(1,10):
if (checkIsOkay(grid, row, col, digit)):
#print("Selected:", digit)
grid[row][col] = digit
#printGrid(grid)
if (solveSudoku(grid)):
return True
else:
grid[row][col] = 0
return False
|
normal
|
{
"blob_id": "67452f31a49f50cdb2555406287b31e53a994224",
"index": 7906,
"step-1": "<mask token>\n\n\ndef validRows(grid):\n found_zero = False\n for row in range(9):\n bit_dict = {}\n for col in range(9):\n current_item = grid[row][col]\n if current_item != 0 and current_item in bit_dict:\n return False\n else:\n bit_dict[current_item] = True\n return True\n\n\n<mask token>\n\n\ndef getOpenSpot(grid):\n for row in range(9):\n for col in range(9):\n if grid[row][col] == 0:\n return row, col\n return None\n\n\n<mask token>\n\n\ndef checkInBox(grid, startRow, startCol, num):\n for row in range(3):\n for col in range(3):\n if grid[startRow + row][startCol + col] == num:\n return True\n return False\n\n\ndef checkIsOkay(grid, row, col, val):\n inRow = checkInRow(grid, row, val)\n inCol = checkInCol(grid, col, val)\n inBox = checkInBox(grid, row - row % 3, col - col % 3, val)\n if not inRow and not inCol and not inBox:\n return True\n else:\n return False\n\n\ndef validGrid(grid):\n if not validFormatting(grid):\n return None\n elif validRows(grid) and validCols(grid) and validBoxes(grid):\n return True\n else:\n return False\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef validFormatting(grid):\n if type(grid) is not list:\n return False\n elif len(grid) != 9:\n return False\n else:\n for row in grid:\n if type(row) is not list:\n return False\n elif len(row) != 9:\n return False\n else:\n for item in row:\n if type(item) is not int or item < 0 or item > 9:\n return False\n return True\n\n\ndef validRows(grid):\n found_zero = False\n for row in range(9):\n bit_dict = {}\n for col in range(9):\n current_item = grid[row][col]\n if current_item != 0 and current_item in bit_dict:\n return False\n else:\n bit_dict[current_item] = True\n return True\n\n\n<mask token>\n\n\ndef getOpenSpot(grid):\n for row in range(9):\n for col in range(9):\n if grid[row][col] == 0:\n return row, col\n return None\n\n\n<mask token>\n\n\ndef checkInCol(grid, col, num):\n for row in range(9):\n if grid[row][col] == num:\n return True\n return False\n\n\ndef checkInBox(grid, startRow, startCol, num):\n for row in range(3):\n for col in range(3):\n if grid[startRow + row][startCol + col] == num:\n return True\n return False\n\n\ndef checkIsOkay(grid, row, col, val):\n inRow = checkInRow(grid, row, val)\n inCol = checkInCol(grid, col, val)\n inBox = checkInBox(grid, row - row % 3, col - col % 3, val)\n if not inRow and not inCol and not inBox:\n return True\n else:\n return False\n\n\ndef validGrid(grid):\n if not validFormatting(grid):\n return None\n elif validRows(grid) and validCols(grid) and validBoxes(grid):\n return True\n else:\n return False\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef validFormatting(grid):\n if type(grid) is not list:\n return False\n elif len(grid) != 9:\n return False\n else:\n for row in grid:\n if type(row) is not list:\n return False\n elif len(row) != 9:\n return False\n else:\n for item in row:\n if type(item) is not int or item < 0 or item > 9:\n return False\n return True\n\n\ndef validRows(grid):\n found_zero = False\n for row in range(9):\n bit_dict = {}\n for col in range(9):\n current_item = grid[row][col]\n if current_item != 0 and current_item in bit_dict:\n return False\n else:\n bit_dict[current_item] = True\n return True\n\n\ndef validCols(grid):\n found_zero = False\n for col in range(9):\n bit_dict = {}\n for row in range(len(grid)):\n current_item = grid[row][col]\n if current_item != 0 and current_item in bit_dict:\n return False\n else:\n bit_dict[current_item] = True\n return True\n\n\n<mask token>\n\n\ndef getOpenSpot(grid):\n for row in range(9):\n for col in range(9):\n if grid[row][col] == 0:\n return row, col\n return None\n\n\ndef checkInRow(grid, row, num):\n for col in range(9):\n if grid[row][col] == num:\n return True\n return False\n\n\ndef checkInCol(grid, col, num):\n for row in range(9):\n if grid[row][col] == num:\n return True\n return False\n\n\ndef checkInBox(grid, startRow, startCol, num):\n for row in range(3):\n for col in range(3):\n if grid[startRow + row][startCol + col] == num:\n return True\n return False\n\n\ndef checkIsOkay(grid, row, col, val):\n inRow = checkInRow(grid, row, val)\n inCol = checkInCol(grid, col, val)\n inBox = checkInBox(grid, row - row % 3, col - col % 3, val)\n if not inRow and not inCol and not inBox:\n return True\n else:\n return False\n\n\ndef validGrid(grid):\n if not validFormatting(grid):\n return None\n elif validRows(grid) and validCols(grid) and validBoxes(grid):\n return True\n else:\n return False\n\n\n<mask token>\n",
"step-4": "def printGrid(grid):\n for row in grid:\n print(row)\n print('')\n\n\ndef validFormatting(grid):\n if type(grid) is not list:\n return False\n elif len(grid) != 9:\n return False\n else:\n for row in grid:\n if type(row) is not list:\n return False\n elif len(row) != 9:\n return False\n else:\n for item in row:\n if type(item) is not int or item < 0 or item > 9:\n return False\n return True\n\n\ndef validRows(grid):\n found_zero = False\n for row in range(9):\n bit_dict = {}\n for col in range(9):\n current_item = grid[row][col]\n if current_item != 0 and current_item in bit_dict:\n return False\n else:\n bit_dict[current_item] = True\n return True\n\n\ndef validCols(grid):\n found_zero = False\n for col in range(9):\n bit_dict = {}\n for row in range(len(grid)):\n current_item = grid[row][col]\n if current_item != 0 and current_item in bit_dict:\n return False\n else:\n bit_dict[current_item] = True\n return True\n\n\ndef validBoxes(grid):\n start_positions = [[0, 0], [0, 3], [0, 6], [3, 0], [3, 3], [3, 6], [6, \n 0], [6, 3], [6, 6]]\n for i in range(9):\n x = start_positions[i][0]\n y = start_positions[i][1]\n bit_dict = {}\n for row in range(3):\n for col in range(3):\n current_item = grid[y + row][x + col]\n if current_item != 0 and current_item in bit_dict:\n return False\n else:\n bit_dict[current_item] = True\n return True\n\n\ndef getOpenSpot(grid):\n for row in range(9):\n for col in range(9):\n if grid[row][col] == 0:\n return row, col\n return None\n\n\ndef checkInRow(grid, row, num):\n for col in range(9):\n if grid[row][col] == num:\n return True\n return False\n\n\ndef checkInCol(grid, col, num):\n for row in range(9):\n if grid[row][col] == num:\n return True\n return False\n\n\ndef checkInBox(grid, startRow, startCol, num):\n for row in range(3):\n for col in range(3):\n if grid[startRow + row][startCol + col] == num:\n return True\n return False\n\n\ndef checkIsOkay(grid, row, col, val):\n inRow = checkInRow(grid, row, val)\n inCol = checkInCol(grid, col, val)\n inBox = checkInBox(grid, row - row % 3, col - col % 3, val)\n if not inRow and not inCol and not inBox:\n return True\n else:\n return False\n\n\ndef validGrid(grid):\n if not validFormatting(grid):\n return None\n elif validRows(grid) and validCols(grid) and validBoxes(grid):\n return True\n else:\n return False\n\n\ndef solveSudoku(grid):\n nextSpot = getOpenSpot(grid)\n if nextSpot == None:\n return True\n row = nextSpot[0]\n col = nextSpot[1]\n for digit in range(1, 10):\n if checkIsOkay(grid, row, col, digit):\n grid[row][col] = digit\n if solveSudoku(grid):\n return True\n else:\n grid[row][col] = 0\n return False\n",
"step-5": "#!/usr/local/bin/python3\n\ndef printGrid(grid):\n for row in grid:\n print(row)\n print(\"\")\n\ndef validFormatting(grid):\n if (type(grid) is not list):\n return False\n elif (len(grid) != 9):\n return False\n else:\n for row in grid:\n if (type(row) is not list):\n return False\n elif (len(row) != 9):\n return False\n else:\n for item in row:\n if (type(item) is not int or item < 0 or item > 9):\n return False\n return True\n\ndef validRows(grid):\n found_zero = False\n for row in range(9):\n bit_dict = {}\n for col in range(9):\n current_item = grid[row][col]\n if (current_item != 0 and current_item in bit_dict):\n #print(\"{0} was duplicated in row {1}\".format(current_item, row))\n return False\n else:\n bit_dict[current_item] = True\n return True\n\ndef validCols(grid):\n found_zero = False\n for col in range(9):\n bit_dict = {}\n for row in range(len(grid)):\n current_item = grid[row][col]\n if (current_item != 0 and current_item in bit_dict):\n #print(\"{0} was duplicated in column {1}\".format(current_item, row))\n return False\n else:\n bit_dict[current_item] = True\n return True\n\ndef validBoxes(grid):\n start_positions = [[0,0],\n [0,3],\n [0,6],\n [3,0],\n [3,3],\n [3,6],\n [6,0],\n [6,3],\n [6,6]]\n for i in range(9):\n x = start_positions[i][0]\n y = start_positions[i][1]\n bit_dict = {}\n for row in range(3):\n for col in range(3):\n current_item = grid[y+row][x+col]\n if (current_item != 0 and current_item in bit_dict):\n #print(\"{0} was duplicated in box ({1},{2})\".format(current_item, x//3, y//3))\n return False\n else:\n bit_dict[current_item] = True\n return True\n\ndef getOpenSpot(grid):\n for row in range(9):\n for col in range(9):\n if (grid[row][col] == 0):\n return (row,col)\n return None\n\ndef checkInRow(grid, row, num):\n for col in range(9):\n if (grid[row][col] == num):\n return True\n return False\n\ndef checkInCol(grid, col, num):\n for row in range(9):\n if (grid[row][col] == num):\n return True\n return False\n\ndef checkInBox(grid, startRow, startCol, num):\n for row in range(3):\n for col in range(3):\n if (grid[startRow+row][startCol+col] == num):\n return True\n return False\n\ndef checkIsOkay(grid, row, col, val):\n inRow = checkInRow(grid, row, val)\n inCol = checkInCol(grid, col, val)\n inBox = checkInBox(grid, row - (row%3), col - (col%3), val)\n if (not inRow and not inCol and not inBox):\n return True\n else:\n return False\n\ndef validGrid(grid):\n if (not validFormatting(grid)):\n return None\n elif (\n validRows(grid) and\n validCols(grid) and\n validBoxes(grid)\n ):\n return True\n else:\n return False\n\ndef solveSudoku(grid):\n nextSpot = getOpenSpot(grid)\n if (nextSpot == None):\n return True\n\n row = nextSpot[0]\n col = nextSpot[1]\n for digit in range(1,10):\n if (checkIsOkay(grid, row, col, digit)):\n #print(\"Selected:\", digit)\n grid[row][col] = digit\n #printGrid(grid)\n if (solveSudoku(grid)):\n return True\n else:\n grid[row][col] = 0\n return False\n\n",
"step-ids": [
5,
7,
9,
12,
13
]
}
|
[
5,
7,
9,
12,
13
] |
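For context, here is a small driver for the solver above; the puzzle literal is a well-known example, not from the original file. validGrid() screens the input, then solveSudoku() fills the zeros in place by backtracking.

```python
puzzle = [
    [5, 3, 0, 0, 7, 0, 0, 0, 0],
    [6, 0, 0, 1, 9, 5, 0, 0, 0],
    [0, 9, 8, 0, 0, 0, 0, 6, 0],
    [8, 0, 0, 0, 6, 0, 0, 0, 3],
    [4, 0, 0, 8, 0, 3, 0, 0, 1],
    [7, 0, 0, 0, 2, 0, 0, 0, 6],
    [0, 6, 0, 0, 0, 0, 2, 8, 0],
    [0, 0, 0, 4, 1, 9, 0, 0, 5],
    [0, 0, 0, 0, 8, 0, 0, 7, 9],
]
if validGrid(puzzle) and solveSudoku(puzzle):
    printGrid(puzzle)   # the grid is mutated in place with the solution
else:
    print("no solution")
```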
from django.conf.urls import patterns, include, url
from views.index import Index
from views.configuracoes import Configuracoes
from views.parametros import *
urlpatterns = patterns('',
url(r'^$', Index.as_view(), name='core_index'),
url(r'^configuracoes/', Configuracoes.as_view(), name='core.core_configurations'),
#Parametros
url(r'^parametros/data/$', ParametrosData.as_view(),name='core.list_json_parametro'),
url(r'^parametros/formulario/$', ParametrosCreateForm.as_view(),name='core.add_parametro'),
url(r'^parametros/(?P<pk>\d+)/$', ParametrosUpdateForm.as_view(),name='core.change_parametro'),
url(r'^parametros/remove/(?P<pk>\d+)/$', ParametrosDelete.as_view(),name='core.delete_parametro'),
url(r'^parametros/$', ParametrosList.as_view(), name='core.list_parametros'),
)
|
normal
|
{
"blob_id": "74c60c9e37e4e13ed4c61f631c3426b685b5d38f",
"index": 8875,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = patterns('', url('^$', Index.as_view(), name='core_index'),\n url('^configuracoes/', Configuracoes.as_view(), name=\n 'core.core_configurations'), url('^parametros/data/$', ParametrosData.\n as_view(), name='core.list_json_parametro'), url(\n '^parametros/formulario/$', ParametrosCreateForm.as_view(), name=\n 'core.add_parametro'), url('^parametros/(?P<pk>\\\\d+)/$',\n ParametrosUpdateForm.as_view(), name='core.change_parametro'), url(\n '^parametros/remove/(?P<pk>\\\\d+)/$', ParametrosDelete.as_view(), name=\n 'core.delete_parametro'), url('^parametros/$', ParametrosList.as_view(),\n name='core.list_parametros'))\n",
"step-3": "from django.conf.urls import patterns, include, url\nfrom views.index import Index\nfrom views.configuracoes import Configuracoes\nfrom views.parametros import *\nurlpatterns = patterns('', url('^$', Index.as_view(), name='core_index'),\n url('^configuracoes/', Configuracoes.as_view(), name=\n 'core.core_configurations'), url('^parametros/data/$', ParametrosData.\n as_view(), name='core.list_json_parametro'), url(\n '^parametros/formulario/$', ParametrosCreateForm.as_view(), name=\n 'core.add_parametro'), url('^parametros/(?P<pk>\\\\d+)/$',\n ParametrosUpdateForm.as_view(), name='core.change_parametro'), url(\n '^parametros/remove/(?P<pk>\\\\d+)/$', ParametrosDelete.as_view(), name=\n 'core.delete_parametro'), url('^parametros/$', ParametrosList.as_view(),\n name='core.list_parametros'))\n",
"step-4": "from django.conf.urls import patterns, include, url\r\n\r\nfrom views.index import Index\r\nfrom views.configuracoes import Configuracoes\r\n\r\nfrom views.parametros import *\r\n\r\nurlpatterns = patterns('',\r\n url(r'^$', Index.as_view(), name='core_index'),\r\n url(r'^configuracoes/', Configuracoes.as_view(), name='core.core_configurations'),\r\n\r\n #Parametros\r\n url(r'^parametros/data/$', ParametrosData.as_view(),name='core.list_json_parametro'),\r\n url(r'^parametros/formulario/$', ParametrosCreateForm.as_view(),name='core.add_parametro'),\r\n url(r'^parametros/(?P<pk>\\d+)/$', ParametrosUpdateForm.as_view(),name='core.change_parametro'),\r\n url(r'^parametros/remove/(?P<pk>\\d+)/$', ParametrosDelete.as_view(),name='core.delete_parametro'),\r\n url(r'^parametros/$', ParametrosList.as_view(), name='core.list_parametros'),\r\n)\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
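Note that patterns() was deprecated in Django 1.8 and removed in 1.10; since then urlpatterns is a plain list. A hedged sketch of the same routes in the newer style, abridged to three of the views above (in Django 2.0+ django.urls.re_path would replace url):

```python
from django.conf.urls import url
from views.index import Index
from views.configuracoes import Configuracoes
from views.parametros import ParametrosList

urlpatterns = [
    url(r'^$', Index.as_view(), name='core_index'),
    url(r'^configuracoes/', Configuracoes.as_view(), name='core.core_configurations'),
    url(r'^parametros/$', ParametrosList.as_view(), name='core.list_parametros'),
]
```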
import json
import os
from subprocess import PIPE, Popen as popen
from unittest import TestCase
from substra.commands import Config
objective = [[{
'descriptionStorageAddress': 'http://chunantes.substrabac:8001/objective/d5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f/description/',
'key': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',
'metrics': {'hash': '750f622262854341bd44f55c1018949e9c119606ef5068bd7d137040a482a756',
'name': 'macro-average recall',
'storageAddress': 'http://chunantes.substrabac:8001/objective/d5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f/metrics/'},
'name': 'Skin Lesion Classification Challenge',
'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',
'testDataKeys': ['e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1']}, {
'descriptionStorageAddress': 'http://owkin.substrabac:8000/objective/6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c/description/',
'key': '6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c',
'metrics': {'hash': '0bc732c26bafdc41321c2bffd35b6835aa35f7371a4eb02994642c2c3a688f60',
'name': 'macro-average recall',
'storageAddress': 'http://owkin.substrabac:8000/objective/6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c/metrics/'},
'name': 'Simplified skin lesion classification',
'owner': 'ca77d9070da2732f3dc1fcdb9397cfcf2fad2dcdde4e355dfe34658ad8b9ce55', 'permissions': 'all',
'testDataKeys': ['2d0f943aa81a9cb3fe84b162559ce6aff068ccb04e0cb284733b8f9d7e06517e',
'533ee6e7b9d8b247e7e853b24547f57e6ef351852bac0418f13a0666173448f1']}]]
data_manager = [[{'objectiveKeys': [],
'description': {'hash': '7a90514f88c70002608a9868681dd1589ea598e78d00a8cd7783c3ea0f9ceb09',
'storageAddress': 'http://chunantes.substrabac:8001/data_manager/ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994/description/'},
'key': 'ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994', 'name': 'ISIC 2018',
'nbData': 2,
'openerStorageAddress': 'http://chunantes.substrabac:8001/data_manager/ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994/opener/',
'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',
'size': 553113, 'type': 'Images'}, {
'objectiveKeys': ['6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c',
'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f'],
'description': {'hash': '258bef187a166b3fef5cb86e68c8f7e154c283a148cd5bc344fec7e698821ad3',
'storageAddress': 'http://owkin.substrabac:8000/data_manager/b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0/description/'},
'key': 'b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0',
'name': 'Simplified ISIC 2018', 'nbData': 6,
'openerStorageAddress': 'http://owkin.substrabac:8000/data_manager/b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0/opener/',
'owner': 'ca77d9070da2732f3dc1fcdb9397cfcf2fad2dcdde4e355dfe34658ad8b9ce55', 'permissions': 'all',
'size': 1415097, 'type': 'Images'}]]
data = [{'pkhash': 'e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1', 'validated': True,
'file': 'http://owkin.substrabac:8000/media/data/e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1/0024900.zip'},
{'pkhash': '4b5152871b181d10ee774c10458c064c70710f4ba35938f10c0b7aa51f7dc010', 'validated': True,
'file': 'http://owkin.substrabac:8000/media/data/4b5152871b181d10ee774c10458c064c70710f4ba35938f10c0b7aa51f7dc010/0024701.zip'},
{'pkhash': '93e4b1e040b08cfa8a68b13f9dddb95a6672e8a377378545b2b1254691cfc060', 'validated': True,
'file': 'http://owkin.substrabac:8000/media/data/93e4b1e040b08cfa8a68b13f9dddb95a6672e8a377378545b2b1254691cfc060/0024317.zip'},
{'pkhash': 'eed4c6ea09babe7ca6428377fff6e54102ef5cdb0cae593732ddbe3f224217cb', 'validated': True,
'file': 'http://owkin.substrabac:8000/media/data/eed4c6ea09babe7ca6428377fff6e54102ef5cdb0cae593732ddbe3f224217cb/0024316.zip'},
{'pkhash': '2d0f943aa81a9cb3fe84b162559ce6aff068ccb04e0cb284733b8f9d7e06517e', 'validated': True,
'file': 'http://owkin.substrabac:8000/media/data/2d0f943aa81a9cb3fe84b162559ce6aff068ccb04e0cb284733b8f9d7e06517e/0024315.zip'},
{'pkhash': '533ee6e7b9d8b247e7e853b24547f57e6ef351852bac0418f13a0666173448f1', 'validated': True,
'file': 'http://owkin.substrabac:8000/media/data/533ee6e7b9d8b247e7e853b24547f57e6ef351852bac0418f13a0666173448f1/0024318.zip'}]
algo = [[{'objectiveKey': '6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c',
'description': {'hash': '3b1281cbdd6ebfec650d0a9f932a64e45a27262848065d7cecf11fd7191b4b1f',
'storageAddress': 'http://chunantes.substrabac:8001/algo/7742aea2001ceb40e9ce8a37fa27237d5b2d1f574e06d48677af945cfdf42ec0/description/'},
'key': '7742aea2001ceb40e9ce8a37fa27237d5b2d1f574e06d48677af945cfdf42ec0',
'name': 'Logistic regression for balanced problem',
'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',
'storageAddress': 'http://chunantes.substrabac:8001/algo/7742aea2001ceb40e9ce8a37fa27237d5b2d1f574e06d48677af945cfdf42ec0/file/'},
{'objectiveKey': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',
'description': {'hash': 'b9463411a01ea00869bdffce6e59a5c100a4e635c0a9386266cad3c77eb28e9e',
'storageAddress': 'http://chunantes.substrabac:8001/algo/0acc5180e09b6a6ac250f4e3c172e2893f617aa1c22ef1f379019d20fe44142f/description/'},
'key': '0acc5180e09b6a6ac250f4e3c172e2893f617aa1c22ef1f379019d20fe44142f', 'name': 'Neural Network',
'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',
'storageAddress': 'http://chunantes.substrabac:8001/algo/0acc5180e09b6a6ac250f4e3c172e2893f617aa1c22ef1f379019d20fe44142f/file/'},
{'objectiveKey': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',
'description': {'hash': '124a0425b746d7072282d167b53cb6aab3a31bf1946dae89135c15b0126ebec3',
'storageAddress': 'http://chunantes.substrabac:8001/algo/6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f/description/'},
'key': '6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f', 'name': 'Logistic regression',
'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',
'storageAddress': 'http://chunantes.substrabac:8001/algo/6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f/file/'},
{'objectiveKey': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',
'description': {'hash': '4acea40c4b51996c88ef279c5c9aa41ab77b97d38c5ca167e978a98b2e402675',
'storageAddress': 'http://chunantes.substrabac:8001/algo/f2d9fd38e25cd975c49f3ce7e6739846585e89635a86689b5db42ab2c0c57284/description/'},
'key': 'f2d9fd38e25cd975c49f3ce7e6739846585e89635a86689b5db42ab2c0c57284', 'name': 'Random Forest',
'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',
'storageAddress': 'http://chunantes.substrabac:8001/algo/f2d9fd38e25cd975c49f3ce7e6739846585e89635a86689b5db42ab2c0c57284/file/'}]]
model = [[{'algo': {'hash': '6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f',
'name': 'Logistic regression',
'storageAddress': 'http://chunantes.substrabac:8001/algo/6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f/file/'},
'objective': {'hash': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',
'metrics': {'hash': '750f622262854341bd44f55c1018949e9c119606ef5068bd7d137040a482a756',
'storageAddress': 'http://chunantes.substrabac:8001/objective/d5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f/metrics/'}},
'creator': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f',
'endModel': {'hash': 'fe900588d43263c0ce1709116fe07c68d299acbbb6cfb241b0e8795bc8a1fbcb',
'storageAddress': 'http://chunantes.substrabac:8001/model/fe900588d43263c0ce1709116fe07c68d299acbbb6cfb241b0e8795bc8a1fbcb/file/'},
'key': '3e1a9e122765b2976f393322ab9d1c59fb113b35e2531900e06c9ae0f41e8afb',
'log': 'Train - CPU:100.23 % - Mem:0.14 GB - GPU:0.00 % - GPU Mem:0.00 GB; Test - CPU:0.00 % - Mem:0.00 GB - GPU:0.00 % - GPU Mem:0.00 GB; ',
'permissions': 'all', 'startModel': None, 'status': 'done',
'testData': {'keys': ['e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1'],
'openerHash': 'b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0', 'perf': 1,
'worker': 'ca77d9070da2732f3dc1fcdb9397cfcf2fad2dcdde4e355dfe34658ad8b9ce55'}, 'trainData': {
'keys': ['62fb3263208d62c7235a046ee1d80e25512fe782254b730a9e566276b8c0ef3a',
'42303efa663015e729159833a12ffb510ff92a6e386b8152f90f6fb14ddc94c9'],
'openerHash': 'ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994', 'perf': 1,
'worker': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f'}}]]
traintuple = [{'algo': {'hash': '6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f',
'name': 'Logistic regression',
'storageAddress': 'http://chunantes.substrabac:8001/algo/6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f/file/'},
'objective': {'hash': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',
'metrics': {'hash': '750f622262854341bd44f55c1018949e9c119606ef5068bd7d137040a482a756',
'storageAddress': 'http://chunantes.substrabac:8001/objective/d5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f/metrics/'}},
'creator': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f',
'endModel': {'hash': 'fe900588d43263c0ce1709116fe07c68d299acbbb6cfb241b0e8795bc8a1fbcb',
'storageAddress': 'http://chunantes.substrabac:8001/model/fe900588d43263c0ce1709116fe07c68d299acbbb6cfb241b0e8795bc8a1fbcb/file/'},
'key': '3e1a9e122765b2976f393322ab9d1c59fb113b35e2531900e06c9ae0f41e8afb',
'log': 'Train - CPU:100.23 % - Mem:0.14 GB - GPU:0.00 % - GPU Mem:0.00 GB; Test - CPU:0.00 % - Mem:0.00 GB - GPU:0.00 % - GPU Mem:0.00 GB; ',
'permissions': 'all', 'startModel': None, 'status': 'done',
'testData': {'keys': ['e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1'],
'openerHash': 'b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0', 'perf': 1,
'worker': 'ca77d9070da2732f3dc1fcdb9397cfcf2fad2dcdde4e355dfe34658ad8b9ce55'},
'trainData': {'keys': ['62fb3263208d62c7235a046ee1d80e25512fe782254b730a9e566276b8c0ef3a',
'42303efa663015e729159833a12ffb510ff92a6e386b8152f90f6fb14ddc94c9'],
'openerHash': 'ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994',
'perf': 1, 'worker': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f'}}]
# Run these tests only after an end-to-end multi-org deployment
class TestList(TestCase):
def setUp(self):
Config({
'<url>': 'http://owkin.substrabac:8000',
'<version>': '0.0',
'<user>': os.environ.get('BACK_AUTH_USER', ''),
'<password>': os.environ.get('BACK_AUTH_PASSWORD', ''),
'--config': '/tmp/.substra_e2e'
}).run()
def tearDown(self):
try:
os.remove('/tmp/.substra_e2e')
except:
pass
def test_list_objective(self):
output = popen(['substra', 'list', 'objective', '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]
res = output.decode('utf-8')
self.assertTrue(json.loads(res) == objective)
def test_list_data_manager(self):
output = popen(['substra', 'list', 'data_manager', '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]
res = output.decode('utf-8')
self.assertTrue(json.loads(res) == data_manager)
def test_list_data(self):
output = popen(['substra', 'list', 'data_sample', '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]
res = output.decode('utf-8')
self.assertTrue(json.loads(res) == data)
def test_list_algo(self):
output = popen(['substra', 'list', 'algo', '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]
res = output.decode('utf-8')
self.assertTrue(json.loads(res) == algo)
def test_list_model(self):
output = popen(['substra', 'list', 'model', '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]
res = output.decode('utf-8')
self.assertTrue(json.loads(res) == model)
def test_list_traintuple(self):
output = popen(['substra', 'list', 'traintuple', '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]
res = output.decode('utf-8')
self.assertTrue(json.loads(res) == traintuple)
|
normal
|
{
"blob_id": "c55b768466309d2e655c9222e0674a6bc2a958b3",
"index": 9899,
"step-1": "<mask token>\n\n\nclass TestList(TestCase):\n <mask token>\n <mask token>\n\n def test_list_objective(self):\n output = popen(['substra', 'list', 'objective',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == objective)\n\n def test_list_data_manager(self):\n output = popen(['substra', 'list', 'data_manager',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == data_manager)\n\n def test_list_data(self):\n output = popen(['substra', 'list', 'data_sample',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == data)\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestList(TestCase):\n <mask token>\n\n def tearDown(self):\n try:\n os.remove('/tmp/.substra_e2e')\n except:\n pass\n\n def test_list_objective(self):\n output = popen(['substra', 'list', 'objective',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == objective)\n\n def test_list_data_manager(self):\n output = popen(['substra', 'list', 'data_manager',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == data_manager)\n\n def test_list_data(self):\n output = popen(['substra', 'list', 'data_sample',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == data)\n\n def test_list_algo(self):\n output = popen(['substra', 'list', 'algo',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == algo)\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestList(TestCase):\n\n def setUp(self):\n Config({'<url>': 'http://owkin.substrabac:8000', '<version>': '0.0',\n '<user>': os.environ.get('BACK_AUTH_USER', ''), '<password>':\n os.environ.get('BACK_AUTH_PASSWORD', ''), '--config':\n '/tmp/.substra_e2e'}).run()\n\n def tearDown(self):\n try:\n os.remove('/tmp/.substra_e2e')\n except:\n pass\n\n def test_list_objective(self):\n output = popen(['substra', 'list', 'objective',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == objective)\n\n def test_list_data_manager(self):\n output = popen(['substra', 'list', 'data_manager',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == data_manager)\n\n def test_list_data(self):\n output = popen(['substra', 'list', 'data_sample',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == data)\n\n def test_list_algo(self):\n output = popen(['substra', 'list', 'algo',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == algo)\n <mask token>\n\n def test_list_traintuple(self):\n output = popen(['substra', 'list', 'traintuple',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == traintuple)\n",
"step-4": "<mask token>\n\n\nclass TestList(TestCase):\n\n def setUp(self):\n Config({'<url>': 'http://owkin.substrabac:8000', '<version>': '0.0',\n '<user>': os.environ.get('BACK_AUTH_USER', ''), '<password>':\n os.environ.get('BACK_AUTH_PASSWORD', ''), '--config':\n '/tmp/.substra_e2e'}).run()\n\n def tearDown(self):\n try:\n os.remove('/tmp/.substra_e2e')\n except:\n pass\n\n def test_list_objective(self):\n output = popen(['substra', 'list', 'objective',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == objective)\n\n def test_list_data_manager(self):\n output = popen(['substra', 'list', 'data_manager',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == data_manager)\n\n def test_list_data(self):\n output = popen(['substra', 'list', 'data_sample',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == data)\n\n def test_list_algo(self):\n output = popen(['substra', 'list', 'algo',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == algo)\n\n def test_list_model(self):\n output = popen(['substra', 'list', 'model',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == model)\n\n def test_list_traintuple(self):\n output = popen(['substra', 'list', 'traintuple',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == traintuple)\n",
"step-5": "import json\nimport os\nfrom subprocess import PIPE, Popen as popen\nfrom unittest import TestCase\n\nfrom substra.commands import Config\n\nobjective = [[{\n 'descriptionStorageAddress': 'http://chunantes.substrabac:8001/objective/d5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f/description/',\n 'key': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',\n 'metrics': {'hash': '750f622262854341bd44f55c1018949e9c119606ef5068bd7d137040a482a756',\n 'name': 'macro-average recall',\n 'storageAddress': 'http://chunantes.substrabac:8001/objective/d5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f/metrics/'},\n 'name': 'Skin Lesion Classification Challenge',\n 'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',\n 'testDataKeys': ['e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1']}, {\n 'descriptionStorageAddress': 'http://owkin.substrabac:8000/objective/6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c/description/',\n 'key': '6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c',\n 'metrics': {'hash': '0bc732c26bafdc41321c2bffd35b6835aa35f7371a4eb02994642c2c3a688f60',\n 'name': 'macro-average recall',\n 'storageAddress': 'http://owkin.substrabac:8000/objective/6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c/metrics/'},\n 'name': 'Simplified skin lesion classification',\n 'owner': 'ca77d9070da2732f3dc1fcdb9397cfcf2fad2dcdde4e355dfe34658ad8b9ce55', 'permissions': 'all',\n 'testDataKeys': ['2d0f943aa81a9cb3fe84b162559ce6aff068ccb04e0cb284733b8f9d7e06517e',\n '533ee6e7b9d8b247e7e853b24547f57e6ef351852bac0418f13a0666173448f1']}]]\n\ndata_manager = [[{'objectiveKeys': [],\n 'description': {'hash': '7a90514f88c70002608a9868681dd1589ea598e78d00a8cd7783c3ea0f9ceb09',\n 'storageAddress': 'http://chunantes.substrabac:8001/data_manager/ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994/description/'},\n 'key': 'ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994', 'name': 'ISIC 2018',\n 'nbData': 2,\n 'openerStorageAddress': 'http://chunantes.substrabac:8001/data_manager/ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994/opener/',\n 'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',\n 'size': 553113, 'type': 'Images'}, {\n 'objectiveKeys': ['6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c',\n 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f'],\n 'description': {'hash': '258bef187a166b3fef5cb86e68c8f7e154c283a148cd5bc344fec7e698821ad3',\n 'storageAddress': 'http://owkin.substrabac:8000/data_manager/b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0/description/'},\n 'key': 'b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0',\n 'name': 'Simplified ISIC 2018', 'nbData': 6,\n 'openerStorageAddress': 'http://owkin.substrabac:8000/data_manager/b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0/opener/',\n 'owner': 'ca77d9070da2732f3dc1fcdb9397cfcf2fad2dcdde4e355dfe34658ad8b9ce55', 'permissions': 'all',\n 'size': 1415097, 'type': 'Images'}]]\n\ndata = [{'pkhash': 'e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1', 'validated': True,\n 'file': 'http://owkin.substrabac:8000/media/data/e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1/0024900.zip'},\n {'pkhash': '4b5152871b181d10ee774c10458c064c70710f4ba35938f10c0b7aa51f7dc010', 'validated': True,\n 'file': 
'http://owkin.substrabac:8000/media/data/4b5152871b181d10ee774c10458c064c70710f4ba35938f10c0b7aa51f7dc010/0024701.zip'},\n {'pkhash': '93e4b1e040b08cfa8a68b13f9dddb95a6672e8a377378545b2b1254691cfc060', 'validated': True,\n 'file': 'http://owkin.substrabac:8000/media/data/93e4b1e040b08cfa8a68b13f9dddb95a6672e8a377378545b2b1254691cfc060/0024317.zip'},\n {'pkhash': 'eed4c6ea09babe7ca6428377fff6e54102ef5cdb0cae593732ddbe3f224217cb', 'validated': True,\n 'file': 'http://owkin.substrabac:8000/media/data/eed4c6ea09babe7ca6428377fff6e54102ef5cdb0cae593732ddbe3f224217cb/0024316.zip'},\n {'pkhash': '2d0f943aa81a9cb3fe84b162559ce6aff068ccb04e0cb284733b8f9d7e06517e', 'validated': True,\n 'file': 'http://owkin.substrabac:8000/media/data/2d0f943aa81a9cb3fe84b162559ce6aff068ccb04e0cb284733b8f9d7e06517e/0024315.zip'},\n {'pkhash': '533ee6e7b9d8b247e7e853b24547f57e6ef351852bac0418f13a0666173448f1', 'validated': True,\n 'file': 'http://owkin.substrabac:8000/media/data/533ee6e7b9d8b247e7e853b24547f57e6ef351852bac0418f13a0666173448f1/0024318.zip'}]\n\nalgo = [[{'objectiveKey': '6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c',\n 'description': {'hash': '3b1281cbdd6ebfec650d0a9f932a64e45a27262848065d7cecf11fd7191b4b1f',\n 'storageAddress': 'http://chunantes.substrabac:8001/algo/7742aea2001ceb40e9ce8a37fa27237d5b2d1f574e06d48677af945cfdf42ec0/description/'},\n 'key': '7742aea2001ceb40e9ce8a37fa27237d5b2d1f574e06d48677af945cfdf42ec0',\n 'name': 'Logistic regression for balanced problem',\n 'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',\n 'storageAddress': 'http://chunantes.substrabac:8001/algo/7742aea2001ceb40e9ce8a37fa27237d5b2d1f574e06d48677af945cfdf42ec0/file/'},\n {'objectiveKey': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',\n 'description': {'hash': 'b9463411a01ea00869bdffce6e59a5c100a4e635c0a9386266cad3c77eb28e9e',\n 'storageAddress': 'http://chunantes.substrabac:8001/algo/0acc5180e09b6a6ac250f4e3c172e2893f617aa1c22ef1f379019d20fe44142f/description/'},\n 'key': '0acc5180e09b6a6ac250f4e3c172e2893f617aa1c22ef1f379019d20fe44142f', 'name': 'Neural Network',\n 'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',\n 'storageAddress': 'http://chunantes.substrabac:8001/algo/0acc5180e09b6a6ac250f4e3c172e2893f617aa1c22ef1f379019d20fe44142f/file/'},\n {'objectiveKey': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',\n 'description': {'hash': '124a0425b746d7072282d167b53cb6aab3a31bf1946dae89135c15b0126ebec3',\n 'storageAddress': 'http://chunantes.substrabac:8001/algo/6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f/description/'},\n 'key': '6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f', 'name': 'Logistic regression',\n 'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',\n 'storageAddress': 'http://chunantes.substrabac:8001/algo/6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f/file/'},\n {'objectiveKey': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',\n 'description': {'hash': '4acea40c4b51996c88ef279c5c9aa41ab77b97d38c5ca167e978a98b2e402675',\n 'storageAddress': 'http://chunantes.substrabac:8001/algo/f2d9fd38e25cd975c49f3ce7e6739846585e89635a86689b5db42ab2c0c57284/description/'},\n 'key': 'f2d9fd38e25cd975c49f3ce7e6739846585e89635a86689b5db42ab2c0c57284', 'name': 'Random Forest',\n 'owner': 
'91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',\n 'storageAddress': 'http://chunantes.substrabac:8001/algo/f2d9fd38e25cd975c49f3ce7e6739846585e89635a86689b5db42ab2c0c57284/file/'}]]\n\nmodel = [[{'algo': {'hash': '6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f',\n 'name': 'Logistic regression',\n 'storageAddress': 'http://chunantes.substrabac:8001/algo/6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f/file/'},\n 'objective': {'hash': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',\n 'metrics': {'hash': '750f622262854341bd44f55c1018949e9c119606ef5068bd7d137040a482a756',\n 'storageAddress': 'http://chunantes.substrabac:8001/objective/d5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f/metrics/'}},\n 'creator': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f',\n 'endModel': {'hash': 'fe900588d43263c0ce1709116fe07c68d299acbbb6cfb241b0e8795bc8a1fbcb',\n 'storageAddress': 'http://chunantes.substrabac:8001/model/fe900588d43263c0ce1709116fe07c68d299acbbb6cfb241b0e8795bc8a1fbcb/file/'},\n 'key': '3e1a9e122765b2976f393322ab9d1c59fb113b35e2531900e06c9ae0f41e8afb',\n 'log': 'Train - CPU:100.23 % - Mem:0.14 GB - GPU:0.00 % - GPU Mem:0.00 GB; Test - CPU:0.00 % - Mem:0.00 GB - GPU:0.00 % - GPU Mem:0.00 GB; ',\n 'permissions': 'all', 'startModel': None, 'status': 'done',\n 'testData': {'keys': ['e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1'],\n 'openerHash': 'b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0', 'perf': 1,\n 'worker': 'ca77d9070da2732f3dc1fcdb9397cfcf2fad2dcdde4e355dfe34658ad8b9ce55'}, 'trainData': {\n 'keys': ['62fb3263208d62c7235a046ee1d80e25512fe782254b730a9e566276b8c0ef3a',\n '42303efa663015e729159833a12ffb510ff92a6e386b8152f90f6fb14ddc94c9'],\n 'openerHash': 'ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994', 'perf': 1,\n 'worker': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f'}}]]\n\ntraintuple = [{'algo': {'hash': '6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f',\n 'name': 'Logistic regression',\n 'storageAddress': 'http://chunantes.substrabac:8001/algo/6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f/file/'},\n 'objective': {'hash': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',\n 'metrics': {'hash': '750f622262854341bd44f55c1018949e9c119606ef5068bd7d137040a482a756',\n 'storageAddress': 'http://chunantes.substrabac:8001/objective/d5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f/metrics/'}},\n 'creator': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f',\n 'endModel': {'hash': 'fe900588d43263c0ce1709116fe07c68d299acbbb6cfb241b0e8795bc8a1fbcb',\n 'storageAddress': 'http://chunantes.substrabac:8001/model/fe900588d43263c0ce1709116fe07c68d299acbbb6cfb241b0e8795bc8a1fbcb/file/'},\n 'key': '3e1a9e122765b2976f393322ab9d1c59fb113b35e2531900e06c9ae0f41e8afb',\n 'log': 'Train - CPU:100.23 % - Mem:0.14 GB - GPU:0.00 % - GPU Mem:0.00 GB; Test - CPU:0.00 % - Mem:0.00 GB - GPU:0.00 % - GPU Mem:0.00 GB; ',\n 'permissions': 'all', 'startModel': None, 'status': 'done',\n 'testData': {'keys': ['e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1'],\n 'openerHash': 'b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0', 'perf': 1,\n 'worker': 'ca77d9070da2732f3dc1fcdb9397cfcf2fad2dcdde4e355dfe34658ad8b9ce55'},\n 'trainData': {'keys': 
['62fb3263208d62c7235a046ee1d80e25512fe782254b730a9e566276b8c0ef3a',\n '42303efa663015e729159833a12ffb510ff92a6e386b8152f90f6fb14ddc94c9'],\n 'openerHash': 'ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994',\n 'perf': 1, 'worker': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f'}}]\n\n\n# Run this test only after an e2e multi orgs\nclass TestList(TestCase):\n\n def setUp(self):\n Config({\n '<url>': 'http://owkin.substrabac:8000',\n '<version>': '0.0',\n '<user>': os.environ.get('BACK_AUTH_USER', ''),\n '<password>': os.environ.get('BACK_AUTH_PASSWORD', ''),\n '--config': '/tmp/.substra_e2e'\n }).run()\n\n def tearDown(self):\n try:\n os.remove('/tmp/.substra_e2e')\n except:\n pass\n\n def test_list_objective(self):\n output = popen(['substra', 'list', 'objective', '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n\n self.assertTrue(json.loads(res) == objective)\n\n def test_list_data_manager(self):\n output = popen(['substra', 'list', 'data_manager', '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n\n self.assertTrue(json.loads(res) == data_manager)\n\n def test_list_data(self):\n output = popen(['substra', 'list', 'data_sample', '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n\n self.assertTrue(json.loads(res) == data)\n\n def test_list_algo(self):\n output = popen(['substra', 'list', 'algo', '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n\n self.assertTrue(json.loads(res) == algo)\n\n def test_list_model(self):\n output = popen(['substra', 'list', 'model', '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n\n self.assertTrue(json.loads(res) == model)\n\n def test_list_traintuple(self):\n output = popen(['substra', 'list', 'traintuple', '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n\n self.assertTrue(json.loads(res) == traintuple)\n",
"step-ids": [
4,
6,
8,
9,
12
]
}
|
[
4,
6,
8,
9,
12
] |
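The tests above shell out with popen(...).communicate(); the same call written with subprocess.run (Python 3.5+) would look like the sketch below, reusing the objective fixture from this row. check=True is an addition, not part of the original tests.

```python
import json
import subprocess

out = subprocess.run(
    ['substra', 'list', 'objective', '--config=/tmp/.substra_e2e'],
    stdout=subprocess.PIPE, check=True).stdout.decode('utf-8')
assert json.loads(out) == objective  # `objective` as defined in the fixtures above
```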
"""
Read a real number. If it is positive, print its square root; otherwise print its square.
"""
import math
print('Insert a number')
num1 = float(input())
if num1 > 0:
print(f'The square root of {num1} is {math.sqrt(num1)}')
else:
print(f'The square of {num1} is {num1**2}')
|
normal
|
{
"blob_id": "a68d682ba6d441b9d7fb69ec1ee318a0ef65ed40",
"index": 3146,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Insert a number')\n<mask token>\nif num1 > 0:\n print(f'The square root of {num1} is {math.sqrt(num1)}')\nelse:\n print(f'The square of {num1} is {num1 ** 2}')\n",
"step-3": "<mask token>\nprint('Insert a number')\nnum1 = float(input())\nif num1 > 0:\n print(f'The square root of {num1} is {math.sqrt(num1)}')\nelse:\n print(f'The square of {num1} is {num1 ** 2}')\n",
"step-4": "<mask token>\nimport math\nprint('Insert a number')\nnum1 = float(input())\nif num1 > 0:\n print(f'The square root of {num1} is {math.sqrt(num1)}')\nelse:\n print(f'The square of {num1} is {num1 ** 2}')\n",
"step-5": "\"\"\"\n\nRead a real number. If it is positive print it's square root, if it's not print the square of it.\n\n\"\"\"\nimport math\n\nprint('Insert a number')\nnum1 = float(input())\n\nif num1 > 0:\n print(f'The square root of {num1} is {math.sqrt(num1)}')\nelse:\n print(f'The square of {num1} is {num1**2}')\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
#===============================================================================
#
# Board Data File Analyzer
#
# Copyright (c) 2017 by QUALCOMM Atheros, Incorporated.
# All Rights Reserved
# QUALCOMM Atheros Confidential and Proprietary
#
# Notifications and licenses are retained for attribution purposes only
#===============================================================================
#--------------
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
from array import array
import numpy as np
Description = """
[Description]:
Reads a WLAN board data file and generates a graph per chain.
1 2 3 4
fullmeas_pwr_0_G_0_0
fullmeas_pwr_0_A_0_0
1. Index/Step: an iteration takes 10 steps
2. Band: 'G' is 2.4G and 'A' is 5G.
3. Channel: 14 channels for 2.4G and 32 channels for 5G.
4. Chain: Either chain0 or chain1.
[Input]:
BIN/wlan_proc/wlan/halphy_tools/host/bdfUtil/qca61x0/bdf
[Usage]:
BDFAnalyzer.py input.txt
"""
fullpdadc_val_list = [] # y-axis
fullpwr_val_list = [] # x-axis
fullpwr_tag_list = []
win = pg.GraphicsWindow(title="Chain Analyzer: chain 0 (RED) chain 1 (GREEN)")
win.resize(1000,600)
def backup_calibration(fin):
for index in range(len(fullpwr_tag_list)):
fin.write(fullpwr_tag_list[index])
fin.write(" ")
fin.write(fullpwr_val_list[index])
fin.write(",")
fin.write(fullpdadc_val_list[index])
fin.write("\n")
def plot_render(band, channel):
index_lower = 0
index_upper = 0
X = []
Y = []
if band == "G": # 2.4G
index_lower = channel * 20
index_upper = (channel+1) * 20
elif band == "A": # 5G
index_lower = 280 + channel * 20
index_upper = 280 + (channel+1) * 20
else:
print "Plot render error\n"
for i in range(index_lower, index_upper):
X.append(int(fullpwr_val_list[i], 10))
Y.append(int(fullpdadc_val_list[i], 10))
title_description = "Channel " + str(channel)
pp = win.addPlot(title = title_description)
pp.plot(X[0:10],Y[0:10], title="Chain 0", pen=(255,0,0)) # chain 0 as red line
pp.plot(X[10:20],Y[10:20], title="Chain 1", pen=(0,255,0)) # chain 1 as green line
pp.showGrid(x=True, y=True)
def main():
global fullpwr_tag_list, fullpwr_val_list, fullpdadc_val_list
clpc = open("files/calibration.txt","w")
bdf = open("files/bdwlan30.txt",'r')
# read data
for line in bdf:
if "fullpdadc" in line:
tmp = line.split()
fullpdadc_val_list.append(tmp[1])
if "fullmeas_pwr" in line:
tmp = line.split()
fullpwr_tag_list.append(tmp[0])
fullpwr_val_list.append(tmp[1])
# write calibration backup file
backup_calibration(clpc)
bdf.close()
clpc.close()
# draw plot
plot_render('A', 7)
plot_render('A', 8)
win.nextRow()
plot_render('A', 9)
plot_render('A', 10)
if __name__ == '__main__':
	import sys
	main()
	# start the Qt event loop once the plots are built
	if sys.flags.interactive != 1 or not hasattr(QtCore, 'PYQT_VERSION'):
		QtGui.QApplication.exec_()
|
normal
|
{
"blob_id": "5c12ff4f88af991fa275cd08adf3678ee4a678f3",
"index": 8532,
"step-1": "#!/usr/bin/python\n#===============================================================================\n#\n# Board Data File Analyzer\n#\n# Copyright (c) 2017 by QUALCOMM Atheros, Incorporated.\n# All Rights Reserved\n# QUALCOMM Atheros Confidential and Proprietary\n#\n# Notifications and licenses are retained for attribution purposes only\n#===============================================================================\n\n#--------------\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtGui\nfrom array import array\nimport numpy as np\n\nDescription = \"\"\"\n[Description]:\nRead WLAN board data file and generate graph per chain.\n 1 2 3 4\nfullmeas_pwr_0_G_0_0\nfullmeas_pwr_0_A_0_0\n\n1. Index/Step: a iteration takes 10 steps\n2. Band: 'G' is 2.4G and 'A' is 5G.\n3. Channel: 14 channels for 2.4G and 32 channels for 5G.\n4. Chain: Either chain0 or chain1.\n\n[Input]:\nBIN/wlan_proc/wlan/halphy_tools/host/bdfUtil/qca61x0/bdf\n[Usage]:\nBDFAnalyzer.py input.txt\n\"\"\"\n\nfullpdadc_val_list = [] # y-axis\nfullpwr_val_list = [] # x-axis\nfullpwr_tag_list = [] \n\nwin = pg.GraphicsWindow(title=\"Chain Analyzer: chain 0 (RED) chain 1 (GREEN)\")\nwin.resize(1000,600)\ndef backup_calibration(fin):\n\tfor index in range(len(fullpwr_tag_list)):\n\t\tfin.write(fullpwr_tag_list[index])\n\t\tfin.write(\" \")\n\t\tfin.write(fullpwr_val_list[index])\n\t\tfin.write(\",\")\n\t\tfin.write(fullpdadc_val_list[index])\n\t\tfin.write(\"\\n\")\n\ndef plot_render(band, channel):\n\tindex_lower = 0\n\tindex_upper = 0\n\tX = []\n\tY = []\n\tif band == \"G\": # 2.4G\n\t\tindex_lower = channel * 20\n\t\tindex_upper = (channel+1) * 20 \n\telif band == \"A\": # 5G\n\t\tindex_lower = 280 + channel * 20\n\t\tindex_upper = 280 + (channel+1) * 20 \n\telse:\n\t\tprint \"Plot render error\\n\"\n\t\n\tfor i in range(index_lower, index_upper):\n\t\tX.append(int(fullpwr_val_list[i], 10))\n\t\tY.append(int(fullpdadc_val_list[i], 10))\n\n\ttitle_description = \"Channel \" + str(channel)\n\tpp = win.addPlot(title = title_description)\n\tpp.plot(X[0:10],Y[0:10], title=\"Chain 0\", pen=(255,0,0)) # chain 0 as red line\n\tpp.plot(X[10:20],Y[10:20], title=\"Chain 1\", pen=(0,255,0)) # chain 1 as green line\n\tpp.showGrid(x=True, y=True)\n\t\t\n\ndef main():\n\tglobal fullpwr_tag_list, fullpwr_val_list, fullpdadc_val_list\n\tclpc = open(\"files/calibration.txt\",\"w\")\n\tbdf = open(\"files/bdwlan30.txt\",'r')\n\t# read data\n\tfor line in bdf:\n\t\tif \"fullpdadc\" in line:\n\t\t\ttmp = line.split()\n\t\t\tfullpdadc_val_list.append(tmp[1])\n\t\tif \"fullmeas_pwr\" in line:\n\t\t\ttmp = line.split()\n\t\t\tfullpwr_tag_list.append(tmp[0])\n\t\t\tfullpwr_val_list.append(tmp[1])\n\n\t# write calibration backup file\n\tbackup_calibration(clpc)\n\tbdf.close()\n\tclpc.close()\n\t# draw plot\n\tplot_render('A', 7)\n\tplot_render('A', 8)\n\twin.nextRow()\n\tplot_render('A', 9)\n\tplot_render('A', 10)\n\tif __name__ == '__main__':\n\t\timport sys\n\t\tif sys.flags.interactive != 1 or not hasattr(QtCore, 'PYQT_VERSION'):\n\t\t\tQtGui.QApplication.exec_()\n\nmain()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
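The index arithmetic in plot_render() above implies a fixed layout: 14 2.4 GHz channels times 20 samples fill indices 0-279, 5 GHz starts at 280, and within each 20-entry channel block the first 10 samples belong to chain 0 and the last 10 to chain 1. A small sketch (hypothetical helper, Python 3) that makes the layout explicit:

```python
def channel_slice(band, channel, chain):
    # "G" blocks start at 0, "A" blocks after the 14 * 20 = 280 2.4 GHz entries;
    # each chain contributes 10 consecutive samples per channel block
    base = (0 if band == "G" else 280) + channel * 20 + chain * 10
    return base, base + 10

print(channel_slice("A", 7, 0))   # (420, 430)
print(channel_slice("A", 7, 1))   # (430, 440)
```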
def execute(n,dico):
    """
    Takes n, the position of the query in the dictionary, and dico, the dictionary itself.
    Returns a list whose elements are the rows returned by the query.
    """
    l = []
    import sqlite3
    conn = sqlite3.connect('imdb.db')
    c = conn.cursor()
    c.execute(dico[n][1])
    for row in c:
        l.append(row)
    conn.close()
    return l
def taille_plus_grande_reponse(reponses):
    """
    Takes a list.
    Returns the length of the longest element of the list (compared as strings).
    """
    l = reponses
    maxi = 0
    for i in range(len(l)):
        if len(str(l[i])) > maxi:
            maxi = len(str(l[i]))
    return maxi
# ---- question display ----
from tkinter import *
def question(dico):
    """
    Takes a dictionary.
    Returns nothing.
    """
    l = []
    for i in range(len(dico)):
        l.append(dico[i][0])
    affichage_question(dico,l)
def affichage_question(dico, texte, titre = "Question"):
    """
    Takes dico, a dictionary; texte, a list; and titre, a string.
    Opens a tkinter window titled titre in which each element of texte becomes a clickable button.
    """
    fenetre = tkinter.Tk()
    fenetre.title(titre)
    for i in range(len(texte)):
        bouton={}
        bouton[i]=Button(fenetre, text=texte[i], command=lambda n=i, dico=dico:requete(n,dico))
        bouton[i].pack()
    fenetre.mainloop()
# ---- query execution and display ----
def requete(n,dico):
    """
    Takes n, the index of the query in the dictionary, and dico, a dictionary.
    Returns nothing.
    """
    r = execute(n,dico)
    afficher_table(r, dico[n][0])
import tkinter
import os
def afficher_table(table, titre ="", debut = 0, fin = None):
    """
    Takes table, a list, and titre, a string.
    Returns nothing; renders the table in a tkinter window.
    """
    if titre != "":
        titre += "\n\n"
    #print(titre + texte_table(table, debut, fin))
    affichage(titre + texte_table(table, debut, fin), titre)
def texte_table(table, debut = 0, fin = None):
    """
    Takes table, a list.
    Returns a string laid out as a one-column ASCII table, one element of table per cell.
    """
    max = taille_plus_grande_reponse(table)
    texte = '+' + max * '-' + '+\n'
    for i in range(len(table)):
        texte = texte + '|' + str(table[i]) + (max - len(str(table[i]))) * ' ' + '|' + '\n+' + max * '-' + '+\n'
    return texte
def affichage(texte, titre = "Requêtes tables"):
    """
    Takes texte, a string, and titre, a string.
    Opens a scrollable tkinter window displaying texte.
    """
    root = tkinter.Tk()
    root.title(str(titre))
    RWidth=root.winfo_screenwidth() - 100
    RHeight=root.winfo_screenheight() - 100
    root.geometry("%dx%d+50+0"%(RWidth, RHeight))
    text=tkinter.Text(root, wrap = 'none')
    scroll_x=tkinter.Scrollbar(text.master, orient='horizontal', command = text.xview)
    scroll_x.config(command = text.xview)
    text.configure(xscrollcommand = scroll_x.set)
    scroll_x.pack(side = 'bottom', fill = 'x', anchor = 'w')
    scroll_y = tkinter.Scrollbar(text.master)
    scroll_y.config(command = text.yview)
    text.configure(yscrollcommand = scroll_y.set)
    scroll_y.pack(side = tkinter.RIGHT, fill = 'y')
    text.insert("1.0", texte)
    text.pack(side = tkinter.LEFT, expand = True, fill = tkinter.BOTH)
    root.mainloop()
def fichier_txt_en_texte(fichier):
"""
prend en argument le chemin d'un fichier texte
Renvoie le contenu du fichier texte sous forme de chaîne de caractère.
"""
with open(fichier, "r") as requete:
return requete.read()
def chemin(nom, repertoire):
"""
Prend en argument le nom du fichier où est stocké la requête et le nom du répertoire dans lequel est stocké la requête.
Renvoie le chemin de la requête.
"""
return repertoire + '/' + nom
def texte_en_liste(nom_requete, repertoire):
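    """
    Takes the query file name and its directory.
    Returns the file contents split into a list of whitespace-separated tokens.
    """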
requete = fichier_txt_en_texte(chemin(nom_requete, repertoire))
return requete.split()
def liste_en_texte(liste):
"""
prend en argument une liste et un indice et renvoie la même liste mais l'élement d'indice 'n' est transformé en texte.
"""
texte = ""
for i in range(len(liste)):
texte = texte + str(liste[i]) + " "
return texte
def separer_requete_et_question(nom, repertoire):
"""
prend en argument le numéro de la requête et renvoie la question et la requête sésparé.
"""
requete = texte_en_liste(nom, repertoire) #transforme la requête en tableau
question = ""
for i in range(len(requete)): #cherche le moment où la question s'arrête et sépare la question de la requête
if requete[i] == "?":
question = requete[0:i+1] #stock la question
requete = requete[i+1:len(requete)] #stock la réponse
break #stop la boucle quand la "?" est trouvé
return [liste_en_texte(question),liste_en_texte(requete)]
def creer_dictionnaire_vide():
"""
Ne contient aucun argument et renvoie un dictionnaire vide.
"""
dico = {}
return dico
def nom_element_du_repertoire(repertoire):
"""
prend en argument le nom d'un répertoire ranger dans le dossier projetsqlKilian.
renvoie une liste dont chaque élément est le nom d'un des fichier du repertoir.
"""
path = "C:\\Users\\Elève\\Desktop\\projet NSI\\projetsqlKilian\\projetsqlKilian\\" + repertoire
nom_requete = os.listdir(path)
return nom_requete
def stocker_requete(dico, repertoire):
"""
prend en argument dico un dictionnaire vide et repertoire le nom du repertoir où sont sockés les requêtes.
ne renvoie rien
"""
liste = nom_element_du_repertoire(repertoire)
for i in range(len(liste)):
requete = separer_requete_et_question(liste[i], repertoire)
dico[i] = ['#' + str(i+1) + ') ' + requete[0], requete[1]]
def afficher(dico):
"""
prend en argument un dictionnaire et renvoie ce disctionnaire.
"""
return dico
a = creer_dictionnaire_vide()
stocker_requete(a,'requête')
#print(afficher(a))
question(a)
#print(nom_element_du_repertoire('requête'))
#requete(a)
#print(execute(1,a))
#print(taille_plus_grande_reponse(execute(1,a)))
|
normal
|
{
"blob_id": "7618d7fde3774a04ac2005dad104e54b9988d3e8",
"index": 9487,
"step-1": "<mask token>\n\n\ndef taille_plus_grande_reponse(reponses):\n \"\"\"\n Prend en argument une liste.\n Renvoie la taille du plus grand élément de la liste.\n \"\"\"\n l = reponses\n maxi = 0\n for i in range(len(l)):\n if len(str(l[i])) > maxi:\n maxi = len(str(l[i]))\n return maxi\n\n\n<mask token>\n\n\ndef affichage_question(dico, texte, titre='Question'):\n \"\"\"\n prend en argument dico un dictionnaire, texte une liste, et titre une chaine de caractère.\n Renvoie une page tkinter où chaque indice de la liste texte est un bouton clickable et où titre et le nom de la page.\n \"\"\"\n fenetre = tkinter.Tk()\n fenetre.title(titre)\n for i in range(len(texte)):\n bouton = {}\n bouton[i] = Button(fenetre, text=texte[i], command=lambda n=i, dico\n =dico: requete(n, dico))\n bouton[i].pack()\n fenetre.mainloop()\n\n\n<mask token>\n\n\ndef requete(n, dico):\n \"\"\"\n prend en argument n l'indice de la requête dans le dictionnaire et dico un dictionnaire.\n ne renvoie rien\n \"\"\"\n r = execute(n, dico)\n afficher_table(execute(n, dico), dico[n][0])\n\n\n<mask token>\n\n\ndef afficher_table(table, titre='', debut=0, fin=None):\n \"\"\"\n prend en argument table une liste et titre une chaine de caractère.\n ne renvoie rien.\n \"\"\"\n if titre != '':\n titre += '\\n\\n'\n affichage(titre + texte_table(table, debut, fin), titre)\n\n\ndef texte_table(table, debut=0, fin=None):\n \"\"\"\n prend en argument table une liste.\n renvoie une chaîne de caractère composé d'un tableau avec dans chaque case un élement de table.\n \"\"\"\n max = taille_plus_grande_reponse(table)\n texte = '+' + max * '-' + '+\\n'\n for i in range(len(table)):\n texte = texte + '|' + str(table[i]) + (max - len(str(table[i]))\n ) * ' ' + '|' + '\\n+' + max * '-' + '+\\n'\n return texte\n\n\n<mask token>\n\n\ndef texte_en_liste(nom_requete, repertoire):\n requete = fichier_txt_en_texte(chemin(nom_requete, repertoire))\n return requete.split()\n\n\n<mask token>\n\n\ndef stocker_requete(dico, repertoire):\n \"\"\"\n prend en argument dico un dictionnaire vide et repertoire le nom du repertoir où sont sockés les requêtes.\n ne renvoie rien\n \"\"\"\n liste = nom_element_du_repertoire(repertoire)\n for i in range(len(liste)):\n requete = separer_requete_et_question(liste[i], repertoire)\n dico[i] = ['#' + str(i + 1) + ') ' + requete[0], requete[1]]\n\n\ndef afficher(dico):\n \"\"\"\n prend en argument un dictionnaire et renvoie ce disctionnaire.\n \"\"\"\n return dico\n\n\n<mask token>\n",
"step-2": "def execute(n, dico):\n \"\"\"\n Prend en argument n, la position de la requête dans le dictionaire et dico le nom du dictionnaire.\n Renvoie une liste dont chaque élément est une réponse de la requête.\n \"\"\"\n l = []\n import sqlite3\n conn = sqlite3.connect('imdb.db')\n c = conn.cursor()\n c.execute(dico[n][1])\n for row in c:\n l.append(row)\n conn.close()\n return l\n\n\ndef taille_plus_grande_reponse(reponses):\n \"\"\"\n Prend en argument une liste.\n Renvoie la taille du plus grand élément de la liste.\n \"\"\"\n l = reponses\n maxi = 0\n for i in range(len(l)):\n if len(str(l[i])) > maxi:\n maxi = len(str(l[i]))\n return maxi\n\n\n<mask token>\n\n\ndef affichage_question(dico, texte, titre='Question'):\n \"\"\"\n prend en argument dico un dictionnaire, texte une liste, et titre une chaine de caractère.\n Renvoie une page tkinter où chaque indice de la liste texte est un bouton clickable et où titre et le nom de la page.\n \"\"\"\n fenetre = tkinter.Tk()\n fenetre.title(titre)\n for i in range(len(texte)):\n bouton = {}\n bouton[i] = Button(fenetre, text=texte[i], command=lambda n=i, dico\n =dico: requete(n, dico))\n bouton[i].pack()\n fenetre.mainloop()\n\n\n<mask token>\n\n\ndef requete(n, dico):\n \"\"\"\n prend en argument n l'indice de la requête dans le dictionnaire et dico un dictionnaire.\n ne renvoie rien\n \"\"\"\n r = execute(n, dico)\n afficher_table(execute(n, dico), dico[n][0])\n\n\n<mask token>\n\n\ndef afficher_table(table, titre='', debut=0, fin=None):\n \"\"\"\n prend en argument table une liste et titre une chaine de caractère.\n ne renvoie rien.\n \"\"\"\n if titre != '':\n titre += '\\n\\n'\n affichage(titre + texte_table(table, debut, fin), titre)\n\n\ndef texte_table(table, debut=0, fin=None):\n \"\"\"\n prend en argument table une liste.\n renvoie une chaîne de caractère composé d'un tableau avec dans chaque case un élement de table.\n \"\"\"\n max = taille_plus_grande_reponse(table)\n texte = '+' + max * '-' + '+\\n'\n for i in range(len(table)):\n texte = texte + '|' + str(table[i]) + (max - len(str(table[i]))\n ) * ' ' + '|' + '\\n+' + max * '-' + '+\\n'\n return texte\n\n\n<mask token>\n\n\ndef texte_en_liste(nom_requete, repertoire):\n requete = fichier_txt_en_texte(chemin(nom_requete, repertoire))\n return requete.split()\n\n\n<mask token>\n\n\ndef creer_dictionnaire_vide():\n \"\"\"\n Ne contient aucun argument et renvoie un dictionnaire vide.\n \"\"\"\n dico = {}\n return dico\n\n\ndef nom_element_du_repertoire(repertoire):\n \"\"\"\n prend en argument le nom d'un répertoire ranger dans le dossier projetsqlKilian.\n renvoie une liste dont chaque élément est le nom d'un des fichier du repertoir.\n \"\"\"\n path = (\n 'C:\\\\Users\\\\Elève\\\\Desktop\\\\projet NSI\\\\projetsqlKilian\\\\projetsqlKilian\\\\'\n + repertoire)\n nom_requete = os.listdir(path)\n return nom_requete\n\n\ndef stocker_requete(dico, repertoire):\n \"\"\"\n prend en argument dico un dictionnaire vide et repertoire le nom du repertoir où sont sockés les requêtes.\n ne renvoie rien\n \"\"\"\n liste = nom_element_du_repertoire(repertoire)\n for i in range(len(liste)):\n requete = separer_requete_et_question(liste[i], repertoire)\n dico[i] = ['#' + str(i + 1) + ') ' + requete[0], requete[1]]\n\n\ndef afficher(dico):\n \"\"\"\n prend en argument un dictionnaire et renvoie ce disctionnaire.\n \"\"\"\n return dico\n\n\n<mask token>\n",
"step-3": "def execute(n, dico):\n \"\"\"\n Prend en argument n, la position de la requête dans le dictionaire et dico le nom du dictionnaire.\n Renvoie une liste dont chaque élément est une réponse de la requête.\n \"\"\"\n l = []\n import sqlite3\n conn = sqlite3.connect('imdb.db')\n c = conn.cursor()\n c.execute(dico[n][1])\n for row in c:\n l.append(row)\n conn.close()\n return l\n\n\ndef taille_plus_grande_reponse(reponses):\n \"\"\"\n Prend en argument une liste.\n Renvoie la taille du plus grand élément de la liste.\n \"\"\"\n l = reponses\n maxi = 0\n for i in range(len(l)):\n if len(str(l[i])) > maxi:\n maxi = len(str(l[i]))\n return maxi\n\n\n<mask token>\n\n\ndef affichage_question(dico, texte, titre='Question'):\n \"\"\"\n prend en argument dico un dictionnaire, texte une liste, et titre une chaine de caractère.\n Renvoie une page tkinter où chaque indice de la liste texte est un bouton clickable et où titre et le nom de la page.\n \"\"\"\n fenetre = tkinter.Tk()\n fenetre.title(titre)\n for i in range(len(texte)):\n bouton = {}\n bouton[i] = Button(fenetre, text=texte[i], command=lambda n=i, dico\n =dico: requete(n, dico))\n bouton[i].pack()\n fenetre.mainloop()\n\n\n<mask token>\n\n\ndef requete(n, dico):\n \"\"\"\n prend en argument n l'indice de la requête dans le dictionnaire et dico un dictionnaire.\n ne renvoie rien\n \"\"\"\n r = execute(n, dico)\n afficher_table(execute(n, dico), dico[n][0])\n\n\n<mask token>\n\n\ndef afficher_table(table, titre='', debut=0, fin=None):\n \"\"\"\n prend en argument table une liste et titre une chaine de caractère.\n ne renvoie rien.\n \"\"\"\n if titre != '':\n titre += '\\n\\n'\n affichage(titre + texte_table(table, debut, fin), titre)\n\n\ndef texte_table(table, debut=0, fin=None):\n \"\"\"\n prend en argument table une liste.\n renvoie une chaîne de caractère composé d'un tableau avec dans chaque case un élement de table.\n \"\"\"\n max = taille_plus_grande_reponse(table)\n texte = '+' + max * '-' + '+\\n'\n for i in range(len(table)):\n texte = texte + '|' + str(table[i]) + (max - len(str(table[i]))\n ) * ' ' + '|' + '\\n+' + max * '-' + '+\\n'\n return texte\n\n\n<mask token>\n\n\ndef fichier_txt_en_texte(fichier):\n \"\"\"\n prend en argument le chemin d'un fichier texte\n Renvoie le contenu du fichier texte sous forme de chaîne de caractère.\n \"\"\"\n with open(fichier, 'r') as requete:\n return requete.read()\n\n\ndef chemin(nom, repertoire):\n \"\"\"\n Prend en argument le nom du fichier où est stocké la requête et le nom du répertoire dans lequel est stocké la requête.\n Renvoie le chemin de la requête.\n \"\"\"\n return repertoire + '/' + nom\n\n\ndef texte_en_liste(nom_requete, repertoire):\n requete = fichier_txt_en_texte(chemin(nom_requete, repertoire))\n return requete.split()\n\n\n<mask token>\n\n\ndef creer_dictionnaire_vide():\n \"\"\"\n Ne contient aucun argument et renvoie un dictionnaire vide.\n \"\"\"\n dico = {}\n return dico\n\n\ndef nom_element_du_repertoire(repertoire):\n \"\"\"\n prend en argument le nom d'un répertoire ranger dans le dossier projetsqlKilian.\n renvoie une liste dont chaque élément est le nom d'un des fichier du repertoir.\n \"\"\"\n path = (\n 'C:\\\\Users\\\\Elève\\\\Desktop\\\\projet NSI\\\\projetsqlKilian\\\\projetsqlKilian\\\\'\n + repertoire)\n nom_requete = os.listdir(path)\n return nom_requete\n\n\ndef stocker_requete(dico, repertoire):\n \"\"\"\n prend en argument dico un dictionnaire vide et repertoire le nom du repertoir où sont sockés les requêtes.\n ne renvoie rien\n \"\"\"\n 
liste = nom_element_du_repertoire(repertoire)\n for i in range(len(liste)):\n requete = separer_requete_et_question(liste[i], repertoire)\n dico[i] = ['#' + str(i + 1) + ') ' + requete[0], requete[1]]\n\n\ndef afficher(dico):\n \"\"\"\n prend en argument un dictionnaire et renvoie ce disctionnaire.\n \"\"\"\n return dico\n\n\n<mask token>\n",
"step-4": "def execute(n, dico):\n \"\"\"\n Prend en argument n, la position de la requête dans le dictionaire et dico le nom du dictionnaire.\n Renvoie une liste dont chaque élément est une réponse de la requête.\n \"\"\"\n l = []\n import sqlite3\n conn = sqlite3.connect('imdb.db')\n c = conn.cursor()\n c.execute(dico[n][1])\n for row in c:\n l.append(row)\n conn.close()\n return l\n\n\ndef taille_plus_grande_reponse(reponses):\n \"\"\"\n Prend en argument une liste.\n Renvoie la taille du plus grand élément de la liste.\n \"\"\"\n l = reponses\n maxi = 0\n for i in range(len(l)):\n if len(str(l[i])) > maxi:\n maxi = len(str(l[i]))\n return maxi\n\n\n<mask token>\n\n\ndef affichage_question(dico, texte, titre='Question'):\n \"\"\"\n prend en argument dico un dictionnaire, texte une liste, et titre une chaine de caractère.\n Renvoie une page tkinter où chaque indice de la liste texte est un bouton clickable et où titre et le nom de la page.\n \"\"\"\n fenetre = tkinter.Tk()\n fenetre.title(titre)\n for i in range(len(texte)):\n bouton = {}\n bouton[i] = Button(fenetre, text=texte[i], command=lambda n=i, dico\n =dico: requete(n, dico))\n bouton[i].pack()\n fenetre.mainloop()\n\n\n<mask token>\n\n\ndef requete(n, dico):\n \"\"\"\n prend en argument n l'indice de la requête dans le dictionnaire et dico un dictionnaire.\n ne renvoie rien\n \"\"\"\n r = execute(n, dico)\n afficher_table(execute(n, dico), dico[n][0])\n\n\n<mask token>\n\n\ndef afficher_table(table, titre='', debut=0, fin=None):\n \"\"\"\n prend en argument table une liste et titre une chaine de caractère.\n ne renvoie rien.\n \"\"\"\n if titre != '':\n titre += '\\n\\n'\n affichage(titre + texte_table(table, debut, fin), titre)\n\n\ndef texte_table(table, debut=0, fin=None):\n \"\"\"\n prend en argument table une liste.\n renvoie une chaîne de caractère composé d'un tableau avec dans chaque case un élement de table.\n \"\"\"\n max = taille_plus_grande_reponse(table)\n texte = '+' + max * '-' + '+\\n'\n for i in range(len(table)):\n texte = texte + '|' + str(table[i]) + (max - len(str(table[i]))\n ) * ' ' + '|' + '\\n+' + max * '-' + '+\\n'\n return texte\n\n\n<mask token>\n\n\ndef fichier_txt_en_texte(fichier):\n \"\"\"\n prend en argument le chemin d'un fichier texte\n Renvoie le contenu du fichier texte sous forme de chaîne de caractère.\n \"\"\"\n with open(fichier, 'r') as requete:\n return requete.read()\n\n\ndef chemin(nom, repertoire):\n \"\"\"\n Prend en argument le nom du fichier où est stocké la requête et le nom du répertoire dans lequel est stocké la requête.\n Renvoie le chemin de la requête.\n \"\"\"\n return repertoire + '/' + nom\n\n\ndef texte_en_liste(nom_requete, repertoire):\n requete = fichier_txt_en_texte(chemin(nom_requete, repertoire))\n return requete.split()\n\n\ndef liste_en_texte(liste):\n \"\"\"\n prend en argument une liste et un indice et renvoie la même liste mais l'élement d'indice 'n' est transformé en texte.\n \"\"\"\n texte = ''\n for i in range(len(liste)):\n texte = texte + str(liste[i]) + ' '\n return texte\n\n\n<mask token>\n\n\ndef creer_dictionnaire_vide():\n \"\"\"\n Ne contient aucun argument et renvoie un dictionnaire vide.\n \"\"\"\n dico = {}\n return dico\n\n\ndef nom_element_du_repertoire(repertoire):\n \"\"\"\n prend en argument le nom d'un répertoire ranger dans le dossier projetsqlKilian.\n renvoie une liste dont chaque élément est le nom d'un des fichier du repertoir.\n \"\"\"\n path = (\n 'C:\\\\Users\\\\Elève\\\\Desktop\\\\projet 
NSI\\\\projetsqlKilian\\\\projetsqlKilian\\\\'\n + repertoire)\n nom_requete = os.listdir(path)\n return nom_requete\n\n\ndef stocker_requete(dico, repertoire):\n \"\"\"\n prend en argument dico un dictionnaire vide et repertoire le nom du repertoir où sont sockés les requêtes.\n ne renvoie rien\n \"\"\"\n liste = nom_element_du_repertoire(repertoire)\n for i in range(len(liste)):\n requete = separer_requete_et_question(liste[i], repertoire)\n dico[i] = ['#' + str(i + 1) + ') ' + requete[0], requete[1]]\n\n\ndef afficher(dico):\n \"\"\"\n prend en argument un dictionnaire et renvoie ce disctionnaire.\n \"\"\"\n return dico\n\n\n<mask token>\n",
"step-5": "def execute(n,dico):\n \"\"\"\n Prend en argument n, la position de la requête dans le dictionaire et dico le nom du dictionnaire.\n Renvoie une liste dont chaque élément est une réponse de la requête.\n \"\"\"\n l = []\n import sqlite3\n conn = sqlite3.connect('imdb.db')\n c = conn.cursor()\n c.execute(dico[n][1])\n for row in c:\n l.append(row)\n conn.close()\n return l\n\ndef taille_plus_grande_reponse(reponses):\n \"\"\"\n Prend en argument une liste.\n Renvoie la taille du plus grand élément de la liste.\n \"\"\"\n l = reponses\n maxi = 0\n for i in range(len(l)):\n if len(str(l[i])) > maxi:\n maxi = len(str(l[i]))\n return maxi\n\n\"\"\"affichage question\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nfrom tkinter import *\n\ndef question(dico):\n \"\"\"\n prend en argument un disctionnaire.\n Ne renvoie rien.\n \"\"\"\n l = []\n for i in range(len(dico)):\n l.append(dico[i][0])\n affichage_question(dico,l)\n\ndef affichage_question(dico, texte, titre = \"Question\"):\n \"\"\"\n prend en argument dico un dictionnaire, texte une liste, et titre une chaine de caractère.\n Renvoie une page tkinter où chaque indice de la liste texte est un bouton clickable et où titre et le nom de la page.\n \"\"\"\n fenetre = tkinter.Tk()\n fenetre.title(titre)\n for i in range(len(texte)):\n bouton={}\n bouton[i]=Button(fenetre, text=texte[i], command=lambda n=i, dico=dico:requete(n,dico))\n bouton[i].pack()\n\n\n fenetre.mainloop()\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\ndef requete(n,dico):\n \"\"\"\n prend en argument n l'indice de la requête dans le dictionnaire et dico un dictionnaire.\n ne renvoie rien\n \"\"\"\n r = execute(n,dico)\n afficher_table(execute(n,dico),dico[n][0])\n\nimport tkinter\nimport os\n\ndef afficher_table(table, titre =\"\", debut = 0, fin = None):\n \"\"\"\n prend en argument table une liste et titre une chaine de caractère.\n ne renvoie rien.\n \"\"\"\n if titre != \"\":\n\t titre += \"\\n\\n\"\n\t#print(titre + texte_table(table, debut, fin))\n affichage(titre + texte_table(table, debut, fin), titre)\n \ndef texte_table(table, debut = 0, fin = None):\n \"\"\"\n prend en argument table une liste.\n renvoie une chaîne de caractère composé d'un tableau avec dans chaque case un élement de table.\n \"\"\"\n max = taille_plus_grande_reponse(table)\n texte = '+' + max * '-' + '+\\n'\n for i in range(len(table)):\n texte = texte + '|' + str(table[i]) + (max - len(str(table[i]))) * ' ' + '|' + '\\n+' + max * '-' + '+\\n'\n return texte\n\ndef affichage(texte, titre = \"Requêtes tables\"):\n \"\"\"\n prend en argument texte une chaîne de caractère et titre une chaine de caractère\n renvoie une fenêtre tkinter\n \"\"\"\n root = tkinter.Tk()\n root.title(str(titre))\n RWidth=root.winfo_screenwidth() - 100\n RHeight=root.winfo_screenheight() - 100\n root.geometry(\"%dx%d+50+0\"%(RWidth, RHeight))\n text=tkinter.Text(root, wrap = 'none')\n scroll_x=tkinter.Scrollbar(text.master, orient='horizontal', command = text.xview)\n scroll_x.config(command = text.xview)\n text.configure(xscrollcommand = scroll_x.set)\n scroll_x.pack(side = 'bottom', fill = 'x', anchor = 'w')\n scroll_y = tkinter.Scrollbar(text.master)\n scroll_y.config(command = text.yview)\n text.configure(yscrollcommand = scroll_y.set)\n scroll_y.pack(side = tkinter.RIGHT, fill = 'y')\n text.insert(\"1.0\", texte)\n text.pack(side = tkinter.LEFT, expand = True, fill = tkinter.BOTH)\n root.mainloop()\n\ndef fichier_txt_en_texte(fichier):\n \"\"\"\n 
prend en argument le chemin d'un fichier texte\n Renvoie le contenu du fichier texte sous forme de chaîne de caractère.\n \"\"\"\n with open(fichier, \"r\") as requete:\n return requete.read()\n\ndef chemin(nom, repertoire):\n \"\"\"\n Prend en argument le nom du fichier où est stocké la requête et le nom du répertoire dans lequel est stocké la requête.\n Renvoie le chemin de la requête.\n \"\"\"\n return repertoire + '/' + nom\n\ndef texte_en_liste(nom_requete, repertoire):\n requete = fichier_txt_en_texte(chemin(nom_requete, repertoire))\n return requete.split()\n\ndef liste_en_texte(liste):\n \"\"\"\n prend en argument une liste et un indice et renvoie la même liste mais l'élement d'indice 'n' est transformé en texte.\n \"\"\"\n texte = \"\"\n for i in range(len(liste)):\n texte = texte + str(liste[i]) + \" \"\n return texte\n \ndef separer_requete_et_question(nom, repertoire):\n \"\"\"\n prend en argument le numéro de la requête et renvoie la question et la requête sésparé.\n \"\"\"\n requete = texte_en_liste(nom, repertoire) #transforme la requête en tableau\n question = \"\"\n for i in range(len(requete)): #cherche le moment où la question s'arrête et sépare la question de la requête\n if requete[i] == \"?\":\n question = requete[0:i+1] #stock la question\n requete = requete[i+1:len(requete)] #stock la réponse\n break #stop la boucle quand la \"?\" est trouvé\n return [liste_en_texte(question),liste_en_texte(requete)]\n\ndef creer_dictionnaire_vide():\n \"\"\"\n Ne contient aucun argument et renvoie un dictionnaire vide.\n \"\"\"\n dico = {}\n return dico\n\ndef nom_element_du_repertoire(repertoire):\n \"\"\"\n prend en argument le nom d'un répertoire ranger dans le dossier projetsqlKilian.\n renvoie une liste dont chaque élément est le nom d'un des fichier du repertoir.\n \"\"\"\n path = \"C:\\\\Users\\\\Elève\\\\Desktop\\\\projet NSI\\\\projetsqlKilian\\\\projetsqlKilian\\\\\" + repertoire\n nom_requete = os.listdir(path) \n return nom_requete\n\ndef stocker_requete(dico, repertoire):\n \"\"\"\n prend en argument dico un dictionnaire vide et repertoire le nom du repertoir où sont sockés les requêtes.\n ne renvoie rien\n \"\"\"\n liste = nom_element_du_repertoire(repertoire)\n for i in range(len(liste)):\n requete = separer_requete_et_question(liste[i], repertoire)\n dico[i] = ['#' + str(i+1) + ') ' + requete[0], requete[1]]\n \n \ndef afficher(dico):\n \"\"\"\n prend en argument un dictionnaire et renvoie ce disctionnaire.\n \"\"\"\n return dico\n\na = creer_dictionnaire_vide()\nstocker_requete(a,'requête')\n#print(afficher(a))\nquestion(a)\n#print(nom_element_du_repertoire('requête'))\n#requete(a)\n#print(execute(1,a))\n#print(taille_plus_grande_reponse(execute(1,a)))",
"step-ids": [
8,
11,
13,
14,
21
]
}
|
[
8,
11,
13,
14,
21
] |
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import func
from extensions import bcrypt
db = SQLAlchemy()
class User(db.Model):
id = db.Column(db.Integer(), primary_key=True)
username = db.Column(db.String(255))
password = db.Column(db.String(255))
posts = db.relationship('Post', backref='user', lazy='dynamic')
def __init__(self, username):
self.username = username
def set_password(self, password):
self.password = bcrypt.generate_password_hash(password)
def check_password(self, password):
return bcrypt.check_password_hash(self.password, password)
def __repr__(self):
return '<User ' + self.username + '>'
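# association table for the many-to-many relation between Post and Tag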
tags = db.Table('post_tags', db.Column('post_id', db.Integer(), db.
ForeignKey('post.id')), db.Column('tag_id', db.Integer, db.ForeignKey(
'tag.id')))
class Post(db.Model):
id = db.Column(db.Integer(), primary_key=True)
title = db.Column(db.String(255))
text = db.Column(db.Text())
date = db.Column(db.DateTime())
user_id = db.Column(db.Integer(), db.ForeignKey('user.id'))
comments = db.relationship('Comment', backref='post', lazy='dynamic')
tags = db.relationship('Tag', secondary=tags, backref=db.backref(
'posts', lazy='dynamic'))
def __init__(self, title):
self.title = title
def __repr__(self):
return '<Post ' + self.title + '>'
class Comment(db.Model):
id = db.Column(db.Integer(), primary_key=True)
title = db.Column(db.String(255))
text = db.Column(db.Text())
date = db.Column(db.DateTime())
post_id = db.Column(db.Integer(), db.ForeignKey('post.id'))
def __init__(self, title):
self.title = title
def __repr__(self):
return '<Comment ' + self.title + '>'
class Tag(db.Model):
id = db.Column(db.Integer(), primary_key=True)
title = db.Column(db.String(255))
def __init__(self, title):
self.title = title
def __repr__(self):
return '<Tag ' + self.title + '>'
def sidebar_data():
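    # the five most recent posts and the five most-used tags, for the blog sidebar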
recent = Post.query.order_by(Post.date.desc()).limit(5).all()
top_tags = db.session.query(Tag, func.count(tags.c.post_id).label('total')
).join(tags).group_by(Tag).order_by('total DESC').limit(5).all()
return recent, top_tags
|
normal
|
{
"blob_id": "dd0e96a1f93cbffedc11262a883dda285f5c224c",
"index": 9703,
"step-1": "<mask token>\n\n\nclass Post(db.Model):\n id = db.Column(db.Integer(), primary_key=True)\n title = db.Column(db.String(255))\n text = db.Column(db.Text())\n date = db.Column(db.DateTime())\n user_id = db.Column(db.Integer(), db.ForeignKey('user.id'))\n comments = db.relationship('Comment', backref='post', lazy='dynamic')\n tags = db.relationship('Tag', secondary=tags, backref=db.backref(\n 'posts', lazy='dynamic'))\n\n def __init__(self, title):\n self.title = title\n\n def __repr__(self):\n return '<Post ' + self.title + '>'\n\n\nclass Comment(db.Model):\n id = db.Column(db.Integer(), primary_key=True)\n title = db.Column(db.String(255))\n text = db.Column(db.Text())\n date = db.Column(db.DateTime())\n post_id = db.Column(db.Integer(), db.ForeignKey('post.id'))\n\n def __init__(self, title):\n self.title = title\n\n def __repr__(self):\n return '<Comment ' + self.title + '>'\n\n\nclass Tag(db.Model):\n id = db.Column(db.Integer(), primary_key=True)\n title = db.Column(db.String(255))\n\n def __init__(self, title):\n self.title = title\n\n def __repr__(self):\n return '<Tag ' + self.title + '>'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass User(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, username):\n self.username = username\n\n def set_password(self, password):\n self.password = bcrypt.generate_password_hash(password)\n\n def check_password(self, password):\n return bcrypt.check_password_hash(self.password, password)\n\n def __repr__(self):\n return '<User ' + self.username + '>'\n\n\n<mask token>\n\n\nclass Post(db.Model):\n id = db.Column(db.Integer(), primary_key=True)\n title = db.Column(db.String(255))\n text = db.Column(db.Text())\n date = db.Column(db.DateTime())\n user_id = db.Column(db.Integer(), db.ForeignKey('user.id'))\n comments = db.relationship('Comment', backref='post', lazy='dynamic')\n tags = db.relationship('Tag', secondary=tags, backref=db.backref(\n 'posts', lazy='dynamic'))\n\n def __init__(self, title):\n self.title = title\n\n def __repr__(self):\n return '<Post ' + self.title + '>'\n\n\nclass Comment(db.Model):\n id = db.Column(db.Integer(), primary_key=True)\n title = db.Column(db.String(255))\n text = db.Column(db.Text())\n date = db.Column(db.DateTime())\n post_id = db.Column(db.Integer(), db.ForeignKey('post.id'))\n\n def __init__(self, title):\n self.title = title\n\n def __repr__(self):\n return '<Comment ' + self.title + '>'\n\n\nclass Tag(db.Model):\n id = db.Column(db.Integer(), primary_key=True)\n title = db.Column(db.String(255))\n\n def __init__(self, title):\n self.title = title\n\n def __repr__(self):\n return '<Tag ' + self.title + '>'\n\n\n<mask token>\n",
"step-3": "<mask token>\ndb = SQLAlchemy()\n\n\nclass User(db.Model):\n id = db.Column(db.Integer(), primary_key=True)\n username = db.Column(db.String(255))\n password = db.Column(db.String(255))\n posts = db.relationship('Post', backref='user', lazy='dynamic')\n\n def __init__(self, username):\n self.username = username\n\n def set_password(self, password):\n self.password = bcrypt.generate_password_hash(password)\n\n def check_password(self, password):\n return bcrypt.check_password_hash(self.password, password)\n\n def __repr__(self):\n return '<User ' + self.username + '>'\n\n\ntags = db.Table('post_tags', db.Column('post_id', db.Integer(), db.\n ForeignKey('post.id')), db.Column('tag_id', db.Integer, db.ForeignKey(\n 'tag.id')))\n\n\nclass Post(db.Model):\n id = db.Column(db.Integer(), primary_key=True)\n title = db.Column(db.String(255))\n text = db.Column(db.Text())\n date = db.Column(db.DateTime())\n user_id = db.Column(db.Integer(), db.ForeignKey('user.id'))\n comments = db.relationship('Comment', backref='post', lazy='dynamic')\n tags = db.relationship('Tag', secondary=tags, backref=db.backref(\n 'posts', lazy='dynamic'))\n\n def __init__(self, title):\n self.title = title\n\n def __repr__(self):\n return '<Post ' + self.title + '>'\n\n\nclass Comment(db.Model):\n id = db.Column(db.Integer(), primary_key=True)\n title = db.Column(db.String(255))\n text = db.Column(db.Text())\n date = db.Column(db.DateTime())\n post_id = db.Column(db.Integer(), db.ForeignKey('post.id'))\n\n def __init__(self, title):\n self.title = title\n\n def __repr__(self):\n return '<Comment ' + self.title + '>'\n\n\nclass Tag(db.Model):\n id = db.Column(db.Integer(), primary_key=True)\n title = db.Column(db.String(255))\n\n def __init__(self, title):\n self.title = title\n\n def __repr__(self):\n return '<Tag ' + self.title + '>'\n\n\ndef sidebar_data():\n recent = Post.query.order_by(Post.date.desc()).limit(5).all()\n top_tags = db.session.query(Tag, func.count(tags.c.post_id).label('total')\n ).join(tags).group_by(Tag).order_by('total DESC').limit(5).all()\n return recent, top_tags\n",
"step-4": "from flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import func\nfrom extensions import bcrypt\ndb = SQLAlchemy()\n\n\nclass User(db.Model):\n id = db.Column(db.Integer(), primary_key=True)\n username = db.Column(db.String(255))\n password = db.Column(db.String(255))\n posts = db.relationship('Post', backref='user', lazy='dynamic')\n\n def __init__(self, username):\n self.username = username\n\n def set_password(self, password):\n self.password = bcrypt.generate_password_hash(password)\n\n def check_password(self, password):\n return bcrypt.check_password_hash(self.password, password)\n\n def __repr__(self):\n return '<User ' + self.username + '>'\n\n\ntags = db.Table('post_tags', db.Column('post_id', db.Integer(), db.\n ForeignKey('post.id')), db.Column('tag_id', db.Integer, db.ForeignKey(\n 'tag.id')))\n\n\nclass Post(db.Model):\n id = db.Column(db.Integer(), primary_key=True)\n title = db.Column(db.String(255))\n text = db.Column(db.Text())\n date = db.Column(db.DateTime())\n user_id = db.Column(db.Integer(), db.ForeignKey('user.id'))\n comments = db.relationship('Comment', backref='post', lazy='dynamic')\n tags = db.relationship('Tag', secondary=tags, backref=db.backref(\n 'posts', lazy='dynamic'))\n\n def __init__(self, title):\n self.title = title\n\n def __repr__(self):\n return '<Post ' + self.title + '>'\n\n\nclass Comment(db.Model):\n id = db.Column(db.Integer(), primary_key=True)\n title = db.Column(db.String(255))\n text = db.Column(db.Text())\n date = db.Column(db.DateTime())\n post_id = db.Column(db.Integer(), db.ForeignKey('post.id'))\n\n def __init__(self, title):\n self.title = title\n\n def __repr__(self):\n return '<Comment ' + self.title + '>'\n\n\nclass Tag(db.Model):\n id = db.Column(db.Integer(), primary_key=True)\n title = db.Column(db.String(255))\n\n def __init__(self, title):\n self.title = title\n\n def __repr__(self):\n return '<Tag ' + self.title + '>'\n\n\ndef sidebar_data():\n recent = Post.query.order_by(Post.date.desc()).limit(5).all()\n top_tags = db.session.query(Tag, func.count(tags.c.post_id).label('total')\n ).join(tags).group_by(Tag).order_by('total DESC').limit(5).all()\n return recent, top_tags\n",
"step-5": null,
"step-ids": [
12,
17,
20,
21
]
}
|
[
12,
17,
20,
21
] |
from django.db import models
from datetime import datetime
class Folder(models.Model):
	folder = models.CharField(max_length=200, default="misc")
num_of_entries = models.IntegerField(default=0)
def __str__(self):
return self.folder
class Meta:
verbose_name_plural = "Folders/Categories"
class Bookmark(models.Model):
name = models.CharField(max_length=200)
url = models.CharField(max_length=400)
folder = models.ForeignKey(Folder, on_delete=models.CASCADE)
	date_of_creation = models.DateTimeField(default=datetime.now)  # pass the callable so the timestamp is taken at save time, not at import time
notes = models.TextField()
def __str__(self):
return self.name
|
normal
|
{
"blob_id": "ca3cdbd5d5d30be4f40925366994c3ea9d9b9614",
"index": 3195,
"step-1": "<mask token>\n\n\nclass Folder(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name_plural = 'Folders/Categories'\n\n\nclass Bookmark(models.Model):\n name = models.CharField(max_length=200)\n url = models.CharField(max_length=400)\n folder = models.ForeignKey(Folder, on_delete=models.CASCADE)\n date_of_creation = models.DateTimeField(default=datetime.now())\n notes = models.TextField()\n\n def __str__(self):\n return self.name\n",
"step-2": "<mask token>\n\n\nclass Folder(models.Model):\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.folder\n\n\n class Meta:\n verbose_name_plural = 'Folders/Categories'\n\n\nclass Bookmark(models.Model):\n name = models.CharField(max_length=200)\n url = models.CharField(max_length=400)\n folder = models.ForeignKey(Folder, on_delete=models.CASCADE)\n date_of_creation = models.DateTimeField(default=datetime.now())\n notes = models.TextField()\n\n def __str__(self):\n return self.name\n",
"step-3": "<mask token>\n\n\nclass Folder(models.Model):\n folder = models.CharField(max_length=200, default='misc')\n num_of_entries = models.IntegerField(default=0)\n\n def __str__(self):\n return self.folder\n\n\n class Meta:\n verbose_name_plural = 'Folders/Categories'\n\n\nclass Bookmark(models.Model):\n name = models.CharField(max_length=200)\n url = models.CharField(max_length=400)\n folder = models.ForeignKey(Folder, on_delete=models.CASCADE)\n date_of_creation = models.DateTimeField(default=datetime.now())\n notes = models.TextField()\n\n def __str__(self):\n return self.name\n",
"step-4": "from django.db import models\nfrom datetime import datetime\n\n\nclass Folder(models.Model):\n folder = models.CharField(max_length=200, default='misc')\n num_of_entries = models.IntegerField(default=0)\n\n def __str__(self):\n return self.folder\n\n\n class Meta:\n verbose_name_plural = 'Folders/Categories'\n\n\nclass Bookmark(models.Model):\n name = models.CharField(max_length=200)\n url = models.CharField(max_length=400)\n folder = models.ForeignKey(Folder, on_delete=models.CASCADE)\n date_of_creation = models.DateTimeField(default=datetime.now())\n notes = models.TextField()\n\n def __str__(self):\n return self.name\n",
"step-5": "from django.db import models\nfrom datetime import datetime\n\nclass Folder(models.Model):\n\tfolder = models.CharField(max_length=200, default = \"misc\")\n\tnum_of_entries = models.IntegerField(default=0)\n\n\tdef __str__(self):\n\t\treturn self.folder\n\n\tclass Meta:\n\t\tverbose_name_plural = \"Folders/Categories\"\n\nclass Bookmark(models.Model):\n\tname = models.CharField(max_length=200)\n\turl = models.CharField(max_length=400)\n\tfolder = models.ForeignKey(Folder, on_delete=models.CASCADE)\n\tdate_of_creation = models.DateTimeField(default=datetime.now())\n\tnotes = models.TextField()\n\n\tdef __str__(self):\n\t\treturn self.name\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import os
from PIL import Image
import urllib
import json
import math
def download_images(a,b):
image_count = 0
k = a
no_of_images = b
baseURL='https://graph.facebook.com/v2.2/'
imgURL='/picture?type=large'
sil_check='/picture?redirect=false'
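	# redirect=false makes the Graph API return JSON metadata (including is_silhouette) instead of the image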
while image_count<no_of_images:
obj=urllib.urlopen(baseURL+str(k)+sil_check)
data=obj.read()
jsondata=json.loads(data)
if not jsondata['data']['is_silhouette']:
img=urllib.urlopen(baseURL+str(k)+imgURL)
image=img.read()
f=open(str(k)+'.jpg','wb')
f.write(image)
f.close()
print 'Image written to '+str(k)+'.jpg'
image_count+=1
else:
print str(k)+' is Silhouette.'
k+=1
def resize_images():
files=[f for f in os.listdir('.') if os.path.isfile(f) and '.jpg' in f]
print 'Resizing images ...'
for i in files:
img=Image.open(i)
j = i.replace('jpg','png')
img.resize((100,100)).save(j)
img.close()
os.remove(i)
def create_mosaic(b):
files=[f for f in os.listdir('.') if os.path.isfile(f) and '.png' in f]
no_of_images = b
N = int(math.sqrt(no_of_images))
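	# tiles are laid out on an N x N grid, each tile being 100 px square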
mosaic=Image.new('RGB',(N*100,N*100))
mpixels=mosaic.load()
mX,mY = 0,0
counter=0
print 'Combining images ...'
for img in files:
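		# top-left pixel of this tile within the mosaic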
mX = (counter%N)*100
mY = (counter/N)*100
image=Image.open(img)
pixels=image.load()
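		# copy the 100x100 tile pixel by pixel into its slot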
for iY in range(100):
mX = (counter%N)*100
for iX in range(100):
try:
mpixels[mX,mY] = pixels[iX,iY]
except:
print mX,mY
mX+=1
mY+=1
counter+=1
image.close()
os.remove(img)
mosaic.save('mosaic.png')
a = int(raw_input('Enter the fb-id from where to begin:'))
b = int(raw_input('Enter the number of images to download (a square):'))
download_images(a,b)
resize_images()
create_mosaic(b)
|
normal
|
{
"blob_id": "533154fe58511ac9c9c693bf07f076146b0c6136",
"index": 4445,
"step-1": "import os\nfrom PIL import Image\nimport urllib\nimport json\nimport math\n\ndef download_images(a,b):\n\timage_count = 0\n\tk = a\n\tno_of_images = b\n\tbaseURL='https://graph.facebook.com/v2.2/'\n\timgURL='/picture?type=large'\n\tsil_check='/picture?redirect=false'\n\twhile image_count<no_of_images:\n\t\tobj=urllib.urlopen(baseURL+str(k)+sil_check)\n\t\tdata=obj.read()\n\t\tjsondata=json.loads(data)\n\t\tif not jsondata['data']['is_silhouette']:\n\t\t\timg=urllib.urlopen(baseURL+str(k)+imgURL)\n\t\t\timage=img.read()\n\t\t\tf=open(str(k)+'.jpg','wb')\n\t\t\tf.write(image)\n\t\t\tf.close()\n\t\t\tprint 'Image written to '+str(k)+'.jpg'\n\t\t\timage_count+=1\n\t\telse:\n\t\t\tprint str(k)+' is Silhouette.'\n\t\tk+=1\ndef resize_images():\n\tfiles=[f for f in os.listdir('.') if os.path.isfile(f) and '.jpg' in f]\n\tprint 'Resizing images ...'\n\tfor i in files:\n\t\timg=Image.open(i)\n\t\tj = i.replace('jpg','png')\n\t\timg.resize((100,100)).save(j)\n\t\timg.close()\n\t\tos.remove(i)\ndef create_mosaic(b):\n\tfiles=[f for f in os.listdir('.') if os.path.isfile(f) and '.png' in f]\n\tno_of_images = b\n\tN = int(math.sqrt(no_of_images))\n\tmosaic=Image.new('RGB',(N*100,N*100))\n\tmpixels=mosaic.load()\n\tmX,mY = 0,0\n\tcounter=0\n\tprint 'Combining images ...'\n\tfor img in files:\n\t\tmX = (counter%N)*100\n\t\tmY = (counter/N)*100\n\t\timage=Image.open(img)\n\t\tpixels=image.load()\n\t\tfor iY in range(100):\n\t\t\tmX = (counter%N)*100\n\t\t\tfor iX in range(100):\n\t\t\t\ttry:\n\t\t\t\t\tmpixels[mX,mY] = pixels[iX,iY]\n\t\t\t\texcept:\n\t\t\t\t\tprint mX,mY\n\t\t\t\tmX+=1\n\t\t\tmY+=1\n\t\tcounter+=1\n\t\timage.close()\n\t\tos.remove(img)\n\tmosaic.save('mosaic.png')\n\na = int(raw_input('Enter the fb-id from where to begin:'))\nb = int(raw_input('Enter the number of images to download (a square):'))\ndownload_images(a,b)\nresize_images()\ncreate_mosaic(b)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
prompt = "Enter a message and I will repeat it to you: "
message = " "
while message != 'quit':
message = input(prompt)
if message != 'quit':
print(message)
# using the 'flag' variable
prompt = "Enter a message and I will repeat it to you: "
# active is the variable used in this case as flag
active = True
while active:
message = input(prompt)
if message == 'quit':
active = False
else:
print(message)
|
normal
|
{
"blob_id": "1a6f84835ec2f5fbbb064aef2cd872c24eb3839d",
"index": 8717,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile message != 'quit':\n message = input(prompt)\n if message != 'quit':\n print(message)\n<mask token>\nwhile active:\n message = input(prompt)\n if message == 'quit':\n active = False\n else:\n print(message)\n",
"step-3": "prompt = 'Enter a message and I will repeat it to you: '\nmessage = ' '\nwhile message != 'quit':\n message = input(prompt)\n if message != 'quit':\n print(message)\nprompt = 'Enter a message and I will repeat it to you: '\nactive = True\nwhile active:\n message = input(prompt)\n if message == 'quit':\n active = False\n else:\n print(message)\n",
"step-4": "prompt = \"Enter a message and I will repeat it to you: \"\n\nmessage = \" \"\n\nwhile message != 'quit':\n message = input(prompt)\n if message != 'quit':\n print(message)\n\n# using the 'flag' variable\n\nprompt = \"Enter a message and I will repeat it to you: \"\n\n# active is the variable used in this case as flag\n\nactive = True\n\nwhile active:\n message = input(prompt)\n \n if message == 'quit':\n active = False\n else:\n print(message)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.db import models
from django.utils.text import slugify
# Create your models here.
class SponsorType(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Sponsor(models.Model):
type = models.ForeignKey(SponsorType, on_delete=models.CASCADE, null=True)
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=200, default='')
    image = models.ImageField(upload_to="images", default="default-image.png", blank=True, null=True)
slug = models.SlugField(max_length=200, blank=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
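        # regenerate the slug from the name on every save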
self.slug = slugify(self.name)
super().save(*args,**kwargs)
class Meta:
verbose_name_plural = 'sponsors'
|
normal
|
{
"blob_id": "81f0119f6f348f6d33e8d22f588fc8c2e0593d3c",
"index": 1536,
"step-1": "<mask token>\n\n\nclass SponsorType(models.Model):\n <mask token>\n <mask token>\n\n\nclass Sponsor(models.Model):\n type = models.ForeignKey(SponsorType, on_delete=models.CASCADE, null=True)\n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=200, default='')\n image = models.ImageField(upload_to='images', default=\n 'default-image.png', blank=True, null=True)\n slug = models.SlugField(max_length=200, blank=True)\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n self.slug = slugify(self.name)\n super().save(*args, **kwargs)\n\n\n class Meta:\n verbose_name_plural = 'sponsors'\n",
"step-2": "<mask token>\n\n\nclass SponsorType(models.Model):\n <mask token>\n\n def __str__(self):\n return self.name\n\n\nclass Sponsor(models.Model):\n type = models.ForeignKey(SponsorType, on_delete=models.CASCADE, null=True)\n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=200, default='')\n image = models.ImageField(upload_to='images', default=\n 'default-image.png', blank=True, null=True)\n slug = models.SlugField(max_length=200, blank=True)\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n self.slug = slugify(self.name)\n super().save(*args, **kwargs)\n\n\n class Meta:\n verbose_name_plural = 'sponsors'\n",
"step-3": "<mask token>\n\n\nclass SponsorType(models.Model):\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return self.name\n\n\nclass Sponsor(models.Model):\n type = models.ForeignKey(SponsorType, on_delete=models.CASCADE, null=True)\n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=200, default='')\n image = models.ImageField(upload_to='images', default=\n 'default-image.png', blank=True, null=True)\n slug = models.SlugField(max_length=200, blank=True)\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n self.slug = slugify(self.name)\n super().save(*args, **kwargs)\n\n\n class Meta:\n verbose_name_plural = 'sponsors'\n",
"step-4": "from django.db import models\nfrom django.utils.text import slugify\n\n\nclass SponsorType(models.Model):\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return self.name\n\n\nclass Sponsor(models.Model):\n type = models.ForeignKey(SponsorType, on_delete=models.CASCADE, null=True)\n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=200, default='')\n image = models.ImageField(upload_to='images', default=\n 'default-image.png', blank=True, null=True)\n slug = models.SlugField(max_length=200, blank=True)\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n self.slug = slugify(self.name)\n super().save(*args, **kwargs)\n\n\n class Meta:\n verbose_name_plural = 'sponsors'\n",
"step-5": "from django.db import models\nfrom django.utils.text import slugify\n# Create your models here.\n\nclass SponsorType(models.Model):\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return self.name\n\n\nclass Sponsor(models.Model):\n type = models.ForeignKey(SponsorType, on_delete=models.CASCADE, null=True)\n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=200, default='')\n image = models.ImageField(upload_to=\"images\",default=\"default-image.png\",blank=True,null=True)\n slug = models.SlugField(max_length=200, blank=True)\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n self.slug = slugify(self.name)\n super().save(*args,**kwargs)\n \n class Meta:\n verbose_name_plural = 'sponsors'\n \n\n\n \n\n\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
'''
Created on Feb 21, 2013
@author: dharadarji
'''
def get_row(row_index):
entry = [1]
if row_index == 0:
return entry
tmp = []
for i in range(1, row_index + 2):
tmp = entry
print "i: ", i, "tmp: ", tmp
entry = []
entry.append(1)
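        # interior entries: each is the sum of the two entries above it (Pascal's rule)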
for j in range(1, i-1):
print "j: ", j, "tmp[j]: ", tmp[0]
entry.append(tmp[j-1] + tmp[j])
entry.append(1)
print "entry: ", entry
print entry
get_row(3)
|
normal
|
{
"blob_id": "2579b0c31c5f7cad361ed317f87cb8b0ffcb0098",
"index": 875,
"step-1": "'''\nCreated on Feb 21, 2013\n\n@author: dharadarji\n'''\n\ndef get_row(row_index):\n entry = [1]\n \n if row_index == 0:\n return entry\n \n tmp = []\n \n for i in range(1, row_index + 2):\n tmp = entry\n print \"i: \", i, \"tmp: \", tmp\n\n entry = []\n entry.append(1)\n \n for j in range(1, i-1):\n print \"j: \", j, \"tmp[j]: \", tmp[0]\n entry.append(tmp[j-1] + tmp[j])\n \n entry.append(1)\n print \"entry: \", entry\n print entry\n \nget_row(3)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |